Diffstat (limited to 'plumbing')
-rw-r--r--   plumbing/format/idxfile/idxfile.go  | 30
-rw-r--r--   plumbing/format/index/decoder.go    | 37
-rw-r--r--   plumbing/object/commit.go           |  4
-rw-r--r--   plumbing/object/common.go           | 12
-rw-r--r--   plumbing/object/tag.go              |  4
-rw-r--r--   plumbing/object/tree.go             | 23
6 files changed, 79 insertions, 31 deletions
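The idxfile.go hunks below change FindOffset so that every successful lookup also records the hash under its offset in offsetHash, and FindHash consults that partially filled map before paying for genOffsetHash; the new offsetHashIsFull flag marks when the full reverse map has actually been built. A minimal, self-contained sketch of that memoization pattern follows, using toy types and string "hashes" rather than the real go-git MemoryIndex:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// index is a toy stand-in for idxfile.MemoryIndex: entries is the
// authoritative hash->offset table, offsetHash is the lazily filled reverse
// map, and offsetHashIsFull records whether it covers every entry.
type index struct {
	entries          map[string]int64
	offsetHash       map[int64]string
	offsetHashIsFull bool
}

// FindOffset answers from the forward table and, as a side effect, memoizes
// the reverse mapping so a later FindHash for the same object is a map hit.
func (idx *index) FindOffset(h string) (int64, error) {
	o, ok := idx.entries[h]
	if !ok {
		return 0, errNotFound
	}
	if !idx.offsetHashIsFull {
		if idx.offsetHash == nil {
			idx.offsetHash = make(map[int64]string)
		}
		idx.offsetHash[o] = h
	}
	return o, nil
}

// FindHash first tries the partial reverse map and only builds the full map
// (the expensive path) when the offset has not been seen before.
func (idx *index) FindHash(o int64) (string, error) {
	if h, ok := idx.offsetHash[o]; ok {
		return h, nil
	}
	if !idx.offsetHashIsFull {
		idx.offsetHash = make(map[int64]string, len(idx.entries))
		for h, off := range idx.entries {
			idx.offsetHash[off] = h
		}
		idx.offsetHashIsFull = true
		if h, ok := idx.offsetHash[o]; ok {
			return h, nil
		}
	}
	return "", errNotFound
}

func main() {
	idx := &index{entries: map[string]int64{"aaaa": 12, "bbbb": 96}}
	off, _ := idx.FindOffset("aaaa")
	h, _ := idx.FindHash(off) // served from the memoized entry, no full rebuild
	fmt.Println(off, h)
}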
diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go
index 5fed278..26cca59 100644
--- a/plumbing/format/idxfile/idxfile.go
+++ b/plumbing/format/idxfile/idxfile.go
@@ -55,7 +55,8 @@ type MemoryIndex struct {
 	PackfileChecksum [20]byte
 	IdxChecksum      [20]byte
 
-	offsetHash map[int64]plumbing.Hash
+	offsetHash       map[int64]plumbing.Hash
+	offsetHashIsFull bool
 }
 
 var _ Index = (*MemoryIndex)(nil)
@@ -121,7 +122,17 @@ func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
 		return 0, plumbing.ErrObjectNotFound
 	}
 
-	return idx.getOffset(k, i)
+	offset, err := idx.getOffset(k, i)
+
+	if !idx.offsetHashIsFull {
+		// Save the offset for reverse lookup
+		if idx.offsetHash == nil {
+			idx.offsetHash = make(map[int64]plumbing.Hash)
+		}
+		idx.offsetHash[offset] = h
+	}
+
+	return offset, err
 }
 
 const isO64Mask = uint64(1) << 31
@@ -167,14 +178,24 @@ func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
 
 // FindHash implements the Index interface.
 func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
+	var hash plumbing.Hash
+	var ok bool
+
+	if idx.offsetHash != nil {
+		if hash, ok = idx.offsetHash[o]; ok {
+			return hash, nil
+		}
+	}
+
 	// Lazily generate the reverse offset/hash map if required.
-	if idx.offsetHash == nil {
+	if !idx.offsetHashIsFull || idx.offsetHash == nil {
 		if err := idx.genOffsetHash(); err != nil {
 			return plumbing.ZeroHash, err
 		}
+
+		hash, ok = idx.offsetHash[o]
 	}
 
-	hash, ok := idx.offsetHash[o]
 	if !ok {
 		return plumbing.ZeroHash, plumbing.ErrObjectNotFound
 	}
@@ -190,6 +211,7 @@ func (idx *MemoryIndex) genOffsetHash() error {
 	}
 
 	idx.offsetHash = make(map[int64]plumbing.Hash, count)
+	idx.offsetHashIsFull = true
 
 	iter, err := idx.Entries()
 	if err != nil {
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go
index ac57d08..98f92fd 100644
--- a/plumbing/format/index/decoder.go
+++ b/plumbing/format/index/decoder.go
@@ -1,6 +1,7 @@
 package index
 
 import (
+	"bufio"
 	"bytes"
 	"crypto/sha1"
 	"errors"
@@ -42,14 +43,17 @@ type Decoder struct {
 	r         io.Reader
 	hash      hash.Hash
 	lastEntry *Entry
+
+	extReader *bufio.Reader
 }
 
 // NewDecoder returns a new decoder that reads from r.
 func NewDecoder(r io.Reader) *Decoder {
 	h := sha1.New()
 	return &Decoder{
-		r:    io.TeeReader(r, h),
-		hash: h,
+		r:         io.TeeReader(r, h),
+		hash:      h,
+		extReader: bufio.NewReader(nil),
 	}
 }
 
@@ -184,11 +188,9 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
 
 func (d *Decoder) doReadEntryName(len uint16) (string, error) {
 	name := make([]byte, len)
-	if err := binary.Read(d.r, &name); err != nil {
-		return "", err
-	}
+	_, err := io.ReadFull(d.r, name[:])
 
-	return string(name), nil
+	return string(name), err
 }
 
 // Index entries are padded out to the next 8 byte alignment
@@ -279,20 +281,21 @@ func (d *Decoder) readExtension(idx *Index, header []byte) error {
 	return nil
 }
 
-func (d *Decoder) getExtensionReader() (io.Reader, error) {
+func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
 	len, err := binary.ReadUint32(d.r)
 	if err != nil {
 		return nil, err
 	}
 
-	return &io.LimitedReader{R: d.r, N: int64(len)}, nil
+	d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)})
+	return d.extReader, nil
 }
 
 func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
 	var h plumbing.Hash
 	copy(h[:4], alreadyRead[:])
 
-	if err := binary.Read(d.r, h[4:]); err != nil {
+	if _, err := io.ReadFull(d.r, h[4:]); err != nil {
 		return err
 	}
 
@@ -326,7 +329,7 @@ func validateHeader(r io.Reader) (version uint32, err error) {
 }
 
 type treeExtensionDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *treeExtensionDecoder) Decode(t *Tree) error {
@@ -386,16 +389,13 @@ func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
 	}
 
 	e.Trees = i
-
-	if err := binary.Read(d.r, &e.Hash); err != nil {
-		return nil, err
-	}
+	_, err = io.ReadFull(d.r, e.Hash[:])
 
 	return e, nil
 }
 
 type resolveUndoDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
@@ -433,7 +433,7 @@ func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
 	for s := range e.Stages {
 		var hash plumbing.Hash
-		if err := binary.Read(d.r, hash[:]); err != nil {
+		if _, err := io.ReadFull(d.r, hash[:]); err != nil {
 			return nil, err
 		}
 
@@ -462,7 +462,7 @@ func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
 }
 
 type endOfIndexEntryDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
@@ -472,5 +472,6 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
 		return err
 	}
 
-	return binary.Read(d.r, &e.Hash)
+	_, err = io.ReadFull(d.r, e.Hash[:])
+	return err
 }
diff --git a/plumbing/object/commit.go b/plumbing/object/commit.go
index b569d3c..511242d 100644
--- a/plumbing/object/commit.go
+++ b/plumbing/object/commit.go
@@ -171,7 +171,9 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 
 	var message bool
 	var pgpsig bool
diff --git a/plumbing/object/common.go b/plumbing/object/common.go
new file mode 100644
index 0000000..3591f5f
--- /dev/null
+++ b/plumbing/object/common.go
@@ -0,0 +1,12 @@
+package object
+
+import (
+	"bufio"
+	"sync"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		return bufio.NewReader(nil)
+	},
+}
diff --git a/plumbing/object/tag.go b/plumbing/object/tag.go
index 7f5e406..bc03477 100644
--- a/plumbing/object/tag.go
+++ b/plumbing/object/tag.go
@@ -93,7 +93,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 	for {
 		var line []byte
 		line, err = r.ReadBytes('\n')
diff --git a/plumbing/object/tree.go b/plumbing/object/tree.go
index 1f9ea26..d30cf6e 100644
--- a/plumbing/object/tree.go
+++ b/plumbing/object/tree.go
@@ -230,7 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 	for {
 		str, err := r.ReadString(' ')
 		if err != nil {
@@ -383,7 +385,7 @@ func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker {
 // underlying repository will be skipped automatically. It is possible that this
 // may change in future versions.
 func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
-	var obj Object
+	var obj *Tree
 	for {
 		current := len(w.stack) - 1
 		if current < 0 {
@@ -403,7 +405,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
 			// Finished with the current tree, move back up to the parent
 			w.stack = w.stack[:current]
 			w.base, _ = path.Split(w.base)
-			w.base = path.Clean(w.base) // Remove trailing slash
+			w.base = strings.TrimSuffix(w.base, "/")
 			continue
 		}
@@ -419,7 +421,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
 			obj, err = GetTree(w.s, entry.Hash)
 		}
 
-		name = path.Join(w.base, entry.Name)
+		name = simpleJoin(w.base, entry.Name)
 		if err != nil {
 			err = io.EOF
@@ -433,9 +435,9 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
 			return
 		}
 
-		if t, ok := obj.(*Tree); ok {
-			w.stack = append(w.stack, &treeEntryIter{t, 0})
-			w.base = path.Join(w.base, entry.Name)
+		if obj != nil {
+			w.stack = append(w.stack, &treeEntryIter{obj, 0})
+			w.base = simpleJoin(w.base, entry.Name)
 		}
 
 		return
@@ -509,3 +511,10 @@ func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
 		return cb(t)
 	})
 }
+
+func simpleJoin(parent, child string) string {
+	if len(parent) > 0 {
+		return parent + "/" + child
+	}
+	return child
+}
\ No newline at end of file
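
The object decoders (commit.go, tag.go, tree.go) now share the bufPool added in common.go instead of allocating a fresh bufio.Reader on every Decode call: borrow a reader from the sync.Pool, Reset it onto the object's reader, and hand it back with Put when done. A small runnable sketch of that borrow/reset/return cycle, assuming a made-up readFirstLine helper that is not part of go-git:

package main

import (
	"bufio"
	"fmt"
	"strings"
	"sync"
)

// bufPool mirrors the pool added in plumbing/object/common.go: New returns a
// bufio.Reader bound to no source, and callers Reset it onto their own reader.
var bufPool = sync.Pool{
	New: func() interface{} {
		return bufio.NewReader(nil)
	},
}

// readFirstLine is an illustrative helper showing the Get/Reset/Put cycle used
// by Commit.Decode, Tag.Decode and Tree.Decode in this diff: borrow a reader,
// point it at the input, and return it so its internal buffer can be reused.
func readFirstLine(src string) string {
	r := bufPool.Get().(*bufio.Reader)
	defer bufPool.Put(r)
	r.Reset(strings.NewReader(src))

	line, _ := r.ReadString('\n')
	return strings.TrimSuffix(line, "\n")
}

func main() {
	fmt.Println(readFirstLine("object header\nrest of object\n"))
}

Reset keeps the reader's internal buffer, so repeated decodes reuse the same default 4 KiB allocation rather than creating a new one per object.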