-rw-r--r--  plumbing/format/idxfile/idxfile.go   |  30
-rw-r--r--  plumbing/format/index/decoder.go     |  37
-rw-r--r--  plumbing/format/packfile/packfile.go |   5
-rw-r--r--  plumbing/object/commit.go            |   4
-rw-r--r--  plumbing/object/common.go            |  12
-rw-r--r--  plumbing/object/tag.go               |   4
-rw-r--r--  plumbing/object/tree.go              |  23
-rw-r--r--  storage/filesystem/dotgit/dotgit.go  |  20
-rw-r--r--  storage/filesystem/index.go          |   3
-rw-r--r--  storage/filesystem/object.go         | 145
-rw-r--r--  storage/filesystem/object_test.go    |  18
-rw-r--r--  storage/filesystem/storage.go        |   4
-rw-r--r--  utils/binary/read.go                 |  15
-rw-r--r--  utils/binary/read_test.go            |  10
14 files changed, 249 insertions(+), 81 deletions(-)
diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go
index 5fed278..26cca59 100644
--- a/plumbing/format/idxfile/idxfile.go
+++ b/plumbing/format/idxfile/idxfile.go
@@ -55,7 +55,8 @@ type MemoryIndex struct {
 	PackfileChecksum [20]byte
 	IdxChecksum      [20]byte
 
-	offsetHash map[int64]plumbing.Hash
+	offsetHash       map[int64]plumbing.Hash
+	offsetHashIsFull bool
 }
 
 var _ Index = (*MemoryIndex)(nil)
@@ -121,7 +122,17 @@ func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
 		return 0, plumbing.ErrObjectNotFound
 	}
 
-	return idx.getOffset(k, i)
+	offset, err := idx.getOffset(k, i)
+
+	if !idx.offsetHashIsFull {
+		// Save the offset for reverse lookup
+		if idx.offsetHash == nil {
+			idx.offsetHash = make(map[int64]plumbing.Hash)
+		}
+		idx.offsetHash[offset] = h
+	}
+
+	return offset, err
 }
 
 const isO64Mask = uint64(1) << 31
@@ -167,14 +178,24 @@ func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
 
 // FindHash implements the Index interface.
 func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
+	var hash plumbing.Hash
+	var ok bool
+
+	if idx.offsetHash != nil {
+		if hash, ok = idx.offsetHash[o]; ok {
+			return hash, nil
+		}
+	}
+
 	// Lazily generate the reverse offset/hash map if required.
-	if idx.offsetHash == nil {
+	if !idx.offsetHashIsFull || idx.offsetHash == nil {
 		if err := idx.genOffsetHash(); err != nil {
 			return plumbing.ZeroHash, err
 		}
+
+		hash, ok = idx.offsetHash[o]
 	}
 
-	hash, ok := idx.offsetHash[o]
 	if !ok {
 		return plumbing.ZeroHash, plumbing.ErrObjectNotFound
 	}
@@ -190,6 +211,7 @@ func (idx *MemoryIndex) genOffsetHash() error {
 	}
 
 	idx.offsetHash = make(map[int64]plumbing.Hash, count)
+	idx.offsetHashIsFull = true
 
 	iter, err := idx.Entries()
 	if err != nil {
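The idxfile change above makes forward lookups opportunistically fill the reverse offset-to-hash map, so FindHash only pays for the full (expensive) rebuild on a genuine miss. A minimal sketch of the same pattern outside go-git, with illustrative names and string hashes standing in for plumbing.Hash:

```go
package main

import "fmt"

type index struct {
	forward     map[string]int64 // hash -> offset, always available
	reverse     map[int64]string // offset -> hash, built lazily
	reverseFull bool             // true once reverse holds every entry
}

// FindOffset is the forward lookup; it memoizes the pair for reverse use.
func (idx *index) FindOffset(hash string) (int64, bool) {
	off, ok := idx.forward[hash]
	if ok && !idx.reverseFull {
		if idx.reverse == nil {
			idx.reverse = make(map[int64]string)
		}
		idx.reverse[off] = hash
	}
	return off, ok
}

// FindHash serves from the partial reverse map when possible and only
// triggers the full rebuild on a miss.
func (idx *index) FindHash(off int64) (string, bool) {
	if h, ok := idx.reverse[off]; ok {
		return h, true
	}
	if !idx.reverseFull {
		idx.reverse = make(map[int64]string, len(idx.forward))
		for h, o := range idx.forward {
			idx.reverse[o] = h
		}
		idx.reverseFull = true
	}
	h, ok := idx.reverse[off]
	return h, ok
}

func main() {
	idx := &index{forward: map[string]int64{"abc": 12, "def": 40}}
	idx.FindOffset("abc")         // memoizes 12 -> "abc" as a side effect
	fmt.Println(idx.FindHash(12)) // served without a full rebuild
	fmt.Println(idx.FindHash(40)) // miss: pays for the full rebuild once
}
```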
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go
index ac57d08..98f92fd 100644
--- a/plumbing/format/index/decoder.go
+++ b/plumbing/format/index/decoder.go
@@ -1,6 +1,7 @@
 package index
 
 import (
+	"bufio"
 	"bytes"
 	"crypto/sha1"
 	"errors"
@@ -42,14 +43,17 @@ type Decoder struct {
 	r         io.Reader
 	hash      hash.Hash
 	lastEntry *Entry
+
+	extReader *bufio.Reader
 }
 
 // NewDecoder returns a new decoder that reads from r.
 func NewDecoder(r io.Reader) *Decoder {
 	h := sha1.New()
 	return &Decoder{
-		r:    io.TeeReader(r, h),
-		hash: h,
+		r:         io.TeeReader(r, h),
+		hash:      h,
+		extReader: bufio.NewReader(nil),
 	}
 }
 
@@ -184,11 +188,9 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
 func (d *Decoder) doReadEntryName(len uint16) (string, error) {
 	name := make([]byte, len)
 
-	if err := binary.Read(d.r, &name); err != nil {
-		return "", err
-	}
+	_, err := io.ReadFull(d.r, name[:])
 
-	return string(name), nil
+	return string(name), err
 }
 
 // Index entries are padded out to the next 8 byte alignment
@@ -279,20 +281,21 @@ func (d *Decoder) readExtension(idx *Index, header []byte) error {
 	return nil
 }
 
-func (d *Decoder) getExtensionReader() (io.Reader, error) {
+func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
 	len, err := binary.ReadUint32(d.r)
 	if err != nil {
 		return nil, err
 	}
 
-	return &io.LimitedReader{R: d.r, N: int64(len)}, nil
+	d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)})
+	return d.extReader, nil
 }
 
 func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
 	var h plumbing.Hash
 	copy(h[:4], alreadyRead[:])
 
-	if err := binary.Read(d.r, h[4:]); err != nil {
+	if _, err := io.ReadFull(d.r, h[4:]); err != nil {
 		return err
 	}
 
@@ -326,7 +329,7 @@ func validateHeader(r io.Reader) (version uint32, err error) {
 }
 
 type treeExtensionDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *treeExtensionDecoder) Decode(t *Tree) error {
@@ -386,16 +389,13 @@ func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
 	}
 
 	e.Trees = i
-
-	if err := binary.Read(d.r, &e.Hash); err != nil {
-		return nil, err
-	}
+	_, err = io.ReadFull(d.r, e.Hash[:])
 
 	return e, nil
 }
 
 type resolveUndoDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
@@ -433,7 +433,7 @@ func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
 	for s := range e.Stages {
 		var hash plumbing.Hash
-		if err := binary.Read(d.r, hash[:]); err != nil {
+		if _, err := io.ReadFull(d.r, hash[:]); err != nil {
 			return nil, err
 		}
 
@@ -462,7 +462,7 @@ func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
 }
 
 type endOfIndexEntryDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
@@ -472,5 +472,6 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
 		return err
 	}
 
-	return binary.Read(d.r, &e.Hash)
+	_, err = io.ReadFull(d.r, e.Hash[:])
+	return err
 }
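Two techniques recur in the decoder diff: io.ReadFull replaces encoding/binary for fixed-size byte slices (avoiding reflection on the hot path), and a single bufio.Reader is Reset onto each length-delimited extension section instead of allocating a new reader per section. A small standalone sketch of both, with illustrative data:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

func main() {
	src := bytes.NewReader([]byte("0123456789abcdefghij"))

	// Fixed-size read: io.ReadFull into a [20]byte, no reflection.
	var hash [20]byte
	if _, err := io.ReadFull(src, hash[:]); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", hash[:])

	// One reusable buffered reader serves several length-delimited
	// sections; Reset retargets it without a fresh allocation.
	br := bufio.NewReader(nil)
	for _, section := range []string{"first", "second"} {
		br.Reset(&io.LimitedReader{
			R: bytes.NewReader([]byte(section)),
			N: int64(len(section)),
		})
		data, _ := io.ReadAll(br)
		fmt.Println(string(data))
	}
}
```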
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index 69b6e85..c09286e 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -414,6 +414,11 @@ func (p *Packfile) ID() (plumbing.Hash, error) {
 	return hash, nil
 }
 
+// Scanner returns the packfile's Scanner
+func (p *Packfile) Scanner() *Scanner {
+	return p.s
+}
+
 // Close the packfile and its resources.
 func (p *Packfile) Close() error {
 	closer, ok := p.file.(io.Closer)
diff --git a/plumbing/object/commit.go b/plumbing/object/commit.go
index b569d3c..511242d 100644
--- a/plumbing/object/commit.go
+++ b/plumbing/object/commit.go
@@ -171,7 +171,9 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 
 	var message bool
 	var pgpsig bool
diff --git a/plumbing/object/common.go b/plumbing/object/common.go
new file mode 100644
index 0000000..3591f5f
--- /dev/null
+++ b/plumbing/object/common.go
@@ -0,0 +1,12 @@
+package object
+
+import (
+	"bufio"
+	"sync"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		return bufio.NewReader(nil)
+	},
+}
diff --git a/plumbing/object/tag.go b/plumbing/object/tag.go
index 7f5e406..bc03477 100644
--- a/plumbing/object/tag.go
+++ b/plumbing/object/tag.go
@@ -93,7 +93,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 	for {
 		var line []byte
 		line, err = r.ReadBytes('\n')
diff --git a/plumbing/object/tree.go b/plumbing/object/tree.go
index 1f9ea26..d30cf6e 100644
--- a/plumbing/object/tree.go
+++ b/plumbing/object/tree.go
@@ -230,7 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
 	}
 	defer ioutil.CheckClose(reader, &err)
 
-	r := bufio.NewReader(reader)
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
 	for {
 		str, err := r.ReadString(' ')
 		if err != nil {
@@ -383,7 +385,7 @@ func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWa
 // underlying repository will be skipped automatically. It is possible that this
 // may change in future versions.
 func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
-	var obj Object
+	var obj *Tree
 	for {
 		current := len(w.stack) - 1
 		if current < 0 {
@@ -403,7 +405,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
 			// Finished with the current tree, move back up to the parent
 			w.stack = w.stack[:current]
 			w.base, _ = path.Split(w.base)
-			w.base = path.Clean(w.base) // Remove trailing slash
+			w.base = strings.TrimSuffix(w.base, "/")
 			continue
 		}
 
@@ -419,7 +421,7 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
 			obj, err = GetTree(w.s, entry.Hash)
 		}
 
-		name = path.Join(w.base, entry.Name)
+		name = simpleJoin(w.base, entry.Name)
 
 		if err != nil {
 			err = io.EOF
@@ -433,9 +435,9 @@ func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
 			return
 		}
 
-		if t, ok := obj.(*Tree); ok {
-			w.stack = append(w.stack, &treeEntryIter{t, 0})
-			w.base = path.Join(w.base, entry.Name)
+		if obj != nil {
+			w.stack = append(w.stack, &treeEntryIter{obj, 0})
+			w.base = simpleJoin(w.base, entry.Name)
 		}
 
 		return
@@ -509,3 +511,10 @@ func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
 		return cb(t)
 	})
 }
+
+func simpleJoin(parent, child string) string {
+	if len(parent) > 0 {
+		return parent + "/" + child
+	}
+	return child
+}
\ No newline at end of file
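The commit, tag, and tree decoders above share one sync.Pool of bufio.Readers (the new plumbing/object/common.go): each Decode borrows a reader, Resets it onto its source, and returns it when done, instead of allocating a fresh 4KB buffer per object. A minimal sketch of that pattern under illustrative names:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		return bufio.NewReader(nil)
	},
}

// decodeFirstLine reads one line from src using a pooled reader,
// avoiding a new bufio allocation per decoded object.
func decodeFirstLine(src string) string {
	r := bufPool.Get().(*bufio.Reader)
	defer bufPool.Put(r)
	r.Reset(strings.NewReader(src))

	line, _ := r.ReadString('\n')
	return strings.TrimSuffix(line, "\n")
}

func main() {
	for _, obj := range []string{"tree 8d45a3...\n", "parent e9cfa4...\n"} {
		fmt.Println(decodeFirstLine(obj))
	}
}
```

The defer-Put before Reset is safe here because the reader is only returned to the pool after decodeFirstLine finishes using it; the next Get will Reset it onto a new source anyway.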
diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go
index ba9667e..111769b 100644
--- a/storage/filesystem/dotgit/dotgit.go
+++ b/storage/filesystem/dotgit/dotgit.go
@@ -83,7 +83,7 @@ type DotGit struct {
 	packList []plumbing.Hash
 	packMap  map[plumbing.Hash]struct{}
 
-	files map[string]billy.File
+	files map[plumbing.Hash]billy.File
 }
 
 // New returns a DotGit value ready to be used. The path argument must
@@ -245,8 +245,15 @@ func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
 }
 
 func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
-	if d.files == nil {
-		d.files = make(map[string]billy.File)
+	if d.options.KeepDescriptors && extension == "pack" {
+		if d.files == nil {
+			d.files = make(map[plumbing.Hash]billy.File)
+		}
+
+		f, ok := d.files[hash]
+		if ok {
+			return f, nil
+		}
 	}
 
 	err := d.hasPack(hash)
@@ -255,11 +262,6 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
 	}
 
 	path := d.objectPackPath(hash, extension)
-	f, ok := d.files[path]
-	if ok {
-		return f, nil
-	}
-
 	pack, err := d.fs.Open(path)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -270,7 +272,7 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
 	}
 
 	if d.options.KeepDescriptors && extension == "pack" {
-		d.files[path] = pack
+		d.files[hash] = pack
 	}
 
 	return pack, nil
diff --git a/storage/filesystem/index.go b/storage/filesystem/index.go
index 2ebf57e..d04195c 100644
--- a/storage/filesystem/index.go
+++ b/storage/filesystem/index.go
@@ -1,6 +1,7 @@
 package filesystem
 
 import (
+	"bufio"
 	"os"
 
 	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
@@ -41,7 +42,7 @@ func (s *IndexStorage) Index() (i *index.Index, err error) {
 
 	defer ioutil.CheckClose(f, &err)
 
-	d := index.NewDecoder(f)
+	d := index.NewDecoder(bufio.NewReader(f))
 	err = d.Decode(idx)
 	return idx, err
 }
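The index.go change is a one-liner: wrap the file in a bufio.Reader before decoding. The win comes from the decoder issuing many tiny reads; buffered, those hit memory instead of the filesystem. A rough self-contained sketch of the effect, counting Read calls against the underlying source:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// countingReader counts how many times the underlying Read is invoked.
type countingReader struct {
	r io.Reader
	n int
}

func (c *countingReader) Read(p []byte) (int, error) {
	c.n++
	return c.r.Read(p)
}

// drain simulates a decoder that issues many 1-byte reads.
func drain(r io.Reader) {
	buf := make([]byte, 1)
	for {
		if _, err := r.Read(buf); err != nil {
			return
		}
	}
}

func main() {
	data := bytes.Repeat([]byte("x"), 16*1024)

	bare := &countingReader{r: bytes.NewReader(data)}
	drain(bare)
	fmt.Println("reads against bare source:    ", bare.n) // ~one per byte

	buffered := &countingReader{r: bytes.NewReader(data)}
	drain(bufio.NewReader(buffered))
	fmt.Println("reads against buffered source:", buffered.n) // a handful
}
```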
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 3eb62a2..ad5d8d0 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -26,6 +26,10 @@ type ObjectStorage struct {
 
 	dir   *dotgit.DotGit
 	index map[plumbing.Hash]idxfile.Index
+
+	packList    []plumbing.Hash
+	packListIdx int
+	packfiles   map[plumbing.Hash]*packfile.Packfile
 }
 
 // NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
@@ -187,6 +191,73 @@ func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
 	return size, err
 }
 
+func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
+	if p := s.packfileFromCache(pack); p != nil {
+		return p, nil
+	}
+
+	f, err := s.dir.ObjectPack(pack)
+	if err != nil {
+		return nil, err
+	}
+
+	var p *packfile.Packfile
+	if s.objectCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
+	} else {
+		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+	}
+
+	return p, s.storePackfileInCache(pack, p)
+}
+
+func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
+	if s.packfiles == nil {
+		if s.options.KeepDescriptors {
+			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
+		} else if s.options.MaxOpenDescriptors > 0 {
+			s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
+			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
+		}
+	}
+
+	return s.packfiles[hash]
+}
+
+func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
+	if s.options.KeepDescriptors {
+		s.packfiles[hash] = p
+		return nil
+	}
+
+	if s.options.MaxOpenDescriptors <= 0 {
+		return nil
+	}
+
+	// start over as the limit of packList is hit
+	if s.packListIdx >= len(s.packList) {
+		s.packListIdx = 0
+	}
+
+	// close the existing packfile if open
+	if next := s.packList[s.packListIdx]; !next.IsZero() {
+		open := s.packfiles[next]
+		delete(s.packfiles, next)
+		if open != nil {
+			if err := open.Close(); err != nil {
+				return err
+			}
+		}
+	}
+
+	// cache newly open packfile
+	s.packList[s.packListIdx] = hash
+	s.packfiles[hash] = p
+	s.packListIdx++
+
+	return nil
+}
+
 func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
 	size int64, err error) {
 	if err := s.requireIndex(); err != nil {
@@ -198,12 +269,6 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
 		return 0, plumbing.ErrObjectNotFound
 	}
 
-	f, err := s.dir.ObjectPack(pack)
-	if err != nil {
-		return 0, err
-	}
-	defer ioutil.CheckClose(f, &err)
-
 	idx := s.index[pack]
 	hash, err := idx.FindHash(offset)
 	if err == nil {
@@ -215,11 +280,13 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
 		return 0, err
 	}
 
-	var p *packfile.Packfile
-	if s.objectCache != nil {
-		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
-	} else {
-		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+	p, err := s.packfile(idx, pack)
+	if err != nil {
+		return 0, err
+	}
+
+	if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
+		defer ioutil.CheckClose(p, &err)
 	}
 
 	return p.GetSizeByOffset(offset)
@@ -361,29 +428,28 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
 		return nil, plumbing.ErrObjectNotFound
 	}
 
-	f, err := s.dir.ObjectPack(pack)
+	idx := s.index[pack]
+	p, err := s.packfile(idx, pack)
 	if err != nil {
 		return nil, err
 	}
 
-	if !s.options.KeepDescriptors {
-		defer ioutil.CheckClose(f, &err)
+	if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
+		defer ioutil.CheckClose(p, &err)
 	}
 
-	idx := s.index[pack]
 	if canBeDelta {
-		return s.decodeDeltaObjectAt(f, idx, offset, hash)
+		return s.decodeDeltaObjectAt(p, offset, hash)
 	}
 
-	return s.decodeObjectAt(f, idx, offset)
+	return s.decodeObjectAt(p, offset)
 }
 
 func (s *ObjectStorage) decodeObjectAt(
-	f billy.File,
-	idx idxfile.Index,
+	p *packfile.Packfile,
 	offset int64,
 ) (plumbing.EncodedObject, error) {
-	hash, err := idx.FindHash(offset)
+	hash, err := p.FindHash(offset)
 	if err == nil {
 		obj, ok := s.objectCache.Get(hash)
 		if ok {
@@ -395,28 +461,16 @@ func (s *ObjectStorage) decodeObjectAt(
 		return nil, err
 	}
 
-	var p *packfile.Packfile
-	if s.objectCache != nil {
-		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
-	} else {
-		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
-	}
-
 	return p.GetByOffset(offset)
 }
 
 func (s *ObjectStorage) decodeDeltaObjectAt(
-	f billy.File,
-	idx idxfile.Index,
+	p *packfile.Packfile,
 	offset int64,
 	hash plumbing.Hash,
 ) (plumbing.EncodedObject, error) {
-	if _, err := f.Seek(0, io.SeekStart); err != nil {
-		return nil, err
-	}
-
-	p := packfile.NewScanner(f)
-	header, err := p.SeekObjectHeader(offset)
+	scan := p.Scanner()
+	header, err := scan.SeekObjectHeader(offset)
 	if err != nil {
 		return nil, err
 	}
@@ -429,12 +483,12 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
 	case plumbing.REFDeltaObject:
 		base = header.Reference
 	case plumbing.OFSDeltaObject:
-		base, err = idx.FindHash(header.OffsetReference)
+		base, err = p.FindHash(header.OffsetReference)
 		if err != nil {
 			return nil, err
 		}
 	default:
-		return s.decodeObjectAt(f, idx, offset)
+		return s.decodeObjectAt(p, offset)
 	}
 
 	obj := &plumbing.MemoryObject{}
@@ -444,7 +498,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
 		return nil, err
 	}
 
-	if _, _, err := p.NextObject(w); err != nil {
+	if _, _, err := scan.NextObject(w); err != nil {
 		return nil, err
 	}
 
@@ -515,7 +569,20 @@ func (s *ObjectStorage) buildPackfileIters(
 
 // Close closes all opened files.
 func (s *ObjectStorage) Close() error {
-	return s.dir.Close()
+	var firstError error
+	if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
+		for _, packfile := range s.packfiles {
+			err := packfile.Close()
+			if firstError == nil && err != nil {
+				firstError = err
+			}
+		}
+	}
+
+	s.packfiles = nil
+	s.dir.Close()
+
+	return firstError
 }
 
 type lazyPackfilesIter struct {
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index 5cfb227..c2461db 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -86,6 +86,24 @@ func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) {
 	})
 }
 
+func (s *FsSuite) TestGetFromPackfileMaxOpenDescriptors(c *C) {
+	fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit()
+	o := NewObjectStorageWithOptions(dotgit.New(fs), cache.NewObjectLRUDefault(), Options{MaxOpenDescriptors: 1})
+
+	expected := plumbing.NewHash("8d45a34641d73851e01d3754320b33bb5be3c4d3")
+	obj, err := o.getFromPackfile(expected, false)
+	c.Assert(err, IsNil)
+	c.Assert(obj.Hash(), Equals, expected)
+
+	expected = plumbing.NewHash("e9cfa4c9ca160546efd7e8582ec77952a27b17db")
+	obj, err = o.getFromPackfile(expected, false)
+	c.Assert(err, IsNil)
+	c.Assert(obj.Hash(), Equals, expected)
+
+	err = o.Close()
+	c.Assert(err, IsNil)
+}
+
 func (s *FsSuite) TestGetSizeOfObjectFile(c *C) {
 	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
 	o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go
index 370f7bd..88d1ed4 100644
--- a/storage/filesystem/storage.go
+++ b/storage/filesystem/storage.go
@@ -31,6 +31,9 @@ type Options struct {
 	// KeepDescriptors makes the file descriptors to be reused but they will
 	// need to be manually closed calling Close().
 	KeepDescriptors bool
+	// MaxOpenDescriptors is the max number of file descriptors to keep
+	// open. If KeepDescriptors is true, all file descriptors will remain open.
+	MaxOpenDescriptors int
 }
 
 // NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
@@ -43,7 +46,6 @@ func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
 func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
 	dirOps := dotgit.Options{
 		ExclusiveAccess: ops.ExclusiveAccess,
-		KeepDescriptors: ops.KeepDescriptors,
 	}
 	dir := dotgit.NewWithOptions(fs, dirOps)
 
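The storePackfileInCache logic above is a fixed-size ring of keys plus a map: when the ring wraps, the slot's previous occupant is closed and evicted before the new descriptor is cached. A simplified standalone sketch of that eviction scheme, with io.Closer and string keys standing in for *packfile.Packfile and plumbing.Hash:

```go
package main

import (
	"fmt"
	"io"
)

type ring struct {
	keys    []string             // fixed-size slot list (the "packList")
	idx     int                  // next slot to overwrite
	entries map[string]io.Closer // key -> open resource
}

func newRing(max int) *ring {
	return &ring{
		keys:    make([]string, max),
		entries: make(map[string]io.Closer, max),
	}
}

func (r *ring) store(key string, v io.Closer) error {
	if r.idx >= len(r.keys) {
		r.idx = 0 // wrap around: reuse the oldest slot
	}
	if prev := r.keys[r.idx]; prev != "" {
		if open := r.entries[prev]; open != nil {
			delete(r.entries, prev)
			// Evict and close the descriptor this slot used to hold.
			if err := open.Close(); err != nil {
				return err
			}
		}
	}
	r.keys[r.idx] = key
	r.entries[key] = v
	r.idx++
	return nil
}

type fakeFile string

func (f fakeFile) Close() error {
	fmt.Println("closed", string(f))
	return nil
}

func main() {
	r := newRing(2)
	r.store("pack-1", fakeFile("pack-1"))
	r.store("pack-2", fakeFile("pack-2"))
	r.store("pack-3", fakeFile("pack-3")) // evicts and closes pack-1
	fmt.Println("still open:", len(r.entries))
}
```

Note this is round-robin by insertion order, not LRU: it bounds the number of open descriptors with O(1) bookkeeping, at the cost of sometimes evicting a still-hot packfile.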
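For callers, the new option is set through the same NewStorageWithOptions constructor the test exercises. A hedged usage sketch, assuming a bare repository at an illustrative path and the go-git v4 git.Open entry point:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
)

func main() {
	// Illustrative path; point this at a real .git directory.
	fs := osfs.New("/path/to/repo/.git")

	// Keep at most 16 packfile descriptors open at a time.
	storer := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(),
		filesystem.Options{MaxOpenDescriptors: 16})

	// Open as a bare repository (no worktree filesystem).
	repo, err := git.Open(storer, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("opened:", repo != nil)
}
```

With MaxOpenDescriptors set, the storage keeps packfiles open across calls up to the limit; with both it and KeepDescriptors unset, descriptors are closed after each operation as before.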
diff --git a/utils/binary/read.go b/utils/binary/read.go
index 50da1ff..12e57c3 100644
--- a/utils/binary/read.go
+++ b/utils/binary/read.go
@@ -25,6 +25,10 @@ func Read(r io.Reader, data ...interface{}) error {
 
 // ReadUntil reads from r untin delim is found
 func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
+	if bufr, ok := r.(*bufio.Reader); ok {
+		return ReadUntilFromBufioReader(bufr, delim)
+	}
+
 	var buf [1]byte
 	value := make([]byte, 0, 16)
 	for {
@@ -44,6 +48,17 @@ func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
 	}
 }
 
+// ReadUntilFromBufioReader is like bufio.ReadBytes but drops the delimiter
+// from the result.
+func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) {
+	value, err := r.ReadBytes(delim)
+	if err != nil || len(value) == 0 {
+		return nil, err
+	}
+
+	return value[:len(value)-1], nil
+}
+
 // ReadVariableWidthInt reads and returns an int in Git VLQ special format:
 //
 // Ordinary VLQ has some redundancies, example: the number 358 can be
diff --git a/utils/binary/read_test.go b/utils/binary/read_test.go
index 5674653..22867c2 100644
--- a/utils/binary/read_test.go
+++ b/utils/binary/read_test.go
@@ -1,6 +1,7 @@
 package binary
 
 import (
+	"bufio"
 	"bytes"
 	"encoding/binary"
 	"testing"
@@ -39,6 +40,15 @@ func (s *BinarySuite) TestReadUntil(c *C) {
 	c.Assert(string(b), Equals, "foo")
 }
 
+func (s *BinarySuite) TestReadUntilFromBufioReader(c *C) {
+	buf := bufio.NewReader(bytes.NewBuffer([]byte("foo bar")))
+
+	b, err := ReadUntilFromBufioReader(buf, ' ')
+	c.Assert(err, IsNil)
+	c.Assert(b, HasLen, 3)
+	c.Assert(string(b), Equals, "foo")
+}
+
 func (s *BinarySuite) TestReadVariableWidthInt(c *C) {
 	buf := bytes.NewBuffer([]byte{129, 110})
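The ReadUntil change is a classic interface fast path: probe the concrete type and delegate to a cheaper implementation when it matches, keeping the byte-at-a-time loop as the fallback. A self-contained sketch of the same shape, under illustrative names:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// readUntil returns the bytes before delim, without the delimiter.
func readUntil(r io.Reader, delim byte) ([]byte, error) {
	// Fast path: a *bufio.Reader can scan its internal buffer.
	if br, ok := r.(*bufio.Reader); ok {
		value, err := br.ReadBytes(delim)
		if err != nil || len(value) == 0 {
			return nil, err
		}
		return value[:len(value)-1], nil
	}

	// Slow path: one byte per Read call on an arbitrary reader.
	var buf [1]byte
	value := make([]byte, 0, 16)
	for {
		if _, err := r.Read(buf[:]); err != nil {
			return nil, err
		}
		if buf[0] == delim {
			return value, nil
		}
		value = append(value, buf[0])
	}
}

func main() {
	fast, _ := readUntil(bufio.NewReader(bytes.NewReader([]byte("foo bar"))), ' ')
	slow, _ := readUntil(bytes.NewReader([]byte("foo bar")), ' ')
	fmt.Println(string(fast), string(slow)) // foo foo
}
```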