From 52c1f982ea0004de419d1a7f69d7eaf8b8d6b659 Mon Sep 17 00:00:00 2001 From: Jeremy Stribling Date: Sun, 10 Sep 2017 20:59:31 -0700 Subject: config: support a configurable, and turn-off-able, pack.window One use of go-git is to transfer git data from a non-standard git repo (not stored in a file system, for example) to a "remote" backed by a standard, local .git repo. In this scenario, delta compression is not needed to reduce transfer time over the "network", because there is no network. The underlying storage layer has already taken care of the data transfer, and sending the objects to local .git storage doesn't require compression. So this PR gives the user the option to turn off compression when it isn't needed. Of course, this results in a larger, uncompressed local .git repo, but the user can then run git gc or git repack on that repo if they care about the storage costs. Turning the pack window to 0 reduces total push time of a 36K repo by 50 seconds (out of a pre-PR total of 3m26s). --- plumbing/format/packfile/delta_selector.go | 47 +++++++++++++++++------ plumbing/format/packfile/delta_selector_test.go | 31 ++++++++++----- plumbing/format/packfile/encoder.go | 23 ++++++----- plumbing/format/packfile/encoder_advanced_test.go | 17 ++++++-- plumbing/format/packfile/encoder_test.go | 8 ++-- plumbing/transport/server/server.go | 3 +- plumbing/transport/test/receive_pack.go | 2 +- 7 files changed, 92 insertions(+), 39 deletions(-) (limited to 'plumbing') diff --git a/plumbing/format/packfile/delta_selector.go b/plumbing/format/packfile/delta_selector.go index 0b3539d..77573ac 100644 --- a/plumbing/format/packfile/delta_selector.go +++ b/plumbing/format/packfile/delta_selector.go @@ -9,9 +9,6 @@ import ( ) const ( - // How far back in the sorted list to search for deltas. 10 is - // the default in command line git. - deltaWindowSize = 10 // deltas based on deltas, how many steps we can do. 
// 50 is the default value used in JGit maxDepth = int64(50) @@ -31,14 +28,24 @@ func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector { return &deltaSelector{s} } -// ObjectsToPack creates a list of ObjectToPack from the hashes provided, -// creating deltas if it's suitable, using an specific internal logic -func (dw *deltaSelector) ObjectsToPack(hashes []plumbing.Hash) ([]*ObjectToPack, error) { - otp, err := dw.objectsToPack(hashes) +// ObjectsToPack creates a list of ObjectToPack from the hashes +// provided, creating deltas if it's suitable, using an specific +// internal logic. `packWindow` specifies the size of the sliding +// window used to compare objects for delta compression; 0 turns off +// delta compression entirely. +func (dw *deltaSelector) ObjectsToPack( + hashes []plumbing.Hash, + packWindow uint, +) ([]*ObjectToPack, error) { + otp, err := dw.objectsToPack(hashes, packWindow) if err != nil { return nil, err } + if packWindow == 0 { + return otp, nil + } + dw.sort(otp) var objectGroups [][]*ObjectToPack @@ -60,7 +67,7 @@ func (dw *deltaSelector) ObjectsToPack(hashes []plumbing.Hash) ([]*ObjectToPack, objs := objs wg.Add(1) go func() { - if walkErr := dw.walk(objs); walkErr != nil { + if walkErr := dw.walk(objs, packWindow); walkErr != nil { once.Do(func() { err = walkErr }) @@ -77,10 +84,19 @@ func (dw *deltaSelector) ObjectsToPack(hashes []plumbing.Hash) ([]*ObjectToPack, return otp, nil } -func (dw *deltaSelector) objectsToPack(hashes []plumbing.Hash) ([]*ObjectToPack, error) { +func (dw *deltaSelector) objectsToPack( + hashes []plumbing.Hash, + packWindow uint, +) ([]*ObjectToPack, error) { var objectsToPack []*ObjectToPack for _, h := range hashes { - o, err := dw.encodedDeltaObject(h) + var o plumbing.EncodedObject + var err error + if packWindow == 0 { + o, err = dw.encodedObject(h) + } else { + o, err = dw.encodedDeltaObject(h) + } if err != nil { return nil, err } @@ -93,6 +109,10 @@ func (dw *deltaSelector) 
objectsToPack(hashes []plumbing.Hash) ([]*ObjectToPack, objectsToPack = append(objectsToPack, otp) } + if packWindow == 0 { + return objectsToPack, nil + } + if err := dw.fixAndBreakChains(objectsToPack); err != nil { return nil, err } @@ -201,7 +221,10 @@ func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) { sort.Sort(byTypeAndSize(objectsToPack)) } -func (dw *deltaSelector) walk(objectsToPack []*ObjectToPack) error { +func (dw *deltaSelector) walk( + objectsToPack []*ObjectToPack, + packWindow uint, +) error { indexMap := make(map[plumbing.Hash]*deltaIndex) for i := 0; i < len(objectsToPack); i++ { target := objectsToPack[i] @@ -218,7 +241,7 @@ func (dw *deltaSelector) walk(objectsToPack []*ObjectToPack) error { continue } - for j := i - 1; j >= 0 && i-j < deltaWindowSize; j-- { + for j := i - 1; j >= 0 && i-j < int(packWindow); j-- { base := objectsToPack[j] // Objects must use only the same type as their delta base. // Since objectsToPack is sorted by type and size, once we find diff --git a/plumbing/format/packfile/delta_selector_test.go b/plumbing/format/packfile/delta_selector_test.go index ca4a96b..7d7fd0c 100644 --- a/plumbing/format/packfile/delta_selector_test.go +++ b/plumbing/format/packfile/delta_selector_test.go @@ -146,7 +146,8 @@ func (s *DeltaSelectorSuite) createTestObjects() { func (s *DeltaSelectorSuite) TestObjectsToPack(c *C) { // Different type hashes := []plumbing.Hash{s.hashes["base"], s.hashes["treeType"]} - otp, err := s.ds.ObjectsToPack(hashes) + deltaWindowSize := uint(10) + otp, err := s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 2) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["base"]]) @@ -154,7 +155,7 @@ func (s *DeltaSelectorSuite) TestObjectsToPack(c *C) { // Size radically different hashes = []plumbing.Hash{s.hashes["bigBase"], s.hashes["target"]} - otp, err = s.ds.ObjectsToPack(hashes) + otp, err = s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) 
c.Assert(len(otp), Equals, 2) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["bigBase"]]) @@ -162,7 +163,7 @@ func (s *DeltaSelectorSuite) TestObjectsToPack(c *C) { // Delta Size Limit with no best delta yet hashes = []plumbing.Hash{s.hashes["smallBase"], s.hashes["smallTarget"]} - otp, err = s.ds.ObjectsToPack(hashes) + otp, err = s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 2) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["smallBase"]]) @@ -170,7 +171,7 @@ func (s *DeltaSelectorSuite) TestObjectsToPack(c *C) { // It will create the delta hashes = []plumbing.Hash{s.hashes["base"], s.hashes["target"]} - otp, err = s.ds.ObjectsToPack(hashes) + otp, err = s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 2) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["target"]]) @@ -185,7 +186,7 @@ func (s *DeltaSelectorSuite) TestObjectsToPack(c *C) { s.hashes["o2"], s.hashes["o3"], } - otp, err = s.ds.ObjectsToPack(hashes) + otp, err = s.ds.ObjectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) c.Assert(len(otp), Equals, 3) c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["o1"]]) @@ -201,20 +202,32 @@ func (s *DeltaSelectorSuite) TestObjectsToPack(c *C) { // a delta. hashes = make([]plumbing.Hash, 0, deltaWindowSize+2) hashes = append(hashes, s.hashes["base"]) - for i := 0; i < deltaWindowSize; i++ { + for i := uint(0); i < deltaWindowSize; i++ { hashes = append(hashes, s.hashes["smallTarget"]) } hashes = append(hashes, s.hashes["target"]) // Don't sort so we can easily check the sliding window without // creating a bunch of new objects. 
- otp, err = s.ds.objectsToPack(hashes) + otp, err = s.ds.objectsToPack(hashes, deltaWindowSize) c.Assert(err, IsNil) - err = s.ds.walk(otp) + err = s.ds.walk(otp, deltaWindowSize) c.Assert(err, IsNil) - c.Assert(len(otp), Equals, deltaWindowSize+2) + c.Assert(len(otp), Equals, int(deltaWindowSize)+2) targetIdx := len(otp) - 1 c.Assert(otp[targetIdx].IsDelta(), Equals, false) + + // Check that no deltas are created, and the objects are unsorted, + // if compression is off. + hashes = []plumbing.Hash{s.hashes["base"], s.hashes["target"]} + otp, err = s.ds.ObjectsToPack(hashes, 0) + c.Assert(err, IsNil) + c.Assert(len(otp), Equals, 2) + c.Assert(otp[0].Object, Equals, s.store.Objects[s.hashes["base"]]) + c.Assert(otp[0].IsDelta(), Equals, false) + c.Assert(otp[1].Original, Equals, s.store.Objects[s.hashes["target"]]) + c.Assert(otp[1].IsDelta(), Equals, false) + c.Assert(otp[1].Depth, Equals, 0) } func (s *DeltaSelectorSuite) TestMaxDepth(c *C) { diff --git a/plumbing/format/packfile/encoder.go b/plumbing/format/packfile/encoder.go index 1426559..7ee6546 100644 --- a/plumbing/format/packfile/encoder.go +++ b/plumbing/format/packfile/encoder.go @@ -14,10 +14,10 @@ import ( // Encoder gets the data from the storage and write it into the writer in PACK // format type Encoder struct { - selector *deltaSelector - w *offsetWriter - zw *zlib.Writer - hasher plumbing.Hasher + selector *deltaSelector + w *offsetWriter + zw *zlib.Writer + hasher plumbing.Hasher // offsets is a map of object hashes to corresponding offsets in the packfile. // It is used to determine offset of the base of a delta when a OFS_DELTA is // used. @@ -45,10 +45,15 @@ func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *E } } -// Encode creates a packfile containing all the objects referenced in hashes -// and writes it to the writer in the Encoder. 
-func (e *Encoder) Encode(hashes []plumbing.Hash) (plumbing.Hash, error) { - objects, err := e.selector.ObjectsToPack(hashes) +// Encode creates a packfile containing all the objects referenced in +// hashes and writes it to the writer in the Encoder. `packWindow` +// specifies the size of the sliding window used to compare objects +// for delta compression; 0 turns off delta compression entirely. +func (e *Encoder) Encode( + hashes []plumbing.Hash, + packWindow uint, +) (plumbing.Hash, error) { + objects, err := e.selector.ObjectsToPack(hashes, packWindow) if err != nil { return plumbing.ZeroHash, err } @@ -137,7 +142,7 @@ func (e *Encoder) writeOfsDeltaHeader(deltaOffset int64, base plumbing.Hash) err // for OFS_DELTA, offset of the base is interpreted as negative offset // relative to the type-byte of the header of the ofs-delta entry. - relativeOffset := deltaOffset-baseOffset + relativeOffset := deltaOffset - baseOffset if relativeOffset <= 0 { return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset) } diff --git a/plumbing/format/packfile/encoder_advanced_test.go b/plumbing/format/packfile/encoder_advanced_test.go index d92e2c4..39c0700 100644 --- a/plumbing/format/packfile/encoder_advanced_test.go +++ b/plumbing/format/packfile/encoder_advanced_test.go @@ -27,12 +27,23 @@ func (s *EncoderAdvancedSuite) TestEncodeDecode(c *C) { fixs.Test(c, func(f *fixtures.Fixture) { storage, err := filesystem.NewStorage(f.DotGit()) c.Assert(err, IsNil) - s.testEncodeDecode(c, storage) + s.testEncodeDecode(c, storage, 10) }) } -func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer) { +func (s *EncoderAdvancedSuite) TestEncodeDecodeNoDeltaCompression(c *C) { + fixs := fixtures.Basic().ByTag("packfile").ByTag(".git") + fixs = append(fixs, fixtures.ByURL("https://github.com/src-d/go-git.git"). 
+ ByTag("packfile").ByTag(".git").One()) + fixs.Test(c, func(f *fixtures.Fixture) { + storage, err := filesystem.NewStorage(f.DotGit()) + c.Assert(err, IsNil) + s.testEncodeDecode(c, storage, 0) + }) +} + +func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer, packWindow uint) { objIter, err := storage.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) @@ -57,7 +68,7 @@ func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer) { buf := bytes.NewBuffer(nil) enc := NewEncoder(buf, storage, false) - _, err = enc.Encode(hashes) + _, err = enc.Encode(hashes, packWindow) c.Assert(err, IsNil) scanner := NewScanner(buf) diff --git a/plumbing/format/packfile/encoder_test.go b/plumbing/format/packfile/encoder_test.go index b5b0c42..2cb9094 100644 --- a/plumbing/format/packfile/encoder_test.go +++ b/plumbing/format/packfile/encoder_test.go @@ -26,7 +26,7 @@ func (s *EncoderSuite) SetUpTest(c *C) { } func (s *EncoderSuite) TestCorrectPackHeader(c *C) { - hash, err := s.enc.Encode([]plumbing.Hash{}) + hash, err := s.enc.Encode([]plumbing.Hash{}, 10) c.Assert(err, IsNil) hb := [20]byte(hash) @@ -47,7 +47,7 @@ func (s *EncoderSuite) TestCorrectPackWithOneEmptyObject(c *C) { _, err := s.store.SetEncodedObject(o) c.Assert(err, IsNil) - hash, err := s.enc.Encode([]plumbing.Hash{o.Hash()}) + hash, err := s.enc.Encode([]plumbing.Hash{o.Hash()}, 10) c.Assert(err, IsNil) // PACK + VERSION(2) + OBJECT NUMBER(1) @@ -74,13 +74,13 @@ func (s *EncoderSuite) TestMaxObjectSize(c *C) { o.SetType(plumbing.CommitObject) _, err := s.store.SetEncodedObject(o) c.Assert(err, IsNil) - hash, err := s.enc.Encode([]plumbing.Hash{o.Hash()}) + hash, err := s.enc.Encode([]plumbing.Hash{o.Hash()}, 10) c.Assert(err, IsNil) c.Assert(hash.IsZero(), Not(Equals), true) } func (s *EncoderSuite) TestHashNotFound(c *C) { - h, err := s.enc.Encode([]plumbing.Hash{plumbing.NewHash("BAD")}) + h, err := s.enc.Encode([]plumbing.Hash{plumbing.NewHash("BAD")}, 10) 
c.Assert(h, Equals, plumbing.ZeroHash) c.Assert(err, NotNil) c.Assert(err, Equals, plumbing.ErrObjectNotFound) diff --git a/plumbing/transport/server/server.go b/plumbing/transport/server/server.go index be36de5..f896f7a 100644 --- a/plumbing/transport/server/server.go +++ b/plumbing/transport/server/server.go @@ -165,7 +165,8 @@ func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest pr, pw := io.Pipe() e := packfile.NewEncoder(pw, s.storer, false) go func() { - _, err := e.Encode(objs) + // TODO: plumb through a pack window. + _, err := e.Encode(objs, 10) pw.CloseWithError(err) }() diff --git a/plumbing/transport/test/receive_pack.go b/plumbing/transport/test/receive_pack.go index d29d9ca..ed0f517 100644 --- a/plumbing/transport/test/receive_pack.go +++ b/plumbing/transport/test/receive_pack.go @@ -348,7 +348,7 @@ func (s *ReceivePackSuite) testSendPackDeleteReference(c *C) { func (s *ReceivePackSuite) emptyPackfile() io.ReadCloser { var buf bytes.Buffer e := packfile.NewEncoder(&buf, memory.NewStorage(), false) - _, err := e.Encode(nil) + _, err := e.Encode(nil, 10) if err != nil { panic(err) } -- cgit