author	Santiago M. Mola <santi@mola.io>	2017-05-24 11:42:54 +0200
committer	GitHub <noreply@github.com>	2017-05-24 11:42:54 +0200
commit	7e249dfcf28765939bde8f38784b3274b522f880 (patch)
tree	f3d79534783d1dbe438f076edb00e0962122feca /plumbing
parent	f663a9384619965ed8df7a7224e6f15ad18ed4af (diff)
parent	f369a7820ddc6224d5318d562de49e992942ad1f (diff)
download	go-git-7e249dfcf28765939bde8f38784b3274b522f880.tar.gz
Merge pull request #400 from ajnavarro/improvement/diff-delta (tag: v4.0.0-rc10)
format/packfile: improve binary delta algorithm
Diffstat (limited to 'plumbing')
-rw-r--r--	plumbing/format/packfile/diff.go	397
-rw-r--r--	plumbing/format/packfile/diff_delta.go	138
2 files changed, 94 insertions(+), 441 deletions(-)
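DiffDelta previously ran a difflib-derived SequenceMatcher over the two buffers (quadratic in the worst case, per its own documentation below) and translated the resulting opcodes into delta instructions. The new implementation is rsync-flavoured instead: it indexes the base buffer in fixed 16-byte chunks keyed by adler32 checksum, scans the target byte by byte, extends any chunk hit forward for as long as the bytes agree, and emits the result as a copy operation, buffering unmatched bytes into insert operations. A minimal, self-contained sketch of the indexing step (identifiers here are illustrative, not the patch's):

package main

import (
	"fmt"
	"hash/adler32"
)

const chunk = 16 // mirrors the constant `s` introduced in diff_delta.go

// indexChunks records the offset of each fixed-size chunk of base, keyed
// by its adler32 checksum; on a checksum collision the later chunk wins,
// exactly as the map assignment in the patch's initMatch does.
func indexChunks(base []byte) map[uint32]int {
	idx := make(map[uint32]int)
	for i := 0; i+chunk <= len(base); i += chunk {
		idx[adler32.Checksum(base[i:i+chunk])] = i
	}
	return idx
}

func main() {
	base := []byte("0123456789abcdef0123456789ABCDEF")
	fmt.Println(len(indexChunks(base))) // 2 chunks -> 2 fingerprints
}

Because only chunk-aligned offsets are indexed, a match can begin up to 15 bytes after the position where base and target actually start agreeing; the patch trades that precision for a near-linear scan and a small index.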
diff --git a/plumbing/format/packfile/diff.go b/plumbing/format/packfile/diff.go
deleted file mode 100644
index ff34329..0000000
--- a/plumbing/format/packfile/diff.go
+++ /dev/null
@@ -1,397 +0,0 @@
-package packfile
-
-/*
-
-Copyright (c) 2013, Patrick Mezard
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
- The names of its contributors may not be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-// Code based on https://github.com/pmezard/go-difflib
-// Removed unnecessary code for this use case and changed string inputs to byte
-
-type match struct {
- A int
- B int
- Size int
-}
-
-type opCode struct {
- Tag byte
- I1 int
- I2 int
- J1 int
- J2 int
-}
-
-// SequenceMatcher compares sequence of bytes. The basic
-// algorithm predates, and is a little fancier than, an algorithm
-// published in the late 1980's by Ratcliff and Obershelp under the
-// hyperbolic name "gestalt pattern matching". The basic idea is to find
-// the longest contiguous matching subsequence that contains no "junk"
-// elements (R-O doesn't address junk). The same idea is then applied
-// recursively to the pieces of the sequences to the left and to the right
-// of the matching subsequence. This does not yield minimal edit
-// sequences, but does tend to yield matches that "look right" to people.
-//
-// SequenceMatcher tries to compute a "human-friendly diff" between two
-// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
-// longest *contiguous* & junk-free matching subsequence. That's what
-// catches peoples' eyes. The Windows(tm) windiff has another interesting
-// notion, pairing up elements that appear uniquely in each sequence.
-// That, and the method here, appear to yield more intuitive difference
-// reports than does diff. This method appears to be the least vulnerable
-// to synching up on blocks of "junk lines", though (like blank lines in
-// ordinary text files, or maybe "<P>" lines in HTML files). That may be
-// because this is the only method of the 3 that has a *concept* of
-// "junk" <wink>.
-//
-// Timing: Basic R-O is cubic time worst case and quadratic time expected
-// case. SequenceMatcher is quadratic time for the worst case and has
-// expected-case behavior dependent in a complicated way on how many
-// elements the sequences have in common; best case time is linear.
-type sequenceMatcher struct {
- a []byte
- b []byte
- b2j map[byte][]int
- IsJunk func(byte) bool
- autoJunk bool
- bJunk map[byte]struct{}
- matchingBlocks []match
- fullBCount map[byte]int
- bPopular map[byte]struct{}
- opCodes []opCode
-}
-
-func newMatcher(a, b []byte) *sequenceMatcher {
- m := sequenceMatcher{autoJunk: true}
- m.SetSeqs(a, b)
- return &m
-}
-
-// Set two sequences to be compared.
-func (m *sequenceMatcher) SetSeqs(a, b []byte) {
- m.SetSeq1(a)
- m.SetSeq2(b)
-}
-
-// Set the first sequence to be compared. The second sequence to be compared is
-// not changed.
-//
-// SequenceMatcher computes and caches detailed information about the second
-// sequence, so if you want to compare one sequence S against many sequences,
-// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
-// sequences.
-//
-// See also SetSeqs() and SetSeq2().
-func (m *sequenceMatcher) SetSeq1(a []byte) {
- if &a == &m.a {
- return
- }
- m.a = a
- m.matchingBlocks = nil
- m.opCodes = nil
-}
-
-// Set the second sequence to be compared. The first sequence to be compared is
-// not changed.
-func (m *sequenceMatcher) SetSeq2(b []byte) {
- if &b == &m.b {
- return
- }
- m.b = b
- m.matchingBlocks = nil
- m.opCodes = nil
- m.fullBCount = nil
- m.chainB()
-}
-
-func (m *sequenceMatcher) chainB() {
- // Populate line -> index mapping
- b2j := map[byte][]int{}
- for i, s := range m.b {
- indices := b2j[s]
- indices = append(indices, i)
- b2j[s] = indices
- }
-
- // Purge junk elements
- m.bJunk = map[byte]struct{}{}
- if m.IsJunk != nil {
- junk := m.bJunk
- for s := range b2j {
- if m.IsJunk(s) {
- junk[s] = struct{}{}
- }
- }
- for s := range junk {
- delete(b2j, s)
- }
- }
-
- // Purge remaining popular elements
- popular := map[byte]struct{}{}
- n := len(m.b)
- if m.autoJunk && n >= 200 {
- ntest := n/100 + 1
- for s, indices := range b2j {
- if len(indices) > ntest {
- popular[s] = struct{}{}
- }
- }
- for s := range popular {
- delete(b2j, s)
- }
- }
- m.bPopular = popular
- m.b2j = b2j
-}
-
-func (m *sequenceMatcher) isBJunk(s byte) bool {
- _, ok := m.bJunk[s]
- return ok
-}
-
-// Find longest matching block in a[alo:ahi] and b[blo:bhi].
-//
-// If IsJunk is not defined:
-//
-// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
-// alo <= i <= i+k <= ahi
-// blo <= j <= j+k <= bhi
-// and for all (i',j',k') meeting those conditions,
-// k >= k'
-// i <= i'
-// and if i == i', j <= j'
-//
-// In other words, of all maximal matching blocks, return one that
-// starts earliest in a, and of all those maximal matching blocks that
-// start earliest in a, return the one that starts earliest in b.
-//
-// If IsJunk is defined, first the longest matching block is
-// determined as above, but with the additional restriction that no
-// junk element appears in the block. Then that block is extended as
-// far as possible by matching (only) junk elements on both sides. So
-// the resulting block never matches on junk except as identical junk
-// happens to be adjacent to an "interesting" match.
-//
-// If no blocks match, return (alo, blo, 0).
-func (m *sequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) match {
- // CAUTION: stripping common prefix or suffix would be incorrect.
- // E.g.,
- // ab
- // acab
- // Longest matching block is "ab", but if common prefix is
- // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
- // strip, so ends up claiming that ab is changed to acab by
- // inserting "ca" in the middle. That's minimal but unintuitive:
- // "it's obvious" that someone inserted "ac" at the front.
- // Windiff ends up at the same place as diff, but by pairing up
- // the unique 'b's and then matching the first two 'a's.
- besti, bestj, bestsize := alo, blo, 0
-
- // find longest junk-free match
- // during an iteration of the loop, j2len[j] = length of longest
- // junk-free match ending with a[i-1] and b[j]
- j2len := map[int]int{}
- for i := alo; i != ahi; i++ {
- // look at all instances of a[i] in b; note that because
- // b2j has no junk keys, the loop is skipped if a[i] is junk
- newj2len := map[int]int{}
- for _, j := range m.b2j[m.a[i]] {
- // a[i] matches b[j]
- if j < blo {
- continue
- }
- if j >= bhi {
- break
- }
- k := j2len[j-1] + 1
- newj2len[j] = k
- if k > bestsize {
- besti, bestj, bestsize = i-k+1, j-k+1, k
- }
- }
- j2len = newj2len
- }
-
- // Extend the best by non-junk elements on each end. In particular,
- // "popular" non-junk elements aren't in b2j, which greatly speeds
- // the inner loop above, but also means "the best" match so far
- // doesn't contain any junk *or* popular non-junk elements.
- for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
- m.a[besti-1] == m.b[bestj-1] {
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- }
- for besti+bestsize < ahi && bestj+bestsize < bhi &&
- !m.isBJunk(m.b[bestj+bestsize]) &&
- m.a[besti+bestsize] == m.b[bestj+bestsize] {
- bestsize += 1
- }
-
- // Now that we have a wholly interesting match (albeit possibly
- // empty!), we may as well suck up the matching junk on each
- // side of it too. Can't think of a good reason not to, and it
- // saves post-processing the (possibly considerable) expense of
- // figuring out what to do with it. In the case of an empty
- // interesting match, this is clearly the right thing to do,
- // because no other kind of match is possible in the regions.
- for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
- m.a[besti-1] == m.b[bestj-1] {
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- }
- for besti+bestsize < ahi && bestj+bestsize < bhi &&
- m.isBJunk(m.b[bestj+bestsize]) &&
- m.a[besti+bestsize] == m.b[bestj+bestsize] {
- bestsize += 1
- }
-
- return match{A: besti, B: bestj, Size: bestsize}
-}
-
-// Return list of triples describing matching subsequences.
-//
-// Each triple is of the form (i, j, n), and means that
-// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
-// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
-// adjacent triples in the list, and the second is not the last triple in the
-// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
-// adjacent equal blocks.
-//
-// The last triple is a dummy, (len(a), len(b), 0), and is the only
-// triple with n==0.
-func (m *sequenceMatcher) GetMatchingBlocks() []match {
- if m.matchingBlocks != nil {
- return m.matchingBlocks
- }
-
- var matchBlocks func(alo, ahi, blo, bhi int, matched []match) []match
- matchBlocks = func(alo, ahi, blo, bhi int, matched []match) []match {
- match := m.findLongestMatch(alo, ahi, blo, bhi)
- i, j, k := match.A, match.B, match.Size
- if match.Size > 0 {
- if alo < i && blo < j {
- matched = matchBlocks(alo, i, blo, j, matched)
- }
- matched = append(matched, match)
- if i+k < ahi && j+k < bhi {
- matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
- }
- }
- return matched
- }
- matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
-
- // It's possible that we have adjacent equal blocks in the
- // matching_blocks list now.
- nonAdjacent := []match{}
- i1, j1, k1 := 0, 0, 0
- for _, b := range matched {
- // Is this block adjacent to i1, j1, k1?
- i2, j2, k2 := b.A, b.B, b.Size
- if i1+k1 == i2 && j1+k1 == j2 {
- // Yes, so collapse them -- this just increases the length of
- // the first block by the length of the second, and the first
- // block so lengthened remains the block to compare against.
- k1 += k2
- } else {
- // Not adjacent. Remember the first block (k1==0 means it's
- // the dummy we started with), and make the second block the
- // new block to compare against.
- if k1 > 0 {
- nonAdjacent = append(nonAdjacent, match{i1, j1, k1})
- }
- i1, j1, k1 = i2, j2, k2
- }
- }
- if k1 > 0 {
- nonAdjacent = append(nonAdjacent, match{i1, j1, k1})
- }
-
- nonAdjacent = append(nonAdjacent, match{len(m.a), len(m.b), 0})
- m.matchingBlocks = nonAdjacent
- return m.matchingBlocks
-}
-
-const (
- tagReplace = 'r'
- tagDelete = 'd'
- tagInsert = 'i'
- tagEqual = 'e'
-)
-
-// Return list of 5-tuples describing how to turn a into b.
-//
-// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
-// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
-// tuple preceding it, and likewise for j1 == the previous j2.
-//
-// The tags are characters, with these meanings:
-//
-// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
-//
-// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
-//
-// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
-//
-// 'e' (equal): a[i1:i2] == b[j1:j2]
-func (m *sequenceMatcher) GetOpCodes() []opCode {
- if m.opCodes != nil {
- return m.opCodes
- }
- i, j := 0, 0
- matching := m.GetMatchingBlocks()
- opCodes := make([]opCode, 0, len(matching))
- for _, m := range matching {
- // invariant: we've pumped out correct diffs to change
- // a[:i] into b[:j], and the next matching block is
- // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
- // out a diff to change a[i:ai] into b[j:bj], pump out
- // the matching block, and move (i,j) beyond the match
- ai, bj, size := m.A, m.B, m.Size
- tag := byte(0)
- if i < ai && j < bj {
- tag = tagReplace
- } else if i < ai {
- tag = tagDelete
- } else if j < bj {
- tag = tagInsert
- }
- if tag > 0 {
- opCodes = append(opCodes, opCode{tag, i, ai, j, bj})
- }
- i, j = ai+size, bj+size
- // the list of matching blocks is terminated by a
- // sentinel with size 0
- if size > 0 {
- opCodes = append(opCodes, opCode{tagEqual, ai, i, bj, j})
- }
- }
- m.opCodes = opCodes
- return m.opCodes
-}
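The file deleted above is a byte-oriented port of Python's difflib (taken from github.com/pmezard/go-difflib, per its header); its GetOpCodes result is a complete recipe for turning a into b, which is what the old DiffDelta consumed. As a rough illustration of the opcode semantics documented above, replaying the tuples against both inputs reproduces b (a hypothetical helper written for this page, not code from the repository):

// applyOpCodes replays difflib-style opcodes: equal spans are copied
// from a, replaced/inserted spans come from b, deleted spans are skipped.
func applyOpCodes(a, b []byte, ops []opCode) []byte {
	var out []byte
	for _, op := range ops {
		switch op.Tag {
		case tagEqual:
			out = append(out, a[op.I1:op.I2]...)
		case tagReplace, tagInsert:
			out = append(out, b[op.J1:op.J2]...)
		case tagDelete:
			// a[op.I1:op.I2] is dropped from the output
		}
	}
	return out
}

The old DiffDelta did essentially this, except that tagEqual spans became copy instructions (offset and length into the base) and tagReplace/tagInsert spans became insert instructions carrying literal target bytes.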
diff --git a/plumbing/format/packfile/diff_delta.go b/plumbing/format/packfile/diff_delta.go
index 40d450f..e3438aa 100644
--- a/plumbing/format/packfile/diff_delta.go
+++ b/plumbing/format/packfile/diff_delta.go
@@ -1,6 +1,8 @@
package packfile

import (
+	"bytes"
+	"hash/adler32"
	"io/ioutil"

	"gopkg.in/src-d/go-git.v4/plumbing"
@@ -11,7 +13,8 @@ import (
// for more info
const (
- maxCopyLen = 0xffff
+ // Standard chunk size used to generate fingerprints
+ s = 16
)

// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object,
@@ -51,52 +54,99 @@ func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error)
return delta, nil
}
-// DiffDelta returns the delta that transforms baseBuf into targetBuf.
-func DiffDelta(baseBuf []byte, targetBuf []byte) []byte {
- var outBuff []byte
-
- outBuff = append(outBuff, deltaEncodeSize(len(baseBuf))...)
- outBuff = append(outBuff, deltaEncodeSize(len(targetBuf))...)
-
- sm := newMatcher(baseBuf, targetBuf)
- for _, op := range sm.GetOpCodes() {
- switch {
- case op.Tag == tagEqual:
- copyStart := op.I1
- copyLen := op.I2 - op.I1
- for {
- if copyLen <= 0 {
- break
- }
- var toCopy int
- if copyLen < maxCopyLen {
- toCopy = copyLen
- } else {
- toCopy = maxCopyLen
- }
-
- outBuff = append(outBuff, encodeCopyOperation(copyStart, toCopy)...)
- copyStart += toCopy
- copyLen -= toCopy
- }
- case op.Tag == tagReplace || op.Tag == tagInsert:
- s := op.J2 - op.J1
- o := op.J1
- for {
- if s <= 127 {
- break
- }
- outBuff = append(outBuff, byte(127))
- outBuff = append(outBuff, targetBuf[o:o+127]...)
- s -= 127
- o += 127
- }
- outBuff = append(outBuff, byte(s))
- outBuff = append(outBuff, targetBuf[o:o+s]...)
+// DiffDelta returns the delta that transforms src into tgt.
+func DiffDelta(src []byte, tgt []byte) []byte {
+ buf := bytes.NewBuffer(nil)
+ buf.Write(deltaEncodeSize(len(src)))
+ buf.Write(deltaEncodeSize(len(tgt)))
+
+ sindex := initMatch(src)
+
+ ibuf := bytes.NewBuffer(nil)
+ for i := 0; i < len(tgt); i++ {
+ offset, l := findMatch(src, tgt, sindex, i)
+
+ if l < s {
+ ibuf.WriteByte(tgt[i])
+ } else {
+ encodeInsertOperation(ibuf, buf)
+ buf.Write(encodeCopyOperation(offset, l))
+ i += l - 1
+ }
+ }
+
+ encodeInsertOperation(ibuf, buf)
+
+ return buf.Bytes()
+}
+
+func encodeInsertOperation(ibuf, buf *bytes.Buffer) {
+ if ibuf.Len() == 0 {
+ return
+ }
+
+ b := ibuf.Bytes()
+ s := ibuf.Len()
+ o := 0
+ for {
+ if s <= 127 {
+ break
}
+ buf.WriteByte(byte(127))
+ buf.Write(b[o : o+127])
+ s -= 127
+ o += 127
+ }
+ buf.WriteByte(byte(s))
+ buf.Write(b[o : o+s])
+
+ ibuf.Reset()
+}
+
+func initMatch(src []byte) map[uint32]int {
+ i := 0
+ index := make(map[uint32]int)
+ for {
+ if i+s > len(src) {
+ break
+ }
+
+ ch := adler32.Checksum(src[i : i+s])
+ index[ch] = i
+ i += s
+ }
+
+ return index
+}
+
+func findMatch(src, tgt []byte, sindex map[uint32]int, tgtOffset int) (srcOffset, l int) {
+ if len(tgt) >= tgtOffset+s {
+ ch := adler32.Checksum(tgt[tgtOffset : tgtOffset+s])
+ var ok bool
+ srcOffset, ok = sindex[ch]
+ if !ok {
+ return
+ }
+
+ l = matchLength(src, tgt, tgtOffset, srcOffset)
+ }
+
+ return
+}
+
+func matchLength(src, tgt []byte, otgt, osrc int) int {
+ l := 0
+ for {
+ if (osrc >= len(src) || otgt >= len(tgt)) || src[osrc] != tgt[otgt] {
+ break
+ }
+
+ l++
+ osrc++
+ otgt++
}
- return outBuff
+ return l
}
func deltaEncodeSize(size int) []byte {
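The insert path above mirrors the git pack delta format: an opcode byte with the high bit clear means "insert", and its low seven bits give the number of literal bytes that follow, which is why encodeInsertOperation flushes the buffered target bytes in runs of at most 127. A standalone sketch of just that framing (hypothetical names; copy opcodes, which set the high bit, are left to encodeCopyOperation as in the patch):

package main

import (
	"bytes"
	"fmt"
)

// encodeInsert frames literal bytes as pack-delta insert operations:
// a length byte in 1..127 followed by that many raw bytes.
func encodeInsert(lit []byte) []byte {
	var out bytes.Buffer
	for len(lit) > 127 {
		out.WriteByte(127)
		out.Write(lit[:127])
		lit = lit[127:]
	}
	if len(lit) > 0 {
		out.WriteByte(byte(len(lit)))
		out.Write(lit)
	}
	return out.Bytes()
}

func main() {
	d := encodeInsert(bytes.Repeat([]byte{'x'}, 300))
	fmt.Println(len(d)) // 303: 300 literals framed as runs of 127+127+46
}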