Diffstat (limited to 'plumbing/format')
20 files changed, 2179 insertions, 232 deletions
diff --git a/plumbing/format/commitgraph/commitgraph.go b/plumbing/format/commitgraph/commitgraph.go
new file mode 100644
index 0000000..e43cd89
--- /dev/null
+++ b/plumbing/format/commitgraph/commitgraph.go
@@ -0,0 +1,35 @@
+package commitgraph
+
+import (
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+// CommitData is a reduced representation of Commit as presented in the commit graph
+// file. It is useful purely as an optimization for walking commit graphs.
+type CommitData struct {
+ // TreeHash is the hash of the root tree of the commit.
+ TreeHash plumbing.Hash
+ // ParentIndexes are the indexes of the parent commits of the commit.
+ ParentIndexes []int
+ // ParentHashes are the hashes of the parent commits of the commit.
+ ParentHashes []plumbing.Hash
+ // Generation is the pre-computed generation number of the commit in the
+ // commit graph, or zero if it is not available.
+ Generation int
+ // When is the timestamp of the commit.
+ When time.Time
+}
+
+// Index provides an indexed representation of a commit graph that allows
+// access to its nodes by commit object hash.
+type Index interface {
+ // GetIndexByHash gets the index in the commit graph from a commit hash,
+ // if available.
+ GetIndexByHash(h plumbing.Hash) (int, error)
+ // GetCommitDataByIndex gets the commit data from the commit graph using
+ // an index obtained from a child node, if available.
+ GetCommitDataByIndex(i int) (*CommitData, error)
+ // Hashes returns all the hashes that are available in the index.
+ Hashes() []plumbing.Hash
+}
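
Both the file-backed index (file.go) and the in-memory index (memory.go) introduced below satisfy this interface, so graph walks can be written once against it. A minimal sketch of such a walk — walkFirstParents is a hypothetical helper, not part of this change:

	package example

	import (
		"fmt"

		"gopkg.in/src-d/go-git.v4/plumbing"
		"gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
	)

	// walkFirstParents follows the first-parent chain starting at h, using
	// only the methods declared by the Index interface.
	func walkFirstParents(idx commitgraph.Index, h plumbing.Hash) error {
		for {
			i, err := idx.GetIndexByHash(h)
			if err != nil {
				return err // plumbing.ErrObjectNotFound if h is not in the graph
			}
			data, err := idx.GetCommitDataByIndex(i)
			if err != nil {
				return err
			}
			fmt.Println(h.String(), data.When.UTC())
			if len(data.ParentHashes) == 0 {
				return nil // reached a root commit
			}
			h = data.ParentHashes[0]
		}
	}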
diff --git a/plumbing/format/commitgraph/commitgraph_test.go b/plumbing/format/commitgraph/commitgraph_test.go
new file mode 100644
index 0000000..0214f49
--- /dev/null
+++ b/plumbing/format/commitgraph/commitgraph_test.go
@@ -0,0 +1,132 @@
+package commitgraph_test
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "testing"
+
+ . "gopkg.in/check.v1"
+ fixtures "gopkg.in/src-d/go-git-fixtures.v3"
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/plumbing/format/commitgraph"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type CommitgraphSuite struct {
+ fixtures.Suite
+}
+
+var _ = Suite(&CommitgraphSuite{})
+
+func testDecodeHelper(c *C, path string) {
+ reader, err := os.Open(path)
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+
+ // Root commit
+ nodeIndex, err := index.GetIndexByHash(plumbing.NewHash("347c91919944a68e9413581a1bc15519550a3afe"))
+ c.Assert(err, IsNil)
+ commitData, err := index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 0)
+ c.Assert(len(commitData.ParentHashes), Equals, 0)
+
+ // Regular commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("e713b52d7e13807e87a002e812041f248db3f643"))
+ c.Assert(err, IsNil)
+ commitData, err = index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 1)
+ c.Assert(len(commitData.ParentHashes), Equals, 1)
+ c.Assert(commitData.ParentHashes[0].String(), Equals, "347c91919944a68e9413581a1bc15519550a3afe")
+
+ // Merge commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("b29328491a0682c259bcce28741eac71f3499f7d"))
+ c.Assert(err, IsNil)
+ commitData, err = index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 2)
+ c.Assert(len(commitData.ParentHashes), Equals, 2)
+ c.Assert(commitData.ParentHashes[0].String(), Equals, "e713b52d7e13807e87a002e812041f248db3f643")
+ c.Assert(commitData.ParentHashes[1].String(), Equals, "03d2c021ff68954cf3ef0a36825e194a4b98f981")
+
+ // Octopus merge commit
+ nodeIndex, err = index.GetIndexByHash(plumbing.NewHash("6f6c5d2be7852c782be1dd13e36496dd7ad39560"))
+ c.Assert(err, IsNil)
+ commitData, err = index.GetCommitDataByIndex(nodeIndex)
+ c.Assert(err, IsNil)
+ c.Assert(len(commitData.ParentIndexes), Equals, 3)
+ c.Assert(len(commitData.ParentHashes), Equals, 3)
+ c.Assert(commitData.ParentHashes[0].String(), Equals, "ce275064ad67d51e99f026084e20827901a8361c")
+ c.Assert(commitData.ParentHashes[1].String(), Equals, "bb13916df33ed23004c3ce9ed3b8487528e655c1")
+ c.Assert(commitData.ParentHashes[2].String(), Equals, "a45273fe2d63300e1962a9e26a6b15c276cd7082")
+
+ // Check all hashes
+ hashes := index.Hashes()
+ c.Assert(len(hashes), Equals, 11)
+ c.Assert(hashes[0].String(), Equals, "03d2c021ff68954cf3ef0a36825e194a4b98f981")
+ c.Assert(hashes[10].String(), Equals, "e713b52d7e13807e87a002e812041f248db3f643")
+}
+
+func (s *CommitgraphSuite) TestDecode(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+ testDecodeHelper(c, path.Join(dotgit.Root(), "objects", "info", "commit-graph"))
+ })
+}
+
+func (s *CommitgraphSuite) TestReencode(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+
+ reader, err := os.Open(path.Join(dotgit.Root(), "objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ defer reader.Close()
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+
+ writer, err := ioutil.TempFile(dotgit.Root(), "commit-graph")
+ c.Assert(err, IsNil)
+ tmpName := writer.Name()
+ defer os.Remove(tmpName)
+ encoder := commitgraph.NewEncoder(writer)
+ err = encoder.Encode(index)
+ c.Assert(err, IsNil)
+ writer.Close()
+
+ testDecodeHelper(c, tmpName)
+ })
+}
+
+func (s *CommitgraphSuite) TestReencodeInMemory(c *C) {
+ fixtures.ByTag("commit-graph").Test(c, func(f *fixtures.Fixture) {
+ dotgit := f.DotGit()
+
+ reader, err := os.Open(path.Join(dotgit.Root(), "objects", "info", "commit-graph"))
+ c.Assert(err, IsNil)
+ index, err := commitgraph.OpenFileIndex(reader)
+ c.Assert(err, IsNil)
+ memoryIndex := commitgraph.NewMemoryIndex()
+ for i, hash := range index.Hashes() {
+ commitData, err := index.GetCommitDataByIndex(i)
+ c.Assert(err, IsNil)
+ memoryIndex.Add(hash, commitData)
+ }
+ reader.Close()
+
+ writer, err := ioutil.TempFile(dotgit.Root(), "commit-graph")
+ c.Assert(err, IsNil)
+ tmpName := writer.Name()
+ defer os.Remove(tmpName)
+ encoder := commitgraph.NewEncoder(writer)
+ err = encoder.Encode(memoryIndex)
+ c.Assert(err, IsNil)
+ writer.Close()
+
+ testDecodeHelper(c, tmpName)
+ })
+}
diff --git a/plumbing/format/commitgraph/doc.go b/plumbing/format/commitgraph/doc.go
new file mode 100644
index 0000000..41cd8b1
--- /dev/null
+++ b/plumbing/format/commitgraph/doc.go
@@ -0,0 +1,103 @@
+// Package commitgraph implements encoding and decoding of commit-graph files.
+//
+// Git commit graph format
+// =======================
+//
+// The Git commit graph stores a list of commit OIDs and some associated
+// metadata, including:
+//
+// - The generation number of the commit. Commits with no parents have
+//   generation number 1; commits with parents have generation number
+//   one more than the maximum generation number of their parents. We
+//   reserve zero as special, and it can be used to mark a generation
+//   number as invalid or "not computed".
+//
+// - The root tree OID.
+//
+// - The commit date.
+//
+// - The parents of the commit, stored using positional references within
+//   the graph file.
+//
+// These positional references are stored as unsigned 32-bit integers
+// corresponding to the array position within the list of commit OIDs. Due
+// to some special constants we use to track parents, we can store at most
+// (1 << 30) + (1 << 29) + (1 << 28) - 1 (around 1.8 billion) commits.
+//
+// == Commit graph files have the following format:
+//
+// In order to allow extensions that add extra data to the graph, we organize
+// the body into "chunks" and provide a binary lookup table at the beginning
+// of the body. The header includes certain values, such as number of chunks
+// and hash type.
+//
+// All 4-byte numbers are in network order.
+//
+// HEADER:
+//
+//   4-byte signature:
+//     The signature is: {'C', 'G', 'P', 'H'}
+//
+//   1-byte version number:
+//     Currently, the only valid version is 1.
+//
+//   1-byte Hash Version (1 = SHA-1)
+//     We infer the hash length (H) from this value.
+//
+//   1-byte number (C) of "chunks"
+//
+//   1-byte (reserved for later use)
+//     Current clients should ignore this value.
+//
+// CHUNK LOOKUP:
+//
+//   (C + 1) * 12 bytes listing the table of contents for the chunks:
+//     First 4 bytes describe the chunk id. Value 0 is a terminating label.
+//     Other 8 bytes provide the byte-offset in current file for chunk to
+//     start. (Chunks are ordered contiguously in the file, so you can infer
+//     the length using the next chunk position if necessary.) Each chunk
+//     ID appears at most once.
+//
+//   The remaining data in the body is described one chunk at a time, and
+//   these chunks may be given in any order. Chunks are required unless
+//   otherwise specified.
+//
+// CHUNK DATA:
+//
+//   OID Fanout (ID: {'O', 'I', 'D', 'F'}) (256 * 4 bytes)
+//     The ith entry, F[i], stores the number of OIDs with first
+//     byte at most i. Thus F[255] stores the total
+//     number of commits (N).
+//
+//   OID Lookup (ID: {'O', 'I', 'D', 'L'}) (N * H bytes)
+//     The OIDs for all commits in the graph, sorted in ascending order.
+//
+//   Commit Data (ID: {'C', 'D', 'A', 'T'}) (N * (H + 16) bytes)
+//     * The first H bytes are for the OID of the root tree.
+//     * The next 8 bytes are for the positions of the first two parents
+//       of the ith commit. Stores value 0x70000000 if no parent in that
+//       position. If there are more than two parents, the second value
+//       has its most-significant bit on and the other bits store an array
+//       position into the Extra Edge List chunk.
+//     * The next 8 bytes store the generation number of the commit and
+//       the commit time in seconds since EPOCH. The generation number
+//       uses the higher 30 bits of the first 4 bytes, while the commit
+//       time uses the 32 bits of the second 4 bytes, along with the lowest
+//       2 bits of the lowest byte, storing the 33rd and 34th bit of the
+//       commit time.
+//
+//   Extra Edge List (ID: {'E', 'D', 'G', 'E'}) [Optional]
+//     This list of 4-byte values stores the second through nth parents for
+//     all octopus merges. The second parent value in the commit data stores
+//     an array position within this list along with the most-significant bit
+//     on. Starting at that array position, iterate through this list of commit
+//     positions for the parents until reaching a value with the most-significant
+//     bit on. The other bits correspond to the position of the last parent.
+//
+// TRAILER:
+//
+//   H-byte HASH-checksum of all of the above.
+//
+// Source:
+// https://raw.githubusercontent.com/git/git/master/Documentation/technical/commit-graph-format.txt
+package commitgraph
diff --git a/plumbing/format/commitgraph/encoder.go b/plumbing/format/commitgraph/encoder.go
new file mode 100644
index 0000000..a06871c
--- /dev/null
+++ b/plumbing/format/commitgraph/encoder.go
@@ -0,0 +1,190 @@
+package commitgraph
+
+import (
+ "crypto/sha1"
+ "hash"
+ "io"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+// Encoder writes MemoryIndex structs to an output stream.
+type Encoder struct {
+ io.Writer
+ hash hash.Hash
+}
+
+// NewEncoder returns a new stream encoder that writes to w. Every byte
+// written through the encoder also updates the internal SHA-1 state, which
+// is later emitted as the file trailer by encodeChecksum.
+func NewEncoder(w io.Writer) *Encoder {
+ h := sha1.New()
+ mw := io.MultiWriter(w, h)
+ return &Encoder{mw, h}
+}
+
+// Encode writes an index into the commit-graph file
+func (e *Encoder) Encode(idx Index) error {
+ var err error
+
+ // Get all the hashes in the input index
+ hashes := idx.Hashes()
+
+ // Sort the input and prepare the helper structures we'll need for encoding
+ hashToIndex, fanout, extraEdgesCount := e.prepare(idx, hashes)
+
+ chunkSignatures := [][]byte{oidFanoutSignature, oidLookupSignature, commitDataSignature}
+ chunkSizes := []uint64{4 * 256, uint64(len(hashes)) * 20, uint64(len(hashes)) * 36}
+ if extraEdgesCount > 0 {
+ chunkSignatures = append(chunkSignatures, extraEdgeListSignature)
+ chunkSizes = append(chunkSizes, uint64(extraEdgesCount)*4)
+ }
+
+ if err = e.encodeFileHeader(len(chunkSignatures)); err != nil {
+ return err
+ }
+ if err = e.encodeChunkHeaders(chunkSignatures, chunkSizes); err != nil {
+ return err
+ }
+ if err = e.encodeFanout(fanout); err != nil {
+ return err
+ }
+ if err = e.encodeOidLookup(hashes); err != nil {
+ return err
+ }
+ extraEdges, err := e.encodeCommitData(hashes, hashToIndex, idx)
+ if err != nil {
+ return err
+ }
+ if err = e.encodeExtraEdges(extraEdges); err != nil {
+ return err
+ }
+ return e.encodeChecksum()
+}
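
The chunk sizes computed above fully determine the size of the output file. A back-of-the-envelope helper — illustrative only, assuming the SHA-1 hash size (H = 20) used throughout this package:

	// expectedFileSize returns the number of bytes Encode produces for n
	// commits and e extra-edge entries (e == 0 omits the EDGE chunk and its
	// table-of-contents entry).
	func expectedFileSize(n, e int) int {
		chunks := 3                 // OIDF, OIDL, CDAT
		size := 256*4 + n*20 + n*36 // fanout + OID lookup + commit data
		if e > 0 {
			chunks++ // EDGE
			size += e * 4
		}
		// 8-byte header, (chunks+1) 12-byte chunk headers (terminator
		// included), the chunk data itself, and the 20-byte SHA-1 trailer.
		return 8 + (chunks+1)*12 + size + 20
	}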
+
+func (e *Encoder) prepare(idx Index, hashes []plumbing.Hash) (hashToIndex map[plumbing.Hash]uint32, fanout []uint32, extraEdgesCount uint32) {
+ // Sort the hashes and build our index
+ plumbing.HashesSort(hashes)
+ hashToIndex = make(map[plumbing.Hash]uint32)
+ fanout = make([]uint32, 256)
+ for i, hash := range hashes {
+ hashToIndex[hash] = uint32(i)
+ fanout[hash[0]]++
+ }
+
+ // Convert the fanout to cumulative values
+ for i := 1; i <= 0xff; i++ {
+ fanout[i] += fanout[i-1]
+ }
+
+ // Find out if we will need the extra edge table. Each octopus merge
+ // contributes one entry per parent after the first.
+ for i := 0; i < len(hashes); i++ {
+ v, _ := idx.GetCommitDataByIndex(i)
+ if len(v.ParentHashes) > 2 {
+ extraEdgesCount += uint32(len(v.ParentHashes) - 1)
+ }
+ }
+
+ return
+}
+
+func (e *Encoder) encodeFileHeader(chunkCount int) (err error) {
+ if _, err = e.Write(commitFileSignature); err == nil {
+ _, err = e.Write([]byte{1, 1, byte(chunkCount), 0})
+ }
+ return
+}
+
+func (e *Encoder) encodeChunkHeaders(chunkSignatures [][]byte, chunkSizes []uint64) (err error) {
+ // The first chunk starts after the 8-byte file header, the 12-byte
+ // per-chunk headers, and the 12-byte terminator; e.g. with three chunks
+ // that is offset 8 + 3*12 + 12 = 56.
+ offset := uint64(8 + len(chunkSignatures)*12 + 12)
+ for i, signature := range chunkSignatures {
+ if _, err = e.Write(signature); err == nil {
+ err = binary.WriteUint64(e, offset)
+ }
+ if err != nil {
+ return
+ }
+ offset += chunkSizes[i]
+ }
+ if _, err = e.Write(lastSignature); err == nil {
+ err = binary.WriteUint64(e, offset)
+ }
+ return
+}
+
+func (e *Encoder) encodeFanout(fanout []uint32) (err error) {
+ for i := 0; i <= 0xff; i++ {
+ if err = binary.WriteUint32(e, fanout[i]); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeOidLookup(hashes []plumbing.Hash) (err error) {
+ for _, hash := range hashes {
+ if _, err = e.Write(hash[:]); err != nil {
+ return err
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeCommitData(hashes []plumbing.Hash, hashToIndex map[plumbing.Hash]uint32, idx Index) (extraEdges []uint32, err error) {
+ for _, hash := range hashes {
+ origIndex, _ := idx.GetIndexByHash(hash)
+ commitData, _ := idx.GetCommitDataByIndex(origIndex)
+ if _, err = e.Write(commitData.TreeHash[:]); err != nil {
+ return
+ }
+
+ var parent1, parent2 uint32
+ if len(commitData.ParentHashes) == 0 {
+ parent1 = parentNone
+ parent2 = parentNone
+ } else if len(commitData.ParentHashes) == 1 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = parentNone
+ } else if len(commitData.ParentHashes) == 2 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = hashToIndex[commitData.ParentHashes[1]]
+ } else if len(commitData.ParentHashes) > 2 {
+ parent1 = hashToIndex[commitData.ParentHashes[0]]
+ parent2 = uint32(len(extraEdges)) | parentOctopusUsed
+ for _, parentHash := range commitData.ParentHashes[1:] {
+ extraEdges = append(extraEdges, hashToIndex[parentHash])
+ }
+ extraEdges[len(extraEdges)-1] |= parentLast
+ }
+
+ if err = binary.WriteUint32(e, parent1); err == nil {
+ err = binary.WriteUint32(e, parent2)
+ }
+ if err != nil {
+ return
+ }
+
+ unixTime := uint64(commitData.When.Unix())
+ unixTime |= uint64(commitData.Generation) << 34
+ if err = binary.WriteUint64(e, unixTime); err != nil {
+ return
+ }
+ }
+ return
+}
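
The 64-bit word written at the end of the loop packs the commit time into the low 34 bits and the generation number into the 30 bits above them. A sketch of the inverse, mirroring the decoding in file.go below (needs "time" imported if placed in this file):

	// unpackGenAndTime splits a CDAT generation-and-time word back into its
	// two fields.
	func unpackGenAndTime(word uint64) (generation int, when time.Time) {
		generation = int(word >> 34)                 // upper 30 bits
		when = time.Unix(int64(word&0x3FFFFFFFF), 0) // lower 34 bits
		return
	}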
+
+func (e *Encoder) encodeExtraEdges(extraEdges []uint32) (err error) {
+ for _, parent := range extraEdges {
+ if err = binary.WriteUint32(e, parent); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (e *Encoder) encodeChecksum() error {
+ _, err := e.Write(e.hash.Sum(nil)[:20])
+ return err
+}
diff --git a/plumbing/format/commitgraph/file.go b/plumbing/format/commitgraph/file.go
new file mode 100644
index 0000000..175d279
--- /dev/null
+++ b/plumbing/format/commitgraph/file.go
@@ -0,0 +1,259 @@
+package commitgraph
+
+import (
+ "bytes"
+ encbin "encoding/binary"
+ "errors"
+ "io"
+ "time"
+
+ "gopkg.in/src-d/go-git.v4/plumbing"
+ "gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+var (
+ // ErrUnsupportedVersion is returned by OpenFileIndex when the commit graph
+ // file version is not supported.
+ ErrUnsupportedVersion = errors.New("Unsupported version")
+ // ErrUnsupportedHash is returned by OpenFileIndex when the commit graph
+ // hash function is not supported. Currently only SHA-1 is defined and
+ // supported
+ ErrUnsupportedHash = errors.New("Unsupported hash algorithm")
+ // ErrMalformedCommitGraphFile is returned by OpenFileIndex when the commit
+ // graph file is corrupted.
+ ErrMalformedCommitGraphFile = errors.New("Malformed commit graph file")
+
+ commitFileSignature = []byte{'C', 'G', 'P', 'H'}
+ oidFanoutSignature = []byte{'O', 'I', 'D', 'F'}
+ oidLookupSignature = []byte{'O', 'I', 'D', 'L'}
+ commitDataSignature = []byte{'C', 'D', 'A', 'T'}
+ extraEdgeListSignature = []byte{'E', 'D', 'G', 'E'}
+ lastSignature = []byte{0, 0, 0, 0}
+
+ parentNone = uint32(0x70000000)
+ parentOctopusUsed = uint32(0x80000000)
+ parentOctopusMask = uint32(0x7fffffff)
+ parentLast = uint32(0x80000000)
+)
+
+type fileIndex struct {
+ reader io.ReaderAt
+ fanout [256]int
+ oidFanoutOffset int64
+ oidLookupOffset int64
+ commitDataOffset int64
+ extraEdgeListOffset int64
+}
+
+// OpenFileIndex opens a serialized commit graph file in the format described at
+// https://github.com/git/git/blob/master/Documentation/technical/commit-graph-format.txt
+func OpenFileIndex(reader io.ReaderAt) (Index, error) {
+ fi := &fileIndex{reader: reader}
+
+ if err := fi.verifyFileHeader(); err != nil {
+ return nil, err
+ }
+ if err := fi.readChunkHeaders(); err != nil {
+ return nil, err
+ }
+ if err := fi.readFanout(); err != nil {
+ return nil, err
+ }
+
+ return fi, nil
+}
+
+func (fi *fileIndex) verifyFileHeader() error {
+ // Verify file signature
+ var signature = make([]byte, 4)
+ if _, err := fi.reader.ReadAt(signature, 0); err != nil {
+ return err
+ }
+ if !bytes.Equal(signature, commitFileSignature) {
+ return ErrMalformedCommitGraphFile
+ }
+
+ // Read and verify the file header
+ var header = make([]byte, 4)
+ if _, err := fi.reader.ReadAt(header, 4); err != nil {
+ return err
+ }
+ if header[0] != 1 {
+ return ErrUnsupportedVersion
+ }
+ if header[1] != 1 {
+ return ErrUnsupportedHash
+ }
+
+ return nil
+}
+
+func (fi *fileIndex) readChunkHeaders() error {
+ var chunkID = make([]byte, 4)
+ for i := 0; ; i++ {
+ chunkHeader := io.NewSectionReader(fi.reader, 8+(int64(i)*12), 12)
+ if _, err := io.ReadAtLeast(chunkHeader, chunkID, 4); err != nil {
+ return err
+ }
+ chunkOffset, err := binary.ReadUint64(chunkHeader)
+ if err != nil {
+ return err
+ }
+
+ if bytes.Equal(chunkID, oidFanoutSignature) {
+ fi.oidFanoutOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, oidLookupSignature) {
+ fi.oidLookupOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, commitDataSignature) {
+ fi.commitDataOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, extraEdgeListSignature) {
+ fi.extraEdgeListOffset = int64(chunkOffset)
+ } else if bytes.Equal(chunkID, lastSignature) {
+ break
+ }
+ }
+
+ if fi.oidFanoutOffset <= 0 || fi.oidLookupOffset <= 0 || fi.commitDataOffset <= 0 {
+ return ErrMalformedCommitGraphFile
+ }
+
+ return nil
+}
+
+func (fi *fileIndex) readFanout() error {
+ fanoutReader := io.NewSectionReader(fi.reader, fi.oidFanoutOffset, 256*4)
+ for i := 0; i < 256; i++ {
+ fanoutValue, err := binary.ReadUint32(fanoutReader)
+ if err != nil {
+ return err
+ }
+ if fanoutValue > 0x7fffffff {
+ return ErrMalformedCommitGraphFile
+ }
+ fi.fanout[i] = int(fanoutValue)
+ }
+ return nil
+}
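
Because the decoded fanout is cumulative, the search range for any leading byte falls out directly; this is exactly the arithmetic GetIndexByHash performs below. An illustrative helper, not part of this change:

	// bucketRange returns the half-open range [low, high) of entries in the
	// sorted OID lookup table whose hashes begin with firstByte.
	func bucketRange(fanout [256]int, firstByte byte) (low, high int) {
		if firstByte > 0 {
			low = fanout[firstByte-1]
		}
		high = fanout[firstByte]
		return
	}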
+
+func (fi *fileIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
+ var oid plumbing.Hash
+
+ // The cumulative fanout table bounds the binary search: OIDs whose first
+ // byte equals h[0] occupy entries [fanout[h[0]-1], fanout[h[0]]) of the
+ // sorted OID lookup table.
+ var low int
+ if h[0] == 0 {
+ low = 0
+ } else {
+ low = fi.fanout[h[0]-1]
+ }
+ high := fi.fanout[h[0]]
+ for low < high {
+ mid := (low + high) >> 1
+ offset := fi.oidLookupOffset + int64(mid)*20
+ if _, err := fi.reader.ReadAt(oid[:], offset); err != nil {
+ return 0, err
+ }
+ cmp := bytes.Compare(h[:], oid[:])
+ if cmp < 0 {
+ high = mid
+ } else if cmp == 0 {
+ return mid, nil
+ } else {
+ low = mid + 1
+ }
+ }
+
+ return 0, plumbing.ErrObjectNotFound
+}
+
+func (fi *fileIndex) GetCommitDataByIndex(idx int) (*CommitData, error) {
+ if idx >= fi.fanout[0xff] {
+ return nil, plumbing.ErrObjectNotFound
+ }
+
+ offset := fi.commitDataOffset + int64(idx)*36
+ commitDataReader := io.NewSectionReader(fi.reader, offset, 36)
+
+ treeHash, err := binary.ReadHash(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ parent1, err := binary.ReadUint32(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ parent2, err := binary.ReadUint32(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+ genAndTime, err := binary.ReadUint64(commitDataReader)
+ if err != nil {
+ return nil, err
+ }
+
+ var parentIndexes []int
+ if parent2&parentOctopusUsed == parentOctopusUsed {
+ // Octopus merge
+ parentIndexes = []int{int(parent1 & parentOctopusMask)}
+ offset := fi.extraEdgeListOffset + 4*int64(parent2&parentOctopusMask)
+ buf := make([]byte, 4)
+ for {
+ _, err := fi.reader.ReadAt(buf, offset)
+ if err != nil {
+ return nil, err
+ }
+
+ parent := encbin.BigEndian.Uint32(buf)
+ offset += 4
+ parentIndexes = append(parentIndexes, int(parent&parentOctopusMask))
+ if parent&parentLast == parentLast {
+ break
+ }
+ }
+ } else if parent2 != parentNone {
+ parentIndexes = []int{int(parent1 & parentOctopusMask), int(parent2 & parentOctopusMask)}
+ } else if parent1 != parentNone {
+ parentIndexes = []int{int(parent1 & parentOctopusMask)}
+ }
+
+ parentHashes, err := fi.getHashesFromIndexes(parentIndexes)
+ if err != nil {
+ return nil, err
+ }
+
+ return &CommitData{
+ TreeHash: treeHash,
+ ParentIndexes: parentIndexes,
+ ParentHashes: parentHashes,
+ Generation: int(genAndTime >> 34),
+ When: time.Unix(int64(genAndTime&0x3FFFFFFFF), 0),
+ }, nil
+}
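
To summarize the branching above: the two 4-byte parent fields decode against the sentinel constants declared at the top of this file. A hypothetical same-package helper spelling out the cases:

	// describeParents reports how a CDAT entry's parent fields are to be read.
	func describeParents(parent1, parent2 uint32) string {
		switch {
		case parent1 == parentNone:
			return "root commit, no parents"
		case parent2 == parentNone:
			return "one parent: parent1"
		case parent2&parentOctopusUsed != 0:
			// extra parents start at EDGE entry parent2 & parentOctopusMask
			return "octopus merge: parent1, then EDGE entries until one has parentLast set"
		default:
			return "two parents: parent1 and parent2"
		}
	}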
+
+func (fi *fileIndex) getHashesFromIndexes(indexes []int) ([]plumbing.Hash, error) {
+ hashes := make([]plumbing.Hash, len(indexes))
+
+ for i, idx := range indexes {
+ if idx >= fi.fanout[0xff] {
+ return nil, ErrMalformedCommitGraphFile
+ }
+
+ offset := fi.oidLookupOffset + int64(idx)*20
+ if _, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil {
+ return nil, err
+ }
+ }
+
+ return hashes, nil
+}
+
+// Hashes returns all the hashes that are available in the index
+func (fi *fileIndex) Hashes() []plumbing.Hash {
+ hashes := make([]plumbing.Hash, fi.fanout[0xff])
+ for i := 0; i < fi.fanout[0xff]; i++ {
+ offset := fi.oidLookupOffset + int64(i)*20
+ if n, err := fi.reader.ReadAt(hashes[i][:], offset); err != nil || n < 20 {
+ return nil
+ }
+ }
+ return hashes
+}
diff --git a/plumbing/format/commitgraph/memory.go b/plumbing/format/commitgraph/memory.go
new file mode 100644
index 0000000..a4a96e9
--- /dev/null
+++ b/plumbing/format/commitgraph/memory.go
@@ -0,0 +1,72 @@
+package commitgraph
+
+import (
+ "gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+// MemoryIndex provides a way to build the commit-graph in memory
+// for later encoding to file.
+type MemoryIndex struct {
+ commitData []*CommitData
+ indexMap map[plumbing.Hash]int
+}
+
+// NewMemoryIndex creates an in-memory commit graph representation.
+func NewMemoryIndex() *MemoryIndex {
+ return &MemoryIndex{
+ indexMap: make(map[plumbing.Hash]int),
+ }
+}
+
+// GetIndexByHash gets the index in the commit graph from commit hash, if available
+func (mi *MemoryIndex) GetIndexByHash(h plumbing.Hash) (int, error) {
+ i, ok := mi.indexMap[h]
+ if ok {
+ return i, nil
+ }
+
+ return 0, plumbing.ErrObjectNotFound
+}
+
+// GetCommitDataByIndex gets the commit data from the commit graph using an
+// index obtained from a child node, if available.
+func (mi *MemoryIndex) GetCommitDataByIndex(i int) (*CommitData, error) {
+ if i >= len(mi.commitData) {
+ return nil, plumbing.ErrObjectNotFound
+ }
+
+ commitData := mi.commitData[i]
+
+ // Map parent hashes to parent indexes
+ if commitData.ParentIndexes == nil {
+ parentIndexes := make([]int, len(commitData.ParentHashes))
+ for i, parentHash := range commitData.ParentHashes {
+ var err error
+ if parentIndexes[i], err = mi.GetIndexByHash(parentHash); err != nil {
+ return nil, err
+ }
+ }
+ commitData.ParentIndexes = parentIndexes
+ }
+
+ return commitData, nil
+}
+
+// Hashes returns all the hashes that are available in the index
+func (mi *MemoryIndex) Hashes() []plumbing.Hash {
+ hashes := make([]plumbing.Hash, 0, len(mi.indexMap))
+ for k := range mi.indexMap {
+ hashes = append(hashes, k)
+ }
+ return hashes
+}
+
+// Add adds new node to the memory index
+func (mi *MemoryIndex) Add(hash plumbing.Hash, commitData *CommitData) {
+ // The parent indexes are calculated lazily in GetCommitDataByIndex,
+ // which allows adding nodes out of order as long as all parents
+ // are eventually resolved
+ commitData.ParentIndexes = nil
+ mi.indexMap[hash] = len(mi.commitData)
+ mi.commitData = append(mi.commitData, commitData)
+}
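
Because Add clears ParentIndexes and resolution happens lazily, insertion order is flexible; generation numbers, however, are left to the caller. A sketch of filling a MemoryIndex in topological order while computing generations per the rule in doc.go (roots get 1, all others 1 + the maximum parent generation) — addWithGeneration is a hypothetical same-package helper:

	// addWithGeneration assumes all parents of hash were added before it.
	func addWithGeneration(mi *MemoryIndex, hash plumbing.Hash, data *CommitData) {
		gen := 1
		for _, parentHash := range data.ParentHashes {
			if i, err := mi.GetIndexByHash(parentHash); err == nil {
				if pd, err := mi.GetCommitDataByIndex(i); err == nil && pd.Generation >= gen {
					gen = pd.Generation + 1
				}
			}
		}
		data.Generation = gen
		mi.Add(hash, data)
	}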
diff --git a/plumbing/format/gitattributes/attributes.go b/plumbing/format/gitattributes/attributes.go new file mode 100644 index 0000000..d13c2a9 --- /dev/null +++ b/plumbing/format/gitattributes/attributes.go @@ -0,0 +1,214 @@ +package gitattributes + +import ( + "errors" + "io" + "io/ioutil" + "strings" +) + +const ( + commentPrefix = "#" + eol = "\n" + macroPrefix = "[attr]" +) + +var ( + ErrMacroNotAllowed = errors.New("macro not allowed") + ErrInvalidAttributeName = errors.New("Invalid attribute name") +) + +type MatchAttribute struct { + Name string + Pattern Pattern + Attributes []Attribute +} + +type attributeState byte + +const ( + attributeUnknown attributeState = 0 + attributeSet attributeState = 1 + attributeUnspecified attributeState = '!' + attributeUnset attributeState = '-' + attributeSetValue attributeState = '=' +) + +type Attribute interface { + Name() string + IsSet() bool + IsUnset() bool + IsUnspecified() bool + IsValueSet() bool + Value() string + String() string +} + +type attribute struct { + name string + state attributeState + value string +} + +func (a attribute) Name() string { + return a.name +} + +func (a attribute) IsSet() bool { + return a.state == attributeSet +} + +func (a attribute) IsUnset() bool { + return a.state == attributeUnset +} + +func (a attribute) IsUnspecified() bool { + return a.state == attributeUnspecified +} + +func (a attribute) IsValueSet() bool { + return a.state == attributeSetValue +} + +func (a attribute) Value() string { + return a.value +} + +func (a attribute) String() string { + switch a.state { + case attributeSet: + return a.name + ": set" + case attributeUnset: + return a.name + ": unset" + case attributeUnspecified: + return a.name + ": unspecified" + default: + return a.name + ": " + a.value + } +} + +// ReadAttributes reads patterns and attributes from the gitattributes format. +func ReadAttributes(r io.Reader, domain []string, allowMacro bool) (attributes []MatchAttribute, err error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + for _, line := range strings.Split(string(data), eol) { + attribute, err := ParseAttributesLine(line, domain, allowMacro) + if err != nil { + return attributes, err + } + if len(attribute.Name) == 0 { + continue + } + + attributes = append(attributes, attribute) + } + + return attributes, nil +} + +// ParseAttributesLine parses a gitattribute line, extracting path pattern and +// attributes. +func ParseAttributesLine(line string, domain []string, allowMacro bool) (m MatchAttribute, err error) { + line = strings.TrimSpace(line) + + if strings.HasPrefix(line, commentPrefix) || len(line) == 0 { + return + } + + name, unquoted := unquote(line) + attrs := strings.Fields(unquoted) + if len(name) == 0 { + name = attrs[0] + attrs = attrs[1:] + } + + var macro bool + macro, name, err = checkMacro(name, allowMacro) + if err != nil { + return + } + + m.Name = name + m.Attributes = make([]Attribute, 0, len(attrs)) + + for _, attrName := range attrs { + attr := attribute{ + name: attrName, + state: attributeSet, + } + + // ! 
and - prefixes + state := attributeState(attr.name[0]) + if state == attributeUnspecified || state == attributeUnset { + attr.state = state + attr.name = attr.name[1:] + } + + kv := strings.SplitN(attrName, "=", 2) + if len(kv) == 2 { + attr.name = kv[0] + attr.value = kv[1] + attr.state = attributeSetValue + } + + if !validAttributeName(attr.name) { + return m, ErrInvalidAttributeName + } + m.Attributes = append(m.Attributes, attr) + } + + if !macro { + m.Pattern = ParsePattern(name, domain) + } + return +} + +func checkMacro(name string, allowMacro bool) (macro bool, macroName string, err error) { + if !strings.HasPrefix(name, macroPrefix) { + return false, name, nil + } + if !allowMacro { + return true, name, ErrMacroNotAllowed + } + + macroName = name[len(macroPrefix):] + if !validAttributeName(macroName) { + return true, name, ErrInvalidAttributeName + } + return true, macroName, nil +} + +func validAttributeName(name string) bool { + if len(name) == 0 || name[0] == '-' { + return false + } + + for _, ch := range name { + if !(ch == '-' || ch == '.' || ch == '_' || + ('0' <= ch && ch <= '9') || + ('a' <= ch && ch <= 'z') || + ('A' <= ch && ch <= 'Z')) { + return false + } + } + return true +} + +func unquote(str string) (string, string) { + if str[0] != '"' { + return "", str + } + + for i := 1; i < len(str); i++ { + switch str[i] { + case '\\': + i++ + case '"': + return str[1:i], str[i+1:] + } + } + return "", str +} diff --git a/plumbing/format/gitattributes/attributes_test.go b/plumbing/format/gitattributes/attributes_test.go new file mode 100644 index 0000000..aea70ba --- /dev/null +++ b/plumbing/format/gitattributes/attributes_test.go @@ -0,0 +1,67 @@ +package gitattributes + +import ( + "strings" + + . "gopkg.in/check.v1" +) + +type AttributesSuite struct{} + +var _ = Suite(&AttributesSuite{}) + +func (s *AttributesSuite) TestAttributes_ReadAttributes(c *C) { + lines := []string{ + "[attr]sub -a", + "[attr]add a", + "* sub a", + "* !a foo=bar -b c", + } + + mas, err := ReadAttributes(strings.NewReader(strings.Join(lines, "\n")), nil, true) + c.Assert(err, IsNil) + c.Assert(len(mas), Equals, 4) + + c.Assert(mas[0].Name, Equals, "sub") + c.Assert(mas[0].Pattern, IsNil) + c.Assert(mas[0].Attributes[0].IsUnset(), Equals, true) + + c.Assert(mas[1].Name, Equals, "add") + c.Assert(mas[1].Pattern, IsNil) + c.Assert(mas[1].Attributes[0].IsSet(), Equals, true) + + c.Assert(mas[2].Name, Equals, "*") + c.Assert(mas[2].Pattern, NotNil) + c.Assert(mas[2].Attributes[0].IsSet(), Equals, true) + + c.Assert(mas[3].Name, Equals, "*") + c.Assert(mas[3].Pattern, NotNil) + c.Assert(mas[3].Attributes[0].IsUnspecified(), Equals, true) + c.Assert(mas[3].Attributes[1].IsValueSet(), Equals, true) + c.Assert(mas[3].Attributes[1].Value(), Equals, "bar") + c.Assert(mas[3].Attributes[2].IsUnset(), Equals, true) + c.Assert(mas[3].Attributes[3].IsSet(), Equals, true) + c.Assert(mas[3].Attributes[0].String(), Equals, "a: unspecified") + c.Assert(mas[3].Attributes[1].String(), Equals, "foo: bar") + c.Assert(mas[3].Attributes[2].String(), Equals, "b: unset") + c.Assert(mas[3].Attributes[3].String(), Equals, "c: set") +} + +func (s *AttributesSuite) TestAttributes_ReadAttributesDisallowMacro(c *C) { + lines := []string{ + "[attr]sub -a", + "* a add", + } + + _, err := ReadAttributes(strings.NewReader(strings.Join(lines, "\n")), nil, false) + c.Assert(err, Equals, ErrMacroNotAllowed) +} + +func (s *AttributesSuite) TestAttributes_ReadAttributesInvalidName(c *C) { + lines := []string{ + "[attr]foo!bar -a", + } + + 
_, err := ReadAttributes(strings.NewReader(strings.Join(lines, "\n")), nil, true) + c.Assert(err, Equals, ErrInvalidAttributeName) +} diff --git a/plumbing/format/gitattributes/dir.go b/plumbing/format/gitattributes/dir.go new file mode 100644 index 0000000..d5c1e6a --- /dev/null +++ b/plumbing/format/gitattributes/dir.go @@ -0,0 +1,126 @@ +package gitattributes + +import ( + "os" + "os/user" + + "gopkg.in/src-d/go-billy.v4" + "gopkg.in/src-d/go-git.v4/plumbing/format/config" + gioutil "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +const ( + coreSection = "core" + attributesfile = "attributesfile" + gitDir = ".git" + gitattributesFile = ".gitattributes" + gitconfigFile = ".gitconfig" + systemFile = "/etc/gitconfig" +) + +func ReadAttributesFile(fs billy.Filesystem, path []string, attributesFile string, allowMacro bool) ([]MatchAttribute, error) { + f, err := fs.Open(fs.Join(append(path, attributesFile)...)) + if os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, err + } + + return ReadAttributes(f, path, allowMacro) +} + +// ReadPatterns reads gitattributes patterns recursively through the directory +// structure. The result is in ascending order of priority (last higher). +// +// The .gitattribute file in the root directory will allow custom macro +// definitions. Custom macro definitions in other directories .gitattributes +// will return an error. +func ReadPatterns(fs billy.Filesystem, path []string) (attributes []MatchAttribute, err error) { + attributes, err = ReadAttributesFile(fs, path, gitattributesFile, true) + if err != nil { + return + } + + attrs, err := walkDirectory(fs, path) + return append(attributes, attrs...), err +} + +func walkDirectory(fs billy.Filesystem, root []string) (attributes []MatchAttribute, err error) { + fis, err := fs.ReadDir(fs.Join(root...)) + if err != nil { + return attributes, err + } + + for _, fi := range fis { + if !fi.IsDir() || fi.Name() == ".git" { + continue + } + + path := append(root, fi.Name()) + + dirAttributes, err := ReadAttributesFile(fs, path, gitattributesFile, false) + if err != nil { + return attributes, err + } + + subAttributes, err := walkDirectory(fs, path) + if err != nil { + return attributes, err + } + + attributes = append(attributes, append(dirAttributes, subAttributes...)...) + } + + return +} + +func loadPatterns(fs billy.Filesystem, path string) ([]MatchAttribute, error) { + f, err := fs.Open(path) + if os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, err + } + defer gioutil.CheckClose(f, &err) + + raw := config.New() + if err = config.NewDecoder(f).Decode(raw); err != nil { + return nil, nil + } + + path = raw.Section(coreSection).Options.Get(attributesfile) + if path == "" { + return nil, nil + } + + return ReadAttributesFile(fs, nil, path, true) +} + +// LoadGlobalPatterns loads gitattributes patterns and attributes from the +// gitattributes file declared in a user's ~/.gitconfig file. If the +// ~/.gitconfig file does not exist the function will return nil. If the +// core.attributesFile property is not declared, the function will return nil. +// If the file pointed to by the core.attributesfile property does not exist, +// the function will return nil. The function assumes fs is rooted at the root +// filesystem. 
+func LoadGlobalPatterns(fs billy.Filesystem) (attributes []MatchAttribute, err error) { + usr, err := user.Current() + if err != nil { + return + } + + return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile)) +} + +// LoadSystemPatterns loads gitattributes patterns and attributes from the +// gitattributes file declared in a system's /etc/gitconfig file. If the +// /etc/gitconfig file does not exist the function will return nil. If the +// core.attributesfile property is not declared, the function will return nil. +// If the file pointed to by the core.attributesfile property does not exist, +// the function will return nil. The function assumes fs is rooted at the root +// filesystem. +func LoadSystemPatterns(fs billy.Filesystem) (attributes []MatchAttribute, err error) { + return loadPatterns(fs, systemFile) +} diff --git a/plumbing/format/gitattributes/dir_test.go b/plumbing/format/gitattributes/dir_test.go new file mode 100644 index 0000000..34b915d --- /dev/null +++ b/plumbing/format/gitattributes/dir_test.go @@ -0,0 +1,199 @@ +package gitattributes + +import ( + "os" + "os/user" + "strconv" + + . "gopkg.in/check.v1" + "gopkg.in/src-d/go-billy.v4" + "gopkg.in/src-d/go-billy.v4/memfs" +) + +type MatcherSuite struct { + GFS billy.Filesystem // git repository root + RFS billy.Filesystem // root that contains user home + MCFS billy.Filesystem // root that contains user home, but missing ~/.gitattributes + MEFS billy.Filesystem // root that contains user home, but missing attributesfile entry + MIFS billy.Filesystem // root that contains user home, but missing .gitattributes + + SFS billy.Filesystem // root that contains /etc/gitattributes +} + +var _ = Suite(&MatcherSuite{}) + +func (s *MatcherSuite) SetUpTest(c *C) { + // setup root that contains user home + usr, err := user.Current() + c.Assert(err, IsNil) + + gitAttributesGlobal := func(fs billy.Filesystem, filename string) { + f, err := fs.Create(filename) + c.Assert(err, IsNil) + _, err = f.Write([]byte("# IntelliJ\n")) + c.Assert(err, IsNil) + _, err = f.Write([]byte(".idea/** text\n")) + c.Assert(err, IsNil) + _, err = f.Write([]byte("*.iml -text\n")) + c.Assert(err, IsNil) + err = f.Close() + c.Assert(err, IsNil) + } + + // setup generic git repository root + fs := memfs.New() + f, err := fs.Create(".gitattributes") + c.Assert(err, IsNil) + _, err = f.Write([]byte("vendor/g*/** foo=bar\n")) + c.Assert(err, IsNil) + err = f.Close() + c.Assert(err, IsNil) + + err = fs.MkdirAll("vendor", os.ModePerm) + c.Assert(err, IsNil) + f, err = fs.Create("vendor/.gitattributes") + c.Assert(err, IsNil) + _, err = f.Write([]byte("github.com/** -foo\n")) + c.Assert(err, IsNil) + err = f.Close() + c.Assert(err, IsNil) + + fs.MkdirAll("another", os.ModePerm) + fs.MkdirAll("vendor/github.com", os.ModePerm) + fs.MkdirAll("vendor/gopkg.in", os.ModePerm) + + gitAttributesGlobal(fs, fs.Join(usr.HomeDir, ".gitattributes_global")) + + s.GFS = fs + + fs = memfs.New() + err = fs.MkdirAll(usr.HomeDir, os.ModePerm) + c.Assert(err, IsNil) + + f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile)) + c.Assert(err, IsNil) + _, err = f.Write([]byte("[core]\n")) + c.Assert(err, IsNil) + _, err = f.Write([]byte(" attributesfile = " + strconv.Quote(fs.Join(usr.HomeDir, ".gitattributes_global")) + "\n")) + c.Assert(err, IsNil) + err = f.Close() + c.Assert(err, IsNil) + + gitAttributesGlobal(fs, fs.Join(usr.HomeDir, ".gitattributes_global")) + + s.RFS = fs + + // root that contains user home, but missing ~/.gitconfig + fs = memfs.New() + gitAttributesGlobal(fs, 
fs.Join(usr.HomeDir, ".gitattributes_global")) + + s.MCFS = fs + + // setup root that contains user home, but missing attributesfile entry + fs = memfs.New() + err = fs.MkdirAll(usr.HomeDir, os.ModePerm) + c.Assert(err, IsNil) + + f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile)) + c.Assert(err, IsNil) + _, err = f.Write([]byte("[core]\n")) + c.Assert(err, IsNil) + err = f.Close() + c.Assert(err, IsNil) + + gitAttributesGlobal(fs, fs.Join(usr.HomeDir, ".gitattributes_global")) + + s.MEFS = fs + + // setup root that contains user home, but missing .gitattributes + fs = memfs.New() + err = fs.MkdirAll(usr.HomeDir, os.ModePerm) + c.Assert(err, IsNil) + + f, err = fs.Create(fs.Join(usr.HomeDir, gitconfigFile)) + c.Assert(err, IsNil) + _, err = f.Write([]byte("[core]\n")) + c.Assert(err, IsNil) + _, err = f.Write([]byte(" attributesfile = " + strconv.Quote(fs.Join(usr.HomeDir, ".gitattributes_global")) + "\n")) + c.Assert(err, IsNil) + err = f.Close() + c.Assert(err, IsNil) + + s.MIFS = fs + + // setup root that contains user home + fs = memfs.New() + err = fs.MkdirAll("etc", os.ModePerm) + c.Assert(err, IsNil) + + f, err = fs.Create(systemFile) + c.Assert(err, IsNil) + _, err = f.Write([]byte("[core]\n")) + c.Assert(err, IsNil) + _, err = f.Write([]byte(" attributesfile = /etc/gitattributes_global\n")) + c.Assert(err, IsNil) + err = f.Close() + c.Assert(err, IsNil) + + gitAttributesGlobal(fs, "/etc/gitattributes_global") + + s.SFS = fs +} + +func (s *MatcherSuite) TestDir_ReadPatterns(c *C) { + ps, err := ReadPatterns(s.GFS, nil) + c.Assert(err, IsNil) + c.Assert(ps, HasLen, 2) + + m := NewMatcher(ps) + results, _ := m.Match([]string{"vendor", "gopkg.in", "file"}, nil) + c.Assert(results["foo"].Value(), Equals, "bar") + + results, _ = m.Match([]string{"vendor", "github.com", "file"}, nil) + c.Assert(results["foo"].IsUnset(), Equals, false) +} + +func (s *MatcherSuite) TestDir_LoadGlobalPatterns(c *C) { + ps, err := LoadGlobalPatterns(s.RFS) + c.Assert(err, IsNil) + c.Assert(ps, HasLen, 2) + + m := NewMatcher(ps) + + results, _ := m.Match([]string{"go-git.v4.iml"}, nil) + c.Assert(results["text"].IsUnset(), Equals, true) + + results, _ = m.Match([]string{".idea", "file"}, nil) + c.Assert(results["text"].IsSet(), Equals, true) +} + +func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingGitconfig(c *C) { + ps, err := LoadGlobalPatterns(s.MCFS) + c.Assert(err, IsNil) + c.Assert(ps, HasLen, 0) +} + +func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingAttributesfile(c *C) { + ps, err := LoadGlobalPatterns(s.MEFS) + c.Assert(err, IsNil) + c.Assert(ps, HasLen, 0) +} + +func (s *MatcherSuite) TestDir_LoadGlobalPatternsMissingGitattributes(c *C) { + ps, err := LoadGlobalPatterns(s.MIFS) + c.Assert(err, IsNil) + c.Assert(ps, HasLen, 0) +} + +func (s *MatcherSuite) TestDir_LoadSystemPatterns(c *C) { + ps, err := LoadSystemPatterns(s.SFS) + c.Assert(err, IsNil) + c.Assert(ps, HasLen, 2) + + m := NewMatcher(ps) + results, _ := m.Match([]string{"go-git.v4.iml"}, nil) + c.Assert(results["text"].IsUnset(), Equals, true) + + results, _ = m.Match([]string{".idea", "file"}, nil) + c.Assert(results["text"].IsSet(), Equals, true) +} diff --git a/plumbing/format/gitattributes/matcher.go b/plumbing/format/gitattributes/matcher.go new file mode 100644 index 0000000..df12864 --- /dev/null +++ b/plumbing/format/gitattributes/matcher.go @@ -0,0 +1,78 @@ +package gitattributes + +// Matcher defines a global multi-pattern matcher for gitattributes patterns +type Matcher interface { + // Match matches patterns 
in the order of priorities. + Match(path []string, attributes []string) (map[string]Attribute, bool) +} + +type MatcherOptions struct{} + +// NewMatcher constructs a new matcher. Patterns must be given in the order of +// increasing priority. That is the most generic settings files first, then the +// content of the repo .gitattributes, then content of .gitattributes down the +// path. +func NewMatcher(stack []MatchAttribute) Matcher { + m := &matcher{stack: stack} + m.init() + + return m +} + +type matcher struct { + stack []MatchAttribute + macros map[string]MatchAttribute +} + +func (m *matcher) init() { + m.macros = make(map[string]MatchAttribute) + + for _, attr := range m.stack { + if attr.Pattern == nil { + m.macros[attr.Name] = attr + } + } +} + +// Match matches path against the patterns in gitattributes files and returns +// the attributes associated with the path. +// +// Specific attributes can be specified otherwise all attributes are returned. +// +// Matched is true if any path was matched to a rule, even if the results map +// is empty. +func (m *matcher) Match(path []string, attributes []string) (results map[string]Attribute, matched bool) { + results = make(map[string]Attribute, len(attributes)) + + n := len(m.stack) + for i := n - 1; i >= 0; i-- { + if len(attributes) > 0 && len(attributes) == len(results) { + return + } + + pattern := m.stack[i].Pattern + if pattern == nil { + continue + } + + if match := pattern.Match(path); match { + matched = true + for _, attr := range m.stack[i].Attributes { + if attr.IsSet() { + m.expandMacro(attr.Name(), results) + } + results[attr.Name()] = attr + } + } + } + return +} + +func (m *matcher) expandMacro(name string, results map[string]Attribute) bool { + if macro, ok := m.macros[name]; ok { + for _, attr := range macro.Attributes { + results[attr.Name()] = attr + } + } + return false +} diff --git a/plumbing/format/gitattributes/matcher_test.go b/plumbing/format/gitattributes/matcher_test.go new file mode 100644 index 0000000..edb71a1 --- /dev/null +++ b/plumbing/format/gitattributes/matcher_test.go @@ -0,0 +1,29 @@ +package gitattributes + +import ( + "strings" + + . "gopkg.in/check.v1" +) + +func (s *MatcherSuite) TestMatcher_Match(c *C) { + lines := []string{ + "[attr]binary -diff -merge -text", + "**/middle/v[uo]l?ano binary text eol=crlf", + "volcano -eol", + "foobar diff merge text eol=lf foo=bar", + } + + ma, err := ReadAttributes(strings.NewReader(strings.Join(lines, "\n")), nil, true) + c.Assert(err, IsNil) + + m := NewMatcher(ma) + results, matched := m.Match([]string{"head", "middle", "vulkano"}, nil) + + c.Assert(matched, Equals, true) + c.Assert(results["binary"].IsSet(), Equals, true) + c.Assert(results["diff"].IsUnset(), Equals, true) + c.Assert(results["merge"].IsUnset(), Equals, true) + c.Assert(results["text"].IsSet(), Equals, true) + c.Assert(results["eol"].Value(), Equals, "crlf") +} diff --git a/plumbing/format/gitattributes/pattern.go b/plumbing/format/gitattributes/pattern.go new file mode 100644 index 0000000..c5ca0c7 --- /dev/null +++ b/plumbing/format/gitattributes/pattern.go @@ -0,0 +1,101 @@ +package gitattributes + +import ( + "path/filepath" + "strings" +) + +const ( + patternDirSep = "/" + zeroToManyDirs = "**" +) + +// Pattern defines a gitattributes pattern. +type Pattern interface { + // Match matches the given path to the pattern. 
+ Match(path []string) bool +} + +type pattern struct { + domain []string + pattern []string +} + +// ParsePattern parses a gitattributes pattern string into the Pattern +// structure. +func ParsePattern(p string, domain []string) Pattern { + return &pattern{ + domain: domain, + pattern: strings.Split(p, patternDirSep), + } +} + +func (p *pattern) Match(path []string) bool { + if len(path) <= len(p.domain) { + return false + } + for i, e := range p.domain { + if path[i] != e { + return false + } + } + + if len(p.pattern) == 1 { + // for a simple rule, .gitattribute matching rules differs from + // .gitignore and only the last part of the path is considered. + path = path[len(path)-1:] + } else { + path = path[len(p.domain):] + } + + pattern := p.pattern + var match, doublestar bool + var err error + for _, part := range path { + // skip empty + if pattern[0] == "" { + pattern = pattern[1:] + } + + // eat doublestar + if pattern[0] == zeroToManyDirs { + pattern = pattern[1:] + if len(pattern) == 0 { + return true + } + doublestar = true + } + + switch true { + case strings.Contains(pattern[0], "**"): + return false + + // keep going down the path until we hit a match + case doublestar: + match, err = filepath.Match(pattern[0], part) + if err != nil { + return false + } + + if match { + doublestar = false + pattern = pattern[1:] + } + + default: + match, err = filepath.Match(pattern[0], part) + if err != nil { + return false + } + if !match { + return false + } + pattern = pattern[1:] + } + } + + if len(pattern) > 0 { + return false + } + return match +} diff --git a/plumbing/format/gitattributes/pattern_test.go b/plumbing/format/gitattributes/pattern_test.go new file mode 100644 index 0000000..f95be6e --- /dev/null +++ b/plumbing/format/gitattributes/pattern_test.go @@ -0,0 +1,229 @@ +package gitattributes + +import ( + "testing" + + . 
"gopkg.in/check.v1" +) + +func Test(t *testing.T) { TestingT(t) } + +type PatternSuite struct{} + +var _ = Suite(&PatternSuite{}) + +func (s *PatternSuite) TestMatch_domainLonger_mismatch(c *C) { + p := ParsePattern("value", []string{"head", "middle", "tail"}) + r := p.Match([]string{"head", "middle"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestMatch_domainSameLength_mismatch(c *C) { + p := ParsePattern("value", []string{"head", "middle", "tail"}) + r := p.Match([]string{"head", "middle", "tail"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestMatch_domainMismatch_mismatch(c *C) { + p := ParsePattern("value", []string{"head", "middle", "tail"}) + r := p.Match([]string{"head", "middle", "_tail_", "value"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestSimpleMatch_match(c *C) { + p := ParsePattern("vul?ano", nil) + r := p.Match([]string{"value", "vulkano"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestSimpleMatch_withDomain(c *C) { + p := ParsePattern("middle/tail", []string{"value", "volcano"}) + r := p.Match([]string{"value", "volcano", "middle", "tail"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestSimpleMatch_onlyMatchInDomain_mismatch(c *C) { + p := ParsePattern("value/volcano", []string{"value", "volcano"}) + r := p.Match([]string{"value", "volcano", "tail"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestSimpleMatch_atStart(c *C) { + p := ParsePattern("value", nil) + r := p.Match([]string{"value", "tail"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestSimpleMatch_inTheMiddle(c *C) { + p := ParsePattern("value", nil) + r := p.Match([]string{"head", "value", "tail"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestSimpleMatch_atEnd(c *C) { + p := ParsePattern("value", nil) + r := p.Match([]string{"head", "value"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestSimpleMatch_mismatch(c *C) { + p := ParsePattern("value", nil) + r := p.Match([]string{"head", "val", "tail"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestSimpleMatch_valueLonger_mismatch(c *C) { + p := ParsePattern("tai", nil) + r := p.Match([]string{"head", "value", "tail"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestSimpleMatch_withAsterisk(c *C) { + p := ParsePattern("t*l", nil) + r := p.Match([]string{"value", "vulkano", "tail"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestSimpleMatch_withQuestionMark(c *C) { + p := ParsePattern("ta?l", nil) + r := p.Match([]string{"value", "vulkano", "tail"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestSimpleMatch_magicChars(c *C) { + p := ParsePattern("v[ou]l[kc]ano", nil) + r := p.Match([]string{"value", "volcano"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestSimpleMatch_wrongPattern_mismatch(c *C) { + p := ParsePattern("v[ou]l[", nil) + r := p.Match([]string{"value", "vol["}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestGlobMatch_fromRootWithSlash(c *C) { + p := ParsePattern("/value/vul?ano/tail", nil) + r := p.Match([]string{"value", "vulkano", "tail"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_withDomain(c *C) { + p := ParsePattern("middle/tail", []string{"value", "volcano"}) + r := p.Match([]string{"value", "volcano", "middle", "tail"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_onlyMatchInDomain_mismatch(c *C) { + p := ParsePattern("volcano/tail", []string{"value", "volcano"}) + r := 
p.Match([]string{"value", "volcano", "tail"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestGlobMatch_fromRootWithoutSlash(c *C) { + p := ParsePattern("value/vul?ano/tail", nil) + r := p.Match([]string{"value", "vulkano", "tail"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_fromRoot_mismatch(c *C) { + p := ParsePattern("value/vulkano", nil) + r := p.Match([]string{"value", "volcano"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestGlobMatch_fromRoot_tooShort_mismatch(c *C) { + p := ParsePattern("value/vul?ano", nil) + r := p.Match([]string{"value"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestGlobMatch_fromRoot_notAtRoot_mismatch(c *C) { + p := ParsePattern("/value/volcano", nil) + r := p.Match([]string{"value", "value", "volcano"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestGlobMatch_leadingAsterisks_atStart(c *C) { + p := ParsePattern("**/*lue/vol?ano/ta?l", nil) + r := p.Match([]string{"value", "volcano", "tail"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_leadingAsterisks_notAtStart(c *C) { + p := ParsePattern("**/*lue/vol?ano/tail", nil) + r := p.Match([]string{"head", "value", "volcano", "tail"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_leadingAsterisks_mismatch(c *C) { + p := ParsePattern("**/*lue/vol?ano/tail", nil) + r := p.Match([]string{"head", "value", "Volcano", "tail"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestGlobMatch_tailingAsterisks(c *C) { + p := ParsePattern("/*lue/vol?ano/**", nil) + r := p.Match([]string{"value", "volcano", "tail", "moretail"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_tailingAsterisks_single(c *C) { + p := ParsePattern("/*lue/**", nil) + r := p.Match([]string{"value", "volcano"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_tailingAsterisks_exactMatch(c *C) { + p := ParsePattern("/*lue/vol?ano/**", nil) + r := p.Match([]string{"value", "volcano"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestGlobMatch_middleAsterisks_emptyMatch(c *C) { + p := ParsePattern("/*lue/**/vol?ano", nil) + r := p.Match([]string{"value", "volcano"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_middleAsterisks_oneMatch(c *C) { + p := ParsePattern("/*lue/**/vol?ano", nil) + r := p.Match([]string{"value", "middle", "volcano"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_middleAsterisks_multiMatch(c *C) { + p := ParsePattern("/*lue/**/vol?ano", nil) + r := p.Match([]string{"value", "middle1", "middle2", "volcano"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_wrongDoubleAsterisk_mismatch(c *C) { + p := ParsePattern("/*lue/**foo/vol?ano/tail", nil) + r := p.Match([]string{"value", "foo", "volcano", "tail"}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestGlobMatch_magicChars(c *C) { + p := ParsePattern("**/head/v[ou]l[kc]ano", nil) + r := p.Match([]string{"value", "head", "volcano"}) + c.Assert(r, Equals, true) +} + +func (s *PatternSuite) TestGlobMatch_wrongPattern_noTraversal_mismatch(c *C) { + p := ParsePattern("**/head/v[ou]l[", nil) + r := p.Match([]string{"value", "head", "vol["}) + c.Assert(r, Equals, false) +} + +func (s *PatternSuite) TestGlobMatch_wrongPattern_onTraversal_mismatch(c *C) { + p := ParsePattern("/value/**/v[ou]l[", nil) + r := p.Match([]string{"value", "head", "vol["}) + c.Assert(r, Equals, false) +} + +func (s 
*PatternSuite) TestGlobMatch_issue_923(c *C) { + p := ParsePattern("**/android/**/GeneratedPluginRegistrant.java", nil) + r := p.Match([]string{"packages", "flutter_tools", "lib", "src", "android", "gradle.dart"}) + c.Assert(r, Equals, false) +} diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go index 5fed278..14b5860 100644 --- a/plumbing/format/idxfile/idxfile.go +++ b/plumbing/format/idxfile/idxfile.go @@ -5,8 +5,9 @@ import ( "io" "sort" + encbin "encoding/binary" + "gopkg.in/src-d/go-git.v4/plumbing" - "gopkg.in/src-d/go-git.v4/utils/binary" ) const ( @@ -55,7 +56,8 @@ type MemoryIndex struct { PackfileChecksum [20]byte IdxChecksum [20]byte - offsetHash map[int64]plumbing.Hash + offsetHash map[int64]plumbing.Hash + offsetHashIsFull bool } var _ Index = (*MemoryIndex)(nil) @@ -121,31 +123,32 @@ func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) { return 0, plumbing.ErrObjectNotFound } - return idx.getOffset(k, i) + offset := idx.getOffset(k, i) + + if !idx.offsetHashIsFull { + // Save the offset for reverse lookup + if idx.offsetHash == nil { + idx.offsetHash = make(map[int64]plumbing.Hash) + } + idx.offsetHash[int64(offset)] = h + } + + return int64(offset), nil } const isO64Mask = uint64(1) << 31 -func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) (int64, error) { +func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 { offset := secondLevel << 2 - buf := bytes.NewBuffer(idx.Offset32[firstLevel][offset : offset+4]) - ofs, err := binary.ReadUint32(buf) - if err != nil { - return -1, err - } + ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4]) if (uint64(ofs) & isO64Mask) != 0 { offset := 8 * (uint64(ofs) & ^isO64Mask) - buf := bytes.NewBuffer(idx.Offset64[offset : offset+8]) - n, err := binary.ReadUint64(buf) - if err != nil { - return -1, err - } - - return int64(n), nil + n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8]) + return n } - return int64(ofs), nil + return uint64(ofs) } // FindCRC32 implements the Index interface. @@ -156,25 +159,34 @@ func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) { return 0, plumbing.ErrObjectNotFound } - return idx.getCRC32(k, i) + return idx.getCRC32(k, i), nil } -func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) { +func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 { offset := secondLevel << 2 - buf := bytes.NewBuffer(idx.CRC32[firstLevel][offset : offset+4]) - return binary.ReadUint32(buf) + return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4]) } // FindHash implements the Index interface. func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) { + var hash plumbing.Hash + var ok bool + + if idx.offsetHash != nil { + if hash, ok = idx.offsetHash[o]; ok { + return hash, nil + } + } + // Lazily generate the reverse offset/hash map if required. 
 
 const isO64Mask = uint64(1) << 31
 
-func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) (int64, error) {
+func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 {
 	offset := secondLevel << 2
-	buf := bytes.NewBuffer(idx.Offset32[firstLevel][offset : offset+4])
-	ofs, err := binary.ReadUint32(buf)
-	if err != nil {
-		return -1, err
-	}
+	ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4])
 
 	if (uint64(ofs) & isO64Mask) != 0 {
 		offset := 8 * (uint64(ofs) & ^isO64Mask)
-		buf := bytes.NewBuffer(idx.Offset64[offset : offset+8])
-		n, err := binary.ReadUint64(buf)
-		if err != nil {
-			return -1, err
-		}
-
-		return int64(n), nil
+		n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8])
+		return n
 	}
 
-	return int64(ofs), nil
+	return uint64(ofs)
 }
 
 // FindCRC32 implements the Index interface.
@@ -156,25 +159,34 @@ func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) {
 		return 0, plumbing.ErrObjectNotFound
 	}
 
-	return idx.getCRC32(k, i)
+	return idx.getCRC32(k, i), nil
 }
 
-func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
+func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 {
 	offset := secondLevel << 2
-	buf := bytes.NewBuffer(idx.CRC32[firstLevel][offset : offset+4])
-	return binary.ReadUint32(buf)
+	return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4])
 }
 
 // FindHash implements the Index interface.
 func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
+	var hash plumbing.Hash
+	var ok bool
+
+	if idx.offsetHash != nil {
+		if hash, ok = idx.offsetHash[o]; ok {
+			return hash, nil
+		}
+	}
+
 	// Lazily generate the reverse offset/hash map if required.
-	if idx.offsetHash == nil {
+	if !idx.offsetHashIsFull || idx.offsetHash == nil {
 		if err := idx.genOffsetHash(); err != nil {
 			return plumbing.ZeroHash, err
 		}
+
+		hash, ok = idx.offsetHash[o]
 	}
 
-	hash, ok := idx.offsetHash[o]
 	if !ok {
 		return plumbing.ZeroHash, plumbing.ErrObjectNotFound
 	}
@@ -190,23 +202,21 @@ func (idx *MemoryIndex) genOffsetHash() error {
 	}
 
 	idx.offsetHash = make(map[int64]plumbing.Hash, count)
-
-	iter, err := idx.Entries()
-	if err != nil {
-		return err
-	}
-
-	for {
-		entry, err := iter.Next()
-		if err != nil {
-			if err == io.EOF {
-				return nil
-			}
-			return err
+	idx.offsetHashIsFull = true
+
+	var hash plumbing.Hash
+	i := uint32(0)
+	for firstLevel, fanoutValue := range idx.Fanout {
+		mappedFirstLevel := idx.FanoutMapping[firstLevel]
+		for secondLevel := uint32(0); i < fanoutValue; i++ {
+			copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:])
+			offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel)))
+			idx.offsetHash[offset] = hash
+			secondLevel++
 		}
-
-		idx.offsetHash[int64(entry.Offset)] = entry.Hash
 	}
+
+	return nil
 }
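genOffsetHash above now walks the idx fanout table directly instead of allocating an entry iterator. The fanout stores, for each possible first hash byte, the cumulative count of objects whose hash starts with that byte or a smaller one, so each bucket's size falls out of consecutive entries. A toy version of that traversal, with a made-up four-entry fanout standing in for the real 256-entry table:

```go
package main

import "fmt"

func main() {
	// Toy cumulative fanout over four possible first bytes: 3 objects
	// start with byte 0x00, 2 more with 0x01, none with 0x02, 4 with 0x03.
	fanout := []uint32{3, 5, 5, 9}

	// Same loop shape as genOffsetHash: i counts entries globally while
	// secondLevel counts entries within the current first-byte bucket.
	i := uint32(0)
	for firstLevel, fanoutValue := range fanout {
		for secondLevel := uint32(0); i < fanoutValue; i++ {
			fmt.Printf("first byte %#02x, bucket entry %d, global entry %d\n",
				firstLevel, secondLevel, i)
			secondLevel++
		}
	}
}
```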
 
 // Count implements the Index interface.
@@ -275,22 +285,11 @@ func (i *idxfileEntryIter) Next() (*Entry, error) {
 			continue
 		}
 
+		mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel]
 		entry := new(Entry)
-		ofs := i.secondLevel * objectIDLength
-		copy(entry.Hash[:], i.idx.Names[i.idx.FanoutMapping[i.firstLevel]][ofs:])
-
-		pos := i.idx.FanoutMapping[entry.Hash[0]]
-
-		offset, err := i.idx.getOffset(pos, i.secondLevel)
-		if err != nil {
-			return nil, err
-		}
-		entry.Offset = uint64(offset)
-
-		entry.CRC32, err = i.idx.getCRC32(pos, i.secondLevel)
-		if err != nil {
-			return nil, err
-		}
+		copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:])
+		entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel)
+		entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel)
 
 		i.secondLevel++
 		i.total++
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go index ac57d08..98f92fd 100644 --- a/plumbing/format/index/decoder.go +++ b/plumbing/format/index/decoder.go
@@ -1,6 +1,7 @@
 package index
 
 import (
+	"bufio"
 	"bytes"
 	"crypto/sha1"
 	"errors"
@@ -42,14 +43,17 @@ type Decoder struct {
 	r         io.Reader
 	hash      hash.Hash
 	lastEntry *Entry
+
+	extReader *bufio.Reader
 }
 
 // NewDecoder returns a new decoder that reads from r.
 func NewDecoder(r io.Reader) *Decoder {
 	h := sha1.New()
 	return &Decoder{
-		r:    io.TeeReader(r, h),
-		hash: h,
+		r:         io.TeeReader(r, h),
+		hash:      h,
+		extReader: bufio.NewReader(nil),
 	}
 }
 
@@ -184,11 +188,9 @@ func (d *Decoder) doReadEntryNameV4() (string, error) {
 func (d *Decoder) doReadEntryName(len uint16) (string, error) {
 	name := make([]byte, len)
 
-	if err := binary.Read(d.r, &name); err != nil {
-		return "", err
-	}
+	_, err := io.ReadFull(d.r, name[:])
 
-	return string(name), nil
+	return string(name), err
 }
 
 // Index entries are padded out to the next 8 byte alignment
@@ -279,20 +281,21 @@ func (d *Decoder) readExtension(idx *Index, header []byte) error {
 	return nil
 }
 
-func (d *Decoder) getExtensionReader() (io.Reader, error) {
+func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
 	len, err := binary.ReadUint32(d.r)
 	if err != nil {
 		return nil, err
 	}
 
-	return &io.LimitedReader{R: d.r, N: int64(len)}, nil
+	d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)})
+	return d.extReader, nil
 }
 
 func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
 	var h plumbing.Hash
 	copy(h[:4], alreadyRead[:])
 
-	if err := binary.Read(d.r, h[4:]); err != nil {
+	if _, err := io.ReadFull(d.r, h[4:]); err != nil {
 		return err
 	}
 
@@ -326,7 +329,7 @@ func validateHeader(r io.Reader) (version uint32, err error) {
 }
 
 type treeExtensionDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *treeExtensionDecoder) Decode(t *Tree) error {
@@ -386,16 +389,13 @@ func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
 	}
 
 	e.Trees = i
-
-	if err := binary.Read(d.r, &e.Hash); err != nil {
-		return nil, err
-	}
+	_, err = io.ReadFull(d.r, e.Hash[:])
 
 	return e, nil
 }
 
 type resolveUndoDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
@@ -433,7 +433,7 @@ func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
 
 	for s := range e.Stages {
 		var hash plumbing.Hash
-		if err := binary.Read(d.r, hash[:]); err != nil {
+		if _, err := io.ReadFull(d.r, hash[:]); err != nil {
 			return nil, err
 		}
 
@@ -462,7 +462,7 @@ func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
 }
 
 type endOfIndexEntryDecoder struct {
-	r io.Reader
+	r *bufio.Reader
 }
 
 func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
@@ -472,5 +472,6 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
 		return err
 	}
 
-	return binary.Read(d.r, &e.Hash)
+	_, err = io.ReadFull(d.r, e.Hash[:])
+	return err
 }
diff --git a/plumbing/format/packfile/common.go b/plumbing/format/packfile/common.go index 0d9ed54..f82c1ab 100644 --- a/plumbing/format/packfile/common.go +++ b/plumbing/format/packfile/common.go
@@ -2,6 +2,7 @@ package packfile
 
 import (
 	"bytes"
+	"compress/zlib"
 	"io"
 	"sync"
 
@@ -66,3 +67,12 @@ var bufPool = sync.Pool{
 		return bytes.NewBuffer(nil)
 	},
 }
+
+var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
+
+var zlibReaderPool = sync.Pool{
+	New: func() interface{} {
+		r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
+		return r
+	},
+}
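The pool added above leans on two properties of compress/zlib: NewReader insists on reading a valid stream header up front, hence the canned zlibInitBytes (a zlib header, an empty stored deflate block, and the adler32 of empty input), and the readers it returns also implement zlib.Resetter, so a pooled reader can be retargeted at a fresh stream without allocating. A standalone sketch of the same pattern; inflate is an illustrative helper, not a go-git function:

```go
package main

import (
	"bytes"
	"compress/zlib"
	"fmt"
	"io"
	"sync"
)

// The same minimal valid zlib stream the diff adds to common.go, used only
// to satisfy NewReader when a pooled reader is first created.
var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}

var zlibReaderPool = sync.Pool{
	New: func() interface{} {
		r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
		return r
	},
}

func inflate(compressed io.Reader, w io.Writer) error {
	zr := zlibReaderPool.Get().(io.ReadCloser)
	defer zlibReaderPool.Put(zr)

	// Point the pooled reader at the new stream instead of allocating one.
	if err := zr.(zlib.Resetter).Reset(compressed, nil); err != nil {
		return err
	}
	defer zr.Close()

	_, err := io.Copy(w, zr)
	return err
}

func main() {
	var compressed bytes.Buffer
	zw := zlib.NewWriter(&compressed)
	zw.Write([]byte("hello"))
	zw.Close()

	var out bytes.Buffer
	if err := inflate(&compressed, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // hello
}
```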
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go index 69b6e85..f528073 100644 --- a/plumbing/format/packfile/packfile.go +++ b/plumbing/format/packfile/packfile.go
@@ -76,20 +76,18 @@ func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
 		return nil, err
 	}
 
-	return p.GetByOffset(offset)
+	return p.objectAtOffset(offset, h)
 }
 
-// GetByOffset retrieves the encoded object from the packfile with the given
+// GetByOffset retrieves the encoded object from the packfile at the given
 // offset.
 func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
 	hash, err := p.FindHash(o)
-	if err == nil {
-		if obj, ok := p.deltaBaseCache.Get(hash); ok {
-			return obj, nil
-		}
+	if err != nil {
+		return nil, err
 	}
 
-	return p.objectAtOffset(o)
+	return p.objectAtOffset(o, hash)
 }
 
 // GetSizeByOffset retrieves the size of the encoded object from the
@@ -122,6 +120,13 @@ func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
 	return h, err
 }
 
+func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 {
+	delta := buf.Bytes()
+	_, delta = decodeLEB128(delta) // skip src size
+	sz, _ := decodeLEB128(delta)
+	return int64(sz)
+}
+
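getDeltaObjectSize above works because delta data begins with two sizes encoded as little-endian base-128 varints: first the size of the base object, then the size the delta expands to, which is the number the small-object check needs. decodeLEB128 itself lives elsewhere in the packfile package; the version below is a plausible reconstruction for illustration, not the exact upstream code:

```go
package main

import "fmt"

// decodeLEB128 reads one little-endian base-128 varint from input: each
// byte carries 7 payload bits, and a set high bit means more bytes follow.
// It returns the decoded value and the remaining bytes.
func decodeLEB128(input []byte) (uint, []byte) {
	var num, sz uint
	var b byte
	for {
		b = input[sz]
		num |= (uint(b) & 0x7f) << (sz * 7) // low 7 bits carry payload
		sz++
		if b&0x80 == 0 || sz == uint(len(input)) {
			break // high bit clear means last byte
		}
	}
	return num, input[sz:]
}

func main() {
	// 0xe5 0x8e 0x26 encodes 624485; 0x05 encodes 5. In real delta data
	// these would be the source size and the expanded target size.
	delta := []byte{0xe5, 0x8e, 0x26, 0x05}
	srcSize, rest := decodeLEB128(delta)
	dstSize, _ := decodeLEB128(rest)
	fmt.Println(srcSize, dstSize) // 624485 5
}
```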
 func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
 	switch h.Type {
 	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
@@ -135,10 +140,7 @@
 			return 0, err
 		}
 
-		delta := buf.Bytes()
-		_, delta = decodeLEB128(delta) // skip src size
-		sz, _ := decodeLEB128(delta)
-		return int64(sz), nil
+		return p.getDeltaObjectSize(buf), nil
 	default:
 		return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
 	}
 }
@@ -176,10 +178,16 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
 		err = ErrInvalidObject.AddDetails("type %q", h.Type)
 	}
 
+	p.offsetToType[h.Offset] = typ
+
 	return
 }
 
-func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
+func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) {
+	if obj, ok := p.cacheGet(hash); ok {
+		return obj, nil
+	}
+
 	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		if err == io.EOF || isInvalid(err) {
@@ -188,27 +196,54 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
 		return nil, err
 	}
 
+	return p.getNextObject(h, hash)
+}
+
+func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) {
+	var err error
+
 	// If we have no filesystem, we will return a MemoryObject instead
 	// of an FSObject.
 	if p.fs == nil {
-		return p.getNextObject(h)
+		return p.getNextMemoryObject(h)
 	}
 
-	// If the object is not a delta and it's small enough then read it
-	// completely into memory now since it is already read from disk
-	// into buffer anyway.
-	if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
-		return p.getNextObject(h)
-	}
+	// If the object is small enough then read it completely into memory now since
+	// it is already read from disk into a buffer anyway. For delta objects we want
+	// to perform the optimization too, but we have to be careful about applying
+	// small deltas on big objects.
+	var size int64
+	if h.Length <= smallObjectThreshold {
+		if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
+			return p.getNextMemoryObject(h)
+		}
 
-	hash, err := p.FindHash(h.Offset)
-	if err != nil {
-		return nil, err
-	}
+		// For delta objects we read the delta data and apply the small object
+		// optimization only if the expanded version of the object still meets
+		// the small object threshold condition.
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset()
+		if _, _, err := p.s.NextObject(buf); err != nil {
+			return nil, err
+		}
+		defer bufPool.Put(buf)
 
-	size, err := p.getObjectSize(h)
-	if err != nil {
-		return nil, err
+		size = p.getDeltaObjectSize(buf)
+		if size <= smallObjectThreshold {
+			var obj = new(plumbing.MemoryObject)
+			obj.SetSize(size)
+			if h.Type == plumbing.REFDeltaObject {
+				err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf)
+			} else {
+				err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf)
+			}
+			return obj, err
+		}
+	} else {
+		size, err = p.getObjectSize(h)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	typ, err := p.getObjectType(h)
@@ -231,25 +266,14 @@ func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error)
 }
 
 func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
-	ref, err := p.FindHash(offset)
-	if err == nil {
-		obj, ok := p.cacheGet(ref)
-		if ok {
-			reader, err := obj.Reader()
-			if err != nil {
-				return nil, err
-			}
-
-			return reader, nil
-		}
-	}
-
 	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		return nil, err
 	}
 
-	obj, err := p.getNextObject(h)
+	// getObjectContent is called from FSObject, so we have to explicitly
+	// get a memory object here to avoid a recursive cycle
+	obj, err := p.getNextMemoryObject(h)
 	if err != nil {
 		return nil, err
 	}
@@ -257,7 +281,7 @@
 	return obj.Reader()
 }
 
-func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
+func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
 	var obj = new(plumbing.MemoryObject)
 	obj.SetSize(h.Length)
 	obj.SetType(h.Type)
@@ -278,6 +302,8 @@
 		return nil, err
 	}
 
+	p.offsetToType[h.Offset] = obj.Type()
+
 	return obj, nil
 }
 
@@ -300,6 +326,13 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
 	if err != nil {
 		return err
 	}
+	defer bufPool.Put(buf)
+
+	return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
+}
+
+func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
+	var err error
 
 	base, ok := p.cacheGet(ref)
 	if !ok {
@@ -312,30 +345,31 @@
 	obj.SetType(base.Type())
 	err = ApplyDelta(obj, base, buf.Bytes())
 	p.cachePut(obj)
-	bufPool.Put(buf)
 
 	return err
 }
 
 func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
-	buf := bytes.NewBuffer(nil)
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
 	_, _, err := p.s.NextObject(buf)
 	if err != nil {
 		return err
 	}
+	defer bufPool.Put(buf)
+
+	return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
+}
 
-	var base plumbing.EncodedObject
-	var ok bool
+func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
 	hash, err := p.FindHash(offset)
-	if err == nil {
-		base, ok = p.cacheGet(hash)
+	if err != nil {
+		return err
 	}
 
-	if !ok {
-		base, err = p.GetByOffset(offset)
-		if err != nil {
-			return err
-		}
+	base, err := p.objectAtOffset(offset, hash)
+	if err != nil {
+		return err
 	}
 
 	obj.SetType(base.Type())
@@ -414,6 +448,11 @@ func (p *Packfile) ID() (plumbing.Hash, error) {
 	return hash, nil
 }
 
+// Scanner returns the packfile's Scanner
+func (p *Packfile) Scanner() *Scanner {
+	return p.s
+}
+
 // Close the packfile and its resources.
 func (p *Packfile) Close() error {
 	closer, ok := p.file.(io.Closer)
@@ -437,14 +476,50 @@ func (i *objectIter) Next() (plumbing.EncodedObject, error) {
 			return nil, err
 		}
 
-		obj, err := i.p.GetByOffset(int64(e.Offset))
+		if i.typ != plumbing.AnyObject {
+			if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok {
+				if typ != i.typ {
+					continue
+				}
+			} else if obj, ok := i.p.cacheGet(e.Hash); ok {
+				if obj.Type() != i.typ {
+					i.p.offsetToType[int64(e.Offset)] = obj.Type()
+					continue
+				}
+				return obj, nil
+			} else {
+				h, err := i.p.objectHeaderAtOffset(int64(e.Offset))
+				if err != nil {
+					return nil, err
+				}
+
+				if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject {
+					typ, err := i.p.getObjectType(h)
+					if err != nil {
+						return nil, err
+					}
+					if typ != i.typ {
+						i.p.offsetToType[int64(e.Offset)] = typ
+						continue
+					}
+					// getObjectType will seek in the file so we cannot use getNextObject safely
+					return i.p.objectAtOffset(int64(e.Offset), e.Hash)
+				} else {
+					if h.Type != i.typ {
+						i.p.offsetToType[int64(e.Offset)] = h.Type
+						continue
+					}
+					return i.p.getNextObject(h, e.Hash)
+				}
+			}
+		}
+
+		obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash)
 		if err != nil {
 			return nil, err
 		}
 
-		if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
-			return obj, nil
-		}
+		return obj, nil
 	}
 }
diff --git a/plumbing/format/packfile/scanner.go b/plumbing/format/packfile/scanner.go index 614b0d1..7b44192 100644 --- a/plumbing/format/packfile/scanner.go +++ b/plumbing/format/packfile/scanner.go
@@ -39,8 +39,7 @@ type ObjectHeader struct {
 }
 
 type Scanner struct {
-	r  reader
-	zr readerResetter
+	r *scannerReader
 
 	crc hash.Hash32
 
 	// pendingObject is used to detect if an object has been read, or still
@@ -56,19 +55,27 @@ type Scanner struct {
 
 // NewScanner returns a new Scanner based on a reader. If the given reader
 // implements io.ReadSeeker, the Scanner will also be seekable.
 func NewScanner(r io.Reader) *Scanner {
-	seeker, ok := r.(io.ReadSeeker)
-	if !ok {
-		seeker = &trackableReader{Reader: r}
-	}
+	_, ok := r.(io.ReadSeeker)
 
 	crc := crc32.NewIEEE()
 	return &Scanner{
-		r:          newTeeReader(newByteReadSeeker(seeker), crc),
+		r:          newScannerReader(r, crc),
 		crc:        crc,
 		IsSeekable: ok,
 	}
 }
 
+func (s *Scanner) Reset(r io.Reader) {
+	_, ok := r.(io.ReadSeeker)
+
+	s.r.Reset(r)
+	s.crc.Reset()
+	s.IsSeekable = ok
+	s.pendingObject = nil
+	s.version = 0
+	s.objects = 0
+}
+
 // Header reads the whole packfile header (signature, version and object count).
 // It returns the version and the object count and performs checks on the
 // validity of the signature and the version fields.
@@ -182,8 +189,7 @@ func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
 // nextObjectHeader returns the ObjectHeader for the next object in the reader
 // without the Offset field
 func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
-	defer s.Flush()
-
+	s.r.Flush()
	s.crc.Reset()
 
 	h := &ObjectHeader{}
@@ -304,35 +310,29 @@ func (s *Scanner) readLength(first byte) (int64, error) {
 
 // NextObject writes the content of the next object into the given writer. It
 // returns the number of bytes written, the CRC32 of the content and an error,
 // if any.
 func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
-	defer s.crc.Reset()
-
 	s.pendingObject = nil
 	written, err = s.copyObject(w)
-	s.Flush()
+
+	s.r.Flush()
 	crc32 = s.crc.Sum32()
+	s.crc.Reset()
+
 	return
 }
 
 // copyObject reads and writes a non-deltified object
 // from its zlib stream in an object entry in the packfile.
 func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
-	if s.zr == nil {
-		var zr io.ReadCloser
-		zr, err = zlib.NewReader(s.r)
-		if err != nil {
-			return 0, fmt.Errorf("zlib initialization error: %s", err)
-		}
+	zr := zlibReaderPool.Get().(io.ReadCloser)
+	defer zlibReaderPool.Put(zr)
 
-		s.zr = zr.(readerResetter)
-	} else {
-		if err = s.zr.Reset(s.r, nil); err != nil {
-			return 0, fmt.Errorf("zlib reset error: %s", err)
-		}
+	if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
+		return 0, fmt.Errorf("zlib reset error: %s", err)
 	}
 
-	defer ioutil.CheckClose(s.zr, &err)
+	defer ioutil.CheckClose(zr, &err)
 
 	buf := byteSlicePool.Get().([]byte)
-	n, err = io.CopyBuffer(w, s.zr, buf)
+	n, err = io.CopyBuffer(w, zr, buf)
 	byteSlicePool.Put(buf)
 
 	return
 }
@@ -378,110 +378,89 @@ func (s *Scanner) Close() error {
 	return err
 }
 
-// Flush finishes writing the buffer to crc hasher in case we are using
-// a teeReader. Otherwise it is a no-op.
+// Flush is a no-op (deprecated)
 func (s *Scanner) Flush() error {
-	tee, ok := s.r.(*teeReader)
-	if ok {
-		return tee.Flush()
-	}
 	return nil
 }
 
-type trackableReader struct {
-	count int64
-	io.Reader
+// scannerReader has the following characteristics:
+// - Provides an io.ReadSeeker impl for bufio.Reader, when the underlying
+//   reader supports it.
+// - Keeps track of the current read position, for when the underlying reader
+//   isn't an io.ReadSeeker, but we still want to know the current offset.
+// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
+//   The buffer helps avoid a performance penalty for performing small writes
+//   to the crc32 hash writer.
+type scannerReader struct {
+	reader io.Reader
+	crc    io.Writer
+	rbuf   *bufio.Reader
+	wbuf   *bufio.Writer
+	offset int64
 }
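The comment block above summarizes the design: reads are served from a bufio.Reader, and everything read is mirrored into the CRC-32 hasher through a small 64-byte bufio.Writer, so the byte-at-a-time reads done while parsing object headers do not each pay the cost of a hasher call. A compact standalone version of that tee idea; teeCRCReader is an illustrative name, not the real type:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"hash/crc32"
	"io"
)

// teeCRCReader mirrors every byte it reads into a hasher, buffering the
// mirror writes so tiny reads stay cheap, and tracks the read offset.
type teeCRCReader struct {
	rbuf   *bufio.Reader
	wbuf   *bufio.Writer // buffers small writes to the hasher
	offset int64
}

func newTeeCRCReader(r io.Reader, crc io.Writer) *teeCRCReader {
	return &teeCRCReader{
		rbuf: bufio.NewReader(r),
		wbuf: bufio.NewWriterSize(crc, 64),
	}
}

func (t *teeCRCReader) Read(p []byte) (int, error) {
	n, err := t.rbuf.Read(p)
	t.offset += int64(n)
	if _, werr := t.wbuf.Write(p[:n]); werr != nil {
		return n, werr
	}
	return n, err
}

// Flush must run before reading the CRC sum, as the scanner's NextObject
// does; otherwise bytes still in the write buffer are missing from it.
func (t *teeCRCReader) Flush() error { return t.wbuf.Flush() }

func main() {
	crc := crc32.NewIEEE()
	r := newTeeCRCReader(bytes.NewReader([]byte("pack data")), crc)

	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	if err := r.Flush(); err != nil {
		panic(err)
	}
	fmt.Printf("offset=%d crc=%08x\n", r.offset, crc.Sum32())
}
```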
-// Read reads up to len(p) bytes into p.
-func (r *trackableReader) Read(p []byte) (n int, err error) {
-	n, err = r.Reader.Read(p)
-	r.count += int64(n)
-
-	return
-}
-
-// Seek only supports io.SeekCurrent, any other operation fails
-func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
-	if whence != io.SeekCurrent {
-		return -1, ErrSeekNotSupported
+func newScannerReader(r io.Reader, h io.Writer) *scannerReader {
+	sr := &scannerReader{
+		rbuf: bufio.NewReader(nil),
+		wbuf: bufio.NewWriterSize(nil, 64),
+		crc:  h,
 	}
+	sr.Reset(r)
 
-	return r.count, nil
+	return sr
 }
 
-func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
-	return &bufferedSeeker{
-		r:      r,
-		Reader: *bufio.NewReader(r),
-	}
-}
+func (r *scannerReader) Reset(reader io.Reader) {
+	r.reader = reader
+	r.rbuf.Reset(r.reader)
+	r.wbuf.Reset(r.crc)
 
-type bufferedSeeker struct {
-	r io.ReadSeeker
-	bufio.Reader
-}
-
-func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
-	if whence == io.SeekCurrent && offset == 0 {
-		current, err := r.r.Seek(offset, whence)
-		if err != nil {
-			return current, err
-		}
-
-		return current - int64(r.Buffered()), nil
+	r.offset = 0
+	if seeker, ok := r.reader.(io.ReadSeeker); ok {
+		r.offset, _ = seeker.Seek(0, io.SeekCurrent)
 	}
-
-	defer r.Reader.Reset(r.r)
-	return r.r.Seek(offset, whence)
 }
 
-type readerResetter interface {
-	io.ReadCloser
-	zlib.Resetter
-}
+func (r *scannerReader) Read(p []byte) (n int, err error) {
+	n, err = r.rbuf.Read(p)
 
-type reader interface {
-	io.Reader
-	io.ByteReader
-	io.Seeker
+	r.offset += int64(n)
+	if _, err := r.wbuf.Write(p[:n]); err != nil {
+		return n, err
+	}
+	return
 }
 
-type teeReader struct {
-	reader
-	w         hash.Hash32
-	bufWriter *bufio.Writer
+func (r *scannerReader) ReadByte() (b byte, err error) {
+	b, err = r.rbuf.ReadByte()
+	if err == nil {
+		r.offset++
+		return b, r.wbuf.WriteByte(b)
+	}
+	return
 }
 
-func newTeeReader(r reader, h hash.Hash32) *teeReader {
-	return &teeReader{
-		reader:    r,
-		w:         h,
-		bufWriter: bufio.NewWriter(h),
-	}
+func (r *scannerReader) Flush() error {
+	return r.wbuf.Flush()
 }
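Seek, which follows, has to keep the buffered reader honest: after moving the underlying file, the bytes held by the bufio.Reader are stale and must be dropped, while for non-seekable inputs only the position query Seek(0, io.SeekCurrent) can be answered from the tracked offset. The stale-buffer hazard is easy to reproduce in isolation (a temp file is used here purely for demonstration):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "seekdemo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	f.WriteString("0123456789")
	f.Seek(0, io.SeekStart)

	rbuf := bufio.NewReader(f)
	b, _ := rbuf.ReadByte() // this read buffers far past the first byte
	fmt.Println(string(b))  // 0

	offset, _ := f.Seek(5, io.SeekStart)
	rbuf.Reset(f) // drop the stale buffered bytes, as scannerReader.Seek does

	b, _ = rbuf.ReadByte()
	fmt.Println(offset, string(b)) // 5 5
}
```

Without the Reset call, the second ReadByte would return "1" from the old buffer even though the file position has already moved to offset 5.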
 
-func (r *teeReader) Read(p []byte) (n int, err error) {
-	r.Flush()
+// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker,
+// then only whence=io.SeekCurrent is supported; any other operation fails.
+func (r *scannerReader) Seek(offset int64, whence int) (int64, error) {
+	var err error
 
-	n, err = r.reader.Read(p)
-	if n > 0 {
-		if n, err := r.w.Write(p[:n]); err != nil {
-			return n, err
+	if seeker, ok := r.reader.(io.ReadSeeker); !ok {
+		if whence != io.SeekCurrent || offset != 0 {
+			return -1, ErrSeekNotSupported
+		}
+	} else {
+		if whence == io.SeekCurrent && offset == 0 {
+			return r.offset, nil
 		}
-	}
-	return
-}
 
-func (r *teeReader) ReadByte() (b byte, err error) {
-	b, err = r.reader.ReadByte()
-	if err == nil {
-		return b, r.bufWriter.WriteByte(b)
+		r.offset, err = seeker.Seek(offset, whence)
+		r.rbuf.Reset(r.reader)
 	}
 
-	return
-}
-
-func (r *teeReader) Flush() (err error) {
-	return r.bufWriter.Flush()
+	return r.offset, err
 }
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go index 091b457..a401d6d 100644 --- a/plumbing/format/packfile/scanner_test.go +++ b/plumbing/format/packfile/scanner_test.go
@@ -135,6 +135,55 @@ func (s *ScannerSuite) TestSeekObjectHeaderNonSeekable(c *C) {
 	c.Assert(err, Equals, ErrSeekNotSupported)
 }
 
+func (s *ScannerSuite) TestReaderReset(c *C) {
+	r := fixtures.Basic().One().Packfile()
+	p := NewScanner(r)
+
+	version, objects, err := p.Header()
+	c.Assert(version, Equals, VersionSupported)
+	c.Assert(objects, Equals, uint32(31))
+
+	h, err := p.SeekObjectHeader(expectedHeadersOFS[0].Offset)
+	c.Assert(err, IsNil)
+	c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
+
+	p.Reset(r)
+	c.Assert(p.pendingObject, IsNil)
+	c.Assert(p.version, Equals, uint32(0))
+	c.Assert(p.objects, Equals, uint32(0))
+	c.Assert(p.r.reader, Equals, r)
+	c.Assert(p.r.offset > expectedHeadersOFS[0].Offset, Equals, true)
+
+	p.Reset(bytes.NewReader(nil))
+	c.Assert(p.r.offset, Equals, int64(0))
+}
+
+func (s *ScannerSuite) TestReaderResetSeeks(c *C) {
+	r := fixtures.Basic().One().Packfile()
+
+	// seekable
+	p := NewScanner(r)
+	c.Assert(p.IsSeekable, Equals, true)
+	h, err := p.SeekObjectHeader(expectedHeadersOFS[0].Offset)
+	c.Assert(err, IsNil)
+	c.Assert(h, DeepEquals, &expectedHeadersOFS[0])
+
+	// reset with seekable
+	p.Reset(r)
+	c.Assert(p.IsSeekable, Equals, true)
+	h, err = p.SeekObjectHeader(expectedHeadersOFS[1].Offset)
+	c.Assert(err, IsNil)
+	c.Assert(h, DeepEquals, &expectedHeadersOFS[1])
+
+	// reset with non-seekable
+	f := fixtures.Basic().ByTag("ref-delta").One()
+	p.Reset(io.MultiReader(f.Packfile()))
+	c.Assert(p.IsSeekable, Equals, false)
+
+	_, err = p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
+	c.Assert(err, Equals, ErrSeekNotSupported)
+}
+
 var expectedHeadersOFS = []ObjectHeader{
 	{Type: plumbing.CommitObject, Offset: 12, Length: 254},
 	{Type: plumbing.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12},