-rw-r--r--  config/config.go                          |  5
-rw-r--r--  config/config_test.go                     |  2
-rw-r--r--  plumbing/format/idxfile/idxfile.go        | 69
-rw-r--r--  plumbing/format/idxfile/idxfile_test.go   | 15
-rw-r--r--  plumbing/format/packfile/fsobject.go      | 10
-rw-r--r--  plumbing/format/packfile/packfile.go      | 17
-rw-r--r--  plumbing/object/commit.go                 | 16
-rw-r--r--  plumbing/object/commit_test.go            | 34
-rw-r--r--  status.go                                 | 13
-rw-r--r--  storage/filesystem/dotgit/dotgit.go       | 71
-rw-r--r--  storage/filesystem/dotgit/dotgit_test.go  | 51
-rw-r--r--  storage/filesystem/object.go              | 18
-rw-r--r--  worktree.go                               | 56
-rw-r--r--  worktree_test.go                          |  9
14 files changed, 359 insertions(+), 27 deletions(-)
diff --git a/config/config.go b/config/config.go
index ce6506d..a637f6d 100644
--- a/config/config.go
+++ b/config/config.go
@@ -40,6 +40,9 @@ type Config struct {
 		IsBare bool
 		// Worktree is the path to the root of the working tree.
 		Worktree string
+		// CommentChar is the character indicating the start of a
+		// comment for commands like commit and tag
+		CommentChar string
 	}
 
 	Pack struct {
@@ -113,6 +116,7 @@ const (
 	urlKey         = "url"
 	bareKey        = "bare"
 	worktreeKey    = "worktree"
+	commentCharKey = "commentChar"
 	windowKey      = "window"
 	mergeKey       = "merge"
 
@@ -151,6 +155,7 @@ func (c *Config) unmarshalCore() {
 	}
 
 	c.Core.Worktree = s.Options.Get(worktreeKey)
+	c.Core.CommentChar = s.Options.Get(commentCharKey)
 }
 
 func (c *Config) unmarshalPack() error {
diff --git a/config/config_test.go b/config/config_test.go
index 5cd713e..fe73de8 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -13,6 +13,7 @@ func (s *ConfigSuite) TestUnmarshall(c *C) {
 	input := []byte(`[core]
 		bare = true
 		worktree = foo
+		commentchar = bar
 [pack]
 		window = 20
 [remote "origin"]
@@ -38,6 +39,7 @@ func (s *ConfigSuite) TestUnmarshall(c *C) {
 
 	c.Assert(cfg.Core.IsBare, Equals, true)
 	c.Assert(cfg.Core.Worktree, Equals, "foo")
+	c.Assert(cfg.Core.CommentChar, Equals, "bar")
 	c.Assert(cfg.Pack.Window, Equals, uint(20))
 	c.Assert(cfg.Remotes, HasLen, 2)
 	c.Assert(cfg.Remotes["origin"].Name, Equals, "origin")
diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go
index c977bee..5fed278 100644
--- a/plumbing/format/idxfile/idxfile.go
+++ b/plumbing/format/idxfile/idxfile.go
@@ -3,6 +3,7 @@ package idxfile
 import (
 	"bytes"
 	"io"
+	"sort"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/utils/binary"
@@ -34,6 +35,9 @@ type Index interface {
 	Count() (int64, error)
 	// Entries returns an iterator to retrieve all index entries.
 	Entries() (EntryIter, error)
+	// EntriesByOffset returns an iterator to retrieve all index entries ordered
+	// by offset.
+	EntriesByOffset() (EntryIter, error)
 }
 
 // MemoryIndex is the in memory representation of an idx file.
@@ -215,6 +219,36 @@ func (idx *MemoryIndex) Entries() (EntryIter, error) {
 	return &idxfileEntryIter{idx, 0, 0, 0}, nil
 }
 
+// EntriesByOffset implements the Index interface.
+func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) {
+	count, err := idx.Count()
+	if err != nil {
+		return nil, err
+	}
+
+	iter := &idxfileEntryOffsetIter{
+		entries: make(entriesByOffset, count),
+	}
+
+	entries, err := idx.Entries()
+	if err != nil {
+		return nil, err
+	}
+
+	for pos := 0; int64(pos) < count; pos++ {
+		entry, err := entries.Next()
+		if err != nil {
+			return nil, err
+		}
+
+		iter.entries[pos] = entry
+	}
+
+	sort.Sort(iter.entries)
+
+	return iter, nil
+}
+
 // EntryIter is an iterator that will return the entries in a packfile index.
 type EntryIter interface {
 	// Next returns the next entry in the packfile index.
@@ -276,3 +310,38 @@ type Entry struct {
 	CRC32  uint32
 	Offset uint64
 }
+
+type idxfileEntryOffsetIter struct {
+	entries entriesByOffset
+	pos     int
+}
+
+func (i *idxfileEntryOffsetIter) Next() (*Entry, error) {
+	if i.pos >= len(i.entries) {
+		return nil, io.EOF
+	}
+
+	entry := i.entries[i.pos]
+	i.pos++
+
+	return entry, nil
+}
+
+func (i *idxfileEntryOffsetIter) Close() error {
+	i.pos = len(i.entries) + 1
+	return nil
+}
+
+type entriesByOffset []*Entry
+
+func (o entriesByOffset) Len() int {
+	return len(o)
+}
+
+func (o entriesByOffset) Less(i int, j int) bool {
+	return o[i].Offset < o[j].Offset
+}
+
+func (o entriesByOffset) Swap(i int, j int) {
+	o[i], o[j] = o[j], o[i]
+}
diff --git a/plumbing/format/idxfile/idxfile_test.go b/plumbing/format/idxfile/idxfile_test.go
index d15accf..0e0ca2a 100644
--- a/plumbing/format/idxfile/idxfile_test.go
+++ b/plumbing/format/idxfile/idxfile_test.go
@@ -115,6 +115,21 @@ func (s *IndexSuite) TestFindHash(c *C) {
 	}
 }
 
+func (s *IndexSuite) TestEntriesByOffset(c *C) {
+	idx, err := fixtureIndex()
+	c.Assert(err, IsNil)
+
+	entries, err := idx.EntriesByOffset()
+	c.Assert(err, IsNil)
+
+	for _, pos := range fixtureOffsets {
+		e, err := entries.Next()
+		c.Assert(err, IsNil)
+
+		c.Assert(e.Offset, Equals, uint64(pos))
+	}
+}
+
 var fixtureHashes = []plumbing.Hash{
 	plumbing.NewHash("303953e5aa461c203a324821bc1717f9b4fff895"),
 	plumbing.NewHash("5296768e3d9f661387ccbff18c4dea6c997fd78c"),
diff --git a/plumbing/format/packfile/fsobject.go b/plumbing/format/packfile/fsobject.go
index 6fd3ca5..330cb73 100644
--- a/plumbing/format/packfile/fsobject.go
+++ b/plumbing/format/packfile/fsobject.go
@@ -47,6 +47,16 @@ func NewFSObject(
 
 // Reader implements the plumbing.EncodedObject interface.
 func (o *FSObject) Reader() (io.ReadCloser, error) {
+	obj, ok := o.cache.Get(o.hash)
+	if ok {
+		reader, err := obj.Reader()
+		if err != nil {
+			return nil, err
+		}
+
+		return reader, nil
+	}
+
 	f, err := o.fs.Open(o.path)
 	if err != nil {
 		return nil, err
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index 5feb781..852a834 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -258,6 +258,19 @@ func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
 }
 
 func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
+	ref, err := p.FindHash(offset)
+	if err == nil {
+		obj, ok := p.cacheGet(ref)
+		if ok {
+			reader, err := obj.Reader()
+			if err != nil {
+				return nil, err
+			}
+
+			return reader, nil
+		}
+	}
+
 	if _, err := p.s.SeekFromStart(offset); err != nil {
 		return nil, err
 	}
@@ -306,6 +319,8 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
 	}
 
 	_, _, err = p.s.NextObject(w)
+	p.cachePut(obj)
+
 	return err
 }
 
@@ -394,7 +409,7 @@ func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter,
 		plumbing.TreeObject,
 		plumbing.CommitObject,
 		plumbing.TagObject:
-		entries, err := p.Entries()
+		entries, err := p.EntriesByOffset()
 		if err != nil {
 			return nil, err
 		}
diff --git a/plumbing/object/commit.go b/plumbing/object/commit.go
index 00ae3f1..e254342 100644
--- a/plumbing/object/commit.go
+++ b/plumbing/object/commit.go
@@ -199,17 +199,23 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
 			}
 
 			split := bytes.SplitN(line, []byte{' '}, 2)
+
+			var data []byte
+			if len(split) == 2 {
+				data = split[1]
+			}
+
 			switch string(split[0]) {
 			case "tree":
-				c.TreeHash = plumbing.NewHash(string(split[1]))
+				c.TreeHash = plumbing.NewHash(string(data))
 			case "parent":
-				c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(split[1])))
+				c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data)))
 			case "author":
-				c.Author.Decode(split[1])
+				c.Author.Decode(data)
 			case "committer":
-				c.Committer.Decode(split[1])
+				c.Committer.Decode(data)
 			case headerpgp:
-				c.PGPSignature += string(split[1]) + "\n"
+				c.PGPSignature += string(data) + "\n"
 				pgpsig = true
 			}
 		} else {
diff --git a/plumbing/object/commit_test.go b/plumbing/object/commit_test.go
index b5dfbe3..e72b703 100644
--- a/plumbing/object/commit_test.go
+++ b/plumbing/object/commit_test.go
@@ -325,6 +325,22 @@ RUysgqjcpT8+iQM1PblGfHR4XAhuOqN5Fx06PSaFZhqvWFezJ28/CLyX5q+oIVk=
 	c.Assert(err, IsNil)
 	c.Assert(decoded.PGPSignature, Equals, pgpsignature)
 
+	// signature with extra empty line, it caused "index out of range" when
+	// parsing it
+
+	pgpsignature2 := "\n" + pgpsignature
+
+	commit.PGPSignature = pgpsignature2
+	encoded = &plumbing.MemoryObject{}
+	decoded = &Commit{}
+
+	err = commit.Encode(encoded)
+	c.Assert(err, IsNil)
+
+	err = decoded.Decode(encoded)
+	c.Assert(err, IsNil)
+	c.Assert(decoded.PGPSignature, Equals, pgpsignature2)
+
 	// signature in author name
 
 	commit.PGPSignature = ""
@@ -461,3 +477,21 @@ func (s *SuiteCommit) TestPatchCancel(c *C) {
 
 	c.Assert(err, ErrorMatches, "operation canceled")
 }
+
+func (s *SuiteCommit) TestMalformedHeader(c *C) {
+	encoded := &plumbing.MemoryObject{}
+	decoded := &Commit{}
+	commit := *s.Commit
+
+	commit.PGPSignature = "\n"
+	commit.Author.Name = "\n"
+	commit.Author.Email = "\n"
+	commit.Committer.Name = "\n"
+	commit.Committer.Email = "\n"
+
+	err := commit.Encode(encoded)
+	c.Assert(err, IsNil)
+
+	err = decoded.Decode(encoded)
+	c.Assert(err, IsNil)
+}
diff --git a/status.go b/status.go
--- a/status.go
+++ b/status.go
@@ -1,7 +1,10 @@
 package git
 
-import "fmt"
-import "bytes"
+import (
+	"bytes"
+	"fmt"
+	"path/filepath"
+)
 
 // Status represents the current status of a Worktree.
 // The key of the map is the path of the file.
@@ -17,6 +20,12 @@ func (s Status) File(path string) *FileStatus {
 	return s[path]
 }
 
+// IsUntracked checks if file for given path is 'Untracked'
+func (s Status) IsUntracked(path string) bool {
+	stat, ok := (s)[filepath.ToSlash(path)]
+	return ok && stat.Worktree == Untracked
+}
+
 // IsClean returns true if all the files aren't in Unmodified status.
 func (s Status) IsClean() bool {
 	for _, status := range s {
diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go
index af07eb5..df4f756 100644
--- a/storage/filesystem/dotgit/dotgit.go
+++ b/storage/filesystem/dotgit/dotgit.go
@@ -61,6 +61,10 @@ var (
 // type is not zero-value-safe, use the New function to initialize it.
 type DotGit struct {
 	fs billy.Filesystem
+
+	// incoming object directory information
+	incomingChecked bool
+	incomingDirName string
 }
 
 // New returns a DotGit value ready to be used. The path argument must
@@ -279,19 +283,80 @@ func (d *DotGit) objectPath(h plumbing.Hash) string {
 	return d.fs.Join(objectsPath, hash[0:2], hash[2:40])
 }
 
+// incomingObjectPath is intended to add support for a git pre-receive hook
+// to be written it adds support for go-git to find objects in an "incoming"
+// directory, so that the library can be used to write a pre-receive hook
+// that deals with the incoming objects.
+//
+// More on git hooks found here : https://git-scm.com/docs/githooks
+// More on 'quarantine'/incoming directory here:
+// https://git-scm.com/docs/git-receive-pack
+func (d *DotGit) incomingObjectPath(h plumbing.Hash) string {
+	hString := h.String()
+
+	if d.incomingDirName == "" {
+		return d.fs.Join(objectsPath, hString[0:2], hString[2:40])
+	}
+
+	return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40])
+}
+
+// hasIncomingObjects searches for an incoming directory and keeps its name
+// so it doesn't have to be found each time an object is accessed.
+func (d *DotGit) hasIncomingObjects() bool {
+	if !d.incomingChecked {
+		directoryContents, err := d.fs.ReadDir(objectsPath)
+		if err == nil {
+			for _, file := range directoryContents {
+				if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() {
+					d.incomingDirName = file.Name()
+				}
+			}
+		}
+
+		d.incomingChecked = true
+	}
+
+	return d.incomingDirName != ""
+}
+
 // Object returns a fs.File pointing the object file, if exists
 func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) {
-	return d.fs.Open(d.objectPath(h))
+	obj1, err1 := d.fs.Open(d.objectPath(h))
+	if os.IsNotExist(err1) && d.hasIncomingObjects() {
+		obj2, err2 := d.fs.Open(d.incomingObjectPath(h))
+		if err2 != nil {
+			return obj1, err1
+		}
+		return obj2, err2
+	}
+	return obj1, err1
 }
 
 // ObjectStat returns a os.FileInfo pointing the object file, if exists
 func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) {
-	return d.fs.Stat(d.objectPath(h))
+	obj1, err1 := d.fs.Stat(d.objectPath(h))
+	if os.IsNotExist(err1) && d.hasIncomingObjects() {
+		obj2, err2 := d.fs.Stat(d.incomingObjectPath(h))
+		if err2 != nil {
+			return obj1, err1
+		}
+		return obj2, err2
+	}
+	return obj1, err1
 }
 
 // ObjectDelete removes the object file, if exists
 func (d *DotGit) ObjectDelete(h plumbing.Hash) error {
-	return d.fs.Remove(d.objectPath(h))
+	err1 := d.fs.Remove(d.objectPath(h))
+	if os.IsNotExist(err1) && d.hasIncomingObjects() {
+		err2 := d.fs.Remove(d.incomingObjectPath(h))
+		if err2 != nil {
+			return err1
+		}
+		return err2
+	}
+	return err1
 }
 
 func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) {
diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go
index 7733eef..64c2aee 100644
--- a/storage/filesystem/dotgit/dotgit_test.go
+++ b/storage/filesystem/dotgit/dotgit_test.go
@@ -537,6 +537,57 @@ func (s *SuiteDotGit) TestObject(c *C) {
 		file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")),
 		Equals, true,
 	)
+	incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash
+	incomingDirPath := fs.Join("objects", "incoming-123456")
+	incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40])
+	fs.MkdirAll(incomingDirPath, os.FileMode(0755))
+	fs.Create(incomingFilePath)
+
+	file, err = dir.Object(plumbing.NewHash(incomingHash))
+	c.Assert(err, IsNil)
+}
+
+func (s *SuiteDotGit) TestObjectStat(c *C) {
+	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+	dir := New(fs)
+
+	hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e")
+	_, err := dir.ObjectStat(hash)
+	c.Assert(err, IsNil)
+	incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash
+	incomingDirPath := fs.Join("objects", "incoming-123456")
+	incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40])
+	fs.MkdirAll(incomingDirPath, os.FileMode(0755))
+	fs.Create(incomingFilePath)
+
+	_, err = dir.ObjectStat(plumbing.NewHash(incomingHash))
+	c.Assert(err, IsNil)
+}
+
+func (s *SuiteDotGit) TestObjectDelete(c *C) {
+	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+	dir := New(fs)
+
+	hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e")
+	err := dir.ObjectDelete(hash)
+	c.Assert(err, IsNil)
+
+	incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash
+	incomingDirPath := fs.Join("objects", "incoming-123456")
+	incomingSubDirPath := fs.Join(incomingDirPath, incomingHash[0:2])
+	incomingFilePath := fs.Join(incomingSubDirPath, incomingHash[2:40])
+
+	err = fs.MkdirAll(incomingSubDirPath, os.FileMode(0755))
+	c.Assert(err, IsNil)
+
+	f, err := fs.Create(incomingFilePath)
+	c.Assert(err, IsNil)
+
+	err = f.Close()
+	c.Assert(err, IsNil)
+
+	err = dir.ObjectDelete(plumbing.NewHash(incomingHash))
+	c.Assert(err, IsNil)
 }
 
 func (s *SuiteDotGit) TestObjectNotFound(c *C) {
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 6958e32..3a3a2bd 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -295,7 +295,14 @@ func (s *ObjectStorage) decodeObjectAt(
 		return nil, err
 	}
 
-	return packfile.NewPackfile(idx, s.dir.Fs(), f).GetByOffset(offset)
+	var p *packfile.Packfile
+	if s.deltaBaseCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+	} else {
+		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
+	}
+
+	return p.GetByOffset(offset)
 }
 
 func (s *ObjectStorage) decodeDeltaObjectAt(
@@ -486,7 +493,14 @@ func newPackfileIter(
 	index idxfile.Index,
 	cache cache.Object,
 ) (storer.EncodedObjectIter, error) {
-	iter, err := packfile.NewPackfile(index, fs, f).GetByType(t)
+	var p *packfile.Packfile
+	if cache != nil {
+		p = packfile.NewPackfileWithCache(index, fs, f, cache)
+	} else {
+		p = packfile.NewPackfile(index, fs, f)
+	}
+
+	iter, err := p.GetByType(t)
 	if err != nil {
 		return nil, err
 	}
diff --git a/worktree.go b/worktree.go
index 99b2cd1..e45d815 100644
--- a/worktree.go
+++ b/worktree.go
@@ -713,29 +713,54 @@ func (w *Worktree) readGitmodulesFile() (*config.Modules, error) {
 }
 
 // Clean the worktree by removing untracked files.
+// An empty dir could be removed - this is what `git clean -f -d .` does.
 func (w *Worktree) Clean(opts *CleanOptions) error {
 	s, err := w.Status()
 	if err != nil {
 		return err
 	}
 
-	// Check Worktree status to be Untracked, obtain absolute path and delete.
-	for relativePath, status := range s {
-		// Check if the path contains a directory and if Dir options is false,
-		// skip the path.
-		if relativePath != filepath.Base(relativePath) && !opts.Dir {
+	root := ""
+	files, err := w.Filesystem.ReadDir(root)
+	if err != nil {
+		return err
+	}
+	return w.doClean(s, opts, root, files)
+}
+
+func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error {
+	for _, fi := range files {
+		if fi.Name() == ".git" {
 			continue
 		}
 
-		// Remove the file only if it's an untracked file.
-		if status.Worktree == Untracked {
-			absPath := filepath.Join(w.Filesystem.Root(), relativePath)
-			if err := os.Remove(absPath); err != nil {
+		// relative path under the root
+		path := filepath.Join(dir, fi.Name())
+		if fi.IsDir() {
+			if !opts.Dir {
+				continue
+			}
+
+			subfiles, err := w.Filesystem.ReadDir(path)
+			if err != nil {
+				return err
+			}
+			err = w.doClean(status, opts, path, subfiles)
+			if err != nil {
 				return err
 			}
+		} else {
+			if status.IsUntracked(path) {
+				if err := w.Filesystem.Remove(path); err != nil {
+					return err
+				}
+			}
 		}
 	}
 
+	if opts.Dir {
+		return doCleanDirectories(w.Filesystem, dir)
+	}
 	return nil
 }
 
@@ -881,15 +906,18 @@ func rmFileAndDirIfEmpty(fs billy.Filesystem, name string) error {
 		return err
 	}
 
-	path := filepath.Dir(name)
-	files, err := fs.ReadDir(path)
+	dir := filepath.Dir(name)
+	return doCleanDirectories(fs, dir)
+}
+
+// doCleanDirectories removes empty subdirs (without files)
+func doCleanDirectories(fs billy.Filesystem, dir string) error {
+	files, err := fs.ReadDir(dir)
 	if err != nil {
 		return err
 	}
-
 	if len(files) == 0 {
-		fs.Remove(path)
+		return fs.Remove(dir)
 	}
-
 	return nil
 }
diff --git a/worktree_test.go b/worktree_test.go
index df191b0..c714011 100644
--- a/worktree_test.go
+++ b/worktree_test.go
@@ -1591,6 +1591,10 @@ func (s *WorktreeSuite) TestClean(c *C) {
 
 	c.Assert(len(status), Equals, 1)
 
+	fi, err := fs.Lstat("pkgA")
+	c.Assert(err, IsNil)
+	c.Assert(fi.IsDir(), Equals, true)
+
 	// Clean with Dir: true.
 	err = wt.Clean(&CleanOptions{Dir: true})
 	c.Assert(err, IsNil)
@@ -1599,6 +1603,11 @@ func (s *WorktreeSuite) TestClean(c *C) {
 
 	c.Assert(err, IsNil)
 	c.Assert(len(status), Equals, 0)
+
+	// An empty dir should be deleted, as well.
+	_, err = fs.Lstat("pkgA")
+	c.Assert(err, ErrorMatches, ".*(no such file or directory.*|.*file does not exist)*.")
+
 }
 
 func (s *WorktreeSuite) TestAlternatesRepo(c *C) {
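
The hunks above add a few user-facing APIs. As a rough usage sketch (not part of this commit; the repository path and file name are made up), the reworked Worktree.Clean together with the new Status.IsUntracked might be used like this:

package main

import (
	"fmt"
	"log"

	git "gopkg.in/src-d/go-git.v4"
)

func main() {
	// Open an existing repository; the path is hypothetical.
	repo, err := git.PlainOpen("/path/to/repo")
	if err != nil {
		log.Fatal(err)
	}

	wt, err := repo.Worktree()
	if err != nil {
		log.Fatal(err)
	}

	status, err := wt.Status()
	if err != nil {
		log.Fatal(err)
	}

	// New helper from this change: reports whether the given path is untracked.
	fmt.Println(status.IsUntracked("some/new-file.txt"))

	// With Dir: true, Clean now also removes directories left empty,
	// mirroring `git clean -f -d .`.
	if err := wt.Clean(&git.CleanOptions{Dir: true}); err != nil {
		log.Fatal(err)
	}
}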
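
Similarly, a minimal sketch for the new core.commentChar option, mirroring the unmarshal test above (the raw config bytes are invented for illustration):

package main

import (
	"fmt"
	"log"

	"gopkg.in/src-d/go-git.v4/config"
)

func main() {
	// A tiny config snippet; real code would usually read .git/config instead.
	raw := []byte("[core]\n\tcommentChar = ;\n")

	cfg := config.NewConfig()
	if err := cfg.Unmarshal(raw); err != nil {
		log.Fatal(err)
	}

	// Empty string when the option is absent; callers would then fall back
	// to git's usual "#".
	fmt.Println(cfg.Core.CommentChar)
}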
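
Finally, a sketch of iterating an index in pack-offset order with the new EntriesByOffset. NewMemoryIndex and NewDecoder are assumed from the surrounding idxfile package rather than shown in this diff, and the .idx path is made up:

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

func main() {
	// Open a pack index file; the path is hypothetical.
	f, err := os.Open("objects/pack/pack-example.idx")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(f).Decode(idx); err != nil {
		log.Fatal(err)
	}

	// Entries come back ordered by offset in the packfile instead of by hash,
	// which is what Packfile.GetByType now relies on.
	iter, err := idx.EntriesByOffset()
	if err != nil {
		log.Fatal(err)
	}

	for {
		e, err := iter.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}

		fmt.Println(e.Offset, e.Hash)
	}
}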