author | Antonio Jesus Navarro Perez <antnavper@gmail.com> | 2018-06-05 18:33:27 +0200
---|---|---
committer | Antonio Jesus Navarro Perez <antnavper@gmail.com> | 2018-06-05 18:34:08 +0200
commit | b0d807a1ae0687ef3a01d78c1dc5e55f7217268f (patch) |
tree | 3a9d6f21cad1af18d940abcc17ddb1337e78146d /storage/filesystem/dotgit |
parent | 8955f060a3cba36a56ac334576eba4123f6e918a (diff) |
download | go-git-b0d807a1ae0687ef3a01d78c1dc5e55f7217268f.tar.gz |
dotgit: Move package outside internal.
Signed-off-by: Antonio Jesus Navarro Perez <antnavper@gmail.com>
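Moving the package out of the internal/ tree changes its import path and makes it usable from outside go-git's own storage code. A minimal sketch of what a consumer can now write, assuming the previous location was `storage/filesystem/internal/dotgit` and using an illustrative repository path:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"

	// Previously this package lived under an internal/ directory and could
	// not be imported from outside go-git; after this commit it is public.
	"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
)

func main() {
	// Point a billy filesystem at an existing .git directory
	// (the path is an assumption, purely for illustration).
	fs := osfs.New("/tmp/repo/.git")
	dir := dotgit.New(fs)

	// List every reference the repository knows about, loose and packed.
	refs, err := dir.Refs()
	if err != nil {
		panic(err)
	}
	for _, ref := range refs {
		fmt.Println(ref)
	}
}
```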
Diffstat (limited to 'storage/filesystem/dotgit')
-rw-r--r-- | storage/filesystem/dotgit/dotgit.go | 808
-rw-r--r-- | storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go | 17
-rw-r--r-- | storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go | 34
-rw-r--r-- | storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go | 42
-rw-r--r-- | storage/filesystem/dotgit/dotgit_setref.go | 43
-rw-r--r-- | storage/filesystem/dotgit/dotgit_setref_norwfs.go | 47
-rw-r--r-- | storage/filesystem/dotgit/dotgit_test.go | 683
-rw-r--r-- | storage/filesystem/dotgit/writers.go | 282
-rw-r--r-- | storage/filesystem/dotgit/writers_test.go | 156
9 files changed, 2112 insertions, 0 deletions
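Most of the added code lives in dotgit.go, which among other things gives SetRef optional compare-and-swap semantics: passing a non-nil old reference makes the write fail if the ref on disk changed in the meantime. A hedged sketch of how a caller might rely on that, with illustrative hashes and repository path:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
)

func main() {
	// The repository path is an assumption for illustration.
	dir := dotgit.New(osfs.New("/tmp/repo/.git"))

	// The value we believe refs/heads/foo currently points at.
	old := plumbing.NewReferenceFromStrings(
		"refs/heads/foo",
		"e8d3ffab552895c19b9fcf7aa264d277cde33881",
	)
	// The value we want to move it to.
	updated := plumbing.NewReferenceFromStrings(
		"refs/heads/foo",
		"6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
	)

	// SetRef locks the loose ref file and re-reads it before truncating and
	// writing; if another process moved refs/heads/foo since `old` was read,
	// it returns an error instead of silently overwriting the newer value.
	if err := dir.SetRef(updated, old); err != nil {
		fmt.Println("refs/heads/foo changed concurrently:", err)
	}
}
```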
diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go new file mode 100644 index 0000000..52b621c --- /dev/null +++ b/storage/filesystem/dotgit/dotgit.go @@ -0,0 +1,808 @@ +// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt +package dotgit + +import ( + "bufio" + "errors" + "fmt" + "io" + stdioutil "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "gopkg.in/src-d/go-billy.v4/osfs" + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/utils/ioutil" + + "gopkg.in/src-d/go-billy.v4" +) + +const ( + suffix = ".git" + packedRefsPath = "packed-refs" + configPath = "config" + indexPath = "index" + shallowPath = "shallow" + modulePath = "modules" + objectsPath = "objects" + packPath = "pack" + refsPath = "refs" + + tmpPackedRefsPrefix = "._packed-refs" + + packExt = ".pack" + idxExt = ".idx" +) + +var ( + // ErrNotFound is returned by New when the path is not found. + ErrNotFound = errors.New("path not found") + // ErrIdxNotFound is returned by Idxfile when the idx file is not found + ErrIdxNotFound = errors.New("idx file not found") + // ErrPackfileNotFound is returned by Packfile when the packfile is not found + ErrPackfileNotFound = errors.New("packfile not found") + // ErrConfigNotFound is returned by Config when the config is not found + ErrConfigNotFound = errors.New("config file not found") + // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is + // found in the packed-ref file. This is usually the case for corrupted git + // repositories. + ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file") + // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt. + ErrPackedRefsBadFormat = errors.New("malformed packed-ref") + // ErrSymRefTargetNotFound is returned when a symbolic reference is + // targeting a non-existing object. This usually means the repository + // is corrupt. + ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") +) + +// The DotGit type represents a local git repository on disk. This +// type is not zero-value-safe, use the New function to initialize it. +type DotGit struct { + fs billy.Filesystem +} + +// New returns a DotGit value ready to be used. The path argument must +// be the absolute path of a git repository directory (e.g. +// "/foo/bar/.git"). +func New(fs billy.Filesystem) *DotGit { + return &DotGit{fs: fs} +} + +// Initialize creates all the folder scaffolding. 
+func (d *DotGit) Initialize() error { + mustExists := []string{ + d.fs.Join("objects", "info"), + d.fs.Join("objects", "pack"), + d.fs.Join("refs", "heads"), + d.fs.Join("refs", "tags"), + } + + for _, path := range mustExists { + _, err := d.fs.Stat(path) + if err == nil { + continue + } + + if !os.IsNotExist(err) { + return err + } + + if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil { + return err + } + } + + return nil +} + +// ConfigWriter returns a file pointer for write to the config file +func (d *DotGit) ConfigWriter() (billy.File, error) { + return d.fs.Create(configPath) +} + +// Config returns a file pointer for read to the config file +func (d *DotGit) Config() (billy.File, error) { + return d.fs.Open(configPath) +} + +// IndexWriter returns a file pointer for write to the index file +func (d *DotGit) IndexWriter() (billy.File, error) { + return d.fs.Create(indexPath) +} + +// Index returns a file pointer for read to the index file +func (d *DotGit) Index() (billy.File, error) { + return d.fs.Open(indexPath) +} + +// ShallowWriter returns a file pointer for write to the shallow file +func (d *DotGit) ShallowWriter() (billy.File, error) { + return d.fs.Create(shallowPath) +} + +// Shallow returns a file pointer for read to the shallow file +func (d *DotGit) Shallow() (billy.File, error) { + f, err := d.fs.Open(shallowPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + return f, nil +} + +// NewObjectPack return a writer for a new packfile, it saves the packfile to +// disk and also generates and save the index for the given packfile. +func (d *DotGit) NewObjectPack() (*PackWriter, error) { + return newPackWrite(d.fs) +} + +// ObjectPacks returns the list of availables packfiles +func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { + packDir := d.fs.Join(objectsPath, packPath) + files, err := d.fs.ReadDir(packDir) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + var packs []plumbing.Hash + for _, f := range files { + if !strings.HasSuffix(f.Name(), packExt) { + continue + } + + n := f.Name() + h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack + if h.IsZero() { + // Ignore files with badly-formatted names. + continue + } + packs = append(packs, h) + } + + return packs, nil +} + +func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string { + return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension)) +} + +func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) { + pack, err := d.fs.Open(d.objectPackPath(hash, extension)) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrPackfileNotFound + } + + return nil, err + } + + return pack, nil +} + +// ObjectPack returns a fs.File of the given packfile +func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) { + return d.objectPackOpen(hash, `pack`) +} + +// ObjectPackIdx returns a fs.File of the index file for a given packfile +func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) { + return d.objectPackOpen(hash, `idx`) +} + +func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error { + path := d.objectPackPath(hash, `pack`) + if !t.IsZero() { + fi, err := d.fs.Stat(path) + if err != nil { + return err + } + // too new, skip deletion. 
+ if !fi.ModTime().Before(t) { + return nil + } + } + err := d.fs.Remove(path) + if err != nil { + return err + } + return d.fs.Remove(d.objectPackPath(hash, `idx`)) +} + +// NewObject return a writer for a new object file. +func (d *DotGit) NewObject() (*ObjectWriter, error) { + return newObjectWriter(d.fs) +} + +// Objects returns a slice with the hashes of objects found under the +// .git/objects/ directory. +func (d *DotGit) Objects() ([]plumbing.Hash, error) { + var objects []plumbing.Hash + err := d.ForEachObjectHash(func(hash plumbing.Hash) error { + objects = append(objects, hash) + return nil + }) + if err != nil { + return nil, err + } + return objects, nil +} + +// Objects returns a slice with the hashes of objects found under the +// .git/objects/ directory. +func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { + files, err := d.fs.ReadDir(objectsPath) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + for _, f := range files { + if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) { + base := f.Name() + d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base)) + if err != nil { + return err + } + + for _, o := range d { + h := plumbing.NewHash(base + o.Name()) + if h.IsZero() { + // Ignore files with badly-formatted names. + continue + } + err = fun(h) + if err != nil { + return err + } + } + } + } + + return nil +} + +func (d *DotGit) objectPath(h plumbing.Hash) string { + hash := h.String() + return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) +} + +// Object returns a fs.File pointing the object file, if exists +func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { + return d.fs.Open(d.objectPath(h)) +} + +// ObjectStat returns a os.FileInfo pointing the object file, if exists +func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { + return d.fs.Stat(d.objectPath(h)) +} + +// ObjectDelete removes the object file, if exists +func (d *DotGit) ObjectDelete(h plumbing.Hash) error { + return d.fs.Remove(d.objectPath(h)) +} + +func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) { + b, err := stdioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + line := strings.TrimSpace(string(b)) + return plumbing.NewReferenceFromStrings(name, line), nil +} + +func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error { + if old == nil { + return nil + } + ref, err := d.readReferenceFrom(f, old.Name().String()) + if err != nil { + return err + } + if ref.Hash() != old.Hash() { + return fmt.Errorf("reference has changed concurrently") + } + _, err = f.Seek(0, io.SeekStart) + if err != nil { + return err + } + return f.Truncate(0) +} + +func (d *DotGit) SetRef(r, old *plumbing.Reference) error { + var content string + switch r.Type() { + case plumbing.SymbolicReference: + content = fmt.Sprintf("ref: %s\n", r.Target()) + case plumbing.HashReference: + content = fmt.Sprintln(r.Hash().String()) + } + + fileName := r.Name().String() + + return d.setRef(fileName, content, old) +} + +// Refs scans the git directory collecting references, which it returns. +// Symbolic references are resolved and included in the output. 
+func (d *DotGit) Refs() ([]*plumbing.Reference, error) { + var refs []*plumbing.Reference + var seen = make(map[plumbing.ReferenceName]bool) + if err := d.addRefsFromRefDir(&refs, seen); err != nil { + return nil, err + } + + if err := d.addRefsFromPackedRefs(&refs, seen); err != nil { + return nil, err + } + + if err := d.addRefFromHEAD(&refs); err != nil { + return nil, err + } + + return refs, nil +} + +// Ref returns the reference for a given reference name. +func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) { + ref, err := d.readReferenceFile(".", name.String()) + if err == nil { + return ref, nil + } + + return d.packedRef(name) +} + +func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) { + s := bufio.NewScanner(f) + var refs []*plumbing.Reference + for s.Scan() { + ref, err := d.processLine(s.Text()) + if err != nil { + return nil, err + } + + if ref != nil { + refs = append(refs, ref) + } + } + + return refs, s.Err() +} + +func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) { + f, err := d.fs.Open(packedRefsPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + defer ioutil.CheckClose(f, &err) + return d.findPackedRefsInFile(f) +} + +func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) { + refs, err := d.findPackedRefs() + if err != nil { + return nil, err + } + + for _, ref := range refs { + if ref.Name() == name { + return ref, nil + } + } + + return nil, plumbing.ErrReferenceNotFound +} + +// RemoveRef removes a reference by name. +func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error { + path := d.fs.Join(".", name.String()) + _, err := d.fs.Stat(path) + if err == nil { + err = d.fs.Remove(path) + // Drop down to remove it from the packed refs file, too. + } + + if err != nil && !os.IsNotExist(err) { + return err + } + + return d.rewritePackedRefsWithoutRef(name) +} + +func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) { + packedRefs, err := d.findPackedRefs() + if err != nil { + return err + } + + for _, ref := range packedRefs { + if !seen[ref.Name()] { + *refs = append(*refs, ref) + seen[ref.Name()] = true + } + } + return nil +} + +func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) { + packedRefs, err := d.findPackedRefsInFile(f) + if err != nil { + return err + } + + for _, ref := range packedRefs { + if !seen[ref.Name()] { + *refs = append(*refs, ref) + seen[ref.Name()] = true + } + } + return nil +} + +func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( + pr billy.File, err error) { + var f billy.File + defer func() { + if err != nil && f != nil { + ioutil.CheckClose(f, &err) + } + }() + + // File mode is retrieved from a constant defined in the target specific + // files (dotgit_rewrite_packed_refs_*). Some modes are not available + // in all filesystems. + openFlags := openAndLockPackedRefsMode + if doCreate { + openFlags |= os.O_CREATE + } + + // Keep trying to open and lock the file until we're sure the file + // didn't change between the open and the lock. 
+ for { + f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600) + if err != nil { + if os.IsNotExist(err) && !doCreate { + return nil, nil + } + + return nil, err + } + fi, err := d.fs.Stat(packedRefsPath) + if err != nil { + return nil, err + } + mtime := fi.ModTime() + + err = f.Lock() + if err != nil { + return nil, err + } + + fi, err = d.fs.Stat(packedRefsPath) + if err != nil { + return nil, err + } + if mtime.Equal(fi.ModTime()) { + break + } + // The file has changed since we opened it. Close and retry. + err = f.Close() + if err != nil { + return nil, err + } + } + return f, nil +} + +func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) { + pr, err := d.openAndLockPackedRefs(false) + if err != nil { + return err + } + if pr == nil { + return nil + } + defer ioutil.CheckClose(pr, &err) + + // Creating the temp file in the same directory as the target file + // improves our chances for rename operation to be atomic. + tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) + if err != nil { + return err + } + tmpName := tmp.Name() + defer func() { + ioutil.CheckClose(tmp, &err) + _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it + }() + + s := bufio.NewScanner(pr) + found := false + for s.Scan() { + line := s.Text() + ref, err := d.processLine(line) + if err != nil { + return err + } + + if ref != nil && ref.Name() == name { + found = true + continue + } + + if _, err := fmt.Fprintln(tmp, line); err != nil { + return err + } + } + + if err := s.Err(); err != nil { + return err + } + + if !found { + return nil + } + + return d.rewritePackedRefsWhileLocked(tmp, pr) +} + +// process lines from a packed-refs file +func (d *DotGit) processLine(line string) (*plumbing.Reference, error) { + if len(line) == 0 { + return nil, nil + } + + switch line[0] { + case '#': // comment - ignore + return nil, nil + case '^': // annotated tag commit of the previous line - ignore + return nil, nil + default: + ws := strings.Split(line, " ") // hash then ref + if len(ws) != 2 { + return nil, ErrPackedRefsBadFormat + } + + return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil + } +} + +func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error { + return d.walkReferencesTree(refs, []string{refsPath}, seen) +} + +func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error { + files, err := d.fs.ReadDir(d.fs.Join(relPath...)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + for _, f := range files { + newRelPath := append(append([]string(nil), relPath...), f.Name()) + if f.IsDir() { + if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil { + return err + } + + continue + } + + ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/")) + if err != nil { + return err + } + + if ref != nil && !seen[ref.Name()] { + *refs = append(*refs, ref) + seen[ref.Name()] = true + } + } + + return nil +} + +func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error { + ref, err := d.readReferenceFile(".", "HEAD") + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + *refs = append(*refs, ref) + return nil +} + +func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) { + path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...)) + f, err := d.fs.Open(path) + if err != nil { + return nil, err + } + defer ioutil.CheckClose(f, 
&err) + + return d.readReferenceFrom(f, name) +} + +func (d *DotGit) CountLooseRefs() (int, error) { + var refs []*plumbing.Reference + var seen = make(map[plumbing.ReferenceName]bool) + if err := d.addRefsFromRefDir(&refs, seen); err != nil { + return 0, err + } + + return len(refs), nil +} + +// PackRefs packs all loose refs into the packed-refs file. +// +// This implementation only works under the assumption that the view +// of the file system won't be updated during this operation. This +// strategy would not work on a general file system though, without +// locking each loose reference and checking it again before deleting +// the file, because otherwise an updated reference could sneak in and +// then be deleted by the packed-refs process. Alternatively, every +// ref update could also lock packed-refs, so only one lock is +// required during ref-packing. But that would worsen performance in +// the common case. +// +// TODO: add an "all" boolean like the `git pack-refs --all` flag. +// When `all` is false, it would only pack refs that have already been +// packed, plus all tags. +func (d *DotGit) PackRefs() (err error) { + // Lock packed-refs, and create it if it doesn't exist yet. + f, err := d.openAndLockPackedRefs(true) + if err != nil { + return err + } + defer ioutil.CheckClose(f, &err) + + // Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs. + var refs []*plumbing.Reference + seen := make(map[plumbing.ReferenceName]bool) + if err = d.addRefsFromRefDir(&refs, seen); err != nil { + return err + } + if len(refs) == 0 { + // Nothing to do! + return nil + } + numLooseRefs := len(refs) + if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil { + return err + } + + // Write them all to a new temp packed-refs file. + tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) + if err != nil { + return err + } + tmpName := tmp.Name() + defer func() { + ioutil.CheckClose(tmp, &err) + _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it + }() + + w := bufio.NewWriter(tmp) + for _, ref := range refs { + _, err = w.WriteString(ref.String() + "\n") + if err != nil { + return err + } + } + err = w.Flush() + if err != nil { + return err + } + + // Rename the temp packed-refs file. + err = d.rewritePackedRefsWhileLocked(tmp, f) + if err != nil { + return err + } + + // Delete all the loose refs, while still holding the packed-refs + // lock. + for _, ref := range refs[:numLooseRefs] { + path := d.fs.Join(".", ref.Name().String()) + err = d.fs.Remove(path) + if err != nil && !os.IsNotExist(err) { + return err + } + } + + return nil +} + +// Module return a billy.Filesystem pointing to the module folder +func (d *DotGit) Module(name string) (billy.Filesystem, error) { + return d.fs.Chroot(d.fs.Join(modulePath, name)) +} + +// Alternates returns DotGit(s) based off paths in objects/info/alternates if +// available. This can be used to checks if it's a shared repository. +func (d *DotGit) Alternates() ([]*DotGit, error) { + altpath := d.fs.Join("objects", "info", "alternates") + f, err := d.fs.Open(altpath) + if err != nil { + return nil, err + } + defer f.Close() + + var alternates []*DotGit + + // Read alternate paths line-by-line and create DotGit objects. + scanner := bufio.NewScanner(f) + for scanner.Scan() { + path := scanner.Text() + if !filepath.IsAbs(path) { + // For relative paths, we can perform an internal conversion to + // slash so that they work cross-platform. 
+ slashPath := filepath.ToSlash(path) + // If the path is not absolute, it must be relative to object + // database (.git/objects/info). + // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html + // Hence, derive a path relative to DotGit's root. + // "../../../reponame/.git/" -> "../../reponame/.git" + // Remove the first ../ + relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...) + normalPath := filepath.FromSlash(relpath) + path = filepath.Join(d.fs.Root(), normalPath) + } + fs := osfs.New(filepath.Dir(path)) + alternates = append(alternates, New(fs)) + } + + if err = scanner.Err(); err != nil { + return nil, err + } + + return alternates, nil +} + +func isHex(s string) bool { + for _, b := range []byte(s) { + if isNum(b) { + continue + } + if isHexAlpha(b) { + continue + } + + return false + } + + return true +} + +func isNum(b byte) bool { + return b >= '0' && b <= '9' +} + +func isHexAlpha(b byte) bool { + return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' +} diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go new file mode 100644 index 0000000..c760793 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go @@ -0,0 +1,17 @@ +// +build !windows,!norwfs + +package dotgit + +import ( + "os" + + "gopkg.in/src-d/go-billy.v4" +) + +const openAndLockPackedRefsMode = os.O_RDWR + +func (d *DotGit) rewritePackedRefsWhileLocked( + tmp billy.File, pr billy.File) error { + // On non-Windows platforms, we can have atomic rename. + return d.fs.Rename(tmp.Name(), pr.Name()) +} diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go new file mode 100644 index 0000000..6e43b42 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go @@ -0,0 +1,34 @@ +// +build norwfs + +package dotgit + +import ( + "io" + "os" + + "gopkg.in/src-d/go-billy.v4" +) + +const openAndLockPackedRefsMode = os.O_RDONLY + +// Instead of renaming that can not be supported in simpler filesystems +// a full copy is done. +func (d *DotGit) rewritePackedRefsWhileLocked( + tmp billy.File, pr billy.File) error { + + prWrite, err := d.fs.Create(pr.Name()) + if err != nil { + return err + } + + defer prWrite.Close() + + _, err = tmp.Seek(0, io.SeekStart) + if err != nil { + return err + } + + _, err = io.Copy(prWrite, tmp) + + return err +} diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go new file mode 100644 index 0000000..897d2c9 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go @@ -0,0 +1,42 @@ +// +build windows,!norwfs + +package dotgit + +import ( + "io" + "os" + + "gopkg.in/src-d/go-billy.v4" +) + +const openAndLockPackedRefsMode = os.O_RDWR + +func (d *DotGit) rewritePackedRefsWhileLocked( + tmp billy.File, pr billy.File) error { + // If we aren't using the bare Windows filesystem as the storage + // layer, we might be able to get away with a rename over a locked + // file. + err := d.fs.Rename(tmp.Name(), pr.Name()) + if err == nil { + return nil + } + + // Otherwise, Windows doesn't let us rename over a locked file, so + // we have to do a straight copy. Unfortunately this could result + // in a partially-written file if the process fails before the + // copy completes. 
+ _, err = pr.Seek(0, io.SeekStart) + if err != nil { + return err + } + err = pr.Truncate(0) + if err != nil { + return err + } + _, err = tmp.Seek(0, io.SeekStart) + if err != nil { + return err + } + _, err = io.Copy(pr, tmp) + return err +} diff --git a/storage/filesystem/dotgit/dotgit_setref.go b/storage/filesystem/dotgit/dotgit_setref.go new file mode 100644 index 0000000..d27c1a3 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_setref.go @@ -0,0 +1,43 @@ +// +build !norwfs + +package dotgit + +import ( + "os" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) { + // If we are not checking an old ref, just truncate the file. + mode := os.O_RDWR | os.O_CREATE + if old == nil { + mode |= os.O_TRUNC + } + + f, err := d.fs.OpenFile(fileName, mode, 0666) + if err != nil { + return err + } + + defer ioutil.CheckClose(f, &err) + + // Lock is unlocked by the deferred Close above. This is because Unlock + // does not imply a fsync and thus there would be a race between + // Unlock+Close and other concurrent writers. Adding Sync to go-billy + // could work, but this is better (and avoids superfluous syncs). + err = f.Lock() + if err != nil { + return err + } + + // this is a no-op to call even when old is nil. + err = d.checkReferenceAndTruncate(f, old) + if err != nil { + return err + } + + _, err = f.Write([]byte(content)) + return err +} diff --git a/storage/filesystem/dotgit/dotgit_setref_norwfs.go b/storage/filesystem/dotgit/dotgit_setref_norwfs.go new file mode 100644 index 0000000..5695bd3 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_setref_norwfs.go @@ -0,0 +1,47 @@ +// +build norwfs + +package dotgit + +import ( + "fmt" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +// There are some filesystems that don't support opening files in RDWD mode. +// In these filesystems the standard SetRef function can not be used as i +// reads the reference file to check that it's not modified before updating it. +// +// This version of the function writes the reference without extra checks +// making it compatible with these simple filesystems. This is usually not +// a problem as they should be accessed by only one process at a time. +func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) error { + _, err := d.fs.Stat(fileName) + if err == nil && old != nil { + fRead, err := d.fs.Open(fileName) + if err != nil { + return err + } + + ref, err := d.readReferenceFrom(fRead, old.Name().String()) + fRead.Close() + + if err != nil { + return err + } + + if ref.Hash() != old.Hash() { + return fmt.Errorf("reference has changed concurrently") + } + } + + f, err := d.fs.Create(fileName) + if err != nil { + return err + } + + defer f.Close() + + _, err = f.Write([]byte(content)) + return err +} diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go new file mode 100644 index 0000000..7733eef --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_test.go @@ -0,0 +1,683 @@ +package dotgit + +import ( + "bufio" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "gopkg.in/src-d/go-git.v4/plumbing" + + . 
"gopkg.in/check.v1" + "gopkg.in/src-d/go-billy.v4/osfs" + "gopkg.in/src-d/go-git-fixtures.v3" +) + +func Test(t *testing.T) { TestingT(t) } + +type SuiteDotGit struct { + fixtures.Suite +} + +var _ = Suite(&SuiteDotGit{}) + +func (s *SuiteDotGit) TestInitialize(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + err = dir.Initialize() + c.Assert(err, IsNil) + + _, err = fs.Stat(fs.Join("objects", "info")) + c.Assert(err, IsNil) + + _, err = fs.Stat(fs.Join("objects", "pack")) + c.Assert(err, IsNil) + + _, err = fs.Stat(fs.Join("refs", "heads")) + c.Assert(err, IsNil) + + _, err = fs.Stat(fs.Join("refs", "tags")) + c.Assert(err, IsNil) +} + +func (s *SuiteDotGit) TestSetRefs(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + firstFoo := plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "e8d3ffab552895c19b9fcf7aa264d277cde33881", + ) + err = dir.SetRef(firstFoo, nil) + + c.Assert(err, IsNil) + + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/symbolic", + "ref: refs/heads/foo", + ), nil) + + c.Assert(err, IsNil) + + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "bar", + "e8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + c.Assert(err, IsNil) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + c.Assert(refs, HasLen, 2) + + ref := findReference(refs, "refs/heads/foo") + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + + ref = findReference(refs, "refs/heads/symbolic") + c.Assert(ref, NotNil) + c.Assert(ref.Target().String(), Equals, "refs/heads/foo") + + ref = findReference(refs, "bar") + c.Assert(ref, IsNil) + + ref, err = dir.Ref("refs/heads/foo") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + + ref, err = dir.Ref("refs/heads/symbolic") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Target().String(), Equals, "refs/heads/foo") + + ref, err = dir.Ref("bar") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + + // Check that SetRef with a non-nil `old` works. + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", + ), firstFoo) + c.Assert(err, IsNil) + + // `firstFoo` is no longer the right `old` reference, so this + // should fail. 
+ err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", + ), firstFoo) + c.Assert(err, NotNil) +} + +func (s *SuiteDotGit) TestRefsFromPackedRefs(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, "refs/remotes/origin/branch") + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + +} + +func (s *SuiteDotGit) TestRefsFromReferenceFile(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, "refs/remotes/origin/HEAD") + c.Assert(ref, NotNil) + c.Assert(ref.Type(), Equals, plumbing.SymbolicReference) + c.Assert(string(ref.Target()), Equals, "refs/remotes/origin/master") + +} + +func BenchmarkRefMultipleTimes(b *testing.B) { + fixtures.Init() + fs := fixtures.Basic().ByTag(".git").One().DotGit() + refname := plumbing.ReferenceName("refs/remotes/origin/branch") + + dir := New(fs) + _, err := dir.Ref(refname) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + + for i := 0; i < b.N; i++ { + _, err := dir.Ref(refname) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + } +} + +func (s *SuiteDotGit) TestRemoveRefFromReferenceFile(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + name := plumbing.ReferenceName("refs/remotes/origin/HEAD") + err := dir.RemoveRef(name) + c.Assert(err, IsNil) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, string(name)) + c.Assert(ref, IsNil) +} + +func (s *SuiteDotGit) TestRemoveRefFromPackedRefs(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + name := plumbing.ReferenceName("refs/remotes/origin/master") + err := dir.RemoveRef(name) + c.Assert(err, IsNil) + + b, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) + c.Assert(err, IsNil) + + c.Assert(string(b), Equals, ""+ + "# pack-refs with: peeled fully-peeled \n"+ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\n"+ + "e8d3ffab552895c19b9fcf7aa264d277cde33881 refs/remotes/origin/branch\n") +} + +func (s *SuiteDotGit) TestRemoveRefFromReferenceFileAndPackedRefs(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + // Make a ref file for a ref that's already in `packed-refs`. + err := dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/remotes/origin/branch", + "e8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + + // Make sure it only appears once in the refs list. 
+ refs, err := dir.Refs() + c.Assert(err, IsNil) + found := false + for _, ref := range refs { + if ref.Name() == "refs/remotes/origin/branch" { + c.Assert(found, Equals, false) + found = true + } + } + + name := plumbing.ReferenceName("refs/remotes/origin/branch") + err = dir.RemoveRef(name) + c.Assert(err, IsNil) + + b, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) + c.Assert(err, IsNil) + + c.Assert(string(b), Equals, ""+ + "# pack-refs with: peeled fully-peeled \n"+ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\n"+ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/remotes/origin/master\n") + + refs, err = dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, string(name)) + c.Assert(ref, IsNil) +} + +func (s *SuiteDotGit) TestRemoveRefNonExistent(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + packedRefs := filepath.Join(fs.Root(), packedRefsPath) + before, err := ioutil.ReadFile(packedRefs) + c.Assert(err, IsNil) + + name := plumbing.ReferenceName("refs/heads/nonexistent") + err = dir.RemoveRef(name) + c.Assert(err, IsNil) + + after, err := ioutil.ReadFile(packedRefs) + c.Assert(err, IsNil) + + c.Assert(string(before), Equals, string(after)) +} + +func (s *SuiteDotGit) TestRemoveRefInvalidPackedRefs(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + packedRefs := filepath.Join(fs.Root(), packedRefsPath) + brokenContent := "BROKEN STUFF REALLY BROKEN" + + err := ioutil.WriteFile(packedRefs, []byte(brokenContent), os.FileMode(0755)) + c.Assert(err, IsNil) + + name := plumbing.ReferenceName("refs/heads/nonexistent") + err = dir.RemoveRef(name) + c.Assert(err, NotNil) + + after, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) + c.Assert(err, IsNil) + + c.Assert(brokenContent, Equals, string(after)) +} + +func (s *SuiteDotGit) TestRemoveRefInvalidPackedRefs2(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + packedRefs := filepath.Join(fs.Root(), packedRefsPath) + brokenContent := strings.Repeat("a", bufio.MaxScanTokenSize*2) + + err := ioutil.WriteFile(packedRefs, []byte(brokenContent), os.FileMode(0755)) + c.Assert(err, IsNil) + + name := plumbing.ReferenceName("refs/heads/nonexistent") + err = dir.RemoveRef(name) + c.Assert(err, NotNil) + + after, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) + c.Assert(err, IsNil) + + c.Assert(brokenContent, Equals, string(after)) +} + +func (s *SuiteDotGit) TestRefsFromHEADFile(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, "HEAD") + c.Assert(ref, NotNil) + c.Assert(ref.Type(), Equals, plumbing.SymbolicReference) + c.Assert(string(ref.Target()), Equals, "refs/heads/master") +} + +func (s *SuiteDotGit) TestConfig(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + file, err := dir.Config() + c.Assert(err, IsNil) + c.Assert(filepath.Base(file.Name()), Equals, "config") +} + +func (s *SuiteDotGit) TestConfigWriteAndConfig(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + f, err := dir.ConfigWriter() + c.Assert(err, IsNil) + + _, err = f.Write([]byte("foo")) + c.Assert(err, IsNil) + + f, err = dir.Config() + c.Assert(err, IsNil) + + cnt, err := ioutil.ReadAll(f) + c.Assert(err, IsNil) + + c.Assert(string(cnt), Equals, "foo") +} + +func (s 
*SuiteDotGit) TestIndex(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + idx, err := dir.Index() + c.Assert(err, IsNil) + c.Assert(idx, NotNil) +} + +func (s *SuiteDotGit) TestIndexWriteAndIndex(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + f, err := dir.IndexWriter() + c.Assert(err, IsNil) + + _, err = f.Write([]byte("foo")) + c.Assert(err, IsNil) + + f, err = dir.Index() + c.Assert(err, IsNil) + + cnt, err := ioutil.ReadAll(f) + c.Assert(err, IsNil) + + c.Assert(string(cnt), Equals, "foo") +} + +func (s *SuiteDotGit) TestShallow(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + file, err := dir.Shallow() + c.Assert(err, IsNil) + c.Assert(file, IsNil) +} + +func (s *SuiteDotGit) TestShallowWriteAndShallow(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + f, err := dir.ShallowWriter() + c.Assert(err, IsNil) + + _, err = f.Write([]byte("foo")) + c.Assert(err, IsNil) + + f, err = dir.Shallow() + c.Assert(err, IsNil) + + cnt, err := ioutil.ReadAll(f) + c.Assert(err, IsNil) + + c.Assert(string(cnt), Equals, "foo") +} + +func findReference(refs []*plumbing.Reference, name string) *plumbing.Reference { + n := plumbing.ReferenceName(name) + for _, ref := range refs { + if ref.Name() == n { + return ref + } + } + + return nil +} + +func (s *SuiteDotGit) TestObjectPacks(c *C) { + f := fixtures.Basic().ByTag(".git").One() + fs := f.DotGit() + dir := New(fs) + + hashes, err := dir.ObjectPacks() + c.Assert(err, IsNil) + c.Assert(hashes, HasLen, 1) + c.Assert(hashes[0], Equals, f.PackfileHash) + + // Make sure that a random file in the pack directory doesn't + // break everything. 
+ badFile, err := fs.Create("objects/pack/OOPS_THIS_IS_NOT_RIGHT.pack") + c.Assert(err, IsNil) + err = badFile.Close() + c.Assert(err, IsNil) + + hashes2, err := dir.ObjectPacks() + c.Assert(err, IsNil) + c.Assert(hashes2, HasLen, 1) + c.Assert(hashes[0], Equals, hashes2[0]) +} + +func (s *SuiteDotGit) TestObjectPack(c *C) { + f := fixtures.Basic().ByTag(".git").One() + fs := f.DotGit() + dir := New(fs) + + pack, err := dir.ObjectPack(f.PackfileHash) + c.Assert(err, IsNil) + c.Assert(filepath.Ext(pack.Name()), Equals, ".pack") +} + +func (s *SuiteDotGit) TestObjectPackIdx(c *C) { + f := fixtures.Basic().ByTag(".git").One() + fs := f.DotGit() + dir := New(fs) + + idx, err := dir.ObjectPackIdx(f.PackfileHash) + c.Assert(err, IsNil) + c.Assert(filepath.Ext(idx.Name()), Equals, ".idx") + c.Assert(idx.Close(), IsNil) +} + +func (s *SuiteDotGit) TestObjectPackNotFound(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + pack, err := dir.ObjectPack(plumbing.ZeroHash) + c.Assert(err, Equals, ErrPackfileNotFound) + c.Assert(pack, IsNil) + + idx, err := dir.ObjectPackIdx(plumbing.ZeroHash) + c.Assert(err, Equals, ErrPackfileNotFound) + c.Assert(idx, IsNil) +} + +func (s *SuiteDotGit) TestNewObject(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + w, err := dir.NewObject() + c.Assert(err, IsNil) + + err = w.WriteHeader(plumbing.BlobObject, 14) + c.Assert(err, IsNil) + n, err := w.Write([]byte("this is a test")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 14) + + c.Assert(w.Hash().String(), Equals, "a8a940627d132695a9769df883f85992f0ff4a43") + + err = w.Close() + c.Assert(err, IsNil) + + i, err := fs.Stat("objects/a8/a940627d132695a9769df883f85992f0ff4a43") + c.Assert(err, IsNil) + c.Assert(i.Size(), Equals, int64(34)) +} + +func (s *SuiteDotGit) TestObjects(c *C) { + fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() + dir := New(fs) + + hashes, err := dir.Objects() + c.Assert(err, IsNil) + c.Assert(hashes, HasLen, 187) + c.Assert(hashes[0].String(), Equals, "0097821d427a3c3385898eb13b50dcbc8702b8a3") + c.Assert(hashes[1].String(), Equals, "01d5fa556c33743006de7e76e67a2dfcd994ca04") + c.Assert(hashes[2].String(), Equals, "03db8e1fbe133a480f2867aac478fd866686d69e") +} + +func (s *SuiteDotGit) TestObjectsNoFolder(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + hash, err := dir.Objects() + c.Assert(err, IsNil) + c.Assert(hash, HasLen, 0) +} + +func (s *SuiteDotGit) TestObject(c *C) { + fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() + dir := New(fs) + + hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e") + file, err := dir.Object(hash) + c.Assert(err, IsNil) + c.Assert(strings.HasSuffix( + file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")), + Equals, true, + ) +} + +func (s *SuiteDotGit) TestObjectNotFound(c *C) { + fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() + dir := New(fs) + + hash := plumbing.NewHash("not-found-object") + file, err := dir.Object(hash) + c.Assert(err, NotNil) + c.Assert(file, IsNil) +} + +func (s *SuiteDotGit) TestSubmodules(c *C) { + fs := fixtures.ByTag("submodule").One().DotGit() + dir := New(fs) + + m, err := dir.Module("basic") + c.Assert(err, IsNil) + c.Assert(strings.HasSuffix(m.Root(), m.Join(".git", "modules", "basic")), Equals, true) +} + +func (s *SuiteDotGit) 
TestPackRefs(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "e8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + c.Assert(err, IsNil) + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/bar", + "a8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + c.Assert(err, IsNil) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + c.Assert(refs, HasLen, 2) + looseCount, err := dir.CountLooseRefs() + c.Assert(err, IsNil) + c.Assert(looseCount, Equals, 2) + + err = dir.PackRefs() + c.Assert(err, IsNil) + + // Make sure the refs are still there, but no longer loose. + refs, err = dir.Refs() + c.Assert(err, IsNil) + c.Assert(refs, HasLen, 2) + looseCount, err = dir.CountLooseRefs() + c.Assert(err, IsNil) + c.Assert(looseCount, Equals, 0) + + ref, err := dir.Ref("refs/heads/foo") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + ref, err = dir.Ref("refs/heads/bar") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "a8d3ffab552895c19b9fcf7aa264d277cde33881") + + // Now update one of them, re-pack, and check again. + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "b8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + c.Assert(err, IsNil) + looseCount, err = dir.CountLooseRefs() + c.Assert(err, IsNil) + c.Assert(looseCount, Equals, 1) + err = dir.PackRefs() + c.Assert(err, IsNil) + + // Make sure the refs are still there, but no longer loose. + refs, err = dir.Refs() + c.Assert(err, IsNil) + c.Assert(refs, HasLen, 2) + looseCount, err = dir.CountLooseRefs() + c.Assert(err, IsNil) + c.Assert(looseCount, Equals, 0) + + ref, err = dir.Ref("refs/heads/foo") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "b8d3ffab552895c19b9fcf7aa264d277cde33881") +} + +func (s *SuiteDotGit) TestAlternates(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + // Create a new billy fs. + fs := osfs.New(tmp) + + // Create a new dotgit object and initialize. + dir := New(fs) + err = dir.Initialize() + c.Assert(err, IsNil) + + // Create alternates file. + altpath := filepath.Join("objects", "info", "alternates") + f, err := fs.Create(altpath) + c.Assert(err, IsNil) + + // Multiple alternates. + var strContent string + if runtime.GOOS == "windows" { + strContent = "C:\\Users\\username\\repo1\\.git\\objects\r\n..\\..\\..\\rep2\\.git\\objects" + } else { + strContent = "/Users/username/rep1//.git/objects\n../../../rep2//.git/objects" + } + content := []byte(strContent) + f.Write(content) + f.Close() + + dotgits, err := dir.Alternates() + c.Assert(err, IsNil) + if runtime.GOOS == "windows" { + c.Assert(dotgits[0].fs.Root(), Equals, "C:\\Users\\username\\repo1\\.git") + } else { + c.Assert(dotgits[0].fs.Root(), Equals, "/Users/username/rep1/.git") + } + + // For relative path: + // /some/absolute/path/to/dot-git -> /some/absolute/path + pathx := strings.Split(tmp, string(filepath.Separator)) + pathx = pathx[:len(pathx)-2] + // Use string.Join() to avoid malformed absolutepath on windows + // C:Users\\User\\... instead of C:\\Users\\appveyor\\... . 
+ resolvedPath := strings.Join(pathx, string(filepath.Separator)) + // Append the alternate path to the resolvedPath + expectedPath := filepath.Join(string(filepath.Separator), resolvedPath, "rep2", ".git") + if runtime.GOOS == "windows" { + expectedPath = filepath.Join(resolvedPath, "rep2", ".git") + } + c.Assert(dotgits[1].fs.Root(), Equals, expectedPath) +} diff --git a/storage/filesystem/dotgit/writers.go b/storage/filesystem/dotgit/writers.go new file mode 100644 index 0000000..c2b420f --- /dev/null +++ b/storage/filesystem/dotgit/writers.go @@ -0,0 +1,282 @@ +package dotgit + +import ( + "fmt" + "io" + "sync/atomic" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" + "gopkg.in/src-d/go-git.v4/plumbing/format/objfile" + "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" + + "gopkg.in/src-d/go-billy.v4" +) + +// PackWriter is a io.Writer that generates the packfile index simultaneously, +// a packfile.Decoder is used with a file reader to read the file being written +// this operation is synchronized with the write operations. +// The packfile is written in a temp file, when Close is called this file +// is renamed/moved (depends on the Filesystem implementation) to the final +// location, if the PackWriter is not used, nothing is written +type PackWriter struct { + Notify func(plumbing.Hash, *packfile.Index) + + fs billy.Filesystem + fr, fw billy.File + synced *syncedReader + checksum plumbing.Hash + index *packfile.Index + result chan error +} + +func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { + fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_") + if err != nil { + return nil, err + } + + fr, err := fs.Open(fw.Name()) + if err != nil { + return nil, err + } + + writer := &PackWriter{ + fs: fs, + fw: fw, + fr: fr, + synced: newSyncedReader(fw, fr), + result: make(chan error), + } + + go writer.buildIndex() + return writer, nil +} + +func (w *PackWriter) buildIndex() { + s := packfile.NewScanner(w.synced) + d, err := packfile.NewDecoder(s, nil) + if err != nil { + w.result <- err + return + } + + checksum, err := d.Decode() + if err != nil { + w.result <- err + return + } + + w.checksum = checksum + w.index = d.Index() + w.result <- err +} + +// waitBuildIndex waits until buildIndex function finishes, this can terminate +// with a packfile.ErrEmptyPackfile, this means that nothing was written so we +// ignore the error +func (w *PackWriter) waitBuildIndex() error { + err := <-w.result + if err == packfile.ErrEmptyPackfile { + return nil + } + + return err +} + +func (w *PackWriter) Write(p []byte) (int, error) { + return w.synced.Write(p) +} + +// Close closes all the file descriptors and save the final packfile, if nothing +// was written, the tempfiles are deleted without writing a packfile. 
+func (w *PackWriter) Close() error { + defer func() { + if w.Notify != nil && w.index != nil && w.index.Size() > 0 { + w.Notify(w.checksum, w.index) + } + + close(w.result) + }() + + if err := w.synced.Close(); err != nil { + return err + } + + if err := w.waitBuildIndex(); err != nil { + return err + } + + if err := w.fr.Close(); err != nil { + return err + } + + if err := w.fw.Close(); err != nil { + return err + } + + if w.index == nil || w.index.Size() == 0 { + return w.clean() + } + + return w.save() +} + +func (w *PackWriter) clean() error { + return w.fs.Remove(w.fw.Name()) +} + +func (w *PackWriter) save() error { + base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum)) + idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base)) + if err != nil { + return err + } + + if err := w.encodeIdx(idx); err != nil { + return err + } + + if err := idx.Close(); err != nil { + return err + } + + return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base)) +} + +func (w *PackWriter) encodeIdx(writer io.Writer) error { + idx := w.index.ToIdxFile() + idx.PackfileChecksum = w.checksum + idx.Version = idxfile.VersionSupported + e := idxfile.NewEncoder(writer) + _, err := e.Encode(idx) + return err +} + +type syncedReader struct { + w io.Writer + r io.ReadSeeker + + blocked, done uint32 + written, read uint64 + news chan bool +} + +func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader { + return &syncedReader{ + w: w, + r: r, + news: make(chan bool), + } +} + +func (s *syncedReader) Write(p []byte) (n int, err error) { + defer func() { + written := atomic.AddUint64(&s.written, uint64(n)) + read := atomic.LoadUint64(&s.read) + if written > read { + s.wake() + } + }() + + n, err = s.w.Write(p) + return +} + +func (s *syncedReader) Read(p []byte) (n int, err error) { + defer func() { atomic.AddUint64(&s.read, uint64(n)) }() + + for { + s.sleep() + n, err = s.r.Read(p) + if err == io.EOF && !s.isDone() && n == 0 { + continue + } + + break + } + + return +} + +func (s *syncedReader) isDone() bool { + return atomic.LoadUint32(&s.done) == 1 +} + +func (s *syncedReader) isBlocked() bool { + return atomic.LoadUint32(&s.blocked) == 1 +} + +func (s *syncedReader) wake() { + if s.isBlocked() { + // fmt.Println("wake") + atomic.StoreUint32(&s.blocked, 0) + s.news <- true + } +} + +func (s *syncedReader) sleep() { + read := atomic.LoadUint64(&s.read) + written := atomic.LoadUint64(&s.written) + if read >= written { + atomic.StoreUint32(&s.blocked, 1) + // fmt.Println("sleep", read, written) + <-s.news + } + +} + +func (s *syncedReader) Seek(offset int64, whence int) (int64, error) { + if whence == io.SeekCurrent { + return s.r.Seek(offset, whence) + } + + p, err := s.r.Seek(offset, whence) + atomic.StoreUint64(&s.read, uint64(p)) + + return p, err +} + +func (s *syncedReader) Close() error { + atomic.StoreUint32(&s.done, 1) + close(s.news) + return nil +} + +type ObjectWriter struct { + objfile.Writer + fs billy.Filesystem + f billy.File +} + +func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) { + f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_") + if err != nil { + return nil, err + } + + return &ObjectWriter{ + Writer: (*objfile.NewWriter(f)), + fs: fs, + f: f, + }, nil +} + +func (w *ObjectWriter) Close() error { + if err := w.Writer.Close(); err != nil { + return err + } + + if err := w.f.Close(); err != nil { + return err + } + + return w.save() +} + +func (w *ObjectWriter) save() error { + hash := w.Hash().String() + file := w.fs.Join(objectsPath, 
hash[0:2], hash[2:40]) + + return w.fs.Rename(w.f.Name(), file) +} diff --git a/storage/filesystem/dotgit/writers_test.go b/storage/filesystem/dotgit/writers_test.go new file mode 100644 index 0000000..bf00762 --- /dev/null +++ b/storage/filesystem/dotgit/writers_test.go @@ -0,0 +1,156 @@ +package dotgit + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strconv" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" + + . "gopkg.in/check.v1" + "gopkg.in/src-d/go-billy.v4/osfs" + "gopkg.in/src-d/go-git-fixtures.v3" +) + +func (s *SuiteDotGit) TestNewObjectPack(c *C) { + f := fixtures.Basic().One() + + dir, err := ioutil.TempDir("", "example") + if err != nil { + log.Fatal(err) + } + + defer os.RemoveAll(dir) + + fs := osfs.New(dir) + dot := New(fs) + + w, err := dot.NewObjectPack() + c.Assert(err, IsNil) + + _, err = io.Copy(w, f.Packfile()) + c.Assert(err, IsNil) + + c.Assert(w.Close(), IsNil) + + pfPath := fmt.Sprintf("objects/pack/pack-%s.pack", f.PackfileHash) + idxPath := fmt.Sprintf("objects/pack/pack-%s.idx", f.PackfileHash) + + stat, err := fs.Stat(pfPath) + c.Assert(err, IsNil) + c.Assert(stat.Size(), Equals, int64(84794)) + + stat, err = fs.Stat(idxPath) + c.Assert(err, IsNil) + c.Assert(stat.Size(), Equals, int64(1940)) + + pf, err := fs.Open(pfPath) + c.Assert(err, IsNil) + pfs := packfile.NewScanner(pf) + _, objects, err := pfs.Header() + c.Assert(err, IsNil) + for i := uint32(0); i < objects; i++ { + _, err := pfs.NextObjectHeader() + if err != nil { + c.Assert(err, IsNil) + break + } + } + c.Assert(pfs.Close(), IsNil) +} + +func (s *SuiteDotGit) TestNewObjectPackUnused(c *C) { + dir, err := ioutil.TempDir("", "example") + if err != nil { + log.Fatal(err) + } + + defer os.RemoveAll(dir) + + fs := osfs.New(dir) + dot := New(fs) + + w, err := dot.NewObjectPack() + c.Assert(err, IsNil) + + c.Assert(w.Close(), IsNil) + + info, err := fs.ReadDir("objects/pack") + c.Assert(err, IsNil) + c.Assert(info, HasLen, 0) + + // check clean up of temporary files + info, err = fs.ReadDir("") + c.Assert(err, IsNil) + for _, fi := range info { + c.Assert(fi.IsDir(), Equals, true) + } +} + +func (s *SuiteDotGit) TestSyncedReader(c *C) { + tmpw, err := ioutil.TempFile("", "example") + c.Assert(err, IsNil) + + tmpr, err := os.Open(tmpw.Name()) + c.Assert(err, IsNil) + + defer func() { + tmpw.Close() + tmpr.Close() + os.Remove(tmpw.Name()) + }() + + synced := newSyncedReader(tmpw, tmpr) + + go func() { + for i := 0; i < 281; i++ { + _, err := synced.Write([]byte(strconv.Itoa(i) + "\n")) + c.Assert(err, IsNil) + } + + synced.Close() + }() + + o, err := synced.Seek(1002, io.SeekStart) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(1002)) + + head := make([]byte, 3) + n, err := io.ReadFull(synced, head) + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + c.Assert(string(head), Equals, "278") + + o, err = synced.Seek(1010, io.SeekStart) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(1010)) + + n, err = io.ReadFull(synced, head) + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + c.Assert(string(head), Equals, "280") +} + +func (s *SuiteDotGit) TestPackWriterUnusedNotify(c *C) { + dir, err := ioutil.TempDir("", "example") + if err != nil { + c.Assert(err, IsNil) + } + + defer os.RemoveAll(dir) + + fs := osfs.New(dir) + + w, err := newPackWrite(fs) + c.Assert(err, IsNil) + + w.Notify = func(h plumbing.Hash, idx *packfile.Index) { + c.Fatal("unexpected call to PackWriter.Notify") + } + + c.Assert(w.Close(), IsNil) +} |
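For completeness, the writers added in writers.go are what the packfile tests above exercise: PackWriter streams a packfile into a temporary file while building its index concurrently, and Close moves both into objects/pack/. A condensed sketch of that flow (the helper name and the source of the packfile data are assumptions):

```go
package main

import (
	"io"
	"os"

	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
)

// writePack streams an existing packfile into a repository's object store.
// The .idx file is generated on the fly while the pack bytes are copied, so
// no second pass over the data is needed.
func writePack(gitDir string, packfile io.Reader) error {
	dot := dotgit.New(osfs.New(gitDir))

	w, err := dot.NewObjectPack()
	if err != nil {
		return err
	}

	if _, err := io.Copy(w, packfile); err != nil {
		w.Close()
		return err
	}

	// Close waits for the index to finish, renames the temporary file to
	// objects/pack/pack-<checksum>.pack and writes the matching .idx beside it.
	return w.Close()
}

func main() {
	f, err := os.Open("/tmp/some.pack") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := writePack("/tmp/repo/.git", f); err != nil {
		panic(err)
	}
}
```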