From b0d807a1ae0687ef3a01d78c1dc5e55f7217268f Mon Sep 17 00:00:00 2001 From: Antonio Jesus Navarro Perez Date: Tue, 5 Jun 2018 18:33:27 +0200 Subject: dotgit: Move package outside internal. Signed-off-by: Antonio Jesus Navarro Perez --- storage/filesystem/config.go | 2 +- storage/filesystem/config_test.go | 2 +- storage/filesystem/dotgit/dotgit.go | 808 +++++++++++++++++++++ .../dotgit/dotgit_rewrite_packed_refs_nix.go | 17 + .../dotgit/dotgit_rewrite_packed_refs_norwfs.go | 34 + .../dotgit/dotgit_rewrite_packed_refs_windows.go | 42 ++ storage/filesystem/dotgit/dotgit_setref.go | 43 ++ storage/filesystem/dotgit/dotgit_setref_norwfs.go | 47 ++ storage/filesystem/dotgit/dotgit_test.go | 683 +++++++++++++++++ storage/filesystem/dotgit/writers.go | 282 +++++++ storage/filesystem/dotgit/writers_test.go | 156 ++++ storage/filesystem/index.go | 2 +- storage/filesystem/internal/dotgit/dotgit.go | 808 --------------------- .../dotgit/dotgit_rewrite_packed_refs_nix.go | 17 - .../dotgit/dotgit_rewrite_packed_refs_norwfs.go | 34 - .../dotgit/dotgit_rewrite_packed_refs_windows.go | 42 -- .../filesystem/internal/dotgit/dotgit_setref.go | 43 -- .../internal/dotgit/dotgit_setref_norwfs.go | 47 -- storage/filesystem/internal/dotgit/dotgit_test.go | 683 ----------------- storage/filesystem/internal/dotgit/writers.go | 282 ------- storage/filesystem/internal/dotgit/writers_test.go | 156 ---- storage/filesystem/module.go | 2 +- storage/filesystem/object.go | 2 +- storage/filesystem/object_test.go | 2 +- storage/filesystem/reference.go | 2 +- storage/filesystem/shallow.go | 2 +- storage/filesystem/storage.go | 2 +- 27 files changed, 2121 insertions(+), 2121 deletions(-) create mode 100644 storage/filesystem/dotgit/dotgit.go create mode 100644 storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go create mode 100644 storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go create mode 100644 storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go create mode 100644 storage/filesystem/dotgit/dotgit_setref.go create mode 100644 storage/filesystem/dotgit/dotgit_setref_norwfs.go create mode 100644 storage/filesystem/dotgit/dotgit_test.go create mode 100644 storage/filesystem/dotgit/writers.go create mode 100644 storage/filesystem/dotgit/writers_test.go delete mode 100644 storage/filesystem/internal/dotgit/dotgit.go delete mode 100644 storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_nix.go delete mode 100644 storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_norwfs.go delete mode 100644 storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_windows.go delete mode 100644 storage/filesystem/internal/dotgit/dotgit_setref.go delete mode 100644 storage/filesystem/internal/dotgit/dotgit_setref_norwfs.go delete mode 100644 storage/filesystem/internal/dotgit/dotgit_test.go delete mode 100644 storage/filesystem/internal/dotgit/writers.go delete mode 100644 storage/filesystem/internal/dotgit/writers_test.go (limited to 'storage') diff --git a/storage/filesystem/config.go b/storage/filesystem/config.go index 85feaf0..be812e4 100644 --- a/storage/filesystem/config.go +++ b/storage/filesystem/config.go @@ -5,7 +5,7 @@ import ( "os" "gopkg.in/src-d/go-git.v4/config" - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) diff --git a/storage/filesystem/config_test.go b/storage/filesystem/config_test.go index cc03119..71c947d 100644 --- a/storage/filesystem/config_test.go +++ 
b/storage/filesystem/config_test.go @@ -5,7 +5,7 @@ import ( "os" "gopkg.in/src-d/go-git.v4/config" - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" . "gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/osfs" diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go new file mode 100644 index 0000000..52b621c --- /dev/null +++ b/storage/filesystem/dotgit/dotgit.go @@ -0,0 +1,808 @@ +// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt +package dotgit + +import ( + "bufio" + "errors" + "fmt" + "io" + stdioutil "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "gopkg.in/src-d/go-billy.v4/osfs" + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/utils/ioutil" + + "gopkg.in/src-d/go-billy.v4" +) + +const ( + suffix = ".git" + packedRefsPath = "packed-refs" + configPath = "config" + indexPath = "index" + shallowPath = "shallow" + modulePath = "modules" + objectsPath = "objects" + packPath = "pack" + refsPath = "refs" + + tmpPackedRefsPrefix = "._packed-refs" + + packExt = ".pack" + idxExt = ".idx" +) + +var ( + // ErrNotFound is returned by New when the path is not found. + ErrNotFound = errors.New("path not found") + // ErrIdxNotFound is returned by Idxfile when the idx file is not found + ErrIdxNotFound = errors.New("idx file not found") + // ErrPackfileNotFound is returned by Packfile when the packfile is not found + ErrPackfileNotFound = errors.New("packfile not found") + // ErrConfigNotFound is returned by Config when the config is not found + ErrConfigNotFound = errors.New("config file not found") + // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is + // found in the packed-ref file. This is usually the case for corrupted git + // repositories. + ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file") + // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt. + ErrPackedRefsBadFormat = errors.New("malformed packed-ref") + // ErrSymRefTargetNotFound is returned when a symbolic reference is + // targeting a non-existing object. This usually means the repository + // is corrupt. + ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") +) + +// The DotGit type represents a local git repository on disk. This +// type is not zero-value-safe, use the New function to initialize it. +type DotGit struct { + fs billy.Filesystem +} + +// New returns a DotGit value ready to be used. The path argument must +// be the absolute path of a git repository directory (e.g. +// "/foo/bar/.git"). +func New(fs billy.Filesystem) *DotGit { + return &DotGit{fs: fs} +} + +// Initialize creates all the folder scaffolding. 
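+// A minimal usage sketch (the path is illustrative; osfs is the same
+// go-billy filesystem implementation the tests in this package use):
+//
+//	fs := osfs.New("/path/to/repo/.git")
+//	dir := New(fs)
+//	if err := dir.Initialize(); err != nil {
+//		// handle error
+//	}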
+func (d *DotGit) Initialize() error { + mustExists := []string{ + d.fs.Join("objects", "info"), + d.fs.Join("objects", "pack"), + d.fs.Join("refs", "heads"), + d.fs.Join("refs", "tags"), + } + + for _, path := range mustExists { + _, err := d.fs.Stat(path) + if err == nil { + continue + } + + if !os.IsNotExist(err) { + return err + } + + if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil { + return err + } + } + + return nil +} + +// ConfigWriter returns a file pointer for write to the config file +func (d *DotGit) ConfigWriter() (billy.File, error) { + return d.fs.Create(configPath) +} + +// Config returns a file pointer for read to the config file +func (d *DotGit) Config() (billy.File, error) { + return d.fs.Open(configPath) +} + +// IndexWriter returns a file pointer for write to the index file +func (d *DotGit) IndexWriter() (billy.File, error) { + return d.fs.Create(indexPath) +} + +// Index returns a file pointer for read to the index file +func (d *DotGit) Index() (billy.File, error) { + return d.fs.Open(indexPath) +} + +// ShallowWriter returns a file pointer for write to the shallow file +func (d *DotGit) ShallowWriter() (billy.File, error) { + return d.fs.Create(shallowPath) +} + +// Shallow returns a file pointer for read to the shallow file +func (d *DotGit) Shallow() (billy.File, error) { + f, err := d.fs.Open(shallowPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + return f, nil +} + +// NewObjectPack return a writer for a new packfile, it saves the packfile to +// disk and also generates and save the index for the given packfile. +func (d *DotGit) NewObjectPack() (*PackWriter, error) { + return newPackWrite(d.fs) +} + +// ObjectPacks returns the list of availables packfiles +func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { + packDir := d.fs.Join(objectsPath, packPath) + files, err := d.fs.ReadDir(packDir) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + var packs []plumbing.Hash + for _, f := range files { + if !strings.HasSuffix(f.Name(), packExt) { + continue + } + + n := f.Name() + h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack + if h.IsZero() { + // Ignore files with badly-formatted names. + continue + } + packs = append(packs, h) + } + + return packs, nil +} + +func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string { + return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension)) +} + +func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) { + pack, err := d.fs.Open(d.objectPackPath(hash, extension)) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrPackfileNotFound + } + + return nil, err + } + + return pack, nil +} + +// ObjectPack returns a fs.File of the given packfile +func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) { + return d.objectPackOpen(hash, `pack`) +} + +// ObjectPackIdx returns a fs.File of the index file for a given packfile +func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) { + return d.objectPackOpen(hash, `idx`) +} + +func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error { + path := d.objectPackPath(hash, `pack`) + if !t.IsZero() { + fi, err := d.fs.Stat(path) + if err != nil { + return err + } + // too new, skip deletion. 
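+	// For example, a caller passing t = time.Now().Add(-time.Hour) only
+	// deletes packs written more than an hour ago; a zero t (see the
+	// IsZero check above) skips this age guard and always deletes.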
+ if !fi.ModTime().Before(t) { + return nil + } + } + err := d.fs.Remove(path) + if err != nil { + return err + } + return d.fs.Remove(d.objectPackPath(hash, `idx`)) +} + +// NewObject return a writer for a new object file. +func (d *DotGit) NewObject() (*ObjectWriter, error) { + return newObjectWriter(d.fs) +} + +// Objects returns a slice with the hashes of objects found under the +// .git/objects/ directory. +func (d *DotGit) Objects() ([]plumbing.Hash, error) { + var objects []plumbing.Hash + err := d.ForEachObjectHash(func(hash plumbing.Hash) error { + objects = append(objects, hash) + return nil + }) + if err != nil { + return nil, err + } + return objects, nil +} + +// Objects returns a slice with the hashes of objects found under the +// .git/objects/ directory. +func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { + files, err := d.fs.ReadDir(objectsPath) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + for _, f := range files { + if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) { + base := f.Name() + d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base)) + if err != nil { + return err + } + + for _, o := range d { + h := plumbing.NewHash(base + o.Name()) + if h.IsZero() { + // Ignore files with badly-formatted names. + continue + } + err = fun(h) + if err != nil { + return err + } + } + } + } + + return nil +} + +func (d *DotGit) objectPath(h plumbing.Hash) string { + hash := h.String() + return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) +} + +// Object returns a fs.File pointing the object file, if exists +func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { + return d.fs.Open(d.objectPath(h)) +} + +// ObjectStat returns a os.FileInfo pointing the object file, if exists +func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { + return d.fs.Stat(d.objectPath(h)) +} + +// ObjectDelete removes the object file, if exists +func (d *DotGit) ObjectDelete(h plumbing.Hash) error { + return d.fs.Remove(d.objectPath(h)) +} + +func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) { + b, err := stdioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + line := strings.TrimSpace(string(b)) + return plumbing.NewReferenceFromStrings(name, line), nil +} + +func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error { + if old == nil { + return nil + } + ref, err := d.readReferenceFrom(f, old.Name().String()) + if err != nil { + return err + } + if ref.Hash() != old.Hash() { + return fmt.Errorf("reference has changed concurrently") + } + _, err = f.Seek(0, io.SeekStart) + if err != nil { + return err + } + return f.Truncate(0) +} + +func (d *DotGit) SetRef(r, old *plumbing.Reference) error { + var content string + switch r.Type() { + case plumbing.SymbolicReference: + content = fmt.Sprintf("ref: %s\n", r.Target()) + case plumbing.HashReference: + content = fmt.Sprintln(r.Hash().String()) + } + + fileName := r.Name().String() + + return d.setRef(fileName, content, old) +} + +// Refs scans the git directory collecting references, which it returns. +// Symbolic references are resolved and included in the output. 
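+// An illustrative loop over the result (dir names a *DotGit value, as in
+// the tests for this package):
+//
+//	refs, err := dir.Refs()
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, ref := range refs {
+//		fmt.Println(ref.Name(), ref.Type(), ref.Hash())
+//	}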
+func (d *DotGit) Refs() ([]*plumbing.Reference, error) { + var refs []*plumbing.Reference + var seen = make(map[plumbing.ReferenceName]bool) + if err := d.addRefsFromRefDir(&refs, seen); err != nil { + return nil, err + } + + if err := d.addRefsFromPackedRefs(&refs, seen); err != nil { + return nil, err + } + + if err := d.addRefFromHEAD(&refs); err != nil { + return nil, err + } + + return refs, nil +} + +// Ref returns the reference for a given reference name. +func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) { + ref, err := d.readReferenceFile(".", name.String()) + if err == nil { + return ref, nil + } + + return d.packedRef(name) +} + +func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) { + s := bufio.NewScanner(f) + var refs []*plumbing.Reference + for s.Scan() { + ref, err := d.processLine(s.Text()) + if err != nil { + return nil, err + } + + if ref != nil { + refs = append(refs, ref) + } + } + + return refs, s.Err() +} + +func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) { + f, err := d.fs.Open(packedRefsPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + defer ioutil.CheckClose(f, &err) + return d.findPackedRefsInFile(f) +} + +func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) { + refs, err := d.findPackedRefs() + if err != nil { + return nil, err + } + + for _, ref := range refs { + if ref.Name() == name { + return ref, nil + } + } + + return nil, plumbing.ErrReferenceNotFound +} + +// RemoveRef removes a reference by name. +func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error { + path := d.fs.Join(".", name.String()) + _, err := d.fs.Stat(path) + if err == nil { + err = d.fs.Remove(path) + // Drop down to remove it from the packed refs file, too. + } + + if err != nil && !os.IsNotExist(err) { + return err + } + + return d.rewritePackedRefsWithoutRef(name) +} + +func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) { + packedRefs, err := d.findPackedRefs() + if err != nil { + return err + } + + for _, ref := range packedRefs { + if !seen[ref.Name()] { + *refs = append(*refs, ref) + seen[ref.Name()] = true + } + } + return nil +} + +func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) { + packedRefs, err := d.findPackedRefsInFile(f) + if err != nil { + return err + } + + for _, ref := range packedRefs { + if !seen[ref.Name()] { + *refs = append(*refs, ref) + seen[ref.Name()] = true + } + } + return nil +} + +func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( + pr billy.File, err error) { + var f billy.File + defer func() { + if err != nil && f != nil { + ioutil.CheckClose(f, &err) + } + }() + + // File mode is retrieved from a constant defined in the target specific + // files (dotgit_rewrite_packed_refs_*). Some modes are not available + // in all filesystems. + openFlags := openAndLockPackedRefsMode + if doCreate { + openFlags |= os.O_CREATE + } + + // Keep trying to open and lock the file until we're sure the file + // didn't change between the open and the lock. 
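+	// Concretely: open the file and record its mtime, take the lock, then
+	// stat it again; if the mtime changed while waiting for the lock,
+	// another writer got in first, so close the handle and start over.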
+ for { + f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600) + if err != nil { + if os.IsNotExist(err) && !doCreate { + return nil, nil + } + + return nil, err + } + fi, err := d.fs.Stat(packedRefsPath) + if err != nil { + return nil, err + } + mtime := fi.ModTime() + + err = f.Lock() + if err != nil { + return nil, err + } + + fi, err = d.fs.Stat(packedRefsPath) + if err != nil { + return nil, err + } + if mtime.Equal(fi.ModTime()) { + break + } + // The file has changed since we opened it. Close and retry. + err = f.Close() + if err != nil { + return nil, err + } + } + return f, nil +} + +func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) { + pr, err := d.openAndLockPackedRefs(false) + if err != nil { + return err + } + if pr == nil { + return nil + } + defer ioutil.CheckClose(pr, &err) + + // Creating the temp file in the same directory as the target file + // improves our chances for rename operation to be atomic. + tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) + if err != nil { + return err + } + tmpName := tmp.Name() + defer func() { + ioutil.CheckClose(tmp, &err) + _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it + }() + + s := bufio.NewScanner(pr) + found := false + for s.Scan() { + line := s.Text() + ref, err := d.processLine(line) + if err != nil { + return err + } + + if ref != nil && ref.Name() == name { + found = true + continue + } + + if _, err := fmt.Fprintln(tmp, line); err != nil { + return err + } + } + + if err := s.Err(); err != nil { + return err + } + + if !found { + return nil + } + + return d.rewritePackedRefsWhileLocked(tmp, pr) +} + +// process lines from a packed-refs file +func (d *DotGit) processLine(line string) (*plumbing.Reference, error) { + if len(line) == 0 { + return nil, nil + } + + switch line[0] { + case '#': // comment - ignore + return nil, nil + case '^': // annotated tag commit of the previous line - ignore + return nil, nil + default: + ws := strings.Split(line, " ") // hash then ref + if len(ws) != 2 { + return nil, ErrPackedRefsBadFormat + } + + return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil + } +} + +func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error { + return d.walkReferencesTree(refs, []string{refsPath}, seen) +} + +func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error { + files, err := d.fs.ReadDir(d.fs.Join(relPath...)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + for _, f := range files { + newRelPath := append(append([]string(nil), relPath...), f.Name()) + if f.IsDir() { + if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil { + return err + } + + continue + } + + ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/")) + if err != nil { + return err + } + + if ref != nil && !seen[ref.Name()] { + *refs = append(*refs, ref) + seen[ref.Name()] = true + } + } + + return nil +} + +func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error { + ref, err := d.readReferenceFile(".", "HEAD") + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + *refs = append(*refs, ref) + return nil +} + +func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) { + path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...)) + f, err := d.fs.Open(path) + if err != nil { + return nil, err + } + defer ioutil.CheckClose(f, 
&err) + + return d.readReferenceFrom(f, name) +} + +func (d *DotGit) CountLooseRefs() (int, error) { + var refs []*plumbing.Reference + var seen = make(map[plumbing.ReferenceName]bool) + if err := d.addRefsFromRefDir(&refs, seen); err != nil { + return 0, err + } + + return len(refs), nil +} + +// PackRefs packs all loose refs into the packed-refs file. +// +// This implementation only works under the assumption that the view +// of the file system won't be updated during this operation. This +// strategy would not work on a general file system though, without +// locking each loose reference and checking it again before deleting +// the file, because otherwise an updated reference could sneak in and +// then be deleted by the packed-refs process. Alternatively, every +// ref update could also lock packed-refs, so only one lock is +// required during ref-packing. But that would worsen performance in +// the common case. +// +// TODO: add an "all" boolean like the `git pack-refs --all` flag. +// When `all` is false, it would only pack refs that have already been +// packed, plus all tags. +func (d *DotGit) PackRefs() (err error) { + // Lock packed-refs, and create it if it doesn't exist yet. + f, err := d.openAndLockPackedRefs(true) + if err != nil { + return err + } + defer ioutil.CheckClose(f, &err) + + // Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs. + var refs []*plumbing.Reference + seen := make(map[plumbing.ReferenceName]bool) + if err = d.addRefsFromRefDir(&refs, seen); err != nil { + return err + } + if len(refs) == 0 { + // Nothing to do! + return nil + } + numLooseRefs := len(refs) + if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil { + return err + } + + // Write them all to a new temp packed-refs file. + tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) + if err != nil { + return err + } + tmpName := tmp.Name() + defer func() { + ioutil.CheckClose(tmp, &err) + _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it + }() + + w := bufio.NewWriter(tmp) + for _, ref := range refs { + _, err = w.WriteString(ref.String() + "\n") + if err != nil { + return err + } + } + err = w.Flush() + if err != nil { + return err + } + + // Rename the temp packed-refs file. + err = d.rewritePackedRefsWhileLocked(tmp, f) + if err != nil { + return err + } + + // Delete all the loose refs, while still holding the packed-refs + // lock. + for _, ref := range refs[:numLooseRefs] { + path := d.fs.Join(".", ref.Name().String()) + err = d.fs.Remove(path) + if err != nil && !os.IsNotExist(err) { + return err + } + } + + return nil +} + +// Module return a billy.Filesystem pointing to the module folder +func (d *DotGit) Module(name string) (billy.Filesystem, error) { + return d.fs.Chroot(d.fs.Join(modulePath, name)) +} + +// Alternates returns DotGit(s) based off paths in objects/info/alternates if +// available. This can be used to checks if it's a shared repository. +func (d *DotGit) Alternates() ([]*DotGit, error) { + altpath := d.fs.Join("objects", "info", "alternates") + f, err := d.fs.Open(altpath) + if err != nil { + return nil, err + } + defer f.Close() + + var alternates []*DotGit + + // Read alternate paths line-by-line and create DotGit objects. + scanner := bufio.NewScanner(f) + for scanner.Scan() { + path := scanner.Text() + if !filepath.IsAbs(path) { + // For relative paths, we can perform an internal conversion to + // slash so that they work cross-platform. 
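+			// e.g. a Windows-style entry such as ..\..\..\rep2\.git\objects
+			// becomes ../../../rep2/.git/objects before being re-rooted below.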
+ slashPath := filepath.ToSlash(path) + // If the path is not absolute, it must be relative to object + // database (.git/objects/info). + // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html + // Hence, derive a path relative to DotGit's root. + // "../../../reponame/.git/" -> "../../reponame/.git" + // Remove the first ../ + relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...) + normalPath := filepath.FromSlash(relpath) + path = filepath.Join(d.fs.Root(), normalPath) + } + fs := osfs.New(filepath.Dir(path)) + alternates = append(alternates, New(fs)) + } + + if err = scanner.Err(); err != nil { + return nil, err + } + + return alternates, nil +} + +func isHex(s string) bool { + for _, b := range []byte(s) { + if isNum(b) { + continue + } + if isHexAlpha(b) { + continue + } + + return false + } + + return true +} + +func isNum(b byte) bool { + return b >= '0' && b <= '9' +} + +func isHexAlpha(b byte) bool { + return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' +} diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go new file mode 100644 index 0000000..c760793 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go @@ -0,0 +1,17 @@ +// +build !windows,!norwfs + +package dotgit + +import ( + "os" + + "gopkg.in/src-d/go-billy.v4" +) + +const openAndLockPackedRefsMode = os.O_RDWR + +func (d *DotGit) rewritePackedRefsWhileLocked( + tmp billy.File, pr billy.File) error { + // On non-Windows platforms, we can have atomic rename. + return d.fs.Rename(tmp.Name(), pr.Name()) +} diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go new file mode 100644 index 0000000..6e43b42 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go @@ -0,0 +1,34 @@ +// +build norwfs + +package dotgit + +import ( + "io" + "os" + + "gopkg.in/src-d/go-billy.v4" +) + +const openAndLockPackedRefsMode = os.O_RDONLY + +// Instead of renaming that can not be supported in simpler filesystems +// a full copy is done. +func (d *DotGit) rewritePackedRefsWhileLocked( + tmp billy.File, pr billy.File) error { + + prWrite, err := d.fs.Create(pr.Name()) + if err != nil { + return err + } + + defer prWrite.Close() + + _, err = tmp.Seek(0, io.SeekStart) + if err != nil { + return err + } + + _, err = io.Copy(prWrite, tmp) + + return err +} diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go new file mode 100644 index 0000000..897d2c9 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go @@ -0,0 +1,42 @@ +// +build windows,!norwfs + +package dotgit + +import ( + "io" + "os" + + "gopkg.in/src-d/go-billy.v4" +) + +const openAndLockPackedRefsMode = os.O_RDWR + +func (d *DotGit) rewritePackedRefsWhileLocked( + tmp billy.File, pr billy.File) error { + // If we aren't using the bare Windows filesystem as the storage + // layer, we might be able to get away with a rename over a locked + // file. + err := d.fs.Rename(tmp.Name(), pr.Name()) + if err == nil { + return nil + } + + // Otherwise, Windows doesn't let us rename over a locked file, so + // we have to do a straight copy. Unfortunately this could result + // in a partially-written file if the process fails before the + // copy completes. 
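+	// The fallback below rewinds both files, truncates the locked
+	// packed-refs file in place, and then streams the temporary file's
+	// contents into it.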
+ _, err = pr.Seek(0, io.SeekStart) + if err != nil { + return err + } + err = pr.Truncate(0) + if err != nil { + return err + } + _, err = tmp.Seek(0, io.SeekStart) + if err != nil { + return err + } + _, err = io.Copy(pr, tmp) + return err +} diff --git a/storage/filesystem/dotgit/dotgit_setref.go b/storage/filesystem/dotgit/dotgit_setref.go new file mode 100644 index 0000000..d27c1a3 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_setref.go @@ -0,0 +1,43 @@ +// +build !norwfs + +package dotgit + +import ( + "os" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) { + // If we are not checking an old ref, just truncate the file. + mode := os.O_RDWR | os.O_CREATE + if old == nil { + mode |= os.O_TRUNC + } + + f, err := d.fs.OpenFile(fileName, mode, 0666) + if err != nil { + return err + } + + defer ioutil.CheckClose(f, &err) + + // Lock is unlocked by the deferred Close above. This is because Unlock + // does not imply a fsync and thus there would be a race between + // Unlock+Close and other concurrent writers. Adding Sync to go-billy + // could work, but this is better (and avoids superfluous syncs). + err = f.Lock() + if err != nil { + return err + } + + // this is a no-op to call even when old is nil. + err = d.checkReferenceAndTruncate(f, old) + if err != nil { + return err + } + + _, err = f.Write([]byte(content)) + return err +} diff --git a/storage/filesystem/dotgit/dotgit_setref_norwfs.go b/storage/filesystem/dotgit/dotgit_setref_norwfs.go new file mode 100644 index 0000000..5695bd3 --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_setref_norwfs.go @@ -0,0 +1,47 @@ +// +build norwfs + +package dotgit + +import ( + "fmt" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +// There are some filesystems that don't support opening files in RDWD mode. +// In these filesystems the standard SetRef function can not be used as i +// reads the reference file to check that it's not modified before updating it. +// +// This version of the function writes the reference without extra checks +// making it compatible with these simple filesystems. This is usually not +// a problem as they should be accessed by only one process at a time. +func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) error { + _, err := d.fs.Stat(fileName) + if err == nil && old != nil { + fRead, err := d.fs.Open(fileName) + if err != nil { + return err + } + + ref, err := d.readReferenceFrom(fRead, old.Name().String()) + fRead.Close() + + if err != nil { + return err + } + + if ref.Hash() != old.Hash() { + return fmt.Errorf("reference has changed concurrently") + } + } + + f, err := d.fs.Create(fileName) + if err != nil { + return err + } + + defer f.Close() + + _, err = f.Write([]byte(content)) + return err +} diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go new file mode 100644 index 0000000..7733eef --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_test.go @@ -0,0 +1,683 @@ +package dotgit + +import ( + "bufio" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "gopkg.in/src-d/go-git.v4/plumbing" + + . 
"gopkg.in/check.v1" + "gopkg.in/src-d/go-billy.v4/osfs" + "gopkg.in/src-d/go-git-fixtures.v3" +) + +func Test(t *testing.T) { TestingT(t) } + +type SuiteDotGit struct { + fixtures.Suite +} + +var _ = Suite(&SuiteDotGit{}) + +func (s *SuiteDotGit) TestInitialize(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + err = dir.Initialize() + c.Assert(err, IsNil) + + _, err = fs.Stat(fs.Join("objects", "info")) + c.Assert(err, IsNil) + + _, err = fs.Stat(fs.Join("objects", "pack")) + c.Assert(err, IsNil) + + _, err = fs.Stat(fs.Join("refs", "heads")) + c.Assert(err, IsNil) + + _, err = fs.Stat(fs.Join("refs", "tags")) + c.Assert(err, IsNil) +} + +func (s *SuiteDotGit) TestSetRefs(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + firstFoo := plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "e8d3ffab552895c19b9fcf7aa264d277cde33881", + ) + err = dir.SetRef(firstFoo, nil) + + c.Assert(err, IsNil) + + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/symbolic", + "ref: refs/heads/foo", + ), nil) + + c.Assert(err, IsNil) + + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "bar", + "e8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + c.Assert(err, IsNil) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + c.Assert(refs, HasLen, 2) + + ref := findReference(refs, "refs/heads/foo") + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + + ref = findReference(refs, "refs/heads/symbolic") + c.Assert(ref, NotNil) + c.Assert(ref.Target().String(), Equals, "refs/heads/foo") + + ref = findReference(refs, "bar") + c.Assert(ref, IsNil) + + ref, err = dir.Ref("refs/heads/foo") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + + ref, err = dir.Ref("refs/heads/symbolic") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Target().String(), Equals, "refs/heads/foo") + + ref, err = dir.Ref("bar") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + + // Check that SetRef with a non-nil `old` works. + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", + ), firstFoo) + c.Assert(err, IsNil) + + // `firstFoo` is no longer the right `old` reference, so this + // should fail. 
+ err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", + ), firstFoo) + c.Assert(err, NotNil) +} + +func (s *SuiteDotGit) TestRefsFromPackedRefs(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, "refs/remotes/origin/branch") + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + +} + +func (s *SuiteDotGit) TestRefsFromReferenceFile(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, "refs/remotes/origin/HEAD") + c.Assert(ref, NotNil) + c.Assert(ref.Type(), Equals, plumbing.SymbolicReference) + c.Assert(string(ref.Target()), Equals, "refs/remotes/origin/master") + +} + +func BenchmarkRefMultipleTimes(b *testing.B) { + fixtures.Init() + fs := fixtures.Basic().ByTag(".git").One().DotGit() + refname := plumbing.ReferenceName("refs/remotes/origin/branch") + + dir := New(fs) + _, err := dir.Ref(refname) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + + for i := 0; i < b.N; i++ { + _, err := dir.Ref(refname) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + } +} + +func (s *SuiteDotGit) TestRemoveRefFromReferenceFile(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + name := plumbing.ReferenceName("refs/remotes/origin/HEAD") + err := dir.RemoveRef(name) + c.Assert(err, IsNil) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, string(name)) + c.Assert(ref, IsNil) +} + +func (s *SuiteDotGit) TestRemoveRefFromPackedRefs(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + name := plumbing.ReferenceName("refs/remotes/origin/master") + err := dir.RemoveRef(name) + c.Assert(err, IsNil) + + b, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) + c.Assert(err, IsNil) + + c.Assert(string(b), Equals, ""+ + "# pack-refs with: peeled fully-peeled \n"+ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\n"+ + "e8d3ffab552895c19b9fcf7aa264d277cde33881 refs/remotes/origin/branch\n") +} + +func (s *SuiteDotGit) TestRemoveRefFromReferenceFileAndPackedRefs(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + // Make a ref file for a ref that's already in `packed-refs`. + err := dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/remotes/origin/branch", + "e8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + + // Make sure it only appears once in the refs list. 
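+	// (Refs de-duplicates by name via its `seen` map, so the loose file
+	// written above shadows the packed-refs entry instead of doubling it.)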
+ refs, err := dir.Refs() + c.Assert(err, IsNil) + found := false + for _, ref := range refs { + if ref.Name() == "refs/remotes/origin/branch" { + c.Assert(found, Equals, false) + found = true + } + } + + name := plumbing.ReferenceName("refs/remotes/origin/branch") + err = dir.RemoveRef(name) + c.Assert(err, IsNil) + + b, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) + c.Assert(err, IsNil) + + c.Assert(string(b), Equals, ""+ + "# pack-refs with: peeled fully-peeled \n"+ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\n"+ + "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/remotes/origin/master\n") + + refs, err = dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, string(name)) + c.Assert(ref, IsNil) +} + +func (s *SuiteDotGit) TestRemoveRefNonExistent(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + packedRefs := filepath.Join(fs.Root(), packedRefsPath) + before, err := ioutil.ReadFile(packedRefs) + c.Assert(err, IsNil) + + name := plumbing.ReferenceName("refs/heads/nonexistent") + err = dir.RemoveRef(name) + c.Assert(err, IsNil) + + after, err := ioutil.ReadFile(packedRefs) + c.Assert(err, IsNil) + + c.Assert(string(before), Equals, string(after)) +} + +func (s *SuiteDotGit) TestRemoveRefInvalidPackedRefs(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + packedRefs := filepath.Join(fs.Root(), packedRefsPath) + brokenContent := "BROKEN STUFF REALLY BROKEN" + + err := ioutil.WriteFile(packedRefs, []byte(brokenContent), os.FileMode(0755)) + c.Assert(err, IsNil) + + name := plumbing.ReferenceName("refs/heads/nonexistent") + err = dir.RemoveRef(name) + c.Assert(err, NotNil) + + after, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) + c.Assert(err, IsNil) + + c.Assert(brokenContent, Equals, string(after)) +} + +func (s *SuiteDotGit) TestRemoveRefInvalidPackedRefs2(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + packedRefs := filepath.Join(fs.Root(), packedRefsPath) + brokenContent := strings.Repeat("a", bufio.MaxScanTokenSize*2) + + err := ioutil.WriteFile(packedRefs, []byte(brokenContent), os.FileMode(0755)) + c.Assert(err, IsNil) + + name := plumbing.ReferenceName("refs/heads/nonexistent") + err = dir.RemoveRef(name) + c.Assert(err, NotNil) + + after, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) + c.Assert(err, IsNil) + + c.Assert(brokenContent, Equals, string(after)) +} + +func (s *SuiteDotGit) TestRefsFromHEADFile(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + + ref := findReference(refs, "HEAD") + c.Assert(ref, NotNil) + c.Assert(ref.Type(), Equals, plumbing.SymbolicReference) + c.Assert(string(ref.Target()), Equals, "refs/heads/master") +} + +func (s *SuiteDotGit) TestConfig(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + file, err := dir.Config() + c.Assert(err, IsNil) + c.Assert(filepath.Base(file.Name()), Equals, "config") +} + +func (s *SuiteDotGit) TestConfigWriteAndConfig(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + f, err := dir.ConfigWriter() + c.Assert(err, IsNil) + + _, err = f.Write([]byte("foo")) + c.Assert(err, IsNil) + + f, err = dir.Config() + c.Assert(err, IsNil) + + cnt, err := ioutil.ReadAll(f) + c.Assert(err, IsNil) + + c.Assert(string(cnt), Equals, "foo") +} + +func (s 
*SuiteDotGit) TestIndex(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + idx, err := dir.Index() + c.Assert(err, IsNil) + c.Assert(idx, NotNil) +} + +func (s *SuiteDotGit) TestIndexWriteAndIndex(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + f, err := dir.IndexWriter() + c.Assert(err, IsNil) + + _, err = f.Write([]byte("foo")) + c.Assert(err, IsNil) + + f, err = dir.Index() + c.Assert(err, IsNil) + + cnt, err := ioutil.ReadAll(f) + c.Assert(err, IsNil) + + c.Assert(string(cnt), Equals, "foo") +} + +func (s *SuiteDotGit) TestShallow(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + file, err := dir.Shallow() + c.Assert(err, IsNil) + c.Assert(file, IsNil) +} + +func (s *SuiteDotGit) TestShallowWriteAndShallow(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + f, err := dir.ShallowWriter() + c.Assert(err, IsNil) + + _, err = f.Write([]byte("foo")) + c.Assert(err, IsNil) + + f, err = dir.Shallow() + c.Assert(err, IsNil) + + cnt, err := ioutil.ReadAll(f) + c.Assert(err, IsNil) + + c.Assert(string(cnt), Equals, "foo") +} + +func findReference(refs []*plumbing.Reference, name string) *plumbing.Reference { + n := plumbing.ReferenceName(name) + for _, ref := range refs { + if ref.Name() == n { + return ref + } + } + + return nil +} + +func (s *SuiteDotGit) TestObjectPacks(c *C) { + f := fixtures.Basic().ByTag(".git").One() + fs := f.DotGit() + dir := New(fs) + + hashes, err := dir.ObjectPacks() + c.Assert(err, IsNil) + c.Assert(hashes, HasLen, 1) + c.Assert(hashes[0], Equals, f.PackfileHash) + + // Make sure that a random file in the pack directory doesn't + // break everything. 
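+	// ObjectPacks parses the hash out of the "pack-<hash>.pack" file name
+	// and skips anything that does not decode to a valid (non-zero) hash,
+	// so this oddly named file should simply be ignored.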
+ badFile, err := fs.Create("objects/pack/OOPS_THIS_IS_NOT_RIGHT.pack") + c.Assert(err, IsNil) + err = badFile.Close() + c.Assert(err, IsNil) + + hashes2, err := dir.ObjectPacks() + c.Assert(err, IsNil) + c.Assert(hashes2, HasLen, 1) + c.Assert(hashes[0], Equals, hashes2[0]) +} + +func (s *SuiteDotGit) TestObjectPack(c *C) { + f := fixtures.Basic().ByTag(".git").One() + fs := f.DotGit() + dir := New(fs) + + pack, err := dir.ObjectPack(f.PackfileHash) + c.Assert(err, IsNil) + c.Assert(filepath.Ext(pack.Name()), Equals, ".pack") +} + +func (s *SuiteDotGit) TestObjectPackIdx(c *C) { + f := fixtures.Basic().ByTag(".git").One() + fs := f.DotGit() + dir := New(fs) + + idx, err := dir.ObjectPackIdx(f.PackfileHash) + c.Assert(err, IsNil) + c.Assert(filepath.Ext(idx.Name()), Equals, ".idx") + c.Assert(idx.Close(), IsNil) +} + +func (s *SuiteDotGit) TestObjectPackNotFound(c *C) { + fs := fixtures.Basic().ByTag(".git").One().DotGit() + dir := New(fs) + + pack, err := dir.ObjectPack(plumbing.ZeroHash) + c.Assert(err, Equals, ErrPackfileNotFound) + c.Assert(pack, IsNil) + + idx, err := dir.ObjectPackIdx(plumbing.ZeroHash) + c.Assert(err, Equals, ErrPackfileNotFound) + c.Assert(idx, IsNil) +} + +func (s *SuiteDotGit) TestNewObject(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + w, err := dir.NewObject() + c.Assert(err, IsNil) + + err = w.WriteHeader(plumbing.BlobObject, 14) + c.Assert(err, IsNil) + n, err := w.Write([]byte("this is a test")) + c.Assert(err, IsNil) + c.Assert(n, Equals, 14) + + c.Assert(w.Hash().String(), Equals, "a8a940627d132695a9769df883f85992f0ff4a43") + + err = w.Close() + c.Assert(err, IsNil) + + i, err := fs.Stat("objects/a8/a940627d132695a9769df883f85992f0ff4a43") + c.Assert(err, IsNil) + c.Assert(i.Size(), Equals, int64(34)) +} + +func (s *SuiteDotGit) TestObjects(c *C) { + fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() + dir := New(fs) + + hashes, err := dir.Objects() + c.Assert(err, IsNil) + c.Assert(hashes, HasLen, 187) + c.Assert(hashes[0].String(), Equals, "0097821d427a3c3385898eb13b50dcbc8702b8a3") + c.Assert(hashes[1].String(), Equals, "01d5fa556c33743006de7e76e67a2dfcd994ca04") + c.Assert(hashes[2].String(), Equals, "03db8e1fbe133a480f2867aac478fd866686d69e") +} + +func (s *SuiteDotGit) TestObjectsNoFolder(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + hash, err := dir.Objects() + c.Assert(err, IsNil) + c.Assert(hash, HasLen, 0) +} + +func (s *SuiteDotGit) TestObject(c *C) { + fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() + dir := New(fs) + + hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e") + file, err := dir.Object(hash) + c.Assert(err, IsNil) + c.Assert(strings.HasSuffix( + file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")), + Equals, true, + ) +} + +func (s *SuiteDotGit) TestObjectNotFound(c *C) { + fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() + dir := New(fs) + + hash := plumbing.NewHash("not-found-object") + file, err := dir.Object(hash) + c.Assert(err, NotNil) + c.Assert(file, IsNil) +} + +func (s *SuiteDotGit) TestSubmodules(c *C) { + fs := fixtures.ByTag("submodule").One().DotGit() + dir := New(fs) + + m, err := dir.Module("basic") + c.Assert(err, IsNil) + c.Assert(strings.HasSuffix(m.Root(), m.Join(".git", "modules", "basic")), Equals, true) +} + +func (s *SuiteDotGit) 
TestPackRefs(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + fs := osfs.New(tmp) + dir := New(fs) + + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "e8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + c.Assert(err, IsNil) + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/bar", + "a8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + c.Assert(err, IsNil) + + refs, err := dir.Refs() + c.Assert(err, IsNil) + c.Assert(refs, HasLen, 2) + looseCount, err := dir.CountLooseRefs() + c.Assert(err, IsNil) + c.Assert(looseCount, Equals, 2) + + err = dir.PackRefs() + c.Assert(err, IsNil) + + // Make sure the refs are still there, but no longer loose. + refs, err = dir.Refs() + c.Assert(err, IsNil) + c.Assert(refs, HasLen, 2) + looseCount, err = dir.CountLooseRefs() + c.Assert(err, IsNil) + c.Assert(looseCount, Equals, 0) + + ref, err := dir.Ref("refs/heads/foo") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") + ref, err = dir.Ref("refs/heads/bar") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "a8d3ffab552895c19b9fcf7aa264d277cde33881") + + // Now update one of them, re-pack, and check again. + err = dir.SetRef(plumbing.NewReferenceFromStrings( + "refs/heads/foo", + "b8d3ffab552895c19b9fcf7aa264d277cde33881", + ), nil) + c.Assert(err, IsNil) + looseCount, err = dir.CountLooseRefs() + c.Assert(err, IsNil) + c.Assert(looseCount, Equals, 1) + err = dir.PackRefs() + c.Assert(err, IsNil) + + // Make sure the refs are still there, but no longer loose. + refs, err = dir.Refs() + c.Assert(err, IsNil) + c.Assert(refs, HasLen, 2) + looseCount, err = dir.CountLooseRefs() + c.Assert(err, IsNil) + c.Assert(looseCount, Equals, 0) + + ref, err = dir.Ref("refs/heads/foo") + c.Assert(err, IsNil) + c.Assert(ref, NotNil) + c.Assert(ref.Hash().String(), Equals, "b8d3ffab552895c19b9fcf7aa264d277cde33881") +} + +func (s *SuiteDotGit) TestAlternates(c *C) { + tmp, err := ioutil.TempDir("", "dot-git") + c.Assert(err, IsNil) + defer os.RemoveAll(tmp) + + // Create a new billy fs. + fs := osfs.New(tmp) + + // Create a new dotgit object and initialize. + dir := New(fs) + err = dir.Initialize() + c.Assert(err, IsNil) + + // Create alternates file. + altpath := filepath.Join("objects", "info", "alternates") + f, err := fs.Create(altpath) + c.Assert(err, IsNil) + + // Multiple alternates. + var strContent string + if runtime.GOOS == "windows" { + strContent = "C:\\Users\\username\\repo1\\.git\\objects\r\n..\\..\\..\\rep2\\.git\\objects" + } else { + strContent = "/Users/username/rep1//.git/objects\n../../../rep2//.git/objects" + } + content := []byte(strContent) + f.Write(content) + f.Close() + + dotgits, err := dir.Alternates() + c.Assert(err, IsNil) + if runtime.GOOS == "windows" { + c.Assert(dotgits[0].fs.Root(), Equals, "C:\\Users\\username\\repo1\\.git") + } else { + c.Assert(dotgits[0].fs.Root(), Equals, "/Users/username/rep1/.git") + } + + // For relative path: + // /some/absolute/path/to/dot-git -> /some/absolute/path + pathx := strings.Split(tmp, string(filepath.Separator)) + pathx = pathx[:len(pathx)-2] + // Use string.Join() to avoid malformed absolutepath on windows + // C:Users\\User\\... instead of C:\\Users\\appveyor\\... . 
+ resolvedPath := strings.Join(pathx, string(filepath.Separator)) + // Append the alternate path to the resolvedPath + expectedPath := filepath.Join(string(filepath.Separator), resolvedPath, "rep2", ".git") + if runtime.GOOS == "windows" { + expectedPath = filepath.Join(resolvedPath, "rep2", ".git") + } + c.Assert(dotgits[1].fs.Root(), Equals, expectedPath) +} diff --git a/storage/filesystem/dotgit/writers.go b/storage/filesystem/dotgit/writers.go new file mode 100644 index 0000000..c2b420f --- /dev/null +++ b/storage/filesystem/dotgit/writers.go @@ -0,0 +1,282 @@ +package dotgit + +import ( + "fmt" + "io" + "sync/atomic" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" + "gopkg.in/src-d/go-git.v4/plumbing/format/objfile" + "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" + + "gopkg.in/src-d/go-billy.v4" +) + +// PackWriter is a io.Writer that generates the packfile index simultaneously, +// a packfile.Decoder is used with a file reader to read the file being written +// this operation is synchronized with the write operations. +// The packfile is written in a temp file, when Close is called this file +// is renamed/moved (depends on the Filesystem implementation) to the final +// location, if the PackWriter is not used, nothing is written +type PackWriter struct { + Notify func(plumbing.Hash, *packfile.Index) + + fs billy.Filesystem + fr, fw billy.File + synced *syncedReader + checksum plumbing.Hash + index *packfile.Index + result chan error +} + +func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { + fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_") + if err != nil { + return nil, err + } + + fr, err := fs.Open(fw.Name()) + if err != nil { + return nil, err + } + + writer := &PackWriter{ + fs: fs, + fw: fw, + fr: fr, + synced: newSyncedReader(fw, fr), + result: make(chan error), + } + + go writer.buildIndex() + return writer, nil +} + +func (w *PackWriter) buildIndex() { + s := packfile.NewScanner(w.synced) + d, err := packfile.NewDecoder(s, nil) + if err != nil { + w.result <- err + return + } + + checksum, err := d.Decode() + if err != nil { + w.result <- err + return + } + + w.checksum = checksum + w.index = d.Index() + w.result <- err +} + +// waitBuildIndex waits until buildIndex function finishes, this can terminate +// with a packfile.ErrEmptyPackfile, this means that nothing was written so we +// ignore the error +func (w *PackWriter) waitBuildIndex() error { + err := <-w.result + if err == packfile.ErrEmptyPackfile { + return nil + } + + return err +} + +func (w *PackWriter) Write(p []byte) (int, error) { + return w.synced.Write(p) +} + +// Close closes all the file descriptors and save the final packfile, if nothing +// was written, the tempfiles are deleted without writing a packfile. 
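+// A minimal sketch of the intended flow, mirroring TestNewObjectPack (dot
+// is a *DotGit and r is some packfile reader; both are assumed here):
+//
+//	w, err := dot.NewObjectPack()
+//	if err != nil {
+//		// handle error
+//	}
+//	if _, err := io.Copy(w, r); err != nil {
+//		// handle error
+//	}
+//	if err := w.Close(); err != nil {
+//		// handle error
+//	}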
+func (w *PackWriter) Close() error { + defer func() { + if w.Notify != nil && w.index != nil && w.index.Size() > 0 { + w.Notify(w.checksum, w.index) + } + + close(w.result) + }() + + if err := w.synced.Close(); err != nil { + return err + } + + if err := w.waitBuildIndex(); err != nil { + return err + } + + if err := w.fr.Close(); err != nil { + return err + } + + if err := w.fw.Close(); err != nil { + return err + } + + if w.index == nil || w.index.Size() == 0 { + return w.clean() + } + + return w.save() +} + +func (w *PackWriter) clean() error { + return w.fs.Remove(w.fw.Name()) +} + +func (w *PackWriter) save() error { + base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum)) + idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base)) + if err != nil { + return err + } + + if err := w.encodeIdx(idx); err != nil { + return err + } + + if err := idx.Close(); err != nil { + return err + } + + return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base)) +} + +func (w *PackWriter) encodeIdx(writer io.Writer) error { + idx := w.index.ToIdxFile() + idx.PackfileChecksum = w.checksum + idx.Version = idxfile.VersionSupported + e := idxfile.NewEncoder(writer) + _, err := e.Encode(idx) + return err +} + +type syncedReader struct { + w io.Writer + r io.ReadSeeker + + blocked, done uint32 + written, read uint64 + news chan bool +} + +func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader { + return &syncedReader{ + w: w, + r: r, + news: make(chan bool), + } +} + +func (s *syncedReader) Write(p []byte) (n int, err error) { + defer func() { + written := atomic.AddUint64(&s.written, uint64(n)) + read := atomic.LoadUint64(&s.read) + if written > read { + s.wake() + } + }() + + n, err = s.w.Write(p) + return +} + +func (s *syncedReader) Read(p []byte) (n int, err error) { + defer func() { atomic.AddUint64(&s.read, uint64(n)) }() + + for { + s.sleep() + n, err = s.r.Read(p) + if err == io.EOF && !s.isDone() && n == 0 { + continue + } + + break + } + + return +} + +func (s *syncedReader) isDone() bool { + return atomic.LoadUint32(&s.done) == 1 +} + +func (s *syncedReader) isBlocked() bool { + return atomic.LoadUint32(&s.blocked) == 1 +} + +func (s *syncedReader) wake() { + if s.isBlocked() { + // fmt.Println("wake") + atomic.StoreUint32(&s.blocked, 0) + s.news <- true + } +} + +func (s *syncedReader) sleep() { + read := atomic.LoadUint64(&s.read) + written := atomic.LoadUint64(&s.written) + if read >= written { + atomic.StoreUint32(&s.blocked, 1) + // fmt.Println("sleep", read, written) + <-s.news + } + +} + +func (s *syncedReader) Seek(offset int64, whence int) (int64, error) { + if whence == io.SeekCurrent { + return s.r.Seek(offset, whence) + } + + p, err := s.r.Seek(offset, whence) + atomic.StoreUint64(&s.read, uint64(p)) + + return p, err +} + +func (s *syncedReader) Close() error { + atomic.StoreUint32(&s.done, 1) + close(s.news) + return nil +} + +type ObjectWriter struct { + objfile.Writer + fs billy.Filesystem + f billy.File +} + +func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) { + f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_") + if err != nil { + return nil, err + } + + return &ObjectWriter{ + Writer: (*objfile.NewWriter(f)), + fs: fs, + f: f, + }, nil +} + +func (w *ObjectWriter) Close() error { + if err := w.Writer.Close(); err != nil { + return err + } + + if err := w.f.Close(); err != nil { + return err + } + + return w.save() +} + +func (w *ObjectWriter) save() error { + hash := w.Hash().String() + file := w.fs.Join(objectsPath, 
hash[0:2], hash[2:40]) + + return w.fs.Rename(w.f.Name(), file) +} diff --git a/storage/filesystem/dotgit/writers_test.go b/storage/filesystem/dotgit/writers_test.go new file mode 100644 index 0000000..bf00762 --- /dev/null +++ b/storage/filesystem/dotgit/writers_test.go @@ -0,0 +1,156 @@ +package dotgit + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strconv" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" + + . "gopkg.in/check.v1" + "gopkg.in/src-d/go-billy.v4/osfs" + "gopkg.in/src-d/go-git-fixtures.v3" +) + +func (s *SuiteDotGit) TestNewObjectPack(c *C) { + f := fixtures.Basic().One() + + dir, err := ioutil.TempDir("", "example") + if err != nil { + log.Fatal(err) + } + + defer os.RemoveAll(dir) + + fs := osfs.New(dir) + dot := New(fs) + + w, err := dot.NewObjectPack() + c.Assert(err, IsNil) + + _, err = io.Copy(w, f.Packfile()) + c.Assert(err, IsNil) + + c.Assert(w.Close(), IsNil) + + pfPath := fmt.Sprintf("objects/pack/pack-%s.pack", f.PackfileHash) + idxPath := fmt.Sprintf("objects/pack/pack-%s.idx", f.PackfileHash) + + stat, err := fs.Stat(pfPath) + c.Assert(err, IsNil) + c.Assert(stat.Size(), Equals, int64(84794)) + + stat, err = fs.Stat(idxPath) + c.Assert(err, IsNil) + c.Assert(stat.Size(), Equals, int64(1940)) + + pf, err := fs.Open(pfPath) + c.Assert(err, IsNil) + pfs := packfile.NewScanner(pf) + _, objects, err := pfs.Header() + c.Assert(err, IsNil) + for i := uint32(0); i < objects; i++ { + _, err := pfs.NextObjectHeader() + if err != nil { + c.Assert(err, IsNil) + break + } + } + c.Assert(pfs.Close(), IsNil) +} + +func (s *SuiteDotGit) TestNewObjectPackUnused(c *C) { + dir, err := ioutil.TempDir("", "example") + if err != nil { + log.Fatal(err) + } + + defer os.RemoveAll(dir) + + fs := osfs.New(dir) + dot := New(fs) + + w, err := dot.NewObjectPack() + c.Assert(err, IsNil) + + c.Assert(w.Close(), IsNil) + + info, err := fs.ReadDir("objects/pack") + c.Assert(err, IsNil) + c.Assert(info, HasLen, 0) + + // check clean up of temporary files + info, err = fs.ReadDir("") + c.Assert(err, IsNil) + for _, fi := range info { + c.Assert(fi.IsDir(), Equals, true) + } +} + +func (s *SuiteDotGit) TestSyncedReader(c *C) { + tmpw, err := ioutil.TempFile("", "example") + c.Assert(err, IsNil) + + tmpr, err := os.Open(tmpw.Name()) + c.Assert(err, IsNil) + + defer func() { + tmpw.Close() + tmpr.Close() + os.Remove(tmpw.Name()) + }() + + synced := newSyncedReader(tmpw, tmpr) + + go func() { + for i := 0; i < 281; i++ { + _, err := synced.Write([]byte(strconv.Itoa(i) + "\n")) + c.Assert(err, IsNil) + } + + synced.Close() + }() + + o, err := synced.Seek(1002, io.SeekStart) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(1002)) + + head := make([]byte, 3) + n, err := io.ReadFull(synced, head) + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + c.Assert(string(head), Equals, "278") + + o, err = synced.Seek(1010, io.SeekStart) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(1010)) + + n, err = io.ReadFull(synced, head) + c.Assert(err, IsNil) + c.Assert(n, Equals, 3) + c.Assert(string(head), Equals, "280") +} + +func (s *SuiteDotGit) TestPackWriterUnusedNotify(c *C) { + dir, err := ioutil.TempDir("", "example") + if err != nil { + c.Assert(err, IsNil) + } + + defer os.RemoveAll(dir) + + fs := osfs.New(dir) + + w, err := newPackWrite(fs) + c.Assert(err, IsNil) + + w.Notify = func(h plumbing.Hash, idx *packfile.Index) { + c.Fatal("unexpected call to PackWriter.Notify") + } + + c.Assert(w.Close(), IsNil) +} diff --git 
a/storage/filesystem/index.go b/storage/filesystem/index.go index 092edec..2ebf57e 100644 --- a/storage/filesystem/index.go +++ b/storage/filesystem/index.go @@ -4,7 +4,7 @@ import ( "os" "gopkg.in/src-d/go-git.v4/plumbing/format/index" - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) diff --git a/storage/filesystem/internal/dotgit/dotgit.go b/storage/filesystem/internal/dotgit/dotgit.go deleted file mode 100644 index 52b621c..0000000 --- a/storage/filesystem/internal/dotgit/dotgit.go +++ /dev/null @@ -1,808 +0,0 @@ -// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt -package dotgit - -import ( - "bufio" - "errors" - "fmt" - "io" - stdioutil "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "gopkg.in/src-d/go-billy.v4/osfs" - "gopkg.in/src-d/go-git.v4/plumbing" - "gopkg.in/src-d/go-git.v4/utils/ioutil" - - "gopkg.in/src-d/go-billy.v4" -) - -const ( - suffix = ".git" - packedRefsPath = "packed-refs" - configPath = "config" - indexPath = "index" - shallowPath = "shallow" - modulePath = "modules" - objectsPath = "objects" - packPath = "pack" - refsPath = "refs" - - tmpPackedRefsPrefix = "._packed-refs" - - packExt = ".pack" - idxExt = ".idx" -) - -var ( - // ErrNotFound is returned by New when the path is not found. - ErrNotFound = errors.New("path not found") - // ErrIdxNotFound is returned by Idxfile when the idx file is not found - ErrIdxNotFound = errors.New("idx file not found") - // ErrPackfileNotFound is returned by Packfile when the packfile is not found - ErrPackfileNotFound = errors.New("packfile not found") - // ErrConfigNotFound is returned by Config when the config is not found - ErrConfigNotFound = errors.New("config file not found") - // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is - // found in the packed-ref file. This is usually the case for corrupted git - // repositories. - ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file") - // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt. - ErrPackedRefsBadFormat = errors.New("malformed packed-ref") - // ErrSymRefTargetNotFound is returned when a symbolic reference is - // targeting a non-existing object. This usually means the repository - // is corrupt. - ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") -) - -// The DotGit type represents a local git repository on disk. This -// type is not zero-value-safe, use the New function to initialize it. -type DotGit struct { - fs billy.Filesystem -} - -// New returns a DotGit value ready to be used. The path argument must -// be the absolute path of a git repository directory (e.g. -// "/foo/bar/.git"). -func New(fs billy.Filesystem) *DotGit { - return &DotGit{fs: fs} -} - -// Initialize creates all the folder scaffolding. 
-func (d *DotGit) Initialize() error { - mustExists := []string{ - d.fs.Join("objects", "info"), - d.fs.Join("objects", "pack"), - d.fs.Join("refs", "heads"), - d.fs.Join("refs", "tags"), - } - - for _, path := range mustExists { - _, err := d.fs.Stat(path) - if err == nil { - continue - } - - if !os.IsNotExist(err) { - return err - } - - if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil { - return err - } - } - - return nil -} - -// ConfigWriter returns a file pointer for write to the config file -func (d *DotGit) ConfigWriter() (billy.File, error) { - return d.fs.Create(configPath) -} - -// Config returns a file pointer for read to the config file -func (d *DotGit) Config() (billy.File, error) { - return d.fs.Open(configPath) -} - -// IndexWriter returns a file pointer for write to the index file -func (d *DotGit) IndexWriter() (billy.File, error) { - return d.fs.Create(indexPath) -} - -// Index returns a file pointer for read to the index file -func (d *DotGit) Index() (billy.File, error) { - return d.fs.Open(indexPath) -} - -// ShallowWriter returns a file pointer for write to the shallow file -func (d *DotGit) ShallowWriter() (billy.File, error) { - return d.fs.Create(shallowPath) -} - -// Shallow returns a file pointer for read to the shallow file -func (d *DotGit) Shallow() (billy.File, error) { - f, err := d.fs.Open(shallowPath) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - return nil, err - } - - return f, nil -} - -// NewObjectPack return a writer for a new packfile, it saves the packfile to -// disk and also generates and save the index for the given packfile. -func (d *DotGit) NewObjectPack() (*PackWriter, error) { - return newPackWrite(d.fs) -} - -// ObjectPacks returns the list of availables packfiles -func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { - packDir := d.fs.Join(objectsPath, packPath) - files, err := d.fs.ReadDir(packDir) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - return nil, err - } - - var packs []plumbing.Hash - for _, f := range files { - if !strings.HasSuffix(f.Name(), packExt) { - continue - } - - n := f.Name() - h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack - if h.IsZero() { - // Ignore files with badly-formatted names. - continue - } - packs = append(packs, h) - } - - return packs, nil -} - -func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string { - return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension)) -} - -func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) { - pack, err := d.fs.Open(d.objectPackPath(hash, extension)) - if err != nil { - if os.IsNotExist(err) { - return nil, ErrPackfileNotFound - } - - return nil, err - } - - return pack, nil -} - -// ObjectPack returns a fs.File of the given packfile -func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) { - return d.objectPackOpen(hash, `pack`) -} - -// ObjectPackIdx returns a fs.File of the index file for a given packfile -func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) { - return d.objectPackOpen(hash, `idx`) -} - -func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error { - path := d.objectPackPath(hash, `pack`) - if !t.IsZero() { - fi, err := d.fs.Stat(path) - if err != nil { - return err - } - // too new, skip deletion. 
- if !fi.ModTime().Before(t) { - return nil - } - } - err := d.fs.Remove(path) - if err != nil { - return err - } - return d.fs.Remove(d.objectPackPath(hash, `idx`)) -} - -// NewObject return a writer for a new object file. -func (d *DotGit) NewObject() (*ObjectWriter, error) { - return newObjectWriter(d.fs) -} - -// Objects returns a slice with the hashes of objects found under the -// .git/objects/ directory. -func (d *DotGit) Objects() ([]plumbing.Hash, error) { - var objects []plumbing.Hash - err := d.ForEachObjectHash(func(hash plumbing.Hash) error { - objects = append(objects, hash) - return nil - }) - if err != nil { - return nil, err - } - return objects, nil -} - -// Objects returns a slice with the hashes of objects found under the -// .git/objects/ directory. -func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { - files, err := d.fs.ReadDir(objectsPath) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return err - } - - for _, f := range files { - if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) { - base := f.Name() - d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base)) - if err != nil { - return err - } - - for _, o := range d { - h := plumbing.NewHash(base + o.Name()) - if h.IsZero() { - // Ignore files with badly-formatted names. - continue - } - err = fun(h) - if err != nil { - return err - } - } - } - } - - return nil -} - -func (d *DotGit) objectPath(h plumbing.Hash) string { - hash := h.String() - return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) -} - -// Object returns a fs.File pointing the object file, if exists -func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { - return d.fs.Open(d.objectPath(h)) -} - -// ObjectStat returns a os.FileInfo pointing the object file, if exists -func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { - return d.fs.Stat(d.objectPath(h)) -} - -// ObjectDelete removes the object file, if exists -func (d *DotGit) ObjectDelete(h plumbing.Hash) error { - return d.fs.Remove(d.objectPath(h)) -} - -func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) { - b, err := stdioutil.ReadAll(rd) - if err != nil { - return nil, err - } - - line := strings.TrimSpace(string(b)) - return plumbing.NewReferenceFromStrings(name, line), nil -} - -func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error { - if old == nil { - return nil - } - ref, err := d.readReferenceFrom(f, old.Name().String()) - if err != nil { - return err - } - if ref.Hash() != old.Hash() { - return fmt.Errorf("reference has changed concurrently") - } - _, err = f.Seek(0, io.SeekStart) - if err != nil { - return err - } - return f.Truncate(0) -} - -func (d *DotGit) SetRef(r, old *plumbing.Reference) error { - var content string - switch r.Type() { - case plumbing.SymbolicReference: - content = fmt.Sprintf("ref: %s\n", r.Target()) - case plumbing.HashReference: - content = fmt.Sprintln(r.Hash().String()) - } - - fileName := r.Name().String() - - return d.setRef(fileName, content, old) -} - -// Refs scans the git directory collecting references, which it returns. -// Symbolic references are resolved and included in the output. 
-func (d *DotGit) Refs() ([]*plumbing.Reference, error) { - var refs []*plumbing.Reference - var seen = make(map[plumbing.ReferenceName]bool) - if err := d.addRefsFromRefDir(&refs, seen); err != nil { - return nil, err - } - - if err := d.addRefsFromPackedRefs(&refs, seen); err != nil { - return nil, err - } - - if err := d.addRefFromHEAD(&refs); err != nil { - return nil, err - } - - return refs, nil -} - -// Ref returns the reference for a given reference name. -func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) { - ref, err := d.readReferenceFile(".", name.String()) - if err == nil { - return ref, nil - } - - return d.packedRef(name) -} - -func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) { - s := bufio.NewScanner(f) - var refs []*plumbing.Reference - for s.Scan() { - ref, err := d.processLine(s.Text()) - if err != nil { - return nil, err - } - - if ref != nil { - refs = append(refs, ref) - } - } - - return refs, s.Err() -} - -func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) { - f, err := d.fs.Open(packedRefsPath) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - - defer ioutil.CheckClose(f, &err) - return d.findPackedRefsInFile(f) -} - -func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) { - refs, err := d.findPackedRefs() - if err != nil { - return nil, err - } - - for _, ref := range refs { - if ref.Name() == name { - return ref, nil - } - } - - return nil, plumbing.ErrReferenceNotFound -} - -// RemoveRef removes a reference by name. -func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error { - path := d.fs.Join(".", name.String()) - _, err := d.fs.Stat(path) - if err == nil { - err = d.fs.Remove(path) - // Drop down to remove it from the packed refs file, too. - } - - if err != nil && !os.IsNotExist(err) { - return err - } - - return d.rewritePackedRefsWithoutRef(name) -} - -func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) { - packedRefs, err := d.findPackedRefs() - if err != nil { - return err - } - - for _, ref := range packedRefs { - if !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - return nil -} - -func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) { - packedRefs, err := d.findPackedRefsInFile(f) - if err != nil { - return err - } - - for _, ref := range packedRefs { - if !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - return nil -} - -func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( - pr billy.File, err error) { - var f billy.File - defer func() { - if err != nil && f != nil { - ioutil.CheckClose(f, &err) - } - }() - - // File mode is retrieved from a constant defined in the target specific - // files (dotgit_rewrite_packed_refs_*). Some modes are not available - // in all filesystems. - openFlags := openAndLockPackedRefsMode - if doCreate { - openFlags |= os.O_CREATE - } - - // Keep trying to open and lock the file until we're sure the file - // didn't change between the open and the lock. 
- for { - f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600) - if err != nil { - if os.IsNotExist(err) && !doCreate { - return nil, nil - } - - return nil, err - } - fi, err := d.fs.Stat(packedRefsPath) - if err != nil { - return nil, err - } - mtime := fi.ModTime() - - err = f.Lock() - if err != nil { - return nil, err - } - - fi, err = d.fs.Stat(packedRefsPath) - if err != nil { - return nil, err - } - if mtime.Equal(fi.ModTime()) { - break - } - // The file has changed since we opened it. Close and retry. - err = f.Close() - if err != nil { - return nil, err - } - } - return f, nil -} - -func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) { - pr, err := d.openAndLockPackedRefs(false) - if err != nil { - return err - } - if pr == nil { - return nil - } - defer ioutil.CheckClose(pr, &err) - - // Creating the temp file in the same directory as the target file - // improves our chances for rename operation to be atomic. - tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) - if err != nil { - return err - } - tmpName := tmp.Name() - defer func() { - ioutil.CheckClose(tmp, &err) - _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it - }() - - s := bufio.NewScanner(pr) - found := false - for s.Scan() { - line := s.Text() - ref, err := d.processLine(line) - if err != nil { - return err - } - - if ref != nil && ref.Name() == name { - found = true - continue - } - - if _, err := fmt.Fprintln(tmp, line); err != nil { - return err - } - } - - if err := s.Err(); err != nil { - return err - } - - if !found { - return nil - } - - return d.rewritePackedRefsWhileLocked(tmp, pr) -} - -// process lines from a packed-refs file -func (d *DotGit) processLine(line string) (*plumbing.Reference, error) { - if len(line) == 0 { - return nil, nil - } - - switch line[0] { - case '#': // comment - ignore - return nil, nil - case '^': // annotated tag commit of the previous line - ignore - return nil, nil - default: - ws := strings.Split(line, " ") // hash then ref - if len(ws) != 2 { - return nil, ErrPackedRefsBadFormat - } - - return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil - } -} - -func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error { - return d.walkReferencesTree(refs, []string{refsPath}, seen) -} - -func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error { - files, err := d.fs.ReadDir(d.fs.Join(relPath...)) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return err - } - - for _, f := range files { - newRelPath := append(append([]string(nil), relPath...), f.Name()) - if f.IsDir() { - if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil { - return err - } - - continue - } - - ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/")) - if err != nil { - return err - } - - if ref != nil && !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - - return nil -} - -func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error { - ref, err := d.readReferenceFile(".", "HEAD") - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return err - } - - *refs = append(*refs, ref) - return nil -} - -func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) { - path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...)) - f, err := d.fs.Open(path) - if err != nil { - return nil, err - } - defer ioutil.CheckClose(f, 
&err) - - return d.readReferenceFrom(f, name) -} - -func (d *DotGit) CountLooseRefs() (int, error) { - var refs []*plumbing.Reference - var seen = make(map[plumbing.ReferenceName]bool) - if err := d.addRefsFromRefDir(&refs, seen); err != nil { - return 0, err - } - - return len(refs), nil -} - -// PackRefs packs all loose refs into the packed-refs file. -// -// This implementation only works under the assumption that the view -// of the file system won't be updated during this operation. This -// strategy would not work on a general file system though, without -// locking each loose reference and checking it again before deleting -// the file, because otherwise an updated reference could sneak in and -// then be deleted by the packed-refs process. Alternatively, every -// ref update could also lock packed-refs, so only one lock is -// required during ref-packing. But that would worsen performance in -// the common case. -// -// TODO: add an "all" boolean like the `git pack-refs --all` flag. -// When `all` is false, it would only pack refs that have already been -// packed, plus all tags. -func (d *DotGit) PackRefs() (err error) { - // Lock packed-refs, and create it if it doesn't exist yet. - f, err := d.openAndLockPackedRefs(true) - if err != nil { - return err - } - defer ioutil.CheckClose(f, &err) - - // Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs. - var refs []*plumbing.Reference - seen := make(map[plumbing.ReferenceName]bool) - if err = d.addRefsFromRefDir(&refs, seen); err != nil { - return err - } - if len(refs) == 0 { - // Nothing to do! - return nil - } - numLooseRefs := len(refs) - if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil { - return err - } - - // Write them all to a new temp packed-refs file. - tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) - if err != nil { - return err - } - tmpName := tmp.Name() - defer func() { - ioutil.CheckClose(tmp, &err) - _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it - }() - - w := bufio.NewWriter(tmp) - for _, ref := range refs { - _, err = w.WriteString(ref.String() + "\n") - if err != nil { - return err - } - } - err = w.Flush() - if err != nil { - return err - } - - // Rename the temp packed-refs file. - err = d.rewritePackedRefsWhileLocked(tmp, f) - if err != nil { - return err - } - - // Delete all the loose refs, while still holding the packed-refs - // lock. - for _, ref := range refs[:numLooseRefs] { - path := d.fs.Join(".", ref.Name().String()) - err = d.fs.Remove(path) - if err != nil && !os.IsNotExist(err) { - return err - } - } - - return nil -} - -// Module return a billy.Filesystem pointing to the module folder -func (d *DotGit) Module(name string) (billy.Filesystem, error) { - return d.fs.Chroot(d.fs.Join(modulePath, name)) -} - -// Alternates returns DotGit(s) based off paths in objects/info/alternates if -// available. This can be used to checks if it's a shared repository. -func (d *DotGit) Alternates() ([]*DotGit, error) { - altpath := d.fs.Join("objects", "info", "alternates") - f, err := d.fs.Open(altpath) - if err != nil { - return nil, err - } - defer f.Close() - - var alternates []*DotGit - - // Read alternate paths line-by-line and create DotGit objects. - scanner := bufio.NewScanner(f) - for scanner.Scan() { - path := scanner.Text() - if !filepath.IsAbs(path) { - // For relative paths, we can perform an internal conversion to - // slash so that they work cross-platform. 
- slashPath := filepath.ToSlash(path) - // If the path is not absolute, it must be relative to object - // database (.git/objects/info). - // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html - // Hence, derive a path relative to DotGit's root. - // "../../../reponame/.git/" -> "../../reponame/.git" - // Remove the first ../ - relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...) - normalPath := filepath.FromSlash(relpath) - path = filepath.Join(d.fs.Root(), normalPath) - } - fs := osfs.New(filepath.Dir(path)) - alternates = append(alternates, New(fs)) - } - - if err = scanner.Err(); err != nil { - return nil, err - } - - return alternates, nil -} - -func isHex(s string) bool { - for _, b := range []byte(s) { - if isNum(b) { - continue - } - if isHexAlpha(b) { - continue - } - - return false - } - - return true -} - -func isNum(b byte) bool { - return b >= '0' && b <= '9' -} - -func isHexAlpha(b byte) bool { - return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' -} diff --git a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_nix.go b/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_nix.go deleted file mode 100644 index c760793..0000000 --- a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_nix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !windows,!norwfs - -package dotgit - -import ( - "os" - - "gopkg.in/src-d/go-billy.v4" -) - -const openAndLockPackedRefsMode = os.O_RDWR - -func (d *DotGit) rewritePackedRefsWhileLocked( - tmp billy.File, pr billy.File) error { - // On non-Windows platforms, we can have atomic rename. - return d.fs.Rename(tmp.Name(), pr.Name()) -} diff --git a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_norwfs.go b/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_norwfs.go deleted file mode 100644 index 6e43b42..0000000 --- a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_norwfs.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build norwfs - -package dotgit - -import ( - "io" - "os" - - "gopkg.in/src-d/go-billy.v4" -) - -const openAndLockPackedRefsMode = os.O_RDONLY - -// Instead of renaming that can not be supported in simpler filesystems -// a full copy is done. -func (d *DotGit) rewritePackedRefsWhileLocked( - tmp billy.File, pr billy.File) error { - - prWrite, err := d.fs.Create(pr.Name()) - if err != nil { - return err - } - - defer prWrite.Close() - - _, err = tmp.Seek(0, io.SeekStart) - if err != nil { - return err - } - - _, err = io.Copy(prWrite, tmp) - - return err -} diff --git a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_windows.go b/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_windows.go deleted file mode 100644 index 897d2c9..0000000 --- a/storage/filesystem/internal/dotgit/dotgit_rewrite_packed_refs_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build windows,!norwfs - -package dotgit - -import ( - "io" - "os" - - "gopkg.in/src-d/go-billy.v4" -) - -const openAndLockPackedRefsMode = os.O_RDWR - -func (d *DotGit) rewritePackedRefsWhileLocked( - tmp billy.File, pr billy.File) error { - // If we aren't using the bare Windows filesystem as the storage - // layer, we might be able to get away with a rename over a locked - // file. - err := d.fs.Rename(tmp.Name(), pr.Name()) - if err == nil { - return nil - } - - // Otherwise, Windows doesn't let us rename over a locked file, so - // we have to do a straight copy. 
Unfortunately this could result - // in a partially-written file if the process fails before the - // copy completes. - _, err = pr.Seek(0, io.SeekStart) - if err != nil { - return err - } - err = pr.Truncate(0) - if err != nil { - return err - } - _, err = tmp.Seek(0, io.SeekStart) - if err != nil { - return err - } - _, err = io.Copy(pr, tmp) - return err -} diff --git a/storage/filesystem/internal/dotgit/dotgit_setref.go b/storage/filesystem/internal/dotgit/dotgit_setref.go deleted file mode 100644 index d27c1a3..0000000 --- a/storage/filesystem/internal/dotgit/dotgit_setref.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build !norwfs - -package dotgit - -import ( - "os" - - "gopkg.in/src-d/go-git.v4/plumbing" - "gopkg.in/src-d/go-git.v4/utils/ioutil" -) - -func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) { - // If we are not checking an old ref, just truncate the file. - mode := os.O_RDWR | os.O_CREATE - if old == nil { - mode |= os.O_TRUNC - } - - f, err := d.fs.OpenFile(fileName, mode, 0666) - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - - // Lock is unlocked by the deferred Close above. This is because Unlock - // does not imply a fsync and thus there would be a race between - // Unlock+Close and other concurrent writers. Adding Sync to go-billy - // could work, but this is better (and avoids superfluous syncs). - err = f.Lock() - if err != nil { - return err - } - - // this is a no-op to call even when old is nil. - err = d.checkReferenceAndTruncate(f, old) - if err != nil { - return err - } - - _, err = f.Write([]byte(content)) - return err -} diff --git a/storage/filesystem/internal/dotgit/dotgit_setref_norwfs.go b/storage/filesystem/internal/dotgit/dotgit_setref_norwfs.go deleted file mode 100644 index 5695bd3..0000000 --- a/storage/filesystem/internal/dotgit/dotgit_setref_norwfs.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build norwfs - -package dotgit - -import ( - "fmt" - - "gopkg.in/src-d/go-git.v4/plumbing" -) - -// There are some filesystems that don't support opening files in RDWD mode. -// In these filesystems the standard SetRef function can not be used as i -// reads the reference file to check that it's not modified before updating it. -// -// This version of the function writes the reference without extra checks -// making it compatible with these simple filesystems. This is usually not -// a problem as they should be accessed by only one process at a time. -func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) error { - _, err := d.fs.Stat(fileName) - if err == nil && old != nil { - fRead, err := d.fs.Open(fileName) - if err != nil { - return err - } - - ref, err := d.readReferenceFrom(fRead, old.Name().String()) - fRead.Close() - - if err != nil { - return err - } - - if ref.Hash() != old.Hash() { - return fmt.Errorf("reference has changed concurrently") - } - } - - f, err := d.fs.Create(fileName) - if err != nil { - return err - } - - defer f.Close() - - _, err = f.Write([]byte(content)) - return err -} diff --git a/storage/filesystem/internal/dotgit/dotgit_test.go b/storage/filesystem/internal/dotgit/dotgit_test.go deleted file mode 100644 index 7733eef..0000000 --- a/storage/filesystem/internal/dotgit/dotgit_test.go +++ /dev/null @@ -1,683 +0,0 @@ -package dotgit - -import ( - "bufio" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - - "gopkg.in/src-d/go-git.v4/plumbing" - - . 
"gopkg.in/check.v1" - "gopkg.in/src-d/go-billy.v4/osfs" - "gopkg.in/src-d/go-git-fixtures.v3" -) - -func Test(t *testing.T) { TestingT(t) } - -type SuiteDotGit struct { - fixtures.Suite -} - -var _ = Suite(&SuiteDotGit{}) - -func (s *SuiteDotGit) TestInitialize(c *C) { - tmp, err := ioutil.TempDir("", "dot-git") - c.Assert(err, IsNil) - defer os.RemoveAll(tmp) - - fs := osfs.New(tmp) - dir := New(fs) - - err = dir.Initialize() - c.Assert(err, IsNil) - - _, err = fs.Stat(fs.Join("objects", "info")) - c.Assert(err, IsNil) - - _, err = fs.Stat(fs.Join("objects", "pack")) - c.Assert(err, IsNil) - - _, err = fs.Stat(fs.Join("refs", "heads")) - c.Assert(err, IsNil) - - _, err = fs.Stat(fs.Join("refs", "tags")) - c.Assert(err, IsNil) -} - -func (s *SuiteDotGit) TestSetRefs(c *C) { - tmp, err := ioutil.TempDir("", "dot-git") - c.Assert(err, IsNil) - defer os.RemoveAll(tmp) - - fs := osfs.New(tmp) - dir := New(fs) - - firstFoo := plumbing.NewReferenceFromStrings( - "refs/heads/foo", - "e8d3ffab552895c19b9fcf7aa264d277cde33881", - ) - err = dir.SetRef(firstFoo, nil) - - c.Assert(err, IsNil) - - err = dir.SetRef(plumbing.NewReferenceFromStrings( - "refs/heads/symbolic", - "ref: refs/heads/foo", - ), nil) - - c.Assert(err, IsNil) - - err = dir.SetRef(plumbing.NewReferenceFromStrings( - "bar", - "e8d3ffab552895c19b9fcf7aa264d277cde33881", - ), nil) - c.Assert(err, IsNil) - - refs, err := dir.Refs() - c.Assert(err, IsNil) - c.Assert(refs, HasLen, 2) - - ref := findReference(refs, "refs/heads/foo") - c.Assert(ref, NotNil) - c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") - - ref = findReference(refs, "refs/heads/symbolic") - c.Assert(ref, NotNil) - c.Assert(ref.Target().String(), Equals, "refs/heads/foo") - - ref = findReference(refs, "bar") - c.Assert(ref, IsNil) - - ref, err = dir.Ref("refs/heads/foo") - c.Assert(err, IsNil) - c.Assert(ref, NotNil) - c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") - - ref, err = dir.Ref("refs/heads/symbolic") - c.Assert(err, IsNil) - c.Assert(ref, NotNil) - c.Assert(ref.Target().String(), Equals, "refs/heads/foo") - - ref, err = dir.Ref("bar") - c.Assert(err, IsNil) - c.Assert(ref, NotNil) - c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") - - // Check that SetRef with a non-nil `old` works. - err = dir.SetRef(plumbing.NewReferenceFromStrings( - "refs/heads/foo", - "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", - ), firstFoo) - c.Assert(err, IsNil) - - // `firstFoo` is no longer the right `old` reference, so this - // should fail. 
- err = dir.SetRef(plumbing.NewReferenceFromStrings( - "refs/heads/foo", - "6ecf0ef2c2dffb796033e5a02219af86ec6584e5", - ), firstFoo) - c.Assert(err, NotNil) -} - -func (s *SuiteDotGit) TestRefsFromPackedRefs(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - refs, err := dir.Refs() - c.Assert(err, IsNil) - - ref := findReference(refs, "refs/remotes/origin/branch") - c.Assert(ref, NotNil) - c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") - -} - -func (s *SuiteDotGit) TestRefsFromReferenceFile(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - refs, err := dir.Refs() - c.Assert(err, IsNil) - - ref := findReference(refs, "refs/remotes/origin/HEAD") - c.Assert(ref, NotNil) - c.Assert(ref.Type(), Equals, plumbing.SymbolicReference) - c.Assert(string(ref.Target()), Equals, "refs/remotes/origin/master") - -} - -func BenchmarkRefMultipleTimes(b *testing.B) { - fixtures.Init() - fs := fixtures.Basic().ByTag(".git").One().DotGit() - refname := plumbing.ReferenceName("refs/remotes/origin/branch") - - dir := New(fs) - _, err := dir.Ref(refname) - if err != nil { - b.Fatalf("unexpected error: %s", err) - } - - for i := 0; i < b.N; i++ { - _, err := dir.Ref(refname) - if err != nil { - b.Fatalf("unexpected error: %s", err) - } - } -} - -func (s *SuiteDotGit) TestRemoveRefFromReferenceFile(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - name := plumbing.ReferenceName("refs/remotes/origin/HEAD") - err := dir.RemoveRef(name) - c.Assert(err, IsNil) - - refs, err := dir.Refs() - c.Assert(err, IsNil) - - ref := findReference(refs, string(name)) - c.Assert(ref, IsNil) -} - -func (s *SuiteDotGit) TestRemoveRefFromPackedRefs(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - name := plumbing.ReferenceName("refs/remotes/origin/master") - err := dir.RemoveRef(name) - c.Assert(err, IsNil) - - b, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) - c.Assert(err, IsNil) - - c.Assert(string(b), Equals, ""+ - "# pack-refs with: peeled fully-peeled \n"+ - "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\n"+ - "e8d3ffab552895c19b9fcf7aa264d277cde33881 refs/remotes/origin/branch\n") -} - -func (s *SuiteDotGit) TestRemoveRefFromReferenceFileAndPackedRefs(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - // Make a ref file for a ref that's already in `packed-refs`. - err := dir.SetRef(plumbing.NewReferenceFromStrings( - "refs/remotes/origin/branch", - "e8d3ffab552895c19b9fcf7aa264d277cde33881", - ), nil) - - // Make sure it only appears once in the refs list. 
- refs, err := dir.Refs() - c.Assert(err, IsNil) - found := false - for _, ref := range refs { - if ref.Name() == "refs/remotes/origin/branch" { - c.Assert(found, Equals, false) - found = true - } - } - - name := plumbing.ReferenceName("refs/remotes/origin/branch") - err = dir.RemoveRef(name) - c.Assert(err, IsNil) - - b, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) - c.Assert(err, IsNil) - - c.Assert(string(b), Equals, ""+ - "# pack-refs with: peeled fully-peeled \n"+ - "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master\n"+ - "6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/remotes/origin/master\n") - - refs, err = dir.Refs() - c.Assert(err, IsNil) - - ref := findReference(refs, string(name)) - c.Assert(ref, IsNil) -} - -func (s *SuiteDotGit) TestRemoveRefNonExistent(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - packedRefs := filepath.Join(fs.Root(), packedRefsPath) - before, err := ioutil.ReadFile(packedRefs) - c.Assert(err, IsNil) - - name := plumbing.ReferenceName("refs/heads/nonexistent") - err = dir.RemoveRef(name) - c.Assert(err, IsNil) - - after, err := ioutil.ReadFile(packedRefs) - c.Assert(err, IsNil) - - c.Assert(string(before), Equals, string(after)) -} - -func (s *SuiteDotGit) TestRemoveRefInvalidPackedRefs(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - packedRefs := filepath.Join(fs.Root(), packedRefsPath) - brokenContent := "BROKEN STUFF REALLY BROKEN" - - err := ioutil.WriteFile(packedRefs, []byte(brokenContent), os.FileMode(0755)) - c.Assert(err, IsNil) - - name := plumbing.ReferenceName("refs/heads/nonexistent") - err = dir.RemoveRef(name) - c.Assert(err, NotNil) - - after, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) - c.Assert(err, IsNil) - - c.Assert(brokenContent, Equals, string(after)) -} - -func (s *SuiteDotGit) TestRemoveRefInvalidPackedRefs2(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - packedRefs := filepath.Join(fs.Root(), packedRefsPath) - brokenContent := strings.Repeat("a", bufio.MaxScanTokenSize*2) - - err := ioutil.WriteFile(packedRefs, []byte(brokenContent), os.FileMode(0755)) - c.Assert(err, IsNil) - - name := plumbing.ReferenceName("refs/heads/nonexistent") - err = dir.RemoveRef(name) - c.Assert(err, NotNil) - - after, err := ioutil.ReadFile(filepath.Join(fs.Root(), packedRefsPath)) - c.Assert(err, IsNil) - - c.Assert(brokenContent, Equals, string(after)) -} - -func (s *SuiteDotGit) TestRefsFromHEADFile(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - refs, err := dir.Refs() - c.Assert(err, IsNil) - - ref := findReference(refs, "HEAD") - c.Assert(ref, NotNil) - c.Assert(ref.Type(), Equals, plumbing.SymbolicReference) - c.Assert(string(ref.Target()), Equals, "refs/heads/master") -} - -func (s *SuiteDotGit) TestConfig(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - file, err := dir.Config() - c.Assert(err, IsNil) - c.Assert(filepath.Base(file.Name()), Equals, "config") -} - -func (s *SuiteDotGit) TestConfigWriteAndConfig(c *C) { - tmp, err := ioutil.TempDir("", "dot-git") - c.Assert(err, IsNil) - defer os.RemoveAll(tmp) - - fs := osfs.New(tmp) - dir := New(fs) - - f, err := dir.ConfigWriter() - c.Assert(err, IsNil) - - _, err = f.Write([]byte("foo")) - c.Assert(err, IsNil) - - f, err = dir.Config() - c.Assert(err, IsNil) - - cnt, err := ioutil.ReadAll(f) - c.Assert(err, IsNil) - - c.Assert(string(cnt), Equals, "foo") -} - -func (s 
*SuiteDotGit) TestIndex(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - idx, err := dir.Index() - c.Assert(err, IsNil) - c.Assert(idx, NotNil) -} - -func (s *SuiteDotGit) TestIndexWriteAndIndex(c *C) { - tmp, err := ioutil.TempDir("", "dot-git") - c.Assert(err, IsNil) - defer os.RemoveAll(tmp) - - fs := osfs.New(tmp) - dir := New(fs) - - f, err := dir.IndexWriter() - c.Assert(err, IsNil) - - _, err = f.Write([]byte("foo")) - c.Assert(err, IsNil) - - f, err = dir.Index() - c.Assert(err, IsNil) - - cnt, err := ioutil.ReadAll(f) - c.Assert(err, IsNil) - - c.Assert(string(cnt), Equals, "foo") -} - -func (s *SuiteDotGit) TestShallow(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - file, err := dir.Shallow() - c.Assert(err, IsNil) - c.Assert(file, IsNil) -} - -func (s *SuiteDotGit) TestShallowWriteAndShallow(c *C) { - tmp, err := ioutil.TempDir("", "dot-git") - c.Assert(err, IsNil) - defer os.RemoveAll(tmp) - - fs := osfs.New(tmp) - dir := New(fs) - - f, err := dir.ShallowWriter() - c.Assert(err, IsNil) - - _, err = f.Write([]byte("foo")) - c.Assert(err, IsNil) - - f, err = dir.Shallow() - c.Assert(err, IsNil) - - cnt, err := ioutil.ReadAll(f) - c.Assert(err, IsNil) - - c.Assert(string(cnt), Equals, "foo") -} - -func findReference(refs []*plumbing.Reference, name string) *plumbing.Reference { - n := plumbing.ReferenceName(name) - for _, ref := range refs { - if ref.Name() == n { - return ref - } - } - - return nil -} - -func (s *SuiteDotGit) TestObjectPacks(c *C) { - f := fixtures.Basic().ByTag(".git").One() - fs := f.DotGit() - dir := New(fs) - - hashes, err := dir.ObjectPacks() - c.Assert(err, IsNil) - c.Assert(hashes, HasLen, 1) - c.Assert(hashes[0], Equals, f.PackfileHash) - - // Make sure that a random file in the pack directory doesn't - // break everything. 
- badFile, err := fs.Create("objects/pack/OOPS_THIS_IS_NOT_RIGHT.pack") - c.Assert(err, IsNil) - err = badFile.Close() - c.Assert(err, IsNil) - - hashes2, err := dir.ObjectPacks() - c.Assert(err, IsNil) - c.Assert(hashes2, HasLen, 1) - c.Assert(hashes[0], Equals, hashes2[0]) -} - -func (s *SuiteDotGit) TestObjectPack(c *C) { - f := fixtures.Basic().ByTag(".git").One() - fs := f.DotGit() - dir := New(fs) - - pack, err := dir.ObjectPack(f.PackfileHash) - c.Assert(err, IsNil) - c.Assert(filepath.Ext(pack.Name()), Equals, ".pack") -} - -func (s *SuiteDotGit) TestObjectPackIdx(c *C) { - f := fixtures.Basic().ByTag(".git").One() - fs := f.DotGit() - dir := New(fs) - - idx, err := dir.ObjectPackIdx(f.PackfileHash) - c.Assert(err, IsNil) - c.Assert(filepath.Ext(idx.Name()), Equals, ".idx") - c.Assert(idx.Close(), IsNil) -} - -func (s *SuiteDotGit) TestObjectPackNotFound(c *C) { - fs := fixtures.Basic().ByTag(".git").One().DotGit() - dir := New(fs) - - pack, err := dir.ObjectPack(plumbing.ZeroHash) - c.Assert(err, Equals, ErrPackfileNotFound) - c.Assert(pack, IsNil) - - idx, err := dir.ObjectPackIdx(plumbing.ZeroHash) - c.Assert(err, Equals, ErrPackfileNotFound) - c.Assert(idx, IsNil) -} - -func (s *SuiteDotGit) TestNewObject(c *C) { - tmp, err := ioutil.TempDir("", "dot-git") - c.Assert(err, IsNil) - defer os.RemoveAll(tmp) - - fs := osfs.New(tmp) - dir := New(fs) - w, err := dir.NewObject() - c.Assert(err, IsNil) - - err = w.WriteHeader(plumbing.BlobObject, 14) - c.Assert(err, IsNil) - n, err := w.Write([]byte("this is a test")) - c.Assert(err, IsNil) - c.Assert(n, Equals, 14) - - c.Assert(w.Hash().String(), Equals, "a8a940627d132695a9769df883f85992f0ff4a43") - - err = w.Close() - c.Assert(err, IsNil) - - i, err := fs.Stat("objects/a8/a940627d132695a9769df883f85992f0ff4a43") - c.Assert(err, IsNil) - c.Assert(i.Size(), Equals, int64(34)) -} - -func (s *SuiteDotGit) TestObjects(c *C) { - fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() - dir := New(fs) - - hashes, err := dir.Objects() - c.Assert(err, IsNil) - c.Assert(hashes, HasLen, 187) - c.Assert(hashes[0].String(), Equals, "0097821d427a3c3385898eb13b50dcbc8702b8a3") - c.Assert(hashes[1].String(), Equals, "01d5fa556c33743006de7e76e67a2dfcd994ca04") - c.Assert(hashes[2].String(), Equals, "03db8e1fbe133a480f2867aac478fd866686d69e") -} - -func (s *SuiteDotGit) TestObjectsNoFolder(c *C) { - tmp, err := ioutil.TempDir("", "dot-git") - c.Assert(err, IsNil) - defer os.RemoveAll(tmp) - - fs := osfs.New(tmp) - dir := New(fs) - hash, err := dir.Objects() - c.Assert(err, IsNil) - c.Assert(hash, HasLen, 0) -} - -func (s *SuiteDotGit) TestObject(c *C) { - fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() - dir := New(fs) - - hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e") - file, err := dir.Object(hash) - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix( - file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")), - Equals, true, - ) -} - -func (s *SuiteDotGit) TestObjectNotFound(c *C) { - fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() - dir := New(fs) - - hash := plumbing.NewHash("not-found-object") - file, err := dir.Object(hash) - c.Assert(err, NotNil) - c.Assert(file, IsNil) -} - -func (s *SuiteDotGit) TestSubmodules(c *C) { - fs := fixtures.ByTag("submodule").One().DotGit() - dir := New(fs) - - m, err := dir.Module("basic") - c.Assert(err, IsNil) - c.Assert(strings.HasSuffix(m.Root(), m.Join(".git", "modules", "basic")), Equals, true) -} - -func (s *SuiteDotGit) 
TestPackRefs(c *C) { - tmp, err := ioutil.TempDir("", "dot-git") - c.Assert(err, IsNil) - defer os.RemoveAll(tmp) - - fs := osfs.New(tmp) - dir := New(fs) - - err = dir.SetRef(plumbing.NewReferenceFromStrings( - "refs/heads/foo", - "e8d3ffab552895c19b9fcf7aa264d277cde33881", - ), nil) - c.Assert(err, IsNil) - err = dir.SetRef(plumbing.NewReferenceFromStrings( - "refs/heads/bar", - "a8d3ffab552895c19b9fcf7aa264d277cde33881", - ), nil) - c.Assert(err, IsNil) - - refs, err := dir.Refs() - c.Assert(err, IsNil) - c.Assert(refs, HasLen, 2) - looseCount, err := dir.CountLooseRefs() - c.Assert(err, IsNil) - c.Assert(looseCount, Equals, 2) - - err = dir.PackRefs() - c.Assert(err, IsNil) - - // Make sure the refs are still there, but no longer loose. - refs, err = dir.Refs() - c.Assert(err, IsNil) - c.Assert(refs, HasLen, 2) - looseCount, err = dir.CountLooseRefs() - c.Assert(err, IsNil) - c.Assert(looseCount, Equals, 0) - - ref, err := dir.Ref("refs/heads/foo") - c.Assert(err, IsNil) - c.Assert(ref, NotNil) - c.Assert(ref.Hash().String(), Equals, "e8d3ffab552895c19b9fcf7aa264d277cde33881") - ref, err = dir.Ref("refs/heads/bar") - c.Assert(err, IsNil) - c.Assert(ref, NotNil) - c.Assert(ref.Hash().String(), Equals, "a8d3ffab552895c19b9fcf7aa264d277cde33881") - - // Now update one of them, re-pack, and check again. - err = dir.SetRef(plumbing.NewReferenceFromStrings( - "refs/heads/foo", - "b8d3ffab552895c19b9fcf7aa264d277cde33881", - ), nil) - c.Assert(err, IsNil) - looseCount, err = dir.CountLooseRefs() - c.Assert(err, IsNil) - c.Assert(looseCount, Equals, 1) - err = dir.PackRefs() - c.Assert(err, IsNil) - - // Make sure the refs are still there, but no longer loose. - refs, err = dir.Refs() - c.Assert(err, IsNil) - c.Assert(refs, HasLen, 2) - looseCount, err = dir.CountLooseRefs() - c.Assert(err, IsNil) - c.Assert(looseCount, Equals, 0) - - ref, err = dir.Ref("refs/heads/foo") - c.Assert(err, IsNil) - c.Assert(ref, NotNil) - c.Assert(ref.Hash().String(), Equals, "b8d3ffab552895c19b9fcf7aa264d277cde33881") -} - -func (s *SuiteDotGit) TestAlternates(c *C) { - tmp, err := ioutil.TempDir("", "dot-git") - c.Assert(err, IsNil) - defer os.RemoveAll(tmp) - - // Create a new billy fs. - fs := osfs.New(tmp) - - // Create a new dotgit object and initialize. - dir := New(fs) - err = dir.Initialize() - c.Assert(err, IsNil) - - // Create alternates file. - altpath := filepath.Join("objects", "info", "alternates") - f, err := fs.Create(altpath) - c.Assert(err, IsNil) - - // Multiple alternates. - var strContent string - if runtime.GOOS == "windows" { - strContent = "C:\\Users\\username\\repo1\\.git\\objects\r\n..\\..\\..\\rep2\\.git\\objects" - } else { - strContent = "/Users/username/rep1//.git/objects\n../../../rep2//.git/objects" - } - content := []byte(strContent) - f.Write(content) - f.Close() - - dotgits, err := dir.Alternates() - c.Assert(err, IsNil) - if runtime.GOOS == "windows" { - c.Assert(dotgits[0].fs.Root(), Equals, "C:\\Users\\username\\repo1\\.git") - } else { - c.Assert(dotgits[0].fs.Root(), Equals, "/Users/username/rep1/.git") - } - - // For relative path: - // /some/absolute/path/to/dot-git -> /some/absolute/path - pathx := strings.Split(tmp, string(filepath.Separator)) - pathx = pathx[:len(pathx)-2] - // Use string.Join() to avoid malformed absolutepath on windows - // C:Users\\User\\... instead of C:\\Users\\appveyor\\... . 
- resolvedPath := strings.Join(pathx, string(filepath.Separator)) - // Append the alternate path to the resolvedPath - expectedPath := filepath.Join(string(filepath.Separator), resolvedPath, "rep2", ".git") - if runtime.GOOS == "windows" { - expectedPath = filepath.Join(resolvedPath, "rep2", ".git") - } - c.Assert(dotgits[1].fs.Root(), Equals, expectedPath) -} diff --git a/storage/filesystem/internal/dotgit/writers.go b/storage/filesystem/internal/dotgit/writers.go deleted file mode 100644 index c2b420f..0000000 --- a/storage/filesystem/internal/dotgit/writers.go +++ /dev/null @@ -1,282 +0,0 @@ -package dotgit - -import ( - "fmt" - "io" - "sync/atomic" - - "gopkg.in/src-d/go-git.v4/plumbing" - "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" - "gopkg.in/src-d/go-git.v4/plumbing/format/objfile" - "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" - - "gopkg.in/src-d/go-billy.v4" -) - -// PackWriter is a io.Writer that generates the packfile index simultaneously, -// a packfile.Decoder is used with a file reader to read the file being written -// this operation is synchronized with the write operations. -// The packfile is written in a temp file, when Close is called this file -// is renamed/moved (depends on the Filesystem implementation) to the final -// location, if the PackWriter is not used, nothing is written -type PackWriter struct { - Notify func(plumbing.Hash, *packfile.Index) - - fs billy.Filesystem - fr, fw billy.File - synced *syncedReader - checksum plumbing.Hash - index *packfile.Index - result chan error -} - -func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { - fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_") - if err != nil { - return nil, err - } - - fr, err := fs.Open(fw.Name()) - if err != nil { - return nil, err - } - - writer := &PackWriter{ - fs: fs, - fw: fw, - fr: fr, - synced: newSyncedReader(fw, fr), - result: make(chan error), - } - - go writer.buildIndex() - return writer, nil -} - -func (w *PackWriter) buildIndex() { - s := packfile.NewScanner(w.synced) - d, err := packfile.NewDecoder(s, nil) - if err != nil { - w.result <- err - return - } - - checksum, err := d.Decode() - if err != nil { - w.result <- err - return - } - - w.checksum = checksum - w.index = d.Index() - w.result <- err -} - -// waitBuildIndex waits until buildIndex function finishes, this can terminate -// with a packfile.ErrEmptyPackfile, this means that nothing was written so we -// ignore the error -func (w *PackWriter) waitBuildIndex() error { - err := <-w.result - if err == packfile.ErrEmptyPackfile { - return nil - } - - return err -} - -func (w *PackWriter) Write(p []byte) (int, error) { - return w.synced.Write(p) -} - -// Close closes all the file descriptors and save the final packfile, if nothing -// was written, the tempfiles are deleted without writing a packfile. 
-func (w *PackWriter) Close() error { - defer func() { - if w.Notify != nil && w.index != nil && w.index.Size() > 0 { - w.Notify(w.checksum, w.index) - } - - close(w.result) - }() - - if err := w.synced.Close(); err != nil { - return err - } - - if err := w.waitBuildIndex(); err != nil { - return err - } - - if err := w.fr.Close(); err != nil { - return err - } - - if err := w.fw.Close(); err != nil { - return err - } - - if w.index == nil || w.index.Size() == 0 { - return w.clean() - } - - return w.save() -} - -func (w *PackWriter) clean() error { - return w.fs.Remove(w.fw.Name()) -} - -func (w *PackWriter) save() error { - base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum)) - idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base)) - if err != nil { - return err - } - - if err := w.encodeIdx(idx); err != nil { - return err - } - - if err := idx.Close(); err != nil { - return err - } - - return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base)) -} - -func (w *PackWriter) encodeIdx(writer io.Writer) error { - idx := w.index.ToIdxFile() - idx.PackfileChecksum = w.checksum - idx.Version = idxfile.VersionSupported - e := idxfile.NewEncoder(writer) - _, err := e.Encode(idx) - return err -} - -type syncedReader struct { - w io.Writer - r io.ReadSeeker - - blocked, done uint32 - written, read uint64 - news chan bool -} - -func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader { - return &syncedReader{ - w: w, - r: r, - news: make(chan bool), - } -} - -func (s *syncedReader) Write(p []byte) (n int, err error) { - defer func() { - written := atomic.AddUint64(&s.written, uint64(n)) - read := atomic.LoadUint64(&s.read) - if written > read { - s.wake() - } - }() - - n, err = s.w.Write(p) - return -} - -func (s *syncedReader) Read(p []byte) (n int, err error) { - defer func() { atomic.AddUint64(&s.read, uint64(n)) }() - - for { - s.sleep() - n, err = s.r.Read(p) - if err == io.EOF && !s.isDone() && n == 0 { - continue - } - - break - } - - return -} - -func (s *syncedReader) isDone() bool { - return atomic.LoadUint32(&s.done) == 1 -} - -func (s *syncedReader) isBlocked() bool { - return atomic.LoadUint32(&s.blocked) == 1 -} - -func (s *syncedReader) wake() { - if s.isBlocked() { - // fmt.Println("wake") - atomic.StoreUint32(&s.blocked, 0) - s.news <- true - } -} - -func (s *syncedReader) sleep() { - read := atomic.LoadUint64(&s.read) - written := atomic.LoadUint64(&s.written) - if read >= written { - atomic.StoreUint32(&s.blocked, 1) - // fmt.Println("sleep", read, written) - <-s.news - } - -} - -func (s *syncedReader) Seek(offset int64, whence int) (int64, error) { - if whence == io.SeekCurrent { - return s.r.Seek(offset, whence) - } - - p, err := s.r.Seek(offset, whence) - atomic.StoreUint64(&s.read, uint64(p)) - - return p, err -} - -func (s *syncedReader) Close() error { - atomic.StoreUint32(&s.done, 1) - close(s.news) - return nil -} - -type ObjectWriter struct { - objfile.Writer - fs billy.Filesystem - f billy.File -} - -func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) { - f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_") - if err != nil { - return nil, err - } - - return &ObjectWriter{ - Writer: (*objfile.NewWriter(f)), - fs: fs, - f: f, - }, nil -} - -func (w *ObjectWriter) Close() error { - if err := w.Writer.Close(); err != nil { - return err - } - - if err := w.f.Close(); err != nil { - return err - } - - return w.save() -} - -func (w *ObjectWriter) save() error { - hash := w.Hash().String() - file := w.fs.Join(objectsPath, 
hash[0:2], hash[2:40]) - - return w.fs.Rename(w.f.Name(), file) -} diff --git a/storage/filesystem/internal/dotgit/writers_test.go b/storage/filesystem/internal/dotgit/writers_test.go deleted file mode 100644 index bf00762..0000000 --- a/storage/filesystem/internal/dotgit/writers_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package dotgit - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "os" - "strconv" - - "gopkg.in/src-d/go-git.v4/plumbing" - "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" - - . "gopkg.in/check.v1" - "gopkg.in/src-d/go-billy.v4/osfs" - "gopkg.in/src-d/go-git-fixtures.v3" -) - -func (s *SuiteDotGit) TestNewObjectPack(c *C) { - f := fixtures.Basic().One() - - dir, err := ioutil.TempDir("", "example") - if err != nil { - log.Fatal(err) - } - - defer os.RemoveAll(dir) - - fs := osfs.New(dir) - dot := New(fs) - - w, err := dot.NewObjectPack() - c.Assert(err, IsNil) - - _, err = io.Copy(w, f.Packfile()) - c.Assert(err, IsNil) - - c.Assert(w.Close(), IsNil) - - pfPath := fmt.Sprintf("objects/pack/pack-%s.pack", f.PackfileHash) - idxPath := fmt.Sprintf("objects/pack/pack-%s.idx", f.PackfileHash) - - stat, err := fs.Stat(pfPath) - c.Assert(err, IsNil) - c.Assert(stat.Size(), Equals, int64(84794)) - - stat, err = fs.Stat(idxPath) - c.Assert(err, IsNil) - c.Assert(stat.Size(), Equals, int64(1940)) - - pf, err := fs.Open(pfPath) - c.Assert(err, IsNil) - pfs := packfile.NewScanner(pf) - _, objects, err := pfs.Header() - c.Assert(err, IsNil) - for i := uint32(0); i < objects; i++ { - _, err := pfs.NextObjectHeader() - if err != nil { - c.Assert(err, IsNil) - break - } - } - c.Assert(pfs.Close(), IsNil) -} - -func (s *SuiteDotGit) TestNewObjectPackUnused(c *C) { - dir, err := ioutil.TempDir("", "example") - if err != nil { - log.Fatal(err) - } - - defer os.RemoveAll(dir) - - fs := osfs.New(dir) - dot := New(fs) - - w, err := dot.NewObjectPack() - c.Assert(err, IsNil) - - c.Assert(w.Close(), IsNil) - - info, err := fs.ReadDir("objects/pack") - c.Assert(err, IsNil) - c.Assert(info, HasLen, 0) - - // check clean up of temporary files - info, err = fs.ReadDir("") - c.Assert(err, IsNil) - for _, fi := range info { - c.Assert(fi.IsDir(), Equals, true) - } -} - -func (s *SuiteDotGit) TestSyncedReader(c *C) { - tmpw, err := ioutil.TempFile("", "example") - c.Assert(err, IsNil) - - tmpr, err := os.Open(tmpw.Name()) - c.Assert(err, IsNil) - - defer func() { - tmpw.Close() - tmpr.Close() - os.Remove(tmpw.Name()) - }() - - synced := newSyncedReader(tmpw, tmpr) - - go func() { - for i := 0; i < 281; i++ { - _, err := synced.Write([]byte(strconv.Itoa(i) + "\n")) - c.Assert(err, IsNil) - } - - synced.Close() - }() - - o, err := synced.Seek(1002, io.SeekStart) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(1002)) - - head := make([]byte, 3) - n, err := io.ReadFull(synced, head) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - c.Assert(string(head), Equals, "278") - - o, err = synced.Seek(1010, io.SeekStart) - c.Assert(err, IsNil) - c.Assert(o, Equals, int64(1010)) - - n, err = io.ReadFull(synced, head) - c.Assert(err, IsNil) - c.Assert(n, Equals, 3) - c.Assert(string(head), Equals, "280") -} - -func (s *SuiteDotGit) TestPackWriterUnusedNotify(c *C) { - dir, err := ioutil.TempDir("", "example") - if err != nil { - c.Assert(err, IsNil) - } - - defer os.RemoveAll(dir) - - fs := osfs.New(dir) - - w, err := newPackWrite(fs) - c.Assert(err, IsNil) - - w.Notify = func(h plumbing.Hash, idx *packfile.Index) { - c.Fatal("unexpected call to PackWriter.Notify") - } - - c.Assert(w.Close(), IsNil) -} diff 
--git a/storage/filesystem/module.go b/storage/filesystem/module.go index 6f3de3f..7c8c8d8 100644 --- a/storage/filesystem/module.go +++ b/storage/filesystem/module.go @@ -2,7 +2,7 @@ package filesystem import ( "gopkg.in/src-d/go-git.v4/storage" - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" ) type ModuleStorage struct { diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 26190fd..54f268a 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -11,7 +11,7 @@ import ( "gopkg.in/src-d/go-git.v4/plumbing/format/objfile" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-git.v4/storage/memory" "gopkg.in/src-d/go-git.v4/utils/ioutil" diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index de8f2b2..4b57a67 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -2,7 +2,7 @@ package filesystem import ( "gopkg.in/src-d/go-git.v4/plumbing" - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" . "gopkg.in/check.v1" "gopkg.in/src-d/go-git-fixtures.v3" diff --git a/storage/filesystem/reference.go b/storage/filesystem/reference.go index 7313f05..a891b83 100644 --- a/storage/filesystem/reference.go +++ b/storage/filesystem/reference.go @@ -3,7 +3,7 @@ package filesystem import ( "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/plumbing/storer" - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" ) type ReferenceStorage struct { diff --git a/storage/filesystem/shallow.go b/storage/filesystem/shallow.go index 173767c..502d406 100644 --- a/storage/filesystem/shallow.go +++ b/storage/filesystem/shallow.go @@ -5,7 +5,7 @@ import ( "fmt" "gopkg.in/src-d/go-git.v4/plumbing" - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-git.v4/utils/ioutil" ) diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index 82b137c..d7aa18b 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -2,7 +2,7 @@ package filesystem import ( - "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-billy.v4" ) -- cgit From ecd2bd553ce223252d9784572fd47bd9f597618e Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Fri, 8 Jun 2018 12:31:15 +0200 Subject: storage: filesystem, make ObjectStorage constructor public Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 5 +++-- storage/filesystem/object_test.go | 10 +++++----- storage/filesystem/storage.go | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 54f268a..9ffe4dc 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -26,7 +26,8 @@ type ObjectStorage struct { index map[plumbing.Hash]*packfile.Index } -func newObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) { +// NewObjectStorage creates a new ObjectStorage with the given .git directory. 
+func NewObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) { s := ObjectStorage{ deltaBaseCache: cache.NewObjectLRUDefault(), dir: dir, @@ -166,7 +167,7 @@ func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (p // Create a new object storage with the DotGit(s) and check for the // required hash object. Skip when not found. for _, dg := range dotgits { - o, oe := newObjectStorage(dg) + o, oe := NewObjectStorage(dg) if oe != nil { continue } diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index 4b57a67..ecd6beb 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -24,7 +24,7 @@ var _ = Suite(&FsSuite{ func (s *FsSuite) TestGetFromObjectFile(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() - o, err := newObjectStorage(dotgit.New(fs)) + o, err := NewObjectStorage(dotgit.New(fs)) c.Assert(err, IsNil) expected := plumbing.NewHash("f3dfe29d268303fc6e1bbce268605fc99573406e") @@ -36,7 +36,7 @@ func (s *FsSuite) TestGetFromObjectFile(c *C) { func (s *FsSuite) TestGetFromPackfile(c *C) { fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() - o, err := newObjectStorage(dotgit.New(fs)) + o, err := NewObjectStorage(dotgit.New(fs)) c.Assert(err, IsNil) expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") @@ -48,7 +48,7 @@ func (s *FsSuite) TestGetFromPackfile(c *C) { func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) { fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit() - o, err := newObjectStorage(dotgit.New(fs)) + o, err := NewObjectStorage(dotgit.New(fs)) c.Assert(err, IsNil) expected := plumbing.NewHash("8d45a34641d73851e01d3754320b33bb5be3c4d3") @@ -65,7 +65,7 @@ func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) { func (s *FsSuite) TestIter(c *C) { fixtures.ByTag(".git").ByTag("packfile").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() - o, err := newObjectStorage(dotgit.New(fs)) + o, err := NewObjectStorage(dotgit.New(fs)) c.Assert(err, IsNil) iter, err := o.IterEncodedObjects(plumbing.AnyObject) @@ -86,7 +86,7 @@ func (s *FsSuite) TestIterWithType(c *C) { fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) { for _, t := range s.Types { fs := f.DotGit() - o, err := newObjectStorage(dotgit.New(fs)) + o, err := NewObjectStorage(dotgit.New(fs)) c.Assert(err, IsNil) iter, err := o.IterEncodedObjects(t) diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index d7aa18b..622bb4a 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -25,7 +25,7 @@ type Storage struct { // NewStorage returns a new Storage backed by a given `fs.Filesystem` func NewStorage(fs billy.Filesystem) (*Storage, error) { dir := dotgit.New(fs) - o, err := newObjectStorage(dir) + o, err := NewObjectStorage(dir) if err != nil { return nil, err } -- cgit From da5d474fb43dffd1b28cd5662b3a5bf7e446cd5c Mon Sep 17 00:00:00 2001 From: "Santiago M. Mola" Date: Fri, 15 Jun 2018 17:20:51 +0200 Subject: storage/filesystem: avoid norwfs build flag norwfs build flag was used to work on filesystems that do not support neither opening a file in read/write mode or renaming a file (e.f. sivafs). This had two problems: - go-git could not be compiled to work properly both with regular filesystems and limited filesystems at the same time. - the norwfs trick was not available on Windows. 
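The replacement this series adopts is a runtime check instead of a build-time flag. The following is a minimal sketch of that idea, not code from the patch: billy.CapabilityCheck, billy.ReadAndWriteCapability, billy.ErrNotSupported and Filesystem.Rename are the go-billy calls used in the diff below, while the package name and helper functions here are illustrative only.

package example

import (
    "os"

    "gopkg.in/src-d/go-billy.v4"
)

// packedRefsOpenFlags picks the packed-refs open mode at runtime: read/write
// where the filesystem supports it, read-only otherwise (e.g. sivafs-like
// limited filesystems).
func packedRefsOpenFlags(fs billy.Filesystem) int {
    if billy.CapabilityCheck(fs, billy.ReadAndWriteCapability) {
        return os.O_RDWR
    }
    return os.O_RDONLY
}

// renameOrFallback tries a plain rename first and only runs the provided copy
// fallback when the filesystem reports that rename is not supported.
func renameOrFallback(fs billy.Filesystem, from, to string, copyFallback func() error) error {
    err := fs.Rename(from, to)
    if err == billy.ErrNotSupported {
        return copyFallback()
    }
    return err
}

With both decisions made at runtime, a single binary can serve regular and limited filesystems alike.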
This PR removes the norwfs build flag, as well as the windows conditional flag on the dotgit package. For the file open mode, we use the new billy capabilities, to check at runtime if the filesystem supports opening a file in read/write mode or not. For the renaming, we just try and fallback to alternative methods if billy.ErrNotSupported is returned. Signed-off-by: Santiago M. Mola --- storage/filesystem/dotgit/dotgit.go | 2 +- .../dotgit/dotgit_rewrite_packed_refs.go | 81 ++++++++++++++++++++++ .../dotgit/dotgit_rewrite_packed_refs_nix.go | 17 ----- .../dotgit/dotgit_rewrite_packed_refs_norwfs.go | 34 --------- .../dotgit/dotgit_rewrite_packed_refs_windows.go | 42 ----------- 5 files changed, 82 insertions(+), 94 deletions(-) create mode 100644 storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go delete mode 100644 storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go delete mode 100644 storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go delete mode 100644 storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index 52b621c..dc12f23 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -469,7 +469,7 @@ func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( // File mode is retrieved from a constant defined in the target specific // files (dotgit_rewrite_packed_refs_*). Some modes are not available // in all filesystems. - openFlags := openAndLockPackedRefsMode + openFlags := d.openAndLockPackedRefsMode() if doCreate { openFlags |= os.O_CREATE } diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go new file mode 100644 index 0000000..7f1c02c --- /dev/null +++ b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go @@ -0,0 +1,81 @@ +package dotgit + +import ( + "io" + "os" + "runtime" + + "gopkg.in/src-d/go-billy.v4" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +func (d *DotGit) openAndLockPackedRefsMode() int { + if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { + return os.O_RDWR + } + + return os.O_RDONLY +} + +func (d *DotGit) rewritePackedRefsWhileLocked( + tmp billy.File, pr billy.File) error { + // Try plain rename. If we aren't using the bare Windows filesystem as the + // storage layer, we might be able to get away with a rename over a locked + // file. + err := d.fs.Rename(tmp.Name(), pr.Name()) + if err == nil { + return nil + } + + // If we are in a filesystem that does not support rename (e.g. sivafs) + // a full copy is done. + if err == billy.ErrNotSupported { + return d.copyNewFile(tmp, pr) + } + + if runtime.GOOS != "windows" { + return err + } + + // Otherwise, Windows doesn't let us rename over a locked file, so + // we have to do a straight copy. Unfortunately this could result + // in a partially-written file if the process fails before the + // copy completes. 
+ return d.copyToExistingFile(tmp, pr) +} + +func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error { + _, err := pr.Seek(0, io.SeekStart) + if err != nil { + return err + } + err = pr.Truncate(0) + if err != nil { + return err + } + _, err = tmp.Seek(0, io.SeekStart) + if err != nil { + return err + } + _, err = io.Copy(pr, tmp) + + return err +} + +func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) { + prWrite, err := d.fs.Create(pr.Name()) + if err != nil { + return err + } + + defer ioutil.CheckClose(prWrite, &err) + + _, err = tmp.Seek(0, io.SeekStart) + if err != nil { + return err + } + + _, err = io.Copy(prWrite, tmp) + + return err +} diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go deleted file mode 100644 index c760793..0000000 --- a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !windows,!norwfs - -package dotgit - -import ( - "os" - - "gopkg.in/src-d/go-billy.v4" -) - -const openAndLockPackedRefsMode = os.O_RDWR - -func (d *DotGit) rewritePackedRefsWhileLocked( - tmp billy.File, pr billy.File) error { - // On non-Windows platforms, we can have atomic rename. - return d.fs.Rename(tmp.Name(), pr.Name()) -} diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go deleted file mode 100644 index 6e43b42..0000000 --- a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build norwfs - -package dotgit - -import ( - "io" - "os" - - "gopkg.in/src-d/go-billy.v4" -) - -const openAndLockPackedRefsMode = os.O_RDONLY - -// Instead of renaming that can not be supported in simpler filesystems -// a full copy is done. -func (d *DotGit) rewritePackedRefsWhileLocked( - tmp billy.File, pr billy.File) error { - - prWrite, err := d.fs.Create(pr.Name()) - if err != nil { - return err - } - - defer prWrite.Close() - - _, err = tmp.Seek(0, io.SeekStart) - if err != nil { - return err - } - - _, err = io.Copy(prWrite, tmp) - - return err -} diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go deleted file mode 100644 index 897d2c9..0000000 --- a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build windows,!norwfs - -package dotgit - -import ( - "io" - "os" - - "gopkg.in/src-d/go-billy.v4" -) - -const openAndLockPackedRefsMode = os.O_RDWR - -func (d *DotGit) rewritePackedRefsWhileLocked( - tmp billy.File, pr billy.File) error { - // If we aren't using the bare Windows filesystem as the storage - // layer, we might be able to get away with a rename over a locked - // file. - err := d.fs.Rename(tmp.Name(), pr.Name()) - if err == nil { - return nil - } - - // Otherwise, Windows doesn't let us rename over a locked file, so - // we have to do a straight copy. Unfortunately this could result - // in a partially-written file if the process fails before the - // copy completes. 
- _, err = pr.Seek(0, io.SeekStart) - if err != nil { - return err - } - err = pr.Truncate(0) - if err != nil { - return err - } - _, err = tmp.Seek(0, io.SeekStart) - if err != nil { - return err - } - _, err = io.Copy(pr, tmp) - return err -} -- cgit From 009f1069a1248c1e9189a9e4c342f6d017156ec4 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Thu, 19 Jul 2018 15:20:10 +0200 Subject: plumbing/format/idxfile: add new Index and MemoryIndex Signed-off-by: Miguel Molina --- storage/filesystem/index.go | 47 -------------------------------------------- storage/filesystem/object.go | 2 +- 2 files changed, 1 insertion(+), 48 deletions(-) delete mode 100644 storage/filesystem/index.go (limited to 'storage') diff --git a/storage/filesystem/index.go b/storage/filesystem/index.go deleted file mode 100644 index 2ebf57e..0000000 --- a/storage/filesystem/index.go +++ /dev/null @@ -1,47 +0,0 @@ -package filesystem - -import ( - "os" - - "gopkg.in/src-d/go-git.v4/plumbing/format/index" - "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" - "gopkg.in/src-d/go-git.v4/utils/ioutil" -) - -type IndexStorage struct { - dir *dotgit.DotGit -} - -func (s *IndexStorage) SetIndex(idx *index.Index) (err error) { - f, err := s.dir.IndexWriter() - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - - e := index.NewEncoder(f) - err = e.Encode(idx) - return err -} - -func (s *IndexStorage) Index() (i *index.Index, err error) { - idx := &index.Index{ - Version: 2, - } - - f, err := s.dir.Index() - if err != nil { - if os.IsNotExist(err) { - return idx, nil - } - - return nil, err - } - - defer ioutil.CheckClose(f, &err) - - d := index.NewDecoder(f) - err = d.Decode(idx) - return idx, err -} diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 9ffe4dc..ef67f50 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -63,7 +63,7 @@ func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { } defer ioutil.CheckClose(f, &err) - idxf := idxfile.NewIdxfile() + idxf := idxfile.NewMemoryIndex() d := idxfile.NewDecoder(f) if err = d.Decode(idxf); err != nil { return err -- cgit From 79f249465b24104b73c9dc220d9098cecdab4d77 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 26 Jul 2018 13:42:51 +0200 Subject: plumbing, storage: integrate new index Now dotgit.PackWriter uses the new packfile.Parser and index. 
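The shape of that integration, as a rough sketch rather than the code added below: an idxfile.Writer acts as an observer for packfile.Parser, and Writer.Index materializes the resulting index. The error-returning NewParser signature matches the later commits in this series, and the helper name here is illustrative.

package example

import (
    "os"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
    "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

// indexPackfile streams a packfile once and produces its checksum plus an
// idxfile.Index, mirroring what PackWriter.buildIndex does after this change.
func indexPackfile(path string) (plumbing.Hash, idxfile.Index, error) {
    f, err := os.Open(path)
    if err != nil {
        return plumbing.ZeroHash, nil, err
    }
    defer f.Close()

    // The writer records the objects the parser reports so the index can be
    // built without a second pass over the packfile.
    w := new(idxfile.Writer)
    p, err := packfile.NewParser(packfile.NewScanner(f), w)
    if err != nil {
        return plumbing.ZeroHash, nil, err
    }

    checksum, err := p.Parse()
    if err != nil {
        return plumbing.ZeroHash, nil, err
    }

    idx, err := w.Index()
    return checksum, idx, err
}

PackWriter then hands the *idxfile.Writer to its Notify callback, and ObjectStorage calls writer.Index() to obtain the idxfile.Index it keeps per packfile.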
Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/writers.go | 33 ++++++++++------------ storage/filesystem/dotgit/writers_test.go | 3 +- storage/filesystem/object.go | 46 +++++++++++++++++++------------ 3 files changed, 46 insertions(+), 36 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/writers.go b/storage/filesystem/dotgit/writers.go index c2b420f..e1ede3c 100644 --- a/storage/filesystem/dotgit/writers.go +++ b/storage/filesystem/dotgit/writers.go @@ -20,13 +20,14 @@ import ( // is renamed/moved (depends on the Filesystem implementation) to the final // location, if the PackWriter is not used, nothing is written type PackWriter struct { - Notify func(plumbing.Hash, *packfile.Index) + Notify func(plumbing.Hash, *idxfile.Writer) fs billy.Filesystem fr, fw billy.File synced *syncedReader checksum plumbing.Hash - index *packfile.Index + parser *packfile.Parser + writer *idxfile.Writer result chan error } @@ -55,20 +56,16 @@ func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { func (w *PackWriter) buildIndex() { s := packfile.NewScanner(w.synced) - d, err := packfile.NewDecoder(s, nil) - if err != nil { - w.result <- err - return - } + w.writer = new(idxfile.Writer) + w.parser = packfile.NewParser(s, w.writer) - checksum, err := d.Decode() + checksum, err := w.parser.Parse() if err != nil { w.result <- err return } w.checksum = checksum - w.index = d.Index() w.result <- err } @@ -92,8 +89,8 @@ func (w *PackWriter) Write(p []byte) (int, error) { // was written, the tempfiles are deleted without writing a packfile. func (w *PackWriter) Close() error { defer func() { - if w.Notify != nil && w.index != nil && w.index.Size() > 0 { - w.Notify(w.checksum, w.index) + if w.Notify != nil && w.writer != nil && w.writer.Finished() { + w.Notify(w.checksum, w.writer) } close(w.result) @@ -115,7 +112,7 @@ func (w *PackWriter) Close() error { return err } - if w.index == nil || w.index.Size() == 0 { + if w.writer == nil || !w.writer.Finished() { return w.clean() } @@ -145,11 +142,13 @@ func (w *PackWriter) save() error { } func (w *PackWriter) encodeIdx(writer io.Writer) error { - idx := w.index.ToIdxFile() - idx.PackfileChecksum = w.checksum - idx.Version = idxfile.VersionSupported + idx, err := w.writer.Index() + if err != nil { + return err + } + e := idxfile.NewEncoder(writer) - _, err := e.Encode(idx) + _, err = e.Encode(idx) return err } @@ -209,7 +208,6 @@ func (s *syncedReader) isBlocked() bool { func (s *syncedReader) wake() { if s.isBlocked() { - // fmt.Println("wake") atomic.StoreUint32(&s.blocked, 0) s.news <- true } @@ -220,7 +218,6 @@ func (s *syncedReader) sleep() { written := atomic.LoadUint64(&s.written) if read >= written { atomic.StoreUint32(&s.blocked, 1) - // fmt.Println("sleep", read, written) <-s.news } diff --git a/storage/filesystem/dotgit/writers_test.go b/storage/filesystem/dotgit/writers_test.go index bf00762..5a5f7b4 100644 --- a/storage/filesystem/dotgit/writers_test.go +++ b/storage/filesystem/dotgit/writers_test.go @@ -9,6 +9,7 @@ import ( "strconv" "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" . 
"gopkg.in/check.v1" @@ -148,7 +149,7 @@ func (s *SuiteDotGit) TestPackWriterUnusedNotify(c *C) { w, err := newPackWrite(fs) c.Assert(err, IsNil) - w.Notify = func(h plumbing.Hash, idx *packfile.Index) { + w.Notify = func(h plumbing.Hash, idx *idxfile.Writer) { c.Fatal("unexpected call to PackWriter.Notify") } diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index ef67f50..b73b309 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -23,7 +23,7 @@ type ObjectStorage struct { deltaBaseCache cache.Object dir *dotgit.DotGit - index map[plumbing.Hash]*packfile.Index + index map[plumbing.Hash]idxfile.Index } // NewObjectStorage creates a new ObjectStorage with the given .git directory. @@ -41,7 +41,7 @@ func (s *ObjectStorage) requireIndex() error { return nil } - s.index = make(map[plumbing.Hash]*packfile.Index) + s.index = make(map[plumbing.Hash]idxfile.Index) packs, err := s.dir.ObjectPacks() if err != nil { return err @@ -69,7 +69,7 @@ func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { return err } - s.index[h] = packfile.NewIndexFromIdxFile(idxf) + s.index[h] = idxf return err } @@ -87,8 +87,11 @@ func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) { return nil, err } - w.Notify = func(h plumbing.Hash, idx *packfile.Index) { - s.index[h] = idx + w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) { + index, err := writer.Index() + if err == nil { + s.index[h] = index + } } return w, nil @@ -278,7 +281,7 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( func (s *ObjectStorage) decodeObjectAt( f billy.File, - idx *packfile.Index, + idx idxfile.Index, offset int64) (plumbing.EncodedObject, error) { if _, err := f.Seek(0, io.SeekStart); err != nil { return nil, err @@ -299,7 +302,7 @@ func (s *ObjectStorage) decodeObjectAt( func (s *ObjectStorage) decodeDeltaObjectAt( f billy.File, - idx *packfile.Index, + idx idxfile.Index, offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) { if _, err := f.Seek(0, io.SeekStart); err != nil { @@ -324,12 +327,10 @@ func (s *ObjectStorage) decodeDeltaObjectAt( case plumbing.REFDeltaObject: base = header.Reference case plumbing.OFSDeltaObject: - e, ok := idx.LookupOffset(uint64(header.OffsetReference)) - if !ok { - return nil, plumbing.ErrObjectNotFound + base, err = idx.FindHash(header.OffsetReference) + if err != nil { + return nil, err } - - base = e.Hash default: return s.decodeObjectAt(f, idx, offset) } @@ -350,8 +351,9 @@ func (s *ObjectStorage) decodeDeltaObjectAt( func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) { for packfile, index := range s.index { - if e, ok := index.LookupHash(h); ok { - return packfile, e.Hash, int64(e.Offset) + offset, err := index.FindOffset(h) + if err == nil { + return packfile, h, offset } } @@ -460,12 +462,22 @@ type packfileIter struct { total uint32 } -func NewPackfileIter(f billy.File, t plumbing.ObjectType) (storer.EncodedObjectIter, error) { +// NewPackfileIter returns a new EncodedObjectIter for the provided packfile +// and object type. 
+func NewPackfileIter( + f billy.File, + t plumbing.ObjectType, +) (storer.EncodedObjectIter, error) { return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), nil, nil) } -func newPackfileIter(f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]struct{}, - index *packfile.Index, cache cache.Object) (storer.EncodedObjectIter, error) { +func newPackfileIter( + f billy.File, + t plumbing.ObjectType, + seen map[plumbing.Hash]struct{}, + index idxfile.Index, + cache cache.Object, +) (storer.EncodedObjectIter, error) { s := packfile.NewScanner(f) _, total, err := s.Header() if err != nil { -- cgit From ffdfb7dbabb78090b27ca29b762b803969c89fd7 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Fri, 20 Jul 2018 15:51:15 +0200 Subject: plumbing: packfile, new Packfile representation Signed-off-by: Miguel Molina --- storage/filesystem/storage.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'storage') diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index 622bb4a..6af906d 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -2,6 +2,9 @@ package filesystem import ( + "fmt" + + "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-billy.v4" @@ -51,3 +54,15 @@ func (s *Storage) Filesystem() billy.Filesystem { func (s *Storage) Init() error { return s.dir.Initialize() } + +type IndexStorage struct { + dir *dotgit.DotGit +} + +func (IndexStorage) SetIndex(*index.Index) error { + return fmt.Errorf("not implemented") +} + +func (IndexStorage) Index() (*index.Index, error) { + return nil, fmt.Errorf("not implemented") +} -- cgit From 3657a32e0ead55601a2af578abecd65dd2d8b64b Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Fri, 27 Jul 2018 12:24:09 +0200 Subject: storage/filesystem: add back IndexStorage Signed-off-by: Javi Fontan --- storage/filesystem/index.go | 47 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 storage/filesystem/index.go (limited to 'storage') diff --git a/storage/filesystem/index.go b/storage/filesystem/index.go new file mode 100644 index 0000000..2ebf57e --- /dev/null +++ b/storage/filesystem/index.go @@ -0,0 +1,47 @@ +package filesystem + +import ( + "os" + + "gopkg.in/src-d/go-git.v4/plumbing/format/index" + "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +type IndexStorage struct { + dir *dotgit.DotGit +} + +func (s *IndexStorage) SetIndex(idx *index.Index) (err error) { + f, err := s.dir.IndexWriter() + if err != nil { + return err + } + + defer ioutil.CheckClose(f, &err) + + e := index.NewEncoder(f) + err = e.Encode(idx) + return err +} + +func (s *IndexStorage) Index() (i *index.Index, err error) { + idx := &index.Index{ + Version: 2, + } + + f, err := s.dir.Index() + if err != nil { + if os.IsNotExist(err) { + return idx, nil + } + + return nil, err + } + + defer ioutil.CheckClose(f, &err) + + d := index.NewDecoder(f) + err = d.Decode(idx) + return idx, err +} -- cgit From ccd0fa0bc17f0680038529b00f5c5a44f8e77b41 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Fri, 27 Jul 2018 15:07:25 +0200 Subject: plumbing: packfile, lazy object reads with DiskObjects Signed-off-by: Miguel Molina --- storage/memory/storage.go | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'storage') diff --git a/storage/memory/storage.go b/storage/memory/storage.go index 2e32509..a950a62 100644 --- a/storage/memory/storage.go +++ 
b/storage/memory/storage.go @@ -91,6 +91,16 @@ type ObjectStorage struct { Tags map[plumbing.Hash]plumbing.EncodedObject } +func NewObjectStorage() *ObjectStorage { + return &ObjectStorage{ + Objects: make(map[plumbing.Hash]plumbing.EncodedObject), + Commits: make(map[plumbing.Hash]plumbing.EncodedObject), + Trees: make(map[plumbing.Hash]plumbing.EncodedObject), + Blobs: make(map[plumbing.Hash]plumbing.EncodedObject), + Tags: make(map[plumbing.Hash]plumbing.EncodedObject), + } +} + func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { return &plumbing.MemoryObject{} } -- cgit From 6f8f2ed229cc88a175d6ea47a53135b6dcef6912 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Fri, 27 Jul 2018 18:17:43 +0200 Subject: storage/filesystem: remove duplicated IndexStorage Signed-off-by: Javi Fontan --- storage/filesystem/storage.go | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index 6af906d..622bb4a 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -2,9 +2,6 @@ package filesystem import ( - "fmt" - - "gopkg.in/src-d/go-git.v4/plumbing/format/index" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-billy.v4" @@ -54,15 +51,3 @@ func (s *Storage) Filesystem() billy.Filesystem { func (s *Storage) Init() error { return s.dir.Initialize() } - -type IndexStorage struct { - dir *dotgit.DotGit -} - -func (IndexStorage) SetIndex(*index.Index) error { - return fmt.Errorf("not implemented") -} - -func (IndexStorage) Index() (*index.Index, error) { - return nil, fmt.Errorf("not implemented") -} -- cgit From 6f7fc05543861ee074aa17f75e1d1b5c1b948d48 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Mon, 30 Jul 2018 17:11:01 +0200 Subject: plumbing: packfile, fix package tests Signed-off-by: Miguel Molina --- storage/memory/storage.go | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'storage') diff --git a/storage/memory/storage.go b/storage/memory/storage.go index a950a62..2e32509 100644 --- a/storage/memory/storage.go +++ b/storage/memory/storage.go @@ -91,16 +91,6 @@ type ObjectStorage struct { Tags map[plumbing.Hash]plumbing.EncodedObject } -func NewObjectStorage() *ObjectStorage { - return &ObjectStorage{ - Objects: make(map[plumbing.Hash]plumbing.EncodedObject), - Commits: make(map[plumbing.Hash]plumbing.EncodedObject), - Trees: make(map[plumbing.Hash]plumbing.EncodedObject), - Blobs: make(map[plumbing.Hash]plumbing.EncodedObject), - Tags: make(map[plumbing.Hash]plumbing.EncodedObject), - } -} - func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { return &plumbing.MemoryObject{} } -- cgit From 6a24b4c1f0cb9e5daf30fa7979f2643a967af1ad Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Tue, 7 Aug 2018 18:41:19 +0200 Subject: *: use parser to populate non writable storages and bug fixes Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 77 +++++++++++++++------------------------ storage/filesystem/object_test.go | 6 ++- 2 files changed, 35 insertions(+), 48 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index b73b309..2032eac 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -12,7 +12,6 @@ import ( "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" - "gopkg.in/src-d/go-git.v4/storage/memory" 
"gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-billy.v4" @@ -282,29 +281,34 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( func (s *ObjectStorage) decodeObjectAt( f billy.File, idx idxfile.Index, - offset int64) (plumbing.EncodedObject, error) { - if _, err := f.Seek(0, io.SeekStart); err != nil { - return nil, err + offset int64, +) (plumbing.EncodedObject, error) { + hash, err := idx.FindHash(offset) + if err == nil { + obj, ok := s.deltaBaseCache.Get(hash) + if ok { + return obj, nil + } } - p := packfile.NewScanner(f) + if err != nil && err != plumbing.ErrObjectNotFound { + return nil, err + } - d, err := packfile.NewDecoderWithCache(p, memory.NewStorage(), - s.deltaBaseCache) + obj, err := packfile.NewPackfile(idx, f).GetByOffset(offset) if err != nil { return nil, err } - d.SetIndex(idx) - obj, err := d.DecodeObjectAt(offset) - return obj, err + return packfile.MemoryObjectFromDisk(obj) } func (s *ObjectStorage) decodeDeltaObjectAt( f billy.File, idx idxfile.Index, offset int64, - hash plumbing.Hash) (plumbing.EncodedObject, error) { + hash plumbing.Hash, +) (plumbing.EncodedObject, error) { if _, err := f.Seek(0, io.SeekStart); err != nil { return nil, err } @@ -453,22 +457,23 @@ func (it *lazyPackfilesIter) Close() { } type packfileIter struct { - f billy.File - d *packfile.Decoder - t plumbing.ObjectType - - seen map[plumbing.Hash]struct{} - position uint32 - total uint32 + iter storer.EncodedObjectIter + seen map[plumbing.Hash]struct{} } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile // and object type. func NewPackfileIter( f billy.File, + idxFile billy.File, t plumbing.ObjectType, ) (storer.EncodedObjectIter, error) { - return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), nil, nil) + idx := idxfile.NewMemoryIndex() + if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { + return nil, err + } + + return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), idx, nil) } func newPackfileIter( @@ -478,47 +483,26 @@ func newPackfileIter( index idxfile.Index, cache cache.Object, ) (storer.EncodedObjectIter, error) { - s := packfile.NewScanner(f) - _, total, err := s.Header() + iter, err := packfile.NewPackfile(index, f).GetByType(t) if err != nil { return nil, err } - d, err := packfile.NewDecoderForType(s, memory.NewStorage(), t, cache) - if err != nil { - return nil, err - } - - d.SetIndex(index) - return &packfileIter{ - f: f, - d: d, - t: t, - - total: total, - seen: seen, + iter: iter, + seen: seen, }, nil } func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { for { - if iter.position >= iter.total { - return nil, io.EOF - } - - obj, err := iter.d.DecodeObject() + obj, err := iter.iter.Next() if err != nil { return nil, err } - iter.position++ - if obj == nil { - continue - } - if _, ok := iter.seen[obj.Hash()]; ok { - return iter.Next() + continue } return obj, nil @@ -531,8 +515,7 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { } func (iter *packfileIter) Close() { - iter.f.Close() - iter.d.Close() + iter.iter.Close() } type objectsIter struct { diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index ecd6beb..ae11c3b 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -115,7 +115,11 @@ func (s *FsSuite) TestPackfileIter(c *C) { for _, h := range ph { f, err := dg.ObjectPack(h) c.Assert(err, IsNil) - iter, err := NewPackfileIter(f, t) + + idxf, err := 
dg.ObjectPackIdx(h) + c.Assert(err, IsNil) + + iter, err := NewPackfileIter(f, idxf, t) c.Assert(err, IsNil) err = iter.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o.Type(), Equals, t) -- cgit From 5889a3b669f0f515ff445aa040afc1e7eeb2bbd1 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Wed, 8 Aug 2018 16:56:20 +0200 Subject: plumbing: packfile, allow non-seekable sources on Parser Signed-off-by: Miguel Molina --- storage/filesystem/dotgit/writers.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/writers.go b/storage/filesystem/dotgit/writers.go index e1ede3c..93d2d8c 100644 --- a/storage/filesystem/dotgit/writers.go +++ b/storage/filesystem/dotgit/writers.go @@ -57,7 +57,12 @@ func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { func (w *PackWriter) buildIndex() { s := packfile.NewScanner(w.synced) w.writer = new(idxfile.Writer) - w.parser = packfile.NewParser(s, w.writer) + var err error + w.parser, err = packfile.NewParser(s, w.writer) + if err != nil { + w.result <- err + return + } checksum, err := w.parser.Parse() if err != nil { -- cgit From 34cc506735ee0cd29362da51592b49a217df8159 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Thu, 9 Aug 2018 12:16:57 +0200 Subject: storage: filesystem, benchmark PackfileIter Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 30 +++++++++++++-- storage/filesystem/object_test.go | 79 +++++++++++++++++++++++++++++++++------ 2 files changed, 94 insertions(+), 15 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 2032eac..4757938 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -509,9 +509,20 @@ func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { } } -// ForEach is never called since is used inside of a MultiObjectIterator func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return nil + for { + o, err := iter.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + if err := cb(o); err != nil { + return err + } + } } func (iter *packfileIter) Close() { @@ -543,9 +554,20 @@ func (iter *objectsIter) Next() (plumbing.EncodedObject, error) { return obj, err } -// ForEach is never called since is used inside of a MultiObjectIterator func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return nil + for { + o, err := iter.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + if err := cb(o); err != nil { + return err + } + } } func (iter *objectsIter) Close() { diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index ae11c3b..0dc19fe 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -1,6 +1,8 @@ package filesystem import ( + "testing" + "gopkg.in/src-d/go-git.v4/plumbing" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" @@ -10,17 +12,16 @@ import ( type FsSuite struct { fixtures.Suite - Types []plumbing.ObjectType } -var _ = Suite(&FsSuite{ - Types: []plumbing.ObjectType{ - plumbing.CommitObject, - plumbing.TagObject, - plumbing.TreeObject, - plumbing.BlobObject, - }, -}) +var objectTypes = []plumbing.ObjectType{ + plumbing.CommitObject, + plumbing.TagObject, + plumbing.TreeObject, + plumbing.BlobObject, +} + +var _ = Suite(&FsSuite{}) func (s *FsSuite) TestGetFromObjectFile(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() @@ 
-84,7 +85,7 @@ func (s *FsSuite) TestIter(c *C) { func (s *FsSuite) TestIterWithType(c *C) { fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) { - for _, t := range s.Types { + for _, t := range objectTypes { fs := f.DotGit() o, err := NewObjectStorage(dotgit.New(fs)) c.Assert(err, IsNil) @@ -108,7 +109,7 @@ func (s *FsSuite) TestPackfileIter(c *C) { fs := f.DotGit() dg := dotgit.New(fs) - for _, t := range s.Types { + for _, t := range objectTypes { ph, err := dg.ObjectPacks() c.Assert(err, IsNil) @@ -132,3 +133,59 @@ func (s *FsSuite) TestPackfileIter(c *C) { }) } + +func BenchmarkPackfileIter(b *testing.B) { + if err := fixtures.Init(); err != nil { + b.Fatal(err) + } + + defer func() { + if err := fixtures.Clean(); err != nil { + b.Fatal(err) + } + }() + + for _, f := range fixtures.ByTag(".git") { + b.Run(f.URL, func(b *testing.B) { + fs := f.DotGit() + dg := dotgit.New(fs) + + for i := 0; i < b.N; i++ { + for _, t := range objectTypes { + ph, err := dg.ObjectPacks() + if err != nil { + b.Fatal(err) + } + + for _, h := range ph { + f, err := dg.ObjectPack(h) + if err != nil { + b.Fatal(err) + } + + idxf, err := dg.ObjectPackIdx(h) + if err != nil { + b.Fatal(err) + } + + iter, err := NewPackfileIter(f, idxf, t) + if err != nil { + b.Fatal(err) + } + + err = iter.ForEach(func(o plumbing.EncodedObject) error { + if o.Type() != t { + b.Errorf("expecting %s, got %s", t, o.Type()) + } + return nil + }) + + if err != nil { + b.Fatal(err) + } + } + } + } + }) + } +} -- cgit From 038cf238e6250094c7aeb387fd7ea92438719699 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Thu, 9 Aug 2018 12:36:37 +0200 Subject: storage: filesystem, close Packfile after iterating objects Signed-off-by: Miguel Molina --- storage/filesystem/object.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 4757938..86d0da9 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -457,12 +457,14 @@ func (it *lazyPackfilesIter) Close() { } type packfileIter struct { + pack billy.File iter storer.EncodedObjectIter seen map[plumbing.Hash]struct{} } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile -// and object type. +// and object type. Packfile and index file will be closed after they're +// used. 
func NewPackfileIter( f billy.File, idxFile billy.File, @@ -473,6 +475,10 @@ func NewPackfileIter( return nil, err } + if err := idxFile.Close(); err != nil { + return nil, err + } + return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), idx, nil) } @@ -489,6 +495,7 @@ func newPackfileIter( } return &packfileIter{ + pack: f, iter: iter, seen: seen, }, nil @@ -514,6 +521,7 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { o, err := iter.Next() if err != nil { if err == io.EOF { + iter.Close() return nil } return err @@ -527,6 +535,7 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { func (iter *packfileIter) Close() { iter.iter.Close() + _ = iter.pack.Close() } type objectsIter struct { -- cgit From d93b3869f366df7488286614b0205968bc6263df Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Thu, 9 Aug 2018 13:11:37 +0200 Subject: storage: filesystem, add PackfileIter benchmark reading object content Signed-off-by: Miguel Molina --- storage/filesystem/object_test.go | 67 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) (limited to 'storage') diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index 0dc19fe..88f22bf 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -1,6 +1,7 @@ package filesystem import ( + "io/ioutil" "testing" "gopkg.in/src-d/go-git.v4/plumbing" @@ -189,3 +190,69 @@ func BenchmarkPackfileIter(b *testing.B) { }) } } + +func BenchmarkPackfileIterReadContent(b *testing.B) { + if err := fixtures.Init(); err != nil { + b.Fatal(err) + } + + defer func() { + if err := fixtures.Clean(); err != nil { + b.Fatal(err) + } + }() + + for _, f := range fixtures.ByTag(".git") { + b.Run(f.URL, func(b *testing.B) { + fs := f.DotGit() + dg := dotgit.New(fs) + + for i := 0; i < b.N; i++ { + for _, t := range objectTypes { + ph, err := dg.ObjectPacks() + if err != nil { + b.Fatal(err) + } + + for _, h := range ph { + f, err := dg.ObjectPack(h) + if err != nil { + b.Fatal(err) + } + + idxf, err := dg.ObjectPackIdx(h) + if err != nil { + b.Fatal(err) + } + + iter, err := NewPackfileIter(f, idxf, t) + if err != nil { + b.Fatal(err) + } + + err = iter.ForEach(func(o plumbing.EncodedObject) error { + if o.Type() != t { + b.Errorf("expecting %s, got %s", t, o.Type()) + } + + r, err := o.Reader() + if err != nil { + b.Fatal(err) + } + + if _, err := ioutil.ReadAll(r); err != nil { + b.Fatal(err) + } + + return r.Close() + }) + + if err != nil { + b.Fatal(err) + } + } + } + } + }) + } +} -- cgit From 56c5e91b158bc4569b38bfd5d27d4b4be5e06a27 Mon Sep 17 00:00:00 2001 From: Miguel Molina Date: Thu, 9 Aug 2018 16:53:00 +0200 Subject: plumbing: packfile, open and close packfile on FSObject reads Signed-off-by: Miguel Molina --- storage/filesystem/dotgit/dotgit.go | 5 +++++ storage/filesystem/object.go | 15 ++++++-------- storage/filesystem/object_test.go | 40 ++++++++++++++++++++++++++++++++++--- 3 files changed, 48 insertions(+), 12 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index dc12f23..af07eb5 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -784,6 +784,11 @@ func (d *DotGit) Alternates() ([]*DotGit, error) { return alternates, nil } +// Fs returns the underlying filesystem of the DotGit folder. 
+func (d *DotGit) Fs() billy.Filesystem { + return d.fs +} + func isHex(s string) bool { for _, b := range []byte(s) { if isNum(b) { diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 86d0da9..6958e32 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -295,12 +295,7 @@ func (s *ObjectStorage) decodeObjectAt( return nil, err } - obj, err := packfile.NewPackfile(idx, f).GetByOffset(offset) - if err != nil { - return nil, err - } - - return packfile.MemoryObjectFromDisk(obj) + return packfile.NewPackfile(idx, s.dir.Fs(), f).GetByOffset(offset) } func (s *ObjectStorage) decodeDeltaObjectAt( @@ -404,7 +399,7 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb if err != nil { return nil, err } - return newPackfileIter(pack, t, seen, s.index[h], s.deltaBaseCache) + return newPackfileIter(s.dir.Fs(), pack, t, seen, s.index[h], s.deltaBaseCache) }, }, nil } @@ -466,6 +461,7 @@ type packfileIter struct { // and object type. Packfile and index file will be closed after they're // used. func NewPackfileIter( + fs billy.Filesystem, f billy.File, idxFile billy.File, t plumbing.ObjectType, @@ -479,17 +475,18 @@ func NewPackfileIter( return nil, err } - return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), idx, nil) + return newPackfileIter(fs, f, t, make(map[plumbing.Hash]struct{}), idx, nil) } func newPackfileIter( + fs billy.Filesystem, f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]struct{}, index idxfile.Index, cache cache.Object, ) (storer.EncodedObjectIter, error) { - iter, err := packfile.NewPackfile(index, f).GetByType(t) + iter, err := packfile.NewPackfile(index, fs, f).GetByType(t) if err != nil { return nil, err } diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index 88f22bf..b1408b7 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -121,7 +121,7 @@ func (s *FsSuite) TestPackfileIter(c *C) { idxf, err := dg.ObjectPackIdx(h) c.Assert(err, IsNil) - iter, err := NewPackfileIter(f, idxf, t) + iter, err := NewPackfileIter(fs, f, idxf, t) c.Assert(err, IsNil) err = iter.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o.Type(), Equals, t) @@ -169,7 +169,7 @@ func BenchmarkPackfileIter(b *testing.B) { b.Fatal(err) } - iter, err := NewPackfileIter(f, idxf, t) + iter, err := NewPackfileIter(fs, f, idxf, t) if err != nil { b.Fatal(err) } @@ -225,7 +225,7 @@ func BenchmarkPackfileIterReadContent(b *testing.B) { b.Fatal(err) } - iter, err := NewPackfileIter(f, idxf, t) + iter, err := NewPackfileIter(fs, f, idxf, t) if err != nil { b.Fatal(err) } @@ -256,3 +256,37 @@ func BenchmarkPackfileIterReadContent(b *testing.B) { }) } } + +func BenchmarkGetObjectFromPackfile(b *testing.B) { + if err := fixtures.Init(); err != nil { + b.Fatal(err) + } + + defer func() { + if err := fixtures.Clean(); err != nil { + b.Fatal(err) + } + }() + + for _, f := range fixtures.Basic() { + b.Run(f.URL, func(b *testing.B) { + fs := f.DotGit() + o, err := NewObjectStorage(dotgit.New(fs)) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < b.N; i++ { + expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") + obj, err := o.EncodedObject(plumbing.AnyObject, expected) + if err != nil { + b.Fatal(err) + } + + if obj.Hash() != expected { + b.Errorf("expecting %s, got %s", expected, obj.Hash()) + } + } + }) + } +} -- cgit From ae5501623169ec981091a5f1cfb56ab8e7688031 Mon Sep 17 00:00:00 2001 From: noxora Date: Tue, 14 Aug 
2018 14:02:26 -0500 Subject: added hook support Signed-off-by: noxora trying a possible fix to the delete test Signed-off-by: noxora still trying to fix this test Signed-off-by: noxora fixes did not work, seems to be a windows env problem Signed-off-by: noxora --- storage/filesystem/dotgit/dotgit.go | 53 ++++++++++++++++++++++++++++++-- storage/filesystem/dotgit/dotgit_test.go | 49 ++++++++++++++++++++++++++++- 2 files changed, 98 insertions(+), 4 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index dc12f23..d16a77d 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -279,19 +279,66 @@ func (d *DotGit) objectPath(h plumbing.Hash) string { return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) } +//incomingObjectPath is intended to add support for a git pre-recieve hook to be written +//it adds support for go-git to find objects in an "incoming" directory, so that the library +//can be used to write a pre-recieve hook that deals with the incoming objects. +//More on git hooks found here : https://git-scm.com/docs/githooks +//More on 'quarantine'/incoming directory here : https://git-scm.com/docs/git-receive-pack +func (d *DotGit) incomingObjectPath(h plumbing.Hash) string { + hString := h.String() + directoryContents, err := d.fs.ReadDir(objectsPath) + if err != nil { + return d.fs.Join(objectsPath, hString[0:2], hString[2:40]) + } + var incomingDirName string + for _, file := range directoryContents { + if strings.Split(file.Name(), "-")[0] == "incoming" && file.IsDir() { + incomingDirName = file.Name() + } + } + if incomingDirName == "" { + return d.fs.Join(objectsPath, hString[0:2], hString[2:40]) + } + return d.fs.Join(objectsPath, incomingDirName, hString[0:2], hString[2:40]) +} + // Object returns a fs.File pointing the object file, if exists func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { - return d.fs.Open(d.objectPath(h)) + obj1, err1 := d.fs.Open(d.objectPath(h)) + if os.IsNotExist(err1) { + obj2, err2 := d.fs.Open(d.incomingObjectPath(h)) + if err2 != nil { + return obj1, err1 + } + return obj2, err2 + } + return obj1, err1 } // ObjectStat returns a os.FileInfo pointing the object file, if exists func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { - return d.fs.Stat(d.objectPath(h)) + obj1, err1 := d.fs.Stat(d.objectPath(h)) + if os.IsNotExist(err1) { + obj2, err2 := d.fs.Stat(d.incomingObjectPath(h)) + if err2 != nil { + return obj1, err1 + } + return obj2, err2 + } + return obj1, err1 } // ObjectDelete removes the object file, if exists func (d *DotGit) ObjectDelete(h plumbing.Hash) error { - return d.fs.Remove(d.objectPath(h)) + err1 := d.fs.Remove(d.objectPath(h)) + if os.IsNotExist(err1) { + err2 := d.fs.Remove(d.incomingObjectPath(h)) + if err2 != nil { + return err1 + } + return err2 + } + return err1 } func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) { diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go index 7733eef..ce30500 100644 --- a/storage/filesystem/dotgit/dotgit_test.go +++ b/storage/filesystem/dotgit/dotgit_test.go @@ -9,11 +9,11 @@ import ( "strings" "testing" + fixtures "gopkg.in/src-d/go-git-fixtures.v3" "gopkg.in/src-d/go-git.v4/plumbing" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/osfs" - "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } @@ -537,6 +537,53 @@ func (s *SuiteDotGit) TestObject(c *C) { file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")), Equals, true, ) + incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash + incomingDirPath := fs.Join("objects", "incoming-123456") + incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40]) + fs.MkdirAll(incomingDirPath, os.FileMode(0755)) + fs.Create(incomingFilePath) + + file, err = dir.Object(plumbing.NewHash(incomingHash)) + c.Assert(err, IsNil) +} + +func (s *SuiteDotGit) TestObjectStat(c *C) { + fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() + dir := New(fs) + + hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e") + _, err := dir.ObjectStat(hash) + c.Assert(err, IsNil) + incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash + incomingDirPath := fs.Join("objects", "incoming-123456") + incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40]) + fs.MkdirAll(incomingDirPath, os.FileMode(0755)) + fs.Create(incomingFilePath) + + _, err = dir.ObjectStat(plumbing.NewHash(incomingHash)) + c.Assert(err, IsNil) +} + +func (s *SuiteDotGit) TestObjectDelete(c *C) { + fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() + dir := New(fs) + + hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e") + err := dir.ObjectDelete(hash) + c.Assert(err, IsNil) + //incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash + //incomingDirPath := fs.Join("objects", "incoming-123456") + //incomingSubDirPath := fs.Join(incomingDirPath, incomingHash[0:2]) + //incomingFilePath := fs.Join(incomingDirPath, incomingHash[2:40]) + //err = fs.MkdirAll(incomingDirPath, os.FileMode(0755)) + //c.Assert(err, IsNil) + //err = fs.MkdirAll(incomingSubDirPath, os.FileMode(0755)) + //c.Assert(err, IsNil) + //_, err = fs.Create(incomingFilePath) + //c.Assert(err, IsNil) + + //err = dir.ObjectDelete(plumbing.NewHash(incomingHash)) + //c.Assert(err, IsNil) } func (s *SuiteDotGit) TestObjectNotFound(c *C) { -- cgit From 8cf7edbc99282245bc8803a322dbf499ab77575d Mon Sep 17 00:00:00 2001 From: "Santiago M. Mola" Date: Fri, 17 Aug 2018 12:18:06 +0200 Subject: dotgit: fix object delete test Signed-off-by: Santiago M. Mola --- storage/filesystem/dotgit/dotgit_test.go | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go index ce30500..64c2aee 100644 --- a/storage/filesystem/dotgit/dotgit_test.go +++ b/storage/filesystem/dotgit/dotgit_test.go @@ -9,11 +9,11 @@ import ( "strings" "testing" - fixtures "gopkg.in/src-d/go-git-fixtures.v3" "gopkg.in/src-d/go-git.v4/plumbing" . 
"gopkg.in/check.v1" "gopkg.in/src-d/go-billy.v4/osfs" + "gopkg.in/src-d/go-git-fixtures.v3" ) func Test(t *testing.T) { TestingT(t) } @@ -571,19 +571,23 @@ func (s *SuiteDotGit) TestObjectDelete(c *C) { hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e") err := dir.ObjectDelete(hash) c.Assert(err, IsNil) - //incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash - //incomingDirPath := fs.Join("objects", "incoming-123456") - //incomingSubDirPath := fs.Join(incomingDirPath, incomingHash[0:2]) - //incomingFilePath := fs.Join(incomingDirPath, incomingHash[2:40]) - //err = fs.MkdirAll(incomingDirPath, os.FileMode(0755)) - //c.Assert(err, IsNil) - //err = fs.MkdirAll(incomingSubDirPath, os.FileMode(0755)) - //c.Assert(err, IsNil) - //_, err = fs.Create(incomingFilePath) - //c.Assert(err, IsNil) - - //err = dir.ObjectDelete(plumbing.NewHash(incomingHash)) - //c.Assert(err, IsNil) + + incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" //made up hash + incomingDirPath := fs.Join("objects", "incoming-123456") + incomingSubDirPath := fs.Join(incomingDirPath, incomingHash[0:2]) + incomingFilePath := fs.Join(incomingSubDirPath, incomingHash[2:40]) + + err = fs.MkdirAll(incomingSubDirPath, os.FileMode(0755)) + c.Assert(err, IsNil) + + f, err := fs.Create(incomingFilePath) + c.Assert(err, IsNil) + + err = f.Close() + c.Assert(err, IsNil) + + err = dir.ObjectDelete(plumbing.NewHash(incomingHash)) + c.Assert(err, IsNil) } func (s *SuiteDotGit) TestObjectNotFound(c *C) { -- cgit From 790191ef92ec6382ce65cc30286c901863b3b7a3 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Wed, 22 Aug 2018 16:46:50 +0200 Subject: plumbing, storage: add bases to the common cache After clone only resolved deltas were added to the cache. This caused slowdowns in small repositories where most objects can be held in cache. It also makes packfiles reuse delta cache from the store. Previously it created a new delta cache each time a packfile object was created. This also slowed down a bit accessing objects and had an impact on memory consumption when bases are added to the cache. 
Signed-off-by: Javi Fontan --- storage/filesystem/object.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 6958e32..3a3a2bd 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -295,7 +295,14 @@ func (s *ObjectStorage) decodeObjectAt( return nil, err } - return packfile.NewPackfile(idx, s.dir.Fs(), f).GetByOffset(offset) + var p *packfile.Packfile + if s.deltaBaseCache != nil { + p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache) + } else { + p = packfile.NewPackfile(idx, s.dir.Fs(), f) + } + + return p.GetByOffset(offset) } func (s *ObjectStorage) decodeDeltaObjectAt( @@ -486,7 +493,14 @@ func newPackfileIter( index idxfile.Index, cache cache.Object, ) (storer.EncodedObjectIter, error) { - iter, err := packfile.NewPackfile(index, fs, f).GetByType(t) + var p *packfile.Packfile + if cache != nil { + p = packfile.NewPackfileWithCache(index, fs, f, cache) + } else { + p = packfile.NewPackfile(index, fs, f) + } + + iter, err := p.GetByType(t) if err != nil { return nil, err } -- cgit From c7a4011d78a00bd93a2f82a39bb67c2dda5453f5 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Sat, 25 Aug 2018 19:17:45 +0200 Subject: storage/dotgit: search for incoming dir only once Search for incoming object directory was done once each time objects were accessed. This means a ReadDir of the objects path that is expensive. Now incoming directory is searched the first time an object is accessed and its name kept in DotGit to be reused. Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit.go | 41 +++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 13 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index d0a14ae..addb64c 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -61,6 +61,10 @@ var ( // type is not zero-value-safe, use the New function to initialize it. type DotGit struct { fs billy.Filesystem + + // incoming object directory information + incomingChecked bool + incomingDirName string } // New returns a DotGit value ready to be used. The path argument must @@ -286,26 +290,37 @@ func (d *DotGit) objectPath(h plumbing.Hash) string { //More on 'quarantine'/incoming directory here : https://git-scm.com/docs/git-receive-pack func (d *DotGit) incomingObjectPath(h plumbing.Hash) string { hString := h.String() - directoryContents, err := d.fs.ReadDir(objectsPath) - if err != nil { + + if d.incomingDirName == "" { return d.fs.Join(objectsPath, hString[0:2], hString[2:40]) } - var incomingDirName string - for _, file := range directoryContents { - if strings.Split(file.Name(), "-")[0] == "incoming" && file.IsDir() { - incomingDirName = file.Name() + + return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40]) +} + +// hasIncomingObjects searches for an incoming directory and keeps its name +// so it doesn't have to be found each time an object is accessed. 
+func (d *DotGit) hasIncomingObjects() bool { + if !d.incomingChecked { + directoryContents, err := d.fs.ReadDir(objectsPath) + if err == nil { + for _, file := range directoryContents { + if strings.Split(file.Name(), "-")[0] == "incoming" && file.IsDir() { + d.incomingDirName = file.Name() + } + } } + + d.incomingChecked = true } - if incomingDirName == "" { - return d.fs.Join(objectsPath, hString[0:2], hString[2:40]) - } - return d.fs.Join(objectsPath, incomingDirName, hString[0:2], hString[2:40]) + + return d.incomingDirName != "" } // Object returns a fs.File pointing the object file, if exists func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { obj1, err1 := d.fs.Open(d.objectPath(h)) - if os.IsNotExist(err1) { + if os.IsNotExist(err1) && d.hasIncomingObjects() { obj2, err2 := d.fs.Open(d.incomingObjectPath(h)) if err2 != nil { return obj1, err1 @@ -318,7 +333,7 @@ func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { // ObjectStat returns a os.FileInfo pointing the object file, if exists func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { obj1, err1 := d.fs.Stat(d.objectPath(h)) - if os.IsNotExist(err1) { + if os.IsNotExist(err1) && d.hasIncomingObjects() { obj2, err2 := d.fs.Stat(d.incomingObjectPath(h)) if err2 != nil { return obj1, err1 @@ -331,7 +346,7 @@ func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { // ObjectDelete removes the object file, if exists func (d *DotGit) ObjectDelete(h plumbing.Hash) error { err1 := d.fs.Remove(d.objectPath(h)) - if os.IsNotExist(err1) { + if os.IsNotExist(err1) && d.hasIncomingObjects() { err2 := d.fs.Remove(d.incomingObjectPath(h)) if err2 != nil { return err1 -- cgit From b1da90b0dde34b521cb252bc28c59e4ffd840d1d Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 27 Aug 2018 14:45:24 +0200 Subject: storage/dotgit: use HasPrefix instead of Split Also reformatted function comment and fixed some typos. Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index addb64c..df4f756 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -283,11 +283,14 @@ func (d *DotGit) objectPath(h plumbing.Hash) string { return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) } -//incomingObjectPath is intended to add support for a git pre-recieve hook to be written -//it adds support for go-git to find objects in an "incoming" directory, so that the library -//can be used to write a pre-recieve hook that deals with the incoming objects. -//More on git hooks found here : https://git-scm.com/docs/githooks -//More on 'quarantine'/incoming directory here : https://git-scm.com/docs/git-receive-pack +// incomingObjectPath is intended to add support for a git pre-receive hook +// to be written it adds support for go-git to find objects in an "incoming" +// directory, so that the library can be used to write a pre-receive hook +// that deals with the incoming objects. 
+// +// More on git hooks found here : https://git-scm.com/docs/githooks +// More on 'quarantine'/incoming directory here: +// https://git-scm.com/docs/git-receive-pack func (d *DotGit) incomingObjectPath(h plumbing.Hash) string { hString := h.String() @@ -305,7 +308,7 @@ func (d *DotGit) hasIncomingObjects() bool { directoryContents, err := d.fs.ReadDir(objectsPath) if err == nil { for _, file := range directoryContents { - if strings.Split(file.Name(), "-")[0] == "incoming" && file.IsDir() { + if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() { d.incomingDirName = file.Name() } } -- cgit From 1e1a7d0623459807d6f1e871492147f971f7540c Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 30 Aug 2018 15:29:51 +0200 Subject: git: add Static option to PlainOpen Also adds Static configuration to Storage and DotGit. This option means that the git repository is not expected to be modified while open and enables some optimizations. Each time a file is accessed the storer tries to open an object file for the requested hash. When this is done for a lot of objects it is expensive. With Static option a list of object files is generated the first time an object is accessed and used to check if exists instead of using system calls. A similar optimization is done for packfiles. Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit.go | 182 +++++++++++++++++++++++++++++++++++- storage/filesystem/storage.go | 27 +++++- storage/filesystem/storage_test.go | 18 ++++ 3 files changed, 221 insertions(+), 6 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index df4f756..2048ddc 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -60,18 +60,39 @@ var ( // The DotGit type represents a local git repository on disk. This // type is not zero-value-safe, use the New function to initialize it. type DotGit struct { + DotGitOptions fs billy.Filesystem // incoming object directory information incomingChecked bool incomingDirName string + + objectList []plumbing.Hash + objectMap map[plumbing.Hash]struct{} + packList []plumbing.Hash + packMap map[plumbing.Hash]struct{} +} + +// DotGitOptions holds configuration options for new DotGit objects. +type DotGitOptions struct { + // Static means that the filesystem won't be changed while the repo is open. + Static bool } // New returns a DotGit value ready to be used. The path argument must // be the absolute path of a git repository directory (e.g. // "/foo/bar/.git"). func New(fs billy.Filesystem) *DotGit { - return &DotGit{fs: fs} + return NewWithOptions(fs, DotGitOptions{}) +} + +// NewWithOptions creates a new DotGit and sets non default configuration +// options. See New for complete help. +func NewWithOptions(fs billy.Filesystem, o DotGitOptions) *DotGit { + return &DotGit{ + DotGitOptions: o, + fs: fs, + } } // Initialize creates all the folder scaffolding. @@ -143,11 +164,25 @@ func (d *DotGit) Shallow() (billy.File, error) { // NewObjectPack return a writer for a new packfile, it saves the packfile to // disk and also generates and save the index for the given packfile. 
func (d *DotGit) NewObjectPack() (*PackWriter, error) { + d.cleanPackList() return newPackWrite(d.fs) } // ObjectPacks returns the list of availables packfiles func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { + if !d.Static { + return d.objectPacks() + } + + err := d.genPackList() + if err != nil { + return nil, err + } + + return d.packList, nil +} + +func (d *DotGit) objectPacks() ([]plumbing.Hash, error) { packDir := d.fs.Join(objectsPath, packPath) files, err := d.fs.ReadDir(packDir) if err != nil { @@ -181,6 +216,11 @@ func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string { } func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) { + err := d.hasPack(hash) + if err != nil { + return nil, err + } + pack, err := d.fs.Open(d.objectPackPath(hash, extension)) if err != nil { if os.IsNotExist(err) { @@ -195,15 +235,27 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil // ObjectPack returns a fs.File of the given packfile func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) { + err := d.hasPack(hash) + if err != nil { + return nil, err + } + return d.objectPackOpen(hash, `pack`) } // ObjectPackIdx returns a fs.File of the index file for a given packfile func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) { + err := d.hasPack(hash) + if err != nil { + return nil, err + } + return d.objectPackOpen(hash, `idx`) } func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error { + d.cleanPackList() + path := d.objectPackPath(hash, `pack`) if !t.IsZero() { fi, err := d.fs.Stat(path) @@ -224,12 +276,23 @@ func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) er // NewObject return a writer for a new object file. func (d *DotGit) NewObject() (*ObjectWriter, error) { + d.cleanObjectList() + return newObjectWriter(d.fs) } // Objects returns a slice with the hashes of objects found under the // .git/objects/ directory. func (d *DotGit) Objects() ([]plumbing.Hash, error) { + if d.Static { + err := d.genObjectList() + if err != nil { + return nil, err + } + + return d.objectList, nil + } + var objects []plumbing.Hash err := d.ForEachObjectHash(func(hash plumbing.Hash) error { objects = append(objects, hash) @@ -241,9 +304,29 @@ func (d *DotGit) Objects() ([]plumbing.Hash, error) { return objects, nil } -// Objects returns a slice with the hashes of objects found under the -// .git/objects/ directory. +// ForEachObjectHash iterates over the hashes of objects found under the +// .git/objects/ directory and executes the provided . 
func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { + if !d.Static { + return d.forEachObjectHash(fun) + } + + err := d.genObjectList() + if err != nil { + return err + } + + for _, h := range d.objectList { + err := fun(h) + if err != nil { + return err + } + } + + return nil +} + +func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error { files, err := d.fs.ReadDir(objectsPath) if err != nil { if os.IsNotExist(err) { @@ -278,6 +361,87 @@ func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { return nil } +func (d *DotGit) cleanObjectList() { + d.objectMap = nil + d.objectList = nil +} + +func (d *DotGit) genObjectList() error { + if d.objectMap != nil { + return nil + } + + d.objectMap = make(map[plumbing.Hash]struct{}) + return d.forEachObjectHash(func(h plumbing.Hash) error { + d.objectList = append(d.objectList, h) + d.objectMap[h] = struct{}{} + + return nil + }) +} + +func (d *DotGit) hasObject(h plumbing.Hash) error { + if !d.Static { + return nil + } + + err := d.genObjectList() + if err != nil { + return err + } + + _, ok := d.objectMap[h] + if !ok { + return plumbing.ErrObjectNotFound + } + + return nil +} + +func (d *DotGit) cleanPackList() { + d.packMap = nil + d.packList = nil +} + +func (d *DotGit) genPackList() error { + if d.packMap != nil { + return nil + } + + op, err := d.objectPacks() + if err != nil { + return err + } + + d.packMap = make(map[plumbing.Hash]struct{}) + d.packList = nil + + for _, h := range op { + d.packList = append(d.packList, h) + d.packMap[h] = struct{}{} + } + + return nil +} + +func (d *DotGit) hasPack(h plumbing.Hash) error { + if !d.Static { + return nil + } + + err := d.genPackList() + if err != nil { + return err + } + + _, ok := d.packMap[h] + if !ok { + return ErrPackfileNotFound + } + + return nil +} + func (d *DotGit) objectPath(h plumbing.Hash) string { hash := h.String() return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) @@ -322,6 +486,11 @@ func (d *DotGit) hasIncomingObjects() bool { // Object returns a fs.File pointing the object file, if exists func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { + err := d.hasObject(h) + if err != nil { + return nil, err + } + obj1, err1 := d.fs.Open(d.objectPath(h)) if os.IsNotExist(err1) && d.hasIncomingObjects() { obj2, err2 := d.fs.Open(d.incomingObjectPath(h)) @@ -335,6 +504,11 @@ func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { // ObjectStat returns a os.FileInfo pointing the object file, if exists func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { + err := d.hasObject(h) + if err != nil { + return nil, err + } + obj1, err1 := d.fs.Stat(d.objectPath(h)) if os.IsNotExist(err1) && d.hasIncomingObjects() { obj2, err2 := d.fs.Stat(d.incomingObjectPath(h)) @@ -348,6 +522,8 @@ func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { // ObjectDelete removes the object file, if exists func (d *DotGit) ObjectDelete(h plumbing.Hash) error { + d.cleanObjectList() + err1 := d.fs.Remove(d.objectPath(h)) if os.IsNotExist(err1) && d.hasIncomingObjects() { err2 := d.fs.Remove(d.incomingObjectPath(h)) diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index 622bb4a..a969a1f 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -11,6 +11,8 @@ import ( // standard git format (this is, the .git directory). Zero values of this type // are not safe to use, see the NewStorage function below. 
type Storage struct { + StorageOptions + fs billy.Filesystem dir *dotgit.DotGit @@ -22,17 +24,36 @@ type Storage struct { ModuleStorage } +// StorageOptions holds configuration for the storage. +type StorageOptions struct { + // Static means that the filesystem is not modified while the repo is open. + Static bool +} + // NewStorage returns a new Storage backed by a given `fs.Filesystem` func NewStorage(fs billy.Filesystem) (*Storage, error) { - dir := dotgit.New(fs) + return NewStorageWithOptions(fs, StorageOptions{}) +} + +// NewStorageWithOptions returns a new Storage backed by a given `fs.Filesystem` +func NewStorageWithOptions( + fs billy.Filesystem, + ops StorageOptions, +) (*Storage, error) { + dOps := dotgit.DotGitOptions{ + Static: ops.Static, + } + + dir := dotgit.NewWithOptions(fs, dOps) o, err := NewObjectStorage(dir) if err != nil { return nil, err } return &Storage{ - fs: fs, - dir: dir, + StorageOptions: ops, + fs: fs, + dir: dir, ObjectStorage: o, ReferenceStorage: ReferenceStorage{dir: dir}, diff --git a/storage/filesystem/storage_test.go b/storage/filesystem/storage_test.go index 4d9ba6f..d7ebf71 100644 --- a/storage/filesystem/storage_test.go +++ b/storage/filesystem/storage_test.go @@ -26,6 +26,10 @@ func (s *StorageSuite) SetUpTest(c *C) { storage, err := NewStorage(osfs.New(s.dir)) c.Assert(err, IsNil) + setUpTest(s, c, storage) +} + +func setUpTest(s *StorageSuite, c *C, storage *Storage) { // ensure that right interfaces are implemented var _ storer.EncodedObjectStorer = storage var _ storer.IndexStorer = storage @@ -51,3 +55,17 @@ func (s *StorageSuite) TestNewStorageShouldNotAddAnyContentsToDir(c *C) { c.Assert(err, IsNil) c.Assert(fis, HasLen, 0) } + +type StorageStaticSuite struct { + StorageSuite +} + +var _ = Suite(&StorageStaticSuite{}) + +func (s *StorageStaticSuite) SetUpTest(c *C) { + s.dir = c.MkDir() + storage, err := NewStorageWithOptions(osfs.New(s.dir), StorageOptions{Static: true}) + c.Assert(err, IsNil) + + setUpTest(&s.StorageSuite, c, storage) +} -- cgit From 82945e31dd8bce5fc51d4fd16d696a6d326e5f44 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 30 Aug 2018 18:33:37 +0200 Subject: git, storer: use a common storer.Options for storer and PlainOpen Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit.go | 17 ++++++----------- storage/filesystem/storage.go | 25 ++++++++----------------- storage/filesystem/storage_test.go | 4 +++- 3 files changed, 17 insertions(+), 29 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index 2048ddc..41e5c75 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -14,6 +14,7 @@ import ( "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-billy.v4" @@ -60,7 +61,7 @@ var ( // The DotGit type represents a local git repository on disk. This // type is not zero-value-safe, use the New function to initialize it. type DotGit struct { - DotGitOptions + storer.Options fs billy.Filesystem // incoming object directory information @@ -73,25 +74,19 @@ type DotGit struct { packMap map[plumbing.Hash]struct{} } -// DotGitOptions holds configuration options for new DotGit objects. -type DotGitOptions struct { - // Static means that the filesystem won't be changed while the repo is open. - Static bool -} - // New returns a DotGit value ready to be used. 
The path argument must // be the absolute path of a git repository directory (e.g. // "/foo/bar/.git"). func New(fs billy.Filesystem) *DotGit { - return NewWithOptions(fs, DotGitOptions{}) + return NewWithOptions(fs, storer.Options{}) } // NewWithOptions creates a new DotGit and sets non default configuration // options. See New for complete help. -func NewWithOptions(fs billy.Filesystem, o DotGitOptions) *DotGit { +func NewWithOptions(fs billy.Filesystem, o storer.Options) *DotGit { return &DotGit{ - DotGitOptions: o, - fs: fs, + Options: o, + fs: fs, } } diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index a969a1f..24e6454 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -2,6 +2,7 @@ package filesystem import ( + "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-billy.v4" @@ -11,7 +12,7 @@ import ( // standard git format (this is, the .git directory). Zero values of this type // are not safe to use, see the NewStorage function below. type Storage struct { - StorageOptions + storer.Options fs billy.Filesystem dir *dotgit.DotGit @@ -24,36 +25,26 @@ type Storage struct { ModuleStorage } -// StorageOptions holds configuration for the storage. -type StorageOptions struct { - // Static means that the filesystem is not modified while the repo is open. - Static bool -} - // NewStorage returns a new Storage backed by a given `fs.Filesystem` func NewStorage(fs billy.Filesystem) (*Storage, error) { - return NewStorageWithOptions(fs, StorageOptions{}) + return NewStorageWithOptions(fs, storer.Options{}) } // NewStorageWithOptions returns a new Storage backed by a given `fs.Filesystem` func NewStorageWithOptions( fs billy.Filesystem, - ops StorageOptions, + ops storer.Options, ) (*Storage, error) { - dOps := dotgit.DotGitOptions{ - Static: ops.Static, - } - - dir := dotgit.NewWithOptions(fs, dOps) + dir := dotgit.NewWithOptions(fs, ops) o, err := NewObjectStorage(dir) if err != nil { return nil, err } return &Storage{ - StorageOptions: ops, - fs: fs, - dir: dir, + Options: ops, + fs: fs, + dir: dir, ObjectStorage: o, ReferenceStorage: ReferenceStorage{dir: dir}, diff --git a/storage/filesystem/storage_test.go b/storage/filesystem/storage_test.go index d7ebf71..23628c7 100644 --- a/storage/filesystem/storage_test.go +++ b/storage/filesystem/storage_test.go @@ -64,7 +64,9 @@ var _ = Suite(&StorageStaticSuite{}) func (s *StorageStaticSuite) SetUpTest(c *C) { s.dir = c.MkDir() - storage, err := NewStorageWithOptions(osfs.New(s.dir), StorageOptions{Static: true}) + storage, err := NewStorageWithOptions( + osfs.New(s.dir), + storer.Options{Static: true}) c.Assert(err, IsNil) setUpTest(&s.StorageSuite, c, storage) -- cgit From d7e6cf5b73947108d0c16b9c04b38891de47ef5d Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 30 Aug 2018 18:35:39 +0200 Subject: dotgit: fix typo in comment Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index 41e5c75..c42ed88 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -300,7 +300,7 @@ func (d *DotGit) Objects() ([]plumbing.Hash, error) { } // ForEachObjectHash iterates over the hashes of objects found under the -// .git/objects/ directory and executes the provided . +// .git/objects/ directory and executes the provided function. 
func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { if !d.Static { return d.forEachObjectHash(fun) -- cgit From cf626677508238893c7c88c3c786a02f17afcc4c Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Fri, 31 Aug 2018 14:56:23 +0200 Subject: plumbing/storer: rename Static option to ExclusiveAccess Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit.go | 10 +++++----- storage/filesystem/storage_test.go | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index c42ed88..7626078 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -165,7 +165,7 @@ func (d *DotGit) NewObjectPack() (*PackWriter, error) { // ObjectPacks returns the list of availables packfiles func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { - if !d.Static { + if !d.ExclusiveAccess { return d.objectPacks() } @@ -279,7 +279,7 @@ func (d *DotGit) NewObject() (*ObjectWriter, error) { // Objects returns a slice with the hashes of objects found under the // .git/objects/ directory. func (d *DotGit) Objects() ([]plumbing.Hash, error) { - if d.Static { + if d.ExclusiveAccess { err := d.genObjectList() if err != nil { return nil, err @@ -302,7 +302,7 @@ func (d *DotGit) Objects() ([]plumbing.Hash, error) { // ForEachObjectHash iterates over the hashes of objects found under the // .git/objects/ directory and executes the provided function. func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { - if !d.Static { + if !d.ExclusiveAccess { return d.forEachObjectHash(fun) } @@ -376,7 +376,7 @@ func (d *DotGit) genObjectList() error { } func (d *DotGit) hasObject(h plumbing.Hash) error { - if !d.Static { + if !d.ExclusiveAccess { return nil } @@ -420,7 +420,7 @@ func (d *DotGit) genPackList() error { } func (d *DotGit) hasPack(h plumbing.Hash) error { - if !d.Static { + if !d.ExclusiveAccess { return nil } diff --git a/storage/filesystem/storage_test.go b/storage/filesystem/storage_test.go index 23628c7..11bf4fc 100644 --- a/storage/filesystem/storage_test.go +++ b/storage/filesystem/storage_test.go @@ -66,7 +66,7 @@ func (s *StorageStaticSuite) SetUpTest(c *C) { s.dir = c.MkDir() storage, err := NewStorageWithOptions( osfs.New(s.dir), - storer.Options{Static: true}) + storer.Options{ExclusiveAccess: true}) c.Assert(err, IsNil) setUpTest(&s.StorageSuite, c, storage) -- cgit From 95acbf6c3958b7540a8549aa049051325fcecd8b Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 3 Sep 2018 11:17:22 +0200 Subject: storage/filesystem: make Storage options private Signed-off-by: Javi Fontan --- storage/filesystem/storage.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index 24e6454..d2c5287 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -12,7 +12,7 @@ import ( // standard git format (this is, the .git directory). Zero values of this type // are not safe to use, see the NewStorage function below. 
type Storage struct { - storer.Options + options storer.Options fs billy.Filesystem dir *dotgit.DotGit @@ -42,7 +42,7 @@ func NewStorageWithOptions( } return &Storage{ - Options: ops, + options: ops, fs: fs, dir: dir, -- cgit From 874f669becc25489081306bbbcbbc27b970f6295 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 3 Sep 2018 19:40:22 +0200 Subject: storage/filesystem: move Options to filesytem and dotgit Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit.go | 28 +++++++++++++++++----------- storage/filesystem/object.go | 12 ++++++++++++ storage/filesystem/storage.go | 27 +++++++++++++++++---------- storage/filesystem/storage_test.go | 8 ++++---- 4 files changed, 50 insertions(+), 25 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index 7626078..00dd2a4 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -14,7 +14,6 @@ import ( "gopkg.in/src-d/go-billy.v4/osfs" "gopkg.in/src-d/go-git.v4/plumbing" - "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/utils/ioutil" "gopkg.in/src-d/go-billy.v4" @@ -58,11 +57,18 @@ var ( ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") ) +// Options holds configuration for the storage. +type Options struct { + // ExclusiveAccess means that the filesystem is not modified externally + // while the repo is open. + ExclusiveAccess bool +} + // The DotGit type represents a local git repository on disk. This // type is not zero-value-safe, use the New function to initialize it. type DotGit struct { - storer.Options - fs billy.Filesystem + options Options + fs billy.Filesystem // incoming object directory information incomingChecked bool @@ -78,14 +84,14 @@ type DotGit struct { // be the absolute path of a git repository directory (e.g. // "/foo/bar/.git"). func New(fs billy.Filesystem) *DotGit { - return NewWithOptions(fs, storer.Options{}) + return NewWithOptions(fs, Options{}) } // NewWithOptions creates a new DotGit and sets non default configuration // options. See New for complete help. -func NewWithOptions(fs billy.Filesystem, o storer.Options) *DotGit { +func NewWithOptions(fs billy.Filesystem, o Options) *DotGit { return &DotGit{ - Options: o, + options: o, fs: fs, } } @@ -165,7 +171,7 @@ func (d *DotGit) NewObjectPack() (*PackWriter, error) { // ObjectPacks returns the list of availables packfiles func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { - if !d.ExclusiveAccess { + if !d.options.ExclusiveAccess { return d.objectPacks() } @@ -279,7 +285,7 @@ func (d *DotGit) NewObject() (*ObjectWriter, error) { // Objects returns a slice with the hashes of objects found under the // .git/objects/ directory. func (d *DotGit) Objects() ([]plumbing.Hash, error) { - if d.ExclusiveAccess { + if d.options.ExclusiveAccess { err := d.genObjectList() if err != nil { return nil, err @@ -302,7 +308,7 @@ func (d *DotGit) Objects() ([]plumbing.Hash, error) { // ForEachObjectHash iterates over the hashes of objects found under the // .git/objects/ directory and executes the provided function. 
func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { - if !d.ExclusiveAccess { + if !d.options.ExclusiveAccess { return d.forEachObjectHash(fun) } @@ -376,7 +382,7 @@ func (d *DotGit) genObjectList() error { } func (d *DotGit) hasObject(h plumbing.Hash) error { - if !d.ExclusiveAccess { + if !d.options.ExclusiveAccess { return nil } @@ -420,7 +426,7 @@ func (d *DotGit) genPackList() error { } func (d *DotGit) hasPack(h plumbing.Hash) error { - if !d.ExclusiveAccess { + if !d.options.ExclusiveAccess { return nil } diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 3a3a2bd..3519385 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -18,6 +18,8 @@ import ( ) type ObjectStorage struct { + options Options + // deltaBaseCache is an object cache uses to cache delta's bases when deltaBaseCache cache.Object @@ -27,7 +29,17 @@ type ObjectStorage struct { // NewObjectStorage creates a new ObjectStorage with the given .git directory. func NewObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) { + return NewObjectStorageWithOptions(dir, Options{}) +} + +// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git +// directory and sets its options. +func NewObjectStorageWithOptions( + dir *dotgit.DotGit, + ops Options, +) (ObjectStorage, error) { s := ObjectStorage{ + options: ops, deltaBaseCache: cache.NewObjectLRUDefault(), dir: dir, } diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index d2c5287..25b3653 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -2,7 +2,6 @@ package filesystem import ( - "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-billy.v4" @@ -12,8 +11,6 @@ import ( // standard git format (this is, the .git directory). Zero values of this type // are not safe to use, see the NewStorage function below. type Storage struct { - options storer.Options - fs billy.Filesystem dir *dotgit.DotGit @@ -25,26 +22,36 @@ type Storage struct { ModuleStorage } +// Options holds configuration for the storage. +type Options struct { + // ExclusiveAccess means that the filesystem is not modified externally + // while the repo is open. 
+ ExclusiveAccess bool +} + // NewStorage returns a new Storage backed by a given `fs.Filesystem` func NewStorage(fs billy.Filesystem) (*Storage, error) { - return NewStorageWithOptions(fs, storer.Options{}) + return NewStorageWithOptions(fs, Options{}) } // NewStorageWithOptions returns a new Storage backed by a given `fs.Filesystem` func NewStorageWithOptions( fs billy.Filesystem, - ops storer.Options, + ops Options, ) (*Storage, error) { - dir := dotgit.NewWithOptions(fs, ops) - o, err := NewObjectStorage(dir) + dirOps := dotgit.Options{ + ExclusiveAccess: ops.ExclusiveAccess, + } + + dir := dotgit.NewWithOptions(fs, dirOps) + o, err := NewObjectStorageWithOptions(dir, ops) if err != nil { return nil, err } return &Storage{ - options: ops, - fs: fs, - dir: dir, + fs: fs, + dir: dir, ObjectStorage: o, ReferenceStorage: ReferenceStorage{dir: dir}, diff --git a/storage/filesystem/storage_test.go b/storage/filesystem/storage_test.go index 11bf4fc..7f85ef5 100644 --- a/storage/filesystem/storage_test.go +++ b/storage/filesystem/storage_test.go @@ -56,17 +56,17 @@ func (s *StorageSuite) TestNewStorageShouldNotAddAnyContentsToDir(c *C) { c.Assert(fis, HasLen, 0) } -type StorageStaticSuite struct { +type StorageExclusiveSuite struct { StorageSuite } -var _ = Suite(&StorageStaticSuite{}) +var _ = Suite(&StorageExclusiveSuite{}) -func (s *StorageStaticSuite) SetUpTest(c *C) { +func (s *StorageExclusiveSuite) SetUpTest(c *C) { s.dir = c.MkDir() storage, err := NewStorageWithOptions( osfs.New(s.dir), - storer.Options{ExclusiveAccess: true}) + Options{ExclusiveAccess: true}) c.Assert(err, IsNil) setUpTest(&s.StorageSuite, c, storage) -- cgit From 659ec443b4a975e3adf78f24e59ad69d210d2c0b Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Mon, 3 Sep 2018 20:02:25 +0200 Subject: storage/dotgit: add ExclusiveAccess tests in dotgit This functionality was already tested in storage/filesystem. The coverage tool only takes into account files from the same package of the test. Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit_test.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go index 64c2aee..c34543e 100644 --- a/storage/filesystem/dotgit/dotgit_test.go +++ b/storage/filesystem/dotgit/dotgit_test.go @@ -9,6 +9,7 @@ import ( "strings" "testing" + "gopkg.in/src-d/go-billy.v4" "gopkg.in/src-d/go-git.v4/plumbing" . 
"gopkg.in/check.v1" @@ -424,6 +425,18 @@ func (s *SuiteDotGit) TestObjectPacks(c *C) { fs := f.DotGit() dir := New(fs) + testObjectPacks(c, fs, dir, f) +} + +func (s *SuiteDotGit) TestObjectPacksExclusive(c *C) { + f := fixtures.Basic().ByTag(".git").One() + fs := f.DotGit() + dir := NewWithOptions(fs, Options{ExclusiveAccess: true}) + + testObjectPacks(c, fs, dir, f) +} + +func testObjectPacks(c *C, fs billy.Filesystem, dir *DotGit, f *fixtures.Fixture) { hashes, err := dir.ObjectPacks() c.Assert(err, IsNil) c.Assert(hashes, HasLen, 1) @@ -506,6 +519,17 @@ func (s *SuiteDotGit) TestObjects(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() dir := New(fs) + testObjects(c, fs, dir) +} + +func (s *SuiteDotGit) TestObjectsExclusive(c *C) { + fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() + dir := NewWithOptions(fs, Options{ExclusiveAccess: true}) + + testObjects(c, fs, dir) +} + +func testObjects(c *C, fs billy.Filesystem, dir *DotGit) { hashes, err := dir.Objects() c.Assert(err, IsNil) c.Assert(hashes, HasLen, 187) -- cgit From 6384ab93a2dbac9045ee19099455cbcfe82d0201 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 30 Aug 2018 20:28:40 +0200 Subject: storage/dotgit: add KeepDescriptors option This option maintains packfile file descriptors opened after reading objects from them. It improves performance as it does not have to be opening packfiles each time an object is needed. Also adds Close to EncodedObjectStorer to close all the files manualy. Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit.go | 43 +++++++++++++++++++++++++++++++- storage/filesystem/dotgit/dotgit_test.go | 28 +++++++++++++++++++++ storage/filesystem/object.go | 10 +++++++- storage/filesystem/storage.go | 4 +++ storage/memory/storage.go | 3 +++ 5 files changed, 86 insertions(+), 2 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index 00dd2a4..df5cd10 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -62,6 +62,9 @@ type Options struct { // ExclusiveAccess means that the filesystem is not modified externally // while the repo is open. ExclusiveAccess bool + // KeepDescriptors makes the file descriptors to be reused but they will + // need to be manually closed calling Close(). + KeepDescriptors bool } // The DotGit type represents a local git repository on disk. This @@ -78,6 +81,8 @@ type DotGit struct { objectMap map[plumbing.Hash]struct{} packList []plumbing.Hash packMap map[plumbing.Hash]struct{} + + files map[string]billy.File } // New returns a DotGit value ready to be used. The path argument must @@ -123,6 +128,28 @@ func (d *DotGit) Initialize() error { return nil } +// Close closes all opened files. 
+func (d *DotGit) Close() error { + var firstError error + if d.files != nil { + for _, f := range d.files { + err := f.Close() + if err != nil && firstError == nil { + firstError = err + continue + } + } + + d.files = nil + } + + if firstError != nil { + return firstError + } + + return nil +} + // ConfigWriter returns a file pointer for write to the config file func (d *DotGit) ConfigWriter() (billy.File, error) { return d.fs.Create(configPath) @@ -217,12 +244,22 @@ func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string { } func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) { + if d.files == nil { + d.files = make(map[string]billy.File) + } + err := d.hasPack(hash) if err != nil { return nil, err } - pack, err := d.fs.Open(d.objectPackPath(hash, extension)) + path := d.objectPackPath(hash, extension) + f, ok := d.files[path] + if ok { + return f, nil + } + + pack, err := d.fs.Open(path) if err != nil { if os.IsNotExist(err) { return nil, ErrPackfileNotFound @@ -231,6 +268,10 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil return nil, err } + if d.options.KeepDescriptors && extension == "pack" { + d.files[path] = pack + } + return pack, nil } diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go index c34543e..50f8e64 100644 --- a/storage/filesystem/dotgit/dotgit_test.go +++ b/storage/filesystem/dotgit/dotgit_test.go @@ -465,6 +465,34 @@ func (s *SuiteDotGit) TestObjectPack(c *C) { c.Assert(filepath.Ext(pack.Name()), Equals, ".pack") } +func (s *SuiteDotGit) TestObjectPackWithKeepDescriptors(c *C) { + f := fixtures.Basic().ByTag(".git").One() + fs := f.DotGit() + dir := NewWithOptions(fs, Options{KeepDescriptors: true}) + + pack, err := dir.ObjectPack(f.PackfileHash) + c.Assert(err, IsNil) + c.Assert(filepath.Ext(pack.Name()), Equals, ".pack") + + pack2, err := dir.ObjectPack(f.PackfileHash) + c.Assert(err, IsNil) + c.Assert(pack, Equals, pack2) + + err = dir.Close() + c.Assert(err, IsNil) + + pack2, err = dir.ObjectPack(f.PackfileHash) + c.Assert(err, IsNil) + c.Assert(pack, Not(Equals), pack2) + + err = pack2.Close() + c.Assert(err, IsNil) + + err = dir.Close() + c.Assert(err, NotNil) + +} + func (s *SuiteDotGit) TestObjectPackIdx(c *C) { f := fixtures.Basic().ByTag(".git").One() fs := f.DotGit() diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 3519385..3545e27 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -74,6 +74,7 @@ func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { } defer ioutil.CheckClose(f, &err) + idxf := idxfile.NewMemoryIndex() d := idxfile.NewDecoder(f) if err = d.Decode(idxf); err != nil { @@ -280,7 +281,9 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( return nil, err } - defer ioutil.CheckClose(f, &err) + if !s.options.KeepDescriptors { + defer ioutil.CheckClose(f, &err) + } idx := s.index[pack] if canBeDelta { @@ -423,6 +426,11 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb }, nil } +// Close closes all opened files. 
+func (s *ObjectStorage) Close() error { + return s.dir.Close() +} + type lazyPackfilesIter struct { hashes []plumbing.Hash open func(h plumbing.Hash) (storer.EncodedObjectIter, error) diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index 25b3653..7fae789 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -27,6 +27,9 @@ type Options struct { // ExclusiveAccess means that the filesystem is not modified externally // while the repo is open. ExclusiveAccess bool + // KeepDescriptors makes the file descriptors to be reused but they will + // need to be manually closed calling Close(). + KeepDescriptors bool } // NewStorage returns a new Storage backed by a given `fs.Filesystem` @@ -41,6 +44,7 @@ func NewStorageWithOptions( ) (*Storage, error) { dirOps := dotgit.Options{ ExclusiveAccess: ops.ExclusiveAccess, + KeepDescriptors: ops.KeepDescriptors, } dir := dotgit.NewWithOptions(fs, dirOps) diff --git a/storage/memory/storage.go b/storage/memory/storage.go index 2e32509..3d3b348 100644 --- a/storage/memory/storage.go +++ b/storage/memory/storage.go @@ -183,6 +183,9 @@ func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error { return nil } +func (s *ObjectStorage) Close() error { + return nil +} var errNotSupported = fmt.Errorf("Not supported") -- cgit From 9013dde72d0387a74b728ee336019728ba159d1c Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Wed, 5 Sep 2018 11:01:06 +0200 Subject: storage/filesystem: add KeepDescriptors test Also delete Close from MockObjectStorage and memory storer. Signed-off-by: Javi Fontan --- storage/filesystem/object_test.go | 31 +++++++++++++++++++++++++++++++ storage/memory/storage.go | 3 --- 2 files changed, 31 insertions(+), 3 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index b1408b7..6feb6ae 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -48,6 +48,37 @@ func (s *FsSuite) TestGetFromPackfile(c *C) { }) } +func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) { + fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) { + fs := f.DotGit() + dg := dotgit.NewWithOptions(fs, dotgit.Options{KeepDescriptors: true}) + o, err := NewObjectStorageWithOptions(dg, Options{KeepDescriptors: true}) + c.Assert(err, IsNil) + + expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") + obj, err := o.EncodedObject(plumbing.AnyObject, expected) + c.Assert(err, IsNil) + c.Assert(obj.Hash(), Equals, expected) + + packfiles, err := dg.ObjectPacks() + c.Assert(err, IsNil) + + pack1, err := dg.ObjectPack(packfiles[0]) + c.Assert(err, IsNil) + + err = o.Close() + c.Assert(err, IsNil) + + pack2, err := dg.ObjectPack(packfiles[0]) + c.Assert(err, IsNil) + c.Assert(pack1, Not(Equals), pack2) + + err = o.Close() + c.Assert(err, IsNil) + + }) +} + func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) { fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit() o, err := NewObjectStorage(dotgit.New(fs)) diff --git a/storage/memory/storage.go b/storage/memory/storage.go index 3d3b348..2e32509 100644 --- a/storage/memory/storage.go +++ b/storage/memory/storage.go @@ -183,9 +183,6 @@ func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error { return nil } -func (s *ObjectStorage) Close() error { - 
return nil -} var errNotSupported = fmt.Errorf("Not supported") -- cgit From 8176f084d861891d1846a2d46bf669d0d3463ebd Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 6 Sep 2018 19:09:02 +0200 Subject: storage/filesystem: compare files using offset in test Using equals to compare files it uses diff to do so. This can potentially consume lots of ram. Changed the comparison to use file offsets. If the descriptor is reused the offset is maintained. Signed-off-by: Javi Fontan --- storage/filesystem/dotgit/dotgit_test.go | 15 +++++++++++++-- storage/filesystem/object_test.go | 8 +++++++- 2 files changed, 20 insertions(+), 3 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go index 50f8e64..308c6b7 100644 --- a/storage/filesystem/dotgit/dotgit_test.go +++ b/storage/filesystem/dotgit/dotgit_test.go @@ -474,16 +474,27 @@ func (s *SuiteDotGit) TestObjectPackWithKeepDescriptors(c *C) { c.Assert(err, IsNil) c.Assert(filepath.Ext(pack.Name()), Equals, ".pack") + // Move to an specific offset + pack.Seek(42, os.SEEK_SET) + pack2, err := dir.ObjectPack(f.PackfileHash) c.Assert(err, IsNil) - c.Assert(pack, Equals, pack2) + + // If the file is the same the offset should be the same + offset, err := pack2.Seek(0, os.SEEK_CUR) + c.Assert(err, IsNil) + c.Assert(offset, Equals, int64(42)) err = dir.Close() c.Assert(err, IsNil) pack2, err = dir.ObjectPack(f.PackfileHash) c.Assert(err, IsNil) - c.Assert(pack, Not(Equals), pack2) + + // If the file is opened again its offset should be 0 + offset, err = pack2.Seek(0, os.SEEK_CUR) + c.Assert(err, IsNil) + c.Assert(offset, Equals, int64(0)) err = pack2.Close() c.Assert(err, IsNil) diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index 6feb6ae..4a921a9 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -2,6 +2,7 @@ package filesystem import ( "io/ioutil" + "os" "testing" "gopkg.in/src-d/go-git.v4/plumbing" @@ -66,12 +67,17 @@ func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) { pack1, err := dg.ObjectPack(packfiles[0]) c.Assert(err, IsNil) + pack1.Seek(42, os.SEEK_SET) + err = o.Close() c.Assert(err, IsNil) pack2, err := dg.ObjectPack(packfiles[0]) c.Assert(err, IsNil) - c.Assert(pack1, Not(Equals), pack2) + + offset, err := pack2.Seek(0, os.SEEK_CUR) + c.Assert(err, IsNil) + c.Assert(offset, Equals, int64(0)) err = o.Close() c.Assert(err, IsNil) -- cgit From 8f6b3127c1ff7661113fff2662416c328971a285 Mon Sep 17 00:00:00 2001 From: kuba-- Date: Fri, 7 Sep 2018 09:27:35 +0200 Subject: Expose Storage cache. Signed-off-by: kuba-- --- storage/filesystem/dotgit/dotgit.go | 4 ++-- storage/filesystem/module.go | 3 ++- storage/filesystem/object.go | 29 ++++++++++------------------- storage/filesystem/object_test.go | 25 ++++++++----------------- storage/filesystem/storage.go | 29 ++++++++++++++--------------- storage/filesystem/storage_test.go | 11 +++++------ 6 files changed, 41 insertions(+), 60 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go index df5cd10..a58c248 100644 --- a/storage/filesystem/dotgit/dotgit.go +++ b/storage/filesystem/dotgit/dotgit.go @@ -92,8 +92,8 @@ func New(fs billy.Filesystem) *DotGit { return NewWithOptions(fs, Options{}) } -// NewWithOptions creates a new DotGit and sets non default configuration -// options. See New for complete help. +// NewWithOptions sets non default configuration options. 
+// See New for complete help. func NewWithOptions(fs billy.Filesystem, o Options) *DotGit { return &DotGit{ options: o, diff --git a/storage/filesystem/module.go b/storage/filesystem/module.go index 7c8c8d8..9272206 100644 --- a/storage/filesystem/module.go +++ b/storage/filesystem/module.go @@ -1,6 +1,7 @@ package filesystem import ( + "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/storage" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" ) @@ -15,5 +16,5 @@ func (s *ModuleStorage) Module(name string) (storage.Storer, error) { return nil, err } - return NewStorage(fs) + return NewStorage(fs, cache.NewObjectLRUDefault()), nil } diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 3545e27..9eb085f 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -27,24 +27,18 @@ type ObjectStorage struct { index map[plumbing.Hash]idxfile.Index } -// NewObjectStorage creates a new ObjectStorage with the given .git directory. -func NewObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) { - return NewObjectStorageWithOptions(dir, Options{}) -} - -// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git -// directory and sets its options. -func NewObjectStorageWithOptions( - dir *dotgit.DotGit, - ops Options, -) (ObjectStorage, error) { - s := ObjectStorage{ +// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache. +func NewObjectStorage(dir *dotgit.DotGit, cache cache.Object) *ObjectStorage { + return NewObjectStorageWithOptions(dir, cache, Options{}) +} + +// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options +func NewObjectStorageWithOptions(dir *dotgit.DotGit, cache cache.Object, ops Options) *ObjectStorage { + return &ObjectStorage{ options: ops, - deltaBaseCache: cache.NewObjectLRUDefault(), + deltaBaseCache: cache, dir: dir, } - - return s, nil } func (s *ObjectStorage) requireIndex() error { @@ -182,10 +176,7 @@ func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (p // Create a new object storage with the DotGit(s) and check for the // required hash object. Skip when not found. for _, dg := range dotgits { - o, oe := NewObjectStorage(dg) - if oe != nil { - continue - } + o := NewObjectStorage(dg, s.deltaBaseCache) enobj, enerr := o.EncodedObject(t, h) if enerr != nil { continue diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index 4a921a9..bd4a94b 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -6,6 +6,7 @@ import ( "testing" "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" . 
"gopkg.in/check.v1" @@ -27,8 +28,7 @@ var _ = Suite(&FsSuite{}) func (s *FsSuite) TestGetFromObjectFile(c *C) { fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit() - o, err := NewObjectStorage(dotgit.New(fs)) - c.Assert(err, IsNil) + o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) expected := plumbing.NewHash("f3dfe29d268303fc6e1bbce268605fc99573406e") obj, err := o.EncodedObject(plumbing.AnyObject, expected) @@ -39,8 +39,7 @@ func (s *FsSuite) TestGetFromObjectFile(c *C) { func (s *FsSuite) TestGetFromPackfile(c *C) { fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() - o, err := NewObjectStorage(dotgit.New(fs)) - c.Assert(err, IsNil) + o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") obj, err := o.EncodedObject(plumbing.AnyObject, expected) @@ -53,8 +52,7 @@ func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) { fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() dg := dotgit.NewWithOptions(fs, dotgit.Options{KeepDescriptors: true}) - o, err := NewObjectStorageWithOptions(dg, Options{KeepDescriptors: true}) - c.Assert(err, IsNil) + o := NewObjectStorageWithOptions(dg, cache.NewObjectLRUDefault(), Options{KeepDescriptors: true}) expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") obj, err := o.EncodedObject(plumbing.AnyObject, expected) @@ -87,8 +85,7 @@ func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) { func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) { fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit() - o, err := NewObjectStorage(dotgit.New(fs)) - c.Assert(err, IsNil) + o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) expected := plumbing.NewHash("8d45a34641d73851e01d3754320b33bb5be3c4d3") obj, err := o.getFromPackfile(expected, false) @@ -104,8 +101,7 @@ func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) { func (s *FsSuite) TestIter(c *C) { fixtures.ByTag(".git").ByTag("packfile").Test(c, func(f *fixtures.Fixture) { fs := f.DotGit() - o, err := NewObjectStorage(dotgit.New(fs)) - c.Assert(err, IsNil) + o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) iter, err := o.IterEncodedObjects(plumbing.AnyObject) c.Assert(err, IsNil) @@ -125,8 +121,7 @@ func (s *FsSuite) TestIterWithType(c *C) { fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) { for _, t := range objectTypes { fs := f.DotGit() - o, err := NewObjectStorage(dotgit.New(fs)) - c.Assert(err, IsNil) + o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) iter, err := o.IterEncodedObjects(t) c.Assert(err, IsNil) @@ -308,11 +303,7 @@ func BenchmarkGetObjectFromPackfile(b *testing.B) { for _, f := range fixtures.Basic() { b.Run(f.URL, func(b *testing.B) { fs := f.DotGit() - o, err := NewObjectStorage(dotgit.New(fs)) - if err != nil { - b.Fatal(err) - } - + o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault()) for i := 0; i < b.N; i++ { expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5") obj, err := o.EncodedObject(plumbing.AnyObject, expected) diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go index 7fae789..14a772a 100644 --- a/storage/filesystem/storage.go +++ b/storage/filesystem/storage.go @@ -2,6 +2,7 @@ package filesystem import ( + "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit" "gopkg.in/src-d/go-billy.v4" @@ 
-32,38 +33,35 @@ type Options struct { KeepDescriptors bool } -// NewStorage returns a new Storage backed by a given `fs.Filesystem` -func NewStorage(fs billy.Filesystem) (*Storage, error) { - return NewStorageWithOptions(fs, Options{}) +// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache. +func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage { + return NewStorageWithOptions(fs, cache, Options{}) } -// NewStorageWithOptions returns a new Storage backed by a given `fs.Filesystem` -func NewStorageWithOptions( - fs billy.Filesystem, - ops Options, -) (*Storage, error) { +// NewStorageWithOptions returns a new Storage with extra options, +// backed by a given `fs.Filesystem` and cache. +func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage { dirOps := dotgit.Options{ ExclusiveAccess: ops.ExclusiveAccess, KeepDescriptors: ops.KeepDescriptors, } - dir := dotgit.NewWithOptions(fs, dirOps) - o, err := NewObjectStorageWithOptions(dir, ops) - if err != nil { - return nil, err - } return &Storage{ fs: fs, dir: dir, - ObjectStorage: o, + ObjectStorage: ObjectStorage{ + options: ops, + deltaBaseCache: cache, + dir: dir, + }, ReferenceStorage: ReferenceStorage{dir: dir}, IndexStorage: IndexStorage{dir: dir}, ShallowStorage: ShallowStorage{dir: dir}, ConfigStorage: ConfigStorage{dir: dir}, ModuleStorage: ModuleStorage{dir: dir}, - }, nil + } } // Filesystem returns the underlying filesystem @@ -71,6 +69,7 @@ func (s *Storage) Filesystem() billy.Filesystem { return s.fs } +// Init initializes .git directory func (s *Storage) Init() error { return s.dir.Initialize() } diff --git a/storage/filesystem/storage_test.go b/storage/filesystem/storage_test.go index 7f85ef5..6fa0d90 100644 --- a/storage/filesystem/storage_test.go +++ b/storage/filesystem/storage_test.go @@ -4,6 +4,7 @@ import ( "io/ioutil" "testing" + "gopkg.in/src-d/go-git.v4/plumbing/cache" "gopkg.in/src-d/go-git.v4/plumbing/storer" "gopkg.in/src-d/go-git.v4/storage/test" @@ -23,8 +24,7 @@ var _ = Suite(&StorageSuite{}) func (s *StorageSuite) SetUpTest(c *C) { s.dir = c.MkDir() - storage, err := NewStorage(osfs.New(s.dir)) - c.Assert(err, IsNil) + storage := NewStorage(osfs.New(s.dir), cache.NewObjectLRUDefault()) setUpTest(s, c, storage) } @@ -44,8 +44,7 @@ func setUpTest(s *StorageSuite, c *C, storage *Storage) { func (s *StorageSuite) TestFilesystem(c *C) { fs := memfs.New() - storage, err := NewStorage(fs) - c.Assert(err, IsNil) + storage := NewStorage(fs, cache.NewObjectLRUDefault()) c.Assert(storage.Filesystem(), Equals, fs) } @@ -64,10 +63,10 @@ var _ = Suite(&StorageExclusiveSuite{}) func (s *StorageExclusiveSuite) SetUpTest(c *C) { s.dir = c.MkDir() - storage, err := NewStorageWithOptions( + storage := NewStorageWithOptions( osfs.New(s.dir), + cache.NewObjectLRUDefault(), Options{ExclusiveAccess: true}) - c.Assert(err, IsNil) setUpTest(&s.StorageSuite, c, storage) } -- cgit From 82c7a306db75d083db50dbd41aebebd8bd55081b Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Thu, 20 Sep 2018 17:10:39 +0200 Subject: storage/filesystem: keep packs open in PackfileIter PackfileIter was not taking into account the option KeepDescriptors and was always closing the file. This caused "file already closed" errors when iterating packfiles in with KeepDescriptors active. 
Signed-off-by: Javi Fontan --- storage/filesystem/object.go | 33 ++++++++++++++++++++--------- storage/filesystem/object_test.go | 44 +++++++++++++++++++++++++++++++++++---- 2 files changed, 63 insertions(+), 14 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 9eb085f..4aedacc 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -396,7 +396,10 @@ func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.Encode return storer.NewMultiEncodedObjectIter(iters), nil } -func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumbing.Hash]struct{}) (storer.EncodedObjectIter, error) { +func (s *ObjectStorage) buildPackfileIters( + t plumbing.ObjectType, + seen map[plumbing.Hash]struct{}, +) (storer.EncodedObjectIter, error) { if err := s.requireIndex(); err != nil { return nil, err } @@ -412,7 +415,10 @@ func (s *ObjectStorage) buildPackfileIters(t plumbing.ObjectType, seen map[plumb if err != nil { return nil, err } - return newPackfileIter(s.dir.Fs(), pack, t, seen, s.index[h], s.deltaBaseCache) + return newPackfileIter( + s.dir.Fs(), pack, t, seen, s.index[h], + s.deltaBaseCache, s.options.KeepDescriptors, + ) }, }, nil } @@ -470,9 +476,10 @@ func (it *lazyPackfilesIter) Close() { } type packfileIter struct { - pack billy.File - iter storer.EncodedObjectIter - seen map[plumbing.Hash]struct{} + pack billy.File + iter storer.EncodedObjectIter + seen map[plumbing.Hash]struct{} + keepPack bool } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile @@ -483,6 +490,7 @@ func NewPackfileIter( f billy.File, idxFile billy.File, t plumbing.ObjectType, + keepPack bool, ) (storer.EncodedObjectIter, error) { idx := idxfile.NewMemoryIndex() if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { @@ -493,7 +501,8 @@ func NewPackfileIter( return nil, err } - return newPackfileIter(fs, f, t, make(map[plumbing.Hash]struct{}), idx, nil) + seen := make(map[plumbing.Hash]struct{}) + return newPackfileIter(fs, f, t, seen, idx, nil, keepPack) } func newPackfileIter( @@ -503,6 +512,7 @@ func newPackfileIter( seen map[plumbing.Hash]struct{}, index idxfile.Index, cache cache.Object, + keepPack bool, ) (storer.EncodedObjectIter, error) { var p *packfile.Packfile if cache != nil { @@ -517,9 +527,10 @@ func newPackfileIter( } return &packfileIter{ - pack: f, - iter: iter, - seen: seen, + pack: f, + iter: iter, + seen: seen, + keepPack: keepPack, }, nil } @@ -557,7 +568,9 @@ func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { func (iter *packfileIter) Close() { iter.iter.Close() - _ = iter.pack.Close() + if !iter.keepPack { + _ = iter.pack.Close() + } } type objectsIter struct { diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go index bd4a94b..407abf2 100644 --- a/storage/filesystem/object_test.go +++ b/storage/filesystem/object_test.go @@ -153,18 +153,54 @@ func (s *FsSuite) TestPackfileIter(c *C) { idxf, err := dg.ObjectPackIdx(h) c.Assert(err, IsNil) - iter, err := NewPackfileIter(fs, f, idxf, t) + iter, err := NewPackfileIter(fs, f, idxf, t, false) c.Assert(err, IsNil) + err = iter.ForEach(func(o plumbing.EncodedObject) error { c.Assert(o.Type(), Equals, t) return nil }) - c.Assert(err, IsNil) } } }) +} + +func (s *FsSuite) TestPackfileIterKeepDescriptors(c *C) { + fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) { + fs := f.DotGit() + ops := dotgit.Options{KeepDescriptors: true} + dg := 
dotgit.NewWithOptions(fs, ops) + + for _, t := range objectTypes { + ph, err := dg.ObjectPacks() + c.Assert(err, IsNil) + + for _, h := range ph { + f, err := dg.ObjectPack(h) + c.Assert(err, IsNil) + + idxf, err := dg.ObjectPackIdx(h) + c.Assert(err, IsNil) + iter, err := NewPackfileIter(fs, f, idxf, t, true) + c.Assert(err, IsNil) + + err = iter.ForEach(func(o plumbing.EncodedObject) error { + c.Assert(o.Type(), Equals, t) + return nil + }) + c.Assert(err, IsNil) + + // test twice to check that packfiles are not closed + err = iter.ForEach(func(o plumbing.EncodedObject) error { + c.Assert(o.Type(), Equals, t) + return nil + }) + c.Assert(err, IsNil) + } + } + }) } func BenchmarkPackfileIter(b *testing.B) { @@ -201,7 +237,7 @@ func BenchmarkPackfileIter(b *testing.B) { b.Fatal(err) } - iter, err := NewPackfileIter(fs, f, idxf, t) + iter, err := NewPackfileIter(fs, f, idxf, t, false) if err != nil { b.Fatal(err) } @@ -257,7 +293,7 @@ func BenchmarkPackfileIterReadContent(b *testing.B) { b.Fatal(err) } - iter, err := NewPackfileIter(fs, f, idxf, t) + iter, err := NewPackfileIter(fs, f, idxf, t, false) if err != nil { b.Fatal(err) } -- cgit From a7b0102b83aa86fd299623d35666bfee93fee0c6 Mon Sep 17 00:00:00 2001 From: Javi Fontan Date: Fri, 21 Sep 2018 10:43:43 +0200 Subject: storage/filesystem: add more doc to NewPackfileIter Signed-off-by: Javi Fontan --- storage/filesystem/object.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'storage') diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go index 4aedacc..68bd140 100644 --- a/storage/filesystem/object.go +++ b/storage/filesystem/object.go @@ -476,15 +476,18 @@ func (it *lazyPackfilesIter) Close() { } type packfileIter struct { - pack billy.File - iter storer.EncodedObjectIter - seen map[plumbing.Hash]struct{} + pack billy.File + iter storer.EncodedObjectIter + seen map[plumbing.Hash]struct{} + + // tells whether the pack file should be left open after iteration or not keepPack bool } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile // and object type. Packfile and index file will be closed after they're -// used. +// used. If keepPack is true the packfile won't be closed after the iteration +// finished. func NewPackfileIter( fs billy.Filesystem, f billy.File, -- cgit
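As a rough illustration of where this series ends up (this sketch is not part of any patch above), the snippet below shows how the post-series API could be used: the object cache is now injected by the caller, and the ExclusiveAccess and KeepDescriptors options are passed through NewStorageWithOptions. The repository path and object hash are placeholders, and it assumes that Close and EncodedObject are promoted to *Storage from the embedded ObjectStorage, as the diffs suggest.

// Illustrative sketch only; not taken from the patches. Path and hash are
// placeholders.
package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
	"gopkg.in/src-d/go-git.v4/storage/filesystem"
)

func main() {
	// Filesystem rooted at an existing .git directory (placeholder path).
	fs := osfs.New("/path/to/repo/.git")

	// ExclusiveAccess enables the in-memory object/pack lists; KeepDescriptors
	// keeps packfile descriptors open until Close is called.
	st := filesystem.NewStorageWithOptions(
		fs,
		cache.NewObjectLRUDefault(),
		filesystem.Options{
			ExclusiveAccess: true,
			KeepDescriptors: true,
		},
	)
	// Assumed to be promoted from the embedded ObjectStorage; releases the
	// descriptors kept open by KeepDescriptors.
	defer st.Close()

	// Placeholder hash; EncodedObject also comes from the embedded ObjectStorage.
	h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
	obj, err := st.EncodedObject(plumbing.AnyObject, h)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}

	fmt.Println(obj.Type(), obj.Hash())
}

With KeepDescriptors set, packfile descriptors are reused across reads and iterations, so the deferred Close is what eventually releases them; without the option, files are closed after each use as before.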