author    zeripath <art27@cantab.net>    2021-06-02 10:03:28 +0100
committer GitHub <noreply@github.com>    2021-06-02 11:03:28 +0200
commit    da810275bf682d29a530ed819aff175f47bd7634 (patch)
tree      615e4c2b301532a211e739143b28df313b110dfb /plumbing/format/packfile/packfile.go
parent    db4233e9e8b3b2e37259ed4e7952faaed16218b9 (diff)
download  go-git-da810275bf682d29a530ed819aff175f47bd7634.tar.gz
Revert "plumbing: format/packfile, prevent large objects from being read into memory completely (#303)" (#329)v5.4.2
This reverts commit 720c192831a890d0a36b4c6720b60411fa4a0159.
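
For context, the reverted change taught the packfile storage to hand back a streaming reader for objects above LargeObjectThreshold instead of inflating them into memory and caching them. A caller that only needs to stream an object's contents can still avoid assembling the full byte slice itself by going through the EncodedObject Reader() API, as in this minimal sketch (the repository path and blob hash are placeholders; after this revert the storage layer may still buffer the object internally):

package main

import (
	"io"
	"os"

	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// Placeholder path and hash; substitute a real repository and blob.
	repo, err := git.PlainOpen("/path/to/repo")
	if err != nil {
		panic(err)
	}

	blob, err := repo.BlobObject(plumbing.NewHash("0123456789abcdef0123456789abcdef01234567"))
	if err != nil {
		panic(err)
	}

	// Reader() streams the decompressed contents, so the caller never has
	// to build the whole []byte itself, regardless of how the storage
	// layer buffers the object internally.
	r, err := blob.Reader()
	if err != nil {
		panic(err)
	}
	defer r.Close()

	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}
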
Diffstat (limited to 'plumbing/format/packfile/packfile.go')
-rw-r--r--  plumbing/format/packfile/packfile.go | 73
1 file changed, 0 insertions, 73 deletions
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index 77861d2..ddd7f62 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -32,12 +32,6 @@ var (
 // wrapped in FSObject.
 const smallObjectThreshold = 16 * 1024
 
-// Conversely there are large objects that should not be cached and kept
-// in memory as they're too large to be reasonably cached. Objects larger
-// than this threshold are now always never read into memory to be stored
-// in the cache
-const LargeObjectThreshold = 1024 * 1024
-
 // Packfile allows retrieving information from inside a packfile.
 type Packfile struct {
 	idxfile.Index
@@ -288,37 +282,6 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
 	return obj.Reader()
 }
 
-func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) {
-	switch h.Type {
-	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
-		return p.s.ReadObject()
-	case plumbing.REFDeltaObject:
-		deltaRC, err := p.s.ReadObject()
-		if err != nil {
-			return nil, err
-		}
-		r, err := p.readREFDeltaObjectContent(h, deltaRC)
-		if err != nil {
-			_ = deltaRC.Close()
-			return nil, err
-		}
-		return r, nil
-	case plumbing.OFSDeltaObject:
-		deltaRC, err := p.s.ReadObject()
-		if err != nil {
-			return nil, err
-		}
-		r, err := p.readOFSDeltaObjectContent(h, deltaRC)
-		if err != nil {
-			_ = deltaRC.Close()
-			return nil, err
-		}
-		return r, nil
-	default:
-		return nil, ErrInvalidObject.AddDetails("type %q", h.Type)
-	}
-}
-
 func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
 	var obj = new(plumbing.MemoryObject)
 	obj.SetSize(h.Length)
@@ -371,20 +334,6 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
 	return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
 }
 
-func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.ReadCloser) (io.ReadCloser, error) {
-	var err error
-
-	base, ok := p.cacheGet(h.Reference)
-	if !ok {
-		base, err = p.Get(h.Reference)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return ReaderFromDelta(h, base, deltaRC)
-}
-
 func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
 	var err error
 
@@ -415,28 +364,6 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
 	return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
 }
 
-func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.ReadCloser) (io.ReadCloser, error) {
-	hash, err := p.FindHash(h.OffsetReference)
-	if err != nil {
-		return nil, err
-	}
-
-	base, err := p.objectAtOffset(h.OffsetReference, hash)
-	if err != nil {
-		return nil, err
-	}
-
-	base, ok := p.cacheGet(h.Reference)
-	if !ok {
-		base, err = p.Get(h.Reference)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return ReaderFromDelta(h, base, deltaRC)
-}
-
 func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
 	hash, err := p.FindHash(offset)
 	if err != nil {
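
The hunks above remove the streaming path wholesale. Independent of go-git's internals, the idea being reverted is a simple size gate: objects at or below a threshold are inflated into memory (and can be cached), larger ones are exposed as an io.ReadCloser so they are never held in memory in one piece. A minimal, hypothetical sketch of that pattern follows; the package, the source interface, and the function names are illustrative, not go-git's API:

// Hypothetical illustration of the reverted pattern; not go-git's API.
package objstore

import (
	"bytes"
	"io"
)

// Mirrors the removed LargeObjectThreshold constant (1 MiB).
const largeObjectThreshold = 1024 * 1024

// source is a stand-in for whatever can open an object's raw content
// along with its decompressed size.
type source interface {
	open(id string) (io.ReadCloser, int64, error)
}

// contents returns a reader over an object's data. Small objects are read
// fully (and could be cached); large ones are streamed straight from the
// source so they are never buffered whole.
func contents(s source, id string) (io.ReadCloser, error) {
	rc, size, err := s.open(id)
	if err != nil {
		return nil, err
	}

	if size > largeObjectThreshold {
		return rc, nil // stream; the caller closes it
	}

	defer rc.Close()
	buf, err := io.ReadAll(rc)
	if err != nil {
		return nil, err
	}
	// A real implementation would also store buf in a cache here.
	return io.NopCloser(bytes.NewReader(buf)), nil
}
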