Parse optimisations #602

Merged 2 commits on Nov 7, 2022
15 changes: 13 additions & 2 deletions plumbing/format/objfile/writer.go
@@ -5,6 +5,7 @@ import (
"errors"
"io"
"strconv"
"sync"

"github.com/go-git/go-git/v5/plumbing"
)
@@ -18,9 +19,9 @@ var (
// not close the underlying io.Writer.
type Writer struct {
raw io.Writer
zlib io.WriteCloser
hasher plumbing.Hasher
multi io.Writer
zlib io.WriteCloser

closed bool
pending int64 // number of unwritten bytes
@@ -31,12 +32,21 @@ type Writer struct {
// The returned Writer implements io.WriteCloser. Close should be called when
// finished with the Writer. Close will not close the underlying io.Writer.
func NewWriter(w io.Writer) *Writer {
zlib := zlibPool.Get().(*zlib.Writer)
zlib.Reset(w)

return &Writer{
raw: w,
zlib: zlib.NewWriter(w),
zlib: zlib,
}
}

var zlibPool = sync.Pool{
New: func() interface{} {
return zlib.NewWriter(nil)
},
}

// WriteHeader writes the type and the size and prepares to accept the object's
// contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. If a
// negative size is provided, ErrNegativeSize is returned.
@@ -100,6 +110,7 @@ func (w *Writer) Hash() plumbing.Hash {
// Calling Close does not close the wrapped io.Writer originally passed to
// NewWriter.
func (w *Writer) Close() error {
defer zlibPool.Put(w.zlib)
if err := w.zlib.Close(); err != nil {
return err
}
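
The writer.go change above stops allocating a fresh zlib.Writer for every object and instead borrows one from a sync.Pool, calling Reset to point it at the new destination and returning it once the stream is closed. A minimal standalone sketch of that pattern, with illustrative names that are not part of the go-git API:

package example

import (
	"compress/zlib"
	"io"
	"sync"
)

// zlibWriterPool hands out reusable *zlib.Writer values; New only runs
// when the pool is empty.
var zlibWriterPool = sync.Pool{
	New: func() interface{} {
		return zlib.NewWriter(nil)
	},
}

// compressTo deflates src into dst using a pooled writer. Closing the
// zlib.Writer flushes the stream but leaves dst open.
func compressTo(dst io.Writer, src []byte) error {
	zw := zlibWriterPool.Get().(*zlib.Writer)
	zw.Reset(dst) // retarget the reused writer at the current destination
	defer zlibWriterPool.Put(zw)

	if _, err := zw.Write(src); err != nil {
		return err
	}
	return zw.Close()
}

The diff applies the same idea split across NewWriter (Get plus Reset) and Close (Put after the zlib stream is closed).
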
28 changes: 28 additions & 0 deletions plumbing/format/packfile/parser_test.go
@@ -10,8 +10,10 @@ import (
fixtures "github.com/go-git/go-git-fixtures/v4"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/storage/filesystem"
. "gopkg.in/check.v1"
)

@@ -248,3 +250,29 @@ func BenchmarkParseBasic(b *testing.B) {
}
}
}

func BenchmarkParser(b *testing.B) {
f := fixtures.Basic().One()
defer fixtures.Clean()

b.ResetTimer()
for n := 0; n < b.N; n++ {
b.StopTimer()
scanner := packfile.NewScanner(f.Packfile())
fs := osfs.New(os.TempDir())
storage := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())

parser, err := packfile.NewParserWithStorage(scanner, storage)
if err != nil {
b.Error(err)
}

b.StartTimer()
_, err = parser.Parse()

b.StopTimer()
if err != nil {
b.Error(err)
}
}
}
5 changes: 3 additions & 2 deletions plumbing/format/packfile/patch_delta.go
@@ -53,9 +53,10 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {

target.SetSize(int64(dst.Len()))

b := byteSlicePool.Get().([]byte)
bufp := byteSlicePool.Get().(*[]byte)
b := *bufp
_, err = io.CopyBuffer(w, dst, b)
byteSlicePool.Put(b)
byteSlicePool.Put(bufp)
return err
}

14 changes: 9 additions & 5 deletions plumbing/format/packfile/scanner.go
@@ -346,15 +346,17 @@ func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
}

defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
bufp := byteSlicePool.Get().(*[]byte)
buf := *bufp
n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
byteSlicePool.Put(bufp)
return
}

var byteSlicePool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
b := make([]byte, 32*1024)
return &b
},
}

@@ -387,9 +389,11 @@ func (s *Scanner) Checksum() (plumbing.Hash, error) {

// Close reads the reader until io.EOF
func (s *Scanner) Close() error {
buf := byteSlicePool.Get().([]byte)
bufp := byteSlicePool.Get().(*[]byte)
buf := *bufp
_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
byteSlicePool.Put(buf)
byteSlicePool.Put(bufp)

return err
}

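
Both patch_delta.go and scanner.go now pool *[]byte rather than []byte. Storing a bare slice in a sync.Pool boxes the slice header into an interface{} and therefore allocates on every Put (the pattern staticcheck reports as SA6002); pooling a pointer to the slice keeps Get and Put allocation-free once the pool is warm. A small self-contained sketch of the pattern, with illustrative names:

package example

import (
	"io"
	"sync"
)

// bufPool holds 32 KiB copy buffers behind pointers so that Put does not
// allocate a new interface value for the slice header each time.
var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

// copyPooled copies src into dst through a pooled buffer and returns the
// buffer to the pool when the copy finishes.
func copyPooled(dst io.Writer, src io.Reader) (int64, error) {
	bufp := bufPool.Get().(*[]byte)
	defer bufPool.Put(bufp)
	return io.CopyBuffer(dst, src, *bufp)
}
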
19 changes: 17 additions & 2 deletions plumbing/reference.go
@@ -204,6 +204,21 @@ func (r *Reference) Strings() [2]string {
}

func (r *Reference) String() string {
s := r.Strings()
return fmt.Sprintf("%s %s", s[1], s[0])
ref := ""
switch r.Type() {
case HashReference:
ref = r.Hash().String()
case SymbolicReference:
ref = symrefPrefix + r.Target().String()
default:
return ""
}

name := r.Name().String()
var v strings.Builder
v.Grow(len(ref) + len(name) + 1)
v.WriteString(ref)
v.WriteString(" ")
v.WriteString(name)
return v.String()
}
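
The reference.go rewrite replaces fmt.Sprintf("%s %s", ...) with a strings.Builder that is pre-sized via Grow, so the result is assembled with a single allocation and without Sprintf's interface-based formatting. The same technique reduced to its core, as a hypothetical helper rather than go-git code:

package example

import "strings"

// joinWithSpace concatenates two fields with one space, growing the
// builder up front so at most one allocation is performed.
func joinWithSpace(a, b string) string {
	var sb strings.Builder
	sb.Grow(len(a) + len(b) + 1) // room for both fields plus the separator
	sb.WriteString(a)
	sb.WriteString(" ")
	sb.WriteString(b)
	return sb.String()
}
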
24 changes: 23 additions & 1 deletion plumbing/reference_test.go
@@ -1,6 +1,10 @@
package plumbing

import . "gopkg.in/check.v1"
import (
"testing"

. "gopkg.in/check.v1"
)

type ReferenceSuite struct{}

@@ -98,3 +102,21 @@ func (s *ReferenceSuite) TestIsTag(c *C) {
r := ReferenceName("refs/tags/v3.1.")
c.Assert(r.IsTag(), Equals, true)
}

func benchMarkReferenceString(r *Reference, b *testing.B) {
for n := 0; n < b.N; n++ {
r.String()
}
}

func BenchmarkReferenceStringSymbolic(b *testing.B) {
benchMarkReferenceString(NewSymbolicReference("v3.1.1", "refs/tags/v3.1.1"), b)
}

func BenchmarkReferenceStringHash(b *testing.B) {
benchMarkReferenceString(NewHashReference("v3.1.1", NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")), b)
}

func BenchmarkReferenceStringInvalid(b *testing.B) {
benchMarkReferenceString(&Reference{}, b)
}
14 changes: 13 additions & 1 deletion storage/filesystem/object.go
@@ -4,6 +4,7 @@ import (
"bytes"
"io"
"os"
"sync"
"time"

"github.com/go-git/go-git/v5/plumbing"
@@ -419,10 +420,21 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb

s.objectCache.Put(obj)

_, err = io.Copy(w, r)
bufp := copyBufferPool.Get().(*[]byte)
buf := *bufp
_, err = io.CopyBuffer(w, r, buf)
copyBufferPool.Put(bufp)

return obj, err
}

var copyBufferPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 32*1024)
return &b
},
}

// Get returns the object with the given hash, by searching for it in
// the packfile.
func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
8 changes: 5 additions & 3 deletions worktree.go
@@ -534,7 +534,8 @@ func (w *Worktree) checkoutChangeRegularFile(name string,

var copyBufferPool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
b := make([]byte, 32*1024)
return &b
},
}

@@ -561,9 +562,10 @@ func (w *Worktree) checkoutFile(f *object.File) (err error) {
}

defer ioutil.CheckClose(to, &err)
buf := copyBufferPool.Get().([]byte)
bufp := copyBufferPool.Get().(*[]byte)
buf := *bufp
_, err = io.CopyBuffer(to, from, buf)
copyBufferPool.Put(buf)
copyBufferPool.Put(bufp)
return
}
