Bumps [gopkg.in/src-d/go-git.v4](https://github.com/src-d/go-git) from 4.8.0 to 4.10.0. - [Release notes](https://github.com/src-d/go-git/releases) - [Commits](https://github.com/src-d/go-git/compare/v4.8.0...v4.10.0)
@@ -128,7 +128,7 @@ require ( | |||||
gopkg.in/macaron.v1 v1.3.2 | gopkg.in/macaron.v1 v1.3.2 | ||||
gopkg.in/redis.v2 v2.3.2 // indirect | gopkg.in/redis.v2 v2.3.2 // indirect | ||||
gopkg.in/src-d/go-billy.v4 v4.3.0 // indirect | gopkg.in/src-d/go-billy.v4 v4.3.0 // indirect | ||||
gopkg.in/src-d/go-git.v4 v4.8.0 | |||||
gopkg.in/src-d/go-git.v4 v4.10.0 | |||||
gopkg.in/testfixtures.v2 v2.5.0 | gopkg.in/testfixtures.v2 v2.5.0 | ||||
mvdan.cc/xurls/v2 v2.0.0 | mvdan.cc/xurls/v2 v2.0.0 | ||||
strk.kbt.io/projects/go/libravatar v0.0.0-20160628055650-5eed7bff870a | strk.kbt.io/projects/go/libravatar v0.0.0-20160628055650-5eed7bff870a | ||||
@@ -354,6 +354,8 @@ gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45i | |||||
gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= | gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= | ||||
gopkg.in/src-d/go-git.v4 v4.8.0 h1:dDEbgvfNG9vUDM54uhCYPExiGa8uYgXpQ/MR8YvxcAM= | gopkg.in/src-d/go-git.v4 v4.8.0 h1:dDEbgvfNG9vUDM54uhCYPExiGa8uYgXpQ/MR8YvxcAM= | ||||
gopkg.in/src-d/go-git.v4 v4.8.0/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk= | gopkg.in/src-d/go-git.v4 v4.8.0/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk= | ||||
gopkg.in/src-d/go-git.v4 v4.10.0 h1:NWjTJTQnk8UpIGlssuefyDZ6JruEjo5s88vm88uASbw= | |||||
gopkg.in/src-d/go-git.v4 v4.10.0/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk= | |||||
gopkg.in/stretchr/testify.v1 v1.2.2 h1:yhQC6Uy5CqibAIlk1wlusa/MJ3iAN49/BsR/dCCKz3M= | gopkg.in/stretchr/testify.v1 v1.2.2 h1:yhQC6Uy5CqibAIlk1wlusa/MJ3iAN49/BsR/dCCKz3M= | ||||
gopkg.in/stretchr/testify.v1 v1.2.2/go.mod h1:QI5V/q6UbPmuhtm10CaFZxED9NreB8PnFYN9JcR6TxU= | gopkg.in/stretchr/testify.v1 v1.2.2/go.mod h1:QI5V/q6UbPmuhtm10CaFZxED9NreB8PnFYN9JcR6TxU= | ||||
gopkg.in/testfixtures.v2 v2.5.0 h1:N08B7l2GzFQenyYbzqthDnKAA+cmb17iAZhhFxr7JHw= | gopkg.in/testfixtures.v2 v2.5.0 h1:N08B7l2GzFQenyYbzqthDnKAA+cmb17iAZhhFxr7JHw= | ||||
@@ -8,6 +8,7 @@ import ( | |||||
"sort" | "sort" | ||||
"strconv" | "strconv" | ||||
"gopkg.in/src-d/go-git.v4/internal/url" | |||||
format "gopkg.in/src-d/go-git.v4/plumbing/format/config" | format "gopkg.in/src-d/go-git.v4/plumbing/format/config" | ||||
) | ) | ||||
@@ -399,3 +400,7 @@ func (c *RemoteConfig) marshal() *format.Subsection { | |||||
return c.raw | return c.raw | ||||
} | } | ||||
func (c *RemoteConfig) IsFirstURLLocal() bool { | |||||
return url.IsLocalEndpoint(c.URLs[0]) | |||||
} |
@@ -0,0 +1,37 @@ | |||||
package url | |||||
import ( | |||||
"regexp" | |||||
) | |||||
var (
	isSchemeRegExp   = regexp.MustCompile(`^[^:]+://`)
	scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})/)?(?P<path>[^\\].*)$`)
)

// MatchesScheme reports whether the given string starts with a URL
// scheme prefix such as "https://" or "ssh://".
func MatchesScheme(url string) bool {
	return isSchemeRegExp.MatchString(url)
}

// MatchesScpLike reports whether the given string has the SCP-like
// form [user@]host:[port/]path.
func MatchesScpLike(url string) bool {
	return scpLikeUrlRegExp.MatchString(url)
}

// FindScpLikeComponents splits an SCP-like URL into its user, host,
// port and path components. The input must already match
// MatchesScpLike; absent components come back as empty strings.
func FindScpLikeComponents(url string) (user, host, port, path string) {
	m := scpLikeUrlRegExp.FindStringSubmatch(url)
	user, host, port, path = m[1], m[2], m[3], m[4]
	return
}

// IsLocalEndpoint reports whether the given URL string names a local
// file endpoint. For example, on a Linux machine
// `/home/user/src/go-git` is local, while
// `https://github.com/src-d/go-git` is not.
func IsLocalEndpoint(url string) bool {
	// Local means: neither a scheme-prefixed URL nor an SCP-like one.
	return !(MatchesScheme(url) || MatchesScpLike(url))
}
@@ -335,6 +335,11 @@ type LogOptions struct { | |||||
// Show only those commits in which the specified file was inserted/updated. | // Show only those commits in which the specified file was inserted/updated. | ||||
// It is equivalent to running `git log -- <file-name>`. | // It is equivalent to running `git log -- <file-name>`. | ||||
FileName *string | FileName *string | ||||
// Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>. | |||||
// It is equivalent to running `git log --all`. | |||||
// If set on true, the From option will be ignored. | |||||
All bool | |||||
} | } | ||||
var ( | var ( | ||||
@@ -51,7 +51,13 @@ func WritePackfileToObjectStorage( | |||||
} | } | ||||
defer ioutil.CheckClose(w, &err) | defer ioutil.CheckClose(w, &err) | ||||
_, err = io.Copy(w, packfile) | |||||
var n int64 | |||||
n, err = io.Copy(w, packfile) | |||||
if err == nil && n == 0 { | |||||
return ErrEmptyPackfile | |||||
} | |||||
return err | return err | ||||
} | } | ||||
@@ -48,7 +48,7 @@ func NewFSObject( | |||||
// Reader implements the plumbing.EncodedObject interface. | // Reader implements the plumbing.EncodedObject interface. | ||||
func (o *FSObject) Reader() (io.ReadCloser, error) { | func (o *FSObject) Reader() (io.ReadCloser, error) { | ||||
obj, ok := o.cache.Get(o.hash) | obj, ok := o.cache.Get(o.hash) | ||||
if ok { | |||||
if ok && obj != o { | |||||
reader, err := obj.Reader() | reader, err := obj.Reader() | ||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err | ||||
@@ -21,6 +21,16 @@ var ( | |||||
ErrZLib = NewError("zlib reading error") | ErrZLib = NewError("zlib reading error") | ||||
) | ) | ||||
// When reading small objects from packfile it is beneficial to do so at | |||||
// once to exploit the buffered I/O. In many cases the objects are so small | |||||
// that they were already loaded to memory when the object header was | |||||
// loaded from the packfile. Wrapping in FSObject would cause this buffered | |||||
// data to be thrown away and then re-read later, with the additional | |||||
// seeking causing reloads from disk. Objects smaller than this threshold | |||||
// are now always read into memory and stored in cache instead of being | |||||
// wrapped in FSObject. | |||||
const smallObjectThreshold = 16 * 1024 | |||||
// Packfile allows retrieving information from inside a packfile. | // Packfile allows retrieving information from inside a packfile. | ||||
type Packfile struct { | type Packfile struct { | ||||
idxfile.Index | idxfile.Index | ||||
@@ -79,15 +89,7 @@ func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) { | |||||
} | } | ||||
} | } | ||||
if _, err := p.s.SeekFromStart(o); err != nil { | |||||
if err == io.EOF || isInvalid(err) { | |||||
return nil, plumbing.ErrObjectNotFound | |||||
} | |||||
return nil, err | |||||
} | |||||
return p.nextObject() | |||||
return p.objectAtOffset(o) | |||||
} | } | ||||
// GetSizeByOffset retrieves the size of the encoded object from the | // GetSizeByOffset retrieves the size of the encoded object from the | ||||
@@ -105,7 +107,13 @@ func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) { | |||||
if err != nil { | if err != nil { | ||||
return 0, err | return 0, err | ||||
} | } | ||||
return h.Length, nil | |||||
return p.getObjectSize(h) | |||||
} | |||||
// objectHeaderAtOffset seeks the underlying scanner to offset and
// returns the object header found there.
func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
	h, err := p.s.SeekObjectHeader(offset)
	// NOTE(review): clearing pendingObject presumably stops the scanner
	// from trying to finish the previously scanned object after this
	// random seek — confirm against the Scanner's doPending handling.
	p.s.pendingObject = nil
	return h, err
}
func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) { | func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) { | ||||
@@ -154,11 +162,7 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err | |||||
if baseType, ok := p.offsetToType[offset]; ok { | if baseType, ok := p.offsetToType[offset]; ok { | ||||
typ = baseType | typ = baseType | ||||
} else { | } else { | ||||
if _, err = p.s.SeekFromStart(offset); err != nil { | |||||
return | |||||
} | |||||
h, err = p.nextObjectHeader() | |||||
h, err = p.objectHeaderAtOffset(offset) | |||||
if err != nil { | if err != nil { | ||||
return | return | ||||
} | } | ||||
@@ -175,8 +179,8 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err | |||||
return | return | ||||
} | } | ||||
func (p *Packfile) nextObject() (plumbing.EncodedObject, error) { | |||||
h, err := p.nextObjectHeader() | |||||
func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) { | |||||
h, err := p.objectHeaderAtOffset(offset) | |||||
if err != nil { | if err != nil { | ||||
if err == io.EOF || isInvalid(err) { | if err == io.EOF || isInvalid(err) { | ||||
return nil, plumbing.ErrObjectNotFound | return nil, plumbing.ErrObjectNotFound | ||||
@@ -190,6 +194,13 @@ func (p *Packfile) nextObject() (plumbing.EncodedObject, error) { | |||||
return p.getNextObject(h) | return p.getNextObject(h) | ||||
} | } | ||||
// If the object is not a delta and it's small enough then read it | |||||
// completely into memory now since it is already read from disk | |||||
// into buffer anyway. | |||||
if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject { | |||||
return p.getNextObject(h) | |||||
} | |||||
hash, err := p.FindHash(h.Offset) | hash, err := p.FindHash(h.Offset) | ||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err | ||||
@@ -233,11 +244,7 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) { | |||||
} | } | ||||
} | } | ||||
if _, err := p.s.SeekFromStart(offset); err != nil { | |||||
return nil, err | |||||
} | |||||
h, err := p.nextObjectHeader() | |||||
h, err := p.objectHeaderAtOffset(offset) | |||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err | ||||
} | } | ||||
@@ -329,8 +336,6 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset | |||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
p.cachePut(base) | |||||
} | } | ||||
obj.SetType(base.Type()) | obj.SetType(base.Type()) | ||||
@@ -398,11 +398,7 @@ func (p *Parser) readData(o *objectInfo) ([]byte, error) { | |||||
return data, nil | return data, nil | ||||
} | } | ||||
if _, err := p.scanner.SeekFromStart(o.Offset); err != nil { | |||||
return nil, err | |||||
} | |||||
if _, err := p.scanner.NextObjectHeader(); err != nil { | |||||
if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil { | |||||
return nil, err | return nil, err | ||||
} | } | ||||
@@ -138,14 +138,52 @@ func (s *Scanner) readCount() (uint32, error) { | |||||
return binary.ReadUint32(s.r) | return binary.ReadUint32(s.r) | ||||
} | } | ||||
// SeekObjectHeader seeks to the specified offset and returns the
// ObjectHeader of the object that starts there.
func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
	// if seeking we assume that you are not interested in the header
	if s.version == 0 {
		s.version = VersionSupported
	}

	if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
		return nil, err
	}

	h, err := s.nextObjectHeader()
	if err != nil {
		return nil, err
	}

	// nextObjectHeader does not fill in Offset; record the seek target.
	h.Offset = offset
	return h, nil
}
// NextObjectHeader returns the ObjectHeader for the next object in the reader | // NextObjectHeader returns the ObjectHeader for the next object in the reader | ||||
func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) { | func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) { | ||||
defer s.Flush() | |||||
if err := s.doPending(); err != nil { | if err := s.doPending(); err != nil { | ||||
return nil, err | return nil, err | ||||
} | } | ||||
offset, err := s.r.Seek(0, io.SeekCurrent) | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
h, err := s.nextObjectHeader() | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
h.Offset = offset | |||||
return h, nil | |||||
} | |||||
// nextObjectHeader returns the ObjectHeader for the next object in the reader | |||||
// without the Offset field | |||||
func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) { | |||||
defer s.Flush() | |||||
s.crc.Reset() | s.crc.Reset() | ||||
h := &ObjectHeader{} | h := &ObjectHeader{} | ||||
@@ -308,7 +346,7 @@ var byteSlicePool = sync.Pool{ | |||||
// SeekFromStart sets a new offset from start, returns the old position before | // SeekFromStart sets a new offset from start, returns the old position before | ||||
// the change. | // the change. | ||||
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) { | func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) { | ||||
// if seeking we assume that you are not interested on the header | |||||
// if seeking we assume that you are not interested in the header | |||||
if s.version == 0 { | if s.version == 0 { | ||||
s.version = VersionSupported | s.version = VersionSupported | ||||
} | } | ||||
@@ -385,7 +423,7 @@ type bufferedSeeker struct { | |||||
} | } | ||||
func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) { | func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) { | ||||
if whence == io.SeekCurrent { | |||||
if whence == io.SeekCurrent && offset == 0 { | |||||
current, err := r.r.Seek(offset, whence) | current, err := r.r.Seek(offset, whence) | ||||
if err != nil { | if err != nil { | ||||
return current, err | return current, err | ||||
@@ -1,10 +1,12 @@ | |||||
package object | package object | ||||
import ( | import ( | ||||
"container/list" | |||||
"io" | "io" | ||||
"gopkg.in/src-d/go-git.v4/plumbing" | "gopkg.in/src-d/go-git.v4/plumbing" | ||||
"gopkg.in/src-d/go-git.v4/plumbing/storer" | "gopkg.in/src-d/go-git.v4/plumbing/storer" | ||||
"gopkg.in/src-d/go-git.v4/storage" | |||||
) | ) | ||||
type commitPreIterator struct { | type commitPreIterator struct { | ||||
@@ -181,3 +183,145 @@ func (w *commitPostIterator) ForEach(cb func(*Commit) error) error { | |||||
} | } | ||||
func (w *commitPostIterator) Close() {} | func (w *commitPostIterator) Close() {} | ||||
// commitAllIterator stands for commit iterator for all refs. | |||||
type commitAllIterator struct { | |||||
// currCommit points to the current commit. | |||||
currCommit *list.Element | |||||
} | |||||
// NewCommitAllIter returns a new commit iterator for all refs.
// repoStorer is a repo Storer used to get commits and references.
// commitIterFunc is a commit iterator function, used to iterate
// through ref commits in chosen order.
func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) {
	// commitsPath holds the combined walk over all refs' histories;
	// commitsLookup lets addReference splice a ref's commits in just
	// before the first commit the path already contains.
	commitsPath := list.New()
	commitsLookup := make(map[plumbing.Hash]*list.Element)

	// Seed the path with HEAD's history first, so it leads the walk.
	// A missing HEAD (e.g. an empty repository) is tolerated.
	head, err := storer.ResolveReference(repoStorer, plumbing.HEAD)
	if err == nil {
		err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup)
	}
	if err != nil && err != plumbing.ErrReferenceNotFound {
		return nil, err
	}

	// add all references along with the HEAD
	refIter, err := repoStorer.IterReferences()
	if err != nil {
		return nil, err
	}
	defer refIter.Close()

	for {
		ref, err := refIter.Next()
		if err == io.EOF {
			break
		}
		// A dangling reference is skipped rather than failing the walk.
		if err == plumbing.ErrReferenceNotFound {
			continue
		}
		if err != nil {
			return nil, err
		}
		if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil {
			return nil, err
		}
	}

	return &commitAllIterator{commitsPath.Front()}, nil
}
// addReference walks the history of ref (in the order produced by
// commitIterFunc) and merges it into commitsPath/commitsLookup.
// The walk stops at the first commit already present on the path, and
// the new commits are inserted just before that shared commit so the
// overall path ordering is preserved.
func addReference(
	repoStorer storage.Storer,
	commitIterFunc func(*Commit) CommitIter,
	ref *plumbing.Reference,
	commitsPath *list.List,
	commitsLookup map[plumbing.Hash]*list.Element) error {
	_, exists := commitsLookup[ref.Hash()]
	if exists {
		// we already have it - skip the reference.
		return nil
	}

	refCommit, _ := GetCommit(repoStorer, ref.Hash())
	if refCommit == nil {
		// if it's not a commit - skip it.
		return nil
	}

	var (
		refCommits []*Commit
		parent     *list.Element
	)
	// collect all ref commits to add
	commitIter := commitIterFunc(refCommit)
	for c, e := commitIter.Next(); e == nil; {
		// parent stays nil while the lookup keeps missing; on a hit it
		// points at the shared commit's element on the path.
		parent, exists = commitsLookup[c.Hash]
		if exists {
			break
		}
		refCommits = append(refCommits, c)
		c, e = commitIter.Next()
	}
	commitIter.Close()

	if parent == nil {
		// common parent - not found
		// add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet)
		for _, c := range refCommits {
			parent = commitsPath.PushBack(c)
			commitsLookup[c.Hash] = parent
		}
	} else {
		// add ref's commits to the path in reverse order (from the latest)
		for i := len(refCommits) - 1; i >= 0; i-- {
			c := refCommits[i]
			// insert before found common parent
			parent = commitsPath.InsertBefore(c, parent)
			commitsLookup[c.Hash] = parent
		}
	}

	return nil
}
func (it *commitAllIterator) Next() (*Commit, error) { | |||||
if it.currCommit == nil { | |||||
return nil, io.EOF | |||||
} | |||||
c := it.currCommit.Value.(*Commit) | |||||
it.currCommit = it.currCommit.Next() | |||||
return c, nil | |||||
} | |||||
func (it *commitAllIterator) ForEach(cb func(*Commit) error) error { | |||||
for { | |||||
c, err := it.Next() | |||||
if err == io.EOF { | |||||
break | |||||
} | |||||
if err != nil { | |||||
return err | |||||
} | |||||
err = cb(c) | |||||
if err == storer.ErrStop { | |||||
break | |||||
} | |||||
if err != nil { | |||||
return err | |||||
} | |||||
} | |||||
return nil | |||||
} | |||||
// Close releases the iterator; subsequent Next calls return io.EOF.
func (it *commitAllIterator) Close() {
	it.currCommit = nil
}
@@ -1,23 +1,30 @@ | |||||
package object | package object | ||||
import ( | import ( | ||||
"gopkg.in/src-d/go-git.v4/plumbing/storer" | |||||
"io" | "io" | ||||
"gopkg.in/src-d/go-git.v4/plumbing" | |||||
"gopkg.in/src-d/go-git.v4/plumbing/storer" | |||||
) | ) | ||||
type commitFileIter struct { | type commitFileIter struct { | ||||
fileName string | fileName string | ||||
sourceIter CommitIter | sourceIter CommitIter | ||||
currentCommit *Commit | currentCommit *Commit | ||||
checkParent bool | |||||
} | } | ||||
// NewCommitFileIterFromIter returns a commit iterator which performs diffTree between | // NewCommitFileIterFromIter returns a commit iterator which performs diffTree between | ||||
// successive trees returned from the commit iterator from the argument. The purpose of this is | // successive trees returned from the commit iterator from the argument. The purpose of this is | ||||
// to find the commits that explain how the files that match the path came to be. | // to find the commits that explain how the files that match the path came to be. | ||||
func NewCommitFileIterFromIter(fileName string, commitIter CommitIter) CommitIter { | |||||
// If checkParent is true then the function double checks if potential parent (next commit in a path) | |||||
// is one of the parents in the tree (it's used by `git log --all`). | |||||
func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter { | |||||
iterator := new(commitFileIter) | iterator := new(commitFileIter) | ||||
iterator.sourceIter = commitIter | iterator.sourceIter = commitIter | ||||
iterator.fileName = fileName | iterator.fileName = fileName | ||||
iterator.checkParent = checkParent | |||||
return iterator | return iterator | ||||
} | } | ||||
@@ -71,20 +78,14 @@ func (c *commitFileIter) getNextFileCommit() (*Commit, error) { | |||||
return nil, diffErr | return nil, diffErr | ||||
} | } | ||||
foundChangeForFile := false | |||||
for _, change := range changes { | |||||
if change.name() == c.fileName { | |||||
foundChangeForFile = true | |||||
break | |||||
} | |||||
} | |||||
found := c.hasFileChange(changes, parentCommit) | |||||
// Storing the current-commit in-case a change is found, and | // Storing the current-commit in-case a change is found, and | ||||
// Updating the current-commit for the next-iteration | // Updating the current-commit for the next-iteration | ||||
prevCommit := c.currentCommit | prevCommit := c.currentCommit | ||||
c.currentCommit = parentCommit | c.currentCommit = parentCommit | ||||
if foundChangeForFile == true { | |||||
if found { | |||||
return prevCommit, nil | return prevCommit, nil | ||||
} | } | ||||
@@ -95,6 +96,35 @@ func (c *commitFileIter) getNextFileCommit() (*Commit, error) { | |||||
} | } | ||||
} | } | ||||
func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool { | |||||
for _, change := range changes { | |||||
if change.name() != c.fileName { | |||||
continue | |||||
} | |||||
// filename matches, now check if source iterator contains all commits (from all refs) | |||||
if c.checkParent { | |||||
if parent != nil && isParentHash(parent.Hash, c.currentCommit) { | |||||
return true | |||||
} | |||||
continue | |||||
} | |||||
return true | |||||
} | |||||
return false | |||||
} | |||||
func isParentHash(hash plumbing.Hash, commit *Commit) bool { | |||||
for _, h := range commit.ParentHashes { | |||||
if h == hash { | |||||
return true | |||||
} | |||||
} | |||||
return false | |||||
} | |||||
func (c *commitFileIter) ForEach(cb func(*Commit) error) error { | func (c *commitFileIter) ForEach(cb func(*Commit) error) error { | ||||
for { | for { | ||||
commit, nextErr := c.Next() | commit, nextErr := c.Next() | ||||
@@ -21,7 +21,20 @@ func Objects( | |||||
objs, | objs, | ||||
ignore []plumbing.Hash, | ignore []plumbing.Hash, | ||||
) ([]plumbing.Hash, error) { | ) ([]plumbing.Hash, error) { | ||||
ignore, err := objects(s, ignore, nil, true) | |||||
return ObjectsWithStorageForIgnores(s, s, objs, ignore) | |||||
} | |||||
// ObjectsWithStorageForIgnores is the same as Objects, but a | |||||
// secondary storage layer can be provided, to be used to finding the | |||||
// full set of objects to be ignored while finding the reachable | |||||
// objects. This is useful when the main `s` storage layer is slow | |||||
// and/or remote, while the ignore list is available somewhere local. | |||||
func ObjectsWithStorageForIgnores( | |||||
s, ignoreStore storer.EncodedObjectStorer, | |||||
objs, | |||||
ignore []plumbing.Hash, | |||||
) ([]plumbing.Hash, error) { | |||||
ignore, err := objects(ignoreStore, ignore, nil, true) | |||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err | ||||
} | } | ||||
@@ -114,7 +127,6 @@ func reachableObjects( | |||||
i := object.NewCommitPreorderIter(commit, seen, ignore) | i := object.NewCommitPreorderIter(commit, seen, ignore) | ||||
pending := make(map[plumbing.Hash]bool) | pending := make(map[plumbing.Hash]bool) | ||||
addPendingParents(pending, visited, commit) | addPendingParents(pending, visited, commit) | ||||
for { | for { | ||||
commit, err := i.Next() | commit, err := i.Next() | ||||
if err == io.EOF { | if err == io.EOF { | ||||
@@ -222,7 +222,7 @@ type MultiEncodedObjectIter struct { | |||||
} | } | ||||
// NewMultiEncodedObjectIter returns an object iterator for the given slice of | // NewMultiEncodedObjectIter returns an object iterator for the given slice of | ||||
// objects. | |||||
// EncodedObjectIters. | |||||
func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter { | func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter { | ||||
return &MultiEncodedObjectIter{iters: iters} | return &MultiEncodedObjectIter{iters: iters} | ||||
} | } | ||||
@@ -131,9 +131,27 @@ func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) { | |||||
// an error happens or the end of the iter is reached. If ErrStop is sent | // an error happens or the end of the iter is reached. If ErrStop is sent | ||||
// the iteration is stop but no error is returned. The iterator is closed. | // the iteration is stop but no error is returned. The iterator is closed. | ||||
func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error { | func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error { | ||||
return forEachReferenceIter(iter, cb) | |||||
} | |||||
// bareReferenceIterator is the minimal subset of ReferenceIter that
// forEachReferenceIter needs.
type bareReferenceIterator interface {
	Next() (*plumbing.Reference, error)
	Close()
}

// forEachReferenceIter drains iter, invoking cb for every reference.
// Iteration ends without error on io.EOF or when cb returns ErrStop;
// any other error from Next or cb is returned. The iterator is always
// closed.
func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error {
	defer iter.Close()
	for {
		obj, err := iter.Next()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if err := cb(obj); err != nil {
			if err == ErrStop {
				return nil
			}
			return err
		}
	}
}
// Close releases any resources used by the iterator. | // Close releases any resources used by the iterator. | ||||
@@ -150,6 +166,52 @@ func (iter *ReferenceSliceIter) Close() { | |||||
iter.pos = len(iter.series) | iter.pos = len(iter.series) | ||||
} | } | ||||
// MultiReferenceIter implements ReferenceIter. It iterates over several | |||||
// ReferenceIter, | |||||
// | |||||
// The MultiReferenceIter must be closed with a call to Close() when it is no | |||||
// longer needed. | |||||
type MultiReferenceIter struct { | |||||
iters []ReferenceIter | |||||
} | |||||
// NewMultiReferenceIter returns a reference iterator that drains the
// given slice of ReferenceIters in order.
func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter {
	return &MultiReferenceIter{iters: iters}
}
// Next returns the next reference from the iterator, if one iterator reach | |||||
// io.EOF is removed and the next one is used. | |||||
func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) { | |||||
if len(iter.iters) == 0 { | |||||
return nil, io.EOF | |||||
} | |||||
obj, err := iter.iters[0].Next() | |||||
if err == io.EOF { | |||||
iter.iters[0].Close() | |||||
iter.iters = iter.iters[1:] | |||||
return iter.Next() | |||||
} | |||||
return obj, err | |||||
} | |||||
// ForEach calls the cb function for each reference contained in this
// iter until an error happens or the end of the iter is reached. If
// ErrStop is returned the iteration stops but no error is returned.
// The iterator is closed.
func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error {
	return forEachReferenceIter(iter, cb)
}
// Close releases any resources used by the iterator, closing every
// remaining sub-iterator.
func (iter *MultiReferenceIter) Close() {
	for _, i := range iter.iters {
		i.Close()
	}
}
// ResolveReference resolves a SymbolicReference to a HashReference. | // ResolveReference resolves a SymbolicReference to a HashReference. | ||||
func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) { | func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) { | ||||
r, err := s.Reference(n) | r, err := s.Reference(n) | ||||
@@ -19,10 +19,10 @@ import ( | |||||
"fmt" | "fmt" | ||||
"io" | "io" | ||||
"net/url" | "net/url" | ||||
"regexp" | |||||
"strconv" | "strconv" | ||||
"strings" | "strings" | ||||
giturl "gopkg.in/src-d/go-git.v4/internal/url" | |||||
"gopkg.in/src-d/go-git.v4/plumbing" | "gopkg.in/src-d/go-git.v4/plumbing" | ||||
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" | "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" | ||||
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" | "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" | ||||
@@ -224,34 +224,28 @@ func getPath(u *url.URL) string { | |||||
return res | return res | ||||
} | } | ||||
var ( | |||||
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`) | |||||
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})/)?(?P<path>[^\\].*)$`) | |||||
) | |||||
func parseSCPLike(endpoint string) (*Endpoint, bool) { | func parseSCPLike(endpoint string) (*Endpoint, bool) { | ||||
if isSchemeRegExp.MatchString(endpoint) || !scpLikeUrlRegExp.MatchString(endpoint) { | |||||
if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) { | |||||
return nil, false | return nil, false | ||||
} | } | ||||
m := scpLikeUrlRegExp.FindStringSubmatch(endpoint) | |||||
port, err := strconv.Atoi(m[3]) | |||||
user, host, portStr, path := giturl.FindScpLikeComponents(endpoint) | |||||
port, err := strconv.Atoi(portStr) | |||||
if err != nil { | if err != nil { | ||||
port = 22 | port = 22 | ||||
} | } | ||||
return &Endpoint{ | return &Endpoint{ | ||||
Protocol: "ssh", | Protocol: "ssh", | ||||
User: m[1], | |||||
Host: m[2], | |||||
User: user, | |||||
Host: host, | |||||
Port: port, | Port: port, | ||||
Path: m[4], | |||||
Path: path, | |||||
}, true | }, true | ||||
} | } | ||||
func parseFile(endpoint string) (*Endpoint, bool) { | func parseFile(endpoint string) (*Endpoint, bool) { | ||||
if isSchemeRegExp.MatchString(endpoint) { | |||||
if giturl.MatchesScheme(endpoint) { | |||||
return nil, false | return nil, false | ||||
} | } | ||||
@@ -6,8 +6,10 @@ import ( | |||||
"fmt" | "fmt" | ||||
"io" | "io" | ||||
"gopkg.in/src-d/go-billy.v4/osfs" | |||||
"gopkg.in/src-d/go-git.v4/config" | "gopkg.in/src-d/go-git.v4/config" | ||||
"gopkg.in/src-d/go-git.v4/plumbing" | "gopkg.in/src-d/go-git.v4/plumbing" | ||||
"gopkg.in/src-d/go-git.v4/plumbing/cache" | |||||
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile" | "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" | ||||
"gopkg.in/src-d/go-git.v4/plumbing/object" | "gopkg.in/src-d/go-git.v4/plumbing/object" | ||||
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" | "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" | ||||
@@ -18,6 +20,7 @@ import ( | |||||
"gopkg.in/src-d/go-git.v4/plumbing/transport" | "gopkg.in/src-d/go-git.v4/plumbing/transport" | ||||
"gopkg.in/src-d/go-git.v4/plumbing/transport/client" | "gopkg.in/src-d/go-git.v4/plumbing/transport/client" | ||||
"gopkg.in/src-d/go-git.v4/storage" | "gopkg.in/src-d/go-git.v4/storage" | ||||
"gopkg.in/src-d/go-git.v4/storage/filesystem" | |||||
"gopkg.in/src-d/go-git.v4/storage/memory" | "gopkg.in/src-d/go-git.v4/storage/memory" | ||||
"gopkg.in/src-d/go-git.v4/utils/ioutil" | "gopkg.in/src-d/go-git.v4/utils/ioutil" | ||||
) | ) | ||||
@@ -149,7 +152,17 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { | |||||
var hashesToPush []plumbing.Hash | var hashesToPush []plumbing.Hash | ||||
// Avoid the expensive revlist operation if we're only doing deletes. | // Avoid the expensive revlist operation if we're only doing deletes. | ||||
if !allDelete { | if !allDelete { | ||||
hashesToPush, err = revlist.Objects(r.s, objects, haves) | |||||
if r.c.IsFirstURLLocal() { | |||||
// If we're are pushing to a local repo, it might be much | |||||
// faster to use a local storage layer to get the commits | |||||
// to ignore, when calculating the object revlist. | |||||
localStorer := filesystem.NewStorage( | |||||
osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault()) | |||||
hashesToPush, err = revlist.ObjectsWithStorageForIgnores( | |||||
r.s, localStorer, objects, haves) | |||||
} else { | |||||
hashesToPush, err = revlist.Objects(r.s, objects, haves) | |||||
} | |||||
if err != nil { | if err != nil { | ||||
return err | return err | ||||
} | } | ||||
@@ -41,6 +41,8 @@ var ( | |||||
ErrTagExists = errors.New("tag already exists") | ErrTagExists = errors.New("tag already exists") | ||||
// ErrTagNotFound an error stating the specified tag does not exist | // ErrTagNotFound an error stating the specified tag does not exist | ||||
ErrTagNotFound = errors.New("tag not found") | ErrTagNotFound = errors.New("tag not found") | ||||
// ErrFetching is returned when the packfile could not be downloaded | |||||
ErrFetching = errors.New("unable to fetch packfile") | |||||
ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") | ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") | ||||
ErrRepositoryNotExists = errors.New("repository does not exist") | ErrRepositoryNotExists = errors.New("repository does not exist") | ||||
@@ -342,8 +344,9 @@ func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) | |||||
// transport operations. | // transport operations. | ||||
// | // | ||||
// TODO(mcuadros): move isBare to CloneOptions in v5 | // TODO(mcuadros): move isBare to CloneOptions in v5 | ||||
// TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027 | |||||
func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) { | func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) { | ||||
dirExists, err := checkExistsAndIsEmptyDir(path) | |||||
cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path) | |||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err | ||||
} | } | ||||
@@ -355,7 +358,9 @@ func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOp | |||||
err = r.clone(ctx, o) | err = r.clone(ctx, o) | ||||
if err != nil && err != ErrRepositoryAlreadyExists { | if err != nil && err != ErrRepositoryAlreadyExists { | ||||
cleanUpDir(path, !dirExists) | |||||
if cleanup { | |||||
cleanUpDir(path, cleanupParent) | |||||
} | |||||
} | } | ||||
return r, err | return r, err | ||||
@@ -369,37 +374,37 @@ func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository { | |||||
} | } | ||||
} | } | ||||
func checkExistsAndIsEmptyDir(path string) (exists bool, err error) { | |||||
func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) { | |||||
fi, err := os.Stat(path) | fi, err := os.Stat(path) | ||||
if err != nil { | if err != nil { | ||||
if os.IsNotExist(err) { | if os.IsNotExist(err) { | ||||
return false, nil | |||||
return true, true, nil | |||||
} | } | ||||
return false, err | |||||
return false, false, err | |||||
} | } | ||||
if !fi.IsDir() { | if !fi.IsDir() { | ||||
return false, fmt.Errorf("path is not a directory: %s", path) | |||||
return false, false, fmt.Errorf("path is not a directory: %s", path) | |||||
} | } | ||||
f, err := os.Open(path) | f, err := os.Open(path) | ||||
if err != nil { | if err != nil { | ||||
return false, err | |||||
return false, false, err | |||||
} | } | ||||
defer ioutil.CheckClose(f, &err) | defer ioutil.CheckClose(f, &err) | ||||
_, err = f.Readdirnames(1) | _, err = f.Readdirnames(1) | ||||
if err == io.EOF { | if err == io.EOF { | ||||
return true, nil | |||||
return true, false, nil | |||||
} | } | ||||
if err != nil { | if err != nil { | ||||
return true, err | |||||
return false, false, err | |||||
} | } | ||||
return true, fmt.Errorf("directory is not empty: %s", path) | |||||
return false, false, nil | |||||
} | } | ||||
func cleanUpDir(path string, all bool) error { | func cleanUpDir(path string, all bool) error { | ||||
@@ -425,7 +430,7 @@ func cleanUpDir(path string, all bool) error { | |||||
} | } | ||||
} | } | ||||
return nil | |||||
return err | |||||
} | } | ||||
// Config return the repository config | // Config return the repository config | ||||
@@ -855,6 +860,8 @@ func (r *Repository) fetchAndUpdateReferences( | |||||
remoteRefs, err := remote.fetch(ctx, o) | remoteRefs, err := remote.fetch(ctx, o) | ||||
if err == NoErrAlreadyUpToDate { | if err == NoErrAlreadyUpToDate { | ||||
objsUpdated = false | objsUpdated = false | ||||
} else if err == packfile.ErrEmptyPackfile { | |||||
return nil, ErrFetching | |||||
} else if err != nil { | } else if err != nil { | ||||
return nil, err | return nil, err | ||||
} | } | ||||
@@ -1020,8 +1027,36 @@ func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error { | |||||
// Log returns the commit history from the given LogOptions. | // Log returns the commit history from the given LogOptions. | ||||
func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) { | func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) { | ||||
h := o.From | |||||
if o.From == plumbing.ZeroHash { | |||||
fn := commitIterFunc(o.Order) | |||||
if fn == nil { | |||||
return nil, fmt.Errorf("invalid Order=%v", o.Order) | |||||
} | |||||
var ( | |||||
it object.CommitIter | |||||
err error | |||||
) | |||||
if o.All { | |||||
it, err = r.logAll(fn) | |||||
} else { | |||||
it, err = r.log(o.From, fn) | |||||
} | |||||
if err != nil { | |||||
return nil, err | |||||
} | |||||
if o.FileName != nil { | |||||
// for `git log --all` also check parent (if the next commit comes from the real parent) | |||||
it = r.logWithFile(*o.FileName, it, o.All) | |||||
} | |||||
return it, nil | |||||
} | |||||
func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { | |||||
h := from | |||||
if from == plumbing.ZeroHash { | |||||
head, err := r.Head() | head, err := r.Head() | ||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err | ||||
@@ -1034,27 +1069,41 @@ func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) { | |||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err | ||||
} | } | ||||
return commitIterFunc(commit), nil | |||||
} | |||||
func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { | |||||
return object.NewCommitAllIter(r.Storer, commitIterFunc) | |||||
} | |||||
var commitIter object.CommitIter | |||||
switch o.Order { | |||||
func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter { | |||||
return object.NewCommitFileIterFromIter(fileName, commitIter, checkParent) | |||||
} | |||||
func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter { | |||||
switch order { | |||||
case LogOrderDefault: | case LogOrderDefault: | ||||
commitIter = object.NewCommitPreorderIter(commit, nil, nil) | |||||
return func(c *object.Commit) object.CommitIter { | |||||
return object.NewCommitPreorderIter(c, nil, nil) | |||||
} | |||||
case LogOrderDFS: | case LogOrderDFS: | ||||
commitIter = object.NewCommitPreorderIter(commit, nil, nil) | |||||
return func(c *object.Commit) object.CommitIter { | |||||
return object.NewCommitPreorderIter(c, nil, nil) | |||||
} | |||||
case LogOrderDFSPost: | case LogOrderDFSPost: | ||||
commitIter = object.NewCommitPostorderIter(commit, nil) | |||||
return func(c *object.Commit) object.CommitIter { | |||||
return object.NewCommitPostorderIter(c, nil) | |||||
} | |||||
case LogOrderBSF: | case LogOrderBSF: | ||||
commitIter = object.NewCommitIterBSF(commit, nil, nil) | |||||
return func(c *object.Commit) object.CommitIter { | |||||
return object.NewCommitIterBSF(c, nil, nil) | |||||
} | |||||
case LogOrderCommitterTime: | case LogOrderCommitterTime: | ||||
commitIter = object.NewCommitIterCTime(commit, nil, nil) | |||||
default: | |||||
return nil, fmt.Errorf("invalid Order=%v", o.Order) | |||||
} | |||||
if o.FileName == nil { | |||||
return commitIter, nil | |||||
return func(c *object.Commit) object.CommitIter { | |||||
return object.NewCommitIterCTime(c, nil, nil) | |||||
} | |||||
} | } | ||||
return object.NewCommitFileIterFromIter(*o.FileName, commitIter), nil | |||||
return nil | |||||
} | } | ||||
// Tags returns all the tag References in a repository. | // Tags returns all the tag References in a repository. | ||||
@@ -14,6 +14,7 @@ import ( | |||||
"gopkg.in/src-d/go-billy.v4/osfs" | "gopkg.in/src-d/go-billy.v4/osfs" | ||||
"gopkg.in/src-d/go-git.v4/plumbing" | "gopkg.in/src-d/go-git.v4/plumbing" | ||||
"gopkg.in/src-d/go-git.v4/storage" | |||||
"gopkg.in/src-d/go-git.v4/utils/ioutil" | "gopkg.in/src-d/go-git.v4/utils/ioutil" | ||||
"gopkg.in/src-d/go-billy.v4" | "gopkg.in/src-d/go-billy.v4" | ||||
@@ -596,7 +597,7 @@ func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference | |||||
return err | return err | ||||
} | } | ||||
if ref.Hash() != old.Hash() { | if ref.Hash() != old.Hash() { | ||||
return fmt.Errorf("reference has changed concurrently") | |||||
return storage.ErrReferenceHasChanged | |||||
} | } | ||||
_, err = f.Seek(0, io.SeekStart) | _, err = f.Seek(0, io.SeekStart) | ||||
if err != nil { | if err != nil { | ||||
@@ -1,15 +1,24 @@ | |||||
// +build !norwfs | |||||
package dotgit | package dotgit | ||||
import ( | import ( | ||||
"fmt" | |||||
"os" | "os" | ||||
"gopkg.in/src-d/go-git.v4/plumbing" | "gopkg.in/src-d/go-git.v4/plumbing" | ||||
"gopkg.in/src-d/go-git.v4/utils/ioutil" | "gopkg.in/src-d/go-git.v4/utils/ioutil" | ||||
"gopkg.in/src-d/go-billy.v4" | |||||
) | ) | ||||
func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) { | func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) { | ||||
if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { | |||||
return d.setRefRwfs(fileName, content, old) | |||||
} | |||||
return d.setRefNorwfs(fileName, content, old) | |||||
} | |||||
func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) { | |||||
// If we are not checking an old ref, just truncate the file. | // If we are not checking an old ref, just truncate the file. | ||||
mode := os.O_RDWR | os.O_CREATE | mode := os.O_RDWR | os.O_CREATE | ||||
if old == nil { | if old == nil { | ||||
@@ -41,3 +50,41 @@ func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err | |||||
_, err = f.Write([]byte(content)) | _, err = f.Write([]byte(content)) | ||||
return err | return err | ||||
} | } | ||||
// There are some filesystems that don't support opening files in RDWD mode. | |||||
// In these filesystems the standard SetRef function can not be used as it | |||||
// reads the reference file to check that it's not modified before updating it. | |||||
// | |||||
// This version of the function writes the reference without extra checks | |||||
// making it compatible with these simple filesystems. This is usually not | |||||
// a problem as they should be accessed by only one process at a time. | |||||
func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error { | |||||
_, err := d.fs.Stat(fileName) | |||||
if err == nil && old != nil { | |||||
fRead, err := d.fs.Open(fileName) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
ref, err := d.readReferenceFrom(fRead, old.Name().String()) | |||||
fRead.Close() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
if ref.Hash() != old.Hash() { | |||||
return fmt.Errorf("reference has changed concurrently") | |||||
} | |||||
} | |||||
f, err := d.fs.Create(fileName) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
defer f.Close() | |||||
_, err = f.Write([]byte(content)) | |||||
return err | |||||
} |
@@ -1,47 +0,0 @@ | |||||
// +build norwfs | |||||
package dotgit | |||||
import ( | |||||
"fmt" | |||||
"gopkg.in/src-d/go-git.v4/plumbing" | |||||
) | |||||
// There are some filesystems that don't support opening files in RDWD mode. | |||||
// In these filesystems the standard SetRef function can not be used as i | |||||
// reads the reference file to check that it's not modified before updating it. | |||||
// | |||||
// This version of the function writes the reference without extra checks | |||||
// making it compatible with these simple filesystems. This is usually not | |||||
// a problem as they should be accessed by only one process at a time. | |||||
func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) error { | |||||
_, err := d.fs.Stat(fileName) | |||||
if err == nil && old != nil { | |||||
fRead, err := d.fs.Open(fileName) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
ref, err := d.readReferenceFrom(fRead, old.Name().String()) | |||||
fRead.Close() | |||||
if err != nil { | |||||
return err | |||||
} | |||||
if ref.Hash() != old.Hash() { | |||||
return fmt.Errorf("reference has changed concurrently") | |||||
} | |||||
} | |||||
f, err := d.fs.Create(fileName) | |||||
if err != nil { | |||||
return err | |||||
} | |||||
defer f.Close() | |||||
_, err = f.Write([]byte(content)) | |||||
return err | |||||
} |
@@ -20,24 +20,25 @@ import ( | |||||
type ObjectStorage struct { | type ObjectStorage struct { | ||||
options Options | options Options | ||||
// deltaBaseCache is an object cache uses to cache delta's bases when | |||||
deltaBaseCache cache.Object | |||||
// objectCache is an object cache uses to cache delta's bases and also recently | |||||
// loaded loose objects | |||||
objectCache cache.Object | |||||
dir *dotgit.DotGit | dir *dotgit.DotGit | ||||
index map[plumbing.Hash]idxfile.Index | index map[plumbing.Hash]idxfile.Index | ||||
} | } | ||||
// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache. | // NewObjectStorage creates a new ObjectStorage with the given .git directory and cache. | ||||
func NewObjectStorage(dir *dotgit.DotGit, cache cache.Object) *ObjectStorage { | |||||
return NewObjectStorageWithOptions(dir, cache, Options{}) | |||||
func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage { | |||||
return NewObjectStorageWithOptions(dir, objectCache, Options{}) | |||||
} | } | ||||
// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options | // NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options | ||||
func NewObjectStorageWithOptions(dir *dotgit.DotGit, cache cache.Object, ops Options) *ObjectStorage { | |||||
func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage { | |||||
return &ObjectStorage{ | return &ObjectStorage{ | ||||
options: ops, | |||||
deltaBaseCache: cache, | |||||
dir: dir, | |||||
options: ops, | |||||
objectCache: objectCache, | |||||
dir: dir, | |||||
} | } | ||||
} | } | ||||
@@ -206,7 +207,7 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) ( | |||||
idx := s.index[pack] | idx := s.index[pack] | ||||
hash, err := idx.FindHash(offset) | hash, err := idx.FindHash(offset) | ||||
if err == nil { | if err == nil { | ||||
obj, ok := s.deltaBaseCache.Get(hash) | |||||
obj, ok := s.objectCache.Get(hash) | |||||
if ok { | if ok { | ||||
return obj.Size(), nil | return obj.Size(), nil | ||||
} | } | ||||
@@ -215,8 +216,8 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) ( | |||||
} | } | ||||
var p *packfile.Packfile | var p *packfile.Packfile | ||||
if s.deltaBaseCache != nil { | |||||
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache) | |||||
if s.objectCache != nil { | |||||
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache) | |||||
} else { | } else { | ||||
p = packfile.NewPackfile(idx, s.dir.Fs(), f) | p = packfile.NewPackfile(idx, s.dir.Fs(), f) | ||||
} | } | ||||
@@ -241,9 +242,19 @@ func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) ( | |||||
// EncodedObject returns the object with the given hash, by searching for it in | // EncodedObject returns the object with the given hash, by searching for it in | ||||
// the packfile and the git object directories. | // the packfile and the git object directories. | ||||
func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { | func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { | ||||
obj, err := s.getFromUnpacked(h) | |||||
if err == plumbing.ErrObjectNotFound { | |||||
var obj plumbing.EncodedObject | |||||
var err error | |||||
if s.index != nil { | |||||
obj, err = s.getFromPackfile(h, false) | obj, err = s.getFromPackfile(h, false) | ||||
if err == plumbing.ErrObjectNotFound { | |||||
obj, err = s.getFromUnpacked(h) | |||||
} | |||||
} else { | |||||
obj, err = s.getFromUnpacked(h) | |||||
if err == plumbing.ErrObjectNotFound { | |||||
obj, err = s.getFromPackfile(h, false) | |||||
} | |||||
} | } | ||||
// If the error is still object not found, check if it's a shared object | // If the error is still object not found, check if it's a shared object | ||||
@@ -254,7 +265,7 @@ func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (p | |||||
// Create a new object storage with the DotGit(s) and check for the | // Create a new object storage with the DotGit(s) and check for the | ||||
// required hash object. Skip when not found. | // required hash object. Skip when not found. | ||||
for _, dg := range dotgits { | for _, dg := range dotgits { | ||||
o := NewObjectStorage(dg, s.deltaBaseCache) | |||||
o := NewObjectStorage(dg, s.objectCache) | |||||
enobj, enerr := o.EncodedObject(t, h) | enobj, enerr := o.EncodedObject(t, h) | ||||
if enerr != nil { | if enerr != nil { | ||||
continue | continue | ||||
@@ -304,9 +315,12 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb | |||||
return nil, err | return nil, err | ||||
} | } | ||||
defer ioutil.CheckClose(f, &err) | defer ioutil.CheckClose(f, &err) | ||||
if cacheObj, found := s.objectCache.Get(h); found { | |||||
return cacheObj, nil | |||||
} | |||||
obj = s.NewEncodedObject() | obj = s.NewEncodedObject() | ||||
r, err := objfile.NewReader(f) | r, err := objfile.NewReader(f) | ||||
if err != nil { | if err != nil { | ||||
@@ -327,6 +341,8 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb | |||||
return nil, err | return nil, err | ||||
} | } | ||||
s.objectCache.Put(obj) | |||||
_, err = io.Copy(w, r) | _, err = io.Copy(w, r) | ||||
return obj, err | return obj, err | ||||
} | } | ||||
@@ -369,7 +385,7 @@ func (s *ObjectStorage) decodeObjectAt( | |||||
) (plumbing.EncodedObject, error) { | ) (plumbing.EncodedObject, error) { | ||||
hash, err := idx.FindHash(offset) | hash, err := idx.FindHash(offset) | ||||
if err == nil { | if err == nil { | ||||
obj, ok := s.deltaBaseCache.Get(hash) | |||||
obj, ok := s.objectCache.Get(hash) | |||||
if ok { | if ok { | ||||
return obj, nil | return obj, nil | ||||
} | } | ||||
@@ -380,8 +396,8 @@ func (s *ObjectStorage) decodeObjectAt( | |||||
} | } | ||||
var p *packfile.Packfile | var p *packfile.Packfile | ||||
if s.deltaBaseCache != nil { | |||||
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache) | |||||
if s.objectCache != nil { | |||||
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache) | |||||
} else { | } else { | ||||
p = packfile.NewPackfile(idx, s.dir.Fs(), f) | p = packfile.NewPackfile(idx, s.dir.Fs(), f) | ||||
} | } | ||||
@@ -400,11 +416,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt( | |||||
} | } | ||||
p := packfile.NewScanner(f) | p := packfile.NewScanner(f) | ||||
if _, err := p.SeekFromStart(offset); err != nil { | |||||
return nil, err | |||||
} | |||||
header, err := p.NextObjectHeader() | |||||
header, err := p.SeekObjectHeader(offset) | |||||
if err != nil { | if err != nil { | ||||
return nil, err | return nil, err | ||||
} | } | ||||
@@ -495,7 +507,7 @@ func (s *ObjectStorage) buildPackfileIters( | |||||
} | } | ||||
return newPackfileIter( | return newPackfileIter( | ||||
s.dir.Fs(), pack, t, seen, s.index[h], | s.dir.Fs(), pack, t, seen, s.index[h], | ||||
s.deltaBaseCache, s.options.KeepDescriptors, | |||||
s.objectCache, s.options.KeepDescriptors, | |||||
) | ) | ||||
}, | }, | ||||
}, nil | }, nil | ||||
@@ -51,11 +51,7 @@ func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) | |||||
fs: fs, | fs: fs, | ||||
dir: dir, | dir: dir, | ||||
ObjectStorage: ObjectStorage{ | |||||
options: ops, | |||||
deltaBaseCache: cache, | |||||
dir: dir, | |||||
}, | |||||
ObjectStorage: *NewObjectStorageWithOptions(dir, cache, ops), | |||||
ReferenceStorage: ReferenceStorage{dir: dir}, | ReferenceStorage: ReferenceStorage{dir: dir}, | ||||
IndexStorage: IndexStorage{dir: dir}, | IndexStorage: IndexStorage{dir: dir}, | ||||
ShallowStorage: ShallowStorage{dir: dir}, | ShallowStorage: ShallowStorage{dir: dir}, | ||||
@@ -13,7 +13,6 @@ import ( | |||||
) | ) | ||||
var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type") | var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type") | ||||
var ErrRefHasChanged = fmt.Errorf("reference has changed concurrently") | |||||
// Storage is an implementation of git.Storer that stores data on memory, being | // Storage is an implementation of git.Storer that stores data on memory, being | ||||
// ephemeral. The use of this storage should be done in controlled envoriments, | // ephemeral. The use of this storage should be done in controlled envoriments, | ||||
@@ -258,7 +257,7 @@ func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) err | |||||
if old != nil { | if old != nil { | ||||
tmp := r[ref.Name()] | tmp := r[ref.Name()] | ||||
if tmp != nil && tmp.Hash() != old.Hash() { | if tmp != nil && tmp.Hash() != old.Hash() { | ||||
return ErrRefHasChanged | |||||
return storage.ErrReferenceHasChanged | |||||
} | } | ||||
} | } | ||||
r[ref.Name()] = ref | r[ref.Name()] = ref | ||||
@@ -1,10 +1,14 @@ | |||||
package storage | package storage | ||||
import ( | import ( | ||||
"errors" | |||||
"gopkg.in/src-d/go-git.v4/config" | "gopkg.in/src-d/go-git.v4/config" | ||||
"gopkg.in/src-d/go-git.v4/plumbing/storer" | "gopkg.in/src-d/go-git.v4/plumbing/storer" | ||||
) | ) | ||||
var ErrReferenceHasChanged = errors.New("reference has changed concurrently") | |||||
// Storer is a generic storage of objects, references and any information | // Storer is a generic storage of objects, references and any information | ||||
// related to a particular repository. The package gopkg.in/src-d/go-git.v4/storage | // related to a particular repository. The package gopkg.in/src-d/go-git.v4/storage | ||||
// contains two implementation a filesystem base implementation (such as `.git`) | // contains two implementation a filesystem base implementation (such as `.git`) | ||||
@@ -3,8 +3,6 @@ package noder | |||||
import ( | import ( | ||||
"bytes" | "bytes" | ||||
"strings" | "strings" | ||||
"golang.org/x/text/unicode/norm" | |||||
) | ) | ||||
// Path values represent a noder and its ancestors. The root goes first | // Path values represent a noder and its ancestors. The root goes first | ||||
@@ -80,11 +78,9 @@ func (p Path) Compare(other Path) int { | |||||
case i == len(p): | case i == len(p): | ||||
return -1 | return -1 | ||||
default: | default: | ||||
form := norm.Form(norm.NFC) | |||||
this := form.String(p[i].Name()) | |||||
that := form.String(other[i].Name()) | |||||
cmp := strings.Compare(this, that) | |||||
// We do *not* normalize Unicode here. CGit doesn't. | |||||
// https://github.com/src-d/go-git/issues/1057 | |||||
cmp := strings.Compare(p[i].Name(), other[i].Name()) | |||||
if cmp != 0 { | if cmp != 0 { | ||||
return cmp | return cmp | ||||
} | } | ||||
@@ -25,10 +25,11 @@ import ( | |||||
) | ) | ||||
var ( | var ( | ||||
ErrWorktreeNotClean = errors.New("worktree is not clean") | |||||
ErrSubmoduleNotFound = errors.New("submodule not found") | |||||
ErrUnstagedChanges = errors.New("worktree contains unstaged changes") | |||||
ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink") | |||||
ErrWorktreeNotClean = errors.New("worktree is not clean") | |||||
ErrSubmoduleNotFound = errors.New("submodule not found") | |||||
ErrUnstagedChanges = errors.New("worktree contains unstaged changes") | |||||
ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink") | |||||
ErrNonFastForwardUpdate = errors.New("non-fast-forward update") | |||||
) | ) | ||||
// Worktree represents a git worktree. | // Worktree represents a git worktree. | ||||
@@ -101,7 +102,7 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { | |||||
} | } | ||||
if !ff { | if !ff { | ||||
return fmt.Errorf("non-fast-forward update") | |||||
return ErrNonFastForwardUpdate | |||||
} | } | ||||
} | } | ||||
@@ -415,7 +415,7 @@ gopkg.in/src-d/go-billy.v4/osfs | |||||
gopkg.in/src-d/go-billy.v4/util | gopkg.in/src-d/go-billy.v4/util | ||||
gopkg.in/src-d/go-billy.v4/helper/chroot | gopkg.in/src-d/go-billy.v4/helper/chroot | ||||
gopkg.in/src-d/go-billy.v4/helper/polyfill | gopkg.in/src-d/go-billy.v4/helper/polyfill | ||||
# gopkg.in/src-d/go-git.v4 v4.8.0 | |||||
# gopkg.in/src-d/go-git.v4 v4.10.0 | |||||
gopkg.in/src-d/go-git.v4 | gopkg.in/src-d/go-git.v4 | ||||
gopkg.in/src-d/go-git.v4/config | gopkg.in/src-d/go-git.v4/config | ||||
gopkg.in/src-d/go-git.v4/plumbing | gopkg.in/src-d/go-git.v4/plumbing | ||||
@@ -442,6 +442,7 @@ gopkg.in/src-d/go-git.v4/utils/merkletrie | |||||
gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem | gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem | ||||
gopkg.in/src-d/go-git.v4/utils/merkletrie/index | gopkg.in/src-d/go-git.v4/utils/merkletrie/index | ||||
gopkg.in/src-d/go-git.v4/utils/merkletrie/noder | gopkg.in/src-d/go-git.v4/utils/merkletrie/noder | ||||
gopkg.in/src-d/go-git.v4/internal/url | |||||
gopkg.in/src-d/go-git.v4/plumbing/format/config | gopkg.in/src-d/go-git.v4/plumbing/format/config | ||||
gopkg.in/src-d/go-git.v4/utils/binary | gopkg.in/src-d/go-git.v4/utils/binary | ||||
gopkg.in/src-d/go-git.v4/plumbing/format/idxfile | gopkg.in/src-d/go-git.v4/plumbing/format/idxfile | ||||