aboutsummaryrefslogtreecommitdiffstats
path: root/entities
diff options
context:
space:
mode:
Diffstat (limited to 'entities')
-rw-r--r--entities/bug/bug.go179
-rw-r--r--entities/bug/bug_actions.go74
-rw-r--r--entities/bug/comment.go45
-rw-r--r--entities/bug/err.go17
-rw-r--r--entities/bug/interface.go44
-rw-r--r--entities/bug/label.go95
-rw-r--r--entities/bug/label_test.go35
-rw-r--r--entities/bug/op_add_comment.go93
-rw-r--r--entities/bug/op_add_comment_test.go18
-rw-r--r--entities/bug/op_create.go112
-rw-r--r--entities/bug/op_create_test.go67
-rw-r--r--entities/bug/op_edit_comment.go129
-rw-r--r--entities/bug/op_edit_comment_test.go84
-rw-r--r--entities/bug/op_label_change.go292
-rw-r--r--entities/bug/op_label_change_test.go20
-rw-r--r--entities/bug/op_set_metadata.go21
-rw-r--r--entities/bug/op_set_status.go95
-rw-r--r--entities/bug/op_set_status_test.go14
-rw-r--r--entities/bug/op_set_title.go112
-rw-r--r--entities/bug/op_set_title_test.go14
-rw-r--r--entities/bug/operation.go73
-rw-r--r--entities/bug/operation_test.go131
-rw-r--r--entities/bug/resolver.go21
-rw-r--r--entities/bug/snapshot.go144
-rw-r--r--entities/bug/sorting.go57
-rw-r--r--entities/bug/status.go86
-rw-r--r--entities/bug/timeline.go80
-rw-r--r--entities/bug/with_snapshot.go53
-rw-r--r--entities/identity/common.go37
-rw-r--r--entities/identity/identity.go620
-rw-r--r--entities/identity/identity_actions.go125
-rw-r--r--entities/identity/identity_actions_test.go157
-rw-r--r--entities/identity/identity_stub.go101
-rw-r--r--entities/identity/identity_stub_test.go26
-rw-r--r--entities/identity/identity_test.go292
-rw-r--r--entities/identity/identity_user.go68
-rw-r--r--entities/identity/interface.go62
-rw-r--r--entities/identity/key.go234
-rw-r--r--entities/identity/key_test.go60
-rw-r--r--entities/identity/resolver.go34
-rw-r--r--entities/identity/version.go273
-rw-r--r--entities/identity/version_test.go78
42 files changed, 4372 insertions, 0 deletions
diff --git a/entities/bug/bug.go b/entities/bug/bug.go
new file mode 100644
index 00000000..213a4ca4
--- /dev/null
+++ b/entities/bug/bug.go
@@ -0,0 +1,179 @@
+// Package bug contains the bug data model and low-level related functions
+package bug
+
+import (
+ "fmt"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/repository"
+)
+
// Compile-time checks that *Bug satisfies both the package-local and the
// generic entity interfaces.
var _ Interface = &Bug{}
var _ entity.Interface = &Bug{}

// formatVersion is the current on-disk format of a Bug.
// 1: original format
// 2: no more legacy identities
// 3: Ids are generated from the create operation serialized data instead of from the first git commit
// 4: with DAG entity framework
const formatVersion = 4

// def describes how Bug entities are handled by the DAG entity framework
// (type name, git namespace, operation decoding and format version).
var def = dag.Definition{
	Typename:             "bug",
	Namespace:            "bugs",
	OperationUnmarshaler: operationUnmarshaller,
	FormatVersion:        formatVersion,
}

// ClockLoader is the dag clock loader for the Bug definition.
var ClockLoader = dag.ClockLoader(def)
+
// Bug holds the data of a bug thread, organized in a way close to
// how it will be persisted inside Git. This is the data structure
// used to merge two different versions of the same Bug.
type Bug struct {
	*dag.Entity
}
+
// NewBug create a new, empty Bug backed by a fresh dag.Entity.
func NewBug() *Bug {
	return &Bug{
		Entity: dag.New(def),
	}
}
+
// simpleResolvers returns the default entity.Resolvers used when reading a bug:
// identities are resolved directly from the repository.
func simpleResolvers(repo repository.ClockedRepo) entity.Resolvers {
	return entity.Resolvers{
		&identity.Identity{}: identity.NewSimpleResolver(repo),
	}
}
+
// Read will read a bug from a repository, using the default resolvers.
func Read(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
	return ReadWithResolver(repo, simpleResolvers(repo), id)
}
+
// ReadWithResolver will read a bug from its Id, with custom resolvers.
// It returns the error from the underlying dag read unchanged.
func ReadWithResolver(repo repository.ClockedRepo, resolvers entity.Resolvers, id entity.Id) (*Bug, error) {
	e, err := dag.Read(def, repo, resolvers, id)
	if err != nil {
		return nil, err
	}
	return &Bug{Entity: e}, nil
}
+
// StreamedBug is a Bug, or the error that prevented reading it, sent over
// the channels returned by the ReadAll functions.
type StreamedBug struct {
	Bug *Bug
	Err error
}
+
// ReadAll read and parse all local bugs, using the default resolvers.
func ReadAll(repo repository.ClockedRepo) <-chan StreamedBug {
	return readAll(repo, simpleResolvers(repo))
}
+
// ReadAllWithResolver read and parse all local bugs, using the given custom resolvers.
func ReadAllWithResolver(repo repository.ClockedRepo, resolvers entity.Resolvers) <-chan StreamedBug {
	return readAll(repo, resolvers)
}
+
+// Read and parse all available bug with a given ref prefix
+func readAll(repo repository.ClockedRepo, resolvers entity.Resolvers) <-chan StreamedBug {
+ out := make(chan StreamedBug)
+
+ go func() {
+ defer close(out)
+
+ for streamedEntity := range dag.ReadAll(def, repo, resolvers) {
+ if streamedEntity.Err != nil {
+ out <- StreamedBug{
+ Err: streamedEntity.Err,
+ }
+ } else {
+ out <- StreamedBug{
+ Bug: &Bug{Entity: streamedEntity.Entity},
+ }
+ }
+ }
+ }()
+
+ return out
+}
+
// ListLocalIds list all the available local bug ids in the given repository.
func ListLocalIds(repo repository.Repo) ([]entity.Id, error) {
	return dag.ListLocalIds(def, repo)
}
+
+// Validate check if the Bug data is valid
+func (bug *Bug) Validate() error {
+ if err := bug.Entity.Validate(); err != nil {
+ return err
+ }
+
+ // The very first Op should be a CreateOp
+ firstOp := bug.FirstOp()
+ if firstOp == nil || firstOp.Type() != CreateOp {
+ return fmt.Errorf("first operation should be a Create op")
+ }
+
+ // Check that there is no more CreateOp op
+ for i, op := range bug.Operations() {
+ if i == 0 {
+ continue
+ }
+ if op.Type() == CreateOp {
+ return fmt.Errorf("only one Create op allowed")
+ }
+ }
+
+ return nil
+}
+
// Append add a new Operation to the Bug, in the staging area (not yet committed).
func (bug *Bug) Append(op Operation) {
	bug.Entity.Append(op)
}
+
+// Operations return the ordered operations
+func (bug *Bug) Operations() []Operation {
+ source := bug.Entity.Operations()
+ result := make([]Operation, len(source))
+ for i, op := range source {
+ result[i] = op.(Operation)
+ }
+ return result
+}
+
// Compile a bug in a easily usable snapshot, by applying every operation in
// order on a fresh snapshot (starting in the open state).
func (bug *Bug) Compile() *Snapshot {
	snap := &Snapshot{
		id:     bug.Id(),
		Status: OpenStatus,
	}

	for _, op := range bug.Operations() {
		op.Apply(snap)
		snap.Operations = append(snap.Operations, op)
	}

	return snap
}
+
+// FirstOp lookup for the very first operation of the bug.
+// For a valid Bug, this operation should be a CreateOp
+func (bug *Bug) FirstOp() Operation {
+ if fo := bug.Entity.FirstOp(); fo != nil {
+ return fo.(Operation)
+ }
+ return nil
+}
+
+// LastOp lookup for the very last operation of the bug.
+// For a valid Bug, should never be nil
+func (bug *Bug) LastOp() Operation {
+ if lo := bug.Entity.LastOp(); lo != nil {
+ return lo.(Operation)
+ }
+ return nil
+}
diff --git a/entities/bug/bug_actions.go b/entities/bug/bug_actions.go
new file mode 100644
index 00000000..864c2052
--- /dev/null
+++ b/entities/bug/bug_actions.go
@@ -0,0 +1,74 @@
+package bug
+
+import (
+ "github.com/pkg/errors"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/repository"
+)
+
// Fetch retrieve updates from a remote.
// This does not change the local bugs state.
func Fetch(repo repository.Repo, remote string) (string, error) {
	return dag.Fetch(def, repo, remote)
}
+
// Push update a remote with the local changes.
func Push(repo repository.Repo, remote string) (string, error) {
	return dag.Push(def, repo, remote)
}
+
+// Pull will do a Fetch + MergeAll
+// This function will return an error if a merge fail
+// Note: an author is necessary for the case where a merge commit is created, as this commit will
+// have an author and may be signed if a signing key is available.
+func Pull(repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, mergeAuthor identity.Interface) error {
+ _, err := Fetch(repo, remote)
+ if err != nil {
+ return err
+ }
+
+ for merge := range MergeAll(repo, resolvers, remote, mergeAuthor) {
+ if merge.Err != nil {
+ return merge.Err
+ }
+ if merge.Status == entity.MergeStatusInvalid {
+ return errors.Errorf("merge failure: %s", merge.Reason)
+ }
+ }
+
+ return nil
+}
+
+// MergeAll will merge all the available remote bug
+// Note: an author is necessary for the case where a merge commit is created, as this commit will
+// have an author and may be signed if a signing key is available.
+func MergeAll(repo repository.ClockedRepo, resolvers entity.Resolvers, remote string, mergeAuthor identity.Interface) <-chan entity.MergeResult {
+ out := make(chan entity.MergeResult)
+
+ go func() {
+ defer close(out)
+
+ results := dag.MergeAll(def, repo, resolvers, remote, mergeAuthor)
+
+ // wrap the dag.Entity into a complete Bug
+ for result := range results {
+ result := result
+ if result.Entity != nil {
+ result.Entity = &Bug{
+ Entity: result.Entity.(*dag.Entity),
+ }
+ }
+ out <- result
+ }
+ }()
+
+ return out
+}
+
// RemoveBug will remove a local bug from its entity.Id.
func RemoveBug(repo repository.ClockedRepo, id entity.Id) error {
	return dag.Remove(def, repo, id)
}
diff --git a/entities/bug/comment.go b/entities/bug/comment.go
new file mode 100644
index 00000000..fcf501ab
--- /dev/null
+++ b/entities/bug/comment.go
@@ -0,0 +1,45 @@
+package bug
+
+import (
+ "github.com/dustin/go-humanize"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
// Comment represent a comment in a Bug.
type Comment struct {
	// id should be the result of entity.CombineIds with the Bug id and the id
	// of the Operation that created the comment
	id entity.Id
	// Author is the identity that wrote the comment.
	Author identity.Interface
	// Message is the text content of the comment.
	Message string
	// Files are the hashes of the files attached to the comment.
	Files []repository.Hash

	// Creation time of the comment.
	// Should be used only for human display, never for ordering as we can't rely on it in a distributed system.
	UnixTime timestamp.Timestamp
}
+
+// Id return the Comment identifier
+func (c Comment) Id() entity.Id {
+ if c.id == "" {
+ // simply panic as it would be a coding error (no id provided at construction)
+ panic("no id")
+ }
+ return c.id
+}
+
// FormatTimeRel format the UnixTime of the comment for human consumption,
// relative to now (e.g. "3 days ago").
func (c Comment) FormatTimeRel() string {
	return humanize.Time(c.UnixTime.Time())
}
+
+func (c Comment) FormatTime() string {
+ return c.UnixTime.Time().Format("Mon Jan 2 15:04:05 2006 +0200")
+}
+
// IsAuthored is a sign post method for gqlgen; it intentionally does nothing.
func (c Comment) IsAuthored() {}
diff --git a/entities/bug/err.go b/entities/bug/err.go
new file mode 100644
index 00000000..1bd174bb
--- /dev/null
+++ b/entities/bug/err.go
@@ -0,0 +1,17 @@
+package bug
+
+import (
+ "errors"
+
+ "github.com/MichaelMure/git-bug/entity"
+)
+
// ErrBugNotExist is a sentinel error reported when a bug doesn't exist.
var ErrBugNotExist = errors.New("bug doesn't exist")

// NewErrMultipleMatchBug returns an error reporting that several bugs match.
func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("bug", matching)
}

// NewErrMultipleMatchOp returns an error reporting that several operations match.
func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("operation", matching)
}
diff --git a/entities/bug/interface.go b/entities/bug/interface.go
new file mode 100644
index 00000000..2ae31fd1
--- /dev/null
+++ b/entities/bug/interface.go
@@ -0,0 +1,44 @@
+package bug
+
+import (
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/lamport"
+)
+
// Interface is the common read/write contract implemented by Bug (and its wrappers).
type Interface interface {
	// Id returns the Bug identifier
	Id() entity.Id

	// Validate checks if the Bug data is valid
	Validate() error

	// Append an operation into the staging area, to be committed later
	Append(op Operation)

	// Operations returns the ordered operations
	Operations() []Operation

	// NeedCommit indicates that the in-memory state changed and needs to be committed in the repository
	NeedCommit() bool

	// Commit writes the staging area in Git and move the operations to the packs
	Commit(repo repository.ClockedRepo) error

	// FirstOp lookup for the very first operation of the bug.
	// For a valid Bug, this operation should be a CreateOp
	FirstOp() Operation

	// LastOp lookup for the very last operation of the bug.
	// For a valid Bug, should never be nil
	LastOp() Operation

	// Compile a bug in an easily usable snapshot
	Compile() *Snapshot

	// CreateLamportTime return the Lamport time of creation
	CreateLamportTime() lamport.Time

	// EditLamportTime return the Lamport time of the last edit
	EditLamportTime() lamport.Time
}
diff --git a/entities/bug/label.go b/entities/bug/label.go
new file mode 100644
index 00000000..79b5f591
--- /dev/null
+++ b/entities/bug/label.go
@@ -0,0 +1,95 @@
+package bug
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "image/color"
+
+ fcolor "github.com/fatih/color"
+
+ "github.com/MichaelMure/git-bug/util/text"
+)
+
// Label is a textual tag that can be attached to a bug.
type Label string

// String returns the raw text of the label.
func (l Label) String() string {
	return string(l)
}
+
+// RGBA from a Label computed in a deterministic way
+func (l Label) Color() LabelColor {
+ // colors from: https://material-ui.com/style/color/
+ colors := []LabelColor{
+ {R: 244, G: 67, B: 54, A: 255}, // red
+ {R: 233, G: 30, B: 99, A: 255}, // pink
+ {R: 156, G: 39, B: 176, A: 255}, // purple
+ {R: 103, G: 58, B: 183, A: 255}, // deepPurple
+ {R: 63, G: 81, B: 181, A: 255}, // indigo
+ {R: 33, G: 150, B: 243, A: 255}, // blue
+ {R: 3, G: 169, B: 244, A: 255}, // lightBlue
+ {R: 0, G: 188, B: 212, A: 255}, // cyan
+ {R: 0, G: 150, B: 136, A: 255}, // teal
+ {R: 76, G: 175, B: 80, A: 255}, // green
+ {R: 139, G: 195, B: 74, A: 255}, // lightGreen
+ {R: 205, G: 220, B: 57, A: 255}, // lime
+ {R: 255, G: 235, B: 59, A: 255}, // yellow
+ {R: 255, G: 193, B: 7, A: 255}, // amber
+ {R: 255, G: 152, B: 0, A: 255}, // orange
+ {R: 255, G: 87, B: 34, A: 255}, // deepOrange
+ {R: 121, G: 85, B: 72, A: 255}, // brown
+ {R: 158, G: 158, B: 158, A: 255}, // grey
+ {R: 96, G: 125, B: 139, A: 255}, // blueGrey
+ }
+
+ id := 0
+ hash := sha256.Sum256([]byte(l))
+ for _, char := range hash {
+ id = (id + int(char)) % len(colors)
+ }
+
+ return colors[id]
+}
+
+func (l Label) Validate() error {
+ str := string(l)
+
+ if text.Empty(str) {
+ return fmt.Errorf("empty")
+ }
+
+ if !text.SafeOneLine(str) {
+ return fmt.Errorf("label has unsafe characters")
+ }
+
+ return nil
+}
+
// LabelColor is the color of a Label, stored as RGBA components.
type LabelColor color.RGBA

// RGBA converts the LabelColor into the standard library color.RGBA type.
func (lc LabelColor) RGBA() color.RGBA {
	return color.RGBA(lc)
}
+
+func (lc LabelColor) Term256() Term256 {
+ red := Term256(lc.R) * 6 / 256
+ green := Term256(lc.G) * 6 / 256
+ blue := Term256(lc.B) * 6 / 256
+
+ return red*36 + green*6 + blue + 16
+}
+
+type Term256 int
+
+func (t Term256) Escape() string {
+ if fcolor.NoColor {
+ return ""
+ }
+ return fmt.Sprintf("\x1b[38;5;%dm", t)
+}
+
+func (t Term256) Unescape() string {
+ if fcolor.NoColor {
+ return ""
+ }
+ return "\x1b[0m"
+}
diff --git a/entities/bug/label_test.go b/entities/bug/label_test.go
new file mode 100644
index 00000000..49401c49
--- /dev/null
+++ b/entities/bug/label_test.go
@@ -0,0 +1,35 @@
+package bug
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
// TestLabelRGBA checks the deterministic color computed for a sample label.
func TestLabelRGBA(t *testing.T) {
	rgba := Label("test1").Color()
	expected := LabelColor{R: 0, G: 150, B: 136, A: 255}

	require.Equal(t, expected, rgba)
}
+
// TestLabelRGBASimilar checks that a label close to another still gets its own color.
func TestLabelRGBASimilar(t *testing.T) {
	rgba := Label("test2").Color()
	expected := LabelColor{R: 3, G: 169, B: 244, A: 255}

	require.Equal(t, expected, rgba)
}
+
// TestLabelRGBAReverse checks the color of a label with reversed characters.
func TestLabelRGBAReverse(t *testing.T) {
	rgba := Label("tset").Color()
	expected := LabelColor{R: 63, G: 81, B: 181, A: 255}

	require.Equal(t, expected, rgba)
}
+
// TestLabelRGBAEqual checks that the same label always yields the same color.
func TestLabelRGBAEqual(t *testing.T) {
	color1 := Label("test").Color()
	color2 := Label("test").Color()

	require.Equal(t, color1, color2)
}
diff --git a/entities/bug/op_add_comment.go b/entities/bug/op_add_comment.go
new file mode 100644
index 00000000..2e6a39f9
--- /dev/null
+++ b/entities/bug/op_add_comment.go
@@ -0,0 +1,93 @@
+package bug
+
+import (
+ "fmt"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/text"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
// Compile-time checks: AddCommentOperation is an Operation and can carry files.
var _ Operation = &AddCommentOperation{}
var _ dag.OperationWithFiles = &AddCommentOperation{}

// AddCommentOperation will add a new comment in the bug.
type AddCommentOperation struct {
	dag.OpBase
	Message string `json:"message"`
	// TODO: change for a map[string]util.hash to store the filename ?
	Files []repository.Hash `json:"files"`
}
+
// Id returns the identifier of the operation, derived from its serialized data.
func (op *AddCommentOperation) Id() entity.Id {
	return dag.IdOperation(op, &op.OpBase)
}
+
+func (op *AddCommentOperation) Apply(snapshot *Snapshot) {
+ snapshot.addActor(op.Author())
+ snapshot.addParticipant(op.Author())
+
+ comment := Comment{
+ id: entity.CombineIds(snapshot.Id(), op.Id()),
+ Message: op.Message,
+ Author: op.Author(),
+ Files: op.Files,
+ UnixTime: timestamp.Timestamp(op.UnixTime),
+ }
+
+ snapshot.Comments = append(snapshot.Comments, comment)
+
+ item := &AddCommentTimelineItem{
+ CommentTimelineItem: NewCommentTimelineItem(comment),
+ }
+
+ snapshot.Timeline = append(snapshot.Timeline, item)
+}
+
// GetFiles returns the hashes of the files attached to the comment.
func (op *AddCommentOperation) GetFiles() []repository.Hash {
	return op.Files
}
+
+func (op *AddCommentOperation) Validate() error {
+ if err := op.OpBase.Validate(op, AddCommentOp); err != nil {
+ return err
+ }
+
+ if !text.Safe(op.Message) {
+ return fmt.Errorf("message is not fully printable")
+ }
+
+ return nil
+}
+
// NewAddCommentOp creates an AddCommentOperation with the given author, time,
// message and attached files.
func NewAddCommentOp(author identity.Interface, unixTime int64, message string, files []repository.Hash) *AddCommentOperation {
	return &AddCommentOperation{
		OpBase:  dag.NewOpBase(AddCommentOp, author, unixTime),
		Message: message,
		Files:   files,
	}
}
+
// AddCommentTimelineItem hold a comment in the timeline.
type AddCommentTimelineItem struct {
	CommentTimelineItem
}

// IsAuthored is a sign post method for gqlgen; it intentionally does nothing.
func (a *AddCommentTimelineItem) IsAuthored() {}
+
+// AddComment is a convenience function to add a comment to a bug
+func AddComment(b Interface, author identity.Interface, unixTime int64, message string, files []repository.Hash, metadata map[string]string) (*AddCommentOperation, error) {
+ op := NewAddCommentOp(author, unixTime, message, files)
+ for key, val := range metadata {
+ op.SetMetadata(key, val)
+ }
+ if err := op.Validate(); err != nil {
+ return nil, err
+ }
+ b.Append(op)
+ return op, nil
+}
diff --git a/entities/bug/op_add_comment_test.go b/entities/bug/op_add_comment_test.go
new file mode 100644
index 00000000..6f29cb01
--- /dev/null
+++ b/entities/bug/op_add_comment_test.go
@@ -0,0 +1,18 @@
+package bug
+
+import (
+ "testing"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/repository"
+)
+
// TestAddCommentSerialize checks that AddCommentOperation round-trips through
// serialization, with and without attached files.
func TestAddCommentSerialize(t *testing.T) {
	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *AddCommentOperation {
		return NewAddCommentOp(author, unixTime, "message", nil)
	})
	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *AddCommentOperation {
		return NewAddCommentOp(author, unixTime, "message", []repository.Hash{"hash1", "hash2"})
	})
}
diff --git a/entities/bug/op_create.go b/entities/bug/op_create.go
new file mode 100644
index 00000000..fdfa131b
--- /dev/null
+++ b/entities/bug/op_create.go
@@ -0,0 +1,112 @@
+package bug
+
+import (
+ "fmt"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/text"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
// Compile-time checks: CreateOperation is an Operation and can carry files.
var _ Operation = &CreateOperation{}
var _ dag.OperationWithFiles = &CreateOperation{}

// CreateOperation define the initial creation of a bug.
type CreateOperation struct {
	dag.OpBase
	Title   string            `json:"title"`
	Message string            `json:"message"`
	Files   []repository.Hash `json:"files"`
}
+
// Id returns the identifier of the operation, derived from its serialized data.
func (op *CreateOperation) Id() entity.Id {
	return dag.IdOperation(op, &op.OpBase)
}
+
// Apply applies the operation on the snapshot: it sets the bug id, title,
// author, creation time, and initializes the comment list and timeline with
// the first comment.
func (op *CreateOperation) Apply(snapshot *Snapshot) {
	// sanity check: will fail when adding a second Create
	if snapshot.id != "" && snapshot.id != entity.UnsetId && snapshot.id != op.Id() {
		return
	}

	// the bug id is the id of its create operation
	snapshot.id = op.Id()

	snapshot.addActor(op.Author())
	snapshot.addParticipant(op.Author())

	snapshot.Title = op.Title

	comment := Comment{
		id:       entity.CombineIds(snapshot.Id(), op.Id()),
		Message:  op.Message,
		Author:   op.Author(),
		UnixTime: timestamp.Timestamp(op.UnixTime),
	}

	// reset (not append): Create is the first operation of a bug
	snapshot.Comments = []Comment{comment}
	snapshot.Author = op.Author()
	snapshot.CreateTime = op.Time()

	snapshot.Timeline = []TimelineItem{
		&CreateTimelineItem{
			CommentTimelineItem: NewCommentTimelineItem(comment),
		},
	}
}
+
// GetFiles returns the hashes of the files attached to the first comment.
func (op *CreateOperation) GetFiles() []repository.Hash {
	return op.Files
}
+
+func (op *CreateOperation) Validate() error {
+ if err := op.OpBase.Validate(op, CreateOp); err != nil {
+ return err
+ }
+
+ if text.Empty(op.Title) {
+ return fmt.Errorf("title is empty")
+ }
+ if !text.SafeOneLine(op.Title) {
+ return fmt.Errorf("title has unsafe characters")
+ }
+
+ if !text.Safe(op.Message) {
+ return fmt.Errorf("message is not fully printable")
+ }
+
+ return nil
+}
+
// NewCreateOp creates a CreateOperation with the given author, time, title,
// first message and attached files.
func NewCreateOp(author identity.Interface, unixTime int64, title, message string, files []repository.Hash) *CreateOperation {
	return &CreateOperation{
		OpBase:  dag.NewOpBase(CreateOp, author, unixTime),
		Title:   title,
		Message: message,
		Files:   files,
	}
}
+
// CreateTimelineItem replace a Create operation in the Timeline and hold its edition history.
type CreateTimelineItem struct {
	CommentTimelineItem
}

// IsAuthored is a sign post method for gqlgen; it intentionally does nothing.
func (c *CreateTimelineItem) IsAuthored() {}
+
+// Create is a convenience function to create a bug
+func Create(author identity.Interface, unixTime int64, title, message string, files []repository.Hash, metadata map[string]string) (*Bug, *CreateOperation, error) {
+ b := NewBug()
+ op := NewCreateOp(author, unixTime, title, message, files)
+ for key, val := range metadata {
+ op.SetMetadata(key, val)
+ }
+ if err := op.Validate(); err != nil {
+ return nil, op, err
+ }
+ b.Append(op)
+ return b, op, nil
+}
diff --git a/entities/bug/op_create_test.go b/entities/bug/op_create_test.go
new file mode 100644
index 00000000..f2c9e675
--- /dev/null
+++ b/entities/bug/op_create_test.go
@@ -0,0 +1,67 @@
+package bug
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
// TestCreate checks that applying a Create operation fills the snapshot
// (id, title, first comment, author, times, timeline) as expected.
func TestCreate(t *testing.T) {
	snapshot := Snapshot{}

	repo := repository.NewMockRepoClock()

	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
	require.NoError(t, err)

	unix := time.Now().Unix()

	create := NewCreateOp(rene, unix, "title", "message", nil)

	create.Apply(&snapshot)

	id := create.Id()
	require.NoError(t, id.Validate())

	// the first comment id combines the bug id and the create op id,
	// which are the same for a freshly created bug
	comment := Comment{
		id:       entity.CombineIds(create.Id(), create.Id()),
		Author:   rene,
		Message:  "message",
		UnixTime: timestamp.Timestamp(create.UnixTime),
	}

	expected := Snapshot{
		id:    create.Id(),
		Title: "title",
		Comments: []Comment{
			comment,
		},
		Author:       rene,
		Participants: []identity.Interface{rene},
		Actors:       []identity.Interface{rene},
		CreateTime:   create.Time(),
		Timeline: []TimelineItem{
			&CreateTimelineItem{
				CommentTimelineItem: NewCommentTimelineItem(comment),
			},
		},
	}

	require.Equal(t, expected, snapshot)
}
+
// TestCreateSerialize checks that CreateOperation round-trips through
// serialization, with and without attached files.
func TestCreateSerialize(t *testing.T) {
	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *CreateOperation {
		return NewCreateOp(author, unixTime, "title", "message", nil)
	})
	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *CreateOperation {
		return NewCreateOp(author, unixTime, "title", "message", []repository.Hash{"hash1", "hash2"})
	})
}
diff --git a/entities/bug/op_edit_comment.go b/entities/bug/op_edit_comment.go
new file mode 100644
index 00000000..41079f45
--- /dev/null
+++ b/entities/bug/op_edit_comment.go
@@ -0,0 +1,129 @@
+package bug
+
+import (
+ "fmt"
+
+ "github.com/pkg/errors"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+
+ "github.com/MichaelMure/git-bug/util/text"
+)
+
// Compile-time checks: EditCommentOperation is an Operation and can carry files.
var _ Operation = &EditCommentOperation{}
var _ dag.OperationWithFiles = &EditCommentOperation{}

// EditCommentOperation will change a comment in the bug.
type EditCommentOperation struct {
	dag.OpBase
	// Target is the id of the operation that created the edited comment.
	Target  entity.Id         `json:"target"`
	Message string            `json:"message"`
	Files   []repository.Hash `json:"files"`
}
+
// Id returns the identifier of the operation, derived from its serialized data.
func (op *EditCommentOperation) Id() entity.Id {
	return dag.IdOperation(op, &op.OpBase)
}
+
// Apply applies the operation on the snapshot: the targeted comment is looked
// up in the timeline, the new revision is appended to its history, and the
// matching entry in the snapshot's comment list is updated in place.
// If the target cannot be found (or is not a comment), the edit is a no-op.
func (op *EditCommentOperation) Apply(snapshot *Snapshot) {
	// Todo: currently any message can be edited, even by a different author
	// crypto signature are needed.

	// Recreate the Comment Id to match on
	commentId := entity.CombineIds(snapshot.Id(), op.Target)

	var target TimelineItem
	for i, item := range snapshot.Timeline {
		if item.Id() == commentId {
			target = snapshot.Timeline[i]
			break
		}
	}

	if target == nil {
		// Target not found, edit is a no-op
		return
	}

	// the new revision of the comment
	comment := Comment{
		id:       commentId,
		Message:  op.Message,
		Files:    op.Files,
		UnixTime: timestamp.Timestamp(op.UnixTime),
	}

	switch target := target.(type) {
	case *CreateTimelineItem:
		target.Append(comment)
	case *AddCommentTimelineItem:
		target.Append(comment)
	default:
		// somehow, the target matched on something that is not a comment
		// we make the op a no-op
		return
	}

	snapshot.addActor(op.Author())

	// Updating the corresponding comment

	for i := range snapshot.Comments {
		if snapshot.Comments[i].Id() == commentId {
			snapshot.Comments[i].Message = op.Message
			snapshot.Comments[i].Files = op.Files
			break
		}
	}
}
+
// GetFiles returns the hashes of the files attached to the edited revision.
func (op *EditCommentOperation) GetFiles() []repository.Hash {
	return op.Files
}
+
+func (op *EditCommentOperation) Validate() error {
+ if err := op.OpBase.Validate(op, EditCommentOp); err != nil {
+ return err
+ }
+
+ if err := op.Target.Validate(); err != nil {
+ return errors.Wrap(err, "target hash is invalid")
+ }
+
+ if !text.Safe(op.Message) {
+ return fmt.Errorf("message is not fully printable")
+ }
+
+ return nil
+}
+
// NewEditCommentOp creates an EditCommentOperation editing the comment created
// by the target operation, with a new message and attached files.
func NewEditCommentOp(author identity.Interface, unixTime int64, target entity.Id, message string, files []repository.Hash) *EditCommentOperation {
	return &EditCommentOperation{
		OpBase:  dag.NewOpBase(EditCommentOp, author, unixTime),
		Target:  target,
		Message: message,
		Files:   files,
	}
}
+
+// EditComment is a convenience function to apply the operation
+func EditComment(b Interface, author identity.Interface, unixTime int64, target entity.Id, message string, files []repository.Hash, metadata map[string]string) (*EditCommentOperation, error) {
+ op := NewEditCommentOp(author, unixTime, target, message, files)
+ for key, val := range metadata {
+ op.SetMetadata(key, val)
+ }
+ if err := op.Validate(); err != nil {
+ return nil, err
+ }
+ b.Append(op)
+ return op, nil
+}
+
+// EditCreateComment is a convenience function to edit the body of a bug (the first comment)
+func EditCreateComment(b Interface, author identity.Interface, unixTime int64, message string, files []repository.Hash, metadata map[string]string) (*EditCommentOperation, error) {
+ createOp := b.FirstOp().(*CreateOperation)
+ return EditComment(b, author, unixTime, createOp.Id(), message, files, metadata)
+}
diff --git a/entities/bug/op_edit_comment_test.go b/entities/bug/op_edit_comment_test.go
new file mode 100644
index 00000000..1b649cd1
--- /dev/null
+++ b/entities/bug/op_edit_comment_test.go
@@ -0,0 +1,84 @@
+package bug
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/repository"
+)
+
// TestEdit checks that editing the create comment and regular comments updates
// both the timeline histories and the snapshot's comment list, leaving
// unrelated comments untouched.
func TestEdit(t *testing.T) {
	snapshot := Snapshot{}

	repo := repository.NewMockRepo()

	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
	require.NoError(t, err)

	unix := time.Now().Unix()

	create := NewCreateOp(rene, unix, "title", "create", nil)
	create.Apply(&snapshot)

	require.NoError(t, create.Id().Validate())

	comment1 := NewAddCommentOp(rene, unix, "comment 1", nil)
	comment1.Apply(&snapshot)

	require.NoError(t, comment1.Id().Validate())

	// add another unrelated op in between
	setTitle := NewSetTitleOp(rene, unix, "edited title", "title")
	setTitle.Apply(&snapshot)

	comment2 := NewAddCommentOp(rene, unix, "comment 2", nil)
	comment2.Apply(&snapshot)

	require.NoError(t, comment2.Id().Validate())

	// edit the create comment: its history grows, others are untouched
	edit := NewEditCommentOp(rene, unix, create.Id(), "create edited", nil)
	edit.Apply(&snapshot)

	require.Len(t, snapshot.Timeline, 4)
	require.Len(t, snapshot.Timeline[0].(*CreateTimelineItem).History, 2)
	require.Len(t, snapshot.Timeline[1].(*AddCommentTimelineItem).History, 1)
	require.Len(t, snapshot.Timeline[3].(*AddCommentTimelineItem).History, 1)
	require.Equal(t, snapshot.Comments[0].Message, "create edited")
	require.Equal(t, snapshot.Comments[1].Message, "comment 1")
	require.Equal(t, snapshot.Comments[2].Message, "comment 2")

	// edit the first comment
	edit2 := NewEditCommentOp(rene, unix, comment1.Id(), "comment 1 edited", nil)
	edit2.Apply(&snapshot)

	require.Len(t, snapshot.Timeline, 4)
	require.Len(t, snapshot.Timeline[0].(*CreateTimelineItem).History, 2)
	require.Len(t, snapshot.Timeline[1].(*AddCommentTimelineItem).History, 2)
	require.Len(t, snapshot.Timeline[3].(*AddCommentTimelineItem).History, 1)
	require.Equal(t, snapshot.Comments[0].Message, "create edited")
	require.Equal(t, snapshot.Comments[1].Message, "comment 1 edited")
	require.Equal(t, snapshot.Comments[2].Message, "comment 2")

	// edit the second comment
	edit3 := NewEditCommentOp(rene, unix, comment2.Id(), "comment 2 edited", nil)
	edit3.Apply(&snapshot)

	require.Len(t, snapshot.Timeline, 4)
	require.Len(t, snapshot.Timeline[0].(*CreateTimelineItem).History, 2)
	require.Len(t, snapshot.Timeline[1].(*AddCommentTimelineItem).History, 2)
	require.Len(t, snapshot.Timeline[3].(*AddCommentTimelineItem).History, 2)
	require.Equal(t, snapshot.Comments[0].Message, "create edited")
	require.Equal(t, snapshot.Comments[1].Message, "comment 1 edited")
	require.Equal(t, snapshot.Comments[2].Message, "comment 2 edited")
}
+
// TestEditCommentSerialize checks that EditCommentOperation round-trips
// through serialization, with and without attached files.
func TestEditCommentSerialize(t *testing.T) {
	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *EditCommentOperation {
		return NewEditCommentOp(author, unixTime, "target", "message", nil)
	})
	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *EditCommentOperation {
		return NewEditCommentOp(author, unixTime, "target", "message", []repository.Hash{"hash1", "hash2"})
	})
}
diff --git a/entities/bug/op_label_change.go b/entities/bug/op_label_change.go
new file mode 100644
index 00000000..45441f7c
--- /dev/null
+++ b/entities/bug/op_label_change.go
@@ -0,0 +1,292 @@
+package bug
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+
+ "github.com/pkg/errors"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
+var _ Operation = &LabelChangeOperation{}
+
+// LabelChangeOperation defines a Bug operation to add or remove labels
+type LabelChangeOperation struct {
+	dag.OpBase
+	Added   []Label `json:"added"`
+	Removed []Label `json:"removed"`
+}
+
+// Id returns the identifier of the operation, computed from its content.
+func (op *LabelChangeOperation) Id() entity.Id {
+	return dag.IdOperation(op, &op.OpBase)
+}
+
+// Apply applies the operation: it adds op.Added and removes op.Removed from
+// the snapshot's label set, keeps the set sorted, records the author as an
+// actor and appends a LabelChangeTimelineItem to the timeline.
+func (op *LabelChangeOperation) Apply(snapshot *Snapshot) {
+	snapshot.addActor(op.Author())
+
+	// Add in the set
+AddLoop:
+	for _, added := range op.Added {
+		for _, label := range snapshot.Labels {
+			if label == added {
+				// Already exist
+				continue AddLoop
+			}
+		}
+
+		snapshot.Labels = append(snapshot.Labels, added)
+	}
+
+	// Remove in the set
+	for _, removed := range op.Removed {
+		for i, label := range snapshot.Labels {
+			if label == removed {
+				// swap-remove: overwrite with the last element, then truncate
+				snapshot.Labels[i] = snapshot.Labels[len(snapshot.Labels)-1]
+				snapshot.Labels = snapshot.Labels[:len(snapshot.Labels)-1]
+			}
+		}
+	}
+
+	// Sort
+	sort.Slice(snapshot.Labels, func(i, j int) bool {
+		return string(snapshot.Labels[i]) < string(snapshot.Labels[j])
+	})
+
+	item := &LabelChangeTimelineItem{
+		id:       op.Id(),
+		Author:   op.Author(),
+		UnixTime: timestamp.Timestamp(op.UnixTime),
+		Added:    op.Added,
+		Removed:  op.Removed,
+	}
+
+	snapshot.Timeline = append(snapshot.Timeline, item)
+}
+
+// Validate checks that the operation is well-formed: valid base fields,
+// syntactically valid labels, and at least one label added or removed.
+func (op *LabelChangeOperation) Validate() error {
+	if err := op.OpBase.Validate(op, LabelChangeOp); err != nil {
+		return err
+	}
+
+	for _, l := range op.Added {
+		if err := l.Validate(); err != nil {
+			return errors.Wrap(err, "added label")
+		}
+	}
+
+	for _, l := range op.Removed {
+		if err := l.Validate(); err != nil {
+			return errors.Wrap(err, "removed label")
+		}
+	}
+
+	if len(op.Added)+len(op.Removed) <= 0 {
+		return fmt.Errorf("no label change")
+	}
+
+	return nil
+}
+
+// NewLabelChangeOperation creates a label change operation. It neither
+// validates nor applies it.
+func NewLabelChangeOperation(author identity.Interface, unixTime int64, added, removed []Label) *LabelChangeOperation {
+	return &LabelChangeOperation{
+		OpBase:  dag.NewOpBase(LabelChangeOp, author, unixTime),
+		Added:   added,
+		Removed: removed,
+	}
+}
+
+// LabelChangeTimelineItem is the timeline entry recording a label change.
+type LabelChangeTimelineItem struct {
+	id       entity.Id
+	Author   identity.Interface
+	UnixTime timestamp.Timestamp
+	Added    []Label
+	Removed  []Label
+}
+
+// Id returns the identifier of the timeline item (same as the operation that created it).
+func (l LabelChangeTimelineItem) Id() entity.Id {
+	return l.id
+}
+
+// IsAuthored is a sign post method for gqlgen
+func (l LabelChangeTimelineItem) IsAuthored() {}
+
+// ChangeLabels is a convenience function to change labels on a bug.
+// Additions already present on the bug, removals not present on the bug, and
+// duplicates within the same call are skipped; one LabelChangeResult per
+// requested label reports what happened. An error is returned if nothing
+// effectively changes.
+func ChangeLabels(b Interface, author identity.Interface, unixTime int64, add, remove []string, metadata map[string]string) ([]LabelChangeResult, *LabelChangeOperation, error) {
+	var added, removed []Label
+	var results []LabelChangeResult
+
+	snap := b.Compile()
+
+	for _, str := range add {
+		label := Label(str)
+
+		// check for duplicate
+		if labelExist(added, label) {
+			results = append(results, LabelChangeResult{Label: label, Status: LabelChangeDuplicateInOp})
+			continue
+		}
+
+		// check that the label doesn't already exist
+		if labelExist(snap.Labels, label) {
+			results = append(results, LabelChangeResult{Label: label, Status: LabelChangeAlreadySet})
+			continue
+		}
+
+		added = append(added, label)
+		results = append(results, LabelChangeResult{Label: label, Status: LabelChangeAdded})
+	}
+
+	for _, str := range remove {
+		label := Label(str)
+
+		// check for duplicate
+		if labelExist(removed, label) {
+			results = append(results, LabelChangeResult{Label: label, Status: LabelChangeDuplicateInOp})
+			continue
+		}
+
+		// check that the label actually exist
+		if !labelExist(snap.Labels, label) {
+			results = append(results, LabelChangeResult{Label: label, Status: LabelChangeDoesntExist})
+			continue
+		}
+
+		removed = append(removed, label)
+		results = append(results, LabelChangeResult{Label: label, Status: LabelChangeRemoved})
+	}
+
+	if len(added) == 0 && len(removed) == 0 {
+		return results, nil, fmt.Errorf("no label added or removed")
+	}
+
+	op := NewLabelChangeOperation(author, unixTime, added, removed)
+	for key, val := range metadata {
+		op.SetMetadata(key, val)
+	}
+	if err := op.Validate(); err != nil {
+		return nil, nil, err
+	}
+
+	b.Append(op)
+
+	return results, op, nil
+}
+
+// ForceChangeLabels is a convenience function to apply the operation
+// The difference with ChangeLabels is that no checks of deduplications are done. You are entirely
+// responsible for what you are doing. In the general case, you want to use ChangeLabels instead.
+// The intended use of this function is to allow importers to create legal but unexpected label changes,
+// like removing a label with no information of when it was added before.
+func ForceChangeLabels(b Interface, author identity.Interface, unixTime int64, add, remove []string, metadata map[string]string) (*LabelChangeOperation, error) {
+	added := make([]Label, len(add))
+	for i, str := range add {
+		added[i] = Label(str)
+	}
+
+	removed := make([]Label, len(remove))
+	for i, str := range remove {
+		removed[i] = Label(str)
+	}
+
+	op := NewLabelChangeOperation(author, unixTime, added, removed)
+
+	for key, val := range metadata {
+		op.SetMetadata(key, val)
+	}
+	if err := op.Validate(); err != nil {
+		return nil, err
+	}
+
+	b.Append(op)
+
+	return op, nil
+}
+
+// labelExist returns true if label is present in labels.
+func labelExist(labels []Label, label Label) bool {
+	for _, l := range labels {
+		if l == label {
+			return true
+		}
+	}
+
+	return false
+}
+
+// LabelChangeStatus describes the outcome of a single label within a
+// ChangeLabels call.
+type LabelChangeStatus int
+
+const (
+	_ LabelChangeStatus = iota
+	LabelChangeAdded
+	LabelChangeRemoved
+	LabelChangeDuplicateInOp
+	LabelChangeAlreadySet
+	LabelChangeDoesntExist
+)
+
+// MarshalGQL implements the gqlgen Marshaler interface, writing the status as
+// a quoted GraphQL enum value.
+func (l LabelChangeStatus) MarshalGQL(w io.Writer) {
+	switch l {
+	case LabelChangeAdded:
+		// NOTE(review): fmt.Fprint would be more appropriate than Fprintf here
+		// (and below) since the argument is not a format string.
+		_, _ = fmt.Fprintf(w, strconv.Quote("ADDED"))
+	case LabelChangeRemoved:
+		_, _ = fmt.Fprintf(w, strconv.Quote("REMOVED"))
+	case LabelChangeDuplicateInOp:
+		_, _ = fmt.Fprintf(w, strconv.Quote("DUPLICATE_IN_OP"))
+	case LabelChangeAlreadySet:
+		_, _ = fmt.Fprintf(w, strconv.Quote("ALREADY_EXIST"))
+	case LabelChangeDoesntExist:
+		_, _ = fmt.Fprintf(w, strconv.Quote("DOESNT_EXIST"))
+	default:
+		panic("missing case")
+	}
+}
+
+// UnmarshalGQL implements the gqlgen Unmarshaler interface, parsing a GraphQL
+// enum string back into a LabelChangeStatus.
+func (l *LabelChangeStatus) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+	switch str {
+	case "ADDED":
+		*l = LabelChangeAdded
+	case "REMOVED":
+		*l = LabelChangeRemoved
+	case "DUPLICATE_IN_OP":
+		*l = LabelChangeDuplicateInOp
+	case "ALREADY_EXIST":
+		*l = LabelChangeAlreadySet
+	case "DOESNT_EXIST":
+		*l = LabelChangeDoesntExist
+	default:
+		return fmt.Errorf("%s is not a valid LabelChangeStatus", str)
+	}
+	return nil
+}
+
+// LabelChangeResult pairs a label with the outcome it got in a ChangeLabels call.
+type LabelChangeResult struct {
+	Label  Label
+	Status LabelChangeStatus
+}
+
+// String returns a human-readable description of the result.
+func (l LabelChangeResult) String() string {
+	switch l.Status {
+	case LabelChangeAdded:
+		return fmt.Sprintf("label %s added", l.Label)
+	case LabelChangeRemoved:
+		return fmt.Sprintf("label %s removed", l.Label)
+	case LabelChangeDuplicateInOp:
+		return fmt.Sprintf("label %s is a duplicate", l.Label)
+	case LabelChangeAlreadySet:
+		return fmt.Sprintf("label %s was already set", l.Label)
+	case LabelChangeDoesntExist:
+		return fmt.Sprintf("label %s doesn't exist on this bug", l.Label)
+	default:
+		panic(fmt.Sprintf("unknown label change status %v", l.Status))
+	}
+}
diff --git a/entities/bug/op_label_change_test.go b/entities/bug/op_label_change_test.go
new file mode 100644
index 00000000..edbe4714
--- /dev/null
+++ b/entities/bug/op_label_change_test.go
@@ -0,0 +1,20 @@
+package bug
+
+import (
+ "testing"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+)
+
+// TestLabelChangeSerialize checks that LabelChangeOperation survives a JSON
+// round-trip with added+removed labels, added only, and removed only.
+func TestLabelChangeSerialize(t *testing.T) {
+	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *LabelChangeOperation {
+		return NewLabelChangeOperation(author, unixTime, []Label{"added"}, []Label{"removed"})
+	})
+	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *LabelChangeOperation {
+		return NewLabelChangeOperation(author, unixTime, []Label{"added"}, nil)
+	})
+	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *LabelChangeOperation {
+		return NewLabelChangeOperation(author, unixTime, nil, []Label{"removed"})
+	})
+}
diff --git a/entities/bug/op_set_metadata.go b/entities/bug/op_set_metadata.go
new file mode 100644
index 00000000..b4aab78c
--- /dev/null
+++ b/entities/bug/op_set_metadata.go
@@ -0,0 +1,21 @@
+package bug
+
+import (
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+)
+
+// NewSetMetadataOp creates an operation attaching metadata to a previous
+// operation of the bug, identified by target. It neither validates nor applies it.
+func NewSetMetadataOp(author identity.Interface, unixTime int64, target entity.Id, newMetadata map[string]string) *dag.SetMetadataOperation[*Snapshot] {
+	return dag.NewSetMetadataOp[*Snapshot](SetMetadataOp, author, unixTime, target, newMetadata)
+}
+
+// SetMetadata is a convenience function to add metadata on another operation
+func SetMetadata(b Interface, author identity.Interface, unixTime int64, target entity.Id, newMetadata map[string]string) (*dag.SetMetadataOperation[*Snapshot], error) {
+	op := NewSetMetadataOp(author, unixTime, target, newMetadata)
+	if err := op.Validate(); err != nil {
+		return nil, err
+	}
+	b.Append(op)
+	return op, nil
+}
diff --git a/entities/bug/op_set_status.go b/entities/bug/op_set_status.go
new file mode 100644
index 00000000..5e73d982
--- /dev/null
+++ b/entities/bug/op_set_status.go
@@ -0,0 +1,95 @@
+package bug
+
+import (
+ "github.com/pkg/errors"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
+var _ Operation = &SetStatusOperation{}
+
+// SetStatusOperation will change the status of a bug
+type SetStatusOperation struct {
+	dag.OpBase
+	Status Status `json:"status"`
+}
+
+// Id returns the identifier of the operation, computed from its content.
+func (op *SetStatusOperation) Id() entity.Id {
+	return dag.IdOperation(op, &op.OpBase)
+}
+
+// Apply sets the snapshot status, records the author as an actor, and appends
+// a SetStatusTimelineItem to the timeline.
+func (op *SetStatusOperation) Apply(snapshot *Snapshot) {
+	snapshot.Status = op.Status
+	snapshot.addActor(op.Author())
+
+	item := &SetStatusTimelineItem{
+		id:       op.Id(),
+		Author:   op.Author(),
+		UnixTime: timestamp.Timestamp(op.UnixTime),
+		Status:   op.Status,
+	}
+
+	snapshot.Timeline = append(snapshot.Timeline, item)
+}
+
+// Validate checks the base operation fields and that the status value is valid.
+func (op *SetStatusOperation) Validate() error {
+	if err := op.OpBase.Validate(op, SetStatusOp); err != nil {
+		return err
+	}
+
+	if err := op.Status.Validate(); err != nil {
+		return errors.Wrap(err, "status")
+	}
+
+	return nil
+}
+
+// NewSetStatusOp creates a status change operation. It neither validates nor applies it.
+func NewSetStatusOp(author identity.Interface, unixTime int64, status Status) *SetStatusOperation {
+	return &SetStatusOperation{
+		OpBase: dag.NewOpBase(SetStatusOp, author, unixTime),
+		Status: status,
+	}
+}
+
+// SetStatusTimelineItem is the timeline entry recording a status change.
+type SetStatusTimelineItem struct {
+	id       entity.Id
+	Author   identity.Interface
+	UnixTime timestamp.Timestamp
+	Status   Status
+}
+
+// Id returns the identifier of the timeline item (same as the operation that created it).
+func (s SetStatusTimelineItem) Id() entity.Id {
+	return s.id
+}
+
+// IsAuthored is a sign post method for gqlgen
+func (s SetStatusTimelineItem) IsAuthored() {}
+
+// Open is a convenience function to change a bugs state to Open
+func Open(b Interface, author identity.Interface, unixTime int64, metadata map[string]string) (*SetStatusOperation, error) {
+	op := NewSetStatusOp(author, unixTime, OpenStatus)
+	for key, value := range metadata {
+		op.SetMetadata(key, value)
+	}
+	if err := op.Validate(); err != nil {
+		return nil, err
+	}
+	b.Append(op)
+	return op, nil
+}
+
+// Close is a convenience function to change a bugs state to Close
+func Close(b Interface, author identity.Interface, unixTime int64, metadata map[string]string) (*SetStatusOperation, error) {
+	op := NewSetStatusOp(author, unixTime, ClosedStatus)
+	for key, value := range metadata {
+		op.SetMetadata(key, value)
+	}
+	if err := op.Validate(); err != nil {
+		return nil, err
+	}
+	b.Append(op)
+	return op, nil
+}
diff --git a/entities/bug/op_set_status_test.go b/entities/bug/op_set_status_test.go
new file mode 100644
index 00000000..7ec78704
--- /dev/null
+++ b/entities/bug/op_set_status_test.go
@@ -0,0 +1,14 @@
+package bug
+
+import (
+ "testing"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+)
+
+// TestSetStatusSerialize checks that SetStatusOperation survives a JSON round-trip.
+func TestSetStatusSerialize(t *testing.T) {
+	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *SetStatusOperation {
+		return NewSetStatusOp(author, unixTime, ClosedStatus)
+	})
+}
diff --git a/entities/bug/op_set_title.go b/entities/bug/op_set_title.go
new file mode 100644
index 00000000..75efd08e
--- /dev/null
+++ b/entities/bug/op_set_title.go
@@ -0,0 +1,112 @@
+package bug
+
+import (
+ "fmt"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+
+ "github.com/MichaelMure/git-bug/util/text"
+)
+
+var _ Operation = &SetTitleOperation{}
+
+// SetTitleOperation will change the title of a bug
+type SetTitleOperation struct {
+	dag.OpBase
+	Title string `json:"title"`
+	// Was records the title being replaced, for display purposes.
+	Was string `json:"was"`
+}
+
+// Id returns the identifier of the operation, computed from its content.
+func (op *SetTitleOperation) Id() entity.Id {
+	return dag.IdOperation(op, &op.OpBase)
+}
+
+// Apply sets the snapshot title, records the author as an actor, and appends
+// a SetTitleTimelineItem to the timeline.
+func (op *SetTitleOperation) Apply(snapshot *Snapshot) {
+	snapshot.Title = op.Title
+	snapshot.addActor(op.Author())
+
+	item := &SetTitleTimelineItem{
+		id:       op.Id(),
+		Author:   op.Author(),
+		UnixTime: timestamp.Timestamp(op.UnixTime),
+		Title:    op.Title,
+		Was:      op.Was,
+	}
+
+	snapshot.Timeline = append(snapshot.Timeline, item)
+}
+
+// Validate checks the base operation fields and that both the new and the
+// previous title are safe, single-line, non-empty (new title only) strings.
+func (op *SetTitleOperation) Validate() error {
+	if err := op.OpBase.Validate(op, SetTitleOp); err != nil {
+		return err
+	}
+
+	if text.Empty(op.Title) {
+		return fmt.Errorf("title is empty")
+	}
+
+	if !text.SafeOneLine(op.Title) {
+		return fmt.Errorf("title has unsafe characters")
+	}
+
+	if !text.SafeOneLine(op.Was) {
+		return fmt.Errorf("previous title has unsafe characters")
+	}
+
+	return nil
+}
+
+// NewSetTitleOp creates a title change operation. It neither validates nor applies it.
+func NewSetTitleOp(author identity.Interface, unixTime int64, title string, was string) *SetTitleOperation {
+	return &SetTitleOperation{
+		OpBase: dag.NewOpBase(SetTitleOp, author, unixTime),
+		Title:  title,
+		Was:    was,
+	}
+}
+
+// SetTitleTimelineItem is the timeline entry recording a title change.
+type SetTitleTimelineItem struct {
+	id       entity.Id
+	Author   identity.Interface
+	UnixTime timestamp.Timestamp
+	Title    string
+	Was      string
+}
+
+// Id returns the identifier of the timeline item (same as the operation that created it).
+func (s SetTitleTimelineItem) Id() entity.Id {
+	return s.id
+}
+
+// IsAuthored is a sign post method for gqlgen
+func (s SetTitleTimelineItem) IsAuthored() {}
+
+// SetTitle is a convenience function to change a bugs title
+func SetTitle(b Interface, author identity.Interface, unixTime int64, title string, metadata map[string]string) (*SetTitleOperation, error) {
+	// find the previous title: the last SetTitleOperation if any, otherwise
+	// the title set by the create operation
+	var lastTitleOp *SetTitleOperation
+	for _, op := range b.Operations() {
+		switch op := op.(type) {
+		case *SetTitleOperation:
+			lastTitleOp = op
+		}
+	}
+
+	var was string
+	if lastTitleOp != nil {
+		was = lastTitleOp.Title
+	} else {
+		was = b.FirstOp().(*CreateOperation).Title
+	}
+
+	op := NewSetTitleOp(author, unixTime, title, was)
+	for key, value := range metadata {
+		op.SetMetadata(key, value)
+	}
+	if err := op.Validate(); err != nil {
+		return nil, err
+	}
+
+	b.Append(op)
+	return op, nil
+}
diff --git a/entities/bug/op_set_title_test.go b/entities/bug/op_set_title_test.go
new file mode 100644
index 00000000..7960ec4f
--- /dev/null
+++ b/entities/bug/op_set_title_test.go
@@ -0,0 +1,14 @@
+package bug
+
+import (
+ "testing"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+)
+
+// TestSetTitleSerialize checks that SetTitleOperation survives a JSON round-trip.
+func TestSetTitleSerialize(t *testing.T) {
+	dag.SerializeRoundTripTest(t, func(author identity.Interface, unixTime int64) *SetTitleOperation {
+		return NewSetTitleOp(author, unixTime, "title", "was")
+	})
+}
diff --git a/entities/bug/operation.go b/entities/bug/operation.go
new file mode 100644
index 00000000..a02fc780
--- /dev/null
+++ b/entities/bug/operation.go
@@ -0,0 +1,73 @@
+package bug
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+)
+
+// NOTE(review): these iota values are serialized in each operation's "type"
+// JSON field (see operationUnmarshaller); do not reorder existing constants.
+const (
+	_ dag.OperationType = iota
+	CreateOp
+	SetTitleOp
+	AddCommentOp
+	SetStatusOp
+	LabelChangeOp
+	EditCommentOp
+	NoOpOp
+	SetMetadataOp
+)
+
+// Operation defines the interface to fulfill for an edit operation of a Bug
+type Operation interface {
+	dag.Operation
+
+	// Apply the operation to a Snapshot to create the final state
+	Apply(snapshot *Snapshot)
+}
+
+// make sure that package external operations do conform to our interface
+var _ Operation = &dag.NoOpOperation[*Snapshot]{}
+var _ Operation = &dag.SetMetadataOperation[*Snapshot]{}
+
+// operationUnmarshaller decodes a raw JSON operation: it first reads the
+// "type" field to pick the concrete operation struct, then unmarshals the
+// full message into it. It panics on an unknown operation type, which would
+// be a coding error rather than bad data from this package's point of view.
+func operationUnmarshaller(raw json.RawMessage, resolvers entity.Resolvers) (dag.Operation, error) {
+	var t struct {
+		OperationType dag.OperationType `json:"type"`
+	}
+
+	if err := json.Unmarshal(raw, &t); err != nil {
+		return nil, err
+	}
+
+	var op dag.Operation
+
+	switch t.OperationType {
+	case AddCommentOp:
+		op = &AddCommentOperation{}
+	case CreateOp:
+		op = &CreateOperation{}
+	case EditCommentOp:
+		op = &EditCommentOperation{}
+	case LabelChangeOp:
+		op = &LabelChangeOperation{}
+	case NoOpOp:
+		op = &dag.NoOpOperation[*Snapshot]{}
+	case SetMetadataOp:
+		op = &dag.SetMetadataOperation[*Snapshot]{}
+	case SetStatusOp:
+		op = &SetStatusOperation{}
+	case SetTitleOp:
+		op = &SetTitleOperation{}
+	default:
+		panic(fmt.Sprintf("unknown operation type %v", t.OperationType))
+	}
+
+	err := json.Unmarshal(raw, &op)
+	if err != nil {
+		return nil, err
+	}
+
+	return op, nil
+}
diff --git a/entities/bug/operation_test.go b/entities/bug/operation_test.go
new file mode 100644
index 00000000..fe8080c3
--- /dev/null
+++ b/entities/bug/operation_test.go
@@ -0,0 +1,131 @@
+package bug
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+ "github.com/MichaelMure/git-bug/repository"
+)
+
+// TODO: move to entity/dag?
+
+// TestValidate exercises Operation.Validate with a set of well-formed
+// operations that must pass, and a set of malformed ones that must fail.
+func TestValidate(t *testing.T) {
+	repo := repository.NewMockRepoClock()
+
+	makeIdentity := func(t *testing.T, name, email string) *identity.Identity {
+		i, err := identity.NewIdentity(repo, name, email)
+		require.NoError(t, err)
+		return i
+	}
+
+	rene := makeIdentity(t, "René Descartes", "rene@descartes.fr")
+
+	unix := time.Now().Unix()
+
+	// operations that must pass validation
+	good := []Operation{
+		NewCreateOp(rene, unix, "title", "message", nil),
+		NewSetTitleOp(rene, unix, "title2", "title1"),
+		NewAddCommentOp(rene, unix, "message2", nil),
+		NewSetStatusOp(rene, unix, ClosedStatus),
+		NewLabelChangeOperation(rene, unix, []Label{"added"}, []Label{"removed"}),
+	}
+
+	for _, op := range good {
+		if err := op.Validate(); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// operations that must be rejected by Validate
+	bad := []Operation{
+		// opbase
+		NewSetStatusOp(makeIdentity(t, "", "rene@descartes.fr"), unix, ClosedStatus),
+		NewSetStatusOp(makeIdentity(t, "René Descartes\u001b", "rene@descartes.fr"), unix, ClosedStatus),
+		NewSetStatusOp(makeIdentity(t, "René Descartes", "rene@descartes.fr\u001b"), unix, ClosedStatus),
+		NewSetStatusOp(makeIdentity(t, "René \nDescartes", "rene@descartes.fr"), unix, ClosedStatus),
+		NewSetStatusOp(makeIdentity(t, "René Descartes", "rene@\ndescartes.fr"), unix, ClosedStatus),
+		&CreateOperation{OpBase: dag.NewOpBase(CreateOp, rene, 0),
+			Title:   "title",
+			Message: "message",
+		},
+
+		NewCreateOp(rene, unix, "multi\nline", "message", nil),
+		NewCreateOp(rene, unix, "title", "message", []repository.Hash{repository.Hash("invalid")}),
+		NewCreateOp(rene, unix, "title\u001b", "message", nil),
+		NewCreateOp(rene, unix, "title", "message\u001b", nil),
+		NewSetTitleOp(rene, unix, "multi\nline", "title1"),
+		NewSetTitleOp(rene, unix, "title", "multi\nline"),
+		NewSetTitleOp(rene, unix, "title\u001b", "title2"),
+		NewSetTitleOp(rene, unix, "title", "title2\u001b"),
+		NewAddCommentOp(rene, unix, "message\u001b", nil),
+		NewAddCommentOp(rene, unix, "message", []repository.Hash{repository.Hash("invalid")}),
+		NewSetStatusOp(rene, unix, 1000),
+		NewSetStatusOp(rene, unix, 0),
+		NewLabelChangeOperation(rene, unix, []Label{}, []Label{}),
+		NewLabelChangeOperation(rene, unix, []Label{"multi\nline"}, []Label{}),
+	}
+
+	for i, op := range bad {
+		if err := op.Validate(); err == nil {
+			t.Fatal("validation should have failed", i, op)
+		}
+	}
+}
+
+// TestMetadata checks that metadata set on an operation can be read back.
+func TestMetadata(t *testing.T) {
+	repo := repository.NewMockRepoClock()
+
+	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
+	require.NoError(t, err)
+
+	op := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
+
+	op.SetMetadata("key", "value")
+
+	val, ok := op.GetMetadata("key")
+	require.True(t, ok)
+	require.Equal(t, val, "value")
+}
+
+// TestID checks that an operation id is stable across commit and read-back,
+// for both the in-memory mock repo and a real go-git repo.
+func TestID(t *testing.T) {
+	repo := repository.CreateGoGitTestRepo(t, false)
+
+	repos := []repository.ClockedRepo{
+		repository.NewMockRepo(),
+		repo,
+	}
+
+	for _, repo := range repos {
+		rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
+		require.NoError(t, err)
+		err = rene.Commit(repo)
+		require.NoError(t, err)
+
+		b, op, err := Create(rene, time.Now().Unix(), "title", "message", nil, nil)
+		require.NoError(t, err)
+
+		id1 := op.Id()
+		require.NoError(t, id1.Validate())
+
+		err = b.Commit(repo)
+		require.NoError(t, err)
+
+		op2 := b.FirstOp()
+
+		id2 := op2.Id()
+		require.NoError(t, id2.Validate())
+		require.Equal(t, id1, id2)
+
+		b2, err := Read(repo, b.Id())
+		require.NoError(t, err)
+
+		op3 := b2.FirstOp()
+
+		id3 := op3.Id()
+		require.NoError(t, id3.Validate())
+		require.Equal(t, id1, id3)
+	}
+}
diff --git a/entities/bug/resolver.go b/entities/bug/resolver.go
new file mode 100644
index 00000000..e7beb0e4
--- /dev/null
+++ b/entities/bug/resolver.go
@@ -0,0 +1,21 @@
+package bug
+
+import (
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+)
+
+var _ entity.Resolver = &SimpleResolver{}
+
+// SimpleResolver is a Resolver loading Bugs directly from a Repo
+type SimpleResolver struct {
+	repo repository.ClockedRepo
+}
+
+// NewSimpleResolver creates a SimpleResolver backed by the given repository.
+func NewSimpleResolver(repo repository.ClockedRepo) *SimpleResolver {
+	return &SimpleResolver{repo: repo}
+}
+
+// Resolve reads the bug with the given id from the repository.
+func (r *SimpleResolver) Resolve(id entity.Id) (entity.Interface, error) {
+	return Read(r.repo, id)
+}
diff --git a/entities/bug/snapshot.go b/entities/bug/snapshot.go
new file mode 100644
index 00000000..cece09b8
--- /dev/null
+++ b/entities/bug/snapshot.go
@@ -0,0 +1,144 @@
+package bug
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/entity/dag"
+)
+
+var _ dag.Snapshot = &Snapshot{}
+
+// Snapshot is a compiled form of the Bug data structure used for storage and merge
+type Snapshot struct {
+	id entity.Id
+
+	Status       Status
+	Title        string
+	Comments     []Comment
+	Labels       []Label
+	Author       identity.Interface
+	Actors       []identity.Interface
+	Participants []identity.Interface
+	CreateTime   time.Time
+
+	Timeline []TimelineItem
+
+	Operations []dag.Operation
+}
+
+// Id returns the Bug identifier
+func (snap *Snapshot) Id() entity.Id {
+	if snap.id == "" {
+		// simply panic as it would be a coding error (no id provided at construction)
+		panic("no id")
+	}
+	return snap.id
+}
+
+// AllOperations returns the operations compiled into this snapshot.
+func (snap *Snapshot) AllOperations() []dag.Operation {
+	return snap.Operations
+}
+
+// EditTime returns the last time a bug was modified
+func (snap *Snapshot) EditTime() time.Time {
+	if len(snap.Operations) == 0 {
+		return time.Unix(0, 0)
+	}
+
+	return snap.Operations[len(snap.Operations)-1].Time()
+}
+
+// GetCreateMetadata returns the metadata attached to the first (create) operation.
+func (snap *Snapshot) GetCreateMetadata(key string) (string, bool) {
+	return snap.Operations[0].GetMetadata(key)
+}
+
+// SearchTimelineItem will search in the timeline for an item matching the given id
+func (snap *Snapshot) SearchTimelineItem(id entity.Id) (TimelineItem, error) {
+	for i := range snap.Timeline {
+		if snap.Timeline[i].Id() == id {
+			return snap.Timeline[i], nil
+		}
+	}
+
+	return nil, fmt.Errorf("timeline item not found")
+}
+
+// SearchComment will search for a comment matching the given id
+func (snap *Snapshot) SearchComment(id entity.Id) (*Comment, error) {
+	for _, c := range snap.Comments {
+		if c.id == id {
+			// note: returns a pointer to the loop copy, not into snap.Comments
+			return &c, nil
+		}
+	}
+
+	return nil, fmt.Errorf("comment item not found")
+}
+
+// addActor appends the operation author to the actors list, if not already present
+func (snap *Snapshot) addActor(actor identity.Interface) {
+	for _, a := range snap.Actors {
+		if actor.Id() == a.Id() {
+			return
+		}
+	}
+
+	snap.Actors = append(snap.Actors, actor)
+}
+
+// addParticipant appends the operation author to the participants list, if not already present
+func (snap *Snapshot) addParticipant(participant identity.Interface) {
+	for _, p := range snap.Participants {
+		if participant.Id() == p.Id() {
+			return
+		}
+	}
+
+	snap.Participants = append(snap.Participants, participant)
+}
+
+// HasParticipant returns true if the id is a participant
+func (snap *Snapshot) HasParticipant(id entity.Id) bool {
+	for _, p := range snap.Participants {
+		if p.Id() == id {
+			return true
+		}
+	}
+	return false
+}
+
+// HasAnyParticipant returns true if one of the ids is a participant
+func (snap *Snapshot) HasAnyParticipant(ids ...entity.Id) bool {
+	for _, id := range ids {
+		if snap.HasParticipant(id) {
+			return true
+		}
+	}
+	return false
+}
+
+// HasActor returns true if the id is an actor
+func (snap *Snapshot) HasActor(id entity.Id) bool {
+	for _, p := range snap.Actors {
+		if p.Id() == id {
+			return true
+		}
+	}
+	return false
+}
+
+// HasAnyActor returns true if one of the ids is an actor
+func (snap *Snapshot) HasAnyActor(ids ...entity.Id) bool {
+	for _, id := range ids {
+		if snap.HasActor(id) {
+			return true
+		}
+	}
+	return false
+}
+
+// IsAuthored is a sign post method for gqlgen
+func (snap *Snapshot) IsAuthored() {}
diff --git a/entities/bug/sorting.go b/entities/bug/sorting.go
new file mode 100644
index 00000000..2e64b92d
--- /dev/null
+++ b/entities/bug/sorting.go
@@ -0,0 +1,57 @@
+package bug
+
+// BugsByCreationTime implements sort.Interface, ordering bugs by their
+// creation Lamport time, with the wall-clock time of the first operation as a
+// tie-breaker.
+type BugsByCreationTime []*Bug
+
+func (b BugsByCreationTime) Len() int {
+	return len(b)
+}
+
+func (b BugsByCreationTime) Less(i, j int) bool {
+	if b[i].CreateLamportTime() < b[j].CreateLamportTime() {
+		return true
+	}
+
+	if b[i].CreateLamportTime() > b[j].CreateLamportTime() {
+		return false
+	}
+
+	// When the logical clocks are identical, that means we had a concurrent
+	// edition. In this case we rely on the timestamp. While the timestamp might
+	// be incorrect due to a badly set clock, the drift in sorting is bounded
+	// by the first sorting using the logical clock. That means that if users
+	// synchronize their bugs regularly, the timestamp will rarely be used, and
+	// should still provide a kinda accurate sorting when needed.
+	return b[i].FirstOp().Time().Before(b[j].FirstOp().Time())
+}
+
+func (b BugsByCreationTime) Swap(i, j int) {
+	b[i], b[j] = b[j], b[i]
+}
+
+// BugsByEditTime implements sort.Interface, ordering bugs by their last-edit
+// Lamport time, with the wall-clock time of the last operation as a tie-breaker.
+type BugsByEditTime []*Bug
+
+func (b BugsByEditTime) Len() int {
+	return len(b)
+}
+
+func (b BugsByEditTime) Less(i, j int) bool {
+	if b[i].EditLamportTime() < b[j].EditLamportTime() {
+		return true
+	}
+
+	if b[i].EditLamportTime() > b[j].EditLamportTime() {
+		return false
+	}
+
+	// When the logical clocks are identical, that means we had a concurrent
+	// edition. In this case we rely on the timestamp. While the timestamp might
+	// be incorrect due to a badly set clock, the drift in sorting is bounded
+	// by the first sorting using the logical clock. That means that if users
+	// synchronize their bugs regularly, the timestamp will rarely be used, and
+	// should still provide a kinda accurate sorting when needed.
+	return b[i].LastOp().Time().Before(b[j].LastOp().Time())
+}
+
+func (b BugsByEditTime) Swap(i, j int) {
+	b[i], b[j] = b[j], b[i]
+}
diff --git a/entities/bug/status.go b/entities/bug/status.go
new file mode 100644
index 00000000..b8fba609
--- /dev/null
+++ b/entities/bug/status.go
@@ -0,0 +1,86 @@
+package bug
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Status is the open/closed state of a bug.
+type Status int
+
+const (
+	_ Status = iota
+	OpenStatus
+	ClosedStatus
+)
+
+// String returns the lowercase human-readable name of the status.
+func (s Status) String() string {
+	switch s {
+	case OpenStatus:
+		return "open"
+	case ClosedStatus:
+		return "closed"
+	default:
+		return "unknown status"
+	}
+}
+
+// Action returns the past-tense verb matching the status, for display.
+func (s Status) Action() string {
+	switch s {
+	case OpenStatus:
+		return "opened"
+	case ClosedStatus:
+		return "closed"
+	default:
+		return "unknown status"
+	}
+}
+
+// StatusFromString parses a status name, ignoring case and surrounding whitespace.
+func StatusFromString(str string) (Status, error) {
+	cleaned := strings.ToLower(strings.TrimSpace(str))
+
+	switch cleaned {
+	case "open":
+		return OpenStatus, nil
+	case "closed":
+		return ClosedStatus, nil
+	default:
+		return 0, fmt.Errorf("unknown status")
+	}
+}
+
+// Validate returns an error if the status is not one of the declared values.
+func (s Status) Validate() error {
+	if s != OpenStatus && s != ClosedStatus {
+		return fmt.Errorf("invalid")
+	}
+
+	return nil
+}
+
+// MarshalGQL implements the gqlgen Marshaler interface, writing the status as
+// a quoted GraphQL enum value.
+func (s Status) MarshalGQL(w io.Writer) {
+	switch s {
+	case OpenStatus:
+		_, _ = fmt.Fprintf(w, strconv.Quote("OPEN"))
+	case ClosedStatus:
+		_, _ = fmt.Fprintf(w, strconv.Quote("CLOSED"))
+	default:
+		panic("missing case")
+	}
+}
+
+// UnmarshalGQL implements the gqlgen Unmarshaler interface, parsing a GraphQL
+// enum string back into a Status.
+func (s *Status) UnmarshalGQL(v interface{}) error {
+	str, ok := v.(string)
+	if !ok {
+		return fmt.Errorf("enums must be strings")
+	}
+	switch str {
+	case "OPEN":
+		*s = OpenStatus
+	case "CLOSED":
+		*s = ClosedStatus
+	default:
+		return fmt.Errorf("%s is not a valid Status", str)
+	}
+	return nil
+}
diff --git a/entities/bug/timeline.go b/entities/bug/timeline.go
new file mode 100644
index 00000000..d7f042db
--- /dev/null
+++ b/entities/bug/timeline.go
@@ -0,0 +1,80 @@
+package bug
+
+import (
+ "strings"
+
+ "github.com/MichaelMure/git-bug/entities/identity"
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
+// TimelineItem is an entry in a bug's chronological timeline.
+type TimelineItem interface {
+	// Id return the identifier of the item
+	Id() entity.Id
+}
+
+// CommentHistoryStep hold one version of a message in the history
+type CommentHistoryStep struct {
+	// The author of the edition, not necessarily the same as the author of the
+	// original comment
+	Author identity.Interface
+	// The new message
+	Message  string
+	UnixTime timestamp.Timestamp
+}
+
+// CommentTimelineItem is a TimelineItem that holds a Comment and its edition history
+type CommentTimelineItem struct {
+	// id should be the same as in Comment
+	id        entity.Id
+	Author    identity.Interface
+	Message   string
+	Files     []repository.Hash
+	CreatedAt timestamp.Timestamp
+	LastEdit  timestamp.Timestamp
+	History   []CommentHistoryStep
+}
+
+// NewCommentTimelineItem creates the timeline item for a fresh comment; the
+// history starts with a single step holding the original message.
+// Note: the first history step carries no Author — TODO confirm whether this
+// is intentional (the item-level Author covers the original edition).
+func NewCommentTimelineItem(comment Comment) CommentTimelineItem {
+	return CommentTimelineItem{
+		id:        comment.id,
+		Author:    comment.Author,
+		Message:   comment.Message,
+		Files:     comment.Files,
+		CreatedAt: comment.UnixTime,
+		LastEdit:  comment.UnixTime,
+		History: []CommentHistoryStep{
+			{
+				Message:  comment.Message,
+				UnixTime: comment.UnixTime,
+			},
+		},
+	}
+}
+
+// Id returns the identifier of the timeline item (same as the underlying comment).
+func (c *CommentTimelineItem) Id() entity.Id {
+	return c.id
+}
+
+// Append will append a new comment in the history and update the other values
+func (c *CommentTimelineItem) Append(comment Comment) {
+	c.Message = comment.Message
+	c.Files = comment.Files
+	c.LastEdit = comment.UnixTime
+	c.History = append(c.History, CommentHistoryStep{
+		Author:   comment.Author,
+		Message:  comment.Message,
+		UnixTime: comment.UnixTime,
+	})
+}
+
+// Edited say if the comment was edited
+func (c *CommentTimelineItem) Edited() bool {
+	return len(c.History) > 1
+}
+
+// MessageIsEmpty return true is the message is empty or only made of spaces
+func (c *CommentTimelineItem) MessageIsEmpty() bool {
+	return len(strings.TrimSpace(c.Message)) == 0
+}
diff --git a/entities/bug/with_snapshot.go b/entities/bug/with_snapshot.go
new file mode 100644
index 00000000..0474cac7
--- /dev/null
+++ b/entities/bug/with_snapshot.go
@@ -0,0 +1,53 @@
+package bug
+
+import (
+ "github.com/MichaelMure/git-bug/repository"
+)
+
+var _ Interface = &WithSnapshot{}
+
+// WithSnapshot encapsulate a Bug and maintain the corresponding Snapshot efficiently
+type WithSnapshot struct {
+	*Bug
+	// snap is the cached snapshot, lazily computed by Compile() and
+	// incrementally maintained by Append(); nil means "not computed yet"
+	snap *Snapshot
+}
+
+// Compile return the snapshot of the bug, computing and caching it on first call
+func (b *WithSnapshot) Compile() *Snapshot {
+	if b.snap == nil {
+		snap := b.Bug.Compile()
+		b.snap = snap
+	}
+	return b.snap
+}
+
+// Append intercept Bug.Append() to update the snapshot efficiently
+func (b *WithSnapshot) Append(op Operation) {
+	b.Bug.Append(op)
+
+	// no snapshot cached yet: nothing to maintain, Compile() will do the full work
+	if b.snap == nil {
+		return
+	}
+
+	// apply the operation incrementally instead of recompiling everything
+	op.Apply(b.snap)
+	b.snap.Operations = append(b.snap.Operations, op)
+}
+
+// Commit intercept Bug.Commit() to update the snapshot efficiently
+func (b *WithSnapshot) Commit(repo repository.ClockedRepo) error {
+	err := b.Bug.Commit(repo)
+
+	if err != nil {
+		// drop the cache: the bug state is uncertain after a failed commit
+		b.snap = nil
+		return err
+	}
+
+	// Commit() shouldn't change anything of the bug state apart from the
+	// initial ID set
+
+	if b.snap == nil {
+		return nil
+	}
+
+	b.snap.id = b.Bug.Id()
+	return nil
+}
diff --git a/entities/identity/common.go b/entities/identity/common.go
new file mode 100644
index 00000000..5c6445e9
--- /dev/null
+++ b/entities/identity/common.go
@@ -0,0 +1,37 @@
+package identity
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/MichaelMure/git-bug/entity"
+)
+
+// ErrIdentityNotExist is returned when an identity can't be found, locally or remotely
+var ErrIdentityNotExist = errors.New("identity doesn't exist")
+
+// NewErrMultipleMatch construct an ErrMultipleMatch error for identities
+func NewErrMultipleMatch(matching []entity.Id) *entity.ErrMultipleMatch {
+	return entity.NewErrMultipleMatch("identity", matching)
+}
+
+// UnmarshalJSON is a custom unmarshaling function to allow package user to delegate
+// the decoding of an Identity and distinguish between an Identity
+// and a Bare.
+//
+// If the given message has a "id" field, it's considered being a proper Identity.
+func UnmarshalJSON(raw json.RawMessage) (Interface, error) {
+	aux := &IdentityStub{}
+
+	// First try to decode and load as a normal Identity
+	err := json.Unmarshal(raw, &aux)
+	if err == nil && aux.Id() != "" {
+		return aux, nil
+	}
+
+	// abort if we have an error other than the wrong type
+	if _, ok := err.(*json.UnmarshalTypeError); err != nil && !ok {
+		return nil, err
+	}
+
+	// decoded cleanly but without an "id" field: not a known identity shape
+	return nil, fmt.Errorf("unknown identity type")
+}
diff --git a/entities/identity/identity.go b/entities/identity/identity.go
new file mode 100644
index 00000000..0a7642af
--- /dev/null
+++ b/entities/identity/identity.go
@@ -0,0 +1,620 @@
+// Package identity contains the identity data model and low-level related functions
+package identity
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+
+ "github.com/pkg/errors"
+
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/lamport"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
+// git ref and config locations used to store identities
+const identityRefPattern = "refs/identities/"
+const identityRemoteRefPattern = "refs/remotes/%s/identities/"
+const versionEntryName = "version"
+const identityConfigKey = "git-bug.identity"
+
+// ErrNonFastForwardMerge is returned by Merge when the remote identity is not
+// a simple extension of the local one (see Merge for the rationale)
+var ErrNonFastForwardMerge = errors.New("non fast-forward identity merge")
+var ErrNoIdentitySet = errors.New("No identity is set.\n" +
+	"To interact with bugs, an identity first needs to be created using " +
+	"\"git bug user create\"")
+var ErrMultipleIdentitiesSet = errors.New("multiple user identities set")
+
+// NewErrMultipleMatchIdentity construct an ErrMultipleMatch error for identities.
+// NOTE(review): duplicates NewErrMultipleMatch in common.go — consider keeping only one.
+func NewErrMultipleMatchIdentity(matching []entity.Id) *entity.ErrMultipleMatch {
+	return entity.NewErrMultipleMatch("identity", matching)
+}
+
+var _ Interface = &Identity{}
+var _ entity.Interface = &Identity{}
+
+// Identity is the full identity of a user, stored as a chain of immutable versions
+type Identity struct {
+	// all the successive version of the identity
+	versions []*version
+}
+
+// NewIdentity create a new Identity with only a name and an email
+func NewIdentity(repo repository.RepoClock, name string, email string) (*Identity, error) {
+	return NewIdentityFull(repo, name, email, "", "", nil)
+}
+
+// NewIdentityFull create a new Identity with every fields set
+func NewIdentityFull(repo repository.RepoClock, name string, email string, login string, avatarUrl string, keys []*Key) (*Identity, error) {
+	v, err := newVersion(repo, name, email, login, avatarUrl, keys)
+	if err != nil {
+		return nil, err
+	}
+	// a fresh identity starts with a single, not-yet-committed version
+	return &Identity{
+		versions: []*version{v},
+	}, nil
+}
+
+// NewFromGitUser will query the repository for user detail and
+// build the corresponding Identity
+func NewFromGitUser(repo repository.ClockedRepo) (*Identity, error) {
+	name, err := repo.GetUserName()
+	if err != nil {
+		return nil, err
+	}
+	if name == "" {
+		return nil, errors.New("user name is not configured in git yet. Please use `git config --global user.name \"John Doe\"`")
+	}
+
+	email, err := repo.GetUserEmail()
+	if err != nil {
+		return nil, err
+	}
+	if email == "" {
+		// fixed: this message used to wrongly say "user name"
+		return nil, errors.New("user email is not configured in git yet. Please use `git config --global user.email johndoe@example.com`")
+	}
+
+	return NewIdentity(repo, name, email)
+}
+
+// MarshalJSON will only serialize the id
+func (i *Identity) MarshalJSON() ([]byte, error) {
+	return json.Marshal(&IdentityStub{
+		id: i.Id(),
+	})
+}
+
+// UnmarshalJSON will only read the id
+// Users of this package are expected to run Load() to load
+// the remaining data from the identities data in git.
+func (i *Identity) UnmarshalJSON(data []byte) error {
+	// a concrete Identity is never decoded directly: the package-level
+	// identity.UnmarshalJSON produces an IdentityStub instead
+	panic("identity should be loaded with identity.UnmarshalJSON")
+}
+
+// ReadLocal load a local Identity from the identities data available in git
+func ReadLocal(repo repository.Repo, id entity.Id) (*Identity, error) {
+	ref := fmt.Sprintf("%s%s", identityRefPattern, id)
+	return read(repo, ref)
+}
+
+// ReadRemote load a remote Identity from the identities data available in git
+func ReadRemote(repo repository.Repo, remote string, id string) (*Identity, error) {
+	ref := fmt.Sprintf(identityRemoteRefPattern, remote) + id
+	return read(repo, ref)
+}
+
+// read will load and parse an identity from git, from the chain of commits
+// referenced by the given ref. Each commit holds one version of the identity.
+func read(repo repository.Repo, ref string) (*Identity, error) {
+	id := entity.RefToId(ref)
+
+	if err := id.Validate(); err != nil {
+		return nil, errors.Wrap(err, "invalid ref")
+	}
+
+	hashes, err := repo.ListCommits(ref)
+	if err != nil {
+		return nil, ErrIdentityNotExist
+	}
+	if len(hashes) == 0 {
+		return nil, fmt.Errorf("empty identity")
+	}
+
+	i := &Identity{}
+
+	for _, hash := range hashes {
+		entries, err := repo.ReadTree(hash)
+		if err != nil {
+			return nil, errors.Wrap(err, "can't list git tree entries")
+		}
+		// each commit's tree must hold exactly one entry: the version blob
+		if len(entries) != 1 {
+			return nil, fmt.Errorf("invalid identity data at hash %s", hash)
+		}
+
+		entry := entries[0]
+		if entry.Name != versionEntryName {
+			return nil, fmt.Errorf("invalid identity data at hash %s", hash)
+		}
+
+		data, err := repo.ReadData(entry.Hash)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to read git blob data")
+		}
+
+		// renamed from "version" to avoid shadowing the type name
+		var v version
+		err = json.Unmarshal(data, &v)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to decode Identity version json %s", hash)
+		}
+
+		// tag the version with the commit hash
+		v.commitHash = hash
+
+		i.versions = append(i.versions, &v)
+	}
+
+	// the identity id is defined as the id of its first version
+	if id != i.versions[0].Id() {
+		// fixed: error message typo ("math" -> "match")
+		return nil, fmt.Errorf("identity ID doesn't match the first version ID")
+	}
+
+	return i, nil
+}
+
+// ListLocalIds list all the available local identity ids
+// (one ref per identity under refs/identities/)
+func ListLocalIds(repo repository.Repo) ([]entity.Id, error) {
+	refs, err := repo.ListRefs(identityRefPattern)
+	if err != nil {
+		return nil, err
+	}
+
+	return entity.RefsToIds(refs), nil
+}
+
+// RemoveIdentity will remove a local identity from its entity.Id,
+// as well as the matching remote-tracking refs.
+func RemoveIdentity(repo repository.ClockedRepo, id entity.Id) error {
+	var fullMatches []string
+
+	refs, err := repo.ListRefs(identityRefPattern + id.String())
+	if err != nil {
+		return err
+	}
+	if len(refs) > 1 {
+		return NewErrMultipleMatchIdentity(entity.RefsToIds(refs))
+	}
+	if len(refs) == 1 {
+		// we have the identity locally
+		fullMatches = append(fullMatches, refs[0])
+	}
+
+	remotes, err := repo.GetRemotes()
+	if err != nil {
+		return err
+	}
+
+	for remote := range remotes {
+		remotePrefix := fmt.Sprintf(identityRemoteRefPattern+id.String(), remote)
+		remoteRefs, err := repo.ListRefs(remotePrefix)
+		if err != nil {
+			return err
+		}
+		if len(remoteRefs) > 1 {
+			// fixed: report the ambiguous remote refs, not the local "refs"
+			return NewErrMultipleMatchIdentity(entity.RefsToIds(remoteRefs))
+		}
+		if len(remoteRefs) == 1 {
+			// found the identity in a remote
+			fullMatches = append(fullMatches, remoteRefs[0])
+		}
+	}
+
+	if len(fullMatches) == 0 {
+		return ErrIdentityNotExist
+	}
+
+	for _, ref := range fullMatches {
+		err = repo.RemoveRef(ref)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// StreamedIdentity is the element type of the streaming read functions:
+// either an Identity or an error, never both.
+type StreamedIdentity struct {
+	Identity *Identity
+	Err error
+}
+
+// ReadAllLocal read and parse all local Identity
+func ReadAllLocal(repo repository.ClockedRepo) <-chan StreamedIdentity {
+	return readAll(repo, identityRefPattern)
+}
+
+// ReadAllRemote read and parse all remote Identity for a given remote
+func ReadAllRemote(repo repository.ClockedRepo, remote string) <-chan StreamedIdentity {
+	refPrefix := fmt.Sprintf(identityRemoteRefPattern, remote)
+	return readAll(repo, refPrefix)
+}
+
+// readAll read and parse all available identity with a given ref prefix
+func readAll(repo repository.ClockedRepo, refPrefix string) <-chan StreamedIdentity {
+	out := make(chan StreamedIdentity)
+
+	go func() {
+		// the channel is closed when every ref has been read or on first error
+		defer close(out)
+
+		refs, err := repo.ListRefs(refPrefix)
+		if err != nil {
+			out <- StreamedIdentity{Err: err}
+			return
+		}
+
+		for _, ref := range refs {
+			b, err := read(repo, ref)
+
+			// abort the whole stream on the first read error
+			if err != nil {
+				out <- StreamedIdentity{Err: err}
+				return
+			}
+
+			out <- StreamedIdentity{Identity: b}
+		}
+	}()
+
+	return out
+}
+
+// Mutator is the set of changeable fields of an Identity,
+// handed to the callback of Mutate.
+type Mutator struct {
+	Name string
+	Login string
+	Email string
+	AvatarUrl string
+	Keys []*Key
+}
+
+// Mutate allow to create a new version of the Identity in one go
+func (i *Identity) Mutate(repo repository.RepoClock, f func(orig *Mutator)) error {
+	// deep-copy the keys so the callback can't alias the stored versions
+	copyKeys := func(keys []*Key) []*Key {
+		result := make([]*Key, len(keys))
+		for i, key := range keys {
+			result[i] = key.Clone()
+		}
+		return result
+	}
+
+	orig := Mutator{
+		Name: i.Name(),
+		Email: i.Email(),
+		Login: i.Login(),
+		AvatarUrl: i.AvatarUrl(),
+		Keys: copyKeys(i.Keys()),
+	}
+	mutated := orig
+	// give the callback its own key slice so "orig" stays pristine
+	// for the comparison below
+	mutated.Keys = copyKeys(orig.Keys)
+
+	f(&mutated)
+
+	// no effective change: don't create a new version
+	if reflect.DeepEqual(orig, mutated) {
+		return nil
+	}
+
+	v, err := newVersion(repo,
+		mutated.Name,
+		mutated.Email,
+		mutated.Login,
+		mutated.AvatarUrl,
+		mutated.Keys,
+	)
+	if err != nil {
+		return err
+	}
+
+	// the new version stays in memory until Commit() is called
+	i.versions = append(i.versions, v)
+	return nil
+}
+
+// Commit write the identity into the Repository. In particular, this ensure that
+// the Id is properly set.
+func (i *Identity) Commit(repo repository.ClockedRepo) error {
+	if !i.NeedCommit() {
+		return fmt.Errorf("can't commit an identity with no pending version")
+	}
+
+	if err := i.Validate(); err != nil {
+		return errors.Wrap(err, "can't commit an identity with invalid data")
+	}
+
+	// each version becomes one commit (blob -> tree -> commit),
+	// chained onto the previous version's commit
+	var lastCommit repository.Hash
+	for _, v := range i.versions {
+		if v.commitHash != "" {
+			lastCommit = v.commitHash
+			// ignore already commit versions
+			continue
+		}
+
+		blobHash, err := v.Write(repo)
+		if err != nil {
+			return err
+		}
+
+		// Make a git tree referencing the blob
+		tree := []repository.TreeEntry{
+			{ObjectType: repository.Blob, Hash: blobHash, Name: versionEntryName},
+		}
+
+		treeHash, err := repo.StoreTree(tree)
+		if err != nil {
+			return err
+		}
+
+		var commitHash repository.Hash
+		if lastCommit != "" {
+			commitHash, err = repo.StoreCommit(treeHash, lastCommit)
+		} else {
+			// first version of the identity: root commit with no parent
+			commitHash, err = repo.StoreCommit(treeHash)
+		}
+		if err != nil {
+			return err
+		}
+
+		lastCommit = commitHash
+		v.commitHash = commitHash
+	}
+
+	// point the identity ref at the tip of the version chain
+	ref := fmt.Sprintf("%s%s", identityRefPattern, i.Id().String())
+	return repo.UpdateRef(ref, lastCommit)
+}
+
+// CommitAsNeeded commit the identity only if it has pending versions
+func (i *Identity) CommitAsNeeded(repo repository.ClockedRepo) error {
+	if !i.NeedCommit() {
+		return nil
+	}
+	return i.Commit(repo)
+}
+
+// NeedCommit say if the identity has versions not yet written into git
+func (i *Identity) NeedCommit() bool {
+	for _, v := range i.versions {
+		if v.commitHash == "" {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Merge will merge a different version of the same Identity
+//
+// To make sure that an Identity history can't be altered, a strict fast-forward
+// only policy is applied here. As an Identity should be tied to a single user, this
+// should work in practice, but it does leave a possibility that a user would edit his
+// Identity from two different repo concurrently and push the changes in a non-centralized
+// network of repositories. In this case, it would result in some repo accepting one
+// version and some other accepting another, preventing the network in general to converge
+// to the same result. This would create a sort of partition of the network, and manual
+// cleaning would be required.
+//
+// An alternative approach would be to have a determinist rebase:
+// - any commits present in both local and remote version would be kept, never changed.
+// - newer commits would be merged in a linear chain of commits, ordered based on the
+// Lamport time
+//
+// However, this approach leave the possibility, in the case of a compromised crypto keys,
+// of forging a new version with a bogus Lamport time to be inserted before a legit version,
+// invalidating the correct version and hijacking the Identity. There would only be a short
+// period of time when this would be possible (before the network converge) but I'm not
+// confident enough to implement that. I choose the strict fast-forward only approach,
+// despite its potential problem with two different version as mentioned above.
+//
+// Merge returns true when the local identity has been updated with new versions.
+func (i *Identity) Merge(repo repository.Repo, other *Identity) (bool, error) {
+	if i.Id() != other.Id() {
+		return false, errors.New("merging unrelated identities is not supported")
+	}
+
+	modified := false
+	var lastCommit repository.Hash
+	for j, otherVersion := range other.versions {
+		// if there is more version in other, take them
+		if len(i.versions) == j {
+			i.versions = append(i.versions, otherVersion)
+			lastCommit = otherVersion.commitHash
+			modified = true
+			// the freshly appended version trivially matches itself,
+			// skip the fast-forward check below
+			continue
+		}
+
+		// we have a non fast-forward merge.
+		// as explained in the doc above, refusing to merge
+		if i.versions[j].commitHash != otherVersion.commitHash {
+			return false, ErrNonFastForwardMerge
+		}
+	}
+
+	if modified {
+		err := repo.UpdateRef(identityRefPattern+i.Id().String(), lastCommit)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	// fixed: report whether the identity was actually updated; this used to
+	// always return false, so MergeAll could never emit an "updated" status
+	return modified, nil
+}
+
+// Validate check if the Identity data is valid
+func (i *Identity) Validate() error {
+	lastTimes := make(map[string]lamport.Time)
+
+	if len(i.versions) == 0 {
+		return fmt.Errorf("no version")
+	}
+
+	for _, v := range i.versions {
+		if err := v.Validate(); err != nil {
+			return err
+		}
+
+		// check for always increasing lamport time
+		// check that a new version didn't drop a clock
+		for name, previous := range lastTimes {
+			if now, ok := v.times[name]; ok {
+				if now < previous {
+					return fmt.Errorf("non-chronological lamport clock %s (%d --> %d)", name, previous, now)
+				}
+			} else {
+				return fmt.Errorf("version has less lamport clocks than before (missing %s)", name)
+			}
+		}
+
+		// record this version's clocks as the new baseline
+		for name, now := range v.times {
+			lastTimes[name] = now
+		}
+	}
+
+	return nil
+}
+
+// lastVersion return the most recent version of the identity.
+// It panics if the identity has no version at all, which can't happen for an
+// Identity built through the package constructors or read.
+func (i *Identity) lastVersion() *version {
+	if len(i.versions) <= 0 {
+		panic("no version at all")
+	}
+
+	return i.versions[len(i.versions)-1]
+}
+
+// Id return the Identity identifier
+func (i *Identity) Id() entity.Id {
+	// id is the id of the first version
+	return i.versions[0].Id()
+}
+
+// Name return the last version of the name
+func (i *Identity) Name() string {
+	return i.lastVersion().name
+}
+
+// DisplayName return a non-empty string to display, representing the
+// identity, based on the non-empty values.
+// It panics if both the name and the login are empty.
+func (i *Identity) DisplayName() string {
+	switch {
+	case i.Name() == "" && i.Login() != "":
+		return i.Login()
+	case i.Name() != "" && i.Login() == "":
+		return i.Name()
+	case i.Name() != "" && i.Login() != "":
+		return fmt.Sprintf("%s (%s)", i.Name(), i.Login())
+	}
+
+	panic("invalid person data")
+}
+
+// Email return the last version of the email
+func (i *Identity) Email() string {
+	return i.lastVersion().email
+}
+
+// Login return the last version of the login
+func (i *Identity) Login() string {
+	return i.lastVersion().login
+}
+
+// AvatarUrl return the last version of the Avatar URL
+func (i *Identity) AvatarUrl() string {
+	return i.lastVersion().avatarURL
+}
+
+// Keys return the last version of the valid keys
+func (i *Identity) Keys() []*Key {
+	return i.lastVersion().keys
+}
+
+// SigningKey return the key that should be used to sign new messages. If no key is available, return nil.
+func (i *Identity) SigningKey(repo repository.RepoKeyring) (*Key, error) {
+	keys := i.Keys()
+	// pick the first key for which a private key is available in the keyring
+	for _, key := range keys {
+		err := key.ensurePrivateKey(repo)
+		if err == errNoPrivateKey {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		return key, nil
+	}
+	return nil, nil
+}
+
+// ValidKeysAtTime return the set of keys valid at a given lamport time
+func (i *Identity) ValidKeysAtTime(clockName string, time lamport.Time) []*Key {
+	var result []*Key
+
+	var lastTime lamport.Time
+	for _, v := range i.versions {
+		// a version without this clock inherits the previous version's time
+		refTime, ok := v.times[clockName]
+		if !ok {
+			refTime = lastTime
+		}
+		lastTime = refTime
+
+		// versions are ordered: stop at the first one that became valid after "time"
+		if refTime > time {
+			return result
+		}
+
+		result = v.keys
+	}
+
+	return result
+}
+
+// LastModification return the timestamp at which the last version of the identity became valid.
+func (i *Identity) LastModification() timestamp.Timestamp {
+	return timestamp.Timestamp(i.lastVersion().unixTime)
+}
+
+// LastModificationLamports return the lamport times at which the last version of the identity became valid.
+func (i *Identity) LastModificationLamports() map[string]lamport.Time {
+	return i.lastVersion().times
+}
+
+// IsProtected return true if the chain of git commits started to be signed.
+// If that's the case, only signed commit with a valid key for this identity can be added.
+func (i *Identity) IsProtected() bool {
+	// Todo
+	return false
+}
+
+// SetMetadata store arbitrary metadata along the last not-commit version.
+// If the version has been commit to git already, a new identical version is added and will need to be
+// commit.
+func (i *Identity) SetMetadata(key string, value string) {
+	// once commit, data is immutable so we create a new version
+	if i.lastVersion().commitHash != "" {
+		i.versions = append(i.versions, i.lastVersion().Clone())
+	}
+	// if Id() has been called, we can't change the first version anymore, so we create a new version
+	if len(i.versions) == 1 && i.versions[0].id != entity.UnsetId && i.versions[0].id != "" {
+		i.versions = append(i.versions, i.lastVersion().Clone())
+	}
+
+	i.lastVersion().SetMetadata(key, value)
+}
+
+// ImmutableMetadata return all metadata for this Identity, accumulated from each version.
+// If multiple value are found, the first defined takes precedence.
+func (i *Identity) ImmutableMetadata() map[string]string {
+	metadata := make(map[string]string)
+
+	// iterate oldest to newest, keeping the first definition of each key
+	for _, version := range i.versions {
+		for key, value := range version.metadata {
+			if _, has := metadata[key]; !has {
+				metadata[key] = value
+			}
+		}
+	}
+
+	return metadata
+}
+
+// MutableMetadata return all metadata for this Identity, accumulated from each version.
+// If multiple value are found, the last defined takes precedence.
+func (i *Identity) MutableMetadata() map[string]string {
+	metadata := make(map[string]string)
+
+	// iterate oldest to newest, letting newer values overwrite older ones
+	for _, version := range i.versions {
+		for key, value := range version.metadata {
+			metadata[key] = value
+		}
+	}
+
+	return metadata
+}
diff --git a/entities/identity/identity_actions.go b/entities/identity/identity_actions.go
new file mode 100644
index 00000000..b58bb2d9
--- /dev/null
+++ b/entities/identity/identity_actions.go
@@ -0,0 +1,125 @@
+package identity
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+)
+
+// Fetch retrieve updates from a remote
+// This does not change the local identities state
+func Fetch(repo repository.Repo, remote string) (string, error) {
+	return repo.FetchRefs(remote, "identities")
+}
+
+// Push update a remote with the local changes
+func Push(repo repository.Repo, remote string) (string, error) {
+	return repo.PushRefs(remote, "identities")
+}
+
+// Pull will do a Fetch + MergeAll
+// This function will return an error if a merge fail
+func Pull(repo repository.ClockedRepo, remote string) error {
+	_, err := Fetch(repo, remote)
+	if err != nil {
+		return err
+	}
+
+	// drain the merge results, stopping at the first hard failure
+	for merge := range MergeAll(repo, remote) {
+		if merge.Err != nil {
+			return merge.Err
+		}
+		if merge.Status == entity.MergeStatusInvalid {
+			return errors.Errorf("merge failure: %s", merge.Reason)
+		}
+	}
+
+	return nil
+}
+
+// MergeAll will merge all the available remote identity.
+// Results are streamed on the returned channel, which is closed when done.
+//
+// NOTE(review): invalid remote data makes the loop "continue" to the next ref,
+// while repo-level errors "return" and abort the whole stream — confirm this
+// asymmetry is intended.
+func MergeAll(repo repository.ClockedRepo, remote string) <-chan entity.MergeResult {
+	out := make(chan entity.MergeResult)
+
+	go func() {
+		defer close(out)
+
+		remoteRefSpec := fmt.Sprintf(identityRemoteRefPattern, remote)
+		remoteRefs, err := repo.ListRefs(remoteRefSpec)
+
+		if err != nil {
+			out <- entity.MergeResult{Err: err}
+			return
+		}
+
+		for _, remoteRef := range remoteRefs {
+			// the identity id is the last element of the ref
+			refSplit := strings.Split(remoteRef, "/")
+			id := entity.Id(refSplit[len(refSplit)-1])
+
+			if err := id.Validate(); err != nil {
+				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "invalid ref").Error())
+				continue
+			}
+
+			remoteIdentity, err := read(repo, remoteRef)
+
+			if err != nil {
+				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "remote identity is not readable").Error())
+				continue
+			}
+
+			// Check for error in remote data
+			if err := remoteIdentity.Validate(); err != nil {
+				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "remote identity is invalid").Error())
+				continue
+			}
+
+			localRef := identityRefPattern + remoteIdentity.Id().String()
+			localExist, err := repo.RefExist(localRef)
+
+			if err != nil {
+				out <- entity.NewMergeError(err, id)
+				continue
+			}
+
+			// the identity is not local yet, simply create the reference
+			if !localExist {
+				err := repo.CopyRef(remoteRef, localRef)
+
+				if err != nil {
+					out <- entity.NewMergeError(err, id)
+					return
+				}
+
+				out <- entity.NewMergeNewStatus(id, remoteIdentity)
+				continue
+			}
+
+			localIdentity, err := read(repo, localRef)
+
+			if err != nil {
+				out <- entity.NewMergeError(errors.Wrap(err, "local identity is not readable"), id)
+				return
+			}
+
+			updated, err := localIdentity.Merge(repo, remoteIdentity)
+
+			if err != nil {
+				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "merge failed").Error())
+				return
+			}
+
+			if updated {
+				out <- entity.NewMergeUpdatedStatus(id, localIdentity)
+			} else {
+				out <- entity.NewMergeNothingStatus(id)
+			}
+		}
+	}()
+
+	return out
+}
diff --git a/entities/identity/identity_actions_test.go b/entities/identity/identity_actions_test.go
new file mode 100644
index 00000000..351fb7a4
--- /dev/null
+++ b/entities/identity/identity_actions_test.go
@@ -0,0 +1,157 @@
+package identity
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/MichaelMure/git-bug/repository"
+)
+
+// TestIdentityPushPull exercises push/pull/merge of identities between two
+// repositories sharing a remote, including refusal of non-fast-forward updates.
+func TestIdentityPushPull(t *testing.T) {
+	repoA, repoB, _ := repository.SetupGoGitReposAndRemote(t)
+
+	identity1, err := NewIdentity(repoA, "name1", "email1")
+	require.NoError(t, err)
+	err = identity1.Commit(repoA)
+	require.NoError(t, err)
+
+	// A --> remote --> B
+	_, err = Push(repoA, "origin")
+	require.NoError(t, err)
+
+	err = Pull(repoB, "origin")
+	require.NoError(t, err)
+
+	identities := allIdentities(t, ReadAllLocal(repoB))
+	// fixed: messages used to say "bugs" (copy-paste from the bug package)
+	require.Len(t, identities, 1, "unexpected number of identities")
+
+	// B --> remote --> A
+	identity2, err := NewIdentity(repoB, "name2", "email2")
+	require.NoError(t, err)
+	err = identity2.Commit(repoB)
+	require.NoError(t, err)
+
+	_, err = Push(repoB, "origin")
+	require.NoError(t, err)
+
+	err = Pull(repoA, "origin")
+	require.NoError(t, err)
+
+	identities = allIdentities(t, ReadAllLocal(repoA))
+	require.Len(t, identities, 2, "unexpected number of identities")
+
+	// Update both
+
+	err = identity1.Mutate(repoA, func(orig *Mutator) {
+		orig.Name = "name1b"
+		orig.Email = "email1b"
+	})
+	require.NoError(t, err)
+	err = identity1.Commit(repoA)
+	require.NoError(t, err)
+
+	err = identity2.Mutate(repoB, func(orig *Mutator) {
+		orig.Name = "name2b"
+		orig.Email = "email2b"
+	})
+	require.NoError(t, err)
+	err = identity2.Commit(repoB)
+	require.NoError(t, err)
+
+	// A --> remote --> B
+
+	_, err = Push(repoA, "origin")
+	require.NoError(t, err)
+
+	err = Pull(repoB, "origin")
+	require.NoError(t, err)
+
+	identities = allIdentities(t, ReadAllLocal(repoB))
+	require.Len(t, identities, 2, "unexpected number of identities")
+
+	// B --> remote --> A
+
+	_, err = Push(repoB, "origin")
+	require.NoError(t, err)
+
+	err = Pull(repoA, "origin")
+	require.NoError(t, err)
+
+	identities = allIdentities(t, ReadAllLocal(repoA))
+	require.Len(t, identities, 2, "unexpected number of identities")
+
+	// Concurrent update
+
+	err = identity1.Mutate(repoA, func(orig *Mutator) {
+		orig.Name = "name1c"
+		orig.Email = "email1c"
+	})
+	require.NoError(t, err)
+	err = identity1.Commit(repoA)
+	require.NoError(t, err)
+
+	identity1B, err := ReadLocal(repoB, identity1.Id())
+	require.NoError(t, err)
+
+	err = identity1B.Mutate(repoB, func(orig *Mutator) {
+		orig.Name = "name1concurrent"
+		orig.Email = "name1concurrent"
+	})
+	require.NoError(t, err)
+	err = identity1B.Commit(repoB)
+	require.NoError(t, err)
+
+	// A --> remote --> B
+
+	_, err = Push(repoA, "origin")
+	require.NoError(t, err)
+
+	// Pulling a non-fast-forward update should fail
+	err = Pull(repoB, "origin")
+	require.Error(t, err)
+
+	identities = allIdentities(t, ReadAllLocal(repoB))
+	require.Len(t, identities, 2, "unexpected number of identities")
+
+	// B --> remote --> A
+
+	// Pushing a non-fast-forward update should fail
+	_, err = Push(repoB, "origin")
+	require.Error(t, err)
+
+	err = Pull(repoA, "origin")
+	require.NoError(t, err)
+
+	identities = allIdentities(t, ReadAllLocal(repoA))
+	require.Len(t, identities, 2, "unexpected number of identities")
+}
+
+// allIdentities drains the streamed channel, failing the test on any error,
+// and collects the successfully read identities into a slice.
+func allIdentities(t testing.TB, identities <-chan StreamedIdentity) []*Identity {
+	var out []*Identity
+	for s := range identities {
+		if s.Err != nil {
+			t.Fatal(s.Err)
+		}
+		out = append(out, s.Identity)
+	}
+	return out
+}
diff --git a/entities/identity/identity_stub.go b/entities/identity/identity_stub.go
new file mode 100644
index 00000000..fb5c90a5
--- /dev/null
+++ b/entities/identity/identity_stub.go
@@ -0,0 +1,101 @@
+package identity
+
+import (
+ "encoding/json"
+
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/lamport"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
+var _ Interface = &IdentityStub{}
+
+// IdentityStub is an almost empty Identity, holding only the id.
+// When a normal Identity is serialized into JSON, only the id is serialized.
+// All the other data are stored in git in a chain of commit + a ref.
+// When this JSON is deserialized, an IdentityStub is returned instead, to be replaced
+// later by the proper Identity, loaded from the Repo.
+type IdentityStub struct {
+	id entity.Id
+}
+
+// MarshalJSON serialize only the id, as {"id": ...}
+func (i *IdentityStub) MarshalJSON() ([]byte, error) {
+	// TODO: add a type marker
+	return json.Marshal(struct {
+		Id entity.Id `json:"id"`
+	}{
+		Id: i.id,
+	})
+}
+
+// UnmarshalJSON read back the id from {"id": ...}
+func (i *IdentityStub) UnmarshalJSON(data []byte) error {
+	aux := struct {
+		Id entity.Id `json:"id"`
+	}{}
+
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+
+	i.id = aux.Id
+
+	return nil
+}
+
+// Id return the Identity identifier
+func (i *IdentityStub) Id() entity.Id {
+	return i.id
+}
+
+// All the accessors below would require the full identity data, which a stub
+// doesn't carry: calling them is a programming error, hence the panics.
+
+func (IdentityStub) Name() string {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (IdentityStub) DisplayName() string {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (IdentityStub) Email() string {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (IdentityStub) Login() string {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (IdentityStub) AvatarUrl() string {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (IdentityStub) Keys() []*Key {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (i *IdentityStub) SigningKey(repo repository.RepoKeyring) (*Key, error) {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (IdentityStub) ValidKeysAtTime(_ string, _ lamport.Time) []*Key {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (i *IdentityStub) LastModification() timestamp.Timestamp {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (i *IdentityStub) LastModificationLamports() map[string]lamport.Time {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (IdentityStub) IsProtected() bool {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+func (IdentityStub) Validate() error {
+	panic("identities needs to be properly loaded with identity.ReadLocal()")
+}
+
+// NeedCommit always return false: a stub carries no data to write
+func (i *IdentityStub) NeedCommit() bool {
+	return false
+}
diff --git a/entities/identity/identity_stub_test.go b/entities/identity/identity_stub_test.go
new file mode 100644
index 00000000..b01a718c
--- /dev/null
+++ b/entities/identity/identity_stub_test.go
@@ -0,0 +1,26 @@
+package identity
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestIdentityStubSerialize checks that an IdentityStub round-trips through
+// JSON: only the id is carried over.
+func TestIdentityStubSerialize(t *testing.T) {
+	before := &IdentityStub{
+		id: "id1234",
+	}
+
+	data, err := json.Marshal(before)
+	assert.NoError(t, err)
+
+	var after IdentityStub
+	err = json.Unmarshal(data, &after)
+	assert.NoError(t, err)
+
+	// enforce creating the Id
+	before.Id()
+
+	assert.Equal(t, before, &after)
+}
diff --git a/entities/identity/identity_test.go b/entities/identity/identity_test.go
new file mode 100644
index 00000000..f0c3bbe9
--- /dev/null
+++ b/entities/identity/identity_test.go
@@ -0,0 +1,292 @@
+package identity
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/lamport"
+)
+
// Test the commit and load of an Identity with multiple versions
func TestIdentityCommitLoad(t *testing.T) {
	repo := makeIdentityTestRepo(t)

	// single version

	identity, err := NewIdentity(repo, "René Descartes", "rene.descartes@example.com")
	require.NoError(t, err)

	idBeforeCommit := identity.Id()

	err = identity.Commit(repo)
	require.NoError(t, err)

	// the id must be stable across commit and shared with the first version
	commitsAreSet(t, identity)
	require.NotEmpty(t, identity.Id())
	require.Equal(t, idBeforeCommit, identity.Id())
	require.Equal(t, idBeforeCommit, identity.versions[0].Id())

	// a reload from the repo must yield an equal value
	loaded, err := ReadLocal(repo, identity.Id())
	require.NoError(t, err)
	commitsAreSet(t, loaded)
	require.Equal(t, identity, loaded)

	// multiple versions

	identity, err = NewIdentityFull(repo, "René Descartes", "rene.descartes@example.com", "", "", []*Key{generatePublicKey()})
	require.NoError(t, err)

	idBeforeCommit = identity.Id()

	err = identity.Mutate(repo, func(orig *Mutator) {
		orig.Keys = []*Key{generatePublicKey()}
	})
	require.NoError(t, err)

	err = identity.Mutate(repo, func(orig *Mutator) {
		orig.Keys = []*Key{generatePublicKey()}
	})
	require.NoError(t, err)

	// mutating must not change the identity's id
	require.Equal(t, idBeforeCommit, identity.Id())

	err = identity.Commit(repo)
	require.NoError(t, err)

	commitsAreSet(t, identity)
	require.NotEmpty(t, identity.Id())
	require.Equal(t, idBeforeCommit, identity.Id())
	require.Equal(t, idBeforeCommit, identity.versions[0].Id())

	loaded, err = ReadLocal(repo, identity.Id())
	require.NoError(t, err)
	commitsAreSet(t, loaded)
	require.Equal(t, identity, loaded)

	// add more version

	err = identity.Mutate(repo, func(orig *Mutator) {
		orig.Email = "rene@descartes.com"
		orig.Keys = []*Key{generatePublicKey()}
	})
	require.NoError(t, err)

	err = identity.Mutate(repo, func(orig *Mutator) {
		orig.Email = "rene@descartes.com"
		orig.Keys = []*Key{generatePublicKey(), generatePublicKey()}
	})
	require.NoError(t, err)

	err = identity.Commit(repo)
	require.NoError(t, err)

	commitsAreSet(t, identity)
	require.NotEmpty(t, identity.Id())
	require.Equal(t, idBeforeCommit, identity.Id())
	require.Equal(t, idBeforeCommit, identity.versions[0].Id())

	loaded, err = ReadLocal(repo, identity.Id())
	require.NoError(t, err)
	commitsAreSet(t, loaded)
	require.Equal(t, identity, loaded)
}
+
+func TestIdentityMutate(t *testing.T) {
+ repo := makeIdentityTestRepo(t)
+
+ identity, err := NewIdentity(repo, "René Descartes", "rene.descartes@example.com")
+ require.NoError(t, err)
+
+ require.Len(t, identity.versions, 1)
+
+ err = identity.Mutate(repo, func(orig *Mutator) {
+ orig.Email = "rene@descartes.fr"
+ orig.Name = "René"
+ orig.Login = "rene"
+ })
+ require.NoError(t, err)
+
+ require.Len(t, identity.versions, 2)
+ require.Equal(t, identity.Email(), "rene@descartes.fr")
+ require.Equal(t, identity.Name(), "René")
+ require.Equal(t, identity.Login(), "rene")
+}
+
+func commitsAreSet(t *testing.T, identity *Identity) {
+ for _, version := range identity.versions {
+ require.NotEmpty(t, version.commitHash)
+ }
+}
+
// Test that the correct crypto keys are returned for a given lamport time
func TestIdentity_ValidKeysAtTime(t *testing.T) {
	pubKeyA := generatePublicKey()
	pubKeyB := generatePublicKey()
	pubKeyC := generatePublicKey()
	pubKeyD := generatePublicKey()
	pubKeyE := generatePublicKey()

	// five versions on clock "foo", including two at the same time (201)
	identity := Identity{
		versions: []*version{
			{
				times: map[string]lamport.Time{"foo": 100},
				keys:  []*Key{pubKeyA},
			},
			{
				times: map[string]lamport.Time{"foo": 200},
				keys:  []*Key{pubKeyB},
			},
			{
				times: map[string]lamport.Time{"foo": 201},
				keys:  []*Key{pubKeyC},
			},
			{
				times: map[string]lamport.Time{"foo": 201},
				keys:  []*Key{pubKeyD},
			},
			{
				times: map[string]lamport.Time{"foo": 300},
				keys:  []*Key{pubKeyE},
			},
		},
	}

	// before the first version: no valid key
	require.Nil(t, identity.ValidKeysAtTime("foo", 10))
	require.Equal(t, identity.ValidKeysAtTime("foo", 100), []*Key{pubKeyA})
	require.Equal(t, identity.ValidKeysAtTime("foo", 140), []*Key{pubKeyA})
	require.Equal(t, identity.ValidKeysAtTime("foo", 200), []*Key{pubKeyB})
	// two versions share time 201: the later one (pubKeyD) wins
	require.Equal(t, identity.ValidKeysAtTime("foo", 201), []*Key{pubKeyD})
	require.Equal(t, identity.ValidKeysAtTime("foo", 202), []*Key{pubKeyD})
	require.Equal(t, identity.ValidKeysAtTime("foo", 300), []*Key{pubKeyE})
	require.Equal(t, identity.ValidKeysAtTime("foo", 3000), []*Key{pubKeyE})
}
+
// Test the immutable or mutable metadata search
func TestMetadata(t *testing.T) {
	repo := makeIdentityTestRepo(t)

	identity, err := NewIdentity(repo, "René Descartes", "rene.descartes@example.com")
	require.NoError(t, err)

	identity.SetMetadata("key1", "value1")
	assertHasKeyValue(t, identity.ImmutableMetadata(), "key1", "value1")
	assertHasKeyValue(t, identity.MutableMetadata(), "key1", "value1")

	err = identity.Commit(repo)
	require.NoError(t, err)

	assertHasKeyValue(t, identity.ImmutableMetadata(), "key1", "value1")
	assertHasKeyValue(t, identity.MutableMetadata(), "key1", "value1")

	// try override
	err = identity.Mutate(repo, func(orig *Mutator) {
		orig.Email = "rene@descartes.fr"
	})
	require.NoError(t, err)

	identity.SetMetadata("key1", "value2")
	// immutable metadata keeps the first value, mutable metadata the last one
	assertHasKeyValue(t, identity.ImmutableMetadata(), "key1", "value1")
	assertHasKeyValue(t, identity.MutableMetadata(), "key1", "value2")

	err = identity.Commit(repo)
	require.NoError(t, err)

	// reload
	loaded, err := ReadLocal(repo, identity.Id())
	require.NoError(t, err)

	assertHasKeyValue(t, loaded.ImmutableMetadata(), "key1", "value1")
	assertHasKeyValue(t, loaded.MutableMetadata(), "key1", "value2")

	// set metadata after commit
	versionCount := len(identity.versions)
	identity.SetMetadata("foo", "bar")
	require.True(t, identity.NeedCommit())
	// setting metadata on a committed identity stacks exactly one new version...
	require.Len(t, identity.versions, versionCount+1)

	err = identity.Commit(repo)
	require.NoError(t, err)
	// ...and committing it does not add another one
	require.Len(t, identity.versions, versionCount+1)
}
+
+func assertHasKeyValue(t *testing.T, metadata map[string]string, key, value string) {
+ val, ok := metadata[key]
+ require.True(t, ok)
+ require.Equal(t, val, value)
+}
+
// TestJSON checks that a serialized Identity deserializes to an IdentityStub
// with the same id, and that the id can then be resolved back to the full
// identity via ReadLocal.
func TestJSON(t *testing.T) {
	repo := makeIdentityTestRepo(t)

	identity, err := NewIdentity(repo, "René Descartes", "rene.descartes@example.com")
	require.NoError(t, err)

	// commit to make sure we have an Id
	err = identity.Commit(repo)
	require.NoError(t, err)
	require.NotEmpty(t, identity.Id())

	// serialize
	data, err := json.Marshal(identity)
	require.NoError(t, err)

	// deserialize, got a IdentityStub with the same id
	var i Interface
	i, err = UnmarshalJSON(data)
	require.NoError(t, err)
	require.Equal(t, identity.Id(), i.Id())

	// make sure we can load the identity properly
	i, err = ReadLocal(repo, i.Id())
	require.NoError(t, err)
}
+
+func TestIdentityRemove(t *testing.T) {
+ repo := repository.CreateGoGitTestRepo(t, false)
+ remoteA := repository.CreateGoGitTestRepo(t, true)
+ remoteB := repository.CreateGoGitTestRepo(t, true)
+
+ err := repo.AddRemote("remoteA", remoteA.GetLocalRemote())
+ require.NoError(t, err)
+
+ err = repo.AddRemote("remoteB", remoteB.GetLocalRemote())
+ require.NoError(t, err)
+
+ // generate an identity for testing
+ rene, err := NewIdentity(repo, "René Descartes", "rene@descartes.fr")
+ require.NoError(t, err)
+
+ err = rene.Commit(repo)
+ require.NoError(t, err)
+
+ _, err = Push(repo, "remoteA")
+ require.NoError(t, err)
+
+ _, err = Push(repo, "remoteB")
+ require.NoError(t, err)
+
+ _, err = Fetch(repo, "remoteA")
+ require.NoError(t, err)
+
+ _, err = Fetch(repo, "remoteB")
+ require.NoError(t, err)
+
+ err = RemoveIdentity(repo, rene.Id())
+ require.NoError(t, err)
+
+ _, err = ReadLocal(repo, rene.Id())
+ require.Error(t, ErrIdentityNotExist, err)
+
+ _, err = ReadRemote(repo, "remoteA", string(rene.Id()))
+ require.Error(t, ErrIdentityNotExist, err)
+
+ _, err = ReadRemote(repo, "remoteB", string(rene.Id()))
+ require.Error(t, ErrIdentityNotExist, err)
+
+ ids, err := ListLocalIds(repo)
+ require.NoError(t, err)
+ require.Len(t, ids, 0)
+}
diff --git a/entities/identity/identity_user.go b/entities/identity/identity_user.go
new file mode 100644
index 00000000..cd67459e
--- /dev/null
+++ b/entities/identity/identity_user.go
@@ -0,0 +1,68 @@
+package identity
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/pkg/errors"
+
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+)
+
// SetUserIdentity stores the user identity's id in the local git config,
// making it the current user identity.
func SetUserIdentity(repo repository.RepoConfig, identity *Identity) error {
	return repo.LocalConfig().StoreString(identityConfigKey, identity.Id().String())
}
+
+// GetUserIdentity read the current user identity, set with a git config entry
+func GetUserIdentity(repo repository.Repo) (*Identity, error) {
+ id, err := GetUserIdentityId(repo)
+ if err != nil {
+ return nil, err
+ }
+
+ i, err := ReadLocal(repo, id)
+ if err == ErrIdentityNotExist {
+ innerErr := repo.LocalConfig().RemoveAll(identityConfigKey)
+ if innerErr != nil {
+ _, _ = fmt.Fprintln(os.Stderr, errors.Wrap(innerErr, "can't clear user identity").Error())
+ }
+ return nil, err
+ }
+
+ return i, nil
+}
+
+func GetUserIdentityId(repo repository.Repo) (entity.Id, error) {
+ val, err := repo.LocalConfig().ReadString(identityConfigKey)
+ if err == repository.ErrNoConfigEntry {
+ return entity.UnsetId, ErrNoIdentitySet
+ }
+ if err == repository.ErrMultipleConfigEntry {
+ return entity.UnsetId, ErrMultipleIdentitiesSet
+ }
+ if err != nil {
+ return entity.UnsetId, err
+ }
+
+ var id = entity.Id(val)
+
+ if err := id.Validate(); err != nil {
+ return entity.UnsetId, err
+ }
+
+ return id, nil
+}
+
+// IsUserIdentitySet say if the user has set his identity
+func IsUserIdentitySet(repo repository.Repo) (bool, error) {
+ _, err := repo.LocalConfig().ReadString(identityConfigKey)
+ if err == repository.ErrNoConfigEntry {
+ return false, nil
+ }
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
diff --git a/entities/identity/interface.go b/entities/identity/interface.go
new file mode 100644
index 00000000..c6e22e00
--- /dev/null
+++ b/entities/identity/interface.go
@@ -0,0 +1,62 @@
+package identity
+
+import (
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/lamport"
+ "github.com/MichaelMure/git-bug/util/timestamp"
+)
+
// Interface is the common read interface of an identity, implemented by both
// the full Identity and the lightweight IdentityStub.
type Interface interface {
	entity.Interface

	// Name return the last version of the name
	// Can be empty.
	Name() string

	// DisplayName return a non-empty string to display, representing the
	// identity, based on the non-empty values.
	DisplayName() string

	// Email return the last version of the email
	// Can be empty.
	Email() string

	// Login return the last version of the login
	// Can be empty.
	// Warning: this login can be defined when importing from a bridge but should *not* be
	// used to identify an identity as multiple bridge with different login can map to the same
	// identity. Use the metadata system for that usage instead.
	Login() string

	// AvatarUrl return the last version of the Avatar URL
	// Can be empty.
	AvatarUrl() string

	// Keys return the last version of the valid keys
	// Can be empty.
	Keys() []*Key

	// SigningKey return the key that should be used to sign new messages. If no key is available, return nil.
	SigningKey(repo repository.RepoKeyring) (*Key, error)

	// ValidKeysAtTime return the set of keys valid at a given lamport time for a given clock of another entity
	// Can be empty.
	ValidKeysAtTime(clockName string, time lamport.Time) []*Key

	// LastModification return the timestamp at which the last version of the identity became valid.
	LastModification() timestamp.Timestamp

	// LastModificationLamports return the lamport times at which the last version of the identity became valid.
	LastModificationLamports() map[string]lamport.Time

	// IsProtected return true if the chain of git commits started to be signed.
	// If that's the case, only signed commit with a valid key for this identity can be added.
	IsProtected() bool

	// Validate check if the Identity data is valid
	Validate() error

	// NeedCommit indicate that the in-memory state changed and need to be committed in the repository
	NeedCommit() bool
}
diff --git a/entities/identity/key.go b/entities/identity/key.go
new file mode 100644
index 00000000..82b9b95c
--- /dev/null
+++ b/entities/identity/key.go
@@ -0,0 +1,234 @@
+package identity
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/ProtonMail/go-crypto/openpgp"
+ "github.com/ProtonMail/go-crypto/openpgp/armor"
+ "github.com/ProtonMail/go-crypto/openpgp/packet"
+ "github.com/pkg/errors"
+
+ "github.com/MichaelMure/git-bug/repository"
+)
+
// errNoPrivateKey signals that no private key could be found in the keyring.
var errNoPrivateKey = fmt.Errorf("no private key")

// Key holds an OpenPGP public key and, optionally, the matching private key.
// Only the public part is serialized to JSON; the private part lives in the
// keyring (see storePrivate/loadPrivate).
type Key struct {
	public  *packet.PublicKey
	private *packet.PrivateKey
}
+
// GenerateKey generate a keypair (public+private)
// The type and configuration of the key is determined by the default value in go's OpenPGP.
// It panics if the underlying key generation fails.
func GenerateKey() *Key {
	entity, err := openpgp.NewEntity("", "", "", &packet.Config{
		// The armored format doesn't include the creation time, which makes the round-trip data not being fully equal.
		// We don't care about the creation time so we can set it to the zero value.
		Time: func() time.Time {
			return time.Time{}
		},
	})
	if err != nil {
		panic(err)
	}
	return &Key{
		public:  entity.PrimaryKey,
		private: entity.PrivateKey,
	}
}
+
+// generatePublicKey generate only a public key (only useful for testing)
+// See GenerateKey for the details.
+func generatePublicKey() *Key {
+ k := GenerateKey()
+ k.private = nil
+ return k
+}
+
// Public returns the public part of the keypair.
func (k *Key) Public() *packet.PublicKey {
	return k.public
}

// Private returns the private part of the keypair, or nil when it hasn't been
// generated or loaded from the keyring (see ensurePrivateKey).
func (k *Key) Private() *packet.PrivateKey {
	return k.private
}
+
+func (k *Key) Validate() error {
+ if k.public == nil {
+ return fmt.Errorf("nil public key")
+ }
+ if !k.public.CanSign() {
+ return fmt.Errorf("public key can't sign")
+ }
+
+ if k.private != nil {
+ if !k.private.CanSign() {
+ return fmt.Errorf("private key can't sign")
+ }
+ }
+
+ return nil
+}
+
// Clone returns a copy of the key.
// NOTE(review): only the top-level packet structs are copied; their nested
// fields are shared with the original (shallow copy) — presumably fine since
// keys are not mutated after creation, confirm.
func (k *Key) Clone() *Key {
	clone := &Key{}

	pub := *k.public
	clone.public = &pub

	if k.private != nil {
		priv := *k.private
		clone.private = &priv
	}

	return clone
}
+
// MarshalJSON serializes the key as a JSON string holding the armored public
// key. The private part is never serialized.
func (k *Key) MarshalJSON() ([]byte, error) {
	// Serialize only the public key, in the armored format.
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, openpgp.PublicKeyType, nil)
	if err != nil {
		return nil, err
	}

	err = k.public.Serialize(w)
	if err != nil {
		return nil, err
	}
	err = w.Close()
	if err != nil {
		return nil, err
	}
	return json.Marshal(buf.String())
}
+
// UnmarshalJSON reads the armored public key back from a JSON string.
// The private part, if any, must be loaded separately (see ensurePrivateKey).
func (k *Key) UnmarshalJSON(data []byte) error {
	// De-serialize only the public key, in the armored format.
	var armored string
	err := json.Unmarshal(data, &armored)
	if err != nil {
		return err
	}

	block, err := armor.Decode(strings.NewReader(armored))
	if err == io.EOF {
		return fmt.Errorf("no armored data found")
	}
	if err != nil {
		return err
	}

	if block.Type != openpgp.PublicKeyType {
		return fmt.Errorf("invalid key type")
	}

	p, err := packet.Read(block.Body)
	if err != nil {
		return errors.Wrap(err, "failed to read public key packet")
	}

	public, ok := p.(*packet.PublicKey)
	if !ok {
		return errors.New("got no packet.publicKey")
	}

	// The armored format doesn't include the creation time, which makes the round-trip data not being fully equal.
	// We don't care about the creation time so we can set it to the zero value.
	public.CreationTime = time.Time{}

	k.public = public
	return nil
}
+
// loadPrivate fetches the private key matching our public key from the
// keyring, where it is stored keyed by the public key id.
// Returns errNoPrivateKey when the keyring has no such entry.
func (k *Key) loadPrivate(repo repository.RepoKeyring) error {
	item, err := repo.Keyring().Get(k.public.KeyIdString())
	if err == repository.ErrKeyringKeyNotFound {
		return errNoPrivateKey
	}
	if err != nil {
		return err
	}

	block, err := armor.Decode(bytes.NewReader(item.Data))
	if err == io.EOF {
		return fmt.Errorf("no armored data found")
	}
	if err != nil {
		return err
	}

	if block.Type != openpgp.PrivateKeyType {
		return fmt.Errorf("invalid key type")
	}

	p, err := packet.Read(block.Body)
	if err != nil {
		return errors.Wrap(err, "failed to read private key packet")
	}

	private, ok := p.(*packet.PrivateKey)
	if !ok {
		return errors.New("got no packet.privateKey")
	}

	// The armored format doesn't include the creation time, which makes the round-trip data not being fully equal.
	// We don't care about the creation time so we can set it to the zero value.
	private.CreationTime = time.Time{}

	k.private = private
	return nil
}
+
+// ensurePrivateKey attempt to load the corresponding private key if it is not loaded already.
+// If no private key is found, returns errNoPrivateKey
+func (k *Key) ensurePrivateKey(repo repository.RepoKeyring) error {
+ if k.private != nil {
+ return nil
+ }
+
+ return k.loadPrivate(repo)
+}
+
// storePrivate saves the armored private key into the keyring, keyed by the
// public key id (the same key loadPrivate reads it back with).
func (k *Key) storePrivate(repo repository.RepoKeyring) error {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, openpgp.PrivateKeyType, nil)
	if err != nil {
		return err
	}
	err = k.private.Serialize(w)
	if err != nil {
		return err
	}
	err = w.Close()
	if err != nil {
		return err
	}

	return repo.Keyring().Set(repository.Item{
		Key:  k.public.KeyIdString(),
		Data: buf.Bytes(),
	})
}
+
// PGPEntity wraps the keypair into an openpgp.Entity with an anonymous user
// id and a minimal self-signature.
func (k *Key) PGPEntity() *openpgp.Entity {
	uid := packet.NewUserId("", "", "")
	return &openpgp.Entity{
		PrimaryKey: k.public,
		PrivateKey: k.private,
		Identities: map[string]*openpgp.Identity{
			uid.Id: {
				Name:   uid.Id,
				UserId: uid,
				SelfSignature: &packet.Signature{
					IsPrimaryId: func() *bool { b := true; return &b }(),
				},
			},
		},
	}
}
diff --git a/entities/identity/key_test.go b/entities/identity/key_test.go
new file mode 100644
index 00000000..6e320dc2
--- /dev/null
+++ b/entities/identity/key_test.go
@@ -0,0 +1,60 @@
+package identity
+
+import (
+ "crypto/rsa"
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/MichaelMure/git-bug/repository"
+)
+
+func TestPublicKeyJSON(t *testing.T) {
+ k := generatePublicKey()
+
+ dataJSON, err := json.Marshal(k)
+ require.NoError(t, err)
+
+ var read Key
+ err = json.Unmarshal(dataJSON, &read)
+ require.NoError(t, err)
+
+ require.Equal(t, k, &read)
+}
+
// TestStoreLoad checks that a private key stored into the keyring can be
// loaded back and matches the generated one.
func TestStoreLoad(t *testing.T) {
	repo := repository.NewMockRepoKeyring()

	// public + private
	k := GenerateKey()

	// Store

	dataJSON, err := json.Marshal(k)
	require.NoError(t, err)

	err = k.storePrivate(repo)
	require.NoError(t, err)

	// Load

	var read Key
	err = json.Unmarshal(dataJSON, &read)
	require.NoError(t, err)

	err = read.ensurePrivateKey(repo)
	require.NoError(t, err)

	require.Equal(t, k.public, read.public)

	require.IsType(t, (*rsa.PrivateKey)(nil), k.private.PrivateKey)

	// See https://github.com/golang/crypto/pull/175
	// The parsed key can come back with its RSA primes swapped; normalize
	// before comparing.
	rsaPriv := read.private.PrivateKey.(*rsa.PrivateKey)
	back := rsaPriv.Primes[0]
	rsaPriv.Primes[0] = rsaPriv.Primes[1]
	rsaPriv.Primes[1] = back

	require.True(t, k.private.PrivateKey.(*rsa.PrivateKey).Equal(read.private.PrivateKey))
}
diff --git a/entities/identity/resolver.go b/entities/identity/resolver.go
new file mode 100644
index 00000000..5468a8f8
--- /dev/null
+++ b/entities/identity/resolver.go
@@ -0,0 +1,34 @@
+package identity
+
+import (
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+)
+
var _ entity.Resolver = &SimpleResolver{}

// SimpleResolver is a Resolver loading Identities directly from a Repo
type SimpleResolver struct {
	repo repository.Repo
}

// NewSimpleResolver returns a Resolver backed by the given repository.
func NewSimpleResolver(repo repository.Repo) *SimpleResolver {
	return &SimpleResolver{repo: repo}
}

// Resolve loads the full Identity with the given id from the repository.
func (r *SimpleResolver) Resolve(id entity.Id) (entity.Interface, error) {
	return ReadLocal(r.repo, id)
}
+
var _ entity.Resolver = &StubResolver{}

// StubResolver is a Resolver that doesn't load anything, only returning IdentityStub instances
type StubResolver struct{}

// NewStubResolver returns a Resolver producing stubs.
func NewStubResolver() *StubResolver {
	return &StubResolver{}
}

// Resolve returns a lightweight IdentityStub carrying only the given id.
func (s *StubResolver) Resolve(id entity.Id) (entity.Interface, error) {
	return &IdentityStub{id: id}, nil
}
diff --git a/entities/identity/version.go b/entities/identity/version.go
new file mode 100644
index 00000000..9a52d089
--- /dev/null
+++ b/entities/identity/version.go
@@ -0,0 +1,273 @@
+package identity
+
+import (
+ "crypto/rand"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/lamport"
+ "github.com/MichaelMure/git-bug/util/text"
+)
+
// 1: original format
// 2: Identity Ids are generated from the first version serialized data instead of from the first git commit
//    + Identity hold multiple lamport clocks from other entities, instead of just bug edit
const formatVersion = 2

// version is a complete set of information about an Identity at a point in time.
type version struct {
	name      string
	email     string // as defined in git or from a bridge when importing the identity
	login     string // from a bridge when importing the identity
	avatarURL string

	// The lamport times of the other entities at which this version become effective
	times    map[string]lamport.Time
	unixTime int64 // creation time of the version, in unix seconds

	// The set of keys valid at that time, from this version onward, until they get removed
	// in a new version. This allow to have multiple key for the same identity (e.g. one per
	// device) as well as revoke key.
	keys []*Key

	// mandatory random bytes to ensure a better randomness of the data of the first
	// version of an identity, used to later generate the ID
	// len(Nonce) should be > 20 and < 64 bytes
	// It has no functional purpose and should be ignored.
	// TODO: optional after first version?
	nonce []byte

	// A set of arbitrary key/value to store metadata about a version or about an Identity in general.
	metadata map[string]string

	// Not serialized. Store the version's id in memory, computed lazily (see Id()).
	id entity.Id
	// Not serialized
	commitHash repository.Hash
}
+
// newVersion creates a version with the given profile data, stamped with the
// current unix time, the current time of every lamport clock in the repo, and
// a fresh random nonce.
func newVersion(repo repository.RepoClock, name string, email string, login string, avatarURL string, keys []*Key) (*version, error) {
	clocks, err := repo.AllClocks()
	if err != nil {
		return nil, err
	}

	// snapshot every known lamport clock at its current time
	times := make(map[string]lamport.Time)
	for name, clock := range clocks {
		times[name] = clock.Time()
	}

	return &version{
		id:        entity.UnsetId,
		name:      name,
		email:     email,
		login:     login,
		avatarURL: avatarURL,
		times:     times,
		unixTime:  time.Now().Unix(),
		keys:      keys,
		nonce:     makeNonce(20),
	}, nil
}
+
// versionJSON is the serialization schema of version; the raw serialized
// bytes are also what the version's entity id is derived from.
type versionJSON struct {
	// Additional field to version the data
	FormatVersion uint `json:"version"`

	Times     map[string]lamport.Time `json:"times"`
	UnixTime  int64                   `json:"unix_time"`
	Name      string                  `json:"name,omitempty"`
	Email     string                  `json:"email,omitempty"`
	Login     string                  `json:"login,omitempty"`
	AvatarUrl string                  `json:"avatar_url,omitempty"`
	Keys      []*Key                  `json:"pub_keys,omitempty"`
	Nonce     []byte                  `json:"nonce"`
	Metadata  map[string]string       `json:"metadata,omitempty"`
}
+
// Id return the identifier of the version, lazily deriving it from the
// serialized form when it hasn't been computed yet.
func (v *version) Id() entity.Id {
	if v.id == "" {
		// something went really wrong
		panic("version's id not set")
	}
	if v.id == entity.UnsetId {
		// This means we are trying to get the version's Id *before* it has been stored.
		// As the Id is computed based on the actual bytes written on the disk, we are going to predict
		// those and then get the Id. This is safe as it will be the exact same code writing on disk later.
		data, err := json.Marshal(v)
		if err != nil {
			panic(err)
		}
		v.id = entity.DeriveId(data)
	}
	return v.id
}
+
// Clone makes a copy of the version with the commit hash and cached id reset,
// ready to be mutated and written as a new version.
func (v *version) Clone() *version {
	// copy direct fields
	clone := *v

	// reset some fields
	clone.commitHash = ""
	clone.id = entity.UnsetId

	clone.times = make(map[string]lamport.Time)
	for name, t := range v.times {
		clone.times[name] = t
	}

	clone.keys = make([]*Key, len(v.keys))
	for i, key := range v.keys {
		clone.keys[i] = key.Clone()
	}

	clone.nonce = make([]byte, len(v.nonce))
	copy(clone.nonce, v.nonce)

	// not copying metadata
	// NOTE(review): `clone := *v` copies the map header, so the clone actually
	// SHARES the metadata map with the original — a SetMetadata on one is
	// visible on the other. Confirm this aliasing is intended.

	return &clone
}
+
// MarshalJSON serializes the version through the versionJSON envelope,
// stamping it with the current format version.
func (v *version) MarshalJSON() ([]byte, error) {
	return json.Marshal(versionJSON{
		FormatVersion: formatVersion,
		Times:         v.times,
		UnixTime:      v.unixTime,
		Name:          v.name,
		Email:         v.email,
		Login:         v.login,
		AvatarUrl:     v.avatarURL,
		Keys:          v.keys,
		Nonce:         v.nonce,
		Metadata:      v.metadata,
	})
}
+
// UnmarshalJSON decodes a versionJSON envelope, rejecting any format version
// other than the current one, and caches the id derived from the raw bytes.
func (v *version) UnmarshalJSON(data []byte) error {
	var aux versionJSON

	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}

	if aux.FormatVersion != formatVersion {
		return entity.NewErrInvalidFormat(aux.FormatVersion, formatVersion)
	}

	// the id is the hash of the serialized data, so it can be set right away
	v.id = entity.DeriveId(data)
	v.times = aux.Times
	v.unixTime = aux.UnixTime
	v.name = aux.Name
	v.email = aux.Email
	v.login = aux.Login
	v.avatarURL = aux.AvatarUrl
	v.keys = aux.Keys
	v.nonce = aux.Nonce
	v.metadata = aux.Metadata

	return nil
}
+
// Validate checks the version's invariants: a non-empty name or login made of
// safe one-line text, a valid avatar URL, a 20-64 byte nonce, valid keys, and
// a unix time set once committed.
func (v *version) Validate() error {
	// time must be set after a commit
	if v.commitHash != "" && v.unixTime == 0 {
		return fmt.Errorf("unix time not set")
	}

	if text.Empty(v.name) && text.Empty(v.login) {
		return fmt.Errorf("either name or login should be set")
	}
	if !text.SafeOneLine(v.name) {
		return fmt.Errorf("name has unsafe characters")
	}

	if !text.SafeOneLine(v.login) {
		return fmt.Errorf("login has unsafe characters")
	}

	if !text.SafeOneLine(v.email) {
		return fmt.Errorf("email has unsafe characters")
	}

	if v.avatarURL != "" && !text.ValidUrl(v.avatarURL) {
		return fmt.Errorf("avatarUrl is not a valid URL")
	}

	if len(v.nonce) > 64 {
		return fmt.Errorf("nonce is too big")
	}
	if len(v.nonce) < 20 {
		return fmt.Errorf("nonce is too small")
	}

	for _, k := range v.keys {
		if err := k.Validate(); err != nil {
			return errors.Wrap(err, "invalid key")
		}
	}

	return nil
}
+
// Write will serialize and store the version as a git blob and return
// its hash. It also caches the version's id, derived from the exact bytes
// written.
func (v *version) Write(repo repository.Repo) (repository.Hash, error) {
	// make sure we don't write invalid data
	err := v.Validate()
	if err != nil {
		return "", errors.Wrap(err, "validation error")
	}

	data, err := json.Marshal(v)
	if err != nil {
		return "", err
	}

	hash, err := repo.StoreData(data)
	if err != nil {
		return "", err
	}

	// make sure we set the Id when writing in the repo
	v.id = entity.DeriveId(data)

	return hash, nil
}
+
// makeNonce returns `length` cryptographically-random bytes, panicking if the
// system source of randomness fails. (The parameter was previously named
// `len`, shadowing the builtin.)
func makeNonce(length int) []byte {
	result := make([]byte, length)
	_, err := rand.Read(result)
	if err != nil {
		panic(err)
	}
	return result
}
+
// SetMetadata store arbitrary metadata about a version or an Identity in general
// If the version has been commit to git already, it won't be overwritten.
// NOTE(review): this method itself does not guard on commitHash — presumably
// the caller enforces the no-overwrite rule; confirm.
// Beware: changing the metadata on a version will change its ID
func (v *version) SetMetadata(key string, value string) {
	if v.metadata == nil {
		v.metadata = make(map[string]string)
	}
	v.metadata[key] = value
}

// GetMetadata retrieve arbitrary metadata about the version
func (v *version) GetMetadata(key string) (string, bool) {
	val, ok := v.metadata[key]
	return val, ok
}

// AllMetadata return all metadata for this version
// The returned map is the internal one, not a copy.
func (v *version) AllMetadata() map[string]string {
	return v.metadata
}
diff --git a/entities/identity/version_test.go b/entities/identity/version_test.go
new file mode 100644
index 00000000..385ad4d7
--- /dev/null
+++ b/entities/identity/version_test.go
@@ -0,0 +1,78 @@
+package identity
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/MichaelMure/git-bug/entity"
+ "github.com/MichaelMure/git-bug/repository"
+ "github.com/MichaelMure/git-bug/util/lamport"
+)
+
+func makeIdentityTestRepo(t *testing.T) repository.ClockedRepo {
+ repo := repository.NewMockRepo()
+
+ clock1, err := repo.GetOrCreateClock("foo")
+ require.NoError(t, err)
+ err = clock1.Witness(42)
+ require.NoError(t, err)
+
+ clock2, err := repo.GetOrCreateClock("bar")
+ require.NoError(t, err)
+ err = clock2.Witness(34)
+ require.NoError(t, err)
+
+ return repo
+}
+
+func TestVersionJSON(t *testing.T) {
+ repo := makeIdentityTestRepo(t)
+
+ keys := []*Key{
+ generatePublicKey(),
+ generatePublicKey(),
+ }
+
+ before, err := newVersion(repo, "name", "email", "login", "avatarUrl", keys)
+ require.NoError(t, err)
+
+ before.SetMetadata("key1", "value1")
+ before.SetMetadata("key2", "value2")
+
+ expected := &version{
+ id: entity.UnsetId,
+ name: "name",
+ email: "email",
+ login: "login",
+ avatarURL: "avatarUrl",
+ unixTime: time.Now().Unix(),
+ times: map[string]lamport.Time{
+ "foo": 42,
+ "bar": 34,
+ },
+ keys: keys,
+ nonce: before.nonce,
+ metadata: map[string]string{
+ "key1": "value1",
+ "key2": "value2",
+ },
+ }
+
+ require.Equal(t, expected, before)
+
+ data, err := json.Marshal(before)
+ assert.NoError(t, err)
+
+ var after version
+ err = json.Unmarshal(data, &after)
+ assert.NoError(t, err)
+
+ // make sure we now have an Id
+ expected.Id()
+
+ assert.Equal(t, expected, &after)
+}