author    Simon Ser <contact@emersion.fr>    2019-05-19 09:49:57 +0000
committer Drew DeVault <sir@cmpwn.com>       2019-05-19 11:51:16 -0400
commit    a15ea01cfb0a303355b2e6bb31e85ece0d048ac2 (patch)
tree      1df46678dc46a8dd68edb0ff00eaa7f4753fbc34 /lib/msgstore.go
parent    43dba93263fe490a691dc504dbfe9d18a3c64697 (diff)
download  aerc-a15ea01cfb0a303355b2e6bb31e85ece0d048ac2.tar.gz
Update internal state and draw from the same goroutine
This commit introduces a new Aerc.Tick function that should be called to refresh the internal state. This in turn makes each AccountView process worker events.

The UI goroutine repeatedly refreshes the internal state before drawing a new frame. The reason for this is that many worker messages may need to be processed for a single frame, and drawing the UI is far slower than refreshing the internal state. This has been confirmed in my testing (calling Aerc.Tick only once per frame results in a slower display).

A lot of synchronization code has been removed. We can now write widgets without having to care so much about races. The remaining sync users are:

- widgets/spinner: the spinner value is updated from inside an internal goroutine
- lib/ui/invalidatable: Invalidate may be called from any goroutine
- lib/ui/grid: same
- lib/ui/ui: an internal goroutine needs read access to UI.exit
- worker/types/worker: Worker.callbacks is used for both worker and UI callbacks

The exact goroutine requirements for Drawable have been documented.
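To make the loop concrete, here is a minimal sketch of the pattern; it is not the actual aerc code. Only the Tick name comes from this commit; the events channel and closure-based worker messages are assumptions for illustration:

package main

import "fmt"

// Minimal model of the pattern described above: worker goroutines post
// state-mutating events to a channel, and the single UI goroutine drains
// them all (via Tick) before paying the cost of drawing a frame.
type Aerc struct {
	events chan func()
}

// Tick processes at most one pending worker event and reports whether it
// did. Because it is only ever called from the UI goroutine, the state
// the events mutate needs no locking.
func (aerc *Aerc) Tick() bool {
	select {
	case ev := <-aerc.events:
		ev()
		return true
	default:
		return false
	}
}

func main() {
	aerc := &Aerc{events: make(chan func(), 16)}

	// A worker would post events like these from its own goroutine.
	aerc.events <- func() { fmt.Println("merge MessageInfo into store") }
	aerc.events <- func() { fmt.Println("update DirectoryInfo") }

	// One iteration of the draw loop: refresh internal state until no
	// events remain, then draw a single frame.
	for aerc.Tick() {
	}
	fmt.Println("draw frame")
}

Calling Tick in a loop before each draw is what lets many worker messages be absorbed per frame, which is the performance point made above.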
Diffstat (limited to 'lib/msgstore.go')
-rw-r--r--  lib/msgstore.go  19
1 file changed, 0 insertions(+), 19 deletions(-)
diff --git a/lib/msgstore.go b/lib/msgstore.go
index fbffa0a7..827d7cbf 100644
--- a/lib/msgstore.go
+++ b/lib/msgstore.go
@@ -2,7 +2,6 @@ package lib
 
 import (
 	"io"
-	"sync"
 	"time"
 
 	"github.com/emersion/go-imap"
@@ -12,8 +11,6 @@ import (
 
 // Accesses to fields must be guarded by MessageStore.Lock/Unlock
 type MessageStore struct {
-	sync.Mutex
-
 	Deleted  map[uint32]interface{}
 	DirInfo  types.DirectoryInfo
 	Messages map[uint32]*types.MessageInfo
@@ -49,9 +46,6 @@ func NewMessageStore(worker *types.Worker,
 
 func (store *MessageStore) FetchHeaders(uids []uint32,
 	cb func(*types.MessageInfo)) {
-	store.Lock()
-	defer store.Unlock()
-
 	// TODO: this could be optimized by pre-allocating toFetch and trimming it
 	// at the end. In practice we expect to get most messages back in one frame.
 	var toFetch imap.SeqSet
@@ -74,9 +68,6 @@ func (store *MessageStore) FetchHeaders(uids []uint32,
 }
 
 func (store *MessageStore) FetchFull(uids []uint32, cb func(io.Reader)) {
-	store.Lock()
-	defer store.Unlock()
-
 	// TODO: this could be optimized by pre-allocating toFetch and trimming it
 	// at the end. In practice we expect to get most messages back in one frame.
 	var toFetch imap.SeqSet
@@ -134,8 +125,6 @@ func merge(to *types.MessageInfo, from *types.MessageInfo) {
 }
 
 func (store *MessageStore) Update(msg types.WorkerMessage) {
-	store.Lock()
-
 	update := false
 	switch msg := msg.(type) {
 	case *types.DirectoryInfo:
@@ -201,8 +190,6 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
 		update = true
 	}
 
-	store.Unlock()
-
 	if update {
 		store.update()
 	}
@@ -220,7 +207,6 @@ func (store *MessageStore) update() {
 
 func (store *MessageStore) Delete(uids []uint32,
 	cb func(msg types.WorkerMessage)) {
-	store.Lock()
 
 	var set imap.SeqSet
 	for _, uid := range uids {
@@ -228,8 +214,6 @@ func (store *MessageStore) Delete(uids []uint32,
 		store.Deleted[uid] = nil
 	}
 
-	store.Unlock()
-
 	store.worker.PostAction(&types.DeleteMessages{Uids: set}, cb)
 	store.update()
 }
@@ -249,7 +233,6 @@ func (store *MessageStore) Copy(uids []uint32, dest string,
 
 func (store *MessageStore) Move(uids []uint32, dest string,
 	cb func(msg types.WorkerMessage)) {
-	store.Lock()
 
 	var set imap.SeqSet
 	for _, uid := range uids {
@@ -257,8 +240,6 @@ func (store *MessageStore) Move(uids []uint32, dest string,
 		store.Deleted[uid] = nil
 	}
 
-	store.Unlock()
-
 	store.worker.PostAction(&types.CopyMessages{
 		Destination: dest,
 		Uids:        set,