about summary refs log tree commit diff stats
path: root/lib/msgstore.go
diff options
context:
space:
mode:
author Koni Marti <koni.marti@gmail.com> 2022-08-08 22:21:42 +0200
committer Robin Jarry <robin@jarry.cc> 2022-08-22 09:30:37 +0200
commit 22e6c9e4fac70c9542d10464f88553c1f20ce577 (patch)
tree 8941122cf1c4ab4f16963ccf86c3b982db11ec89 /lib/msgstore.go
parent cfc19a7ec22a1c60f79427ddbabdf437705efbab (diff)
download aerc-22e6c9e4fac70c9542d10464f88553c1f20ce577.tar.gz
store: remove unneeded header callback
The message store keeps a map of callbacks for headers that are being fetched. This is not used anywhere in the code base. Instead of a func(*types.MessageInfo) callback, use a general func(types.WorkerMessage) in the worker.PostAction function to make it more useful. This callback now allows getting feedback when all headers are fetched successfully. Note that the pending header map remains so that the same header is not fetched multiple times. Signed-off-by: Koni Marti <koni.marti@gmail.com> Acked-by: Robin Jarry <robin@jarry.cc>
Diffstat (limited to 'lib/msgstore.go')
-rw-r--r-- lib/msgstore.go 26
1 file changed, 7 insertions(+), 19 deletions(-)
diff --git a/lib/msgstore.go b/lib/msgstore.go
index f9b68ad4..d126fee8 100644
--- a/lib/msgstore.go
+++ b/lib/msgstore.go
@@ -23,9 +23,8 @@ type MessageStore struct {
uids []uint32
threads []*types.Thread
- selectedUid uint32
- bodyCallbacks map[uint32][]func(*types.FullMessage)
- headerCallbacks map[uint32][]func(*types.MessageInfo)
+ selectedUid uint32
+ bodyCallbacks map[uint32][]func(*types.FullMessage)
// marking
marker marker.Marker
@@ -78,8 +77,7 @@ func NewMessageStore(worker *types.Worker,
selectedUid: MagicUid,
- bodyCallbacks: make(map[uint32][]func(*types.FullMessage)),
- headerCallbacks: make(map[uint32][]func(*types.MessageInfo)),
+ bodyCallbacks: make(map[uint32][]func(*types.FullMessage)),
threadedView: thread,
buildThreads: clientThreads,
@@ -99,7 +97,7 @@ func NewMessageStore(worker *types.Worker,
}
func (store *MessageStore) FetchHeaders(uids []uint32,
- cb func(*types.MessageInfo),
+ cb func(types.WorkerMessage),
) {
// TODO: this could be optimized by pre-allocating toFetch and trimming it
// at the end. In practice we expect to get most messages back in one frame.
@@ -108,13 +106,6 @@ func (store *MessageStore) FetchHeaders(uids []uint32,
if _, ok := store.pendingHeaders[uid]; !ok {
toFetch = append(toFetch, uid)
store.pendingHeaders[uid] = nil
- if cb != nil {
- if list, ok := store.headerCallbacks[uid]; ok {
- store.headerCallbacks[uid] = append(list, cb)
- } else {
- store.headerCallbacks[uid] = []func(*types.MessageInfo){cb}
- }
- }
}
}
if len(toFetch) > 0 {
@@ -122,9 +113,11 @@ func (store *MessageStore) FetchHeaders(uids []uint32,
if _, ok := msg.(*types.Error); ok {
for _, uid := range toFetch {
delete(store.pendingHeaders, uid)
- delete(store.headerCallbacks, uid)
}
}
+ if cb != nil {
+ cb(msg)
+ }
})
}
}
@@ -255,11 +248,6 @@ func (store *MessageStore) Update(msg types.WorkerMessage) {
}
if _, ok := store.pendingHeaders[msg.Info.Uid]; msg.Info.Envelope != nil && ok {
delete(store.pendingHeaders, msg.Info.Uid)
- if cbs, ok := store.headerCallbacks[msg.Info.Uid]; ok {
- for _, cb := range cbs {
- cb(msg)
- }
- }
}
if store.builder != nil {
store.builder.Update(msg.Info)