author     Tim Culverhouse <tim@timculverhouse.com>    2022-09-21 11:44:11 -0500
committer  Robin Jarry <robin@jarry.cc>                2022-09-25 11:54:26 +0200
commit     a9af5635bc16bf994489f521006ab5151cddf609 (patch)
tree       82ef0ef3c92e8b0c610bde7986897fe1e4a72376 /lib/msgstore.go
parent     e5b0725824ac9ccf218732238e4b3b525fa6ad46 (diff)
msgstore: revert 9fdc7acf5b48 "post messageInfo on erroneous fetch"
Commit 9fdc7acf5b48 ("cache: fetch flags from UI") introduced a regression
where all messages were marked as erroneous if a single one in the fetch
request had an error.

Reported-by: Jose Lombera <jose@lombera.dev>
Signed-off-by: Tim Culverhouse <tim@timculverhouse.com>
Acked-by: Robin Jarry <robin@jarry.cc>
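For context, the behavior being reverted can be summarized with a minimal
standalone sketch. The type and helper names below (fetchError, onFetchReply,
markInvalid) are hypothetical, not aerc's actual worker API: the point is that
the worker replies with a single error for the whole batched header fetch, so
a callback that loops over every requested UID marks the entire batch as
erroneous even when only one message actually failed.

package main

import (
	"errors"
	"fmt"
)

// fetchError stands in for a worker error reply; it is a hypothetical type,
// not aerc's actual *types.Error.
type fetchError struct{ err error }

// onFetchReply mimics the pre-revert callback: a single error reply for the
// batched header fetch marks every requested UID as erroneous.
func onFetchReply(toFetch []uint32, reply interface{}, markInvalid func(uint32, error)) {
	if e, ok := reply.(*fetchError); ok {
		for _, uid := range toFetch {
			// One failed message taints the whole batch.
			markInvalid(uid, e.err)
		}
	}
}

func main() {
	batch := []uint32{101, 102, 103}
	onFetchReply(batch, &fetchError{err: errors.New("header parse failed")},
		func(uid uint32, err error) {
			fmt.Printf("uid %d marked erroneous: %v\n", uid, err)
		})
}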
Diffstat (limited to 'lib/msgstore.go')
-rw-r--r--  lib/msgstore.go  14
1 file changed, 1 insertion(+), 13 deletions(-)
diff --git a/lib/msgstore.go b/lib/msgstore.go
index 58c5faba..44de7010 100644
--- a/lib/msgstore.go
+++ b/lib/msgstore.go
@@ -119,9 +119,8 @@ func (store *MessageStore) FetchHeaders(uids []uint32,
     }
     if len(toFetch) > 0 {
         store.worker.PostAction(&types.FetchMessageHeaders{Uids: toFetch}, func(msg types.WorkerMessage) {
-            if msg, ok := msg.(*types.Error); ok {
+            if _, ok := msg.(*types.Error); ok {
                 for _, uid := range toFetch {
-                    store.postInvalidMessageInfo(uid, msg.Error)
                     delete(store.pendingHeaders, uid)
                 }
             }
@@ -132,17 +131,6 @@ func (store *MessageStore) FetchHeaders(uids []uint32,
     }
 }
-func (store *MessageStore) postInvalidMessageInfo(uid uint32, err error) {
-    logging.Errorf("Unable to fetch header %d: %w", uid, err)
-    info := &models.MessageInfo{
-        Envelope: &models.Envelope{},
-        Flags:    []models.Flag{models.SeenFlag},
-        Uid:      uid,
-        Error:    err,
-    }
-    store.Update(&types.MessageInfo{Info: info})
-}
-
 func (store *MessageStore) FetchFull(uids []uint32, cb func(*types.FullMessage)) {
     // TODO: this could be optimized by pre-allocating toFetch and trimming it
     // at the end. In practice we expect to get most messages back in one frame.