This commit is contained in:
Bruno Windels 2019-07-01 10:00:29 +02:00
parent c5b2d0c8b2
commit f3d1128f28
12 changed files with 256 additions and 74 deletions

View file

@ -3,3 +3,6 @@ goal:
write a client that works on a Lumia 950 phone, so I can use Matrix on my phone.
try an offline-first approach with indexeddb: go low-memory, and test the performance of storing every event individually in indexeddb.
try to use little bandwidth, mainly by being an offline application and storing all requested data in indexeddb.
be as functional as possible while offline

View file

@ -4,6 +4,12 @@ export class HomeServerError extends Error {
this.errcode = body.errcode;
this.retry_after_ms = body.retry_after_ms;
}
get isFatal() {
switch (this.errcode) {
// TODO: decide which errcodes should be treated as fatal (not worth retrying)
}
return false;
}
}
export class RequestAbortError extends Error {

View file

@ -1,13 +1,11 @@
export default class PendingEvent {
constructor(roomId, queueIndex, eventType, content, txnId) {
this._roomId = roomId;
this._eventType = eventType;
this._content = content;
this._txnId = txnId;
this._queueIndex = queueIndex;
}
static fromContent(content) {
}
static fromStateKey(eventType, stateKey, content) {
}
}
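SendQueue (further down in this commit) constructs pending events with PendingEvent.fromData(d) and persists pendingEvent.data, neither of which appears in this hunk. A minimal sketch of what those members could look like, assuming data is simply the plain object that ends up in the pendingEvents store (hypothetical, not part of this commit):

    // hypothetical companion members for PendingEvent, assumed shape only
    get data() {
        return {
            roomId: this._roomId,
            queueIndex: this._queueIndex,
            eventType: this._eventType,
            content: this._content,
            txnId: this._txnId,
        };
    }

    static fromData(data) {
        return new PendingEvent(data.roomId, data.queueIndex, data.eventType, data.content, data.txnId);
    }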

View file

@ -1,81 +1,124 @@
import Platform from "../../../Platform.js";
// note: HomeServerError and NetworkError are referenced below; their imports are not shown in this hunk
class RateLimitingBackoff {
constructor() {
this._remainingRateLimitedRequest = 0;
}
async waitAfterLimitExceeded(retryAfterMs) {
// this._remainingRateLimitedRequest = 5;
// if (typeof retryAfterMs !== "number") {
// } else {
// }
if (!retryAfterMs) {
retryAfterMs = 5000;
}
await Platform.delay(retryAfterMs);
}
// do we have to know about succeeding requests?
// we can just
async waitForNextSend() {
// this._remainingRateLimitedRequest = Math.max(0, this._remainingRateLimitedRequest - 1);
await Platform.delay(1000);
}
}
class SendScheduler {
constructor({hsApi, backoff}) {
this._hsApi = hsApi;
this._slotRequests = [];
this._sendScheduled = false;
this._offline = false;
this._waitTime = 0;
this._backoff = backoff;
}
// this should really be per roomId to avoid head-of-line blocking
//
// takes a callback instead of returning a promise with the slot
// to make sure the scheduler doesn't get blocked by a slot that is not consumed
runSlot(slotCallback) {
let request;
const promise = new Promise((resolve, reject) => request = {resolve, reject, slotCallback});
this._slotRequests.push(request);
if (!this._sendScheduled && !this._offline) {
this._sendLoop();
}
return promise;
}
async _sendLoop() {
while (this._slotRequests.length) {
// take the oldest slot request first
const request = this._slotRequests.shift();
this._currentSlot = new SendSlot(this);
// this can throw!
let result;
try {
result = await request.slotCallback(this._currentSlot);
} catch (err) {
if (err instanceof NetworkError) {
// we're offline, everybody will have
// to re-request slots when we come back online
this._offline = true;
for (const r of this._slotRequests) {
r.reject(err);
}
this._slotRequests = [];
}
request.reject(err);
break;
}
request.resolve(result);
}
// do next here instead of in _doSend
}
_discardSlot(slot) {
if (slot === this._currentSlot) {
this._currentSlot = null;
this._sendScheduled = true;
Promise.resolve().then(() => this._startNextSlot());
}
}
async _doSend(slot, sendCallback) {
this._sendScheduled = false;
if (slot !== this._currentSlot) {
throw new Error("slot is not active");
throw new Error("Slot is not active");
}
try {
await this._backoff.waitForNextSend();
// loop is left by return or throw
while (true) { // eslint-disable-line no-constant-condition
try {
return await sendCallback(this._hsApi);
} catch (err) {
if (err instanceof HomeServerError && err.errcode === "M_LIMIT_EXCEEDED") {
await this._backoff.waitAfterLimitExceeded(err.retry_after_ms);
} else {
throw err;
}
}
}
} catch (err) {
if (err instanceof NetworkError) {
this._offline = true;
// went offline, probably want to notify SendQueues somehow
}
throw err;
} finally {
this._currentSlot = null;
if (!this._offline && this._slotRequests.length) {
this._sendScheduled = true;
Promise.resolve().then(() => this._startNextSlot());
}
}
}
}
/*
this represents a slot to do one rate limited api call.
because rate-limiting is handled here, it should only
try to do one call, so the SendScheduler can safely
retry if the call ends up being rate limited.
This is also why, with this abstraction, hsApi is not
passed straight to SendQueue when it is its turn to send:
we wouldn't want to repeat a callback in SendQueue that could
have other side-effects before the call to hsApi that we wouldn't want
repeated (setting up progress handlers for file uploads,
... a UI update to say it started sending?
... updating storage would probably only happen once the call succeeded
... doing multiple hsApi calls for e.g. a file upload before sending an image message (they should individually be retried)
). Maybe it is a bit over-engineered, but let's stick with it for now.
At least the above is a clear definition of why we have this class.
*/
class SendSlot {
constructor(scheduler) {
this._scheduler = scheduler;
}
sendContentEvent(pendingEvent) {
return this._scheduler._doSend(this, async hsApi => {
const request = hsApi.send(
pendingEvent.roomId,
pendingEvent.eventType,
@ -87,11 +130,76 @@ class SenderSlot {
});
}
sendRedaction(pendingEvent) {
return this._scheduler._doSend(this, async hsApi => {
const request = hsApi.redact(
pendingEvent.roomId,
pendingEvent.redacts,
pendingEvent.txnId,
pendingEvent.reason
);
const response = await request.response();
return response.event_id;
});
}
// progressCallback should report the amount of bytes sent
uploadMedia(fileName, contentType, blob, progressCallback) {
}
}
function makeTxnId() {
const n = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER);
const str = n.toString(16);
return "t" + "0".repeat(14 - str.length) + str;
}
export default class SendQueue {
constructor({roomId, storage, scheduler, pendingEvents}) {
this._roomId = roomId;
this._storage = storage;
this._scheduler = scheduler;
this._pendingEvents = pendingEvents.map(d => PendingEvent.fromData(d));
}
async _sendLoop() {
let pendingEvent = null;
// eslint-disable-next-line no-cond-assign
while (pendingEvent = await this._nextPendingEvent()) {
// const mxcUrl = await this._scheduler.runSlot(slot => {
// return slot.uploadMedia(fileName, contentType, blob, bytesSent => {
// pendingEvent.updateAttachmentUploadProgress(bytesSent);
// });
// });
// pendingEvent.setAttachmentUrl(mxcUrl);
//update storage for pendingEvent after updating url,
//remove blob only later to keep preview?
await this._scheduler.runSlot(slot => {
if (pendingEvent.isRedaction) {
return slot.sendRedaction(pendingEvent);
} else if (pendingEvent.isContentEvent) {
return slot.sendContentEvent(pendingEvent);
}
});
}
}
async enqueueEvent(eventType, content) {
// temporary
const txn = await this._storage.readWriteTxn([this._storage.storeNames.pendingEvents]);
const pendingEventsStore = txn.pendingEvents;
const maxQueueIndex = await pendingEventsStore.getMaxQueueIndex(this._roomId) || 0;
const queueIndex = maxQueueIndex + 1;
const pendingEvent = new PendingEvent(this._roomId, queueIndex, eventType, content, makeTxnId());
pendingEventsStore.add(pendingEvent.data);
await txn.complete();
// create txnId
// create queueOrder
// store event
// if online and not running send loop
// start sending loop
}
}
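Putting the pieces together, the shape of the API suggests one SendScheduler (wrapping a RateLimitingBackoff and the hsApi) per session, and one SendQueue per room that persists outgoing events and claims a scheduler slot per homeserver call. A rough usage sketch with hypothetical glue code (storage, hsApi and the room id are assumed here, not part of this commit):

    // hypothetical wiring, for illustration only
    const scheduler = new SendScheduler({hsApi, backoff: new RateLimitingBackoff()});
    const queue = new SendQueue({
        roomId: "!abc:example.org",  // example room id
        storage,
        scheduler,
        pendingEvents: [],           // data objects loaded from the pendingEvents store
    });
    await queue.enqueueEvent("m.room.message", {msgtype: "m.text", body: "hello"});

As the TODO comments note, enqueueEvent currently only stores the event; starting _sendLoop when online still has to be wired up.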

View file

@ -1,4 +1,11 @@
export const STORE_NAMES = Object.freeze(["session", "roomState", "roomSummary", "timelineEvents", "timelineFragments"]);
export const STORE_NAMES = Object.freeze([
"session",
"roomState",
"roomSummary",
"timelineEvents",
"timelineFragments",
"pendingEvents",
]);
export const STORE_MAP = Object.freeze(STORE_NAMES.reduce((nameMap, name) => {
nameMap[name] = name;

View file

@ -20,6 +20,7 @@ function createStores(db) {
timelineEvents.createIndex("byEventId", "eventIdKey", {unique: true});
//key = room_id | event.type | event.state_key,
db.createObjectStore("roomState", {keyPath: "key"});
db.createObjectStore("pendingEvents", {keyPath: "key"});
// const roomMembers = db.createObjectStore("roomMembers", {keyPath: [
// "event.room_id",

View file

@ -71,6 +71,16 @@ export default class QueryTarget {
return this._find(range, predicate, "prev");
}
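// iterate the key cursor backwards ("prev") so the first key yielded is the maximum in the range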
async findMaxKey(range) {
const cursor = this._target.openKeyCursor(range, "prev");
let maxKey;
await iterateCursor(cursor, (_, key) => {
maxKey = key;
return {done: true};
});
return maxKey;
}
/**
* Checks if a given set of keys exist.
* Calls `callback(key, found)` for each key in `keys`, in key sorting order (or reversed if backwards=true).

View file

@ -0,0 +1,44 @@
import { encodeUint32, decodeUint32 } from "../utils.js";
import Platform from "../../../../Platform.js";
function encodeKey(roomId, queueIndex) {
return `${roomId}|${encodeUint32(queueIndex)}`;
}
function decodeKey(key) {
const [roomId, encodedQueueIndex] = key.split("|");
const queueIndex = decodeUint32(encodedQueueIndex);
return {roomId, queueIndex};
}
export default class PendingEventStore {
constructor(eventStore) {
this._eventStore = eventStore;
}
async getMaxQueueIndex(roomId) {
const range = IDBKeyRange.bound(
encodeKey(roomId, Platform.minStorageKey),
encodeKey(roomId, Platform.maxStorageKey),
false,
false,
);
const maxKey = await this._eventStore.findMaxKey(range);
if (maxKey) {
return decodeKey(maxKey).queueIndex;
}
}
add(pendingEvent) {
pendingEvent.key = encodeKey(pendingEvent.roomId, pendingEvent.queueIndex);
return this._eventStore.add(pendingEvent);
}
update(pendingEvent) {
return this._eventStore.put(pendingEvent);
}
getAllEvents() {
return this._eventStore.selectAll();
}
}
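For reference on the key format: encodeUint32 (added to the idb utils below) left-pads the hex representation to 8 characters, so the encoded keys sort lexicographically in the same order as the queue indices sort numerically, which is what findMaxKey/getMaxQueueIndex rely on. A small worked example, assuming Platform.minStorageKey/maxStorageKey are 0 and 0xFFFFFFFF as the uint32 comment suggests:

    // illustrative values only
    encodeUint32(10);                  // "0000000a"
    encodeUint32(16);                  // "00000010"  ("0000000a" < "00000010", just like 10 < 16)
    encodeKey("!abc:example.org", 3);  // "!abc:example.org|00000003"
    // getMaxQueueIndex then scans the range
    // ["!abc:example.org|00000000", "!abc:example.org|ffffffff"]
    // backwards and decodes the queueIndex from the highest key found.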

View file

@ -1,13 +1,8 @@
import EventKey from "../../../room/timeline/EventKey.js";
import { StorageError } from "../../common.js";
import { encodeUint32 } from "../utils.js";
import Platform from "../../../../Platform.js";
function encodeKey(roomId, fragmentId, eventIndex) {
return `${roomId}|${encodeUint32(fragmentId)}|${encodeUint32(eventIndex)}`;
}

View file

@ -1,10 +1,9 @@
import { StorageError } from "../../common.js";
import Platform from "../../../../Platform.js";
import { encodeUint32 } from "../utils.js";
function encodeKey(roomId, fragmentId) {
return `${roomId}|${encodeUint32(fragmentId)}`;
}
export default class RoomFragmentStore {

View file

@ -1,5 +1,16 @@
import { StorageError } from "../common.js";
// storage keys are defined to be unsigned 32bit numbers in WebPlatform.js, which is assumed by idb
export function encodeUint32(n) {
const hex = n.toString(16);
return "0".repeat(8 - hex.length) + hex;
}
export function decodeUint32(str) {
return parseInt(str, 16);
}
export function openDatabase(name, createObjectStore, version) {
const req = window.indexedDB.open(name, version);
req.onupgradeneeded = (ev) => {

View file

@ -1,9 +1,9 @@
import BaseObservableList from "./BaseObservableList.js";
export default class ObservableArray extends BaseObservableList {
constructor(initialValues = []) {
super();
this._items = initialValues;
}
append(item) {