Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
35 commits
Select commit Hold shift + click to select a range
fae2d58
refactor(indexeddb): add migrations for media content store
mgoldenberg Oct 13, 2025
728fb1c
refactor(indexeddb): add type for tracking media content and associat…
mgoldenberg Oct 13, 2025
6cc3c0c
refactor(indexeddb): remove indexed media content type synonym
mgoldenberg Oct 13, 2025
2c9cfea
refactor(indexeddb): add indexed types for media content
mgoldenberg Oct 13, 2025
7b36d5c
refactor(indexeddb): add transaction fns for basic media content oper…
mgoldenberg Oct 13, 2025
f265cd2
refactor(indexeddb): add transaction fn for getting max key in range
mgoldenberg Oct 14, 2025
f551c73
refactor(indexeddb): add constant for representing safe bounds of u64
mgoldenberg Oct 14, 2025
5856517
refactor(indexeddb): add key bounds for media content id key
mgoldenberg Oct 14, 2025
e8d5f7b
refactor(indexeddb): add transaction fns for getting the next availab…
mgoldenberg Oct 14, 2025
19ef59a
refactor(indexeddb): flatten nested media metadata into media type
mgoldenberg Oct 15, 2025
c560ddf
refactor(indexeddb): add content id and content size to media metadata
mgoldenberg Oct 15, 2025
10adde6
refactor(indexeddb): add migrations for media metadata store
mgoldenberg Oct 17, 2025
ccbf8dc
refactor(indexeddb): add indexed types and keys for media metadata
mgoldenberg Oct 17, 2025
b73b3ea
refactor(indexeddb): add transaction fns for getting media metadata
mgoldenberg Oct 17, 2025
9390e52
refactor(indexeddb): add transaction fns for add/putting media metadata
mgoldenberg Oct 17, 2025
ace5305
refactor(indexeddb): add transaction fns for deleting media metadata
mgoldenberg Oct 17, 2025
6c8e99e
refactor(indexeddb): return indexed type and js value from indexed ty…
mgoldenberg Oct 18, 2025
301edb1
refactor(indexeddb): return indexed type from Transaction::add_item a…
mgoldenberg Oct 18, 2025
aefd18d
refactor(indexeddb): return indexed type from Transaction::{put_item,…
mgoldenberg Oct 18, 2025
6f05baf
refactor(indexeddb): add fn for prefixed key ranges from existing key…
mgoldenberg Oct 20, 2025
f2a0cf1
refactor(indexeddb): add type synonym for content id in indexed media…
mgoldenberg Oct 20, 2025
c6ea0a2
refactor(indexeddb): remove unused type synonym
mgoldenberg Oct 20, 2025
e44e23e
refactor(indexeddb): add constants for media content id bounds
mgoldenberg Oct 20, 2025
eba78dc
refactor(indexeddb): add content id to media metadata keys
mgoldenberg Oct 20, 2025
44e727d
refactor(indexeddb): rename transaction fn for getting all media meta…
mgoldenberg Oct 20, 2025
20c8198
refactor(indexeddb): add transaction fns for getting media metadata k…
mgoldenberg Oct 20, 2025
f2b629e
refactor(indexeddb): implement specialized fn for getting media metad…
mgoldenberg Oct 20, 2025
9b2f26a
refactor(indexeddb): re-implement media-related fns in terms of media…
mgoldenberg Oct 20, 2025
3352e78
refactor(indexeddb): simplify error type for media metadata impl of i…
mgoldenberg Oct 20, 2025
cab481e
refactor(indexeddb): remove media object store and associated types
mgoldenberg Oct 20, 2025
45a6d6d
refactor(indexeddb): remove (de)serialization functionality from top-…
mgoldenberg Oct 21, 2025
e9a27ef
doc(indexeddb): fix typos in documentation
mgoldenberg Oct 21, 2025
79dd327
refactor(indexeddb): rename MediaContent::id -> MediaContent::content_id
mgoldenberg Oct 24, 2025
6cffa9d
refactor(indexeddb): use UUID instead of u64 as media content id
mgoldenberg Oct 24, 2025
0721b74
doc(indexeddb): add changelog entry for separating media content and …
mgoldenberg Oct 24, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions crates/matrix-sdk-indexeddb/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,11 @@ All notable changes to this project will be documented in this file.
- [**breaking**] `IndexeddbCryptoStore::get_withheld_info` now returns `Result<Option<RoomKeyWithheldEntry>, ...>`
([#5737](https://github.com/matrix-org/matrix-rust-sdk/pull/5737))

### Performance

- Improve performance of certain media queries in `MediaStore` implementation by storing media content and media metadata
in separate object stores in IndexedDB (see [#5795](https://github.com/matrix-org/matrix-rust-sdk/pull/5795)).

## [0.14.0] - 2025-09-04

No notable changes in this release.
Expand Down Expand Up @@ -48,6 +53,7 @@ No notable changes in this release.

- `save_change` performance improvement: all encryption and serialization
is now done outside of the db transaction.

### Bug Fixes

- Use the `DisplayName` struct to protect against homoglyph attacks.
2 changes: 1 addition & 1 deletion crates/matrix-sdk-indexeddb/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ sha2.workspace = true
thiserror.workspace = true
tokio.workspace = true
tracing.workspace = true
uuid = { workspace = true, features = ["js", "serde", "v4"] }
wasm-bindgen.workspace = true
web-sys = { workspace = true, features = ["IdbKeyRange"] }
zeroize.workspace = true
Expand All @@ -71,7 +72,6 @@ tracing-subscriber = { workspace = true, features = [
"registry",
"tracing-log",
] }
uuid.workspace = true
wasm-bindgen-test.workspace = true
web-sys = { workspace = true, features = [
"IdbKeyRange",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,9 @@ impl From<TransactionError> for EventCacheStoreError {
match value {
DomException { .. } => Self::InvalidData { details: value.to_string() },
Serialization(e) => Self::Serialization(serde_json::Error::custom(e.to_string())),
ItemIsNotUnique | ItemNotFound => Self::InvalidData { details: value.to_string() },
ItemIsNotUnique | ItemNotFound | NumericalOverflow => {
Self::InvalidData { details: value.to_string() }
}
Backend(e) => GenericError::from(e.to_string()).into(),
}
}
Expand Down
27 changes: 17 additions & 10 deletions crates/matrix-sdk-indexeddb/src/event_cache_store/transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,9 @@ use crate::{
error::AsyncErrorDeps,
event_cache_store::{
serializer::indexed_types::{
IndexedChunkIdKey, IndexedEventIdKey, IndexedEventPositionKey, IndexedEventRelationKey,
IndexedEventRoomKey, IndexedGapIdKey, IndexedLeaseIdKey, IndexedNextChunkIdKey,
IndexedChunk, IndexedChunkIdKey, IndexedEvent, IndexedEventIdKey,
IndexedEventPositionKey, IndexedEventRelationKey, IndexedEventRoomKey, IndexedGapIdKey,
IndexedLease, IndexedLeaseIdKey, IndexedNextChunkIdKey,
},
types::{Chunk, ChunkType, Event, Gap, Lease, Position},
},
Expand Down Expand Up @@ -143,8 +144,10 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> {
}

/// Puts a lease into IndexedDB. If an event with the same key already
/// exists, it will be overwritten.
pub async fn put_lease(&self, lease: &Lease) -> Result<(), TransactionError> {
/// exists, it will be overwritten. When the item is successfully put, the
/// function returns the intermediary type [`IndexedLease`] in case
/// inspection is needed.
pub async fn put_lease(&self, lease: &Lease) -> Result<IndexedLease, TransactionError> {
self.put_item(lease).await
}

Expand Down Expand Up @@ -248,9 +251,11 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> {
/// Add a chunk and ensure that the next and previous
/// chunks are properly linked to the chunk being added. If a chunk with
/// the same identifier already exists, the given chunk will be
/// rejected.
pub async fn add_chunk(&self, chunk: &Chunk) -> Result<(), TransactionError> {
self.add_item(chunk).await?;
/// rejected. When the item is successfully added, the
/// function returns the intermediary type [`IndexedChunk`] in case
/// inspection is needed.
pub async fn add_chunk(&self, chunk: &Chunk) -> Result<IndexedChunk, TransactionError> {
let indexed = self.add_item(chunk).await?;
if let Some(previous) = chunk.previous {
let previous_identifier = ChunkIdentifier::new(previous);
if let Some(mut previous_chunk) =
Expand All @@ -269,7 +274,7 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> {
self.put_item(&next_chunk).await?;
}
}
Ok(())
Ok(indexed)
}

/// Delete chunk that matches the given id and the given linked chunk id and
Expand Down Expand Up @@ -407,8 +412,10 @@ impl<'a> IndexeddbEventCacheStoreTransaction<'a> {
}

/// Puts an event in IndexedDB. If an event with the same key already
/// exists, it will be overwritten.
pub async fn put_event(&self, event: &Event) -> Result<(), TransactionError> {
/// exists, it will be overwritten. When the item is successfully put, the
/// function returns the intermediary type [`IndexedEvent`] in case
/// inspection is needed.
pub async fn put_event(&self, event: &Event) -> Result<IndexedEvent, TransactionError> {
if let Some(position) = event.position() {
// For some reason, we can't simply replace an event with `put_item`
// because we can get an error stating that the data violates a uniqueness
Expand Down
4 changes: 3 additions & 1 deletion crates/matrix-sdk-indexeddb/src/media_store/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,9 @@ impl From<TransactionError> for MediaStoreError {
match value {
DomException { .. } => Self::InvalidData { details: value.to_string() },
Serialization(e) => Self::Serialization(serde_json::Error::custom(e.to_string())),
ItemIsNotUnique | ItemNotFound => Self::InvalidData { details: value.to_string() },
ItemIsNotUnique | ItemNotFound | NumericalOverflow => {
Self::InvalidData { details: value.to_string() }
}
Backend(e) => GenericError::from(e.to_string()).into(),
}
}
Expand Down
69 changes: 46 additions & 23 deletions crates/matrix-sdk-indexeddb/src/media_store/migrations.rs
Original file line number Diff line number Diff line change
Expand Up @@ -114,23 +114,26 @@ pub mod v1 {
pub const LEASES_KEY_PATH: &str = "id";
pub const MEDIA_RETENTION_POLICY_KEY: &str = "media_retention_policy";
pub const MEDIA_CLEANUP_TIME_KEY: &str = "media_cleanup_time";
pub const MEDIA: &str = "media";
pub const MEDIA_KEY_PATH: &str = "id";
pub const MEDIA_URI: &str = "media_uri";
pub const MEDIA_URI_KEY_PATH: &str = "uri";
pub const MEDIA_CONTENT_SIZE: &str = "media_content_size";
pub const MEDIA_CONTENT_SIZE_KEY_PATH: &str = "content_size";
pub const MEDIA_LAST_ACCESS: &str = "media_last_access";
pub const MEDIA_LAST_ACCESS_KEY_PATH: &str = "last_access";
pub const MEDIA_RETENTION_METADATA: &str = "media_retention_metadata";
pub const MEDIA_RETENTION_METADATA_KEY_PATH: &str = "retention_metadata";
pub const MEDIA_METADATA: &str = "media_metadata";
pub const MEDIA_METADATA_KEY_PATH: &str = "id";
pub const MEDIA_METADATA_URI: &str = "media_metadata_uri";
pub const MEDIA_METADATA_URI_KEY_PATH: &str = "uri";
pub const MEDIA_METADATA_CONTENT_SIZE: &str = "media_metadata_content_size";
pub const MEDIA_METADATA_CONTENT_SIZE_KEY_PATH: &str = "content_size";
pub const MEDIA_METADATA_LAST_ACCESS: &str = "media_metadata_last_access";
pub const MEDIA_METADATA_LAST_ACCESS_KEY_PATH: &str = "last_access";
pub const MEDIA_METADATA_RETENTION: &str = "media_metadata_retention";
pub const MEDIA_METADATA_RETENTION_KEY_PATH: &str = "retention";
pub const MEDIA_CONTENT: &str = "media_content";
pub const MEDIA_CONTENT_KEY_PATH: &str = "id";
}

/// Create all object stores and indices for v1 database
pub fn create_object_stores(db: &Database) -> Result<(), Error> {
create_core_object_store(db)?;
create_lease_object_store(db)?;
create_media_object_store(db)?;
create_media_metadata_object_store(db)?;
create_media_content_object_store(db)?;
Ok(())
}

Expand All @@ -152,37 +155,57 @@ pub mod v1 {
Ok(())
}

/// Create an object store for tracking information about media.
/// Create an object store for tracking information about media metadata.
///
/// * Primary Key - `id`
/// * Primary Key - `id` - unique key derived from
/// [`MediaRequestParameters`] of the associated media
/// * Index - `uri` - tracks the [`MxcUri`][1] of the associated media
/// * Index - `content_size` - tracks the size of the media content and
/// whether to ignore the [`MediaRetentionPolicy`][2]
/// * Index - `last_access` - tracks the last time the associated media was
/// accessed
/// * Index - `retention_metadata` - tracks all retention metadata - i.e.,
/// joins `content_size` and `last_access`
/// * Index - `retention` - tracks all retention metadata - i.e., joins
/// `content_size` and `last_access`
///
/// [1]: ruma::MxcUri
/// [2]: matrix_sdk_base::media::store::MediaRetentionPolicy
fn create_media_object_store(db: &Database) -> Result<(), Error> {
fn create_media_metadata_object_store(db: &Database) -> Result<(), Error> {
let media = db
.create_object_store(keys::MEDIA)
.with_key_path(keys::MEDIA_KEY_PATH.into())
.create_object_store(keys::MEDIA_METADATA)
.with_key_path(keys::MEDIA_METADATA_KEY_PATH.into())
.build()?;
let _ = media
.create_index(keys::MEDIA_METADATA_URI, keys::MEDIA_METADATA_URI_KEY_PATH.into())
.build()?;
let _ = media.create_index(keys::MEDIA_URI, keys::MEDIA_URI_KEY_PATH.into()).build()?;
let _ = media
.create_index(keys::MEDIA_CONTENT_SIZE, keys::MEDIA_CONTENT_SIZE_KEY_PATH.into())
.create_index(
keys::MEDIA_METADATA_CONTENT_SIZE,
keys::MEDIA_METADATA_CONTENT_SIZE_KEY_PATH.into(),
)
.build()?;
let _ = media
.create_index(keys::MEDIA_LAST_ACCESS, keys::MEDIA_LAST_ACCESS_KEY_PATH.into())
.create_index(
keys::MEDIA_METADATA_LAST_ACCESS,
keys::MEDIA_METADATA_LAST_ACCESS_KEY_PATH.into(),
)
.build()?;
let _ = media
.create_index(
keys::MEDIA_RETENTION_METADATA,
keys::MEDIA_RETENTION_METADATA_KEY_PATH.into(),
keys::MEDIA_METADATA_RETENTION,
keys::MEDIA_METADATA_RETENTION_KEY_PATH.into(),
)
.build()?;
Ok(())
}

/// Create an object store for storing raw media content, separate from the
/// media metadata store.
///
/// * Primary Key - `id` - UUID identifying this piece of media content.
///   Presumably referenced by the `content_id` carried in the media metadata
/// No secondary indices are created; content is only looked up by its id.
fn create_media_content_object_store(db: &Database) -> Result<(), Error> {
    let _ = db
        .create_object_store(keys::MEDIA_CONTENT)
        .with_key_path(keys::MEDIA_CONTENT_KEY_PATH.into())
        .build()?;
    Ok(())
}
}
67 changes: 43 additions & 24 deletions crates/matrix-sdk-indexeddb/src/media_store/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ use tracing::instrument;
use crate::{
media_store::{
transaction::IndexeddbMediaStoreTransaction,
types::{Lease, Media, MediaCleanupTime, MediaMetadata, UnixTime},
types::{Lease, Media, MediaCleanupTime, MediaContent, MediaMetadata, UnixTime},
},
serializer::{Indexed, IndexedTypeSerializer},
transaction::TransactionError,
Expand Down Expand Up @@ -149,12 +149,13 @@ impl MediaStore for IndexeddbMediaStore {
) -> Result<(), IndexeddbMediaStoreError> {
let _timer = timer!("method");

let transaction = self.transaction(&[Media::OBJECT_STORE], TransactionMode::Readwrite)?;
if let Some(mut media) = transaction.get_media_by_id(from).await? {
let transaction =
self.transaction(&[MediaMetadata::OBJECT_STORE], TransactionMode::Readwrite)?;
if let Some(mut metadata) = transaction.get_media_metadata_by_id(from).await? {
// delete before adding, in case `from` and `to` generate the same key
transaction.delete_media_by_id(from).await?;
media.metadata.request_parameters = to.clone();
transaction.add_media(&media).await?;
transaction.delete_media_metadata_by_id(from).await?;
metadata.request_parameters = to.clone();
transaction.add_media_metadata(&metadata).await?;
transaction.commit().await?;
}
Ok(())
Expand All @@ -176,7 +177,10 @@ impl MediaStore for IndexeddbMediaStore {
) -> Result<(), IndexeddbMediaStoreError> {
let _timer = timer!("method");

let transaction = self.transaction(&[Media::OBJECT_STORE], TransactionMode::Readwrite)?;
let transaction = self.transaction(
&[MediaMetadata::OBJECT_STORE, MediaContent::OBJECT_STORE],
TransactionMode::Readwrite,
)?;
transaction.delete_media_by_id(request).await?;
transaction.commit().await.map_err(Into::into)
}
Expand All @@ -197,7 +201,10 @@ impl MediaStore for IndexeddbMediaStore {
) -> Result<(), IndexeddbMediaStoreError> {
let _timer = timer!("method");

let transaction = self.transaction(&[Media::OBJECT_STORE], TransactionMode::Readwrite)?;
let transaction = self.transaction(
&[MediaMetadata::OBJECT_STORE, MediaContent::OBJECT_STORE],
TransactionMode::Readwrite,
)?;
transaction.delete_media_by_uri(uri).await?;
transaction.commit().await.map_err(Into::into)
}
Expand Down Expand Up @@ -274,18 +281,19 @@ impl MediaStoreInner for IndexeddbMediaStore {
) -> Result<(), IndexeddbMediaStoreError> {
let _timer = timer!("method");

let transaction = self.transaction(&[Media::OBJECT_STORE], TransactionMode::Readwrite)?;
let transaction = self.transaction(
&[MediaMetadata::OBJECT_STORE, MediaContent::OBJECT_STORE],
TransactionMode::Readwrite,
)?;

let media = Media {
metadata: MediaMetadata {
request_parameters: request.clone(),
last_access: current_time.into(),
ignore_policy,
},
request_parameters: request.clone(),
last_access: current_time.into(),
ignore_policy,
content,
};

transaction.put_media_if_policy_compliant(&media, policy).await?;
transaction.put_media_if_policy_compliant(media, policy).await?;
transaction.commit().await.map_err(Into::into)
}

Expand All @@ -297,11 +305,12 @@ impl MediaStoreInner for IndexeddbMediaStore {
) -> Result<(), IndexeddbMediaStoreError> {
let _timer = timer!("method");

let transaction = self.transaction(&[Media::OBJECT_STORE], TransactionMode::Readwrite)?;
if let Some(mut media) = transaction.get_media_by_id(request).await? {
if media.metadata.ignore_policy != ignore_policy {
media.metadata.ignore_policy = ignore_policy;
transaction.put_media(&media).await?;
let transaction =
self.transaction(&[MediaMetadata::OBJECT_STORE], TransactionMode::Readwrite)?;
if let Some(mut metadata) = transaction.get_media_metadata_by_id(request).await? {
if metadata.ignore_policy != ignore_policy {
metadata.ignore_policy = ignore_policy;
transaction.put_media_metadata(&metadata).await?;
transaction.commit().await?;
}
}
Expand All @@ -316,7 +325,10 @@ impl MediaStoreInner for IndexeddbMediaStore {
) -> Result<Option<Vec<u8>>, IndexeddbMediaStoreError> {
let _timer = timer!("method");

let transaction = self.transaction(&[Media::OBJECT_STORE], TransactionMode::Readwrite)?;
let transaction = self.transaction(
&[MediaMetadata::OBJECT_STORE, MediaContent::OBJECT_STORE],
TransactionMode::Readwrite,
)?;
let media = transaction.access_media_by_id(request, current_time).await?;
transaction.commit().await?;
Ok(media.map(|m| m.content))
Expand All @@ -330,7 +342,10 @@ impl MediaStoreInner for IndexeddbMediaStore {
) -> Result<Option<Vec<u8>>, IndexeddbMediaStoreError> {
let _timer = timer!("method");

let transaction = self.transaction(&[Media::OBJECT_STORE], TransactionMode::Readwrite)?;
let transaction = self.transaction(
&[MediaMetadata::OBJECT_STORE, MediaContent::OBJECT_STORE],
TransactionMode::Readwrite,
)?;
let media = transaction.access_media_by_uri(uri, current_time).await?.pop();
transaction.commit().await?;
Ok(media.map(|m| m.content))
Expand All @@ -349,7 +364,11 @@ impl MediaStoreInner for IndexeddbMediaStore {
}

let transaction = self.transaction(
&[Media::OBJECT_STORE, MediaCleanupTime::OBJECT_STORE],
&[
MediaMetadata::OBJECT_STORE,
MediaContent::OBJECT_STORE,
MediaCleanupTime::OBJECT_STORE,
],
TransactionMode::Readwrite,
)?;

Expand All @@ -375,7 +394,7 @@ impl MediaStoreInner for IndexeddbMediaStore {
.ok_or(Self::Error::CacheSizeTooBig)?;
if cache_size > (max_cache_size as usize) {
let (_, upper_key) = transaction
.fold_media_keys_by_retention_metadata_while(
.fold_media_metadata_keys_by_retention_while(
CursorDirection::Prev,
ignore_policy,
0usize,
Expand Down
Loading
Loading