Skip to content

Commit 5e17bf8

Browse files
committed
Prepare for schemars v1.0.0 (#27)
* Upgrade to schemars v1.0.1
* Add a numcodecs-wasm-host-reproducible roundtrip test
* Fix exhaustive wasm operator match
1 parent c5b1c8a commit 5e17bf8

File tree

21 files changed

+166
-65
lines changed

21 files changed

+166
-65
lines changed

.gitattributes

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
crates/numcodecs-wasm-host-reproducible/tests/round.wasm filter=lfs diff=lfs merge=lfs -text

.github/workflows/ci.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,8 @@ jobs:
114114
steps:
115115
- name: Checkout the Repository
116116
uses: actions/checkout@v2
117+
with:
118+
lfs: true
117119

118120
- name: Install the Rust toolchain
119121
uses: actions-rs/toolchain@v1

Cargo.toml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ burn = { version = "0.18", default-features = false }
8181
clap = { version = "4.5", default-features = false }
8282
convert_case = { version = "0.8", default-features = false }
8383
format_serde_error = { version = "0.3", default-features = false }
84-
indexmap = { version = "2.7.1", default-features = false }
84+
indexmap = { version = "2.10", default-features = false }
8585
itertools = { version = "0.14", default-features = false }
8686
log = { version = "0.4.27", default-features = false }
8787
simple_logger = { version = "5.0", default-features = false }
@@ -100,7 +100,7 @@ pyo3-error = { version = "0.5", default-features = false }
100100
pyo3-log = { version = "0.12.4", default-features = false }
101101
pythonize = { version = "0.25", default-features = false }
102102
rand = { version = "0.9.1", default-features = false }
103-
schemars = { version = "=1.0.0-alpha.15", default-features = false }
103+
schemars = { version = "1.0.3", default-features = false }
104104
scratch = { version = "1.0", default-features = false }
105105
semver = { version = "1.0.23", default-features = false }
106106
serde = { version = "1.0.218", default-features = false }
@@ -122,7 +122,7 @@ wasmtime = { version = "33.0", default-features = false }
122122
wasmtime_runtime_layer = { version = "33.0", default-features = false }
123123
wasm-encoder = { version = "0.235", default-features = false }
124124
wasm_runtime_layer = { version = "0.5", default-features = false }
125-
wit-bindgen = { version = "0.42", default-features = false }
125+
wit-bindgen = { version = "0.43", default-features = false }
126126
wit-component = { version = "0.235", default-features = false }
127127
wit-parser = { version = "0.235", default-features = false }
128128
wyhash = { version = "0.6", default-features = false }

codecs/fourier-network/tests/schema.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@
3737
],
3838
"format": "uint",
3939
"minimum": 1,
40-
"description": "The optional mini-batch size used during training\n\n Setting the mini-batch size to `None` disables the use of batching,\n i.e. the network is trained using one large batch that includes the\n full data."
40+
"description": "The optional mini-batch size used during training\n\nSetting the mini-batch size to `None` disables the use of batching,\ni.e. the network is trained using one large batch that includes the\nfull data."
4141
},
4242
"seed": {
4343
"type": "integer",
@@ -61,7 +61,7 @@
6161
"mini_batch_size",
6262
"seed"
6363
],
64-
"description": "Fourier network codec which trains and overfits a fourier feature neural\n network on encoding and predicts during decoding.\n\n The approach is based on the papers by Tancik et al. 2020\n (<https://dl.acm.org/doi/abs/10.5555/3495724.3496356>)\n and by Huang and Hoefler 2020 (<https://arxiv.org/abs/2210.12538>).",
64+
"description": "Fourier network codec which trains and overfits a fourier feature neural\nnetwork on encoding and predicts during decoding.\n\nThe approach is based on the papers by Tancik et al. 2020\n(<https://dl.acm.org/doi/abs/10.5555/3495724.3496356>)\nand by Huang and Hoefler 2020 (<https://arxiv.org/abs/2210.12538>).",
6565
"title": "FourierNetworkCodec",
6666
"$schema": "https://json-schema.org/draft/2020-12/schema"
6767
}

codecs/jpeg2000/src/ffi/image.rs

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,9 @@ impl Image {
2525
) -> Result<Self, Jpeg2000Error> {
2626
let mut image = std::ptr::null_mut();
2727

28-
if unsafe { openjpeg_sys::opj_read_header(stream.as_raw(), decoder.as_raw(), &mut image) }
29-
!= 1
28+
if unsafe {
29+
openjpeg_sys::opj_read_header(stream.as_raw(), decoder.as_raw(), &raw mut image)
30+
} != 1
3031
{
3132
return Err(Jpeg2000Error::InvalidMainHeader);
3233
}
@@ -56,7 +57,7 @@ impl Image {
5657
let image = NonNull::new(unsafe {
5758
openjpeg_sys::opj_image_create(
5859
1,
59-
&mut image_params,
60+
&raw mut image_params,
6061
openjpeg_sys::OPJ_COLOR_SPACE::OPJ_CLRSPC_GRAY,
6162
)
6263
})

codecs/jpeg2000/src/ffi/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ pub fn encode_into<T: Jpeg2000Element>(
103103
let mut image = Image::from_gray_data(data, width, height)?;
104104

105105
if unsafe {
106-
openjpeg_sys::opj_setup_encoder(encoder.as_raw(), &mut encode_params, image.as_raw())
106+
openjpeg_sys::opj_setup_encoder(encoder.as_raw(), &raw mut encode_params, image.as_raw())
107107
} != 1
108108
{
109109
return Err(Jpeg2000Error::EncoderSetupError);
@@ -136,7 +136,7 @@ pub fn decode<T: Jpeg2000Element>(bytes: &[u8]) -> Result<(Vec<T>, (usize, usize
136136
let mut decode_params = unsafe { decode_params.assume_init() };
137137
decode_params.decod_format = 1; // JP2
138138

139-
if unsafe { openjpeg_sys::opj_setup_decoder(decoder.as_raw(), &mut decode_params) } != 1 {
139+
if unsafe { openjpeg_sys::opj_setup_decoder(decoder.as_raw(), &raw mut decode_params) } != 1 {
140140
return Err(Jpeg2000Error::DecoderSetupError);
141141
}
142142

codecs/jpeg2000/tests/schema.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@
5454
"description": "Lossless compression"
5555
}
5656
],
57-
"description": "Codec providing compression using JPEG 2000.\n\n Arrays that are higher-dimensional than 2D are encoded by compressing each\n 2D slice with JPEG 2000 independently. Specifically, the array's shape is\n interpreted as `[.., height, width]`. If you want to compress 2D slices\n along two different axes, you can swizzle the array axes beforehand.",
57+
"description": "Codec providing compression using JPEG 2000.\n\nArrays that are higher-dimensional than 2D are encoded by compressing each\n2D slice with JPEG 2000 independently. Specifically, the array's shape is\ninterpreted as `[.., height, width]`. If you want to compress 2D slices\nalong two different axes, you can swizzle the array axes beforehand.",
5858
"properties": {
5959
"_version": {
6060
"type": "string",

codecs/pco/tests/schema.json

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
11,
2222
12
2323
],
24-
"description": "Compression level, ranging from 0 (weak) over 8 (very good) to 12\n (expensive)"
24+
"description": "Compression level, ranging from 0 (weak) over 8 (very good) to 12\n(expensive)"
2525
},
2626
"_version": {
2727
"type": "string",
@@ -46,7 +46,7 @@
4646
"required": [
4747
"mode"
4848
],
49-
"description": "Automatically detects a good mode.\n\n This works well most of the time, but costs some compression time and\n can select a bad mode in adversarial cases."
49+
"description": "Automatically detects a good mode.\n\nThis works well most of the time, but costs some compression time and\ncan select a bad mode in adversarial cases."
5050
},
5151
{
5252
"type": "object",
@@ -63,7 +63,7 @@
6363
},
6464
{
6565
"type": "object",
66-
"description": "Tries using the `FloatMult` mode with a given base.\n\n Only applies to floating-point types.",
66+
"description": "Tries using the `FloatMult` mode with a given base.\n\nOnly applies to floating-point types.",
6767
"properties": {
6868
"float_mult_base": {
6969
"type": "number",
@@ -82,7 +82,7 @@
8282
},
8383
{
8484
"type": "object",
85-
"description": "Tries using the `FloatQuant` mode with the given number of bits of\n quantization.\n\n Only applies to floating-point types.",
85+
"description": "Tries using the `FloatQuant` mode with the given number of bits of\nquantization.\n\nOnly applies to floating-point types.",
8686
"properties": {
8787
"float_quant_bits": {
8888
"type": "integer",
@@ -102,7 +102,7 @@
102102
},
103103
{
104104
"type": "object",
105-
"description": "Tries using the `IntMult` mode with a given base.\n\n Only applies to integer types.",
105+
"description": "Tries using the `IntMult` mode with a given base.\n\nOnly applies to integer types.",
106106
"properties": {
107107
"int_mult_base": {
108108
"type": "integer",
@@ -135,7 +135,7 @@
135135
"required": [
136136
"delta"
137137
],
138-
"description": "Automatically detects a detects a good delta encoding.\n\n This works well most of the time, but costs some compression time and\n can select a bad delta encoding in adversarial cases."
138+
"description": "Automatically detects a good delta encoding.\n\nThis works well most of the time, but costs some compression time and\ncan select a bad delta encoding in adversarial cases."
139139
},
140140
{
141141
"type": "object",
@@ -148,11 +148,11 @@
148148
"required": [
149149
"delta"
150150
],
151-
"description": "Never uses delta encoding.\n\n This is best if your data is in a random order or adjacent numbers have\n no relation to each other."
151+
"description": "Never uses delta encoding.\n\nThis is best if your data is in a random order or adjacent numbers have\nno relation to each other."
152152
},
153153
{
154154
"type": "object",
155-
"description": "Tries taking nth order consecutive deltas.\n\n Supports a delta encoding order up to 7. For instance, 1st order is\n just regular delta encoding, 2nd is deltas-of-deltas, etc. It is legal\n to use 0th order, but it is identical to None.",
155+
"description": "Tries taking nth order consecutive deltas.\n\nSupports a delta encoding order up to 7. For instance, 1st order is\njust regular delta encoding, 2nd is deltas-of-deltas, etc. It is legal\nto use 0th order, but it is identical to None.",
156156
"properties": {
157157
"delta_encoding_order": {
158158
"type": "integer",
@@ -189,15 +189,15 @@
189189
"required": [
190190
"delta"
191191
],
192-
"description": "Tries delta encoding according to an extra latent variable of\n \"lookback\".\n\n This can improve compression ratio when there are nontrivial patterns\n in the array, but reduces compression speed substantially."
192+
"description": "Tries delta encoding according to an extra latent variable of\n\"lookback\".\n\nThis can improve compression ratio when there are nontrivial patterns\nin the array, but reduces compression speed substantially."
193193
}
194194
]
195195
}
196196
],
197197
"oneOf": [
198198
{
199199
"type": "object",
200-
"description": "Divide the chunk into equal pages of up to this many numbers.\n\n For example, with equal pages up to 100,000, a chunk of 150,000 numbers\n would be divided into 2 pages, each of 75,000 numbers.",
200+
"description": "Divide the chunk into equal pages of up to this many numbers.\n\nFor example, with equal pages up to 100,000, a chunk of 150,000 numbers\nwould be divided into 2 pages, each of 75,000 numbers.",
201201
"properties": {
202202
"equal_pages_up_to": {
203203
"type": "integer",

codecs/random-projection/tests/schema.json

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
"required": [
1818
"seed"
1919
],
20-
"description": "Codec that uses random projections to reduce the dimensionality of high-\n dimensional data to compress it.\n\n A two-dimensional array of shape `$N \\times D$` is encoded as n array of\n shape `$N \\times K$`, where `$K$` is either set explicitly or chosen using\n the the Johnson-Lindenstrauss lemma. For `$K$` to be smaller than `$D$`,\n `$D$` must be quite large. Therefore, this codec should only applied on\n large datasets as it otherwise significantly inflates the data size instead\n of reducing it.\n\n Choosing a lower distortion rate `epsilon` will improve the quality of the\n lossy compression, i.e. reduce the compression error, at the cost of\n increasing `$K$`.\n\n This codec only supports finite floating point data.",
20+
"description": "Codec that uses random projections to reduce the dimensionality of high-\ndimensional data to compress it.\n\nA two-dimensional array of shape `$N \\times D$` is encoded as an array of\nshape `$N \\times K$`, where `$K$` is either set explicitly or chosen using\nthe Johnson-Lindenstrauss lemma. For `$K$` to be smaller than `$D$`,\n`$D$` must be quite large. Therefore, this codec should only be applied on\nlarge datasets as it otherwise significantly inflates the data size instead\nof reducing it.\n\nChoosing a lower distortion rate `epsilon` will improve the quality of the\nlossy compression, i.e. reduce the compression error, at the cost of\nincreasing `$K$`.\n\nThis codec only supports finite floating point data.",
2121
"allOf": [
2222
{
2323
"oneOf": [
@@ -39,7 +39,7 @@
3939
"reduction",
4040
"epsilon"
4141
],
42-
"description": "The reduced dimensionality `$K$` is derived from `epsilon`, as defined\n by the Johnson-Lindenstrauss lemma."
42+
"description": "The reduced dimensionality `$K$` is derived from `epsilon`, as defined\nby the Johnson-Lindenstrauss lemma."
4343
},
4444
{
4545
"type": "object",
@@ -59,7 +59,7 @@
5959
"reduction",
6060
"k"
6161
],
62-
"description": "The reduced dimensionality `$K$`, to which the data is projected, is\n given explicitly."
62+
"description": "The reduced dimensionality `$K$`, to which the data is projected, is\ngiven explicitly."
6363
}
6464
]
6565
},
@@ -76,7 +76,7 @@
7676
"required": [
7777
"projection"
7878
],
79-
"description": "The random projection matrix is dense and its components are sampled\n from `$\\text{N}\\left( 0, \\frac{1}{k} \\right)$`"
79+
"description": "The random projection matrix is dense and its components are sampled\nfrom `$\\text{N}\\left( 0, \\frac{1}{k} \\right)$`"
8080
},
8181
{
8282
"type": "object",
@@ -88,7 +88,7 @@
8888
],
8989
"exclusiveMinimum": 0.0,
9090
"maximum": 1.0,
91-
"description": "The `density` of the sparse projection matrix.\n\n Setting `density` to `$\\frac{1}{3}$` reproduces the settings by\n Achlioptas [^1]. If `density` is `None`, it is set to\n `$\\frac{1}{\\sqrt{d}}$`,\n the minimum density as recommended by Li et al [^2].\n\n\n [^1]: Achlioptas, D. (2003). Database-friendly random projections:\n Johnson-Lindenstrauss with binary coins. *Journal of Computer\n and System Sciences*, 66(4), 671-687. Available from:\n [doi:10.1016/S0022-0000(03)00025-4](https://doi.org/10.1016/S0022-0000(03)00025-4).\n\n [^2]: Li, P., Hastie, T. J., and Church, K. W. (2006). Very sparse\n random projections. In *Proceedings of the 12th ACM SIGKDD\n international conference on Knowledge discovery and data\n mining (KDD '06)*. Association for Computing Machinery, New\n York, NY, USA, 287–296. Available from:\n [doi:10.1145/1150402.1150436](https://doi.org/10.1145/1150402.1150436)."
91+
"description": "The `density` of the sparse projection matrix.\n\nSetting `density` to `$\\frac{1}{3}$` reproduces the settings by\nAchlioptas [^1]. If `density` is `None`, it is set to\n`$\\frac{1}{\\sqrt{d}}$`,\nthe minimum density as recommended by Li et al [^2].\n\n\n[^1]: Achlioptas, D. (2003). Database-friendly random projections:\n Johnson-Lindenstrauss with binary coins. *Journal of Computer\n and System Sciences*, 66(4), 671-687. Available from:\n [doi:10.1016/S0022-0000(03)00025-4](https://doi.org/10.1016/S0022-0000(03)00025-4).\n\n[^2]: Li, P., Hastie, T. J., and Church, K. W. (2006). Very sparse\n random projections. In *Proceedings of the 12th ACM SIGKDD\n international conference on Knowledge discovery and data\n mining (KDD '06)*. Association for Computing Machinery, New\n York, NY, USA, 287–296. Available from:\n [doi:10.1145/1150402.1150436](https://doi.org/10.1145/1150402.1150436)."
9292
},
9393
"projection": {
9494
"type": "string",
@@ -98,7 +98,7 @@
9898
"required": [
9999
"projection"
100100
],
101-
"description": "The random projection matrix is sparse where only `density`% of entries\n are non-zero.\n\n The matrix's components are sampled from\n\n - `$-\\sqrt{\\frac{1}{k \\cdot density}}$` with probability\n `$0.5 \\cdot density$`\n - `$0$` with probability `$1 - density$`\n - `$+\\sqrt{\\frac{1}{k \\cdot density}}$` with probability\n `$0.5 \\cdot density$`"
101+
"description": "The random projection matrix is sparse where only `density`% of entries\nare non-zero.\n\nThe matrix's components are sampled from\n\n- `$-\\sqrt{\\frac{1}{k \\cdot density}}$` with probability\n `$0.5 \\cdot density$`\n- `$0$` with probability `$1 - density$`\n- `$+\\sqrt{\\frac{1}{k \\cdot density}}$` with probability\n `$0.5 \\cdot density$`"
102102
}
103103
]
104104
}

codecs/sperr/tests/schema.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@
6060
"description": "Fixed point-wise (absolute) error"
6161
}
6262
],
63-
"description": "Codec providing compression using SPERR.\n\n Arrays that are higher-dimensional than 3D are encoded by compressing each\n 3D slice with SPERR independently. Specifically, the array's shape is\n interpreted as `[.., depth, height, width]`. If you want to compress 3D\n slices along three different axes, you can swizzle the array axes\n beforehand.",
63+
"description": "Codec providing compression using SPERR.\n\nArrays that are higher-dimensional than 3D are encoded by compressing each\n3D slice with SPERR independently. Specifically, the array's shape is\ninterpreted as `[.., depth, height, width]`. If you want to compress 3D\nslices along three different axes, you can swizzle the array axes\nbeforehand.",
6464
"properties": {
6565
"_version": {
6666
"type": "string",

0 commit comments

Comments
 (0)