diff --git a/Cargo.lock b/Cargo.lock index 4012e18434..7d02089c0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1844,6 +1844,13 @@ dependencies = [ "wasmi_core 0.51.0", ] +[[package]] +name = "wasmi_ir2" +version = "0.51.0" +dependencies = [ + "wasmi_core 0.51.0", +] + [[package]] name = "wasmi_wasi" version = "0.51.0" diff --git a/Cargo.toml b/Cargo.toml index 15a6a69668..9e02ddebe0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "crates/wasmi", "crates/wasi", "crates/ir", + "crates/ir2", "crates/fuzz", "crates/wast", "fuzz", @@ -34,6 +35,7 @@ wasmi = { version = "0.51.0", path = "crates/wasmi", default-features = false } wasmi_wasi = { version = "0.51.0", path = "crates/wasi", default-features = false } wasmi_core = { version = "0.51.0", path = "crates/core", default-features = false } wasmi_ir = { version = "0.51.0", path = "crates/ir", default-features = false } +wasmi_ir2 = { version = "0.51.0", path = "crates/ir2", default-features = false } wasmi_collections = { version = "0.51.0", path = "crates/collections", default-features = false } wasmi_c_api_impl = { version = "0.51.0", path = "crates/c_api" } wasmi_c_api_macros = { version = "0.51.0", path = "crates/c_api/macro" } diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000000..91568e8d68 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,9 @@ +coverage: + status: + project: + default: + target: auto + threshold: 1% + +ignore: + - "crates/ir2/**" diff --git a/crates/ir2/Cargo.toml b/crates/ir2/Cargo.toml new file mode 100644 index 0000000000..e6f84f5a07 --- /dev/null +++ b/crates/ir2/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "wasmi_ir2" +version.workspace = true +rust-version.workspace = true +documentation = "https://docs.rs/wasmi_ir2/" +description = "WebAssembly interpreter internal bytecode representation" +authors.workspace = true +repository.workspace = true +edition.workspace = true +readme.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true +exclude = [ + "benches/wat", + "benches/wasm", + "tests/spec/testsuite", + "**.wast", +] + +[dependencies] +wasmi_core = { workspace = true } + +[features] +default = ["std"] +std = [ + "wasmi_core/std", +] +simd = ["wasmi_core/simd"] + +[package.metadata.docs.rs] +features = ["std", "simd"] diff --git a/crates/ir2/build.rs b/crates/ir2/build.rs new file mode 100644 index 0000000000..8bd945b85c --- /dev/null +++ b/crates/ir2/build.rs @@ -0,0 +1,32 @@ +use self::build::Config; +use std::{fs, path::Path}; + +#[path = "build/mod.rs"] +mod build; + +fn main() { + watch_dir_recursively(Path::new("build")); + let config = Config::default(); + build::generate_code(&config).unwrap() +} + +fn watch_dir_recursively(path: &Path) { + if !path.is_dir() { + return; + } + let entries = match fs::read_dir(path) { + Ok(entries) => entries, + Err(error) => panic!("failed to read directory: {error}"), + }; + for entry in entries { + let entry = match entry { + Ok(entry) => entry, + Err(error) => panic!("failed to read directory entry: {error}"), + }; + let path = entry.path(); + if path.is_file() { + println!("cargo:rerun-if-changed={}", path.display()); + } + watch_dir_recursively(&path); + } +} diff --git a/crates/ir2/build/display/constructors.rs b/crates/ir2/build/display/constructors.rs new file mode 100644 index 0000000000..3edb53b14e --- /dev/null +++ b/crates/ir2/build/display/constructors.rs @@ -0,0 +1,119 @@ +use crate::build::{ + display::{ident::DisplayIdent, utils::DisplaySequence, Indent}, + ident::SnakeCase, 
+ isa::Isa, + op::{ + BinaryOp, + CmpBranchOp, + CmpSelectOp, + Field, + GenericOp, + LoadOp, + StoreOp, + TableGetOp, + TableSetOp, + UnaryOp, + V128LoadLaneOp, + V128ReplaceLaneOp, + }, +}; +use core::fmt::{self, Display}; + +pub struct DisplayConstructor<T> { + pub value: T, + pub indent: Indent, +} + +impl<T> DisplayConstructor<T> { + pub fn new(value: T, indent: Indent) -> Self { + Self { value, indent } + } + + pub fn map<V>(&self, value: V) -> DisplayConstructor<V> { + DisplayConstructor { + value, + indent: self.indent, + } + } +} + +impl<'a, T> DisplayConstructor<&'a T> { + fn display_constructor(&self, f: &mut fmt::Formatter, fields: &[Option<Field>]) -> fmt::Result + where + DisplayIdent<&'a T>: Display, + { + let indent = self.indent; + let snake_ident = DisplayIdent::snake(self.value); + let camel_ident = DisplayIdent::camel(self.value); + let fn_params = DisplaySequence::new(", ", fields.iter().filter_map(Option::as_ref)); + let struct_params = DisplaySequence::new( + ", ", + fields + .iter() + .filter_map(Option::as_ref) + .map(|param| param.ident) + .map(SnakeCase), + ); + write!( + f, + "\ + {indent}pub fn {snake_ident}({fn_params}) -> Self {{\n\ + {indent} Self::{camel_ident} {{ {struct_params} }}\n\ + {indent}}}\n\ + " + ) + } +} + +impl Display for DisplayConstructor<&'_ Isa> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let indent = self.indent; + let variants = DisplaySequence::new( + "", + self.value + .ops + .iter() + .map(|op| DisplayConstructor::new(op, indent.inc_by(1))), + ); + write!( + f, + "\ + {indent}impl Op {{\n\ + {variants}\ + {indent}}}\n\ + " + ) + } +} + +macro_rules! impl_display_constructor { + ( $($ty:ty),* $(,)? ) => { + $( + impl Display for DisplayConstructor<&'_ $ty> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let fields = self.value.fields().map(Option::from); + self.display_constructor(f, &fields) + } + } + )* + }; +} +impl_display_constructor!
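+// Illustrative sketch (assumed names, not actual generator output): for a hypothetical + // binary op variant `I32Add` with fields `result`, `lhs` and `rhs`, the constructor + // emitted by `DisplayConstructor` has roughly this shape: + // + // pub fn i32_add(result: Slot, lhs: Slot, rhs: Slot) -> Self { + // Self::I32Add { result, lhs, rhs } + // } + // + // where the identifier comes from `DisplayIdent` and the parameters from `fields()`. +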
{ + UnaryOp, + BinaryOp, + CmpBranchOp, + CmpSelectOp, + LoadOp, + StoreOp, + TableGetOp, + TableSetOp, + V128ReplaceLaneOp, + V128LoadLaneOp, +} + +impl Display for DisplayConstructor<&'_ GenericOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let fields = self.value.fields.map(Option::from); + self.display_constructor(f, &fields) + } +} diff --git a/crates/ir2/build/display/decode.rs b/crates/ir2/build/display/decode.rs new file mode 100644 index 0000000000..a7f8ab5b4a --- /dev/null +++ b/crates/ir2/build/display/decode.rs @@ -0,0 +1,231 @@ +use crate::build::{ + display::{ + ident::DisplayIdent, + utils::{DisplayConcat, DisplaySequence, IntoDisplayMaybe as _}, + Indent, + }, + ident::{CamelCase, Ident, SnakeCase}, + op::{ + BinaryOp, + CmpBranchOp, + CmpSelectOp, + FieldTy, + GenericOp, + LoadOp, + OperandKind, + StoreOp, + TableGetOp, + TableSetOp, + UnaryOp, + V128LoadLaneOp, + V128ReplaceLaneOp, + }, + Isa, +}; +use core::fmt::{self, Display}; + +pub struct DisplayDecode<T> { + pub value: T, + pub indent: Indent, +} + +impl<T> DisplayDecode<T> { + pub fn new(value: T, indent: Indent) -> Self { + Self { value, indent } + } + + pub fn map<V>(&self, value: V) -> DisplayDecode<V> { + DisplayDecode { + value, + indent: self.indent, + } + } +} + +impl Display for DisplayDecode<&'_ Isa> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let indent = self.indent; + let impls = DisplaySequence::new( + "", + self.value + .ops + .iter() + .map(|op| DisplayDecode::new(op, indent)), + ); + write!(f, "{impls}") + } +} + +impl Display for DisplayDecode<&'_ UnaryOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let camel_ident = DisplayIdent::camel(self.value); + let slot_ty = FieldTy::Slot; + writeln!(f, "pub type {camel_ident} = UnaryOp<{slot_ty}>;") + } +} + +impl Display for DisplayDecode<&'_ BinaryOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let op = self.value; + let camel_ident = DisplayIdent::camel(op); + let lhs = op.lhs_field().ty; + let rhs = op.rhs_field().ty; + writeln!(f, "pub type {camel_ident} = BinaryOp<{lhs}, {rhs}>;") + } +} + +impl Display for DisplayDecode<&'_ CmpBranchOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let op = self.value; + let camel_ident = DisplayIdent::camel(op); + let lhs = op.lhs_field().ty; + let rhs = op.rhs_field().ty; + writeln!(f, "pub type {camel_ident} = CmpBranchOp<{lhs}, {rhs}>;") + } +} + +impl Display for DisplayDecode<&'_ CmpSelectOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let op = self.value; + let camel_ident = DisplayIdent::camel(op); + let lhs = op.lhs_field().ty; + let rhs = op.rhs_field().ty; + writeln!(f, "pub type {camel_ident} = CmpSelectOp<{lhs}, {rhs}>;") + } +} + +impl Display for DisplayDecode<&'_ LoadOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let op = self.value; + let camel_ident = DisplayIdent::camel(op); + let mem0_offset16 = (op.mem0 && op.offset16) + .then_some("Mem0Offset16") + .display_maybe(); + let result_suffix = CamelCase(OperandKind::Slot); + let ptr_suffix = SnakeCase(op.ptr); + writeln!( + f, + "pub type {camel_ident} = LoadOp{mem0_offset16}_{result_suffix}{ptr_suffix};" + ) + } +} + +impl Display for DisplayDecode<&'_ StoreOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let op = self.value; + let camel_ident = DisplayIdent::camel(op); + let lane_ident = op + .laneidx_field() + .map(|_| CamelCase(Ident::Lane)) + .display_maybe(); + let mem0_offset16 = (op.mem0 &&
op.offset16) + .then_some("Mem0Offset16") + .display_maybe(); + let ptr_suffix = CamelCase(op.ptr); + let value_ty = op.value_field().ty; + let laneidx_ty = op + .laneidx_field() + .map(|field| (", ", field.ty)) + .map(DisplayConcat) + .display_maybe(); + writeln!( + f, + "pub type {camel_ident} = Store{lane_ident}Op{mem0_offset16}_{ptr_suffix}<{value_ty}{laneidx_ty}>;" + ) + } +} + +impl Display for DisplayDecode<&'_ TableGetOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let op = self.value; + let camel_ident = DisplayIdent::camel(op); + let index_ty = op.index_field().ty; + writeln!(f, "pub type {camel_ident} = TableGet<{index_ty}>;") + } +} + +impl Display for DisplayDecode<&'_ TableSetOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let op = self.value; + let camel_ident = DisplayIdent::camel(op); + let index_ty = op.index_field().ty; + let value_ty = op.value_field().ty; + writeln!( + f, + "pub type {camel_ident} = TableSet<{index_ty}, {value_ty}>;" + ) + } +} + +impl Display for DisplayDecode<&'_ GenericOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let indent = self.indent; + let op = self.value; + if op.fields.is_empty() { + // No need to decode a type with no operands (a.k.a. fields). + return Ok(()); + } + let camel_ident = DisplayIdent::camel(self.value); + let fields = DisplaySequence::new( + ",\n", + op.fields + .iter() + .map(|field| (indent.inc(), "pub ", field)) + .map(DisplayConcat), + ); + let constructors = DisplaySequence::new( + ",\n", + op.fields + .iter() + .map(|field| field.ident) + .map(SnakeCase) + .map(|ident| (indent.inc_by(3), ident, ": Decode::decode(decoder)")) + .map(DisplayConcat), + ); + write!( + f, + "\ + {indent}pub struct {camel_ident} {{\n\ + {fields}\n\ + {indent}}}\n\ + {indent}impl Decode for {camel_ident} {{\n\ + {indent} unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {{\n\ + {indent} Self {{\n\ + {constructors}\n\ + {indent} }}\n\ + {indent} }}\n\ + {indent}}}\n\ + " + ) + } +} + +impl Display for DisplayDecode<&'_ V128ReplaceLaneOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let op = self.value; + let camel_ident = DisplayIdent::camel(op); + let value_ty = op.value_field().ty; + let len_lanes = op.width.len_lanes(); + writeln!( + f, + "pub type {camel_ident} = V128ReplaceLaneOp<{value_ty}, {len_lanes}>;" + ) + } +} + +impl Display for DisplayDecode<&'_ V128LoadLaneOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let op = self.value; + let camel_ident = DisplayIdent::camel(op); + let result_suffix = CamelCase(OperandKind::Slot); + let mem0_offset16 = (op.mem0 && op.offset16) + .then_some("Mem0Offset16") + .display_maybe(); + let ptr_suffix = SnakeCase(op.ptr); + let laneidx = op.width.to_laneidx(); + writeln!( + f, + "pub type {camel_ident} = V128LoadLaneOp{mem0_offset16}_{result_suffix}{ptr_suffix}<{laneidx}>;" + ) + } +} diff --git a/crates/ir2/build/display/encode.rs b/crates/ir2/build/display/encode.rs new file mode 100644 index 0000000000..4b379c2e4c --- /dev/null +++ b/crates/ir2/build/display/encode.rs @@ -0,0 +1,121 @@ +use crate::build::{ + display::{ident::DisplayIdent, utils::DisplaySequence, Indent}, + ident::SnakeCase, + isa::Isa, + op::{ + BinaryOp, + CmpBranchOp, + CmpSelectOp, + Field, + GenericOp, + LoadOp, + StoreOp, + TableGetOp, + TableSetOp, + UnaryOp, + V128LoadLaneOp, + V128ReplaceLaneOp, + }, +}; +use core::fmt::{self, Display}; + +pub struct DisplayEncode<T> { + pub value: T, + pub indent: Indent, +} + +impl<T> DisplayEncode<T> { +
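// Sketch of an emitted `Encode` match arm (illustrative; `I32Add` and its fields + // are assumed placeholder names, not actual generator output): + // + // Self::I32Add { result, lhs, rhs } => { + // (OpCode::I32Add, result, lhs, rhs).encode(encoder) + // } + // + // i.e. every op encodes as its `OpCode` tag followed by its fields. +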
pub fn new(value: T, indent: Indent) -> Self { + Self { value, indent } + } + + pub fn map<V>(&self, value: V) -> DisplayEncode<V> { + DisplayEncode { + value, + indent: self.indent, + } + } +} + +impl<'a, T> DisplayEncode<&'a T> { + fn display_encode(&self, f: &mut fmt::Formatter, fields: &[Option<Field>]) -> fmt::Result + where + DisplayIdent<&'a T>: Display, + { + let indent = self.indent; + let camel_ident = DisplayIdent::camel(self.value); + let match_params = DisplaySequence::new( + ", ", + fields + .iter() + .filter_map(Option::as_ref) + .map(|field| field.ident) + .map(SnakeCase), + ); + write!( + f, + "\ + {indent}Self::{camel_ident} {{ {match_params} }} => {{\n\ + {indent} (OpCode::{camel_ident}, {match_params}).encode(encoder)\n\ + {indent}}}\n\ + " + ) + } +} + +impl Display for DisplayEncode<&'_ Isa> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let indent = self.indent; + let impls = DisplaySequence::new( + "", + self.value + .ops + .iter() + .map(|op| DisplayEncode::new(op, indent.inc_by(3))), + ); + write!( + f, + "\ + {indent}impl Encode for Op {{\n\ + {indent} fn encode<E: Encoder>(&self, encoder: &mut E) -> Result<E::Ok, E::Error> {{\n\ + {indent} match self {{\n\ + {impls}\n\ + {indent} }}\n\ + {indent} }}\n\ + {indent}}}\n\ + " + ) + } +} + +macro_rules! impl_display_encode { + ( $($ty:ty),* $(,)? ) => { + $( + impl Display for DisplayEncode<&'_ $ty> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let fields = self.value.fields().map(Option::from); + self.display_encode(f, &fields) + } + } + )* + }; +} +impl_display_encode! { + UnaryOp, + BinaryOp, + CmpBranchOp, + CmpSelectOp, + LoadOp, + StoreOp, + TableGetOp, + TableSetOp, + V128ReplaceLaneOp, + V128LoadLaneOp, +} + +impl Display for DisplayEncode<&'_ GenericOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let fields = self.value.fields.map(Option::from); + self.display_encode(f, &fields) + } +} diff --git a/crates/ir2/build/display/ident.rs b/crates/ir2/build/display/ident.rs new file mode 100644 index 0000000000..aa7a3fc5a4 --- /dev/null +++ b/crates/ir2/build/display/ident.rs @@ -0,0 +1,271 @@ +use crate::build::{ + display::utils::{DisplayConcat, IntoDisplayMaybe as _}, + ident::{Case, Ident, Sep, SnakeCase}, + op::{ + BinaryOp, + CmpBranchOp, + CmpSelectOp, + GenericOp, + LoadOp, + OperandKind, + StoreOp, + TableGetOp, + TableSetOp, + UnaryOp, + V128LoadLaneOp, + V128ReplaceLaneOp, + }, +}; +use core::fmt::{self, Display}; + +pub struct DisplayIdent<T> { + pub value: T, + pub case: Case, +} + +impl<T> DisplayIdent<T> { + pub fn camel(value: T) -> Self { + Self { + value, + case: Case::Camel, + } + } + + pub fn snake(value: T) -> Self { + Self { + value, + case: Case::Snake, + } + } + + pub fn map<V>(&self, value: V) -> DisplayIdent<V> { + DisplayIdent { + value, + case: self.case, + } + } +} + +impl Display for DisplayIdent<&'_ UnaryOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let case = self.case; + let op = self.value; + let kind = op.kind; + let ident = case.wrap(kind.ident()); + let sep = case.wrap(Sep); + let ident_prefix = DisplayConcat((case.wrap(kind.result_ty()), sep)); + let ident_suffix = kind + .is_conversion() + .then_some(kind.value_ty()) + .map(|i| (sep, case.wrap(i))) + .map(DisplayConcat) + .display_maybe(); + let result_suffix = case.wrap(OperandKind::Slot); + let value_suffix = SnakeCase(op.value); + write!( + f, + "{ident_prefix}{ident}{ident_suffix}_{result_suffix}{value_suffix}" + ) + } +} + +impl Display for DisplayIdent<&'_ BinaryOp> { + fn fmt(&self, f: &mut
fmt::Formatter<'_>) -> fmt::Result { + let case = self.case; + let sep = case.wrap(Sep); + let kind = self.value.kind; + let ident = case.wrap(kind.ident()); + let ident_prefix = case.wrap(kind.ident_prefix()); + let result_suffix = case.wrap(OperandKind::Slot); + let lhs_suffix = SnakeCase(self.value.lhs); + let rhs_suffix = SnakeCase(self.value.rhs); + write!( + f, + "{ident_prefix}{sep}{ident}_{result_suffix}{lhs_suffix}{rhs_suffix}" + ) + } +} + +impl Display for DisplayIdent<&'_ CmpBranchOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let case = self.case; + let sep = case.wrap(Sep); + let cmp = self.value.cmp; + let branch = case.wrap(Ident::Branch); + let ident = case.wrap(cmp.ident()); + let input_ident = case.wrap(cmp.ident_prefix()); + let lhs_suffix = case.wrap(self.value.lhs); + let rhs_suffix = SnakeCase(self.value.rhs); + write!( + f, + "{branch}{sep}{input_ident}{sep}{ident}_{lhs_suffix}{rhs_suffix}" + ) + } +} + +impl Display for DisplayIdent<&'_ CmpSelectOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let case = self.case; + let cmp = self.value.cmp; + let sep = case.wrap(Sep); + let select = case.wrap(Ident::Select); + let ident = case.wrap(cmp.ident()); + let input_ident = case.wrap(cmp.ident_prefix()); + let result_suffix = case.wrap(OperandKind::Slot); + let lhs_suffix = SnakeCase(self.value.lhs); + let rhs_suffix = SnakeCase(self.value.rhs); + write!( + f, + "{select}{sep}{input_ident}{sep}{ident}_{result_suffix}{lhs_suffix}{rhs_suffix}" + ) + } +} + +impl Display for DisplayIdent<&'_ LoadOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let case = self.case; + let kind = self.value.kind; + let ident = case.wrap(kind.ident()); + let result_suffix = case.wrap(OperandKind::Slot); + let ptr_suffix = SnakeCase(self.value.ptr); + let sep = case.wrap(Sep); + let ident_prefix = self + .value + .kind + .ident_prefix() + .map(|v| (case.wrap(v), sep)) + .map(DisplayConcat) + .display_maybe(); + let mem0_ident = self + .value + .mem0 + .then_some(Ident::Mem0) + .map(|v| (sep, case.wrap(v))) + .map(DisplayConcat) + .display_maybe(); + let offset16_ident = self + .value + .offset16 + .then_some(Ident::Offset16) + .map(|v| (sep, case.wrap(v))) + .map(DisplayConcat) + .display_maybe(); + write!( + f, + "{ident_prefix}{ident}{mem0_ident}{offset16_ident}_{result_suffix}{ptr_suffix}", + ) + } +} + +impl Display for DisplayIdent<&'_ StoreOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let case = self.case; + let kind = self.value.kind; + let ident = case.wrap(kind.ident()); + let ptr_suffix = case.wrap(self.value.ptr); + let value_suffix = SnakeCase(self.value.value); + let sep = case.wrap(Sep); + let ident_prefix = self + .value + .kind + .ident_prefix() + .map(|v| (case.wrap(v), sep)) + .map(DisplayConcat) + .display_maybe(); + let mem0_ident = self + .value + .mem0 + .then_some(Ident::Mem0) + .map(|v| (sep, case.wrap(v))) + .map(DisplayConcat) + .display_maybe(); + let offset16_ident = self + .value + .offset16 + .then_some(Ident::Offset16) + .map(|v| (sep, case.wrap(v))) + .map(DisplayConcat) + .display_maybe(); + write!( + f, + "{ident_prefix}{ident}{mem0_ident}{offset16_ident}_{ptr_suffix}{value_suffix}", + ) + } +} + +impl Display for DisplayIdent<&'_ GenericOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ident = self.case.wrap(self.value.ident); + write!(f, "{ident}") + } +} + +impl Display for DisplayIdent<&'_ TableGetOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { + let case = self.case; + let ident = case.wrap(Ident::TableGet); + let result_suffix = case.wrap(OperandKind::Slot); + let index_suffix = SnakeCase(self.value.index); + write!(f, "{ident}_{result_suffix}{index_suffix}") + } +} + +impl Display for DisplayIdent<&'_ TableSetOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let case = self.case; + let ident = case.wrap(Ident::TableSet); + let index_suffix = case.wrap(self.value.index); + let value_suffix = SnakeCase(self.value.value); + write!(f, "{ident}_{index_suffix}{value_suffix}") + } +} + +impl Display for DisplayIdent<&'_ V128ReplaceLaneOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let case = self.case; + let op = self.value; + let sep = case.wrap(Sep); + let v128 = case.wrap(Ident::V128); + let ident = case.wrap(Ident::ReplaceLane); + let width = op.width; + let result_suffix = case.wrap(OperandKind::Slot); + let v128_suffix = SnakeCase(OperandKind::Slot); + let value_suffix = SnakeCase(op.value); + write!( + f, + "{v128}{sep}{ident}{width}_{result_suffix}{v128_suffix}{value_suffix}" + ) + } +} + +impl Display for DisplayIdent<&'_ V128LoadLaneOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let case = self.case; + let op = self.value; + let sep = case.wrap(Sep); + let v128 = case.wrap(Ident::V128); + let ident = case.wrap(Ident::LoadLane); + let width = u8::from(op.width); + let result_suffix = case.wrap(OperandKind::Slot); + let ptr_suffix = SnakeCase(op.ptr); + let v128_suffix = SnakeCase(OperandKind::Slot); + let mem0_ident = self + .value + .mem0 + .then_some(Ident::Mem0) + .map(|v| (sep, case.wrap(v))) + .map(DisplayConcat) + .display_maybe(); + let offset16_ident = self + .value + .offset16 + .then_some(Ident::Offset16) + .map(|v| (sep, case.wrap(v))) + .map(DisplayConcat) + .display_maybe(); + write!( + f, + "{v128}{sep}{ident}{width}{mem0_ident}{offset16_ident}_{result_suffix}{ptr_suffix}{v128_suffix}" + ) + } +} diff --git a/crates/ir2/build/display/mod.rs b/crates/ir2/build/display/mod.rs new file mode 100644 index 0000000000..e509379618 --- /dev/null +++ b/crates/ir2/build/display/mod.rs @@ -0,0 +1,41 @@ +mod constructors; +mod decode; +mod encode; +mod ident; +mod op; +mod op_code; +mod result_mut; +mod utils; + +pub use self::{ + constructors::DisplayConstructor, + decode::DisplayDecode, + encode::DisplayEncode, + op::DisplayOp, + op_code::DisplayOpCode, + result_mut::DisplayResultMut, + utils::Indent, +}; +use crate::build::{display::ident::DisplayIdent, op::Op}; +use core::fmt::{self, Display}; + +macro_rules! impl_trait_for_op { + ( + $trait:ident, + $($variant:ident($op_ty:ty)),* $(,)? 
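+ // Note (assumption): `apply_macro_for_ops!` is defined alongside the `Op` type + // and calls back into this macro with the full variant list, for example + // `impl_trait_for_op!(DisplayOp, Unary(UnaryOp), Binary(BinaryOp), ...)`; the + // variant names shown here are illustrative.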
+ ) => { + impl Display for $trait<&'_ Op> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.value { + $( Op::$variant(op) => self.map(op).fmt(f), )* + } + } + } + }; +} +apply_macro_for_ops!(impl_trait_for_op, DisplayOp); +apply_macro_for_ops!(impl_trait_for_op, DisplayIdent); +apply_macro_for_ops!(impl_trait_for_op, DisplayConstructor); +apply_macro_for_ops!(impl_trait_for_op, DisplayResultMut); +apply_macro_for_ops!(impl_trait_for_op, DisplayEncode); +apply_macro_for_ops!(impl_trait_for_op, DisplayDecode); diff --git a/crates/ir2/build/display/op.rs b/crates/ir2/build/display/op.rs new file mode 100644 index 0000000000..6f7cb217ac --- /dev/null +++ b/crates/ir2/build/display/op.rs @@ -0,0 +1,122 @@ +use crate::build::{ + display::{ + ident::DisplayIdent, + utils::{DisplayConcat, DisplaySequence}, + Indent, + }, + isa::Isa, + op::{ + BinaryOp, + CmpBranchOp, + CmpSelectOp, + Field, + GenericOp, + LoadOp, + StoreOp, + TableGetOp, + TableSetOp, + UnaryOp, + V128LoadLaneOp, + V128ReplaceLaneOp, + }, +}; +use core::fmt::{self, Display}; + +pub struct DisplayOp<T> { + pub value: T, + pub indent: Indent, +} + +impl<T> DisplayOp<T> { + pub fn new(val: T, indent: Indent) -> Self { + Self { value: val, indent } + } + + pub fn map<V>(&self, val: V) -> DisplayOp<V> { + DisplayOp { + value: val, + indent: self.indent, + } + } +} + +impl<'a, T> DisplayOp<&'a T> +where + DisplayIdent<&'a T>: Display, +{ + fn display_variant(&self, f: &mut fmt::Formatter<'_>, fields: &[Option<Field>]) -> fmt::Result { + let indent = self.indent; + let ident = DisplayIdent::camel(self.value); + let fields = DisplaySequence::new( + ",\n", + fields + .iter() + .filter_map(Option::as_ref) + .map(|field| (indent.inc(), field)) + .map(DisplayConcat), + ); + write!( + f, + "\ + {indent}{ident} {{\n\ + {fields}\n\ + {indent}}}\ + ", + ) + } +} + +impl Display for DisplayOp<&'_ Isa> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let indent = self.indent; + let variants = DisplaySequence::new( + ",\n", + self.value + .ops + .iter() + .map(|op| DisplayOp::new(op, indent.inc())), + ); + write!( + f, + "\ + {indent}/// A Wasmi bytecode operator or instruction.\n\ + {indent}#[allow(non_camel_case_types)]\n\ + {indent}pub enum Op {{\n\ + {variants}\n\ + {indent}}}\n\ + " + ) + } +} + +macro_rules! impl_display_variant { + ( $($ty:ty),* $(,)? ) => { + $( + impl Display for DisplayOp<&'_ $ty> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let fields = self.value.fields().map(Option::from); + self.display_variant(f, &fields) + } + } + )* + }; +} +impl_display_variant!
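+// Illustrative sketch (assumed names): each ISA op becomes one struct-like `Op` + // variant, roughly + // + // I32Add { + // result: Slot, + // lhs: Slot, + // rhs: Slot, + // }, + // + // so the generated enum mirrors the ops pushed into `Isa::ops`. +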
{ + UnaryOp, + BinaryOp, + CmpBranchOp, + CmpSelectOp, + LoadOp, + StoreOp, + TableGetOp, + TableSetOp, + V128ReplaceLaneOp, + V128LoadLaneOp, +} + +impl Display for DisplayOp<&'_ GenericOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let fields = self.value.fields.map(Option::from); + self.display_variant(f, &fields) + } +} diff --git a/crates/ir2/build/display/op_code.rs b/crates/ir2/build/display/op_code.rs new file mode 100644 index 0000000000..3537b7b746 --- /dev/null +++ b/crates/ir2/build/display/op_code.rs @@ -0,0 +1,98 @@ +use crate::build::{ + display::{ + ident::DisplayIdent, + utils::{DisplayConcat, DisplaySequence}, + Indent, + }, + isa::Isa, + op::Op, +}; +use core::fmt::{self, Display}; + +pub struct DisplayOpCode<T> { + pub value: T, + pub indent: Indent, +} + +impl<T> DisplayOpCode<T> { + pub fn new(value: T, indent: Indent) -> Self { + Self { value, indent } + } +} + +impl Display for DisplayOpCode<&'_ Isa> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let indent = self.indent; + let variants = DisplaySequence::new( + ",\n", + self.value + .ops + .iter() + .map(|op| (indent.inc(), DisplayIdent::camel(op))) + .map(DisplayConcat), + ); + let match_arms_code = DisplaySequence::new( + ",\n", + self.value + .ops + .iter() + .map(|op| DisplayOpCode::new(op, indent.inc_by(3))), + ); + let match_arms_tryfrom = DisplaySequence::new( + ",\n", + self.value + .ops + .iter() + .map(DisplayTryFromU16) + .map(|op| DisplayOpCode::new(op, indent.inc_by(3))), + ); + write!( + f, + "\ + {indent}#[allow(non_camel_case_types)]\n\ + {indent}/// The operator code (op-code) of a Wasmi bytecode [`Op`].\n\ + {indent}#[repr(u16)]\n\ + {indent}pub enum OpCode {{\n\ + {variants}\n\ + {indent}}}\n\ + \n\ + {indent}impl Op {{\n\ + {indent} /// Returns the [`OpCode`] associated to `self`.\n\ + {indent} pub fn code(&self) -> OpCode {{\n\ + {indent} match self {{\n\ + {match_arms_code}\n\ + {indent} }}\n\ + {indent} }}\n\ + {indent}}}\n\ + \n\ + {indent}impl TryFrom<u16> for OpCode {{\n\ + {indent} type Error = InvalidOpCode;\n\ + {indent} fn try_from(value: u16) -> Result<Self, Self::Error> {{\n\ + {indent} let op_code = match value {{\n\ + {match_arms_tryfrom},\n\ + {indent} _ => return Err(InvalidOpCode),\n\ + {indent} }};\n\ + {indent} Ok(op_code)\n\ + {indent} }}\n\ + {indent}}}\n\ + " + ) + } +} + +impl Display for DisplayOpCode<&'_ Op> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let indent = self.indent; + let ident = DisplayIdent::camel(self.value); + write!(f, "{indent}Self::{ident} {{ ..
}} => OpCode::{ident}") + } +} + +pub struct DisplayTryFromU16(T); +impl Display for DisplayOpCode> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let indent = self.indent; + let ident = DisplayIdent::camel(self.value.0); + write!(f, "{indent}x if x == Self::{ident} as _ => Self::{ident}") + } +} diff --git a/crates/ir2/build/display/result_mut.rs b/crates/ir2/build/display/result_mut.rs new file mode 100644 index 0000000000..2824006465 --- /dev/null +++ b/crates/ir2/build/display/result_mut.rs @@ -0,0 +1,143 @@ +use crate::build::{ + display::{ident::DisplayIdent, utils::DisplaySequence, Indent}, + isa::Isa, + op::{ + BinaryOp, + CmpBranchOp, + CmpSelectOp, + GenericOp, + LoadOp, + StoreOp, + TableGetOp, + TableSetOp, + UnaryOp, + V128LoadLaneOp, + V128ReplaceLaneOp, + }, +}; +use core::fmt::{self, Display}; + +pub struct DisplayResultMut { + pub value: T, + pub indent: Indent, +} + +impl DisplayResultMut { + pub fn new(value: T, indent: Indent) -> Self { + Self { value, indent } + } + + pub fn map(&self, value: V) -> DisplayResultMut { + DisplayResultMut { + value, + indent: self.indent, + } + } +} + +impl<'a, T> DisplayResultMut<&'a T> { + fn display_match_arm(&self, f: &mut fmt::Formatter) -> fmt::Result + where + DisplayIdent<&'a T>: Display, + { + let indent = self.indent; + let ident = DisplayIdent::camel(self.value); + writeln!(f, "{indent}Self::{ident} {{ result, .. }} => result,") + } +} + +impl Display for DisplayResultMut<&'_ Isa> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let indent = self.indent; + let variants = DisplaySequence::new( + "", + self.value + .ops + .iter() + .map(|op| DisplayResultMut::new(op, indent.inc_by(3))), + ); + write!( + f, + "\ + {indent}impl Op {{\n\ + {indent} pub fn result_mut(&mut self) -> Option<&mut Slot> {{\n\ + {indent} let res = match self {{\n\ + {variants}\ + {indent} _ => return None,\n\ + {indent} }};\n\ + {indent} Some(res)\n\ + {indent} }}\n\ + {indent}}}\n\ + " + ) + } +} + +impl Display for DisplayResultMut<&'_ UnaryOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.display_match_arm(f) + } +} + +impl Display for DisplayResultMut<&'_ BinaryOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.display_match_arm(f) + } +} + +impl Display for DisplayResultMut<&'_ CmpBranchOp> { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +impl Display for DisplayResultMut<&'_ CmpSelectOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.display_match_arm(f) + } +} + +impl Display for DisplayResultMut<&'_ LoadOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.display_match_arm(f) + } +} + +impl Display for DisplayResultMut<&'_ StoreOp> { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +impl Display for DisplayResultMut<&'_ TableGetOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.display_match_arm(f) + } +} + +impl Display for DisplayResultMut<&'_ TableSetOp> { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +impl Display for DisplayResultMut<&'_ GenericOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if !self.value.has_result() { + return Ok(()); + } + self.display_match_arm(f) + } +} + +impl Display for DisplayResultMut<&'_ V128ReplaceLaneOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.display_match_arm(f) + } +} + +impl Display for DisplayResultMut<&'_ 
V128LoadLaneOp> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.display_match_arm(f) + } +} diff --git a/crates/ir2/build/display/utils.rs b/crates/ir2/build/display/utils.rs new file mode 100644 index 0000000000..890806b553 --- /dev/null +++ b/crates/ir2/build/display/utils.rs @@ -0,0 +1,119 @@ +use core::fmt::{self, Display}; + +#[derive(Copy, Clone, Default)] +pub struct Indent(usize); + +impl Indent { + pub fn inc(self) -> Self { + Self(self.0 + 1) + } + + pub fn inc_by(self, delta: usize) -> Self { + Self(self.0 + delta) + } +} + +impl Display for Indent { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for _ in 0..self.0 { + write!(f, " ")?; + } + Ok(()) + } +} + +pub struct DisplayConcat<T>(pub T); + +impl<T0, T1> Display for DisplayConcat<(T0, T1)> +where + T0: Display, + T1: Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let (t0, t1) = &self.0; + write!(f, "{t0}{t1}") + } +} + +impl<T0, T1, T2> Display for DisplayConcat<(T0, T1, T2)> +where + T0: Display, + T1: Display, + T2: Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let (t0, t1, t2) = &self.0; + write!(f, "{t0}{t1}{t2}") + } +} + +pub struct DisplaySequence<S, I> { + iter: I, + sep: S, +} + +impl<S, I> DisplaySequence<S, I> { + pub fn new(sep: S, iter: I) -> Self { + Self { sep, iter } + } +} + +impl<S, I> Display for DisplaySequence<S, I> +where + I: IntoIterator<Item: Display> + Clone, + S: Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut iter = self.iter.clone().into_iter(); + let Some(first) = iter.next() else { + return Ok(()); + }; + write!(f, "{first}")?; + let sep = &self.sep; + for item in iter { + write!(f, "{sep}{item}")?; + } + Ok(()) + } +} + +pub enum DisplayMaybe<T> { + Some(T), + None, +} + +impl<T> Display for DisplayMaybe<T> +where + T: Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let DisplayMaybe::Some(field) = self { + field.fmt(f)?; + } + Ok(()) + } +} + +pub trait IntoDisplayMaybe<T> { + fn display_maybe(self) -> DisplayMaybe<T>; +} +impl<T> IntoDisplayMaybe<T> for Option<T> { + fn display_maybe(self) -> DisplayMaybe<T> { + DisplayMaybe::from(self) + } +} + +impl<T> From<T> for DisplayMaybe<T> { + fn from(value: T) -> Self { + DisplayMaybe::Some(value) + } +} + +impl<T> From<Option<T>> for DisplayMaybe<T> { + fn from(value: Option<T>) -> Self { + match value { + Some(value) => Self::Some(value), + None => Self::None, + } + } +} diff --git a/crates/ir2/build/ident.rs b/crates/ir2/build/ident.rs new file mode 100644 index 0000000000..6048e03134 --- /dev/null +++ b/crates/ir2/build/ident.rs @@ -0,0 +1,308 @@ +use core::fmt::{self, Display}; + +#[derive(Copy, Clone)] +pub enum Case { + Camel, + Snake, +} + +impl Case { + pub fn wrap<T>(self, value: T) -> ChosenCase<T> { + match self { + Self::Camel => ChosenCase::Camel(value), + Self::Snake => ChosenCase::Snake(value), + } + } +} + +/// Runtime selected casing, either [`CamelCase`] or [`SnakeCase`]. +#[derive(Copy, Clone)] +pub enum ChosenCase<T> { + Camel(T), + Snake(T), +} + +impl<T> Display for ChosenCase<T> +where + CamelCase<T>: Display, + SnakeCase<T>: Display, + T: Clone, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Camel(value) => write!(f, "{}", CamelCase(value.clone())), + Self::Snake(value) => write!(f, "{}", SnakeCase(value.clone())), + } + } +} + +/// Camel-case tokens, e.g. `HelloWorld`. +pub struct CamelCase<T>(pub T); + +/// Snake-case tokens, e.g. `hello_world`. +pub struct SnakeCase<T>(pub T); + +/// A word separator as required by some casings, e.g. snake case uses `_`.
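+/// + /// For example: `CamelCase(Sep)` renders as the empty string while `SnakeCase(Sep)` + /// renders as `_`, so composing `Ident::Branch` and `Ident::Eq` around a `Sep` + /// yields `BranchEq` in camel case and `branch_eq` in snake case.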
+#[derive(Copy, Clone)] +pub struct Sep; + +impl Display for CamelCase<Sep> { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { + Ok(()) + } +} + +impl Display for SnakeCase<Sep> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "_") + } +} + +macro_rules! define_ident { + ( + $( + $camel_ident:ident: $snake_ident:ident + ),* $(,)? + ) => { + #[derive(Copy, Clone)] + pub enum Ident { + $( $camel_ident ),* + } + + impl Display for CamelCase<Ident> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let frag: &'static str = match self.0 { + $( + Ident::$camel_ident => stringify!($camel_ident), + )* + }; + write!(f, "{frag}") + } + } + + impl Display for SnakeCase<Ident> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let frag: &'static str = match self.0 { + $( + Ident::$camel_ident => stringify!($snake_ident), + )* + }; + write!(f, "{frag}") + } + } + }; +} +define_ident!( + Add: add, + AddSat: add_sat, + Sub: sub, + SubSat: sub_sat, + Mul: mul, + Div: div, + Rem: rem, + Min: min, + Max: max, + Pmin: pmin, + Pmax: pmax, + Copysign: copysign, + Avgr: avgr, + + Shl: shl, + Shr: shr, + Rotl: rotl, + Rotr: rotr, + + Eq: eq, + And: and, + AndNot: and_not, + Or: or, + Xor: xor, + NotEq: not_eq, + NotAnd: not_and, + NotOr: not_or, + Lt: lt, + Le: le, + NotLt: not_lt, + NotLe: not_le, + + BitAnd: bit_and, + BitOr: bit_or, + BitXor: bit_xor, + + Branch: branch, + BranchTable: branch_table, + BranchTableSpan: branch_table_span, + Select: select, + Store8: store8, + Store16: store16, + Store32: store32, + Store64: store64, + Store128: store128, + Load8: load8, + Load16: load16, + Load32: load32, + Load64: load64, + Load: load, + Load8x8: load8x8, + Load16x4: load16x4, + Load32x2: load32x2, + Load8Splat: load8_splat, + Load16Splat: load16_splat, + Load32Splat: load32_splat, + Load64Splat: load64_splat, + Load32Zero: load32_zero, + Load64Zero: load64_zero, + + Copy: copy, + Copy32: copy32, + Copy64: copy64, + CopySpan: copy_span, + + Table: table, + Memory: memory, + Func: func, + FuncType: func_type, + Global: global, + Elem: elem, + Data: data, + Trap: trap, + + CallInternal: call_internal, + CallImported: call_imported, + CallIndirect: call_indirect, + ReturnCallInternal: return_call_internal, + ReturnCallImported: return_call_imported, + ReturnCallIndirect: return_call_indirect, + + I32: i32, + I64: i64, + + Clz: clz, + Ctz: ctz, + Popcnt: popcnt, + Wrap: wrap, + Sext8: sext8, + Sext16: sext16, + Sext32: sext32, + Abs: abs, + Neg: neg, + Ceil: ceil, + Floor: floor, + Trunc: trunc, + TruncSat: trunc_sat, + Nearest: nearest, + Sqrt: sqrt, + Demote: demote, + Promote: promote, + Convert: convert, + + Offset: offset, + TrapCode: trap_code, + ConsumeFuel: consume_fuel, + Fuel: fuel, + Return: r#return, + Return32: return32, + Return64: return64, + ReturnSlot: return_slot, + ReturnSpan: return_span, + Values: values, + Value: value, + Result: result, + Results: results, + Len: len, + LenTargets: len_targets, + LenValues: len_values, + Delta: delta, + Dst: dst, + Src: src, + Index: index, + DstMemory: dst_memory, + SrcMemory: src_memory, + DstTable: dst_table, + SrcTable: src_table, + TableGet: table_get, + TableSet: table_set, + TableSize: table_size, + TableGrow: table_grow, + TableCopy: table_copy, + TableFill: table_fill, + TableInit: table_init, + ElemDrop: elem_drop, + DataDrop: data_drop, + MemoryGrow: memory_grow, + MemorySize: memory_size, + MemoryCopy: memory_copy, + MemoryFill: memory_fill, + MemoryInit: memory_init, + GlobalGet: global_get, + GlobalSet:
global_set, + GlobalSet32: global_set32, + GlobalSet64: global_set64, + RefFunc: ref_func, + Mem0: mem0, + Offset16: offset16, + + I64Add128: i64_add128, + I64Sub128: i64_sub128, + S64MulWide: s64_mul_wide, + U64MulWide: u64_mul_wide, + + Lhs: lhs, + Rhs: rhs, + LhsLo: lhs_lo, + LhsHi: lhs_hi, + RhsLo: rhs_lo, + RhsHi: rhs_hi, + Ptr: ptr, + ValTrue: val_true, + ValFalse: val_false, + + Copy128: copy128, + ValueLo: value_lo, + ValueHi: value_hi, + Selector: selector, + + V128: v128, + Lane: lane, + Splat: splat, + S8x16ExtractLane: s8x16_extract_lane, + U8x16ExtractLane: u8x16_extract_lane, + S16x8ExtractLane: s16x8_extract_lane, + U16x8ExtractLane: u16x8_extract_lane, + U32x4ExtractLane: u32x4_extract_lane, + U64x2ExtractLane: u64x2_extract_lane, + ReplaceLane: replace_lane, + LoadLane: load_lane, + Swizzle: swizzle, + I8x16Shuffle: i8x16_shuffle, + Q15MulrSat: q15_mulr_sat, + NarrowI16x8: narrow_i16x8, + NarrowI32x4: narrow_i32x4, + ExtmulLowI8x16: extmul_low_i8x16, + ExtmulHighI8x16: extmul_high_i8x16, + ExtmulLowI16x8: extmul_low_i16x8, + ExtmulHighI16x8: extmul_high_i16x8, + ExtmulLowI32x4: extmul_low_i32x4, + ExtmulHighI32x4: extmul_high_i32x4, + Not: not, + AnyTrue: any_true, + DotI16x8: dot_i16x8, + AllTrue: all_true, + Bitmask: bitmask, + ExtaddPairwise: extadd_pairwise, + ExtendLow: extend_low, + ExtendHigh: extend_high, + DemoteZero: demote_zero, + PromoteLow: promote_low, + TruncSatZero: trunc_sat_zero, + ConvertLow: convert_low, + Store8Lane: store8_lane, + Store16Lane: store16_lane, + Store32Lane: store32_lane, + Store64Lane: store64_lane, + + RelaxedDotI8x16I7x16: relaxed_dot_i8x16_i7x16, + RelaxedDotI8x16I7x16Add: relaxed_dot_i8x16_i7x16_add, + RelaxedMadd: relaxed_madd, + RelaxedNmadd: relaxed_nmadd, +); diff --git a/crates/ir2/build/isa.rs b/crates/ir2/build/isa.rs new file mode 100644 index 0000000000..0751466f61 --- /dev/null +++ b/crates/ir2/build/isa.rs @@ -0,0 +1,1145 @@ +use crate::build::{ + ident::Ident, + op::{ + BinaryOp, + BinaryOpKind, + CmpBranchOp, + CmpOpKind, + CmpSelectOp, + Commutativity, + Field, + FieldTy, + GenericOp, + LaneWidth, + LoadOp, + LoadOpKind, + OperandKind, + StoreOp, + StoreOpKind, + TableGetOp, + TableSetOp, + Ty, + UnaryOp, + UnaryOpKind, + V128LoadLaneOp, + V128ReplaceLaneOp, + }, + Config, + Op, +}; + +#[derive(Default)] +pub struct Isa { + pub ops: Vec<Op>, +} + +impl Isa { + fn push_op(&mut self, op: impl Into<Op>) { + self.ops.push(op.into()); + } + + fn push_ops(&mut self, ops: impl IntoIterator<Item = Op>) { + for op in ops { + self.ops.push(op); + } + } +} + +pub fn wasmi_isa(config: &Config) -> Isa { + let mut isa = Isa::default(); + isa.ops.reserve_exact(500); + add_unary_ops(&mut isa); + add_binary_ops(&mut isa); + add_cmp_branch_ops(&mut isa); + add_cmp_select_ops(&mut isa); + add_load_ops(&mut isa); + add_store_ops(&mut isa); + add_control_ops(&mut isa); + add_copy_ops(&mut isa); + add_call_ops(&mut isa); + add_global_ops(&mut isa); + add_memory_ops(&mut isa); + add_table_ops(&mut isa); + add_wide_arithmetic_ops(&mut isa); + add_simd_ops(&mut isa, config); + isa +} + +fn add_unary_ops(isa: &mut Isa) { + let ops = [ + // i32 + UnaryOpKind::I32Clz, + UnaryOpKind::I32Ctz, + UnaryOpKind::I32Popcnt, + UnaryOpKind::I32Sext8, + UnaryOpKind::I32Sext16, + UnaryOpKind::I32WrapI64, + // i64 + UnaryOpKind::I64Clz, + UnaryOpKind::I64Ctz, + UnaryOpKind::I64Popcnt, + UnaryOpKind::I64Sext8, + UnaryOpKind::I64Sext16, + UnaryOpKind::I64Sext32, + // f32 + UnaryOpKind::F32Abs, + UnaryOpKind::F32Neg, + UnaryOpKind::F32Ceil, + UnaryOpKind::F32Floor, +
UnaryOpKind::F32Trunc, + UnaryOpKind::F32Nearest, + UnaryOpKind::F32Sqrt, + UnaryOpKind::F32ConvertS32, + UnaryOpKind::F32ConvertU32, + UnaryOpKind::F32ConvertS64, + UnaryOpKind::F32ConvertU64, + UnaryOpKind::F32DemoteF64, + // f64 + UnaryOpKind::F64Abs, + UnaryOpKind::F64Neg, + UnaryOpKind::F64Ceil, + UnaryOpKind::F64Floor, + UnaryOpKind::F64Trunc, + UnaryOpKind::F64Nearest, + UnaryOpKind::F64Sqrt, + UnaryOpKind::F64ConvertS32, + UnaryOpKind::F64ConvertU32, + UnaryOpKind::F64ConvertS64, + UnaryOpKind::F64ConvertU64, + UnaryOpKind::F64PromoteF32, + // f2i conversions + UnaryOpKind::S32TruncF32, + UnaryOpKind::U32TruncF32, + UnaryOpKind::S32TruncF64, + UnaryOpKind::U32TruncF64, + UnaryOpKind::S64TruncF32, + UnaryOpKind::U64TruncF32, + UnaryOpKind::S64TruncF64, + UnaryOpKind::U64TruncF64, + UnaryOpKind::S32TruncSatF32, + UnaryOpKind::U32TruncSatF32, + UnaryOpKind::S32TruncSatF64, + UnaryOpKind::U32TruncSatF64, + UnaryOpKind::S64TruncSatF32, + UnaryOpKind::U64TruncSatF32, + UnaryOpKind::S64TruncSatF64, + UnaryOpKind::U64TruncSatF64, + ]; + for op in ops { + isa.push_op(UnaryOp::new(op, OperandKind::Slot)); + } +} + +fn add_binary_ops(isa: &mut Isa) { + let ops = [ + // comparisons: i32 + BinaryOpKind::Cmp(CmpOpKind::I32Eq), + BinaryOpKind::Cmp(CmpOpKind::I32And), + BinaryOpKind::Cmp(CmpOpKind::I32Or), + BinaryOpKind::Cmp(CmpOpKind::I32NotEq), + BinaryOpKind::Cmp(CmpOpKind::I32NotAnd), + BinaryOpKind::Cmp(CmpOpKind::I32NotOr), + BinaryOpKind::Cmp(CmpOpKind::S32Lt), + BinaryOpKind::Cmp(CmpOpKind::S32Le), + BinaryOpKind::Cmp(CmpOpKind::U32Lt), + BinaryOpKind::Cmp(CmpOpKind::U32Le), + // comparisons: i64 + BinaryOpKind::Cmp(CmpOpKind::I64Eq), + BinaryOpKind::Cmp(CmpOpKind::I64And), + BinaryOpKind::Cmp(CmpOpKind::I64Or), + BinaryOpKind::Cmp(CmpOpKind::I64NotEq), + BinaryOpKind::Cmp(CmpOpKind::I64NotAnd), + BinaryOpKind::Cmp(CmpOpKind::I64NotOr), + BinaryOpKind::Cmp(CmpOpKind::S64Lt), + BinaryOpKind::Cmp(CmpOpKind::S64Le), + BinaryOpKind::Cmp(CmpOpKind::U64Lt), + BinaryOpKind::Cmp(CmpOpKind::U64Le), + // comparisons: f32 + BinaryOpKind::Cmp(CmpOpKind::F32Eq), + BinaryOpKind::Cmp(CmpOpKind::F32Lt), + BinaryOpKind::Cmp(CmpOpKind::F32Le), + BinaryOpKind::Cmp(CmpOpKind::F32NotEq), + BinaryOpKind::Cmp(CmpOpKind::F32NotLt), + BinaryOpKind::Cmp(CmpOpKind::F32NotLe), + // comparisons: f64 + BinaryOpKind::Cmp(CmpOpKind::F64Eq), + BinaryOpKind::Cmp(CmpOpKind::F64Lt), + BinaryOpKind::Cmp(CmpOpKind::F64Le), + BinaryOpKind::Cmp(CmpOpKind::F64NotEq), + BinaryOpKind::Cmp(CmpOpKind::F64NotLt), + BinaryOpKind::Cmp(CmpOpKind::F64NotLe), + // i32 + BinaryOpKind::I32Add, + BinaryOpKind::I32Sub, + BinaryOpKind::I32Mul, + BinaryOpKind::S32Div, + BinaryOpKind::U32Div, + BinaryOpKind::S32Rem, + BinaryOpKind::U32Rem, + BinaryOpKind::I32BitAnd, + BinaryOpKind::I32BitOr, + BinaryOpKind::I32BitXor, + BinaryOpKind::I32Shl, + BinaryOpKind::S32Shr, + BinaryOpKind::U32Shr, + BinaryOpKind::I32Rotl, + BinaryOpKind::I32Rotr, + // i64 + BinaryOpKind::I64Add, + BinaryOpKind::I64Sub, + BinaryOpKind::I64Mul, + BinaryOpKind::S64Div, + BinaryOpKind::U64Div, + BinaryOpKind::S64Rem, + BinaryOpKind::U64Rem, + BinaryOpKind::I64BitAnd, + BinaryOpKind::I64BitOr, + BinaryOpKind::I64BitXor, + BinaryOpKind::I64Shl, + BinaryOpKind::S64Shr, + BinaryOpKind::U64Shr, + BinaryOpKind::I64Rotl, + BinaryOpKind::I64Rotr, + // f32 + BinaryOpKind::F32Add, + BinaryOpKind::F32Sub, + BinaryOpKind::F32Mul, + BinaryOpKind::F32Div, + BinaryOpKind::F32Min, + BinaryOpKind::F32Max, + BinaryOpKind::F32Copysign, + // f64 + BinaryOpKind::F64Add, + BinaryOpKind::F64Sub, + 
BinaryOpKind::F64Mul, + BinaryOpKind::F64Div, + BinaryOpKind::F64Min, + BinaryOpKind::F64Max, + BinaryOpKind::F64Copysign, + ]; + for op in ops { + isa.push_op(BinaryOp::new(op, OperandKind::Slot, OperandKind::Slot)); + isa.push_op(BinaryOp::new(op, OperandKind::Slot, OperandKind::Immediate)); + if matches!(op.commutativity(), Commutativity::NonCommutative) { + isa.push_op(BinaryOp::new(op, OperandKind::Immediate, OperandKind::Slot)); + } + } +} + +fn add_cmp_branch_ops(isa: &mut Isa) { + let ops = [ + // i32 + CmpOpKind::I32Eq, + CmpOpKind::I32NotEq, + CmpOpKind::I32And, + CmpOpKind::I32NotAnd, + CmpOpKind::I32Or, + CmpOpKind::I32NotOr, + CmpOpKind::S32Lt, + CmpOpKind::S32Le, + CmpOpKind::U32Lt, + CmpOpKind::U32Le, + // i64 + CmpOpKind::I64Eq, + CmpOpKind::I64NotEq, + CmpOpKind::I64And, + CmpOpKind::I64NotAnd, + CmpOpKind::I64Or, + CmpOpKind::I64NotOr, + CmpOpKind::S64Lt, + CmpOpKind::S64Le, + CmpOpKind::U64Lt, + CmpOpKind::U64Le, + // f32 + CmpOpKind::F32Eq, + CmpOpKind::F32NotEq, + CmpOpKind::F32Lt, + CmpOpKind::F32NotLt, + CmpOpKind::F32Le, + CmpOpKind::F32NotLe, + // f64 + CmpOpKind::F64Eq, + CmpOpKind::F64NotEq, + CmpOpKind::F64Lt, + CmpOpKind::F64NotLt, + CmpOpKind::F64Le, + CmpOpKind::F64NotLe, + ]; + for op in ops { + isa.push_op(CmpBranchOp::new(op, OperandKind::Slot, OperandKind::Slot)); + isa.push_op(CmpBranchOp::new( + op, + OperandKind::Slot, + OperandKind::Immediate, + )); + if matches!(op.commutativity(), Commutativity::NonCommutative) { + isa.push_op(CmpBranchOp::new( + op, + OperandKind::Immediate, + OperandKind::Slot, + )); + } + } +} + +fn add_cmp_select_ops(isa: &mut Isa) { + let ops = [ + // i32 + CmpOpKind::I32Eq, + CmpOpKind::I32And, + CmpOpKind::I32Or, + CmpOpKind::S32Lt, + CmpOpKind::S32Le, + CmpOpKind::U32Lt, + CmpOpKind::U32Le, + // i64 + CmpOpKind::I64Eq, + CmpOpKind::I64And, + CmpOpKind::I64Or, + CmpOpKind::S64Lt, + CmpOpKind::S64Le, + CmpOpKind::U64Lt, + CmpOpKind::U64Le, + // f32 + CmpOpKind::F32Eq, + CmpOpKind::F32Lt, + CmpOpKind::F32Le, + // f64 + CmpOpKind::F64Eq, + CmpOpKind::F64Lt, + CmpOpKind::F64Le, + ]; + for op in ops { + isa.push_op(CmpSelectOp::new(op, OperandKind::Slot, OperandKind::Slot)); + isa.push_op(CmpSelectOp::new( + op, + OperandKind::Slot, + OperandKind::Immediate, + )); + let is_float_op = matches!(op.ident_prefix(), Ty::F32 | Ty::F64); + let is_non_commutative = matches!(op.commutativity(), Commutativity::NonCommutative); + if is_non_commutative && is_float_op { + // Integer ops with `lhs` immediate can be replaced with integer ops with `rhs` immediate + // by also swapping the `val_true` and `val_false` operands and the comparison operator. + // For example, the following `select` expressions are the same: + // - `if 5 < x then 10 else 20` + // - `if x <= 5 then 20 else 10` + // Float ops cannot be simplified the same way due to NaN value behavior.
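+ // Worked example (illustrative): for `x = NaN`, both `5.0 < x` and + // `x <= 5.0` evaluate to `false`, so rewriting `if 5.0 < x then A else B` + // as `if x <= 5.0 then B else A` would wrongly select `A` instead of `B`.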
+ isa.push_op(CmpSelectOp::new( + op, + OperandKind::Immediate, + OperandKind::Slot, + )); + } + } +} + +fn add_load_ops(isa: &mut Isa) { + let ops = [ + // Generic + LoadOpKind::Load32, + LoadOpKind::Load64, + // i32 + LoadOpKind::S32Load8, + LoadOpKind::S32Load16, + LoadOpKind::U32Load8, + LoadOpKind::U32Load16, + // i64 + LoadOpKind::S64Load8, + LoadOpKind::S64Load16, + LoadOpKind::S64Load32, + LoadOpKind::U64Load8, + LoadOpKind::U64Load16, + LoadOpKind::U64Load32, + ]; + for op in ops { + isa.push_op(LoadOp::new(op, OperandKind::Slot, false, false)); + isa.push_op(LoadOp::new(op, OperandKind::Immediate, false, false)); + isa.push_op(LoadOp::new(op, OperandKind::Slot, true, true)); + } +} + +fn add_store_ops(isa: &mut Isa) { + let ops = [ + // Generic + StoreOpKind::Store32, + StoreOpKind::Store64, + // i32 + StoreOpKind::I32Store8, + StoreOpKind::I32Store16, + // i64 + StoreOpKind::I64Store8, + StoreOpKind::I64Store16, + StoreOpKind::I64Store32, + ]; + for op in ops { + isa.push_op(StoreOp::new( + op, + OperandKind::Slot, + OperandKind::Slot, + false, + false, + )); + isa.push_op(StoreOp::new( + op, + OperandKind::Slot, + OperandKind::Immediate, + false, + false, + )); + isa.push_op(StoreOp::new( + op, + OperandKind::Immediate, + OperandKind::Slot, + false, + false, + )); + isa.push_op(StoreOp::new( + op, + OperandKind::Slot, + OperandKind::Slot, + true, + true, + )); + isa.push_op(StoreOp::new( + op, + OperandKind::Slot, + OperandKind::Immediate, + true, + true, + )); + } +} + +fn add_control_ops(isa: &mut Isa) { + let ops = [ + Op::from(GenericOp::new( + Ident::Trap, + [Field::new(Ident::TrapCode, FieldTy::TrapCode)], + )), + Op::from(GenericOp::new( + Ident::ConsumeFuel, + [Field::new(Ident::Fuel, FieldTy::BlockFuel)], + )), + Op::from(GenericOp::new(Ident::Return, [])), + Op::from(GenericOp::new( + Ident::ReturnSlot, + [Field::new(Ident::Value, FieldTy::Slot)], + )), + Op::from(GenericOp::new( + Ident::Return32, + [Field::new(Ident::Value, FieldTy::U32)], + )), + Op::from(GenericOp::new( + Ident::Return64, + [Field::new(Ident::Value, FieldTy::U64)], + )), + Op::from(GenericOp::new( + Ident::ReturnSpan, + [Field::new(Ident::Values, FieldTy::SlotSpan)], + )), + Op::from(GenericOp::new( + Ident::Branch, + [Field::new(Ident::Offset, FieldTy::BranchOffset)], + )), + Op::from(GenericOp::new( + Ident::BranchTable, + [ + Field::new(Ident::Index, FieldTy::Slot), + Field::new(Ident::LenTargets, FieldTy::U16), + ], + )), + Op::from(GenericOp::new( + Ident::BranchTableSpan, + [ + Field::new(Ident::Index, FieldTy::Slot), + Field::new(Ident::LenTargets, FieldTy::U16), + Field::new(Ident::Values, FieldTy::SlotSpan), + Field::new(Ident::LenValues, FieldTy::U16), + ], + )), + ]; + isa.push_ops(ops); +} + +fn add_copy_ops(isa: &mut Isa) { + let ops = [ + Op::from(GenericOp::new( + Ident::Copy, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::Copy32, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::U32), + ], + )), + Op::from(GenericOp::new( + Ident::Copy64, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::U64), + ], + )), + Op::from(GenericOp::new( + Ident::CopySpan, + [ + Field::new(Ident::Results, FieldTy::SlotSpan), + Field::new(Ident::Values, FieldTy::SlotSpan), + Field::new(Ident::Len, FieldTy::U16), + ], + )), + ]; + isa.push_ops(ops); +} + +fn add_call_ops(isa: &mut Isa) { + let ops = [ + Op::from(GenericOp::new( + Ident::RefFunc, + [ + 
Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Func, FieldTy::Func), + ], + )), + Op::from(GenericOp::new( + Ident::CallInternal, + [ + Field::new(Ident::Results, FieldTy::SlotSpan), + Field::new(Ident::Func, FieldTy::InternalFunc), + ], + )), + Op::from(GenericOp::new( + Ident::CallImported, + [ + Field::new(Ident::Results, FieldTy::SlotSpan), + Field::new(Ident::Func, FieldTy::Func), + ], + )), + Op::from(GenericOp::new( + Ident::CallIndirect, + [ + Field::new(Ident::Results, FieldTy::SlotSpan), + Field::new(Ident::Index, FieldTy::Slot), + Field::new(Ident::FuncType, FieldTy::FuncType), + Field::new(Ident::Table, FieldTy::Table), + ], + )), + Op::from(GenericOp::new( + Ident::ReturnCallInternal, + [Field::new(Ident::Func, FieldTy::InternalFunc)], + )), + Op::from(GenericOp::new( + Ident::ReturnCallImported, + [Field::new(Ident::Func, FieldTy::Func)], + )), + Op::from(GenericOp::new( + Ident::ReturnCallIndirect, + [ + Field::new(Ident::Index, FieldTy::Slot), + Field::new(Ident::FuncType, FieldTy::FuncType), + Field::new(Ident::Table, FieldTy::Table), + ], + )), + ]; + isa.push_ops(ops); +} + +fn add_global_ops(isa: &mut Isa) { + let ops = [ + Op::from(GenericOp::new( + Ident::GlobalGet, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Global, FieldTy::Global), + ], + )), + Op::from(GenericOp::new( + Ident::GlobalSet, + [ + Field::new(Ident::Global, FieldTy::Global), + Field::new(Ident::Value, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::GlobalSet32, + [ + Field::new(Ident::Global, FieldTy::Global), + Field::new(Ident::Value, FieldTy::U32), + ], + )), + Op::from(GenericOp::new( + Ident::GlobalSet64, + [ + Field::new(Ident::Global, FieldTy::Global), + Field::new(Ident::Value, FieldTy::U64), + ], + )), + ]; + isa.push_ops(ops); +} + +fn add_table_ops(isa: &mut Isa) { + let ops = [ + Op::TableGet(TableGetOp::new(OperandKind::Slot)), + Op::TableGet(TableGetOp::new(OperandKind::Immediate)), + Op::TableSet(TableSetOp::new(OperandKind::Slot, OperandKind::Slot)), + Op::TableSet(TableSetOp::new(OperandKind::Slot, OperandKind::Immediate)), + Op::TableSet(TableSetOp::new(OperandKind::Immediate, OperandKind::Slot)), + Op::TableSet(TableSetOp::new( + OperandKind::Immediate, + OperandKind::Immediate, + )), + Op::from(GenericOp::new( + Ident::TableSize, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Table, FieldTy::Table), + ], + )), + Op::from(GenericOp::new( + Ident::TableGrow, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Delta, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + Field::new(Ident::Table, FieldTy::Table), + ], + )), + Op::from(GenericOp::new( + Ident::TableCopy, + [ + Field::new(Ident::DstTable, FieldTy::Table), + Field::new(Ident::SrcTable, FieldTy::Table), + Field::new(Ident::Dst, FieldTy::Slot), + Field::new(Ident::Src, FieldTy::Slot), + Field::new(Ident::Len, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::TableFill, + [ + Field::new(Ident::Table, FieldTy::Table), + Field::new(Ident::Dst, FieldTy::Slot), + Field::new(Ident::Len, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::TableInit, + [ + Field::new(Ident::Table, FieldTy::Table), + Field::new(Ident::Elem, FieldTy::Elem), + Field::new(Ident::Dst, FieldTy::Slot), + Field::new(Ident::Src, FieldTy::Slot), + Field::new(Ident::Len, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::ElemDrop, + [Field::new(Ident::Elem, FieldTy::Elem)], + )), + ]; + 
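// Note: `OpCode` discriminants follow push order (the generated `#[repr(u16)]` + // enum emits variants in `Isa::ops` order), so appending new ops at the end keeps + // existing op-codes stable. This is an observation about the generator above, not + // a documented guarantee. +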
isa.push_ops(ops); +} + +fn add_memory_ops(isa: &mut Isa) { + let ops = [ + Op::from(GenericOp::new( + Ident::DataDrop, + [Field::new(Ident::Data, FieldTy::Data)], + )), + Op::from(GenericOp::new( + Ident::MemorySize, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Memory, FieldTy::Memory), + ], + )), + Op::from(GenericOp::new( + Ident::MemoryGrow, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Delta, FieldTy::Slot), + Field::new(Ident::Memory, FieldTy::Memory), + ], + )), + Op::from(GenericOp::new( + Ident::MemoryCopy, + [ + Field::new(Ident::DstMemory, FieldTy::Memory), + Field::new(Ident::SrcMemory, FieldTy::Memory), + Field::new(Ident::Dst, FieldTy::Slot), + Field::new(Ident::Src, FieldTy::Slot), + Field::new(Ident::Len, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::MemoryFill, + [ + Field::new(Ident::Memory, FieldTy::Memory), + Field::new(Ident::Dst, FieldTy::Slot), + Field::new(Ident::Len, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::MemoryInit, + [ + Field::new(Ident::Memory, FieldTy::Memory), + Field::new(Ident::Data, FieldTy::Data), + Field::new(Ident::Dst, FieldTy::Slot), + Field::new(Ident::Src, FieldTy::Slot), + Field::new(Ident::Len, FieldTy::Slot), + ], + )), + ]; + isa.push_ops(ops); +} + +fn add_wide_arithmetic_ops(isa: &mut Isa) { + let ops = [ + Op::from(GenericOp::new( + Ident::I64Add128, + [ + Field::new(Ident::Results, FieldTy::FixedSlotSpan2), + Field::new(Ident::LhsLo, FieldTy::Slot), + Field::new(Ident::LhsHi, FieldTy::Slot), + Field::new(Ident::RhsLo, FieldTy::Slot), + Field::new(Ident::RhsHi, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::I64Sub128, + [ + Field::new(Ident::Results, FieldTy::FixedSlotSpan2), + Field::new(Ident::LhsLo, FieldTy::Slot), + Field::new(Ident::LhsHi, FieldTy::Slot), + Field::new(Ident::RhsLo, FieldTy::Slot), + Field::new(Ident::RhsHi, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::S64MulWide, + [ + Field::new(Ident::Results, FieldTy::FixedSlotSpan2), + Field::new(Ident::Lhs, FieldTy::Slot), + Field::new(Ident::Rhs, FieldTy::Slot), + ], + )), + Op::from(GenericOp::new( + Ident::U64MulWide, + [ + Field::new(Ident::Results, FieldTy::FixedSlotSpan2), + Field::new(Ident::Lhs, FieldTy::Slot), + Field::new(Ident::Rhs, FieldTy::Slot), + ], + )), + ]; + isa.push_ops(ops); +} + +fn add_simd_ops(isa: &mut Isa, config: &Config) { + if !config.simd { + return; + } + isa.push_op(GenericOp::new( + Ident::Copy128, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::ValueLo, FieldTy::U64), + Field::new(Ident::ValueHi, FieldTy::U64), + ], + )); + isa.push_op(GenericOp::new( + Ident::I8x16Shuffle, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Lhs, FieldTy::Slot), + Field::new(Ident::Rhs, FieldTy::Slot), + Field::new(Ident::Selector, FieldTy::Array16ImmLaneIdx32), + ], + )); + add_simd_splat_ops(isa); + add_simd_extract_lane_ops(isa); + add_simd_replace_lane_ops(isa); + add_simd_binary_ops(isa); + add_simd_shift_ops(isa); + add_simd_unary_ops(isa); + add_simd_load_ops(isa); + add_simd_store_ops(isa); + add_relaxed_simd_ops(isa); +} + +fn add_simd_splat_ops(isa: &mut Isa) { + let kinds = [UnaryOpKind::V128Splat32, UnaryOpKind::V128Splat64]; + for kind in kinds { + isa.push_op(UnaryOp::new(kind, OperandKind::Slot)); + isa.push_op(UnaryOp::new(kind, OperandKind::Immediate)); + } +} + +fn add_simd_extract_lane_ops(isa: &mut Isa) { + let ops = [ + Op::from(GenericOp::new( + 
Ident::S8x16ExtractLane, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + Field::new(Ident::Lane, FieldTy::ImmLaneIdx16), + ], + )), + Op::from(GenericOp::new( + Ident::U8x16ExtractLane, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + Field::new(Ident::Lane, FieldTy::ImmLaneIdx16), + ], + )), + Op::from(GenericOp::new( + Ident::S16x8ExtractLane, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + Field::new(Ident::Lane, FieldTy::ImmLaneIdx8), + ], + )), + Op::from(GenericOp::new( + Ident::U16x8ExtractLane, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + Field::new(Ident::Lane, FieldTy::ImmLaneIdx8), + ], + )), + Op::from(GenericOp::new( + Ident::U32x4ExtractLane, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + Field::new(Ident::Lane, FieldTy::ImmLaneIdx4), + ], + )), + Op::from(GenericOp::new( + Ident::U64x2ExtractLane, + [ + Field::new(Ident::Result, FieldTy::Slot), + Field::new(Ident::Value, FieldTy::Slot), + Field::new(Ident::Lane, FieldTy::ImmLaneIdx2), + ], + )), + ]; + isa.push_ops(ops); +} + +fn add_simd_replace_lane_ops(isa: &mut Isa) { + let widths = [ + LaneWidth::W8, + LaneWidth::W16, + LaneWidth::W32, + LaneWidth::W64, + ]; + for width in widths { + isa.push_op(V128ReplaceLaneOp::new(width, OperandKind::Slot)); + isa.push_op(V128ReplaceLaneOp::new(width, OperandKind::Immediate)); + } +} + +fn add_simd_binary_ops(isa: &mut Isa) { + let kinds = [ + // Miscellaneous + BinaryOpKind::I8x16Swizzle, + // Integer Comparisons + BinaryOpKind::I8x16Eq, + BinaryOpKind::I8x16NotEq, + BinaryOpKind::I16x8Eq, + BinaryOpKind::I16x8NotEq, + BinaryOpKind::I32x4Eq, + BinaryOpKind::I32x4NotEq, + BinaryOpKind::I64x2Eq, + BinaryOpKind::I64x2NotEq, + BinaryOpKind::S8x16Lt, + BinaryOpKind::S8x16Le, + BinaryOpKind::S16x8Lt, + BinaryOpKind::S16x8Le, + BinaryOpKind::S32x4Lt, + BinaryOpKind::S32x4Le, + BinaryOpKind::S64x2Lt, + BinaryOpKind::S64x2Le, + BinaryOpKind::U8x16Lt, + BinaryOpKind::U8x16Le, + BinaryOpKind::U16x8Lt, + BinaryOpKind::U16x8Le, + BinaryOpKind::U32x4Lt, + BinaryOpKind::U32x4Le, + BinaryOpKind::U64x2Lt, + BinaryOpKind::U64x2Le, + // Float Comparisons + BinaryOpKind::F32x4Eq, + BinaryOpKind::F32x4NotEq, + BinaryOpKind::F32x4Lt, + BinaryOpKind::F32x4Le, + BinaryOpKind::F64x2Eq, + BinaryOpKind::F64x2NotEq, + BinaryOpKind::F64x2Lt, + BinaryOpKind::F64x2Le, + // Bitwise + BinaryOpKind::V128And, + BinaryOpKind::V128AndNot, + BinaryOpKind::V128Or, + BinaryOpKind::V128Xor, + // i8x16 Ops + BinaryOpKind::S8x16NarrowI16x8, + BinaryOpKind::U8x16NarrowI16x8, + BinaryOpKind::I8x16Add, + BinaryOpKind::S8x16AddSat, + BinaryOpKind::U8x16AddSat, + BinaryOpKind::I8x16Sub, + BinaryOpKind::S8x16SubSat, + BinaryOpKind::U8x16SubSat, + BinaryOpKind::S8x16Min, + BinaryOpKind::U8x16Min, + BinaryOpKind::S8x16Max, + BinaryOpKind::U8x16Max, + BinaryOpKind::U8x16Avgr, + // i16x8 Ops + BinaryOpKind::S16x8Q15MulrSat, + BinaryOpKind::S16x8NarrowI32x4, + BinaryOpKind::U16x8NarrowI32x4, + BinaryOpKind::S16x8ExtmulLowI8x16, + BinaryOpKind::U16x8ExtmulLowI8x16, + BinaryOpKind::S16x8ExtmulHighI8x16, + BinaryOpKind::U16x8ExtmulHighI8x16, + BinaryOpKind::I16x8Add, + BinaryOpKind::S16x8AddSat, + BinaryOpKind::U16x8AddSat, + BinaryOpKind::I16x8Sub, + BinaryOpKind::S16x8SubSat, + BinaryOpKind::U16x8SubSat, + BinaryOpKind::I16x8Mul, + BinaryOpKind::S16x8Min, + BinaryOpKind::U16x8Min, + BinaryOpKind::S16x8Max, + 
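// (Naming note: `I*` kinds are sign-agnostic, while `S*`/`U*` pairs share an
+        // op name and differ only in their signed vs. unsigned `ident_prefix` —
+        // see `BinaryOpKind::ident_prefix` further below.)
+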
BinaryOpKind::U16x8Max, + BinaryOpKind::U16x8Avgr, + // i32x4 Ops + BinaryOpKind::I32x4Add, + BinaryOpKind::I32x4Sub, + BinaryOpKind::I32x4Mul, + BinaryOpKind::S32x4Min, + BinaryOpKind::U32x4Min, + BinaryOpKind::S32x4Max, + BinaryOpKind::U32x4Max, + BinaryOpKind::S32x4DotI16x8, + BinaryOpKind::S32x4ExtmulLowI16x8, + BinaryOpKind::U32x4ExtmulLowI16x8, + BinaryOpKind::S32x4ExtmulHighI16x8, + BinaryOpKind::U32x4ExtmulHighI16x8, + // i64x2 Ops + BinaryOpKind::I64x2Add, + BinaryOpKind::I64x2Sub, + BinaryOpKind::I64x2Mul, + BinaryOpKind::S64x2ExtmulLowI32x4, + BinaryOpKind::U64x2ExtmulLowI32x4, + BinaryOpKind::S64x2ExtmulHighI32x4, + BinaryOpKind::U64x2ExtmulHighI32x4, + // f32x4 Ops + BinaryOpKind::F32x4Add, + BinaryOpKind::F32x4Sub, + BinaryOpKind::F32x4Mul, + BinaryOpKind::F32x4Div, + BinaryOpKind::F32x4Min, + BinaryOpKind::F32x4Max, + BinaryOpKind::F32x4Pmin, + BinaryOpKind::F32x4Pmax, + // f64x2 Ops + BinaryOpKind::F64x2Add, + BinaryOpKind::F64x2Sub, + BinaryOpKind::F64x2Mul, + BinaryOpKind::F64x2Div, + BinaryOpKind::F64x2Min, + BinaryOpKind::F64x2Max, + BinaryOpKind::F64x2Pmin, + BinaryOpKind::F64x2Pmax, + ]; + for kind in kinds { + isa.push_op(BinaryOp::new(kind, OperandKind::Slot, OperandKind::Slot)); + } +} + +fn add_simd_shift_ops(isa: &mut Isa) { + let kinds = [ + BinaryOpKind::I8x16Shl, + BinaryOpKind::S8x16Shr, + BinaryOpKind::U8x16Shr, + BinaryOpKind::I16x8Shl, + BinaryOpKind::S16x8Shr, + BinaryOpKind::U16x8Shr, + BinaryOpKind::I32x4Shl, + BinaryOpKind::S32x4Shr, + BinaryOpKind::U32x4Shr, + BinaryOpKind::I64x2Shl, + BinaryOpKind::S64x2Shr, + BinaryOpKind::U64x2Shr, + ]; + for kind in kinds { + isa.push_op(BinaryOp::new(kind, OperandKind::Slot, OperandKind::Slot)); + isa.push_op(BinaryOp::new( + kind, + OperandKind::Slot, + OperandKind::Immediate, + )); + } +} + +fn add_simd_unary_ops(isa: &mut Isa) { + let kinds = [ + // SIMD: Generic Unary Ops + UnaryOpKind::V128Not, + UnaryOpKind::V128AnyTrue, + // SIMD: `i8x16` Unary Ops + UnaryOpKind::I8x16Abs, + UnaryOpKind::I8x16Neg, + UnaryOpKind::I8x16Popcnt, + UnaryOpKind::I8x16AllTrue, + UnaryOpKind::I8x16Bitmask, + // SIMD: `i16x8` Unary Ops + UnaryOpKind::I16x8Abs, + UnaryOpKind::I16x8Neg, + UnaryOpKind::I16x8AllTrue, + UnaryOpKind::I16x8Bitmask, + UnaryOpKind::S16x8ExtaddPairwiseI8x16, + UnaryOpKind::U16x8ExtaddPairwiseI8x16, + UnaryOpKind::S16x8ExtendLowI8x16, + UnaryOpKind::U16x8ExtendLowI8x16, + UnaryOpKind::S16x8ExtendHighI8x16, + UnaryOpKind::U16x8ExtendHighI8x16, + // SIMD: `i32x4` Unary Ops + UnaryOpKind::I32x4Abs, + UnaryOpKind::I32x4Neg, + UnaryOpKind::I32x4AllTrue, + UnaryOpKind::I32x4Bitmask, + UnaryOpKind::S32x4ExtaddPairwiseI16x8, + UnaryOpKind::U32x4ExtaddPairwiseI16x8, + UnaryOpKind::S32x4ExtendLowI16x8, + UnaryOpKind::U32x4ExtendLowI16x8, + UnaryOpKind::S32x4ExtendHighI16x8, + UnaryOpKind::U32x4ExtendHighI16x8, + // SIMD: `i64x2` Unary Ops + UnaryOpKind::I64x2Abs, + UnaryOpKind::I64x2Neg, + UnaryOpKind::I64x2AllTrue, + UnaryOpKind::I64x2Bitmask, + UnaryOpKind::S64x2ExtendLowI32x4, + UnaryOpKind::U64x2ExtendLowI32x4, + UnaryOpKind::S64x2ExtendHighI32x4, + UnaryOpKind::U64x2ExtendHighI32x4, + // SIMD: `f32x4` Unary Ops + UnaryOpKind::F32x4DemoteZeroF64x2, + UnaryOpKind::F32x4Ceil, + UnaryOpKind::F32x4Floor, + UnaryOpKind::F32x4Trunc, + UnaryOpKind::F32x4Nearest, + UnaryOpKind::F32x4Abs, + UnaryOpKind::F32x4Neg, + UnaryOpKind::F32x4Sqrt, + // SIMD: `f64x2` Unary Ops + UnaryOpKind::F64x2PromoteLowF32x4, + UnaryOpKind::F64x2Ceil, + UnaryOpKind::F64x2Floor, + UnaryOpKind::F64x2Trunc, + UnaryOpKind::F64x2Nearest, + 
UnaryOpKind::F64x2Abs,
+        UnaryOpKind::F64x2Neg,
+        UnaryOpKind::F64x2Sqrt,
+        // SIMD: Conversions
+        UnaryOpKind::S32x4TruncSatF32x4,
+        UnaryOpKind::U32x4TruncSatF32x4,
+        UnaryOpKind::S32x4TruncSatZeroF64x2,
+        UnaryOpKind::U32x4TruncSatZeroF64x2,
+        UnaryOpKind::F32x4ConvertS32x4,
+        UnaryOpKind::F32x4ConvertU32x4,
+        UnaryOpKind::F64x2ConvertLowS32x4,
+        UnaryOpKind::F64x2ConvertLowU32x4,
+    ];
+    for kind in kinds {
+        isa.push_op(UnaryOp::new(kind, OperandKind::Slot));
+    }
+}
+
+fn add_simd_load_ops(isa: &mut Isa) {
+    let ops = [
+        LoadOpKind::V128Load,
+        LoadOpKind::S16x8Load8x8,
+        LoadOpKind::U16x8Load8x8,
+        LoadOpKind::S32x4Load16x4,
+        LoadOpKind::U32x4Load16x4,
+        LoadOpKind::S64x2Load32x2,
+        LoadOpKind::U64x2Load32x2,
+        LoadOpKind::V128Load8Splat,
+        LoadOpKind::V128Load16Splat,
+        LoadOpKind::V128Load32Splat,
+        LoadOpKind::V128Load64Splat,
+        LoadOpKind::V128Load32Zero,
+        LoadOpKind::V128Load64Zero,
+    ];
+    for op in ops {
+        isa.push_op(LoadOp::new(op, OperandKind::Slot, false, false));
+        isa.push_op(LoadOp::new(op, OperandKind::Slot, true, true));
+    }
+    let widths = [
+        LaneWidth::W8,
+        LaneWidth::W16,
+        LaneWidth::W32,
+        LaneWidth::W64,
+    ];
+    for width in widths {
+        isa.push_op(V128LoadLaneOp::new(width, OperandKind::Slot, false, false));
+        isa.push_op(V128LoadLaneOp::new(width, OperandKind::Slot, true, true));
+    }
+}
+
+fn add_simd_store_ops(isa: &mut Isa) {
+    let kinds = [
+        StoreOpKind::Store128,
+        StoreOpKind::V128Store8Lane,
+        StoreOpKind::V128Store16Lane,
+        StoreOpKind::V128Store32Lane,
+        StoreOpKind::V128Store64Lane,
+    ];
+    for kind in kinds {
+        isa.push_op(StoreOp::new(
+            kind,
+            OperandKind::Slot,
+            OperandKind::Slot,
+            false,
+            false,
+        ));
+        isa.push_op(StoreOp::new(
+            kind,
+            OperandKind::Slot,
+            OperandKind::Slot,
+            true,
+            true,
+        ));
+    }
+}
+
+fn add_relaxed_simd_ops(isa: &mut Isa) {
+    let kinds = [
+        BinaryOpKind::S16x8RelaxedDotI8x16I7x16,
+        BinaryOpKind::S32x4RelaxedDotI8x16I7x16Add,
+        BinaryOpKind::F32x4RelaxedMadd,
+        BinaryOpKind::F32x4RelaxedNmadd,
+        BinaryOpKind::F64x2RelaxedMadd,
+        BinaryOpKind::F64x2RelaxedNmadd,
+    ];
+    for kind in kinds {
+        isa.push_op(BinaryOp::new(kind, OperandKind::Slot, OperandKind::Slot));
+    }
+}
diff --git a/crates/ir2/build/mod.rs b/crates/ir2/build/mod.rs
new file mode 100644
index 0000000000..6cabdda2bc
--- /dev/null
+++ b/crates/ir2/build/mod.rs
@@ -0,0 +1,149 @@
+#[macro_use]
+mod op;
+mod display;
+pub mod ident;
+mod isa;
+
+use self::{
+    display::{
+        DisplayConstructor,
+        DisplayDecode,
+        DisplayEncode,
+        DisplayOp,
+        DisplayOpCode,
+        DisplayResultMut,
+        Indent,
+    },
+    ident::{CamelCase, Ident, SnakeCase},
+    isa::Isa,
+    op::Op,
+};
+use core::fmt::{self, Display, Error as FmtError, Write as _};
+use std::{env, fs, io::Error as IoError, path::PathBuf};
+
+#[derive(Debug)]
+pub enum Error {
+    Io(IoError),
+    Fmt(FmtError),
+}
+
+impl From<IoError> for Error {
+    fn from(error: IoError) -> Self {
+        Self::Io(error)
+    }
+}
+
+impl From<FmtError> for Error {
+    fn from(error: FmtError) -> Self {
+        Self::Fmt(error)
+    }
+}
+
+impl Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Error::Io(error) => error.fmt(f),
+            Error::Fmt(error) => error.fmt(f),
+        }
+    }
+}
+
+pub struct Config {
+    out_dir: PathBuf,
+    simd: bool,
+}
+
+impl Default for Config {
+    fn default() -> Self {
+        Self {
+            out_dir: PathBuf::from(env::var("OUT_DIR").unwrap()),
+            simd: env::var("CARGO_FEATURE_SIMD").is_ok(),
+        }
+    }
+}
+
+pub fn generate_code(config: &Config) -> Result<(), Error> {
+    fs::create_dir_all(&config.out_dir)?;
+    let isa =
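+        // (All four `generate_*_rs` calls below reuse one `String` buffer;
+        // `write_to_buffer` clears it, pre-reserves `expected_size` bytes, and
+        // asserts afterwards that the size estimate was not exceeded.)
+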
isa::wasmi_isa(config); + let mut buffer = String::new(); + generate_op_rs(config, &isa, &mut buffer)?; + generate_op_code_rs(config, &isa, &mut buffer)?; + generate_encode_rs(config, &isa, &mut buffer)?; + generate_decode_rs(config, &isa, &mut buffer)?; + Ok(()) +} + +fn generate_op_rs(config: &Config, isa: &Isa, contents: &mut String) -> Result<(), Error> { + let expected_size = match config.simd { + true => 210_000, + false => 135_000, + }; + write_to_buffer(contents, expected_size, |buffer| { + write!( + buffer, + "\ + {}\n\ + {}\n\ + {}\n\ + ", + DisplayOp::new(isa, Indent::default()), + DisplayResultMut::new(isa, Indent::default()), + DisplayConstructor::new(isa, Indent::default()), + ) + })?; + fs::write(config.out_dir.join("op.rs"), contents)?; + Ok(()) +} + +fn generate_op_code_rs(config: &Config, isa: &Isa, contents: &mut String) -> Result<(), Error> { + let expected_size = match config.simd { + true => 125_000, + false => 80_000, + }; + write_to_buffer(contents, expected_size, |buffer| { + writeln!(buffer, "{}", DisplayOpCode::new(isa, Indent::default()),) + })?; + fs::write(config.out_dir.join("op_code.rs"), contents)?; + Ok(()) +} + +fn generate_encode_rs(config: &Config, isa: &Isa, contents: &mut String) -> Result<(), Error> { + let expected_size = match config.simd { + true => 120_000, + false => 80_000, + }; + write_to_buffer(contents, expected_size, |buffer| { + write!(buffer, "{}", DisplayEncode::new(isa, Indent::default())) + })?; + fs::write(config.out_dir.join("encode.rs"), contents)?; + Ok(()) +} + +fn generate_decode_rs(config: &Config, isa: &Isa, contents: &mut String) -> Result<(), Error> { + let expected_size = match config.simd { + true => 50_000, + false => 35_000, + }; + write_to_buffer(contents, expected_size, |buffer| { + write!(buffer, "{}", DisplayDecode::new(isa, Indent::default())) + })?; + fs::write(config.out_dir.join("decode.rs"), contents)?; + Ok(()) +} + +#[track_caller] +fn write_to_buffer( + buffer: &mut String, + expected_size: usize, + f: impl FnOnce(&mut String) -> fmt::Result, +) -> Result<(), Error> { + buffer.clear(); + buffer.reserve_exact(expected_size); + f(buffer)?; + let len_contents = buffer.len(); + assert!( + len_contents <= expected_size, + "reserved bytes: {expected_size}, contents.len() = {len_contents}", + ); + Ok(()) +} diff --git a/crates/ir2/build/op.rs b/crates/ir2/build/op.rs new file mode 100644 index 0000000000..29eac3dae6 --- /dev/null +++ b/crates/ir2/build/op.rs @@ -0,0 +1,2338 @@ +use crate::build::{CamelCase, Ident, SnakeCase}; +use core::fmt::{self, Display}; + +macro_rules! apply_macro_for_ops { + ($mac:ident $(, $param:ident)* $(,)?) => { + $mac! { + $($param,)* + Unary(UnaryOp), + Binary(BinaryOp), + CmpBranch(CmpBranchOp), + CmpSelect(CmpSelectOp), + Load(LoadOp), + Store(StoreOp), + TableGet(TableGetOp), + TableSet(TableSetOp), + Generic0(GenericOp<0>), + Generic1(GenericOp<1>), + Generic2(GenericOp<2>), + Generic3(GenericOp<3>), + Generic4(GenericOp<4>), + Generic5(GenericOp<5>), + V128ReplaceLane(V128ReplaceLaneOp), + V128LoadLane(V128LoadLaneOp), + } + }; +} + +macro_rules! impl_from_for_op { + ( + $($variant:ident($op_ty:ty)),* $(,)? 
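+        // `impl_from_for_op!` receives the full variant list from
+        // `apply_macro_for_ops!` above and expands to the `Op` enum plus one
+        // `From<$op_ty> for Op` impl per variant (see the macro body below).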
+    ) => {
+        #[derive(Copy, Clone)]
+        pub enum Op {
+            $(
+                $variant($op_ty),
+            )*
+        }
+
+        $(
+            impl From<$op_ty> for Op {
+                fn from(op: $op_ty) -> Self {
+                    Op::$variant(op)
+                }
+            }
+        )*
+    };
+}
+apply_macro_for_ops!(impl_from_for_op);
+
+#[derive(Copy, Clone)]
+pub struct Field {
+    pub ident: Ident,
+    pub ty: FieldTy,
+}
+
+impl Field {
+    pub fn new(ident: Ident, ty: FieldTy) -> Self {
+        Self { ident, ty }
+    }
+}
+
+impl Display for Field {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let ident = SnakeCase(self.ident);
+        let ty = self.ty;
+        write!(f, "{ident}: {ty}")
+    }
+}
+
+/// The kind of an operand of an [`Op`].
+#[derive(Copy, Clone)]
+pub enum OperandKind {
+    /// The operand is a [`Slot`] index.
+    Slot,
+    /// The operand is an immediate value.
+    Immediate,
+}
+
+impl Display for CamelCase<OperandKind> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let s = match self.0 {
+            OperandKind::Slot => "S",
+            OperandKind::Immediate => "I",
+        };
+        write!(f, "{s}")
+    }
+}
+
+impl Display for SnakeCase<OperandKind> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let s = match self.0 {
+            OperandKind::Slot => "s",
+            OperandKind::Immediate => "i",
+        };
+        write!(f, "{s}")
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct GenericOp<const N: usize> {
+    pub ident: Ident,
+    pub fields: [Field; N],
+}
+
+impl<const N: usize> GenericOp<N> {
+    pub fn new(ident: Ident, fields: [Field; N]) -> Self {
+        Self { ident, fields }
+    }
+
+    pub fn has_result(&self) -> bool {
+        self.fields
+            .iter()
+            .any(|field| matches!(field.ident, Ident::Result))
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct UnaryOp {
+    pub kind: UnaryOpKind,
+    pub value: OperandKind,
+}
+
+impl UnaryOp {
+    pub fn new(kind: UnaryOpKind, value: OperandKind) -> Self {
+        Self { kind, value }
+    }
+
+    pub fn result_field(&self) -> Field {
+        Field::new(Ident::Result, FieldTy::Slot)
+    }
+
+    pub fn value_field(&self) -> Field {
+        let ty = match self.value {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => {
+                let value_ty = self.kind.value_ty();
+                match value_ty.to_field_ty() {
+                    Some(ty) => ty,
+                    None => panic!("no `FieldTy` for `Ty`: {value_ty}"),
+                }
+            }
+        };
+        Field::new(Ident::Value, ty)
+    }
+
+    pub fn fields(&self) -> [Field; 2] {
+        [self.result_field(), self.value_field()]
+    }
+}
+
+#[derive(Copy, Clone)]
+pub enum UnaryOpKind {
+    I32Clz,
+    I32Ctz,
+    I32Popcnt,
+
+    I64Clz,
+    I64Ctz,
+    I64Popcnt,
+
+    I32WrapI64,
+
+    I32Sext8,
+    I32Sext16,
+    I64Sext8,
+    I64Sext16,
+    I64Sext32,
+
+    F32Abs,
+    F32Neg,
+    F32Ceil,
+    F32Floor,
+    F32Trunc,
+    F32Nearest,
+    F32Sqrt,
+
+    F64Abs,
+    F64Neg,
+    F64Ceil,
+    F64Floor,
+    F64Trunc,
+    F64Nearest,
+    F64Sqrt,
+
+    S32TruncF32,
+    U32TruncF32,
+    S32TruncF64,
+    U32TruncF64,
+    S64TruncF32,
+    U64TruncF32,
+    S64TruncF64,
+    U64TruncF64,
+
+    S32TruncSatF32,
+    U32TruncSatF32,
+    S32TruncSatF64,
+    U32TruncSatF64,
+    S64TruncSatF32,
+    U64TruncSatF32,
+    S64TruncSatF64,
+    U64TruncSatF64,
+
+    F32DemoteF64,
+    F64PromoteF32,
+
+    F32ConvertS32,
+    F32ConvertU32,
+    F32ConvertS64,
+    F32ConvertU64,
+
+    F64ConvertS32,
+    F64ConvertU32,
+    F64ConvertS64,
+    F64ConvertU64,
+
+    // SIMD: Generic Unary Ops
+    V128Splat32,
+    V128Splat64,
+    V128Not,
+    V128AnyTrue,
+    // SIMD: `i8x16` Unary Ops
+    I8x16Abs,
+    I8x16Neg,
+    I8x16Popcnt,
+    I8x16AllTrue,
+    I8x16Bitmask,
+    // SIMD: `i16x8` Unary Ops
+    I16x8Abs,
+    I16x8Neg,
+    I16x8AllTrue,
+    I16x8Bitmask,
+    S16x8ExtaddPairwiseI8x16,
+    U16x8ExtaddPairwiseI8x16,
+    S16x8ExtendLowI8x16,
+    U16x8ExtendLowI8x16,
+    S16x8ExtendHighI8x16,
+    U16x8ExtendHighI8x16,
+    // SIMD: `i32x4` Unary Ops
+    I32x4Abs,
+    I32x4Neg,
+    I32x4AllTrue,
+
I32x4Bitmask, + S32x4ExtaddPairwiseI16x8, + U32x4ExtaddPairwiseI16x8, + S32x4ExtendLowI16x8, + U32x4ExtendLowI16x8, + S32x4ExtendHighI16x8, + U32x4ExtendHighI16x8, + // SIMD: `i64x2` Unary Ops + I64x2Abs, + I64x2Neg, + I64x2AllTrue, + I64x2Bitmask, + S64x2ExtendLowI32x4, + U64x2ExtendLowI32x4, + S64x2ExtendHighI32x4, + U64x2ExtendHighI32x4, + // SIMD: `f32x4` Unary Ops + F32x4DemoteZeroF64x2, + F32x4Ceil, + F32x4Floor, + F32x4Trunc, + F32x4Nearest, + F32x4Abs, + F32x4Neg, + F32x4Sqrt, + // SIMD: `f64x2` Unary Ops + F64x2PromoteLowF32x4, + F64x2Ceil, + F64x2Floor, + F64x2Trunc, + F64x2Nearest, + F64x2Abs, + F64x2Neg, + F64x2Sqrt, + // SIMD: Conversions + S32x4TruncSatF32x4, + U32x4TruncSatF32x4, + S32x4TruncSatZeroF64x2, + U32x4TruncSatZeroF64x2, + F32x4ConvertS32x4, + F32x4ConvertU32x4, + F64x2ConvertLowS32x4, + F64x2ConvertLowU32x4, +} + +impl UnaryOpKind { + pub fn is_conversion(&self) -> bool { + self.value_ty() != self.result_ty() + } + + pub fn value_ty(&self) -> Ty { + match self { + | Self::I32Clz | Self::I32Ctz | Self::I32Popcnt => Ty::I32, + | Self::I64Clz | Self::I64Ctz | Self::I64Popcnt | Self::I32WrapI64 => Ty::I64, + | Self::I32Sext8 | Self::I32Sext16 => Ty::I32, + | Self::I64Sext8 | Self::I64Sext16 | Self::I64Sext32 => Ty::I64, + | Self::F32Abs + | Self::F32Neg + | Self::F32Ceil + | Self::F32Floor + | Self::F32Trunc + | Self::F32Nearest + | Self::F32Sqrt => Ty::F32, + | Self::F64Abs + | Self::F64Neg + | Self::F64Ceil + | Self::F64Floor + | Self::F64Trunc + | Self::F64Nearest + | Self::F64Sqrt => Ty::F64, + | Self::S32TruncF32 | Self::U32TruncF32 => Ty::F32, + | Self::S32TruncF64 | Self::U32TruncF64 => Ty::F64, + | Self::S64TruncF32 | Self::U64TruncF32 => Ty::F32, + | Self::S64TruncF64 | Self::U64TruncF64 => Ty::F64, + | Self::S32TruncSatF32 | Self::U32TruncSatF32 => Ty::F32, + | Self::S32TruncSatF64 | Self::U32TruncSatF64 => Ty::F64, + | Self::S64TruncSatF32 | Self::U64TruncSatF32 => Ty::F32, + | Self::S64TruncSatF64 | Self::U64TruncSatF64 | Self::F32DemoteF64 => Ty::F64, + | Self::F64PromoteF32 => Ty::F32, + | Self::F32ConvertS32 => Ty::I32, + | Self::F32ConvertU32 => Ty::U32, + | Self::F32ConvertS64 => Ty::I64, + | Self::F32ConvertU64 => Ty::U64, + | Self::F64ConvertS32 => Ty::I32, + | Self::F64ConvertU32 => Ty::U32, + | Self::F64ConvertS64 => Ty::I64, + | Self::F64ConvertU64 => Ty::U64, + + // SIMD: Generic Unary Ops + | Self::V128Splat32 => Ty::B32, + | Self::V128Splat64 => Ty::B64, + | Self::V128Not | Self::V128AnyTrue => Ty::V128, + // SIMD: `i8x16` Unary Ops + | Self::I8x16Abs + | Self::I8x16Neg + | Self::I8x16Popcnt + | Self::I8x16AllTrue + | Self::I8x16Bitmask => Ty::I8x16, + // SIMD: `i16x8` Unary Ops + | Self::I16x8Abs | Self::I16x8Neg | Self::I16x8AllTrue | Self::I16x8Bitmask => { + Ty::I16x8 + } + | Self::S16x8ExtaddPairwiseI8x16 + | Self::S16x8ExtendLowI8x16 + | Self::S16x8ExtendHighI8x16 + | Self::U16x8ExtaddPairwiseI8x16 + | Self::U16x8ExtendLowI8x16 + | Self::U16x8ExtendHighI8x16 => Ty::I8x16, + // SIMD: `i32x4` Unary Ops + | Self::I32x4Abs | Self::I32x4Neg | Self::I32x4AllTrue | Self::I32x4Bitmask => { + Ty::I32x4 + } + | Self::S32x4ExtaddPairwiseI16x8 + | Self::S32x4ExtendLowI16x8 + | Self::S32x4ExtendHighI16x8 + | Self::U32x4ExtaddPairwiseI16x8 + | Self::U32x4ExtendLowI16x8 + | Self::U32x4ExtendHighI16x8 => Ty::I16x8, + // SIMD: `i64x2` Unary Ops + | Self::I64x2Abs | Self::I64x2Neg | Self::I64x2AllTrue | Self::I64x2Bitmask => { + Ty::I64x2 + } + | Self::S64x2ExtendLowI32x4 + | Self::S64x2ExtendHighI32x4 + | Self::U64x2ExtendLowI32x4 + | 
Self::U64x2ExtendHighI32x4 => Ty::I32x4, + // SIMD: `f32x4` Unary Ops + | Self::F32x4DemoteZeroF64x2 => Ty::F64x2, + | Self::F32x4Ceil + | Self::F32x4Floor + | Self::F32x4Trunc + | Self::F32x4Nearest + | Self::F32x4Abs + | Self::F32x4Neg + | Self::F32x4Sqrt => Ty::F32x4, + // SIMD: `f64x2` Unary Ops + | Self::F64x2PromoteLowF32x4 => Ty::F32x4, + | Self::F64x2Ceil + | Self::F64x2Floor + | Self::F64x2Trunc + | Self::F64x2Nearest + | Self::F64x2Abs + | Self::F64x2Neg + | Self::F64x2Sqrt => Ty::F64x2, + // SIMD: Conversions + | Self::S32x4TruncSatF32x4 => Ty::F32x4, + | Self::S32x4TruncSatZeroF64x2 => Ty::F64x2, + | Self::U32x4TruncSatF32x4 => Ty::F32x4, + | Self::U32x4TruncSatZeroF64x2 => Ty::F64x2, + | Self::F32x4ConvertS32x4 => Ty::S32x4, + | Self::F32x4ConvertU32x4 => Ty::U32x4, + | Self::F64x2ConvertLowS32x4 => Ty::S32x4, + | Self::F64x2ConvertLowU32x4 => Ty::U32x4, + } + } + + pub fn result_ty(&self) -> Ty { + match self { + | Self::I32Clz | Self::I32Ctz | Self::I32Popcnt => Ty::I32, + | Self::I64Clz | Self::I64Ctz | Self::I64Popcnt => Ty::I64, + | Self::I32WrapI64 | Self::I32Sext8 | Self::I32Sext16 => Ty::I32, + | Self::I64Sext8 | Self::I64Sext16 | Self::I64Sext32 => Ty::I64, + | Self::F32Abs + | Self::F32Neg + | Self::F32Ceil + | Self::F32Floor + | Self::F32Trunc + | Self::F32Nearest + | Self::F32Sqrt => Ty::F32, + | Self::F64Abs + | Self::F64Neg + | Self::F64Ceil + | Self::F64Floor + | Self::F64Trunc + | Self::F64Nearest + | Self::F64Sqrt => Ty::F64, + | Self::S32TruncF32 | Self::S32TruncF64 => Ty::I32, + | Self::U32TruncF32 | Self::U32TruncF64 => Ty::U32, + | Self::S64TruncF32 | Self::S64TruncF64 => Ty::I64, + | Self::U64TruncF32 | Self::U64TruncF64 => Ty::U64, + | Self::S32TruncSatF32 | Self::S32TruncSatF64 => Ty::I32, + | Self::U32TruncSatF32 | Self::U32TruncSatF64 => Ty::U32, + | Self::S64TruncSatF32 | Self::S64TruncSatF64 => Ty::I64, + | Self::U64TruncSatF32 | Self::U64TruncSatF64 => Ty::U64, + | Self::F32DemoteF64 => Ty::F32, + | Self::F64PromoteF32 => Ty::F64, + | Self::F32ConvertS32 + | Self::F32ConvertU32 + | Self::F32ConvertS64 + | Self::F32ConvertU64 => Ty::F32, + | Self::F64ConvertS32 + | Self::F64ConvertU32 + | Self::F64ConvertS64 + | Self::F64ConvertU64 => Ty::F64, + + // SIMD: Generic Unary Ops + | Self::V128Splat32 | Self::V128Splat64 | Self::V128Not | Self::V128AnyTrue => Ty::V128, + // SIMD: `i8x16` Unary Ops + | Self::I8x16Abs + | Self::I8x16Neg + | Self::I8x16Popcnt + | Self::I8x16AllTrue + | Self::I8x16Bitmask => Ty::I8x16, + // SIMD: `i16x8` Unary Ops + | Self::I16x8Abs => Ty::I16x8, + | Self::I16x8Neg => Ty::I16x8, + | Self::I16x8AllTrue => Ty::I16x8, + | Self::I16x8Bitmask => Ty::I16x8, + | Self::S16x8ExtaddPairwiseI8x16 + | Self::S16x8ExtendLowI8x16 + | Self::S16x8ExtendHighI8x16 => Ty::S16x8, + | Self::U16x8ExtaddPairwiseI8x16 + | Self::U16x8ExtendLowI8x16 + | Self::U16x8ExtendHighI8x16 => Ty::U16x8, + // SIMD: `i32x4` Unary Ops + | Self::I32x4Abs | Self::I32x4Neg | Self::I32x4AllTrue | Self::I32x4Bitmask => { + Ty::I32x4 + } + | Self::S32x4ExtaddPairwiseI16x8 + | Self::S32x4ExtendLowI16x8 + | Self::S32x4ExtendHighI16x8 => Ty::S32x4, + | Self::U32x4ExtaddPairwiseI16x8 + | Self::U32x4ExtendLowI16x8 + | Self::U32x4ExtendHighI16x8 => Ty::U32x4, + // SIMD: `i64x2` Unary Ops + | Self::I64x2Abs | Self::I64x2Neg | Self::I64x2AllTrue | Self::I64x2Bitmask => { + Ty::I64x2 + } + | Self::S64x2ExtendLowI32x4 | Self::S64x2ExtendHighI32x4 => Ty::S64x2, + | Self::U64x2ExtendLowI32x4 | Self::U64x2ExtendHighI32x4 => Ty::U64x2, + // SIMD: `f32x4` Unary Ops + | 
Self::F32x4DemoteZeroF64x2 + | Self::F32x4Ceil + | Self::F32x4Floor + | Self::F32x4Trunc + | Self::F32x4Nearest + | Self::F32x4Abs + | Self::F32x4Neg + | Self::F32x4Sqrt => Ty::F32x4, + // SIMD: `f64x2` Unary Ops + | Self::F64x2PromoteLowF32x4 + | Self::F64x2Ceil + | Self::F64x2Floor + | Self::F64x2Trunc + | Self::F64x2Nearest + | Self::F64x2Abs + | Self::F64x2Neg + | Self::F64x2Sqrt => Ty::F64x2, + // SIMD: Conversions + | Self::S32x4TruncSatF32x4 | Self::S32x4TruncSatZeroF64x2 => Ty::S32x4, + | Self::U32x4TruncSatF32x4 | Self::U32x4TruncSatZeroF64x2 => Ty::U32x4, + | Self::F32x4ConvertS32x4 | Self::F32x4ConvertU32x4 => Ty::F32x4, + | Self::F64x2ConvertLowS32x4 | Self::F64x2ConvertLowU32x4 => Ty::F64x2, + } + } + + pub fn ident(&self) -> Ident { + match self { + Self::I32Clz => Ident::Clz, + Self::I32Ctz => Ident::Ctz, + Self::I32Popcnt => Ident::Popcnt, + Self::I64Clz => Ident::Clz, + Self::I64Ctz => Ident::Ctz, + Self::I64Popcnt => Ident::Popcnt, + Self::I32WrapI64 => Ident::Wrap, + Self::I32Sext8 => Ident::Sext8, + Self::I32Sext16 => Ident::Sext16, + Self::I64Sext8 => Ident::Sext8, + Self::I64Sext16 => Ident::Sext16, + Self::I64Sext32 => Ident::Sext32, + Self::F32Abs => Ident::Abs, + Self::F32Neg => Ident::Neg, + Self::F32Ceil => Ident::Ceil, + Self::F32Floor => Ident::Floor, + Self::F32Trunc => Ident::Trunc, + Self::F32Nearest => Ident::Nearest, + Self::F32Sqrt => Ident::Sqrt, + Self::F64Abs => Ident::Abs, + Self::F64Neg => Ident::Neg, + Self::F64Ceil => Ident::Ceil, + Self::F64Floor => Ident::Floor, + Self::F64Trunc => Ident::Trunc, + Self::F64Nearest => Ident::Nearest, + Self::F64Sqrt => Ident::Sqrt, + Self::S32TruncF32 => Ident::Trunc, + Self::U32TruncF32 => Ident::Trunc, + Self::S32TruncF64 => Ident::Trunc, + Self::U32TruncF64 => Ident::Trunc, + Self::S64TruncF32 => Ident::Trunc, + Self::U64TruncF32 => Ident::Trunc, + Self::S64TruncF64 => Ident::Trunc, + Self::U64TruncF64 => Ident::Trunc, + Self::S32TruncSatF32 => Ident::TruncSat, + Self::U32TruncSatF32 => Ident::TruncSat, + Self::S32TruncSatF64 => Ident::TruncSat, + Self::U32TruncSatF64 => Ident::TruncSat, + Self::S64TruncSatF32 => Ident::TruncSat, + Self::U64TruncSatF32 => Ident::TruncSat, + Self::S64TruncSatF64 => Ident::TruncSat, + Self::U64TruncSatF64 => Ident::TruncSat, + Self::F32DemoteF64 => Ident::Demote, + Self::F64PromoteF32 => Ident::Promote, + Self::F32ConvertS32 => Ident::Convert, + Self::F32ConvertU32 => Ident::Convert, + Self::F32ConvertS64 => Ident::Convert, + Self::F32ConvertU64 => Ident::Convert, + Self::F64ConvertS32 => Ident::Convert, + Self::F64ConvertU32 => Ident::Convert, + Self::F64ConvertS64 => Ident::Convert, + Self::F64ConvertU64 => Ident::Convert, + + // SIMD: Generic Unary Ops + Self::V128Splat32 => Ident::Splat, + Self::V128Splat64 => Ident::Splat, + Self::V128Not => Ident::Not, + Self::V128AnyTrue => Ident::AnyTrue, + // SIMD: `i8x16` Unary Ops + Self::I8x16Abs => Ident::Abs, + Self::I8x16Neg => Ident::Neg, + Self::I8x16Popcnt => Ident::Popcnt, + Self::I8x16AllTrue => Ident::AllTrue, + Self::I8x16Bitmask => Ident::Bitmask, + // SIMD: `i16x8` Unary Ops + Self::I16x8Abs => Ident::Abs, + Self::I16x8Neg => Ident::Neg, + Self::I16x8AllTrue => Ident::AllTrue, + Self::I16x8Bitmask => Ident::Bitmask, + Self::S16x8ExtaddPairwiseI8x16 => Ident::ExtaddPairwise, + Self::U16x8ExtaddPairwiseI8x16 => Ident::ExtaddPairwise, + Self::S16x8ExtendLowI8x16 => Ident::ExtendLow, + Self::U16x8ExtendLowI8x16 => Ident::ExtendLow, + Self::S16x8ExtendHighI8x16 => Ident::ExtendHigh, + Self::U16x8ExtendHighI8x16 => 
Ident::ExtendHigh, + // SIMD: `i32x4` Unary Ops + Self::I32x4Abs => Ident::Abs, + Self::I32x4Neg => Ident::Neg, + Self::I32x4AllTrue => Ident::AllTrue, + Self::I32x4Bitmask => Ident::Bitmask, + Self::S32x4ExtaddPairwiseI16x8 => Ident::ExtaddPairwise, + Self::U32x4ExtaddPairwiseI16x8 => Ident::ExtaddPairwise, + Self::S32x4ExtendLowI16x8 => Ident::ExtendLow, + Self::U32x4ExtendLowI16x8 => Ident::ExtendLow, + Self::S32x4ExtendHighI16x8 => Ident::ExtendHigh, + Self::U32x4ExtendHighI16x8 => Ident::ExtendHigh, + // SIMD: `i64x2` Unary Ops + Self::I64x2Abs => Ident::Abs, + Self::I64x2Neg => Ident::Neg, + Self::I64x2AllTrue => Ident::AllTrue, + Self::I64x2Bitmask => Ident::Bitmask, + Self::S64x2ExtendLowI32x4 => Ident::ExtendLow, + Self::U64x2ExtendLowI32x4 => Ident::ExtendLow, + Self::S64x2ExtendHighI32x4 => Ident::ExtendHigh, + Self::U64x2ExtendHighI32x4 => Ident::ExtendHigh, + // SIMD: `f32x4` Unary Ops + Self::F32x4DemoteZeroF64x2 => Ident::DemoteZero, + Self::F32x4Ceil => Ident::Ceil, + Self::F32x4Floor => Ident::Floor, + Self::F32x4Trunc => Ident::Trunc, + Self::F32x4Nearest => Ident::Nearest, + Self::F32x4Abs => Ident::Abs, + Self::F32x4Neg => Ident::Neg, + Self::F32x4Sqrt => Ident::Sqrt, + // SIMD: `f64x2` Unary Ops + Self::F64x2PromoteLowF32x4 => Ident::PromoteLow, + Self::F64x2Ceil => Ident::Ceil, + Self::F64x2Floor => Ident::Floor, + Self::F64x2Trunc => Ident::Trunc, + Self::F64x2Nearest => Ident::Nearest, + Self::F64x2Abs => Ident::Abs, + Self::F64x2Neg => Ident::Neg, + Self::F64x2Sqrt => Ident::Sqrt, + // SIMD: Conversions + Self::S32x4TruncSatF32x4 => Ident::TruncSat, + Self::U32x4TruncSatF32x4 => Ident::TruncSat, + Self::S32x4TruncSatZeroF64x2 => Ident::TruncSatZero, + Self::U32x4TruncSatZeroF64x2 => Ident::TruncSatZero, + Self::F32x4ConvertS32x4 => Ident::Convert, + Self::F32x4ConvertU32x4 => Ident::Convert, + Self::F64x2ConvertLowS32x4 => Ident::ConvertLow, + Self::F64x2ConvertLowU32x4 => Ident::ConvertLow, + } + } +} + +#[derive(Copy, Clone)] +pub struct BinaryOp { + pub kind: BinaryOpKind, + pub lhs: OperandKind, + pub rhs: OperandKind, +} + +impl BinaryOp { + pub fn new(kind: BinaryOpKind, lhs: OperandKind, rhs: OperandKind) -> Self { + Self { kind, lhs, rhs } + } + + pub fn result_field(&self) -> Field { + Field::new(Ident::Result, FieldTy::Slot) + } + + pub fn lhs_field(&self) -> Field { + Field::new(Ident::Lhs, self.kind.lhs_field(self.lhs)) + } + + pub fn rhs_field(&self) -> Field { + Field::new(Ident::Rhs, self.kind.rhs_field(self.rhs)) + } + + pub fn fields(&self) -> [Field; 3] { + [self.result_field(), self.lhs_field(), self.rhs_field()] + } +} + +#[derive(Copy, Clone)] +pub enum BinaryOpKind { + // Compare operators. 
+ Cmp(CmpOpKind), + // Binary operators: i32 + I32Add, + I32Sub, + I32Mul, + S32Div, + U32Div, + S32Rem, + U32Rem, + I32BitAnd, + I32BitOr, + I32BitXor, + I32Shl, + S32Shr, + U32Shr, + I32Rotl, + I32Rotr, + // Binary operators: i64 + I64Add, + I64Sub, + I64Mul, + S64Div, + U64Div, + S64Rem, + U64Rem, + I64BitAnd, + I64BitOr, + I64BitXor, + I64Shl, + S64Shr, + U64Shr, + I64Rotl, + I64Rotr, + // Binary operators: f32 + F32Add, + F32Sub, + F32Mul, + F32Div, + F32Min, + F32Max, + F32Copysign, + // Binary operators: f64 + F64Add, + F64Sub, + F64Mul, + F64Div, + F64Min, + F64Max, + F64Copysign, + // Simd Operators + I8x16Swizzle, + I8x16Eq, + I8x16NotEq, + I16x8Eq, + I16x8NotEq, + I32x4Eq, + I32x4NotEq, + I64x2Eq, + I64x2NotEq, + S8x16Lt, + S8x16Le, + S16x8Lt, + S16x8Le, + S32x4Lt, + S32x4Le, + S64x2Lt, + S64x2Le, + U8x16Lt, + U8x16Le, + U16x8Lt, + U16x8Le, + U32x4Lt, + U32x4Le, + U64x2Lt, + U64x2Le, + F32x4Eq, + F32x4NotEq, + F32x4Lt, + F32x4Le, + F64x2Eq, + F64x2NotEq, + F64x2Lt, + F64x2Le, + V128And, + V128AndNot, + V128Or, + V128Xor, + // i8x16 Ops + S8x16NarrowI16x8, + U8x16NarrowI16x8, + I8x16Add, + S8x16AddSat, + U8x16AddSat, + I8x16Sub, + S8x16SubSat, + U8x16SubSat, + S8x16Min, + U8x16Min, + S8x16Max, + U8x16Max, + U8x16Avgr, + // i16x8 Ops + S16x8Q15MulrSat, + S16x8NarrowI32x4, + U16x8NarrowI32x4, + S16x8ExtmulLowI8x16, + U16x8ExtmulLowI8x16, + S16x8ExtmulHighI8x16, + U16x8ExtmulHighI8x16, + I16x8Add, + S16x8AddSat, + U16x8AddSat, + I16x8Sub, + S16x8SubSat, + U16x8SubSat, + I16x8Mul, + S16x8Min, + U16x8Min, + S16x8Max, + U16x8Max, + U16x8Avgr, + // i32x4 Ops + I32x4Add, + I32x4Sub, + I32x4Mul, + S32x4Min, + U32x4Min, + S32x4Max, + U32x4Max, + S32x4DotI16x8, + S32x4ExtmulLowI16x8, + U32x4ExtmulLowI16x8, + S32x4ExtmulHighI16x8, + U32x4ExtmulHighI16x8, + // i64x2 Ops + I64x2Add, + I64x2Sub, + I64x2Mul, + S64x2ExtmulLowI32x4, + U64x2ExtmulLowI32x4, + S64x2ExtmulHighI32x4, + U64x2ExtmulHighI32x4, + // f32x4 Ops + F32x4Add, + F32x4Sub, + F32x4Mul, + F32x4Div, + F32x4Min, + F32x4Max, + F32x4Pmin, + F32x4Pmax, + // f64x2 Ops + F64x2Add, + F64x2Sub, + F64x2Mul, + F64x2Div, + F64x2Min, + F64x2Max, + F64x2Pmin, + F64x2Pmax, + // Simd Shift Ops + I8x16Shl, + S8x16Shr, + U8x16Shr, + I16x8Shl, + S16x8Shr, + U16x8Shr, + I32x4Shl, + S32x4Shr, + U32x4Shr, + I64x2Shl, + S64x2Shr, + U64x2Shr, + // Relaxed SIMD + S16x8RelaxedDotI8x16I7x16, + S32x4RelaxedDotI8x16I7x16Add, + F32x4RelaxedMadd, + F32x4RelaxedNmadd, + F64x2RelaxedMadd, + F64x2RelaxedNmadd, +} + +impl BinaryOpKind { + pub fn ident(&self) -> Ident { + match self { + Self::Cmp(cmp) => cmp.ident(), + Self::I32Add => Ident::Add, + Self::I32Sub => Ident::Sub, + Self::I32Mul => Ident::Mul, + Self::S32Div => Ident::Div, + Self::U32Div => Ident::Div, + Self::S32Rem => Ident::Rem, + Self::U32Rem => Ident::Rem, + Self::I32BitAnd => Ident::BitAnd, + Self::I32BitOr => Ident::BitOr, + Self::I32BitXor => Ident::BitXor, + Self::I32Shl => Ident::Shl, + Self::S32Shr => Ident::Shr, + Self::U32Shr => Ident::Shr, + Self::I32Rotl => Ident::Rotl, + Self::I32Rotr => Ident::Rotr, + Self::I64Add => Ident::Add, + Self::I64Sub => Ident::Sub, + Self::I64Mul => Ident::Mul, + Self::S64Div => Ident::Div, + Self::U64Div => Ident::Div, + Self::S64Rem => Ident::Rem, + Self::U64Rem => Ident::Rem, + Self::I64BitAnd => Ident::BitAnd, + Self::I64BitOr => Ident::BitOr, + Self::I64BitXor => Ident::BitXor, + Self::I64Shl => Ident::Shl, + Self::S64Shr => Ident::Shr, + Self::U64Shr => Ident::Shr, + Self::I64Rotl => Ident::Rotl, + Self::I64Rotr => Ident::Rotr, + Self::F32Add => 
Ident::Add, + Self::F32Sub => Ident::Sub, + Self::F32Mul => Ident::Mul, + Self::F32Div => Ident::Div, + Self::F32Min => Ident::Min, + Self::F32Max => Ident::Max, + Self::F32Copysign => Ident::Copysign, + Self::F64Add => Ident::Add, + Self::F64Sub => Ident::Sub, + Self::F64Mul => Ident::Mul, + Self::F64Div => Ident::Div, + Self::F64Min => Ident::Min, + Self::F64Max => Ident::Max, + Self::F64Copysign => Ident::Copysign, + // Simd Ops + Self::I8x16Swizzle => Ident::Swizzle, + Self::I8x16Eq => Ident::Eq, + Self::I8x16NotEq => Ident::NotEq, + Self::I16x8Eq => Ident::Eq, + Self::I16x8NotEq => Ident::NotEq, + Self::I32x4Eq => Ident::Eq, + Self::I32x4NotEq => Ident::NotEq, + Self::I64x2Eq => Ident::Eq, + Self::I64x2NotEq => Ident::NotEq, + Self::S8x16Lt => Ident::Lt, + Self::S8x16Le => Ident::Le, + Self::S16x8Lt => Ident::Lt, + Self::S16x8Le => Ident::Le, + Self::S32x4Lt => Ident::Lt, + Self::S32x4Le => Ident::Le, + Self::S64x2Lt => Ident::Lt, + Self::S64x2Le => Ident::Le, + Self::U8x16Lt => Ident::Lt, + Self::U8x16Le => Ident::Le, + Self::U16x8Lt => Ident::Lt, + Self::U16x8Le => Ident::Le, + Self::U32x4Lt => Ident::Lt, + Self::U32x4Le => Ident::Le, + Self::U64x2Lt => Ident::Lt, + Self::U64x2Le => Ident::Le, + Self::F32x4Eq => Ident::Eq, + Self::F32x4NotEq => Ident::NotEq, + Self::F32x4Lt => Ident::Lt, + Self::F32x4Le => Ident::Le, + Self::F64x2Eq => Ident::Eq, + Self::F64x2NotEq => Ident::NotEq, + Self::F64x2Lt => Ident::Lt, + Self::F64x2Le => Ident::Le, + Self::V128And => Ident::And, + Self::V128AndNot => Ident::AndNot, + Self::V128Or => Ident::Or, + Self::V128Xor => Ident::Xor, + // i8x16 Ops + Self::S8x16NarrowI16x8 => Ident::NarrowI16x8, + Self::U8x16NarrowI16x8 => Ident::NarrowI16x8, + Self::I8x16Add => Ident::Add, + Self::S8x16AddSat => Ident::AddSat, + Self::U8x16AddSat => Ident::AddSat, + Self::I8x16Sub => Ident::Sub, + Self::S8x16SubSat => Ident::SubSat, + Self::U8x16SubSat => Ident::SubSat, + Self::S8x16Min => Ident::Min, + Self::U8x16Min => Ident::Min, + Self::S8x16Max => Ident::Max, + Self::U8x16Max => Ident::Max, + Self::U8x16Avgr => Ident::Avgr, + // i16x8 Ops + Self::S16x8Q15MulrSat => Ident::Q15MulrSat, + Self::S16x8NarrowI32x4 => Ident::NarrowI32x4, + Self::U16x8NarrowI32x4 => Ident::NarrowI32x4, + Self::S16x8ExtmulLowI8x16 => Ident::ExtmulLowI8x16, + Self::U16x8ExtmulLowI8x16 => Ident::ExtmulLowI8x16, + Self::S16x8ExtmulHighI8x16 => Ident::ExtmulHighI8x16, + Self::U16x8ExtmulHighI8x16 => Ident::ExtmulHighI8x16, + Self::I16x8Add => Ident::Add, + Self::S16x8AddSat => Ident::AddSat, + Self::U16x8AddSat => Ident::AddSat, + Self::I16x8Sub => Ident::Sub, + Self::S16x8SubSat => Ident::SubSat, + Self::U16x8SubSat => Ident::SubSat, + Self::I16x8Mul => Ident::Mul, + Self::S16x8Min => Ident::Min, + Self::U16x8Min => Ident::Min, + Self::S16x8Max => Ident::Max, + Self::U16x8Max => Ident::Max, + Self::U16x8Avgr => Ident::Avgr, + // i32x4 Ops + Self::I32x4Add => Ident::Add, + Self::I32x4Sub => Ident::Sub, + Self::I32x4Mul => Ident::Mul, + Self::S32x4Min => Ident::Min, + Self::U32x4Min => Ident::Min, + Self::S32x4Max => Ident::Max, + Self::U32x4Max => Ident::Max, + Self::S32x4DotI16x8 => Ident::DotI16x8, + Self::S32x4ExtmulLowI16x8 => Ident::ExtmulLowI16x8, + Self::U32x4ExtmulLowI16x8 => Ident::ExtmulLowI16x8, + Self::S32x4ExtmulHighI16x8 => Ident::ExtmulHighI16x8, + Self::U32x4ExtmulHighI16x8 => Ident::ExtmulHighI16x8, + // i64x2 Ops + Self::I64x2Add => Ident::Add, + Self::I64x2Sub => Ident::Sub, + Self::I64x2Mul => Ident::Mul, + Self::S64x2ExtmulLowI32x4 => Ident::ExtmulLowI32x4, + 
Self::U64x2ExtmulLowI32x4 => Ident::ExtmulLowI32x4, + Self::S64x2ExtmulHighI32x4 => Ident::ExtmulHighI32x4, + Self::U64x2ExtmulHighI32x4 => Ident::ExtmulHighI32x4, + // f32x4 Ops + Self::F32x4Add => Ident::Add, + Self::F32x4Sub => Ident::Sub, + Self::F32x4Mul => Ident::Mul, + Self::F32x4Div => Ident::Div, + Self::F32x4Min => Ident::Min, + Self::F32x4Max => Ident::Max, + Self::F32x4Pmin => Ident::Pmin, + Self::F32x4Pmax => Ident::Pmax, + // f64x2 Ops + Self::F64x2Add => Ident::Add, + Self::F64x2Sub => Ident::Sub, + Self::F64x2Mul => Ident::Mul, + Self::F64x2Div => Ident::Div, + Self::F64x2Min => Ident::Min, + Self::F64x2Max => Ident::Max, + Self::F64x2Pmin => Ident::Pmin, + Self::F64x2Pmax => Ident::Pmax, + // Simd Shift Ops + Self::I8x16Shl => Ident::Shl, + Self::S8x16Shr => Ident::Shr, + Self::U8x16Shr => Ident::Shr, + Self::I16x8Shl => Ident::Shl, + Self::S16x8Shr => Ident::Shr, + Self::U16x8Shr => Ident::Shr, + Self::I32x4Shl => Ident::Shl, + Self::S32x4Shr => Ident::Shr, + Self::U32x4Shr => Ident::Shr, + Self::I64x2Shl => Ident::Shl, + Self::S64x2Shr => Ident::Shr, + Self::U64x2Shr => Ident::Shr, + // Relaxed SIMD + Self::S16x8RelaxedDotI8x16I7x16 => Ident::RelaxedDotI8x16I7x16, + Self::S32x4RelaxedDotI8x16I7x16Add => Ident::RelaxedDotI8x16I7x16Add, + Self::F32x4RelaxedMadd => Ident::RelaxedMadd, + Self::F32x4RelaxedNmadd => Ident::RelaxedNmadd, + Self::F64x2RelaxedMadd => Ident::RelaxedMadd, + Self::F64x2RelaxedNmadd => Ident::RelaxedNmadd, + } + } + + pub fn ident_prefix(&self) -> Ty { + match self { + | BinaryOpKind::Cmp(op) => op.ident_prefix(), + | Self::I32Add + | Self::I32Sub + | Self::I32Mul + | Self::I32BitAnd + | Self::I32BitOr + | Self::I32BitXor + | Self::I32Shl + | Self::I32Rotl + | Self::I32Rotr => Ty::I32, + | Self::S32Div | Self::S32Rem | Self::S32Shr => Ty::S32, + | Self::U32Div | Self::U32Rem | Self::U32Shr => Ty::U32, + | Self::I64Add + | Self::I64Sub + | Self::I64Mul + | Self::I64BitAnd + | Self::I64BitOr + | Self::I64BitXor + | Self::I64Shl + | Self::I64Rotl + | Self::I64Rotr => Ty::I64, + | Self::S64Div | Self::S64Rem | Self::S64Shr => Ty::S64, + | Self::U64Div | Self::U64Rem | Self::U64Shr => Ty::U64, + | Self::F32Add + | Self::F32Sub + | Self::F32Mul + | Self::F32Div + | Self::F32Min + | Self::F32Max + | Self::F32Copysign => Ty::F32, + | Self::F64Add + | Self::F64Sub + | Self::F64Mul + | Self::F64Div + | Self::F64Min + | Self::F64Max + | Self::F64Copysign => Ty::F64, + | Self::I8x16Swizzle => Ty::I8x16, + | Self::I8x16Eq | Self::I8x16NotEq => Ty::I8x16, + | Self::I16x8Eq | Self::I16x8NotEq => Ty::I16x8, + | Self::I32x4Eq | Self::I32x4NotEq => Ty::I32x4, + | Self::I64x2Eq | Self::I64x2NotEq => Ty::I64x2, + | Self::S8x16Lt | Self::S8x16Le => Ty::S8x16, + | Self::S16x8Lt | Self::S16x8Le => Ty::S16x8, + | Self::S32x4Lt | Self::S32x4Le => Ty::S32x4, + | Self::S64x2Lt | Self::S64x2Le => Ty::S64x2, + | Self::U8x16Lt | Self::U8x16Le => Ty::U8x16, + | Self::U16x8Lt | Self::U16x8Le => Ty::U16x8, + | Self::U32x4Lt | Self::U32x4Le => Ty::U32x4, + | Self::U64x2Lt | Self::U64x2Le => Ty::U64x2, + | Self::F32x4Eq | Self::F32x4NotEq | Self::F32x4Lt | Self::F32x4Le => Ty::F32x4, + | Self::F64x2Eq | Self::F64x2NotEq | Self::F64x2Lt | Self::F64x2Le => Ty::F64x2, + | Self::V128And | Self::V128AndNot | Self::V128Or | Self::V128Xor => Ty::V128, + // i8x16 Ops + | Self::S8x16NarrowI16x8 => Ty::S8x16, + | Self::U8x16NarrowI16x8 => Ty::U8x16, + | Self::I8x16Add => Ty::I8x16, + | Self::S8x16AddSat => Ty::S8x16, + | Self::U8x16AddSat => Ty::U8x16, + | Self::I8x16Sub => Ty::I8x16, + | 
Self::S8x16SubSat => Ty::S8x16, + | Self::U8x16SubSat => Ty::U8x16, + | Self::S8x16Min => Ty::S8x16, + | Self::U8x16Min => Ty::U8x16, + | Self::S8x16Max => Ty::S8x16, + | Self::U8x16Max => Ty::U8x16, + | Self::U8x16Avgr => Ty::U8x16, + // i16x8 Ops + | Self::S16x8Q15MulrSat => Ty::S16x8, + | Self::S16x8NarrowI32x4 => Ty::S16x8, + | Self::U16x8NarrowI32x4 => Ty::U16x8, + | Self::S16x8ExtmulLowI8x16 => Ty::S16x8, + | Self::U16x8ExtmulLowI8x16 => Ty::U16x8, + | Self::S16x8ExtmulHighI8x16 => Ty::S16x8, + | Self::U16x8ExtmulHighI8x16 => Ty::U16x8, + | Self::I16x8Add => Ty::I16x8, + | Self::S16x8AddSat => Ty::S16x8, + | Self::U16x8AddSat => Ty::U16x8, + | Self::I16x8Sub => Ty::I16x8, + | Self::S16x8SubSat => Ty::S16x8, + | Self::U16x8SubSat => Ty::U16x8, + | Self::I16x8Mul => Ty::I16x8, + | Self::S16x8Min => Ty::S16x8, + | Self::U16x8Min => Ty::U16x8, + | Self::S16x8Max => Ty::S16x8, + | Self::U16x8Max => Ty::U16x8, + | Self::U16x8Avgr => Ty::U16x8, + // i32x4 Ops + | Self::I32x4Add | Self::I32x4Sub | Self::I32x4Mul => Ty::I32x4, + | Self::S32x4Min => Ty::S32x4, + | Self::U32x4Min => Ty::U32x4, + | Self::S32x4Max => Ty::S32x4, + | Self::U32x4Max => Ty::U32x4, + | Self::S32x4DotI16x8 => Ty::S32x4, + | Self::S32x4ExtmulLowI16x8 => Ty::S32x4, + | Self::U32x4ExtmulLowI16x8 => Ty::U32x4, + | Self::S32x4ExtmulHighI16x8 => Ty::S32x4, + | Self::U32x4ExtmulHighI16x8 => Ty::U32x4, + // i64x2 Ops + | Self::I64x2Add | Self::I64x2Sub | Self::I64x2Mul => Ty::I64x2, + | Self::S64x2ExtmulLowI32x4 => Ty::S64x2, + | Self::U64x2ExtmulLowI32x4 => Ty::U64x2, + | Self::S64x2ExtmulHighI32x4 => Ty::S64x2, + | Self::U64x2ExtmulHighI32x4 => Ty::U64x2, + // f32x4 Ops + | Self::F32x4Add + | Self::F32x4Sub + | Self::F32x4Mul + | Self::F32x4Div + | Self::F32x4Min + | Self::F32x4Max + | Self::F32x4Pmin + | Self::F32x4Pmax => Ty::F32x4, + // f64x2 Ops + | Self::F64x2Add + | Self::F64x2Sub + | Self::F64x2Mul + | Self::F64x2Div + | Self::F64x2Min + | Self::F64x2Max + | Self::F64x2Pmin + | Self::F64x2Pmax => Ty::F64x2, + // Simd Shift Ops + | Self::I8x16Shl => Ty::I8x16, + | Self::S8x16Shr => Ty::S8x16, + | Self::U8x16Shr => Ty::U8x16, + | Self::I16x8Shl => Ty::I16x8, + | Self::S16x8Shr => Ty::S16x8, + | Self::U16x8Shr => Ty::U16x8, + | Self::I32x4Shl => Ty::I32x4, + | Self::S32x4Shr => Ty::S32x4, + | Self::U32x4Shr => Ty::U32x4, + | Self::I64x2Shl => Ty::I64x2, + | Self::S64x2Shr => Ty::S64x2, + | Self::U64x2Shr => Ty::U64x2, + // Relaxed SIMD + | Self::S16x8RelaxedDotI8x16I7x16 => Ty::S16x8, + | Self::S32x4RelaxedDotI8x16I7x16Add => Ty::S32x4, + | Self::F32x4RelaxedMadd | Self::F32x4RelaxedNmadd => Ty::F32x4, + | Self::F64x2RelaxedMadd | Self::F64x2RelaxedNmadd => Ty::F64x2, + } + } + + fn lhs_field(&self, input: OperandKind) -> FieldTy { + match input { + OperandKind::Slot => FieldTy::Slot, + OperandKind::Immediate => match self { + | Self::Cmp(cmp) => cmp.input_field(input), + | Self::I32Add + | Self::I32Sub + | Self::I32Mul + | Self::S32Div + | Self::U32Div + | Self::S32Rem + | Self::U32Rem + | Self::I32BitAnd + | Self::I32BitOr + | Self::I32BitXor + | Self::I32Shl + | Self::S32Shr + | Self::U32Shr + | Self::I32Rotl + | Self::I32Rotr => FieldTy::I32, + | Self::I64Add + | Self::I64Sub + | Self::I64Mul + | Self::S64Div + | Self::U64Div + | Self::S64Rem + | Self::U64Rem + | Self::I64BitAnd + | Self::I64BitOr + | Self::I64BitXor + | Self::I64Shl + | Self::S64Shr + | Self::U64Shr + | Self::I64Rotl + | Self::I64Rotr => FieldTy::I64, + | Self::F32Add + | Self::F32Sub + | Self::F32Mul + | Self::F32Div + | Self::F32Min + | 
Self::F32Max + | Self::F32Copysign => FieldTy::F32, + | Self::F64Add + | Self::F64Sub + | Self::F64Mul + | Self::F64Div + | Self::F64Min + | Self::F64Max + | Self::F64Copysign => FieldTy::F64, + _ => panic!("operator cannot have an immediate `lhs` field"), + }, + } + } + + fn rhs_field(&self, input: OperandKind) -> FieldTy { + match input { + OperandKind::Slot => FieldTy::Slot, + OperandKind::Immediate => match self { + | Self::Cmp(cmp) => cmp.input_field(input), + | Self::I32Add + | Self::I32Sub + | Self::I32Mul + | Self::I32BitAnd + | Self::I32BitOr + | Self::I32BitXor => FieldTy::I32, + | Self::I32Shl | Self::S32Shr | Self::U32Shr | Self::I32Rotl | Self::I32Rotr => { + FieldTy::U8 + } + | Self::S32Div | Self::U32Div | Self::S32Rem | Self::U32Rem => FieldTy::NonZeroU32, + | Self::I64Add + | Self::I64Sub + | Self::I64Mul + | Self::I64BitAnd + | Self::I64BitOr + | Self::I64BitXor => FieldTy::I64, + | Self::I64Shl | Self::S64Shr | Self::U64Shr | Self::I64Rotl | Self::I64Rotr => { + FieldTy::U8 + } + | Self::S64Div | Self::U64Div | Self::S64Rem | Self::U64Rem => FieldTy::NonZeroU64, + | Self::F32Add + | Self::F32Sub + | Self::F32Mul + | Self::F32Div + | Self::F32Min + | Self::F32Max => FieldTy::F32, + | Self::F32Copysign => FieldTy::SignF32, + | Self::F64Add + | Self::F64Sub + | Self::F64Mul + | Self::F64Div + | Self::F64Min + | Self::F64Max => FieldTy::F64, + | Self::F64Copysign => FieldTy::SignF64, + | Self::I8x16Shl + | Self::S8x16Shr + | Self::U8x16Shr + | Self::I16x8Shl + | Self::S16x8Shr + | Self::U16x8Shr + | Self::I32x4Shl + | Self::S32x4Shr + | Self::U32x4Shr + | Self::I64x2Shl + | Self::S64x2Shr + | Self::U64x2Shr => FieldTy::U8, + _ => panic!("operator cannot have an immediate `rhs` field"), + }, + } + } + + pub fn commutativity(&self) -> Commutativity { + match self { + | Self::Cmp(cmp) => cmp.commutativity(), + | Self::I32Add + | Self::I32Mul + | Self::I32BitAnd + | Self::I32BitOr + | Self::I32BitXor + | Self::I64Add + | Self::I64Mul + | Self::I64BitAnd + | Self::I64BitOr + | Self::I64BitXor => Commutativity::Commutative, + _ => Commutativity::NonCommutative, + } + } +} + +#[derive(Copy, Clone)] +pub enum Commutativity { + Commutative, + NonCommutative, +} + +#[derive(Copy, Clone)] +pub struct CmpBranchOp { + pub cmp: CmpOpKind, + pub lhs: OperandKind, + pub rhs: OperandKind, +} + +impl CmpBranchOp { + pub fn new(cmp: CmpOpKind, lhs: OperandKind, rhs: OperandKind) -> Self { + Self { cmp, lhs, rhs } + } + + pub fn lhs_field(&self) -> Field { + Field::new(Ident::Lhs, self.cmp.input_field(self.lhs)) + } + + pub fn rhs_field(&self) -> Field { + Field::new(Ident::Rhs, self.cmp.input_field(self.rhs)) + } + + pub fn offset_field(&self) -> Field { + Field::new(Ident::Offset, FieldTy::BranchOffset) + } + + pub fn fields(&self) -> [Field; 3] { + [self.lhs_field(), self.rhs_field(), self.offset_field()] + } +} + +#[derive(Copy, Clone)] +pub struct CmpSelectOp { + pub cmp: CmpOpKind, + pub lhs: OperandKind, + pub rhs: OperandKind, +} + +impl CmpSelectOp { + pub fn new(cmp: CmpOpKind, lhs: OperandKind, rhs: OperandKind) -> Self { + Self { cmp, lhs, rhs } + } + + pub fn result_field(&self) -> Field { + Field::new(Ident::Result, FieldTy::Slot) + } + + pub fn lhs_field(&self) -> Field { + Field::new(Ident::Lhs, self.cmp.input_field(self.lhs)) + } + + pub fn rhs_field(&self) -> Field { + Field::new(Ident::Rhs, self.cmp.input_field(self.rhs)) + } + + pub fn val_true_field(&self) -> Field { + Field::new(Ident::ValTrue, FieldTy::Slot) + } + + pub fn val_false_field(&self) -> Field { + 
Field::new(Ident::ValFalse, FieldTy::Slot)
+    }
+
+    pub fn fields(&self) -> [Field; 5] {
+        [
+            self.result_field(),
+            self.lhs_field(),
+            self.rhs_field(),
+            self.val_true_field(),
+            self.val_false_field(),
+        ]
+    }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum Ty {
+    /// A general 32-bit integer type.
+    I32,
+    /// A general 64-bit integer type.
+    I64,
+    /// A signed 32-bit integer type.
+    S32,
+    /// A signed 64-bit integer type.
+    S64,
+    /// An unsigned 32-bit integer type.
+    U32,
+    /// An unsigned 64-bit integer type.
+    U64,
+    /// A generic 32-bit value.
+    B32,
+    /// A generic 64-bit value.
+    B64,
+    /// A 32-bit float type.
+    F32,
+    /// A 64-bit float type.
+    F64,
+    /// A generic `simd` vector type.
+    V128,
+    /// An `i8x16` vector type for `simd`.
+    I8x16,
+    /// An `i16x8` vector type for `simd`.
+    I16x8,
+    /// An `i32x4` vector type for `simd`.
+    I32x4,
+    /// An `i64x2` vector type for `simd`.
+    I64x2,
+    /// A `u8x16` vector type for `simd`.
+    U8x16,
+    /// A `u16x8` vector type for `simd`.
+    U16x8,
+    /// A `u32x4` vector type for `simd`.
+    U32x4,
+    /// A `u64x2` vector type for `simd`.
+    U64x2,
+    /// An `s8x16` vector type for `simd`.
+    S8x16,
+    /// An `s16x8` vector type for `simd`.
+    S16x8,
+    /// An `s32x4` vector type for `simd`.
+    S32x4,
+    /// An `s64x2` vector type for `simd`.
+    S64x2,
+    /// An `f32x4` vector type for `simd`.
+    F32x4,
+    /// An `f64x2` vector type for `simd`.
+    F64x2,
+}
+
+impl Ty {
+    pub fn to_field_ty(self) -> Option<FieldTy> {
+        let ty = match self {
+            | Ty::S32 | Ty::I32 => FieldTy::I32,
+            | Ty::S64 | Ty::I64 => FieldTy::I64,
+            | Ty::B32 | Ty::U32 => FieldTy::U32,
+            | Ty::B64 | Ty::U64 => FieldTy::U64,
+            | Ty::F32 => FieldTy::F32,
+            | Ty::F64 => FieldTy::F64,
+            _ => return None,
+        };
+        Some(ty)
+    }
+}
+
+impl Display for Ty {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let s = match self {
+            Ty::I32 => "i32",
+            Ty::I64 => "i64",
+            Ty::S32 => "i32",
+            Ty::S64 => "i64",
+            Ty::U32 => "u32",
+            Ty::U64 => "u64",
+            Ty::B32 => "32",
+            Ty::B64 => "64",
+            Ty::F32 => "f32",
+            Ty::F64 => "f64",
+            Ty::V128 => "v128",
+            Ty::I8x16 => "i8x16",
+            Ty::I16x8 => "i16x8",
+            Ty::I32x4 => "i32x4",
+            Ty::I64x2 => "i64x2",
+            Ty::U8x16 => "u8x16",
+            Ty::U16x8 => "u16x8",
+            Ty::U32x4 => "u32x4",
+            Ty::U64x2 => "u64x2",
+            Ty::S8x16 => "s8x16",
+            Ty::S16x8 => "s16x8",
+            Ty::S32x4 => "s32x4",
+            Ty::S64x2 => "s64x2",
+            Ty::F32x4 => "f32x4",
+            Ty::F64x2 => "f64x2",
+        };
+        write!(f, "{s}")
+    }
+}
+
+impl Display for SnakeCase<Ty> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl Display for CamelCase<Ty> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let s = match self.0 {
+            Ty::I32 => "I32",
+            Ty::I64 => "I64",
+            Ty::S32 => "I32",
+            Ty::S64 => "I64",
+            Ty::U32 => "U32",
+            Ty::U64 => "U64",
+            Ty::B32 => "32",
+            Ty::B64 => "64",
+            Ty::F32 => "F32",
+            Ty::F64 => "F64",
+            Ty::V128 => "V128",
+            Ty::I8x16 => "I8x16",
+            Ty::I16x8 => "I16x8",
+            Ty::I32x4 => "I32x4",
+            Ty::I64x2 => "I64x2",
+            Ty::U8x16 => "U8x16",
+            Ty::U16x8 => "U16x8",
+            Ty::U32x4 => "U32x4",
+            Ty::U64x2 => "U64x2",
+            Ty::S8x16 => "S8x16",
+            Ty::S16x8 => "S16x8",
+            Ty::S32x4 => "S32x4",
+            Ty::S64x2 => "S64x2",
+            Ty::F32x4 => "F32x4",
+            Ty::F64x2 => "F64x2",
+        };
+        write!(f, "{s}")
+    }
+}
+
+#[derive(Copy, Clone)]
+pub enum FieldTy {
+    Slot,
+    SlotSpan,
+    FixedSlotSpan2,
+    U8,
+    U16,
+    U32,
+    U64,
+    I8,
+    I16,
+    I32,
+    I64,
+    F32,
+    F64,
+    NonZeroU32,
+    NonZeroU64,
+    SignF32,
+    SignF64,
+    Address,
+    Offset16,
+    BranchOffset,
+    Memory,
+    Table,
+    Global,
+    Func,
+    FuncType,
+    InternalFunc,
+    Elem,
+    Data,
+    TrapCode,
+    BlockFuel,
+    Array16ImmLaneIdx32,
+    ImmLaneIdx16,
+    ImmLaneIdx8,
+    ImmLaneIdx4,
+    ImmLaneIdx2,
+    Bytes16,
+    V128,
+}
+
+impl Display for FieldTy {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let s = match self {
+            Self::Slot => "Slot",
+            Self::SlotSpan => "SlotSpan",
+            Self::FixedSlotSpan2 => "FixedSlotSpan<2>",
+            Self::U8 => "u8",
+            Self::U16 => "u16",
+            Self::U32 => "u32",
+            Self::U64 => "u64",
+            Self::I8 => "i8",
+            Self::I16 => "i16",
+            Self::I32 => "i32",
+            Self::I64 => "i64",
+            Self::F32 => "f32",
+            Self::F64 => "f64",
+            Self::NonZeroU32 => "NonZero<u32>",
+            Self::NonZeroU64 => "NonZero<u64>",
+            Self::SignF32 => "Sign<f32>",
+            Self::SignF64 => "Sign<f64>",
+            Self::Address => "Address",
+            Self::Offset16 => "Offset16",
+            Self::BranchOffset => "BranchOffset",
+            Self::Memory => "Memory",
+            Self::Table => "Table",
+            Self::Global => "Global",
+            Self::Func => "Func",
+            Self::FuncType => "FuncType",
+            Self::InternalFunc => "InternalFunc",
+            Self::Elem => "Elem",
+            Self::Data => "Data",
+            Self::TrapCode => "TrapCode",
+            Self::BlockFuel => "BlockFuel",
+            Self::Array16ImmLaneIdx32 => "[ImmLaneIdx<32>; 16]",
+            Self::ImmLaneIdx16 => "ImmLaneIdx<16>",
+            Self::ImmLaneIdx8 => "ImmLaneIdx<8>",
+            Self::ImmLaneIdx4 => "ImmLaneIdx<4>",
+            Self::ImmLaneIdx2 => "ImmLaneIdx<2>",
+            Self::Bytes16 => "[u8; 16]",
+            Self::V128 => "V128",
+        };
+        write!(f, "{s}")
+    }
+}
+
+#[derive(Copy, Clone)]
+pub enum CmpOpKind {
+    I32Eq,
+    I32NotEq,
+    I32And,
+    I32NotAnd,
+    I32Or,
+    I32NotOr,
+    S32Lt,
+    U32Lt,
+    S32Le,
+    U32Le,
+
+    I64Eq,
+    I64NotEq,
+    I64And,
+    I64NotAnd,
+    I64Or,
+    I64NotOr,
+    S64Lt,
+    U64Lt,
+    S64Le,
+    U64Le,
+
+    F32Eq,
+    F32NotEq,
+    F32Lt,
+    F32NotLt,
+    F32Le,
+    F32NotLe,
+
+    F64Eq,
+    F64NotEq,
+    F64Lt,
+    F64NotLt,
+    F64Le,
+    F64NotLe,
+}
+
+impl CmpOpKind {
+    pub fn commutativity(&self) -> Commutativity {
+        match self {
+            | Self::I32Eq
+            | Self::I32NotEq
+            | Self::I32And
+            | Self::I32NotAnd
+            | Self::I32Or
+            | Self::I32NotOr
+            | Self::I64Eq
+            | Self::I64NotEq
+            | Self::I64And
+            | Self::I64NotAnd
+            | Self::I64Or
+            | Self::I64NotOr
+            | Self::F32Eq
+            | Self::F32NotEq
+            | Self::F64Eq
+            | Self::F64NotEq => Commutativity::Commutative,
+            _ => Commutativity::NonCommutative,
+        }
+    }
+
+    fn input_field(&self, input: OperandKind) -> FieldTy {
+        match input {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => match self {
+                | Self::I32Eq
+                | Self::I32NotEq
+                | Self::I32And
+                | Self::I32NotAnd
+                | Self::I32Or
+                | Self::I32NotOr
+                | Self::S32Lt
+                | Self::S32Le => FieldTy::I32,
+                | Self::U32Lt | Self::U32Le => FieldTy::U32,
+                | Self::I64Eq
+                | Self::I64NotEq
+                | Self::I64And
+                | Self::I64NotAnd
+                | Self::I64Or
+                | Self::I64NotOr
+                | Self::S64Lt
+                | Self::S64Le => FieldTy::I64,
+                | Self::U64Lt | Self::U64Le => FieldTy::U64,
+                | Self::F32Eq
+                | Self::F32NotEq
+                | Self::F32Lt
+                | Self::F32NotLt
+                | Self::F32Le
+                | Self::F32NotLe => FieldTy::F32,
+                | Self::F64Eq
+                | Self::F64NotEq
+                | Self::F64Lt
+                | Self::F64NotLt
+                | Self::F64Le
+                | Self::F64NotLe => FieldTy::F64,
+            },
+        }
+    }
+
+    pub fn ident_prefix(&self) -> Ty {
+        match self {
+            | Self::I32Eq
+            | Self::I32NotEq
+            | Self::I32And
+            | Self::I32NotAnd
+            | Self::I32Or
+            | Self::I32NotOr => Ty::I32,
+            | Self::S32Lt | Self::S32Le => Ty::S32,
+            | Self::U32Lt | Self::U32Le => Ty::U32,
+            | Self::I64Eq
+            | Self::I64NotEq
+            | Self::I64And
+            | Self::I64NotAnd
+            | Self::I64Or
+            | Self::I64NotOr => Ty::I64,
+            | Self::S64Lt | Self::S64Le => Ty::S64,
+            | Self::U64Lt | Self::U64Le => Ty::U64,
+            | Self::F32Eq
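+            // (Floats carry explicit `NotEq`/`NotLt`/`NotLe` kinds: for
+            // integers `!(a < b)` can be re-expressed as `b <= a` by swapping
+            // operands, but IEEE-754 NaN inputs break that equivalence.)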
+            | Self::F32NotEq
+            | Self::F32Lt
+            | Self::F32NotLt
+            | Self::F32Le
+            | Self::F32NotLe => Ty::F32,
+            | Self::F64Eq
+            | Self::F64NotEq
+            | Self::F64Lt
+            | Self::F64NotLt
+            | Self::F64Le
+            | Self::F64NotLe => Ty::F64,
+        }
+    }
+
+    pub fn ident(&self) -> Ident {
+        match self {
+            Self::I32Eq => Ident::Eq,
+            Self::I32NotEq => Ident::NotEq,
+            Self::I32And => Ident::And,
+            Self::I32NotAnd => Ident::NotAnd,
+            Self::I32Or => Ident::Or,
+            Self::I32NotOr => Ident::NotOr,
+            Self::S32Lt => Ident::Lt,
+            Self::U32Lt => Ident::Lt,
+            Self::S32Le => Ident::Le,
+            Self::U32Le => Ident::Le,
+            Self::I64Eq => Ident::Eq,
+            Self::I64NotEq => Ident::NotEq,
+            Self::I64And => Ident::And,
+            Self::I64NotAnd => Ident::NotAnd,
+            Self::I64Or => Ident::Or,
+            Self::I64NotOr => Ident::NotOr,
+            Self::S64Lt => Ident::Lt,
+            Self::U64Lt => Ident::Lt,
+            Self::S64Le => Ident::Le,
+            Self::U64Le => Ident::Le,
+            Self::F32Eq => Ident::Eq,
+            Self::F32Lt => Ident::Lt,
+            Self::F32Le => Ident::Le,
+            Self::F32NotEq => Ident::NotEq,
+            Self::F32NotLt => Ident::NotLt,
+            Self::F32NotLe => Ident::NotLe,
+            Self::F64Eq => Ident::Eq,
+            Self::F64Lt => Ident::Lt,
+            Self::F64Le => Ident::Le,
+            Self::F64NotEq => Ident::NotEq,
+            Self::F64NotLt => Ident::NotLt,
+            Self::F64NotLe => Ident::NotLe,
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct LoadOp {
+    /// The kind of the load operator.
+    pub kind: LoadOpKind,
+    /// The `ptr` field type.
+    pub ptr: OperandKind,
+    /// True if the operator always operates on `memory 0`.
+    pub mem0: bool,
+    /// True if the operator uses a 16-bit offset field.
+    pub offset16: bool,
+}
+
+impl LoadOp {
+    pub fn new(kind: LoadOpKind, ptr: OperandKind, mem0: bool, offset16: bool) -> Self {
+        Self {
+            kind,
+            ptr,
+            mem0,
+            offset16,
+        }
+    }
+
+    pub fn result_field(&self) -> Field {
+        Field::new(Ident::Result, FieldTy::Slot)
+    }
+
+    pub fn ptr_field(&self) -> Field {
+        let ptr_ty = match self.ptr {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => FieldTy::Address,
+        };
+        Field::new(Ident::Ptr, ptr_ty)
+    }
+
+    pub fn offset_field(&self) -> Option<Field> {
+        let offset_ty = match self.ptr {
+            OperandKind::Slot => match self.offset16 {
+                true => FieldTy::Offset16,
+                false => FieldTy::U64,
+            },
+            OperandKind::Immediate => return None,
+        };
+        Some(Field::new(Ident::Offset, offset_ty))
+    }
+
+    pub fn memory_field(&self) -> Option<Field> {
+        if self.mem0 {
+            return None;
+        }
+        Some(Field::new(Ident::Memory, FieldTy::Memory))
+    }
+
+    pub fn fields(&self) -> [Option<Field>; 4] {
+        [
+            Some(self.result_field()),
+            Some(self.ptr_field()),
+            self.offset_field(),
+            self.memory_field(),
+        ]
+    }
+}
+
+#[derive(Copy, Clone)]
+pub enum LoadOpKind {
+    // Scalar
+    Load32,
+    Load64,
+    S32Load8,
+    U32Load8,
+    S32Load16,
+    U32Load16,
+    S64Load8,
+    U64Load8,
+    S64Load16,
+    U64Load16,
+    S64Load32,
+    U64Load32,
+    // Simd
+    V128Load,
+    S16x8Load8x8,
+    U16x8Load8x8,
+    S32x4Load16x4,
+    U32x4Load16x4,
+    S64x2Load32x2,
+    U64x2Load32x2,
+    V128Load8Splat,
+    V128Load16Splat,
+    V128Load32Splat,
+    V128Load64Splat,
+    V128Load32Zero,
+    V128Load64Zero,
+}
+
+impl LoadOpKind {
+    pub fn ident(&self) -> Ident {
+        match self {
+            Self::Load32 => Ident::Load32,
+            Self::Load64 => Ident::Load64,
+            Self::S32Load8 => Ident::Load8,
+            Self::U32Load8 => Ident::Load8,
+            Self::S32Load16 => Ident::Load16,
+            Self::U32Load16 => Ident::Load16,
+            Self::S64Load8 => Ident::Load8,
+            Self::U64Load8 => Ident::Load8,
+            Self::S64Load16 => Ident::Load16,
+            Self::U64Load16 => Ident::Load16,
+            Self::S64Load32 => Ident::Load32,
+            Self::U64Load32 => Ident::Load32,
+            Self::V128Load => Ident::Load,
+
+#[derive(Copy, Clone)]
+pub struct LoadOp {
+    /// The kind of the load operator.
+    pub kind: LoadOpKind,
+    /// The `ptr` field type.
+    pub ptr: OperandKind,
+    /// True, if the operator always operates on `memory 0`.
+    pub mem0: bool,
+    /// True, if the operator uses a 16-bit offset field.
+    pub offset16: bool,
+}
+
+impl LoadOp {
+    pub fn new(kind: LoadOpKind, ptr: OperandKind, mem0: bool, offset16: bool) -> Self {
+        Self {
+            kind,
+            ptr,
+            mem0,
+            offset16,
+        }
+    }
+
+    pub fn result_field(&self) -> Field {
+        Field::new(Ident::Result, FieldTy::Slot)
+    }
+
+    pub fn ptr_field(&self) -> Field {
+        let ptr_ty = match self.ptr {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => FieldTy::Address,
+        };
+        Field::new(Ident::Ptr, ptr_ty)
+    }
+
+    pub fn offset_field(&self) -> Option<Field> {
+        let offset_ty = match self.ptr {
+            OperandKind::Slot => match self.offset16 {
+                true => FieldTy::Offset16,
+                false => FieldTy::U64,
+            },
+            OperandKind::Immediate => return None,
+        };
+        Some(Field::new(Ident::Offset, offset_ty))
+    }
+
+    pub fn memory_field(&self) -> Option<Field> {
+        if self.mem0 {
+            return None;
+        }
+        Some(Field::new(Ident::Memory, FieldTy::Memory))
+    }
+
+    pub fn fields(&self) -> [Option<Field>; 4] {
+        [
+            Some(self.result_field()),
+            Some(self.ptr_field()),
+            self.offset_field(),
+            self.memory_field(),
+        ]
+    }
+}
+
+#[derive(Copy, Clone)]
+pub enum LoadOpKind {
+    // Scalar
+    Load32,
+    Load64,
+    S32Load8,
+    U32Load8,
+    S32Load16,
+    U32Load16,
+    S64Load8,
+    U64Load8,
+    S64Load16,
+    U64Load16,
+    S64Load32,
+    U64Load32,
+    // Simd
+    V128Load,
+    S16x8Load8x8,
+    U16x8Load8x8,
+    S32x4Load16x4,
+    U32x4Load16x4,
+    S64x2Load32x2,
+    U64x2Load32x2,
+    V128Load8Splat,
+    V128Load16Splat,
+    V128Load32Splat,
+    V128Load64Splat,
+    V128Load32Zero,
+    V128Load64Zero,
+}
+
+impl LoadOpKind {
+    pub fn ident(&self) -> Ident {
+        match self {
+            Self::Load32 => Ident::Load32,
+            Self::Load64 => Ident::Load64,
+            Self::S32Load8 => Ident::Load8,
+            Self::U32Load8 => Ident::Load8,
+            Self::S32Load16 => Ident::Load16,
+            Self::U32Load16 => Ident::Load16,
+            Self::S64Load8 => Ident::Load8,
+            Self::U64Load8 => Ident::Load8,
+            Self::S64Load16 => Ident::Load16,
+            Self::U64Load16 => Ident::Load16,
+            Self::S64Load32 => Ident::Load32,
+            Self::U64Load32 => Ident::Load32,
+            Self::V128Load => Ident::Load,
+            Self::S16x8Load8x8 => Ident::Load8x8,
+            Self::U16x8Load8x8 => Ident::Load8x8,
+            Self::S32x4Load16x4 => Ident::Load16x4,
+            Self::U32x4Load16x4 => Ident::Load16x4,
+            Self::S64x2Load32x2 => Ident::Load32x2,
+            Self::U64x2Load32x2 => Ident::Load32x2,
+            Self::V128Load8Splat => Ident::Load8Splat,
+            Self::V128Load16Splat => Ident::Load16Splat,
+            Self::V128Load32Splat => Ident::Load32Splat,
+            Self::V128Load64Splat => Ident::Load64Splat,
+            Self::V128Load32Zero => Ident::Load32Zero,
+            Self::V128Load64Zero => Ident::Load64Zero,
+        }
+    }
+
+    pub fn ident_prefix(&self) -> Option<Ty> {
+        let prefix = match self {
+            | Self::Load32 | Self::Load64 => return None,
+            | Self::S32Load8 => Ty::S32,
+            | Self::U32Load8 => Ty::U32,
+            | Self::S32Load16 => Ty::S32,
+            | Self::U32Load16 => Ty::U32,
+            | Self::S64Load8 => Ty::S64,
+            | Self::U64Load8 => Ty::U64,
+            | Self::S64Load16 => Ty::S64,
+            | Self::U64Load16 => Ty::U64,
+            | Self::S64Load32 => Ty::S64,
+            | Self::U64Load32 => Ty::U64,
+            | Self::V128Load => Ty::V128,
+            | Self::S16x8Load8x8 => Ty::S16x8,
+            | Self::U16x8Load8x8 => Ty::U16x8,
+            | Self::S32x4Load16x4 => Ty::S32x4,
+            | Self::U32x4Load16x4 => Ty::U32x4,
+            | Self::S64x2Load32x2 => Ty::S64x2,
+            | Self::U64x2Load32x2 => Ty::U64x2,
+            | Self::V128Load8Splat => Ty::V128,
+            | Self::V128Load16Splat => Ty::V128,
+            | Self::V128Load32Splat => Ty::V128,
+            | Self::V128Load64Splat => Ty::V128,
+            | Self::V128Load32Zero => Ty::V128,
+            | Self::V128Load64Zero => Ty::V128,
+        };
+        Some(prefix)
+    }
+}
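`fields()` yields `None` for fields a particular encoding does not need (no `offset` for immediate pointers, no `memory` for `mem0` operators), so consumers can simply flatten the array. A minimal, illustrative sketch:

    // Illustrative only: count the fields a generated variant would carry.
    fn num_fields(op: &LoadOp) -> usize {
        op.fields().into_iter().flatten().count()
    }
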
+
+#[derive(Copy, Clone)]
+pub struct StoreOp {
+    /// The kind of the store operator.
+    pub kind: StoreOpKind,
+    /// The `ptr` input type.
+    pub ptr: OperandKind,
+    /// The `value` input type.
+    pub value: OperandKind,
+    /// True, if the operator always operates on `memory 0`.
+    pub mem0: bool,
+    /// True, if the operator uses a 16-bit offset field.
+    pub offset16: bool,
+}
+
+impl StoreOp {
+    pub fn new(
+        kind: StoreOpKind,
+        ptr: OperandKind,
+        value: OperandKind,
+        mem0: bool,
+        offset16: bool,
+    ) -> Self {
+        Self {
+            kind,
+            ptr,
+            value,
+            mem0,
+            offset16,
+        }
+    }
+
+    pub fn ptr_field(&self) -> Field {
+        let ptr_ty = match self.ptr {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => FieldTy::Address,
+        };
+        Field::new(Ident::Ptr, ptr_ty)
+    }
+
+    pub fn offset_field(&self) -> Option<Field> {
+        let offset_ty = match self.ptr {
+            OperandKind::Slot => match self.offset16 {
+                true => FieldTy::Offset16,
+                false => FieldTy::U64,
+            },
+            OperandKind::Immediate => return None,
+        };
+        Some(Field::new(Ident::Offset, offset_ty))
+    }
+
+    pub fn value_field(&self) -> Field {
+        let value_ty = self.kind.value_ty(self.value);
+        Field::new(Ident::Value, value_ty)
+    }
+
+    pub fn memory_field(&self) -> Option<Field> {
+        if self.mem0 {
+            return None;
+        }
+        Some(Field::new(Ident::Memory, FieldTy::Memory))
+    }
+
+    pub fn laneidx_field(&self) -> Option<Field> {
+        let ty = self.kind.laneidx_ty()?;
+        Some(Field::new(Ident::Lane, ty))
+    }
+
+    pub fn fields(&self) -> [Option<Field>; 5] {
+        [
+            Some(self.ptr_field()),
+            self.offset_field(),
+            Some(self.value_field()),
+            self.memory_field(),
+            self.laneidx_field(),
+        ]
+    }
+}
+
+#[derive(Copy, Clone)]
+pub enum StoreOpKind {
+    // Generic
+    Store32,
+    Store64,
+    // i32
+    I32Store8,
+    I32Store16,
+    // i64
+    I64Store8,
+    I64Store16,
+    I64Store32,
+    // v128
+    Store128,
+    V128Store8Lane,
+    V128Store16Lane,
+    V128Store32Lane,
+    V128Store64Lane,
+}
+
+impl StoreOpKind {
+    pub fn ident(&self) -> Ident {
+        match self {
+            Self::Store32 => Ident::Store32,
+            Self::Store64 => Ident::Store64,
+            Self::I32Store8 => Ident::Store8,
+            Self::I32Store16 => Ident::Store16,
+            Self::I64Store8 => Ident::Store8,
+            Self::I64Store16 => Ident::Store16,
+            Self::I64Store32 => Ident::Store32,
+            Self::Store128 => Ident::Store128,
+            Self::V128Store8Lane => Ident::Store8Lane,
+            Self::V128Store16Lane => Ident::Store16Lane,
+            Self::V128Store32Lane => Ident::Store32Lane,
+            Self::V128Store64Lane => Ident::Store64Lane,
+        }
+    }
+
+    pub fn ident_prefix(&self) -> Option<Ident> {
+        match self {
+            Self::Store32 => None,
+            Self::Store64 => None,
+            Self::I32Store8 => Some(Ident::I32),
+            Self::I32Store16 => Some(Ident::I32),
+            Self::I64Store8 => Some(Ident::I64),
+            Self::I64Store16 => Some(Ident::I64),
+            Self::I64Store32 => Some(Ident::I64),
+            Self::Store128 => None,
+            Self::V128Store8Lane => Some(Ident::V128),
+            Self::V128Store16Lane => Some(Ident::V128),
+            Self::V128Store32Lane => Some(Ident::V128),
+            Self::V128Store64Lane => Some(Ident::V128),
+        }
+    }
+
+    fn value_ty(&self, input: OperandKind) -> FieldTy {
+        match input {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => match self {
+                Self::Store32 => FieldTy::U32,
+                Self::Store64 => FieldTy::U64,
+                Self::I32Store8 => FieldTy::I8,
+                Self::I32Store16 => FieldTy::I16,
+                Self::I64Store8 => FieldTy::I8,
+                Self::I64Store16 => FieldTy::I16,
+                Self::I64Store32 => FieldTy::I32,
+                Self::Store128 => FieldTy::Bytes16,
+                Self::V128Store8Lane => FieldTy::V128,
+                Self::V128Store16Lane => FieldTy::V128,
+                Self::V128Store32Lane => FieldTy::V128,
+                Self::V128Store64Lane => FieldTy::V128,
+            },
+        }
+    }
+
+    fn laneidx_ty(&self) -> Option<FieldTy> {
+        let ty = match self {
+            Self::V128Store8Lane => FieldTy::ImmLaneIdx16,
+            Self::V128Store16Lane => FieldTy::ImmLaneIdx8,
+            Self::V128Store32Lane => FieldTy::ImmLaneIdx4,
+            Self::V128Store64Lane => FieldTy::ImmLaneIdx2,
+            _ => return None,
+        };
+        Some(ty)
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct TableGetOp {
+    /// The `index` type.
+    pub index: OperandKind,
+}
+
+impl TableGetOp {
+    pub fn new(index: OperandKind) -> Self {
+        Self { index }
+    }
+
+    pub fn result_field(&self) -> Field {
+        Field::new(Ident::Result, FieldTy::Slot)
+    }
+
+    pub fn index_field(&self) -> Field {
+        let index_ty = match self.index {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => FieldTy::U32,
+        };
+        Field::new(Ident::Index, index_ty)
+    }
+
+    pub fn table_field(&self) -> Field {
+        Field::new(Ident::Table, FieldTy::Table)
+    }
+
+    pub fn fields(&self) -> [Field; 3] {
+        [self.result_field(), self.index_field(), self.table_field()]
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct TableSetOp {
+    /// The `index` input.
+    pub index: OperandKind,
+    /// The `value` input.
+    pub value: OperandKind,
+}
+
+impl TableSetOp {
+    pub fn new(index: OperandKind, value: OperandKind) -> Self {
+        Self { index, value }
+    }
+
+    pub fn index_field(&self) -> Field {
+        let index_ty = match self.index {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => FieldTy::U32,
+        };
+        Field::new(Ident::Index, index_ty)
+    }
+
+    pub fn value_field(&self) -> Field {
+        let value_ty = match self.value {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => FieldTy::U64,
+        };
+        Field::new(Ident::Value, value_ty)
+    }
+
+    pub fn table_field(&self) -> Field {
+        Field::new(Ident::Table, FieldTy::Table)
+    }
+
+    pub fn fields(&self) -> [Field; 3] {
+        [self.index_field(), self.value_field(), self.table_field()]
+    }
+}
+
+#[derive(Copy, Clone)]
+pub enum LaneWidth {
+    W8,
+    W16,
+    W32,
+    W64,
+}
+
+impl Display for LaneWidth {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let width = u8::from(*self);
+        let len_lanes = self.len_lanes();
+        write!(f, "{width}x{len_lanes}")
+    }
+}
+
+impl From<LaneWidth> for u8 {
+    fn from(width: LaneWidth) -> Self {
+        match width {
+            LaneWidth::W8 => 8,
+            LaneWidth::W16 => 16,
+            LaneWidth::W32 => 32,
+            LaneWidth::W64 => 64,
+        }
+    }
+}
+
+impl LaneWidth {
+    pub fn len_lanes(self) -> u8 {
+        match self {
+            Self::W8 => 16,
+            Self::W16 => 8,
+            Self::W32 => 4,
+            Self::W64 => 2,
+        }
+    }
+
+    pub fn to_laneidx(self) -> FieldTy {
+        match self {
+            Self::W8 => FieldTy::ImmLaneIdx16,
+            Self::W16 => FieldTy::ImmLaneIdx8,
+            Self::W32 => FieldTy::ImmLaneIdx4,
+            Self::W64 => FieldTy::ImmLaneIdx2,
+        }
+    }
+}
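The lane width and lane count are two views of one 128-bit vector, i.e. `width * len_lanes == 128` for every variant. Illustrative checks (not part of the diff):

    assert_eq!(u8::from(LaneWidth::W32), 32);
    assert_eq!(LaneWidth::W32.len_lanes(), 4);
    assert_eq!(LaneWidth::W32.to_string(), "32x4");
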
+
+#[derive(Copy, Clone)]
+pub struct V128ReplaceLaneOp {
+    /// The width of the lanes to operate on.
+    pub width: LaneWidth,
+    /// The `value` used for replacing.
+    pub value: OperandKind,
+}
+
+impl V128ReplaceLaneOp {
+    pub fn new(width: LaneWidth, value: OperandKind) -> Self {
+        Self { width, value }
+    }
+
+    pub fn result_field(&self) -> Field {
+        Field::new(Ident::Result, FieldTy::Slot)
+    }
+
+    pub fn v128_field(&self) -> Field {
+        Field::new(Ident::V128, FieldTy::Slot)
+    }
+
+    pub fn value_field(&self) -> Field {
+        let value_ty = match self.value {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => match self.width {
+                LaneWidth::W8 => FieldTy::U8,
+                LaneWidth::W16 => FieldTy::U16,
+                LaneWidth::W32 => FieldTy::U32,
+                LaneWidth::W64 => FieldTy::U64,
+            },
+        };
+        Field::new(Ident::Value, value_ty)
+    }
+
+    pub fn lane_field(&self) -> Field {
+        let lane_ty = match self.width {
+            LaneWidth::W8 => FieldTy::ImmLaneIdx16,
+            LaneWidth::W16 => FieldTy::ImmLaneIdx8,
+            LaneWidth::W32 => FieldTy::ImmLaneIdx4,
+            LaneWidth::W64 => FieldTy::ImmLaneIdx2,
+        };
+        Field::new(Ident::Lane, lane_ty)
+    }
+
+    pub fn fields(&self) -> [Field; 4] {
+        [
+            self.result_field(),
+            self.v128_field(),
+            self.value_field(),
+            self.lane_field(),
+        ]
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct V128LoadLaneOp {
+    /// The width of the lanes to operate on.
+    pub width: LaneWidth,
+    /// The `ptr` input type.
+    pub ptr: OperandKind,
+    /// True, if the operator always operates on `memory 0`.
+    pub mem0: bool,
+    /// True, if the operator uses a 16-bit offset field.
+    pub offset16: bool,
+}
+
+impl V128LoadLaneOp {
+    pub fn new(width: LaneWidth, ptr: OperandKind, mem0: bool, offset16: bool) -> Self {
+        Self {
+            width,
+            ptr,
+            mem0,
+            offset16,
+        }
+    }
+
+    pub fn result_field(&self) -> Field {
+        Field::new(Ident::Result, FieldTy::Slot)
+    }
+
+    pub fn ptr_field(&self) -> Field {
+        let ptr_ty = match self.ptr {
+            OperandKind::Slot => FieldTy::Slot,
+            OperandKind::Immediate => FieldTy::Address,
+        };
+        Field::new(Ident::Ptr, ptr_ty)
+    }
+
+    pub fn offset_field(&self) -> Option<Field> {
+        let offset_ty = match self.ptr {
+            OperandKind::Slot => match self.offset16 {
+                true => FieldTy::Offset16,
+                false => FieldTy::U64,
+            },
+            OperandKind::Immediate => return None,
+        };
+        Some(Field::new(Ident::Offset, offset_ty))
+    }
+
+    pub fn v128_field(&self) -> Field {
+        Field::new(Ident::V128, FieldTy::Slot)
+    }
+
+    pub fn memory_field(&self) -> Option<Field> {
+        if self.mem0 {
+            return None;
+        }
+        Some(Field::new(Ident::Memory, FieldTy::Memory))
+    }
+
+    pub fn laneidx_field(&self) -> Field {
+        let ty = match self.width {
+            LaneWidth::W8 => FieldTy::ImmLaneIdx16,
+            LaneWidth::W16 => FieldTy::ImmLaneIdx8,
+            LaneWidth::W32 => FieldTy::ImmLaneIdx4,
+            LaneWidth::W64 => FieldTy::ImmLaneIdx2,
+        };
+        Field::new(Ident::Lane, ty)
+    }
+
+    pub fn fields(&self) -> [Option<Field>; 6] {
+        [
+            Some(self.result_field()),
+            Some(self.ptr_field()),
+            self.offset_field(),
+            self.memory_field(),
+            Some(self.v128_field()),
+            Some(self.laneidx_field()),
+        ]
+    }
+}
diff --git a/crates/ir2/src/decode/mod.rs b/crates/ir2/src/decode/mod.rs
new file mode 100644
index 0000000000..85e14488a6
--- /dev/null
+++ b/crates/ir2/src/decode/mod.rs
@@ -0,0 +1,156 @@
+#![allow(non_camel_case_types)]
+
+mod op;
+
+use self::op::{
+    BinaryOp,
+    CmpBranchOp,
+    CmpSelectOp,
+    LoadOpMem0Offset16_Ss,
+    LoadOp_Si,
+    LoadOp_Ss,
+    StoreOpMem0Offset16_S,
+    StoreOp_I,
+    StoreOp_S,
+    TableGet,
+    TableSet,
+    UnaryOp,
+};
+#[cfg(feature = "simd")]
+use self::op::{
+    StoreLaneOpMem0Offset16_S,
+    StoreLaneOp_S,
+    V128LoadLaneOpMem0Offset16_Ss,
+    V128LoadLaneOp_Ss,
+    V128ReplaceLaneOp,
+};
+#[cfg(feature = "simd")]
+use crate::core::simd::ImmLaneIdx;
+use crate::{
+    core::TrapCode,
+    index::{Data, Elem, Func, FuncType, Global, InternalFunc, Memory, Table},
+    Address,
+    BlockFuel,
+    BoundedSlotSpan,
+    BranchOffset,
+    BranchTableTarget,
+    FixedSlotSpan,
+    Offset16,
+    OpCode,
+    Sign,
+    Slot,
+    SlotSpan,
+};
+use core::{mem, num::NonZero};
+
+/// Types that can be used to decode types implementing [`Decode`].
+pub trait Decoder {
+    /// Reads enough bytes from `self` to populate `buffer`.
+    fn read_bytes(&mut self, buffer: &mut [u8]);
+}
+
+/// Types that can be decoded using a type that implements [`Decoder`].
+pub trait Decode {
+    /// Decodes `Self` via `decoder`.
+    ///
+    /// # Safety
+    ///
+    /// It is the caller's responsibility to ensure that the decoder
+    /// decodes items in the order they have been encoded and at valid
+    /// positions within the decode stream.
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self;
+}
+
+impl Decode for BoundedSlotSpan {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        let span = SlotSpan::decode(decoder);
+        let len = u16::decode(decoder);
+        Self::new(span, len)
+    }
+}
+
+impl<const N: u16> Decode for FixedSlotSpan<N> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self::new_unchecked(SlotSpan::decode(decoder))
+    }
+}
+
+impl Decode for BranchTableTarget {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        let results = SlotSpan::decode(decoder);
+        let offset = BranchOffset::decode(decoder);
+        Self::new(results, offset)
+    }
+}
+
+macro_rules! impl_decode_for_primitive {
+    ( $($ty:ty),* $(,)? ) => {
+        $(
+            impl Decode for $ty {
+                unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+                    let mut bytes = [0_u8; mem::size_of::<$ty>()];
+                    decoder.read_bytes(&mut bytes);
+                    Self::from_ne_bytes(bytes)
+                }
+            }
+        )*
+    };
+}
+impl_decode_for_primitive!(
+    u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize, f32, f64
+);
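A minimal sketch of a `Decoder` reading from an in-memory byte slice; `SliceDecoder` is hypothetical and not part of this diff. It assumes the caller upholds the `Decode` safety contract (items decoded in encode order, at valid positions):

    pub struct SliceDecoder<'a> {
        bytes: &'a [u8],
    }

    impl Decoder for SliceDecoder<'_> {
        fn read_bytes(&mut self, buffer: &mut [u8]) {
            let (head, tail) = self.bytes.split_at(buffer.len());
            buffer.copy_from_slice(head);
            self.bytes = tail;
        }
    }

    // Usage: primitives decode from native-endian bytes, mirroring `Encode`.
    let mut decoder = SliceDecoder { bytes: &42_u32.to_ne_bytes() };
    let value = unsafe { u32::decode(&mut decoder) };
    assert_eq!(value, 42);
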
+
+macro_rules! impl_decode_using {
+    ( $($ty:ty as $as:ty = $e:expr),* $(,)? ) => {
+        $(
+            impl Decode for $ty {
+                unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+                    $e(<$as as Decode>::decode(decoder))
+                }
+            }
+        )*
+    };
+}
+impl_decode_using! {
+    bool as u8 = |value| value != 0,
+    Offset16 as u16 = Into::into,
+    BranchOffset as i32 = Into::into,
+    BlockFuel as u64 = Into::into,
+    Slot as u16 = Into::into,
+    Func as u32 = Into::into,
+    FuncType as u32 = Into::into,
+    InternalFunc as u32 = Into::into,
+    Global as u32 = Into::into,
+    Memory as u16 = Into::into,
+    Table as u32 = Into::into,
+    Data as u32 = Into::into,
+    Elem as u32 = Into::into,
+
+    Address as u64 = |address| unsafe { Address::try_from(address).unwrap_unchecked() },
+    Sign<f32> as bool = Sign::new,
+    Sign<f64> as bool = Sign::new,
+    SlotSpan as Slot = SlotSpan::new,
+    NonZero<u32> as u32 = |value| unsafe { NonZero::new_unchecked(value) },
+    NonZero<u64> as u64 = |value| unsafe { NonZero::new_unchecked(value) },
+    TrapCode as u8 = |code: u8| -> TrapCode {
+        unsafe { TrapCode::try_from(code).unwrap_unchecked() }
+    },
+    OpCode as u16 = |code: u16| -> OpCode {
+        unsafe { OpCode::try_from(code).unwrap_unchecked() }
+    }
+}
+
+impl<T: Decode, const N: usize> Decode for [T; N] {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        core::array::from_fn(|_| <T as Decode>::decode(decoder))
+    }
+}
+
+#[cfg(feature = "simd")]
+impl<const N: u8> Decode for ImmLaneIdx<N> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        ImmLaneIdx::try_from(u8::decode(decoder)).unwrap_unchecked()
+    }
+}
+
+include!(concat!(env!("OUT_DIR"), "/decode.rs"));
diff --git a/crates/ir2/src/decode/op.rs b/crates/ir2/src/decode/op.rs
new file mode 100644
index 0000000000..7ddd378eef
--- /dev/null
+++ b/crates/ir2/src/decode/op.rs
@@ -0,0 +1,346 @@
+#[cfg(feature = "simd")]
+use crate::core::simd::ImmLaneIdx;
+use crate::{
+    index::{Memory, Table},
+    Address,
+    BranchOffset,
+    Decode,
+    Decoder,
+    Offset16,
+    Slot,
+};
+
+#[derive(Copy, Clone)]
+pub struct UnaryOp<V> {
+    pub result: Slot,
+    pub value: V,
+}
+
+impl<V: Decode> Decode for UnaryOp<V> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            value: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct BinaryOp<Lhs, Rhs> {
+    pub result: Slot,
+    pub lhs: Lhs,
+    pub rhs: Rhs,
+}
+
+impl<Lhs, Rhs> Decode for BinaryOp<Lhs, Rhs>
+where
+    Lhs: Decode,
+    Rhs: Decode,
+{
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            lhs: Decode::decode(decoder),
+            rhs: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct CmpBranchOp<Lhs, Rhs> {
+    pub lhs: Lhs,
+    pub rhs: Rhs,
+    pub offset: BranchOffset,
+}
+
+impl<Lhs, Rhs> Decode for CmpBranchOp<Lhs, Rhs>
+where
+    Lhs: Decode,
+    Rhs: Decode,
+{
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            lhs: Decode::decode(decoder),
+            rhs: Decode::decode(decoder),
+            offset: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct CmpSelectOp<Lhs, Rhs> {
+    pub result: Slot,
+    pub lhs: Lhs,
+    pub rhs: Rhs,
+    pub val_true: Slot,
+    pub val_false: Slot,
+}
+
+impl<Lhs, Rhs> Decode for CmpSelectOp<Lhs, Rhs>
+where
+    Lhs: Decode,
+    Rhs: Decode,
+{
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            lhs: Decode::decode(decoder),
+            rhs: Decode::decode(decoder),
+            val_true: Decode::decode(decoder),
+            val_false: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct LoadOp_Ss {
+    pub result: Slot,
+    pub ptr: Slot,
+    pub offset: u64,
+    pub memory: Memory,
+}
+
+impl Decode for LoadOp_Ss {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            ptr: Decode::decode(decoder),
+            offset: Decode::decode(decoder),
+            memory: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct LoadOp_Si {
+    pub result: Slot,
+    pub address: Address,
+    pub memory: Memory,
+}
+
+impl Decode for LoadOp_Si {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            address: Decode::decode(decoder),
+            memory: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct LoadOpMem0Offset16_Ss {
+    pub result: Slot,
+    pub ptr: Slot,
+    pub offset: Offset16,
+}
+
+impl Decode for LoadOpMem0Offset16_Ss {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            ptr: Decode::decode(decoder),
+            offset: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct StoreOp_S<T> {
+    pub ptr: Slot,
+    pub offset: u64,
+    pub value: T,
+    pub memory: Memory,
+}
+
+impl<T: Decode> Decode for StoreOp_S<T> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            ptr: Decode::decode(decoder),
+            offset: Decode::decode(decoder),
+            value: Decode::decode(decoder),
+            memory: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct StoreOp_I<T> {
+    pub address: Address,
+    pub value: T,
+    pub memory: Memory,
+}
+
+impl<T: Decode> Decode for StoreOp_I<T> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            address: Decode::decode(decoder),
+            value: Decode::decode(decoder),
+            memory: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct StoreOpMem0Offset16_S<T> {
+    pub ptr: Slot,
+    pub offset: Offset16,
+    pub value: T,
+}
+
+impl<T: Decode> Decode for StoreOpMem0Offset16_S<T> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            ptr: Decode::decode(decoder),
+            offset: Decode::decode(decoder),
+            value: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+#[cfg(feature = "simd")]
+pub struct StoreLaneOp_S<T, LaneIdx> {
+    pub ptr: Slot,
+    pub offset: u64,
+    pub value: T,
+    pub memory: Memory,
+    pub lane: LaneIdx,
+}
+
+#[cfg(feature = "simd")]
+impl<T: Decode, LaneIdx: Decode> Decode for StoreLaneOp_S<T, LaneIdx> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            ptr: Decode::decode(decoder),
+            offset: Decode::decode(decoder),
+            value: Decode::decode(decoder),
+            memory: Decode::decode(decoder),
+            lane: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+#[cfg(feature = "simd")]
+pub struct StoreLaneOpMem0Offset16_S<T, LaneIdx> {
+    pub ptr: Slot,
+    pub offset: Offset16,
+    pub value: T,
+    pub lane: LaneIdx,
+}
+
+#[cfg(feature = "simd")]
+impl<T: Decode, LaneIdx: Decode> Decode for StoreLaneOpMem0Offset16_S<T, LaneIdx> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            ptr: Decode::decode(decoder),
+            offset: Decode::decode(decoder),
+            value: Decode::decode(decoder),
+            lane: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct TableGet<T> {
+    pub result: Slot,
+    pub index: T,
+    pub table: Table,
+}
+
+impl<T: Decode> Decode for TableGet<T> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            index: Decode::decode(decoder),
+            table: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct TableSet<I, V> {
+    pub table: Table,
+    pub index: I,
+    pub value: V,
+}
+
+impl<I: Decode, V: Decode> Decode for TableSet<I, V> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            table: Decode::decode(decoder),
+            index: Decode::decode(decoder),
+            value: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+#[cfg(feature = "simd")]
+pub struct V128ReplaceLaneOp<V, const N: u8> {
+    pub result: Slot,
+    pub v128: Slot,
+    pub value: V,
+    pub lane: ImmLaneIdx<N>,
+}
+
+#[cfg(feature = "simd")]
+impl<V: Decode, const N: u8> Decode for V128ReplaceLaneOp<V, N> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            v128: Decode::decode(decoder),
+            value: Decode::decode(decoder),
+            lane: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+#[cfg(feature = "simd")]
+pub struct V128LoadLaneOp_Ss<LaneIdx> {
+    pub result: Slot,
+    pub ptr: Slot,
+    pub offset: u64,
+    pub memory: Memory,
+    pub v128: Slot,
+    pub lane: LaneIdx,
+}
+
+#[cfg(feature = "simd")]
+impl<LaneIdx: Decode> Decode for V128LoadLaneOp_Ss<LaneIdx> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            ptr: Decode::decode(decoder),
+            offset: Decode::decode(decoder),
+            memory: Decode::decode(decoder),
+            v128: Decode::decode(decoder),
+            lane: Decode::decode(decoder),
+        }
+    }
+}
+
+#[derive(Copy, Clone)]
+#[cfg(feature = "simd")]
+pub struct V128LoadLaneOpMem0Offset16_Ss<LaneIdx> {
+    pub result: Slot,
+    pub ptr: Slot,
+    pub offset: Offset16,
+    pub v128: Slot,
+    pub lane: LaneIdx,
+}
+
+#[cfg(feature = "simd")]
+impl<LaneIdx: Decode> Decode for V128LoadLaneOpMem0Offset16_Ss<LaneIdx> {
+    unsafe fn decode<D: Decoder>(decoder: &mut D) -> Self {
+        Self {
+            result: Decode::decode(decoder),
+            ptr: Decode::decode(decoder),
+            offset: Decode::decode(decoder),
+            v128: Decode::decode(decoder),
+            lane: Decode::decode(decoder),
+        }
+    }
+}
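Since every payload struct decodes its fields in declaration order, a `BinaryOp<Slot, i32>` occupies `u16 | u16 | i32` in the byte stream. Illustrative usage, reusing the hypothetical `SliceDecoder` sketched earlier:

    let mut bytes = Vec::new();
    bytes.extend(0_u16.to_ne_bytes()); // result: Slot(0)
    bytes.extend(1_u16.to_ne_bytes()); // lhs: Slot(1)
    bytes.extend(7_i32.to_ne_bytes()); // rhs: immediate 7
    let mut decoder = SliceDecoder { bytes: &bytes };
    let op: BinaryOp<Slot, i32> = unsafe { Decode::decode(&mut decoder) };
    assert_eq!(op.rhs, 7);
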
diff --git a/crates/ir2/src/encode.rs b/crates/ir2/src/encode.rs
new file mode 100644
index 0000000000..e82c8d6dd2
--- /dev/null
+++ b/crates/ir2/src/encode.rs
@@ -0,0 +1,206 @@
+#[cfg(feature = "simd")]
+use crate::core::simd::ImmLaneIdx;
+use crate::{
+    core::TrapCode,
+    index::{Data, Elem, Func, FuncType, Global, InternalFunc, Memory, Table},
+    Address,
+    BlockFuel,
+    BoundedSlotSpan,
+    BranchOffset,
+    BranchTableTarget,
+    FixedSlotSpan,
+    Offset16,
+    Op,
+    OpCode,
+    Sign,
+    Slot,
+    SlotSpan,
+};
+use core::num::NonZero;
+
+/// Types that can encode types that implement [`Encode`].
+pub trait Encoder {
+    /// Position of encoded items.
+    type Pos: Copy;
+    /// Errors that may be returned during encoding.
+    type Error;
+
+    /// Writes `bytes` to the encoder.
+    ///
+    /// # Errors
+    ///
+    /// If the encoder cannot encode more `bytes`.
+    fn write_bytes(&mut self, bytes: &[u8]) -> Result<Self::Pos, Self::Error>;
+
+    /// Encodes the [`OpCode`] to `self`.
+    ///
+    /// # Note
+    ///
+    /// This API allows the encoder to customize the encoding of [`OpCode`], e.g. to
+    /// allow for direct or indirect threading encodings where the [`OpCode`] is
+    /// encoded as a function pointer or as a `u16` value, respectively.
+    fn encode_op_code(&mut self, code: OpCode) -> Result<Self::Pos, Self::Error>;
+
+    /// Registers an encoded [`BranchOffset`] to the encoder.
+    ///
+    /// # Errors
+    ///
+    /// If the encoder cannot register the `branch_offset`.
+    fn branch_offset(
+        &mut self,
+        pos: Self::Pos,
+        branch_offset: BranchOffset,
+    ) -> Result<(), Self::Error>;
+}
+
+/// Types that can be encoded by types that implement [`Encoder`].
+pub trait Encode {
+    /// Encodes `self` to `encoder` and returns its position within the `encoder`.
+    fn encode<E>(&self, encoder: &mut E) -> Result<E::Pos, E::Error>
+    where
+        E: Encoder;
+}
+
+impl Encode for OpCode {
+    fn encode<E>(&self, encoder: &mut E) -> Result<E::Pos, E::Error>
+    where
+        E: Encoder,
+    {
+        encoder.encode_op_code(*self)
+    }
+}
+
+impl Encode for BranchOffset {
+    fn encode<E>(&self, encoder: &mut E) -> Result<E::Pos, E::Error>
+    where
+        E: Encoder,
+    {
+        let pos = self.to_i32().encode(encoder)?;
+        encoder.branch_offset(pos, *self)?;
+        Ok(pos)
+    }
+}
+
+impl Encode for BoundedSlotSpan {
+    fn encode<E: Encoder>(&self, encoder: &mut E) -> Result<E::Pos, E::Error> {
+        (self.span(), self.len()).encode(encoder)
+    }
+}
+
+impl<const N: u16> Encode for FixedSlotSpan<N> {
+    fn encode<E: Encoder>(&self, encoder: &mut E) -> Result<E::Pos, E::Error> {
+        self.span().encode(encoder)
+    }
+}
+
+impl Encode for BranchTableTarget {
+    fn encode<E: Encoder>(&self, encoder: &mut E) -> Result<E::Pos, E::Error> {
+        (self.results, self.offset).encode(encoder)
+    }
+}
+
+macro_rules! impl_encode_for_primitive {
+    ( $($ty:ty),* $(,)? ) => {
+        $(
+            impl Encode for $ty {
+                fn encode<E: Encoder>(&self, encoder: &mut E) -> Result<E::Pos, E::Error> {
+                    encoder.write_bytes(&self.to_ne_bytes())
+                }
+            }
+        )*
+    };
+}
+impl_encode_for_primitive!(
+    u8, u16, u32, u64, u128, usize, i8, i16, i32, i64, i128, isize, f32, f64
+);
+
+macro_rules! impl_encode_using {
+    ( $($ty:ty as $prim:ty = $e:expr),* $(,)? ) => {
+        $(
+            impl Encode for $ty {
+                fn encode<E: Encoder>(&self, encoder: &mut E) -> Result<E::Pos, E::Error> {
+                    let conv = |value: &Self| -> $prim { $e(*value) };
+                    conv(self).encode(encoder)
+                }
+            }
+        )*
+    };
+}
+impl_encode_using! {
+    bool as u8 = Into::into,
+    Offset16 as u16 = Into::into,
+    BlockFuel as u64 = Into::into,
+    Address as u64 = Into::into,
+    Slot as u16 = Into::into,
+    Func as u32 = Into::into,
+    FuncType as u32 = Into::into,
+    InternalFunc as u32 = Into::into,
+    Global as u32 = Into::into,
+    Memory as u16 = Into::into,
+    Table as u32 = Into::into,
+    Data as u32 = Into::into,
+    Elem as u32 = Into::into,
+
+    Sign<f32> as bool = Sign::is_positive,
+    Sign<f64> as bool = Sign::is_positive,
+    SlotSpan as Slot = SlotSpan::head,
+    NonZero<u32> as u32 = NonZero::get,
+    NonZero<u64> as u64 = NonZero::get,
+    TrapCode as u8 = |code: TrapCode| -> u8 { code as _ },
+}
+
+#[cfg(feature = "simd")]
+impl<const N: u8> Encode for ImmLaneIdx<N> {
+    fn encode<E>(&self, encoder: &mut E) -> Result<E::Pos, E::Error>
+    where
+        E: Encoder,
+    {
+        u8::from(*self).encode(encoder)
+    }
+}
+
+macro_rules! for_tuple {
+    ( $mac:ident ) => {
+        $mac! { T0 }
+        $mac! { T0, T1 }
+        $mac! { T0, T1, T2 }
+        $mac! { T0, T1, T2, T3 }
+        $mac! { T0, T1, T2, T3, T4 }
+        $mac! { T0, T1, T2, T3, T4, T5 }
+        $mac! { T0, T1, T2, T3, T4, T5, T6 }
+    };
+}
+macro_rules! impl_encode_for_tuple {
+    ( $t0:ident $(, $t:ident)* $(,)? ) => {
+        impl<$t0: Encode $(, $t: Encode)*> Encode for ($t0, $($t,)*) {
+            fn encode<E: Encoder>(&self, encoder: &mut E) -> Result<E::Pos, E::Error> {
+                #[allow(non_snake_case)]
+                let ($t0, $($t,)*) = self;
+                let pos = $t0.encode(encoder)?;
+                $( $t.encode(encoder)?; )*
+                Ok(pos)
+            }
+        }
+    };
+}
+for_tuple!(impl_encode_for_tuple);
+
+impl<T: Encode> Encode for &'_ T {
+    fn encode<E: Encoder>(&self, encoder: &mut E) -> Result<E::Pos, E::Error> {
+        <T as Encode>::encode(*self, encoder)
+    }
+}
+
+impl<T: Encode, const N: usize> Encode for [T; N] {
+    fn encode<E: Encoder>(&self, encoder: &mut E) -> Result<E::Pos, E::Error> {
+        let Some((first, rest)) = self.split_first() else {
+            panic!("cannot encode zero-sized arrays")
+        };
+        let pos = first.encode(encoder)?;
+        for item in rest {
+            item.encode(encoder)?;
+        }
+        Ok(pos)
+    }
+}
+
+include!(concat!(env!("OUT_DIR"), "/encode.rs"));
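A minimal sketch of an `Encoder` writing into a growable buffer; `VecEncoder` is hypothetical and not part of this diff. It uses the byte offset as `Pos`, encodes `OpCode` as a plain `u16`, and merely records branch offsets for later patching:

    pub struct VecEncoder {
        bytes: Vec<u8>,
        branch_offsets: Vec<(usize, BranchOffset)>,
    }

    impl Encoder for VecEncoder {
        type Pos = usize;
        type Error = core::convert::Infallible;

        fn write_bytes(&mut self, bytes: &[u8]) -> Result<Self::Pos, Self::Error> {
            let pos = self.bytes.len();
            self.bytes.extend_from_slice(bytes);
            Ok(pos)
        }

        fn encode_op_code(&mut self, code: OpCode) -> Result<Self::Pos, Self::Error> {
            // A threaded-code encoder could write a function pointer instead.
            self.write_bytes(&u16::from(code).to_ne_bytes())
        }

        fn branch_offset(
            &mut self,
            pos: Self::Pos,
            branch_offset: BranchOffset,
        ) -> Result<(), Self::Error> {
            self.branch_offsets.push((pos, branch_offset));
            Ok(())
        }
    }
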
diff --git a/crates/ir2/src/error.rs b/crates/ir2/src/error.rs
new file mode 100644
index 0000000000..b515b23f3a
--- /dev/null
+++ b/crates/ir2/src/error.rs
@@ -0,0 +1,24 @@
+use core::fmt;
+
+/// An error that may occur when operating on Wasmi IR primitives.
+#[derive(Debug)]
+pub enum Error {
+    /// Encountered when trying to create a [`Slot`](crate::Slot) from an out of bounds integer.
+    StackSlotOutOfBounds,
+    /// Encountered when trying to create a [`BranchOffset`](crate::BranchOffset) from an out of bounds integer.
+    BranchOffsetOutOfBounds,
+    /// Encountered when trying to create a [`BlockFuel`](crate::BlockFuel) from an out of bounds integer.
+    BlockFuelOutOfBounds,
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::StackSlotOutOfBounds => write!(f, "stack slot out of bounds"),
+            Self::BranchOffsetOutOfBounds => write!(f, "branch offset out of bounds"),
+            Self::BlockFuelOutOfBounds => write!(f, "block fuel out of bounds"),
+        }
+    }
+}
+
+impl core::error::Error for Error {}
diff --git a/crates/ir2/src/index.rs b/crates/ir2/src/index.rs
new file mode 100644
index 0000000000..aed5db1503
--- /dev/null
+++ b/crates/ir2/src/index.rs
@@ -0,0 +1,105 @@
+//! Definitions for thin-wrapper index types.
+
+use crate::Error;
+
+macro_rules! for_each_index {
+    ($mac:ident) => {
+        $mac! {
+            /// A Wasmi stack slot.
+            Slot(pub(crate) u16);
+            /// A Wasm function index.
+            Func(pub(crate) u32);
+            /// A Wasm function type index.
+            FuncType(pub(crate) u32);
+            /// A Wasmi internal function index.
+            InternalFunc(pub(crate) u32);
+            /// A Wasm global variable index.
+            Global(pub(crate) u32);
+            /// A Wasm linear memory index.
+            Memory(pub(crate) u16);
+            /// A Wasm table index.
+            Table(pub(crate) u32);
+            /// A Wasm data segment index.
+            Data(pub(crate) u32);
+            /// A Wasm element segment index.
+            Elem(pub(crate) u32);
+        }
+    };
+}
+
+impl Memory {
+    /// Returns `true` if `self` refers to the default linear memory, which is always at index 0.
+    pub fn is_default(&self) -> bool {
+        self.0 == 0
+    }
+}
+
+macro_rules! define_index {
+    (
+        $(
+            $( #[$docs:meta] )*
+            $name:ident($vis:vis $ty:ty)
+        );* $(;)?
+    ) => {
+        $(
+            $( #[$docs] )*
+            #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+            pub struct $name($vis $ty);
+
+            impl From<$name> for $ty {
+                fn from(value: $name) -> $ty {
+                    value.0
+                }
+            }
+
+            impl From<$ty> for $name {
+                fn from(value: $ty) -> Self {
+                    Self(value)
+                }
+            }
+        )*
+    };
+}
+for_each_index!(define_index);
+
+impl TryFrom<u32> for Slot {
+    type Error = Error;
+
+    fn try_from(local_index: u32) -> Result<Self, Self::Error> {
+        u16::try_from(local_index)
+            .map_err(|_| Error::StackSlotOutOfBounds)
+            .map(Self::from)
+    }
+}
+
+impl Slot {
+    /// Returns the n-th next [`Slot`] from `self` with contiguous index.
+    ///
+    /// # Note
+    ///
+    /// - Calling this with `n == 0` just returns `self`.
+    /// - This has wrapping semantics with respect to the underlying index.
+    pub fn next_n(self, n: u16) -> Self {
+        Self(self.0.wrapping_add(n))
+    }
+
+    /// Returns the n-th previous [`Slot`] from `self` with contiguous index.
+    ///
+    /// # Note
+    ///
+    /// - Calling this with `n == 0` just returns `self`.
+    /// - This has wrapping semantics with respect to the underlying index.
+    pub fn prev_n(self, n: u16) -> Self {
+        Self(self.0.wrapping_sub(n))
+    }
+
+    /// Returns the [`Slot`] with the next contiguous index.
+    pub fn next(self) -> Self {
+        self.next_n(1)
+    }
+
+    /// Returns the [`Slot`] with the previous contiguous index.
+    pub fn prev(self) -> Self {
+        self.prev_n(1)
+    }
+}
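Illustrative (not part of the diff): `next_n`/`prev_n` deliberately wrap on `u16` overflow, as their docs state:

    let slot = Slot::from(5_u16);
    assert_eq!(slot.next(), Slot::from(6_u16));
    assert_eq!(Slot::from(u16::MAX).next_n(1), Slot::from(0_u16));
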
diff --git a/crates/ir2/src/lib.rs b/crates/ir2/src/lib.rs
new file mode 100644
index 0000000000..10c232cdb0
--- /dev/null
+++ b/crates/ir2/src/lib.rs
@@ -0,0 +1,28 @@
+#![no_std]
+
+extern crate alloc;
+#[cfg(feature = "std")]
+extern crate std;
+
+pub mod decode;
+mod encode;
+mod error;
+pub mod index;
+mod op;
+mod opcode;
+mod primitive;
+mod span;
+
+use wasmi_core as core;
+
+#[doc(inline)]
+pub use self::{
+    decode::{Decode, Decoder},
+    encode::{Encode, Encoder},
+    error::Error,
+    index::Slot,
+    op::Op,
+    opcode::{InvalidOpCode, OpCode},
+    primitive::{Address, BlockFuel, BranchOffset, BranchTableTarget, Offset16, Sign},
+    span::{BoundedSlotSpan, FixedSlotSpan, SlotSpan, SlotSpanIter},
+};
diff --git a/crates/ir2/src/op.rs b/crates/ir2/src/op.rs
new file mode 100644
index 0000000000..4eb10d1c72
--- /dev/null
+++ b/crates/ir2/src/op.rs
@@ -0,0 +1,30 @@
+#[cfg(feature = "simd")]
+use crate::core::simd::ImmLaneIdx;
+use crate::{
+    core::TrapCode,
+    index::{Data, Elem, Func, FuncType, Global, InternalFunc, Memory, Table},
+    Address,
+    BlockFuel,
+    BranchOffset,
+    FixedSlotSpan,
+    Offset16,
+    Sign,
+    Slot,
+    SlotSpan,
+};
+use core::num::NonZero;
+
+include!(concat!(env!("OUT_DIR"), "/op.rs"));
+
+impl Copy for Op {}
+impl Clone for Op {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+#[test]
+fn op_size_of_and_alignment() {
+    assert_eq!(core::mem::size_of::<Op>(), 24);
+    assert_eq!(core::mem::align_of::<Op>(), 8);
+}
diff --git a/crates/ir2/src/opcode.rs b/crates/ir2/src/opcode.rs
new file mode 100644
index 0000000000..e78729c4e0
--- /dev/null
+++ b/crates/ir2/src/opcode.rs
@@ -0,0 +1,19 @@
+use crate::Op;
+
+include!(concat!(env!("OUT_DIR"), "/op_code.rs"));
+
+impl Copy for OpCode {}
+impl Clone for OpCode {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+impl From<OpCode> for u16 {
+    fn from(code: OpCode) -> Self {
+        code as u16
+    }
+}
+
+/// Indicates an invalid `u16` value for an [`OpCode`].
+#[derive(Debug, Copy, Clone)]
+pub struct InvalidOpCode;
diff --git a/crates/ir2/src/primitive.rs b/crates/ir2/src/primitive.rs
new file mode 100644
index 0000000000..037830aa00
--- /dev/null
+++ b/crates/ir2/src/primitive.rs
@@ -0,0 +1,247 @@
+use crate::{Error, SlotSpan};
+use core::marker::PhantomData;
+
+/// An [`Op::BranchTableSpan`](crate::Op::BranchTableSpan) branching target.
+#[derive(Debug, Copy, Clone)]
+pub struct BranchTableTarget {
+    /// The result stack slots of the branch target.
+    pub results: SlotSpan,
+    /// The offset to branch to for the target.
+    pub offset: BranchOffset,
+}
+
+impl BranchTableTarget {
+    /// Creates a new [`BranchTableTarget`] for `results` and `offset`.
+    pub fn new(results: SlotSpan, offset: BranchOffset) -> Self {
+        Self { results, offset }
+    }
+}
+
+/// Error that may occur upon converting values to [`Address`] and [`Offset16`].
+#[derive(Debug, Copy, Clone)]
+pub struct OutOfBoundsConst;
+
+/// The sign of a value.
+#[derive(Debug)]
+pub struct Sign<T> {
+    /// Whether the sign value is positive.
+    pub(crate) is_positive: bool,
+    /// Required for the Rust compiler.
+    marker: PhantomData<fn() -> T>,
+}
+
+impl<T> Clone for Sign<T> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<T> Copy for Sign<T> {}
+
+impl<T> PartialEq for Sign<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.is_positive == other.is_positive
+    }
+}
+
+impl<T> Eq for Sign<T> {}
+
+impl<T> Sign<T> {
+    /// Creates a new typed [`Sign`] with the given value.
+    pub(crate) fn new(is_positive: bool) -> Self {
+        Self {
+            is_positive,
+            marker: PhantomData,
+        }
+    }
+
+    /// Creates a new typed [`Sign`] that has positive polarity.
+    pub fn pos() -> Self {
+        Self::new(true)
+    }
+
+    /// Creates a new typed [`Sign`] that has negative polarity.
+    pub fn neg() -> Self {
+        Self::new(false)
+    }
+
+    /// Returns `true` if [`Sign`] is positive.
+    pub(crate) fn is_positive(self) -> bool {
+        self.is_positive
+    }
+}
+
+macro_rules! impl_sign_for {
+    ( $($ty:ty),* $(,)? ) => {
+        $(
+            impl From<$ty> for Sign<$ty> {
+                fn from(value: $ty) -> Self {
+                    Self::new(value.is_sign_positive())
+                }
+            }
+
+            impl From<Sign<$ty>> for $ty {
+                fn from(sign: Sign<$ty>) -> Self {
+                    match sign.is_positive {
+                        true => 1.0,
+                        false => -1.0,
+                    }
+                }
+            }
+        )*
+    };
+}
+impl_sign_for!(f32, f64);
+
+/// A signed offset for branch instructions.
+///
+/// This defines how much the instruction pointer is offset
+/// upon taking the respective branch.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub struct BranchOffset(i32);
+
+impl From<i32> for BranchOffset {
+    fn from(index: i32) -> Self {
+        Self(index)
+    }
+}
+
+impl BranchOffset {
+    /// Creates an uninitialized [`BranchOffset`].
+    pub fn uninit() -> Self {
+        Self(0)
+    }
+
+    /// Creates an initialized [`BranchOffset`] from `src` to `dst`.
+    ///
+    /// # Errors
+    ///
+    /// If the resulting [`BranchOffset`] is out of bounds.
+    pub fn from_src_to_dst(src: u32, dst: u32) -> Result<Self, Error> {
+        let src = i64::from(src);
+        let dst = i64::from(dst);
+        let Some(offset) = dst.checked_sub(src) else {
+            // Note: This never needs to be called on backwards branches since they are immediately resolved.
+            unreachable!(
+                "offset for forward branches must have `src` be smaller than or equal to `dst`"
+            );
+        };
+        let Ok(offset) = i32::try_from(offset) else {
+            return Err(Error::BranchOffsetOutOfBounds);
+        };
+        Ok(Self(offset))
+    }
+
+    /// Returns `true` if the [`BranchOffset`] has been initialized.
+    pub fn is_init(self) -> bool {
+        self.to_i32() != 0
+    }
+
+    /// Initializes the [`BranchOffset`] with a proper value.
+    ///
+    /// # Panics
+    ///
+    /// - If the [`BranchOffset`] has already been initialized.
+    /// - If the given [`BranchOffset`] is not properly initialized.
+    pub fn init(&mut self, valid_offset: BranchOffset) {
+        assert!(valid_offset.is_init());
+        assert!(!self.is_init());
+        *self = valid_offset;
+    }
+
+    /// Returns the `i32` representation of the [`BranchOffset`].
+    pub fn to_i32(self) -> i32 {
+        self.0
+    }
+}
+
+/// The accumulated fuel to execute a block via [`Op::ConsumeFuel`].
+///
+/// [`Op::ConsumeFuel`]: crate::Op::ConsumeFuel
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct BlockFuel(u64);
+
+impl From<u64> for BlockFuel {
+    fn from(value: u64) -> Self {
+        Self(value)
+    }
+}
+
+impl From<BlockFuel> for u64 {
+    fn from(value: BlockFuel) -> Self {
+        value.0
+    }
+}
+
+impl BlockFuel {
+    /// Bump the fuel by `amount` if possible.
+    ///
+    /// # Errors
+    ///
+    /// If the new fuel amount after this operation is out of bounds.
+    pub fn bump_by(&mut self, amount: u64) -> Result<(), Error> {
+        self.0 = u64::from(*self)
+            .checked_add(amount)
+            .ok_or(Error::BlockFuelOutOfBounds)?;
+        Ok(())
+    }
+}
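Illustrative (not part of the diff): `BlockFuel::bump_by` fails instead of wrapping on overflow:

    let mut fuel = BlockFuel::from(10_u64);
    fuel.bump_by(5).unwrap();
    assert_eq!(u64::from(fuel), 15);
    assert!(BlockFuel::from(u64::MAX).bump_by(1).is_err());
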
+
+/// A 64-bit memory address used for some load and store instructions.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct Address(u64);
+
+impl TryFrom<u64> for Address {
+    type Error = OutOfBoundsConst;
+
+    fn try_from(address: u64) -> Result<Self, Self::Error> {
+        if usize::try_from(address).is_err() {
+            return Err(OutOfBoundsConst);
+        }
+        Ok(Self(address))
+    }
+}
+
+impl From<Address> for usize {
+    fn from(address: Address) -> Self {
+        // Note: no checks are needed since the `TryFrom<u64>` constructor
+        // already ensured that this `Address` losslessly casts to `usize`.
+        debug_assert!(usize::try_from(address.0).is_ok());
+        address.0 as usize
+    }
+}
+
+impl From<Address> for u64 {
+    fn from(address: Address) -> Self {
+        address.0
+    }
+}
+
+/// A 16-bit encoded load or store address offset.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[repr(transparent)]
+pub struct Offset16(u16);
+
+impl TryFrom<u64> for Offset16 {
+    type Error = OutOfBoundsConst;
+
+    fn try_from(address: u64) -> Result<Self, Self::Error> {
+        u16::try_from(address)
+            .map(Self)
+            .map_err(|_| OutOfBoundsConst)
+    }
+}
+
+impl From<u16> for Offset16 {
+    fn from(offset: u16) -> Self {
+        Self(offset)
+    }
+}
+
+impl From<Offset16> for u16 {
+    fn from(offset: Offset16) -> Self {
+        offset.0
+    }
+}
diff --git a/crates/ir2/src/span.rs b/crates/ir2/src/span.rs
new file mode 100644
index 0000000000..b209f4bf67
--- /dev/null
+++ b/crates/ir2/src/span.rs
@@ -0,0 +1,347 @@
+use crate::{Error, Slot};
+
+/// A [`SlotSpan`] of contiguous [`Slot`] indices.
+///
+/// # Note
+///
+/// - Represents a number of contiguous [`Slot`] indices.
+/// - For the sake of space efficiency the actual number of [`Slot`]s
+///   of the [`SlotSpan`] is stored externally and provided in
+///   [`SlotSpan::iter`] when there is a need to iterate over
+///   the [`Slot`]s of the [`SlotSpan`].
+///
+/// The caller is responsible for providing the correct length.
+/// Since bytecode construction is guided by Wasm validation, we assert
+/// that the externally stored length is valid.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+#[repr(transparent)]
+pub struct SlotSpan(Slot);
+
+impl SlotSpan {
+    /// Creates a new [`SlotSpan`] starting at the given `head` [`Slot`].
+    pub fn new(head: Slot) -> Self {
+        Self(head)
+    }
+
+    /// Returns a [`SlotSpanIter`] yielding `len` [`Slot`]s.
+    pub fn iter_sized(self, len: usize) -> SlotSpanIter {
+        SlotSpanIter::new(self.0, len)
+    }
+
+    /// Returns a [`SlotSpanIter`] yielding `len` [`Slot`]s.
+    pub fn iter(self, len: u16) -> SlotSpanIter {
+        SlotSpanIter::new_u16(self.0, len)
+    }
+
+    /// Returns the head [`Slot`] of the [`SlotSpan`].
+    pub fn head(self) -> Slot {
+        self.0
+    }
+
+    /// Returns an exclusive reference to the head [`Slot`] of the [`SlotSpan`].
+    pub fn head_mut(&mut self) -> &mut Slot {
+        &mut self.0
+    }
+
+    /// Returns `true` if `copy_span results <- values` has overlapping copies.
+    ///
+    /// # Examples
+    ///
+    /// - `[ ]`: empty never overlaps
+    /// - `[ 1 <- 0 ]`: single element never overlaps
+    /// - `[ 0 <- 1, 1 <- 2, 2 <- 3 ]`: no overlap
+    /// - `[ 1 <- 0, 2 <- 1 ]`: overlaps!
+    pub fn has_overlapping_copies(results: Self, values: Self, len: u16) -> bool {
+        SlotSpanIter::has_overlapping_copies(results.iter(len), values.iter(len))
+    }
+}
+
+/// A [`SlotSpan`] with a statically known number of [`Slot`]s.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+#[repr(transparent)]
+pub struct FixedSlotSpan<const N: u16> {
+    /// The underlying [`SlotSpan`] without the known length.
+    span: SlotSpan,
+}
+
+impl FixedSlotSpan<2> {
+    /// Returns an array of the results represented by `self`.
+    pub fn to_array(self) -> [Slot; 2] {
+        let span = self.span();
+        let fst = span.head();
+        let snd = fst.next();
+        [fst, snd]
+    }
+}
+
+impl<const N: u16> FixedSlotSpan<N> {
+    /// Creates a new [`FixedSlotSpan`] from the given `span`.
+    pub fn new(span: SlotSpan) -> Result<Self, Error> {
+        let head = span.head();
+        if head >= head.next_n(N) {
+            return Err(Error::StackSlotOutOfBounds);
+        }
+        Ok(Self { span })
+    }
+
+    /// Creates a new [`FixedSlotSpan`] from the given `span` without checks.
+    ///
+    /// # Safety
+    ///
+    /// The caller is responsible for making sure that `span` is valid for a length of `N`.
+    pub unsafe fn new_unchecked(span: SlotSpan) -> Self {
+        Self { span }
+    }
+
+    /// Returns a [`SlotSpanIter`] yielding `N` [`Slot`]s.
+    pub fn iter(&self) -> SlotSpanIter {
+        self.span.iter(self.len())
+    }
+
+    /// Creates a new [`BoundedSlotSpan`] from `self`.
+    pub fn bounded(self) -> BoundedSlotSpan {
+        BoundedSlotSpan {
+            span: self.span,
+            len: N,
+        }
+    }
+
+    /// Returns the underlying [`SlotSpan`] of `self`.
+    pub fn span(self) -> SlotSpan {
+        self.span
+    }
+
+    /// Returns an exclusive reference to the underlying [`SlotSpan`] of `self`.
+    pub fn span_mut(&mut self) -> &mut SlotSpan {
+        &mut self.span
+    }
+
+    /// Returns `true` if the [`Slot`] is contained in `self`.
+    pub fn contains(self, reg: Slot) -> bool {
+        if self.is_empty() {
+            return false;
+        }
+        let min = self.span.head();
+        let max = min.next_n(N);
+        min <= reg && reg < max
+    }
+
+    /// Returns the number of [`Slot`]s in `self`.
+    pub fn len(self) -> u16 {
+        N
+    }
+
+    /// Returns `true` if `self` is empty.
+    pub fn is_empty(self) -> bool {
+        N == 0
+    }
+}
+
+impl<const N: u16> IntoIterator for &FixedSlotSpan<N> {
+    type Item = Slot;
+    type IntoIter = SlotSpanIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<const N: u16> IntoIterator for FixedSlotSpan<N> {
+    type Item = Slot;
+    type IntoIter = SlotSpanIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+/// A [`SlotSpan`] with a known number of [`Slot`]s.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub struct BoundedSlotSpan {
+    /// The first [`Slot`] in `self`.
+    span: SlotSpan,
+    /// The number of [`Slot`]s in `self`.
+    len: u16,
+}
+
+impl BoundedSlotSpan {
+    /// Creates a new [`BoundedSlotSpan`] from the given `span` and `len`.
+    pub fn new(span: SlotSpan, len: u16) -> Self {
+        Self { span, len }
+    }
+
+    /// Returns a [`SlotSpanIter`] yielding `len` [`Slot`]s.
+    pub fn iter(&self) -> SlotSpanIter {
+        self.span.iter(self.len())
+    }
+
+    /// Returns `self` as an unbounded [`SlotSpan`].
+    pub fn span(&self) -> SlotSpan {
+        self.span
+    }
+
+    /// Returns a mutable reference to the underlying [`SlotSpan`].
+    pub fn span_mut(&mut self) -> &mut SlotSpan {
+        &mut self.span
+    }
+
+    /// Returns `true` if the [`Slot`] is contained in `self`.
+    pub fn contains(self, reg: Slot) -> bool {
+        if self.is_empty() {
+            return false;
+        }
+        let min = self.span.head();
+        let max = min.next_n(self.len);
+        min <= reg && reg < max
+    }
+
+    /// Returns the number of [`Slot`]s in `self`.
+    pub fn len(&self) -> u16 {
+        self.len
+    }
+
+    /// Returns `true` if `self` is empty.
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+}
+
+impl IntoIterator for &BoundedSlotSpan {
+    type Item = Slot;
+    type IntoIter = SlotSpanIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl IntoIterator for BoundedSlotSpan {
+    type Item = Slot;
+    type IntoIter = SlotSpanIter;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+/// A [`SlotSpanIter`] iterator yielding contiguous [`Slot`]s.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub struct SlotSpanIter {
+    /// The next [`Slot`] in the [`SlotSpanIter`].
+    next: Slot,
+    /// The last [`Slot`] in the [`SlotSpanIter`].
+    last: Slot,
+}
+
+impl SlotSpanIter {
+    /// Creates a [`SlotSpanIter`] from the given raw `start` and `end` [`Slot`]s.
+    pub fn from_raw_parts(start: Slot, end: Slot) -> Self {
+        debug_assert!(u16::from(start) <= u16::from(end));
+        Self {
+            next: start,
+            last: end,
+        }
+    }
+
+    /// Creates a new [`SlotSpanIter`] for the given `start` [`Slot`] and length `len`.
+    ///
+    /// # Panics
+    ///
+    /// If the `start..end` [`Slot`] span indices are out of bounds.
+    fn new(start: Slot, len: usize) -> Self {
+        let len = u16::try_from(len)
+            .unwrap_or_else(|_| panic!("out of bounds length for slot span: {len}"));
+        Self::new_u16(start, len)
+    }
+
+    /// Creates a new [`SlotSpanIter`] for the given `start` [`Slot`] and length `len`.
+    ///
+    /// # Panics
+    ///
+    /// If the `start..end` [`Slot`] span indices are out of bounds.
+    fn new_u16(start: Slot, len: u16) -> Self {
+        let next = start;
+        let last = start
+            .0
+            .checked_add(len)
+            .map(Slot)
+            .expect("overflowing slot index for slot span");
+        Self::from_raw_parts(next, last)
+    }
+
+    /// Creates a [`SlotSpan`] from this [`SlotSpanIter`].
+    pub fn span(self) -> SlotSpan {
+        SlotSpan(self.next)
+    }
+
+    /// Returns the remaining number of [`Slot`]s yielded by the [`SlotSpanIter`].
+    pub fn len_as_u16(&self) -> u16 {
+        self.last.0.abs_diff(self.next.0)
+    }
+
+    /// Returns `true` if `self` yields no more [`Slot`]s.
+    pub fn is_empty(&self) -> bool {
+        self.len_as_u16() == 0
+    }
+
+    /// Returns `true` if `copy_span results <- values` has overlapping copies.
+    ///
+    /// # Examples
+    ///
+    /// - `[ ]`: empty never overlaps
+    /// - `[ 1 <- 0 ]`: single element never overlaps
+    /// - `[ 0 <- 1, 1 <- 2, 2 <- 3 ]`: no overlap
+    /// - `[ 1 <- 0, 2 <- 1 ]`: overlaps!
+    pub fn has_overlapping_copies(results: Self, values: Self) -> bool {
+        assert_eq!(
+            results.len(),
+            values.len(),
+            "cannot copy between differently sized slot spans"
+        );
+        let len = results.len();
+        if len <= 1 {
+            // Empty spans or single-element spans can never overlap.
+            return false;
+        }
+        let first_value = values.span().head();
+        let first_result = results.span().head();
+        if first_value >= first_result {
+            // This case can never result in overlapping copies.
+            return false;
+        }
+        let mut values = values;
+        let last_value = values
+            .next_back()
+            .expect("span is non-empty and thus must return");
+        last_value >= first_result
+    }
+}
+
+impl Iterator for SlotSpanIter {
+    type Item = Slot;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.next == self.last {
+            return None;
+        }
+        let reg = self.next;
+        self.next = self.next.next();
+        Some(reg)
+    }
+}
+
+impl DoubleEndedIterator for SlotSpanIter {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        if self.next == self.last {
+            return None;
+        }
+        self.last = self.last.prev();
+        Some(self.last)
+    }
+}
+
+impl ExactSizeIterator for SlotSpanIter {
+    fn len(&self) -> usize {
+        usize::from(SlotSpanIter::len_as_u16(self))
+    }
+}
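Illustrative (not part of the diff): the documented overlap rules as executable checks; `[1 <- 0, 2 <- 1]` overlaps because slot 1 is both written and later read, while shifting values downward never overlaps:

    let results = SlotSpan::new(Slot::from(1_u16));
    let values = SlotSpan::new(Slot::from(0_u16));
    assert!(SlotSpan::has_overlapping_copies(results, values, 2));

    let results = SlotSpan::new(Slot::from(0_u16));
    let values = SlotSpan::new(Slot::from(1_u16));
    assert!(!SlotSpan::has_overlapping_copies(results, values, 3));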