Skip to content

Commit d832870

Browse files
Honry authored and jeffkilpatrick committed
[WebNN] Fix some spelling and naming issues (microsoft#25433)
1 parent f04c942 commit d832870

19 files changed

+51
-51
lines changed

onnxruntime/core/providers/webnn/builders/helper.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ inline std::string GetTensorName(const ConstPointerContainer<std::vector<NodeArg
8484
}
8585

8686
template <typename T>
87-
inline std::vector<T> GetNarrowedIntfromInt64(gsl::span<const int64_t> int64_vec) {
87+
inline std::vector<T> GetNarrowedIntFromInt64(gsl::span<const int64_t> int64_vec) {
8888
std::vector<T> vec;
8989
vec.reserve(int64_vec.size());
9090
std::transform(int64_vec.begin(), int64_vec.end(),

onnxruntime/core/providers/webnn/builders/impl/attention_helper.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,8 @@ namespace webnn {
99
/*
1010
ScaledDotProductAttention Subgraph: The basis for MultiHeadAttention and GroupQueryAttention
1111
inputs: query, key, value, scale, attention mask, and reshape_output_shape (for reshape)
12-
Abbreviatios: B is batch_size, S is query sequence_length, kv_S is key/value sequence length,
13-
N is number of attention heads, H is head size, W is hidden_size
12+
Abbreviations: B is batch_size, S is query sequence_length, kv_S is key/value sequence length,
13+
N is number of attention heads, H is head size, W is hidden_size
1414
1515
query key
1616
| |

onnxruntime/core/providers/webnn/builders/impl/base_op_builder.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ class BaseOpBuilder : public IOpBuilder {
4949
// with opset version 7 or above for opset domain 'ai.onnx'.
5050
// WebNN EP ignores node support for opset less than 7 by
5151
// default as which will be fallback earlier by ONNX Runtime.
52-
// We still set the mininal supported opset to 1 as we couldn't
52+
// We still set the minimal supported opset to 1 as we couldn't
5353
// get the model opset version at this stage.
5454
virtual int GetMinSupportedOpSet(const Node& /* node */) const { return 1; }
5555
virtual int GetMaxSupportedOpSet(const Node& /* node */) const { return 23; }

onnxruntime/core/providers/webnn/builders/impl/conv_op_builder.cc

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ common::Status SetConvBaseOptions(ModelBuilder& model_builder,
7878
if (output_padding.size() == 1 && is_conv1d) {
7979
output_padding.push_back(0);
8080
}
81-
options.set("outputPadding", emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(output_padding)));
81+
options.set("outputPadding", emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(output_padding)));
8282

8383
// If output shape is explicitly provided, compute the pads.
8484
// Otherwise compute the output shape, as well as the pads if the auto_pad attribute is SAME_UPPER/SAME_LOWER.
@@ -87,7 +87,7 @@ common::Status SetConvBaseOptions(ModelBuilder& model_builder,
8787
auto_pad_type, pads_out, output_shape, !is_nhwc));
8888

8989
if (output_shape[0] != -1 && output_shape[1] != -1) {
90-
options.set("outputSizes", emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(output_shape)));
90+
options.set("outputSizes", emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(output_shape)));
9191
}
9292
pads = pads_out;
9393
} else {
@@ -97,13 +97,13 @@ common::Status SetConvBaseOptions(ModelBuilder& model_builder,
9797

9898
const auto group = helper.Get("group", static_cast<uint32_t>(1));
9999
options.set("groups", group);
100-
options.set("strides", emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(strides)));
101-
options.set("dilations", emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(dilations)));
100+
options.set("strides", emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(strides)));
101+
options.set("dilations", emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(dilations)));
102102

103103
// Permute the ONNX's pads, which is [beginning_height, beginning_width, ending_height, ending_width],
104104
// while WebNN's padding is [beginning_height, ending_height, beginning_width, ending_width].
105105
const std::vector<int64_t> padding{pads[0], pads[2], pads[1], pads[3]};
106-
options.set("padding", emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(padding)));
106+
options.set("padding", emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(padding)));
107107

108108
// Add bias if present.
109109
if (input_defs.size() > 2 && op_type != "ConvInteger") {
@@ -123,7 +123,7 @@ Status AddInitializerInNewLayout(ModelBuilder& model_builder,
123123

124124
const auto& shape = tensor.dims();
125125
std::vector<uint32_t> dims =
126-
GetNarrowedIntfromInt64<uint32_t>(std::vector<int64_t>(std::begin(shape), std::end(shape)));
126+
GetNarrowedIntFromInt64<uint32_t>(std::vector<int64_t>(std::begin(shape), std::end(shape)));
127127

128128
if (is_conv1d) {
129129
// Support conv1d by prepending a 1 size dimension.
@@ -172,21 +172,21 @@ Status AddInitializerInNewLayout(ModelBuilder& model_builder,
172172
h * w_t +
173173
w;
174174

175-
uint32_t nnapi_idx;
175+
uint32_t wnn_idx;
176176
if (is_conv == 1) { // L_0231
177-
nnapi_idx = out * h_t * w_t * in_t +
178-
h * w_t * in_t +
179-
w * in_t +
180-
in;
177+
wnn_idx = out * h_t * w_t * in_t +
178+
h * w_t * in_t +
179+
w * in_t +
180+
in;
181181
} else { // L_1230 for depthwise conv weight
182-
nnapi_idx = in * h_t * w_t * out_t +
183-
h * w_t * out_t +
184-
w * out_t +
185-
out;
182+
wnn_idx = in * h_t * w_t * out_t +
183+
h * w_t * out_t +
184+
w * out_t +
185+
out;
186186
}
187187

188188
for (size_t i = 0; i < element_size; i++) {
189-
buffer[element_size * nnapi_idx + i] = src[element_size * onnx_idx + i];
189+
buffer[element_size * wnn_idx + i] = src[element_size * onnx_idx + i];
190190
}
191191
}
192192
}
@@ -234,7 +234,7 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
234234
} else {
235235
input_shape.push_back(1);
236236
}
237-
std::vector<uint32_t> new_shape = GetNarrowedIntfromInt64<uint32_t>(input_shape);
237+
std::vector<uint32_t> new_shape = GetNarrowedIntFromInt64<uint32_t>(input_shape);
238238
common_options.set("label", node.Name() + "_reshape_input");
239239
input = model_builder.GetBuilder().call<emscripten::val>("reshape", input,
240240
emscripten::val::array(new_shape), common_options);
@@ -283,7 +283,7 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
283283
// Reshape weight to 4D for conv1d.
284284
if (!is_nhwc || !is_constant_weight) {
285285
// The weight_shape has been appended 1's, reshape weight operand.
286-
std::vector<uint32_t> new_shape = GetNarrowedIntfromInt64<uint32_t>(weight_shape);
286+
std::vector<uint32_t> new_shape = GetNarrowedIntFromInt64<uint32_t>(weight_shape);
287287
common_options.set("label", node.Name() + "_reshape_filter");
288288
filter = model_builder.GetBuilder().call<emscripten::val>("reshape",
289289
filter,
@@ -338,7 +338,7 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
338338
std::vector<int64_t> w_zero_point_shape;
339339
ORT_RETURN_IF_NOT(GetShape(*input_defs[3], w_zero_point_shape, logger), "Cannot get shape of w_zero_point");
340340
w_scale = model_builder.CreateOrGetConstant<float>(ONNX_NAMESPACE::TensorProto_DataType_FLOAT, 1.0f,
341-
GetNarrowedIntfromInt64<uint32_t>(w_zero_point_shape));
341+
GetNarrowedIntFromInt64<uint32_t>(w_zero_point_shape));
342342
} else {
343343
w_zero_point = model_builder.CreateOrGetConstant<uint8_t>(x_type, 0);
344344
w_scale = x_scale;
@@ -363,7 +363,7 @@ Status ConvOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
363363
const auto& output_defs = node.OutputDefs();
364364
std::vector<int64_t> output_shape;
365365
ORT_RETURN_IF_NOT(GetShape(*output_defs[0], output_shape, logger), "Cannot get output shape");
366-
std::vector<uint32_t> new_shape = GetNarrowedIntfromInt64<uint32_t>(output_shape);
366+
std::vector<uint32_t> new_shape = GetNarrowedIntFromInt64<uint32_t>(output_shape);
367367
common_options.set("label", node.Name() + "_reshape_output");
368368
output = model_builder.GetBuilder().call<emscripten::val>("reshape",
369369
output,

onnxruntime/core/providers/webnn/builders/impl/dropout_op_builder.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ Status DropoutOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
5353
if (output_defs.size() > 1) {
5454
std::vector<int64_t> mask_shape;
5555
ORT_RETURN_IF_NOT(GetShape(*output_defs[1], mask_shape, logger), "Cannot get mask output's shape");
56-
std::vector<uint32_t> dims = GetNarrowedIntfromInt64<uint32_t>(mask_shape);
56+
std::vector<uint32_t> dims = GetNarrowedIntFromInt64<uint32_t>(mask_shape);
5757
emscripten::val one_constant = model_builder.CreateOrGetConstant<uint8_t>(
5858
ONNX_NAMESPACE::TensorProto_DataType_BOOL, 1, dims);
5959

onnxruntime/core/providers/webnn/builders/impl/expand_op_builder.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ Status ExpandOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
5656
emscripten::val options = emscripten::val::object();
5757
options.set("label", node.Name());
5858

59-
emscripten::val output_shape_arr = emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(output_shape));
59+
emscripten::val output_shape_arr = emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(output_shape));
6060
emscripten::val output = model_builder.GetBuilder().call<emscripten::val>("expand", input, output_shape_arr, options);
6161
model_builder.AddOperand(node.OutputDefs()[0]->Name(), std::move(output));
6262
return Status::OK();

onnxruntime/core/providers/webnn/builders/impl/gemm_op_builder.cc

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -55,14 +55,14 @@ Status GemmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
5555
// If the input A is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions.
5656
if (a_shape.size() == 1) {
5757
a_shape.insert(a_shape.begin(), 1);
58-
emscripten::val a_shape_arr = emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(a_shape));
58+
emscripten::val a_shape_arr = emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(a_shape));
5959
common_options.set("label", node.Name() + "_reshape_a");
6060
a = model_builder.GetBuilder().call<emscripten::val>("reshape", a, a_shape_arr, common_options);
6161
}
6262
// If the input B is 1-D, it is promoted to a matrix by appending a 1 to its dimensions.
6363
if (b_shape.size() == 1) {
6464
b_shape.push_back(1);
65-
emscripten::val b_shape_arr = emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(b_shape));
65+
emscripten::val b_shape_arr = emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(b_shape));
6666
common_options.set("label", node.Name() + "_reshape_b");
6767
b = model_builder.GetBuilder().call<emscripten::val>("reshape", b, b_shape_arr, common_options);
6868
}
@@ -74,7 +74,7 @@ Status GemmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
7474
// If A or B input is 1-D, we need to reshape the output back to its original shape.
7575
if (a_shape.size() == 1 || b_shape.size() == 1) {
7676
common_options.set("label", node.Name() + "_reshape_output");
77-
emscripten::val output_shape_arr = emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(output_shape));
77+
emscripten::val output_shape_arr = emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(output_shape));
7878
output = model_builder.GetBuilder().call<emscripten::val>("reshape",
7979
output,
8080
output_shape_arr,
@@ -95,7 +95,7 @@ Status GemmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
9595
// The scale input should have the same shape as the zero point input.
9696
a_scale = model_builder.CreateOrGetConstant<float>(ONNX_NAMESPACE::TensorProto_DataType_FLOAT,
9797
1.0f,
98-
GetNarrowedIntfromInt64<uint32_t>(a_zero_point_shape));
98+
GetNarrowedIntFromInt64<uint32_t>(a_zero_point_shape));
9999
} else {
100100
// If a_zero_point is not provided, create default scalar for zero_point and scale inputs.
101101
a_zero_point = model_builder.CreateOrGetConstant<uint8_t>(a_type, 0);
@@ -115,7 +115,7 @@ Status GemmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
115115
ORT_RETURN_IF_NOT(GetShape(*input_defs[3], b_zero_point_shape, logger), "Cannot get shape of b_zero_point");
116116
b_scale = model_builder.CreateOrGetConstant<float>(ONNX_NAMESPACE::TensorProto_DataType_FLOAT,
117117
1.0f,
118-
GetNarrowedIntfromInt64<uint32_t>(b_zero_point_shape));
118+
GetNarrowedIntFromInt64<uint32_t>(b_zero_point_shape));
119119
} else {
120120
b_zero_point = model_builder.CreateOrGetConstant<uint8_t>(a_type, 0);
121121
b_scale = model_builder.CreateOrGetConstant<float>(ONNX_NAMESPACE::TensorProto_DataType_FLOAT, 1.0f);
@@ -143,7 +143,7 @@ Status GemmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
143143
// If A or B input is 1-D, we need to reshape the output back to its original shape.
144144
if (a_shape.size() == 1 || b_shape.size() == 1) {
145145
common_options.set("label", node.Name() + "_reshape_output");
146-
emscripten::val output_shape_arr = emscripten::val::array(GetNarrowedIntfromInt64<uint32_t>(output_shape));
146+
emscripten::val output_shape_arr = emscripten::val::array(GetNarrowedIntFromInt64<uint32_t>(output_shape));
147147
output = model_builder.GetBuilder().call<emscripten::val>("reshape",
148148
output,
149149
output_shape_arr,

onnxruntime/core/providers/webnn/builders/impl/gqa_op_builder.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -62,9 +62,9 @@ std::vector<int32_t> repeat_sequence(int32_t sequence_length, int32_t kv_num_hea
6262
}
6363

6464
/** GroupQueryAttention SubGraph.
65-
Abbreviatios: B is batch_size, S is sequence_length, W is hidden_size, P is past_sequence_length
66-
N is number of attention heads, kv_N is number of attention heads for kv, H is head size
67-
G is group size, and G=N/kv_N, W=N*H, h=Sqrt(H).
65+
Abbreviations: B is batch_size, S is sequence_length, W is hidden_size, P is past_sequence_length
66+
N is number of attention heads, kv_N is number of attention heads for kv, H is head size
67+
G is group size, and G=N/kv_N, W=N*H, h=Sqrt(H).
6868
GQA inputs: query, key, value, past_key, past_value, seqlens_k, total_sequence_length
6969
Notes: cos_cache, sin_cache inputs are not supported. If the data type of the inputs (qkv and past kv) is float16,
7070
we cast them to float32 to ensure data precision.

onnxruntime/core/providers/webnn/builders/impl/mha_op_builder.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,8 +31,8 @@ class MultiHeadAttentionOpBuilder : public BaseOpBuilder {
3131
};
3232

3333
/** MultiHeadAttention SubGraph.
34-
Abbreviatios: B is batch_size, S is sequence_length, W is hidden_size
35-
N is number of attention heads, H is head size
34+
Abbreviations: B is batch_size, S is sequence_length, W is hidden_size
35+
N is number of attention heads, H is head size
3636
Notes: If the datatype of the inputs (qkv and past kv) is float16, we cast them to float32 to ensure data precision.
3737
3838
query key value

onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -234,7 +234,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
234234
output = model_builder.GetBuilder().call<emscripten::val>("instanceNormalization", input, options);
235235
// Reshape back to the original output shape for 3D input.
236236
if (input_shape.size() != 4) {
237-
std::vector<uint32_t> output_shape = GetNarrowedIntfromInt64<uint32_t>(input_shape);
237+
std::vector<uint32_t> output_shape = GetNarrowedIntFromInt64<uint32_t>(input_shape);
238238
emscripten::val reshape_output_options = emscripten::val::object();
239239
reshape_output_options.set("label", node.Name() + "reshape_output");
240240
output = model_builder.GetBuilder().call<emscripten::val>("reshape",

0 commit comments

Comments
 (0)