@@ -112,56 +112,73 @@ Status ModelBuilder::RegisterInitializers() {
       auto num_elements = SafeInt<size_t>(Product(shape));
       emscripten::val view = emscripten::val::undefined();
       std::byte* tensor_ptr = nullptr;
-      if (tensor.has_raw_data()) {
-        tensor_ptr = reinterpret_cast<std::byte*>(const_cast<char*>(tensor.raw_data().c_str()));
+
+      if (utils::HasExternalData(tensor)) {
+        // Create WebNN Constant from external data.
+        std::basic_string<ORTCHAR_T> external_file_path;
+        onnxruntime::FileOffsetType data_offset;
+        SafeInt<size_t> tensor_byte_size;
+        ORT_RETURN_IF_ERROR(utils::GetExternalDataInfo(
+            tensor, graph_viewer_.ModelPath(), external_file_path, data_offset, tensor_byte_size));
+
+        auto jsepRegisterMLConstant = emscripten::val::module_property("jsepRegisterMLConstant");
+        operand = jsepRegisterMLConstant(emscripten::val(external_file_path),
+                                         static_cast<int32_t>(data_offset),
+                                         static_cast<int32_t>(tensor_byte_size),
+                                         wnn_builder_,
+                                         desc);
       } else {
-        // Store temporary unpacked_tensor.
-        unpacked_tensors_.push_back({});
-        std::vector<uint8_t>& unpacked_tensor = unpacked_tensors_.back();
-        ORT_RETURN_IF_ERROR(onnxruntime::utils::UnpackInitializerData(tensor, unpacked_tensor));
-        tensor_ptr = reinterpret_cast<std::byte*>(unpacked_tensor.data());
-      }
-      switch (data_type) {
-        case ONNX_NAMESPACE::TensorProto_DataType_BOOL:
-        case ONNX_NAMESPACE::TensorProto_DataType_UINT8:
-          view = emscripten::val{emscripten::typed_memory_view(num_elements,
-                                                               reinterpret_cast<uint8_t*>(tensor_ptr))};
-          break;
-        case ONNX_NAMESPACE::TensorProto_DataType_INT8:
-          view = emscripten::val{emscripten::typed_memory_view(num_elements,
-                                                               reinterpret_cast<int8_t*>(tensor_ptr))};
-          break;
-        case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16:
-          view = emscripten::val{emscripten::typed_memory_view(num_elements,
-                                                               reinterpret_cast<uint16_t*>(tensor_ptr))};
-          break;
-        case ONNX_NAMESPACE::TensorProto_DataType_FLOAT:
-          view = emscripten::val{emscripten::typed_memory_view(num_elements,
-                                                               reinterpret_cast<float*>(tensor_ptr))};
-          break;
-        case ONNX_NAMESPACE::TensorProto_DataType_INT32:
-          view = emscripten::val{emscripten::typed_memory_view(num_elements,
-                                                               reinterpret_cast<int32_t*>(tensor_ptr))};
-          break;
-        case ONNX_NAMESPACE::TensorProto_DataType_INT64:
-          view = emscripten::val{emscripten::typed_memory_view(num_elements,
-                                                               reinterpret_cast<int64_t*>(tensor_ptr))};
-          break;
-        case ONNX_NAMESPACE::TensorProto_DataType_UINT32:
-          view = emscripten::val{emscripten::typed_memory_view(num_elements,
-                                                               reinterpret_cast<uint32_t*>(tensor_ptr))};
-          break;
-        case ONNX_NAMESPACE::TensorProto_DataType_UINT64:
-          view = emscripten::val{emscripten::typed_memory_view(num_elements,
-                                                               reinterpret_cast<uint64_t*>(tensor_ptr))};
-          break;
-        default:
-          break;
+        if (tensor.has_raw_data()) {
+          tensor_ptr = reinterpret_cast<std::byte*>(const_cast<char*>(tensor.raw_data().c_str()));
+        } else {
+          // Store temporary unpacked_tensor.
+          unpacked_tensors_.push_back({});
+          std::vector<uint8_t>& unpacked_tensor = unpacked_tensors_.back();
+          ORT_RETURN_IF_ERROR(onnxruntime::utils::UnpackInitializerData(tensor, unpacked_tensor));
+          tensor_ptr = reinterpret_cast<std::byte*>(unpacked_tensor.data());
+        }
+        switch (data_type) {
+          case ONNX_NAMESPACE::TensorProto_DataType_BOOL:
+          case ONNX_NAMESPACE::TensorProto_DataType_UINT8:
+            view = emscripten::val{emscripten::typed_memory_view(num_elements,
+                                                                 reinterpret_cast<uint8_t*>(tensor_ptr))};
+            break;
+          case ONNX_NAMESPACE::TensorProto_DataType_INT8:
+            view = emscripten::val{emscripten::typed_memory_view(num_elements,
+                                                                 reinterpret_cast<int8_t*>(tensor_ptr))};
+            break;
+          case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16:
+            view = emscripten::val{emscripten::typed_memory_view(num_elements,
+                                                                 reinterpret_cast<uint16_t*>(tensor_ptr))};
+            break;
+          case ONNX_NAMESPACE::TensorProto_DataType_FLOAT:
+            view = emscripten::val{emscripten::typed_memory_view(num_elements,
+                                                                 reinterpret_cast<float*>(tensor_ptr))};
+            break;
+          case ONNX_NAMESPACE::TensorProto_DataType_INT32:
+            view = emscripten::val{emscripten::typed_memory_view(num_elements,
+                                                                 reinterpret_cast<int32_t*>(tensor_ptr))};
+            break;
+          case ONNX_NAMESPACE::TensorProto_DataType_INT64:
+            view = emscripten::val{emscripten::typed_memory_view(num_elements,
+                                                                 reinterpret_cast<int64_t*>(tensor_ptr))};
+            break;
+          case ONNX_NAMESPACE::TensorProto_DataType_UINT32:
+            view = emscripten::val{emscripten::typed_memory_view(num_elements,
+                                                                 reinterpret_cast<uint32_t*>(tensor_ptr))};
+            break;
+          case ONNX_NAMESPACE::TensorProto_DataType_UINT64:
+            view = emscripten::val{emscripten::typed_memory_view(num_elements,
+                                                                 reinterpret_cast<uint64_t*>(tensor_ptr))};
+            break;
+          default:
+            break;
+        }
+
+        // Wasm memory grow will cause all array buffers reallocation, which will be treated as detached
+        // buffers in JS side. Simply create a copy to fix it.
+        operand = wnn_builder_.call<emscripten::val>("constant", desc, view.call<emscripten::val>("slice"));
       }
-
-      // Wasm memory grow will cause all array buffers reallocation, which will be treated as detached
-      // buffers in JS side. Simply create a copy to fix it.
-      operand = wnn_builder_.call<emscripten::val>("constant", desc, view.call<emscripten::val>("slice"));
     } else {
       // TODO: support other type.
       return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
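Note on the new branch: `jsepRegisterMLConstant` is resolved as an emscripten module property, so for initializers backed by external data the WebNN constant is actually built on the JS side from the file path, byte offset, and byte size that `GetExternalDataInfo` extracts. The sketch below is a hypothetical TypeScript illustration of a helper with that signature, not the actual onnxruntime-web jsep code: the `externalDataFiles` map, the minimal WebNN typings, and the dtype handling are assumptions introduced for illustration, and how the external weights file actually gets loaded is out of scope here.

```ts
// Hypothetical sketch only; not the actual onnxruntime-web jsep implementation.
// Minimal structural typings for the WebNN pieces used in this sketch.
interface MLOperandDescriptor { dataType: string; dimensions: number[]; }
interface MLOperand {}
interface MLGraphBuilder {
  constant(desc: MLOperandDescriptor, buffer: ArrayBufferView): MLOperand;
}

// Assumption: the external weights file was loaded up front (e.g. fetched when
// the session was created) and is looked up by the path recorded in the model.
const externalDataFiles = new Map<string, Uint8Array>();

function jsepRegisterMLConstant(externalFilePath: string,
                                dataOffset: number,
                                tensorByteSize: number,
                                builder: MLGraphBuilder,
                                desc: MLOperandDescriptor): MLOperand {
  const fileData = externalDataFiles.get(externalFilePath);
  if (!fileData) {
    throw new Error(`External data file not loaded: ${externalFilePath}`);
  }
  // Copy out exactly this initializer's bytes so the constant does not alias
  // the original file buffer.
  const bytes = fileData.slice(dataOffset, dataOffset + tensorByteSize);
  // Wrap the bytes in a view whose element type matches the descriptor;
  // only a few dtypes are shown, the rest are elided in this sketch.
  let view: ArrayBufferView = bytes;
  switch (desc.dataType) {
    case 'float32':
      view = new Float32Array(bytes.buffer, 0, tensorByteSize / 4);
      break;
    case 'int32':
      view = new Int32Array(bytes.buffer, 0, tensorByteSize / 4);
      break;
    default:
      break;  // keep the raw uint8 view
  }
  return builder.constant(desc, view);
}
```

Either path assigns the resulting MLOperand to `operand`, so the rest of `RegisterInitializers` can treat external-data and in-memory initializers uniformly.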