Skip to content

Commit 363517b

Browse files
committed
[NPUW] Fix model builder hidden_size defaults
Defer the norm/ffn defaults in ModelConfig from the constructor to build_model(), so that changing hidden_size (or intermediate_size, precision, weight) after construction takes effect. Previously the constructor baked in the LayerNorm/SwiGLU defaults using the initial field values, silently ignoring any later changes.
1 parent 44967ef commit 363517b

File tree

2 files changed

+13
-6
lines changed

2 files changed

+13
-6
lines changed

src/plugins/intel_npu/tests/functional/behavior/npuw/test_engine/models/model_builder.cpp

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1392,10 +1392,20 @@ std::shared_ptr<ov::Model> ModelBuilder::make_model(const ov::Output<ov::Node>&
13921392
return std::make_shared<ov::Model>(ov::OutputVector{res->output(0)}, m_sinks, model_name);
13931393
}
13941394

1395-
std::shared_ptr<ov::Model> ModelBuilder::build_model(const ModelConfig& config) {
1395+
std::shared_ptr<ov::Model> ModelBuilder::build_model(const ModelConfig& config_in) {
13961396
OPENVINO_ASSERT(
1397-
(int)config.use_conv_features + (int)config.use_cross_attention + (int)config.use_token_type_embedding <= 1,
1397+
(int)config_in.use_conv_features + (int)config_in.use_cross_attention + (int)config_in.use_token_type_embedding <= 1,
13981398
"At most one structural dispatch flag may be set");
1399+
1400+
// Fill in norm/ffn defaults from actual config sizes when the caller left them empty.
1401+
ModelConfig config = config_in;
1402+
if (!config.norm) {
1403+
config.norm = LayerNorm(config.hidden_size, config.precision);
1404+
}
1405+
if (!config.ffn) {
1406+
config.ffn = SwiGLU(config.hidden_size, config.intermediate_size, config.precision, config.weight);
1407+
}
1408+
13991409
if (config.use_conv_features) {
14001410
return build_whisper_encoder(config);
14011411
}

src/plugins/intel_npu/tests/functional/behavior/npuw/test_engine/models/model_builder.hpp

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -411,10 +411,7 @@ struct ModelConfig {
411411
size_t max_position_embeddings = 512;
412412
size_t type_vocab_size = 2;
413413

414-
ModelConfig()
415-
: lm_head_weight(weight),
416-
norm(LayerNorm(hidden_size, precision)),
417-
ffn(SwiGLU(hidden_size, intermediate_size, precision, weight)) {}
414+
ModelConfig() : lm_head_weight(weight) {}
418415

419416
size_t get_kv_heads() const {
420417
return num_kv_heads == 0 ? num_heads : num_kv_heads;

0 commit comments

Comments
 (0)