Automlx Metrics Updates #1233

Merged
merged 2 commits on Jul 24, 2025
2 changes: 1 addition & 1 deletion .github/workflows/run-forecast-explainer-tests.yml
@@ -54,6 +54,6 @@ jobs:
$CONDA/bin/conda init
source /home/runner/.bashrc
pip install -r test-requirements-operators.txt
pip install "oracle-automlx[forecasting]>=25.1.1"
pip install "oracle-automlx[forecasting]>=25.3.0"
pip install pandas>=2.2.0
python -m pytest -v -p no:warnings --durations=5 tests/operators/forecast/test_explainers.py
2 changes: 1 addition & 1 deletion .github/workflows/run-forecast-unit-tests.yml
@@ -56,6 +56,6 @@ jobs:
$CONDA/bin/conda init
source /home/runner/.bashrc
pip install -r test-requirements-operators.txt
pip install "oracle-automlx[forecasting]>=25.1.1"
pip install "oracle-automlx[forecasting]>=25.3.0"
pip install pandas>=2.2.0
python -m pytest -v -p no:warnings --durations=5 tests/operators/forecast --ignore=tests/operators/forecast/test_explainers.py
4 changes: 2 additions & 2 deletions ads/opctl/operator/lowcode/anomaly/model/automlx.py
@@ -24,8 +24,8 @@ class AutoMLXOperatorModel(AnomalyOperatorBaseModel):
@runtime_dependency(
module="automlx",
err_msg=(
"Please run `pip3 install oracle-automlx>=23.4.1` and "
"`pip3 install oracle-automlx[classic]>=23.4.1` "
"Please run `pip3 install oracle-automlx>=25.3.0` and "
"`pip3 install oracle-automlx[classic]>=25.3.0` "
"to install the required dependencies for automlx."
),
)
2 changes: 1 addition & 1 deletion ads/opctl/operator/lowcode/forecast/const.py
@@ -76,7 +76,7 @@ class ForecastOutputColumns(ExtendedEnum):

AUTOMLX_METRIC_MAP = {
"smape": "neg_sym_mean_abs_percent_error",
"mape": "neg_sym_mean_abs_percent_error",
"mape": "neg_mean_abs_percent_error",
"mase": "neg_mean_abs_scaled_error",
"mae": "neg_mean_absolute_error",
"mse": "neg_mean_squared_error",
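With this change, `mape` no longer aliases the symmetric metric: each user-facing metric name in `AUTOMLX_METRIC_MAP` resolves to its own AutoMLx score name. A minimal sketch of the lookup as the forecast operator uses it (the map excerpt is copied from the diff above and may omit entries hidden in the truncated view; `spec_metric` is a hypothetical stand-in for `self.spec.metric`):

AUTOMLX_METRIC_MAP = {
    "smape": "neg_sym_mean_abs_percent_error",
    "mape": "neg_mean_abs_percent_error",
    "mase": "neg_mean_abs_scaled_error",
    "mae": "neg_mean_absolute_error",
    "mse": "neg_mean_squared_error",
}

spec_metric = "mape"  # hypothetical value taken from the operator YAML spec
internal_name = AUTOMLX_METRIC_MAP.get(spec_metric)
print(internal_name)  # "neg_mean_abs_percent_error" (previously aliased "neg_sym_mean_abs_percent_error")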
36 changes: 21 additions & 15 deletions ads/opctl/operator/lowcode/forecast/model/automlx.py
@@ -28,7 +28,9 @@

logging.getLogger("report_creator").setLevel(logging.WARNING)
AUTOMLX_N_ALGOS_TUNED = 4
AUTOMLX_DEFAULT_SCORE_METRIC = "neg_sym_mean_abs_percent_error"
AUTOMLX_DEFAULT_SCORE_METRIC = ['neg_sym_mean_abs_percent_error',
'neg_mean_abs_percent_error',
'neg_root_mean_squared_error']


class AutoMLXOperatorModel(ForecastOperatorBaseModel):
@@ -45,10 +47,13 @@ def set_kwargs(self):
model_kwargs_cleaned["n_algos_tuned"] = model_kwargs_cleaned.get(
"n_algos_tuned", AUTOMLX_N_ALGOS_TUNED
)
model_kwargs_cleaned["score_metric"] = AUTOMLX_METRIC_MAP.get(
self.spec.metric,
model_kwargs_cleaned.get("score_metric", AUTOMLX_DEFAULT_SCORE_METRIC),
)
metric_to_optimize = AUTOMLX_METRIC_MAP.get(self.spec.metric)
model_kwargs_cleaned["score_metric"] = AUTOMLX_DEFAULT_SCORE_METRIC
# The first score metric in the list will be the one for which the pipeline optimizes
if metric_to_optimize is not None:
model_kwargs_cleaned["score_metric"].remove(metric_to_optimize)
model_kwargs_cleaned["score_metric"].insert(0, metric_to_optimize)

model_kwargs_cleaned.pop("task", None)
time_budget = model_kwargs_cleaned.pop("time_budget", -1)
model_kwargs_cleaned["preprocessing"] = (
@@ -70,7 +75,7 @@ def preprocess(self, data, series_id): # TODO: re-use self.le for explanations
@runtime_dependency(
module="automlx",
err_msg=(
"Please run `pip3 install oracle-automlx[forecasting]>=25.1.1` "
"Please run `pip3 install oracle-automlx[forecasting]>=25.3.0` "
"to install the required dependencies for automlx."
),
)
@@ -163,7 +168,7 @@ def _build_model(self) -> pd.DataFrame:
self.models[s_id] = {}
self.models[s_id]["model"] = model
self.models[s_id]["le"] = self.le[s_id]
self.models[s_id]["score"] = self.get_validation_score_and_metric(model)
self.models[s_id]["score"] = self.get_all_metrics(model)

# In case of Naive model, model.forecast function call does not return confidence intervals.
if f"{target}_ci_upper" not in summary_frame:
@@ -518,26 +523,27 @@ def explain_model(self):
)
logger.debug(f"Full Traceback: {traceback.format_exc()}")

- def get_validation_score_and_metric(self, model):
+ def get_all_metrics(self, model):
trials = model.completed_trials_summary_
model_params = model.selected_model_params_
if len(trials) > 0:
- score_col = [col for col in trials.columns if "Score" in col][0]
- validation_score = trials[trials.Hyperparameters == model_params][
- score_col
+ all_metrics = trials[trials.Hyperparameters == model_params][
+ "All Metrics"
].iloc[0]
else:
- validation_score = 0
- return -1 * validation_score
+ all_metrics = {}
+ reverse_map = {v: k for k, v in AUTOMLX_METRIC_MAP.items()}
+ all_metrics = {reverse_map[key]: -1 * value for key, value in all_metrics.items() if key in reverse_map}
+ return all_metrics

def generate_train_metrics(self) -> pd.DataFrame:
"""
- Generate Training Metrics when fitted data is not available.
+ Generate Training Metrics for Automlx
"""
total_metrics = pd.DataFrame()
for s_id in self.forecast_output.list_series_ids():
try:
- metrics = {self.spec.metric.upper(): self.models[s_id]["score"]}
+ metrics = self.models[s_id]["score"]
metrics_df = pd.DataFrame.from_dict(
metrics, orient="index", columns=[s_id]
)
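In the hunk above, get_all_metrics pulls the full "All Metrics" dictionary for the selected trial, translates the internal neg_* names back to the user-facing ones, and flips the sign, and generate_train_metrics then builds one column per series from that dictionary. A minimal sketch of that post-processing, with a made-up metrics dictionary standing in for a row of completed_trials_summary_ and a hypothetical series id "series_1":

import pandas as pd

AUTOMLX_METRIC_MAP = {
    "smape": "neg_sym_mean_abs_percent_error",
    "mape": "neg_mean_abs_percent_error",
    "mse": "neg_mean_squared_error",
}

# Hypothetical "All Metrics" entry for the selected hyperparameters.
raw_metrics = {
    "neg_sym_mean_abs_percent_error": -0.12,
    "neg_mean_abs_percent_error": -0.10,
    "neg_mean_squared_error": -4.2,
    "neg_some_internal_metric": -1.0,  # dropped below: not in the reverse map
}

# Map internal names back to user-facing ones and negate the "neg_*" scores.
reverse_map = {v: k for k, v in AUTOMLX_METRIC_MAP.items()}
all_metrics = {
    reverse_map[key]: -1 * value
    for key, value in raw_metrics.items()
    if key in reverse_map
}

# One column per series, one row per metric, as in generate_train_metrics.
metrics_df = pd.DataFrame.from_dict(all_metrics, orient="index", columns=["series_1"])
print(metrics_df)
#        series_1
# smape      0.12
# mape       0.10
# mse        4.20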