
Commit 96b8186

ADULT: Change objective from max to min problem --> New Container Version 0.0.2

1 parent c07fc4f

3 files changed (+11 / -9 lines)

hpobench/benchmarks/mo/adult_benchmark.py

Lines changed: 7 additions & 5 deletions
@@ -1,6 +1,8 @@
 """
 Changelog:
 ==========
+0.0.2:
+* Change the objective value from accuracy to misclassification rate. (1 - accuracy)
 
 0.0.1:
 * First implementation of the Multi-Objective Fair Adult Benchmark.
@@ -127,7 +129,7 @@ def get_meta_information() -> Dict:
     @staticmethod
     def get_objective_names() -> List[str]:
         """Get a list of objectives evaluated in the objective_function. """
-        return ['accuracy', 'DSP', 'DEO', 'DFP']
+        return ['misclassification_rate', 'DSP', 'DEO', 'DFP']
 
     @AbstractMultiObjectiveBenchmark.check_parameters
     def objective_function(self, configuration: Union[CS.Configuration, Dict],
@@ -165,7 +167,7 @@ def objective_function(self, configuration: Union[CS.Configuration, Dict],
         -------
         Dict -
             function_value : Dict - validation metrics after training on train
-                accuracy: float
+                misclassification_rate: float: 1 - validation accuracy
                 DSO: float
                 DEO: float
                 DFP: float
@@ -247,7 +249,7 @@ def objective_function(self, configuration: Union[CS.Configuration, Dict],
 
         elapsed_time = time.time() - ts_start
 
-        return {'function_value': {'accuracy': float(val_accuracy),
+        return {'function_value': {'misclassification_rate': 1 - float(val_accuracy),
                                    'DSO': float(val_statistical_disparity),
                                    'DEO': float(val_unequal_opportunity),
                                    'DFP': float(val_unequalized_odds)
@@ -310,7 +312,7 @@ def objective_function_test(self, configuration: Union[CS.Configuration, Dict],
         -------
         Dict -
             function_value : Dict - test metrics reported after training on (train+valid)
-                accuracy: float
+                misclassification_rate: float: 1 - test accuracy
                 DSO: float
                 DEO: float
                 DFP: float
@@ -381,7 +383,7 @@ def objective_function_test(self, configuration: Union[CS.Configuration, Dict],
         logger.debug(f"config:{configuration}, test_score: {test_accuracy}, train score:{train_accuracy},"
                      f"dsp:{test_statistical_disparity}, deo :{test_unequal_opportunity}, dfp :{test_unequalized_odds}")
 
-        return {'function_value': {'accuracy': float(test_accuracy),
+        return {'function_value': {'misclassification_rate': 1 - float(test_accuracy),
                                    'DSO': float(test_statistical_disparity),
                                    'DEO': float(test_unequal_opportunity),
                                    'DFP': float(test_unequalized_odds)
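
With 0.0.2 all four entries in 'function_value' point in the same direction (smaller is better), so a multi-objective optimizer no longer has to special-case the accuracy objective. A minimal sketch of consuming the new return value (not part of the commit), assuming the standard HPOBench benchmark API (a get_configuration_space method and an rng constructor argument):

    from hpobench.benchmarks.mo.adult_benchmark import AdultBenchmark

    benchmark = AdultBenchmark(rng=1)
    config = benchmark.get_configuration_space(seed=1).sample_configuration()
    result = benchmark.objective_function(config, rng=1, fidelity={'budget': 3})

    # Every objective is now minimized. Note the return dict uses the key
    # 'DSO' while get_objective_names() lists 'DSP'.
    objectives = [result['function_value'][k]
                  for k in ('misclassification_rate', 'DSO', 'DEO', 'DFP')]
    print(objectives)  # e.g. first entry ~0.2461, i.e. 1 - valid_accuracy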

hpobench/container/benchmarks/mo/adult_benchmark.py

Lines changed: 1 addition & 1 deletion
@@ -8,5 +8,5 @@ class AdultBenchmark(AbstractBenchmarkClient):
     def __init__(self, **kwargs):
         kwargs['benchmark_name'] = kwargs.get('benchmark_name', 'AdultBenchmark')
         kwargs['container_name'] = kwargs.get('container_name', 'fair_adult')
-        kwargs['latest'] = kwargs.get('container_tag', '0.0.1')
+        kwargs['latest'] = kwargs.get('container_tag', '0.0.2')
         super(AdultBenchmark, self).__init__(**kwargs)
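
The containerized client only changes its default tag; existing callers keep working. A minimal usage sketch, assuming the usual HPOBench container-client entry point at the path above:

    from hpobench.container.benchmarks.mo.adult_benchmark import AdultBenchmark

    # Without an explicit tag the 'fair_adult' container now resolves to
    # 0.0.2 (minimization objectives).
    benchmark = AdultBenchmark()

    # Pinning the previous maximization behaviour is still possible:
    benchmark_old = AdultBenchmark(container_tag='0.0.1')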

tests/test_adult.py

Lines changed: 3 additions & 3 deletions
@@ -28,10 +28,10 @@ def test_adult_benchmark():
     result_2 = benchmark.objective_function(test_config, rng=1, fidelity={'budget': 3})
 
     assert result_1['info']['valid_accuracy'] == pytest.approx(0.7539, rel=0.001)
-    assert result_1['info']['valid_accuracy'] == result_1['function_value']['accuracy']
+    assert 1 - result_1['info']['valid_accuracy'] == result_1['function_value']['misclassification_rate']
     assert result_1['info']['train_accuracy'] == pytest.approx(0.76145, rel=0.001)
     assert result_1['info']['train_accuracy'] == result_2['info']['train_accuracy']
 
     result_1 = benchmark.objective_function_test(test_config, rng=1, fidelity={'budget': 3})
-    assert result_1['function_value']['accuracy'] == pytest.approx(0.76377, rel=0.001)
-    assert result_1['function_value']['accuracy'] == result_1['info']['test_accuracy']
+    assert 1 - result_1['function_value']['misclassification_rate'] == pytest.approx(0.76377, rel=0.001)
+    assert 1 - result_1['function_value']['misclassification_rate'] == result_1['info']['test_accuracy']
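
For downstream code the migration is a one-line complement, mirroring the updated assertions above. An illustrative check (the result dict is stubbed here with the test's reference value, not fetched from the benchmark):

    import pytest

    result = {'function_value': {'misclassification_rate': 1 - 0.76377},
              'info': {'test_accuracy': 0.76377}}

    # Old (0.0.1): acc = result['function_value']['accuracy']
    # New (0.0.2): invert the minimization objective instead.
    accuracy = 1 - result['function_value']['misclassification_rate']
    assert accuracy == pytest.approx(result['info']['test_accuracy'])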
