From c5c80edccfdd1c3ad3228546ce526fbb7648e996 Mon Sep 17 00:00:00 2001 From: "T.J. Gaffney" Date: Sun, 19 Apr 2020 20:55:01 -0700 Subject: [PATCH 1/7] Change namespace and add adapter --- axelrod/__init__.py | 44 +- axelrod/game.py | 55 +- axelrod/{tests => ipd}/__init__.py | 0 axelrod/{ => ipd}/_strategy_utils.py | 34 +- axelrod/{ => ipd}/action.py | 0 axelrod/{ => ipd}/classifier.py | 30 +- .../compute_finite_state_machine_memory.py | 2 +- axelrod/{ => ipd}/data/all_classifiers.yml | 0 axelrod/{ => ipd}/data/ann_weights.csv | 0 axelrod/{ => ipd}/data/pso_gambler.csv | 0 axelrod/{ => ipd}/deterministic_cache.py | 14 +- axelrod/{ => ipd}/ecosystem.py | 8 +- axelrod/{ => ipd}/eigen.py | 0 axelrod/{ => ipd}/evolvable_player.py | 18 +- axelrod/{ => ipd}/fingerprint.py | 46 +- axelrod/ipd/game.py | 68 +++ axelrod/{ => ipd}/graph.py | 0 axelrod/{ => ipd}/history.py | 2 +- axelrod/{ => ipd}/interaction_utils.py | 10 +- axelrod/{ => ipd}/load_data_.py | 0 axelrod/ipd/match.py | 258 +++++++++ axelrod/{ => ipd}/match_generator.py | 6 +- axelrod/{ => ipd}/mock_player.py | 10 +- axelrod/{ => ipd}/moran.py | 22 +- axelrod/ipd/player.py | 211 ++++++++ axelrod/{ => ipd}/plot.py | 0 axelrod/{ => ipd}/random_.py | 2 +- axelrod/{ => ipd}/result_set.py | 6 +- axelrod/{ => ipd}/strategies/__init__.py | 6 +- axelrod/{ => ipd}/strategies/_filters.py | 12 +- axelrod/{ => ipd}/strategies/_strategies.py | 3 +- axelrod/{ => ipd}/strategies/adaptive.py | 10 +- axelrod/{ => ipd}/strategies/adaptor.py | 10 +- axelrod/{ => ipd}/strategies/alternator.py | 8 +- axelrod/{ => ipd}/strategies/ann.py | 30 +- axelrod/{ => ipd}/strategies/apavlov.py | 12 +- axelrod/{ => ipd}/strategies/appeaser.py | 8 +- axelrod/{ => ipd}/strategies/averagecopier.py | 14 +- axelrod/{ => ipd}/strategies/axelrod_first.py | 56 +- .../{ => ipd}/strategies/axelrod_second.py | 106 ++-- axelrod/{ => ipd}/strategies/backstabber.py | 22 +- .../{ => ipd}/strategies/better_and_better.py | 10 +- .../{ => ipd}/strategies/bush_mosteller.py | 12 +- axelrod/{ => ipd}/strategies/calculator.py | 12 +- axelrod/{ => ipd}/strategies/cooperator.py | 12 +- axelrod/{ => ipd}/strategies/cycler.py | 16 +- axelrod/{ => ipd}/strategies/darwin.py | 8 +- axelrod/{ => ipd}/strategies/dbs.py | 8 +- axelrod/{ => ipd}/strategies/defector.py | 12 +- axelrod/{ => ipd}/strategies/doubler.py | 8 +- .../strategies/finite_state_machines.py | 12 +- axelrod/{ => ipd}/strategies/forgiver.py | 12 +- axelrod/{ => ipd}/strategies/gambler.py | 10 +- axelrod/{ => ipd}/strategies/geller.py | 12 +- axelrod/{ => ipd}/strategies/gobymajority.py | 8 +- axelrod/{ => ipd}/strategies/gradualkiller.py | 12 +- axelrod/{ => ipd}/strategies/grudger.py | 36 +- axelrod/{ => ipd}/strategies/grumpy.py | 8 +- axelrod/{ => ipd}/strategies/handshake.py | 8 +- axelrod/{ => ipd}/strategies/hmm.py | 18 +- axelrod/{ => ipd}/strategies/human.py | 8 +- axelrod/{ => ipd}/strategies/hunter.py | 32 +- axelrod/{ => ipd}/strategies/inverse.py | 10 +- axelrod/{ => ipd}/strategies/lookerup.py | 12 +- .../strategies/mathematicalconstants.py | 8 +- axelrod/{ => ipd}/strategies/memoryone.py | 20 +- axelrod/{ => ipd}/strategies/memorytwo.py | 18 +- axelrod/{ => ipd}/strategies/meta.py | 20 +- axelrod/{ => ipd}/strategies/mindcontrol.py | 14 +- axelrod/{ => ipd}/strategies/mindreader.py | 12 +- axelrod/{ => ipd}/strategies/mutual.py | 18 +- axelrod/{ => ipd}/strategies/negation.py | 10 +- axelrod/{ => ipd}/strategies/oncebitten.py | 18 +- axelrod/{ => ipd}/strategies/prober.py | 40 +- axelrod/{ => ipd}/strategies/punisher.py 
| 20 +- axelrod/{ => ipd}/strategies/qlearner.py | 14 +- axelrod/{ => ipd}/strategies/rand.py | 10 +- axelrod/{ => ipd}/strategies/resurrection.py | 12 +- axelrod/{ => ipd}/strategies/retaliate.py | 14 +- .../{ => ipd}/strategies/revised_downing.py | 8 +- axelrod/{ => ipd}/strategies/selfsteem.py | 10 +- .../{ => ipd}/strategies/sequence_player.py | 12 +- axelrod/{ => ipd}/strategies/shortmem.py | 8 +- axelrod/{ => ipd}/strategies/stalker.py | 14 +- axelrod/{ => ipd}/strategies/titfortat.py | 94 ++-- axelrod/{ => ipd}/strategies/verybad.py | 8 +- .../{ => ipd}/strategies/worse_and_worse.py | 22 +- .../{ => ipd}/strategies/zero_determinant.py | 2 +- axelrod/{ => ipd}/strategy_transformers.py | 36 +- .../integration => ipd/tests}/__init__.py | 0 .../tests/integration}/__init__.py | 0 .../tests/integration/test_filtering.py | 2 +- .../tests/integration/test_matches.py | 12 +- .../{ => ipd}/tests/integration/test_names.py | 0 .../integration/test_sample_tournaments.py | 6 +- .../tests/integration/test_tournament.py | 30 +- axelrod/{ => ipd}/tests/property.py | 12 +- .../unit => ipd/tests/strategies}/__init__.py | 0 .../tests/strategies/test_adaptive.py | 2 +- .../tests/strategies/test_adaptor.py | 0 .../tests/strategies/test_alternator.py | 0 .../{ => ipd}/tests/strategies/test_ann.py | 6 +- .../tests/strategies/test_apavlov.py | 0 .../tests/strategies/test_appeaser.py | 0 .../tests/strategies/test_averagecopier.py | 0 .../tests/strategies/test_axelrod_first.py | 12 +- .../tests/strategies/test_axelrod_second.py | 4 +- .../tests/strategies/test_backstabber.py | 0 .../strategies/test_better_and_better.py | 0 .../tests/strategies/test_bush_mosteller.py | 0 .../tests/strategies/test_calculator.py | 4 +- .../tests/strategies/test_cooperator.py | 0 .../{ => ipd}/tests/strategies/test_cycler.py | 6 +- .../{ => ipd}/tests/strategies/test_darwin.py | 0 .../{ => ipd}/tests/strategies/test_dbs.py | 2 +- .../tests/strategies/test_defector.py | 0 .../tests/strategies/test_doubler.py | 0 .../tests/strategies/test_evolvable_player.py | 8 +- .../strategies/test_finite_state_machines.py | 10 +- .../tests/strategies/test_forgiver.py | 0 .../tests/strategies/test_gambler.py | 4 +- .../{ => ipd}/tests/strategies/test_geller.py | 0 .../tests/strategies/test_gobymajority.py | 0 .../tests/strategies/test_gradualkiller.py | 0 .../tests/strategies/test_grudger.py | 0 .../{ => ipd}/tests/strategies/test_grumpy.py | 0 .../tests/strategies/test_handshake.py | 0 .../tests/strategies/test_headsup.py | 0 .../{ => ipd}/tests/strategies/test_hmm.py | 10 +- .../{ => ipd}/tests/strategies/test_human.py | 4 +- .../{ => ipd}/tests/strategies/test_hunter.py | 4 +- .../tests/strategies/test_inverse.py | 0 .../tests/strategies/test_lookerup.py | 6 +- .../strategies/test_mathematicalconstants.py | 0 .../tests/strategies/test_memoryone.py | 16 +- .../tests/strategies/test_memorytwo.py | 12 +- .../{ => ipd}/tests/strategies/test_meta.py | 10 +- .../tests/strategies/test_mindcontrol.py | 0 .../tests/strategies/test_mindreader.py | 2 +- .../{ => ipd}/tests/strategies/test_mutual.py | 36 +- .../tests/strategies/test_negation.py | 0 .../tests/strategies/test_oncebitten.py | 0 .../{ => ipd}/tests/strategies/test_player.py | 40 +- .../{ => ipd}/tests/strategies/test_prober.py | 0 .../tests/strategies/test_punisher.py | 0 .../tests/strategies/test_qlearner.py | 2 +- .../{ => ipd}/tests/strategies/test_rand.py | 0 .../tests/strategies/test_resurrection.py | 0 .../tests/strategies/test_retaliate.py | 0 
.../tests/strategies/test_revised_downing.py | 0 .../tests/strategies/test_selfsteem.py | 0 .../tests/strategies/test_sequence_player.py | 4 +- .../tests/strategies/test_shortmem.py | 0 .../tests/strategies/test_stalker.py | 2 +- .../tests/strategies/test_titfortat.py | 12 +- .../tests/strategies/test_verybad.py | 0 .../tests/strategies/test_worse_and_worse.py | 0 .../tests/strategies/test_zero_determinant.py | 4 +- axelrod/ipd/tests/unit/__init__.py | 0 axelrod/{ => ipd}/tests/unit/test_actions.py | 2 +- .../tests/unit/test_classification.py | 10 +- ...est_compute_finite_state_machine_memory.py | 2 +- .../tests/unit/test_deterministic_cache.py | 8 +- .../{ => ipd}/tests/unit/test_ecosystem.py | 4 +- axelrod/{ => ipd}/tests/unit/test_eigen.py | 2 +- axelrod/{ => ipd}/tests/unit/test_filters.py | 12 +- .../{ => ipd}/tests/unit/test_fingerprint.py | 67 ++- axelrod/{ => ipd}/tests/unit/test_game.py | 24 +- axelrod/{ => ipd}/tests/unit/test_graph.py | 0 axelrod/{ => ipd}/tests/unit/test_history.py | 2 +- .../tests/unit/test_interaction_utils.py | 2 +- .../{ => ipd}/tests/unit/test_load_data.py | 4 +- axelrod/{ => ipd}/tests/unit/test_match.py | 82 +-- .../tests/unit/test_match_generator.py | 28 +- .../{ => ipd}/tests/unit/test_mock_player.py | 4 +- axelrod/{ => ipd}/tests/unit/test_moran.py | 6 +- axelrod/{ => ipd}/tests/unit/test_pickling.py | 142 ++--- axelrod/{ => ipd}/tests/unit/test_plot.py | 28 +- axelrod/{ => ipd}/tests/unit/test_property.py | 44 +- axelrod/{ => ipd}/tests/unit/test_random_.py | 0 .../{ => ipd}/tests/unit/test_resultset.py | 23 +- .../tests/unit/test_strategy_transformers.py | 14 +- .../tests/unit/test_strategy_utils.py | 6 +- .../{ => ipd}/tests/unit/test_tournament.py | 118 ++--- axelrod/{ => ipd}/tests/unit/test_version.py | 0 axelrod/ipd/tournament.py | 497 ++++++++++++++++++ axelrod/ipd_adapter.py | 160 ++++++ axelrod/match.py | 258 ++------- axelrod/player.py | 195 +------ axelrod/tournament.py | 431 +-------------- .../running_axelrods_first_tournament/main.py | 2 +- migrate_ipd.sh | 48 ++ rebuild_classifier_table.py | 2 +- test | 2 +- 194 files changed, 2437 insertions(+), 1977 deletions(-) rename axelrod/{tests => ipd}/__init__.py (100%) rename axelrod/{ => ipd}/_strategy_utils.py (90%) rename axelrod/{ => ipd}/action.py (100%) rename axelrod/{ => ipd}/classifier.py (90%) rename axelrod/{ => ipd}/compute_finite_state_machine_memory.py (99%) rename axelrod/{ => ipd}/data/all_classifiers.yml (100%) rename axelrod/{ => ipd}/data/ann_weights.csv (100%) rename axelrod/{ => ipd}/data/pso_gambler.csv (100%) rename axelrod/{ => ipd}/deterministic_cache.py (92%) rename axelrod/{ => ipd}/ecosystem.py (97%) rename axelrod/{ => ipd}/eigen.py (100%) rename axelrod/{ => ipd}/evolvable_player.py (83%) rename axelrod/{ => ipd}/fingerprint.py (93%) create mode 100644 axelrod/ipd/game.py rename axelrod/{ => ipd}/graph.py (100%) rename axelrod/{ => ipd}/history.py (98%) rename axelrod/{ => ipd}/interaction_utils.py (97%) rename axelrod/{ => ipd}/load_data_.py (100%) create mode 100644 axelrod/ipd/match.py rename axelrod/{ => ipd}/match_generator.py (95%) rename axelrod/{ => ipd}/mock_player.py (75%) rename axelrod/{ => ipd}/moran.py (96%) create mode 100644 axelrod/ipd/player.py rename axelrod/{ => ipd}/plot.py (100%) rename axelrod/{ => ipd}/random_.py (98%) rename axelrod/{ => ipd}/result_set.py (99%) rename axelrod/{ => ipd}/strategies/__init__.py (96%) rename axelrod/{ => ipd}/strategies/_filters.py (95%) rename axelrod/{ => ipd}/strategies/_strategies.py (99%) rename 
axelrod/{ => ipd}/strategies/adaptive.py (87%) rename axelrod/{ => ipd}/strategies/adaptor.py (91%) rename axelrod/{ => ipd}/strategies/alternator.py (78%) rename axelrod/{ => ipd}/strategies/ann.py (93%) rename axelrod/{ => ipd}/strategies/apavlov.py (93%) rename axelrod/{ => ipd}/strategies/appeaser.py (84%) rename axelrod/{ => ipd}/strategies/averagecopier.py (82%) rename axelrod/{ => ipd}/strategies/axelrod_first.py (96%) rename axelrod/{ => ipd}/strategies/axelrod_second.py (96%) rename axelrod/{ => ipd}/strategies/backstabber.py (81%) rename axelrod/{ => ipd}/strategies/better_and_better.py (75%) rename axelrod/{ => ipd}/strategies/bush_mosteller.py (94%) rename axelrod/{ => ipd}/strategies/calculator.py (81%) rename axelrod/{ => ipd}/strategies/cooperator.py (88%) rename axelrod/{ => ipd}/strategies/cycler.py (93%) rename axelrod/{ => ipd}/strategies/darwin.py (95%) rename axelrod/{ => ipd}/strategies/dbs.py (99%) rename axelrod/{ => ipd}/strategies/defector.py (83%) rename axelrod/{ => ipd}/strategies/doubler.py (81%) rename axelrod/{ => ipd}/strategies/finite_state_machines.py (98%) rename axelrod/{ => ipd}/strategies/forgiver.py (86%) rename axelrod/{ => ipd}/strategies/gambler.py (95%) rename axelrod/{ => ipd}/strategies/geller.py (92%) rename axelrod/{ => ipd}/strategies/gobymajority.py (97%) rename axelrod/{ => ipd}/strategies/gradualkiller.py (77%) rename axelrod/{ => ipd}/strategies/grudger.py (91%) rename axelrod/{ => ipd}/strategies/grumpy.py (92%) rename axelrod/{ => ipd}/strategies/handshake.py (87%) rename axelrod/{ => ipd}/strategies/hmm.py (96%) rename axelrod/{ => ipd}/strategies/human.py (97%) rename axelrod/{ => ipd}/strategies/hunter.py (90%) rename axelrod/{ => ipd}/strategies/inverse.py (84%) rename axelrod/{ => ipd}/strategies/lookerup.py (98%) rename axelrod/{ => ipd}/strategies/mathematicalconstants.py (91%) rename axelrod/{ => ipd}/strategies/memoryone.py (95%) rename axelrod/{ => ipd}/strategies/memorytwo.py (95%) rename axelrod/{ => ipd}/strategies/meta.py (97%) rename axelrod/{ => ipd}/strategies/mindcontrol.py (87%) rename axelrod/{ => ipd}/strategies/mindreader.py (91%) rename axelrod/{ => ipd}/strategies/mutual.py (81%) rename axelrod/{ => ipd}/strategies/negation.py (77%) rename axelrod/{ => ipd}/strategies/oncebitten.py (89%) rename axelrod/{ => ipd}/strategies/prober.py (92%) rename axelrod/{ => ipd}/strategies/punisher.py (91%) rename axelrod/{ => ipd}/strategies/qlearner.py (92%) rename axelrod/{ => ipd}/strategies/rand.py (82%) rename axelrod/{ => ipd}/strategies/resurrection.py (87%) rename axelrod/{ => ipd}/strategies/retaliate.py (94%) rename axelrod/{ => ipd}/strategies/revised_downing.py (92%) rename axelrod/{ => ipd}/strategies/selfsteem.py (87%) rename axelrod/{ => ipd}/strategies/sequence_player.py (91%) rename axelrod/{ => ipd}/strategies/shortmem.py (89%) rename axelrod/{ => ipd}/strategies/stalker.py (87%) rename axelrod/{ => ipd}/strategies/titfortat.py (92%) rename axelrod/{ => ipd}/strategies/verybad.py (88%) rename axelrod/{ => ipd}/strategies/worse_and_worse.py (86%) rename axelrod/{ => ipd}/strategies/zero_determinant.py (99%) rename axelrod/{ => ipd}/strategy_transformers.py (95%) rename axelrod/{tests/integration => ipd/tests}/__init__.py (100%) rename axelrod/{tests/strategies => ipd/tests/integration}/__init__.py (100%) rename axelrod/{ => ipd}/tests/integration/test_filtering.py (98%) rename axelrod/{ => ipd}/tests/integration/test_matches.py (87%) rename axelrod/{ => ipd}/tests/integration/test_names.py (100%) rename 
axelrod/{ => ipd}/tests/integration/test_sample_tournaments.py (96%) rename axelrod/{ => ipd}/tests/integration/test_tournament.py (85%) rename axelrod/{ => ipd}/tests/property.py (96%) rename axelrod/{tests/unit => ipd/tests/strategies}/__init__.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_adaptive.py (96%) rename axelrod/{ => ipd}/tests/strategies/test_adaptor.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_alternator.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_ann.py (96%) rename axelrod/{ => ipd}/tests/strategies/test_apavlov.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_appeaser.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_averagecopier.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_axelrod_first.py (98%) rename axelrod/{ => ipd}/tests/strategies/test_axelrod_second.py (99%) rename axelrod/{ => ipd}/tests/strategies/test_backstabber.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_better_and_better.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_bush_mosteller.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_calculator.py (97%) rename axelrod/{ => ipd}/tests/strategies/test_cooperator.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_cycler.py (97%) rename axelrod/{ => ipd}/tests/strategies/test_darwin.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_dbs.py (99%) rename axelrod/{ => ipd}/tests/strategies/test_defector.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_doubler.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_evolvable_player.py (96%) rename axelrod/{ => ipd}/tests/strategies/test_finite_state_machines.py (98%) rename axelrod/{ => ipd}/tests/strategies/test_forgiver.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_gambler.py (99%) rename axelrod/{ => ipd}/tests/strategies/test_geller.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_gobymajority.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_gradualkiller.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_grudger.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_grumpy.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_handshake.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_headsup.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_hmm.py (97%) rename axelrod/{ => ipd}/tests/strategies/test_human.py (97%) rename axelrod/{ => ipd}/tests/strategies/test_hunter.py (99%) rename axelrod/{ => ipd}/tests/strategies/test_inverse.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_lookerup.py (99%) rename axelrod/{ => ipd}/tests/strategies/test_mathematicalconstants.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_memoryone.py (94%) rename axelrod/{ => ipd}/tests/strategies/test_memorytwo.py (94%) rename axelrod/{ => ipd}/tests/strategies/test_meta.py (99%) rename axelrod/{ => ipd}/tests/strategies/test_mindcontrol.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_mindreader.py (98%) rename axelrod/{ => ipd}/tests/strategies/test_mutual.py (80%) rename axelrod/{ => ipd}/tests/strategies/test_negation.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_oncebitten.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_player.py (95%) rename axelrod/{ => ipd}/tests/strategies/test_prober.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_punisher.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_qlearner.py (99%) rename axelrod/{ => ipd}/tests/strategies/test_rand.py (100%) rename axelrod/{ => 
ipd}/tests/strategies/test_resurrection.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_retaliate.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_revised_downing.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_selfsteem.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_sequence_player.py (94%) rename axelrod/{ => ipd}/tests/strategies/test_shortmem.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_stalker.py (98%) rename axelrod/{ => ipd}/tests/strategies/test_titfortat.py (99%) rename axelrod/{ => ipd}/tests/strategies/test_verybad.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_worse_and_worse.py (100%) rename axelrod/{ => ipd}/tests/strategies/test_zero_determinant.py (98%) create mode 100644 axelrod/ipd/tests/unit/__init__.py rename axelrod/{ => ipd}/tests/unit/test_actions.py (96%) rename axelrod/{ => ipd}/tests/unit/test_classification.py (97%) rename axelrod/{ => ipd}/tests/unit/test_compute_finite_state_machine_memory.py (99%) rename axelrod/{ => ipd}/tests/unit/test_deterministic_cache.py (93%) rename axelrod/{ => ipd}/tests/unit/test_ecosystem.py (97%) rename axelrod/{ => ipd}/tests/unit/test_eigen.py (96%) rename axelrod/{ => ipd}/tests/unit/test_filters.py (95%) rename axelrod/{ => ipd}/tests/unit/test_fingerprint.py (92%) rename axelrod/{ => ipd}/tests/unit/test_game.py (77%) rename axelrod/{ => ipd}/tests/unit/test_graph.py (100%) rename axelrod/{ => ipd}/tests/unit/test_history.py (98%) rename axelrod/{ => ipd}/tests/unit/test_interaction_utils.py (98%) rename axelrod/{ => ipd}/tests/unit/test_load_data.py (79%) rename axelrod/{ => ipd}/tests/unit/test_match.py (83%) rename axelrod/{ => ipd}/tests/unit/test_match_generator.py (92%) rename axelrod/{ => ipd}/tests/unit/test_mock_player.py (87%) rename axelrod/{ => ipd}/tests/unit/test_moran.py (99%) rename axelrod/{ => ipd}/tests/unit/test_pickling.py (63%) rename axelrod/{ => ipd}/tests/unit/test_plot.py (91%) rename axelrod/{ => ipd}/tests/unit/test_property.py (84%) rename axelrod/{ => ipd}/tests/unit/test_random_.py (100%) rename axelrod/{ => ipd}/tests/unit/test_resultset.py (98%) rename axelrod/{ => ipd}/tests/unit/test_strategy_transformers.py (98%) rename axelrod/{ => ipd}/tests/unit/test_strategy_utils.py (97%) rename axelrod/{ => ipd}/tests/unit/test_tournament.py (93%) rename axelrod/{ => ipd}/tests/unit/test_version.py (100%) create mode 100644 axelrod/ipd/tournament.py create mode 100644 axelrod/ipd_adapter.py create mode 100755 migrate_ipd.sh diff --git a/axelrod/__init__.py b/axelrod/__init__.py index bdc39f202..61e8e5004 100644 --- a/axelrod/__init__.py +++ b/axelrod/__init__.py @@ -1,24 +1,28 @@ DEFAULT_TURNS = 200 # The order of imports matters! 
+from axelrod.ipd import graph
+from axelrod.ipd.action import Action
+from axelrod.ipd.random_ import random_choice, random_flip, seed, Pdf
+from axelrod.ipd import eigen
+from axelrod.ipd.plot import Plot
+from axelrod.ipd.history import History, LimitedHistory
+from axelrod.player import BasePlayer
+from axelrod.ipd.player import IpdPlayer
+from axelrod.ipd.classifier import Classifiers
+from axelrod.ipd.evolvable_player import EvolvablePlayer
+from axelrod.game import BaseGame
+from axelrod.ipd.game import IpdGame, DefaultGame
+from axelrod.ipd.moran import MoranProcess, ApproximateMoranProcess
+from axelrod.ipd.strategies import *
+from axelrod.ipd.match_generator import *
+from axelrod.ipd.tournament import IpdTournament
+from axelrod.ipd.ecosystem import Ecosystem
+from axelrod.ipd.match import IpdMatch
+from axelrod.ipd.result_set import ResultSet
+from axelrod.ipd.deterministic_cache import DeterministicCache
+from axelrod.ipd import fingerprint
+from axelrod.ipd.fingerprint import AshlockFingerprint, TransitiveFingerprint
+from axelrod.ipd import interaction_utils
+from axelrod.ipd.mock_player import MockPlayer
 from axelrod.version import __version__
-from axelrod.load_data_ import load_pso_tables, load_weights
-from axelrod import graph
-from axelrod.action import Action
-from axelrod.random_ import random_choice, random_flip, seed, Pdf
-from axelrod.plot import Plot
-from axelrod.game import DefaultGame, Game
-from axelrod.history import History, LimitedHistory
-from axelrod.player import Player
-from axelrod.classifier import Classifiers
-from axelrod.evolvable_player import EvolvablePlayer
-from axelrod.mock_player import MockPlayer
-from axelrod.match import Match
-from axelrod.moran import MoranProcess, ApproximateMoranProcess
-from axelrod.strategies import *
-from axelrod.deterministic_cache import DeterministicCache
-from axelrod.match_generator import *
-from axelrod.tournament import Tournament
-from axelrod.result_set import ResultSet
-from axelrod.ecosystem import Ecosystem
-from axelrod.fingerprint import AshlockFingerprint, TransitiveFingerprint
diff --git a/axelrod/game.py b/axelrod/game.py
index 1c3278275..2099e5b14 100644
--- a/axelrod/game.py
+++ b/axelrod/game.py
@@ -1,46 +1,18 @@
 from typing import Tuple, Union

-from axelrod import Action
-
-C, D = Action.C, Action.D
+import axelrod as axl

 Score = Union[int, float]


-class Game(object):
-    """Container for the game matrix and scoring logic.
-
-    Attributes
-    ----------
-    scores: dict
-        The numerical score attribute to all combinations of action pairs.
-    """
-
-    def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> None:
-        """Create a new game object.
-
-        Parameters
-        ----------
-        r: int or float
-            Score obtained by both players for mutual cooperation.
-        s: int or float
-            Score obtained by a player for cooperating against a defector.
-        t: int or float
-            Score obtained by a player for defecting against a cooperator.
-        p: int or float
-            Score obtained by both player for mutual defection.
-        """
-        self.scores = {(C, C): (r, r), (D, D): (p, p), (C, D): (s, t), (D, C): (t, s)}
+class BaseGame(object):
+    """Container for the scoring logic."""

-    def RPST(self) -> Tuple[Score, Score, Score, Score]:
-        """Returns game matrix values in Press and Dyson notation."""
-        R = self.scores[(C, C)][0]
-        P = self.scores[(D, D)][0]
-        S = self.scores[(C, D)][0]
-        T = self.scores[(D, C)][0]
-        return R, P, S, T
+    def __init__(self) -> None:
+        """Create a new game object."""
+        pass

-    def score(self, pair: Tuple[Action, Action]) -> Tuple[Score, Score]:
+    def score(self, pair: Tuple[axl.Action, axl.Action]) -> Tuple[Score, Score]:
         """Returns the appropriate score for a decision pair.

         Parameters
@@ -53,15 +25,4 @@ def score(self, pair: Tuple[Action, Action]) -> Tuple[Score, Score]:
             tuple of int or float
             Scores for two player resulting from their actions.
         """
-        return self.scores[pair]
-
-    def __repr__(self) -> str:
-        return "Axelrod game: (R,P,S,T) = {}".format(self.RPST())
-
-    def __eq__(self, other):
-        if not isinstance(other, Game):
-            return False
-        return self.RPST() == other.RPST()
-
-
-DefaultGame = Game()
+        raise NotImplementedError()
diff --git a/axelrod/tests/__init__.py b/axelrod/ipd/__init__.py
similarity index 100%
rename from axelrod/tests/__init__.py
rename to axelrod/ipd/__init__.py
diff --git a/axelrod/_strategy_utils.py b/axelrod/ipd/_strategy_utils.py
similarity index 90%
rename from axelrod/_strategy_utils.py
rename to axelrod/ipd/_strategy_utils.py
index d028a072b..b10cddf75 100644
--- a/axelrod/_strategy_utils.py
+++ b/axelrod/ipd/_strategy_utils.py
@@ -3,9 +3,9 @@
 import itertools
 from functools import lru_cache

-from axelrod.action import Action
-from axelrod.strategies.cooperator import Cooperator
-from axelrod.strategies.defector import Defector
+from axelrod.ipd.action import Action
+from axelrod.ipd.strategies.cooperator import Cooperator
+from axelrod.ipd.strategies.defector import Defector

 C, D = Action.C, Action.D

@@ -53,9 +53,9 @@ def inspect_strategy(inspector, opponent):
     Parameters
     ----------
-    inspector: Player
+    inspector: IpdPlayer
         The player doing the inspecting
-    opponent: Player
+    opponent: IpdPlayer
         The player being inspected

     Returns
@@ -82,9 +82,9 @@ def _limited_simulate_play(player_1, player_2, h1):
     Parameters
     ----------
-    player_1: Player
+    player_1: IpdPlayer
         The player whose move is already known.
-    player_2: Player
+    player_2: IpdPlayer
         The player the we want to inspect.
     h1: Action
         The next action for first player.
@@ -99,9 +99,9 @@ def simulate_match(player_1, player_2, strategy, rounds=10):
     Parameters
     ----------
-    player_1: Player
+    player_1: IpdPlayer
         The player that will have a constant strategy.
-    player_2: Player
+    player_2: IpdPlayer
         The player we want to simulate.
     strategy: Action
         The constant strategy to use for first player.
@@ -117,12 +117,12 @@ def _calculate_scores(p1, p2, game):
     Parameters
     ----------
-    p1: Player
+    p1: IpdPlayer
         The first player.
-    p2: Player
+    p2: IpdPlayer
         The second player.
-    game: Game
-        Game object used to score rounds in the players' histories.
+    game: IpdGame
+        IpdGame object used to score rounds in the players' histories.

     Returns
     -------
@@ -142,12 +142,12 @@ def look_ahead(player_1, player_2, game, rounds=10):
     Parameters
     ----------
-    player_1: Player
+    player_1: IpdPlayer
        The player that will look ahead.
-    player_2: Player
+    player_2: IpdPlayer
        The opponent that will be inspected.
-    game: Game
-        The Game object used to score rounds.
+    game: IpdGame
+        The IpdGame object used to score rounds.
rounds: int The number of rounds to look ahead. diff --git a/axelrod/action.py b/axelrod/ipd/action.py similarity index 100% rename from axelrod/action.py rename to axelrod/ipd/action.py diff --git a/axelrod/classifier.py b/axelrod/ipd/classifier.py similarity index 90% rename from axelrod/classifier.py rename to axelrod/ipd/classifier.py index a88dbe39e..f34b89ab7 100644 --- a/axelrod/classifier.py +++ b/axelrod/ipd/classifier.py @@ -14,7 +14,7 @@ import warnings import yaml -from axelrod.player import Player +from axelrod.ipd.player import IpdPlayer ALL_CLASSIFIERS_PATH = "data/all_classifiers.yml" @@ -22,10 +22,10 @@ class Classifier(Generic[T]): - """Describes a Player (strategy). + """Describes a IpdPlayer (strategy). User sets a name and function, f, at initialization. Through - classify_player, looks for the classifier to be set in the passed Player + classify_player, looks for the classifier to be set in the passed IpdPlayer class. If not set, then passes to f for calculation. f must operate on the class, and not an instance. If necessary, f may @@ -38,18 +38,18 @@ class Classifier(Generic[T]): Attributes ---------- name: An identifier for the classifier, used as a dict key in storage and in - 'classifier' dicts of Player classes. - player_class_classifier: A function that takes in a Player class (not an + 'classifier' dicts of IpdPlayer classes. + player_class_classifier: A function that takes in a IpdPlayer class (not an instance) and returns a value. """ def __init__( - self, name: Text, player_class_classifier: Callable[[Type[Player]], T] + self, name: Text, player_class_classifier: Callable[[Type[IpdPlayer]], T] ): self.name = name self.player_class_classifier = player_class_classifier - def classify_player(self, player: Type[Player]) -> T: + def classify_player(self, player: Type[IpdPlayer]) -> T: """Look for this classifier in the passed player's 'classifier' dict, otherwise pass to the player to f.""" try: @@ -80,7 +80,7 @@ def classify_player(self, player: Type[Player]) -> T: def rebuild_classifier_table( classifiers: List[Classifier], - players: List[Type[Player]], + players: List[Type[IpdPlayer]], path: Text = ALL_CLASSIFIERS_PATH, ) -> None: """Builds the classifier table in data. @@ -142,7 +142,7 @@ def known_classifier(cls, classifier_name: Text) -> bool: @classmethod def __getitem__( cls, key: Union[Classifier, Text] - ) -> Callable[[Union[Player, Type[Player]]], Any]: + ) -> Callable[[Union[IpdPlayer, Type[IpdPlayer]]], Any]: """Looks up the classifier for the player. Given a passed classifier key, return a function that: @@ -152,7 +152,7 @@ def __getitem__( player in the all_player_dicts. Returns None if the classifier is not found in either of those. - The returned function expects Player instances, but if a Player class is + The returned function expects IpdPlayer instances, but if a IpdPlayer class is passed, then it will create an instance by calling an argument-less initializer. If no such initializer exists on the class, then an error will result. @@ -164,7 +164,7 @@ def __getitem__( Returns ------- - A function that will map Player (or Player instances) to their value for + A function that will map IpdPlayer (or IpdPlayer instances) to their value for this classification. """ # Key may be the name or an instance. Convert to name. 
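The Classifiers registry in the hunks above returns a lookup function rather than a plain value. A minimal usage sketch of that API, assuming the top-level re-exports from the new axelrod/__init__.py shown earlier (per the docstring above, passing a class rather than an instance works only if the class has an argument-less initializer):

    import axelrod as axl

    # Classifiers[...] yields a function that maps an IpdPlayer instance
    # (or class) to its value for that classification.
    stochastic = axl.Classifiers["stochastic"]
    print(stochastic(axl.TitForTat()))  # False: Tit For Tat is deterministic
    print(stochastic(axl.Random()))     # True: Random acts randomly each turn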
@@ -175,7 +175,7 @@ def __getitem__( raise KeyError("Unknown classifier") def classify_player_for_this_classifier( - player: Union[Player, Type[Player]] + player: Union[IpdPlayer, Type[IpdPlayer]] ) -> Any: def try_lookup() -> Any: try: @@ -187,7 +187,7 @@ def try_lookup() -> Any: # If the passed player is not an instance, then try to initialize an # instance without arguments. - if not isinstance(player, Player): + if not isinstance(player, IpdPlayer): try: player = player() warnings.warn( @@ -214,7 +214,7 @@ def try_lookup() -> Any: return classify_player_for_this_classifier @classmethod - def is_basic(cls, s: Union[Player, Type[Player]]): + def is_basic(cls, s: Union[IpdPlayer, Type[IpdPlayer]]): """ Defines criteria for a strategy to be considered 'basic' """ @@ -232,7 +232,7 @@ def is_basic(cls, s: Union[Player, Type[Player]]): ) @classmethod - def obey_axelrod(cls, s: Union[Player, Type[Player]]): + def obey_axelrod(cls, s: Union[IpdPlayer, Type[IpdPlayer]]): """ A function to check if a strategy obeys Axelrod's original tournament rules. diff --git a/axelrod/compute_finite_state_machine_memory.py b/axelrod/ipd/compute_finite_state_machine_memory.py similarity index 99% rename from axelrod/compute_finite_state_machine_memory.py rename to axelrod/ipd/compute_finite_state_machine_memory.py index 0f83d3494..24fcb4f1d 100644 --- a/axelrod/compute_finite_state_machine_memory.py +++ b/axelrod/ipd/compute_finite_state_machine_memory.py @@ -1,4 +1,4 @@ -from axelrod.action import Action +from axelrod.ipd.action import Action from collections import defaultdict, namedtuple from typing import DefaultDict, Iterator, Dict, Tuple, Set, List diff --git a/axelrod/data/all_classifiers.yml b/axelrod/ipd/data/all_classifiers.yml similarity index 100% rename from axelrod/data/all_classifiers.yml rename to axelrod/ipd/data/all_classifiers.yml diff --git a/axelrod/data/ann_weights.csv b/axelrod/ipd/data/ann_weights.csv similarity index 100% rename from axelrod/data/ann_weights.csv rename to axelrod/ipd/data/ann_weights.csv diff --git a/axelrod/data/pso_gambler.csv b/axelrod/ipd/data/pso_gambler.csv similarity index 100% rename from axelrod/data/pso_gambler.csv rename to axelrod/ipd/data/pso_gambler.csv diff --git a/axelrod/deterministic_cache.py b/axelrod/ipd/deterministic_cache.py similarity index 92% rename from axelrod/deterministic_cache.py rename to axelrod/ipd/deterministic_cache.py index 24fc53b7d..b792986eb 100644 --- a/axelrod/deterministic_cache.py +++ b/axelrod/ipd/deterministic_cache.py @@ -18,9 +18,9 @@ from axelrod import Classifiers from .action import Action -from .player import Player +from .player import IpdPlayer -CachePlayerKey = Tuple[Player, Player] +CachePlayerKey = Tuple[IpdPlayer, IpdPlayer] CacheKey = Tuple[str, str] @@ -38,7 +38,7 @@ def _key_transform(key: CachePlayerKey) -> CacheKey: def _is_valid_key(key: CachePlayerKey) -> bool: """Validate a deterministic cache player key. - The key should always be a 2-tuple, with a pair of axelrod.Player + The key should always be a 2-tuple, with a pair of axelrodPlayer instances and one integer. Both players should be deterministic. 
Parameters @@ -52,7 +52,7 @@ def _is_valid_key(key: CachePlayerKey) -> bool: if not isinstance(key, tuple) or len(key) != 2: return False - if not (isinstance(key[0], Player) and isinstance(key[1], Player)): + if not (isinstance(key[0], IpdPlayer) and isinstance(key[1], IpdPlayer)): return False if Classifiers["stochastic"](key[0]) or Classifiers["stochastic"](key[1]): @@ -89,8 +89,8 @@ class DeterministicCache(UserDict): By also storing those cached results in a file, we can re-use the cache between multiple tournaments if necessary. - The cache is a dictionary mapping pairs of Player classes to a list of - resulting interactions. e.g. for a 3 turn Match between Cooperator and + The cache is a dictionary mapping pairs of IpdPlayer classes to a list of + resulting interactions. e.g. for a 3 turn IpdMatch between Cooperator and Alternator, the dictionary entry would be: (axelrod.Cooperator, axelrod.Alternator): [(C, C), (C, D), (C, C)] @@ -132,7 +132,7 @@ def __setitem__(self, key: CachePlayerKey, value): if not _is_valid_key(key): raise ValueError( - "Key must be a tuple of 2 deterministic axelrod Player classes" + "Key must be a tuple of 2 deterministic axelrod IpdPlayer classes" ) if not _is_valid_value(value): diff --git a/axelrod/ecosystem.py b/axelrod/ipd/ecosystem.py similarity index 97% rename from axelrod/ecosystem.py rename to axelrod/ipd/ecosystem.py index 4c3bfb907..a725dab76 100644 --- a/axelrod/ecosystem.py +++ b/axelrod/ipd/ecosystem.py @@ -5,7 +5,7 @@ tournament needs to happen before it is created. For example: players = [axelrod.Cooperator(), axlerod.Defector()] -tournament = axelrod.Tournament(players=players) +tournament = axelrod.IpdTournament(players=players) results = tournament.play() ecosystem = axelrod.Ecosystem(results) ecosystem.reproduce(100) @@ -14,7 +14,7 @@ import random from typing import Callable, List -from axelrod.result_set import ResultSet +from axelrod.ipd.result_set import ResultSet class Ecosystem(object): @@ -33,7 +33,7 @@ def __init__( population: List[int] = None, ) -> None: """Create a new ecosystem. - + Parameters ---------- results: ResultSet @@ -83,7 +83,7 @@ def __init__( def reproduce(self, turns: int): """Reproduce populations according to the payoff matrix. - + Parameters ---------- turns: int diff --git a/axelrod/eigen.py b/axelrod/ipd/eigen.py similarity index 100% rename from axelrod/eigen.py rename to axelrod/ipd/eigen.py diff --git a/axelrod/evolvable_player.py b/axelrod/ipd/evolvable_player.py similarity index 83% rename from axelrod/evolvable_player.py rename to axelrod/ipd/evolvable_player.py index e80da1c69..68681b250 100644 --- a/axelrod/evolvable_player.py +++ b/axelrod/ipd/evolvable_player.py @@ -2,23 +2,23 @@ from pickle import dumps, loads from random import randrange from typing import Dict, List -from .player import Player +from .player import IpdPlayer class InsufficientParametersError(Exception): - """Error indicating that insufficient parameters were specified to initialize an Evolvable Player.""" + """Error indicating that insufficient parameters were specified to initialize an Evolvable IpdPlayer.""" def __init__(self, *args): super().__init__(*args) -class EvolvablePlayer(Player): +class EvolvablePlayer(IpdPlayer): """A class for a player that can evolve, for use in the Moran process or with reinforcement learning algorithms. This is an abstract base class, not intended to be used directly. 
""" name = "EvolvablePlayer" - parent_class = Player + parent_class = IpdPlayer parent_kwargs = [] # type: List[str] def overwrite_init_kwargs(self, **kwargs): @@ -43,25 +43,25 @@ def serialize_parameters(self): @classmethod def deserialize_parameters(cls, serialized): - """Deserialize parameters to a Player instance.""" + """Deserialize parameters to a IpdPlayer instance.""" init_kwargs = loads(base64.b64decode(serialized)) return cls(**init_kwargs) # Optional methods for evolutionary algorithms and Moran processes. def mutate(self): - """Optional method to allow Player to produce a variant (not in place).""" + """Optional method to allow IpdPlayer to produce a variant (not in place).""" pass # pragma: no cover def crossover(self, other): - """Optional method to allow Player to produce variants in combination with another player. Returns a new - Player.""" + """Optional method to allow IpdPlayer to produce variants in combination with another player. Returns a new + IpdPlayer.""" pass # pragma: no cover # Optional methods for particle swarm algorithm. def receive_vector(self, vector): - """Receive a vector of params and overwrite the Player.""" + """Receive a vector of params and overwrite the IpdPlayer.""" pass # pragma: no cover def create_vector_bounds(self): diff --git a/axelrod/fingerprint.py b/axelrod/ipd/fingerprint.py similarity index 93% rename from axelrod/fingerprint.py rename to axelrod/ipd/fingerprint.py index 20390abfd..273777a6d 100644 --- a/axelrod/fingerprint.py +++ b/axelrod/ipd/fingerprint.py @@ -10,12 +10,12 @@ from mpl_toolkits.axes_grid1 import make_axes_locatable import axelrod as axl -from axelrod import Player -from axelrod.interaction_utils import ( +from axelrod import IpdPlayer +from axelrod.ipd.interaction_utils import ( compute_final_score_per_turn, read_interactions_from_file, ) -from axelrod.strategy_transformers import DualTransformer, JossAnnTransformer +from axelrod.ipd.strategy_transformers import DualTransformer, JossAnnTransformer Point = namedtuple("Point", "x y") @@ -59,7 +59,7 @@ def _create_points(step: float, progress_bar: bool = True) -> List[Point]: return points -def _create_jossann(point: Point, probe: Any) -> Player: +def _create_jossann(point: Point, probe: Any) -> IpdPlayer: """Creates a JossAnn probe player that matches the Point. If the coordinates of point sums to more than 1 the parameters are @@ -70,8 +70,8 @@ def _create_jossann(point: Point, probe: Any) -> Player: ---------- point : Point probe : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. Returns ---------- @@ -80,7 +80,7 @@ def _create_jossann(point: Point, probe: Any) -> Player: """ x, y = point - if isinstance(probe, axl.Player): + if isinstance(probe, axl.IpdPlayer): init_kwargs = probe.init_kwargs probe = probe.__class__ else: @@ -96,8 +96,8 @@ def _create_jossann(point: Point, probe: Any) -> Player: def _create_probes( - probe: Union[type, Player], points: list, progress_bar: bool = True -) -> List[Player]: + probe: Union[type, IpdPlayer], points: list, progress_bar: bool = True +) -> List[IpdPlayer]: """Creates a set of probe strategies over the unit square. Constructs probe strategies that correspond to points with coordinates @@ -106,8 +106,8 @@ def _create_probes( Parameters ---------- probe : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. 
+ A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. points : list of Point objects with coordinates (x, y) progress_bar : bool @@ -216,17 +216,17 @@ def _reshape_data(data: dict, points: list, size: int) -> np.ndarray: class AshlockFingerprint(object): def __init__( - self, strategy: Union[type, Player], probe: Union[type, Player] = axl.TitForTat + self, strategy: Union[type, IpdPlayer], probe: Union[type, IpdPlayer] = axl.TitForTat ) -> None: """ Parameters ---------- strategy : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. probe : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. Default: Tit For Tat """ self.strategy = strategy @@ -254,7 +254,7 @@ def _construct_tournament_elements( corresponding probe (+1 to allow for including the Strategy). tournament_players : list - A list containing instances of axelrod.Player. The first item is the + A list containing instances of axelrodPlayer. The first item is the original player, the rest are the probes. """ @@ -264,7 +264,7 @@ def _construct_tournament_elements( self.probe, self.points, progress_bar=progress_bar ) - if isinstance(self.strategy, axl.Player): + if isinstance(self.strategy, axl.IpdPlayer): tournament_players = [self.strategy] + probe_players else: tournament_players = [self.strategy()] + probe_players @@ -321,7 +321,7 @@ def fingerprint( ) self.step = step - self.spatial_tournament = axl.Tournament( + self.spatial_tournament = axl.IpdTournament( tourn_players, turns=turns, repetitions=repetitions, edges=edges ) self.spatial_tournament.play( @@ -404,8 +404,8 @@ def __init__(self, strategy, opponents=None, number_of_opponents=50): Parameters ---------- strategy : class or instance - A class that must be descended from axelrod.Player or an instance of - axelrod.Player. + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. opponents : list of instances A list that contains a list of opponents Default: A spectrum of Random players @@ -460,7 +460,7 @@ def fingerprint( and the jth column the jth turn. """ - if isinstance(self.strategy, axl.Player): + if isinstance(self.strategy, axl.IpdPlayer): players = [self.strategy] + self.opponents else: players = [self.strategy()] + self.opponents @@ -470,7 +470,7 @@ def fingerprint( temp_file_descriptor, filename = mkstemp() # type: ignore edges = [(0, k + 1) for k in range(len(self.opponents))] - tournament = axl.Tournament( + tournament = axl.IpdTournament( players=players, edges=edges, turns=turns, diff --git a/axelrod/ipd/game.py b/axelrod/ipd/game.py new file mode 100644 index 000000000..d35ec98c7 --- /dev/null +++ b/axelrod/ipd/game.py @@ -0,0 +1,68 @@ +from typing import Tuple, Union + +from axelrod import Action, BaseGame + +C, D = Action.C, Action.D + +Score = Union[int, float] + + +class IpdGame(BaseGame): + """Container for the game matrix and scoring logic. + + Attributes + ---------- + scores: dict + The numerical score attribute to all combinations of action pairs. + """ + + def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> None: + """Create a new game object. + + Parameters + ---------- + r: int or float + Score obtained by both players for mutual cooperation. 
+ s: int or float + Score obtained by a player for cooperating against a defector. + t: int or float + Score obtained by a player for defecting against a cooperator. + p: int or float + Score obtained by both player for mutual defection. + """ + self.scores = {(C, C): (r, r), (D, D): (p, p), (C, D): (s, t), (D, C): (t, s)} + super().__init__() + + def RPST(self) -> Tuple[Score, Score, Score, Score]: + """Returns game matrix values in Press and Dyson notation.""" + R = self.scores[(C, C)][0] + P = self.scores[(D, D)][0] + S = self.scores[(C, D)][0] + T = self.scores[(D, C)][0] + return R, P, S, T + + def score(self, pair: Tuple[Action, Action]) -> Tuple[Score, Score]: + """Returns the appropriate score for a decision pair. + + Parameters + ---------- + pair: tuple(Action, Action) + A pair actions for two players, for example (C, C). + + Returns + ------- + tuple of int or float + Scores for two player resulting from their actions. + """ + return self.scores[pair] + + def __repr__(self) -> str: + return "Axelrod game: (R,P,S,T) = {}".format(self.RPST()) + + def __eq__(self, other): + if not isinstance(other, IpdGame): + return False + return self.RPST() == other.RPST() + + +DefaultGame = IpdGame() diff --git a/axelrod/graph.py b/axelrod/ipd/graph.py similarity index 100% rename from axelrod/graph.py rename to axelrod/ipd/graph.py diff --git a/axelrod/history.py b/axelrod/ipd/history.py similarity index 98% rename from axelrod/history.py rename to axelrod/ipd/history.py index 9e21b04c7..a9dca6406 100644 --- a/axelrod/history.py +++ b/axelrod/ipd/history.py @@ -1,6 +1,6 @@ from collections import Counter -from axelrod.action import Action, actions_to_str +from axelrod.ipd.action import Action, actions_to_str C, D = Action.C, Action.D diff --git a/axelrod/interaction_utils.py b/axelrod/ipd/interaction_utils.py similarity index 97% rename from axelrod/interaction_utils.py rename to axelrod/ipd/interaction_utils.py index 82159f94c..72bdcaa16 100644 --- a/axelrod/interaction_utils.py +++ b/axelrod/ipd/interaction_utils.py @@ -4,16 +4,16 @@ [(C, D), (D, C),...] -This is used by both the Match class and the ResultSet class which analyse +This is used by both the IpdMatch class and the ResultSet class which analyse interactions. 
""" from collections import Counter, defaultdict import pandas as pd import tqdm -from axelrod.action import Action, str_to_actions +from axelrod.ipd.action import Action, str_to_actions -from .game import Game +from .game import IpdGame C, D = Action.C, Action.D @@ -21,7 +21,7 @@ def compute_scores(interactions, game=None): """Returns the scores of a given set of interactions.""" if not game: - game = Game() + game = IpdGame() return [game.score(plays) for plays in interactions] @@ -53,7 +53,7 @@ def compute_final_score_per_turn(interactions, game=None): def compute_winner_index(interactions, game=None): - """Returns the index of the winner of the Match""" + """Returns the index of the winner of the IpdMatch""" scores = compute_final_score(interactions, game) if scores is not None: diff --git a/axelrod/load_data_.py b/axelrod/ipd/load_data_.py similarity index 100% rename from axelrod/load_data_.py rename to axelrod/ipd/load_data_.py diff --git a/axelrod/ipd/match.py b/axelrod/ipd/match.py new file mode 100644 index 000000000..4dafbe44f --- /dev/null +++ b/axelrod/ipd/match.py @@ -0,0 +1,258 @@ +import random +from math import ceil, log + +import axelrod.ipd.interaction_utils as iu +from axelrod import DEFAULT_TURNS +from axelrod.ipd.action import Action +from axelrod import Classifiers +from axelrod.ipd.game import IpdGame +from axelrod.match import BaseMatch +from .deterministic_cache import DeterministicCache + +C, D = Action.C, Action.D + + +def is_stochastic(players, noise): + """Determines if a match is stochastic -- true if there is noise or if any + of the players involved is stochastic.""" + return noise or any(map(Classifiers["stochastic"], players)) + + +class IpdMatch(BaseMatch): + """The IpdMatch class conducts matches between two players.""" + + def __init__( + self, + players, + turns=None, + prob_end=None, + game=None, + deterministic_cache=None, + noise=0, + match_attributes=None, + reset=True, + ): + """ + Parameters + ---------- + players : tuple + A pair of Player objects + turns : integer + The number of turns per match + prob_end : float + The probability of a given turn ending a match + game : axelrod.IpdGame + The game object used to score the match + deterministic_cache : axelrod.DeterministicCache + A cache of resulting actions for deterministic matches + noise : float + The probability that a player's intended action should be flipped + match_attributes : dict + Mapping attribute names to values which should be passed to players. + The default is to use the correct values for turns, game and noise + but these can be overridden if desired. 
+        reset : bool
+            Whether to reset players or not
+        """
+
+        defaults = {
+            (True, True): (DEFAULT_TURNS, 0),
+            (True, False): (float("inf"), prob_end),
+            (False, True): (turns, 0),
+            (False, False): (turns, prob_end),
+        }
+        self.turns, self.prob_end = defaults[(turns is None, prob_end is None)]
+
+        self.result = []
+        self.noise = noise
+
+        if game is None:
+            self.game = IpdGame()
+        else:
+            self.game = game
+
+        if deterministic_cache is None:
+            self._cache = DeterministicCache()
+        else:
+            self._cache = deterministic_cache
+
+        if match_attributes is None:
+            known_turns = self.turns if prob_end is None else float("inf")
+            self.match_attributes = {
+                "length": known_turns,
+                "game": self.game,
+                "noise": self.noise,
+            }
+        else:
+            self.match_attributes = match_attributes
+
+        self.players = list(players)
+        self.reset = reset
+
+        super().__init__(
+            players, turns, prob_end, game, noise, match_attributes, reset
+        )
+
+    @property
+    def players(self):
+        return self._players
+
+    @players.setter
+    def players(self, players):
+        """Ensure that players are passed the match attributes"""
+        newplayers = []
+        for player in players:
+            player.set_match_attributes(**self.match_attributes)
+            newplayers.append(player)
+        self._players = newplayers
+
+    @property
+    def _stochastic(self):
+        """
+        A boolean to show whether a match between two players would be
+        stochastic.
+        """
+        return is_stochastic(self.players, self.noise)
+
+    @property
+    def _cache_update_required(self):
+        """
+        A boolean to show whether the deterministic cache should be updated.
+        """
+        return (
+            not self.noise
+            and self._cache.mutable
+            and not (any(Classifiers["stochastic"](p) for p in self.players))
+        )
+
+    def _cached_enough_turns(self, cache_key, turns):
+        """
+        Returns True iff there is an entry in self._cache for the given key
+        and it is at least turns long.
+        """
+        if cache_key not in self._cache:
+            return False
+        return len(self._cache[cache_key]) >= turns
+
+    def play(self):
+        """
+        The resulting list of actions from a match between two players.
+
+        This method determines whether the actions list can be obtained from
+        the deterministic cache and returns it from there if so. If not, it
+        calls the play method for player1 and returns the list from there.
+
+        Returns
+        -------
+        A list of the form:
+
+        e.g. for a 2 turn match between Cooperator and Defector:
+
+        [(C, D), (C, D)]
+
+        i.e. One entry per turn containing a pair of actions.
+        """
+        turns = min(sample_length(self.prob_end), self.turns)
+        cache_key = (self.players[0], self.players[1])
+
+        if self._stochastic or not self._cached_enough_turns(cache_key, turns):
+            for p in self.players:
+                if self.reset:
+                    p.reset()
+                p.set_match_attributes(**self.match_attributes)
+            result = []
+            for _ in range(turns):
+                plays = self.players[0].play(self.players[1], self.noise)
+                result.append(plays)
+
+            if self._cache_update_required:
+                self._cache[cache_key] = result
+        else:
+            result = self._cache[cache_key][:turns]
+
+        self.result = result
+        return result
+
+    def scores(self):
+        """Returns the scores of the previous IpdMatch plays."""
+        return iu.compute_scores(self.result, self.game)
+
+    def final_score(self):
+        """Returns the final score for an IpdMatch."""
+        return iu.compute_final_score(self.result, self.game)
+
+    def final_score_per_turn(self):
+        """Returns the mean score per round for an IpdMatch."""
+        return iu.compute_final_score_per_turn(self.result, self.game)
+
+    def winner(self):
+        """Returns the winner of the IpdMatch."""
+        winner_index = iu.compute_winner_index(self.result, self.game)
+        if winner_index is False:  # No winner
+            return False
+        if winner_index is None:  # No plays
+            return None
+        return self.players[winner_index]
+
+    def cooperation(self):
+        """Returns the count of cooperations by each player."""
+        return iu.compute_cooperations(self.result)
+
+    def normalised_cooperation(self):
+        """Returns the count of cooperations by each player per turn."""
+        return iu.compute_normalised_cooperation(self.result)
+
+    def state_distribution(self):
+        """
+        Returns the count of each state for a set of interactions.
+        """
+        return iu.compute_state_distribution(self.result)
+
+    def normalised_state_distribution(self):
+        """
+        Returns the normalized count of each state for a set of interactions.
+        """
+        return iu.compute_normalised_state_distribution(self.result)
+
+    def sparklines(self, c_symbol="█", d_symbol=" "):
+        return iu.compute_sparklines(self.result, c_symbol, d_symbol)
+
+    def __len__(self):
+        return self.turns
+
+
+def sample_length(prob_end):
+    """
+    Sample length of a game.
+
+    This is using inverse random sample on a probability density function
+    given by:
+
+    f(n) = p_end * (1 - p_end) ^ (n - 1)
+
+    (So the probability of length n is given by f(n))
+
+    Which gives the cumulative distribution function:
+
+    F(n) = 1 - (1 - p_end) ^ n
+
+    (So the probability of length less than or equal to n is given by F(n))
+
+    Solving x = F(n) for n, where x is the random sample, gives:
+
+    n = ceil((ln(1-x)/ln(1-p_end)))
+
+    This approach of sampling from a distribution is called inverse
+    transform sampling.
+
+    Note that this corresponds to sampling at the end of every turn whether
+    or not the IpdMatch ends.
+    """
+    if prob_end == 0:
+        return float("inf")
+    if prob_end == 1:
+        return 1
+    x = random.random()
+    return int(ceil(log(1 - x) / log(1 - prob_end)))
diff --git a/axelrod/match_generator.py b/axelrod/ipd/match_generator.py
similarity index 95%
rename from axelrod/match_generator.py
rename to axelrod/ipd/match_generator.py
index d74c143fd..d4dca9dc1 100644
--- a/axelrod/match_generator.py
+++ b/axelrod/ipd/match_generator.py
@@ -11,18 +11,18 @@ def __init__(
         match_attributes=None,
     ):
         """
-        A class to generate matches. This is used by the Tournament class which
+        A class to generate matches. This is used by the IpdTournament class which
        is in charge of playing the matches and collecting the results.
Parameters ---------- players : list - A list of axelrod.Player objects + A list of axelrodPlayer objects repetitions : int The number of repetitions of a given match turns : integer The number of turns per match - game : axelrod.Game + game : axelrod.IpdGame The game object used to score the match noise : float, 0 The probability that a player's intended action should be flipped diff --git a/axelrod/mock_player.py b/axelrod/ipd/mock_player.py similarity index 75% rename from axelrod/mock_player.py rename to axelrod/ipd/mock_player.py index 41ee0de2a..f7115303e 100644 --- a/axelrod/mock_player.py +++ b/axelrod/ipd/mock_player.py @@ -1,18 +1,18 @@ from itertools import cycle from typing import List -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class MockPlayer(Player): +class MockPlayer(IpdPlayer): """Creates a mock player that plays a given sequence of actions. If no actions are given, plays like Cooperator. Used for testing. """ - name = "Mock Player" + name = "Mock IpdPlayer" def __init__(self, actions: List[Action] = None) -> None: super().__init__() @@ -20,7 +20,7 @@ def __init__(self, actions: List[Action] = None) -> None: actions = [] self.actions = cycle(actions) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Return the next saved action, if present. try: action = self.actions.__next__() diff --git a/axelrod/moran.py b/axelrod/ipd/moran.py similarity index 96% rename from axelrod/moran.py rename to axelrod/ipd/moran.py index 1c0bdc487..1ea3c7e40 100644 --- a/axelrod/moran.py +++ b/axelrod/ipd/moran.py @@ -6,11 +6,11 @@ import matplotlib.pyplot as plt import numpy as np -from axelrod import EvolvablePlayer, DEFAULT_TURNS, Game, Player +from axelrod import EvolvablePlayer, DEFAULT_TURNS, IpdGame, IpdPlayer from .deterministic_cache import DeterministicCache from .graph import Graph, complete_graph -from .match import Match +from .match import IpdMatch from .random_ import randrange @@ -45,11 +45,11 @@ def fitness_proportionate_selection( class MoranProcess(object): def __init__( self, - players: List[Player], + players: List[IpdPlayer], turns: int = DEFAULT_TURNS, prob_end: float = None, noise: float = 0, - game: Game = None, + game: IpdGame = None, deterministic_cache: DeterministicCache = None, mutation_rate: float = 0.0, mode: str = "bd", @@ -61,7 +61,7 @@ def __init__( ) -> None: """ An agent based Moran process class. In each round, each player plays a - Match with each other player. Players are assigned a fitness score by + IpdMatch with each other player. Players are assigned a fitness score by their total score from all matches in the round. A player is chosen to reproduce proportionally to fitness, possibly mutated, and is cloned. The clone replaces a randomly chosen player. @@ -78,7 +78,7 @@ def __init__( It is possible to pass interaction graphs and reproduction graphs to the Moran process. In this case, in each round, each player plays a - Match with each neighboring player according to the interaction graph. + IpdMatch with each neighboring player according to the interaction graph. Players are assigned a fitness score by their total score from all matches in the round. A player is chosen to reproduce proportionally to fitness, possibly mutated, and is cloned. The clone replaces a randomly @@ -94,7 +94,7 @@ def __init__( noise: The background noise, if any. 
Randomly flips plays with probability `noise`. - game: axelrod.Game + game: axelrod.IpdGame The game object used to score matches. deterministic_cache: A optional prebuilt deterministic cache @@ -182,7 +182,7 @@ def set_players(self) -> None: self.players.append(player) self.populations = [self.population_distribution()] - def mutate(self, index: int) -> Player: + def mutate(self, index: int) -> IpdPlayer: """Mutate the player at index. Parameters @@ -356,7 +356,7 @@ def score_all(self) -> List: for i, j in self._matchup_indices(): player1 = self.players[i] player2 = self.players[j] - match = Match( + match = IpdMatch( (player1, player2), turns=self.turns, prob_end=self.prob_end, @@ -469,14 +469,14 @@ def populations_plot(self, ax=None): class ApproximateMoranProcess(MoranProcess): """ A class to approximate a Moran process based - on a distribution of potential Match outcomes. + on a distribution of potential IpdMatch outcomes. Instead of playing the matches, the result is sampled from a dictionary of player tuples to distribution of match outcomes """ def __init__( - self, players: List[Player], cached_outcomes: dict, mutation_rate: float = 0 + self, players: List[IpdPlayer], cached_outcomes: dict, mutation_rate: float = 0 ) -> None: """ Parameters diff --git a/axelrod/ipd/player.py b/axelrod/ipd/player.py new file mode 100644 index 000000000..6391f26e9 --- /dev/null +++ b/axelrod/ipd/player.py @@ -0,0 +1,211 @@ +import copy +import inspect +import itertools +import types +from typing import Any, Dict + +import numpy as np + +from axelrod.player import BasePlayer +from axelrod.ipd.action import Action +from axelrod.ipd.game import DefaultGame +from axelrod.ipd.history import History +from axelrod.ipd.random_ import random_flip + +C, D = Action.C, Action.D + + +def simultaneous_play(player, coplayer, noise=0): + """This pits two players against each other.""" + s1, s2 = player.strategy(coplayer), coplayer.strategy(player) + if noise: + s1 = random_flip(s1, noise) + s2 = random_flip(s2, noise) + player.update_history(s1, s2) + coplayer.update_history(s2, s1) + return s1, s2 + + +class IpdPlayer(BasePlayer): + """A class for a player in the tournament. + + This is an abstract base class, not intended to be used directly. + """ + + name = "IpdPlayer" + classifier = {} # type: Dict[str, Any] + + def __new__(cls, *args, **kwargs): + """Caches arguments for IpdPlayer cloning.""" + obj = super().__new__(cls) + obj.init_kwargs = cls.init_params(*args, **kwargs) + return obj + + @classmethod + def init_params(cls, *args, **kwargs): + """ + Return a dictionary containing the init parameters of a strategy + (without 'self'). + Use *args and *kwargs as value if specified + and complete the rest with the default values. + """ + sig = inspect.signature(cls.__init__) + # The 'self' parameter needs to be removed or the first *args will be + # assigned to it + self_param = sig.parameters.get("self") + new_params = list(sig.parameters.values()) + new_params.remove(self_param) + sig = sig.replace(parameters=new_params) + boundargs = sig.bind_partial(*args, **kwargs) + boundargs.apply_defaults() + return boundargs.arguments + + def __init__(self): + """Initiates an empty history.""" + self._history = History() + self.classifier = copy.deepcopy(self.classifier) + self.set_match_attributes() + super().__init__() + + def __eq__(self, other): + """ + Test if two players are equal. 
+ """ + if self.__repr__() != other.__repr__(): + return False + + for attribute in set( + list(self.__dict__.keys()) + list(other.__dict__.keys()) + ): + + value = getattr(self, attribute, None) + other_value = getattr(other, attribute, None) + + if isinstance(value, np.ndarray): + if not (np.array_equal(value, other_value)): + return False + + elif isinstance(value, types.GeneratorType) or isinstance( + value, itertools.cycle + ): + # Split the original generator so it is not touched + generator, original_value = itertools.tee(value) + other_generator, original_other_value = itertools.tee( + other_value + ) + + if isinstance(value, types.GeneratorType): + setattr(self, attribute, (ele for ele in original_value)) + setattr( + other, attribute, (ele for ele in original_other_value) + ) + else: + setattr(self, attribute, itertools.cycle(original_value)) + setattr( + other, attribute, itertools.cycle(original_other_value) + ) + + for _ in range(200): + try: + if next(generator) != next(other_generator): + return False + except StopIteration: + break + + # Code for a strange edge case where each strategy points at each + # other + elif value is other and other_value is self: + pass + else: + if value != other_value: + return False + return True + + def receive_match_attributes(self): + # Overwrite this function if your strategy needs + # to make use of match_attributes such as + # the game matrix, the number of rounds or the noise + pass + + def set_match_attributes(self, length=-1, game=None, noise=0): + if not game: + game = DefaultGame + self.match_attributes = {"length": length, "game": game, "noise": noise} + self.receive_match_attributes() + + def __repr__(self): + """The string method for the strategy. + Appends the `__init__` parameters to the strategy's name.""" + name = self.name + prefix = ": " + gen = ( + value for value in self.init_kwargs.values() if value is not None + ) + for value in gen: + try: + if issubclass(value, IpdPlayer): + value = value.name + except TypeError: + pass + name = "".join([name, prefix, str(value)]) + prefix = ", " + return name + + def __getstate__(self): + """Used for pickling. Override if IpdPlayer contains unpickleable attributes.""" + return self.__dict__ + + def strategy(self, opponent): + """This is a placeholder strategy.""" + raise NotImplementedError() + + def play(self, opponent, noise=0, strategy_holder=None): + """This pits two players against each other, using the passed strategy + holder, if provided.""" + if strategy_holder is None: + strategy_holder = self + return simultaneous_play(strategy_holder, opponent, noise) + + def clone(self): + """Clones the player without history, reapplying configuration + parameters as necessary.""" + + # You may be tempted to re-implement using the `copy` module + # Note that this would require a deepcopy in some cases and there may + # be significant changes required throughout the library. + # Consider overriding in special cases only if necessary + cls = self.__class__ + new_player = cls(**self.init_kwargs) + new_player.match_attributes = copy.copy(self.match_attributes) + return new_player + + def reset(self): + """Resets a player to its initial state + + This method is called at the beginning of each match (between a pair + of players) to reset a player's state to its initial starting point. + It ensures that no 'memory' of previous matches is carried forward. + """ + # This also resets the history. 
+        self.__init__(**self.init_kwargs)
+
+    def update_history(self, play, coplay):
+        self.history.append(play, coplay)
+
+    @property
+    def history(self):
+        return self._history
+
+    # Properties maintained for the legacy API; these can be refactored to
+    # self.history.X in 5.0.0 to reduce function call overhead.
+    @property
+    def cooperations(self):
+        return self._history.cooperations
+
+    @property
+    def defections(self):
+        return self._history.defections
+
+    @property
+    def state_distribution(self):
+        return self._history.state_distribution
diff --git a/axelrod/plot.py b/axelrod/ipd/plot.py
similarity index 100%
rename from axelrod/plot.py
rename to axelrod/ipd/plot.py
diff --git a/axelrod/random_.py b/axelrod/ipd/random_.py
similarity index 98%
rename from axelrod/random_.py
rename to axelrod/ipd/random_.py
index 183c77b5f..e3ef91e8d 100644
--- a/axelrod/random_.py
+++ b/axelrod/ipd/random_.py
@@ -3,7 +3,7 @@
 import numpy as np
 from numpy.random import choice

-from axelrod.action import Action
+from axelrod.ipd.action import Action

 C, D = Action.C, Action.D
diff --git a/axelrod/result_set.py b/axelrod/ipd/result_set.py
similarity index 99%
rename from axelrod/result_set.py
rename to axelrod/ipd/result_set.py
index 36e33a4b7..319784a69 100644
--- a/axelrod/result_set.py
+++ b/axelrod/ipd/result_set.py
@@ -7,12 +7,12 @@
 import numpy as np
 import tqdm

-from axelrod.action import Action
+from axelrod.ipd.action import Action

 import dask as da
 import dask.dataframe as dd

-from . import eigen
+from axelrod import eigen

 C, D = Action.C, Action.D
@@ -677,7 +677,7 @@ def summarise(self):
         median_wins = map(np.nanmedian, self.wins)

         self.player = namedtuple(
-            "Player",
+            "IpdPlayer",
             [
                 "Rank",
                 "Name",
diff --git a/axelrod/strategies/__init__.py b/axelrod/ipd/strategies/__init__.py
similarity index 96%
rename from axelrod/strategies/__init__.py
rename to axelrod/ipd/strategies/__init__.py
index 1e8e1a4ff..cc42e0a3a 100644
--- a/axelrod/strategies/__init__.py
+++ b/axelrod/ipd/strategies/__init__.py
@@ -1,4 +1,4 @@
-from ..classifier import Classifiers
+from axelrod.ipd.classifier import Classifiers

 from ._strategies import *
 from ._filters import passes_filterset
@@ -121,13 +121,13 @@ def filtered_strategies(filterset, strategies=all_strategies):
             'min_memory_depth': 2
         }
     strategies: list
-        of subclasses of axelrod.Player
+        of subclasses of axelrod.IpdPlayer

     Returns
     -------
     list
-        of subclasses of axelrod.Player
+        of subclasses of axelrod.IpdPlayer

     """
     return [s for s in strategies if passes_filterset(s, filterset)]
diff --git a/axelrod/strategies/_filters.py b/axelrod/ipd/strategies/_filters.py
similarity index 95%
rename from axelrod/strategies/_filters.py
rename to axelrod/ipd/strategies/_filters.py
index c9c199bf7..5a150dd9a 100644
--- a/axelrod/strategies/_filters.py
+++ b/axelrod/ipd/strategies/_filters.py
@@ -13,7 +13,7 @@ def passes_operator_filter(player, classifier_key, value, operator):

     For the following strategy:

-    class ExampleStrategy(Player):
+    class ExampleStrategy(IpdPlayer):
         classifier = {
             'stochastic': True,
             'inspects_source': False,
@@ -27,7 +27,7 @@ class ExampleStrategy(Player):

     Parameters
     ----------
-    player : an instance of axelrod.Player
+    player : an instance of axelrod.IpdPlayer
     classifier_key: string
         Defining which entry from the strategy's classifier dict is to be
         tested (e.g. 'memory_depth').
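
The filter helpers above treat a strategy's classifier as a plain dict whose entries are checked against an operator. A minimal sketch of that idea (a simplified stand-in, not the library's exact signature)::

    import operator

    example_classifier = {
        "stochastic": True,
        "memory_depth": 10,
        "makes_use_of": ["game", "length"],
    }

    def passes(classifier, key, value, op):
        # e.g. op=operator.ge checks classifier[key] >= value
        return op(classifier[key], value)

    print(passes(example_classifier, "memory_depth", 2, operator.ge))  # True
    print(passes(example_classifier, "stochastic", True, operator.eq))  # True
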
@@ -58,7 +58,7 @@ def passes_in_list_filter(player, classifier_key, value): For the following strategy: - class ExampleStrategy(Player): + class ExampleStrategy(IpdPlayer): classifier = { 'stochastic': True, 'inspects_source': False, @@ -73,7 +73,7 @@ class ExampleStrategy(Player): Parameters ---------- - player: a descendant class of axelrod.Player + player: a descendant class of axelrodPlayer classifier_key: string Defining which entry from the strategy's classifier dict is to be tested (e.g. 'makes_use_of'). @@ -101,7 +101,7 @@ def passes_filterset(strategy, filterset): For the following strategy: - class ExampleStrategy(Player): + class ExampleStrategy(IpdPlayer): classifier = { 'stochastic': True, 'inspects_source': False, @@ -123,7 +123,7 @@ class ExampleStrategy(Player): Parameters ---------- - strategy : a descendant class of axelrod.Player + strategy : a descendant class of axelrodPlayer filterset : dict mapping filter name to criterion. e.g. diff --git a/axelrod/strategies/_strategies.py b/axelrod/ipd/strategies/_strategies.py similarity index 99% rename from axelrod/strategies/_strategies.py rename to axelrod/ipd/strategies/_strategies.py index 85e58b4b4..aa772edc5 100644 --- a/axelrod/strategies/_strategies.py +++ b/axelrod/ipd/strategies/_strategies.py @@ -106,6 +106,7 @@ Thumper, ) from .finite_state_machines import ( # pylint: disable=unused-import + SimpleFSM, EvolvableFSMPlayer, FSMPlayer, ) @@ -145,7 +146,7 @@ from .grumpy import Grumpy from .handshake import Handshake from .hmm import EvolvedHMM5 -from .hmm import EvolvableHMMPlayer, HMMPlayer # pylint: disable=unused-import +from .hmm import SimpleHMM, EvolvableHMMPlayer, HMMPlayer # pylint: disable=unused-import from .human import Human # pylint: disable=unused-import from .hunter import ( AlternatorHunter, diff --git a/axelrod/strategies/adaptive.py b/axelrod/ipd/strategies/adaptive.py similarity index 87% rename from axelrod/strategies/adaptive.py rename to axelrod/ipd/strategies/adaptive.py index bef1b8dac..33b3dd29e 100644 --- a/axelrod/strategies/adaptive.py +++ b/axelrod/ipd/strategies/adaptive.py @@ -1,12 +1,12 @@ from typing import List -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Adaptive(Player): +class Adaptive(IpdPlayer): """Start with a specific sequence of C and D, then play the strategy that has worked best, recalculated each turn. @@ -34,7 +34,7 @@ def __init__(self, initial_plays: List[Action] = None) -> None: self.initial_plays = initial_plays self.scores = {C: 0, D: 0} - def score_last_round(self, opponent: Player): + def score_last_round(self, opponent: IpdPlayer): # Load the default game if not supplied by a tournament. 
game = self.match_attributes["game"] if len(self.history): @@ -42,7 +42,7 @@ def score_last_round(self, opponent: Player): scores = game.score(last_round) self.scores[last_round[0]] += scores[0] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Update scores from the last play self.score_last_round(opponent) # Begin by playing the sequence C,C,C,C,C,C,D,D,D,D,D diff --git a/axelrod/strategies/adaptor.py b/axelrod/ipd/strategies/adaptor.py similarity index 91% rename from axelrod/strategies/adaptor.py rename to axelrod/ipd/strategies/adaptor.py index 2648b2704..fc431d23a 100644 --- a/axelrod/strategies/adaptor.py +++ b/axelrod/ipd/strategies/adaptor.py @@ -1,15 +1,15 @@ from typing import Dict, Tuple -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice from numpy import heaviside C, D = Action.C, Action.D -class AbstractAdaptor(Player): +class AbstractAdaptor(IpdPlayer): """ An adaptive strategy that updates an internal state based on the last round of play. Using this state the player Cooperates with a probability @@ -46,7 +46,7 @@ def __init__(self, delta: Dict[Tuple[Action, Action], float], self.delta = delta self.s = 0. - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if self.history: # Update internal state from the last play last_round = (self.history[-1], opponent.history[-1]) diff --git a/axelrod/strategies/alternator.py b/axelrod/ipd/strategies/alternator.py similarity index 78% rename from axelrod/strategies/alternator.py rename to axelrod/ipd/strategies/alternator.py index 6ed3605a4..69099ce21 100644 --- a/axelrod/strategies/alternator.py +++ b/axelrod/ipd/strategies/alternator.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Alternator(Player): +class Alternator(IpdPlayer): """ A player who alternates between cooperating and defecting. 
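
The AdaptOr family above keeps a scalar state s that is nudged by a per-outcome delta and then mapped to a cooperation probability. A sketch of that mechanism only; the delta values and the logistic squash here are illustrative assumptions, not the published parameters::

    import random
    from math import exp

    class AdaptorSketch:
        # Per-outcome nudges to the internal state; values are illustrative.
        delta = {("C", "C"): 0.0, ("C", "D"): -1.0,
                 ("D", "C"): 1.0, ("D", "D"): -0.25}

        def __init__(self):
            self.s = 0.0

        def respond(self, last_round):
            self.s += self.delta[last_round]
            p_coop = 1.0 / (1.0 + exp(-self.s))  # squash state to a probability
            return "C" if random.random() < p_coop else "D"

    random.seed(1)
    player = AdaptorSketch()
    print([player.respond(("C", "D")) for _ in range(5)])
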
@@ -25,7 +25,7 @@ class Alternator(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C if self.history[-1] == C: diff --git a/axelrod/strategies/ann.py b/axelrod/ipd/strategies/ann.py similarity index 93% rename from axelrod/strategies/ann.py rename to axelrod/ipd/strategies/ann.py index 2d3a1bc85..b33711b87 100644 --- a/axelrod/strategies/ann.py +++ b/axelrod/ipd/strategies/ann.py @@ -1,10 +1,10 @@ from typing import List, Tuple import numpy as np import numpy.random as random -from axelrod.action import Action -from axelrod.load_data_ import load_weights -from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.load_data_ import load_weights +from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D @@ -19,17 +19,17 @@ def num_weights(num_features, num_hidden): return size -def compute_features(player: Player, opponent: Player) -> List[int]: +def compute_features(player: IpdPlayer, opponent: IpdPlayer) -> List[int]: """ Compute history features for Neural Network: * Opponent's first move is C * Opponent's first move is D * Opponent's second move is C * Opponent's second move is D - * Player's previous move is C - * Player's previous move is D - * Player's second previous move is C - * Player's second previous move is D + * IpdPlayer's previous move is C + * IpdPlayer's previous move is D + * IpdPlayer's second previous move is C + * IpdPlayer's second previous move is D * Opponent's previous move is C * Opponent's previous move is D * Opponent's second previous move is C @@ -148,7 +148,7 @@ def split_weights( return input2hidden, hidden2output, bias -class ANN(Player): +class ANN(IpdPlayer): """Artificial Neural Network based strategy. 
A single layer neural network based strategy, with the following @@ -157,10 +157,10 @@ class ANN(Player): * Opponent's first move is D * Opponent's second move is C * Opponent's second move is D - * Player's previous move is C - * Player's previous move is D - * Player's second previous move is C - * Player's second previous move is D + * IpdPlayer's previous move is C + * IpdPlayer's previous move is D + * IpdPlayer's second previous move is C + * IpdPlayer's second previous move is D * Opponent's previous move is C * Opponent's previous move is D * Opponent's second previous move is C @@ -206,7 +206,7 @@ def _process_weights(self, weights, num_features, num_hidden): self.hidden_to_output_layer_weights = np.array(h2o) self.bias_weights = np.array(bias) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: features = compute_features(self, opponent) output = activate( self.bias_weights, diff --git a/axelrod/strategies/apavlov.py b/axelrod/ipd/strategies/apavlov.py similarity index 93% rename from axelrod/strategies/apavlov.py rename to axelrod/ipd/strategies/apavlov.py index 0b54b10a9..dddcd6884 100644 --- a/axelrod/strategies/apavlov.py +++ b/axelrod/ipd/strategies/apavlov.py @@ -1,12 +1,12 @@ from typing import Optional -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class APavlov2006(Player): +class APavlov2006(IpdPlayer): """ APavlov attempts to classify its opponent as one of five strategies: Cooperative, ALLD, STFT, PavlovD, or Random. APavlov then responds in a @@ -33,7 +33,7 @@ def __init__(self) -> None: super().__init__() self.opponent_class = None # type: Optional[str] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # TFT for six rounds if len(self.history) < 6: return D if opponent.history[-1:] == [D] else C @@ -70,7 +70,7 @@ def strategy(self, opponent: Player) -> Action: return C -class APavlov2011(Player): +class APavlov2011(IpdPlayer): """ APavlov attempts to classify its opponent as one of four strategies: Cooperative, ALLD, STFT, or Random. APavlov then responds in a manner @@ -97,7 +97,7 @@ def __init__(self) -> None: super().__init__() self.opponent_class = None # type: Optional[str] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # TFT for six rounds if len(self.history) < 6: return D if opponent.history[-1:] == [D] else C diff --git a/axelrod/strategies/appeaser.py b/axelrod/ipd/strategies/appeaser.py similarity index 84% rename from axelrod/strategies/appeaser.py rename to axelrod/ipd/strategies/appeaser.py index 790b8439d..ee4507231 100644 --- a/axelrod/strategies/appeaser.py +++ b/axelrod/ipd/strategies/appeaser.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Appeaser(Player): +class Appeaser(IpdPlayer): """A player who tries to guess what the opponent wants. Switch the classifier every time the opponent plays D. 
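
The ANN strategy above turns recent history into a vector of binary features and feeds it through a single hidden layer. A self-contained sketch of such a forward pass; the layer sizes, the relu, and the sign-of-output rule are assumptions for illustration, not the library's exact activate()::

    import numpy as np

    def forward(features, input_to_hidden, hidden_to_output, bias):
        hidden = np.maximum(0, input_to_hidden @ features + bias)  # relu layer
        return float(hidden_to_output @ hidden)

    rng = np.random.default_rng(0)
    features = rng.integers(0, 2, size=12).astype(float)  # binary history features
    i2h = rng.normal(size=(4, 12))  # 4 hidden nodes, illustrative size
    h2o = rng.normal(size=4)
    bias = rng.normal(size=4)
    print("C" if forward(features, i2h, h2o, bias) > 0 else "D")
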
@@ -26,7 +26,7 @@ class Appeaser(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not len(opponent.history): return C else: diff --git a/axelrod/strategies/averagecopier.py b/axelrod/ipd/strategies/averagecopier.py similarity index 82% rename from axelrod/strategies/averagecopier.py rename to axelrod/ipd/strategies/averagecopier.py index 597407c61..893214e5a 100644 --- a/axelrod/strategies/averagecopier.py +++ b/axelrod/ipd/strategies/averagecopier.py @@ -1,11 +1,11 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice C, D = Action.C, Action.D -class AverageCopier(Player): +class AverageCopier(IpdPlayer): """ The player will cooperate with probability p if the opponent's cooperation ratio is p. Starts with random decision. @@ -26,7 +26,7 @@ class AverageCopier(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: # Randomly picks a strategy (not affected by history). return random_choice(0.5) @@ -34,7 +34,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(p) -class NiceAverageCopier(Player): +class NiceAverageCopier(IpdPlayer): """ Same as Average Copier, but always starts by cooperating. @@ -54,7 +54,7 @@ class NiceAverageCopier(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return C p = opponent.cooperations / len(opponent.history) diff --git a/axelrod/strategies/axelrod_first.py b/axelrod/ipd/strategies/axelrod_first.py similarity index 96% rename from axelrod/strategies/axelrod_first.py rename to axelrod/ipd/strategies/axelrod_first.py index f9dab0b94..fe9091284 100644 --- a/axelrod/strategies/axelrod_first.py +++ b/axelrod/ipd/strategies/axelrod_first.py @@ -16,10 +16,10 @@ import random from typing import Dict, List, Tuple, Optional -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice -from axelrod.strategy_transformers import FinalTransformer +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice +from axelrod.ipd.strategy_transformers import FinalTransformer from scipy.stats import chisquare from .memoryone import MemoryOnePlayer @@ -27,7 +27,7 @@ C, D = Action.C, Action.D -class FirstByDavis(Player): +class FirstByDavis(IpdPlayer): """ Submitted to Axelrod's first tournament by Morton Davis. 
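
AverageCopier above is probability matching: random_choice(p) cooperates with probability p, and p is simply the opponent's cooperation ratio. The same rule in plain Python::

    import random

    def average_copier_move(opponent_history):
        # The first move (empty history) is a fair coin, as in the class above.
        if not opponent_history:
            return "C" if random.random() < 0.5 else "D"
        p = opponent_history.count("C") / len(opponent_history)
        return "C" if random.random() < p else "D"

    random.seed(2)
    print([average_copier_move(["C", "C", "D"]) for _ in range(5)])
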
@@ -64,7 +64,7 @@ def __init__(self, rounds_to_cooperate: int = 10) -> None: super().__init__() self._rounds_to_cooperate = rounds_to_cooperate - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Begins by playing C, then plays D for the remaining rounds if the opponent ever plays D.""" if len(self.history) < self._rounds_to_cooperate: @@ -74,7 +74,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FirstByDowning(Player): +class FirstByDowning(IpdPlayer): """ Submitted to Axelrod's first tournament by Downing @@ -89,7 +89,7 @@ class FirstByDowning(Player): > based on an outcome maximization interpretation of human performances proposed > by Downing (1975)." - The Downing (1975) paper is "The Prisoner's Dilemma Game as a + The Downing (1975) paper is "The Prisoner's Dilemma IpdGame as a Problem-Solving Phenomenon" [Downing1975]_ and this is used to implement the strategy. @@ -248,7 +248,7 @@ def __init__(self) -> None: self.number_opponent_cooperations_in_response_to_C = 0 self.number_opponent_cooperations_in_response_to_D = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: round_number = len(self.history) + 1 if round_number == 1: @@ -284,7 +284,7 @@ def strategy(self, opponent: Player) -> Action: return self.history[-1].flip() -class FirstByFeld(Player): +class FirstByFeld(IpdPlayer): """ Submitted to Axelrod's first tournament by Scott Feld. @@ -349,7 +349,7 @@ def _cooperation_probability(self) -> float: rounds = len(self.history) return max(self._start_coop_prob + slope * rounds, self._end_coop_prob) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C if opponent.history[-1] == D: @@ -358,7 +358,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(p) -class FirstByGraaskamp(Player): +class FirstByGraaskamp(IpdPlayer): """ Submitted to Axelrod's first tournament by James Graaskamp. @@ -423,7 +423,7 @@ def __init__(self, alpha: float = 0.05) -> None: self.opponent_is_random = False self.next_random_defection_turn = None # type: Optional[int] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """This is the actual strategy""" # First move if not self.history: @@ -459,7 +459,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FirstByGrofman(Player): +class FirstByGrofman(IpdPlayer): """ Submitted to Axelrod's first tournament by Bernard Grofman. @@ -485,7 +485,7 @@ class FirstByGrofman(Player): "manipulates_source": False, "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0 or self.history[-1] == opponent.history[-1]: return C return random_choice(2 / 7) @@ -523,7 +523,7 @@ def __init__(self, p: float = 0.9) -> None: super().__init__(four_vector) -class FirstByNydegger(Player): +class FirstByNydegger(IpdPlayer): """ Submitted to Axelrod's first tournament by Rudy Nydegger. 
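
Feld's _cooperation_probability above decays linearly from a starting to an ending probability and then stays flat. A sketch with assumed default parameters::

    def feld_coop_probability(rounds, start=1.0, end=0.5, rounds_of_decay=200):
        # Linear decay from `start` to `end`, clamped at `end` afterwards.
        slope = (end - start) / rounds_of_decay
        return max(start + slope * rounds, end)

    for rounds in (0, 100, 200, 400):
        print(rounds, feld_coop_probability(rounds))  # 1.0, 0.75, 0.5, 0.5
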
@@ -606,7 +606,7 @@ def score_history( a += weight * score_map[plays] return a - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C if len(self.history) == 1: @@ -624,7 +624,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FirstByShubik(Player): +class FirstByShubik(IpdPlayer): """ Submitted to Axelrod's first tournament by Martin Shubik. @@ -688,7 +688,7 @@ def _decrease_retaliation_counter(self): if self.retaliation_remaining == 0: self.is_retaliating = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C @@ -709,7 +709,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FirstByTullock(Player): +class FirstByTullock(IpdPlayer): """ Submitted to Axelrod's first tournament by Gordon Tullock. @@ -756,7 +756,7 @@ def __init__(self) -> None: self._rounds_to_cooperate = 11 self.memory_depth = self._rounds_to_cooperate - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) < self._rounds_to_cooperate: return C rounds = self._rounds_to_cooperate - 1 @@ -766,7 +766,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(prob_cooperate) -class FirstByAnonymous(Player): +class FirstByAnonymous(IpdPlayer): """ Submitted to Axelrod's first tournament by a graduate student whose name was withheld. @@ -802,13 +802,13 @@ class FirstByAnonymous(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: r = random.uniform(3, 7) / 10 return random_choice(r) @FinalTransformer((D, D), name_prefix=None) -class FirstBySteinAndRapoport(Player): +class FirstBySteinAndRapoport(IpdPlayer): """ Submitted to Axelrod's first tournament by William Stein and Amnon Rapoport. @@ -858,7 +858,7 @@ def __init__(self, alpha: float = 0.05) -> None: self.alpha = alpha self.opponent_is_random = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: round_number = len(self.history) + 1 # First 4 moves @@ -880,7 +880,7 @@ def strategy(self, opponent: Player) -> Action: @FinalTransformer((D, D), name_prefix=None) -class FirstByTidemanAndChieruzzi(Player): +class FirstByTidemanAndChieruzzi(IpdPlayer): """ Submitted to Axelrod's first tournament by Nicolas Tideman and Paula Chieruzzi. @@ -960,7 +960,7 @@ def _fresh_start(self): self.retaliation_remaining = 0 self.remembered_number_of_opponent_defectioons = 0 - def _score_last_round(self, opponent: Player): + def _score_last_round(self, opponent: IpdPlayer): """Updates the scores for each player.""" # Load the default game if not supplied by a tournament. 
game = self.match_attributes["game"] @@ -969,7 +969,7 @@ def _score_last_round(self, opponent: Player): self.current_score += scores[0] self.opponent_score += scores[1] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C diff --git a/axelrod/strategies/axelrod_second.py b/axelrod/ipd/strategies/axelrod_second.py similarity index 96% rename from axelrod/strategies/axelrod_second.py rename to axelrod/ipd/strategies/axelrod_second.py index 50e4e18ee..7b359d36e 100644 --- a/axelrod/strategies/axelrod_second.py +++ b/axelrod/ipd/strategies/axelrod_second.py @@ -8,16 +8,16 @@ from typing import List import numpy as np -from axelrod.action import Action -from axelrod.interaction_utils import compute_final_score -from axelrod.player import Player -from axelrod.random_ import random_choice -from axelrod.strategies.finite_state_machines import FSMPlayer +from axelrod.ipd.action import Action +from axelrod.ipd.interaction_utils import compute_final_score +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice +from axelrod.ipd.strategies.finite_state_machines import FSMPlayer C, D = Action.C, Action.D -class SecondByChampion(Player): +class SecondByChampion(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Danny Champion. @@ -43,7 +43,7 @@ class SecondByChampion(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) # Cooperate for the first 10 turns if current_round == 0: @@ -61,7 +61,7 @@ def strategy(self, opponent: Player) -> Action: return D return C -class SecondByEatherley(Player): +class SecondByEatherley(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Graham Eatherley. @@ -87,7 +87,7 @@ class SecondByEatherley(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: # Cooperate on the first move if not len(opponent.history): return C @@ -100,7 +100,7 @@ def strategy(opponent: Player) -> Action: return random_choice(1 - defection_prop) -class SecondByTester(Player): +class SecondByTester(IpdPlayer): """ Submitted to Axelrod's second tournament by David Gladstein. @@ -131,7 +131,7 @@ def __init__(self) -> None: super().__init__() self.is_TFT = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Defect on the first move if not opponent.history: return D @@ -149,7 +149,7 @@ def strategy(self, opponent: Player) -> Action: return self.history[-1].flip() -class SecondByGladstein(Player): +class SecondByGladstein(IpdPlayer): """ Submitted to Axelrod's second tournament by David Gladstein. 
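
Eatherley's rule, shown in full above, reciprocates cooperation and answers a defection with a cooperation probability of one minus the opponent's defection ratio. Restated as a standalone function::

    import random

    def eatherley_move(opponent_history):
        if not opponent_history or opponent_history[-1] == "C":
            return "C"
        defection_prop = opponent_history.count("D") / len(opponent_history)
        return "C" if random.random() < 1 - defection_prop else "D"

    random.seed(3)
    print(eatherley_move(["C", "D", "D"]))  # cooperates with probability 1/3
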
@@ -185,7 +185,7 @@ def __init__(self) -> None: # This strategy assumes the opponent is a patsy self.patsy = True - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Defect on the first move if not self.history: return D @@ -205,7 +205,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class SecondByTranquilizer(Player): +class SecondByTranquilizer(IpdPlayer): """ Submitted to Axelrod's second tournament by Craig Feathers @@ -376,7 +376,7 @@ def update_state(self, opponent): ) / (self.one_turn_after_good_defection_ratio_count + 1) self.one_turn_after_good_defection_ratio_count += 1 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C @@ -420,7 +420,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class SecondByGrofman(Player): +class SecondByGrofman(IpdPlayer): """ Submitted to Axelrod's second tournament by Bernard Grofman. @@ -459,7 +459,7 @@ class SecondByGrofman(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Cooperate on the first two moves if len(self.history) < 2: return C @@ -478,7 +478,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SecondByKluepfel(Player): +class SecondByKluepfel(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Charles Kluepfel (K32R). @@ -527,7 +527,7 @@ def __init__(self): super().__init__() self.cd_counts, self.dd_counts, self.dc_counts, self.cc_counts = 0, 0, 0, 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First update the response matrix. if len(self.history) >= 2: if self.history[-2] == D: @@ -583,7 +583,7 @@ def strategy(self, opponent: Player) -> Action: return one_move_ago.flip() -class SecondByBorufsen(Player): +class SecondByBorufsen(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Otto Borufsen (K32R), and came in third in that tournament. @@ -657,7 +657,7 @@ def try_return(self, to_return): return C return D - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: @@ -739,7 +739,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(opponent.history[-1]) -class SecondByCave(Player): +class SecondByCave(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Rob Cave (K49R), and came in fourth in that tournament. @@ -771,7 +771,7 @@ class SecondByCave(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: return C @@ -796,7 +796,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SecondByWmAdams(Player): +class SecondByWmAdams(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by William Adams (K44R), and came in fifth in that tournament. 
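
Kluepfel's strategy above incrementally tallies how the opponent replied to each of its own moves from two turns earlier. The same bookkeeping computed in one pass::

    from collections import Counter

    def response_counts(my_history, opp_history):
        # Pair my move at turn t with the opponent's reply at turn t + 1.
        return Counter(zip(my_history[:-1], opp_history[1:]))

    print(response_counts(list("CCDC"), list("CDDC")))
    # Counter({('C', 'D'): 2, ('D', 'C'): 1})
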
@@ -822,7 +822,7 @@ class SecondByWmAdams(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) <= 1: return C number_defects = opponent.defections @@ -836,7 +836,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SecondByGraaskampKatzen(Player): +class SecondByGraaskampKatzen(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Jim Graaskamp and Ken Katzen (K60R), and came in sixth in that tournament. @@ -874,12 +874,12 @@ def __init__(self): self.own_score = 0 self.mode = "Normal" - def update_score(self, opponent: Player): + def update_score(self, opponent: IpdPlayer): game = self.match_attributes["game"] last_round = (self.history[-1], opponent.history[-1]) self.own_score += game.score(last_round)[0] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if self.mode == "Defect": return D @@ -909,7 +909,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] # Tit-for-Tat -class SecondByWeiner(Player): +class SecondByWeiner(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Herb Weiner (K41R), and came in seventh in that tournament. @@ -969,7 +969,7 @@ def try_return(self, to_return): return D return to_return - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return C @@ -1001,7 +1001,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(opponent.history[-1]) -class SecondByHarrington(Player): +class SecondByHarrington(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Paul Harrington (K75R) and came in eighth in that tournament. @@ -1238,7 +1238,7 @@ def detect_parity_streak(self, last_move): if self.parity_streak[self.parity_bit] >= self.parity_limit: return True - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: @@ -1340,7 +1340,7 @@ def strategy(self, opponent: Player) -> Action: return self.try_return(D, lower_flags=False) -class SecondByTidemanAndChieruzzi(Player): +class SecondByTidemanAndChieruzzi(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by T. Nicolaus Tideman and Paula Chieruzzi (K84R) and came in ninth in that tournament. @@ -1395,7 +1395,7 @@ def _fresh_start(self): self.score_to_beat = 0 self.score_to_beat_inc = 0 - def _score_last_round(self, opponent: Player): + def _score_last_round(self, opponent: IpdPlayer): """Updates the scores for each player.""" # Load the default game if not supplied by a tournament. game = self.match_attributes["game"] @@ -1404,7 +1404,7 @@ def _score_last_round(self, opponent: Player): self.current_score += scores[0] self.opponent_score += scores[1] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 if current_round == 1: @@ -1451,7 +1451,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SecondByGetzler(Player): +class SecondByGetzler(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Abraham Getzler (K35R) and came in eleventh in that tournament. 
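
Several strategies here (update_score, _score_last_round) keep running totals by scoring the last round with the game's payoff matrix. A minimal version using the conventional payoffs R, P, S, T = 3, 1, 0, 5::

    PAYOFFS = {("C", "C"): (3, 3), ("D", "D"): (1, 1),
               ("C", "D"): (0, 5), ("D", "C"): (5, 0)}

    def score_history(my_history, opp_history):
        my_total = opp_total = 0
        for last_round in zip(my_history, opp_history):
            mine, theirs = PAYOFFS[last_round]
            my_total += mine
            opp_total += theirs
        return my_total, opp_total

    print(score_history(list("CCD"), list("CDC")))  # (8, 8)
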
@@ -1479,7 +1479,7 @@ def __init__(self) -> None: super().__init__() self.flack = 0.0 # The relative untrustworthiness of opponent - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C @@ -1489,7 +1489,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(1.0 - self.flack) -class SecondByLeyvraz(Player): +class SecondByLeyvraz(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Fransois Leyvraz (K68R) and came in twelfth in that tournament. @@ -1532,7 +1532,7 @@ def __init__(self) -> None: (D, D, D): 0.25, # Rule 1 } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: recent_history = [C, C, C] # Default to C. for go_back in range(1, 4): if len(opponent.history) >= go_back: @@ -1543,7 +1543,7 @@ def strategy(self, opponent: Player) -> Action: ) -class SecondByWhite(Player): +class SecondByWhite(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Edward C White (K72R) and came in thirteenth in that tournament. @@ -1569,7 +1569,7 @@ class SecondByWhite(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn <= 10 or opponent.history[-1] == C: @@ -1580,7 +1580,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SecondByBlack(Player): +class SecondByBlack(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Paul E Black (K83R) and came in fifteenth in that tournament. @@ -1613,7 +1613,7 @@ def __init__(self) -> None: # Cooperation probability self.prob_coop = {0: 1.0, 1: 1.0, 2: 0.88, 3: 0.68, 4: 0.4, 5: 0.04} - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) < 5: return C @@ -1625,7 +1625,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(self.prob_coop[number_defects]) -class SecondByRichardHufford(Player): +class SecondByRichardHufford(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Richard Hufford (K47R) and came in sixteenth in that tournament. @@ -1691,7 +1691,7 @@ def __init__(self) -> None: self.coop_after_ab_count = 2 self.def_after_ab_count = 2 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: return C @@ -1739,7 +1739,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SecondByYamachi(Player): +class SecondByYamachi(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Brian Yamachi (K64R) and came in seventeenth in that tournament. @@ -1811,7 +1811,7 @@ def try_return(self, to_return, opp_def): return to_return - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: return self.try_return(C, 0) @@ -1930,7 +1930,7 @@ def __init__(self) -> None: super().__init__() self.credit = 7 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn == 1: return C @@ -1956,7 +1956,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SecondByRowsam(Player): +class SecondByRowsam(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Glen Rowsam (K58R) and came in 21st in that tournament. 
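
Getzler's flack above is an exponentially decaying memory of opponent defections: bump by one on a defection, halve every turn, then cooperate with probability 1 - flack. Traced standalone::

    def flack_series(opp_history):
        flack, probs = 0.0, []
        for move in opp_history:
            flack += 1 if move == "D" else 0
            flack *= 0.5
            probs.append(1.0 - flack)  # cooperation probability for random_choice
        return probs

    print(flack_series(list("DDCC")))  # [0.5, 0.25, 0.625, 0.8125]
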
@@ -2007,7 +2007,7 @@ def __init__(self) -> None: self.current_score = 0 self.opponent_score = 0 - def _score_last_round(self, opponent: Player): + def _score_last_round(self, opponent: IpdPlayer): """Updates the scores for each player.""" game = self.match_attributes["game"] last_round = (self.history[-1], opponent.history[-1]) @@ -2015,7 +2015,7 @@ def _score_last_round(self, opponent: Player): self.current_score += scores[0] self.opponent_score += scores[1] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 if turn > 1: self._score_last_round(opponent) @@ -2061,7 +2061,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SecondByAppold(Player): +class SecondByAppold(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Scott Appold (K88R) and came in 22nd in that tournament. @@ -2106,7 +2106,7 @@ def __init__(self) -> None: self.first_opp_def = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) + 1 us_two_turns_ago = C if turn <= 2 else self.history[-2] diff --git a/axelrod/strategies/backstabber.py b/axelrod/ipd/strategies/backstabber.py similarity index 81% rename from axelrod/strategies/backstabber.py rename to axelrod/ipd/strategies/backstabber.py index b3a522ae2..7020b7c1c 100644 --- a/axelrod/strategies/backstabber.py +++ b/axelrod/ipd/strategies/backstabber.py @@ -1,12 +1,12 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.strategy_transformers import FinalTransformer +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.strategy_transformers import FinalTransformer C, D = Action.C, Action.D @FinalTransformer((D, D), name_prefix=None) # End with two defections -class BackStabber(Player): +class BackStabber(IpdPlayer): """ Forgives the first 3 defections but on the fourth will defect forever. Defects on the last 2 rounds unconditionally. @@ -27,12 +27,12 @@ class BackStabber(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: return _backstabber_strategy(opponent) @FinalTransformer((D, D), name_prefix=None) # End with two defections -class DoubleCrosser(Player): +class DoubleCrosser(IpdPlayer): """ Forgives the first 3 defections but on the fourth will defect forever. Defects on the last 2 rounds unconditionally. @@ -57,13 +57,13 @@ class DoubleCrosser(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if _opponent_triggers_alt_strategy(opponent): return _alt_strategy(opponent) return _backstabber_strategy(opponent) -def _backstabber_strategy(opponent: Player) -> Action: +def _backstabber_strategy(opponent: IpdPlayer) -> Action: """ Cooperates until opponent defects a total of four times, then always defects. @@ -75,7 +75,7 @@ def _backstabber_strategy(opponent: Player) -> Action: return C -def _alt_strategy(opponent: Player) -> Action: +def _alt_strategy(opponent: IpdPlayer) -> Action: """ If opponent's previous two plays were defect, then defects on next round. Otherwise, cooperates. 
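
BackStabber's core rule above is a simple threshold: cooperate until the opponent's fourth defection, then defect forever (the closing two defections come from the FinalTransformer decorator, not from this rule)::

    def backstabber_move(opponent_history):
        return "D" if opponent_history.count("D") > 3 else "C"

    print([backstabber_move(["D"] * n) for n in range(6)])
    # ['C', 'C', 'C', 'C', 'D', 'D']
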
@@ -86,7 +86,7 @@ def _alt_strategy(opponent: Player) -> Action: return C -def _opponent_triggers_alt_strategy(opponent: Player) -> bool: +def _opponent_triggers_alt_strategy(opponent: IpdPlayer) -> bool: """ If opponent did not defect in first 7 rounds and the current round is from 8 to 180, return True. Else, return False. @@ -99,7 +99,7 @@ def _opponent_triggers_alt_strategy(opponent: Player) -> bool: return before_alt_strategy < current_round <= last_round_of_alt_strategy -def _opponent_defected_in_first_n_rounds(opponent: Player, first_n_rounds: int) -> bool: +def _opponent_defected_in_first_n_rounds(opponent: IpdPlayer, first_n_rounds: int) -> bool: """ If opponent defected in the first N rounds, return True. Else return False. """ diff --git a/axelrod/strategies/better_and_better.py b/axelrod/ipd/strategies/better_and_better.py similarity index 75% rename from axelrod/strategies/better_and_better.py rename to axelrod/ipd/strategies/better_and_better.py index 1af697a00..e5ff9eb00 100644 --- a/axelrod/strategies/better_and_better.py +++ b/axelrod/ipd/strategies/better_and_better.py @@ -1,11 +1,11 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice C, D = Action.C, Action.D -class BetterAndBetter(Player): +class BetterAndBetter(IpdPlayer): """ Defects with probability of '(1000 - current turn) / 1000'. Therefore it is less and less likely to defect as the round goes on. @@ -26,7 +26,7 @@ class BetterAndBetter(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 probability = current_round / 1000 return random_choice(probability) diff --git a/axelrod/strategies/bush_mosteller.py b/axelrod/ipd/strategies/bush_mosteller.py similarity index 94% rename from axelrod/strategies/bush_mosteller.py rename to axelrod/ipd/strategies/bush_mosteller.py index d6ed5adf3..223dd33a5 100644 --- a/axelrod/strategies/bush_mosteller.py +++ b/axelrod/ipd/strategies/bush_mosteller.py @@ -1,13 +1,13 @@ import random from axelrod import random_choice -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class BushMosteller(Player): +class BushMosteller(IpdPlayer): """ A player that is based on Bush Mosteller reinforced learning algorithm, it decides what it will @@ -72,14 +72,14 @@ def __init__( self._stimulus = 0.0 self._learning_rate = learning_rate - def stimulus_update(self, opponent: Player): + def stimulus_update(self, opponent: IpdPlayer): """ Updates the stimulus attribute based on the opponent's history. Used by the strategy. 
Parameters - opponent : axelrod.Player + opponent : axelrodPlayer The current opponent """ game = self.match_attributes["game"] @@ -120,7 +120,7 @@ def stimulus_update(self, opponent: Player): elif self._stimulus < 0: self._d_prob += self._learning_rate * self._stimulus * self._d_prob - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First turn if len(self.history) == 0: diff --git a/axelrod/strategies/calculator.py b/axelrod/ipd/strategies/calculator.py similarity index 81% rename from axelrod/strategies/calculator.py rename to axelrod/ipd/strategies/calculator.py index 8ac9b59d0..bd206fead 100644 --- a/axelrod/strategies/calculator.py +++ b/axelrod/ipd/strategies/calculator.py @@ -1,13 +1,13 @@ -from axelrod._strategy_utils import detect_cycle -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd._strategy_utils import detect_cycle +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer from .axelrod_first import FirstByJoss as Joss C, D = Action.C, Action.D -class Calculator(Player): +class Calculator(IpdPlayer): """ Plays like (Hard) Joss for the first 20 rounds. If periodic behavior is detected, defect forever. Otherwise play TFT. @@ -33,7 +33,7 @@ def __init__(self) -> None: super().__init__() self.joss_instance = Joss() - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn > 0: self.joss_instance.history.append(self.history[-1], @@ -47,7 +47,7 @@ def strategy(self, opponent: Player) -> Action: play = self.joss_instance.strategy(opponent) return play - def extended_strategy(self, opponent: Player) -> Action: + def extended_strategy(self, opponent: IpdPlayer) -> Action: if self.cycle: return D else: diff --git a/axelrod/strategies/cooperator.py b/axelrod/ipd/strategies/cooperator.py similarity index 88% rename from axelrod/strategies/cooperator.py rename to axelrod/ipd/strategies/cooperator.py index 6435a3504..3b4c0c4a6 100644 --- a/axelrod/strategies/cooperator.py +++ b/axelrod/ipd/strategies/cooperator.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Cooperator(Player): +class Cooperator(IpdPlayer): """A player who only ever cooperates. Names: @@ -26,11 +26,11 @@ class Cooperator(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return C -class TrickyCooperator(Player): +class TrickyCooperator(IpdPlayer): """ A cooperator that is trying to be tricky. @@ -53,7 +53,7 @@ class TrickyCooperator(Player): _min_history_required_to_try_trickiness = 3 _max_history_depth_for_trickiness = -10 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Almost always cooperates, but will try to trick the opponent by defecting. 
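
The Bush-Mosteller update above moves a probability toward 1 after a positive stimulus and toward 0 after a negative one, by learning_rate * stimulus times the available headroom. The rule in isolation; the positive branch is inferred from the symmetric negative branch shown in the hunk::

    def bush_mosteller_update(prob, stimulus, learning_rate=0.5):
        if stimulus > 0:
            return prob + learning_rate * stimulus * (1 - prob)
        if stimulus < 0:
            return prob + learning_rate * stimulus * prob
        return prob

    p = 0.5
    for s in (0.8, 0.8, -0.6):
        p = bush_mosteller_update(p, s)
        print(round(p, 3))  # 0.7, 0.82, 0.574
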
diff --git a/axelrod/strategies/cycler.py b/axelrod/ipd/strategies/cycler.py similarity index 93% rename from axelrod/strategies/cycler.py rename to axelrod/ipd/strategies/cycler.py index 509141717..5aec043ac 100644 --- a/axelrod/strategies/cycler.py +++ b/axelrod/ipd/strategies/cycler.py @@ -3,15 +3,15 @@ import random from typing import List, Tuple -from axelrod.action import Action, actions_to_str, str_to_actions -from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists -from axelrod.player import Player +from axelrod.ipd.action import Action, actions_to_str, str_to_actions +from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D actions = (C, D) -class AntiCycler(Player): +class AntiCycler(IpdPlayer): """ A player that follows a sequence of plays that contains no cycles: CDD CD CCD CCCD CCCCD ... @@ -42,7 +42,7 @@ def __init__(self) -> None: def _get_first_three() -> List[Action]: return [C, D, D] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: while self.first_three: return self.first_three.pop(0) if self.cycle_counter < self.cycle_length: @@ -54,7 +54,7 @@ def strategy(self, opponent: Player) -> Action: return D -class Cycler(Player): +class Cycler(IpdPlayer): """ A player that repeats a given sequence indefinitely. @@ -89,7 +89,7 @@ def __init__(self, cycle: str = "CCD") -> None: self.cycle = cycle self.set_cycle(cycle=cycle) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: return next(self.cycle_iter) def set_cycle(self, cycle: str): @@ -156,7 +156,7 @@ def mutate(self) -> EvolvablePlayer: def crossover(self, other) -> EvolvablePlayer: """ - Creates and returns a new Player instance with a single crossover point. + Creates and returns a new IpdPlayer instance with a single crossover point. 
""" if other.__class__ != self.__class__: raise TypeError("Crossover must be between the same player classes.") diff --git a/axelrod/strategies/darwin.py b/axelrod/ipd/strategies/darwin.py similarity index 95% rename from axelrod/strategies/darwin.py rename to axelrod/ipd/strategies/darwin.py index 93db009eb..59da40f05 100644 --- a/axelrod/strategies/darwin.py +++ b/axelrod/ipd/strategies/darwin.py @@ -6,13 +6,13 @@ from collections import defaultdict from typing import Optional -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Darwin(Player): +class Darwin(IpdPlayer): """ A strategy which accumulates a record (the 'genome') of what the most favourable response in the previous round should have been, and naively @@ -61,7 +61,7 @@ def foil_strategy_inspection() -> Action: """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" return C - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: trial = len(self.history) if trial > 0: diff --git a/axelrod/strategies/dbs.py b/axelrod/ipd/strategies/dbs.py similarity index 99% rename from axelrod/strategies/dbs.py rename to axelrod/ipd/strategies/dbs.py index dafdb8ec2..07c9b4279 100644 --- a/axelrod/strategies/dbs.py +++ b/axelrod/ipd/strategies/dbs.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class DBS(Player): +class DBS(IpdPlayer): """ A strategy that learns the opponent's strategy and uses symbolic noise detection for detecting whether anomalies in player’s behavior are @@ -210,7 +210,7 @@ def compute_prob_rule(self, outcome, alpha=1): p_cond = discounted_g / discounted_f return p_cond - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First move if not self.history: return C diff --git a/axelrod/strategies/defector.py b/axelrod/ipd/strategies/defector.py similarity index 83% rename from axelrod/strategies/defector.py rename to axelrod/ipd/strategies/defector.py index 4e05184f8..4caafcc34 100644 --- a/axelrod/strategies/defector.py +++ b/axelrod/ipd/strategies/defector.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Defector(Player): +class Defector(IpdPlayer): """A player who only ever defects. Names: @@ -26,11 +26,11 @@ class Defector(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return D -class TrickyDefector(Player): +class TrickyDefector(IpdPlayer): """A defector that is trying to be tricky. Names: @@ -49,7 +49,7 @@ class TrickyDefector(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Almost always defects, but will try to trick the opponent into cooperating. 
diff --git a/axelrod/strategies/doubler.py b/axelrod/ipd/strategies/doubler.py similarity index 81% rename from axelrod/strategies/doubler.py rename to axelrod/ipd/strategies/doubler.py index 27c9ad847..4614483b2 100644 --- a/axelrod/strategies/doubler.py +++ b/axelrod/ipd/strategies/doubler.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Doubler(Player): +class Doubler(IpdPlayer): """ Cooperates except when the opponent has defected and the opponent's cooperation count is less than twice their defection count. @@ -25,7 +25,7 @@ class Doubler(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C if ( diff --git a/axelrod/strategies/finite_state_machines.py b/axelrod/ipd/strategies/finite_state_machines.py similarity index 98% rename from axelrod/strategies/finite_state_machines.py rename to axelrod/ipd/strategies/finite_state_machines.py index e7bdd7f63..b27b31a6f 100644 --- a/axelrod/strategies/finite_state_machines.py +++ b/axelrod/ipd/strategies/finite_state_machines.py @@ -3,9 +3,9 @@ from typing import Any, List, Sequence, Tuple, Union import numpy.random as random from numpy.random import choice -from axelrod.action import Action -from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D actions = (C, D) @@ -92,10 +92,10 @@ def num_states(self): return len(set(state for state, action in self._state_transitions)) -class FSMPlayer(Player): +class FSMPlayer(IpdPlayer): """Abstract base class for finite state machine players.""" - name = "FSM Player" + name = "FSM IpdPlayer" classifier = { "memory_depth": 1, @@ -118,7 +118,7 @@ def __init__( self.initial_action = initial_action self.fsm = SimpleFSM(transitions, initial_state) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return self.initial_action else: diff --git a/axelrod/strategies/forgiver.py b/axelrod/ipd/strategies/forgiver.py similarity index 86% rename from axelrod/strategies/forgiver.py rename to axelrod/ipd/strategies/forgiver.py index 4d2cb7ed8..fef4a608f 100644 --- a/axelrod/strategies/forgiver.py +++ b/axelrod/ipd/strategies/forgiver.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Forgiver(Player): +class Forgiver(IpdPlayer): """ A player starts by cooperating however will defect if at any point the opponent has defected more than 10 percent of the time @@ -25,7 +25,7 @@ class Forgiver(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Begins by playing C, then plays D if the opponent has defected more than 10 percent of the time. 
@@ -35,7 +35,7 @@ def strategy(self, opponent: Player) -> Action: return C -class ForgivingTitForTat(Player): +class ForgivingTitForTat(IpdPlayer): """ A player starts by cooperating however will defect if at any point, the opponent has defected more than 10 percent of the time, and their most @@ -57,7 +57,7 @@ class ForgivingTitForTat(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Begins by playing C, then plays D if the opponent has defected more than 10 percent of the time and their most recent decision was defect. diff --git a/axelrod/strategies/gambler.py b/axelrod/ipd/strategies/gambler.py similarity index 95% rename from axelrod/strategies/gambler.py rename to axelrod/ipd/strategies/gambler.py index f127ce0f4..599f0cb82 100644 --- a/axelrod/strategies/gambler.py +++ b/axelrod/ipd/strategies/gambler.py @@ -7,11 +7,11 @@ import random from typing import Any -from axelrod.action import Action, str_to_actions, actions_to_str -from axelrod.load_data_ import load_pso_tables -from axelrod.player import Player +from axelrod.ipd.action import Action, str_to_actions, actions_to_str +from axelrod.ipd.load_data_ import load_pso_tables +from axelrod.ipd.player import IpdPlayer -from axelrod.random_ import random_choice +from axelrod.ipd.random_ import random_choice from .lookerup import EvolvableLookerUp, LookupTable, LookerUp, Plays, create_lookup_table_keys @@ -40,7 +40,7 @@ class Gambler(LookerUp): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: actions_or_float = super(Gambler, self).strategy(opponent) if isinstance(actions_or_float, Action): return actions_or_float diff --git a/axelrod/strategies/geller.py b/axelrod/ipd/strategies/geller.py similarity index 92% rename from axelrod/strategies/geller.py rename to axelrod/ipd/strategies/geller.py index 8343f8aa1..22311de3b 100644 --- a/axelrod/strategies/geller.py +++ b/axelrod/ipd/strategies/geller.py @@ -4,15 +4,15 @@ optimising them. """ -from axelrod._strategy_utils import inspect_strategy -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd._strategy_utils import inspect_strategy +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice C, D = Action.C, Action.D -class Geller(Player): +class Geller(IpdPlayer): """Observes what the player will do in the next round and adjust. If unable to do this: will play randomly. 
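Both Forgiver variants above hinge on a 10 percent defection threshold. A standalone sketch of the two rules, over plain "C"/"D" history lists (the library derives the same ratio from opponent.defections):

C, D = "C", "D"

def forgiver(opp_history):
    # Defect once the opponent's overall defection rate exceeds 10 percent.
    if not opp_history:
        return C
    return D if opp_history.count(D) / len(opp_history) > 0.1 else C

def forgiving_tit_for_tat(opp_history):
    # Same threshold, but also require the opponent's last move to be D.
    if not opp_history:
        return C
    rate = opp_history.count(D) / len(opp_history)
    return D if rate > 0.1 and opp_history[-1] == D else C

print(forgiver([C] * 8 + [D, D]))               # 20% defections -> 'D'
print(forgiving_tit_for_tat([D, D] + [C] * 8))  # 20%, but last move was C -> 'C'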
@@ -54,7 +54,7 @@ def foil_strategy_inspection() -> Action: """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" return random_choice(0.5) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Look at what the opponent will play in the next round and choose a strategy that gives the least jail time, which is is equivalent to playing the same diff --git a/axelrod/strategies/gobymajority.py b/axelrod/ipd/strategies/gobymajority.py similarity index 97% rename from axelrod/strategies/gobymajority.py rename to axelrod/ipd/strategies/gobymajority.py index e79efca0a..dcda9e56b 100644 --- a/axelrod/strategies/gobymajority.py +++ b/axelrod/ipd/strategies/gobymajority.py @@ -1,13 +1,13 @@ import copy from typing import Any, Dict, Union -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class GoByMajority(Player): +class GoByMajority(IpdPlayer): """Submitted to Axelrod's second tournament by Gail Grisell. It came 23rd and was written in 10 lines of BASIC. @@ -70,7 +70,7 @@ def __init__( def __repr__(self): return self.name - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """This is affected by the history of the opponent. As long as the opponent cooperates at least as often as they defect then diff --git a/axelrod/strategies/gradualkiller.py b/axelrod/ipd/strategies/gradualkiller.py similarity index 77% rename from axelrod/strategies/gradualkiller.py rename to axelrod/ipd/strategies/gradualkiller.py index 975f43c52..40737c1d4 100644 --- a/axelrod/strategies/gradualkiller.py +++ b/axelrod/ipd/strategies/gradualkiller.py @@ -1,17 +1,17 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.strategy_transformers import InitialTransformer +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.strategy_transformers import InitialTransformer C, D = Action.C, Action.D @InitialTransformer((D, D, D, D, D, C, C), name_prefix=None) -class GradualKiller(Player): +class GradualKiller(IpdPlayer): """ It begins by defecting in the first five moves, then cooperates two times. It then defects all the time if the opponent has defected in move 6 and 7, else cooperates all the time. - Initially designed to stop Gradual from defeating TitForTat in a 3 Player + Initially designed to stop Gradual from defeating TitForTat in a 3 IpdPlayer tournament. Names @@ -31,7 +31,7 @@ class GradualKiller(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if opponent.history[5:7] == [D, D]: return D return C diff --git a/axelrod/strategies/grudger.py b/axelrod/ipd/strategies/grudger.py similarity index 91% rename from axelrod/strategies/grudger.py rename to axelrod/ipd/strategies/grudger.py index 61215bb9a..660ff7667 100644 --- a/axelrod/strategies/grudger.py +++ b/axelrod/ipd/strategies/grudger.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Grudger(Player): +class Grudger(IpdPlayer): """ A player starts by cooperating however will defect if at any point the opponent has defected. 
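GoByMajority's rule, restated in the docstring above, is easy to pin down in isolation: cooperate while the opponent's cooperations are at least as frequent as their defections, optionally over a limited window. A sketch; the window handling mirrors the memory_depth idea rather than the exact library code.

C, D = "C", "D"

def go_by_majority(opp_history, memory_depth=None):
    # Restrict to the last memory_depth moves when a window is given.
    window = opp_history if memory_depth is None else opp_history[-memory_depth:]
    return D if window.count(D) > window.count(C) else C

print(go_by_majority([C, D, D]))                  # defections in the majority -> 'D'
print(go_by_majority([D, D, C], memory_depth=1))  # only the last move counted -> 'C'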
@@ -33,7 +33,7 @@ class Grudger(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """Begins by playing C, then plays D for the remaining rounds if the opponent ever plays D.""" if opponent.defections: @@ -41,7 +41,7 @@ def strategy(opponent: Player) -> Action: return C -class ForgetfulGrudger(Player): +class ForgetfulGrudger(IpdPlayer): """ A player starts by cooperating however will defect if at any point the opponent has defected, but forgets after mem_length matches. @@ -69,7 +69,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Begins by playing C, then plays D for mem_length rounds if the opponent ever plays D.""" if self.grudge_memory == self.mem_length: @@ -85,7 +85,7 @@ def strategy(self, opponent: Player) -> Action: return C -class OppositeGrudger(Player): +class OppositeGrudger(IpdPlayer): """ A player starts by defecting however will cooperate if at any point the opponent has cooperated. @@ -107,7 +107,7 @@ class OppositeGrudger(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """Begins by playing D, then plays C for the remaining rounds if the opponent ever plays C.""" if opponent.cooperations: @@ -115,7 +115,7 @@ def strategy(opponent: Player) -> Action: return D -class Aggravater(Player): +class Aggravater(IpdPlayer): """ Grudger, except that it defects on the first 3 turns @@ -136,7 +136,7 @@ class Aggravater(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: if len(opponent.history) < 3: return D elif opponent.defections: @@ -144,7 +144,7 @@ def strategy(opponent: Player) -> Action: return C -class SoftGrudger(Player): +class SoftGrudger(IpdPlayer): """ A modification of the Grudger strategy. Instead of punishing by always defecting: punishes by playing: D, D, D, D, C, C. (Will continue to @@ -170,7 +170,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Begins by playing C, then plays D, D, D, D, C, C against a defection """ if self.grudged: @@ -186,7 +186,7 @@ def strategy(self, opponent: Player) -> Action: return C -class GrudgerAlternator(Player): +class GrudgerAlternator(IpdPlayer): """ A player starts by cooperating until the first opponents defection, then alternates D-C. @@ -208,7 +208,7 @@ class GrudgerAlternator(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Begins by playing C, then plays Alternator for the remaining rounds if the opponent ever plays D.""" if opponent.defections: @@ -217,7 +217,7 @@ def strategy(self, opponent: Player) -> Action: return C -class EasyGo(Player): +class EasyGo(IpdPlayer): """ A player starts by defecting however will cooperate if at any point the opponent has defected. 
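ForgetfulGrudger above pairs a grudged flag with a grudge_memory counter so that the grudge expires after mem_length rounds. A simplified standalone sketch of that bookkeeping (the library increments and resets in a slightly different order):

C, D = "C", "D"

class ForgetfulGrudgeSketch:
    def __init__(self, mem_length=10):
        self.mem_length = mem_length
        self.grudged = False
        self.grudge_memory = 0

    def move(self, opp_last):
        if self.grudged:
            # Keep punishing until the grudge window has elapsed.
            self.grudge_memory += 1
            if self.grudge_memory > self.mem_length:
                self.grudged = False
                self.grudge_memory = 0
                return C
            return D
        if opp_last == D:
            self.grudged = True
            return D
        return C

g = ForgetfulGrudgeSketch(mem_length=3)
print([g.move(a) for a in [C, D, C, C, C, C, C]])  # punishes, then forgives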
@@ -241,7 +241,7 @@ class EasyGo(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """Begins by playing D, then plays C for the remaining rounds if the opponent ever plays D.""" if opponent.defections: @@ -249,7 +249,7 @@ def strategy(opponent: Player) -> Action: return D -class GeneralSoftGrudger(Player): +class GeneralSoftGrudger(IpdPlayer): """ A generalization of the SoftGrudger strategy. SoftGrudger punishes by playing: D, D, D, D, C, C. after a defection by the opponent. @@ -296,7 +296,7 @@ def __init__(self, n: int = 1, d: int = 4, c: int = 2) -> None: self.grudged = False self.grudge_memory = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Punishes after its opponent defects 'n' times consecutively. The punishment is in the form of 'd' defections followed by a penance of diff --git a/axelrod/strategies/grumpy.py b/axelrod/ipd/strategies/grumpy.py similarity index 92% rename from axelrod/strategies/grumpy.py rename to axelrod/ipd/strategies/grumpy.py index 6c4adaf5f..a1544a2d3 100644 --- a/axelrod/strategies/grumpy.py +++ b/axelrod/ipd/strategies/grumpy.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Grumpy(Player): +class Grumpy(IpdPlayer): """ A player that defects after a certain level of grumpiness. Grumpiness increases when the opponent defects and decreases @@ -49,7 +49,7 @@ def __init__( self.grumpy_threshold = grumpy_threshold self.nice_threshold = nice_threshold - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """A player that gets grumpier the more the opposition defects, and nicer the more they cooperate. diff --git a/axelrod/strategies/handshake.py b/axelrod/ipd/strategies/handshake.py similarity index 87% rename from axelrod/strategies/handshake.py rename to axelrod/ipd/strategies/handshake.py index 61a0d8219..c8c7e82c9 100644 --- a/axelrod/strategies/handshake.py +++ b/axelrod/ipd/strategies/handshake.py @@ -1,12 +1,12 @@ from typing import List -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Handshake(Player): +class Handshake(IpdPlayer): """Starts with C, D. If the opponent plays the same way, cooperate forever, else defect forever. 
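GeneralSoftGrudger above generalises SoftGrudger's fixed D, D, D, D, C, C reply into the parameters n, d and c. Two tiny helpers make the shape of that rule explicit (standalone; the library drives the same idea from a grudge counter instead):

C, D = "C", "D"

def punishment_sequence(d=4, c=2):
    # d defections followed by c conciliatory cooperations.
    return [D] * d + [C] * c

def triggered(opp_history, n=1):
    # Fire once the opponent has defected n times in a row.
    return len(opp_history) >= n and all(a == D for a in opp_history[-n:])

print(punishment_sequence())      # -> ['D', 'D', 'D', 'D', 'C', 'C']
print(triggered([C, D, D], n=2))  # -> True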
@@ -32,7 +32,7 @@ def __init__(self, initial_plays: List[Action] = None) -> None: initial_plays = [C, D] self.initial_plays = initial_plays - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Begin by playing the sequence C, D index = len(self.history) if index < len(self.initial_plays): diff --git a/axelrod/strategies/hmm.py b/axelrod/ipd/strategies/hmm.py similarity index 96% rename from axelrod/strategies/hmm.py rename to axelrod/ipd/strategies/hmm.py index 8ae2ed811..da76ac959 100644 --- a/axelrod/strategies/hmm.py +++ b/axelrod/ipd/strategies/hmm.py @@ -2,10 +2,10 @@ import numpy.random as random from numpy.random import choice -from axelrod.action import Action -from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists, crossover_lists -from axelrod.player import Player -from axelrod.random_ import random_choice, random_vector +from axelrod.ipd.action import Action +from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists, crossover_lists +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice, random_vector C, D = Action.C, Action.D @@ -90,7 +90,7 @@ def is_well_formed(self) -> bool: return False return True - def __eq__(self, other: Player) -> bool: + def __eq__(self, other: IpdPlayer) -> bool: """Equality of two HMMs""" check = True for attr in [ @@ -120,16 +120,16 @@ def move(self, opponent_action: Action) -> Action: return action -class HMMPlayer(Player): +class HMMPlayer(IpdPlayer): """ Abstract base class for Hidden Markov Model players. Names - - HMM Player: Original name by Marc Harper + - HMM IpdPlayer: Original name by Marc Harper """ - name = "HMM Player" + name = "HMM IpdPlayer" classifier = { "memory_depth": 1, @@ -176,7 +176,7 @@ def is_stochastic(self) -> bool: return True return False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return self.initial_action else: diff --git a/axelrod/strategies/human.py b/axelrod/ipd/strategies/human.py similarity index 97% rename from axelrod/strategies/human.py rename to axelrod/ipd/strategies/human.py index 753194248..8f343b93c 100644 --- a/axelrod/strategies/human.py +++ b/axelrod/ipd/strategies/human.py @@ -1,7 +1,7 @@ from os import linesep -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer from prompt_toolkit import prompt from prompt_toolkit.validation import ValidationError, Validator @@ -39,7 +39,7 @@ def validate(self, document) -> None: raise ValidationError(message="Action must be C or D", cursor_position=0) -class Human(Player): +class Human(IpdPlayer): """ A strategy that prompts for keyboard input rather than deriving its own action. @@ -147,7 +147,7 @@ def _get_human_input(self) -> Action: # pragma: no cover return Action.from_char(action.upper()) - def strategy(self, opponent: Player, input_function=None): + def strategy(self, opponent: IpdPlayer, input_function=None): """ Ordinarily, the strategy prompts for keyboard input rather than deriving its own action. 
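HMMPlayer above delegates each move to a hidden-state model: pick the next hidden state from the transition row selected by the opponent's last action, then cooperate with that state's emission probability. A self-contained sketch; the matrix names and the two-state example are illustrative, not the library's attributes.

import random

C, D = "C", "D"

def hmm_move(state, opp_action, t_C, t_D, emission_probs):
    # Choose the transition matrix by the opponent's action, then sample.
    row = t_C[state] if opp_action == C else t_D[state]
    next_state = random.choices(range(len(row)), weights=row)[0]
    action = C if random.random() < emission_probs[next_state] else D
    return next_state, action

# Two hidden states: 0 is trusting, 1 is wary.
t_C = [[0.9, 0.1], [0.5, 0.5]]  # transition rows used after an opponent C
t_D = [[0.2, 0.8], [0.1, 0.9]]  # transition rows used after an opponent D
emission = [0.95, 0.05]         # P(play C | state)
print(hmm_move(0, D, t_C, t_D, emission))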
diff --git a/axelrod/strategies/hunter.py b/axelrod/ipd/strategies/hunter.py similarity index 90% rename from axelrod/strategies/hunter.py rename to axelrod/ipd/strategies/hunter.py index fe2b39fa1..c385c6ba5 100644 --- a/axelrod/strategies/hunter.py +++ b/axelrod/ipd/strategies/hunter.py @@ -1,13 +1,13 @@ from typing import List, Optional, Tuple -from axelrod._strategy_utils import detect_cycle -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd._strategy_utils import detect_cycle +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class DefectorHunter(Player): +class DefectorHunter(IpdPlayer): """A player who hunts for defectors. Names: @@ -26,13 +26,13 @@ class DefectorHunter(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) >= 4 and len(opponent.history) == opponent.defections: return D return C -class CooperatorHunter(Player): +class CooperatorHunter(IpdPlayer): """A player who hunts for cooperators. Names: @@ -51,7 +51,7 @@ class CooperatorHunter(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) >= 4 and len(opponent.history) == opponent.cooperations: return D return C @@ -64,7 +64,7 @@ def is_alternator(history: List[Action]) -> bool: return True -class AlternatorHunter(Player): +class AlternatorHunter(IpdPlayer): """A player who hunts for alternators. Names: @@ -87,7 +87,7 @@ def __init__(self) -> None: super().__init__() self.is_alt = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) < 6: return C if len(self.history) == 6: @@ -98,7 +98,7 @@ def strategy(self, opponent: Player) -> Action: return C -class CycleHunter(Player): +class CycleHunter(IpdPlayer): """Hunts strategies that play cyclically, like any of the Cyclers, Alternator, etc. @@ -122,7 +122,7 @@ def __init__(self) -> None: super().__init__() self.cycle = None # type: Optional[Tuple[Action]] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if self.cycle: return D cycle = detect_cycle(opponent.history, min_size=3) @@ -143,7 +143,7 @@ class EventualCycleHunter(CycleHunter): name = "Eventual Cycle Hunter" - def strategy(self, opponent: Player) -> None: + def strategy(self, opponent: IpdPlayer) -> None: if len(opponent.history) < 10: return C if len(opponent.history) == opponent.cooperations: @@ -157,7 +157,7 @@ def strategy(self, opponent: Player) -> None: return C -class MathConstantHunter(Player): +class MathConstantHunter(IpdPlayer): """A player who hunts for mathematical constant players. Names: @@ -176,7 +176,7 @@ class MathConstantHunter(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Check whether the number of cooperations in the first and second halves of the history are close. The variance of the uniform distribution (1/4) @@ -206,7 +206,7 @@ def strategy(self, opponent: Player) -> Action: return C -class RandomHunter(Player): +class RandomHunter(IpdPlayer): """A player who hunts for random players. 
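CycleHunter above leans on detect_cycle to spot opponents that play periodically. A simplified standalone version of that test: find the shortest prefix of at least min_size moves whose repetition reproduces the whole history (the library helper is more general).

def detect_cycle_sketch(history, min_size=3):
    history = list(history)
    n = len(history)
    # Only accept cycles that repeat at least twice within the history.
    for size in range(min_size, n // 2 + 1):
        candidate = history[:size]
        if (candidate * (n // size + 1))[:n] == history:
            return tuple(candidate)
    return None

print(detect_cycle_sketch(["C", "C", "D"] * 4))   # -> ('C', 'C', 'D')
print(detect_cycle_sketch(["C", "D", "D", "C"]))  # -> None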
Names: @@ -230,7 +230,7 @@ def __init__(self) -> None: self.countDD = 0 super().__init__() - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ A random player is unpredictable, which means the conditional frequency of cooperation after cooperation, and defection after defections, should diff --git a/axelrod/strategies/inverse.py b/axelrod/ipd/strategies/inverse.py similarity index 84% rename from axelrod/strategies/inverse.py rename to axelrod/ipd/strategies/inverse.py index 092309330..cd9baad8c 100644 --- a/axelrod/strategies/inverse.py +++ b/axelrod/ipd/strategies/inverse.py @@ -1,11 +1,11 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice C, D = Action.C, Action.D -class Inverse(Player): +class Inverse(IpdPlayer): """A player who defects with a probability that diminishes relative to how long ago the opponent defected. @@ -26,7 +26,7 @@ class Inverse(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """Looks at opponent history to see if they have defected. If so, player defection is inversely proportional to when this occurred. diff --git a/axelrod/strategies/lookerup.py b/axelrod/ipd/strategies/lookerup.py similarity index 98% rename from axelrod/strategies/lookerup.py rename to axelrod/ipd/strategies/lookerup.py index b66d06293..8bd1dcbfa 100644 --- a/axelrod/strategies/lookerup.py +++ b/axelrod/ipd/strategies/lookerup.py @@ -5,9 +5,9 @@ import numpy.random as random from numpy.random import choice -from axelrod.action import Action, actions_to_str, str_to_actions -from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_dictionaries -from axelrod.player import Player +from axelrod.ipd.action import Action, actions_to_str, str_to_actions +from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_dictionaries +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D @@ -227,7 +227,7 @@ def create_lookup_table_keys( } -class LookerUp(Player): +class LookerUp(IpdPlayer): """ This strategy uses a LookupTable to decide its next action. 
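RandomHunter's countCC and countDD fields above track how often the opponent answers a C with C and a D with D; for a genuinely random opponent both rates hover near one half whatever I play. A sketch of the statistic, pairing each of my moves with the opponent's reply on the following round, which is how the counters are fed:

C, D = "C", "D"

def response_rates(my_history, opp_history):
    cc = dd = my_c = my_d = 0
    for mine, reply in zip(my_history[:-1], opp_history[1:]):
        if mine == C:
            my_c += 1
            cc += reply == C
        else:
            my_d += 1
            dd += reply == D
    # None when a rate is undefined (no C, or no D, played yet).
    return (cc / my_c if my_c else None, dd / my_d if my_d else None)

print(response_rates([C, C, D, C], [C, C, D, D]))  # -> (0.5, 1.0)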
If there is not enough history to use the table, it calls from a list of @@ -361,7 +361,7 @@ def _get_initial_actions(self, initial_actions: tuple) -> tuple: return initial_actions + tuple([C] * initial_actions_shortfall) return initial_actions[:table_depth] - def strategy(self, opponent: Player) -> Reaction: + def strategy(self, opponent: IpdPlayer) -> Reaction: turn_index = len(opponent.history) while turn_index < len(self._initial_actions_pool): return self._initial_actions_pool[turn_index] @@ -573,7 +573,7 @@ def __init__(self) -> None: super().__init__(parameters=params, pattern=pattern, initial_actions=(D, C)) -def get_last_n_plays(player: Player, depth: int) -> tuple: +def get_last_n_plays(player: IpdPlayer, depth: int) -> tuple: """Returns the last N plays of player as a tuple.""" if depth == 0: return () diff --git a/axelrod/strategies/mathematicalconstants.py b/axelrod/ipd/strategies/mathematicalconstants.py similarity index 91% rename from axelrod/strategies/mathematicalconstants.py rename to axelrod/ipd/strategies/mathematicalconstants.py index 8f88d1a2b..7d650669b 100644 --- a/axelrod/strategies/mathematicalconstants.py +++ b/axelrod/ipd/strategies/mathematicalconstants.py @@ -1,12 +1,12 @@ import math -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class CotoDeRatio(Player): +class CotoDeRatio(IpdPlayer): """The player will always aim to bring the ratio of co-operations to defections closer to the ratio as given in a sub class @@ -25,7 +25,7 @@ class CotoDeRatio(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Initially cooperate if len(opponent.history) == 0: return C diff --git a/axelrod/strategies/memoryone.py b/axelrod/ipd/strategies/memoryone.py similarity index 95% rename from axelrod/strategies/memoryone.py rename to axelrod/ipd/strategies/memoryone.py index 8fa4aeae6..2811a5424 100644 --- a/axelrod/strategies/memoryone.py +++ b/axelrod/ipd/strategies/memoryone.py @@ -4,14 +4,14 @@ import warnings from typing import Tuple -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice C, D = Action.C, Action.D -class MemoryOnePlayer(Player): +class MemoryOnePlayer(IpdPlayer): """ Uses a four-vector for strategies based on the last round of play, (P(C|CC), P(C|CD), P(C|DC), P(C|DD)). 
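The four-vector just introduced fully specifies a memory-one strategy: one cooperation probability per possible previous round. A standalone sketch, seeded with (1, 0, 0, 1), the Win-Stay Lose-Shift default that the warning in set_initial_four_vector falls back to:

import random

C, D = "C", "D"

# P(cooperate | (my last move, opponent's last move))
WSLS = {(C, C): 1.0, (C, D): 0.0, (D, C): 0.0, (D, D): 1.0}

def memory_one_move(my_last, opp_last, four_vector=WSLS):
    return C if random.random() < four_vector[(my_last, opp_last)] else D

print(memory_one_move(C, C))  # mutual cooperation -> stay on 'C'
print(memory_one_move(C, D))  # exploited last round -> shift to 'D'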
Win-Stay Lose-Shift is set as @@ -24,7 +24,7 @@ class MemoryOnePlayer(Player): - Memory One: [Nowak1990]_ """ - name = "Generic Memory One Player" + name = "Generic Memory One IpdPlayer" classifier = { "memory_depth": 1, # Memory-one Four-Vector "stochastic": True, @@ -73,7 +73,7 @@ def set_initial_four_vector(self, four_vector): warnings.warn("Memory one player is set to default (1, 0, 0, 1).") self.set_four_vector(four_vector) - if self.name == "Generic Memory One Player": + if self.name == "Generic Memory One IpdPlayer": self.name = "%s: %s" % (self.name, four_vector) def set_four_vector(self, four_vector: Tuple[float, float, float, float]): @@ -86,7 +86,7 @@ def set_four_vector(self, four_vector: Tuple[float, float, float, float]): self._four_vector = dict(zip([(C, C), (C, D), (D, C), (D, D)], four_vector)) self.classifier["stochastic"] = any(0 < x < 1 for x in set(four_vector)) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return self._initial # Determine which probability to use @@ -294,7 +294,7 @@ def __repr__(self) -> str: return "%s: %s" % (self.name, round(self.q, 2)) -class ALLCorALLD(Player): +class ALLCorALLD(IpdPlayer): """This strategy is at the parameter extreme of the ZD strategies (phi = 0). It simply repeats its last move, and so mimics ALLC or ALLD after round one. If the tournament is noisy, there will be long runs of C and D. @@ -319,7 +319,7 @@ class ALLCorALLD(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return random_choice(0.6) return self.history[-1] @@ -335,7 +335,7 @@ class ReactivePlayer(MemoryOnePlayer): - Reactive: [Nowak1989]_ """ - name = "Reactive Player" + name = "Reactive IpdPlayer" def __init__(self, probabilities: Tuple[float, float]) -> None: four_vector = (*probabilities, *probabilities) diff --git a/axelrod/strategies/memorytwo.py b/axelrod/ipd/strategies/memorytwo.py similarity index 95% rename from axelrod/strategies/memorytwo.py rename to axelrod/ipd/strategies/memorytwo.py index 3466256b3..5f3489a0d 100644 --- a/axelrod/strategies/memorytwo.py +++ b/axelrod/ipd/strategies/memorytwo.py @@ -4,9 +4,9 @@ import warnings from typing import Dict, Tuple -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice from .defector import Defector from .titfortat import TitFor2Tats, TitForTat @@ -14,7 +14,7 @@ C, D = Action.C, Action.D -class MemoryTwoPlayer(Player): +class MemoryTwoPlayer(IpdPlayer): """ Uses a sixteen-vector for strategies based on the 16 conditional probabilities P(X | I,J,K,L) where X, I, J, K, L in [C, D] and I, J are the players last @@ -43,7 +43,7 @@ class MemoryTwoPlayer(Player): - Memory Two: [Hilbe2017]_ """ - name = "Generic Memory Two Player" + name = "Generic Memory Two IpdPlayer" classifier = { "memory_depth": 2, "stochastic": False, @@ -76,7 +76,7 @@ def set_initial_sixteen_vector(self, sixteen_vector): warnings.warn("Memory two player is set to default, Cooperator.") self.set_sixteen_vector(sixteen_vector) - if self.name == "Generic Memory Two Player": + if self.name == "Generic Memory Two IpdPlayer": self.name = "%s: %s" % (self.name, sixteen_vector) def set_sixteen_vector(self, sixteen_vector: Tuple): @@ -95,7 +95,7 @@ def 
set_sixteen_vector(self, sixteen_vector: Tuple): ) # type: Dict[tuple, float] self.classifier["stochastic"] = any(0 < x < 1 for x in set(sixteen_vector)) - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) <= 1: return self._initial # Determine which probability to use @@ -206,7 +206,7 @@ def __init__(self) -> None: super().__init__(sixteen_vector) -class MEM2(Player): +class MEM2(IpdPlayer): """A memory-two player that switches between TFT, TFTT, and ALLD. Note that the reference claims that this is a memory two strategy but in @@ -237,7 +237,7 @@ def __init__(self) -> None: self.shift_counter = 3 self.alld_counter = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Update Histories # Note that this assumes that TFT and TFTT do not use internal counters, # Rather that they examine the actual history of play diff --git a/axelrod/strategies/meta.py b/axelrod/ipd/strategies/meta.py similarity index 97% rename from axelrod/strategies/meta.py rename to axelrod/ipd/strategies/meta.py index 09bebd26b..b3191186f 100644 --- a/axelrod/strategies/meta.py +++ b/axelrod/ipd/strategies/meta.py @@ -3,11 +3,11 @@ import numpy as np from numpy.random import choice -from axelrod.action import Action -from axelrod.classifier import Classifiers -from axelrod.player import Player -from axelrod.strategies import TitForTat -from axelrod.strategy_transformers import NiceTransformer +from axelrod.ipd.action import Action +from axelrod.ipd.classifier import Classifiers +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.strategies import TitForTat +from axelrod.ipd.strategy_transformers import NiceTransformer from ._strategies import all_strategies from .hunter import ( AlternatorHunter, @@ -25,16 +25,16 @@ C, D = Action.C, Action.D -class MetaPlayer(Player): +class MetaPlayer(IpdPlayer): """ A generic player that has its own team of players. Names: - - Meta Player: Original name by Karol Langner + - Meta IpdPlayer: Original name by Karol Langner """ - name = "Meta Player" + name = "Meta IpdPlayer" classifier = { "memory_depth": float("inf"), # Long memory "stochastic": True, @@ -623,7 +623,7 @@ def __init__( loss_value: float = -2, gain_value: float = 1, memory: list = None, - start_strategy: Player = TitForTat, + start_strategy: IpdPlayer = TitForTat, start_strategy_duration: int = 15, ): super().__init__(team=[start_strategy]) @@ -640,7 +640,7 @@ def __init__( self.gloss_values = None def __repr__(self): - return Player.__repr__(self) + return IpdPlayer.__repr__(self) def gain_loss_translate(self): """ diff --git a/axelrod/strategies/mindcontrol.py b/axelrod/ipd/strategies/mindcontrol.py similarity index 87% rename from axelrod/strategies/mindcontrol.py rename to axelrod/ipd/strategies/mindcontrol.py index bdb3e974b..80a68c2a4 100644 --- a/axelrod/strategies/mindcontrol.py +++ b/axelrod/ipd/strategies/mindcontrol.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class MindController(Player): +class MindController(IpdPlayer): """A player that changes the opponents strategy to cooperate. 
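MetaPlayer above owns a team of strategies and derives its move from theirs. The aggregation rule varies by subclass; majority voting, in the spirit of the MetaMajority variants, is the simplest and is sketched here in isolation (ties resolved toward cooperation is an assumption of this sketch):

C, D = "C", "D"

def meta_majority(team_moves):
    # Poll every teammate's proposed action and take the majority.
    return C if team_moves.count(C) >= team_moves.count(D) else D

# Three hypothetical teammates propose their moves for this round:
print(meta_majority([C, D, C]))  # -> 'C'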
Names @@ -24,7 +24,7 @@ class MindController(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: """ Alters the opponents strategy method to be a lambda function which always returns C. This player will then always return D to take @@ -35,7 +35,7 @@ def strategy(opponent: Player) -> Action: return D -class MindWarper(Player): +class MindWarper(IpdPlayer): """ A player that changes the opponent's strategy but blocks changes to its own. @@ -63,7 +63,7 @@ def __setattr__(self, name: str, val: str): self.__dict__[name] = val @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: opponent.strategy = lambda opponent: C return D @@ -90,6 +90,6 @@ class MindBender(MindWarper): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: opponent.__dict__["strategy"] = lambda opponent: C return D diff --git a/axelrod/strategies/mindreader.py b/axelrod/ipd/strategies/mindreader.py similarity index 91% rename from axelrod/strategies/mindreader.py rename to axelrod/ipd/strategies/mindreader.py index dd6c64269..a6812f493 100644 --- a/axelrod/strategies/mindreader.py +++ b/axelrod/ipd/strategies/mindreader.py @@ -3,14 +3,14 @@ indicated by their classifier). We do not recommend putting a lot of time in to optimising them. """ -from axelrod._strategy_utils import inspect_strategy, look_ahead -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd._strategy_utils import inspect_strategy, look_ahead +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class MindReader(Player): +class MindReader(IpdPlayer): """A player that looks ahead at what the opponent will do and decides what to do. @@ -35,7 +35,7 @@ def foil_strategy_inspection() -> Action: """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" return D - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Pretends to play the opponent a number of times before each match. The primary purpose is to look far enough ahead to see if a defect will @@ -103,6 +103,6 @@ def foil_strategy_inspection() -> Action: """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" return C - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Will read the mind of the opponent and play the opponent's strategy. """ return inspect_strategy(self, opponent) diff --git a/axelrod/strategies/mutual.py b/axelrod/ipd/strategies/mutual.py similarity index 81% rename from axelrod/strategies/mutual.py rename to axelrod/ipd/strategies/mutual.py index d537bbaad..29966ce3e 100644 --- a/axelrod/strategies/mutual.py +++ b/axelrod/ipd/strategies/mutual.py @@ -1,11 +1,11 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice C, D = Action.C, Action.D -class Desperate(Player): +class Desperate(IpdPlayer): """A player that only cooperates after mutual defection. 
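The mind-control strategies above exploit ordinary Python attribute lookup: assigning to opponent.strategy creates an instance attribute that shadows the class method, so the victim cooperates forever while the controller defects. A sketch of exactly that trick:

C, D = "C", "D"

class VictimSketch:
    def strategy(self, opponent):
        return D  # normally a committed defector

victim = VictimSketch()
victim.strategy = lambda opponent: C  # instance attribute shadows the method
print(victim.strategy(None))          # -> 'C'

# MindWarper blocks precisely this assignment in __setattr__, which is why
# MindBender writes into the instance __dict__ directly instead.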
Names: @@ -23,7 +23,7 @@ class Desperate(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return random_choice() if self.history[-1] == D and opponent.history[-1] == D: @@ -31,7 +31,7 @@ def strategy(self, opponent: Player) -> Action: return D -class Hopeless(Player): +class Hopeless(IpdPlayer): """A player that only defects after mutual cooperation. Names: @@ -49,7 +49,7 @@ class Hopeless(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return random_choice() if self.history[-1] == C and opponent.history[-1] == C: @@ -57,7 +57,7 @@ def strategy(self, opponent: Player) -> Action: return C -class Willing(Player): +class Willing(IpdPlayer): """A player that only defects after mutual defection. Names: @@ -75,7 +75,7 @@ class Willing(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return random_choice() if self.history[-1] == D and opponent.history[-1] == D: diff --git a/axelrod/strategies/negation.py b/axelrod/ipd/strategies/negation.py similarity index 77% rename from axelrod/strategies/negation.py rename to axelrod/ipd/strategies/negation.py index cccf218a1..e8e580dda 100644 --- a/axelrod/strategies/negation.py +++ b/axelrod/ipd/strategies/negation.py @@ -1,11 +1,11 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice C, D = Action.C, Action.D -class Negation(Player): +class Negation(IpdPlayer): """ A player starts by cooperating or defecting randomly if it's their first move, then simply doing the opposite of the opponents last move thereafter. @@ -26,7 +26,7 @@ class Negation(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Random first move if not self.history: return random_choice() diff --git a/axelrod/strategies/oncebitten.py b/axelrod/ipd/strategies/oncebitten.py similarity index 89% rename from axelrod/strategies/oncebitten.py rename to axelrod/ipd/strategies/oncebitten.py index 4703e3c70..3618c047c 100644 --- a/axelrod/strategies/oncebitten.py +++ b/axelrod/ipd/strategies/oncebitten.py @@ -1,12 +1,12 @@ import random -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class OnceBitten(Player): +class OnceBitten(IpdPlayer): """ Cooperates once when the opponent defects, but if they defect twice in a row defaults to forgetful grudger for 10 turns defecting. @@ -33,7 +33,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Begins by playing C, then plays D for mem_length rounds if the opponent ever plays D twice in a row. @@ -54,7 +54,7 @@ def strategy(self, opponent: Player) -> Action: return C -class FoolMeOnce(Player): +class FoolMeOnce(IpdPlayer): """ Forgives one D then retaliates forever on a second D. 
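Desperate, Hopeless and Willing above differ only in which mutual outcome flips their default move. Side by side, as pure functions of the previous round:

C, D = "C", "D"

def desperate(my_last, opp_last):
    # Cooperates only after mutual defection.
    return C if (my_last, opp_last) == (D, D) else D

def hopeless(my_last, opp_last):
    # Defects only after mutual cooperation.
    return D if (my_last, opp_last) == (C, C) else C

def willing(my_last, opp_last):
    # Defects only after mutual defection.
    return D if (my_last, opp_last) == (D, D) else C

print(desperate(D, D), hopeless(C, C), willing(C, D))  # -> C D C

(Each library class also opens with a random_choice() move before any history exists, per the strategy bodies above.)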
@@ -75,7 +75,7 @@ class FoolMeOnce(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: if not opponent.history: return C if opponent.defections > 1: @@ -83,11 +83,11 @@ def strategy(opponent: Player) -> Action: return C -class ForgetfulFoolMeOnce(Player): +class ForgetfulFoolMeOnce(IpdPlayer): """ Forgives one D then retaliates forever on a second D. Sometimes randomly forgets the defection count, and so keeps a secondary count separate from - the standard count in Player. + the standard count in IpdPlayer. Names: @@ -117,7 +117,7 @@ def __init__(self, forget_probability: float = 0.05) -> None: self._initial = C self.forget_probability = forget_probability - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: r = random.random() if not opponent.history: return self._initial diff --git a/axelrod/strategies/prober.py b/axelrod/ipd/strategies/prober.py similarity index 92% rename from axelrod/strategies/prober.py rename to axelrod/ipd/strategies/prober.py index 96940a1fc..5a39c722b 100644 --- a/axelrod/strategies/prober.py +++ b/axelrod/ipd/strategies/prober.py @@ -1,9 +1,9 @@ import random from typing import List -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice Vector = List[float] @@ -11,7 +11,7 @@ C, D = Action.C, Action.D -class CollectiveStrategy(Player): +class CollectiveStrategy(IpdPlayer): """Defined in [Li2009]_. 'It always cooperates in the first move and defects in the second move. If the opponent also cooperates in the first move and defects in the second move, CS will cooperate until the opponent defects. @@ -35,7 +35,7 @@ class CollectiveStrategy(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return C @@ -48,7 +48,7 @@ def strategy(self, opponent: Player) -> Action: return D -class Detective(Player): +class Detective(IpdPlayer): """ Starts with C, D, C, C, or with the given sequence of actions. If the opponent defects at least once in the first fixed rounds, @@ -77,7 +77,7 @@ def __init__(self, initial_actions: List[Action] = None) -> None: else: self.initial_actions = initial_actions - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: hist_size = len(self.history) init_size = len(self.initial_actions) if hist_size < init_size: @@ -87,7 +87,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] # TFT -class Prober(Player): +class Prober(IpdPlayer): """ Plays D, C, C initially. Defects forever if opponent cooperated in moves 2 and 3. Otherwise plays TFT. @@ -108,7 +108,7 @@ class Prober(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return D @@ -124,7 +124,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1:] == [D] else C -class Prober2(Player): +class Prober2(IpdPlayer): """ Plays D, C, C initially. Cooperates forever if opponent played D then C in moves 2 and 3. Otherwise plays TFT. 
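Detective's two phases are fully visible in the strategy body above; restated as a standalone function over plain histories:

C, D = "C", "D"

def detective(my_history, opp_history, initial_actions=(C, D, C, C)):
    turn = len(my_history)
    if turn < len(initial_actions):
        return initial_actions[turn]              # still in the probing phase
    if D not in opp_history[:len(initial_actions)]:
        return D                                  # never punished: exploit
    return opp_history[-1]                        # otherwise Tit For Tat

print(detective([C, D, C, C], [C, C, C, C]))  # docile opponent -> 'D'
print(detective([C, D, C, C], [C, D, C, C]))  # it punished us -> mirror 'C'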
@@ -145,7 +145,7 @@ class Prober2(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return D @@ -161,7 +161,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1:] == [D] else C -class Prober3(Player): +class Prober3(IpdPlayer): """ Plays D, C initially. Defects forever if opponent played C in moves 2. Otherwise plays TFT. @@ -182,7 +182,7 @@ class Prober3(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return D @@ -196,7 +196,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1:] == [D] else C -class Prober4(Player): +class Prober4(IpdPlayer): """ Plays C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D initially. Counts retaliating and provocative defections of the opponent. @@ -248,7 +248,7 @@ def __init__(self) -> None: self.unjust_Ds = 0 self.turned_defector = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return self.init_sequence[0] turn = len(self.history) @@ -270,7 +270,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1] == D else C -class HardProber(Player): +class HardProber(IpdPlayer): """ Plays D, D, C, C initially. Defects forever if opponent cooperated in moves 2 and 3. Otherwise plays TFT. @@ -291,7 +291,7 @@ class HardProber(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turn = len(self.history) if turn == 0: return D @@ -309,7 +309,7 @@ def strategy(self, opponent: Player) -> Action: return D if opponent.history[-1:] == [D] else C -class NaiveProber(Player): +class NaiveProber(IpdPlayer): """ Like tit-for-tat, but it occasionally defects with a small probability. 
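NaiveProber, as its docstring above says, is Tit For Tat with an occasional unprovoked probe. A sketch with the probe probability p exposed:

import random

C, D = "C", "D"

def naive_prober(opp_history, p=0.1):
    if not opp_history:
        return C
    if opp_history[-1] == D:
        return D                              # ordinary TFT retaliation
    return D if random.random() < p else C    # rare probing defection

random.seed(0)
print([naive_prober([C]) for _ in range(10)])  # mostly 'C', occasional 'D'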
@@ -341,7 +341,7 @@ def __init__(self, p: float = 0.1) -> None: if (self.p == 0) or (self.p == 1): self.classifier["stochastic"] = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First move if len(self.history) == 0: return C @@ -385,7 +385,7 @@ def __init__(self, p: float = 0.1) -> None: super().__init__(p) self.probing = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First move if len(self.history) == 0: return C diff --git a/axelrod/strategies/punisher.py b/axelrod/ipd/strategies/punisher.py similarity index 91% rename from axelrod/strategies/punisher.py rename to axelrod/ipd/strategies/punisher.py index dd8145576..1dff4b5fd 100644 --- a/axelrod/strategies/punisher.py +++ b/axelrod/ipd/strategies/punisher.py @@ -1,12 +1,12 @@ from typing import List -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Punisher(Player): +class Punisher(IpdPlayer): """ A player starts by cooperating however will defect if at any point the opponent has defected, but forgets after meme_length matches, with @@ -38,7 +38,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 1 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing D if the opponent ever @@ -61,7 +61,7 @@ def strategy(self, opponent: Player) -> Action: return C -class InversePunisher(Player): +class InversePunisher(IpdPlayer): """ An inverted version of Punisher. The player starts by cooperating however will defect if at any point the opponent has defected, and forgets after @@ -90,7 +90,7 @@ def __init__(self) -> None: self.grudged = False self.grudge_memory = 1 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing C if the opponent ever plays @@ -113,7 +113,7 @@ def strategy(self, opponent: Player) -> Action: return C -class LevelPunisher(Player): +class LevelPunisher(IpdPlayer): """ A player starts by cooperating however, after 10 rounds will defect if at any point the number of defections @@ -135,7 +135,7 @@ class LevelPunisher(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) < 10: return C elif (len(opponent.history) - opponent.cooperations) / len( @@ -146,7 +146,7 @@ def strategy(self, opponent: Player) -> Action: return C -class TrickyLevelPunisher(Player): +class TrickyLevelPunisher(IpdPlayer): """ A player starts by cooperating however, after 10, 50 and 100 rounds will defect if at any point the percentage of defections @@ -168,7 +168,7 @@ class TrickyLevelPunisher(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return C if len(opponent.history) < 10: diff --git a/axelrod/strategies/qlearner.py b/axelrod/ipd/strategies/qlearner.py similarity index 92% rename from axelrod/strategies/qlearner.py rename to axelrod/ipd/strategies/qlearner.py index dd308feb6..de233b355 100644 --- 
a/axelrod/strategies/qlearner.py +++ b/axelrod/ipd/strategies/qlearner.py @@ -2,16 +2,16 @@ from collections import OrderedDict from typing import Dict, List, Union -from axelrod.action import Action, actions_to_str -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action, actions_to_str +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice Score = Union[int, float] C, D = Action.C, Action.D -class RiskyQLearner(Player): +class RiskyQLearner(IpdPlayer): """A player who learns the best strategies through the q-learning algorithm. @@ -58,7 +58,7 @@ def receive_match_attributes(self): (R, P, S, T) = self.match_attributes["game"].RPST() self.payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}} - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """Runs a qlearn algorithm while the tournament is running.""" if len(self.history) == 0: self.prev_action = random_choice() @@ -84,7 +84,7 @@ def select_action(self, state: str) -> Action: return max(self.Qs[state], key=lambda x: self.Qs[state][x]) return random_choice() - def find_state(self, opponent: Player) -> str: + def find_state(self, opponent: IpdPlayer) -> str: """ Finds the my_state (the opponents last n moves + its previous proportion of playing C) as a hashable state @@ -102,7 +102,7 @@ def perform_q_learning(self, prev_state: str, state: str, action: Action, reward ] + self.learning_rate * (reward + self.discount_rate * self.Vs[state]) self.Vs[prev_state] = max(self.Qs[prev_state].values()) - def find_reward(self, opponent: Player) -> Dict[Action, Dict[Action, Score]]: + def find_reward(self, opponent: IpdPlayer) -> Dict[Action, Dict[Action, Score]]: """ Finds the reward gained on the last iteration """ diff --git a/axelrod/strategies/rand.py b/axelrod/ipd/strategies/rand.py similarity index 82% rename from axelrod/strategies/rand.py rename to axelrod/ipd/strategies/rand.py index eb259e37c..3da79d28f 100644 --- a/axelrod/strategies/rand.py +++ b/axelrod/ipd/strategies/rand.py @@ -1,9 +1,9 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice -class Random(Player): +class Random(IpdPlayer): """A player who randomly chooses between cooperating and defecting. This strategy came 15th in Axelrod's original tournament. 
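perform_q_learning above is a standard Q-learning step in which Vs caches the maximum of Qs over actions. Isolated below; the decay on the old estimate is the usual form and only partly visible in the hunk, and the 0.9 defaults echoing RiskyQLearner's risky settings are an assumption of this sketch.

C, D = "C", "D"

def q_update(Qs, Vs, prev_state, action, reward, state,
             learning_rate=0.9, discount_rate=0.9):
    old = Qs[prev_state][action]
    # Blend the old estimate with the bootstrapped target.
    Qs[prev_state][action] = (1 - learning_rate) * old + learning_rate * (
        reward + discount_rate * Vs.get(state, 0.0))
    Vs[prev_state] = max(Qs[prev_state].values())

Qs = {"s0": {C: 0.0, D: 0.0}}
Vs = {"s0": 0.0}
q_update(Qs, Vs, "s0", D, reward=5, state="s1")
print(Qs["s0"], Vs["s0"])  # D's value moves toward the reward; V follows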
@@ -42,5 +42,5 @@ def __init__(self, p: float = 0.5) -> None: if p in [0, 1]: self.classifier["stochastic"] = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: return random_choice(self.p) diff --git a/axelrod/strategies/resurrection.py b/axelrod/ipd/strategies/resurrection.py similarity index 87% rename from axelrod/strategies/resurrection.py rename to axelrod/ipd/strategies/resurrection.py index de266320a..3e3e1f068 100644 --- a/axelrod/strategies/resurrection.py +++ b/axelrod/ipd/strategies/resurrection.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Resurrection(Player): +class Resurrection(IpdPlayer): """ A player starts by cooperating and defects if the number of rounds played by the player is greater than five and the last five rounds @@ -29,7 +29,7 @@ class Resurrection(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C if len(self.history) >= 5 and self.history[-5:] == [D, D, D, D, D]: @@ -38,7 +38,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class DoubleResurrection(Player): +class DoubleResurrection(IpdPlayer): """ A player starts by cooperating and defects if the number of rounds played by the player is greater than five and the last five rounds @@ -62,7 +62,7 @@ class DoubleResurrection(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C if len(self.history) >= 5 and self.history[-5:] == [C, C, C, C, C]: diff --git a/axelrod/strategies/retaliate.py b/axelrod/ipd/strategies/retaliate.py similarity index 94% rename from axelrod/strategies/retaliate.py rename to axelrod/ipd/strategies/retaliate.py index be4b49648..3e1dbf445 100644 --- a/axelrod/strategies/retaliate.py +++ b/axelrod/ipd/strategies/retaliate.py @@ -1,12 +1,12 @@ from collections import defaultdict -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class Retaliate(Player): +class Retaliate(IpdPlayer): """ A player starts by cooperating but will retaliate once the opponent has won more than 10 percent times the number of defections the player has. @@ -29,14 +29,14 @@ class Retaliate(Player): def __init__(self, retaliation_threshold: float = 0.1) -> None: """ - Uses the basic init from the Player class, but also set the name to + Uses the basic init from the IpdPlayer class, but also set the name to include the retaliation setting. """ super().__init__() self.retaliation_threshold = retaliation_threshold self.play_counts = defaultdict(int) # type: defaultdict - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ If the opponent has played D to my C more often than x% of the time that I've done the same to him, play D. Otherwise, play C. @@ -82,7 +82,7 @@ def __init__(self, retaliation_threshold: float = 0.05) -> None: super().__init__(retaliation_threshold=retaliation_threshold) -class LimitedRetaliate(Player): +class LimitedRetaliate(IpdPlayer): """ A player that co-operates unless the opponent defects and wins. 
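Retaliate's docstring above compares how often each side has defected against the other's cooperation. A sketch of that bookkeeping with the play_counts idea made explicit (the exact trigger arithmetic is the library's; this illustrates the shape of the test):

from collections import defaultdict

C, D = "C", "D"

def retaliate_move(my_history, opp_history, threshold=0.1):
    play_counts = defaultdict(int)
    for pair in zip(my_history, opp_history):
        play_counts[pair] += 1
    # Their D against my C, versus my D against their C, scaled by threshold.
    if play_counts[(C, D)] > play_counts[(D, C)] * threshold:
        return D
    return C

print(retaliate_move([C, C, C], [C, C, D]))  # one betrayal -> 'D'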
It will then retaliate by defecting. It stops when either, it has beaten @@ -125,7 +125,7 @@ def __init__( self.retaliation_limit = retaliation_limit self.play_counts = defaultdict(int) # type: defaultdict - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """ If the opponent has played D to my C more often than x% of the time that I've done the same to him, retaliate by playing D but stop doing diff --git a/axelrod/strategies/revised_downing.py b/axelrod/ipd/strategies/revised_downing.py similarity index 92% rename from axelrod/strategies/revised_downing.py rename to axelrod/ipd/strategies/revised_downing.py index 530905c1b..f7d382aad 100644 --- a/axelrod/strategies/revised_downing.py +++ b/axelrod/ipd/strategies/revised_downing.py @@ -2,12 +2,12 @@ Revised Downing implemented from the Fortran source code for the second of Axelrod's tournaments. """ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class RevisedDowning(Player): +class RevisedDowning(IpdPlayer): """ Strategy submitted to Axelrod's second tournament by Leslie Downing. (K59R). @@ -44,7 +44,7 @@ def __init__(self) -> None: self.total_C = 0 # note the same as self.cooperations self.total_D = 0 # note the same as self.defections - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: round_number = len(self.history) + 1 if round_number == 1: diff --git a/axelrod/strategies/selfsteem.py b/axelrod/ipd/strategies/selfsteem.py similarity index 87% rename from axelrod/strategies/selfsteem.py rename to axelrod/ipd/strategies/selfsteem.py index 59b43657b..d5b5f8db8 100644 --- a/axelrod/strategies/selfsteem.py +++ b/axelrod/ipd/strategies/selfsteem.py @@ -1,13 +1,13 @@ from math import pi, sin -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice C, D = Action.C, Action.D -class SelfSteem(Player): +class SelfSteem(IpdPlayer): """ This strategy is based on the feeling with the same name. It is modeled on the sine curve(f = sin( 2* pi * n / 10 )), which varies @@ -37,7 +37,7 @@ class SelfSteem(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: turns_number = len(self.history) sine_value = sin(2 * pi * turns_number / 10) diff --git a/axelrod/strategies/sequence_player.py b/axelrod/ipd/strategies/sequence_player.py similarity index 91% rename from axelrod/strategies/sequence_player.py rename to axelrod/ipd/strategies/sequence_player.py index d04d30830..cbff54f0e 100644 --- a/axelrod/strategies/sequence_player.py +++ b/axelrod/ipd/strategies/sequence_player.py @@ -1,20 +1,20 @@ from types import FunctionType from typing import Tuple -from axelrod._strategy_utils import thue_morse_generator -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd._strategy_utils import thue_morse_generator +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class SequencePlayer(Player): +class SequencePlayer(IpdPlayer): """Abstract base class for players that use a generated sequence to determine their plays. 
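SelfSteem keys its move off the sine wave named in its docstring, sin(2 * pi * n / 10), evaluated on the turn counter. A sketch of the oscillator; the decision thresholds below are placeholders, since the real cut-offs live in the rest of the strategy body.

from math import pi, sin

C, D = "C", "D"

def mood(turn):
    return sin(2 * pi * turn / 10)  # period-10 wave over the turn counter

def selfsteem_sketch(turn, opp_last):
    s = mood(turn)
    if s > 0.95:
        return D                        # near the peak: assertive
    if s < -0.95:
        return C                        # near the trough: conciliatory
    return opp_last if opp_last else C  # in between: mirror the opponent

print([round(mood(t), 2) for t in range(10)])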
Names: - - Sequence Player: Original name by Marc Harper + - Sequence IpdPlayer: Original name by Marc Harper """ def __init__( @@ -33,7 +33,7 @@ def meta_strategy(value: int) -> Action: else: return C - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Iterate through the sequence and apply the meta strategy for s in self.sequence_generator: return self.meta_strategy(s) diff --git a/axelrod/strategies/shortmem.py b/axelrod/ipd/strategies/shortmem.py similarity index 89% rename from axelrod/strategies/shortmem.py rename to axelrod/ipd/strategies/shortmem.py index 23bf7c523..38180286d 100644 --- a/axelrod/strategies/shortmem.py +++ b/axelrod/ipd/strategies/shortmem.py @@ -1,10 +1,10 @@ -from axelrod import Player -from axelrod.action import Action +from axelrod import IpdPlayer +from axelrod.ipd.action import Action C, D = Action.C, Action.D -class ShortMem(Player): +class ShortMem(IpdPlayer): """ A player starts by always cooperating for the first 10 moves. @@ -32,7 +32,7 @@ class ShortMem(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: if len(opponent.history) <= 10: return C diff --git a/axelrod/strategies/stalker.py b/axelrod/ipd/strategies/stalker.py similarity index 87% rename from axelrod/strategies/stalker.py rename to axelrod/ipd/strategies/stalker.py index bc533cfa3..74357006a 100644 --- a/axelrod/strategies/stalker.py +++ b/axelrod/ipd/strategies/stalker.py @@ -1,13 +1,13 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice -from axelrod.strategy_transformers import FinalTransformer +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice +from axelrod.ipd.strategy_transformers import FinalTransformer C, D = Action.C, Action.D @FinalTransformer((D,), name_prefix=None) # End with defection -class Stalker(Player): +class Stalker(IpdPlayer): """ This is a strategy which is only influenced by the score. @@ -49,14 +49,14 @@ def receive_match_attributes(self): self.wish_score = (R + P) / 2 self.current_score = 0 - def score_last_round(self, opponent: Player): + def score_last_round(self, opponent: IpdPlayer): # Load the default game if not supplied by a tournament. 
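SequencePlayer above walks a generator and maps each term to an action via meta_strategy. The canonical instance is the Thue-Morse sequence, whose n-th term is the parity of the number of 1-bits in n; a standalone version of that generator:

def thue_morse_generator(start=0):
    n = start
    while True:
        yield bin(n).count("1") % 2  # parity of the set bits in n
        n += 1

gen = thue_morse_generator()
print([next(gen) for _ in range(8)])  # -> [0, 1, 1, 0, 1, 0, 0, 1]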
game = self.match_attributes["game"] last_round = (self.history[-1], opponent.history[-1]) scores = game.score(last_round) self.current_score += scores[0] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C diff --git a/axelrod/strategies/titfortat.py b/axelrod/ipd/strategies/titfortat.py similarity index 92% rename from axelrod/strategies/titfortat.py rename to axelrod/ipd/strategies/titfortat.py index cb3b79245..c96be3c11 100644 --- a/axelrod/strategies/titfortat.py +++ b/axelrod/ipd/strategies/titfortat.py @@ -1,12 +1,12 @@ -from axelrod.action import Action, actions_to_str -from axelrod.player import Player -from axelrod.random_ import random_choice -from axelrod.strategy_transformers import FinalTransformer, TrackHistoryTransformer +from axelrod.ipd.action import Action, actions_to_str +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice +from axelrod.ipd.strategy_transformers import FinalTransformer, TrackHistoryTransformer C, D = Action.C, Action.D -class TitForTat(Player): +class TitForTat(IpdPlayer): """ A player starts by cooperating and then mimics the previous action of the opponent. @@ -36,7 +36,7 @@ class TitForTat(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """This is the actual strategy""" # First move if not self.history: @@ -47,7 +47,7 @@ def strategy(self, opponent: Player) -> Action: return C -class TitFor2Tats(Player): +class TitFor2Tats(IpdPlayer): """A player starts by cooperating and then defects only after two defects by opponent. @@ -73,11 +73,11 @@ class TitFor2Tats(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return D if opponent.history[-2:] == [D, D] else C -class TwoTitsForTat(Player): +class TwoTitsForTat(IpdPlayer): """A player starts by cooperating and replies to each defect by two defections. @@ -98,11 +98,11 @@ class TwoTitsForTat(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return D if D in opponent.history[-2:] else C -class DynamicTwoTitsForTat(Player): +class DynamicTwoTitsForTat(IpdPlayer): """ A player starts by cooperating and then punishes its opponent's defections with defections, but with a dynamic bias towards cooperating @@ -139,7 +139,7 @@ def strategy(opponent): return C -class Bully(Player): +class Bully(IpdPlayer): """A player that behaves opposite to Tit For Tat, including first move. Starts by defecting and then does the opposite of opponent's previous move. @@ -164,11 +164,11 @@ class Bully(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return C if opponent.history[-1:] == [D] else D -class SneakyTitForTat(Player): +class SneakyTitForTat(IpdPlayer): """Tries defecting once and repents if punished. Names: @@ -187,7 +187,7 @@ class SneakyTitForTat(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) < 2: return C if D not in opponent.history: @@ -197,7 +197,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class SuspiciousTitForTat(Player): +class SuspiciousTitForTat(IpdPlayer): """A variant of Tit For Tat that starts off with a defection. 
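The TitForTat hunk above shows the renamed strategy signature but not the renamed match class in use. A usage sketch, assuming this patch is installed and axl.IpdMatch is the renamed axl.Match:

import axelrod as axl

players = (axl.TitForTat(), axl.Alternator())
match = axl.IpdMatch(players, turns=5)   # formerly axl.Match
print(match.play())  # [(C, C), (C, D), (D, C), (C, D), (D, C)]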
Names: @@ -218,11 +218,11 @@ class SuspiciousTitForTat(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return C if opponent.history[-1:] == [C] else D -class AntiTitForTat(Player): +class AntiTitForTat(IpdPlayer): """A strategy that plays the opposite of the opponents previous move. This is similar to Bully, except that the first move is cooperation. @@ -244,11 +244,11 @@ class AntiTitForTat(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: return D if opponent.history[-1:] == [C] else C -class HardTitForTat(Player): +class HardTitForTat(IpdPlayer): """A variant of Tit For Tat that uses a longer history for retaliation. Names: @@ -268,7 +268,7 @@ class HardTitForTat(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: # Cooperate on the first move if not opponent.history: return C @@ -279,7 +279,7 @@ def strategy(opponent: Player) -> Action: return C -class HardTitFor2Tats(Player): +class HardTitFor2Tats(IpdPlayer): """A variant of Tit For Two Tats that uses a longer history for retaliation. @@ -300,7 +300,7 @@ class HardTitFor2Tats(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: # Cooperate on the first move if not opponent.history: return C @@ -312,7 +312,7 @@ def strategy(opponent: Player) -> Action: return C -class OmegaTFT(Player): +class OmegaTFT(IpdPlayer): """OmegaTFT modifies Tit For Tat in two ways: - checks for deadlock loops of alternating rounds of (C, D) and (D, C), and attempting to break them @@ -343,7 +343,7 @@ def __init__( self.randomness_counter = 0 self.deadlock_counter = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Cooperate on the first move if not self.history: return C @@ -384,7 +384,7 @@ def strategy(self, opponent: Player) -> Action: return move -class OriginalGradual(Player): +class OriginalGradual(IpdPlayer): """ A player that punishes defections with a growing number of defections but after punishing for `punishment_limit` number of times enters a calming @@ -423,7 +423,7 @@ def __init__(self) -> None: self.punishment_count = 0 self.punishment_limit = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if self.calming: self.calming = False @@ -448,7 +448,7 @@ def strategy(self, opponent: Player) -> Action: return C -class Gradual(Player): +class Gradual(IpdPlayer): """ Similar to OriginalGradual, this is a player that punishes defections with a growing number of defections but after punishing for `punishment_limit` @@ -489,7 +489,7 @@ def __init__(self) -> None: self.calm_count = 0 self.punish_count = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(self.history) == 0: return C @@ -510,7 +510,7 @@ def strategy(self, opponent: Player) -> Action: @TrackHistoryTransformer(name_prefix=None) -class ContriteTitForTat(Player): +class ContriteTitForTat(IpdPlayer): """ A player that corresponds to Tit For Tat if there is no noise. 
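The OriginalGradual/Gradual hunks above rename types without touching the growing-punishment logic their docstrings describe. A simplified sketch of that bookkeeping; the real classes also track a calming phase and a punishment limit, omitted here:

class GradualSketch:
    def __init__(self):
        self.punish_count = 0      # defections still owed
        self.defections_seen = 0   # opponent defections so far

    def action(self, opponent_last: str) -> str:
        if self.punish_count > 0:  # keep retaliating
            self.punish_count -= 1
            return "D"
        if opponent_last == "D":   # punish as many times as total defections seen
            self.defections_seen += 1
            self.punish_count = self.defections_seen - 1
            return "D"
        return "C"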
In the case of a noisy match: if the opponent defects as a result of a noisy defection @@ -538,7 +538,7 @@ def __init__(self): self.contrite = False self._recorded_history = [] - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not opponent.history: return C @@ -556,7 +556,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class AdaptiveTitForTat(Player): +class AdaptiveTitForTat(IpdPlayer): """ATFT - Adaptive Tit For Tat (Basic Model) Algorithm @@ -606,7 +606,7 @@ def __init__(self, rate: float = 0.5) -> None: self.rate = rate self.world = rate - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if len(opponent.history) == 0: return C @@ -622,7 +622,7 @@ def strategy(self, opponent: Player) -> Action: return D -class SpitefulTitForTat(Player): +class SpitefulTitForTat(IpdPlayer): """ A player starts by cooperating and then mimics the previous action of the opponent until opponent defects twice in a row, at which point player @@ -648,7 +648,7 @@ def __init__(self) -> None: super().__init__() self.retaliating = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # First move if not self.history: return C @@ -665,7 +665,7 @@ def strategy(self, opponent: Player) -> Action: return C -class SlowTitForTwoTats2(Player): +class SlowTitForTwoTats2(IpdPlayer): """ A player plays C twice, then if the opponent plays the same move twice, plays that move, otherwise plays previous move. @@ -686,7 +686,7 @@ class SlowTitForTwoTats2(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # Start with two cooperations if len(self.history) < 2: @@ -701,7 +701,7 @@ def strategy(self, opponent: Player) -> Action: @FinalTransformer((D,), name_prefix=None) -class Alexei(Player): +class Alexei(IpdPlayer): """ Plays similar to Tit-for-Tat, but always defect on last turn. 
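The AdaptiveTitForTat hunk above leaves the `world`/`rate` machinery untouched. A sketch of the update rule its docstring describes, with the 0.5 play-threshold taken as an assumption:

def atft_step(world: float, rate: float, opponent_cooperated: bool) -> float:
    # Pull the world estimate toward 1 on cooperation, toward 0 on defection.
    if opponent_cooperated:
        return world + rate * (1 - world)
    return world - rate * world

world = 0.5
for cooperated in [True, True, False]:   # opponent plays C, C, D
    world = atft_step(world, rate=0.5, opponent_cooperated=cooperated)
print("C" if world >= 0.5 else "D")      # -> D, since world is now 0.4375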
@@ -721,7 +721,7 @@ class Alexei(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C if opponent.history[-1] == D: @@ -730,7 +730,7 @@ def strategy(self, opponent: Player) -> Action: @FinalTransformer((D,), name_prefix=None) -class EugineNier(Player): +class EugineNier(IpdPlayer): """ Plays similar to Tit-for-Tat, but with two conditions: 1) Always Defect on Last Move @@ -756,7 +756,7 @@ def __init__(self): super().__init__() self.is_defector = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C if not (self.is_defector) and opponent.defections >= 5: @@ -766,7 +766,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class NTitsForMTats(Player): +class NTitsForMTats(IpdPlayer): """ A parameterizable Tit-for-Tat, The arguments are: @@ -812,7 +812,7 @@ def __init__(self, N: int = 3, M: int = 2) -> None: self.classifier["memory_depth"] = max([M, N]) self.retaliate_count = 0 - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: # if opponent defected consecutively M times, start the retaliation if not self.M or opponent.history[-self.M :].count(D) == self.M: self.retaliate_count = self.N @@ -823,7 +823,7 @@ def strategy(self, opponent: Player) -> Action: @FinalTransformer((D,), name_prefix=None) -class Michaelos(Player): +class Michaelos(IpdPlayer): """ Plays similar to Tit-for-Tat with two exceptions: 1) Defect on last turn. @@ -851,7 +851,7 @@ def __init__(self): super().__init__() self.is_defector = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: if not self.history: return C if self.is_defector: @@ -867,7 +867,7 @@ def strategy(self, opponent: Player) -> Action: return opponent.history[-1] -class RandomTitForTat(Player): +class RandomTitForTat(IpdPlayer): """ A player starts by cooperating and then follows by copying its opponent (tit for tat style). 
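The NTitsForMTats hunk above shows the retaliation counter directly: M consecutive opponent defections arm N rounds of retaliation. A standalone version for a quick check (actions encoded as strings for brevity):

def ntft_action(opp_history, state, N=3, M=2) -> str:
    if opp_history[-M:].count("D") == M:   # M defections in a row: arm N tits
        state["retaliate"] = N
    if state["retaliate"] > 0:
        state["retaliate"] -= 1
        return "D"
    return "C"

state, history = {"retaliate": 0}, []
for opp in ["C", "D", "D", "C", "C", "C"]:
    history.append(opp)
    print(ntft_action(history, state), end=" ")   # C C D D D C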
From then on the player @@ -904,7 +904,7 @@ def __init__(self, p: float = 0.5) -> None: if p in [0, 1]: self.classifier["stochastic"] = False - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: """This is the actual strategy""" if not self.history: return C diff --git a/axelrod/strategies/verybad.py b/axelrod/ipd/strategies/verybad.py similarity index 88% rename from axelrod/strategies/verybad.py rename to axelrod/ipd/strategies/verybad.py index 7a50b8fe5..ecfb5bf72 100644 --- a/axelrod/strategies/verybad.py +++ b/axelrod/ipd/strategies/verybad.py @@ -1,10 +1,10 @@ -from axelrod.action import Action -from axelrod.player import Player +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer C, D = Action.C, Action.D -class VeryBad(Player): +class VeryBad(IpdPlayer): """ It cooperates in the first three rounds, and uses probability (it implements a memory, which stores the opponent’s moves) to decide for @@ -32,7 +32,7 @@ class VeryBad(Player): } @staticmethod - def strategy(opponent: Player) -> Action: + def strategy(opponent: IpdPlayer) -> Action: total_moves = len(opponent.history) if total_moves < 3: diff --git a/axelrod/strategies/worse_and_worse.py b/axelrod/ipd/strategies/worse_and_worse.py similarity index 86% rename from axelrod/strategies/worse_and_worse.py rename to axelrod/ipd/strategies/worse_and_worse.py index cd80ca822..542a2edb2 100644 --- a/axelrod/strategies/worse_and_worse.py +++ b/axelrod/ipd/strategies/worse_and_worse.py @@ -1,11 +1,11 @@ -from axelrod.action import Action -from axelrod.player import Player -from axelrod.random_ import random_choice +from axelrod.ipd.action import Action +from axelrod.ipd.player import IpdPlayer +from axelrod.ipd.random_ import random_choice C, D = Action.C, Action.D -class WorseAndWorse(Player): +class WorseAndWorse(IpdPlayer): """ Defects with probability of 'current turn / 1000'. Therefore it is more and more likely to defect as the round goes on. @@ -28,13 +28,13 @@ class WorseAndWorse(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 probability = 1 - current_round / 1000 return random_choice(probability) -class KnowledgeableWorseAndWorse(Player): +class KnowledgeableWorseAndWorse(IpdPlayer): """ This strategy is based on 'Worse And Worse' but will defect with probability of 'current turn / total no. of turns'. @@ -54,14 +54,14 @@ class KnowledgeableWorseAndWorse(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 expected_length = self.match_attributes["length"] probability = 1 - current_round / expected_length return random_choice(probability) -class WorseAndWorse2(Player): +class WorseAndWorse2(IpdPlayer): """ Plays as tit for tat during the first 20 moves. Then defects with probability (current turn - 20) / current turn. @@ -82,7 +82,7 @@ class WorseAndWorse2(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 if current_round == 1: @@ -94,7 +94,7 @@ def strategy(self, opponent: Player) -> Action: return random_choice(probability) -class WorseAndWorse3(Player): +class WorseAndWorse3(IpdPlayer): """ Cooperates in the first turn. Then defects with probability no. 
of opponent defects / (current turn - 1). @@ -116,7 +116,7 @@ class WorseAndWorse3(Player): "manipulates_state": False, } - def strategy(self, opponent: Player) -> Action: + def strategy(self, opponent: IpdPlayer) -> Action: current_round = len(self.history) + 1 if current_round == 1: diff --git a/axelrod/strategies/zero_determinant.py b/axelrod/ipd/strategies/zero_determinant.py similarity index 99% rename from axelrod/strategies/zero_determinant.py rename to axelrod/ipd/strategies/zero_determinant.py index aaacfb130..f7287f3d9 100644 --- a/axelrod/strategies/zero_determinant.py +++ b/axelrod/ipd/strategies/zero_determinant.py @@ -1,4 +1,4 @@ -from axelrod.action import Action +from axelrod.ipd.action import Action from .memoryone import MemoryOnePlayer diff --git a/axelrod/strategy_transformers.py b/axelrod/ipd/strategy_transformers.py similarity index 95% rename from axelrod/strategy_transformers.py rename to axelrod/ipd/strategy_transformers.py index 6d50d77c2..3f8abe8f0 100644 --- a/axelrod/strategy_transformers.py +++ b/axelrod/ipd/strategy_transformers.py @@ -14,9 +14,9 @@ from numpy.random import choice -from axelrod.strategies.sequence_player import SequencePlayer +from axelrod.ipd.strategies.sequence_player import SequencePlayer from .action import Action -from .player import Player +from .player import IpdPlayer from .random_ import random_choice C, D = Action.C, Action.D @@ -69,8 +69,8 @@ def __call__(self, PlayerClass): """ Parameters ---------- - PlayerClass: A subclass of axelrod.Player, e.g. Cooperator - The Player Class to modify + PlayerClass: A subclass of axelrod.IpdPlayer, e.g. Cooperator + The IpdPlayer Class to modify Returns ------- @@ -96,7 +96,7 @@ def __call__(self, PlayerClass): # with `strategy_wrapper` def strategy(self, opponent): if strategy_wrapper == dual_wrapper: - # dual_wrapper figures out strategy as if the Player had + # dual_wrapper figures out strategy as if the IpdPlayer had # played the opposite actions of its current history. self._history = self.history.flip_plays() @@ -107,7 +107,7 @@ def strategy(self, opponent): if strategy_wrapper == dual_wrapper: # After dual_wrapper calls the strategy, it returns - # the Player to its original state. + # the IpdPlayer to its original state. self._history = self.history.flip_plays() # Apply the wrapper @@ -120,9 +120,9 @@ def strategy(self, opponent): name = PlayerClass.name name_prefix = self.name_prefix if name_prefix: - # Modify the Player name (class variable inherited from Player) + # Modify the IpdPlayer name (class variable inherited from IpdPlayer) new_class_name = "".join([name_prefix, PlayerClass.__name__]) - # Modify the Player name (class variable inherited from Player) + # Modify the IpdPlayer name (class variable inherited from IpdPlayer) name = " ".join([name_prefix, PlayerClass.name]) original_classifier = copy.deepcopy(PlayerClass.classifier) # Copy @@ -199,7 +199,7 @@ def reduce_for_decorated_class(self_): return Decorator -def player_can_be_pickled(player: Player) -> bool: +def player_can_be_pickled(player: IpdPlayer) -> bool: """ Returns True if pickle.dump(player) does not raise pickle.PicklingError. """ @@ -246,7 +246,7 @@ class StrategyReBuilder(object): that could not normally be pickled.
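The transformer factory above is pure plumbing in this patch, but the renamed pieces compose exactly as before: a transformer instance decorates a player class and returns a modified class. A usage sketch, assuming the patch is installed:

import axelrod as axl
from axelrod.ipd.strategy_transformers import FlipTransformer

FlippedCooperator = FlipTransformer()(axl.Cooperator)   # decorate the class
player = FlippedCooperator()                            # named "Flipped Cooperator"
match = axl.IpdMatch((player, axl.Cooperator()), turns=3)
print(match.play())   # the flipped side defects: [(D, C), (D, C), (D, C)]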
""" - def __call__(self, decorators: list, import_name: str, module_name: str) -> Player: + def __call__(self, decorators: list, import_name: str, module_name: str) -> IpdPlayer: module_ = import_module(module_name) import_class = getattr(module_, import_name) @@ -281,11 +281,11 @@ def generic_strategy_wrapper(player, opponent, proposed_action, *args, **kwargs) Parameters ---------- - player: Player object or subclass (self) - opponent: Player object or subclass + player: IpdPlayer object or subclass (self) + opponent: IpdPlayer object or subclass proposed_action: an axelrod.Action, C or D The proposed action by the wrapped strategy - proposed_action = Player.strategy(...) + proposed_action = IpdPlayer.strategy(...) args, kwargs: Any additional arguments that you need. @@ -310,7 +310,7 @@ def flip_wrapper(player, opponent, action): FlipTransformer = StrategyTransformerFactory(flip_wrapper, name_prefix="Flipped") -def dual_wrapper(player, opponent: Player, proposed_action: Action) -> Action: +def dual_wrapper(player, opponent: IpdPlayer, proposed_action: Action) -> Action: """Wraps the players strategy function to produce the Dual. The Dual of a strategy will return the exact opposite set of moves to the @@ -321,8 +321,8 @@ def dual_wrapper(player, opponent: Player, proposed_action: Action) -> Action: Parameters ---------- - player: Player object or subclass (self) - opponent: Player object or subclass + player: IpdPlayer object or subclass (self) + opponent: IpdPlayer object or subclass proposed_action: axelrod.Action, C or D The proposed action by the wrapped strategy @@ -588,8 +588,8 @@ def joss_ann_wrapper(player, opponent, proposed_action, probability): Parameters ---------- - player: Player object or subclass (self) - opponent: Player object or subclass + player: IpdPlayer object or subclass (self) + opponent: IpdPlayer object or subclass proposed_action: axelrod.Action, C or D The proposed action by the wrapped strategy probability: tuple diff --git a/axelrod/tests/integration/__init__.py b/axelrod/ipd/tests/__init__.py similarity index 100% rename from axelrod/tests/integration/__init__.py rename to axelrod/ipd/tests/__init__.py diff --git a/axelrod/tests/strategies/__init__.py b/axelrod/ipd/tests/integration/__init__.py similarity index 100% rename from axelrod/tests/strategies/__init__.py rename to axelrod/ipd/tests/integration/__init__.py diff --git a/axelrod/tests/integration/test_filtering.py b/axelrod/ipd/tests/integration/test_filtering.py similarity index 98% rename from axelrod/tests/integration/test_filtering.py rename to axelrod/ipd/tests/integration/test_filtering.py index bce495b76..da6a0f24c 100644 --- a/axelrod/tests/integration/test_filtering.py +++ b/axelrod/ipd/tests/integration/test_filtering.py @@ -2,7 +2,7 @@ import warnings import axelrod as axl -from axelrod.tests.property import strategy_lists +from axelrod.ipd.tests.property import strategy_lists from hypothesis import example, given, settings from hypothesis.strategies import integers diff --git a/axelrod/tests/integration/test_matches.py b/axelrod/ipd/tests/integration/test_matches.py similarity index 87% rename from axelrod/tests/integration/test_matches.py rename to axelrod/ipd/tests/integration/test_matches.py index a627fe885..44251d233 100644 --- a/axelrod/tests/integration/test_matches.py +++ b/axelrod/ipd/tests/integration/test_matches.py @@ -2,7 +2,7 @@ import unittest import axelrod as axl -from axelrod.tests.property import strategy_lists +from axelrod.ipd.tests.property import strategy_lists from 
hypothesis import given, settings from hypothesis.strategies import integers @@ -29,7 +29,7 @@ def test_outcome_repeats(self, strategies, turns): """A test that if we repeat 3 matches with deterministic and well behaved strategies then we get the same result""" players = [s() for s in strategies] - matches = [axl.Match(players, turns) for _ in range(3)] + matches = [axl.IpdMatch(players, turns) for _ in range(3)] self.assertEqual(matches[0].play(), matches[1].play()) self.assertEqual(matches[1].play(), matches[2].play()) @@ -48,7 +48,7 @@ def test_outcome_repeats_stochastic(self, strategies, turns, seed): for _ in range(3): axl.seed(seed) players = [s() for s in strategies] - results.append(axl.Match(players, turns).play()) + results.append(axl.IpdMatch(players, turns).play()) self.assertEqual(results[0], results[1]) self.assertEqual(results[1], results[2]) @@ -61,11 +61,11 @@ def test_matches_with_det_player_for_stochastic_classes(self): p2 = axl.MemoryOnePlayer(four_vector=(1, 0, 1, 0)) p3 = axl.MemoryOnePlayer(four_vector=(1, 1, 1, 0)) - m = axl.Match((p1, p2), turns=3) + m = axl.IpdMatch((p1, p2), turns=3) self.assertEqual(m.play(), [(C, C), (D, C), (D, D)]) - m = axl.Match((p2, p3), turns=3) + m = axl.IpdMatch((p2, p3), turns=3) self.assertEqual(m.play(), [(C, C), (C, C), (C, C)]) - m = axl.Match((p1, p3), turns=3) + m = axl.IpdMatch((p1, p3), turns=3) self.assertEqual(m.play(), [(C, C), (D, C), (D, C)]) diff --git a/axelrod/tests/integration/test_names.py b/axelrod/ipd/tests/integration/test_names.py similarity index 100% rename from axelrod/tests/integration/test_names.py rename to axelrod/ipd/tests/integration/test_names.py diff --git a/axelrod/tests/integration/test_sample_tournaments.py b/axelrod/ipd/tests/integration/test_sample_tournaments.py similarity index 96% rename from axelrod/tests/integration/test_sample_tournaments.py rename to axelrod/ipd/tests/integration/test_sample_tournaments.py index e98aa9ad2..c8b08e2e8 100644 --- a/axelrod/tests/integration/test_sample_tournaments.py +++ b/axelrod/ipd/tests/integration/test_sample_tournaments.py @@ -8,17 +8,17 @@ class TestSampleTournaments(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() @classmethod def get_test_outcome(cls, outcome, turns=10): # Extract the name of players from the outcome tuples, - # and initiate the players by getting the classes from axelrod. + # and initiate the players by getting the classes from axelrod.ipd. names = [out[0] for out in outcome] players = [getattr(axl, n)() for n in names] # Play the tournament and build the actual outcome tuples. 
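The integration tests above lean on one idiom: re-seed, rebuild the players, replay. Condensed under the renamed API:

import axelrod as axl

results = []
for _ in range(2):
    axl.seed(5)                                   # same seed before each replay
    players = (axl.Random(), axl.TitForTat())
    results.append(axl.IpdMatch(players, turns=10).play())
assert results[0] == results[1]                   # a stochastic match repeats exactly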
- tournament = axl.Tournament( + tournament = axl.IpdTournament( players=players, game=cls.game, turns=turns, repetitions=1 ) results = tournament.play(progress_bar=False) diff --git a/axelrod/tests/integration/test_tournament.py b/axelrod/ipd/tests/integration/test_tournament.py similarity index 85% rename from axelrod/tests/integration/test_tournament.py rename to axelrod/ipd/tests/integration/test_tournament.py index 2ab59c974..8b46c3546 100644 --- a/axelrod/tests/integration/test_tournament.py +++ b/axelrod/ipd/tests/integration/test_tournament.py @@ -4,9 +4,9 @@ import pathlib import axelrod as axl -from axelrod.load_data_ import axl_filename -from axelrod.strategy_transformers import FinalTransformer -from axelrod.tests.property import tournaments +from axelrod.ipd.load_data_ import axl_filename +from axelrod.ipd.strategy_transformers import FinalTransformer +from axelrod.ipd.tests.property import tournaments from hypothesis import given, settings @@ -14,7 +14,7 @@ class TestTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [ axl.Cooperator(), axl.TitForTat(), @@ -50,14 +50,14 @@ def setUpClass(cls): def test_big_tournaments(self, tournament): """A test to check that tournament runs with a sample of non-cheating strategies.""" - path = pathlib.Path("test_outputs/test_tournament.csv") + path = pathlib.Path("../test_outputs/test_tournament.csv") filename = axl_filename(path) self.assertIsNone( tournament.play(progress_bar=False, filename=filename, build_results=False) ) def test_serial_play(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -69,7 +69,7 @@ def test_serial_play(self): self.assertEqual(actual_outcome, self.expected_outcome) def test_parallel_play(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -89,14 +89,14 @@ def test_repeat_tournament_deterministic(self): ] files = [] for _ in range(2): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name="test", players=deterministic_players, game=self.game, turns=2, repetitions=2, ) - path = pathlib.Path("test_outputs/stochastic_tournament_{}.csv".format(_)) + path = pathlib.Path("../test_outputs/stochastic_tournament_{}.csv".format(_)) files.append(axl_filename(path)) tournament.play(progress_bar=False, filename=files[-1], build_results=False) self.assertTrue(filecmp.cmp(files[0], files[1])) @@ -113,14 +113,14 @@ def test_repeat_tournament_stochastic(self): for s in axl.short_run_time_strategies if axl.Classifiers["stochastic"](s()) ] - tournament = axl.Tournament( + tournament = axl.IpdTournament( name="test", players=stochastic_players, game=self.game, turns=2, repetitions=2, ) - path = pathlib.Path("test_outputs/stochastic_tournament_{}.csv".format(_)) + path = pathlib.Path("../test_outputs/stochastic_tournament_{}.csv".format(_)) files.append(axl_filename(path)) tournament.play(progress_bar=False, filename=files[-1], build_results=False) self.assertTrue(filecmp.cmp(files[0], files[1])) @@ -130,13 +130,13 @@ class TestNoisyTournament(unittest.TestCase): def test_noisy_tournament(self): # Defector should win for low noise players = [axl.Cooperator(), axl.Defector()] - tournament = axl.Tournament(players, turns=5, repetitions=3, noise=0.0) + tournament = axl.IpdTournament(players, turns=5, repetitions=3, noise=0.0) results = tournament.play(progress_bar=False) 
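A minimal end-to-end run of the renamed tournament class, mirroring the noisy-tournament test above (noise omitted):

import axelrod as axl

players = [axl.Cooperator(), axl.Defector(), axl.TitForTat()]
tournament = axl.IpdTournament(players, turns=10, repetitions=3)
results = tournament.play(progress_bar=False)
print(results.ranked_names)   # Defector tops this tiny deterministic field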
self.assertEqual(results.ranked_names[0], "Defector") # If the noise is large enough, cooperator should win players = [axl.Cooperator(), axl.Defector()] - tournament = axl.Tournament(players, turns=5, repetitions=3, noise=0.75) + tournament = axl.IpdTournament(players, turns=5, repetitions=3, noise=0.75) results = tournament.play(progress_bar=False) self.assertEqual(results.ranked_names[0], "Cooperator") @@ -149,7 +149,7 @@ def test_players_do_not_know_match_length(self): p1 = FinalTransformer(["D", "D"])(axl.Cooperator)() p2 = FinalTransformer(["D", "D"])(axl.Cooperator)() players = [p1, p2] - tournament = axl.Tournament(players, prob_end=0.5, repetitions=1) + tournament = axl.IpdTournament(players, prob_end=0.5, repetitions=1) results = tournament.play(progress_bar=False) # Check that both plays always cooperated for rating in results.cooperating_rating: @@ -165,7 +165,7 @@ def test_matches_have_different_length(self): p3 = axl.Cooperator() players = [p1, p2, p3] axl.seed(0) - tournament = axl.Tournament(players, prob_end=0.5, repetitions=2) + tournament = axl.IpdTournament(players, prob_end=0.5, repetitions=2) results = tournament.play(progress_bar=False) # Check that match length are different across the repetitions self.assertNotEqual(results.match_lengths[0], results.match_lengths[1]) diff --git a/axelrod/tests/property.py b/axelrod/ipd/tests/property.py similarity index 96% rename from axelrod/tests/property.py rename to axelrod/ipd/tests/property.py index 95d08e2b8..705acbdc9 100644 --- a/axelrod/tests/property.py +++ b/axelrod/ipd/tests/property.py @@ -61,7 +61,7 @@ def matches( players = [s() for s in strategies] turns = draw(integers(min_value=min_turns, max_value=max_turns)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - match = axl.Match(players, turns=turns, noise=noise) + match = axl.IpdMatch(players, turns=turns, noise=noise) return match @@ -108,7 +108,7 @@ def tournaments( repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - tournament = axl.Tournament(players, turns=turns, repetitions=repetitions, noise=noise) + tournament = axl.IpdTournament(players, turns=turns, repetitions=repetitions, noise=noise) return tournament @@ -155,7 +155,7 @@ def prob_end_tournaments( repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - tournament = axl.Tournament( + tournament = axl.IpdTournament( players, prob_end=prob_end, repetitions=repetitions, noise=noise ) return tournament @@ -223,7 +223,7 @@ def spatial_tournaments( repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - tournament = axl.Tournament( + tournament = axl.IpdTournament( players, turns=turns, repetitions=repetitions, noise=noise, edges=edges ) return tournament @@ -291,7 +291,7 @@ def prob_end_spatial_tournaments( repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) noise = draw(floats(min_value=min_noise, max_value=max_noise)) - tournament = axl.Tournament( + tournament = axl.IpdTournament( players, prob_end=prob_end, repetitions=repetitions, noise=noise, edges=edges ) return tournament @@ -331,5 +331,5 @@ def games(draw, prisoners_dilemma=True, max_value=100): r = draw(integers(max_value=max_value)) p = draw(integers(max_value=max_value)) - game = axl.Game(r=r, s=s, t=t, p=p) + game = 
axl.IpdGame(r=r, s=s, t=t, p=p) return game diff --git a/axelrod/tests/unit/__init__.py b/axelrod/ipd/tests/strategies/__init__.py similarity index 100% rename from axelrod/tests/unit/__init__.py rename to axelrod/ipd/tests/strategies/__init__.py diff --git a/axelrod/tests/strategies/test_adaptive.py b/axelrod/ipd/tests/strategies/test_adaptive.py similarity index 96% rename from axelrod/tests/strategies/test_adaptive.py rename to axelrod/ipd/tests/strategies/test_adaptive.py index 1fbca26f4..5b796ce4e 100644 --- a/axelrod/tests/strategies/test_adaptive.py +++ b/axelrod/ipd/tests/strategies/test_adaptive.py @@ -40,7 +40,7 @@ def test_scoring(self): player.play(opponent) player.play(opponent) self.assertEqual(3, player.scores[C]) - game = axl.Game(-3, 10, 10, 10) + game = axl.IpdGame(-3, 10, 10, 10) player.set_match_attributes(game=game) player.play(opponent) self.assertEqual(0, player.scores[C]) diff --git a/axelrod/tests/strategies/test_adaptor.py b/axelrod/ipd/tests/strategies/test_adaptor.py similarity index 100% rename from axelrod/tests/strategies/test_adaptor.py rename to axelrod/ipd/tests/strategies/test_adaptor.py diff --git a/axelrod/tests/strategies/test_alternator.py b/axelrod/ipd/tests/strategies/test_alternator.py similarity index 100% rename from axelrod/tests/strategies/test_alternator.py rename to axelrod/ipd/tests/strategies/test_alternator.py diff --git a/axelrod/tests/strategies/test_ann.py b/axelrod/ipd/tests/strategies/test_ann.py similarity index 96% rename from axelrod/tests/strategies/test_ann.py rename to axelrod/ipd/tests/strategies/test_ann.py index 3a63d2131..e185577c2 100644 --- a/axelrod/tests/strategies/test_ann.py +++ b/axelrod/ipd/tests/strategies/test_ann.py @@ -2,9 +2,9 @@ import unittest import axelrod as axl -from axelrod.evolvable_player import InsufficientParametersError -from axelrod.load_data_ import load_weights -from axelrod.strategies.ann import split_weights +from axelrod.ipd.evolvable_player import InsufficientParametersError +from axelrod.ipd.load_data_ import load_weights +from axelrod.ipd.strategies.ann import split_weights from .test_player import TestPlayer from .test_evolvable_player import PartialClass, TestEvolvablePlayer diff --git a/axelrod/tests/strategies/test_apavlov.py b/axelrod/ipd/tests/strategies/test_apavlov.py similarity index 100% rename from axelrod/tests/strategies/test_apavlov.py rename to axelrod/ipd/tests/strategies/test_apavlov.py diff --git a/axelrod/tests/strategies/test_appeaser.py b/axelrod/ipd/tests/strategies/test_appeaser.py similarity index 100% rename from axelrod/tests/strategies/test_appeaser.py rename to axelrod/ipd/tests/strategies/test_appeaser.py diff --git a/axelrod/tests/strategies/test_averagecopier.py b/axelrod/ipd/tests/strategies/test_averagecopier.py similarity index 100% rename from axelrod/tests/strategies/test_averagecopier.py rename to axelrod/ipd/tests/strategies/test_averagecopier.py diff --git a/axelrod/tests/strategies/test_axelrod_first.py b/axelrod/ipd/tests/strategies/test_axelrod_first.py similarity index 98% rename from axelrod/tests/strategies/test_axelrod_first.py rename to axelrod/ipd/tests/strategies/test_axelrod_first.py index 35ee2d5f4..1327757a3 100644 --- a/axelrod/tests/strategies/test_axelrod_first.py +++ b/axelrod/ipd/tests/strategies/test_axelrod_first.py @@ -100,20 +100,20 @@ def test_cooperation_probability(self): p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.8, rounds_of_decay=100) self.assertEqual(1.0, p1._cooperation_probability()) p2 = axl.Cooperator() - 
match = axl.Match((p1, p2), turns=50) + match = axl.IpdMatch((p1, p2), turns=50) match.play() self.assertEqual(0.9, p1._cooperation_probability()) - match = axl.Match((p1, p2), turns=100) + match = axl.IpdMatch((p1, p2), turns=100) match.play() self.assertEqual(0.8, p1._cooperation_probability()) # Test cooperation probabilities, second set of params p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.5, rounds_of_decay=200) self.assertEqual(1.0, p1._cooperation_probability()) - match = axl.Match((p1, p2), turns=100) + match = axl.IpdMatch((p1, p2), turns=100) match.play() self.assertEqual(0.75, p1._cooperation_probability()) - match = axl.Match((p1, p2), turns=200) + match = axl.IpdMatch((p1, p2), turns=200) match.play() self.assertEqual(0.5, p1._cooperation_probability()) @@ -122,7 +122,7 @@ def test_decay(self): for opponent in [axl.Cooperator(), axl.Defector()]: player = self.player() self.assertEqual(player._cooperation_probability(), player._start_coop_prob) - match = axl.Match((player, opponent), turns=201) + match = axl.IpdMatch((player, opponent), turns=201) match.play() self.assertEqual(player._cooperation_probability(), player._end_coop_prob) @@ -487,7 +487,7 @@ def test_init(self): self.assertFalse(player.opponent_is_random) def test_strategy(self): - # Our Player (SteinAndRapoport) vs Cooperator + # Our IpdPlayer (SteinAndRapoport) vs Cooperator # After 15th round (pvalue < alpha) still plays TitForTat. # Note it always defects on the last two rounds. opponent = axl.Cooperator() diff --git a/axelrod/tests/strategies/test_axelrod_second.py b/axelrod/ipd/tests/strategies/test_axelrod_second.py similarity index 99% rename from axelrod/tests/strategies/test_axelrod_second.py rename to axelrod/ipd/tests/strategies/test_axelrod_second.py index 7c9122d8d..1b89e5b36 100644 --- a/axelrod/tests/strategies/test_axelrod_second.py +++ b/axelrod/ipd/tests/strategies/test_axelrod_second.py @@ -410,7 +410,7 @@ def test_strategy(self): # Test to make sure logic matches Fortran (discrepancy found 8/23/2017) opponent = axl.AntiTitForTat() - # Actions come from a match run by Axelrod Fortran using Player('k86r') + # Actions come from a match run by Axelrod Fortran using IpdPlayer('k86r') actions = [ (C, C), (C, D), @@ -1157,7 +1157,7 @@ def test_strategy(self): expected_actions += [(D, C)] random.seed(10) player = self.player() - match = axl.Match((player, axl.Random()), turns=len(expected_actions)) + match = axl.IpdMatch((player, axl.Random()), turns=len(expected_actions)) # The history matrix will be [[0, 2], [5, 6], [3, 6], [4, 2]] actions = match.play() self.assertEqual(actions, expected_actions) diff --git a/axelrod/tests/strategies/test_backstabber.py b/axelrod/ipd/tests/strategies/test_backstabber.py similarity index 100% rename from axelrod/tests/strategies/test_backstabber.py rename to axelrod/ipd/tests/strategies/test_backstabber.py diff --git a/axelrod/tests/strategies/test_better_and_better.py b/axelrod/ipd/tests/strategies/test_better_and_better.py similarity index 100% rename from axelrod/tests/strategies/test_better_and_better.py rename to axelrod/ipd/tests/strategies/test_better_and_better.py diff --git a/axelrod/tests/strategies/test_bush_mosteller.py b/axelrod/ipd/tests/strategies/test_bush_mosteller.py similarity index 100% rename from axelrod/tests/strategies/test_bush_mosteller.py rename to axelrod/ipd/tests/strategies/test_bush_mosteller.py diff --git a/axelrod/tests/strategies/test_calculator.py b/axelrod/ipd/tests/strategies/test_calculator.py similarity index 97% rename 
from axelrod/tests/strategies/test_calculator.py rename to axelrod/ipd/tests/strategies/test_calculator.py index 3b2dc66c8..938cfc2d4 100644 --- a/axelrod/tests/strategies/test_calculator.py +++ b/axelrod/ipd/tests/strategies/test_calculator.py @@ -1,7 +1,7 @@ """Tests for Calculator strategy.""" import axelrod as axl -from axelrod._strategy_utils import detect_cycle +from axelrod.ipd._strategy_utils import detect_cycle from .test_player import TestPlayer @@ -152,7 +152,7 @@ def get_joss_strategy_actions(opponent_moves: list, indices_to_flip: list) -> li """ Takes a list of opponent moves and returns a tuple list of [(Joss moves, opponent moves)]. "indices_to_flip" are the indices where Joss differs from it's expected TitForTat. - Joss is from axelrod.strategies.axelrod_first. + Joss is from axelrod.ipd.strategies.axelrod_first. """ out = [] for index, action in enumerate(opponent_moves): diff --git a/axelrod/tests/strategies/test_cooperator.py b/axelrod/ipd/tests/strategies/test_cooperator.py similarity index 100% rename from axelrod/tests/strategies/test_cooperator.py rename to axelrod/ipd/tests/strategies/test_cooperator.py diff --git a/axelrod/tests/strategies/test_cycler.py b/axelrod/ipd/tests/strategies/test_cycler.py similarity index 97% rename from axelrod/tests/strategies/test_cycler.py rename to axelrod/ipd/tests/strategies/test_cycler.py index 83de4f135..39f819d61 100644 --- a/axelrod/tests/strategies/test_cycler.py +++ b/axelrod/ipd/tests/strategies/test_cycler.py @@ -4,9 +4,9 @@ import random import axelrod as axl -from axelrod._strategy_utils import detect_cycle -from axelrod.action import Action, str_to_actions -from axelrod.evolvable_player import InsufficientParametersError +from axelrod.ipd._strategy_utils import detect_cycle +from axelrod.ipd.action import Action, str_to_actions +from axelrod.ipd.evolvable_player import InsufficientParametersError from .test_player import TestPlayer from .test_evolvable_player import PartialClass, TestEvolvablePlayer diff --git a/axelrod/tests/strategies/test_darwin.py b/axelrod/ipd/tests/strategies/test_darwin.py similarity index 100% rename from axelrod/tests/strategies/test_darwin.py rename to axelrod/ipd/tests/strategies/test_darwin.py diff --git a/axelrod/tests/strategies/test_dbs.py b/axelrod/ipd/tests/strategies/test_dbs.py similarity index 99% rename from axelrod/tests/strategies/test_dbs.py rename to axelrod/ipd/tests/strategies/test_dbs.py index 93d61e41a..055c13ed6 100644 --- a/axelrod/tests/strategies/test_dbs.py +++ b/axelrod/ipd/tests/strategies/test_dbs.py @@ -3,7 +3,7 @@ import unittest import axelrod as axl -from axelrod.strategies import dbs +from axelrod.ipd.strategies import dbs from .test_player import TestPlayer diff --git a/axelrod/tests/strategies/test_defector.py b/axelrod/ipd/tests/strategies/test_defector.py similarity index 100% rename from axelrod/tests/strategies/test_defector.py rename to axelrod/ipd/tests/strategies/test_defector.py diff --git a/axelrod/tests/strategies/test_doubler.py b/axelrod/ipd/tests/strategies/test_doubler.py similarity index 100% rename from axelrod/tests/strategies/test_doubler.py rename to axelrod/ipd/tests/strategies/test_doubler.py diff --git a/axelrod/tests/strategies/test_evolvable_player.py b/axelrod/ipd/tests/strategies/test_evolvable_player.py similarity index 96% rename from axelrod/tests/strategies/test_evolvable_player.py rename to axelrod/ipd/tests/strategies/test_evolvable_player.py index ccb6ac37d..0712c726b 100644 --- 
a/axelrod/tests/strategies/test_evolvable_player.py +++ b/axelrod/ipd/tests/strategies/test_evolvable_player.py @@ -3,8 +3,8 @@ import random import axelrod as axl -from axelrod.action import Action -from axelrod.evolvable_player import copy_lists, crossover_lists, crossover_dictionaries +from axelrod.ipd.action import Action +from axelrod.ipd.evolvable_player import copy_lists, crossover_lists, crossover_dictionaries from .test_player import TestPlayer C, D = Action.C, Action.D @@ -155,12 +155,12 @@ def behavior_test(self, player1, player2): for opponent_class in [axl.Random, axl.TitForTat, axl.Alternator]: axl.seed(0) opponent = opponent_class() - match = axl.Match((player1.clone(), opponent)) + match = axl.IpdMatch((player1.clone(), opponent)) results1 = match.play() axl.seed(0) opponent = opponent_class() - match = axl.Match((player2.clone(), opponent)) + match = axl.IpdMatch((player2.clone(), opponent)) results2 = match.play() self.assertEqual(results1, results2) diff --git a/axelrod/tests/strategies/test_finite_state_machines.py b/axelrod/ipd/tests/strategies/test_finite_state_machines.py similarity index 98% rename from axelrod/tests/strategies/test_finite_state_machines.py rename to axelrod/ipd/tests/strategies/test_finite_state_machines.py index 12fe52e5d..25f192de8 100644 --- a/axelrod/tests/strategies/test_finite_state_machines.py +++ b/axelrod/ipd/tests/strategies/test_finite_state_machines.py @@ -5,9 +5,9 @@ import random import axelrod as axl -from axelrod.compute_finite_state_machine_memory import get_memory_from_transitions -from axelrod.evolvable_player import InsufficientParametersError -from axelrod.strategies.finite_state_machines import EvolvableFSMPlayer, FSMPlayer, SimpleFSM +from axelrod.ipd.compute_finite_state_machine_memory import get_memory_from_transitions +from axelrod.ipd.evolvable_player import InsufficientParametersError +from axelrod.ipd.strategies import EvolvableFSMPlayer, FSMPlayer, SimpleFSM from .test_player import TestPlayer from .test_evolvable_player import PartialClass, TestEvolvablePlayer @@ -96,7 +96,7 @@ class TestSampleFSMPlayer(TestPlayer): """Test a few sample tables to make sure that the finite state machines are working as intended.""" - name = "FSM Player: ((1, C, 1, C), (1, D, 1, D)), 1, C" + name = "FSM IpdPlayer: ((1, C, 1, C), (1, D, 1, D)), 1, C" player = axl.FSMPlayer expected_classifier = { @@ -168,7 +168,7 @@ def test_wsls(self): class TestFSMPlayer(TestPlayer): - name = "FSM Player: ((1, C, 1, C), (1, D, 1, D)), 1, C" + name = "FSM IpdPlayer: ((1, C, 1, C), (1, D, 1, D)), 1, C" player = axl.FSMPlayer expected_classifier = { diff --git a/axelrod/tests/strategies/test_forgiver.py b/axelrod/ipd/tests/strategies/test_forgiver.py similarity index 100% rename from axelrod/tests/strategies/test_forgiver.py rename to axelrod/ipd/tests/strategies/test_forgiver.py diff --git a/axelrod/tests/strategies/test_gambler.py b/axelrod/ipd/tests/strategies/test_gambler.py similarity index 99% rename from axelrod/tests/strategies/test_gambler.py rename to axelrod/ipd/tests/strategies/test_gambler.py index 2749526a3..31c00382a 100755 --- a/axelrod/tests/strategies/test_gambler.py +++ b/axelrod/ipd/tests/strategies/test_gambler.py @@ -7,8 +7,8 @@ import random import axelrod as axl -from axelrod.load_data_ import load_pso_tables -from axelrod.strategies.lookerup import create_lookup_table_keys +from axelrod.ipd.load_data_ import load_pso_tables +from axelrod.ipd.strategies.lookerup import create_lookup_table_keys from .test_lookerup import 
convert_original_to_current from .test_player import TestPlayer diff --git a/axelrod/tests/strategies/test_geller.py b/axelrod/ipd/tests/strategies/test_geller.py similarity index 100% rename from axelrod/tests/strategies/test_geller.py rename to axelrod/ipd/tests/strategies/test_geller.py diff --git a/axelrod/tests/strategies/test_gobymajority.py b/axelrod/ipd/tests/strategies/test_gobymajority.py similarity index 100% rename from axelrod/tests/strategies/test_gobymajority.py rename to axelrod/ipd/tests/strategies/test_gobymajority.py diff --git a/axelrod/tests/strategies/test_gradualkiller.py b/axelrod/ipd/tests/strategies/test_gradualkiller.py similarity index 100% rename from axelrod/tests/strategies/test_gradualkiller.py rename to axelrod/ipd/tests/strategies/test_gradualkiller.py diff --git a/axelrod/tests/strategies/test_grudger.py b/axelrod/ipd/tests/strategies/test_grudger.py similarity index 100% rename from axelrod/tests/strategies/test_grudger.py rename to axelrod/ipd/tests/strategies/test_grudger.py diff --git a/axelrod/tests/strategies/test_grumpy.py b/axelrod/ipd/tests/strategies/test_grumpy.py similarity index 100% rename from axelrod/tests/strategies/test_grumpy.py rename to axelrod/ipd/tests/strategies/test_grumpy.py diff --git a/axelrod/tests/strategies/test_handshake.py b/axelrod/ipd/tests/strategies/test_handshake.py similarity index 100% rename from axelrod/tests/strategies/test_handshake.py rename to axelrod/ipd/tests/strategies/test_handshake.py diff --git a/axelrod/tests/strategies/test_headsup.py b/axelrod/ipd/tests/strategies/test_headsup.py similarity index 100% rename from axelrod/tests/strategies/test_headsup.py rename to axelrod/ipd/tests/strategies/test_headsup.py diff --git a/axelrod/tests/strategies/test_hmm.py b/axelrod/ipd/tests/strategies/test_hmm.py similarity index 97% rename from axelrod/tests/strategies/test_hmm.py rename to axelrod/ipd/tests/strategies/test_hmm.py index 558da9bf4..477f76732 100644 --- a/axelrod/tests/strategies/test_hmm.py +++ b/axelrod/ipd/tests/strategies/test_hmm.py @@ -4,14 +4,14 @@ import random import axelrod as axl -from axelrod.evolvable_player import InsufficientParametersError -from axelrod.strategies.hmm import ( +from axelrod.ipd.random_ import random_vector +from axelrod.ipd.evolvable_player import InsufficientParametersError +from axelrod.ipd.strategies import ( EvolvableHMMPlayer, HMMPlayer, SimpleHMM, - is_stochastic_matrix, - random_vector, ) +from axelrod.ipd.strategies.hmm import is_stochastic_matrix from .test_player import TestMatch, TestPlayer from .test_evolvable_player import PartialClass, TestEvolvablePlayer @@ -149,7 +149,7 @@ def test_malformed_params(self): class TestHMMPlayer(TestPlayer): - name = "HMM Player: 0, C" + name = "HMM IpdPlayer: 0, C" player = axl.HMMPlayer expected_classifier = { diff --git a/axelrod/tests/strategies/test_human.py b/axelrod/ipd/tests/strategies/test_human.py similarity index 97% rename from axelrod/tests/strategies/test_human.py rename to axelrod/ipd/tests/strategies/test_human.py index 46c1f3f8a..14542edbb 100644 --- a/axelrod/tests/strategies/test_human.py +++ b/axelrod/ipd/tests/strategies/test_human.py @@ -4,7 +4,7 @@ from os import linesep import axelrod as axl -from axelrod.strategies.human import ActionValidator, Human +from axelrod.ipd.strategies.human import ActionValidator, Human from prompt_toolkit.validation import ValidationError from .test_player import TestPlayer @@ -113,7 +113,7 @@ def test_get_human_input_D(self): def test_strategy(self): human = Human() 
expected_action = C - actual_action = human.strategy(axl.Player(), lambda: C) + actual_action = human.strategy(axl.IpdPlayer(), lambda: C) self.assertEqual(actual_action, expected_action) def test_reset_history_and_attributes(self): diff --git a/axelrod/tests/strategies/test_hunter.py b/axelrod/ipd/tests/strategies/test_hunter.py similarity index 99% rename from axelrod/tests/strategies/test_hunter.py rename to axelrod/ipd/tests/strategies/test_hunter.py index 7c2912494..90154338a 100644 --- a/axelrod/tests/strategies/test_hunter.py +++ b/axelrod/ipd/tests/strategies/test_hunter.py @@ -2,10 +2,8 @@ import unittest -import random - import axelrod as axl -from axelrod.strategies.hunter import detect_cycle +from axelrod.ipd.strategies.hunter import detect_cycle from .test_player import TestPlayer diff --git a/axelrod/tests/strategies/test_inverse.py b/axelrod/ipd/tests/strategies/test_inverse.py similarity index 100% rename from axelrod/tests/strategies/test_inverse.py rename to axelrod/ipd/tests/strategies/test_inverse.py diff --git a/axelrod/tests/strategies/test_lookerup.py b/axelrod/ipd/tests/strategies/test_lookerup.py similarity index 99% rename from axelrod/tests/strategies/test_lookerup.py rename to axelrod/ipd/tests/strategies/test_lookerup.py index 024328c93..02d567e8d 100755 --- a/axelrod/tests/strategies/test_lookerup.py +++ b/axelrod/ipd/tests/strategies/test_lookerup.py @@ -7,9 +7,9 @@ import random import axelrod as axl -from axelrod.action import str_to_actions -from axelrod.evolvable_player import InsufficientParametersError -from axelrod.strategies.lookerup import ( +from axelrod.ipd.action import str_to_actions +from axelrod.ipd.evolvable_player import InsufficientParametersError +from axelrod.ipd.strategies.lookerup import ( EvolvableLookerUp, LookupTable, Plays, diff --git a/axelrod/tests/strategies/test_mathematicalconstants.py b/axelrod/ipd/tests/strategies/test_mathematicalconstants.py similarity index 100% rename from axelrod/tests/strategies/test_mathematicalconstants.py rename to axelrod/ipd/tests/strategies/test_mathematicalconstants.py diff --git a/axelrod/tests/strategies/test_memoryone.py b/axelrod/ipd/tests/strategies/test_memoryone.py similarity index 94% rename from axelrod/tests/strategies/test_memoryone.py rename to axelrod/ipd/tests/strategies/test_memoryone.py index 784baadf4..ecbbecd75 100644 --- a/axelrod/tests/strategies/test_memoryone.py +++ b/axelrod/ipd/tests/strategies/test_memoryone.py @@ -3,7 +3,7 @@ import warnings import axelrod as axl -from axelrod.strategies.memoryone import MemoryOnePlayer +from axelrod.ipd.strategies.memoryone import MemoryOnePlayer from .test_player import TestPlayer, test_four_vector @@ -19,9 +19,9 @@ class TestGenericPlayerOne(unittest.TestCase): p3 = axl.MemoryOnePlayer(four_vector=(1, 0.5, 1, 0.5)) def test_name(self): - self.assertEqual(self.p1.name, "Generic Memory One Player: (0, 0, 0, 0)") - self.assertEqual(self.p2.name, "Generic Memory One Player: (1, 0, 1, 0)") - self.assertEqual(self.p3.name, "Generic Memory One Player: (1, 0.5, 1, 0.5)") + self.assertEqual(self.p1.name, "Generic Memory One IpdPlayer: (0, 0, 0, 0)") + self.assertEqual(self.p2.name, "Generic Memory One IpdPlayer: (1, 0, 1, 0)") + self.assertEqual(self.p3.name, "Generic Memory One IpdPlayer: (1, 0.5, 1, 0.5)") def test_stochastic_classification(self): self.assertFalse(axl.Classifiers["stochastic"](self.p1)) @@ -93,7 +93,7 @@ def test_strategy(self): self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) def 
test_four_vector(self): - (R, P, S, T) = axl.Game().RPST() + (R, P, S, T) = axl.IpdGame().RPST() p = min(1 - (T - R) / (R - S), (R - P) / (T - P)) expected_dictionary = {(C, C): 1.0, (C, D): p, (D, C): 1.0, (D, D): p} test_four_vector(self, expected_dictionary) @@ -293,9 +293,9 @@ class TestGenericReactiveStrategy(unittest.TestCase): p3 = axl.ReactivePlayer(probabilities=(1, 0.5)) def test_name(self): - self.assertEqual(self.p1.name, "Reactive Player: (0, 0)") - self.assertEqual(self.p2.name, "Reactive Player: (1, 0)") - self.assertEqual(self.p3.name, "Reactive Player: (1, 0.5)") + self.assertEqual(self.p1.name, "Reactive IpdPlayer: (0, 0)") + self.assertEqual(self.p2.name, "Reactive IpdPlayer: (1, 0)") + self.assertEqual(self.p3.name, "Reactive IpdPlayer: (1, 0.5)") def test_four_vector(self): self.assertEqual( diff --git a/axelrod/tests/strategies/test_memorytwo.py b/axelrod/ipd/tests/strategies/test_memorytwo.py similarity index 94% rename from axelrod/tests/strategies/test_memorytwo.py rename to axelrod/ipd/tests/strategies/test_memorytwo.py index 3318c6983..3f0b65a5e 100644 --- a/axelrod/tests/strategies/test_memorytwo.py +++ b/axelrod/ipd/tests/strategies/test_memorytwo.py @@ -7,7 +7,7 @@ import warnings import axelrod as axl -from axelrod.strategies.memorytwo import MemoryTwoPlayer +from axelrod.ipd.strategies import MemoryTwoPlayer from .test_player import TestPlayer @@ -51,19 +51,19 @@ class TestGenericPlayerTwo(unittest.TestCase): def test_name(self): self.assertEqual( self.p1.name, - "Generic Memory Two Player: (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", + "Generic Memory Two IpdPlayer: (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", ) self.assertEqual( self.p2.name, - "Generic Memory Two Player: (1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)", + "Generic Memory Two IpdPlayer: (1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)", ) self.assertEqual( self.p3.name, - "Generic Memory Two Player: (0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5)", + "Generic Memory Two IpdPlayer: (0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5)", ) self.assertEqual( self.p4.name, - "Generic Memory Two Player: (0.1, 0, 0.2, 0, 0.3, 0, 0.4, 0, 0.5, 0, 0.6, 0, 0.7, 0, 0.8, 0)", + "Generic Memory Two IpdPlayer: (0.1, 0, 0.2, 0, 0.3, 0, 0.4, 0, 0.5, 0, 0.6, 0, 0.7, 0, 0.8, 0)", ) def test_deterministic_classification(self): @@ -142,7 +142,7 @@ def test_exception_if_probability_vector_outside_valid_values(self): class TestMemoryStochastic(TestPlayer): name = ( - "Generic Memory Two Player: (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1): C" + "Generic Memory Two IpdPlayer: (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1): C" ) player = axl.MemoryTwoPlayer expected_classifier = { diff --git a/axelrod/tests/strategies/test_meta.py b/axelrod/ipd/tests/strategies/test_meta.py similarity index 99% rename from axelrod/tests/strategies/test_meta.py rename to axelrod/ipd/tests/strategies/test_meta.py index 7256369c8..4559180e8 100644 --- a/axelrod/tests/strategies/test_meta.py +++ b/axelrod/ipd/tests/strategies/test_meta.py @@ -15,7 +15,7 @@ class TestMetaPlayer(TestPlayer): dictionary and the reset methods. 
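The memory-one tests above read best with the four-vector convention in mind: each entry is the probability of cooperating after the last (own move, opponent move) outcome, ordered (CC, CD, DC, DD). For instance:

import axelrod as axl

tft_like = axl.MemoryOnePlayer(four_vector=(1, 0, 1, 0))   # cooperate iff opponent did: Tit For Tat
wsls = axl.MemoryOnePlayer(four_vector=(1, 0, 0, 1))       # repeat after R or T: Win-Stay Lose-Shift
print(tft_like.name)   # "Generic Memory One IpdPlayer: (1, 0, 1, 0)" after this patch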
     Inherit from this class just as you would the TestPlayer class."""

-    name = "Meta Player"
+    name = "Meta IpdPlayer"
     player = axl.MetaPlayer
     expected_classifier = {
         "memory_depth": float("inf"),
@@ -86,7 +86,7 @@ def test_clone(self, seed):
         player2.reset()
         for p in [player1, player2]:
             axl.seed(seed)
-            m = axl.Match((p, op), turns=turns)
+            m = axl.IpdMatch((p, op), turns=turns)
             m.play()
         self.assertEqual(len(player1.history), turns)
         self.assertEqual(player1.history, player2.history)
@@ -107,7 +107,7 @@ class TestMetaMajority(TestMetaPlayer):

     def test_strategy(self):
         P1 = axl.MetaMajority()
-        P2 = axl.Player()
+        P2 = axl.IpdPlayer()

         # With more cooperators on the team than defectors, we should cooperate.
         P1.team = [axl.Cooperator(), axl.Cooperator(), axl.Defector()]
@@ -138,7 +138,7 @@ def test_team(self):

     def test_strategy(self):
         P1 = axl.MetaMinority()
-        P2 = axl.Player()
+        P2 = axl.IpdPlayer()

         # With more cooperators on the team, we should defect.
         P1.team = [axl.Cooperator(), axl.Cooperator(), axl.Defector()]
@@ -164,7 +164,7 @@ class TestNiceMetaWinner(TestMetaPlayer):

     def test_strategy(self):
         P1 = axl.NiceMetaWinner(team=[axl.Cooperator, axl.Defector])
-        P2 = axl.Player()
+        P2 = axl.IpdPlayer()

         # This meta player will simply choose the strategy with the highest
         # current score.
diff --git a/axelrod/tests/strategies/test_mindcontrol.py b/axelrod/ipd/tests/strategies/test_mindcontrol.py
similarity index 100%
rename from axelrod/tests/strategies/test_mindcontrol.py
rename to axelrod/ipd/tests/strategies/test_mindcontrol.py
diff --git a/axelrod/tests/strategies/test_mindreader.py b/axelrod/ipd/tests/strategies/test_mindreader.py
similarity index 98%
rename from axelrod/tests/strategies/test_mindreader.py
rename to axelrod/ipd/tests/strategies/test_mindreader.py
index 21a78ec6a..7c3031f06 100644
--- a/axelrod/tests/strategies/test_mindreader.py
+++ b/axelrod/ipd/tests/strategies/test_mindreader.py
@@ -1,7 +1,7 @@
 """Tests for the Mindreader strategy."""

 import axelrod as axl
-from axelrod._strategy_utils import simulate_match
+from axelrod.ipd._strategy_utils import simulate_match

 from .test_player import TestPlayer

diff --git a/axelrod/tests/strategies/test_mutual.py b/axelrod/ipd/tests/strategies/test_mutual.py
similarity index 80%
rename from axelrod/tests/strategies/test_mutual.py
rename to axelrod/ipd/tests/strategies/test_mutual.py
index 8ba512912..921fc698d 100644
--- a/axelrod/tests/strategies/test_mutual.py
+++ b/axelrod/ipd/tests/strategies/test_mutual.py
@@ -22,33 +22,33 @@ class TestDesperate(TestPlayer):
     }

     def test_strategy(self):
-        # Our Player (Desperate) vs Cooperator SEED --> 1
+        # Our IpdPlayer (Desperate) vs Cooperator SEED --> 1
         opponent = axl.Cooperator()
         opponent_actions = [C] * 5
         actions = [(C, C), (D, C), (D, C), (D, C), (D, C)]
         self.versus_test(opponent, expected_actions=actions, seed=1)

-        # Our Player (Desperate) vs Cooperator SEED --> 2
+        # Our IpdPlayer (Desperate) vs Cooperator SEED --> 2
         opponent = axl.Cooperator()
         actions = [(D, C), (D, C), (D, C), (D, C), (D, C)]
         self.versus_test(opponent, expected_actions=actions, seed=2)

-        # Our Player (Desperate) vs Defector SEED --> 1
+        # Our IpdPlayer (Desperate) vs Defector SEED --> 1
         opponent = axl.Defector()
         actions = [(C, D), (D, D), (C, D), (D, D), (C, D)]
         self.versus_test(opponent, expected_actions=actions, seed=1)

-        # Our Player (Desperate) vs Defector SEED --> 2
+        # Our IpdPlayer (Desperate) vs Defector SEED --> 2
         opponent = axl.Defector()
         actions = [(D, D), (C, D), (D, D), (C, D), (D, D)]
         self.versus_test(opponent, expected_actions=actions, seed=2)

-        # Our Player (Desperate) vs Alternator SEED --> 1
+        # Our IpdPlayer (Desperate) vs Alternator SEED --> 1
         opponent = axl.Alternator()
         actions = [(C, C), (D, D), (C, C), (D, D), (C, C)]
         self.versus_test(opponent, expected_actions=actions, seed=1)

-        # Our Player (Desperate) vs Alternator SEED --> 2
+        # Our IpdPlayer (Desperate) vs Alternator SEED --> 2
         opponent = axl.Alternator()
         actions = [(D, C), (D, D), (C, C), (D, D), (C, C)]
         self.versus_test(opponent, expected_actions=actions, seed=2)
@@ -69,33 +69,33 @@ class TestHopeless(TestPlayer):
     }

     def test_strategy(self):
-        # Our Player (Hopeless) vs Cooperator SEED --> 1
+        # Our IpdPlayer (Hopeless) vs Cooperator SEED --> 1
         opponent = axl.Cooperator()
         opponent_actions = [C] * 5
         actions = [(C, C), (D, C), (C, C), (D, C), (C, C)]
         self.versus_test(opponent, expected_actions=actions, seed=1)

-        # Our Player (Hopeless) vs Cooperator SEED --> 2
+        # Our IpdPlayer (Hopeless) vs Cooperator SEED --> 2
         opponent = axl.Cooperator()
         actions = [(D, C), (C, C), (D, C), (C, C), (D, C)]
         self.versus_test(opponent, expected_actions=actions, seed=2)

-        # Our Player (Hopeless) vs Defector SEED --> 1
+        # Our IpdPlayer (Hopeless) vs Defector SEED --> 1
         opponent = axl.Defector()
         actions = [(C, D), (C, D), (C, D), (C, D), (C, D)]
         self.versus_test(opponent, expected_actions=actions, seed=1)

-        # Our Player (Hopeless) vs Defector SEED --> 2
+        # Our IpdPlayer (Hopeless) vs Defector SEED --> 2
         opponent = axl.Defector()
         actions = [(D, D), (C, D), (C, D), (C, D), (C, D)]
         self.versus_test(opponent, expected_actions=actions, seed=2)

-        # Our Player (Hopeless) vs Alternator SEED --> 1
+        # Our IpdPlayer (Hopeless) vs Alternator SEED --> 1
         opponent = axl.Alternator()
         actions = [(C, C), (D, D), (C, C), (D, D), (C, C)]
         self.versus_test(opponent, expected_actions=actions, seed=1)

-        # Our Player (Hopeless) vs Alternator SEED --> 2
+        # Our IpdPlayer (Hopeless) vs Alternator SEED --> 2
         opponent = axl.Alternator()
         actions = [(D, C), (C, D), (C, C), (D, D), (C, C)]
         self.versus_test(opponent, expected_actions=actions, seed=2)
@@ -116,33 +116,33 @@ class TestWilling(TestPlayer):
     }

     def test_strategy(self):
-        # Our Player (Willing) vs Cooperator SEED --> 1
+        # Our IpdPlayer (Willing) vs Cooperator SEED --> 1
         opponent = axl.Cooperator()
         opponent_actions = [C] * 5
         actions = [(C, C), (C, C), (C, C), (C, C), (C, C)]
         self.versus_test(opponent, expected_actions=actions, seed=1)

-        # Our Player (Willing) vs Cooperator SEED --> 2
+        # Our IpdPlayer (Willing) vs Cooperator SEED --> 2
         opponent = axl.Cooperator()
         actions = [(D, C), (C, C), (C, C), (C, C), (C, C)]
         self.versus_test(opponent, expected_actions=actions, seed=2)

-        # Our Player (Willing) vs Defector SEED --> 1
+        # Our IpdPlayer (Willing) vs Defector SEED --> 1
         opponent = axl.Defector()
         actions = [(C, D), (C, D), (C, D), (C, D), (C, D)]
         self.versus_test(opponent, expected_actions=actions, seed=1)

-        # Our Player (Willing) vs Defector SEED --> 2
+        # Our IpdPlayer (Willing) vs Defector SEED --> 2
         opponent = axl.Defector()
         actions = [(D, D), (D, D), (D, D), (D, D), (D, D)]
         self.versus_test(opponent, expected_actions=actions, seed=2)

-        # Our Player (Willing) vs Alternator SEED --> 1
+        # Our IpdPlayer (Willing) vs Alternator SEED --> 1
         opponent = axl.Alternator()
         actions = [(C, C), (C, D), (C, C), (C, D), (C, C)]
         self.versus_test(opponent, expected_actions=actions, seed=1)

-        # Our Player (Willing) vs Alternator SEED --> 2
+        # Our IpdPlayer (Willing) vs Alternator SEED --> 2
         opponent = axl.Alternator()
         actions = [(D, C), (C, D), (C, C), (C, D), (C, C)]
         self.versus_test(opponent, expected_actions=actions, seed=2)
diff --git a/axelrod/tests/strategies/test_negation.py b/axelrod/ipd/tests/strategies/test_negation.py
similarity index 100%
rename from axelrod/tests/strategies/test_negation.py
rename to axelrod/ipd/tests/strategies/test_negation.py
diff --git a/axelrod/tests/strategies/test_oncebitten.py b/axelrod/ipd/tests/strategies/test_oncebitten.py
similarity index 100%
rename from axelrod/tests/strategies/test_oncebitten.py
rename to axelrod/ipd/tests/strategies/test_oncebitten.py
diff --git a/axelrod/tests/strategies/test_player.py b/axelrod/ipd/tests/strategies/test_player.py
similarity index 95%
rename from axelrod/tests/strategies/test_player.py
rename to axelrod/ipd/tests/strategies/test_player.py
index 2b4dd33c4..af2e7c0b5 100644
--- a/axelrod/tests/strategies/test_player.py
+++ b/axelrod/ipd/tests/strategies/test_player.py
@@ -6,8 +6,8 @@
 import numpy as np

 import axelrod as axl
-from axelrod.player import simultaneous_play
-from axelrod.tests.property import strategy_lists
+from axelrod.ipd.player import simultaneous_play
+from axelrod.ipd.tests.property import strategy_lists
 from hypothesis import given, settings
 from hypothesis.strategies import integers, sampled_from

@@ -43,8 +43,8 @@ def defect(*args):
 }


-class ParameterisedTestPlayer(axl.Player):
-    """A simple Player class for testing init parameters"""
+class ParameterisedTestPlayer(axl.IpdPlayer):
+    """A simple IpdPlayer class for testing init parameters"""

     name = "ParameterisedTestPlayer"
     classifier = _test_classifier
@@ -54,8 +54,8 @@ def __init__(self, arg_test1="testing1", arg_test2="testing2"):


 class TestPlayerClass(unittest.TestCase):
-    name = "Player"
-    player = axl.Player
+    name = "IpdPlayer"
+    player = axl.IpdPlayer
     classifier = {"stochastic": False}

     def test_play(self):
@@ -90,7 +90,7 @@ def test_play(self):
     def test_state_distribution(self):
         player1 = axl.MockPlayer([C, C, D, D, C])
         player2 = axl.MockPlayer([C, D, C, D, D])
-        match = axl.Match((player1, player2), turns=5)
+        match = axl.IpdMatch((player1, player2), turns=5)
         _ = match.play()
         self.assertEqual(
             player1.state_distribution,
@@ -112,7 +112,7 @@ def test_noisy_play(self):
         self.assertEqual(player2.history[0], D)

     def test_update_history(self):
-        player = axl.Player()
+        player = axl.IpdPlayer()
         self.assertEqual(player.history, [])
         self.assertEqual(player.cooperations, 0)
         self.assertEqual(player.defections, 0)
@@ -126,7 +126,7 @@
         self.assertEqual(player.cooperations, 1)

     def test_history_assignment(self):
-        player = axl.Player()
+        player = axl.IpdPlayer()
         with self.assertRaises(AttributeError):
             player.history = []

@@ -146,7 +146,7 @@ def test_clone(self):
         seed = random.randint(0, 10 ** 6)
         for p in [player1, player2]:
             axl.seed(seed)
-            m = axl.Match((p, op), turns=turns)
+            m = axl.IpdMatch((p, op), turns=turns)
             m.play()
         self.assertEqual(len(player1.history), turns)
         self.assertEqual(player1.history, player2.history)
@@ -319,9 +319,9 @@ def test_init_kwargs(self):
         # Test that init_kwargs exist and are empty
         self.assertEqual(self.player().init_kwargs, {})
         # Test that passing a positional argument raises an error
-        self.assertRaises(TypeError, axl.Player, "test")
+        self.assertRaises(TypeError, axl.IpdPlayer, "test")
         # Test that passing a keyword argument raises an error
-        self.assertRaises(TypeError, axl.Player, arg_test1="test")
+        self.assertRaises(TypeError, axl.IpdPlayer, arg_test1="test")


 # Tests for Players with init parameters
@@ -354,7 +354,7 @@ def test_init_kwargs(self):
         )


-class TestOpponent(axl.Player):
+class TestOpponent(axl.IpdPlayer):
     """A player who only exists so we have something to test against"""

     name = "TestOpponent"
@@ -496,7 +496,7 @@ def test_clone(self, seed):
         player2.reset()
         for p in [player1, player2]:
             axl.seed(seed)
-            m = axl.Match((p, op), turns=turns)
+            m = axl.IpdMatch((p, op), turns=turns)
             m.play()
         self.assertEqual(len(player1.history), turns)
         self.assertEqual(player1.history, player2.history)
@@ -554,9 +554,9 @@ def versus_test(
         Tests a sequence of outcomes for two given players.
         Parameters:
         -----------
-        opponent: Player or list
+        opponent: IpdPlayer or list
             An instance of a player OR a sequence of actions. If a sequence of
-            actions is passed, a Mock Player is created that cycles over that
+            actions is passed, a Mock IpdPlayer is created that cycles over that
             sequence.
         expected_actions: List
             The expected outcomes of the match (list of tuples of actions).
@@ -587,7 +587,7 @@ def versus_test(

         player = self.player(**init_kwargs)

-        match = axl.Match(
+        match = axl.IpdMatch(
             (player, opponent),
             turns=turns,
             noise=noise,
@@ -657,7 +657,7 @@ def versus_test(
         if seed:
             axl.seed(seed)
         turns = len(expected_actions1)
-        match = axl.Match((player1, player2), turns=turns, noise=noise)
+        match = axl.IpdMatch((player1, player2), turns=turns, noise=noise)
         match.play()
         # Test expected sequence of play.
         for i, (outcome1, outcome2) in enumerate(
@@ -696,7 +696,7 @@ def test_memory(player, opponent, memory_length, seed=0, turns=10):
     """
     # Play the match normally.
     axl.seed(seed)
-    match = axl.Match((player, opponent), turns=turns)
+    match = axl.IpdMatch((player, opponent), turns=turns)
     plays = [p[0] for p in match.play()]

     # Play with limited history.
@@ -705,7 +705,7 @@
     player._history = axl.LimitedHistory(memory_length)
     opponent._history = axl.LimitedHistory(memory_length)
     axl.seed(seed)
-    match = axl.Match((player, opponent), turns=turns, reset=False)
+    match = axl.IpdMatch((player, opponent), turns=turns, reset=False)
     limited_plays = [p[0] for p in match.play()]

     return plays == limited_plays
diff --git a/axelrod/tests/strategies/test_prober.py b/axelrod/ipd/tests/strategies/test_prober.py
similarity index 100%
rename from axelrod/tests/strategies/test_prober.py
rename to axelrod/ipd/tests/strategies/test_prober.py
diff --git a/axelrod/tests/strategies/test_punisher.py b/axelrod/ipd/tests/strategies/test_punisher.py
similarity index 100%
rename from axelrod/tests/strategies/test_punisher.py
rename to axelrod/ipd/tests/strategies/test_punisher.py
diff --git a/axelrod/tests/strategies/test_qlearner.py b/axelrod/ipd/tests/strategies/test_qlearner.py
similarity index 99%
rename from axelrod/tests/strategies/test_qlearner.py
rename to axelrod/ipd/tests/strategies/test_qlearner.py
index 81a261109..1b07a4bfb 100644
--- a/axelrod/tests/strategies/test_qlearner.py
+++ b/axelrod/ipd/tests/strategies/test_qlearner.py
@@ -24,7 +24,7 @@ class TestRiskyQLearner(TestPlayer):
     }

     def test_payoff_matrix(self):
-        (R, P, S, T) = axl.Game().RPST()
+        (R, P, S, T) = axl.IpdGame().RPST()
         payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}}
         player = self.player()
         self.assertEqual(player.payoff_matrix, payoff_matrix)
diff --git a/axelrod/tests/strategies/test_rand.py b/axelrod/ipd/tests/strategies/test_rand.py
similarity index 100%
rename from axelrod/tests/strategies/test_rand.py
rename to axelrod/ipd/tests/strategies/test_rand.py
diff --git a/axelrod/tests/strategies/test_resurrection.py b/axelrod/ipd/tests/strategies/test_resurrection.py
similarity index 100%
rename from axelrod/tests/strategies/test_resurrection.py
rename to axelrod/ipd/tests/strategies/test_resurrection.py
diff --git a/axelrod/tests/strategies/test_retaliate.py b/axelrod/ipd/tests/strategies/test_retaliate.py
similarity index 100%
rename from axelrod/tests/strategies/test_retaliate.py
rename to axelrod/ipd/tests/strategies/test_retaliate.py
diff --git a/axelrod/tests/strategies/test_revised_downing.py b/axelrod/ipd/tests/strategies/test_revised_downing.py
similarity index 100%
rename from axelrod/tests/strategies/test_revised_downing.py
rename to axelrod/ipd/tests/strategies/test_revised_downing.py
diff --git a/axelrod/tests/strategies/test_selfsteem.py b/axelrod/ipd/tests/strategies/test_selfsteem.py
similarity index 100%
rename from axelrod/tests/strategies/test_selfsteem.py
rename to axelrod/ipd/tests/strategies/test_selfsteem.py
diff --git a/axelrod/tests/strategies/test_sequence_player.py b/axelrod/ipd/tests/strategies/test_sequence_player.py
similarity index 94%
rename from axelrod/tests/strategies/test_sequence_player.py
rename to axelrod/ipd/tests/strategies/test_sequence_player.py
index 2a3c655ef..f0503ce95 100644
--- a/axelrod/tests/strategies/test_sequence_player.py
+++ b/axelrod/ipd/tests/strategies/test_sequence_player.py
@@ -2,8 +2,8 @@
 import unittest

 import axelrod as axl
-from axelrod._strategy_utils import recursive_thue_morse
-from axelrod.strategies.sequence_player import SequencePlayer
+from axelrod.ipd._strategy_utils import recursive_thue_morse
+from axelrod.ipd.strategies.sequence_player import SequencePlayer

 from .test_player import TestOpponent, TestPlayer

diff --git a/axelrod/tests/strategies/test_shortmem.py b/axelrod/ipd/tests/strategies/test_shortmem.py
similarity index 100%
rename from axelrod/tests/strategies/test_shortmem.py
rename to axelrod/ipd/tests/strategies/test_shortmem.py
diff --git a/axelrod/tests/strategies/test_stalker.py b/axelrod/ipd/tests/strategies/test_stalker.py
similarity index 98%
rename from axelrod/tests/strategies/test_stalker.py
rename to axelrod/ipd/tests/strategies/test_stalker.py
index cc013543c..31acdb769 100644
--- a/axelrod/tests/strategies/test_stalker.py
+++ b/axelrod/ipd/tests/strategies/test_stalker.py
@@ -87,7 +87,7 @@ def test_strategy(self):
     def test_reset(self):
         axl.seed(0)
         player = axl.Stalker()
-        m = axl.Match((player, axl.Alternator()))
+        m = axl.IpdMatch((player, axl.Alternator()))
         m.play()
         self.assertNotEqual(player.current_score, 0)
         player.reset()
diff --git a/axelrod/tests/strategies/test_titfortat.py b/axelrod/ipd/tests/strategies/test_titfortat.py
similarity index 99%
rename from axelrod/tests/strategies/test_titfortat.py
rename to axelrod/ipd/tests/strategies/test_titfortat.py
index b99522d5f..c71ec03a9 100644
--- a/axelrod/tests/strategies/test_titfortat.py
+++ b/axelrod/ipd/tests/strategies/test_titfortat.py
@@ -5,7 +5,7 @@
 import random

 import axelrod as axl
-from axelrod.tests.property import strategy_lists
+from axelrod.ipd.tests.property import strategy_lists
 from hypothesis import given
 from hypothesis.strategies import integers

@@ -44,7 +44,7 @@ def test_strategy(self):
         actions = [(C, D), (D, D), (D, D), (D, D), (D, D)]
         self.versus_test(axl.Defector(), expected_actions=actions)

-        # This behaviour is independent of knowledge of the Match length
+        # This behaviour is independent of knowledge of the IpdMatch length
         actions = [(C, C), (C, D), (D, C), (C, D), (D, C)]
         self.versus_test(
             axl.Alternator(),
@@ -486,7 +486,7 @@ def test_specific_set_of_results(self):
             axl.CyclerDDC(),
         ]
         axl.seed(1)
-        tournament = axl.Tournament(players, turns=1000, repetitions=1)
+        tournament = axl.IpdTournament(players, turns=1000, repetitions=1)
         results = tournament.play(progress_bar=False)
         scores = [
             round(average_score_per_turn * 1000, 1)
@@ -671,7 +671,7 @@ def test_output_from_literature(self):
         axl.seed(1)
         turns = 1000

-        tournament = axl.Tournament(players, turns=turns, repetitions=1)
+        tournament = axl.IpdTournament(players, turns=turns, repetitions=1)
         results = tournament.play(progress_bar=False)
         scores = [
             round(average_score_per_turn * 1000, 1)
@@ -723,8 +723,8 @@ def test_is_tit_for_tat_with_no_noise(self, strategies, turns):
         tft = axl.TitForTat()
         ctft = self.player()
         opponent = strategies[0]()
-        m1 = axl.Match((tft, opponent), turns)
-        m2 = axl.Match((ctft, opponent), turns)
+        m1 = axl.IpdMatch((tft, opponent), turns)
+        m2 = axl.IpdMatch((ctft, opponent), turns)
         self.assertEqual(m1.play(), m2.play())

     def test_strategy_with_noise(self):
diff --git a/axelrod/tests/strategies/test_verybad.py b/axelrod/ipd/tests/strategies/test_verybad.py
similarity index 100%
rename from axelrod/tests/strategies/test_verybad.py
rename to axelrod/ipd/tests/strategies/test_verybad.py
diff --git a/axelrod/tests/strategies/test_worse_and_worse.py b/axelrod/ipd/tests/strategies/test_worse_and_worse.py
similarity index 100%
rename from axelrod/tests/strategies/test_worse_and_worse.py
rename to axelrod/ipd/tests/strategies/test_worse_and_worse.py
diff --git a/axelrod/tests/strategies/test_zero_determinant.py b/axelrod/ipd/tests/strategies/test_zero_determinant.py
similarity index 98%
rename from axelrod/tests/strategies/test_zero_determinant.py
rename to axelrod/ipd/tests/strategies/test_zero_determinant.py
index 615ca27fd..f5a9b74bf 100644
--- a/axelrod/tests/strategies/test_zero_determinant.py
+++ b/axelrod/ipd/tests/strategies/test_zero_determinant.py
@@ -3,8 +3,8 @@
 import unittest

 import axelrod as axl
-from axelrod.game import DefaultGame
-from axelrod.strategies.zero_determinant import LRPlayer
+from axelrod.ipd.game import DefaultGame
+from axelrod.ipd.strategies.zero_determinant import LRPlayer

 from .test_player import TestPlayer, test_four_vector
diff --git a/axelrod/ipd/tests/unit/__init__.py b/axelrod/ipd/tests/unit/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/axelrod/tests/unit/test_actions.py b/axelrod/ipd/tests/unit/test_actions.py
similarity index 96%
rename from axelrod/tests/unit/test_actions.py
rename to axelrod/ipd/tests/unit/test_actions.py
index d5ad6a338..1d8d8b283 100644
--- a/axelrod/tests/unit/test_actions.py
+++ b/axelrod/ipd/tests/unit/test_actions.py
@@ -1,7 +1,7 @@
 import unittest

 import axelrod as axl
-from axelrod.action import UnknownActionError, actions_to_str, str_to_actions
+from axelrod.ipd.action import UnknownActionError, actions_to_str, str_to_actions

 C, D = axl.Action.C, axl.Action.D

diff --git a/axelrod/tests/unit/test_classification.py b/axelrod/ipd/tests/unit/test_classification.py
similarity index 97%
rename from axelrod/tests/unit/test_classification.py
rename to axelrod/ipd/tests/unit/test_classification.py
index e0db01e5d..3ce32ca66 100644
--- a/axelrod/tests/unit/test_classification.py
+++ b/axelrod/ipd/tests/unit/test_classification.py
@@ -7,17 +7,17 @@
 import yaml

 import axelrod as axl
-from axelrod.classifier import (
+from axelrod.ipd.classifier import (
     Classifier,
     Classifiers,
     _Classifiers,
     memory_depth,
     rebuild_classifier_table,
 )
-from axelrod.player import Player
+from axelrod.ipd.player import IpdPlayer


-class TitForTatWithEmptyClassifier(Player):
+class TitForTatWithEmptyClassifier(IpdPlayer):
     """
     Same name as TitForTat, but with empty classifier.
     """
@@ -27,7 +27,7 @@ class TitForTatWithEmptyClassifier(Player):
     classifier = {}


-class TitForTatWithNonTrivialInitialzer(Player):
+class TitForTatWithNonTrivialInitialzer(IpdPlayer):
     """
     Same name as TitForTat, but with empty classifier.
     """
@@ -50,7 +50,7 @@ def tearDown(self) -> None:

     def test_classifier_build(self):
         dirname = os.path.dirname(__file__)
-        test_path = os.path.join(dirname, "../../../test_outputs/classifier_test.yaml")
+        test_path = os.path.join(dirname, "../../../../test_outputs/classifier_test.yaml")

         # Just returns the name of the player. For testing.
         name_classifier = Classifier[Text]("name", lambda player: player.name)
diff --git a/axelrod/tests/unit/test_compute_finite_state_machine_memory.py b/axelrod/ipd/tests/unit/test_compute_finite_state_machine_memory.py
similarity index 99%
rename from axelrod/tests/unit/test_compute_finite_state_machine_memory.py
rename to axelrod/ipd/tests/unit/test_compute_finite_state_machine_memory.py
index 82e828676..7f721c0f6 100644
--- a/axelrod/tests/unit/test_compute_finite_state_machine_memory.py
+++ b/axelrod/ipd/tests/unit/test_compute_finite_state_machine_memory.py
@@ -3,7 +3,7 @@
 import unittest

 import axelrod as axl
-from axelrod.compute_finite_state_machine_memory import *
+from axelrod.ipd.compute_finite_state_machine_memory import *

 C, D = axl.Action.C, axl.Action.D

diff --git a/axelrod/tests/unit/test_deterministic_cache.py b/axelrod/ipd/tests/unit/test_deterministic_cache.py
similarity index 93%
rename from axelrod/tests/unit/test_deterministic_cache.py
rename to axelrod/ipd/tests/unit/test_deterministic_cache.py
index 82d11904c..2c3518030 100644
--- a/axelrod/tests/unit/test_deterministic_cache.py
+++ b/axelrod/ipd/tests/unit/test_deterministic_cache.py
@@ -4,7 +4,7 @@
 import pickle

 import axelrod as axl
-from axelrod.load_data_ import axl_filename
+from axelrod.ipd.load_data_ import axl_filename

 C, D = axl.Action.C, axl.Action.D

@@ -14,9 +14,9 @@ class TestDeterministicCache(unittest.TestCase):
     def setUpClass(cls):
         cls.test_key = (axl.TitForTat(), axl.Defector())
         cls.test_value = [(C, D), (D, D), (D, D)]
-        save_path = pathlib.Path("test_outputs/test_cache_save.txt")
+        save_path = pathlib.Path("../test_outputs/test_cache_save.txt")
         cls.test_save_file = axl_filename(save_path)
-        load_path = pathlib.Path("test_outputs/test_cache_load.txt")
+        load_path = pathlib.Path("../test_outputs/test_cache_load.txt")
         cls.test_load_file = axl_filename(load_path)
         test_data_to_pickle = {("Tit For Tat", "Defector"): [(C, D), (D, D), (D, D)]}
         cls.test_pickle = pickle.dumps(test_data_to_pickle)
@@ -96,7 +96,7 @@ def test_load(self):
         self.assertEqual(self.cache[self.test_key], self.test_value)

     def test_load_error_for_inccorect_format(self):
-        path = pathlib.Path("test_outputs/test.cache")
+        path = pathlib.Path("../test_outputs/test.cache")
         filename = axl_filename(path)
         with open(filename, "wb") as io:
             pickle.dump(range(5), io)
diff --git a/axelrod/tests/unit/test_ecosystem.py b/axelrod/ipd/tests/unit/test_ecosystem.py
similarity index 97%
rename from axelrod/tests/unit/test_ecosystem.py
rename to axelrod/ipd/tests/unit/test_ecosystem.py
index 92098ba17..ec552b542 100644
--- a/axelrod/tests/unit/test_ecosystem.py
+++ b/axelrod/ipd/tests/unit/test_ecosystem.py
@@ -8,7 +8,7 @@ class TestEcosystem(unittest.TestCase):

     @classmethod
     def setUpClass(cls):
-        cooperators = axl.Tournament(
+        cooperators = axl.IpdTournament(
             players=[
                 axl.Cooperator(),
                 axl.Cooperator(),
@@ -16,7 +16,7 @@ def setUpClass(cls):
                 axl.Cooperator(),
             ]
         )
-        defector_wins = axl.Tournament(
+        defector_wins = axl.IpdTournament(
             players=[
                 axl.Cooperator(),
                 axl.Cooperator(),
diff --git a/axelrod/tests/unit/test_eigen.py b/axelrod/ipd/tests/unit/test_eigen.py
similarity index 96%
rename from axelrod/tests/unit/test_eigen.py
rename to axelrod/ipd/tests/unit/test_eigen.py
index 36f0bf5b9..384f018ca 100644
--- a/axelrod/tests/unit/test_eigen.py
+++ b/axelrod/ipd/tests/unit/test_eigen.py
@@ -5,7 +5,7 @@
 import numpy
 from numpy.testing import assert_array_almost_equal

-from axelrod.eigen import _normalise, principal_eigenvector
+from axelrod.ipd.eigen import _normalise, principal_eigenvector


diff --git a/axelrod/tests/unit/test_filters.py b/axelrod/ipd/tests/unit/test_filters.py
similarity index 95%
rename from axelrod/tests/unit/test_filters.py
rename to axelrod/ipd/tests/unit/test_filters.py
index f2d9e2e17..b1c78255a 100644
--- a/axelrod/tests/unit/test_filters.py
+++ b/axelrod/ipd/tests/unit/test_filters.py
@@ -1,15 +1,15 @@
 import unittest

 import axelrod as axl

-from axelrod.player import Player
-from axelrod.strategies._filters import *
+from axelrod.ipd.player import IpdPlayer
+from axelrod.ipd.strategies._filters import *
 from hypothesis import example, given, settings
 from hypothesis.strategies import integers


 class TestFilters(unittest.TestCase):
-    class TestStrategy(Player):
+    class TestStrategy(IpdPlayer):
         classifier = {
             "stochastic": True,
             "inspects_source": False,
@@ -127,17 +127,17 @@ def test_passes_filterset(self, smaller, larger):
         self.assertFalse(passes_filterset(self.TestStrategy, sparse_failing_filterset))

     def test_filtered_strategies(self):
-        class StochasticTestStrategy(Player):
+        class StochasticTestStrategy(IpdPlayer):
             classifier = {
                 "stochastic": True,
                 "memory_depth": float("inf"),
                 "makes_use_of": [],
             }

-        class MemoryDepth2TestStrategy(Player):
+        class MemoryDepth2TestStrategy(IpdPlayer):
             classifier = {"stochastic": False, "memory_depth": 2, "makes_use_of": []}

-        class UsesLengthTestStrategy(Player):
+        class UsesLengthTestStrategy(IpdPlayer):
             classifier = {
                 "stochastic": True,
                 "memory_depth": float("inf"),
diff --git a/axelrod/tests/unit/test_fingerprint.py b/axelrod/ipd/tests/unit/test_fingerprint.py
similarity index 92%
rename from axelrod/tests/unit/test_fingerprint.py
rename to axelrod/ipd/tests/unit/test_fingerprint.py
index f60a93e7a..50895d296 100644
--- a/axelrod/tests/unit/test_fingerprint.py
+++ b/axelrod/ipd/tests/unit/test_fingerprint.py
@@ -8,10 +8,9 @@
 import pathlib

 import axelrod as axl
-from axelrod.fingerprint import AshlockFingerprint, Point, TransitiveFingerprint
-from axelrod.load_data_ import axl_filename
-from axelrod.strategy_transformers import DualTransformer, JossAnnTransformer
-from axelrod.tests.property import strategy_lists
+from axelrod.ipd.fingerprint import AshlockFingerprint, Point, TransitiveFingerprint
+from axelrod.ipd.load_data_ import axl_filename
+from axelrod.ipd.tests.property import strategy_lists
 from hypothesis import given, settings

@@ -145,9 +144,9 @@ def test_fingerprint_interactions_cooperator(self):

         # Interactions are invariant for any points where y is zero, and
         # the score should be maximum possible.
-        # Player 1 is Point(0.0, 0.0).
-        # Player 4 is Point(0.5, 0.0).
-        # Player 7 is Point(1.0, 0.0).
+        # IpdPlayer 1 is Point(0.0, 0.0).
+        # IpdPlayer 4 is Point(0.5, 0.0).
+        # IpdPlayer 7 is Point(1.0, 0.0).
         for iplayer in (1, 4, 7):
             for turns in af.interactions[(0, iplayer)]:
                 self.assertEqual(len(turns), 5)
@@ -156,7 +155,7 @@ def test_fingerprint_interactions_cooperator(self):
         self.assertEqual(af.data[Point(0.5, 0.0)], 3.0)
         self.assertEqual(af.data[Point(1.0, 0.0)], 3.0)

-        # Player 3 is Point(0.0, 1.0), which means constant defection
+        # IpdPlayer 3 is Point(0.0, 1.0), which means constant defection
         # from the probe. But the Cooperator doesn't change and score is zero.
         for turns in af.interactions[(0, 3)]:
             self.assertEqual(len(turns), 5)
@@ -170,9 +169,9 @@ def test_fingerprint_interactions_titfortat(self):
         # Tit-for-Tats will always cooperate if left to their own devices,
         # so interactions are invariant for any points where y is zero,
         # and the score should be maximum possible.
-        # Player 1 is Point(0.0, 0.0).
-        # Player 4 is Point(0.5, 0.0).
-        # Player 7 is Point(1.0, 0.0).
+        # IpdPlayer 1 is Point(0.0, 0.0).
+        # IpdPlayer 4 is Point(0.5, 0.0).
+        # IpdPlayer 7 is Point(1.0, 0.0).
         for iplayer in (1, 4, 7):
             for turns in af.interactions[(0, iplayer)]:
                 self.assertEqual(len(turns), 5)
@@ -181,7 +180,7 @@ def test_fingerprint_interactions_titfortat(self):
         self.assertEqual(af.data[Point(0.5, 0.0)], 3.0)
         self.assertEqual(af.data[Point(1.0, 0.0)], 3.0)

-        # Player 3 is Point(0.0, 1.0) which implies defection after the
+        # IpdPlayer 3 is Point(0.0, 1.0) which implies defection after the
         # first turn since Tit-for-Tat is playing, and a score of 0.8
         # since we get zero on first turn and one point per turn later.
         for turns in af.interactions[(0, 3)]:
@@ -199,7 +198,7 @@ def test_temp_file_creation(self):

         RecordedMksTemp.reset_record()
         af = AshlockFingerprint(axl.TitForTat)
-        path = pathlib.Path("test_outputs/test_fingerprint.csv")
+        path = pathlib.Path("../test_outputs/test_fingerprint.csv")
         filename = axl_filename(path)

         self.assertEqual(RecordedMksTemp.record, [])
@@ -216,7 +215,7 @@ def test_temp_file_creation(self):
         self.assertFalse(os.path.isfile(filename))

     def test_fingerprint_with_filename(self):
-        path = pathlib.Path("test_outputs/test_fingerprint.csv")
+        path = pathlib.Path("../test_outputs/test_fingerprint.csv")
         filename = axl_filename(path)
         af = AshlockFingerprint(axl.TitForTat)
         af.fingerprint(
@@ -431,7 +430,7 @@ def test_init_with_not_default_number(self):
         )

     def test_fingerprint_with_filename(self):
-        path = pathlib.Path("test_outputs/test_fingerprint.csv")
+        path = pathlib.Path("../test_outputs/test_fingerprint.csv")
         filename = axl_filename(path)
         strategy = axl.TitForTat()
         tf = TransitiveFingerprint(strategy)
@@ -443,7 +442,7 @@
     def test_serial_fingerprint(self):
         strategy = axl.TitForTat()
         tf = TransitiveFingerprint(strategy)
-        path = pathlib.Path("test_outputs/test_fingerprint.csv")
+        path = pathlib.Path("../test_outputs/test_fingerprint.csv")
         tf.fingerprint(
             repetitions=1,
             progress_bar=False,
@@ -460,27 +459,27 @@ def test_parallel_fingerprint(self):

     def test_analyse_cooperation_ratio(self):
         tf = TransitiveFingerprint(axl.TitForTat)
-        path = pathlib.Path("test_outputs/test_fingerprint.csv")
+        path = pathlib.Path("../test_outputs/test_fingerprint.csv")
         filename = axl_filename(path)
         with open(filename, "w") as f:
             f.write(
-                """Interaction index,Player index,Opponent index,Repetition,Player name,Opponent name,Actions
-0,0,1,0,Player0,Player1,CCC
-0,1,0,0,Player1,Player0,DDD
-1,0,1,1,Player0,Player1,CCC
-1,1,0,1,Player1,Player0,DDD
-2,0,2,0,Player0,Player2,CCD
-2,2,0,0,Player2,Player0,DDD
-3,0,2,1,Player0,Player2,CCC
-3,2,0,1,Player2,Player0,DDD
-4,0,3,0,Player0,Player3,CCD
-4,3,0,0,Player3,Player0,DDD
-5,0,3,1,Player0,Player3,DCC
-5,3,0,1,Player3,Player0,DDD
-6,0,4,2,Player0,Player4,DDD
-6,4,0,2,Player4,Player0,DDD
-7,0,4,3,Player0,Player4,DDD
-7,4,0,3,Player4,Player0,DDD"""
+                """Interaction index,Player index,Opponent index,Repetition,IpdPlayer name,Opponent name,Actions
+0,0,1,0,IpdPlayer0,IpdPlayer1,CCC
+0,1,0,0,IpdPlayer1,IpdPlayer0,DDD
+1,0,1,1,IpdPlayer0,IpdPlayer1,CCC
+1,1,0,1,IpdPlayer1,IpdPlayer0,DDD
+2,0,2,0,IpdPlayer0,IpdPlayer2,CCD
+2,2,0,0,IpdPlayer2,IpdPlayer0,DDD
+3,0,2,1,IpdPlayer0,IpdPlayer2,CCC
+3,2,0,1,IpdPlayer2,IpdPlayer0,DDD
+4,0,3,0,IpdPlayer0,IpdPlayer3,CCD
+4,3,0,0,IpdPlayer3,IpdPlayer0,DDD
+5,0,3,1,IpdPlayer0,IpdPlayer3,DCC
+5,3,0,1,IpdPlayer3,IpdPlayer0,DDD
+6,0,4,2,IpdPlayer0,IpdPlayer4,DDD
+6,4,0,2,IpdPlayer4,IpdPlayer0,DDD
+7,0,4,3,IpdPlayer0,IpdPlayer4,DDD
+7,4,0,3,IpdPlayer4,IpdPlayer0,DDD"""
             )
         data = tf.analyse_cooperation_ratio(filename)
         expected_data = np.array(
diff --git a/axelrod/tests/unit/test_game.py b/axelrod/ipd/tests/unit/test_game.py
similarity index 77%
rename from axelrod/tests/unit/test_game.py
rename to axelrod/ipd/tests/unit/test_game.py
index c52d22b7b..25b4eb4e3 100644
--- a/axelrod/tests/unit/test_game.py
+++ b/axelrod/ipd/tests/unit/test_game.py
@@ -1,7 +1,7 @@
 import unittest

 import axelrod as axl
-from axelrod.tests.property import games
+from axelrod.ipd.tests.property import games
 from hypothesis import given, settings
 from hypothesis.strategies import integers

@@ -17,29 +17,29 @@ def test_default_scores(self):
             (D, D): (1, 1),
             (C, C): (3, 3),
         }
-        self.assertEqual(axl.Game().scores, expected_scores)
+        self.assertEqual(axl.IpdGame().scores, expected_scores)

     def test_default_RPST(self):
         expected_values = (3, 1, 0, 5)
-        self.assertEqual(axl.Game().RPST(), expected_values)
+        self.assertEqual(axl.IpdGame().RPST(), expected_values)

     def test_default_score(self):
-        game = axl.Game()
+        game = axl.IpdGame()
         self.assertEqual(game.score((C, C)), (3, 3))
         self.assertEqual(game.score((D, D)), (1, 1))
         self.assertEqual(game.score((C, D)), (0, 5))
         self.assertEqual(game.score((D, C)), (5, 0))

     def test_default_equality(self):
-        self.assertEqual(axl.Game(), axl.Game())
+        self.assertEqual(axl.IpdGame(), axl.IpdGame())

     def test_not_default_equality(self):
-        self.assertEqual(axl.Game(1, 2, 3, 4), axl.Game(1, 2, 3, 4))
-        self.assertNotEqual(axl.Game(1, 2, 3, 4), axl.Game(1, 2, 3, 5))
-        self.assertNotEqual(axl.Game(1, 2, 3, 4), axl.Game())
+        self.assertEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame(1, 2, 3, 4))
+        self.assertNotEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame(1, 2, 3, 5))
+        self.assertNotEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame())

     def test_wrong_class_equality(self):
-        self.assertNotEqual(axl.Game(), "wrong class")
+        self.assertNotEqual(axl.IpdGame(), "wrong class")

     @given(r=integers(), p=integers(), s=integers(), t=integers())
     @settings(max_examples=5)
     def test_random_init(self, r, p, s, t):
         """Test init with random scores using the hypothesis library."""
         expected_scores = {
             (D, C): (t, s),
             (C, D): (s, t),
             (D, D): (p, p),
             (C, C): (r, r),
         }
-        game = axl.Game(r, s, t, p)
+        game = axl.IpdGame(r, s, t, p)
         self.assertEqual(game.scores, expected_scores)

     @given(r=integers(), p=integers(), s=integers(), t=integers())
     @settings(max_examples=5)
     def test_random_RPST(self, r, p, s, t):
         """Test RPST method with random scores using the hypothesis library."""
-        game = axl.Game(r, s, t, p)
+        game = axl.IpdGame(r, s, t, p)
         self.assertEqual(game.RPST(), (r, p, s, t))

     @given(r=integers(), p=integers(), s=integers(), t=integers())
     @settings(max_examples=5)
     def test_random_score(self, r, p, s, t):
         """Test score method with random scores using the hypothesis library."""
-        game = axl.Game(r, s, t, p)
+        game = axl.IpdGame(r, s, t, p)
         self.assertEqual(game.score((C, C)), (r, r))
         self.assertEqual(game.score((D, D)), (p, p))
         self.assertEqual(game.score((C, D)), (s, t))
diff --git a/axelrod/tests/unit/test_graph.py b/axelrod/ipd/tests/unit/test_graph.py
similarity index 100%
rename from axelrod/tests/unit/test_graph.py
rename to axelrod/ipd/tests/unit/test_graph.py
diff --git a/axelrod/tests/unit/test_history.py b/axelrod/ipd/tests/unit/test_history.py
similarity index 98%
rename from axelrod/tests/unit/test_history.py
rename to axelrod/ipd/tests/unit/test_history.py
index 7c517958b..572568649 100644
--- a/axelrod/tests/unit/test_history.py
+++ b/axelrod/ipd/tests/unit/test_history.py
@@ -3,7 +3,7 @@
 from collections import Counter

 import axelrod as axl
-from axelrod.history import History, LimitedHistory
+from axelrod.ipd.history import History, LimitedHistory

 C, D = axl.Action.C, axl.Action.D

diff --git a/axelrod/tests/unit/test_interaction_utils.py b/axelrod/ipd/tests/unit/test_interaction_utils.py
similarity index 98%
rename from axelrod/tests/unit/test_interaction_utils.py
rename to axelrod/ipd/tests/unit/test_interaction_utils.py
index 2e1d2c5e1..6ddda717c 100644
--- a/axelrod/tests/unit/test_interaction_utils.py
+++ b/axelrod/ipd/tests/unit/test_interaction_utils.py
@@ -129,7 +129,7 @@ def test_compute_sparklines(self):
     def test_read_interactions_from_file(self):
         tmp_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
         players = [axl.Cooperator(), axl.Defector()]
-        tournament = axl.Tournament(players=players, turns=2, repetitions=3)
+        tournament = axl.IpdTournament(players=players, turns=2, repetitions=3)
         tournament.play(filename=tmp_file.name)
         tmp_file.close()
         expected_interactions = {
diff --git a/axelrod/tests/unit/test_load_data.py b/axelrod/ipd/tests/unit/test_load_data.py
similarity index 79%
rename from axelrod/tests/unit/test_load_data.py
rename to axelrod/ipd/tests/unit/test_load_data.py
index 6dd880335..a38a7c1b0 100644
--- a/axelrod/tests/unit/test_load_data.py
+++ b/axelrod/ipd/tests/unit/test_load_data.py
@@ -2,12 +2,12 @@
 import pathlib
 import unittest

-from axelrod.load_data_ import axl_filename
+from axelrod.ipd.load_data_ import axl_filename


 class TestLoadData(unittest.TestCase):
     def test_axl_filename(self):
-        path = pathlib.Path("axelrod/strategies/titfortat.py")
+        path = pathlib.Path("ipd/strategies/titfortat.py")
         actual_fn = axl_filename(path)

         # First go from "unit" up to "tests", then up to "axelrod"
diff --git a/axelrod/tests/unit/test_match.py b/axelrod/ipd/tests/unit/test_match.py
similarity index 83%
rename from axelrod/tests/unit/test_match.py
rename to axelrod/ipd/tests/unit/test_match.py
index 71913d103..66a204b15 100644
--- a/axelrod/tests/unit/test_match.py
+++ b/axelrod/ipd/tests/unit/test_match.py
@@ -3,8 +3,8 @@
 from collections import Counter

 import axelrod as axl
-from axelrod.deterministic_cache import DeterministicCache
-from axelrod.tests.property import games
+from axelrod.ipd.deterministic_cache import DeterministicCache
+from axelrod.ipd.tests.property import games
 from hypothesis import example, given
 from hypothesis.strategies import assume, floats, integers

@@ -17,7 +17,7 @@ class TestMatch(unittest.TestCase):
     @example(turns=5, game=axl.DefaultGame)
     def test_init(self, turns, game):
         p1, p2 = axl.Cooperator(), axl.Cooperator()
-        match = axl.Match((p1, p2), turns, game=game)
+        match = axl.IpdMatch((p1, p2), turns, game=game)
         self.assertEqual(match.result, [])
         self.assertEqual(match.players, [p1, p2])
         self.assertEqual(match.turns, turns)
@@ -31,7 +31,7 @@ def test_init(self, turns, game):
     @given(prob_end=floats(min_value=0, max_value=1), game=games())
     def test_init_with_prob_end(self, prob_end, game):
         p1, p2 = axl.Cooperator(), axl.Cooperator()
-        match = axl.Match((p1, p2), prob_end=prob_end, game=game)
+        match = axl.IpdMatch((p1, p2), prob_end=prob_end, game=game)
         self.assertEqual(match.result, [])
         self.assertEqual(match.players, [p1, p2])
         self.assertEqual(match.turns, float("inf"))
@@ -49,7 +49,7 @@
     )
     def test_init_with_prob_end_and_turns(self, turns, prob_end, game):
         p1, p2 = axl.Cooperator(), axl.Cooperator()
-        match = axl.Match((p1, p2), turns=turns, prob_end=prob_end, game=game)
+        match = axl.IpdMatch((p1, p2), turns=turns, prob_end=prob_end, game=game)
         self.assertEqual(match.result, [])
         self.assertEqual(match.players, [p1, p2])
         self.assertEqual(match.turns, turns)
@@ -62,7 +62,7 @@
     def test_default_init(self):
         p1, p2 = axl.Cooperator(), axl.Cooperator()
-        match = axl.Match((p1, p2))
+        match = axl.IpdMatch((p1, p2))
         self.assertEqual(match.result, [])
         self.assertEqual(match.players, [p1, p2])
         self.assertEqual(match.turns, axl.DEFAULT_TURNS)
@@ -81,7 +81,7 @@ def test_example_prob_end(self):
         outcomes
         """
         p1, p2 = axl.Cooperator(), axl.Cooperator()
-        match = axl.Match((p1, p2), prob_end=0.5)
+        match = axl.IpdMatch((p1, p2), prob_end=0.5)
         expected_lengths = [3, 1, 5]
         for seed, expected_length in zip(range(3), expected_lengths):
             axl.seed(seed)
@@ -97,7 +97,7 @@
     def test_non_default_attributes(self, turns, game):
         p1, p2 = axl.Cooperator(), axl.Cooperator()
         match_attributes = {"length": 500, "game": game, "noise": 0.5}
-        match = axl.Match(
+        match = axl.IpdMatch(
             (p1, p2), turns, game=game, match_attributes=match_attributes
         )
         self.assertEqual(match.players[0].match_attributes["length"], 500)
@@ -107,7 +107,7 @@
     @example(turns=5)
     def test_len(self, turns):
         p1, p2 = axl.Cooperator(), axl.Cooperator()
-        match = axl.Match((p1, p2), turns)
+        match = axl.IpdMatch((p1, p2), turns)
         self.assertEqual(len(match), turns)

     def test_len_error(self):
@@ -115,7 +115,7 @@
         Length is not defined if it is infinite.
""" p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), prob_end=0.5) + match = axl.IpdMatch((p1, p2), prob_end=0.5) with self.assertRaises(TypeError): len(match) @@ -125,14 +125,14 @@ def test_stochastic(self, p): assume(0 < p < 1) p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), 5) + match = axl.IpdMatch((p1, p2), 5) self.assertFalse(match._stochastic) - match = axl.Match((p1, p2), 5, noise=p) + match = axl.IpdMatch((p1, p2), 5, noise=p) self.assertTrue(match._stochastic) p1 = axl.Random() - match = axl.Match((p1, p2), 5) + match = axl.IpdMatch((p1, p2), 5) self.assertTrue(match._stochastic) @given(p=floats(min_value=0, max_value=1)) @@ -141,25 +141,25 @@ def test_cache_update_required(self, p): assume(0 < p < 1) p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.Match((p1, p2), 5, noise=p) + match = axl.IpdMatch((p1, p2), 5, noise=p) self.assertFalse(match._cache_update_required) cache = DeterministicCache() cache.mutable = False - match = axl.Match((p1, p2), 5, deterministic_cache=cache) + match = axl.IpdMatch((p1, p2), 5, deterministic_cache=cache) self.assertFalse(match._cache_update_required) - match = axl.Match((p1, p2), 5) + match = axl.IpdMatch((p1, p2), 5) self.assertTrue(match._cache_update_required) p1 = axl.Random() - match = axl.Match((p1, p2), 5) + match = axl.IpdMatch((p1, p2), 5) self.assertFalse(match._cache_update_required) def test_play(self): cache = DeterministicCache() players = (axl.Cooperator(), axl.Defector()) - match = axl.Match(players, 3, deterministic_cache=cache) + match = axl.IpdMatch(players, 3, deterministic_cache=cache) expected_result = [(C, D), (C, D), (C, D)] self.assertEqual(match.play(), expected_result) self.assertEqual( @@ -169,7 +169,7 @@ def test_play(self): # a deliberately incorrect result so we can tell it came from the cache expected_result = [(C, C), (D, D), (D, C), (C, C), (C, D)] cache[(axl.Cooperator(), axl.Defector())] = expected_result - match = axl.Match(players, 3, deterministic_cache=cache) + match = axl.IpdMatch(players, 3, deterministic_cache=cache) self.assertEqual(match.play(), expected_result[:3]) def test_cache_grows(self): @@ -180,7 +180,7 @@ def test_cache_grows(self): """ cache = DeterministicCache() players = (axl.Cooperator(), axl.Defector()) - match = axl.Match(players, 3, deterministic_cache=cache) + match = axl.IpdMatch(players, 3, deterministic_cache=cache) expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] expected_result_3_turn = [(C, D), (C, D), (C, D)] self.assertEqual(match.play(), expected_result_3_turn) @@ -200,7 +200,7 @@ def test_cache_doesnt_shrink(self): """ cache = DeterministicCache() players = (axl.Cooperator(), axl.Defector()) - match = axl.Match(players, 5, deterministic_cache=cache) + match = axl.IpdMatch(players, 5, deterministic_cache=cache) expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] expected_result_3_turn = [(C, D), (C, D), (C, D)] self.assertEqual(match.play(), expected_result_5_turn) @@ -215,7 +215,7 @@ def test_cache_doesnt_shrink(self): def test_scores(self): player1 = axl.TitForTat() player2 = axl.Defector() - match = axl.Match((player1, player2), 3) + match = axl.IpdMatch((player1, player2), 3) self.assertEqual(match.scores(), []) match.play() self.assertEqual(match.scores(), [(0, 5), (1, 1), (1, 1)]) @@ -224,12 +224,12 @@ def test_final_score(self): player1 = axl.TitForTat() player2 = axl.Defector() - match = axl.Match((player1, player2), 3) + match = axl.IpdMatch((player1, player2), 3) 
         self.assertEqual(match.final_score(), None)
         match.play()
         self.assertEqual(match.final_score(), (2, 7))

-        match = axl.Match((player2, player1), 3)
+        match = axl.IpdMatch((player2, player1), 3)
         self.assertEqual(match.final_score(), None)
         match.play()
         self.assertEqual(match.final_score(), (7, 2))
@@ -239,12 +239,12 @@ def test_final_score_per_turn(self):
         player1 = axl.TitForTat()
         player2 = axl.Defector()

-        match = axl.Match((player1, player2), turns)
+        match = axl.IpdMatch((player1, player2), turns)
         self.assertEqual(match.final_score_per_turn(), None)
         match.play()
         self.assertEqual(match.final_score_per_turn(), (2 / turns, 7 / turns))

-        match = axl.Match((player2, player1), turns)
+        match = axl.IpdMatch((player2, player1), turns)
         self.assertEqual(match.final_score_per_turn(), None)
         match.play()
         self.assertEqual(match.final_score_per_turn(), (7 / turns, 2 / turns))
@@ -253,18 +253,18 @@ def test_winner(self):
         player1 = axl.TitForTat()
         player2 = axl.Defector()

-        match = axl.Match((player1, player2), 3)
+        match = axl.IpdMatch((player1, player2), 3)
         self.assertEqual(match.winner(), None)
         match.play()
         self.assertEqual(match.winner(), player2)

-        match = axl.Match((player2, player1), 3)
+        match = axl.IpdMatch((player2, player1), 3)
         self.assertEqual(match.winner(), None)
         match.play()
         self.assertEqual(match.winner(), player2)

         player1 = axl.Defector()
-        match = axl.Match((player1, player2), 3)
+        match = axl.IpdMatch((player1, player2), 3)
         self.assertEqual(match.winner(), None)
         match.play()
         self.assertEqual(match.winner(), False)
@@ -274,7 +274,7 @@ def test_cooperation(self):
         player1 = axl.Cooperator()
         player2 = axl.Alternator()

-        match = axl.Match((player1, player2), turns)
+        match = axl.IpdMatch((player1, player2), turns)
         self.assertEqual(match.cooperation(), None)
         match.play()
         self.assertEqual(match.cooperation(), (3, 2))
@@ -282,7 +282,7 @@
         player1 = axl.Alternator()
         player2 = axl.Defector()

-        match = axl.Match((player1, player2), turns)
+        match = axl.IpdMatch((player1, player2), turns)
         self.assertEqual(match.cooperation(), None)
         match.play()
         self.assertEqual(match.cooperation(), (2, 0))
@@ -292,7 +292,7 @@ def test_normalised_cooperation(self):
         player1 = axl.Cooperator()
         player2 = axl.Alternator()

-        match = axl.Match((player1, player2), turns)
+        match = axl.IpdMatch((player1, player2), turns)
         self.assertEqual(match.normalised_cooperation(), None)
         match.play()
         self.assertEqual(match.normalised_cooperation(), (3 / turns, 2 / turns))
@@ -300,7 +300,7 @@
         player1 = axl.Alternator()
         player2 = axl.Defector()

-        match = axl.Match((player1, player2), turns)
+        match = axl.IpdMatch((player1, player2), turns)
         self.assertEqual(match.normalised_cooperation(), None)
         match.play()
         self.assertEqual(match.normalised_cooperation(), (2 / turns, 0 / turns))
@@ -310,7 +310,7 @@ def test_state_distribution(self):
         player1 = axl.Cooperator()
         player2 = axl.Alternator()

-        match = axl.Match((player1, player2), turns)
+        match = axl.IpdMatch((player1, player2), turns)
         self.assertEqual(match.state_distribution(), None)

         match.play()
@@ -320,7 +320,7 @@
         player1 = axl.Alternator()
         player2 = axl.Defector()

-        match = axl.Match((player1, player2), turns)
+        match = axl.IpdMatch((player1, player2), turns)
         self.assertEqual(match.state_distribution(), None)

         match.play()
@@ -332,7 +332,7 @@ def test_normalised_state_distribution(self):
         player1 = axl.Cooperator()
         player2 = axl.Alternator()

-        match = axl.Match((player1, player2), turns)
+        match = axl.IpdMatch((player1, player2), turns)
         self.assertEqual(match.normalised_state_distribution(), None)

         match.play()
@@ -342,7 +342,7 @@
         player1 = axl.Alternator()
         player2 = axl.Defector()

-        match = axl.Match((player1, player2), turns)
+        match = axl.IpdMatch((player1, player2), turns)
         self.assertEqual(match.normalised_state_distribution(), None)

         match.play()
@@ -351,7 +351,7 @@
     def test_sparklines(self):
         players = (axl.Cooperator(), axl.Alternator())
-        match = axl.Match(players, 4)
+        match = axl.IpdMatch(players, 4)
         match.play()
         expected_sparklines = "████\n█ █ "
         self.assertEqual(match.sparklines(), expected_sparklines)
@@ -368,10 +368,10 @@ def test_sample_length(self):
             (3, 0.4, 1),
         ]:
             axl.seed(seed)
-            self.assertEqual(axl.match.sample_length(prob_end), expected_length)
+            self.assertEqual(axl.ipd.match.sample_length(prob_end), expected_length)

     def test_sample_with_0_prob(self):
-        self.assertEqual(axl.match.sample_length(0), float("inf"))
+        self.assertEqual(axl.ipd.match.sample_length(0), float("inf"))

     def test_sample_with_1_prob(self):
-        self.assertEqual(axl.match.sample_length(1), 1)
+        self.assertEqual(axl.ipd.match.sample_length(1), 1)
diff --git a/axelrod/tests/unit/test_match_generator.py b/axelrod/ipd/tests/unit/test_match_generator.py
similarity index 92%
rename from axelrod/tests/unit/test_match_generator.py
rename to axelrod/ipd/tests/unit/test_match_generator.py
index 27faa78c0..682bd4b85 100644
--- a/axelrod/tests/unit/test_match_generator.py
+++ b/axelrod/ipd/tests/unit/test_match_generator.py
@@ -1,7 +1,7 @@
 import unittest

 import axelrod as axl
-from axelrod.match_generator import graph_is_connected
+from axelrod.ipd.match_generator import graph_is_connected
 from hypothesis import example, given, settings
 from hypothesis.strategies import floats, integers

@@ -15,7 +15,7 @@
 ]
 test_turns = 100
 test_repetitions = 20
-test_game = axl.Game()
+test_game = axl.IpdGame()


 class TestMatchGenerator(unittest.TestCase):
@@ -40,8 +40,8 @@ def test_build_single_match_params(self):
         # Check that can build a match
         players = [axl.Cooperator(), axl.Defector()]
         match_params["players"] = players
-        match = axl.Match(**match_params)
-        self.assertIsInstance(match, axl.Match)
+        match = axl.IpdMatch(**match_params)
+        self.assertIsInstance(match, axl.IpdMatch)
         self.assertEqual(len(match), test_turns)

     def test_build_single_match_params_with_noise(self):
@@ -62,8 +62,8 @@ def test_build_single_match_params_with_noise(self):
         # Check that can build a match
         players = [axl.Cooperator(), axl.Defector()]
         match_params["players"] = players
-        match = axl.Match(**match_params)
-        self.assertIsInstance(match, axl.Match)
+        match = axl.IpdMatch(**match_params)
+        self.assertIsInstance(match, axl.IpdMatch)
         self.assertEqual(len(match), test_turns)

     def test_build_single_match_params_with_prob_end(self):
@@ -83,8 +83,8 @@ def test_build_single_match_params_with_prob_end(self):
         # Check that can build a match
         players = [axl.Cooperator(), axl.Defector()]
         match_params["players"] = players
-        match = axl.Match(**match_params)
-        self.assertIsInstance(match, axl.Match)
+        match = axl.IpdMatch(**match_params)
+        self.assertIsInstance(match, axl.IpdMatch)
         with self.assertRaises(TypeError):
             len(match)
@@ -106,8 +106,8 @@ def test_build_single_match_params_with_prob_end_and_noise(self):
         # Check that can build a match
         players = [axl.Cooperator(), axl.Defector()]
         match_params["players"] = players
-        match = axl.Match(**match_params)
-        self.assertIsInstance(match, axl.Match)
+        match = axl.IpdMatch(**match_params)
+        self.assertIsInstance(match, axl.IpdMatch)
         with self.assertRaises(TypeError):
             len(match)
@@ -129,8 +129,8 @@ def test_build_single_match_params_with_prob_end_and_turns(self):
         # Check that can build a match
         players = [axl.Cooperator(), axl.Defector()]
         match_params["players"] = players
-        match = axl.Match(**match_params)
-        self.assertIsInstance(match, axl.Match)
+        match = axl.IpdMatch(**match_params)
+        self.assertIsInstance(match, axl.IpdMatch)
         self.assertIsInstance(len(match), int)
         self.assertGreater(len(match), 0)
         self.assertLessEqual(len(match), 10)
@@ -154,8 +154,8 @@ def test_build_single_match_params_with_fixed_length_unknown(self):
         # Check that can build a match
         players = [axl.Cooperator(), axl.Defector()]
         match_params["players"] = players
-        match = axl.Match(**match_params)
-        self.assertIsInstance(match, axl.Match)
+        match = axl.IpdMatch(**match_params)
+        self.assertIsInstance(match, axl.IpdMatch)
         self.assertEqual(len(match), 5)
         self.assertEqual(match.match_attributes, {"length": float("inf")})
diff --git a/axelrod/tests/unit/test_mock_player.py b/axelrod/ipd/tests/unit/test_mock_player.py
similarity index 87%
rename from axelrod/tests/unit/test_mock_player.py
rename to axelrod/ipd/tests/unit/test_mock_player.py
index 457e77711..a089d5d6c 100644
--- a/axelrod/tests/unit/test_mock_player.py
+++ b/axelrod/ipd/tests/unit/test_mock_player.py
@@ -9,12 +9,12 @@ class TestMockPlayer(unittest.TestCase):
     def test_strategy(self):
         for action in [C, D]:
             m = axl.MockPlayer(actions=[action])
-            p2 = axl.Player()
+            p2 = axl.IpdPlayer()
             self.assertEqual(action, m.strategy(p2))

         actions = [C, C, D, D, C, C]
         m = axl.MockPlayer(actions=actions)
-        p2 = axl.Player()
+        p2 = axl.IpdPlayer()
         for action in actions:
             self.assertEqual(action, m.strategy(p2))

diff --git a/axelrod/tests/unit/test_moran.py b/axelrod/ipd/tests/unit/test_moran.py
similarity index 99%
rename from axelrod/tests/unit/test_moran.py
rename to axelrod/ipd/tests/unit/test_moran.py
index d972b288f..8e3778158 100644
--- a/axelrod/tests/unit/test_moran.py
+++ b/axelrod/ipd/tests/unit/test_moran.py
@@ -5,8 +5,8 @@
 import matplotlib.pyplot as plt

 import axelrod as axl
-from axelrod.moran import fitness_proportionate_selection
-from axelrod.tests.property import strategy_lists
+from axelrod.ipd.moran import fitness_proportionate_selection
+from axelrod.ipd.tests.property import strategy_lists
 from hypothesis import example, given, settings

@@ -169,7 +169,7 @@ def test_different_game(self):
         # Possible for Cooperator to become fixed when using a different game
         p1, p2 = axl.Cooperator(), axl.Defector()
         axl.seed(0)
-        game = axl.Game(r=4, p=2, s=1, t=6)
+        game = axl.IpdGame(r=4, p=2, s=1, t=6)
         mp = axl.MoranProcess((p1, p2), turns=5, game=game)
         populations = mp.play()
         self.assertEqual(mp.winning_strategy_name, str(p1))
diff --git a/axelrod/tests/unit/test_pickling.py b/axelrod/ipd/tests/unit/test_pickling.py
similarity index 63%
rename from axelrod/tests/unit/test_pickling.py
rename to axelrod/ipd/tests/unit/test_pickling.py
index 1cb14101d..b588c1b81 100644
--- a/axelrod/tests/unit/test_pickling.py
+++ b/axelrod/ipd/tests/unit/test_pickling.py
@@ -11,10 +11,10 @@

 # First set: special cases
-PointerToWrappedStrategy = axl.strategy_transformers.FlipTransformer()(axl.strategy_transformers.FlipTransformer()(axl.Cooperator))
+PointerToWrappedStrategy = axl.ipd.strategy_transformers.FlipTransformer()(axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator))


-class MyDefector(axl.Player):
+class MyDefector(axl.IpdPlayer):
     def __init__(self):
         super(MyDefector, self).__init__()
@@ -22,40 +22,40 @@ def strategy(self, opponent):
         return D


-PointerToWrappedClassNotInStrategies = axl.strategy_transformers.FlipTransformer()(
-    axl.strategy_transformers.FlipTransformer()(MyDefector)
+PointerToWrappedClassNotInStrategies = axl.ipd.strategy_transformers.FlipTransformer()(
+    axl.ipd.strategy_transformers.FlipTransformer()(MyDefector)
 )


-@axl.strategy_transformers.InitialTransformer((D, C, D), name_prefix=None)
-@axl.strategy_transformers.DualTransformer(name_prefix=None)
-@axl.strategy_transformers.FlipTransformer(name_prefix=None)
-@axl.strategy_transformers.DualTransformer(name_prefix=None)
+@axl.ipd.strategy_transformers.InitialTransformer((D, C, D), name_prefix=None)
+@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None)
+@axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)
+@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None)
 class InterspersedDualTransformersNamePrefixAbsent(axl.Cooperator):
     pass


-@axl.strategy_transformers.IdentityTransformer((D, D, C))
-@axl.strategy_transformers.DualTransformer()
-@axl.strategy_transformers.FlipTransformer()
-@axl.strategy_transformers.DualTransformer()
+@axl.ipd.strategy_transformers.IdentityTransformer((D, D, C))
+@axl.ipd.strategy_transformers.DualTransformer()
+@axl.ipd.strategy_transformers.FlipTransformer()
+@axl.ipd.strategy_transformers.DualTransformer()
 class InterspersedDualTransformersNamePrefixPresent(axl.Cooperator):
     pass


-@axl.strategy_transformers.FlipTransformer()
-class MyCooperator(axl.Player):
+@axl.ipd.strategy_transformers.FlipTransformer()
+class MyCooperator(axl.IpdPlayer):
     def strategy(self, opponent):
         return C


-@axl.strategy_transformers.FlipTransformer()
-@axl.strategy_transformers.FlipTransformer()
+@axl.ipd.strategy_transformers.FlipTransformer()
+@axl.ipd.strategy_transformers.FlipTransformer()
 class DoubleFlip(axl.Cooperator):
     pass


-@axl.strategy_transformers.FlipTransformer()
+@axl.ipd.strategy_transformers.FlipTransformer()
 class SingleFlip(axl.Cooperator):
     pass

@@ -63,47 +63,47 @@ class SingleFlip(axl.Cooperator):

 # Second set: All the transformers

-@axl.strategy_transformers.ApologyTransformer([D], [C], name_prefix=None)
+@axl.ipd.strategy_transformers.ApologyTransformer([D], [C], name_prefix=None)
 class Apology(axl.Cooperator):
     pass


-@axl.strategy_transformers.DeadlockBreakingTransformer(name_prefix=None)
+@axl.ipd.strategy_transformers.DeadlockBreakingTransformer(name_prefix=None)
 class DeadlockBreaking(axl.Cooperator):
     pass


-@axl.strategy_transformers.DualTransformer(name_prefix=None)
+@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None)
 class Dual(axl.Cooperator):
     pass


-@axl.strategy_transformers.FlipTransformer(name_prefix=None)
+@axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)
 class Flip(axl.Cooperator):
     pass


-@axl.strategy_transformers.FinalTransformer((D, D), name_prefix=None)
+@axl.ipd.strategy_transformers.FinalTransformer((D, D), name_prefix=None)
 class Final(axl.Cooperator):
     pass


-@axl.strategy_transformers.ForgiverTransformer(0.2, name_prefix=None)
+@axl.ipd.strategy_transformers.ForgiverTransformer(0.2, name_prefix=None)
 class Forgiver(axl.Cooperator):
     pass


-@axl.strategy_transformers.GrudgeTransformer(3, name_prefix=None)
+@axl.ipd.strategy_transformers.GrudgeTransformer(3, name_prefix=None)
 class Grudge(axl.Cooperator):
     pass


-@axl.strategy_transformers.InitialTransformer((C, D), name_prefix=None)
+@axl.ipd.strategy_transformers.InitialTransformer((C, D), name_prefix=None) class Initial(axl.Cooperator): pass -@axl.strategy_transformers.JossAnnTransformer((0.2, 0.2), name_prefix=None) +@axl.ipd.strategy_transformers.JossAnnTransformer((0.2, 0.2), name_prefix=None) class JossAnn(axl.Cooperator): pass @@ -112,42 +112,42 @@ class JossAnn(axl.Cooperator): probability = [0.2, 0.3] -@axl.strategy_transformers.MixedTransformer(probability, strategies, name_prefix=None) +@axl.ipd.strategy_transformers.MixedTransformer(probability, strategies, name_prefix=None) class Mixed(axl.Cooperator): pass -@axl.strategy_transformers.NiceTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.NiceTransformer(name_prefix=None) class Nice(axl.Cooperator): pass -@axl.strategy_transformers.NoisyTransformer(0.2, name_prefix=None) +@axl.ipd.strategy_transformers.NoisyTransformer(0.2, name_prefix=None) class Noisy(axl.Cooperator): pass -@axl.strategy_transformers.RetaliationTransformer(3, name_prefix=None) +@axl.ipd.strategy_transformers.RetaliationTransformer(3, name_prefix=None) class Retaliation(axl.Cooperator): pass -@axl.strategy_transformers.RetaliateUntilApologyTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.RetaliateUntilApologyTransformer(name_prefix=None) class RetaliateUntilApology(axl.Cooperator): pass -@axl.strategy_transformers.TrackHistoryTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.TrackHistoryTransformer(name_prefix=None) class TrackHistory(axl.Cooperator): pass -@axl.strategy_transformers.IdentityTransformer() +@axl.ipd.strategy_transformers.IdentityTransformer() class Identity(axl.Cooperator): pass -@axl.strategy_transformers.IdentityTransformer(name_prefix=None) +@axl.ipd.strategy_transformers.IdentityTransformer(name_prefix=None) class TransformedThue(axl.ThueMorse): pass @@ -160,7 +160,7 @@ def __init__(self): super().__init__(team=team) -TransformedMetaThue = axl.strategy_transformers.IdentityTransformer(name_prefix=None)(MetaThue) +TransformedMetaThue = axl.ipd.strategy_transformers.IdentityTransformer(name_prefix=None)(MetaThue) transformed_no_prefix = [ @@ -183,22 +183,22 @@ def __init__(self): ] transformer_instances = [ - axl.strategy_transformers.ApologyTransformer([D], [C]), - axl.strategy_transformers.DeadlockBreakingTransformer(), - axl.strategy_transformers.DualTransformer(), - axl.strategy_transformers.FlipTransformer(), - axl.strategy_transformers.FinalTransformer((D, D)), - axl.strategy_transformers.ForgiverTransformer(0.2), - axl.strategy_transformers.GrudgeTransformer(3), - axl.strategy_transformers.InitialTransformer((C, D)), - axl.strategy_transformers.JossAnnTransformer((0.2, 0.6)), - axl.strategy_transformers.MixedTransformer(probability, strategies), - axl.strategy_transformers.NiceTransformer(), - axl.strategy_transformers.NoisyTransformer(0.2), - axl.strategy_transformers.RetaliationTransformer(3), - axl.strategy_transformers.RetaliateUntilApologyTransformer(), - axl.strategy_transformers.TrackHistoryTransformer(), - axl.strategy_transformers.IdentityTransformer(), + axl.ipd.strategy_transformers.ApologyTransformer([D], [C]), + axl.ipd.strategy_transformers.DeadlockBreakingTransformer(), + axl.ipd.strategy_transformers.DualTransformer(), + axl.ipd.strategy_transformers.FlipTransformer(), + axl.ipd.strategy_transformers.FinalTransformer((D, D)), + axl.ipd.strategy_transformers.ForgiverTransformer(0.2), + axl.ipd.strategy_transformers.GrudgeTransformer(3), + axl.ipd.strategy_transformers.InitialTransformer((C, D)), + 
axl.ipd.strategy_transformers.JossAnnTransformer((0.2, 0.6)), + axl.ipd.strategy_transformers.MixedTransformer(probability, strategies), + axl.ipd.strategy_transformers.NiceTransformer(), + axl.ipd.strategy_transformers.NoisyTransformer(0.2), + axl.ipd.strategy_transformers.RetaliationTransformer(3), + axl.ipd.strategy_transformers.RetaliateUntilApologyTransformer(), + axl.ipd.strategy_transformers.TrackHistoryTransformer(), + axl.ipd.strategy_transformers.IdentityTransformer(), ] @@ -219,11 +219,11 @@ def assert_original_equals_pickled(self, player_, turns=10): opponent_2 = opponent_class() axl.seed(0) - match_1 = axl.Match((player, opponent_1), turns=turns) + match_1 = axl.IpdMatch((player, opponent_1), turns=turns) result_1 = match_1.play() axl.seed(0) - match_2 = axl.Match((clone, opponent_2), turns=turns) + match_2 = axl.IpdMatch((clone, opponent_2), turns=turns) result_2 = match_2.play() self.assertEqual(result_1, result_2) @@ -236,7 +236,7 @@ def test_parameterized_player(self): self.assert_original_equals_pickled(player) def test_sequence_player(self): - inline_transformed_thue = axl.strategy_transformers.IdentityTransformer(name_prefix="Transformed")(axl.ThueMorse)() + inline_transformed_thue = axl.ipd.strategy_transformers.IdentityTransformer(name_prefix="Transformed")(axl.ThueMorse)() for player in [axl.ThueMorse(), axl.ThueMorseInverse(), MetaThue(), TransformedMetaThue(), inline_transformed_thue, TransformedThue(), ]: @@ -246,14 +246,14 @@ def test_sequence_player(self): axl.seed(10) player.reset() opponent = opponent_class() - match_1 = axl.Match((player, opponent), turns=20) + match_1 = axl.IpdMatch((player, opponent), turns=20) _ = match_1.play() self.assert_equals_instance_from_pickling(player) def test_final_transformer_called(self): player = axl.Alexei() copy = pickle.loads(pickle.dumps(player)) - match = axl.Match((player, copy), turns=3) + match = axl.IpdMatch((player, copy), turns=3) results = match.play() self.assertEqual(results, [(C, C), (C, C), (D, D)]) @@ -275,9 +275,9 @@ def test_pickling_all_transformers_as_instance_called_on_a_class(self): self.assert_original_equals_pickled(player) def test_created_on_the_spot_multiple_transformers(self): - player_class = axl.strategy_transformers.FlipTransformer()(axl.Cooperator) - player_class = axl.strategy_transformers.DualTransformer()(player_class) - player = axl.strategy_transformers.FinalTransformer((C, D))(player_class)() + player_class = axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator) + player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) + player = axl.ipd.strategy_transformers.FinalTransformer((C, D))(player_class)() self.assert_original_equals_pickled(player) @@ -293,10 +293,10 @@ def test_dual_transformer_regression_test(self): self.assert_original_equals_pickled(player) player_class = axl.WinStayLoseShift - player_class = axl.strategy_transformers.DualTransformer()(player_class) - player_class = axl.strategy_transformers.InitialTransformer((C, D))(player_class) - player_class = axl.strategy_transformers.DualTransformer()(player_class) - player_class = axl.strategy_transformers.TrackHistoryTransformer()(player_class) + player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) + player_class = axl.ipd.strategy_transformers.InitialTransformer((C, D))(player_class) + player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) + player_class = axl.ipd.strategy_transformers.TrackHistoryTransformer()(player_class) 
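# Editor's note (illustrative, not part of the patch): chaining transformer
# calls like the four above is equivalent to stacking them as decorators,
# which apply bottom-up; the class name here is hypothetical:
#
#     @axl.ipd.strategy_transformers.TrackHistoryTransformer()
#     @axl.ipd.strategy_transformers.DualTransformer()
#     @axl.ipd.strategy_transformers.InitialTransformer((C, D))
#     @axl.ipd.strategy_transformers.DualTransformer()
#     class TrackedDualWSLS(axl.WinStayLoseShift):
#         pass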
interspersed_dual_transformers = player_class() @@ -318,7 +318,7 @@ def test_class_and_instance_name_different_built_from_player_class(self): player = MyCooperator() class_names = [class_.__name__ for class_ in MyCooperator.mro()] self.assertEqual( - class_names, ["FlippedMyCooperator", "MyCooperator", "Player", "object"] + class_names, ["FlippedMyCooperator", "MyCooperator", "IpdPlayer", "object"] ) self.assert_original_equals_pickled(player) @@ -333,14 +333,14 @@ def test_pointer_to_class_derived_from_strategy(self): "FlippedFlippedCooperator", "FlippedCooperator", "Cooperator", - "Player", + "IpdPlayer", "object", ], ) self.assert_original_equals_pickled(player) - def test_pointer_to_class_derived_from_Player(self): + def test_pointer_to_class_derived_from_IpdPlayer(self): player = PointerToWrappedClassNotInStrategies() class_names = [class_.__name__ for class_ in player.__class__.mro()] @@ -350,7 +350,7 @@ def test_pointer_to_class_derived_from_Player(self): "FlippedFlippedMyDefector", "FlippedMyDefector", "MyDefector", - "Player", + "IpdPlayer", "object", ], ) @@ -369,7 +369,7 @@ class LocalCooperator(axl.Cooperator): self.assertRaises(AttributeError, pickle.dumps, un_transformed) - player = axl.strategy_transformers.FlipTransformer()(LocalCooperator)() + player = axl.ipd.strategy_transformers.FlipTransformer()(LocalCooperator)() pickled = pickle.dumps(player) self.assertRaises(AttributeError, pickle.loads, pickled) @@ -378,17 +378,17 @@ def test_with_various_name_prefixes(self): self.assertEqual(no_prefix.__class__.__name__, "Flip") self.assert_original_equals_pickled(no_prefix) - default_prefix = axl.strategy_transformers.FlipTransformer()(axl.Cooperator)() + default_prefix = axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator)() self.assertEqual(default_prefix.__class__.__name__, "FlippedCooperator") self.assert_original_equals_pickled(default_prefix) - fliptastic = axl.strategy_transformers.FlipTransformer(name_prefix="Fliptastic") + fliptastic = axl.ipd.strategy_transformers.FlipTransformer(name_prefix="Fliptastic") new_prefix = fliptastic(axl.Cooperator)() self.assertEqual(new_prefix.__class__.__name__, "FliptasticCooperator") self.assert_original_equals_pickled(new_prefix) def test_dynamic_class_no_name_prefix(self): - player = axl.strategy_transformers.FlipTransformer(name_prefix=None)(axl.Cooperator)() + player = axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)(axl.Cooperator)() self.assertEqual(player.__class__.__name__, "Cooperator") self.assert_original_equals_pickled(player) diff --git a/axelrod/tests/unit/test_plot.py b/axelrod/ipd/tests/unit/test_plot.py similarity index 91% rename from axelrod/tests/unit/test_plot.py rename to axelrod/ipd/tests/unit/test_plot.py index 89d40d8d9..14287e869 100644 --- a/axelrod/tests/unit/test_plot.py +++ b/axelrod/ipd/tests/unit/test_plot.py @@ -8,13 +8,13 @@ from numpy import mean import axelrod as axl -from axelrod.load_data_ import axl_filename +from axelrod.ipd.load_data_ import axl_filename class TestPlot(unittest.TestCase): @classmethod def setUpClass(cls): - path = pathlib.Path("test_outputs/test_results.csv") + path = pathlib.Path("../test_outputs/test_results.csv") cls.filename = axl_filename(path) cls.players = [axl.Alternator(), axl.TitForTat(), axl.Defector()] @@ -62,31 +62,31 @@ def setUpClass(cls): ) def test_default_cmap(self): - cmap = axl.plot.default_cmap("0.0") + cmap = axl.ipd.plot.default_cmap("0.0") self.assertEqual(cmap, "YlGnBu") - cmap = axl.plot.default_cmap("1.3alpha") + cmap = 
axl.ipd.plot.default_cmap("1.3alpha") self.assertEqual(cmap, "YlGnBu") - cmap = axl.plot.default_cmap("1.4.99") + cmap = axl.ipd.plot.default_cmap("1.4.99") self.assertEqual(cmap, "YlGnBu") - cmap = axl.plot.default_cmap("1.4") + cmap = axl.ipd.plot.default_cmap("1.4") self.assertEqual(cmap, "YlGnBu") - cmap = axl.plot.default_cmap() + cmap = axl.ipd.plot.default_cmap() self.assertEqual(cmap, "viridis") - cmap = axl.plot.default_cmap("1.5") + cmap = axl.ipd.plot.default_cmap("1.5") self.assertEqual(cmap, "viridis") - cmap = axl.plot.default_cmap("1.5beta") + cmap = axl.ipd.plot.default_cmap("1.5beta") self.assertEqual(cmap, "viridis") - cmap = axl.plot.default_cmap("1.7") + cmap = axl.ipd.plot.default_cmap("1.7") self.assertEqual(cmap, "viridis") - cmap = axl.plot.default_cmap("2.0") + cmap = axl.ipd.plot.default_cmap("2.0") self.assertEqual(cmap, "viridis") def test_init(self): @@ -96,7 +96,7 @@ def test_init(self): def test_init_from_resulsetfromfile(self): tmp_file = tempfile.NamedTemporaryFile(mode="w", delete=False) players = [axl.Cooperator(), axl.TitForTat(), axl.Defector()] - tournament = axl.Tournament(players=players, turns=2, repetitions=2) + tournament = axl.IpdTournament(players=players, turns=2, repetitions=2) tournament.play(filename=tmp_file.name, progress_bar=False) tmp_file.close() rs = axl.ResultSet(tmp_file.name, players, 2, progress_bar=False) @@ -248,10 +248,10 @@ def test_all_plots(self): plot = axl.Plot(self.test_result_set) # Test that this method does not crash. self.assertIsNone( - plot.save_all_plots(prefix="test_outputs/", progress_bar=False) + plot.save_all_plots(prefix="../test_outputs/", progress_bar=False) ) self.assertIsNone( plot.save_all_plots( - prefix="test_outputs/", title_prefix="A prefix", progress_bar=True + prefix="../test_outputs/", title_prefix="A prefix", progress_bar=True ) ) diff --git a/axelrod/tests/unit/test_property.py b/axelrod/ipd/tests/unit/test_property.py similarity index 84% rename from axelrod/tests/unit/test_property.py rename to axelrod/ipd/tests/unit/test_property.py index fbf89cca2..ac5f16d94 100644 --- a/axelrod/tests/unit/test_property.py +++ b/axelrod/ipd/tests/unit/test_property.py @@ -1,7 +1,7 @@ import unittest import axelrod as axl -from axelrod.tests.property import ( +from axelrod.ipd.tests.property import ( games, matches, prob_end_spatial_tournaments, @@ -21,7 +21,7 @@ def test_call(self): strategies = strategy_lists().example() self.assertIsInstance(strategies, list) for p in strategies: - self.assertIsInstance(p(), axl.Player) + self.assertIsInstance(p(), axl.IpdPlayer) @given(strategies=strategy_lists(min_size=1, max_size=50)) @settings(max_examples=5) @@ -30,7 +30,7 @@ def test_decorator(self, strategies): self.assertGreaterEqual(len(strategies), 1) self.assertLessEqual(len(strategies), 50) for strategy in strategies: - self.assertIsInstance(strategy(), axl.Player) + self.assertIsInstance(strategy(), axl.IpdPlayer) @given(strategies=strategy_lists(strategies=axl.basic_strategies)) @settings(max_examples=5) @@ -39,7 +39,7 @@ def test_decorator_with_given_strategies(self, strategies): basic_player_names = [str(s()) for s in axl.basic_strategies] for strategy in strategies: player = strategy() - self.assertIsInstance(player, axl.Player) + self.assertIsInstance(player, axl.IpdPlayer) self.assertIn(str(player), basic_player_names) @@ -50,12 +50,12 @@ class TestMatch(unittest.TestCase): def test_call(self): match = matches().example() - self.assertIsInstance(match, axl.Match) + self.assertIsInstance(match, 
axl.IpdMatch) @given(match=matches(min_turns=10, max_turns=50, min_noise=0, max_noise=1)) @settings(max_examples=5) def test_decorator(self, match): - self.assertIsInstance(match, axl.Match) + self.assertIsInstance(match, axl.IpdMatch) self.assertGreaterEqual(len(match), 10) self.assertLessEqual(len(match), 50) self.assertGreaterEqual(match.noise, 0) @@ -64,7 +64,7 @@ def test_decorator(self, match): @given(match=matches(min_turns=10, max_turns=50, min_noise=0, max_noise=0)) @settings(max_examples=5) def test_decorator_with_no_noise(self, match): - self.assertIsInstance(match, axl.Match) + self.assertIsInstance(match, axl.IpdMatch) self.assertGreaterEqual(len(match), 10) self.assertLessEqual(len(match), 50) self.assertEqual(match.noise, 0) @@ -73,7 +73,7 @@ def test_decorator_with_no_noise(self, match): class TestTournament(unittest.TestCase): def test_call(self): tournament = tournaments().example() - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) @given( tournament=tournaments( @@ -88,7 +88,7 @@ def test_call(self): ) @settings(max_examples=5) def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) self.assertLessEqual(tournament.turns, 50) self.assertGreaterEqual(tournament.turns, 2) self.assertLessEqual(tournament.noise, 1) @@ -99,7 +99,7 @@ def test_decorator(self, tournament): @given(tournament=tournaments(strategies=axl.basic_strategies, max_size=3)) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) basic_player_names = [str(s()) for s in axl.basic_strategies] for p in tournament.players: self.assertIn(str(p), basic_player_names) @@ -108,7 +108,7 @@ def test_decorator_with_given_strategies(self, tournament): class TestProbEndTournament(unittest.TestCase): def test_call(self): tournament = tournaments().example() - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) @given( tournament=prob_end_tournaments( @@ -123,7 +123,7 @@ def test_call(self): ) @settings(max_examples=5) def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) self.assertLessEqual(tournament.prob_end, 1) self.assertGreaterEqual(tournament.prob_end, 0) self.assertLessEqual(tournament.noise, 1) @@ -134,7 +134,7 @@ def test_decorator(self, tournament): @given(tournament=prob_end_tournaments(strategies=axl.basic_strategies, max_size=3)) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) basic_player_names = [str(s()) for s in axl.basic_strategies] for p in tournament.players: self.assertIn(str(p), basic_player_names) @@ -143,7 +143,7 @@ def test_decorator_with_given_strategies(self, tournament): class TestSpatialTournament(unittest.TestCase): def test_call(self): tournament = spatial_tournaments().example() - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) @given( tournament=spatial_tournaments( @@ -158,7 +158,7 @@ def test_call(self): ) @settings(max_examples=5) def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, 
axl.IpdTournament) self.assertLessEqual(tournament.turns, 50) self.assertGreaterEqual(tournament.turns, 2) self.assertLessEqual(tournament.noise, 1) @@ -169,7 +169,7 @@ def test_decorator(self, tournament): @given(tournament=spatial_tournaments(strategies=axl.basic_strategies, max_size=3)) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) basic_player_names = [str(s()) for s in axl.basic_strategies] for p in tournament.players: self.assertIn(str(p), basic_player_names) @@ -178,7 +178,7 @@ def test_decorator_with_given_strategies(self, tournament): class TestProbEndSpatialTournament(unittest.TestCase): def test_call(self): tournament = prob_end_spatial_tournaments().example() - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) @given( tournament=prob_end_spatial_tournaments( @@ -193,7 +193,7 @@ def test_call(self): ) @settings(max_examples=5) def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) self.assertLessEqual(tournament.prob_end, 1) self.assertGreaterEqual(tournament.prob_end, 0) self.assertLessEqual(tournament.noise, 1) @@ -208,7 +208,7 @@ def test_decorator(self, tournament): ) @settings(max_examples=5) def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.Tournament) + self.assertIsInstance(tournament, axl.IpdTournament) basic_player_names = [str(s()) for s in axl.basic_strategies] for p in tournament.players: self.assertIn(str(p), basic_player_names) @@ -217,16 +217,16 @@ def test_decorator_with_given_strategies(self, tournament): class TestGame(unittest.TestCase): def test_call(self): game = games().example() - self.assertIsInstance(game, axl.Game) + self.assertIsInstance(game, axl.IpdGame) @given(game=games()) @settings(max_examples=5) def test_decorator(self, game): - self.assertIsInstance(game, axl.Game) + self.assertIsInstance(game, axl.IpdGame) r, p, s, t = game.RPST() self.assertTrue((2 * r) > (t + s) and (t > r > p > s)) @given(game=games(prisoners_dilemma=False)) @settings(max_examples=5) def test_decorator_unconstrained(self, game): - self.assertIsInstance(game, axl.Game) + self.assertIsInstance(game, axl.IpdGame) diff --git a/axelrod/tests/unit/test_random_.py b/axelrod/ipd/tests/unit/test_random_.py similarity index 100% rename from axelrod/tests/unit/test_random_.py rename to axelrod/ipd/tests/unit/test_random_.py diff --git a/axelrod/tests/unit/test_resultset.py b/axelrod/ipd/tests/unit/test_resultset.py similarity index 98% rename from axelrod/tests/unit/test_resultset.py rename to axelrod/ipd/tests/unit/test_resultset.py index 8bd0be3af..fdba8c99b 100644 --- a/axelrod/tests/unit/test_resultset.py +++ b/axelrod/ipd/tests/unit/test_resultset.py @@ -2,14 +2,13 @@ import csv from collections import Counter import pandas as pd -from dask.dataframe.core import DataFrame from numpy import mean, nanmedian, std import pathlib import axelrod as axl -from axelrod.load_data_ import axl_filename -from axelrod.result_set import create_counter_dict -from axelrod.tests.property import prob_end_tournaments, tournaments +from axelrod.ipd.load_data_ import axl_filename +from axelrod.ipd.result_set import create_counter_dict +from axelrod.ipd.tests.property import tournaments from hypothesis import given, settings @@ -20,7 +19,7 @@ class 
TestResultSet(unittest.TestCase): @classmethod def setUpClass(cls): - path = pathlib.Path("test_outputs/test_results.csv") + path = pathlib.Path("../test_outputs/test_results.csv") cls.filename = str(axl_filename(path)) cls.players = [axl.Alternator(), axl.TitForTat(), axl.Defector()] @@ -497,7 +496,7 @@ def test_self_interaction_for_random_strategies(self): # the copies of the strategy. axl.seed(0) players = [s() for s in axl.demo_strategies] - tournament = axl.Tournament(players, repetitions=2, turns=5) + tournament = axl.IpdTournament(players, repetitions=2, turns=5) results = tournament.play(progress_bar=False) self.assertEqual(results.payoff_diffs_means[-1][-1], 0.0) @@ -511,7 +510,7 @@ def test_equality(self): self.assertEqual(rs_sets[0], rs_sets[1]) players = [s() for s in axl.demo_strategies] - tournament = axl.Tournament(players, repetitions=2, turns=5) + tournament = axl.IpdTournament(players, repetitions=2, turns=5) results = tournament.play(progress_bar=False) self.assertNotEqual(results, rs_sets[0]) @@ -572,7 +571,7 @@ def test_summarise_regression_test(self): axl.TitForTat(), axl.Grudger(), ] - tournament = axl.Tournament(players, turns=10, repetitions=3) + tournament = axl.IpdTournament(players, turns=10, repetitions=3) results = tournament.play() summary = [ @@ -668,7 +667,7 @@ def test_write_summary(self): class TestDecorator(unittest.TestCase): def test_update_progress_bar(self): method = lambda x: None - self.assertEqual(axl.result_set.update_progress_bar(method)(1), None) + self.assertEqual(axl.ipd.result_set.update_progress_bar(method)(1), None) class TestResultSetSpatialStructure(TestResultSet): @@ -679,7 +678,7 @@ class TestResultSetSpatialStructure(TestResultSet): @classmethod def setUpClass(cls): - path = pathlib.Path("test_outputs/test_results_spatial.csv") + path = pathlib.Path("../test_outputs/test_results_spatial.csv") cls.filename = str(axl_filename(path)) cls.players = [axl.Alternator(), axl.TitForTat(), axl.Defector()] cls.turns = 5 @@ -860,7 +859,7 @@ class TestResultSetSpatialStructureTwo(TestResultSetSpatialStructure): @classmethod def setUpClass(cls): - path = pathlib.Path("test_outputs/test_results_spatial_two.csv") + path = pathlib.Path("../test_outputs/test_results_spatial_two.csv") cls.filename = str(axl_filename(path)) cls.players = [ axl.Alternator(), @@ -1062,7 +1061,7 @@ class TestResultSetSpatialStructureThree(TestResultSetSpatialStructure): @classmethod def setUpClass(cls): - path = pathlib.Path("test_outputs/test_results_spatial_three.csv") + path = pathlib.Path("../test_outputs/test_results_spatial_three.csv") cls.filename = str(axl_filename(path)) cls.players = [ axl.Alternator(), diff --git a/axelrod/tests/unit/test_strategy_transformers.py b/axelrod/ipd/tests/unit/test_strategy_transformers.py similarity index 98% rename from axelrod/tests/unit/test_strategy_transformers.py rename to axelrod/ipd/tests/unit/test_strategy_transformers.py index 7b909ab22..988da4782 100644 --- a/axelrod/tests/unit/test_strategy_transformers.py +++ b/axelrod/ipd/tests/unit/test_strategy_transformers.py @@ -1,9 +1,9 @@ import unittest import axelrod as axl -from axelrod.strategy_transformers import * -from axelrod.tests.strategies.test_cooperator import TestCooperator -from axelrod.tests.strategies.test_titfortat import TestTitForTat +from axelrod.ipd.strategy_transformers import * +from axelrod.ipd.tests.strategies.test_cooperator import TestCooperator +from axelrod.ipd.tests.strategies.test_titfortat import TestTitForTat C, D = axl.Action.C, axl.Action.D @@ 
-171,12 +171,12 @@ def test_doc(self): self.assertEqual(player.__doc__, transformer.__doc__) def test_cloning(self): - """Tests that Player.clone preserves the application of transformations. + """Tests that IpdPlayer.clone preserves the application of transformations. """ p1 = axl.Cooperator() p2 = FlipTransformer()(axl.Cooperator)() # Defector p3 = p2.clone() - match = axl.Match((p1, p3), turns=2) + match = axl.IpdMatch((p1, p3), turns=2) results = match.play() self.assertEqual(results, [(C, D), (C, D)]) @@ -187,7 +187,7 @@ def test_generic(self): Cooperator2 = transformer(axl.Cooperator) p1 = Cooperator2() p2 = axl.Cooperator() - match = axl.Match((p1, p2), turns=2) + match = axl.IpdMatch((p1, p2), turns=2) results = match.play() self.assertEqual(results, [(C, C), (C, C)]) @@ -195,7 +195,7 @@ def test_flip_transformer(self): """Tests that FlipTransformer(Cooperator) == Defector.""" p1 = axl.Cooperator() p2 = FlipTransformer()(axl.Cooperator)() # Defector - match = axl.Match((p1, p2), turns=3) + match = axl.IpdMatch((p1, p2), turns=3) results = match.play() self.assertEqual(results, [(C, D), (C, D), (C, D)]) diff --git a/axelrod/tests/unit/test_strategy_utils.py b/axelrod/ipd/tests/unit/test_strategy_utils.py similarity index 97% rename from axelrod/tests/unit/test_strategy_utils.py rename to axelrod/ipd/tests/unit/test_strategy_utils.py index fef7e2af6..4fdf98727 100644 --- a/axelrod/tests/unit/test_strategy_utils.py +++ b/axelrod/ipd/tests/unit/test_strategy_utils.py @@ -3,7 +3,7 @@ import unittest import axelrod as axl -from axelrod._strategy_utils import ( +from axelrod.ipd._strategy_utils import ( detect_cycle, inspect_strategy, look_ahead, @@ -105,8 +105,8 @@ def test_tft_reacts_to_defection(self): class TestLookAhead(unittest.TestCase): def setUp(self): - self.inspector = axl.Player() - self.game = axl.Game() + self.inspector = axl.IpdPlayer() + self.game = axl.IpdGame() def test_cooperator(self): tft = axl.Cooperator() diff --git a/axelrod/tests/unit/test_tournament.py b/axelrod/ipd/tests/unit/test_tournament.py similarity index 93% rename from axelrod/tests/unit/test_tournament.py rename to axelrod/ipd/tests/unit/test_tournament.py index c91abcf46..5770c8d7f 100644 --- a/axelrod/tests/unit/test_tournament.py +++ b/axelrod/ipd/tests/unit/test_tournament.py @@ -10,19 +10,19 @@ import warnings from multiprocessing import Queue, cpu_count -from axelrod.load_data_ import axl_filename +from axelrod.ipd.load_data_ import axl_filename import numpy as np import pandas as pd from tqdm import tqdm import axelrod as axl -from axelrod.tests.property import ( +from axelrod.ipd.tests.property import ( prob_end_tournaments, spatial_tournaments, strategy_lists, tournaments, ) -from axelrod.tournament import _close_objects +from axelrod.ipd.tournament import _close_objects from hypothesis import example, given, settings from hypothesis.strategies import floats, integers @@ -67,7 +67,7 @@ def reset_record(cls): class TestTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [s() for s in test_strategies] cls.test_name = "test" cls.test_repetitions = test_repetitions @@ -89,11 +89,11 @@ def setUpClass(cls): [200, 200, 1, 200, 200], ] - path = pathlib.Path("test_outputs/test_tournament.csv") + path = pathlib.Path("../test_outputs/test_tournament.csv") cls.filename = axl_filename(path) def setUp(self): - self.test_tournament = axl.Tournament( + self.test_tournament = axl.IpdTournament( name=self.test_name, 
players=self.players, game=self.game, @@ -102,7 +102,7 @@ def setUp(self): ) def test_init(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -110,18 +110,18 @@ def test_init(self): noise=0.2, ) self.assertEqual(len(tournament.players), len(test_strategies)) - self.assertIsInstance(tournament.players[0].match_attributes["game"], axl.Game) + self.assertIsInstance(tournament.players[0].match_attributes["game"], axl.IpdGame) self.assertEqual(tournament.game.score((C, C)), (3, 3)) self.assertEqual(tournament.turns, self.test_turns) self.assertEqual(tournament.repetitions, 10) self.assertEqual(tournament.name, "test") self.assertIsInstance(tournament._logger, logging.Logger) self.assertEqual(tournament.noise, 0.2) - anonymous_tournament = axl.Tournament(players=self.players) + anonymous_tournament = axl.IpdTournament(players=self.players) self.assertEqual(anonymous_tournament.name, "axelrod") def test_init_with_match_attributes(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( players=self.players, match_attributes={"length": float("inf")} ) mg = tournament.match_generator @@ -129,7 +129,7 @@ def test_init_with_match_attributes(self): self.assertEqual(match_params["match_attributes"], {"length": float("inf")}) def test_warning(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -244,7 +244,7 @@ def test_get_progress_bar(self): self.assertEqual(pbar.total, self.test_tournament.match_generator.size) new_edges = [(0, 1), (1, 2), (2, 3), (3, 4)] - new_tournament = axl.Tournament(players=self.players, edges=new_edges) + new_tournament = axl.IpdTournament(players=self.players, edges=new_edges) new_tournament.use_progress_bar = True pbar = new_tournament._get_progress_bar() self.assertEqual(pbar.desc, "Playing matches") @@ -253,7 +253,7 @@ def test_get_progress_bar(self): def test_serial_play(self): # Test that we get an instance of ResultSet - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -264,7 +264,7 @@ def test_serial_play(self): self.assertIsInstance(results, axl.ResultSet) # Test that _run_serial_repetitions is called with empty matches list - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -276,8 +276,8 @@ def test_serial_play(self): def test_serial_play_with_different_game(self): # Test that a non default game is passed to the result set - game = axl.Game(p=-1, r=-1, s=-1, t=-1) - tournament = axl.Tournament( + game = axl.IpdGame(p=-1, r=-1, s=-1, t=-1) + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=game, turns=1, repetitions=1 ) results = tournament.play(progress_bar=False) @@ -286,7 +286,7 @@ def test_serial_play_with_different_game(self): @patch("tqdm.tqdm", RecordedTQDM) def test_no_progress_bar_play(self): """Test that progress bar is not created for progress_bar=False""" - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -318,7 +318,7 @@ def assert_play_pbar_correct_total_and_finished(self, pbar, total): @patch("tqdm.tqdm", RecordedTQDM) def test_progress_bar_play(self): """Test that progress bar is created by default and with True argument""" - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, 
players=self.players, game=self.game, @@ -357,7 +357,7 @@ def test_progress_bar_play(self): def test_progress_bar_play_parallel(self): """Test that tournament plays when asking for progress bar for parallel tournament and that progress bar is created.""" - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -401,7 +401,7 @@ def test_progress_bar_play_parallel(self): ) @settings(max_examples=50) @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[s() for s in test_strategies], turns=test_turns, repetitions=test_repetitions, @@ -411,12 +411,12 @@ def test_progress_bar_play_parallel(self): # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, # these two examples were identified by hypothesis. @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[axl.BackStabber(), axl.MindReader()], turns=2, repetitions=1, ) ) @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[axl.BackStabber(), axl.ThueMorse()], turns=2, repetitions=1 ) ) @@ -430,7 +430,7 @@ def test_property_serial_play(self, tournament): def test_parallel_play(self): # Test that we get an instance of ResultSet - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -450,7 +450,7 @@ def test_parallel_play(self): axl.ThueMorse(), axl.DoubleCrosser(), ] - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=players, game=self.game, @@ -461,7 +461,7 @@ def test_parallel_play(self): self.assertEqual(len(scores), len(players)) def test_parallel_play_with_writing_to_file(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -476,7 +476,7 @@ def test_parallel_play_with_writing_to_file(self): self.assertEqual(tournament.num_interactions, 75) def test_run_serial(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -497,7 +497,7 @@ class PickleableMock(MagicMock): def __reduce__(self): return MagicMock, () - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -524,7 +524,7 @@ def __reduce__(self): def test_n_workers(self): max_processes = cpu_count() - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -533,7 +533,7 @@ def test_n_workers(self): ) self.assertEqual(tournament._n_workers(processes=1), max_processes) - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -549,7 +549,7 @@ def test_2_workers(self): # This is a separate test with a skip condition because we # cannot guarantee that the tests will always run on a machine # with more than one processor - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -562,7 +562,7 @@ def test_start_workers(self): workers = 2 work_queue = Queue() done_queue = Queue() - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -582,7 +582,7 @@ def test_start_workers(self): self.assertEqual(stops, workers) def test_worker(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, 
players=self.players, game=self.game, @@ -609,7 +609,7 @@ def test_worker(self): self.assertEqual(queue_stop, "STOP") def test_build_result_set(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -620,7 +620,7 @@ def test_build_result_set(self): self.assertIsInstance(results, axl.ResultSet) def test_no_build_result_set(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -645,7 +645,7 @@ def test_no_build_result_set(self): @example(turns=3) @example(turns=axl.DEFAULT_TURNS) def test_play_matches(self, turns): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -693,13 +693,13 @@ def test_match_cache_is_used(self): FakeRandom.classifier["stochastic"] = False p1 = FakeRandom() p2 = FakeRandom() - tournament = axl.Tournament((p1, p2), turns=5, repetitions=2) + tournament = axl.IpdTournament((p1, p2), turns=5, repetitions=2) results = tournament.play(progress_bar=False) for player_scores in results.scores: self.assertEqual(player_scores[0], player_scores[1]) def test_write_interactions(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -721,7 +721,7 @@ def test_write_interactions(self): self.assertEqual(len(calls), 15) def test_write_to_csv_with_results(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -730,12 +730,12 @@ def test_write_to_csv_with_results(self): ) tournament.play(filename=self.filename, progress_bar=False) df = pd.read_csv(self.filename) - path = pathlib.Path("test_outputs/expected_test_tournament.csv") + path = pathlib.Path("../test_outputs/expected_test_tournament.csv") expected_df = pd.read_csv(axl_filename(path)) self.assertTrue(df.equals(expected_df)) def test_write_to_csv_without_results(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -744,7 +744,7 @@ def test_write_to_csv_without_results(self): ) tournament.play(filename=self.filename, progress_bar=False, build_results=False) df = pd.read_csv(self.filename) - path = pathlib.Path("test_outputs/expected_test_tournament_no_results.csv") + path = pathlib.Path("../test_outputs/expected_test_tournament_no_results.csv") expected_df = pd.read_csv(axl_filename(path)) self.assertTrue(df.equals(expected_df)) @@ -752,14 +752,14 @@ def test_write_to_csv_without_results(self): class TestProbEndTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [s() for s in test_strategies] cls.test_name = "test" cls.test_repetitions = test_repetitions cls.test_prob_end = test_prob_end def test_init(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -774,7 +774,7 @@ def test_init(self): self.assertEqual(tournament.name, "test") self.assertIsInstance(tournament._logger, logging.Logger) self.assertEqual(tournament.noise, 0.2) - anonymous_tournament = axl.Tournament(players=self.players) + anonymous_tournament = axl.IpdTournament(players=self.players) self.assertEqual(anonymous_tournament.name, "axelrod") @given( @@ -789,7 +789,7 @@ def test_init(self): ) @settings(max_examples=5) @example( - 
tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[s() for s in test_strategies], prob_end=0.2, repetitions=test_repetitions, @@ -799,12 +799,12 @@ def test_init(self): # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, # these two examples were identified by hypothesis. @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[axl.BackStabber(), axl.MindReader()], prob_end=0.2, repetitions=1, ) ) @example( - tournament=axl.Tournament( + tournament=axl.IpdTournament( players=[axl.ThueMorse(), axl.MindReader()], prob_end=0.2, repetitions=1, ) ) @@ -820,7 +820,7 @@ def test_property_serial_play(self, tournament): class TestSpatialTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [s() for s in test_strategies] cls.test_name = "test" cls.test_repetitions = test_repetitions @@ -828,7 +828,7 @@ def setUpClass(cls): cls.test_edges = test_edges def test_init(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -845,7 +845,7 @@ def test_init(self): self.assertIsInstance(tournament._logger, logging.Logger) self.assertEqual(tournament.noise, 0.2) self.assertEqual(tournament.match_generator.noise, 0.2) - anonymous_tournament = axl.Tournament(players=self.players) + anonymous_tournament = axl.IpdTournament(players=self.players) self.assertEqual(anonymous_tournament.name, "axelrod") @given( @@ -872,11 +872,11 @@ def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): edges.append((i, j)) # create a round robin tournament - tournament = axl.Tournament( + tournament = axl.IpdTournament( players, repetitions=repetitions, turns=turns, noise=noise ) # create a complete spatial tournament - spatial_tournament = axl.Tournament( + spatial_tournament = axl.IpdTournament( players, repetitions=repetitions, turns=turns, noise=noise, edges=edges ) @@ -915,13 +915,13 @@ def test_particular_tournament(self): axl.Grudger(), ] edges = [(0, 2), (0, 3), (1, 2), (1, 3)] - tournament = axl.Tournament(players, edges=edges) + tournament = axl.IpdTournament(players, edges=edges) results = tournament.play(progress_bar=False) expected_ranked_names = ["Cooperator", "Tit For Tat", "Grudger", "Defector"] self.assertEqual(results.ranked_names, expected_ranked_names) # Check that this tournament runs with noise - tournament = axl.Tournament(players, edges=edges, noise=0.5) + tournament = axl.IpdTournament(players, edges=edges, noise=0.5) results = tournament.play(progress_bar=False) self.assertIsInstance(results, axl.ResultSet) @@ -929,7 +929,7 @@ def test_particular_tournament(self): class TestProbEndingSpatialTournament(unittest.TestCase): @classmethod def setUpClass(cls): - cls.game = axl.Game() + cls.game = axl.IpdGame() cls.players = [s() for s in test_strategies] cls.test_name = "test" cls.test_repetitions = test_repetitions @@ -937,7 +937,7 @@ def setUpClass(cls): cls.test_edges = test_edges def test_init(self): - tournament = axl.Tournament( + tournament = axl.IpdTournament( name=self.test_name, players=self.players, game=self.game, @@ -973,7 +973,7 @@ def test_complete_tournament(self, strategies, prob_end, seed, reps): players = [s() for s in strategies] # create a prob end round robin tournament - tournament = axl.Tournament(players, prob_end=prob_end, repetitions=reps) + tournament = axl.IpdTournament(players, prob_end=prob_end, repetitions=reps) axl.seed(seed) results = 
tournament.play(progress_bar=False) @@ -981,7 +981,7 @@ test_complete_tournament(self, strategies, prob_end, seed, reps): # edges edges = [(i, j) for i in range(len(players)) for j in range(i, len(players))] - spatial_tournament = axl.Tournament( + spatial_tournament = axl.IpdTournament( players, prob_end=prob_end, repetitions=reps, edges=edges ) axl.seed(seed) @@ -1007,7 +1007,7 @@ def test_one_turn_tournament(self, tournament, seed): Tests that this gives the same result as the corresponding round robin spatial tournament """ - prob_end_tour = axl.Tournament( + prob_end_tour = axl.IpdTournament( tournament.players, prob_end=1, edges=tournament.edges, diff --git a/axelrod/tests/unit/test_version.py b/axelrod/ipd/tests/unit/test_version.py similarity index 100% rename from axelrod/tests/unit/test_version.py rename to axelrod/ipd/tests/unit/test_version.py diff --git a/axelrod/ipd/tournament.py b/axelrod/ipd/tournament.py new file mode 100644 index 000000000..7d68a9d36 --- /dev/null +++ b/axelrod/ipd/tournament.py @@ -0,0 +1,497 @@ +import csv +import logging +import os +import warnings +from collections import defaultdict +from multiprocessing import Process, Queue, cpu_count +from tempfile import mkstemp +from typing import List, Optional, Tuple + +import axelrod.ipd.interaction_utils as iu +import tqdm +from axelrod import DEFAULT_TURNS +from axelrod.ipd.action import Action, actions_to_str, str_to_actions +from axelrod.ipd.player import IpdPlayer +from axelrod.tournament import BaseTournament + +from .game import IpdGame +from .match import IpdMatch +from .match_generator import MatchGenerator +from .result_set import ResultSet + +C, D = Action.C, Action.D + + +class IpdTournament(BaseTournament): + def __init__( + self, + players: List[IpdPlayer], + name: str = "axelrod", + game: IpdGame = None, + turns: int = None, + prob_end: float = None, + repetitions: int = 10, + noise: float = 0, + edges: List[Tuple] = None, + match_attributes: dict = None, + ) -> None: + """ + Parameters + ---------- + players : list + A list of axelrod.IpdPlayer objects + name : string + A name for the tournament + game : axelrod.IpdGame + The game object used to score the tournament + turns : integer + The number of turns per match + prob_end : float + The probability of a given turn ending a match + repetitions : integer + The number of times the round robin should be repeated + noise : float + The probability that a player's intended action should be flipped + edges : list + A list of edges between players + match_attributes : dict + Mapping attribute names to values which should be passed to players. + The default is to use the correct values for turns, game and noise + but these can be overridden if desired.
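+
+        A short usage sketch (editor's addition, not part of the original
+        patch; it relies only on names exercised by the tests above):
+
+        >>> import axelrod as axl
+        >>> players = [axl.Cooperator(), axl.TitForTat(), axl.Defector()]
+        >>> tournament = axl.IpdTournament(players, turns=5, repetitions=2)
+        >>> results = tournament.play(progress_bar=False)
+        >>> isinstance(results, axl.ResultSet)
+        True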
+ """ + if game is None: + self.game = IpdGame() + else: + self.game = game + self.name = name + self.noise = noise + self.num_interactions = 0 + self.players = players + self.repetitions = repetitions + self.edges = edges + + if turns is None and prob_end is None: + turns = DEFAULT_TURNS + + self.turns = turns + self.prob_end = prob_end + self.match_generator = MatchGenerator( + players=players, + turns=turns, + game=self.game, + repetitions=self.repetitions, + prob_end=prob_end, + noise=self.noise, + edges=edges, + match_attributes=match_attributes, + ) + self._logger = logging.getLogger(__name__) + + self.use_progress_bar = True + self.filename = None # type: Optional[str] + self._temp_file_descriptor = None # type: Optional[int] + + super().__init__( + players, + name, + game, + turns, + prob_end, + repetitions, + noise, + edges, + match_attributes + ) + + def setup_output(self, filename=None): + """assign/create `filename` to `self`. If file should be deleted once + `play` is finished, assign a file descriptor. """ + temp_file_descriptor = None + if filename is None: + temp_file_descriptor, filename = mkstemp() + + self.filename = filename + self._temp_file_descriptor = temp_file_descriptor + + def play( + self, + build_results: bool = True, + filename: str = None, + processes: int = None, + progress_bar: bool = True, + ) -> ResultSet: + """ + Plays the tournament and passes the results to the ResultSet class + + Parameters + ---------- + build_results : bool + whether or not to build a results set + filename : string + name of output file + processes : integer + The number of processes to be used for parallel processing + progress_bar : bool + Whether or not to create a progress bar which will be updated + + Returns + ------- + axelrod.ResultSet + """ + self.num_interactions = 0 + + self.use_progress_bar = progress_bar + + self.setup_output(filename) + + if not build_results and not filename: + warnings.warn( + "IpdTournament results will not be accessible since " + "build_results=False and no filename was supplied." 
+ ) + + if processes is None: + self._run_serial(build_results=build_results) + else: + self._run_parallel(build_results=build_results, processes=processes) + + result_set = None + if build_results: + result_set = ResultSet( + filename=self.filename, + players=[str(p) for p in self.players], + repetitions=self.repetitions, + processes=processes, + progress_bar=progress_bar, + ) + if self._temp_file_descriptor is not None: + assert self.filename is not None + os.close(self._temp_file_descriptor) + os.remove(self.filename) + + return result_set + + def _run_serial(self, build_results: bool = True) -> bool: + """Run all matches in serial.""" + + chunks = self.match_generator.build_match_chunks() + + out_file, writer = self._get_file_objects(build_results) + progress_bar = self._get_progress_bar() + + for chunk in chunks: + results = self._play_matches(chunk, build_results=build_results) + self._write_interactions_to_file(results, writer=writer) + + if self.use_progress_bar: + progress_bar.update(1) + + _close_objects(out_file, progress_bar) + + return True + + def _get_file_objects(self, build_results=True): + """Returns the file object and writer for writing results or + (None, None) if self.filename is None""" + file_obj = None + writer = None + if self.filename is not None: + file_obj = open(self.filename, "w") + writer = csv.writer(file_obj, lineterminator="\n") + + header = [ + "Interaction index", + "Player index", + "Opponent index", + "Repetition", + "Player name", + "Opponent name", + "Actions", + ] + if build_results: + header.extend( + [ + "Score", + "Score difference", + "Turns", + "Score per turn", + "Score difference per turn", + "Win", + "Initial cooperation", + "Cooperation count", + "CC count", + "CD count", + "DC count", + "DD count", + "CC to C count", + "CC to D count", + "CD to C count", + "CD to D count", + "DC to C count", + "DC to D count", + "DD to C count", + "DD to D count", + "Good partner", + ] + ) + + writer.writerow(header) + return file_obj, writer + + def _get_progress_bar(self): + if self.use_progress_bar: + return tqdm.tqdm(total=self.match_generator.size, desc="Playing matches") + return None + + def _write_interactions_to_file(self, results, writer): + """Write the interactions to csv.""" + for index_pair, interactions in results.items(): + repetition = 0 + for interaction, results in interactions: + + if results is not None: + ( + scores, + score_diffs, + turns, + score_per_turns, + score_diffs_per_turns, + initial_cooperation, + cooperations, + state_distribution, + state_to_action_distributions, + winner_index, + ) = results + for index, player_index in enumerate(index_pair): + opponent_index = index_pair[index - 1] + row = [self.num_interactions, player_index, opponent_index, repetition, + str(self.players[player_index]), str(self.players[opponent_index])] + history = actions_to_str([i[index] for i in interaction]) + row.append(history) + + if results is not None: + row.append(scores[index]) + row.append(score_diffs[index]) + row.append(turns) + row.append(score_per_turns[index]) + row.append(score_diffs_per_turns[index]) + row.append(int(winner_index is index)) + row.append(initial_cooperation[index]) + row.append(cooperations[index]) + + states = [(C, C), (C, D), (D, C), (D, D)] + if index == 1: + states = [s[::-1] for s in states] + for state in states: + row.append(state_distribution[state]) + for state in states: + row.append(state_to_action_distributions[index][(state, C)]) + row.append(state_to_action_distributions[index][(state, D)]) + + 
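+                    # Editor's note: this final column is the "Good partner"
+                    # flag from the header above: 1 if this player cooperated
+                    # at least as often as its opponent in this interaction.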
row.append(int(cooperations[index] >= cooperations[index - 1])) + + writer.writerow(row) + repetition += 1 + self.num_interactions += 1 + + def _run_parallel(self, processes: int = 2, build_results: bool = True) -> bool: + """ + Run all matches in parallel + + Parameters + ---------- + build_results : bool + whether or not to build a results set + processes : int + How many processes to use. + """ + # At first sight, it might seem simpler to use the multiprocessing Pool + # Class rather than Processes and Queues. However, this way is faster. + work_queue = Queue() # type: Queue + done_queue = Queue() # type: Queue + workers = self._n_workers(processes=processes) + + chunks = self.match_generator.build_match_chunks() + for chunk in chunks: + work_queue.put(chunk) + + self._start_workers(workers, work_queue, done_queue, build_results) + self._process_done_queue(workers, done_queue, build_results) + + return True + + def _n_workers(self, processes: int = 2) -> int: + """ + Determines the number of parallel processes to use. + + Returns + ------- + integer + """ + if 2 <= processes <= cpu_count(): + n_workers = processes + else: + n_workers = cpu_count() + return n_workers + + def _start_workers( + self, + workers: int, + work_queue: Queue, + done_queue: Queue, + build_results: bool = True, + ) -> bool: + """ + Initiates the sub-processes to carry out parallel processing. + + Parameters + ---------- + workers : integer + The number of sub-processes to create + work_queue : multiprocessing.Queue + A queue containing an entry for each round robin to be processed + done_queue : multiprocessing.Queue + A queue containing the output dictionaries from each round robin + build_results : bool + whether or not to build a results set + """ + for worker in range(workers): + process = Process( + target=self._worker, args=(work_queue, done_queue, build_results) + ) + work_queue.put("STOP") + process.start() + return True + + def _process_done_queue( + self, workers: int, done_queue: Queue, build_results: bool = True + ): + """ + Retrieves the matches from the parallel sub-processes + + Parameters + ---------- + workers : integer + The number of sub-processes in existence + done_queue : multiprocessing.Queue + A queue containing the output dictionaries from each round robin + build_results : bool + whether or not to build a results set + """ + out_file, writer = self._get_file_objects(build_results) + progress_bar = self._get_progress_bar() + + stops = 0 + while stops < workers: + results = done_queue.get() + if results == "STOP": + stops += 1 + else: + self._write_interactions_to_file(results, writer) + + if self.use_progress_bar: + progress_bar.update(1) + + _close_objects(out_file, progress_bar) + return True + + def _worker(self, work_queue: Queue, done_queue: Queue, build_results: bool = True): + """ + The work for each parallel sub-process to execute. + + Parameters + ---------- + work_queue : multiprocessing.Queue + A queue containing an entry for each round robin to be processed + done_queue : multiprocessing.Queue + A queue containing the output dictionaries from each round robin + build_results : bool + whether or not to build a results set + """ + for chunk in iter(work_queue.get, "STOP"): + interactions = self._play_matches(chunk, build_results) + done_queue.put(interactions) + done_queue.put("STOP") + return True + + def _play_matches(self, chunk, build_results=True): + """ + Play matches in a given chunk. 
+ + Parameters + ---------- + chunk : tuple (index pair, match_parameters, repetitions) + match_parameters are also a tuple: (turns, game, noise) + build_results : bool + whether or not to build a results set + + Returns + ------- + interactions : dictionary + Mapping player index pairs to results of matches: + + (0, 1) -> [(C, D), (D, C),...] + """ + interactions = defaultdict(list) + index_pair, match_params, repetitions = chunk + p1_index, p2_index = index_pair + player1 = self.players[p1_index].clone() + player2 = self.players[p2_index].clone() + match_params["players"] = (player1, player2) + match = IpdMatch(**match_params) + for _ in range(repetitions): + match.play() + + if build_results: + results = self._calculate_results(match.result) + else: + results = None + + interactions[index_pair].append([match.result, results]) + return interactions + + def _calculate_results(self, interactions): + results = [] + + scores = iu.compute_final_score(interactions, self.game) + results.append(scores) + + score_diffs = scores[0] - scores[1], scores[1] - scores[0] + results.append(score_diffs) + + turns = len(interactions) + results.append(turns) + + score_per_turns = iu.compute_final_score_per_turn(interactions, self.game) + results.append(score_per_turns) + + score_diffs_per_turns = score_diffs[0] / turns, score_diffs[1] / turns + results.append(score_diffs_per_turns) + + initial_coops = tuple(map(bool, iu.compute_cooperations(interactions[:1]))) + results.append(initial_coops) + + cooperations = iu.compute_cooperations(interactions) + results.append(cooperations) + + state_distribution = iu.compute_state_distribution(interactions) + results.append(state_distribution) + + state_to_action_distributions = iu.compute_state_to_action_distribution( + interactions + ) + results.append(state_to_action_distributions) + + winner_index = iu.compute_winner_index(interactions, self.game) + results.append(winner_index) + + return results + + +def _close_objects(*objs): + """If the objects have a `close` method, closes them.""" + for obj in objs: + if hasattr(obj, "close"): + obj.close() diff --git a/axelrod/ipd_adapter.py b/axelrod/ipd_adapter.py new file mode 100644 index 000000000..ce0f06fad --- /dev/null +++ b/axelrod/ipd_adapter.py @@ -0,0 +1,160 @@ +"""This is an adapter for the historical API of Player, Game, Match, and Tournament. + +For each of these classes, we keep an instance of the Ipd version as a member +and translate the historical API to the current API on that instance. +This keeps legacy code working as the internal API shifts to accommodate a more +general class of games. +""" + +from typing import Dict, List, Tuple, Union + +import axelrod as axl + +Score = Union[int, float] + + +class Player(object): + """Legacy players derive from this adapter.""" + + def __init__(self): + """The derived class should call super().__init__(). At that point, + name and classifier on the derived class will be set, so we copy them to + the wrapped player.""" + self._player = axl.IpdPlayer() + if self.name: + self._player.name = self.name + if self.classifier: + self._player.classifier = self.classifier + + def strategy(self, opponent: "BasePlayer") -> axl.Action: + """We expect the derived class to override this method.""" + raise NotImplementedError() + + def play( + self, opponent: "BasePlayer", noise: float = 0 + ) -> Tuple[axl.Action, axl.Action]: + # We have to provide _player.play a copy of this strategy, which will + # have an overridden strategy, and possibly saved state and helper + # methods.
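+        # (Editor's note: `strategy_holder` is understood to be part of the
+        # new IpdPlayer.play API introduced by this patch; passing `self`
+        # lets the wrapped IpdPlayer call back into the strategy() that the
+        # legacy subclass overrides.)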
+ return self._player.play(opponent, noise, strategy_holder=self) + + def clone(self) -> "Player": + new_player = Player() + new_player._player = self._player.clone() + return new_player + + def reset(self): + self._player.reset() + + +class Game(object): + def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1): + self._game = axl.IpdGame(r, s, t, p) + + def score(self, pair: Tuple[axl.Action, axl.Action]) -> Tuple[Score, Score]: + return self._game.score(pair) + + +class Match(object): + def __init__( + self, + players: Tuple[axl.IpdPlayer], + turns: int = None, + prob_end: float = None, + game: axl.IpdGame = None, + deterministic_cache: axl.DeterministicCache = None, + noise: float = 0, + match_attributes: Dict = None, + reset: bool = True, + ): + self._match = axl.IpdMatch( + players, + turns, + prob_end, + game, + deterministic_cache, + noise, + match_attributes, + reset, + ) + + @property + def players(self) -> Tuple[axl.IpdPlayer]: + return self._match.players + + @players.setter + def players(self, players: Tuple[axl.IpdPlayer]): + self._match.players = players + + def play(self) -> List[Tuple[axl.Action]]: + return self._match.play() + + def scores(self) -> List[Score]: + return self._match.scores() + + def final_score(self) -> Score: + return self._match.final_score() + + def final_score_per_turn(self) -> Score: + return self._match.final_score_per_turn() + + def winner(self) -> axl.IpdPlayer: + return self._match.winner() + + def cooperation(self): + return self._match.cooperation() + + def normalised_cooperation(self): + return self._match.normalised_cooperation() + + def state_distribution(self): + return self._match.state_distribution() + + def normalised_state_distribution(self): + return self._match.normalised_state_distribution() + + def sparklines(self, c_symbol="█", d_symbol=" "): + return self._match.sparklines(c_symbol=c_symbol, d_symbol=d_symbol) + + def __len__(self): + return len(self._match) + + +class Tournament(object): + def __init__( + self, + players: List[axl.IpdPlayer], + name: str = "axelrod", + game: axl.IpdGame = None, + turns: int = None, + prob_end: float = None, + repetitions: int = 10, + noise: float = 0, + edges: List[Tuple] = None, + match_attributes: dict = None, + ) -> None: + self._tournament = axl.IpdTournament( + players, + name, + game, + turns, + prob_end, + repetitions, + noise, + edges, + match_attributes, + ) + + def setup_output(self, filename=None) -> None: + self._tournament.setup_output(filename) + + def play( + self, + build_results: bool = True, + filename: str = None, + processes: int = None, + progress_bar: bool = True, + ) -> axl.ResultSet: + return self._tournament.play( + build_results, filename, processes, progress_bar + ) diff --git a/axelrod/match.py b/axelrod/match.py index e9dd29442..8dbd5580f 100644 --- a/axelrod/match.py +++ b/axelrod/match.py @@ -1,49 +1,38 @@ -import random -from math import ceil, log +from typing import Dict, List, Tuple, Union -import axelrod.interaction_utils as iu -from axelrod import DEFAULT_TURNS -from axelrod.action import Action -from axelrod import Classifiers -from axelrod.game import Game -from .deterministic_cache import DeterministicCache +import axelrod as axl +from axelrod.game import BaseGame +from axelrod.player import BasePlayer -C, D = Action.C, Action.D +Score = Union[int, float] -def is_stochastic(players, noise): - """Determines if a match is stochastic -- true if there is noise or if any - of the players involved is stochastic.""" - return noise or
any(map(Classifiers["stochastic"], players)) - - -class Match(object): - """The Match class conducts matches between two players.""" +class BaseMatch(object): + """The BaseMatch class conducts matches between two players.""" def __init__( self, - players, - turns=None, - prob_end=None, - game=None, - deterministic_cache=None, - noise=0, - match_attributes=None, - reset=True, + players: Tuple[BasePlayer], + turns: int = None, + prob_end: float = None, + game: BaseGame = None, + noise: float = 0, + match_attributes: Dict = None, + reset: bool = True, ): """ + Needs to be overridden in the derived class. + Parameters ---------- players : tuple - A pair of axelrod.Player objects + A pair of axelrod BasePlayer objects turns : integer The number of turns per match prob_end : float The probability of a given turn ending a match - game : axelrod.Game + game : axelrod.BaseGame The game object used to score the match - deterministic_cache : axelrod.DeterministicCache - A cache of resulting actions for deterministic matches noise : float The probability that a player's intended action should be flipped match_attributes : dict @@ -53,201 +42,32 @@ def __init__( reset : bool Whether to reset players or not """ - - defaults = { - (True, True): (DEFAULT_TURNS, 0), - (True, False): (float("inf"), prob_end), - (False, True): (turns, 0), - (False, False): (turns, prob_end), - } - self.turns, self.prob_end = defaults[(turns is None, prob_end is None)] - - self.result = [] - self.noise = noise - - if game is None: - self.game = Game() - else: - self.game = game - - if deterministic_cache is None: - self._cache = DeterministicCache() - else: - self._cache = deterministic_cache - - if match_attributes is None: - known_turns = self.turns if prob_end is None else float("inf") - self.match_attributes = { - "length": known_turns, - "game": self.game, - "noise": self.noise, - } - else: - self.match_attributes = match_attributes - - self.players = list(players) - self.reset = reset - - @property - def players(self): - return self._players - - @players.setter - def players(self, players): - """Ensure that players are passed the match attributes""" - newplayers = [] - for player in players: - player.set_match_attributes(**self.match_attributes) - newplayers.append(player) - self._players = newplayers - - @property - def _stochastic(self): - """ - A boolean to show whether a match between two players would be - stochastic. - """ - return is_stochastic(self.players, self.noise) + pass @property - def _cache_update_required(self): - """ - A boolean to show whether the deterministic cache should be updated. - """ - return ( - not self.noise - and self._cache.mutable - and not (any(Classifiers["stochastic"](p) for p in self.players)) - ) - - def _cached_enough_turns(self, cache_key, turns): - """ - Returns true iff there are is a entry in self._cache for the given key and - it's at least turns long. - """ - if cache_key not in self._cache: - return False - return len(self._cache[cache_key]) >= turns - - def play(self): - """ - The resulting list of actions from a match between two players. - - This method determines whether the actions list can be obtained from - the deterministic cache and returns it from there if so. If not, it - calls the play method for player1 and returns the list from there. - - Returns - ------- - A list of the form: - - e.g. for a 2 turn match between Cooperator and Defector: - - [(C, C), (C, D)] - - i.e. One entry per turn containing a pair of actions.
- """ - turns = min(sample_length(self.prob_end), self.turns) - cache_key = (self.players[0], self.players[1]) - - if self._stochastic or not self._cached_enough_turns(cache_key, turns): - for p in self.players: - if self.reset: - p.reset() - p.set_match_attributes(**self.match_attributes) - result = [] - for _ in range(turns): - plays = self.players[0].play(self.players[1], self.noise) - result.append(plays) - - if self._cache_update_required: - self._cache[cache_key] = result - else: - result = self._cache[cache_key][:turns] - - self.result = result - return result - - def scores(self): - """Returns the scores of the previous Match plays.""" - return iu.compute_scores(self.result, self.game) - - def final_score(self): - """Returns the final score for a Match.""" - return iu.compute_final_score(self.result, self.game) - - def final_score_per_turn(self): - """Returns the mean score per round for a Match.""" - return iu.compute_final_score_per_turn(self.result, self.game) - - def winner(self): - """Returns the winner of the Match.""" - winner_index = iu.compute_winner_index(self.result, self.game) - if winner_index is False: # No winner - return False - if winner_index is None: # No plays - return None - return self.players[winner_index] - - def cooperation(self): - """Returns the count of cooperations by each player.""" - return iu.compute_cooperations(self.result) - - def normalised_cooperation(self): - """Returns the count of cooperations by each player per turn.""" - return iu.compute_normalised_cooperation(self.result) - - def state_distribution(self): - """ - Returns the count of each state for a set of interactions. - """ - return iu.compute_state_distribution(self.result) - - def normalised_state_distribution(self): - """ - Returns the normalized count of each state for a set of interactions. - """ - return iu.compute_normalised_state_distribution(self.result) - - def sparklines(self, c_symbol="█", d_symbol=" "): - return iu.compute_sparklines(self.result, c_symbol, d_symbol) - - def __len__(self): - return self.turns - - -def sample_length(prob_end): - """ - Sample length of a game. - - This is using inverse random sample on a probability density function - given by: - - f(n) = p_end * (1 - p_end) ^ (n - 1) - - (So the probability of length n is given by f(n)) - - Which gives cumulative distribution function - : + def players(self) -> Tuple[BasePlayer]: + raise NotImplementedError() - F(n) = 1 - (1 - p_end) ^ n + def play(self) -> List[Tuple[axl.Action]]: + """The resulting list of actions from a match between two players.""" + raise NotImplementedError() - (So the probability of length less than or equal to n is given by F(n)) + def scores(self) -> List[Score]: + """Returns the scores of the previous BaseMatch plays.""" + raise NotImplementedError() - Which gives for given x = F(n) (ie the random sample) gives n: + def final_score(self) -> Score: + """Returns the final score for a BaseMatch.""" + raise NotImplementedError() - n = ceil((ln(1-x)/ln(1-p_end))) + def final_score_per_turn(self) -> Score: + """Returns the mean score per round for a BaseMatch.""" + raise NotImplementedError() - This approach of sampling from a distribution is called inverse - transform sampling - . + def winner(self) -> BasePlayer: + """Returns the winner of the IpdMatch.""" + raise NotImplementedError() - Note that this corresponds to sampling at the end of every turn whether - or not the Match ends. 
- """ - if prob_end == 0: - return float("inf") - if prob_end == 1: - return 1 - x = random.random() - return int(ceil(log(1 - x) / log(1 - prob_end))) + def __len__(self) -> int: + """Number of turns in the match""" + raise NotImplementedError() diff --git a/axelrod/player.py b/axelrod/player.py index 61cc609af..007f66e85 100644 --- a/axelrod/player.py +++ b/axelrod/player.py @@ -1,168 +1,31 @@ -import copy -import inspect -import itertools -import types -from typing import Any, Dict +from typing import Optional, Tuple -import numpy as np +import axelrod as axl -from axelrod.action import Action -from axelrod.game import DefaultGame -from axelrod.history import History -from axelrod.random_ import random_flip - -C, D = Action.C, Action.D - - -def simultaneous_play(player, coplayer, noise=0): - """This pits two players against each other.""" - s1, s2 = player.strategy(coplayer), coplayer.strategy(player) - if noise: - s1 = random_flip(s1, noise) - s2 = random_flip(s2, noise) - player.update_history(s1, s2) - coplayer.update_history(s2, s1) - return s1, s2 - - -class Player(object): - """A class for a player in the tournament. - - This is an abstract base class, not intended to be used directly. - """ - - name = "Player" - classifier = {} # type: Dict[str, Any] - - def __new__(cls, *args, **kwargs): - """Caches arguments for Player cloning.""" - obj = super().__new__(cls) - obj.init_kwargs = cls.init_params(*args, **kwargs) - return obj - - @classmethod - def init_params(cls, *args, **kwargs): - """ - Return a dictionary containing the init parameters of a strategy - (without 'self'). - Use *args and *kwargs as value if specified - and complete the rest with the default values. - """ - sig = inspect.signature(cls.__init__) - # The 'self' parameter needs to be removed or the first *args will be - # assigned to it - self_param = sig.parameters.get("self") - new_params = list(sig.parameters.values()) - new_params.remove(self_param) - sig = sig.replace(parameters=new_params) - boundargs = sig.bind_partial(*args, **kwargs) - boundargs.apply_defaults() - return boundargs.arguments +class BasePlayer(object): def __init__(self): - """Initiates an empty history.""" - self._history = History() - self.classifier = copy.deepcopy(self.classifier) - self.set_match_attributes() - - def __eq__(self, other): - """ - Test if two players are equal. 
- """ - if self.__repr__() != other.__repr__(): - return False - - for attribute in set(list(self.__dict__.keys()) + list(other.__dict__.keys())): - - value = getattr(self, attribute, None) - other_value = getattr(other, attribute, None) - - if isinstance(value, np.ndarray): - if not (np.array_equal(value, other_value)): - return False - - elif isinstance(value, types.GeneratorType) or isinstance( - value, itertools.cycle - ): - # Split the original generator so it is not touched - generator, original_value = itertools.tee(value) - other_generator, original_other_value = itertools.tee(other_value) - - if isinstance(value, types.GeneratorType): - setattr(self, attribute, (ele for ele in original_value)) - setattr(other, attribute, (ele for ele in original_other_value)) - else: - setattr(self, attribute, itertools.cycle(original_value)) - setattr(other, attribute, itertools.cycle(original_other_value)) - - for _ in range(200): - try: - if next(generator) != next(other_generator): - return False - except StopIteration: - break - - # Code for a strange edge case where each strategy points at each - # other - elif value is other and other_value is self: - pass - else: - if value != other_value: - return False - return True - - def receive_match_attributes(self): - # Overwrite this function if your strategy needs - # to make use of match_attributes such as - # the game matrix, the number of rounds or the noise pass - def set_match_attributes(self, length=-1, game=None, noise=0): - if not game: - game = DefaultGame - self.match_attributes = {"length": length, "game": game, "noise": noise} - self.receive_match_attributes() - - def __repr__(self): - """The string method for the strategy. - Appends the `__init__` parameters to the strategy's name.""" - name = self.name - prefix = ": " - gen = (value for value in self.init_kwargs.values() if value is not None) - for value in gen: - try: - if issubclass(value, Player): - value = value.name - except TypeError: - pass - name = "".join([name, prefix, str(value)]) - prefix = ", " - return name - - def __getstate__(self): - """Used for pickling. Override if Player contains unpickleable attributes.""" - return self.__dict__ - - def strategy(self, opponent): - """This is a placeholder strategy.""" + def strategy(self, opponent: "BasePlayer") -> axl.Action: + """Calculates the action of this player against the provided + opponent.""" raise NotImplementedError() - def play(self, opponent, noise=0): - """This pits two players against each other.""" - return simultaneous_play(self, opponent, noise) + def play( + self, + opponent: "BasePlayer", + noise: float = 0, + strategy_holder: Optional["BasePlayer"] = None, + ) -> Tuple[axl.Action, axl.Action]: + """This pits two players against each other, using the passed strategy + holder, if provided.""" + raise NotImplementedError() - def clone(self): + def clone(self) -> "BasePlayer": """Clones the player without history, reapplying configuration parameters as necessary.""" - - # You may be tempted to re-implement using the `copy` module - # Note that this would require a deepcopy in some cases and there may - # be significant changes required throughout the library. 
- # Consider overriding in special cases only if necessary - cls = self.__class__ - new_player = cls(**self.init_kwargs) - new_player.match_attributes = copy.copy(self.match_attributes) - return new_player + raise NotImplementedError() def reset(self): """Resets a player to its initial state @@ -171,26 +34,4 @@ def reset(self): of players) to reset a player's state to its initial starting point. It ensures that no 'memory' of previous matches is carried forward. """ - # This also resets the history. - self.__init__(**self.init_kwargs) - - def update_history(self, play, coplay): - self.history.append(play, coplay) - - @property - def history(self): - return self._history - - # Properties maintained for legacy API, can refactor to self.history.X - # in 5.0.0 to reduce function call overhead. - @property - def cooperations(self): - return self._history.cooperations - - @property - def defections(self): - return self._history.defections - - @property - def state_distribution(self): - return self._history.state_distribution + raise NotImplementedError() diff --git a/axelrod/tournament.py b/axelrod/tournament.py index 26144757c..90c168f5e 100644 --- a/axelrod/tournament.py +++ b/axelrod/tournament.py @@ -1,32 +1,16 @@ -import csv -import logging -import os -import warnings -from collections import defaultdict -from multiprocessing import Process, Queue, cpu_count -from tempfile import mkstemp -from typing import List, Optional, Tuple +from typing import List, Tuple -import axelrod.interaction_utils as iu -import tqdm -from axelrod import DEFAULT_TURNS -from axelrod.action import Action, actions_to_str, str_to_actions -from axelrod.player import Player +import axelrod as axl +from axelrod.player import BasePlayer +from axelrod.game import BaseGame -from .game import Game -from .match import Match -from .match_generator import MatchGenerator -from .result_set import ResultSet -C, D = Action.C, Action.D - - -class Tournament(object): +class BaseTournament(object): def __init__( self, - players: List[Player], + players: List[BasePlayer], name: str = "axelrod", - game: Game = None, + game: BaseGame = None, turns: int = None, prob_end: float = None, repetitions: int = 10, @@ -38,10 +22,10 @@ def __init__( Parameters ---------- players : list - A list of axelrod.Player objects + A list of axelrod BasePlayer objects name : string A name for the tournament - game : axelrod.Game + game : axelrod.BaseGame The game object used to score the tournament turns : integer The number of turns per match @@ -60,47 +44,12 @@ def __init__( The default is to use the correct values for turns, game and noise but these can be overridden if desired. """ - if game is None: - self.game = Game() - else: - self.game = game - self.name = name - self.noise = noise - self.num_interactions = 0 - self.players = players - self.repetitions = repetitions - self.edges = edges - - if turns is None and prob_end is None: - turns = DEFAULT_TURNS - - self.turns = turns - self.prob_end = prob_end - self.match_generator = MatchGenerator( - players=players, - turns=turns, - game=self.game, - repetitions=self.repetitions, - prob_end=prob_end, - noise=self.noise, - edges=edges, - match_attributes=match_attributes, - ) - self._logger = logging.getLogger(__name__) - - self.use_progress_bar = True - self.filename = None # type: Optional[str] - self._temp_file_descriptor = None # type: Optional[int] + pass def setup_output(self, filename=None): """assign/create `filename` to `self`.
If file should be deleted once `play` is finished, assign a file descriptor. """ - temp_file_descriptor = None - if filename is None: - temp_file_descriptor, filename = mkstemp() - - self.filename = filename - self._temp_file_descriptor = temp_file_descriptor + raise NotImplementedError() def play( self, @@ -108,7 +57,7 @@ def play( filename: str = None, processes: int = None, progress_bar: bool = True, - ) -> ResultSet: + ) -> axl.ResultSet: """ Plays the tournament and passes the results to the ResultSet class @@ -127,358 +76,4 @@ def play( ------- axelrod.ResultSet """ - self.num_interactions = 0 - - self.use_progress_bar = progress_bar - - self.setup_output(filename) - - if not build_results and not filename: - warnings.warn( - "Tournament results will not be accessible since " - "build_results=False and no filename was supplied." - ) - - if processes is None: - self._run_serial(build_results=build_results) - else: - self._run_parallel(build_results=build_results, processes=processes) - - result_set = None - if build_results: - result_set = ResultSet( - filename=self.filename, - players=[str(p) for p in self.players], - repetitions=self.repetitions, - processes=processes, - progress_bar=progress_bar, - ) - if self._temp_file_descriptor is not None: - assert self.filename is not None - os.close(self._temp_file_descriptor) - os.remove(self.filename) - - return result_set - - def _run_serial(self, build_results: bool = True) -> bool: - """Run all matches in serial.""" - - chunks = self.match_generator.build_match_chunks() - - out_file, writer = self._get_file_objects(build_results) - progress_bar = self._get_progress_bar() - - for chunk in chunks: - results = self._play_matches(chunk, build_results=build_results) - self._write_interactions_to_file(results, writer=writer) - - if self.use_progress_bar: - progress_bar.update(1) - - _close_objects(out_file, progress_bar) - - return True - - def _get_file_objects(self, build_results=True): - """Returns the file object and writer for writing results or - (None, None) if self.filename is None""" - file_obj = None - writer = None - if self.filename is not None: - file_obj = open(self.filename, "w") - writer = csv.writer(file_obj, lineterminator="\n") - - header = [ - "Interaction index", - "Player index", - "Opponent index", - "Repetition", - "Player name", - "Opponent name", - "Actions", - ] - if build_results: - header.extend( - [ - "Score", - "Score difference", - "Turns", - "Score per turn", - "Score difference per turn", - "Win", - "Initial cooperation", - "Cooperation count", - "CC count", - "CD count", - "DC count", - "DD count", - "CC to C count", - "CC to D count", - "CD to C count", - "CD to D count", - "DC to C count", - "DC to D count", - "DD to C count", - "DD to D count", - "Good partner", - ] - ) - - writer.writerow(header) - return file_obj, writer - - def _get_progress_bar(self): - if self.use_progress_bar: - return tqdm.tqdm(total=self.match_generator.size, desc="Playing matches") - return None - - def _write_interactions_to_file(self, results, writer): - """Write the interactions to csv.""" - for index_pair, interactions in results.items(): - repetition = 0 - for interaction, results in interactions: - - if results is not None: - ( - scores, - score_diffs, - turns, - score_per_turns, - score_diffs_per_turns, - initial_cooperation, - cooperations, - state_distribution, - state_to_action_distributions, - winner_index, - ) = results - for index, player_index in enumerate(index_pair): - opponent_index = index_pair[index - 1] - row 
= [self.num_interactions, player_index, opponent_index, repetition, - str(self.players[player_index]), str(self.players[opponent_index])] - history = actions_to_str([i[index] for i in interaction]) - row.append(history) - - if results is not None: - row.append(scores[index]) - row.append(score_diffs[index]) - row.append(turns) - row.append(score_per_turns[index]) - row.append(score_diffs_per_turns[index]) - row.append(int(winner_index is index)) - row.append(initial_cooperation[index]) - row.append(cooperations[index]) - - states = [(C, C), (C, D), (D, C), (D, D)] - if index == 1: - states = [s[::-1] for s in states] - for state in states: - row.append(state_distribution[state]) - for state in states: - row.append(state_to_action_distributions[index][(state, C)]) - row.append(state_to_action_distributions[index][(state, D)]) - - row.append(int(cooperations[index] >= cooperations[index - 1])) - - writer.writerow(row) - repetition += 1 - self.num_interactions += 1 - - def _run_parallel(self, processes: int = 2, build_results: bool = True) -> bool: - """ - Run all matches in parallel - - Parameters - ---------- - build_results : bool - whether or not to build a results set - processes : int - How many processes to use. - """ - # At first sight, it might seem simpler to use the multiprocessing Pool - # Class rather than Processes and Queues. However, this way is faster. - work_queue = Queue() # type: Queue - done_queue = Queue() # type: Queue - workers = self._n_workers(processes=processes) - - chunks = self.match_generator.build_match_chunks() - for chunk in chunks: - work_queue.put(chunk) - - self._start_workers(workers, work_queue, done_queue, build_results) - self._process_done_queue(workers, done_queue, build_results) - - return True - - def _n_workers(self, processes: int = 2) -> int: - """ - Determines the number of parallel processes to use. - - Returns - ------- - integer - """ - if 2 <= processes <= cpu_count(): - n_workers = processes - else: - n_workers = cpu_count() - return n_workers - - def _start_workers( - self, - workers: int, - work_queue: Queue, - done_queue: Queue, - build_results: bool = True, - ) -> bool: - """ - Initiates the sub-processes to carry out parallel processing. 
- - Parameters - ---------- - workers : integer - The number of sub-processes to create - work_queue : multiprocessing.Queue - A queue containing an entry for each round robin to be processed - done_queue : multiprocessing.Queue - A queue containing the output dictionaries from each round robin - build_results : bool - whether or not to build a results set - """ - for worker in range(workers): - process = Process( - target=self._worker, args=(work_queue, done_queue, build_results) - ) - work_queue.put("STOP") - process.start() - return True - - def _process_done_queue( - self, workers: int, done_queue: Queue, build_results: bool = True - ): - """ - Retrieves the matches from the parallel sub-processes - - Parameters - ---------- - workers : integer - The number of sub-processes in existence - done_queue : multiprocessing.Queue - A queue containing the output dictionaries from each round robin - build_results : bool - whether or not to build a results set - """ - out_file, writer = self._get_file_objects(build_results) - progress_bar = self._get_progress_bar() - - stops = 0 - while stops < workers: - results = done_queue.get() - if results == "STOP": - stops += 1 - else: - self._write_interactions_to_file(results, writer) - - if self.use_progress_bar: - progress_bar.update(1) - - _close_objects(out_file, progress_bar) - return True - - def _worker(self, work_queue: Queue, done_queue: Queue, build_results: bool = True): - """ - The work for each parallel sub-process to execute. - - Parameters - ---------- - work_queue : multiprocessing.Queue - A queue containing an entry for each round robin to be processed - done_queue : multiprocessing.Queue - A queue containing the output dictionaries from each round robin - build_results : bool - whether or not to build a results set - """ - for chunk in iter(work_queue.get, "STOP"): - interactions = self._play_matches(chunk, build_results) - done_queue.put(interactions) - done_queue.put("STOP") - return True - - def _play_matches(self, chunk, build_results=True): - """ - Play matches in a given chunk. - - Parameters - ---------- - chunk : tuple (index pair, match_parameters, repetitions) - match_parameters are also a tuple: (turns, game, noise) - build_results : bool - whether or not to build a results set - - Returns - ------- - interactions : dictionary - Mapping player index pairs to results of matches: - - (0, 1) -> [(C, D), (D, C),...] 
- """ - interactions = defaultdict(list) - index_pair, match_params, repetitions = chunk - p1_index, p2_index = index_pair - player1 = self.players[p1_index].clone() - player2 = self.players[p2_index].clone() - match_params["players"] = (player1, player2) - match = Match(**match_params) - for _ in range(repetitions): - match.play() - - if build_results: - results = self._calculate_results(match.result) - else: - results = None - - interactions[index_pair].append([match.result, results]) - return interactions - - def _calculate_results(self, interactions): - results = [] - - scores = iu.compute_final_score(interactions, self.game) - results.append(scores) - - score_diffs = scores[0] - scores[1], scores[1] - scores[0] - results.append(score_diffs) - - turns = len(interactions) - results.append(turns) - - score_per_turns = iu.compute_final_score_per_turn(interactions, self.game) - results.append(score_per_turns) - - score_diffs_per_turns = score_diffs[0] / turns, score_diffs[1] / turns - results.append(score_diffs_per_turns) - - initial_coops = tuple(map(bool, iu.compute_cooperations(interactions[:1]))) - results.append(initial_coops) - - cooperations = iu.compute_cooperations(interactions) - results.append(cooperations) - - state_distribution = iu.compute_state_distribution(interactions) - results.append(state_distribution) - - state_to_action_distributions = iu.compute_state_to_action_distribution( - interactions - ) - results.append(state_to_action_distributions) - - winner_index = iu.compute_winner_index(interactions, self.game) - results.append(winner_index) - - return results - - -def _close_objects(*objs): - """If the objects have a `close` method, closes them.""" - for obj in objs: - if hasattr(obj, "close"): - obj.close() + raise NotImplementedError() diff --git a/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/main.py b/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/main.py index 670a9ec7a..ec9643a99 100644 --- a/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/main.py +++ b/docs/tutorials/getting_started/_static/running_axelrods_first_tournament/main.py @@ -12,7 +12,7 @@ first_tournament_participants_ordered_by_reported_rank ) axl.seed(0) -tournament = axl.Tournament( +tournament = axl.IpdTournament( players=first_tournament_participants_ordered_by_reported_rank, turns=200, repetitions=5, diff --git a/migrate_ipd.sh b/migrate_ipd.sh new file mode 100755 index 000000000..fdfd6290d --- /dev/null +++ b/migrate_ipd.sh @@ -0,0 +1,48 @@ +#!/bin/sh +# Migrates data for ipd. Will delete later. + +# Manually move most files into ipd. +# Change any axelrod.ipd to axelrod if the IDE tried to change any. + +# Replace Player with IpdPlayer +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Player\([^A-Za-z]\)/\1IpdPlayer\2/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Player\([^A-Za-z]\)/IpdPlayer\1/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Player$/\1IpdPlayer/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Player$/IpdPlayer/g' {} ';' + +# Replace Match with IpdMatch +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Match\([^A-Za-z]\)/\1IpdMatch\2/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Match\([^A-Za-z]\)/IpdMatch\1/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Match$/\1IpdMatch/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Match$/IpdMatch/g' {} ';' + +# Replace Game with IpdGame +# find . 
-type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Game\([^A-Za-z]\)/\1IpdGame\2/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Game\([^A-Za-z]\)/IpdGame\1/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Game$/\1IpdGame/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Game$/IpdGame/g' {} ';' + +# Replace Tournament with IpdTournament +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Tournament\([^A-Za-z]\)/\1IpdTournament\2/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Tournament\([^A-Za-z]\)/IpdTournament\1/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/\([^A-Za-z]\)Tournament$/\1IpdTournament/g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/^Tournament$/IpdTournament/g' {} ';' + +# Undo IpdPlayer for Player in quotes +# find . -type f -name "*.py" -exec sed -i 's/\"IpdPlayer\ /\"Player\ /g' {} ';' +# Manually change remaining "IpdPlayer index" to "Player index" + +# Fix imports +# find . -type f -name "*.py" -exec sed -i 's/from\ axelrod\./from\ axelrod\.ipd\./g' {} ';' +# find . -type f -name "*.py" -exec sed -i 's/import\ axelrod\./import\ axelrod\.ipd\./g' {} ';' + +# A bunch of stuff needs to be added to __init__, so copy the new file. +# Change "from axelrod.ipd.strategies import FSMPlayer" to "from axelrod.ipd.strategies.finite_state_machines import FSMPlayer" in axelrod_second.py + +# find . -type f -name "*.py" -exec sed -i 's/test_outputs/\.\.\/test_outputs/g' {} ';' + +# Manually change "axl.match" to "axl.ipd.match" and "axl.plot" to "axl.ipd.plot" and "axl.strategy_transformers" to "axl.ipd.strategy_transformers" and "axl.result_set" to "axl.ipd.result_set" +# Manually change path in test_load_data.py +# Manually change "from axelrod.ipd.tests import TestTitForTat" to "from axelrod.ipd.tests.strategies.test_titfortat import TestTitForTat" +# Manually move SimpleFSM and SimpleHMM to _strategies. +# Manually fix test_hmm imports diff --git a/rebuild_classifier_table.py b/rebuild_classifier_table.py index 86d39b587..17f6172cf 100644 --- a/rebuild_classifier_table.py +++ b/rebuild_classifier_table.py @@ -1,7 +1,7 @@ import os from axelrod import all_strategies -from axelrod.classifier import all_classifiers, rebuild_classifier_table +from axelrod.ipd.classifier import all_classifiers, rebuild_classifier_table if __name__ == "__main__": # Change to relative path inside axelrod folder diff --git a/test b/test index e96b8edb5..ab682950b 100755 --- a/test +++ b/test @@ -1,3 +1,3 @@ #!/usr/bin/env bash -python -m unittest discover axelrod/tests/ +python -m unittest discover python doctests.py From 86f75bd0651cf503a6d6fd670c94cc0a7a3448aa Mon Sep 17 00:00:00 2001 From: "T.J.
Gaffney" Date: Tue, 21 Apr 2020 21:45:52 -0700 Subject: [PATCH 2/7] Change namespace back --- axelrod/__init__.py | 42 +- axelrod/{game.py => base_game.py} | 0 axelrod/{match.py => base_match.py} | 4 +- axelrod/{player.py => base_player.py} | 0 axelrod/{tournament.py => base_tournament.py} | 4 +- axelrod/ipd/__init__.py | 0 axelrod/ipd/_strategy_utils.py | 193 -- axelrod/ipd/action.py | 102 - axelrod/ipd/classifier.py | 246 -- .../compute_finite_state_machine_memory.py | 266 -- axelrod/ipd/data/all_classifiers.yml | 1986 --------------- axelrod/ipd/data/ann_weights.csv | 4 - axelrod/ipd/data/pso_gambler.csv | 6 - axelrod/ipd/deterministic_cache.py | 175 -- axelrod/ipd/ecosystem.py | 121 - axelrod/ipd/eigen.py | 90 - axelrod/ipd/evolvable_player.py | 88 - axelrod/ipd/fingerprint.py | 611 ----- axelrod/ipd/game.py | 68 - axelrod/ipd/graph.py | 166 -- axelrod/ipd/history.py | 133 - axelrod/ipd/interaction_utils.py | 286 --- axelrod/ipd/load_data_.py | 62 - axelrod/ipd/match.py | 258 -- axelrod/ipd/match_generator.py | 123 - axelrod/ipd/mock_player.py | 29 - axelrod/ipd/moran.py | 541 ----- axelrod/ipd/player.py | 211 -- axelrod/ipd/plot.py | 333 --- axelrod/ipd/random_.py | 95 - axelrod/ipd/result_set.py | 788 ------ axelrod/ipd/strategies/__init__.py | 133 - axelrod/ipd/strategies/_filters.py | 201 -- axelrod/ipd/strategies/_strategies.py | 508 ---- axelrod/ipd/strategies/adaptive.py | 55 - axelrod/ipd/strategies/adaptor.py | 104 - axelrod/ipd/strategies/alternator.py | 33 - axelrod/ipd/strategies/ann.py | 350 --- axelrod/ipd/strategies/apavlov.py | 122 - axelrod/ipd/strategies/appeaser.py | 38 - axelrod/ipd/strategies/averagecopier.py | 61 - axelrod/ipd/strategies/axelrod_first.py | 1026 -------- axelrod/ipd/strategies/axelrod_second.py | 2131 ----------------- axelrod/ipd/strategies/backstabber.py | 106 - axelrod/ipd/strategies/better_and_better.py | 32 - axelrod/ipd/strategies/bush_mosteller.py | 132 - axelrod/ipd/strategies/calculator.py | 55 - axelrod/ipd/strategies/cooperator.py | 77 - axelrod/ipd/strategies/cycler.py | 270 --- axelrod/ipd/strategies/darwin.py | 97 - axelrod/ipd/strategies/dbs.py | 441 ---- axelrod/ipd/strategies/defector.py | 61 - axelrod/ipd/strategies/doubler.py | 36 - .../ipd/strategies/finite_state_machines.py | 1002 -------- axelrod/ipd/strategies/forgiver.py | 67 - axelrod/ipd/strategies/gambler.py | 235 -- axelrod/ipd/strategies/geller.py | 118 - axelrod/ipd/strategies/gobymajority.py | 246 -- axelrod/ipd/strategies/gradualkiller.py | 37 - axelrod/ipd/strategies/grudger.py | 319 --- axelrod/ipd/strategies/grumpy.py | 73 - axelrod/ipd/strategies/handshake.py | 44 - axelrod/ipd/strategies/hmm.py | 389 --- axelrod/ipd/strategies/human.py | 175 -- axelrod/ipd/strategies/hunter.py | 255 -- axelrod/ipd/strategies/inverse.py | 48 - axelrod/ipd/strategies/lookerup.py | 580 ----- .../ipd/strategies/mathematicalconstants.py | 79 - axelrod/ipd/strategies/memoryone.py | 343 --- axelrod/ipd/strategies/memorytwo.py | 259 -- axelrod/ipd/strategies/meta.py | 682 ------ axelrod/ipd/strategies/mindcontrol.py | 95 - axelrod/ipd/strategies/mindreader.py | 108 - axelrod/ipd/strategies/mutual.py | 83 - axelrod/ipd/strategies/negation.py | 34 - axelrod/ipd/strategies/oncebitten.py | 130 - axelrod/ipd/strategies/prober.py | 405 ---- axelrod/ipd/strategies/punisher.py | 183 -- axelrod/ipd/strategies/qlearner.py | 161 -- axelrod/ipd/strategies/rand.py | 46 - axelrod/ipd/strategies/resurrection.py | 73 - axelrod/ipd/strategies/retaliate.py | 196 -- 
axelrod/ipd/strategies/revised_downing.py | 75 - axelrod/ipd/strategies/selfsteem.py | 53 - axelrod/ipd/strategies/sequence_player.py | 111 - axelrod/ipd/strategies/shortmem.py | 48 - axelrod/ipd/strategies/stalker.py | 78 - axelrod/ipd/strategies/titfortat.py | 917 ------- axelrod/ipd/strategies/verybad.py | 52 - axelrod/ipd/strategies/worse_and_worse.py | 126 - axelrod/ipd/strategies/zero_determinant.py | 256 -- axelrod/ipd/strategy_transformers.py | 679 ------ axelrod/ipd/tests/__init__.py | 0 axelrod/ipd/tests/integration/__init__.py | 0 .../ipd/tests/integration/test_filtering.py | 124 - axelrod/ipd/tests/integration/test_matches.py | 71 - axelrod/ipd/tests/integration/test_names.py | 13 - .../integration/test_sample_tournaments.py | 70 - .../ipd/tests/integration/test_tournament.py | 171 -- axelrod/ipd/tests/property.py | 335 --- axelrod/ipd/tests/strategies/__init__.py | 0 axelrod/ipd/tests/strategies/test_adaptive.py | 46 - axelrod/ipd/tests/strategies/test_adaptor.py | 93 - .../ipd/tests/strategies/test_alternator.py | 33 - axelrod/ipd/tests/strategies/test_ann.py | 152 -- axelrod/ipd/tests/strategies/test_apavlov.py | 163 -- axelrod/ipd/tests/strategies/test_appeaser.py | 37 - .../tests/strategies/test_averagecopier.py | 178 -- .../tests/strategies/test_axelrod_first.py | 810 ------- .../tests/strategies/test_axelrod_second.py | 2035 ---------------- .../ipd/tests/strategies/test_backstabber.py | 171 -- .../strategies/test_better_and_better.py | 94 - .../tests/strategies/test_bush_mosteller.py | 77 - .../ipd/tests/strategies/test_calculator.py | 166 -- .../ipd/tests/strategies/test_cooperator.py | 79 - axelrod/ipd/tests/strategies/test_cycler.py | 237 -- axelrod/ipd/tests/strategies/test_darwin.py | 105 - axelrod/ipd/tests/strategies/test_dbs.py | 283 --- axelrod/ipd/tests/strategies/test_defector.py | 63 - axelrod/ipd/tests/strategies/test_doubler.py | 49 - .../tests/strategies/test_evolvable_player.py | 213 -- .../strategies/test_finite_state_machines.py | 1139 --------- axelrod/ipd/tests/strategies/test_forgiver.py | 102 - axelrod/ipd/tests/strategies/test_gambler.py | 585 ----- axelrod/ipd/tests/strategies/test_geller.py | 132 - .../ipd/tests/strategies/test_gobymajority.py | 179 -- .../tests/strategies/test_gradualkiller.py | 76 - axelrod/ipd/tests/strategies/test_grudger.py | 278 --- axelrod/ipd/tests/strategies/test_grumpy.py | 80 - .../ipd/tests/strategies/test_handshake.py | 36 - axelrod/ipd/tests/strategies/test_headsup.py | 120 - axelrod/ipd/tests/strategies/test_hmm.py | 327 --- axelrod/ipd/tests/strategies/test_human.py | 133 - axelrod/ipd/tests/strategies/test_hunter.py | 265 -- axelrod/ipd/tests/strategies/test_inverse.py | 48 - axelrod/ipd/tests/strategies/test_lookerup.py | 760 ------ .../strategies/test_mathematicalconstants.py | 82 - .../ipd/tests/strategies/test_memoryone.py | 319 --- .../ipd/tests/strategies/test_memorytwo.py | 315 --- axelrod/ipd/tests/strategies/test_meta.py | 721 ------ .../ipd/tests/strategies/test_mindcontrol.py | 99 - .../ipd/tests/strategies/test_mindreader.py | 172 -- axelrod/ipd/tests/strategies/test_mutual.py | 148 -- axelrod/ipd/tests/strategies/test_negation.py | 39 - .../ipd/tests/strategies/test_oncebitten.py | 142 -- axelrod/ipd/tests/strategies/test_player.py | 735 ------ axelrod/ipd/tests/strategies/test_prober.py | 385 --- axelrod/ipd/tests/strategies/test_punisher.py | 194 -- axelrod/ipd/tests/strategies/test_qlearner.py | 151 -- axelrod/ipd/tests/strategies/test_rand.py | 46 - .../ipd/tests/strategies/test_resurrection.py 
| 59 - .../ipd/tests/strategies/test_retaliate.py | 140 -- .../tests/strategies/test_revised_downing.py | 42 - .../ipd/tests/strategies/test_selfsteem.py | 81 - .../tests/strategies/test_sequence_player.py | 80 - axelrod/ipd/tests/strategies/test_shortmem.py | 57 - axelrod/ipd/tests/strategies/test_stalker.py | 94 - .../ipd/tests/strategies/test_titfortat.py | 1191 --------- axelrod/ipd/tests/strategies/test_verybad.py | 47 - .../tests/strategies/test_worse_and_worse.py | 157 -- .../tests/strategies/test_zero_determinant.py | 319 --- axelrod/ipd/tests/unit/__init__.py | 0 axelrod/ipd/tests/unit/test_actions.py | 64 - axelrod/ipd/tests/unit/test_classification.py | 356 --- ...est_compute_finite_state_machine_memory.py | 350 --- .../tests/unit/test_deterministic_cache.py | 111 - axelrod/ipd/tests/unit/test_ecosystem.py | 102 - axelrod/ipd/tests/unit/test_eigen.py | 52 - axelrod/ipd/tests/unit/test_filters.py | 170 -- axelrod/ipd/tests/unit/test_fingerprint.py | 516 ---- axelrod/ipd/tests/unit/test_game.py | 80 - axelrod/ipd/tests/unit/test_graph.py | 305 --- axelrod/ipd/tests/unit/test_history.py | 119 - .../ipd/tests/unit/test_interaction_utils.py | 146 -- axelrod/ipd/tests/unit/test_load_data.py | 17 - axelrod/ipd/tests/unit/test_match.py | 377 --- .../ipd/tests/unit/test_match_generator.py | 237 -- axelrod/ipd/tests/unit/test_mock_player.py | 20 - axelrod/ipd/tests/unit/test_moran.py | 561 ----- axelrod/ipd/tests/unit/test_pickling.py | 394 --- axelrod/ipd/tests/unit/test_plot.py | 257 -- axelrod/ipd/tests/unit/test_property.py | 232 -- axelrod/ipd/tests/unit/test_random_.py | 88 - axelrod/ipd/tests/unit/test_resultset.py | 1248 ---------- .../tests/unit/test_strategy_transformers.py | 714 ------ axelrod/ipd/tests/unit/test_strategy_utils.py | 144 -- axelrod/ipd/tests/unit/test_tournament.py | 1070 --------- axelrod/ipd/tests/unit/test_version.py | 10 - axelrod/ipd/tournament.py | 497 ---- 189 files changed, 22 insertions(+), 46118 deletions(-) rename axelrod/{game.py => base_game.py} (100%) rename axelrod/{match.py => base_match.py} (96%) rename axelrod/{player.py => base_player.py} (100%) rename axelrod/{tournament.py => base_tournament.py} (96%) delete mode 100644 axelrod/ipd/__init__.py delete mode 100644 axelrod/ipd/_strategy_utils.py delete mode 100644 axelrod/ipd/action.py delete mode 100644 axelrod/ipd/classifier.py delete mode 100644 axelrod/ipd/compute_finite_state_machine_memory.py delete mode 100644 axelrod/ipd/data/all_classifiers.yml delete mode 100644 axelrod/ipd/data/ann_weights.csv delete mode 100644 axelrod/ipd/data/pso_gambler.csv delete mode 100644 axelrod/ipd/deterministic_cache.py delete mode 100644 axelrod/ipd/ecosystem.py delete mode 100644 axelrod/ipd/eigen.py delete mode 100644 axelrod/ipd/evolvable_player.py delete mode 100644 axelrod/ipd/fingerprint.py delete mode 100644 axelrod/ipd/game.py delete mode 100644 axelrod/ipd/graph.py delete mode 100644 axelrod/ipd/history.py delete mode 100644 axelrod/ipd/interaction_utils.py delete mode 100644 axelrod/ipd/load_data_.py delete mode 100644 axelrod/ipd/match.py delete mode 100644 axelrod/ipd/match_generator.py delete mode 100644 axelrod/ipd/mock_player.py delete mode 100644 axelrod/ipd/moran.py delete mode 100644 axelrod/ipd/player.py delete mode 100644 axelrod/ipd/plot.py delete mode 100644 axelrod/ipd/random_.py delete mode 100644 axelrod/ipd/result_set.py delete mode 100644 axelrod/ipd/strategies/__init__.py delete mode 100644 axelrod/ipd/strategies/_filters.py delete mode 100644 
axelrod/ipd/strategies/_strategies.py delete mode 100644 axelrod/ipd/strategies/adaptive.py delete mode 100644 axelrod/ipd/strategies/adaptor.py delete mode 100644 axelrod/ipd/strategies/alternator.py delete mode 100644 axelrod/ipd/strategies/ann.py delete mode 100644 axelrod/ipd/strategies/apavlov.py delete mode 100644 axelrod/ipd/strategies/appeaser.py delete mode 100644 axelrod/ipd/strategies/averagecopier.py delete mode 100644 axelrod/ipd/strategies/axelrod_first.py delete mode 100644 axelrod/ipd/strategies/axelrod_second.py delete mode 100644 axelrod/ipd/strategies/backstabber.py delete mode 100644 axelrod/ipd/strategies/better_and_better.py delete mode 100644 axelrod/ipd/strategies/bush_mosteller.py delete mode 100644 axelrod/ipd/strategies/calculator.py delete mode 100644 axelrod/ipd/strategies/cooperator.py delete mode 100644 axelrod/ipd/strategies/cycler.py delete mode 100644 axelrod/ipd/strategies/darwin.py delete mode 100644 axelrod/ipd/strategies/dbs.py delete mode 100644 axelrod/ipd/strategies/defector.py delete mode 100644 axelrod/ipd/strategies/doubler.py delete mode 100644 axelrod/ipd/strategies/finite_state_machines.py delete mode 100644 axelrod/ipd/strategies/forgiver.py delete mode 100644 axelrod/ipd/strategies/gambler.py delete mode 100644 axelrod/ipd/strategies/geller.py delete mode 100644 axelrod/ipd/strategies/gobymajority.py delete mode 100644 axelrod/ipd/strategies/gradualkiller.py delete mode 100644 axelrod/ipd/strategies/grudger.py delete mode 100644 axelrod/ipd/strategies/grumpy.py delete mode 100644 axelrod/ipd/strategies/handshake.py delete mode 100644 axelrod/ipd/strategies/hmm.py delete mode 100644 axelrod/ipd/strategies/human.py delete mode 100644 axelrod/ipd/strategies/hunter.py delete mode 100644 axelrod/ipd/strategies/inverse.py delete mode 100644 axelrod/ipd/strategies/lookerup.py delete mode 100644 axelrod/ipd/strategies/mathematicalconstants.py delete mode 100644 axelrod/ipd/strategies/memoryone.py delete mode 100644 axelrod/ipd/strategies/memorytwo.py delete mode 100644 axelrod/ipd/strategies/meta.py delete mode 100644 axelrod/ipd/strategies/mindcontrol.py delete mode 100644 axelrod/ipd/strategies/mindreader.py delete mode 100644 axelrod/ipd/strategies/mutual.py delete mode 100644 axelrod/ipd/strategies/negation.py delete mode 100644 axelrod/ipd/strategies/oncebitten.py delete mode 100644 axelrod/ipd/strategies/prober.py delete mode 100644 axelrod/ipd/strategies/punisher.py delete mode 100644 axelrod/ipd/strategies/qlearner.py delete mode 100644 axelrod/ipd/strategies/rand.py delete mode 100644 axelrod/ipd/strategies/resurrection.py delete mode 100644 axelrod/ipd/strategies/retaliate.py delete mode 100644 axelrod/ipd/strategies/revised_downing.py delete mode 100644 axelrod/ipd/strategies/selfsteem.py delete mode 100644 axelrod/ipd/strategies/sequence_player.py delete mode 100644 axelrod/ipd/strategies/shortmem.py delete mode 100644 axelrod/ipd/strategies/stalker.py delete mode 100644 axelrod/ipd/strategies/titfortat.py delete mode 100644 axelrod/ipd/strategies/verybad.py delete mode 100644 axelrod/ipd/strategies/worse_and_worse.py delete mode 100644 axelrod/ipd/strategies/zero_determinant.py delete mode 100644 axelrod/ipd/strategy_transformers.py delete mode 100644 axelrod/ipd/tests/__init__.py delete mode 100644 axelrod/ipd/tests/integration/__init__.py delete mode 100644 axelrod/ipd/tests/integration/test_filtering.py delete mode 100644 axelrod/ipd/tests/integration/test_matches.py delete mode 100644 axelrod/ipd/tests/integration/test_names.py 
delete mode 100644 axelrod/ipd/tests/integration/test_sample_tournaments.py delete mode 100644 axelrod/ipd/tests/integration/test_tournament.py delete mode 100644 axelrod/ipd/tests/property.py delete mode 100644 axelrod/ipd/tests/strategies/__init__.py delete mode 100644 axelrod/ipd/tests/strategies/test_adaptive.py delete mode 100644 axelrod/ipd/tests/strategies/test_adaptor.py delete mode 100644 axelrod/ipd/tests/strategies/test_alternator.py delete mode 100644 axelrod/ipd/tests/strategies/test_ann.py delete mode 100644 axelrod/ipd/tests/strategies/test_apavlov.py delete mode 100644 axelrod/ipd/tests/strategies/test_appeaser.py delete mode 100644 axelrod/ipd/tests/strategies/test_averagecopier.py delete mode 100644 axelrod/ipd/tests/strategies/test_axelrod_first.py delete mode 100644 axelrod/ipd/tests/strategies/test_axelrod_second.py delete mode 100644 axelrod/ipd/tests/strategies/test_backstabber.py delete mode 100644 axelrod/ipd/tests/strategies/test_better_and_better.py delete mode 100644 axelrod/ipd/tests/strategies/test_bush_mosteller.py delete mode 100644 axelrod/ipd/tests/strategies/test_calculator.py delete mode 100644 axelrod/ipd/tests/strategies/test_cooperator.py delete mode 100644 axelrod/ipd/tests/strategies/test_cycler.py delete mode 100644 axelrod/ipd/tests/strategies/test_darwin.py delete mode 100644 axelrod/ipd/tests/strategies/test_dbs.py delete mode 100644 axelrod/ipd/tests/strategies/test_defector.py delete mode 100644 axelrod/ipd/tests/strategies/test_doubler.py delete mode 100644 axelrod/ipd/tests/strategies/test_evolvable_player.py delete mode 100644 axelrod/ipd/tests/strategies/test_finite_state_machines.py delete mode 100644 axelrod/ipd/tests/strategies/test_forgiver.py delete mode 100755 axelrod/ipd/tests/strategies/test_gambler.py delete mode 100644 axelrod/ipd/tests/strategies/test_geller.py delete mode 100644 axelrod/ipd/tests/strategies/test_gobymajority.py delete mode 100644 axelrod/ipd/tests/strategies/test_gradualkiller.py delete mode 100644 axelrod/ipd/tests/strategies/test_grudger.py delete mode 100644 axelrod/ipd/tests/strategies/test_grumpy.py delete mode 100644 axelrod/ipd/tests/strategies/test_handshake.py delete mode 100644 axelrod/ipd/tests/strategies/test_headsup.py delete mode 100644 axelrod/ipd/tests/strategies/test_hmm.py delete mode 100644 axelrod/ipd/tests/strategies/test_human.py delete mode 100644 axelrod/ipd/tests/strategies/test_hunter.py delete mode 100644 axelrod/ipd/tests/strategies/test_inverse.py delete mode 100755 axelrod/ipd/tests/strategies/test_lookerup.py delete mode 100644 axelrod/ipd/tests/strategies/test_mathematicalconstants.py delete mode 100644 axelrod/ipd/tests/strategies/test_memoryone.py delete mode 100644 axelrod/ipd/tests/strategies/test_memorytwo.py delete mode 100644 axelrod/ipd/tests/strategies/test_meta.py delete mode 100644 axelrod/ipd/tests/strategies/test_mindcontrol.py delete mode 100644 axelrod/ipd/tests/strategies/test_mindreader.py delete mode 100644 axelrod/ipd/tests/strategies/test_mutual.py delete mode 100644 axelrod/ipd/tests/strategies/test_negation.py delete mode 100644 axelrod/ipd/tests/strategies/test_oncebitten.py delete mode 100644 axelrod/ipd/tests/strategies/test_player.py delete mode 100644 axelrod/ipd/tests/strategies/test_prober.py delete mode 100644 axelrod/ipd/tests/strategies/test_punisher.py delete mode 100644 axelrod/ipd/tests/strategies/test_qlearner.py delete mode 100644 axelrod/ipd/tests/strategies/test_rand.py delete mode 100644 axelrod/ipd/tests/strategies/test_resurrection.py 
delete mode 100644 axelrod/ipd/tests/strategies/test_retaliate.py delete mode 100644 axelrod/ipd/tests/strategies/test_revised_downing.py delete mode 100644 axelrod/ipd/tests/strategies/test_selfsteem.py delete mode 100644 axelrod/ipd/tests/strategies/test_sequence_player.py delete mode 100644 axelrod/ipd/tests/strategies/test_shortmem.py delete mode 100644 axelrod/ipd/tests/strategies/test_stalker.py delete mode 100644 axelrod/ipd/tests/strategies/test_titfortat.py delete mode 100644 axelrod/ipd/tests/strategies/test_verybad.py delete mode 100644 axelrod/ipd/tests/strategies/test_worse_and_worse.py delete mode 100644 axelrod/ipd/tests/strategies/test_zero_determinant.py delete mode 100644 axelrod/ipd/tests/unit/__init__.py delete mode 100644 axelrod/ipd/tests/unit/test_actions.py delete mode 100644 axelrod/ipd/tests/unit/test_classification.py delete mode 100644 axelrod/ipd/tests/unit/test_compute_finite_state_machine_memory.py delete mode 100644 axelrod/ipd/tests/unit/test_deterministic_cache.py delete mode 100644 axelrod/ipd/tests/unit/test_ecosystem.py delete mode 100644 axelrod/ipd/tests/unit/test_eigen.py delete mode 100644 axelrod/ipd/tests/unit/test_filters.py delete mode 100644 axelrod/ipd/tests/unit/test_fingerprint.py delete mode 100644 axelrod/ipd/tests/unit/test_game.py delete mode 100644 axelrod/ipd/tests/unit/test_graph.py delete mode 100644 axelrod/ipd/tests/unit/test_history.py delete mode 100644 axelrod/ipd/tests/unit/test_interaction_utils.py delete mode 100644 axelrod/ipd/tests/unit/test_load_data.py delete mode 100644 axelrod/ipd/tests/unit/test_match.py delete mode 100644 axelrod/ipd/tests/unit/test_match_generator.py delete mode 100644 axelrod/ipd/tests/unit/test_mock_player.py delete mode 100644 axelrod/ipd/tests/unit/test_moran.py delete mode 100644 axelrod/ipd/tests/unit/test_pickling.py delete mode 100644 axelrod/ipd/tests/unit/test_plot.py delete mode 100644 axelrod/ipd/tests/unit/test_property.py delete mode 100644 axelrod/ipd/tests/unit/test_random_.py delete mode 100644 axelrod/ipd/tests/unit/test_resultset.py delete mode 100644 axelrod/ipd/tests/unit/test_strategy_transformers.py delete mode 100644 axelrod/ipd/tests/unit/test_strategy_utils.py delete mode 100644 axelrod/ipd/tests/unit/test_tournament.py delete mode 100644 axelrod/ipd/tests/unit/test_version.py delete mode 100644 axelrod/ipd/tournament.py diff --git a/axelrod/__init__.py b/axelrod/__init__.py index 61e8e5004..1edd170a4 100644 --- a/axelrod/__init__.py +++ b/axelrod/__init__.py @@ -1,28 +1,22 @@ DEFAULT_TURNS = 200 # The order of imports matters! 
-from axelrod.ipd import graph -from axelrod.ipd.action import Action -from axelrod.ipd.random_ import random_choice, random_flip, seed, Pdf -from axelrod.ipd import eigen -from axelrod.ipd.plot import Plot -from axelrod.ipd.history import History, LimitedHistory -from axelrod.player import BasePlayer -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.classifier import Classifiers -from axelrod.ipd.evolvable_player import EvolvablePlayer -from axelrod.game import BaseGame -from axelrod.ipd.game import IpdGame, DefaultGame -from axelrod.ipd.moran import MoranProcess, ApproximateMoranProcess -from axelrod.ipd.strategies import * -from axelrod.ipd.match_generator import * -from axelrod.ipd.tournament import IpdTournament -from axelrod.ipd.ecosystem import Ecosystem -from axelrod.ipd.match import IpdMatch -from axelrod.ipd.result_set import ResultSet -from axelrod.ipd.deterministic_cache import DeterministicCache -from axelrod.ipd import fingerprint -from axelrod.ipd.fingerprint import AshlockFingerprint, TransitiveFingerprint -from axelrod.ipd import interaction_utils -from axelrod.ipd.mock_player import MockPlayer from axelrod.version import __version__ +from axelrod.load_data_ import load_pso_tables, load_weights +from axelrod import graph +from axelrod.action import Action +from axelrod.random_ import random_choice, random_flip, seed, Pdf +from axelrod.plot import Plot +from axelrod.game import DefaultGame +from axelrod.history import History, LimitedHistory +from axelrod.classifier import Classifiers +from axelrod.evolvable_player import EvolvablePlayer +from axelrod.mock_player import MockPlayer +from axelrod.moran import MoranProcess, ApproximateMoranProcess +from axelrod.strategies import * +from axelrod.deterministic_cache import DeterministicCache +from axelrod.match_generator import * +from axelrod.result_set import ResultSet +from axelrod.ecosystem import Ecosystem +from axelrod.fingerprint import AshlockFingerprint, TransitiveFingerprint +from axelrod.ipd_adapter import Player, Game, Match, Tournament diff --git a/axelrod/game.py b/axelrod/base_game.py similarity index 100% rename from axelrod/game.py rename to axelrod/base_game.py diff --git a/axelrod/match.py b/axelrod/base_match.py similarity index 96% rename from axelrod/match.py rename to axelrod/base_match.py index 8dbd5580f..ef34c6b22 100644 --- a/axelrod/match.py +++ b/axelrod/base_match.py @@ -1,8 +1,8 @@ from typing import Dict, List, Tuple, Union import axelrod as axl -from axelrod.game import BaseGame -from axelrod.player import BasePlayer +from axelrod.base_game import BaseGame +from axelrod.base_player import BasePlayer Score = Union[int, float] diff --git a/axelrod/player.py b/axelrod/base_player.py similarity index 100% rename from axelrod/player.py rename to axelrod/base_player.py diff --git a/axelrod/tournament.py b/axelrod/base_tournament.py similarity index 96% rename from axelrod/tournament.py rename to axelrod/base_tournament.py index 90c168f5e..6da6ddd95 100644 --- a/axelrod/tournament.py +++ b/axelrod/base_tournament.py @@ -1,8 +1,8 @@ from typing import List, Tuple import axelrod as axl -from axelrod.player import BasePlayer -from axelrod.game import BaseGame +from axelrod.base_player import BasePlayer +from axelrod.base_game import BaseGame class BaseTournament(object): diff --git a/axelrod/ipd/__init__.py b/axelrod/ipd/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/axelrod/ipd/_strategy_utils.py b/axelrod/ipd/_strategy_utils.py deleted file mode 100644 index 
b10cddf75..000000000 --- a/axelrod/ipd/_strategy_utils.py +++ /dev/null @@ -1,193 +0,0 @@ -"""Utilities used by various strategies.""" - -import itertools -from functools import lru_cache - -from axelrod.ipd.action import Action -from axelrod.ipd.strategies.cooperator import Cooperator -from axelrod.ipd.strategies.defector import Defector - -C, D = Action.C, Action.D - - -def detect_cycle(history, min_size=1, max_size=12, offset=0): - """Detects cycles in the sequence history. - - Mainly used by hunter strategies. - - Parameters - ---------- - history: sequence of C and D - The sequence to look for cycles within - min_size: int, 1 - The minimum length of the cycle - max_size: int, 12 - The maximum length of the cycle - offset: int, 0 - The amount of history to skip initially - - Returns - ------- - Tuple of C and D - The cycle detected in the input history - """ - history_tail = history[offset:] - new_max_size = min(len(history_tail) // 2, max_size) - for i in range(min_size, new_max_size + 1): - has_cycle = True - cycle = tuple(history_tail[:i]) - for j, elem in enumerate(history_tail): - if elem != cycle[j % len(cycle)]: - has_cycle = False - break - if has_cycle: - return cycle - return None - - -def inspect_strategy(inspector, opponent): - """Inspects the strategy of an opponent. - - Simulate one round of play with an opponent, unless the opponent has - an inspection countermeasure. - - Parameters - ---------- - inspector: IpdPlayer - The player doing the inspecting - opponent: IpdPlayer - The player being inspected - - Returns - ------- - Action - The action that would be taken by the opponent. - """ - if hasattr(opponent, "foil_strategy_inspection"): - return opponent.foil_strategy_inspection() - else: - return opponent.strategy(inspector) - - -def _limited_simulate_play(player_1, player_2, h1): - """Simulates a player's move. - - After inspecting player_2's next move (allowing player_2's strategy - method to set any internal variables as needed), update histories - for both players. Note that player_1's move is an argument. - - If you need a more complete simulation, see `simulate_play` in - player.py. This function is specifically designed for the needs - of MindReader. - - Parameters - ---------- - player_1: IpdPlayer - The player whose move is already known. - player_2: IpdPlayer - The player the we want to inspect. - h1: Action - The next action for first player. - """ - h2 = inspect_strategy(player_1, player_2) - player_1.update_history(h1, h2) - player_2.update_history(h2, h1) - - -def simulate_match(player_1, player_2, strategy, rounds=10): - """Simulates a number of rounds with a constant strategy. - - Parameters - ---------- - player_1: IpdPlayer - The player that will have a constant strategy. - player_2: IpdPlayer - The player we want to simulate. - strategy: Action - The constant strategy to use for first player. - rounds: int - The number of rounds to play. - """ - for match in range(rounds): - _limited_simulate_play(player_1, player_2, strategy) - - -def _calculate_scores(p1, p2, game): - """Calculates the scores for two players based their history. - - Parameters - ---------- - p1: IpdPlayer - The first player. - p2: IpdPlayer - The second player. - game: IpdGame - IpdGame object used to score rounds in the players' histories. - - Returns - ------- - int, int - The scores for the two input players. 
- """ - s1, s2 = 0, 0 - for pair in zip(p1.history, p2.history): - score = game.score(pair) - s1 += score[0] - s2 += score[1] - return s1, s2 - - -def look_ahead(player_1, player_2, game, rounds=10): - """Returns a constant action that maximizes score by looking ahead. - - Parameters - ---------- - player_1: IpdPlayer - The player that will look ahead. - player_2: IpdPlayer - The opponent that will be inspected. - game: IpdGame - The IpdGame object used to score rounds. - rounds: int - The number of rounds to look ahead. - - Returns - ------- - Action - The action that maximized score if it is played constantly. - """ - results = {} - possible_strategies = {C: Cooperator(), D: Defector()} - for action, player in possible_strategies.items(): - # Instead of a deepcopy, create a new opponent and replay the history to it. - opponent_ = player_2.clone() - for h in player_1.history: - _limited_simulate_play(player, opponent_, h) - - # Now play forward with the constant strategy. - simulate_match(player, opponent_, action, rounds) - results[action] = _calculate_scores(player, opponent_, game) - - return C if results[C] > results[D] else D - - -@lru_cache() -def recursive_thue_morse(n): - """The recursive definition of the Thue-Morse sequence. - - The first few terms of the Thue-Morse sequence are: - 0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 . . . - """ - - if n == 0: - return 0 - if n % 2 == 0: - return recursive_thue_morse(n / 2) - if n % 2 == 1: - return 1 - recursive_thue_morse((n - 1) / 2) - - -def thue_morse_generator(start=0): - """A generator for the Thue-Morse sequence.""" - for n in itertools.count(start): - yield recursive_thue_morse(n) diff --git a/axelrod/ipd/action.py b/axelrod/ipd/action.py deleted file mode 100644 index a87b9a06b..000000000 --- a/axelrod/ipd/action.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Actions for the Prisoner's Dilemma and related utilities. - -For convenience in other modules you can alias the actions: - -from axelrod import Action -C, D = Action.C, Action.D -""" - -from enum import Enum -from functools import total_ordering -from typing import Iterable - - -class UnknownActionError(ValueError): - """Error indicating an unknown action was used.""" - - def __init__(self, *args): - super(UnknownActionError, self).__init__(*args) - - -@total_ordering -class Action(Enum): - """Core actions in the Prisoner's Dilemma. - - There are only two possible actions, namely Cooperate or Defect, - which are called C and D for convenience. - """ - - C = 0 # Cooperate - D = 1 # Defect - - def __lt__(self, other): - return self.value < other.value - - def __repr__(self): - return self.name - - def __str__(self): - return self.name - - def flip(self): - """Returns the opposite Action.""" - if self == Action.C: - return Action.D - return Action.C - - @classmethod - def from_char(cls, character): - """Converts a single character into an Action. - - Parameters - ---------- - character: a string of length one - - Returns - ------- - Action - The action corresponding to the input character - - - Raises - ------ - UnknownActionError - If the input string is not 'C' or 'D' - """ - if character == "C": - return cls.C - if character == "D": - return cls.D - raise UnknownActionError('Character must be "C" or "D".') - - -def str_to_actions(actions: str) -> tuple: - """Converts a string to a tuple of actions. - - Parameters - ---------- - actions: string consisting of 'C's and 'D's - - Returns - ------- - tuple - Each element corresponds to a letter from the input string. 
- """ - return tuple(Action.from_char(element) for element in actions) - - -def actions_to_str(actions: Iterable[Action]) -> str: - """Converts an iterable of actions into a string. - - Example: (D, D, C) would be converted to 'DDC' - - Parameters - ----------- - actions: iterable of Action - - Returns - ------- - str - A string of 'C's and 'D's. - """ - return "".join(map(str, actions)) diff --git a/axelrod/ipd/classifier.py b/axelrod/ipd/classifier.py deleted file mode 100644 index f34b89ab7..000000000 --- a/axelrod/ipd/classifier.py +++ /dev/null @@ -1,246 +0,0 @@ -import os -from typing import ( - Any, - Callable, - Generic, - List, - Optional, - Set, - Text, - Type, - TypeVar, - Union, -) -import warnings -import yaml - -from axelrod.ipd.player import IpdPlayer - -ALL_CLASSIFIERS_PATH = "data/all_classifiers.yml" - -T = TypeVar("T") - - -class Classifier(Generic[T]): - """Describes a IpdPlayer (strategy). - - User sets a name and function, f, at initialization. Through - classify_player, looks for the classifier to be set in the passed IpdPlayer - class. If not set, then passes to f for calculation. - - f must operate on the class, and not an instance. If necessary, f may - initialize an instance, but this shouldn't depend on runtime states, because - the result gets stored in a file. If a strategy's classifier depends on - runtime states, such as those created by transformers, then it can set the - field in its classifier dict, and that will take precedent over saved - values. - - Attributes - ---------- - name: An identifier for the classifier, used as a dict key in storage and in - 'classifier' dicts of IpdPlayer classes. - player_class_classifier: A function that takes in a IpdPlayer class (not an - instance) and returns a value. - """ - - def __init__( - self, name: Text, player_class_classifier: Callable[[Type[IpdPlayer]], T] - ): - self.name = name - self.player_class_classifier = player_class_classifier - - def classify_player(self, player: Type[IpdPlayer]) -> T: - """Look for this classifier in the passed player's 'classifier' dict, - otherwise pass to the player to f.""" - try: - return player.classifier[self.name] - except: - return self.player_class_classifier(player) - - -stochastic = Classifier[bool]("stochastic", lambda _: False) -memory_depth = Classifier[Union[float, int]]("memory_depth", lambda _: float("inf")) -makes_use_of = Classifier[Optional[Set[Text]]]("makes_use_of", lambda _: None) -long_run_time = Classifier[bool]("long_run_time", lambda _: False) -inspects_source = Classifier[Optional[bool]]("inspects_source", lambda _: None) -manipulates_source = Classifier[Optional[bool]]("manipulates_source", lambda _: None) -manipulates_state = Classifier[Optional[bool]]("manipulates_state", lambda _: None) - -# Should list all known classifiers. -all_classifiers = [ - stochastic, - memory_depth, - makes_use_of, - long_run_time, - inspects_source, - manipulates_source, - manipulates_state, -] - - -def rebuild_classifier_table( - classifiers: List[Classifier], - players: List[Type[IpdPlayer]], - path: Text = ALL_CLASSIFIERS_PATH, -) -> None: - """Builds the classifier table in data. - - Parameters - ---------- - classifiers: A list of classifiers to calculate on the strategies - players: A list of strategies (classes, not instances) to compute the - classifiers for. - path: Where to save the resulting yaml file. 
- """ - # Get absolute path - dirname = os.path.dirname(__file__) - filename = os.path.join(dirname, path) - - all_player_dicts = dict() - for p in players: - new_player_dict = dict() - for c in classifiers: - new_player_dict[c.name] = c.classify_player(p) - all_player_dicts[p.name] = new_player_dict - - with open(filename, "w") as f: - yaml.dump(all_player_dicts, f) - - -class _Classifiers(object): - """A singleton used to calculate any known classifier. - - Attributes - ---------- - all_player_dicts: A local copy of the dict saved in the classifier table. - The keys are player names, and the values are 'classifier' dicts (keyed - by classifier name). - """ - - _instance = None - all_player_dicts = dict() - - # Make this a singleton - def __new__(cls): - if cls._instance is None: - cls._instance = super(_Classifiers, cls).__new__(cls) - # When this is first created, read from the classifier table file. - # Get absolute path - dirname = os.path.dirname(__file__) - filename = os.path.join(dirname, ALL_CLASSIFIERS_PATH) - with open(filename, "r") as f: - cls.all_player_dicts = yaml.load(f, Loader=yaml.FullLoader) - - return cls._instance - - @classmethod - def known_classifier(cls, classifier_name: Text) -> bool: - """Returns True if the passed classifier_name is known.""" - global all_classifiers - return classifier_name in (c.name for c in all_classifiers) - - @classmethod - def __getitem__( - cls, key: Union[Classifier, Text] - ) -> Callable[[Union[IpdPlayer, Type[IpdPlayer]]], Any]: - """Looks up the classifier for the player. - - Given a passed classifier key, return a function that: - - Takes a player. If the classifier is found in the 'classifier' dict on - the player, then return that. Otherwise look for the classifier for the - player in the all_player_dicts. Returns None if the classifier is not - found in either of those. - - The returned function expects IpdPlayer instances, but if a IpdPlayer class is - passed, then it will create an instance by calling an argument-less - initializer. If no such initializer exists on the class, then an error - will result. - - Parameters - ---------- - key: A classifier or classifier name that we want to calculate for the - player. - - Returns - ------- - A function that will map IpdPlayer (or IpdPlayer instances) to their value for - this classification. - """ - # Key may be the name or an instance. Convert to name. - if not isinstance(key, str): - key = key.name - - if not cls.known_classifier(key): - raise KeyError("Unknown classifier") - - def classify_player_for_this_classifier( - player: Union[IpdPlayer, Type[IpdPlayer]] - ) -> Any: - def try_lookup() -> Any: - try: - player_classifiers = cls.all_player_dicts[player.name] - except: - return None - - return player_classifiers.get(key, None) - - # If the passed player is not an instance, then try to initialize an - # instance without arguments. - if not isinstance(player, IpdPlayer): - try: - player = player() - warnings.warn( - "Classifiers are intended to run on player instances. " - "Passed player {} was initialized with default " - "arguments.".format(player.name) - ) - except: - # All strategies must have trivial initializers. - raise Exception( - "Passed player class doesn't have a trivial initializer." - ) - - # Factory-generated players won't exist in the table. As well, some - # players, like Random, may change classifiers at construction time; - # this get() function takes a player instance, while the saved-values - # are from operations on the player object itself. 
diff --git a/axelrod/ipd/compute_finite_state_machine_memory.py b/axelrod/ipd/compute_finite_state_machine_memory.py
deleted file mode 100644
index 24fcb4f1d..000000000
--- a/axelrod/ipd/compute_finite_state_machine_memory.py
+++ /dev/null
@@ -1,266 +0,0 @@
-from axelrod.ipd.action import Action
-from collections import defaultdict, namedtuple
-from typing import DefaultDict, Iterator, Dict, Tuple, Set, List
-
-C, D = Action.C, Action.D
-
-Transition = namedtuple(
-    "Transition", ["state", "last_opponent_action", "next_state", "next_action"]
-)
-TransitionDict = Dict[Tuple[int, Action], Tuple[int, Action]]
-
-
-class Memit(object):
-    """
-    Memit = unit of memory.
-
-    This represents the amount of memory that we gain with each new piece of
-    history. It includes a state, the response that we make on our way into
-    that state (in_act), and the opponent's action that makes us move out of
-    that state (out_act).
-
-    For example, this finite state machine:
-    (0, C, 0, C),
-    (0, D, 1, C),
-    (1, C, 0, D),
-    (1, D, 0, D)
-
-    has the memits:
-    (C, 0, C),
-    (C, 0, D),
-    (D, 0, C),
-    (D, 0, D),
-    (C, 1, C),
-    (C, 1, D)
-    """
-
-    def __init__(self, in_act: Action, state: int, out_act: Action):
-        self.in_act = in_act
-        self.state = state
-        self.out_act = out_act
-
-    def __repr__(self) -> str:
-        return "{}, {}, {}".format(self.in_act, self.state, self.out_act)
-
-    def __hash__(self):
-        return hash(repr(self))
-
-    def __eq__(self, other_memit) -> bool:
-        """In actions and out actions are the same."""
-        return (
-            self.in_act == other_memit.in_act
-            and self.out_act == other_memit.out_act
-        )
-
-    def __lt__(self, other_memit) -> bool:
-        return repr(self) < repr(other_memit)
-
-
-MemitPair = Tuple[Memit, Memit]
-
-
-def ordered_memit_tuple(x: Memit, y: Memit) -> tuple:
-    """Returns a tuple of x and y, sorted so that (x, y) is viewed as the
-    same as (y, x).
-    """
-    if x < y:
-        return (x, y)
-    else:
-        return (y, x)
-
-
-def transition_iterator(transitions: TransitionDict) -> Iterator[Transition]:
-    """Changes the transition dictionary into an iterator of namedtuples."""
-    for k, v in transitions.items():
-        yield Transition(k[0], k[1], v[0], v[1])
-
-
-def get_accessible_transitions(
-    transitions: TransitionDict, initial_state: int
-) -> TransitionDict:
-    """Gets all transitions from the list that can be reached from the
-    initial_state.
-    """
-    # Initial dict of edges between states and a dict of visited status for each
-    # of the states.
-    edge_dict = defaultdict(list)  # type: DefaultDict[int, List[int]]
-    visited = dict()
-    for trans in transition_iterator(transitions):
-        visited[trans.state] = False
-        edge_dict[trans.state].append(trans.next_state)
-    # Keep track of states that can be accessed.
-    accessible_states = [initial_state]
-
-    state_queue = [initial_state]
-    visited[initial_state] = True
-    # While there are states in the queue, visit each state's children, adding
-    # each to the accessible_states. [A basic graph search.]
-    while len(state_queue) > 0:
-        state = state_queue.pop()
-        for successor in edge_dict[state]:
-            # Don't process the same state twice.
-            if not visited[successor]:
-                visited[successor] = True
-                state_queue.append(successor)
-                accessible_states.append(successor)
-
-    # Now for each transition in the passed TransitionDict, copy the transition
-    # to accessible_transitions if and only if the starting state is accessible,
-    # as determined above.
-    accessible_transitions = dict()
-    for trans in transition_iterator(transitions):
-        if trans.state in accessible_states:
-            accessible_transitions[
-                (trans.state, trans.last_opponent_action)
-            ] = (trans.next_state, trans.next_action)
-
-    return accessible_transitions
-
-
-def longest_path(
-    edges: DefaultDict[MemitPair, Set[MemitPair]], starting_at: MemitPair
-) -> int:
-    """Returns the number of nodes in the longest path that starts at the given
-    node. Returns infinity if a loop is encountered.
-    """
-    visited = dict()
-    for source, destinations in edges.items():
-        visited[source] = False
-        for destination in destinations:
-            visited[destination] = False
-
-    # This is what we'll recurse on. The visited dict is shared between calls.
-    def recurse(at_node):
-        visited[at_node] = True
-        record = 1  # Count the nodes, not the edges.
-        for successor in edges[at_node]:
-            if visited[successor]:
-                return float("inf")
-            successor_length = recurse(successor)
-            if successor_length == float("inf"):
-                return float("inf")
-            if record < successor_length + 1:
-                record = successor_length + 1
-        return record
-
-    return recurse(starting_at)
- """ - # If initial_state is set, use this to determine which transitions are - # reachable from the initial_state and restrict to those. - if initial_state is not None: - transitions = get_accessible_transitions(transitions, initial_state) - - # Get the incoming actions for each state. - incoming_action_by_state = defaultdict( - set - ) # type: DefaultDict[int, Set[Action]] - for trans in transition_iterator(transitions): - incoming_action_by_state[trans.next_state].add(trans.next_action) - - # Keys are starting memit, and values are all possible terminal memit. - # Will walk backwards through the graph. - memit_edges = defaultdict(set) # type: DefaultDict[Memit, Set[Memit]] - for trans in transition_iterator(transitions): - # Since all actions are out-paths for each state, add all of these. - # That is to say that the opponent could do anything - for out_action in all_actions: - # More recent in action history - starting_node = Memit( - trans.next_action, trans.next_state, out_action - ) - # All incoming paths to current state - for in_action in incoming_action_by_state[trans.state]: - # Less recent in action history - ending_node = Memit( - in_action, trans.state, trans.last_opponent_action - ) - memit_edges[starting_node].add(ending_node) - - all_memits = list(memit_edges.keys()) - - pair_nodes = set() - pair_edges = defaultdict( - set - ) # type: DefaultDict[MemitPair, Set[MemitPair]] - # Loop through all pairs of memits. - for x, y in [(x, y) for x in all_memits for y in all_memits]: - if x == y and x.state == y.state: - continue - if x != y: - continue - - # If the memits match, then the strategy can't tell the difference - # between the states. We call this a pair of matched memits (or just a - # pair). - pair_nodes.add(ordered_memit_tuple(x, y)) - # When two memits in matched pair have successors that are also matched, - # then we draw an edge. This represents consecutive historical times - # that we can't tell which state we're in. - for x_successor in memit_edges[x]: - for y_successor in memit_edges[y]: - if x_successor == y_successor: - pair_edges[ordered_memit_tuple(x, y)].add( - ordered_memit_tuple(x_successor, y_successor) - ) - - # Get next_action for each memit. Used to decide if they are in conflict, - # because we only have undecidability if next_action doesn't match. - next_action_by_memit = dict() - for trans in transition_iterator(transitions): - for in_action in incoming_action_by_state[trans.state]: - memit_key = Memit( - in_action, trans.state, trans.last_opponent_action - ) - next_action_by_memit[memit_key] = trans.next_action - - # Calculate the longest path. - record = 0 - for pair in pair_nodes: - if next_action_by_memit[pair[0]] != next_action_by_memit[pair[1]]: - # longest_path is the longest chain of tied states. We add one to - # get the memory length needed to break all ties. - path_length = longest_path(pair_edges, pair) + 1 - if record < path_length: - record = path_length - - if record > 0: - return record - - # If there are no pair of tied memits (for which the next action are - # distinct), then either no memits are needed to break a tie (i.e. all - # next_actions are the same) or the first memit breaks a tie (i.e. 
memory 1) - next_action_set = set() - for trans in transition_iterator(transitions): - next_action_set.add(trans.next_action) - if len(next_action_set) == 1: - return 0 - return 1 - diff --git a/axelrod/ipd/data/all_classifiers.yml b/axelrod/ipd/data/all_classifiers.yml deleted file mode 100644 index 870322292..000000000 --- a/axelrod/ipd/data/all_classifiers.yml +++ /dev/null @@ -1,1986 +0,0 @@ -$\phi$: - inspects_source: false - long_run_time: false - makes_use_of: &id001 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -$\pi$: - inspects_source: false - long_run_time: false - makes_use_of: *id001 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -$e$: - inspects_source: false - long_run_time: false - makes_use_of: *id001 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -ALLCorALLD: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -AON2: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: false -Adaptive: - inspects_source: false - long_run_time: false - makes_use_of: !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Adaptive Pavlov 2006: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Adaptive Pavlov 2011: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Adaptive Tit For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -AdaptorBrief: - inspects_source: false - long_run_time: false - makes_use_of: &id002 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -AdaptorLong: - inspects_source: false - long_run_time: false - makes_use_of: *id002 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Aggravater: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Alexei: - inspects_source: false - long_run_time: false - makes_use_of: !!set - length: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Alternator: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: false -Alternator Hunter: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Anti Tit For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: false -AntiCycler: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Appeaser: - 
inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Arrogant QLearner: - inspects_source: false - long_run_time: false - makes_use_of: &id003 !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Average Copier: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -BackStabber: - inspects_source: false - long_run_time: false - makes_use_of: !!set - length: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Better and Better: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Bully: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: false -Bush Mosteller: - inspects_source: false - long_run_time: false - makes_use_of: !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Calculator: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Cautious QLearner: - inspects_source: false - long_run_time: false - makes_use_of: *id003 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -CollectiveStrategy: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Contrite Tit For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 3 - stochastic: false -Cooperator: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 0 - stochastic: false -Cooperator Hunter: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Cycle Hunter: - inspects_source: false - long_run_time: false - makes_use_of: &id005 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Cycler CCCCCD: - inspects_source: false - long_run_time: false - makes_use_of: &id004 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 5 - stochastic: false -Cycler CCCD: - inspects_source: false - long_run_time: false - makes_use_of: *id004 - manipulates_source: false - manipulates_state: false - memory_depth: 3 - stochastic: false -Cycler CCCDCD: - inspects_source: false - long_run_time: false - makes_use_of: *id004 - manipulates_source: false - manipulates_state: false - memory_depth: 5 - stochastic: false -Cycler CCD: - inspects_source: false - long_run_time: false - makes_use_of: *id004 - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: false -Cycler DC: - inspects_source: false - long_run_time: false - makes_use_of: *id004 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: false -Cycler DDC: - inspects_source: false - 
long_run_time: false - makes_use_of: *id004 - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: false -DBS: - inspects_source: false - long_run_time: true - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Darwin: - inspects_source: true - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: true - memory_depth: .inf - stochastic: false -Defector: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 0 - stochastic: false -Defector Hunter: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Delayed AON1: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: false -Desperate: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -Detective: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -DoubleCrosser: - inspects_source: false - long_run_time: false - makes_use_of: !!set - length: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -DoubleResurrection: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 5 - stochastic: false -Doubler: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Dynamic Two Tits For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -EasyGo: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -EugineNier: - inspects_source: false - long_run_time: false - makes_use_of: !!set - length: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Eventual Cycle Hunter: - inspects_source: false - long_run_time: false - makes_use_of: *id005 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Evolved ANN: - inspects_source: false - long_run_time: false - makes_use_of: &id006 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Evolved ANN 5: - inspects_source: false - long_run_time: false - makes_use_of: *id006 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Evolved ANN 5 Noise 05: - inspects_source: false - long_run_time: false - makes_use_of: *id006 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Evolved FSM 16: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Evolved FSM 16 Noise 05: - inspects_source: false - long_run_time: false - makes_use_of: 
!!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Evolved FSM 4: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Evolved HMM 5: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 5 - stochastic: true -EvolvedLookerUp1_1_1: - inspects_source: false - long_run_time: false - makes_use_of: &id007 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -EvolvedLookerUp2_2_2: - inspects_source: false - long_run_time: false - makes_use_of: *id007 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Firm But Fair: - inspects_source: false - long_run_time: false - makes_use_of: &id008 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -First by Anonymous: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 0 - stochastic: true -First by Davis: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -First by Downing: - inspects_source: false - long_run_time: false - makes_use_of: !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -First by Feld: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 200 - stochastic: true -First by Graaskamp: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -First by Grofman: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -First by Joss: - inspects_source: false - long_run_time: false - makes_use_of: *id008 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -First by Nydegger: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 3 - stochastic: false -First by Shubik: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -First by Stein and Rapoport: - inspects_source: false - long_run_time: false - makes_use_of: !!set - length: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -First by Tideman and Chieruzzi: - inspects_source: false - long_run_time: false - makes_use_of: !!set - game: null - length: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -First by Tullock: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Fool Me Once: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Forgetful Fool Me 
Once: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Forgetful Grudger: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 10 - stochastic: false -Forgiver: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Forgiving Tit For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Fortress3: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: false -Fortress4: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 3 - stochastic: false -GTFT: - inspects_source: false - long_run_time: false - makes_use_of: !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -Geller: - inspects_source: true - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Geller Cooperator: - inspects_source: true - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Geller Defector: - inspects_source: true - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -General Soft Grudger: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Go By Majority: - inspects_source: false - long_run_time: false - makes_use_of: &id009 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Go By Majority 10: - inspects_source: false - long_run_time: false - makes_use_of: *id009 - manipulates_source: false - manipulates_state: false - memory_depth: 10 - stochastic: false -Go By Majority 20: - inspects_source: false - long_run_time: false - makes_use_of: *id009 - manipulates_source: false - manipulates_state: false - memory_depth: 20 - stochastic: false -Go By Majority 40: - inspects_source: false - long_run_time: false - makes_use_of: *id009 - manipulates_source: false - manipulates_state: false - memory_depth: 40 - stochastic: false -Go By Majority 5: - inspects_source: false - long_run_time: false - makes_use_of: *id009 - manipulates_source: false - manipulates_state: false - memory_depth: 5 - stochastic: false -Gradual: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Gradual Killer: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Grudger: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -GrudgerAlternator: - inspects_source: false - long_run_time: 
false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Grumpy: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Handshake: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Hard Go By Majority: - inspects_source: false - long_run_time: false - makes_use_of: *id009 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Hard Go By Majority 10: - inspects_source: false - long_run_time: false - makes_use_of: *id009 - manipulates_source: false - manipulates_state: false - memory_depth: 10 - stochastic: false -Hard Go By Majority 20: - inspects_source: false - long_run_time: false - makes_use_of: *id009 - manipulates_source: false - manipulates_state: false - memory_depth: 20 - stochastic: false -Hard Go By Majority 40: - inspects_source: false - long_run_time: false - makes_use_of: *id009 - manipulates_source: false - manipulates_state: false - memory_depth: 40 - stochastic: false -Hard Go By Majority 5: - inspects_source: false - long_run_time: false - makes_use_of: *id009 - manipulates_source: false - manipulates_state: false - memory_depth: 5 - stochastic: false -Hard Prober: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Hard Tit For 2 Tats: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 3 - stochastic: false -Hard Tit For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 3 - stochastic: false -Hesitant QLearner: - inspects_source: false - long_run_time: false - makes_use_of: *id003 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Hopeless: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -Inverse: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Inverse Punisher: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Knowledgeable Worse and Worse: - inspects_source: false - long_run_time: false - makes_use_of: !!set - length: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Level Punisher: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Limited Retaliate: - inspects_source: false - long_run_time: false - makes_use_of: &id010 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Limited Retaliate 2: - inspects_source: false - long_run_time: false - makes_use_of: *id010 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Limited Retaliate 3: - inspects_source: false - 
long_run_time: false - makes_use_of: *id010 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -MEM2: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Math Constant Hunter: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Memory Decay: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Hunter: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Meta Hunter Aggressive: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Meta Majority: - inspects_source: false - long_run_time: true - makes_use_of: &id011 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Majority Finite Memory: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Majority Long Memory: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Majority Memory One: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Minority: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Mixer: - inspects_source: false - long_run_time: true - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Winner: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Winner Deterministic: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Winner Ensemble: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Winner Finite Memory: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Winner Long Memory: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Winner Memory One: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Meta Winner Stochastic: - inspects_source: false - long_run_time: true - makes_use_of: *id011 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Michaelos: - inspects_source: 
false - long_run_time: false - makes_use_of: !!set - length: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Mind Bender: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: true - manipulates_state: false - memory_depth: -10 - stochastic: false -Mind Controller: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: true - manipulates_state: false - memory_depth: -10 - stochastic: false -Mind Reader: - inspects_source: true - long_run_time: false - makes_use_of: !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Mind Warper: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: true - manipulates_state: false - memory_depth: -10 - stochastic: false -Mirror Mind Reader: - inspects_source: true - long_run_time: false - makes_use_of: !!set {} - manipulates_source: true - manipulates_state: false - memory_depth: .inf - stochastic: false -N Tit(s) For M Tat(s): - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -NMWE Deterministic: - inspects_source: false - long_run_time: true - makes_use_of: &id012 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -NMWE Finite Memory: - inspects_source: false - long_run_time: true - makes_use_of: *id012 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -NMWE Long Memory: - inspects_source: false - long_run_time: true - makes_use_of: *id012 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -NMWE Memory One: - inspects_source: false - long_run_time: true - makes_use_of: *id012 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -NMWE Stochastic: - inspects_source: false - long_run_time: true - makes_use_of: *id012 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Naive Prober: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -Negation: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -Nice Average Copier: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Nice Meta Winner: - inspects_source: false - long_run_time: true - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Nice Meta Winner Ensemble: - inspects_source: false - long_run_time: true - makes_use_of: *id012 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Omega TFT: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Once Bitten: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 12 - stochastic: false -Opposite Grudger: - inspects_source: false - 
long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -PSO Gambler 1_1_1: - inspects_source: false - long_run_time: false - makes_use_of: &id013 !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -PSO Gambler 2_2_2: - inspects_source: false - long_run_time: false - makes_use_of: *id013 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -PSO Gambler 2_2_2 Noise 05: - inspects_source: false - long_run_time: false - makes_use_of: *id013 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -PSO Gambler Mem1: - inspects_source: false - long_run_time: false - makes_use_of: *id013 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Predator: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Prober: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Prober 2: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Prober 3: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Prober 4: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Protected Mind Reader: - inspects_source: true - long_run_time: false - makes_use_of: !!set - game: null - manipulates_source: true - manipulates_state: false - memory_depth: .inf - stochastic: false -Pun1: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Punisher: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Raider: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Random: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 0 - stochastic: true -Random Hunter: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Random Tit for Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -Remorseful Prober: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: true -Resurrection: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 5 - stochastic: false -Retaliate: - inspects_source: false - long_run_time: false - makes_use_of: &id014 !!set {} - 
manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Retaliate 2: - inspects_source: false - long_run_time: false - makes_use_of: *id014 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Retaliate 3: - inspects_source: false - long_run_time: false - makes_use_of: *id014 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Revised Downing: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Ripoff: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 3 - stochastic: false -Risky QLearner: - inspects_source: false - long_run_time: false - makes_use_of: *id003 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Appold: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Black: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 5 - stochastic: true -Second by Borufsen: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by Cave: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Champion: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Colbert: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 4 - stochastic: false -Second by Eatherley: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Getzler: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Gladstein: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by GraaskampKatzen: - inspects_source: false - long_run_time: false - makes_use_of: !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by Grofman: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 8 - stochastic: false -Second by Harrington: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Kluepfel: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Leyvraz: - inspects_source: false - long_run_time: false - makes_use_of: !!set 
{} - manipulates_source: false - manipulates_state: false - memory_depth: 3 - stochastic: true -Second by Mikkelson: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by RichardHufford: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by Rowsam: - inspects_source: false - long_run_time: false - makes_use_of: !!set - a: null - e: null - g: null - m: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by Tester: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by Tideman and Chieruzzi: - inspects_source: false - long_run_time: false - makes_use_of: !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by Tranquilizer: - inspects_source: false - long_run_time: false - makes_use_of: !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Weiner: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by White: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Second by WmAdams: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Second by Yamachi: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -SelfSteem: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -ShortMem: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Slow Tit For Two Tats 2: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: false -Sneaky Tit For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Soft Grudger: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 6 - stochastic: false -Soft Joss: - inspects_source: false - long_run_time: false - makes_use_of: *id008 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -SolutionB1: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: false -SolutionB5: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Spiteful Tit For Tat: - 
inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Stalker: - inspects_source: false - long_run_time: false - makes_use_of: !!set - game: null - length: null - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Stochastic Cooperator: - inspects_source: false - long_run_time: false - makes_use_of: *id008 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -Stochastic WSLS: - inspects_source: false - long_run_time: false - makes_use_of: *id008 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -Suspicious Tit For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: false -TF1: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -TF2: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -TF3: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -ThueMorse: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -ThueMorseInverse: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Thumper: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Tit For 2 Tats: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: false -Tit For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: false -Tricky Cooperator: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 10 - stochastic: false -Tricky Defector: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Tricky Level Punisher: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Two Tits For Tat: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: false -UsuallyCooperates: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -UsuallyDefects: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -VeryBad: - inspects_source: false - long_run_time: false - 
makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Willing: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -Win-Shift Lose-Stay: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: false -Win-Stay Lose-Shift: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: false -Winner12: - inspects_source: false - long_run_time: false - makes_use_of: *id007 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Winner21: - inspects_source: false - long_run_time: false - makes_use_of: *id007 - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: false -Worse and Worse: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Worse and Worse 2: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -Worse and Worse 3: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: .inf - stochastic: true -ZD-Extort-2: - inspects_source: false - long_run_time: false - makes_use_of: &id015 !!set - game: null - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -ZD-Extort-2 v2: - inspects_source: false - long_run_time: false - makes_use_of: *id015 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -ZD-Extort-4: - inspects_source: false - long_run_time: false - makes_use_of: *id015 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -ZD-Extort3: - inspects_source: false - long_run_time: false - makes_use_of: *id015 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -ZD-Extortion: - inspects_source: false - long_run_time: false - makes_use_of: *id015 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -ZD-GEN-2: - inspects_source: false - long_run_time: false - makes_use_of: *id015 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -ZD-GTFT-2: - inspects_source: false - long_run_time: false - makes_use_of: *id015 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -ZD-Mem2: - inspects_source: false - long_run_time: false - makes_use_of: !!set {} - manipulates_source: false - manipulates_state: false - memory_depth: 2 - stochastic: true -ZD-Mischief: - inspects_source: false - long_run_time: false - makes_use_of: *id015 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true -ZD-SET-2: - inspects_source: false - long_run_time: false - makes_use_of: *id015 - manipulates_source: false - manipulates_state: false - memory_depth: 1 - stochastic: true diff --git a/axelrod/ipd/data/ann_weights.csv b/axelrod/ipd/data/ann_weights.csv deleted file mode 100644 index f99d7a002..000000000 --- a/axelrod/ipd/data/ann_weights.csv +++ 
/dev/null @@ -1,4 +0,0 @@ -# name, features, hidden_layer_size, weights... -Evolved ANN, 17, 10, 3.459899447605539, 2.1318404265386586, 13.17966613895038, -6.192108007790158, -0.37086706789610013, -0.3896626810316768, -0.3866099292858918, -0.5945768318137592, 8.657276286604098, -0.31582965913711214, -12.77263491874852, -3.7221902888177283, 0.7700315120556411, -33.026063114130366, -0.6766419579791423, -0.9319070818093544, -12.214457623430176, -0.7005949704263965, -0.9248704722582742, -0.30575483808901427, -0.6882322381266356, 0.492463682676912, 0.4162176461180324, 2.2349377906341243, -4.828561111548519, -1.9512825291316083, 15.234656539966528, 13.201167624183022, 2.128673097211623, 2.92411276919898, -0.7547167660636005, 1.199314577481091, 0.06760026684238629, 0.7832939406545589, -177.49932309404323, 6.89500655807993, -0.08284645051120698, -0.799724698184292, -8.207688126907367, 17.450767180301746, -19.54570476880868, 23.570589723652876, 7.443462688149636, -4.262126715956444, 51.789933932119446, -0.9595303137934914, -0.015387143246637383, -1.6996871126483133, 0.5326556419474413, -0.8690658866365495, -0.747871482057276, -0.1990663384836313, 0.7376303253285281, 19.692218213944003, -2.920745672290689, -0.2877252920885854, -5.351299019427598, 1.1722532499353777, 1.3902788805171062, 0.38849134405949914, -5.729822642840242, -4.641822939824729, 0.5691745664669388, 0.30905426295392857, -8.85980449965398, -4.732837361082687, 0.8831193558694339, -0.9520915826182503, -0.5355473390079997, -22.037852229409136, -0.9596756645256569, 0.15632020854402362, -24.710657392303975, 21.00195680553553, -6.099731246259528, -0.9426766932743909, -0.7194608598270196, 3.4908551710867917, 0.1371367081336668, -0.9960642361403127, -0.23395001734734366, 0.7974681865981025, 1.1178545864789984, 0.0309085368898055, 0.3709257972027509, -0.7705906687715782, 0.3106563294379545, -0.006111882508708552, 0.01778595332796895, 0.5677955352244695, -0.04300357455768222, -0.8378512149555155, 0.4517674874175419, 0.026356153000395066, 8.559746666840512, 60.1036518587203, -11.066045796708, 1.9547952088765457, -1.2572770384601373, -3.1565358568060065, -0.39428165742075283, -7.976304408878465, -12.979182222761667, -0.8903476091382874, -0.044046900475693686, -2290.612969360032, 3.1361101611630904, -4.744165899359072, 1.5590913900932555, 23.589552731167824, 3.696503591016147, -49.0193733334204, -0.31155127179562386, -12.893291340674148, 3.355621906094085, -1124.6168527572167, -8.611905802129927, -5.495486462660128, 9.317002862904996, -0.928102034629926, 0.9862671193437127, 71.6360157474848, -41.78727928552422, -0.08938630210606524, -25.475912160872333, -1.6296570038831701, 3.721506342123227, 1.2616295894267517, -2.16674011456336, 1.0608792593103447, -1.5348732560098246, 0.6488189032682272, -9.554624561640432, 0.9982451738644897, -3.585518099583287, 2.3410371683096507, -1.059363823436213, -36.03382554076086, 49.400216254399005, -0.09560108506061127, 22.265326907988467, -1167.4125713033582, -827.3412289305065, -0.2817962724984171, -12.585799415544116, -0.4968822378372789, 81.10836010264876, 0.7230863554676401, -3.7241902674476655, -0.9757756717170544, -68.02893974316859, -2.2396567795647564, 0.07066110212550569, -1.250920612229347, -0.046887193108303915, -0.7680639795702753, 17.937616126439604, -1.1019678164169133, 0.07411014749198457, 2.056144561078099, -0.9166833148022522, -27.361514430051557, -2.3877636883795264, 2.2300576943504535, 447.438860571402, 0.45131006835733695, 10.58847351640523, 1.9849158213808964, -1574.2103557260862, 
-0.7371599837780478, 0.9086264191508042, -0.7828307535556679, 0.3300464039361075, 1.81242958242284, -3.610267708304238, -0.973651955724061, 0.1209299242691169, 2.9843573498979894, -0.11277293056015156, -0.7439895632451479, -3.8649914641316685, 0.9782080851048618, -5.053463725816831, 3.5891164827308604, -3.7958700956913, -2.8572114118106247, -0.41818946926149336, 1.4695517340149908, -0.026854015704181288, -0.02788909596479816 -Evolved ANN 5, 17, 5, -28.102635339566508, -5.270221138740612, -1.7991915039829207, -19.860573976774578, -24.60513164187047, 3.8162913045444635, 0.023473769583907095, -54.50321528122049, 10.003539037251969, -2.3346147693972115, 16.73844151591633, 0.3100810247981438, -169.33492203029917, -5.724230282870263, 0.8526201446384842, -0.22605058147685014, -17.835641849307482, -0.7043679829900225, -10.486007034199204, -3.0550187074781925, -0.6810531857496793, -4453.796912952781, 33810.53059746947, -10848.78737197365, -118.11626381871724, 93.18879667509046, -16083.650736874622, -13.725703495018438, 50.10494647994167, -7.604207807021347, 90.77373268627657, 1.595074916653774, 2.7557812139573015, -39715.109221929466, 28.904911685140643, -0.43899423549864697, 5.768080489913274, 5.295125125995148, -2.546634014137431, -238767.20745174124, -3.5528156876545527, -4.9527347193332965, 1115.2695575158439, 4.174668501799942, -14.649495628121237, -23.25321447104678, 0.5500236215768699, -20.73180030629891, 0.5694594098639599, 9.126335853167467, 3.566801760870098, 1337.557828195395, 7235.478182255781, 73866.16381328272, -857.4430471718431, -924.9591765734541, -3518.967885192465, -1.9067958478325906, -6.633454070601698, -14.521354381912227, -2885.6120623623256, 22500.261088902847, -2.151057666676581, -1132.4844452804136, -320738.9350600944, 95.16925312662866, -5656.2165411116075, -45086.65321702537, -27.563351028422737, -2.61387586136827, 371.56363217654854, -303.4084110871267, 6.983609633015089, -47.030329656598646, -32.801150369212415, 106.5993554145673, -1488.6548395800542, -76.76127924999216, 228.98568852459917, 3.1681704936401016, 7.847547722960641, -17.600408577355783, -10.176187185243615, -2.8861364567007923, -6.112911717298348, -335.90148548509376, 52.34158411280463, 1272.9680157062762, 22.881750122081684, 533.1837365484016, -0.6762316764182235, 4.4063523628928465, 4.139696013585834, 124.12127527435491, -128.06733876616389 -Evolved ANN 5 Noise 05, 17, 5, 23.884262150181755, -1818.8117704211384, -99.98875916610936, -0.03961610388820535, -6.210054298897063, 3.7052379169262717, -0.2341738759745093, -181.13859219885555, 12.13317672298101, -0.49669501894086343, -1.0749425884840627, 10.74864948953037, -0.2896667901231506, -16.22712292450597, -1.7178383009508598, 0.03221417142047274, -158.1666203488453, -14.63669956068759, 15.408582826806054, 5.844929636776999, 4.566735602054217, 3.2997530921332476, -1.9670038978333584, 11.7498294264678, 13.681065767064851, 6.221555978655614, -6.577090380820985, -5.280239152793359, -3.708452340749404, 0.9946864682020832, -4.969083580503034, 0.37099841885684537, -4.663632521892934, 0.5739444442201613, -97.24526798384726, -18.898938443720873, 2.836962511054863, 1426.696564885826, -61.660716521684535, -96.9853369128562, -35.811382478532956, 21.74704795347286, -43208.50415025884, -30.91261823654964, 556.8238912039677, -1.2029710853005269, 86.95493903070172, -10502.293376143582, -2.0466530653599544, 22.16892386331129, -155.02893455429202, -28.409090647029252, -3.391747333401208, -13.869670523479188, 13.919971713037201, 10.27621997230116, 0.8687219298082444, 
5.274915939989237, -103.56226257273838, -11.47523804695477, 8.100576862445774, -0.5353167277104594, -9.108755782674894, 0.8155938130504109, -7.262915116612636, -0.7403289719769353, -1.7023773645551308, 1.0574026713510245, -2732.0241659942394, -5.435067280962746, -188.24943501427512, 0.37886585953090285, -67.01235198117197, -121.1206837158425, -83.84782446726592, 45.31005125030543, 11.069886195948996, -169.35773604933806, 31.675914286375498, -311.2006747809748, 49.093586385973545, -5.819783407611689, -88.52645028711315, -284.2358463873679, 0.6989482567897998, 156.74810964228283, 1.8064093223587379, 3.4228475977181265, -125.93039228074147, -25.169489233728665, 115.98479433268457, 21.27858612035085, 0.2744006209426808, 2.899956014713407, -53.98506935427509 diff --git a/axelrod/ipd/data/pso_gambler.csv b/axelrod/ipd/data/pso_gambler.csv deleted file mode 100644 index 3b8e1888c..000000000 --- a/axelrod/ipd/data/pso_gambler.csv +++ /dev/null @@ -1,6 +0,0 @@ -# Name (string), plays (int), opp_plays(int), starting_plays(int), weights (floats) -PSO Gambler Mem1, 1, 1, 0, 1.0, 0.52173487, 0.0, 0.12050939 -PSO Gambler 1_1_1, 1, 1, 1, 1.0, 1.0, 0.12304797, 0.57740178, 0.0, 0.0, 0.13581423, 0.11886807 -# , 2, 2, 2, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.93, 0.0, 1.0, 0.67, 0.42, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.48, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.19, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.36, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 -PSO Gambler 2_2_2, 2, 2, 2, 1.0, 1.0, 1.0, 0.0, 1.0, 0.95280465, 0.0, 0.0, 0.0, 0.80897541, 0.0, 0.0, 0.02126434, 0.0, 0.43278586, 0.0, 0.0, 0.0, 1.0, 0.15140743, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.23563137, 0.0, 0.0, 0.65147565, 1.0, 0.0, 0.0, 0.15412392, 1.0, 0.0, 0.0, 0.24922166, 1.0, 0.0, 0.0, 0.0, 0.00227615, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.77344942, 1.0, 0.24523149, 1.0, 0.0 -PSO Gambler 2_2_2 Noise 05, 2, 2, 2, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.98603825, 1.0, 1.0, 0.0, 0.0, 0.16240799, 0.63548102, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.13863175, 0.06434619, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.7724137, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.50999729, 1.0, 0.0, 0.0, 0.00524508, 0.87463905, 0.0, 0.07127653, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.28124022, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0 diff --git a/axelrod/ipd/deterministic_cache.py b/axelrod/ipd/deterministic_cache.py deleted file mode 100644 index b792986eb..000000000 --- a/axelrod/ipd/deterministic_cache.py +++ /dev/null @@ -1,175 +0,0 @@ -"""Tools for caching the results of deterministic matches. - -The cache, in most cases, can simply be treated as a dictionary: - -cache = DeterministicCache() -cache[key1] = result1 -cache[key2] = result2 -... -if some_key in cache: - do_something(cache[some_key]) -else: - ... -""" - -import pickle -from collections import UserDict -from typing import List, Tuple - -from axelrod import Classifiers -from .action import Action -from .player import IpdPlayer - -CachePlayerKey = Tuple[IpdPlayer, IpdPlayer] -CacheKey = Tuple[str, str] - - -def _key_transform(key: CachePlayerKey) -> CacheKey: - """Convert a CachePlayerKey to a CacheKey - - Parameters - ---------- - key: tuple - A 3-tuple: (player instance, player instance) - """ - return key[0].name, key[1].name - - -def _is_valid_key(key: CachePlayerKey) -> bool: - """Validate a deterministic cache player key. 
- - The key should always be a 2-tuple, with a pair of IpdPlayer - instances. Both players should be deterministic. - - Parameters - ---------- - key : object - - Returns - ------- - Boolean indicating if the key is valid - """ - if not isinstance(key, tuple) or len(key) != 2: - return False - - if not (isinstance(key[0], IpdPlayer) and isinstance(key[1], IpdPlayer)): - return False - - if Classifiers["stochastic"](key[0]) or Classifiers["stochastic"](key[1]): - return False - - return True - - -def _is_valid_value(value: List) -> bool: - """Validate a deterministic cache value. - - The value just needs to be a list, with any contents. - - Parameters - ---------- - value : object - - Returns - ------- - Boolean indicating if the value is valid - """ - return isinstance(value, list) - - -class DeterministicCache(UserDict): - """A class to cache the results of deterministic matches. - - For matches with no noise between pairs of deterministic players, the - results will always be the same. We can hold the results for the longest - run in this class, so as to avoid repeatedly generating them in tournaments - of multiple repetitions. If a shorter or equal-length match is run, we can - use the stored results. - - By also storing those cached results in a file, we can re-use the cache - between multiple tournaments if necessary. - - The cache is a dictionary mapping pairs of IpdPlayer classes to a list of - resulting interactions. e.g. for a 3 turn IpdMatch between Cooperator and - Alternator, the dictionary entry would be: - - (axelrod.Cooperator, axelrod.Alternator): [(C, C), (C, D), (C, C)] - - Most of the functionality is provided by the UserDict class (which uses an - instance of dict as the 'data' attribute to hold the dictionary entries). - - This class overrides the __init__ and __setitem__ methods in order to limit - and validate the keys and values to be as described above. It also adds - methods to save/load the cache to/from a file. - """ - - def __init__(self, file_name: str = None) -> None: - """Initialize a new cache. - - Parameters - ---------- - file_name : string - Path to a previously saved cache file - """ - super().__init__() - self.mutable = True - if file_name is not None: - self.load(file_name) - - def __delitem__(self, key: CachePlayerKey): - return super().__delitem__(_key_transform(key)) - - def __getitem__(self, key: CachePlayerKey) -> List[Tuple[Action, Action]]: - return super().__getitem__(_key_transform(key)) - - def __contains__(self, key): - return super().__contains__(_key_transform(key)) - - def __setitem__(self, key: CachePlayerKey, value): - """Validate the key and value before setting them.""" - if not self.mutable: - raise ValueError("Cannot update cache unless mutable is True.") - - if not _is_valid_key(key): - raise ValueError( - "Key must be a tuple of 2 deterministic axelrod IpdPlayer classes" - ) - - if not _is_valid_value(value): - raise ValueError( - "Value must be a list with length equal to turns attribute" - ) - - super().__setitem__(_key_transform(key), value) - - def save(self, file_name: str) -> bool: - """Serialise the cache dictionary to a file. - - Parameters - ---------- - file_name : string - File path to which the cache should be saved - """ - with open(file_name, "wb") as io: - pickle.dump(self.data, io) - return True - - def load(self, file_name: str) -> bool: - """Load a previously saved cache into the dictionary.
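For orientation, a minimal save/load round trip for this class (a sketch; it assumes the top-level package re-exports DeterministicCache, TitForTat and Alternator, and the action module path introduced by this patch):

import axelrod as axl
from axelrod.ipd.action import Action

C, D = Action.C, Action.D

# Keys are pairs of deterministic player instances; values are interaction lists.
cache = axl.DeterministicCache()
cache[(axl.TitForTat(), axl.Alternator())] = [(C, C), (C, D), (C, C)]
cache.save("cache.pkl")  # pickles the underlying dict
reloaded = axl.DeterministicCache(file_name="cache.pkl")
assert (axl.TitForTat(), axl.Alternator()) in reloaded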
- - Parameters - ---------- - file_name : string - Path to a previously saved cache file - """ - with open(file_name, "rb") as io: - data = pickle.load(io) - - if isinstance(data, dict): - self.data = data - else: - raise ValueError( - "Cache file exists but is not the correct format. " - "Try deleting and re-building the cache file." - ) - return True diff --git a/axelrod/ipd/ecosystem.py b/axelrod/ipd/ecosystem.py deleted file mode 100644 index a725dab76..000000000 --- a/axelrod/ipd/ecosystem.py +++ /dev/null @@ -1,121 +0,0 @@ -"""Tools for simulating population dynamics of immutable players. - -An ecosystem runs in the context of a previous tournament, and takes the -results as input. That means no matches are run by the ecosystem, and a -tournament needs to happen before it is created. For example: - -players = [axelrod.Cooperator(), axelrod.Defector()] -tournament = axelrod.IpdTournament(players=players) -results = tournament.play() -ecosystem = axelrod.Ecosystem(results) -ecosystem.reproduce(100) -""" - -import random -from typing import Callable, List - -from axelrod.ipd.result_set import ResultSet - - -class Ecosystem(object): - """An ecosystem based on the payoff matrix from a tournament. - - Attributes - ---------- - num_players: int - The number of players - """ - - def __init__( - self, - results: ResultSet, - fitness: Callable[[float], float] = None, - population: List[int] = None, - ) -> None: - """Create a new ecosystem. - - Parameters - ---------- - results: ResultSet - The results of the tournament run beforehand to use. - fitness: callable - A function mapping a player's payoff to its reproduction rate. - population: List of ints. - The initial populations of the players, corresponding to the - payoff matrix in results. - """ - - self.results = results - self.num_players = self.results.num_players - self.payoff_matrix = self.results.payoff_matrix - self.payoff_stddevs = self.results.payoff_stddevs - - # Population sizes will be recorded in this nested list, with each - # internal list containing strategy populations for a given turn. The - # first list, representing the starting populations, will by default - # have all equal values, and all population lists will be normalized to - # one. An initial population vector can also be passed. This will be - # normalised, but must be of the correct size and have all non-negative - # values. - if population: - if min(population) < 0: - raise TypeError( - "Minimum value of population vector must be non-negative" - ) - elif len(population) != self.num_players: - raise TypeError( - "Population vector must be same size as number of players" - ) - else: - norm = sum(population) - self.population_sizes = [[p / norm for p in population]] - else: - self.population_sizes = [ - [1 / self.num_players for _ in range(self.num_players)] - ] - - # This function is quite arbitrary and probably only influences the - # kinetics for the current code. - if fitness: - self.fitness = fitness - else: - self.fitness = lambda p: p - - def reproduce(self, turns: int): - """Reproduce populations according to the payoff matrix. - - Parameters - ---------- - turns: int - The number of turns to run. - """ - for iturn in range(turns): - plist = list(range(self.num_players)) - pops = self.population_sizes[-1] - - # The unit payoff for each player in this turn is the sum of the - # payoffs obtained from playing with all other players, scaled by - # the size of the opponent's population.
Note that we sample the - # normal distribution based on the payoff matrix and its standard - # deviations obtained from the iterated PD tournament run - # previously. - payoffs = [0.0 for ip in plist] - for ip in plist: - for jp in plist: - avg = self.payoff_matrix[ip][jp] - dev = self.payoff_stddevs[ip][jp] - p = random.normalvariate(avg, dev) - payoffs[ip] += p * pops[jp] - - # The fitness should determine how well a strategy reproduces. The - # new populations should be multiplied by something that is - # proportional to the fitness, but we are normalizing anyway so - # just multiply times fitness. - fitness = [self.fitness(p) for p in payoffs] - newpops = [p * f for p, f in zip(pops, fitness)] - - # Make sure the new populations are normalized to one. - norm = sum(newpops) - newpops = [p / norm for p in newpops] - - self.population_sizes.append(newpops) diff --git a/axelrod/ipd/eigen.py b/axelrod/ipd/eigen.py deleted file mode 100644 index f7b6670f5..000000000 --- a/axelrod/ipd/eigen.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -Compute the principal eigenvector of a matrix using power iteration. - -See also numpy.linalg.eig which calculates all the eigenvalues and -eigenvectors. -""" - -from typing import Tuple - -import numpy - - -def _normalise(nvec: numpy.ndarray) -> numpy.ndarray: - """Normalises the given numpy array.""" - with numpy.errstate(invalid="ignore"): - result = nvec / numpy.sqrt((nvec @ nvec)) - return result - - -def _squared_error(vector_1: numpy.ndarray, vector_2: numpy.ndarray) -> float: - """Computes the Euclidean distance between two numpy arrays.""" - diff = vector_1 - vector_2 - s = diff @ diff - return numpy.sqrt(s) - - -def _power_iteration(mat: numpy.array, initial: numpy.ndarray) -> numpy.ndarray: - """ - Generator of successive approximations. - - Params - ------ - mat: numpy.array - The matrix to use for multiplication iteration - initial: numpy.ndarray - The initial state vector - - Yields - ------ - Successive powers (mat ^ k) * initial - """ - - vec = initial - while True: - vec = _normalise(numpy.dot(mat, vec)) - yield vec - - -def principal_eigenvector( - mat: numpy.array, maximum_iterations=1000, max_error=1e-3 -) -> Tuple[numpy.ndarray, float]: - """ - Computes the (normalised) principal eigenvector of the given matrix.
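As a quick sanity check of the function defined here, power iteration on a small symmetric matrix converges to the dominant eigenpair (a sketch; the 2x2 matrix is an arbitrary example):

import numpy

mat = numpy.array([[2.0, 1.0], [1.0, 2.0]])  # eigenvalues are 3 and 1
vec, value = principal_eigenvector(mat, maximum_iterations=100, max_error=1e-8)
# vec is approximately [0.707, 0.707] and value is approximately 3.0
assert abs(value - 3.0) < 1e-6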
- - Params - ------ - mat: numpy.array - The matrix to use for multiplication iteration - maximum_iterations: int, None - The maximum number of iterations of the approximation - max_error: float, 1e-3 - Exit criterion -- error threshold of the difference of successive steps - - Returns - ------- - ndarray - Eigenvector estimate for the input matrix - float - Eigenvalue corresponding to the returned eigenvector - """ - - mat_ = numpy.array(mat) - size = mat_.shape[0] - initial = numpy.ones(size) - - # Power iteration - if not maximum_iterations: - maximum_iterations = float("inf") - last = initial - for i, vector in enumerate(_power_iteration(mat, initial=initial)): - if i > maximum_iterations: - break - if _squared_error(vector, last) < max_error: - break - last = vector - # Compute the eigenvalue (Rayleigh quotient) - eigenvalue = ((mat_ @ vector) @ vector) / (vector @ vector) - # Liberate the eigenvalue from numpy - eigenvalue = float(eigenvalue) - return vector, eigenvalue diff --git a/axelrod/ipd/evolvable_player.py b/axelrod/ipd/evolvable_player.py deleted file mode 100644 index 68681b250..000000000 --- a/axelrod/ipd/evolvable_player.py +++ /dev/null @@ -1,88 +0,0 @@ -import base64 -from pickle import dumps, loads -from random import randrange -from typing import Dict, List -from .player import IpdPlayer - - -class InsufficientParametersError(Exception): - """Error indicating that insufficient parameters were specified to initialize an Evolvable IpdPlayer.""" - def __init__(self, *args): - super().__init__(*args) - - -class EvolvablePlayer(IpdPlayer): - """A class for a player that can evolve, for use in the Moran process or with reinforcement learning algorithms. - - This is an abstract base class, not intended to be used directly. - """ - - name = "EvolvablePlayer" - parent_class = IpdPlayer - parent_kwargs = [] # type: List[str] - - def overwrite_init_kwargs(self, **kwargs): - """Use to overwrite parameters for proper cloning and testing.""" - for k, v in kwargs.items(): - self.init_kwargs[k] = v - - def create_new(self, **kwargs): - """Creates a new variant with parameters overwritten by kwargs.""" - init_kwargs = self.init_kwargs.copy() - init_kwargs.update(kwargs) - return self.__class__(**init_kwargs) - - # Serialization and deserialization. You may overwrite to obtain more human readable serializations - # but you must overwrite both. - - def serialize_parameters(self): - """Serialize parameters.""" - pickled = dumps(self.init_kwargs) # bytes - s = base64.b64encode(pickled).decode('utf8') # string - return s - - @classmethod - def deserialize_parameters(cls, serialized): - """Deserialize parameters to an IpdPlayer instance.""" - init_kwargs = loads(base64.b64decode(serialized)) - return cls(**init_kwargs) - - # Optional methods for evolutionary algorithms and Moran processes. - - def mutate(self): - """Optional method to allow IpdPlayer to produce a variant (not in place).""" - pass # pragma: no cover - - def crossover(self, other): - """Optional method to allow IpdPlayer to produce variants in combination with another player. Returns a new - IpdPlayer.""" - pass # pragma: no cover - - # Optional methods for particle swarm algorithm.
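The serialize/deserialize pair above is intended to round-trip through a plain string; a sketch, assuming a concrete subclass such as the library's EvolvableFSMPlayer (the num_states argument belongs to that subclass, not to the base class):

player = EvolvableFSMPlayer(num_states=2)    # any concrete EvolvablePlayer subclass
blob = player.serialize_parameters()         # base64-encoded pickle, a str
clone = EvolvableFSMPlayer.deserialize_parameters(blob)
assert clone.init_kwargs == player.init_kwargs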
- - def receive_vector(self, vector): - """Receive a vector of params and overwrite the IpdPlayer.""" - pass # pragma: no cover - - def create_vector_bounds(self): - """Creates the bounds for the decision variables for Particle Swarm Algorithm.""" - pass # pragma: no cover - - -def copy_lists(lists: List[List]) -> List[List]: - return list(map(list, lists)) - - -def crossover_lists(list1: List, list2: List) -> List: - cross_point = randrange(len(list1)) - new_list = list(list1[:cross_point]) + list(list2[cross_point:]) - return new_list - - -def crossover_dictionaries(table1: Dict, table2: Dict) -> Dict: - keys = list(table1.keys()) - cross_point = randrange(len(keys)) - new_items = [(k, table1[k]) for k in keys[:cross_point]] - new_items += [(k, table2[k]) for k in keys[cross_point:]] - new_table = dict(new_items) - return new_table diff --git a/axelrod/ipd/fingerprint.py b/axelrod/ipd/fingerprint.py deleted file mode 100644 index 273777a6d..000000000 --- a/axelrod/ipd/fingerprint.py +++ /dev/null @@ -1,611 +0,0 @@ -import os -from collections import namedtuple -from tempfile import mkstemp -from typing import Any, List, Union - -import dask.dataframe as dd -import matplotlib.pyplot as plt -import numpy as np -import tqdm -from mpl_toolkits.axes_grid1 import make_axes_locatable - -import axelrod as axl -from axelrod import IpdPlayer -from axelrod.ipd.interaction_utils import ( - compute_final_score_per_turn, - read_interactions_from_file, -) -from axelrod.ipd.strategy_transformers import DualTransformer, JossAnnTransformer - -Point = namedtuple("Point", "x y") - - -def _create_points(step: float, progress_bar: bool = True) -> List[Point]: - """Creates a set of Points over the unit square. - - A Point has coordinates (x, y). This function constructs points that are - separated by a step equal to `step`. The points are over the unit - square which implies that the number created will be (1/`step` + 1)^2. - - Parameters - ---------- - step : float - The separation between each Point. Smaller steps will produce more - Points with coordinates that will be closer together. - progress_bar : bool - Whether or not to create a progress bar which will be updated - - Returns - ---------- - points : list - of Point objects with coordinates (x, y) - """ - num = int((1 / step) // 1) + 1 - - if progress_bar: - p_bar = tqdm.tqdm(total=num ** 2, desc="Generating points") - - points = [] - for x in np.linspace(0, 1, num): - for y in np.linspace(0, 1, num): - points.append(Point(x, y)) - - if progress_bar: - p_bar.update() - - if progress_bar: - p_bar.close() - - return points - - -def _create_jossann(point: Point, probe: Any) -> IpdPlayer: - """Creates a JossAnn probe player that matches the Point. - - If the coordinates of the point sum to more than 1, the parameters are - flipped and subtracted from 1 to give meaningful probabilities. We also - use the Dual of the probe. This is outlined further in [Ashlock2010]_. - - Parameters - ---------- - point : Point - probe : class or instance - A class that must be descended from IpdPlayer or an instance of - IpdPlayer. - - Returns - ---------- - joss_ann: IpdPlayer - A `JossAnnTransformer` player with parameters that correspond to `point`.
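Concretely, the coordinate handling described above behaves as follows (a sketch using this module's own names; TitForTat is just an example probe):

probe = axl.TitForTat
player = _create_jossann(Point(0.2, 0.3), probe)   # x + y < 1: plain Joss-Ann
# equivalent to JossAnnTransformer((0.2, 0.3))(probe)()
player = _create_jossann(Point(0.7, 0.8), probe)   # x + y >= 1: Dual of the Joss-Ann
# equivalent to DualTransformer()(JossAnnTransformer((0.3, 0.2))(probe))()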
- """ - x, y = point - - if isinstance(probe, axl.IpdPlayer): - init_kwargs = probe.init_kwargs - probe = probe.__class__ - else: - init_kwargs = {} - - if x + y >= 1: - joss_ann = DualTransformer()(JossAnnTransformer((1 - x, 1 - y))(probe))( - **init_kwargs - ) - else: - joss_ann = JossAnnTransformer((x, y))(probe)(**init_kwargs) - return joss_ann - - -def _create_probes( - probe: Union[type, IpdPlayer], points: list, progress_bar: bool = True -) -> List[IpdPlayer]: - """Creates a set of probe strategies over the unit square. - - Constructs probe strategies that correspond to points with coordinates - (x, y). The probes are created using the `JossAnnTransformer`. - - Parameters - ---------- - probe : class or instance - A class that must be descended from axelrodPlayer or an instance of - axelrodPlayer. - points : list - of Point objects with coordinates (x, y) - progress_bar : bool - Whether or not to create a progress bar which will be updated - - Returns - ---------- - probes : list - A list of `JossAnnTransformer` players with parameters that - correspond to point. - """ - if progress_bar: - points = tqdm.tqdm(points, desc="Generating probes") - probes = [_create_jossann(point, probe) for point in points] - return probes - - -def _create_edges(points: List[Point], progress_bar: bool = True) -> list: - """Creates a set of edges for a spatial tournament. - - Constructs edges that correspond to `points`. All edges begin at 0, and - connect to the index +1 of the probe. - - Parameters - ---------- - points : list - of Point objects with coordinates (x, y) - progress_bar : bool - Whether or not to create a progress bar which will be updated - - - Returns - ---------- - edges : list of tuples - A list containing tuples of length 2. All tuples will have 0 as the - first element. The second element is the index of the - corresponding probe (+1 to allow for including the Strategy). - """ - if progress_bar: - points = tqdm.tqdm(points, desc="Generating network edges") - edges = [(0, index + 1) for index, point in enumerate(points)] - return edges - - -def _generate_data(interactions: dict, points: list, edges: list) -> dict: - """Generates useful data from a spatial tournament. - - Matches interactions from `results` to their corresponding Point in - `probe_points`. - - Parameters - ---------- - interactions : dict - A dictionary mapping edges to the corresponding interactions of - those players. - points : list - of Point objects with coordinates (x, y). - edges : list of tuples - A list containing tuples of length 2. All tuples will have either 0 - or 1 as the first element. The second element is the index of the - corresponding probe (+1 to allow for including the Strategy). - - Returns - ---------- - point_scores : dict - A dictionary where the keys are Points of the form (x, y) and - the values are the mean score for the corresponding interactions. - """ - edge_scores = [ - np.mean( - [compute_final_score_per_turn(scores)[0] for scores in interactions[edge]] - ) - for edge in edges - ] - point_scores = dict(zip(points, edge_scores)) - return point_scores - - -def _reshape_data(data: dict, points: list, size: int) -> np.ndarray: - """Shape the data so that it can be plotted easily. - - Parameters - ---------- - data : dictionary - A dictionary where the keys are Points of the form (x, y) and - the values are the mean score for the corresponding interactions. - - points : list - of Point objects with coordinates (x, y). - - size : int - The number of Points in every row/column. 
- - Returns - ---------- - plotting_data : list - 2-D numpy array of the scores, correctly shaped to ensure that the - score corresponding to Point (0, 0) is in the left hand corner ie. - the standard origin. - """ - ordered_data = [data[point] for point in points] - shaped_data = np.reshape(ordered_data, (size, size), order="F") - plotting_data = np.flipud(shaped_data) - return plotting_data - - -class AshlockFingerprint(object): - def __init__( - self, strategy: Union[type, IpdPlayer], probe: Union[type, IpdPlayer] = axl.TitForTat - ) -> None: - """ - Parameters - ---------- - strategy : class or instance - A class that must be descended from axelrodPlayer or an instance of - axelrodPlayer. - probe : class or instance - A class that must be descended from axelrodPlayer or an instance of - axelrodPlayer. - Default: Tit For Tat - """ - self.strategy = strategy - self.probe = probe - - def _construct_tournament_elements( - self, step: float, progress_bar: bool = True - ) -> tuple: - """Build the elements required for a spatial tournament - - Parameters - ---------- - step : float - The separation between each Point. Smaller steps will - produce more Points that will be closer together. - progress_bar : bool - Whether or not to create a progress bar which will be updated - - - Returns - ---------- - edges : list of tuples - A list containing tuples of length 2. All tuples will have either 0 - or 1 as the first element. The second element is the index of the - corresponding probe (+1 to allow for including the Strategy). - - tournament_players : list - A list containing instances of axelrodPlayer. The first item is the - original player, the rest are the probes. - - """ - self.points = _create_points(step, progress_bar=progress_bar) - edges = _create_edges(self.points, progress_bar=progress_bar) - probe_players = _create_probes( - self.probe, self.points, progress_bar=progress_bar - ) - - if isinstance(self.strategy, axl.IpdPlayer): - tournament_players = [self.strategy] + probe_players - else: - tournament_players = [self.strategy()] + probe_players - - return edges, tournament_players - - def fingerprint( - self, - turns: int = 50, - repetitions: int = 10, - step: float = 0.01, - processes: int = None, - filename: str = None, - progress_bar: bool = True, - ) -> dict: - """Build and play the spatial tournament. - - Creates the probes and their edges then builds a spatial tournament. - When the coordinates of the probe sum to more than 1, the flip_plays of the - probe is taken instead and then the Joss-Ann Transformer is applied. If - the coordinates sum to less than 1 (or equal), then only the Joss-Ann is - applied, a flip_plays is not required. - - Parameters - ---------- - turns : int, optional - The number of turns per match - repetitions : int, optional - The number of times the round robin should be repeated - step : float, optional - The separation between each Point. Smaller steps will - produce more Points that will be closer together. - processes : int, optional - The number of processes to be used for parallel processing - filename: str, optional - The name of the file for self.spatial_tournament's interactions. - if None, will auto-generate a filename. - progress_bar : bool - Whether or not to create a progress bar which will be updated - - Returns - ---------- - self.data : dict - A dictionary where the keys are coordinates of the form (x, y) and - the values are the mean score for the corresponding interactions. 
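A minimal end-to-end use of this class might look as follows (a sketch; the coarse step keeps the run small, and the two strategies are arbitrary examples):

af = AshlockFingerprint(axl.WinStayLoseShift, probe=axl.TitForTat)
data = af.fingerprint(turns=10, repetitions=2, step=0.25, progress_bar=False)
fig = af.plot()  # heat map of mean score over the unit square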
- """ - - temp_file_descriptor = None - if filename is None: - temp_file_descriptor, filename = mkstemp() # type: ignore - - edges, tourn_players = self._construct_tournament_elements( - step, progress_bar=progress_bar - ) - - self.step = step - self.spatial_tournament = axl.IpdTournament( - tourn_players, turns=turns, repetitions=repetitions, edges=edges - ) - self.spatial_tournament.play( - build_results=False, - filename=filename, - processes=processes, - progress_bar=progress_bar, - ) - - self.interactions = read_interactions_from_file( - filename, progress_bar=progress_bar - ) - - if temp_file_descriptor is not None: - assert filename is not None - os.close(temp_file_descriptor) - os.remove(filename) - - self.data = _generate_data(self.interactions, self.points, edges) - return self.data - - def plot( - self, - cmap: str = "seismic", - interpolation: str = "none", - title: str = None, - colorbar: bool = True, - labels: bool = True, - ) -> plt.Figure: - """Plot the results of the spatial tournament. - - Parameters - ---------- - cmap : str, optional - A matplotlib colour map, full list can be found at - http://matplotlib.org/examples/color/colormaps_reference.html - interpolation : str, optional - A matplotlib interpolation, full list can be found at - http://matplotlib.org/examples/images_contours_and_fields/interpolation_methods.html - title : str, optional - A title for the plot - colorbar : bool, optional - Choose whether the colorbar should be included or not - labels : bool, optional - Choose whether the axis labels and ticks should be included - - Returns - ---------- - figure : matplotlib figure - A heat plot of the results of the spatial tournament - """ - size = int((1 / self.step) // 1) + 1 - plotting_data = _reshape_data(self.data, self.points, size) - fig, ax = plt.subplots() - cax = ax.imshow(plotting_data, cmap=cmap, interpolation=interpolation) - - if colorbar: - max_score = max(self.data.values()) - min_score = min(self.data.values()) - ticks = [min_score, (max_score + min_score) / 2, max_score] - fig.colorbar(cax, ticks=ticks) - - plt.xlabel("$x$") - plt.ylabel("$y$", rotation=0) - ax.tick_params(axis="both", which="both", length=0) - plt.xticks([0, len(plotting_data) - 1], ["0", "1"]) - plt.yticks([0, len(plotting_data) - 1], ["1", "0"]) - - if not labels: - plt.axis("off") - - if title is not None: - plt.title(title) - return fig - - -class TransitiveFingerprint(object): - def __init__(self, strategy, opponents=None, number_of_opponents=50): - """ - Parameters - ---------- - strategy : class or instance - A class that must be descended from axelrodPlayer or an instance of - axelrodPlayer. - opponents : list of instances - A list that contains a list of opponents - Default: A spectrum of Random players - number_of_opponents: int - The number of Random opponents - Default: 50 - """ - self.strategy = strategy - - if opponents is None: - self.opponents = [ - axl.Random(p) for p in np.linspace(0, 1, number_of_opponents) - ] - else: - self.opponents = opponents - - def fingerprint( - self, - turns: int = 50, - repetitions: int = 1000, - noise: float = None, - processes: int = None, - filename: str = None, - progress_bar: bool = True, - ) -> np.array: - """Creates a spatial tournament to run the necessary matches to obtain - fingerprint data. - - Creates the opponents and their edges then builds a spatial tournament. 
- - Parameters - ---------- - turns : int, optional - The number of turns per match - repetitions : int, optional - The number of times the round robin should be repeated - noise : float, optional - The probability that a player's intended action should be flipped - processes : int, optional - The number of processes to be used for parallel processing - filename: str, optional - The name of the file for spatial tournament's interactions. - if None, a filename will be generated. - progress_bar : bool - Whether or not to create a progress bar which will be updated - - Returns - ---------- - self.data : np.array - A numpy array containing the mean cooperation rate against each - opponent in each turn. The ith row corresponds to the ith opponent - and the jth column the jth turn. - """ - - if isinstance(self.strategy, axl.IpdPlayer): - players = [self.strategy] + self.opponents - else: - players = [self.strategy()] + self.opponents - - temp_file_descriptor = None - if filename is None: - temp_file_descriptor, filename = mkstemp() # type: ignore - - edges = [(0, k + 1) for k in range(len(self.opponents))] - tournament = axl.IpdTournament( - players=players, - edges=edges, - turns=turns, - noise=noise, - repetitions=repetitions, - ) - tournament.play( - filename=filename, - build_results=False, - progress_bar=progress_bar, - processes=processes, - ) - - self.data = self.analyse_cooperation_ratio(filename) - - if temp_file_descriptor is not None: - assert filename is not None - os.close(temp_file_descriptor) - os.remove(filename) - - return self.data - - @staticmethod - def analyse_cooperation_ratio(filename): - """Generates the data used from the tournament - - Return an M by N array where M is the number of opponents and N is the - number of turns. - - Parameters - ---------- - filename : str - The filename of the interactions - - Returns - ---------- - self.data : np.array - A numpy array containing the mean cooperation rate against each - opponent in each turn. The ith row corresponds to the ith opponent - and the jth column the jth turn. - """ - did_c = np.vectorize(lambda actions: [int(action == "C") for action in actions]) - - cooperation_rates = {} - df = dd.read_csv(filename) - # We ignore the actions of all opponents. So we filter the dataframe to - # only include the results of the player with index `0`. - df = df[df["Player index"] == 0][["Opponent index", "Actions"]] - - for _, row in df.iterrows(): - opponent_index, player_history = row["Opponent index"], row["Actions"] - if opponent_index in cooperation_rates: - cooperation_rates[opponent_index].append(did_c(player_history)) - else: - cooperation_rates[opponent_index] = [did_c(player_history)] - - for index, rates in cooperation_rates.items(): - cooperation_rates[index] = np.mean(rates, axis=0) - - return np.array( - [cooperation_rates[index] for index in sorted(cooperation_rates)] - ) - - def plot( - self, - cmap: str = "viridis", - interpolation: str = "none", - title: str = None, - colorbar: bool = True, - labels: bool = True, - display_names: bool = False, - ax: plt.Figure = None, - ) -> plt.Figure: - """Plot the results of the spatial tournament. 
- Parameters - ---------- - cmap : str, optional - A matplotlib colour map, full list can be found at - http://matplotlib.org/examples/color/colormaps_reference.html - interpolation : str, optional - A matplotlib interpolation, full list can be found at - http://matplotlib.org/examples/images_contours_and_fields/interpolation_methods.html - title : str, optional - A title for the plot - colorbar : bool, optional - Choose whether the colorbar should be included or not - labels : bool, optional - Choose whether the axis labels and ticks should be included - display_names : bool, optional - Choose whether to display the names of the strategies - ax: matplotlib axis - Allows the plot to be written to a given matplotlib axis. - Default is None. - Returns - ---------- - figure : matplotlib figure - A heat plot of the results of the spatial tournament - """ - if ax is None: - fig, ax = plt.subplots() - else: - ax = ax - - fig = ax.get_figure() - mat = ax.imshow(self.data, cmap=cmap, interpolation=interpolation) - - width = len(self.data) / 2 - height = width - fig.set_size_inches(width, height) - - plt.xlabel("turns") - ax.tick_params(axis="both", which="both", length=0) - - if display_names: - plt.yticks( - range(len(self.opponents)), [str(player) for player in self.opponents] - ) - else: - plt.yticks([0, len(self.opponents) - 1], [0, 1]) - plt.ylabel("Probability of cooperation") - - if not labels: - plt.axis("off") - - if title is not None: - plt.title(title) - - if colorbar: - max_score = 0 - min_score = 1 - ticks = [min_score, 1 / 2, max_score] - - divider = make_axes_locatable(ax) - cax = divider.append_axes("right", size="5%", pad=0.2) - cbar = fig.colorbar(mat, cax=cax, ticks=ticks) - - plt.tight_layout() - return fig diff --git a/axelrod/ipd/game.py b/axelrod/ipd/game.py deleted file mode 100644 index d35ec98c7..000000000 --- a/axelrod/ipd/game.py +++ /dev/null @@ -1,68 +0,0 @@ -from typing import Tuple, Union - -from axelrod import Action, BaseGame - -C, D = Action.C, Action.D - -Score = Union[int, float] - - -class IpdGame(BaseGame): - """Container for the game matrix and scoring logic. - - Attributes - ---------- - scores: dict - The numerical scores assigned to all combinations of action pairs. - """ - - def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> None: - """Create a new game object. - - Parameters - ---------- - r: int or float - Score obtained by both players for mutual cooperation. - s: int or float - Score obtained by a player for cooperating against a defector. - t: int or float - Score obtained by a player for defecting against a cooperator. - p: int or float - Score obtained by both players for mutual defection. - """ - self.scores = {(C, C): (r, r), (D, D): (p, p), (C, D): (s, t), (D, C): (t, s)} - super().__init__() - - def RPST(self) -> Tuple[Score, Score, Score, Score]: - """Returns game matrix values in Press and Dyson notation.""" - R = self.scores[(C, C)][0] - P = self.scores[(D, D)][0] - S = self.scores[(C, D)][0] - T = self.scores[(D, C)][0] - return R, P, S, T - - def score(self, pair: Tuple[Action, Action]) -> Tuple[Score, Score]: - """Returns the appropriate score for a decision pair. - - Parameters - ---------- - pair: tuple(Action, Action) - A pair of actions for two players, for example (C, C). - - Returns - ------- - tuple of int or float - Scores for the two players resulting from their actions.
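With the default matrix this gives the classic prisoner's dilemma payoffs; a small sketch:

game = IpdGame()
assert game.RPST() == (3, 1, 0, 5)   # note the (R, P, S, T) ordering
assert game.score((C, D)) == (0, 5)  # the cooperator receives S, the defector T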
- """ - return self.scores[pair] - - def __repr__(self) -> str: - return "Axelrod game: (R,P,S,T) = {}".format(self.RPST()) - - def __eq__(self, other): - if not isinstance(other, IpdGame): - return False - return self.RPST() == other.RPST() - - -DefaultGame = IpdGame() diff --git a/axelrod/ipd/graph.py b/axelrod/ipd/graph.py deleted file mode 100644 index 9f41bde4d..000000000 --- a/axelrod/ipd/graph.py +++ /dev/null @@ -1,166 +0,0 @@ -"""Weighted undirected sparse graphs. - -Original source: -https://github.com/marcharper/stationary/blob/master/stationary/utils/graph.py -""" - -from collections import defaultdict - - -class Graph(object): - """Weighted and directed graph class. - - This class is intended for the graph associated to a Markov process, - since it gives easy access to the neighbors of a particular state. - - Vertices can be any hashable Python object. - - Initialize with a list of edges: - [[node1, node2, weights], ...] - Weights can be omitted for an undirected graph. - - For efficiency, neighbors are cached in dictionaries. Undirected - graphs are implemented as directed graphs in which every edge (s, t) - has the opposite edge (t, s). - - Attributes - ---------- - directed: Boolean indicating whether the graph is directed - original_edges: the edges passed into the initializer - out_mapping: a dictionary mapping all heads to dictionaries that map - all tails to their edge weights (None means no weight) - in_mapping: a dictionary mapping all tails to dictionaries that map - all heads to their edge weights (none means to weight) - - Properties - ---------- - vertices: the set of vertices in the graph - edges: the set of current edges in the graph - """ - - def __init__(self, edges=None, directed=False): - self.directed = directed - self.original_edges = edges - self.out_mapping = defaultdict(lambda: defaultdict(float)) - self.in_mapping = defaultdict(lambda: defaultdict(float)) - self._edges = [] - if edges: - self._add_edges(edges) - - def _add_edge(self, source, target, weight=None): - if (source, target) not in self._edges: - self._edges.append((source, target)) - self.out_mapping[source][target] = weight - self.in_mapping[target][source] = weight - if ( - not self.directed - and (source != target) - and (target, source) not in self._edges - ): - self._edges.append((target, source)) - self.out_mapping[target][source] = weight - self.in_mapping[source][target] = weight - - def _add_edges(self, edges): - for edge in edges: - self._add_edge(*edge) - - def add_loops(self): - """ - Add all loops to edges - """ - self._add_edges((x, x) for x in self.vertices) - - @property - def edges(self): - return self._edges - - @property - def vertices(self): - return list(self.out_mapping.keys()) - - def out_dict(self, source): - """Returns a dictionary of the outgoing edges of source with weights.""" - return self.out_mapping[source] - - def out_vertices(self, source): - """Returns a list of the outgoing vertices.""" - return list(self.out_mapping[source].keys()) - - def in_dict(self, target): - """Returns a dictionary of the incoming edges of source with weights.""" - return self.in_mapping[target] - - def in_vertices(self, source): - """Returns a list of the outgoing vertices.""" - return list(self.in_mapping[source].keys()) - - def __repr__(self): - s = "".format(repr(self.original_edges)) - return s - - -# Example graph factories. - - -def cycle(length, directed=False): - """Produces a cycle of a specified length. 
- - Parameters - ---------- - length: int - Number of vertices in the cycle - directed: bool, False - Is the cycle directed? - - Returns - ------- - a Graph object for the cycle - """ - edges = [(i, i + 1) for i in range(length - 1)] - edges.append((length - 1, 0)) - return Graph(edges=edges, directed=directed) - - -def complete_graph(size, loops=True, directed=False): - """ - Produces a complete graph of size `size`. - https://en.wikipedia.org/wiki/Complete_graph - - Parameters - ---------- - size: int - Number of vertices in the graph - loops: bool, True - attach loops at each node? - directed: bool, False - Is the graph directed? - - Returns - ------- - a Graph object for the complete graph - """ - edges = [(i, j) for i in range(size) for j in range(i + 1, size)] - graph = Graph(directed=directed, edges=edges) - if loops: - graph.add_loops() - return graph - - -def attached_complete_graphs(length, loops=True, directed=False): - """Creates two complete undirected graphs of size `length` - attached by a single edge.""" - edges = [] - # Two complete graphs - for cluster in range(2): - for i in range(length): - for j in range(i + 1, length): - edges.append(("{}:{}".format(cluster, i), - "{}:{}".format(cluster, j))) - # Attach at one node - edges.append(("0:0", "1:0")) - graph = Graph(directed=directed, edges=edges) - if loops: - graph.add_loops() - - return graph diff --git a/axelrod/ipd/history.py b/axelrod/ipd/history.py deleted file mode 100644 index a9dca6406..000000000 --- a/axelrod/ipd/history.py +++ /dev/null @@ -1,133 +0,0 @@ -from collections import Counter - -from axelrod.ipd.action import Action, actions_to_str - -C, D = Action.C, Action.D - - -class History(object): - """ - History class to track the history of play and metadata including - the number of cooperations and defections, and if available, the - opponent's plays and the state distribution of the history of play. - """ - - def __init__(self, plays=None, coplays=None): - """ - Parameters - ---------- - plays: - An ordered iterable of the actions of the player. - coplays: - An ordered iterable of the actions of the coplayer (aka opponent). - """ - self._plays = [] - # Coplays is tracked mainly for computation of the state distribution - # when cloning or dualing. - self._coplays = [] - self._actions = Counter() - self._state_distribution = Counter() - if plays: - self.extend(plays, coplays) - - def append(self, play, coplay): - """Appends a new (play, coplay) pair and updates metadata for - number of cooperations and defections, and the state distribution.""" - self._plays.append(play) - self._actions[play] += 1 - self._coplays.append(coplay) - self._state_distribution[(play, coplay)] += 1 - - def copy(self): - """Returns a new object with the same data.""" - return self.__class__(plays=self._plays, coplays=self._coplays) - - def flip_plays(self): - """Creates a flipped plays history for use with DualTransformer.""" - flipped_plays = [action.flip() for action in self._plays] - return self.__class__(plays=flipped_plays, coplays=self._coplays) - - def extend(self, plays, coplays): - """A function that emulates list.extend.""" - # We could repeatedly call self.append but this is more efficient.
-        self._plays.extend(plays)
-        self._actions.update(plays)
-        self._coplays.extend(coplays)
-        self._state_distribution.update(zip(plays, coplays))
-
-    def reset(self):
-        """Clears all data in the History object."""
-        self._plays.clear()
-        self._coplays.clear()
-        self._actions.clear()
-        self._state_distribution.clear()
-
-    @property
-    def coplays(self):
-        return self._coplays
-
-    @property
-    def cooperations(self):
-        return self._actions[C]
-
-    @property
-    def defections(self):
-        return self._actions[D]
-
-    @property
-    def state_distribution(self):
-        return self._state_distribution
-
-    def __eq__(self, other):
-        if isinstance(other, list):
-            return self._plays == other
-        elif isinstance(other, History):
-            return self._plays == other._plays and self._coplays == other._coplays
-        raise TypeError("Cannot compare types.")
-
-    def __getitem__(self, key):
-        # Passthrough keys and slice objects
-        return self._plays[key]
-
-    def __str__(self):
-        return actions_to_str(self._plays)
-
-    def __list__(self):
-        return self._plays
-
-    def __len__(self):
-        return len(self._plays)
-
-    def __repr__(self):
-        return repr(self.__list__())
-
-
-class LimitedHistory(History):
-    """
-    History class that only tracks the last N rounds. Used for testing memory
-    depth.
-    """
-
-    def __init__(self, memory_depth):
-        """
-        Parameters
-        ----------
-        memory_depth, int:
-            length of history to retain
-        """
-        super().__init__()
-        self.memory_depth = memory_depth
-
-    def append(self, play, coplay):
-        """Appends a new (play, coplay) pair and updates metadata for the
-        number of cooperations and defections, and the state distribution."""
-
-        self._plays.append(play)
-        self._actions[play] += 1
-        if coplay:
-            self._coplays.append(coplay)
-            self._state_distribution[(play, coplay)] += 1
-        if len(self._plays) > self.memory_depth:
-            first_play, first_coplay = self._plays.pop(0), self._coplays.pop(0)
-            self._actions[first_play] -= 1
-            self._state_distribution[(first_play, first_coplay)] -= 1
diff --git a/axelrod/ipd/interaction_utils.py b/axelrod/ipd/interaction_utils.py
deleted file mode 100644
index 72bdcaa16..000000000
--- a/axelrod/ipd/interaction_utils.py
+++ /dev/null
@@ -1,286 +0,0 @@
-"""
-Functions to calculate results from interactions. Interactions are lists of the
-form:
-
-    [(C, D), (D, C), ...]
-
-This is used by both the IpdMatch class and the ResultSet class which analyse
-interactions.
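
Concretely, the helpers defined below operate on lists of action pairs like the one in this docstring. A small sketch of the scoring functions (it assumes the axelrod.ipd modules from this series are importable, and uses the library's default game):

from axelrod.ipd.action import Action
import axelrod.ipd.interaction_utils as iu

C, D = Action.C, Action.D

interactions = [(C, C), (C, D), (D, C)]
print(iu.compute_scores(interactions))                # [(3, 3), (0, 5), (5, 0)]
print(iu.compute_final_score(interactions))           # (8, 8)
print(iu.compute_final_score_per_turn(interactions))  # roughly (2.67, 2.67)
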
-""" -from collections import Counter, defaultdict - -import pandas as pd -import tqdm -from axelrod.ipd.action import Action, str_to_actions - -from .game import IpdGame - -C, D = Action.C, Action.D - - -def compute_scores(interactions, game=None): - """Returns the scores of a given set of interactions.""" - if not game: - game = IpdGame() - return [game.score(plays) for plays in interactions] - - -def compute_final_score(interactions, game=None): - """Returns the final score of a given set of interactions.""" - scores = compute_scores(interactions, game) - if len(scores) == 0: - return None - - final_score = tuple( - sum([score[player_index] for score in scores]) for player_index in [0, 1] - ) - return final_score - - -def compute_final_score_per_turn(interactions, game=None): - """Returns the mean score per round for a set of interactions""" - scores = compute_scores(interactions, game) - num_turns = len(interactions) - - if len(scores) == 0: - return None - - final_score_per_turn = tuple( - sum([score[player_index] for score in scores]) / num_turns - for player_index in [0, 1] - ) - return final_score_per_turn - - -def compute_winner_index(interactions, game=None): - """Returns the index of the winner of the IpdMatch""" - scores = compute_final_score(interactions, game) - - if scores is not None: - if scores[0] == scores[1]: - return False # No winner - return max([0, 1], key=lambda i: scores[i]) - return None - - -def compute_cooperations(interactions): - """Returns the count of cooperations by each player for a set of - interactions""" - - if len(interactions) == 0: - return None - - cooperation = tuple( - sum([play[player_index] == C for play in interactions]) - for player_index in [0, 1] - ) - return cooperation - - -def compute_normalised_cooperation(interactions): - """Returns the count of cooperations by each player per turn for a set of - interactions""" - if len(interactions) == 0: - return None - - num_turns = len(interactions) - cooperation = compute_cooperations(interactions) - - normalised_cooperation = tuple([c / num_turns for c in cooperation]) - - return normalised_cooperation - - -def compute_state_distribution(interactions): - """ - Returns the count of each state for a set of interactions. - - Parameters - ---------- - interactions : list of tuples - A list containing the interactions of the match as shown at the top of - this file. - - Returns - ---------- - Counter(interactions) : Counter Object - Dictionary where the keys are the states and the values are the number - of times that state occurs. - """ - if not interactions: - return None - return Counter(interactions) - - -def compute_normalised_state_distribution(interactions): - """ - Returns the normalized count of each state for a set of interactions. - - Parameters - ---------- - interactions : list of tuples - A list containing the interactions of the match as shown at the top of - this file. - - Returns - ---------- - normalized_count : Counter Object - Dictionary where the keys are the states and the values are a normalized - count of the number of times that state occurs. - """ - if not interactions: - return None - - interactions_count = Counter(interactions) - total = sum(interactions_count.values(), 0) - - normalized_count = Counter( - {key: value / total for key, value in interactions_count.items()} - ) - return normalized_count - - -def compute_state_to_action_distribution(interactions): - """ - Returns a list (for each player) of counts of each state to action pair - for a set of interactions. 
A state to action pair is of the form:
-
-        ((C, D), C)
-
-    Implying that from a state of (C, D) (the first player having played C and
-    the second playing D) the player in question then played C.
-
-    The following counter object implies that the player in question was in
-    state (C, D) for a total of 12 times, subsequently cooperating 4 times and
-    defecting 8 times.
-
-        Counter({((C, D), C): 4, ((C, D), D): 8})
-
-    Parameters
-    ----------
-    interactions : list of tuples
-        A list containing the interactions of the match as shown at the top of
-        this file.
-
-    Returns
-    -------
-    state_to_C_distributions : List of Counter Object
-        List of Counter objects where the keys are the states and actions and
-        the values the counts. The first/second Counter corresponds to the
-        first/second player.
-    """
-    if not interactions:
-        return None
-
-    distributions = [
-        Counter(
-            [
-                (state, outcome[j])
-                for state, outcome in zip(interactions, interactions[1:])
-            ]
-        )
-        for j in range(2)
-    ]
-    return distributions
-
-
-def compute_normalised_state_to_action_distribution(interactions):
-    """
-    Returns a list (for each player) of normalised counts of each state to action
-    pair for a set of interactions. A state to action pair is of the form:
-
-        ((C, D), C)
-
-    implying that from a state of (C, D) (the first player having played C and
-    the second playing D) the player in question then played C.
-
-    The following counter object implies that the player in question was only
-    ever in state (C, D), subsequently cooperating 1/3 of the time and defecting
-    2/3 of the time.
-
-        Counter({((C, D), C): 0.333333, ((C, D), D): 0.66666667})
-
-    Parameters
-    ----------
-    interactions : list of tuples
-        A list containing the interactions of the match as shown at the top of
-        this file.
-
-    Returns
-    -------
-    normalised_state_to_C_distributions : List of Counter Object
-        List of Counter objects where the keys are the states and actions and
-        the values the normalized counts. The first/second Counter corresponds
-        to the first/second player.
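
A quick hand-check of the counting logic above, as a sketch that again assumes the axelrod.ipd imports:

from axelrod.ipd.action import Action
import axelrod.ipd.interaction_utils as iu

C, D = Action.C, Action.D

# States (C, D) and (D, C) are each followed by one more turn, so each
# contributes one (state, next_action) pair per player.
interactions = [(C, D), (D, C), (C, C)]
dists = iu.compute_state_to_action_distribution(interactions)
print(dists[0])  # Counter({((C, D), D): 1, ((D, C), C): 1}) -- player 1's next moves
print(dists[1])  # Counter({((C, D), C): 1, ((D, C), C): 1}) -- player 2's next moves
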
- """ - if not interactions: - return None - - distribution = compute_state_to_action_distribution(interactions) - normalized_distribution = [] - for player in range(2): - counter = {} - for state in [(C, C), (C, D), (D, C), (D, D)]: - C_count = distribution[player].get((state, C), 0) - D_count = distribution[player].get((state, D), 0) - total = C_count + D_count - if total > 0: - if C_count > 0: - counter[(state, C)] = C_count / (C_count + D_count) - if D_count > 0: - counter[(state, D)] = D_count / (C_count + D_count) - normalized_distribution.append(Counter(counter)) - return normalized_distribution - - -def sparkline(actions, c_symbol="█", d_symbol=" "): - return "".join([c_symbol if play == C else d_symbol for play in actions]) - - -def compute_sparklines(interactions, c_symbol="█", d_symbol=" "): - """Returns the sparklines for a set of interactions""" - if len(interactions) == 0: - return None - - histories = list(zip(*interactions)) - return ( - sparkline(histories[0], c_symbol, d_symbol) - + "\n" - + sparkline(histories[1], c_symbol, d_symbol) - ) - - -def read_interactions_from_file(filename, progress_bar=True): - """ - Reads a file and returns a dictionary mapping tuples of player pairs to - lists of interactions - """ - df = pd.read_csv(filename)[ - ["Interaction index", "Player index", "Opponent index", "Actions"] - ] - groupby = df.groupby("Interaction index") - if progress_bar: - groupby = tqdm.tqdm(groupby) - - pairs_to_interactions = defaultdict(list) - for _, d in tqdm.tqdm(groupby): - key = tuple(d[["Player index", "Opponent index"]].iloc[0]) - value = list(map(str_to_actions, zip(*d["Actions"]))) - pairs_to_interactions[key].append(value) - return pairs_to_interactions - - -def string_to_interactions(string): - """ - Converts a compact string representation of an interaction to an - interaction: - - 'CDCDDD' -> [(C, D), (C, D), (D, D)] - """ - interactions = [] - interactions_list = list(string) - while interactions_list: - p1action = Action.from_char(interactions_list.pop(0)) - p2action = Action.from_char(interactions_list.pop(0)) - interactions.append((p1action, p2action)) - return interactions diff --git a/axelrod/ipd/load_data_.py b/axelrod/ipd/load_data_.py deleted file mode 100644 index ac29250fd..000000000 --- a/axelrod/ipd/load_data_.py +++ /dev/null @@ -1,62 +0,0 @@ -import pathlib -from typing import Dict, List, Text, Tuple - -import pkg_resources - - -def axl_filename(path: pathlib.Path) -> pathlib.Path: - """Given a path under Axelrod/, return absolute filepath. - - Parameters - ---------- - axl_path - A pathlib.Path object with the relative directory under Axelrod/ - - Returns - ------- - A pathlib.Path object with the absolute directory. - """ - # We go up a dir because this code is located in Axelrod/axelrod. 
- axl_path = pathlib.Path(__file__).resolve().parent.parent - return axl_path / path - - -def load_file(filename: str, directory: str) -> List[List[str]]: - """Loads a data file stored in the Axelrod library's data subdirectory, - likely for parameters for a strategy.""" - path = "/".join((directory, filename)) - data_bytes = pkg_resources.resource_string(__name__, path) - data = data_bytes.decode("UTF-8", "replace") - rows = [] - for line in data.split("\n"): - if line.startswith("#") or len(line) == 0: - continue - s = line.split(", ") - rows.append(s) - return rows - - -def load_weights( - filename: str = "ann_weights.csv", directory: str = "data" -) -> Dict[str, Tuple[int, int, List[float]]]: - """Load Neural Network Weights.""" - rows = load_file(filename, directory) - d = dict() - for row in rows: - name = str(row[0]) - num_features = int(row[1]) - num_hidden = int(row[2]) - weights = list(map(float, row[3:])) - d[name] = (num_features, num_hidden, weights) - return d - - -def load_pso_tables(filename="pso_gambler.csv", directory="data"): - """Load lookup tables.""" - rows = load_file(filename, directory) - d = dict() - for row in rows: - name, a, b, c, = str(row[0]), int(row[1]), int(row[2]), int(row[3]) - values = list(map(float, row[4:])) - d[(name, int(a), int(b), int(c))] = values - return d diff --git a/axelrod/ipd/match.py b/axelrod/ipd/match.py deleted file mode 100644 index 4dafbe44f..000000000 --- a/axelrod/ipd/match.py +++ /dev/null @@ -1,258 +0,0 @@ -import random -from math import ceil, log - -import axelrod.ipd.interaction_utils as iu -from axelrod import DEFAULT_TURNS -from axelrod.ipd.action import Action -from axelrod import Classifiers -from axelrod.ipd.game import IpdGame -from axelrod.match import BaseMatch -from .deterministic_cache import DeterministicCache - -C, D = Action.C, Action.D - - -def is_stochastic(players, noise): - """Determines if a match is stochastic -- true if there is noise or if any - of the players involved is stochastic.""" - return noise or any(map(Classifiers["stochastic"], players)) - - -class IpdMatch(BaseMatch): - """The IpdMatch class conducts matches between two players.""" - - def __init__( - self, - players, - turns=None, - prob_end=None, - game=None, - deterministic_cache=None, - noise=0, - match_attributes=None, - reset=True, - ): - """ - Parameters - ---------- - players : tuple - A pair of Player objects - turns : integer - The number of turns per match - prob_end : float - The probability of a given turn ending a match - game : axelrod.IpdGame - The game object used to score the match - deterministic_cache : axelrod.DeterministicCache - A cache of resulting actions for deterministic matches - noise : float - The probability that a player's intended action should be flipped - match_attributes : dict - Mapping attribute names to values which should be passed to players. - The default is to use the correct values for turns, game and noise - but these can be overridden if desired. 
-        reset : bool
-            Whether to reset players or not
-        """
-
-        defaults = {
-            (True, True): (DEFAULT_TURNS, 0),
-            (True, False): (float("inf"), prob_end),
-            (False, True): (turns, 0),
-            (False, False): (turns, prob_end),
-        }
-        self.turns, self.prob_end = defaults[(turns is None, prob_end is None)]
-
-        self.result = []
-        self.noise = noise
-
-        if game is None:
-            self.game = IpdGame()
-        else:
-            self.game = game
-
-        if deterministic_cache is None:
-            self._cache = DeterministicCache()
-        else:
-            self._cache = deterministic_cache
-
-        if match_attributes is None:
-            known_turns = self.turns if prob_end is None else float("inf")
-            self.match_attributes = {
-                "length": known_turns,
-                "game": self.game,
-                "noise": self.noise,
-            }
-        else:
-            self.match_attributes = match_attributes
-
-        self.players = list(players)
-        self.reset = reset
-
-        super().__init__(
-            players, turns, prob_end, game, noise, match_attributes, reset
-        )
-
-    @property
-    def players(self):
-        return self._players
-
-    @players.setter
-    def players(self, players):
-        """Ensure that players are passed the match attributes"""
-        newplayers = []
-        for player in players:
-            player.set_match_attributes(**self.match_attributes)
-            newplayers.append(player)
-        self._players = newplayers
-
-    @property
-    def _stochastic(self):
-        """
-        A boolean to show whether a match between two players would be
-        stochastic.
-        """
-        return is_stochastic(self.players, self.noise)
-
-    @property
-    def _cache_update_required(self):
-        """
-        A boolean to show whether the deterministic cache should be updated.
-        """
-        return (
-            not self.noise
-            and self._cache.mutable
-            and not (any(Classifiers["stochastic"](p) for p in self.players))
-        )
-
-    def _cached_enough_turns(self, cache_key, turns):
-        """
-        Returns True iff there is an entry in self._cache for the given key
-        and it is at least `turns` long.
-        """
-        if cache_key not in self._cache:
-            return False
-        return len(self._cache[cache_key]) >= turns
-
-    def play(self):
-        """
-        The resulting list of actions from a match between two players.
-
-        This method determines whether the actions list can be obtained from
-        the deterministic cache and returns it from there if so. If not, it
-        calls the play method for player1 and returns the list from there.
-
-        Returns
-        -------
-        A list of the form:
-
-        e.g. for a 2 turn match between Cooperator and Defector:
-
-            [(C, D), (C, D)]
-
-        i.e. One entry per turn containing a pair of actions.
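
In practice the method is used like this. A hypothetical sketch: it assumes Cooperator and Defector are still exported at the top level alongside IpdMatch, which depends on how this series leaves the public API.

import axelrod as axl

players = (axl.Cooperator(), axl.Defector())
match = axl.IpdMatch(players, turns=3)
print(match.play())         # [(C, D), (C, D), (C, D)]
print(match.final_score())  # (0, 15) under the default game
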
- """ - turns = min(sample_length(self.prob_end), self.turns) - cache_key = (self.players[0], self.players[1]) - - if self._stochastic or not self._cached_enough_turns(cache_key, turns): - for p in self.players: - if self.reset: - p.reset() - p.set_match_attributes(**self.match_attributes) - result = [] - for _ in range(turns): - plays = self.players[0].play(self.players[1], self.noise) - result.append(plays) - - if self._cache_update_required: - self._cache[cache_key] = result - else: - result = self._cache[cache_key][:turns] - - self.result = result - return result - - def scores(self): - """Returns the scores of the previous IpdMatch plays.""" - return iu.compute_scores(self.result, self.game) - - def final_score(self): - """Returns the final score for a IpdMatch.""" - return iu.compute_final_score(self.result, self.game) - - def final_score_per_turn(self): - """Returns the mean score per round for a IpdMatch.""" - return iu.compute_final_score_per_turn(self.result, self.game) - - def winner(self): - """Returns the winner of the IpdMatch.""" - winner_index = iu.compute_winner_index(self.result, self.game) - if winner_index is False: # No winner - return False - if winner_index is None: # No plays - return None - return self.players[winner_index] - - def cooperation(self): - """Returns the count of cooperations by each player.""" - return iu.compute_cooperations(self.result) - - def normalised_cooperation(self): - """Returns the count of cooperations by each player per turn.""" - return iu.compute_normalised_cooperation(self.result) - - def state_distribution(self): - """ - Returns the count of each state for a set of interactions. - """ - return iu.compute_state_distribution(self.result) - - def normalised_state_distribution(self): - """ - Returns the normalized count of each state for a set of interactions. - """ - return iu.compute_normalised_state_distribution(self.result) - - def sparklines(self, c_symbol="█", d_symbol=" "): - return iu.compute_sparklines(self.result, c_symbol, d_symbol) - - def __len__(self): - return self.turns - - -def sample_length(prob_end): - """ - Sample length of a game. - - This is using inverse random sample on a probability density function - given by: - - f(n) = p_end * (1 - p_end) ^ (n - 1) - - (So the probability of length n is given by f(n)) - - Which gives cumulative distribution function - : - - F(n) = 1 - (1 - p_end) ^ n - - (So the probability of length less than or equal to n is given by F(n)) - - Which gives for given x = F(n) (ie the random sample) gives n: - - n = ceil((ln(1-x)/ln(1-p_end))) - - This approach of sampling from a distribution is called inverse - transform sampling - . - - Note that this corresponds to sampling at the end of every turn whether - or not the IpdMatch ends. - """ - if prob_end == 0: - return float("inf") - if prob_end == 1: - return 1 - x = random.random() - return int(ceil(log(1 - x) / log(1 - prob_end))) diff --git a/axelrod/ipd/match_generator.py b/axelrod/ipd/match_generator.py deleted file mode 100644 index d4dca9dc1..000000000 --- a/axelrod/ipd/match_generator.py +++ /dev/null @@ -1,123 +0,0 @@ -class MatchGenerator(object): - def __init__( - self, - players, - repetitions, - turns=None, - game=None, - noise=0, - prob_end=None, - edges=None, - match_attributes=None, - ): - """ - A class to generate matches. This is used by the IpdTournament class which - is in charge of playing the matches and collecting the results. 
-
-        Parameters
-        ----------
-        players : list
-            A list of axelrod IpdPlayer objects
-        repetitions : int
-            The number of repetitions of a given match
-        turns : integer
-            The number of turns per match
-        game : axelrod.IpdGame
-            The game object used to score the match
-        noise : float, 0
-            The probability that a player's intended action should be flipped
-        prob_end : float
-            The probability of a given turn ending a match
-        edges : list
-            A list of edges between players
-        match_attributes : dict
-            Mapping attribute names to values which should be passed to players.
-            The default is to use the correct values for turns, game and noise
-            but these can be overridden if desired.
-        """
-        self.players = players
-        self.turns = turns
-        self.game = game
-        self.repetitions = repetitions
-        self.noise = noise
-        self.opponents = players
-        self.prob_end = prob_end
-        self.match_attributes = match_attributes
-
-        self.edges = edges
-        if edges is not None:
-            if not graph_is_connected(edges, players):
-                raise ValueError("The graph edges do not include all players.")
-            self.size = len(edges)
-        else:
-            n = len(self.players)
-            self.size = int(n * (n - 1) // 2 + n)
-
-    def __len__(self):
-        return self.size
-
-    def build_match_chunks(self):
-        """
-        A generator that returns player index pairs and match parameters for a
-        round robin tournament.
-
-        Yields
-        ------
-        tuples
-            ((player1 index, player2 index), match parameters, repetitions)
-        """
-        if self.edges is None:
-            edges = complete_graph(self.players)
-        else:
-            edges = self.edges
-
-        for index_pair in edges:
-            match_params = self.build_single_match_params()
-            yield (index_pair, match_params, self.repetitions)
-
-    def build_single_match_params(self):
-        """
-        Creates a single set of match parameters.
-        """
-        return {
-            "turns": self.turns,
-            "game": self.game,
-            "noise": self.noise,
-            "prob_end": self.prob_end,
-            "match_attributes": self.match_attributes,
-        }
-
-
-def complete_graph(players):
-    """
-    Return a generator of the edges of a complete graph on a set of players.
-    """
-    for player1_index, _ in enumerate(players):
-        for player2_index in range(player1_index, len(players)):
-            yield (player1_index, player2_index)
-
-
-def graph_is_connected(edges, players):
-    """
-    Test if the set of edges defines a graph in which each player is connected
-    to at least one other player. This function does not test if the graph is
-    fully connected in the sense that each node is reachable from every other
-    node.
-
-    Parameters
-    ----------
-    edges : a list of 2 tuples
-    players : a list of player names
-
-    Returns
-    -------
-    boolean : True if the graph is connected as specified above.
-    """
-    # Check if all players are connected.
-    player_indices = set(range(len(players)))
-    node_indices = set()
-    for edge in edges:
-        for node in edge:
-            node_indices.add(node)
-
-    return player_indices == node_indices
diff --git a/axelrod/ipd/mock_player.py b/axelrod/ipd/mock_player.py
deleted file mode 100644
index f7115303e..000000000
--- a/axelrod/ipd/mock_player.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from itertools import cycle
-from typing import List
-
-from axelrod.ipd.action import Action
-from axelrod.ipd.player import IpdPlayer
-
-C, D = Action.C, Action.D
-
-
-class MockPlayer(IpdPlayer):
-    """Creates a mock player that plays a given sequence of actions. If
-    no actions are given, plays like Cooperator. Used for testing.
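
The complete_graph helper above pairs every player index with every later index and with itself, which is where the n * (n - 1) // 2 + n match count in MatchGenerator comes from. A standalone copy for illustration:

def complete_graph(players):
    # Mirrors the module-level helper above: yields every index pair,
    # including the (i, i) self-matches.
    for player1_index, _ in enumerate(players):
        for player2_index in range(player1_index, len(players)):
            yield (player1_index, player2_index)

print(list(complete_graph(["A", "B", "C"])))
# [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)] -- 3 * 2 // 2 + 3 = 6 matches
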
- """ - - name = "Mock IpdPlayer" - - def __init__(self, actions: List[Action] = None) -> None: - super().__init__() - if not actions: - actions = [] - self.actions = cycle(actions) - - def strategy(self, opponent: IpdPlayer) -> Action: - # Return the next saved action, if present. - try: - action = self.actions.__next__() - return action - except StopIteration: - return C diff --git a/axelrod/ipd/moran.py b/axelrod/ipd/moran.py deleted file mode 100644 index 1ea3c7e40..000000000 --- a/axelrod/ipd/moran.py +++ /dev/null @@ -1,541 +0,0 @@ -"""Implementation of the Moran process on Graphs.""" - -import random -from collections import Counter -from typing import Callable, List, Optional, Set, Tuple - -import matplotlib.pyplot as plt -import numpy as np -from axelrod import EvolvablePlayer, DEFAULT_TURNS, IpdGame, IpdPlayer - -from .deterministic_cache import DeterministicCache -from .graph import Graph, complete_graph -from .match import IpdMatch -from .random_ import randrange - - -def fitness_proportionate_selection( - scores: List, fitness_transformation: Callable = None -) -> int: - """Randomly selects an individual proportionally to score. - - Parameters - ---------- - scores: Any sequence of real numbers - fitness_transformation: A function mapping a score to a (non-negative) float - - Returns - ------- - An index of the above list selected at random proportionally to the list - element divided by the total. - """ - if fitness_transformation is None: - csums = np.cumsum(scores) - else: - csums = np.cumsum([fitness_transformation(s) for s in scores]) - total = csums[-1] - r = random.random() * total - - for i, x in enumerate(csums): - if x >= r: - break - return i - - -class MoranProcess(object): - def __init__( - self, - players: List[IpdPlayer], - turns: int = DEFAULT_TURNS, - prob_end: float = None, - noise: float = 0, - game: IpdGame = None, - deterministic_cache: DeterministicCache = None, - mutation_rate: float = 0.0, - mode: str = "bd", - interaction_graph: Graph = None, - reproduction_graph: Graph = None, - fitness_transformation: Callable = None, - mutation_method="transition", - stop_on_fixation=True - ) -> None: - """ - An agent based Moran process class. In each round, each player plays a - IpdMatch with each other player. Players are assigned a fitness score by - their total score from all matches in the round. A player is chosen to - reproduce proportionally to fitness, possibly mutated, and is cloned. - The clone replaces a randomly chosen player. - - If the mutation_rate is 0, the population will eventually fixate on - exactly one player type. In this case a StopIteration exception is - raised and the play stops. If the mutation_rate is not zero, then the - process will iterate indefinitely, so mp.play() will never exit, and - you should use the class as an iterator instead. - - When a player mutates it chooses a random player type from the initial - population. This is not the only method yet emulates the common method - in the literature. - - It is possible to pass interaction graphs and reproduction graphs to the - Moran process. In this case, in each round, each player plays a - IpdMatch with each neighboring player according to the interaction graph. - Players are assigned a fitness score by their total score from all - matches in the round. A player is chosen to reproduce proportionally to - fitness, possibly mutated, and is cloned. The clone replaces a randomly - chosen neighboring player according to the reproduction graph. 
- - Parameters - ---------- - players - turns: - The number of turns in each pairwise interaction - prob_end : - The probability of a given turn ending a match - noise: - The background noise, if any. Randomly flips plays with probability - `noise`. - game: axelrod.IpdGame - The game object used to score matches. - deterministic_cache: - A optional prebuilt deterministic cache - mutation_rate: - The rate of mutation. Replicating players are mutated with - probability `mutation_rate` - mode: - Birth-Death (bd) or Death-Birth (db) - interaction_graph: Axelrod.graph.Graph - The graph in which the replicators are arranged - reproduction_graph: Axelrod.graph.Graph - The reproduction graph, set equal to the interaction graph if not - given - fitness_transformation: - A function mapping a score to a (non-negative) float - mutation_method: - A string indicating if the mutation method should be between original types ("transition") - or based on the player's mutation method, if present ("atomic"). - stop_on_fixation: - A bool indicating if the process should stop on fixation - """ - self.turns = turns - self.prob_end = prob_end - self.game = game - self.noise = noise - self.initial_players = players # save initial population - self.players = [] # type: List - self.populations = [] # type: List - self.set_players() - self.score_history = [] # type: List - self.winning_strategy_name = None # type: Optional[str] - self.mutation_rate = mutation_rate - self.stop_on_fixation = stop_on_fixation - m = mutation_method.lower() - if m in ["atomic", "transition"]: - self.mutation_method = m - else: - raise ValueError("Invalid mutation method {}".format(mutation_method)) - assert (mutation_rate >= 0) and (mutation_rate <= 1) - assert (noise >= 0) and (noise <= 1) - mode = mode.lower() - assert mode in ["bd", "db"] - self.mode = mode - if deterministic_cache is not None: - self.deterministic_cache = deterministic_cache - else: - self.deterministic_cache = DeterministicCache() - # Build the set of mutation targets - # Determine the number of unique types (players) - keys = set([str(p) for p in players]) - # Create a dictionary mapping each type to a set of representatives - # of the other types - d = dict() - for p in players: - d[str(p)] = p - mutation_targets = dict() - for key in sorted(keys): - mutation_targets[key] = [v for (k, v) in sorted(d.items()) if k != key] - self.mutation_targets = mutation_targets - - if interaction_graph is None: - interaction_graph = complete_graph(len(players), loops=False) - if reproduction_graph is None: - reproduction_graph = Graph( - interaction_graph.edges, directed=interaction_graph.directed - ) - reproduction_graph.add_loops() - # Check equal vertices - v1 = interaction_graph.vertices - v2 = reproduction_graph.vertices - assert list(v1) == list(v2) - self.interaction_graph = interaction_graph - self.reproduction_graph = reproduction_graph - self.fitness_transformation = fitness_transformation - # Map players to graph vertices - self.locations = sorted(interaction_graph.vertices) - self.index = dict(zip(sorted(interaction_graph.vertices), range(len(players)))) - self.fixated = self.fixation_check() - - def set_players(self) -> None: - """Copy the initial players into the first population.""" - self.players = [] - for player in self.initial_players: - player.reset() - self.players.append(player) - self.populations = [self.population_distribution()] - - def mutate(self, index: int) -> IpdPlayer: - """Mutate the player at index. 
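
Typical usage of the class, as a hypothetical sketch: it assumes MoranProcess, seed and the named strategies are exported at the package top level, which depends on how this series leaves the public API. Seeding keeps the run reproducible.

import axelrod as axl

axl.seed(0)
players = [axl.Cooperator(), axl.Defector(), axl.TitForTat()]
mp = axl.MoranProcess(players, turns=10)
populations = mp.play()          # runs until one strategy fixates
print(mp.winning_strategy_name)  # name of the strategy that took over
print(len(mp))                   # number of recorded populations
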
-
-        Parameters
-        ----------
-        index:
-            The index of the player to be mutated
-        """
-
-        if self.mutation_method == "atomic":
-            if not issubclass(self.players[index].__class__, EvolvablePlayer):
-                raise TypeError(
-                    "Player is not evolvable. Use a subclass of EvolvablePlayer."
-                )
-            return self.players[index].mutate()
-
-        # Assuming mutation_method == "transition"
-        if self.mutation_rate > 0:
-            # Choose another strategy at random from the initial population
-            r = random.random()
-            if r < self.mutation_rate:
-                s = str(self.players[index])
-                j = randrange(0, len(self.mutation_targets[s]))
-                p = self.mutation_targets[s][j]
-                return p.clone()
-        # Just clone the player
-        return self.players[index].clone()
-
-    def death(self, index: int = None) -> int:
-        """
-        Selects the player to be removed.
-
-        Note that in the birth-death case, the player that is reproducing
-        may also be replaced. However, in the death-birth case, this player
-        will be excluded from the choices.
-
-        Parameters
-        ----------
-        index:
-            The index of the player to be removed
-        """
-        if index is None:
-            # Select a player to be replaced globally
-            i = randrange(0, len(self.players))
-            # Record internally for use in _matchup_indices
-            self.dead = i
-        else:
-            # Select locally
-            # index is not None in this case
-            vertex = random.choice(
-                sorted(self.reproduction_graph.out_vertices(self.locations[index]))
-            )
-            i = self.index[vertex]
-        return i
-
-    def birth(self, index: int = None) -> int:
-        """The birth event.
-
-        Parameters
-        ----------
-        index:
-            The index of the player to be copied
-        """
-        # Compute necessary fitnesses.
-        scores = self.score_all()
-        if index is not None:
-            # Death has already occurred, so remove the dead player from the
-            # possible choices
-            scores.pop(index)
-            # Make sure to get the correct index post-pop
-            j = fitness_proportionate_selection(
-                scores, fitness_transformation=self.fitness_transformation
-            )
-            if j >= index:
-                j += 1
-        else:
-            j = fitness_proportionate_selection(
-                scores, fitness_transformation=self.fitness_transformation
-            )
-        return j
-
-    def fixation_check(self) -> bool:
-        """
-        Checks if the population is all of a single type.
-
-        Returns
-        -------
-        Boolean:
-            True if fixation has occurred (population all of a single type)
-        """
-        classes = set(str(p) for p in self.players)
-        self.fixated = False
-        if len(classes) == 1:
-            # Set the winning strategy name variable
-            self.winning_strategy_name = str(self.players[0])
-            self.fixated = True
-        return self.fixated
-
-    def __next__(self) -> object:
-        """
-        Iterate the population:
-
-        - play the round's matches
-        - choose a player proportionally to fitness (total score) to reproduce
-        - mutate, if appropriate
-        - choose a player to be replaced
-        - update the population
-
-        Returns
-        -------
-        MoranProcess:
-            Returns itself with a new population
-        """
-        # Check the exit condition, that all players are of the same type.
-        if self.stop_on_fixation and self.fixation_check():
-            raise StopIteration
-        if self.mode == "bd":
-            # Birth then death
-            j = self.birth()
-            i = self.death(j)
-        elif self.mode == "db":
-            # Death then birth
-            i = self.death()
-            self.players[i] = None
-            j = self.birth(i)
-        # Mutate and/or replace player i with clone of player j
-        self.players[i] = self.mutate(j)
-        # Record population.
-        self.populations.append(self.population_distribution())
-        return self
-
-    def _matchup_indices(self) -> Set[Tuple[int, int]]:
-        """
-        Generate the matchup pairs.
-
-        Returns
-        -------
-        indices:
-            A set of 2 tuples of matchup pairs: the collection of all players
-            who play each other.
-        """
-        indices = set()  # type: Set
-        # For death-birth we only want the neighbors of the dead node
-        # The other calculations are unnecessary
-        if self.mode == "db":
-            source = self.index[self.dead]
-            self.dead = None
-            sources = sorted(self.interaction_graph.out_vertices(source))
-        else:
-            # birth-death is global
-            sources = sorted(self.locations)
-        for i, source in enumerate(sources):
-            for target in sorted(self.interaction_graph.out_vertices(source)):
-                j = self.index[target]
-                if (self.players[i] is None) or (self.players[j] is None):
-                    continue
-                # Don't duplicate matches
-                if ((i, j) in indices) or ((j, i) in indices):
-                    continue
-                indices.add((i, j))
-        return indices
-
-    def score_all(self) -> List:
-        """Plays the next round of the process. Every player is paired up
-        against every other player and the total scores are recorded.
-
-        Returns
-        -------
-        scores:
-            List of scores for each player
-        """
-        N = len(self.players)
-        scores = [0] * N
-        for i, j in self._matchup_indices():
-            player1 = self.players[i]
-            player2 = self.players[j]
-            match = IpdMatch(
-                (player1, player2),
-                turns=self.turns,
-                prob_end=self.prob_end,
-                noise=self.noise,
-                game=self.game,
-                deterministic_cache=self.deterministic_cache,
-            )
-            match.play()
-            match_scores = match.final_score_per_turn()
-            scores[i] += match_scores[0]
-            scores[j] += match_scores[1]
-        self.score_history.append(scores)
-        return scores
-
-    def population_distribution(self) -> Counter:
-        """Returns the population distribution of the last iteration.
-
-        Returns
-        -------
-        counter:
-            The counts of each strategy in the population of the last iteration
-        """
-        player_names = [str(player) for player in self.players]
-        counter = Counter(player_names)
-        return counter
-
-    def __iter__(self) -> object:
-        """
-        Returns
-        -------
-        self
-        """
-        return self
-
-    def reset(self) -> None:
-        """Reset the process to replay."""
-        self.winning_strategy_name = None
-        self.score_history = []
-        # Reset all the players
-        self.set_players()
-
-    def play(self) -> List[Counter]:
-        """
-        Play the process out to completion. If played with mutation this will
-        not terminate.
-
-        Returns
-        -------
-        populations:
-            Returns a list of all the populations
-        """
-        if not self.stop_on_fixation or self.mutation_rate != 0:
-            raise ValueError(
-                "MoranProcess.play() will never exit if mutation_rate is "
-                "nonzero or stop_on_fixation is False. Use iteration instead."
-            )
-        while True:
-            try:
-                self.__next__()
-            except StopIteration:
-                break
-        return self.populations
-
-    def __len__(self) -> int:
-        """
-        Returns
-        -------
-        The length of the Moran process: the number of populations
-        """
-        return len(self.populations)
-
-    def populations_plot(self, ax=None):
-        """
-        Create a stackplot of the population distributions at each iteration of
-        the Moran process.
-
-        Parameters
-        ----------
-        ax: matplotlib axis
-            Allows the plot to be written to a given matplotlib axis.
-            Default is None.
- - Returns - ----------- - A matplotlib axis object - - """ - player_names = self.populations[0].keys() - if ax is None: - _, ax = plt.subplots() - else: - ax = ax - - plot_data = [] - labels = [] - for name in player_names: - labels.append(name) - values = [counter[name] for counter in self.populations] - plot_data.append(values) - domain = range(len(values)) - - ax.stackplot(domain, plot_data, labels=labels) - ax.set_title("Moran Process Population by Iteration") - ax.set_xlabel("Iteration") - ax.set_ylabel("Number of Individuals") - ax.legend() - return ax - - -class ApproximateMoranProcess(MoranProcess): - """ - A class to approximate a Moran process based - on a distribution of potential IpdMatch outcomes. - - Instead of playing the matches, the result is sampled - from a dictionary of player tuples to distribution of match outcomes - """ - - def __init__( - self, players: List[IpdPlayer], cached_outcomes: dict, mutation_rate: float = 0 - ) -> None: - """ - Parameters - ---------- - players: - cached_outcomes: - Mapping tuples of players to instances of the moran.Pdf class. - mutation_rate: - The rate of mutation. Replicating players are mutated with - probability `mutation_rate` - """ - super(ApproximateMoranProcess, self).__init__( - players, - turns=0, - noise=0, - deterministic_cache=None, - mutation_rate=mutation_rate, - ) - self.cached_outcomes = cached_outcomes - - def score_all(self) -> List: - """Plays the next round of the process. Every player is paired up - against every other player and the total scores are obtained from the - cached outcomes. - - Returns - ------- - scores: - List of scores for each player - """ - N = len(self.players) - scores = [0] * N - for i in range(N): - for j in range(i + 1, N): - player_names = tuple([str(self.players[i]), str(self.players[j])]) - - cached_score = self._get_scores_from_cache(player_names) - scores[i] += cached_score[0] - scores[j] += cached_score[1] - self.score_history.append(scores) - return scores - - def _get_scores_from_cache(self, player_names: Tuple) -> Tuple: - """ - Retrieve the scores from the players in the cache - - Parameters - ---------- - player_names: - The names of the players - - Returns - ------- - scores: - The scores of the players in that particular match - """ - try: - match_scores = self.cached_outcomes[player_names].sample() - return match_scores - except KeyError: # If players are stored in opposite order - match_scores = self.cached_outcomes[player_names[::-1]].sample() - return match_scores[::-1] diff --git a/axelrod/ipd/player.py b/axelrod/ipd/player.py deleted file mode 100644 index 6391f26e9..000000000 --- a/axelrod/ipd/player.py +++ /dev/null @@ -1,211 +0,0 @@ -import copy -import inspect -import itertools -import types -from typing import Any, Dict - -import numpy as np - -from axelrod.player import BasePlayer -from axelrod.ipd.action import Action -from axelrod.ipd.game import DefaultGame -from axelrod.ipd.history import History -from axelrod.ipd.random_ import random_flip - -C, D = Action.C, Action.D - - -def simultaneous_play(player, coplayer, noise=0): - """This pits two players against each other.""" - s1, s2 = player.strategy(coplayer), coplayer.strategy(player) - if noise: - s1 = random_flip(s1, noise) - s2 = random_flip(s2, noise) - player.update_history(s1, s2) - coplayer.update_history(s2, s1) - return s1, s2 - - -class IpdPlayer(BasePlayer): - """A class for a player in the tournament. - - This is an abstract base class, not intended to be used directly. 
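
A hypothetical sketch of the class in use; it assumes ApproximateMoranProcess is exported at the top level and uses the Pdf distribution class defined in random_.py later in this diff. Every sampled Cooperator/Defector match is forced to score (0, 5), so Defector fixates.

from collections import Counter

import axelrod as axl
from axelrod.ipd.random_ import Pdf  # defined in random_.py below

players = [axl.Cooperator(), axl.Defector()]
# Map player-name pairs to a distribution over (score, score) outcomes.
cached = {("Cooperator", "Defector"): Pdf(Counter({(0, 5): 1}))}
amp = axl.ApproximateMoranProcess(players, cached_outcomes=cached)
amp.play()
print(amp.winning_strategy_name)  # Defector
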
- """ - - name = "IpdPlayer" - classifier = {} # type: Dict[str, Any] - - def __new__(cls, *args, **kwargs): - """Caches arguments for IpdPlayer cloning.""" - obj = super().__new__(cls) - obj.init_kwargs = cls.init_params(*args, **kwargs) - return obj - - @classmethod - def init_params(cls, *args, **kwargs): - """ - Return a dictionary containing the init parameters of a strategy - (without 'self'). - Use *args and *kwargs as value if specified - and complete the rest with the default values. - """ - sig = inspect.signature(cls.__init__) - # The 'self' parameter needs to be removed or the first *args will be - # assigned to it - self_param = sig.parameters.get("self") - new_params = list(sig.parameters.values()) - new_params.remove(self_param) - sig = sig.replace(parameters=new_params) - boundargs = sig.bind_partial(*args, **kwargs) - boundargs.apply_defaults() - return boundargs.arguments - - def __init__(self): - """Initiates an empty history.""" - self._history = History() - self.classifier = copy.deepcopy(self.classifier) - self.set_match_attributes() - super().__init__() - - def __eq__(self, other): - """ - Test if two players are equal. - """ - if self.__repr__() != other.__repr__(): - return False - - for attribute in set( - list(self.__dict__.keys()) + list(other.__dict__.keys()) - ): - - value = getattr(self, attribute, None) - other_value = getattr(other, attribute, None) - - if isinstance(value, np.ndarray): - if not (np.array_equal(value, other_value)): - return False - - elif isinstance(value, types.GeneratorType) or isinstance( - value, itertools.cycle - ): - # Split the original generator so it is not touched - generator, original_value = itertools.tee(value) - other_generator, original_other_value = itertools.tee( - other_value - ) - - if isinstance(value, types.GeneratorType): - setattr(self, attribute, (ele for ele in original_value)) - setattr( - other, attribute, (ele for ele in original_other_value) - ) - else: - setattr(self, attribute, itertools.cycle(original_value)) - setattr( - other, attribute, itertools.cycle(original_other_value) - ) - - for _ in range(200): - try: - if next(generator) != next(other_generator): - return False - except StopIteration: - break - - # Code for a strange edge case where each strategy points at each - # other - elif value is other and other_value is self: - pass - else: - if value != other_value: - return False - return True - - def receive_match_attributes(self): - # Overwrite this function if your strategy needs - # to make use of match_attributes such as - # the game matrix, the number of rounds or the noise - pass - - def set_match_attributes(self, length=-1, game=None, noise=0): - if not game: - game = DefaultGame - self.match_attributes = {"length": length, "game": game, "noise": noise} - self.receive_match_attributes() - - def __repr__(self): - """The string method for the strategy. - Appends the `__init__` parameters to the strategy's name.""" - name = self.name - prefix = ": " - gen = ( - value for value in self.init_kwargs.values() if value is not None - ) - for value in gen: - try: - if issubclass(value, IpdPlayer): - value = value.name - except TypeError: - pass - name = "".join([name, prefix, str(value)]) - prefix = ", " - return name - - def __getstate__(self): - """Used for pickling. 
Override if IpdPlayer contains unpickleable attributes.""" - return self.__dict__ - - def strategy(self, opponent): - """This is a placeholder strategy.""" - raise NotImplementedError() - - def play(self, opponent, noise=0, strategy_holder=None): - """This pits two players against each other, using the passed strategy - holder, if provided.""" - if strategy_holder is None: - strategy_holder = self - return simultaneous_play(strategy_holder, opponent, noise) - - def clone(self): - """Clones the player without history, reapplying configuration - parameters as necessary.""" - - # You may be tempted to re-implement using the `copy` module - # Note that this would require a deepcopy in some cases and there may - # be significant changes required throughout the library. - # Consider overriding in special cases only if necessary - cls = self.__class__ - new_player = cls(**self.init_kwargs) - new_player.match_attributes = copy.copy(self.match_attributes) - return new_player - - def reset(self): - """Resets a player to its initial state - - This method is called at the beginning of each match (between a pair - of players) to reset a player's state to its initial starting point. - It ensures that no 'memory' of previous matches is carried forward. - """ - # This also resets the history. - self.__init__(**self.init_kwargs) - - def update_history(self, play, coplay): - self.history.append(play, coplay) - - @property - def history(self): - return self._history - - # Properties maintained for legacy API, can refactor to self.history.X - # in 5.0.0 to reduce function call overhead. - @property - def cooperations(self): - return self._history.cooperations - - @property - def defections(self): - return self._history.defections - - @property - def state_distribution(self): - return self._history.state_distribution diff --git a/axelrod/ipd/plot.py b/axelrod/ipd/plot.py deleted file mode 100644 index edc596529..000000000 --- a/axelrod/ipd/plot.py +++ /dev/null @@ -1,333 +0,0 @@ -from distutils.version import LooseVersion -from typing import List, Union - -import matplotlib -import matplotlib.pyplot as plt -import matplotlib.transforms as transforms -import pathlib -import tqdm -from numpy import arange, median, nan_to_num - -from .result_set import ResultSet -from .load_data_ import axl_filename - -titleType = List[str] -namesType = List[str] -dataType = List[List[Union[int, float]]] - - -def default_cmap(version: str = "2.0") -> str: - """Sets a default matplotlib colormap based on the version.""" - if LooseVersion(version) >= "1.5": - return "viridis" - return "YlGnBu" - - -class Plot(object): - def __init__(self, result_set: ResultSet) -> None: - self.result_set = result_set - self.num_players = self.result_set.num_players - self.players = self.result_set.players - - def _violinplot( - self, - data: dataType, - names: namesType, - title: titleType = None, - ax: matplotlib.axes.SubplotBase = None, - ) -> matplotlib.figure.Figure: - """For making violinplots.""" - - if ax is None: - _, ax = plt.subplots() - else: - ax = ax - - figure = ax.get_figure() - width = max(self.num_players / 3, 12) - height = width / 2 - spacing = 4 - positions = spacing * arange(1, self.num_players + 1, 1) - figure.set_size_inches(width, height) - ax.violinplot( - data, - positions=positions, - widths=spacing / 2, - showmedians=True, - showextrema=False, - ) - ax.set_xticks(positions) - ax.set_xticklabels(names, rotation=90) - ax.set_xlim([0, spacing * (self.num_players + 1)]) - ax.tick_params(axis="both", which="both", 
labelsize=8) - if title: - ax.set_title(title) - plt.tight_layout() - return figure - - # Box and Violin plots for mean score, score differences, wins, and match - # lengths - - @property - def _boxplot_dataset(self): - return [ - list(nan_to_num(self.result_set.normalised_scores[ir])) - for ir in self.result_set.ranking - ] - - @property - def _boxplot_xticks_locations(self): - return list(range(1, len(self.result_set.ranked_names) + 2)) - - @property - def _boxplot_xticks_labels(self): - return [str(n) for n in self.result_set.ranked_names] - - def boxplot( - self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None - ) -> matplotlib.figure.Figure: - """For the specific mean score boxplot.""" - data = self._boxplot_dataset - names = self._boxplot_xticks_labels - figure = self._violinplot(data, names, title=title, ax=ax) - return figure - - @property - def _winplot_dataset(self): - # Sort wins by median - wins = self.result_set.wins - medians = map(median, wins) - medians = sorted([(m, i) for (i, m) in enumerate(medians)], reverse=True) - # Reorder and grab names - wins = [wins[x[-1]] for x in medians] - ranked_names = [str(self.players[x[-1]]) for x in medians] - return wins, ranked_names - - def winplot( - self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None - ) -> matplotlib.figure.Figure: - """Plots the distributions for the number of wins for each strategy.""" - - data, names = self._winplot_dataset - figure = self._violinplot(data, names, title=title, ax=ax) - # Expand ylim a bit - maximum = max(max(w) for w in data) - plt.ylim(-0.5, 0.5 + maximum) - return figure - - @property - def _sd_ordering(self): - return self.result_set.ranking - - @property - def _sdv_plot_dataset(self): - ordering = self._sd_ordering - diffs = [ - [score_diff for opponent in player for score_diff in opponent] - for player in self.result_set.score_diffs - ] - # Reorder and grab names - diffs = [diffs[i] for i in ordering] - ranked_names = [str(self.players[i]) for i in ordering] - return diffs, ranked_names - - def sdvplot( - self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None - ) -> matplotlib.figure.Figure: - """Score difference violin plots to visualize the distributions of how - players attain their payoffs.""" - diffs, ranked_names = self._sdv_plot_dataset - figure = self._violinplot(diffs, ranked_names, title=title, ax=ax) - return figure - - @property - def _lengthplot_dataset(self): - match_lengths = self.result_set.match_lengths - return [ - [length for rep in match_lengths for length in rep[playeri]] - for playeri in self.result_set.ranking - ] - - def lengthplot( - self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None - ) -> matplotlib.figure.Figure: - """For the specific match length boxplot.""" - data = self._lengthplot_dataset - names = self._boxplot_xticks_labels - figure = self._violinplot(data, names, title=title, ax=ax) - return figure - - @property - def _payoff_dataset(self): - pm = self.result_set.payoff_matrix - return [ - [pm[r1][r2] for r2 in self.result_set.ranking] - for r1 in self.result_set.ranking - ] - - @property - def _pdplot_dataset(self): - # Order like the sdv_plot - ordering = self._sd_ordering - pdm = self.result_set.payoff_diffs_means - # Reorder and grab names - matrix = [[pdm[r1][r2] for r2 in ordering] for r1 in ordering] - players = self.result_set.players - ranked_names = [str(players[i]) for i in ordering] - return matrix, ranked_names - - def _payoff_heatmap( - self, - data: dataType, - names: 
namesType, - title: titleType = None, - ax: matplotlib.axes.SubplotBase = None, - ) -> matplotlib.figure.Figure: - """Generic heatmap plot""" - - if ax is None: - _, ax = plt.subplots() - else: - ax = ax - - figure = ax.get_figure() - width = max(self.num_players / 4, 12) - height = width - figure.set_size_inches(width, height) - matplotlib_version = matplotlib.__version__ - cmap = default_cmap(matplotlib_version) - mat = ax.matshow(data, cmap=cmap) - ax.set_xticks(range(self.result_set.num_players)) - ax.set_yticks(range(self.result_set.num_players)) - ax.set_xticklabels(names, rotation=90) - ax.set_yticklabels(names) - ax.tick_params(axis="both", which="both", labelsize=16) - if title: - ax.set_xlabel(title) - figure.colorbar(mat, ax=ax) - plt.tight_layout() - return figure - - def pdplot( - self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None - ) -> matplotlib.figure.Figure: - """Payoff difference heatmap to visualize the distributions of how - players attain their payoffs.""" - matrix, names = self._pdplot_dataset - return self._payoff_heatmap(matrix, names, title=title, ax=ax) - - def payoff( - self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None - ) -> matplotlib.figure.Figure: - """Payoff heatmap to visualize the distributions of how - players attain their payoffs.""" - data = self._payoff_dataset - names = self.result_set.ranked_names - return self._payoff_heatmap(data, names, title=title, ax=ax) - - # Ecological Plot - - def stackplot( - self, - eco, - title: titleType = None, - logscale: bool = True, - ax: matplotlib.axes.SubplotBase = None, - ) -> matplotlib.figure.Figure: - - populations = eco.population_sizes - - if ax is None: - _, ax = plt.subplots() - else: - ax = ax - - figure = ax.get_figure() - turns = range(len(populations)) - pops = [ - [populations[iturn][ir] for iturn in turns] - for ir in self.result_set.ranking - ] - ax.stackplot(turns, *pops) - - ax.yaxis.tick_left() - ax.yaxis.set_label_position("right") - ax.yaxis.labelpad = 25.0 - - ax.set_ylim([0.0, 1.0]) - ax.set_ylabel("Relative population size") - ax.set_xlabel("Turn") - if title is not None: - ax.set_title(title) - - trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) - ticks = [] - for i, n in enumerate(self.result_set.ranked_names): - x = -0.01 - y = (i + 0.5) * 1 / self.result_set.num_players - ax.annotate( - n, - xy=(x, y), - xycoords=trans, - clip_on=False, - va="center", - ha="right", - fontsize=5, - ) - ticks.append(y) - ax.set_yticks(ticks) - ax.tick_params(direction="out") - ax.set_yticklabels([]) - - if logscale: - ax.set_xscale("log") - - plt.tight_layout() - return figure - - def save_all_plots( - self, - prefix: str = "axelrod", - title_prefix: str = "axelrod", - filetype: str = "svg", - progress_bar: bool = True, - ) -> None: - """ - A method to save all plots to file. - - Parameters - ---------- - - prefix : str - A prefix for the file name. This can include the directory. - Default: axelrod. - title_prefix : str - A prefix for the title of the plots (appears on the graphic). - Default: axelrod. - filetype : str - A string for the filetype to save files to: pdf, png, svg, - etc... 
- progress_bar : bool - Whether or not to create a progress bar which will be updated - """ - plots = [ - ("boxplot", "Payoff"), - ("payoff", "Payoff"), - ("winplot", "Wins"), - ("sdvplot", "Payoff differences"), - ("pdplot", "Payoff differences"), - ("lengthplot", "Length of Matches"), - ] - - if progress_bar: - total = len(plots) # Total number of plots - pbar = tqdm.tqdm(total=total, desc="Obtaining plots") - - for method, name in plots: - f = getattr(self, method)(title="{} - {}".format(title_prefix, name)) - path = pathlib.Path("{}_{}.{}".format(prefix, method, filetype)) - f.savefig(axl_filename(path)) - plt.close(f) - - if progress_bar: - pbar.update() diff --git a/axelrod/ipd/random_.py b/axelrod/ipd/random_.py deleted file mode 100644 index e3ef91e8d..000000000 --- a/axelrod/ipd/random_.py +++ /dev/null @@ -1,95 +0,0 @@ -import random - -import numpy as np -from numpy.random import choice - -from axelrod.ipd.action import Action - -C, D = Action.C, Action.D - - -def seed(seed_): - """Sets a seed""" - random.seed(seed_) - np.random.seed(seed_) - - -def random_choice(p: float = 0.5) -> Action: - """ - Return C with probability `p`, else return D - - No random sample is carried out if p is 0 or 1. - - Parameters - ---------- - p : float - The probability of picking C - - Returns - ------- - axelrod.Action - """ - if p == 0: - return D - - if p == 1: - return C - - r = random.random() - if r < p: - return C - return D - - -def random_flip(action: Action, threshold: float) -> Action: - """ - Return flipped action with probability `threshold` - - No random sample is carried out if threshold is 0 or 1. - - Parameters - ---------- - action: - The action to flip or not - threshold : float - The probability of flipping action - - Returns - ------- - axelrod.Action - """ - if random_choice(threshold) == C: - return action.flip() - return action - - -def randrange(a: int, b: int) -> int: - """Python 2 / 3 compatible randrange. Returns a random integer uniformly - between a and b (inclusive)""" - c = b - a - r = c * random.random() - return a + int(r) - - -def random_vector(size): - """Create a random vector of values in [0, 1] that sums to 1.""" - vector = np.random.random(size) - return vector / np.sum(vector) - - -class Pdf(object): - """A class for a probability distribution""" - - def __init__(self, counter): - """Take as an instance of collections.counter""" - self.sample_space, self.counts = zip(*counter.items()) - self.size = len(self.sample_space) - self.total = sum(self.counts) - self.probability = list([v / self.total for v in self.counts]) - - def sample(self): - """Sample from the pdf""" - index = choice(a=range(self.size), p=self.probability) - # Numpy cannot sample from a list of n dimensional objects for n > 1, - # need to sample an index. 
-        return self.sample_space[index]
diff --git a/axelrod/ipd/result_set.py b/axelrod/ipd/result_set.py
deleted file mode 100644
index 319784a69..000000000
--- a/axelrod/ipd/result_set.py
+++ /dev/null
@@ -1,788 +0,0 @@
-from collections import Counter, namedtuple
-import csv
-import itertools
-from multiprocessing import cpu_count
-from typing import List
-import warnings
-
-import numpy as np
-import tqdm
-from axelrod.ipd.action import Action
-
-import dask as da
-import dask.dataframe as dd
-
-from axelrod import eigen
-
-C, D = Action.C, Action.D
-
-
-def update_progress_bar(method):
-    """A decorator to update a progress bar if it exists"""
-
-    def wrapper(*args, **kwargs):
-        """Run the method and update the progress bar if it exists"""
-        output = method(*args, **kwargs)
-
-        try:
-            args[0].progress_bar.update(1)
-        except AttributeError:
-            pass
-
-        return output
-
-    return wrapper
-
-
-class ResultSet:
-    """
-    A class to hold the results of a tournament. Reads in a CSV file produced
-    by the tournament class.
-    """
-
-    def __init__(
-        self, filename, players, repetitions, processes=None, progress_bar=True
-    ):
-        """
-        Parameters
-        ----------
-        filename : string
-            the file from which to read the interactions
-        players : list
-            A list of the names of players. If not known, will be efficiently
-            read from file.
-        repetitions : int
-            The number of repetitions of each match. If not known, will be
-            efficiently read from file.
-        processes : integer
-            The number of processes to be used for parallel processing
-        progress_bar : boolean
-            Whether a progress bar will be shown.
-        """
-        self.filename = filename
-        self.players, self.repetitions = players, repetitions
-        self.num_players = len(self.players)
-
-        if progress_bar:
-            self.progress_bar = tqdm.tqdm(total=25, desc="Analysing")
-
-        df = dd.read_csv(filename)
-        dask_tasks = self._build_tasks(df)
-
-        if processes == 0:
-            processes = cpu_count()
-
-        out = self._compute_tasks(tasks=dask_tasks, processes=processes)
-
-        self._reshape_out(*out)
-
-        if progress_bar:
-            self.progress_bar.close()
-
-    def _reshape_out(
-        self,
-        mean_per_reps_player_opponent_df,
-        sum_per_player_opponent_df,
-        sum_per_player_repetition_df,
-        normalised_scores_series,
-        initial_cooperation_count_series,
-        interactions_count_series,
-    ):
-        """
-        Reshape the various pandas series objects to be of the required form and
-        set the corresponding attributes.
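
A hypothetical end-to-end sketch (the exact top-level names depend on where this series leaves the public API): write a tournament's interactions to CSV, then rebuild the results from that file with the class above.

import axelrod as axl

players = [axl.Cooperator(), axl.Defector(), axl.TitForTat()]
tournament = axl.IpdTournament(players, turns=10, repetitions=2)
results = tournament.play(filename="interactions.csv")

# The same analysis can be recomputed later straight from the CSV.
results = axl.ResultSet("interactions.csv", [str(p) for p in players], repetitions=2)
print(results.ranked_names)
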
- """ - - self.payoffs = self._reshape_three_dim_list( - mean_per_reps_player_opponent_df["Score per turn"], - first_dimension=range(self.num_players), - second_dimension=range(self.num_players), - third_dimension=range(self.repetitions), - key_order=[2, 0, 1], - ) - - self.score_diffs = self._reshape_three_dim_list( - mean_per_reps_player_opponent_df["Score difference per turn"], - first_dimension=range(self.num_players), - second_dimension=range(self.num_players), - third_dimension=range(self.repetitions), - key_order=[2, 0, 1], - alternative=0, - ) - - self.match_lengths = self._reshape_three_dim_list( - mean_per_reps_player_opponent_df["Turns"], - first_dimension=range(self.repetitions), - second_dimension=range(self.num_players), - third_dimension=range(self.num_players), - alternative=0, - ) - - self.wins = self._reshape_two_dim_list(sum_per_player_repetition_df["Win"]) - self.scores = self._reshape_two_dim_list(sum_per_player_repetition_df["Score"]) - self.normalised_scores = self._reshape_two_dim_list(normalised_scores_series) - - self.cooperation = self._build_cooperation( - sum_per_player_opponent_df["Cooperation count"] - ) - self.good_partner_matrix = self._build_good_partner_matrix( - sum_per_player_opponent_df["Good partner"] - ) - - columns = ["CC count", "CD count", "DC count", "DD count"] - self.state_distribution = self._build_state_distribution( - sum_per_player_opponent_df[columns] - ) - self.normalised_state_distribution = self._build_normalised_state_distribution() - - columns = [ - "CC to C count", - "CC to D count", - "CD to C count", - "CD to D count", - "DC to C count", - "DC to D count", - "DD to C count", - "DD to D count", - ] - self.state_to_action_distribution = self._build_state_to_action_distribution( - sum_per_player_opponent_df[columns] - ) - self.normalised_state_to_action_distribution = ( - self._build_normalised_state_to_action_distribution() - ) - - self.initial_cooperation_count = self._build_initial_cooperation_count( - initial_cooperation_count_series - ) - self.initial_cooperation_rate = self._build_initial_cooperation_rate( - interactions_count_series - ) - self.good_partner_rating = self._build_good_partner_rating( - interactions_count_series - ) - - self.normalised_cooperation = self._build_normalised_cooperation() - self.ranking = self._build_ranking() - self.ranked_names = self._build_ranked_names() - - self.payoff_matrix = self._build_summary_matrix(self.payoffs) - self.payoff_stddevs = self._build_summary_matrix(self.payoffs, func=np.std) - - self.payoff_diffs_means = self._build_payoff_diffs_means() - self.cooperating_rating = self._build_cooperating_rating() - self.vengeful_cooperation = self._build_vengeful_cooperation() - self.eigenjesus_rating = self._build_eigenjesus_rating() - self.eigenmoses_rating = self._build_eigenmoses_rating() - - @update_progress_bar - def _reshape_three_dim_list( - self, - series, - first_dimension, - second_dimension, - third_dimension, - alternative=None, - key_order=[0, 1, 2], - ): - """ - Parameters - ---------- - series : pandas.Series - first_dimension : iterable - second_dimension : iterable - third_dimension : iterable - alternative : int - What to do if there is no entry at given position - key_order : list - Indices re-ording the dimensions to the correct keys in the - series - - Returns: - -------- - A three dimensional list across the three dimensions - """ - series_dict = series.to_dict() - output = [] - for first_index in first_dimension: - matrix = [] - for second_index in second_dimension: - 
row = [] - for third_index in third_dimension: - key = (first_index, second_index, third_index) - key = tuple([key[order] for order in key_order]) - if key in series_dict: - row.append(series_dict[key]) - elif alternative is not None: - row.append(alternative) - matrix.append(row) - output.append(matrix) - return output - - @update_progress_bar - def _reshape_two_dim_list(self, series): - """ - Parameters - ---------- - series : pandas.Series - - Returns: - -------- - A two dimensional list across repetitions and opponents - """ - series_dict = series.to_dict() - out = [ - [ - series_dict.get((player_index, repetition), 0) - for repetition in range(self.repetitions) - ] - for player_index in range(self.num_players) - ] - return out - - @update_progress_bar - def _build_cooperation(self, cooperation_series): - cooperation_dict = cooperation_series.to_dict() - cooperation = [] - for player_index in range(self.num_players): - row = [] - for opponent_index in range(self.num_players): - count = cooperation_dict.get((player_index, opponent_index), 0) - if player_index == opponent_index: - # Address double count - count = int(count / 2) - row.append(count) - cooperation.append(row) - return cooperation - - @update_progress_bar - def _build_good_partner_matrix(self, good_partner_series): - good_partner_dict = good_partner_series.to_dict() - good_partner_matrix = [] - for player_index in range(self.num_players): - row = [] - for opponent_index in range(self.num_players): - if player_index == opponent_index: - # The reduce operation implies a double count of self - # interactions. - row.append(0) - else: - row.append(good_partner_dict.get((player_index, opponent_index), 0)) - good_partner_matrix.append(row) - return good_partner_matrix - - @update_progress_bar - def _build_summary_matrix(self, attribute, func=np.mean): - matrix = [ - [0 for opponent_index in range(self.num_players)] - for player_index in range(self.num_players) - ] - - pairs = itertools.product(range(self.num_players), repeat=2) - - for player_index, opponent_index in pairs: - utilities = attribute[player_index][opponent_index] - if utilities: - matrix[player_index][opponent_index] = func(utilities) - - return matrix - - @update_progress_bar - def _build_payoff_diffs_means(self): - payoff_diffs_means = [ - [np.mean(diff) for diff in player] for player in self.score_diffs - ] - - return payoff_diffs_means - - @update_progress_bar - def _build_state_distribution(self, state_distribution_series): - state_key_map = { - "CC count": (C, C), - "CD count": (C, D), - "DC count": (D, C), - "DD count": (D, D), - } - state_distribution = [ - [ - create_counter_dict( - state_distribution_series, - player_index, - opponent_index, - state_key_map, - ) - for opponent_index in range(self.num_players) - ] - for player_index in range(self.num_players) - ] - return state_distribution - - @update_progress_bar - def _build_normalised_state_distribution(self): - """ - Returns: - -------- - norm : list - - Normalised state distribution. A list of lists of counter objects: - - Dictionary where the keys are the states and the values are a - normalized counts of the number of times that state occurs. 
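-
- For instance, a player whose interactions with one opponent were split
- evenly between the states (C, C) and (C, D) would have the entry
- Counter({(C, C): 0.5, (C, D): 0.5}).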
- """ - normalised_state_distribution = [] - for player in self.state_distribution: - counters = [] - for counter in player: - total = sum(counter.values()) - counters.append( - Counter({key: value / total for key, value in counter.items()}) - ) - normalised_state_distribution.append(counters) - return normalised_state_distribution - - @update_progress_bar - def _build_state_to_action_distribution(self, state_to_action_distribution_series): - state_to_action_key_map = { - "CC to C count": ((C, C), C), - "CC to D count": ((C, C), D), - "CD to C count": ((C, D), C), - "CD to D count": ((C, D), D), - "DC to C count": ((D, C), C), - "DC to D count": ((D, C), D), - "DD to C count": ((D, D), C), - "DD to D count": ((D, D), D), - } - state_to_action_distribution = [ - [ - create_counter_dict( - state_to_action_distribution_series, - player_index, - opponent_index, - state_to_action_key_map, - ) - for opponent_index in range(self.num_players) - ] - for player_index in range(self.num_players) - ] - return state_to_action_distribution - - @update_progress_bar - def _build_normalised_state_to_action_distribution(self): - """ - Returns: - -------- - norm : list - - A list of lists of counter objects. - - Dictionary where the keys are the states and the values are a - normalized counts of the number of times that state goes to a given - action. - """ - normalised_state_to_action_distribution = [] - for player in self.state_to_action_distribution: - counters = [] - for counter in player: - norm_counter = Counter() - for state in [(C, C), (C, D), (D, C), (D, D)]: - total = counter[(state, C)] + counter[(state, D)] - if total > 0: - for action in [C, D]: - if counter[(state, action)] > 0: - norm_counter[(state, action)] = ( - counter[(state, action)] / total - ) - counters.append(norm_counter) - normalised_state_to_action_distribution.append(counters) - return normalised_state_to_action_distribution - - @update_progress_bar - def _build_initial_cooperation_count(self, initial_cooperation_count_series): - initial_cooperation_count_dict = initial_cooperation_count_series.to_dict() - initial_cooperation_count = [ - initial_cooperation_count_dict.get(player_index, 0) - for player_index in range(self.num_players) - ] - return initial_cooperation_count - - @update_progress_bar - def _build_normalised_cooperation(self): - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - normalised_cooperation = [ - list(np.nan_to_num(row)) - for row in np.array(self.cooperation) - / sum(map(np.array, self.match_lengths)) - ] - return normalised_cooperation - - @update_progress_bar - def _build_initial_cooperation_rate(self, interactions_series): - interactions_array = np.array( - [ - interactions_series.get(player_index, 0) - for player_index in range(self.num_players) - ] - ) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - initial_cooperation_rate = list( - np.nan_to_num( - np.array(self.initial_cooperation_count) / interactions_array - ) - ) - return initial_cooperation_rate - - @update_progress_bar - def _build_ranking(self): - ranking = sorted( - range(self.num_players), - key=lambda i: -np.nanmedian(self.normalised_scores[i]), - ) - return ranking - - @update_progress_bar - def _build_ranked_names(self): - ranked_names = [str(self.players[i]) for i in self.ranking] - return ranked_names - - @update_progress_bar - def _build_eigenmoses_rating(self): - """ - Returns: - -------- - The eigenmoses rating as defined in: - http://www.scottaaronson.com/morality.pdf - """ - eigenvector, 
eigenvalue = eigen.principal_eigenvector(self.vengeful_cooperation) - - return eigenvector.tolist() - - @update_progress_bar - def _build_eigenjesus_rating(self): - """ - Returns: - -------- - The eigenjesus rating as defined in: - http://www.scottaaronson.com/morality.pdf - """ - eigenvector, eigenvalue = eigen.principal_eigenvector( - self.normalised_cooperation - ) - - return eigenvector.tolist() - - @update_progress_bar - def _build_cooperating_rating(self): - """ - Returns: - -------- - The list of cooperation ratings - List of the form: - - [ML1, ML2, ML3..., MLn] - - Where n is the number of players and MLi is a list of the form: - - [pi1, pi2, pi3, ..., pim] - - Where pij is the total number of cooperations divided by the total - number of turns over all repetitions played by player i against - player j. - """ - - plist = list(range(self.num_players)) - total_length_v_opponent = [ - zip(*[rep[player_index] for rep in self.match_lengths]) - for player_index in plist - ] - lengths = [ - [sum(e) for j, e in enumerate(row) if i != j] - for i, row in enumerate(total_length_v_opponent) - ] - - cooperation = [ - [col for j, col in enumerate(row) if i != j] - for i, row in enumerate(self.cooperation) - ] - # Max is to deal with edge cases of matches that have no turns - cooperating_rating = [ - sum(cs) / max(1, sum(ls)) for cs, ls in zip(cooperation, lengths) - ] - return cooperating_rating - - @update_progress_bar - def _build_vengeful_cooperation(self): - """ - Returns: - -------- - The vengeful cooperation matrix derived from the - normalised cooperation matrix: - - Dij = 2(Cij - 0.5) - """ - vengeful_cooperation = [ - [2 * (element - 0.5) for element in row] - for row in self.normalised_cooperation - ] - return vengeful_cooperation - - @update_progress_bar - def _build_good_partner_rating(self, interactions_series): - """ - At the end of a read of the data, build the good partner rating - attribute - """ - interactions_dict = interactions_series.to_dict() - good_partner_rating = [ - sum(self.good_partner_matrix[player]) - / max(1, interactions_dict.get(player, 0)) - for player in range(self.num_players) - ] - return good_partner_rating - - def _compute_tasks(self, tasks, processes): - """ - Compute all dask tasks - """ - if processes is None: - out = da.compute(*tasks, scheduler="single-threaded") - else: - out = da.compute(*tasks, num_workers=processes) - return out - - def _build_tasks(self, df): - """ - Returns a tuple of dask tasks - """ - groups = ["Repetition", "Player index", "Opponent index"] - columns = ["Turns", "Score per turn", "Score difference per turn"] - mean_per_reps_player_opponent_task = df.groupby(groups)[columns].mean() - - groups = ["Player index", "Opponent index"] - columns = [ - "Cooperation count", - "CC count", - "CD count", - "DC count", - "DD count", - "CC to C count", - "CC to D count", - "CD to C count", - "CD to D count", - "DC to C count", - "DC to D count", - "DD to C count", - "DD to D count", - "Good partner", - ] - sum_per_player_opponent_task = df.groupby(groups)[columns].sum() - - ignore_self_interactions_task = df["Player index"] != df["Opponent index"] - adf = df[ignore_self_interactions_task] - - groups = ["Player index", "Repetition"] - columns = ["Win", "Score"] - sum_per_player_repetition_task = adf.groupby(groups)[columns].sum() - - groups = ["Player index", "Repetition"] - column = "Score per turn" - normalised_scores_task = adf.groupby(groups)[column].mean() - - groups = ["Player index"] - column = "Initial cooperation" - 
initial_cooperation_count_task = adf.groupby(groups)[column].sum() - interactions_count_task = adf.groupby("Player index")["Player index"].count() - - return ( - mean_per_reps_player_opponent_task, - sum_per_player_opponent_task, - sum_per_player_repetition_task, - normalised_scores_task, - initial_cooperation_count_task, - interactions_count_task, - ) - - def __eq__(self, other): - """ - Check equality of results set - - Parameters - ---------- - other : axelrod.ResultSet - Another results set against which to check equality - """ - - def list_equal_with_nans(v1: List[float], v2: List[float]) -> bool: - """Matches lists, accounting for NaNs.""" - if len(v1) != len(v2): - return False - for i1, i2 in zip(v1, v2): - if np.isnan(i1) and np.isnan(i2): - continue - if i1 != i2: - return False - return True - - return all( - [ - self.wins == other.wins, - self.match_lengths == other.match_lengths, - self.scores == other.scores, - self.normalised_scores == other.normalised_scores, - self.ranking == other.ranking, - self.ranked_names == other.ranked_names, - self.payoffs == other.payoffs, - self.payoff_matrix == other.payoff_matrix, - self.payoff_stddevs == other.payoff_stddevs, - self.score_diffs == other.score_diffs, - self.payoff_diffs_means == other.payoff_diffs_means, - self.cooperation == other.cooperation, - self.normalised_cooperation == other.normalised_cooperation, - self.vengeful_cooperation == other.vengeful_cooperation, - self.cooperating_rating == other.cooperating_rating, - self.good_partner_matrix == other.good_partner_matrix, - self.good_partner_rating == other.good_partner_rating, - list_equal_with_nans(self.eigenmoses_rating, other.eigenmoses_rating), - list_equal_with_nans(self.eigenjesus_rating, other.eigenjesus_rating), - ] - ) - - def __ne__(self, other): - """ - Check inequality of results set - - Parameters - ---------- - other : axelrod.ResultSet - Another results set against which to check inequality - """ - return not self.__eq__(other) - - def summarise(self): - """ - Obtain summary of performance of each strategy: - ordered by rank, including median normalised score and cooperation - rating. - - Output - ------ - A list of the form: - - [[player name, median score, cooperation_rating],...] 
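-
- Each entry is a namedtuple which also records wins, the initial
- cooperation rate and the per-state cooperation rates written out by
- write_summary below.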
- - """ - - median_scores = map(np.nanmedian, self.normalised_scores) - median_wins = map(np.nanmedian, self.wins) - - self.player = namedtuple( - "IpdPlayer", - [ - "Rank", - "Name", - "Median_score", - "Cooperation_rating", - "Wins", - "Initial_C_rate", - "CC_rate", - "CD_rate", - "DC_rate", - "DD_rate", - "CC_to_C_rate", - "CD_to_C_rate", - "DC_to_C_rate", - "DD_to_C_rate", - ], - ) - - states = [(C, C), (C, D), (D, C), (D, D)] - state_prob = [] - for i, player in enumerate(self.normalised_state_distribution): - counts = [] - for state in states: - p = sum([opp[state] for j, opp in enumerate(player) if i != j]) - counts.append(p) - try: - counts = [c / sum(counts) for c in counts] - except ZeroDivisionError: - counts = [0 for c in counts] - state_prob.append(counts) - - state_to_C_prob = [] - for player in self.normalised_state_to_action_distribution: - rates = [] - for state in states: - counts = [ - counter[(state, C)] for counter in player if counter[(state, C)] > 0 - ] - - if len(counts) > 0: - rate = np.mean(counts) - else: - rate = 0 - - rates.append(rate) - state_to_C_prob.append(rates) - - summary_measures = list( - zip( - self.players, - median_scores, - self.cooperating_rating, - median_wins, - self.initial_cooperation_rate, - ) - ) - - summary_data = [] - for rank, i in enumerate(self.ranking): - data = list(summary_measures[i]) + state_prob[i] + state_to_C_prob[i] - summary_data.append(self.player(rank, *data)) - - return summary_data - - def write_summary(self, filename): - """ - Write a csv file containing summary data of the results of the form: - - "Rank", "Name", "Median-score-per-turn", "Cooperation-rating", "Initial_C_Rate", "Wins", "CC-Rate", "CD-Rate", "DC-Rate", "DD-rate","CC-to-C-Rate", "CD-to-C-Rate", "DC-to-C-Rate", "DD-to-C-rate" - - - Parameters - ---------- - filename : a filepath to which to write the data - """ - summary_data = self.summarise() - with open(filename, "w") as csvfile: - writer = csv.writer(csvfile, lineterminator="\n") - writer.writerow(self.player._fields) - for player in summary_data: - writer.writerow(player) - - -def create_counter_dict(df, player_index, opponent_index, key_map): - """ - Create a Counter object mapping states (corresponding to columns of df) for - players given by player_index, opponent_index. Renaming the variables with - `key_map`. Used by `ResultSet._reshape_out` - - Parameters - ---------- - df : a multiindex pandas df - player_index: int - opponent_index: int - key_map : a dict - maps cols of df to strings - - Returns - ------- - A counter dictionary - """ - counter = Counter() - if player_index != opponent_index: - if (player_index, opponent_index) in df.index: - for key, value in df.loc[player_index, opponent_index].items(): - if value > 0: - counter[key_map[key]] = value - return counter diff --git a/axelrod/ipd/strategies/__init__.py b/axelrod/ipd/strategies/__init__.py deleted file mode 100644 index cc42e0a3a..000000000 --- a/axelrod/ipd/strategies/__init__.py +++ /dev/null @@ -1,133 +0,0 @@ -from axelrod.ipd.classifier import Classifiers -from ._strategies import * -from ._filters import passes_filterset - -# `from ._strategies import *` import the collection `strategies` -# Now import the Meta strategies. 
This cannot be done in _strategies
-# because it creates circular dependencies
-
-from .meta import (
- MemoryDecay,
- MetaHunter,
- MetaHunterAggressive,
- MetaPlayer,
- MetaMajority,
- MetaMajorityMemoryOne,
- MetaMajorityFiniteMemory,
- MetaMajorityLongMemory,
- MetaMinority,
- MetaMixer,
- MetaWinner,
- MetaWinnerDeterministic,
- MetaWinnerEnsemble,
- MetaWinnerMemoryOne,
- MetaWinnerFiniteMemory,
- MetaWinnerLongMemory,
- MetaWinnerStochastic,
- NMWEDeterministic,
- NMWEFiniteMemory,
- NMWELongMemory,
- NMWEMemoryOne,
- NMWEStochastic,
- NiceMetaWinner,
- NiceMetaWinnerEnsemble,
-)
-
-all_strategies += [
- MemoryDecay,
- MetaHunter,
- MetaHunterAggressive,
- MetaMajority,
- MetaMajorityMemoryOne,
- MetaMajorityFiniteMemory,
- MetaMajorityLongMemory,
- MetaMinority,
- MetaMixer,
- MetaWinner,
- MetaWinnerDeterministic,
- MetaWinnerEnsemble,
- MetaWinnerMemoryOne,
- MetaWinnerFiniteMemory,
- MetaWinnerLongMemory,
- MetaWinnerStochastic,
- NMWEDeterministic,
- NMWEFiniteMemory,
- NMWELongMemory,
- NMWEMemoryOne,
- NMWEStochastic,
- NiceMetaWinner,
- NiceMetaWinnerEnsemble,
-]
-
-
-# Distinguished strategy collections in addition to
-# `all_strategies` from _strategies.py
-demo_strategies = [Cooperator, Defector, TitForTat, Grudger, Random]
-axelrod_first_strategies = [
- TitForTat,
- FirstByTidemanAndChieruzzi,
- FirstByNydegger,
- FirstByGrofman,
- FirstByShubik,
- FirstBySteinAndRapoport,
- Grudger,
- FirstByDavis,
- FirstByGraaskamp,
- FirstByDowning,
- FirstByFeld,
- FirstByJoss,
- FirstByTullock,
- FirstByAnonymous,
- Random,
-]
-basic_strategies = [s for s in all_strategies if Classifiers.is_basic(s())]
-strategies = [s for s in all_strategies if Classifiers.obey_axelrod(s())]
-
-long_run_time_strategies = [
- s for s in all_strategies if Classifiers["long_run_time"](s())
-]
-short_run_time_strategies = [
- s for s in strategies if not Classifiers["long_run_time"](s())
-]
-cheating_strategies = [s for s in all_strategies if not Classifiers.obey_axelrod(s())]
-
-ordinary_strategies = strategies # This is a legacy and will be removed
-
-
-def filtered_strategies(filterset, strategies=all_strategies):
- """
- Applies the filters defined in the given filterset dict and returns those
- strategy classes which pass all of those filters from the given list of
- strategies.
-
- e.g.
-
- For the filterset dict:
- {
- 'stochastic': True,
- 'min_memory_depth': 2
- }
-
- the function will return a list of all stochastic strategies with a
- memory_depth of 2 or more.
-
- Parameters
- ----------
- filterset : dict
- mapping filter name to criterion.
- e.g.
- {
- 'stochastic': True,
- 'min_memory_depth': 2
- }
- strategies: list
- of subclasses of IpdPlayer
-
- Returns
- -------
- list
-
- of subclasses of IpdPlayer
-
- """
- return [s for s in strategies if passes_filterset(s, filterset)]
diff --git a/axelrod/ipd/strategies/_filters.py b/axelrod/ipd/strategies/_filters.py
deleted file mode 100644
index 5a150dd9a..000000000
--- a/axelrod/ipd/strategies/_filters.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import operator
-from collections import namedtuple
-
-from axelrod import Classifiers
-
-
-def passes_operator_filter(player, classifier_key, value, operator):
- """
- Tests whether a given player passes a filter for a
- given key in its classifier dict using a given (in)equality operator.
-
- e.g.
-
- For the following strategy:
-
- class ExampleStrategy(IpdPlayer):
- classifier = {
- 'stochastic': True,
- 'inspects_source': False,
- 'memory_depth': 10,
- 'makes_use_of': ['game', 'length']
- }
-
- passes_operator_filter(ExampleStrategy(), 'memory_depth', 10, operator.eq)
-
- would test whether the 'memory_depth' entry equals 10 and return True
-
- Parameters
- ----------
- player : an instance of IpdPlayer
- classifier_key: string
- Defining which entry from the strategy's classifier dict is to be
- tested (e.g. 'memory_depth').
- value: int
- The value against which the strategy's classifier dict entry is to
- be tested.
- operator: operator.le, operator.ge or operator.eq
- Indicating whether a 'less than or equal to', 'greater than or
- equal to' or 'equal to' test should be applied.
-
- Returns
- -------
- boolean
-
- True if the value from the strategy's classifier dictionary matches
- the value and operator passed to the function.
- """
- classifier_value = Classifiers[classifier_key](player)
- return operator(classifier_value, value)
-
-
-def passes_in_list_filter(player, classifier_key, value):
- """
- Tests whether a given list of values exists in the list returned from the
- given player's classifier dict for the given classifier_key.
-
- e.g.
-
- For the following strategy:
-
- class ExampleStrategy(IpdPlayer):
- classifier = {
- 'stochastic': True,
- 'inspects_source': False,
- 'memory_depth': 10,
- 'makes_use_of': ['game', 'length']
- }
-
- passes_in_list_filter(ExampleStrategy(), 'makes_use_of', ['game'])
-
- would test whether 'game' exists in the strategy's 'makes_use_of' entry
- and return True.
-
- Parameters
- ----------
- player: a descendant class of IpdPlayer
- classifier_key: string
- Defining which entry from the strategy's classifier dict is to be
- tested (e.g. 'makes_use_of').
- value: list
- The values against which the strategy's classifier dict entry is to
- be tested.
-
- Returns
- -------
- boolean
- """
- result = True
- for entry in value:
- if entry not in Classifiers[classifier_key](player):
- result = False
- return result
-
-
-def passes_filterset(strategy, filterset):
- """
- Determines whether a given strategy meets the criteria defined in a
- dictionary of filters.
-
- e.g.
-
- For the following strategy:
-
- class ExampleStrategy(IpdPlayer):
- classifier = {
- 'stochastic': True,
- 'inspects_source': False,
- 'memory_depth': 10,
- 'makes_use_of': ['game', 'length']
- }
-
- and this filterset dict:
-
- example_filterset = {
- 'stochastic': True,
- 'memory_depth': 10
- }
-
- passes_filterset(ExampleStrategy, example_filterset)
-
- would test whether both the strategy's 'stochastic' entry is True AND
- that its 'memory_depth' equals 10 and return True.
-
- Parameters
- ----------
- strategy : a descendant class of IpdPlayer
- filterset : dict
- mapping filter name to criterion.
- e.g.
- {
- 'stochastic': True,
- 'min_memory_depth': 2
- }
-
- Returns
- -------
- boolean
-
- True if the given strategy meets all the supplied criteria in the
- filterset, otherwise false.
-
- """
- FilterFunction = namedtuple("FilterFunction", "function kwargs")
-
- # A dictionary mapping filter name (from the supplied filterset) to
- # the relevant function and arguments for that filter.
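- # For example, a filterset of {"min_memory_depth": 2} selects the
- # "min_memory_depth" entry below, so that the call made is
- # passes_operator_filter(player=strategy(), classifier_key="memory_depth",
- # value=2, operator=operator.ge).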
- filter_functions = {
- "stochastic": FilterFunction(
- function=passes_operator_filter,
- kwargs={"classifier_key": "stochastic", "operator": operator.eq},
- ),
- "long_run_time": FilterFunction(
- function=passes_operator_filter,
- kwargs={"classifier_key": "long_run_time", "operator": operator.eq},
- ),
- "manipulates_state": FilterFunction(
- function=passes_operator_filter,
- kwargs={"classifier_key": "manipulates_state", "operator": operator.eq},
- ),
- "manipulates_source": FilterFunction(
- function=passes_operator_filter,
- kwargs={"classifier_key": "manipulates_source", "operator": operator.eq},
- ),
- "inspects_source": FilterFunction(
- function=passes_operator_filter,
- kwargs={"classifier_key": "inspects_source", "operator": operator.eq},
- ),
- "memory_depth": FilterFunction(
- function=passes_operator_filter,
- kwargs={"classifier_key": "memory_depth", "operator": operator.eq},
- ),
- "min_memory_depth": FilterFunction(
- function=passes_operator_filter,
- kwargs={"classifier_key": "memory_depth", "operator": operator.ge},
- ),
- "max_memory_depth": FilterFunction(
- function=passes_operator_filter,
- kwargs={"classifier_key": "memory_depth", "operator": operator.le},
- ),
- "makes_use_of": FilterFunction(
- function=passes_in_list_filter, kwargs={"classifier_key": "makes_use_of"}
- ),
- }
-
- # A list of boolean values to record whether the strategy passed or failed
- # each of the filters in the supplied filterset.
- passes_filters = []
-
- # Loop through each of the entries in the filter_functions dict and, if
- # that filter is defined in the supplied filterset, call the relevant
- # function and record its result in the passes_filters list.
- for _filter, filter_function in filter_functions.items():
-
- if filterset.get(_filter, None) is not None:
- kwargs = filter_function.kwargs
- kwargs["player"] = strategy()
- kwargs["value"] = filterset[_filter]
- passes_filters.append(filter_function.function(**kwargs))
-
- # Return True if the strategy passed all the supplied filters
- return all(passes_filters)
diff --git a/axelrod/ipd/strategies/_strategies.py b/axelrod/ipd/strategies/_strategies.py
deleted file mode 100644
index aa772edc5..000000000
--- a/axelrod/ipd/strategies/_strategies.py
+++ /dev/null
@@ -1,508 +0,0 @@
-"""
-This file imports all the strategies into the base namespace. Note that some
-of the imports are imports of classes that make generic classes available to
-users. In these cases the imports are done separately so that they can be
-annotated so as to avoid some static testing.
For example: - - from .memoryone import ( - GTFT, - ALLCorALLD, - FirmButFair, - SoftJoss, - StochasticCooperator, - StochasticWSLS, - WinShiftLoseStay, - WinStayLoseShift, - ) - from .memoryone import ( # pylint: disable=unused-import - ReactivePlayer, - MemoryOnePlayer - ) -""" -from .adaptive import Adaptive -from .adaptor import AdaptorBrief, AdaptorLong -from .alternator import Alternator -from .ann import EvolvedANN, EvolvedANN5, EvolvedANNNoise05 -from .ann import ANN, EvolvableANN # pylint: disable=unused-import -from .apavlov import APavlov2006, APavlov2011 -from .appeaser import Appeaser -from .averagecopier import AverageCopier, NiceAverageCopier -from .axelrod_first import ( - FirstByDavis, - FirstByFeld, - FirstByGraaskamp, - FirstByGrofman, - FirstByJoss, - FirstByNydegger, - FirstByDowning, - FirstByShubik, - FirstBySteinAndRapoport, - FirstByTidemanAndChieruzzi, - FirstByTullock, - FirstByAnonymous, -) -from .axelrod_second import ( - SecondByAppold, - SecondByBlack, - SecondByBorufsen, - SecondByCave, - SecondByChampion, - SecondByColbert, - SecondByEatherley, - SecondByGetzler, - SecondByGladstein, - SecondByGraaskampKatzen, - SecondByHarrington, - SecondByKluepfel, - SecondByLeyvraz, - SecondByMikkelson, - SecondByGrofman, - SecondByTidemanAndChieruzzi, - SecondByRichardHufford, - SecondByRowsam, - SecondByTester, - SecondByTranquilizer, - SecondByWeiner, - SecondByWhite, - SecondByWmAdams, - SecondByYamachi, -) -from .backstabber import BackStabber, DoubleCrosser -from .better_and_better import BetterAndBetter -from .bush_mosteller import BushMosteller -from .calculator import Calculator -from .cooperator import Cooperator, TrickyCooperator -from .cycler import ( - AntiCycler, - CyclerCCCCCD, - CyclerCCCD, - CyclerCCCDCD, - CyclerCCD, - CyclerDC, - CyclerDDC, -) -from .cycler import Cycler, EvolvableCycler # pylint: disable=unused-import -from .darwin import Darwin -from .dbs import DBS -from .defector import Defector, TrickyDefector -from .doubler import Doubler -from .finite_state_machines import ( - TF1, - TF2, - TF3, - EvolvedFSM4, - EvolvedFSM16, - EvolvedFSM16Noise05, - Fortress3, - Fortress4, - Predator, - Pun1, - Raider, - Ripoff, - UsuallyCooperates, - UsuallyDefects, - SolutionB1, - SolutionB5, - Thumper, -) -from .finite_state_machines import ( # pylint: disable=unused-import - SimpleFSM, - EvolvableFSMPlayer, - FSMPlayer, -) -from .forgiver import Forgiver, ForgivingTitForTat -from .gambler import ( - PSOGambler1_1_1, - PSOGambler2_2_2, - PSOGambler2_2_2_Noise05, - PSOGamblerMem1, - ZDMem2, -) -from .gambler import EvolvableGambler, Gambler # pylint: disable=unused-import -from .geller import Geller, GellerCooperator, GellerDefector -from .gobymajority import ( - GoByMajority, - GoByMajority5, - GoByMajority10, - GoByMajority20, - GoByMajority40, - HardGoByMajority, - HardGoByMajority5, - HardGoByMajority10, - HardGoByMajority20, - HardGoByMajority40, -) -from .gradualkiller import GradualKiller -from .grudger import ( - Aggravater, - EasyGo, - ForgetfulGrudger, - GeneralSoftGrudger, - Grudger, - GrudgerAlternator, - OppositeGrudger, - SoftGrudger, -) -from .grumpy import Grumpy -from .handshake import Handshake -from .hmm import EvolvedHMM5 -from .hmm import SimpleHMM, EvolvableHMMPlayer, HMMPlayer # pylint: disable=unused-import -from .human import Human # pylint: disable=unused-import -from .hunter import ( - AlternatorHunter, - CooperatorHunter, - CycleHunter, - DefectorHunter, - EventualCycleHunter, - MathConstantHunter, - RandomHunter, -) -from 
.inverse import Inverse -from .lookerup import ( - EvolvedLookerUp1_1_1, - EvolvedLookerUp2_2_2, - Winner12, - Winner21, -) -from .lookerup import ( # pylint: disable=unused-import - EvolvableLookerUp, - LookerUp, -) - -from .mathematicalconstants import Golden, Pi, e -from .memoryone import ( - GTFT, - ALLCorALLD, - FirmButFair, - SoftJoss, - StochasticCooperator, - StochasticWSLS, - WinShiftLoseStay, - WinStayLoseShift, -) -from .memoryone import ( # pylint: disable=unused-import - ReactivePlayer, - MemoryOnePlayer, -) - -from .memorytwo import AON2, MEM2, DelayedAON1 -from .memorytwo import MemoryTwoPlayer # pylint: disable=unused-import - -from .mindcontrol import MindBender, MindController, MindWarper -from .mindreader import MindReader, MirrorMindReader, ProtectedMindReader -from .mutual import Desperate, Hopeless, Willing -from .negation import Negation -from .oncebitten import FoolMeOnce, ForgetfulFoolMeOnce, OnceBitten -from .prober import ( - CollectiveStrategy, - Detective, - HardProber, - NaiveProber, - Prober, - Prober2, - Prober3, - Prober4, - RemorsefulProber, -) -from .punisher import ( - InversePunisher, - LevelPunisher, - Punisher, - TrickyLevelPunisher, -) -from .qlearner import ( - ArrogantQLearner, - CautiousQLearner, - HesitantQLearner, - RiskyQLearner, -) -from .rand import Random -from .resurrection import DoubleResurrection, Resurrection -from .retaliate import ( - LimitedRetaliate, - LimitedRetaliate2, - LimitedRetaliate3, - Retaliate, - Retaliate2, - Retaliate3, -) -from .revised_downing import RevisedDowning -from .selfsteem import SelfSteem -from .sequence_player import ( # pylint: disable=unused-import - SequencePlayer, - ThueMorse, - ThueMorseInverse, -) -from .shortmem import ShortMem -from .stalker import Stalker -from .titfortat import ( - AdaptiveTitForTat, - Alexei, - AntiTitForTat, - Bully, - ContriteTitForTat, - DynamicTwoTitsForTat, - EugineNier, - Gradual, - HardTitFor2Tats, - HardTitForTat, - Michaelos, - NTitsForMTats, - OmegaTFT, - OriginalGradual, - RandomTitForTat, - SlowTitForTwoTats2, - SneakyTitForTat, - SpitefulTitForTat, - SuspiciousTitForTat, - TitFor2Tats, - TitForTat, - TwoTitsForTat, -) -from .verybad import VeryBad -from .worse_and_worse import ( - KnowledgeableWorseAndWorse, - WorseAndWorse, - WorseAndWorse2, - WorseAndWorse3, -) -from .zero_determinant import ( - ZDGTFT2, - ZDExtort2, - ZDExtort2v2, - ZDExtort3, - ZDExtort4, - ZDExtortion, - ZDGen2, - ZDMischief, - ZDSet2, -) - -# Note: Meta* strategies are handled in .__init__.py - - -all_strategies = [ - ALLCorALLD, - AON2, - APavlov2006, - APavlov2011, - Adaptive, - AdaptiveTitForTat, - AdaptorBrief, - AdaptorLong, - Aggravater, - Alexei, - Alternator, - AlternatorHunter, - AntiCycler, - AntiTitForTat, - Appeaser, - ArrogantQLearner, - AverageCopier, - BackStabber, - BetterAndBetter, - Bully, - BushMosteller, - Calculator, - CautiousQLearner, - CollectiveStrategy, - ContriteTitForTat, - Cooperator, - CooperatorHunter, - CycleHunter, - CyclerCCCCCD, - CyclerCCCD, - CyclerCCCDCD, - CyclerCCD, - CyclerDC, - CyclerDDC, - DBS, - Darwin, - Defector, - DefectorHunter, - DelayedAON1, - Desperate, - Detective, - DoubleCrosser, - DoubleResurrection, - Doubler, - DynamicTwoTitsForTat, - EasyGo, - EugineNier, - EventualCycleHunter, - EvolvedANN, - EvolvedANN5, - EvolvedANNNoise05, - EvolvedFSM16, - EvolvedFSM16Noise05, - EvolvedFSM4, - EvolvedHMM5, - EvolvedLookerUp1_1_1, - EvolvedLookerUp2_2_2, - FirmButFair, - FirstByAnonymous, - FirstByDavis, - FirstByDowning, - FirstByFeld, - 
FirstByGraaskamp, - FirstByGrofman, - FirstByJoss, - FirstByNydegger, - FirstByShubik, - FirstBySteinAndRapoport, - FirstByTidemanAndChieruzzi, - FirstByTullock, - FoolMeOnce, - ForgetfulFoolMeOnce, - ForgetfulGrudger, - Forgiver, - ForgivingTitForTat, - Fortress3, - Fortress4, - GTFT, - Geller, - GellerCooperator, - GellerDefector, - GeneralSoftGrudger, - GoByMajority, - GoByMajority10, - GoByMajority20, - GoByMajority40, - GoByMajority5, - Golden, - Gradual, - GradualKiller, - Grudger, - GrudgerAlternator, - Grumpy, - Handshake, - HardGoByMajority, - HardGoByMajority10, - HardGoByMajority20, - HardGoByMajority40, - HardGoByMajority5, - HardProber, - HardTitFor2Tats, - HardTitForTat, - HesitantQLearner, - Hopeless, - Inverse, - InversePunisher, - KnowledgeableWorseAndWorse, - LevelPunisher, - LimitedRetaliate, - LimitedRetaliate2, - LimitedRetaliate3, - MEM2, - MathConstantHunter, - Michaelos, - MindBender, - MindController, - MindReader, - MindWarper, - MirrorMindReader, - NTitsForMTats, - NaiveProber, - Negation, - NiceAverageCopier, - OmegaTFT, - OnceBitten, - OppositeGrudger, - OriginalGradual, - PSOGambler1_1_1, - PSOGambler2_2_2, - PSOGambler2_2_2_Noise05, - PSOGamblerMem1, - Pi, - Predator, - Prober, - Prober2, - Prober3, - Prober4, - ProtectedMindReader, - Pun1, - Punisher, - Raider, - Random, - RandomHunter, - RandomTitForTat, - RemorsefulProber, - Resurrection, - Retaliate, - Retaliate2, - Retaliate3, - RevisedDowning, - Ripoff, - RiskyQLearner, - SecondByAppold, - SecondByBlack, - SecondByBorufsen, - SecondByCave, - SecondByChampion, - SecondByColbert, - SecondByEatherley, - SecondByGetzler, - SecondByGladstein, - SecondByGraaskampKatzen, - SecondByGrofman, - SecondByHarrington, - SecondByKluepfel, - SecondByLeyvraz, - SecondByMikkelson, - SecondByRichardHufford, - SecondByRowsam, - SecondByTester, - SecondByTidemanAndChieruzzi, - SecondByTranquilizer, - SecondByWeiner, - SecondByWhite, - SecondByWmAdams, - SecondByYamachi, - SelfSteem, - ShortMem, - SlowTitForTwoTats2, - SneakyTitForTat, - SoftGrudger, - SoftJoss, - SolutionB1, - SolutionB5, - SpitefulTitForTat, - Stalker, - StochasticCooperator, - StochasticWSLS, - SuspiciousTitForTat, - TF1, - TF2, - TF3, - ThueMorse, - ThueMorseInverse, - Thumper, - TitFor2Tats, - TitForTat, - TrickyCooperator, - TrickyDefector, - TrickyLevelPunisher, - TwoTitsForTat, - UsuallyCooperates, - UsuallyDefects, - VeryBad, - Willing, - WinShiftLoseStay, - WinStayLoseShift, - Winner12, - Winner21, - WorseAndWorse, - WorseAndWorse2, - WorseAndWorse3, - ZDExtort2, - ZDExtort2v2, - ZDExtort3, - ZDExtort4, - ZDExtortion, - ZDGTFT2, - ZDGen2, - ZDMem2, - ZDMischief, - ZDSet2, - e, -] diff --git a/axelrod/ipd/strategies/adaptive.py b/axelrod/ipd/strategies/adaptive.py deleted file mode 100644 index 33b3dd29e..000000000 --- a/axelrod/ipd/strategies/adaptive.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import List - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Adaptive(IpdPlayer): - """Start with a specific sequence of C and D, then play the strategy that - has worked best, recalculated each turn. 
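-
- For example, with the default opening sequence of six cooperations
- followed by five defections, from round twelve onwards the player
- repeats whichever action has accumulated the higher total score so far.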
- - Names: - - - Adaptive: [Li2011]_ - - """ - - name = "Adaptive" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, initial_plays: List[Action] = None) -> None: - super().__init__() - if not initial_plays: - initial_plays = [C] * 6 + [D] * 5 - self.initial_plays = initial_plays - self.scores = {C: 0, D: 0} - - def score_last_round(self, opponent: IpdPlayer): - # Load the default game if not supplied by a tournament. - game = self.match_attributes["game"] - if len(self.history): - last_round = (self.history[-1], opponent.history[-1]) - scores = game.score(last_round) - self.scores[last_round[0]] += scores[0] - - def strategy(self, opponent: IpdPlayer) -> Action: - # Update scores from the last play - self.score_last_round(opponent) - # Begin by playing the sequence C,C,C,C,C,C,D,D,D,D,D - index = len(self.history) - if index < len(self.initial_plays): - return self.initial_plays[index] - # Play the strategy with the highest average score so far - if self.scores[C] > self.scores[D]: - return C - return D diff --git a/axelrod/ipd/strategies/adaptor.py b/axelrod/ipd/strategies/adaptor.py deleted file mode 100644 index fc431d23a..000000000 --- a/axelrod/ipd/strategies/adaptor.py +++ /dev/null @@ -1,104 +0,0 @@ -from typing import Dict, Tuple - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -from numpy import heaviside - -C, D = Action.C, Action.D - - -class AbstractAdaptor(IpdPlayer): - """ - An adaptive strategy that updates an internal state based on the last - round of play. Using this state the player Cooperates with a probability - derived from the state. - - s, float: - the internal state, initially 0 - perr, float: - an error threshold for misinterpreted moves - delta, a dictionary of floats: - additive update values for s depending on the last round's outcome - - Names: - - - Adaptor: [Hauert2002]_ - - """ - - name = "AbstractAdaptor" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, delta: Dict[Tuple[Action, Action], float], - perr: float = 0.01) -> None: - super().__init__() - self.perr = perr - self.delta = delta - self.s = 0. - - def strategy(self, opponent: IpdPlayer) -> Action: - if self.history: - # Update internal state from the last play - last_round = (self.history[-1], opponent.history[-1]) - self.s += self.delta[last_round] - - # Compute probability of Cooperation - p = self.perr + (1.0 - 2 * self.perr) * ( - heaviside(self.s + 1, 1) - heaviside(self.s - 1, 1)) - # Draw action - action = random_choice(p) - return action - - -class AdaptorBrief(AbstractAdaptor): - """ - An Adaptor trained on short interactions. - - Names: - - - AdaptorBrief: [Hauert2002]_ - - """ - - name = "AdaptorBrief" - - def __init__(self) -> None: - delta = { - (C, C): 0., # R - (C, D): -1.001505, # S - (D, C): 0.992107, # T - (D, D): -0.638734 # P - } - super().__init__(delta=delta) - - -class AdaptorLong(AbstractAdaptor): - """ - An Adaptor trained on long interactions. 
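-
- With the default error threshold perr = 0.01, the update rule of
- AbstractAdaptor above cooperates with probability 0.99 while the
- internal state s lies in [-1, 1) and with probability 0.01 otherwise.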
- - Names: - - - AdaptorLong: [Hauert2002]_ - - """ - - name = "AdaptorLong" - - def __init__(self) -> None: - delta = { - (C, C): 0., # R - (C, D): 1.888159, # S - (D, C): 1.858883, # T - (D, D): -0.995703 # P - } - super().__init__(delta=delta) diff --git a/axelrod/ipd/strategies/alternator.py b/axelrod/ipd/strategies/alternator.py deleted file mode 100644 index 69099ce21..000000000 --- a/axelrod/ipd/strategies/alternator.py +++ /dev/null @@ -1,33 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Alternator(IpdPlayer): - """ - A player who alternates between cooperating and defecting. - - Names - - - Alternator: [Axelrod1984]_ - - Periodic player CD: [Mittal2009]_ - """ - - name = "Alternator" - classifier = { - "memory_depth": 1, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) == 0: - return C - if self.history[-1] == C: - return D - return C diff --git a/axelrod/ipd/strategies/ann.py b/axelrod/ipd/strategies/ann.py deleted file mode 100644 index b33711b87..000000000 --- a/axelrod/ipd/strategies/ann.py +++ /dev/null @@ -1,350 +0,0 @@ -from typing import List, Tuple -import numpy as np -import numpy.random as random -from axelrod.ipd.action import Action -from axelrod.ipd.load_data_ import load_weights -from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists -from axelrod.ipd.player import IpdPlayer - - -C, D = Action.C, Action.D -nn_weights = load_weights() - -# Neural Network and Activation functions -relu = np.vectorize(lambda x: max(x, 0)) - - -def num_weights(num_features, num_hidden): - size = num_features * num_hidden + 2 * num_hidden - return size - - -def compute_features(player: IpdPlayer, opponent: IpdPlayer) -> List[int]: - """ - Compute history features for Neural Network: - * Opponent's first move is C - * Opponent's first move is D - * Opponent's second move is C - * Opponent's second move is D - * IpdPlayer's previous move is C - * IpdPlayer's previous move is D - * IpdPlayer's second previous move is C - * IpdPlayer's second previous move is D - * Opponent's previous move is C - * Opponent's previous move is D - * Opponent's second previous move is C - * Opponent's second previous move is D - * Total opponent cooperations - * Total opponent defections - * Total player cooperations - * Total player defections - * Round number - """ - if len(opponent.history) == 0: - opponent_first_c = 0 - opponent_first_d = 0 - opponent_second_c = 0 - opponent_second_d = 0 - my_previous_c = 0 - my_previous_d = 0 - my_previous2_c = 0 - my_previous2_d = 0 - opponent_previous_c = 0 - opponent_previous_d = 0 - opponent_previous2_c = 0 - opponent_previous2_d = 0 - - elif len(opponent.history) == 1: - opponent_first_c = 1 if opponent.history[0] == C else 0 - opponent_first_d = 1 if opponent.history[0] == D else 0 - opponent_second_c = 0 - opponent_second_d = 0 - my_previous_c = 1 if player.history[-1] == C else 0 - my_previous_d = 1 if player.history[-1] == D else 0 - my_previous2_c = 0 - my_previous2_d = 0 - opponent_previous_c = 1 if opponent.history[-1] == C else 0 - opponent_previous_d = 1 if opponent.history[-1] == D else 0 - opponent_previous2_c = 0 - opponent_previous2_d = 0 - - else: - opponent_first_c = 1 if opponent.history[0] == C else 0 - opponent_first_d = 1 if 
opponent.history[0] == D else 0 - opponent_second_c = 1 if opponent.history[1] == C else 0 - opponent_second_d = 1 if opponent.history[1] == D else 0 - my_previous_c = 1 if player.history[-1] == C else 0 - my_previous_d = 1 if player.history[-1] == D else 0 - my_previous2_c = 1 if player.history[-2] == C else 0 - my_previous2_d = 1 if player.history[-2] == D else 0 - opponent_previous_c = 1 if opponent.history[-1] == C else 0 - opponent_previous_d = 1 if opponent.history[-1] == D else 0 - opponent_previous2_c = 1 if opponent.history[-2] == C else 0 - opponent_previous2_d = 1 if opponent.history[-2] == D else 0 - - # Remaining Features - total_opponent_c = opponent.cooperations - total_opponent_d = opponent.defections - total_player_c = player.cooperations - total_player_d = player.defections - - return [ - opponent_first_c, - opponent_first_d, - opponent_second_c, - opponent_second_d, - my_previous_c, - my_previous_d, - my_previous2_c, - my_previous2_d, - opponent_previous_c, - opponent_previous_d, - opponent_previous2_c, - opponent_previous2_d, - total_opponent_c, - total_opponent_d, - total_player_c, - total_player_d, - len(player.history), - ] - - -def activate( - bias: List[float], hidden: List[float], output: List[float], inputs: List[int] -) -> float: - """ - Compute the output of the neural network: - output = relu(inputs * hidden_weights + bias) * output_weights - """ - inputs = np.array(inputs) - hidden_values = bias + np.dot(hidden, inputs) - hidden_values = relu(hidden_values) - output_value = np.dot(hidden_values, output) - return output_value - - -def split_weights( - weights: List[float], num_features: int, num_hidden: int -) -> Tuple[List[List[float]], List[float], List[float]]: - """Splits the input vector into the the NN bias weights and layer - parameters.""" - # Check weights is the right length - expected_length = num_hidden * 2 + num_features * num_hidden - if expected_length != len(weights): - raise ValueError("NN weights array has an incorrect size.") - - number_of_input_to_hidden_weights = num_features * num_hidden - number_of_hidden_to_output_weights = num_hidden - - input2hidden = [] - for i in range(0, number_of_input_to_hidden_weights, num_features): - input2hidden.append(weights[i : i + num_features]) - - start = number_of_input_to_hidden_weights - end = number_of_input_to_hidden_weights + number_of_hidden_to_output_weights - - hidden2output = weights[start:end] - bias = weights[end:] - return input2hidden, hidden2output, bias - - -class ANN(IpdPlayer): - """Artificial Neural Network based strategy. 
- - A single layer neural network based strategy, with the following - features: - * Opponent's first move is C - * Opponent's first move is D - * Opponent's second move is C - * Opponent's second move is D - * IpdPlayer's previous move is C - * IpdPlayer's previous move is D - * IpdPlayer's second previous move is C - * IpdPlayer's second previous move is D - * Opponent's previous move is C - * Opponent's previous move is D - * Opponent's second previous move is C - * Opponent's second previous move is D - * Total opponent cooperations - * Total opponent defections - * Total player cooperations - * Total player defections - * Round number - - Original Source: https://gist.github.com/mojones/550b32c46a8169bb3cd89d917b73111a#file-ann-strategy-test-L60 - - - Names - - - Artificial Neural Network based strategy: Original name by Martin Jones - """ - - name = "ANN" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "inspects_source": False, - "makes_use_of": set(), - "manipulates_source": False, - "manipulates_state": False, - "long_run_time": False, - } - - def __init__( - self, num_features: int, num_hidden: int, - weights: List[float] = None - ) -> None: - super().__init__() - self.num_features = num_features - self.num_hidden = num_hidden - self._process_weights(weights, num_features, num_hidden) - - def _process_weights(self, weights, num_features, num_hidden): - self.weights = list(weights) - (i2h, h2o, bias) = split_weights(weights, num_features, num_hidden) - self.input_to_hidden_layer_weights = np.array(i2h) - self.hidden_to_output_layer_weights = np.array(h2o) - self.bias_weights = np.array(bias) - - def strategy(self, opponent: IpdPlayer) -> Action: - features = compute_features(self, opponent) - output = activate( - self.bias_weights, - self.input_to_hidden_layer_weights, - self.hidden_to_output_layer_weights, - features, - ) - if output > 0: - return C - else: - return D - - -class EvolvableANN(ANN, EvolvablePlayer): - """Evolvable version of ANN.""" - name = "EvolvableANN" - - def __init__( - self, num_features: int, num_hidden: int, - weights: List[float] = None, - mutation_probability: float = None, - mutation_distance: int = 5, - ) -> None: - num_features, num_hidden, weights, mutation_probability = self._normalize_parameters( - num_features, num_hidden, weights, mutation_probability) - ANN.__init__(self, - num_features=num_features, - num_hidden=num_hidden, - weights=weights) - EvolvablePlayer.__init__(self) - self.mutation_probability = mutation_probability - self.mutation_distance = mutation_distance - self.overwrite_init_kwargs( - num_features=num_features, - num_hidden=num_hidden, - weights=weights, - mutation_probability=mutation_probability) - - @classmethod - def _normalize_parameters(cls, num_features=None, num_hidden=None, weights=None, mutation_probability=None): - if not (num_features and num_hidden): - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableANN") - size = num_weights(num_features, num_hidden) - if not weights: - weights = [random.uniform(-1, 1) for _ in range(size)] - if mutation_probability is None: - mutation_probability = 10. 
/ size - return num_features, num_hidden, weights, mutation_probability - - @staticmethod - def mutate_weights(weights, num_features, num_hidden, mutation_probability, - mutation_distance): - size = num_weights(num_features, num_hidden) - randoms = random.random(size) - for i, r in enumerate(randoms): - if r < mutation_probability: - p = 1 + random.uniform(-1, 1) * mutation_distance - weights[i] *= p - return weights - - def mutate(self): - weights = self.mutate_weights( - self.weights, self.num_features, self.num_hidden, - self.mutation_probability, self.mutation_distance) - return self.create_new(weights=weights) - - def crossover(self, other): - if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") - weights = crossover_lists(self.weights, other.weights) - return self.create_new(weights=weights) - - -class EvolvedANN(ANN): - """ - A strategy based on a pre-trained neural network with 17 features and a - hidden layer of size 10. - - Trained using the `axelrod_dojo` version: 0.0.8 - Training data is archived at doi.org/10.5281/zenodo.1306926 - - Names: - - - Evolved ANN: Original name by Martin Jones. - """ - - name = "Evolved ANN" - - def __init__(self) -> None: - num_features, num_hidden, weights = nn_weights["Evolved ANN"] - super().__init__( - num_features=num_features, - num_hidden=num_hidden, - weights=weights) - - -class EvolvedANN5(ANN): - """ - A strategy based on a pre-trained neural network with 17 features and a - hidden layer of size 5. - - Trained using the `axelrod_dojo` version: 0.0.8 - Training data is archived at doi.org/10.5281/zenodo.1306931 - - Names: - - - Evolved ANN 5: Original name by Marc Harper. - """ - - name = "Evolved ANN 5" - - def __init__(self) -> None: - num_features, num_hidden, weights = nn_weights["Evolved ANN 5"] - super().__init__( - num_features=num_features, - num_hidden=num_hidden, - weights=weights) - - -class EvolvedANNNoise05(ANN): - """ - A strategy based on a pre-trained neural network with a hidden layer of - size 5, trained with noise=0.05. - - Trained using the `axelrod_dojo` version: 0.0.8 - Training data i archived at doi.org/10.5281/zenodo.1314247. - - Names: - - - Evolved ANN Noise 5: Original name by Marc Harper. - """ - - name = "Evolved ANN 5 Noise 05" - - def __init__(self) -> None: - num_features, num_hidden, weights = nn_weights["Evolved ANN 5 Noise 05"] - super().__init__( - num_features=num_features, - num_hidden=num_hidden, - weights=weights) - diff --git a/axelrod/ipd/strategies/apavlov.py b/axelrod/ipd/strategies/apavlov.py deleted file mode 100644 index dddcd6884..000000000 --- a/axelrod/ipd/strategies/apavlov.py +++ /dev/null @@ -1,122 +0,0 @@ -from typing import Optional - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class APavlov2006(IpdPlayer): - """ - APavlov attempts to classify its opponent as one of five strategies: - Cooperative, ALLD, STFT, PavlovD, or Random. APavlov then responds in a - manner intended to achieve mutual cooperation or to defect against - uncooperative opponents. 
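-
- For example, an opponent whose six most recent moves were all defections
- is classified as ALLD and met with defection for as long as that
- classification holds.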
- - Names: - - - Adaptive Pavlov 2006: [Li2007]_ - """ - - name = "Adaptive Pavlov 2006" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.opponent_class = None # type: Optional[str] - - def strategy(self, opponent: IpdPlayer) -> Action: - # TFT for six rounds - if len(self.history) < 6: - return D if opponent.history[-1:] == [D] else C - # Classify opponent - if len(self.history) % 6 == 0: - if opponent.history[-6:] == [C] * 6: - self.opponent_class = "Cooperative" - if opponent.history[-6:] == [D] * 6: - self.opponent_class = "ALLD" - if opponent.history[-6:] == [D, C, D, C, D, C]: - self.opponent_class = "STFT" - if opponent.history[-6:] == [D, D, C, D, D, C]: - self.opponent_class = "PavlovD" - if not self.opponent_class: - self.opponent_class = "Random" - - # Play according to classification - if self.opponent_class in ["Random", "ALLD"]: - return D - if self.opponent_class == "STFT": - if len(self.history) % 6 in [0, 1]: - return C - # TFT - if opponent.history[-1:] == [D]: - return D - if self.opponent_class == "PavlovD": - # Return D then C for the period - if len(self.history) % 6 == 0: - return D - if self.opponent_class == "Cooperative": - # TFT - if opponent.history[-1:] == [D]: - return D - return C - - -class APavlov2011(IpdPlayer): - """ - APavlov attempts to classify its opponent as one of four strategies: - Cooperative, ALLD, STFT, or Random. APavlov then responds in a manner - intended to achieve mutual cooperation or to defect against - uncooperative opponents. - - Names: - - - Adaptive Pavlov 2011: [Li2011]_ - """ - - name = "Adaptive Pavlov 2011" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.opponent_class = None # type: Optional[str] - - def strategy(self, opponent: IpdPlayer) -> Action: - # TFT for six rounds - if len(self.history) < 6: - return D if opponent.history[-1:] == [D] else C - if len(self.history) % 6 == 0: - # Classify opponent - if opponent.history[-6:] == [C] * 6: - self.opponent_class = "Cooperative" - if opponent.history[-6:].count(D) >= 4: - self.opponent_class = "ALLD" - if opponent.history[-6:].count(D) == 3: - self.opponent_class = "STFT" - if not self.opponent_class: - self.opponent_class = "Random" - # Play according to classification - if self.opponent_class in ["Random", "ALLD"]: - return D - if self.opponent_class == "STFT": - # TFTT - return D if opponent.history[-2:] == [D, D] else C - if self.opponent_class == "Cooperative": - # TFT - return D if opponent.history[-1:] == [D] else C diff --git a/axelrod/ipd/strategies/appeaser.py b/axelrod/ipd/strategies/appeaser.py deleted file mode 100644 index ee4507231..000000000 --- a/axelrod/ipd/strategies/appeaser.py +++ /dev/null @@ -1,38 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Appeaser(IpdPlayer): - """A player who tries to guess what the opponent wants. - - Switch the classifier every time the opponent plays D. - Start with C, switch between C and D when opponent plays D. 
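-
- Against an unconditional defector, for example, this yields the
- alternating sequence C, D, C, D, ...: each opposing defection flips the
- previous action.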
- - Names: - - - Appeaser: Original Name by Jochen Müller - """ - - name = "Appeaser" - classifier = { - "memory_depth": float("inf"), # Depends on internal memory. - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if not len(opponent.history): - return C - else: - if opponent.history[-1] == D: - if self.history[-1] == C: - return D - else: - return C - return self.history[-1] diff --git a/axelrod/ipd/strategies/averagecopier.py b/axelrod/ipd/strategies/averagecopier.py deleted file mode 100644 index 893214e5a..000000000 --- a/axelrod/ipd/strategies/averagecopier.py +++ /dev/null @@ -1,61 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -C, D = Action.C, Action.D - - -class AverageCopier(IpdPlayer): - """ - The player will cooperate with probability p if the opponent's cooperation - ratio is p. Starts with random decision. - - Names: - - - Average Copier: Original name by Geraint Palmer - """ - - name = "Average Copier" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(opponent.history) == 0: - # Randomly picks a strategy (not affected by history). - return random_choice(0.5) - p = opponent.cooperations / len(opponent.history) - return random_choice(p) - - -class NiceAverageCopier(IpdPlayer): - """ - Same as Average Copier, but always starts by cooperating. - - Names: - - - Average Copier: Original name by Owen Campbell - """ - - name = "Nice Average Copier" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(opponent.history) == 0: - return C - p = opponent.cooperations / len(opponent.history) - return random_choice(p) diff --git a/axelrod/ipd/strategies/axelrod_first.py b/axelrod/ipd/strategies/axelrod_first.py deleted file mode 100644 index fe9091284..000000000 --- a/axelrod/ipd/strategies/axelrod_first.py +++ /dev/null @@ -1,1026 +0,0 @@ -""" -Strategies submitted to Axelrod's first tournament. All strategies in this -module are prefixed by `FirstBy` to indicate that they were submitted in -Axelrod's First tournament by the given author. - -Note that these strategies are implemented from the descriptions presented -in: - -Axelrod, R. (1980). Effective Choice in the Prisoner’s Dilemma. -Journal of Conflict Resolution, 24(1), 3–25. - -These descriptions are not always clear and/or precise and when assumptions have -been made they are explained in the strategy docstrings. -""" - -import random -from typing import Dict, List, Tuple, Optional - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice -from axelrod.ipd.strategy_transformers import FinalTransformer -from scipy.stats import chisquare - -from .memoryone import MemoryOnePlayer - -C, D = Action.C, Action.D - - -class FirstByDavis(IpdPlayer): - """ - Submitted to Axelrod's first tournament by Morton Davis. 
- - The description written in [Axelrod1980]_ is: - - > "A player starts by cooperating for 10 rounds then plays Grudger, - > defecting if at any point the opponent has defected." - - This strategy came 8th in Axelrod's original tournament. - - Names: - - - Davis: [Axelrod1980]_ - """ - - name = "First by Davis" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, rounds_to_cooperate: int = 10) -> None: - """ - Parameters - ---------- - rounds_to_cooperate: int, 10 - The number of rounds to cooperate initially - """ - super().__init__() - self._rounds_to_cooperate = rounds_to_cooperate - - def strategy(self, opponent: IpdPlayer) -> Action: - """Begins by playing C, then plays D for the remaining rounds if the - opponent ever plays D.""" - if len(self.history) < self._rounds_to_cooperate: - return C - if opponent.defections > 0: # Implement Grudger - return D - return C - - -class FirstByDowning(IpdPlayer): - """ - Submitted to Axelrod's first tournament by Downing - - The description written in [Axelrod1980]_ is: - - > "This rule selects its choice to maximize its own longterm expected payoff on - > the assumption that the other rule cooperates with a fixed probability which - > depends only on whether the other player cooperated or defected on the previous - > move. These two probabilities estimates are continuously updated as the game - > progresses. Initially, they are both assumed to be .5, which amounts to the - > pessimistic assumption that the other player is not responsive. This rule is - > based on an outcome maximization interpretation of human performances proposed - > by Downing (1975)." - - The Downing (1975) paper is "The Prisoner's Dilemma IpdGame as a - Problem-Solving Phenomenon" [Downing1975]_ and this is used to implement the - strategy. - - There are a number of specific points in this paper, on page 371: - - > "[...] In these strategies, O's [the opponent's] response on trial N is in - some way dependent or contingent on S's [the subject's] response on trial N- - 1. All varieties of these lag-one matching strategies can be defined by two - parameters: the conditional probability that O will choose C following C by - S, P(C_o | C_s) and the conditional probability that O will choose C - following D by S, P(C_o, D_s)." - - Throughout the paper the strategy (S) assumes that the opponent (O) is - playing a reactive strategy defined by these two conditional probabilities. - - The strategy aims to maximise the long run utility against such a strategy - and the mechanism for this is described in Appendix A (more on this later). - - One final point from the main text is, on page 372: - - > "For the various lag-one matching strategies of O, the maximizing - strategies of S will be 100% C, or 100% D, or for some strategies all S - strategies will be functionally equivalent." - - This implies that the strategy S will either always cooperate or always - defect (or be indifferent) dependent on the opponent's defining - probabilities. - - To understand the particular mechanism that describes the strategy S, we - refer to Appendix A of the paper on page 389. 
- - The stated goal of the strategy is to maximize (using the notation of the - paper): - - EV_TOT = #CC(EV_CC) + #CD(EV_CD) + #DC(EV_DC) + #DD(EV_DD) - - This differs from the more modern literature where #CC, #CD, #DC and #DD - would imply that counts of both players playing C and C, or the first - playing C and the second D etc... - In this case the author uses an argument based on the sequence of plays by - the player (S) so #CC denotes the number of times the player plays C twice - in a row. - - On the second page of the appendix, figure 4 (page 390) - identifies an expression for EV_TOT. - A specific term is made to disappear in - the case of T - R = P - S (which is not the case for the standard - (R, P, S, T) = (3, 1, 0, 5)): - - > "Where (t - r) = (p - s), EV_TOT will be a function of alpha, beta, t, r, - p, s and N are known and V which is unknown. - - V is the total number of cooperations of the player S (this is noted earlier - in the abstract) and as such the final expression (with only V as unknown) - can be used to decide if V should indicate that S always cooperates or not. - - This final expression is used to show that EV_TOT is linear in the number of - cooperations by the player thus justifying the fact that the player will - always cooperate or defect. - - All of the above details are used to give the following interpretation of - the strategy: - - 1. On any given turn, the strategy will estimate alpha = P(C_o | C_s) and - beta = P(C_o | D_s). - 2. The strategy will calculate the expected utility of always playing C OR - always playing D against the estimated probabilities. This corresponds to: - - a. In the case of the player always cooperating: - - P_CC = alpha and P_CD = 1 - alpha - - b. In the case of the player always defecting: - - P_DC = beta and P_DD = 1 - beta - - - Using this we have: - - E_C = alpha R + (1 - alpha) S - E_D = beta T + (1 - beta) P - - Thus at every turn, the strategy will calculate those two values and - cooperate if E_C > E_D and will defect if E_C < E_D. - - In the case of E_C = E_D, the player will alternate from their previous - move. This is based on specific sentence from Axelrod's original paper: - - > "Under certain circumstances, DOWNING will even determine that the best - > strategy is to alternate cooperation and defection." - - One final important point is the early game behaviour of the strategy. It - has been noted that this strategy was implemented in a way that assumed that - alpha and beta were both 1/2: - - > "Initially, they are both assumed to be .5, which amounts to the - > pessimistic assumption that the other player is not responsive." - - Note that if alpha = beta = 1 / 2 then: - - E_C = alpha R + alpha S - E_D = alpha T + alpha P - - And from the defining properties of the Prisoner's Dilemma (T > R > P > S) - this gives: E_D > E_C. - Thus, the player opens with a defection in the first two rounds. Note that - from the Axelrod publications alone there is nothing to indicate defections - on the first two rounds, although a defection in the opening round is clear. - However there is a presentation available at - http://www.sci.brooklyn.cuny.edu/~sklar/teaching/f05/alife/notes/azhar-ipd-Oct19th.pdf - That clearly states that Downing defected in the first two rounds, thus this - is assumed to be the behaviour. Interestingly, in future tournaments this - strategy was revised to not defect on the opening two rounds. 
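# A quick numeric check of the opening-round argument above, assuming the
# standard payoffs (R, P, S, T) = (3, 1, 0, 5) and the initial estimates
# alpha = beta = 1/2.
R, P, S, T = 3, 1, 0, 5
alpha = beta = 0.5

E_C = alpha * R + (1 - alpha) * S  # 0.5 * 3 + 0.5 * 0 = 1.5
E_D = beta * T + (1 - beta) * P    # 0.5 * 5 + 0.5 * 1 = 3.0
assert E_D > E_C  # hence the defection on the opening rounds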
It is assumed that these first two rounds are used to create initial
-    estimates of
-    beta = P(C_o | D_s) and we will use the opening play of the player to
-    estimate alpha = P(C_o | C_s).
-    Thus we assume that the opponent's first play is a response to a cooperation
-    "before the match starts".
-
-    So for example, if the plays are:
-
-    [(D, C), (D, C)]
-
-    Then the opponent's first cooperation counts as a cooperation in response to
-    the non-existent cooperation of round 0. The total number of cooperations in
-    response to a cooperation is 1. We need to take into account that extra
-    phantom cooperation to estimate the probability alpha=P(C_o | C_s) as 1 / 1
-    = 1.
-
-    This is an assumption with no clear indication from the literature.
-
-    --
-    This strategy came 10th in Axelrod's original tournament.
-
-    Names:
-
-    - Downing: [Axelrod1980]_
-    """
-
-    name = "First by Downing"
-
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": {"game"},
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        super().__init__()
-        self.number_opponent_cooperations_in_response_to_C = 0
-        self.number_opponent_cooperations_in_response_to_D = 0
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        round_number = len(self.history) + 1
-
-        if round_number == 1:
-            return D
-        if round_number == 2:
-            if opponent.history[-1] == C:
-                self.number_opponent_cooperations_in_response_to_C += 1
-            return D
-
-        if self.history[-2] == C and opponent.history[-1] == C:
-            self.number_opponent_cooperations_in_response_to_C += 1
-        if self.history[-2] == D and opponent.history[-1] == C:
-            self.number_opponent_cooperations_in_response_to_D += 1
-
-        # Adding 1 to cooperations for the assumption that the first opponent
-        # move is a response to a cooperation. See docstring for more
-        # information.
-        alpha = (self.number_opponent_cooperations_in_response_to_C /
-                 (self.cooperations + 1))
-        # Adding 2 to defections on the assumption that the first two
-        # moves are defections, which may not be true in a noisy match
-        beta = (self.number_opponent_cooperations_in_response_to_D /
-                max(self.defections, 2))
-
-        R, P, S, T = self.match_attributes["game"].RPST()
-        expected_value_of_cooperating = alpha * R + (1 - alpha) * S
-        expected_value_of_defecting = beta * T + (1 - beta) * P
-
-        if expected_value_of_cooperating > expected_value_of_defecting:
-            return C
-        if expected_value_of_cooperating < expected_value_of_defecting:
-            return D
-        return self.history[-1].flip()
-
-
-class FirstByFeld(IpdPlayer):
-    """
-    Submitted to Axelrod's first tournament by Scott Feld.
-
-    The description written in [Axelrod1980]_ is:
-
-    > "This rule starts with tit for tat and gradually lowers its probability of
-    > cooperation following the other's cooperation to .5 by the two hundredth
-    > move. It always defects after a defection by the other."
-
-    This strategy plays Tit For Tat, always defecting if the opponent defects but
-    cooperating when the opponent cooperates with a gradually decreasing probability
-    until it is only .5. Note that the description does not clearly indicate how
-    the cooperation probability should drop. This implements a linear decreasing
-    function.
-
-    This strategy came 11th in Axelrod's original tournament.
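# A minimal sketch of the linear-decay interpretation described above:
# the cooperation probability falls from 1.0 to 0.5 over 200 rounds
# (hypothetical helper mirroring the interpolation in the class below).
def feld_cooperation_probability(rounds_played,
                                 start=1.0, end=0.5, decay_rounds=200):
    slope = (end - start) / decay_rounds
    return max(start + slope * rounds_played, end)

assert feld_cooperation_probability(0) == 1.0
assert feld_cooperation_probability(100) == 0.75
assert feld_cooperation_probability(200) == 0.5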
- - Names: - - - Feld: [Axelrod1980]_ - """ - - name = "First by Feld" - classifier = { - "memory_depth": 200, # Varies actually, eventually becomes depth 1 - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, - start_coop_prob: float = 1.0, - end_coop_prob: float = 0.5, - rounds_of_decay: int = 200, - ) -> None: - """ - Parameters - ---------- - start_coop_prob, float - The initial probability to cooperate - end_coop_prob, float - The final probability to cooperate - rounds_of_decay, int - The number of rounds to linearly decrease from start_coop_prob - to end_coop_prob - """ - super().__init__() - self._start_coop_prob = start_coop_prob - self._end_coop_prob = end_coop_prob - self._rounds_of_decay = rounds_of_decay - - def _cooperation_probability(self) -> float: - """It's not clear what the interpolating function is, so we'll do - something simple that decreases monotonically from 1.0 to 0.5 over - 200 rounds.""" - diff = self._end_coop_prob - self._start_coop_prob - slope = diff / self._rounds_of_decay - rounds = len(self.history) - return max(self._start_coop_prob + slope * rounds, self._end_coop_prob) - - def strategy(self, opponent: IpdPlayer) -> Action: - if not opponent.history: - return C - if opponent.history[-1] == D: - return D - p = self._cooperation_probability() - return random_choice(p) - - -class FirstByGraaskamp(IpdPlayer): - """ - Submitted to Axelrod's first tournament by James Graaskamp. - - The description written in [Axelrod1980]_ is: - - > "This rule plays tit for tat for 50 moves, defects on move 51, and then - > plays 5 more moves of tit for tat. A check is then made to see if the player - > seems to be RANDOM, in which case it defects from then on. A check is also - > made to see if the other is TIT FOR TAT, ANALOGY (a program from the - > preliminary tournament), and its own twin, in which case it plays tit for - > tat. Otherwise it randomly defects every 5 to 15 moves, hoping that enough - > trust has been built up so that the other player will not notice these - > defections.: - - This is implemented as: - - 1. Plays Tit For Tat for the first 50 rounds; - 2. Defects on round 51; - 3. Plays 5 further rounds of Tit For Tat; - 4. A check is then made to see if the opponent is playing randomly in which - case it defects for the rest of the game. This is implemented with a chi - squared test. - 5. The strategy also checks to see if the opponent is playing Tit For Tat or - a clone of itself. If - so it plays Tit For Tat. If not it cooperates and randomly defects every 5 - to 15 moves. - - Note that there is no information about 'Analogy' available thus Step 5 is - a "best possible" interpretation of the description in the paper. - Furthermore the test for the clone is implemented as checking that both - players have played the same moves for the entire game. This is unlikely to - be the original approach but no further details are available. - - This strategy came 9th in Axelrod’s original tournament. 
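# A sketch of the chi-squared randomness check described above, assuming
# scipy is available (as in the surrounding module). A high p-value means
# the cooperation/defection counts are consistent with 50/50 random play,
# in which case the strategy defects for the rest of the game.
from scipy.stats import chisquare

def looks_random(cooperations, defections, alpha=0.05):
    p_value = chisquare([cooperations, defections]).pvalue
    return p_value >= alpha

assert looks_random(28, 28)      # balanced counts look random
assert not looks_random(50, 6)   # heavily one-sided counts do not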
- - Names: - - - Graaskamp: [Axelrod1980]_ - """ - - name = "First by Graaskamp" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, alpha: float = 0.05) -> None: - """ - Parameters - ---------- - alpha: float - The significant level of p-value from chi-squared test with - alpha == 0.05 by default. - """ - super().__init__() - self.alpha = alpha - self.opponent_is_random = False - self.next_random_defection_turn = None # type: Optional[int] - - def strategy(self, opponent: IpdPlayer) -> Action: - """This is the actual strategy""" - # First move - if not self.history: - return C - # React to the opponent's last move - if len(self.history) < 56: - if opponent.history[-1] == D or len(self.history) == 50: - return D - return C - - # Check if opponent plays randomly, if so, defect for the rest of the game - p_value = chisquare([opponent.cooperations, opponent.defections]).pvalue - self.opponent_is_random = (p_value >= self.alpha) or self.opponent_is_random - - if self.opponent_is_random: - return D - if all( - opponent.history[i] == self.history[i - 1] - for i in range(1, len(self.history)) - ) or opponent.history == self.history: - # Check if opponent plays Tit for Tat or a clone of itself. - if opponent.history[-1] == D: - return D - return C - - if self.next_random_defection_turn is None: - self.next_random_defection_turn = random.randint(5, 15) + len(self.history) - - if len(self.history) == self.next_random_defection_turn: - # resample the next defection turn - self.next_random_defection_turn = random.randint(5, 15) + len(self.history) - return D - return C - - -class FirstByGrofman(IpdPlayer): - """ - Submitted to Axelrod's first tournament by Bernard Grofman. - - The description written in [Axelrod1980]_ is: - - > "If the players did different things on the previous move, this rule - > cooperates with probability 2/7. Otherwise this rule always cooperates." - - This strategy came 4th in Axelrod's original tournament. - - Names: - - - Grofman: [Axelrod1980]_ - """ - - name = "First by Grofman" - classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) == 0 or self.history[-1] == opponent.history[-1]: - return C - return random_choice(2 / 7) - - -class FirstByJoss(MemoryOnePlayer): - """ - Submitted to Axelrod's first tournament by Johann Joss. - - The description written in [Axelrod1980]_ is: - - > "This rule cooperates 90% of the time after a cooperation by the other. It - > always defects after a defection by the other." - - This strategy came 12th in Axelrod's original tournament. - - Names: - - - Joss: [Axelrod1980]_ - - Hard Joss: [Stewart2012]_ - """ - - name = "First by Joss" - - def __init__(self, p: float = 0.9) -> None: - """ - Parameters - ---------- - p, float - The probability of cooperating when the previous round was (C, C) - or (D, C), i.e. the opponent cooperated. - """ - four_vector = (p, 0, p, 0) - self.p = p - super().__init__(four_vector) - - -class FirstByNydegger(IpdPlayer): - """ - Submitted to Axelrod's first tournament by Rudy Nydegger. 
- - The description written in [Axelrod1980]_ is: - - > "The program begins with tit for tat for the first three moves, except - > that if it was the only one to cooperate on the first move and the only one - > to defect on the second move, it defects on the third move. After the third - > move, its choice is determined from the 3 preceding outcomes in the - > following manner. Let A be the sum formed by counting the other's defection - > as 2 points and one's own as 1 point, and giving weights of 16, 4, and 1 to - > the preceding three moves in chronological order. The choice can be - > described as defecting only when A equals 1, 6, 7, 17, 22, 23, 26, 29, 30, - > 31, 33, 38, 39, 45, 49, 54, 55, 58, or 61. Thus if all three preceding moves - > are mutual defection, A = 63 and the rule cooperates. This rule was - > designed for use in laboratory experiments as a stooge which had a memory - > and appeared to be trustworthy, potentially cooperative, but not gullible - > (Nydegger, 1978)." - - The program begins with tit for tat for the first three moves, except - that if it was the only one to cooperate on the first move and the only one - to defect on the second move, it defects on the third move. After the - third move, its choice is determined from the 3 preceding outcomes in the - following manner. - - .. math:: - - A = 16 a_1 + 4 a_2 + a_3 - - Where :math:`a_i` is dependent on the outcome of the previous :math:`i` th - round. If both strategies defect, :math:`a_i=3`, if the opponent only defects: - :math:`a_i=2` and finally if it is only this strategy that defects then - :math:`a_i=1`. - - Finally this strategy defects if and only if: - - .. math:: - - A \in \{1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, 55, 58, 61\} - - Thus if all three preceding moves are mutual defection, A = 63 and the rule - cooperates. This rule was designed for use in laboratory experiments as a - stooge which had a memory and appeared to be trustworthy, potentially - cooperative, but not gullible. - - This strategy came 3rd in Axelrod's original tournament. - - Names: - - - Nydegger: [Axelrod1980]_ - """ - - name = "First by Nydegger" - classifier = { - "memory_depth": 3, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - self.As = [1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, 55, 58, 61] - self.score_map = {(C, C): 0, (C, D): 2, (D, C): 1, (D, D): 3} - super().__init__() - - @staticmethod - def score_history( - my_history: List[Action], - opponent_history: List[Action], - score_map: Dict[Tuple[Action, Action], int], - ) -> int: - - """Implements the Nydegger formula A = 16 a_1 + 4 a_2 + a_3""" - a = 0 - for i, weight in [(-1, 16), (-2, 4), (-3, 1)]: - plays = (my_history[i], opponent_history[i]) - a += weight * score_map[plays] - return a - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) == 0: - return C - if len(self.history) == 1: - # TFT - return D if opponent.history[-1] == D else C - if len(self.history) == 2: - if opponent.history[0:2] == [D, C]: - return D - else: - # TFT - return D if opponent.history[-1] == D else C - A = self.score_history(self.history[-3:], opponent.history[-3:], self.score_map) - if A in self.As: - return D - return C - - -class FirstByShubik(IpdPlayer): - """ - Submitted to Axelrod's first tournament by Martin Shubik. 
- - The description written in [Axelrod1980]_ is: - - > "This rule cooperates until the other defects, and then defects once. If - > the other defects again after the rule's cooperation is resumed, the rule - > defects twice. In general, the length of retaliation is increased by one for - > each departure from mutual cooperation. This rule is described with its - > strategic implications in Shubik (1970). Further treatment of its is given - > in Taylor (1976). - - There is some room for interpretation as to how the strategy reacts to a - defection on the turn where it starts to cooperate once more. In Shubik - (1970) the strategy is described as: - - > "I will play my move 1 to begin with and will continue to do so, so long - > as my information shows that the other player has chosen his move 1. If my - > information tells me he has used move 2, then I will use move 2 for the - > immediate k subsequent periods, after which I will resume using move 1. If - > he uses his move 2 again after I have resumed using move 1, then I will - > switch to move 2 for the k + 1 immediately subsequent periods . . . and so - > on, increasing my retaliation by an extra period for each departure from the - > (1, 1) steady state." - - This is interpreted as: - - The player cooperates, if when it is cooperating, the opponent defects it - defects for k rounds. After k rounds it starts cooperating again and - increments the value of k if the opponent defects again. - - This strategy came 5th in Axelrod's original tournament. - - Names: - - - Shubik: [Axelrod1980]_ - """ - - name = "First by Shubik" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.is_retaliating = False - self.retaliation_length = 0 - self.retaliation_remaining = 0 - - def _decrease_retaliation_counter(self): - """Lower the remaining owed retaliation count and flip to non-retaliate - if the count drops to zero.""" - if self.is_retaliating: - self.retaliation_remaining -= 1 - if self.retaliation_remaining == 0: - self.is_retaliating = False - - def strategy(self, opponent: IpdPlayer) -> Action: - if not opponent.history: - return C - - if self.is_retaliating: - # Are we retaliating still? - self._decrease_retaliation_counter() - return D - - if opponent.history[-1] == D and self.history[-1] == C: - # "If he uses his move 2 again after I have resumed using move 1, - # then I will switch to move 2 for the k + 1 immediately subsequent - # periods" - self.is_retaliating = True - self.retaliation_length += 1 - self.retaliation_remaining = self.retaliation_length - self._decrease_retaliation_counter() - return D - return C - - -class FirstByTullock(IpdPlayer): - """ - Submitted to Axelrod's first tournament by Gordon Tullock. - - The description written in [Axelrod1980]_ is: - - > "This rule cooperates on the first eleven moves. It then cooperates 10% - > less than the other player has cooperated on the preceding ten moves. This - > rule is based on an idea developed in Overcast and Tullock (1971). Professor - > Tullock was invited to specify how the idea could be implemented, and he did - > so out of scientific interest rather than an expectation that it would be a - > likely winner." 
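# A minimal sketch of the rule quoted above: after the opening cooperations,
# cooperate 10% less often than the opponent did over the preceding ten
# rounds (hypothetical stand-alone helper using "C"/"D" strings).
def tullock_cooperation_probability(opp_recent):
    """opp_recent: the opponent's recent moves as 'C'/'D' strings."""
    proportion = opp_recent.count("C") / len(opp_recent)
    return max(0.0, proportion - 0.10)

assert tullock_cooperation_probability(["C"] * 10) == 0.9
assert tullock_cooperation_probability(["D"] * 10) == 0.0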
- - This is interpreted as: - - Cooperates for the first 11 rounds then randomly cooperates 10% less often - than the opponent has in the previous 10 rounds. - - This strategy came 13th in Axelrod's original tournament. - - Names: - - - Tullock: [Axelrod1980]_ - """ - - name = "First by Tullock" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - """ - Parameters - ---------- - rounds_to_cooperate: int - The number of rounds to cooperate initially - """ - super().__init__() - self._rounds_to_cooperate = 11 - self.memory_depth = self._rounds_to_cooperate - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) < self._rounds_to_cooperate: - return C - rounds = self._rounds_to_cooperate - 1 - cooperate_count = opponent.history[-rounds:].count(C) - prop_cooperate = cooperate_count / rounds - prob_cooperate = max(0, prop_cooperate - 0.10) - return random_choice(prob_cooperate) - - -class FirstByAnonymous(IpdPlayer): - """ - Submitted to Axelrod's first tournament by a graduate student whose name was - withheld. - - The description written in [Axelrod1980]_ is: - - > "This rule has a probability of cooperating, P, which is initially 30% and - > is updated every 10 moves. P is adjusted if the other player seems random, - > very cooperative, or very uncooperative. P is also adjusted after move 130 - > if the rule has a lower score than the other player. Unfortunately, the - > complex process of adjustment frequently left the probability of cooperation - > in the 30% to 70% range, and therefore the rule appeared random to many - > other players." - - Given the lack of detail this strategy is implemented based on the final - sentence of the description which is to have a cooperation probability that - is uniformly random in the 30 to 70% range. - - Names: - - - (Name withheld): [Axelrod1980]_ - """ - - name = "First by Anonymous" - classifier = { - "memory_depth": 0, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - r = random.uniform(3, 7) / 10 - return random_choice(r) - - -@FinalTransformer((D, D), name_prefix=None) -class FirstBySteinAndRapoport(IpdPlayer): - """ - Submitted to Axelrod's first tournament by William Stein and Amnon Rapoport. - - The description written in [Axelrod1980]_ is: - - > "This rule plays tit for tat except that it cooperates on the first four - > moves, it defects on the last two moves, and every fifteen moves it checks - > to see if the opponent seems to be playing randomly. This check uses a - > chi-squared test of the other's transition probabilities and also checks for - > alternating moves of CD and DC. - - This is implemented as follows: - - 1. It cooperates for the first 4 moves. - 2. It defects on the last 2 moves. - 3. Every 15 moves it makes use of a `chi-squared - test `_ to check if the - opponent is playing randomly. If so it defects. - - This strategy came 6th in Axelrod's original tournament. 
- - Names: - - - SteinAndRapoport: [Axelrod1980]_ - """ - - name = "First by Stein and Rapoport" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, alpha: float = 0.05) -> None: - """ - Parameters - ---------- - alpha: float - The significant level of p-value from chi-squared test with - alpha == 0.05 by default. - """ - super().__init__() - self.alpha = alpha - self.opponent_is_random = False - - def strategy(self, opponent: IpdPlayer) -> Action: - round_number = len(self.history) + 1 - - # First 4 moves - if round_number < 5: - return C - # For first 15 rounds tit for tat as we do not know opponents strategy - elif round_number < 15: - return opponent.history[-1] - - if round_number % 15 == 0: - p_value = chisquare([opponent.cooperations, opponent.defections]).pvalue - self.opponent_is_random = p_value >= self.alpha - - if self.opponent_is_random: - # Defect if opponent plays randomly - return D - else: # TitForTat if opponent plays not randomly - return opponent.history[-1] - - -@FinalTransformer((D, D), name_prefix=None) -class FirstByTidemanAndChieruzzi(IpdPlayer): - """ - Submitted to Axelrod's first tournament by Nicolas Tideman and Paula - Chieruzzi. - - The description written in [Axelrod1980]_ is: - - > "This rule begins with cooperation and tit for tat. However, when the - > other player finishes his second run of defec- tions, an extra punishment is - > instituted, and the number of punishing defections is increased by one with - > each run of the other's defections. The other player is given a fresh start - > if he is 10 or more points behind, if he has not just started a run of - > defections, if it has been at least 20 moves since a fresh start, if there - > are at least 10 moves remaining, and if the number of defections differs - > from a 50-50 random generator by at least 3.0 standard deviations. A fresh - > start involves two cooperations and then play as if the game had just - > started. The program defects automatically on the last two moves." - - This is interpreted as: - - 1. Every run of defections played by the opponent increases the number of - defections that this strategy retaliates with by 1. - - 2. The opponent is given a ‘fresh start’ if: - - it is 10 points behind this strategy - - and it has not just started a run of defections - - and it has been at least 20 rounds since the last ‘fresh start’ - - and there are more than 10 rounds remaining in the match - - and the total number of defections differs from a 50-50 random sample - by at least 3.0 standard deviations. - - A ‘fresh start’ is a sequence of two cooperations followed by an assumption - that the game has just started (everything is forgotten). - - 3. The strategy defects on the last two moves. - - This strategy came 2nd in Axelrod’s original tournament. 
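# A sketch of the 'fresh start' randomness bound described above: with N
# observed moves, a 50-50 process has standard deviation sqrt(N)/2, so a
# fresh start requires the defection count to sit at least 3 standard
# deviations away from N/2 (hypothetical stand-alone helper).
def deserves_fresh_start(defections, total_moves):
    std_dev = (total_moves ** 0.5) / 2
    lower = total_moves / 2 - 3 * std_dev
    upper = total_moves / 2 + 3 * std_dev
    return defections <= lower or defections >= upper

assert deserves_fresh_start(2, 100)       # far below 50 +/- 15
assert not deserves_fresh_start(50, 100)  # dead on the 50-50 mean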
- - Names: - - - TidemanAndChieruzzi: [Axelrod1980]_ - """ - - name = "First by Tideman and Chieruzzi" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"game", "length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.is_retaliating = False - self.retaliation_length = 0 - self.retaliation_remaining = 0 - self.current_score = 0 - self.opponent_score = 0 - self.last_fresh_start = 0 - self.fresh_start = False - self.remembered_number_of_opponent_defectioons = 0 - - def _decrease_retaliation_counter(self): - """Lower the remaining owed retaliation count and flip to non-retaliate - if the count drops to zero.""" - if self.is_retaliating: - self.retaliation_remaining -= 1 - if self.retaliation_remaining == 0: - self.is_retaliating = False - - def _fresh_start(self): - """Give the opponent a fresh start by forgetting the past""" - self.is_retaliating = False - self.retaliation_length = 0 - self.retaliation_remaining = 0 - self.remembered_number_of_opponent_defectioons = 0 - - def _score_last_round(self, opponent: IpdPlayer): - """Updates the scores for each player.""" - # Load the default game if not supplied by a tournament. - game = self.match_attributes["game"] - last_round = (self.history[-1], opponent.history[-1]) - scores = game.score(last_round) - self.current_score += scores[0] - self.opponent_score += scores[1] - - def strategy(self, opponent: IpdPlayer) -> Action: - if not opponent.history: - return C - - if opponent.history[-1] == D: - self.remembered_number_of_opponent_defectioons += 1 - - # Calculate the scores. - self._score_last_round(opponent) - - # Check if we have recently given the strategy a fresh start. - if self.fresh_start: - self.fresh_start = False - return C # Second cooperation - - # Check conditions to give opponent a fresh start. - current_round = len(self.history) + 1 - if self.last_fresh_start == 0: - valid_fresh_start = True - # There needs to be at least 20 rounds before the next fresh start - else: - valid_fresh_start = current_round - self.last_fresh_start >= 20 - - if valid_fresh_start: - valid_points = self.current_score - self.opponent_score >= 10 - valid_rounds = self.match_attributes["length"] - current_round >= 10 - opponent_is_cooperating = opponent.history[-1] == C - if valid_points and valid_rounds and opponent_is_cooperating: - # 50-50 split is based off the binomial distribution. - N = opponent.cooperations + opponent.defections - # std_dev = sqrt(N*p*(1-p)) where p is 1 / 2. - std_deviation = (N ** (1 / 2)) / 2 - lower = N / 2 - 3 * std_deviation - upper = N / 2 + 3 * std_deviation - if (self.remembered_number_of_opponent_defectioons <= lower or - self.remembered_number_of_opponent_defectioons >= upper): - # Opponent deserves a fresh start - self.last_fresh_start = current_round - self._fresh_start() - self.fresh_start = True - return C # First cooperation - - if self.is_retaliating: - # Are we retaliating still? 
- self._decrease_retaliation_counter() - return D - - if opponent.history[-1] == D: - self.is_retaliating = True - self.retaliation_length += 1 - self.retaliation_remaining = self.retaliation_length - self._decrease_retaliation_counter() - return D - - return C diff --git a/axelrod/ipd/strategies/axelrod_second.py b/axelrod/ipd/strategies/axelrod_second.py deleted file mode 100644 index 7b359d36e..000000000 --- a/axelrod/ipd/strategies/axelrod_second.py +++ /dev/null @@ -1,2131 +0,0 @@ -""" -Strategies from Axelrod's second tournament. All strategies in this module are -prefixed by `SecondBy` to indicate that they were submitted in Axelrod's Second -tournament by the given author. -""" - -import random -from typing import List - -import numpy as np -from axelrod.ipd.action import Action -from axelrod.ipd.interaction_utils import compute_final_score -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice -from axelrod.ipd.strategies.finite_state_machines import FSMPlayer - -C, D = Action.C, Action.D - - -class SecondByChampion(IpdPlayer): - """ - Strategy submitted to Axelrod's second tournament by Danny Champion. - - This player cooperates on the first 10 moves and plays Tit for Tat for the - next 15 more moves. After 25 moves, the program cooperates unless all the - following are true: the other player defected on the previous move, the - other player cooperated less than 60% and the random number between 0 and 1 - is greater that the other player's cooperation rate. - - Names: - - - Champion: [Axelrod1980b]_ - """ - - name = "Second by Champion" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - current_round = len(self.history) - # Cooperate for the first 10 turns - if current_round == 0: - return C - if current_round < 10: - return C - # Mirror partner for the next phase - if current_round < 25: - return opponent.history[-1] - # Now cooperate unless all of the necessary conditions are true - defection_prop = opponent.defections / len(opponent.history) - if opponent.history[-1] == D: - r = random.random() - if defection_prop >= max(0.4, r): - return D - return C - -class SecondByEatherley(IpdPlayer): - """ - Strategy submitted to Axelrod's second tournament by Graham Eatherley. - - A player that keeps track of how many times in the game the other player - defected. After the other player defects, it defects with a probability - equal to the ratio of the other's total defections to the total moves to - that point. - - Names: - - - Eatherley: [Axelrod1980b]_ - """ - - name = "Second by Eatherley" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - # Cooperate on the first move - if not len(opponent.history): - return C - # Reciprocate cooperation - if opponent.history[-1] == C: - return C - # Respond to defections with probability equal to opponent's total - # proportion of defections - defection_prop = opponent.defections / len(opponent.history) - return random_choice(1 - defection_prop) - - -class SecondByTester(IpdPlayer): - """ - Submitted to Axelrod's second tournament by David Gladstein. 
- - This strategy is a TFT variant that attempts to exploit certain strategies. It - defects on the first move. If the opponent ever defects, TESTER 'apologies' by - cooperating and then plays TFT for the rest of the game. Otherwise TESTER - alternates cooperation and defection. - - This strategy came 46th in Axelrod's second tournament. - - Names: - - - Tester: [Axelrod1980b]_ - """ - - name = "Second by Tester" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.is_TFT = False - - def strategy(self, opponent: IpdPlayer) -> Action: - # Defect on the first move - if not opponent.history: - return D - # Am I TFT? - if self.is_TFT: - return D if opponent.history[-1:] == [D] else C - else: - # Did opponent defect? - if opponent.history[-1] == D: - self.is_TFT = True - return C - if len(self.history) in [1, 2]: - return C - # Alternate C and D - return self.history[-1].flip() - - -class SecondByGladstein(IpdPlayer): - """ - Submitted to Axelrod's second tournament by David Gladstein. - - This strategy is also known as Tester and is based on the reverse - engineering of the Fortran strategies from Axelrod's second tournament. - - This strategy is a TFT variant that defects on the first round in order to - test the opponent's response. If the opponent ever defects, the strategy - 'apologizes' by cooperating and then plays TFT for the rest of the game. - Otherwise, it defects as much as possible subject to the constraint that - the ratio of its defections to moves remains under 0.5, not counting the - first defection. - - Names: - - - Gladstein: [Axelrod1980b]_ - - Tester: [Axelrod1980b]_ - """ - - name = "Second by Gladstein" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - # This strategy assumes the opponent is a patsy - self.patsy = True - - def strategy(self, opponent: IpdPlayer) -> Action: - # Defect on the first move - if not self.history: - return D - # Is the opponent a patsy? - if self.patsy: - # If the opponent defects, apologize and play TFT. - if opponent.history[-1] == D: - self.patsy = False - return C - # Cooperate as long as the cooperation ratio is below 0.5 - cooperation_ratio = self.cooperations / len(self.history) - if cooperation_ratio > 0.5: - return D - return C - else: - # Play TFT - return opponent.history[-1] - - -class SecondByTranquilizer(IpdPlayer): - - """ - Submitted to Axelrod's second tournament by Craig Feathers - - Description given in Axelrod's "More Effective Choice in the - Prisoner's Dilemma" paper: The rule normally cooperates but - is ready to defect if the other player defects too often. - Thus the rule tends to cooperate for the first dozen or two moves - if the other player is cooperating, but then it throws in a - defection. If the other player continues to cooperate, then defections - become more frequent. But as long as Tranquilizer is maintaining an - average payoff of at least 2.25 points per move, it will never defect - twice in succession and it will not defect more than - one-quarter of the time. 
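# A minimal sketch of the average-payoff guard described above, assuming the
# standard game where mutual cooperation scores 3 points: Tranquilizer only
# considers slipping in a defection while its mean payoff stays at or above
# 2.25 points per move (hypothetical helper, not the full state machine).
def may_defect(my_total_score, moves_played):
    return moves_played > 0 and my_total_score / moves_played >= 2.25

assert may_defect(27, 10)      # averaging 2.7 per move
assert not may_defect(20, 10)  # averaging 2.0 per move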
- - This implementation is based on the reverse engineering of the - Fortran strategy K67R from Axelrod's second tournament. - Reversed engineered by: Owen Campbell, Will Guo and Mansour Hakem. - - The strategy starts by cooperating and has 3 states. - - At the start of the strategy it updates its states: - - - It counts the number of consecutive defections by the opponent. - - If it was in state 2 it moves to state 0 and calculates the - following quantities two_turns_after_good_defection_ratio and - two_turns_after_good_defection_ratio_count. - - Formula for: - - two_turns_after_good_defection_ratio: - - self.two_turns_after_good_defection_ratio = ( - ((self.two_turns_after_good_defection_ratio - * self.two_turns_after_good_defection_ratio_count) - + (3 - (3 * self.dict[opponent.history[-1]])) - + (2 * self.dict[self.history[-1]]) - - ((self.dict[opponent.history[-1]] - * self.dict[self.history[-1]]))) - / (self.two_turns_after_good_defection_ratio_count + 1) - ) - - two_turns_after_good_defection_ratio_count = - two_turns_after_good_defection_ratio + 1 - - - If it was in state 1 it moves to state 2 and calculates the - following quantities one_turn_after_good_defection_ratio and - one_turn_after_good_defection_ratio_count. - - Formula for: - - one_turn_after_good_defection_ratio: - - self.one_turn_after_good_defection_ratio = ( - ((self.one_turn_after_good_defection_ratio - * self.one_turn_after_good_defection_ratio_count) - + (3 - (3 * self.dict[opponent.history[-1]])) - + (2 * self.dict[self.history[-1]]) - - (self.dict[opponent.history[-1]] - * self.dict[self.history[-1]])) - / (self.one_turn_after_good_defection_ratio_count + 1) - ) - - one_turn_after_good_defection_ratio_count: - - one_turn_after_good_defection_ratio_count = - one_turn_after_good_defection_ratio + 1 - - If after this it is in state 1 or 2 then it cooperates. - - If it is in state 0 it will potentially perform 1 of the 2 - following stochastic tests: - - 1. If average score per turn is greater than 2.25 then it calculates a - value of probability: - - probability = ( - (.95 - (((self.one_turn_after_good_defection_ratio) - + (self.two_turns_after_good_defection_ratio) - 5) / 15)) - + (1 / (((len(self.history))+1) ** 2)) - - (self.dict[opponent.history[-1]] / 4) - ) - - and will cooperate if a random sampled number is less than that value of - probability. If it does not cooperate then the strategy moves to state 1 - and defects. - - 2. If average score per turn is greater than 1.75 but less than 2.25 - then it calculates a value of probability: - - probability = ( - (.25 + ((opponent.cooperations + 1) / ((len(self.history)) + 1))) - - (self.opponent_consecutive_defections * .25) - + ((current_score[0] - - current_score[1]) / 100) - + (4 / ((len(self.history)) + 1)) - ) - - and will cooperate if a random sampled number is less than that value of - probability. If not, it defects. - - If none of the above holds the player simply plays tit for tat. - - Tranquilizer came in 27th place in Axelrod's second torunament. 
- - - Names: - - - Tranquilizer: [Axelrod1980]_ - """ - - name = "Second by Tranquilizer" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": {"game"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self): - super().__init__() - self.num_turns_after_good_defection = 0 # equal to FD variable - self.opponent_consecutive_defections = 0 # equal to S variable - self.one_turn_after_good_defection_ratio = 5 # equal to AD variable - self.two_turns_after_good_defection_ratio = 0 # equal to NO variable - self.one_turn_after_good_defection_ratio_count = 1 # equal to AK variable - self.two_turns_after_good_defection_ratio_count = 1 # equal to NK variable - # All above variables correspond to those in original Fotran Code - self.dict = {C: 0, D: 1} - - def update_state(self, opponent): - - """ - Calculates the ratio values for the one_turn_after_good_defection_ratio, - two_turns_after_good_defection_ratio and the probability values, - and sets the value of num_turns_after_good_defection. - """ - if opponent.history[-1] == D: - self.opponent_consecutive_defections += 1 - else: - self.opponent_consecutive_defections = 0 - - if self.num_turns_after_good_defection == 2: - self.num_turns_after_good_defection = 0 - self.two_turns_after_good_defection_ratio = ( - ( - self.two_turns_after_good_defection_ratio - * self.two_turns_after_good_defection_ratio_count - ) - + (3 - (3 * self.dict[opponent.history[-1]])) - + (2 * self.dict[self.history[-1]]) - - ((self.dict[opponent.history[-1]] * self.dict[self.history[-1]])) - ) / (self.two_turns_after_good_defection_ratio_count + 1) - self.two_turns_after_good_defection_ratio_count += 1 - elif self.num_turns_after_good_defection == 1: - self.num_turns_after_good_defection = 2 - self.one_turn_after_good_defection_ratio = ( - ( - self.one_turn_after_good_defection_ratio - * self.one_turn_after_good_defection_ratio_count - ) - + (3 - (3 * self.dict[opponent.history[-1]])) - + (2 * self.dict[self.history[-1]]) - - (self.dict[opponent.history[-1]] * self.dict[self.history[-1]]) - ) / (self.one_turn_after_good_defection_ratio_count + 1) - self.one_turn_after_good_defection_ratio_count += 1 - - def strategy(self, opponent: IpdPlayer) -> Action: - - if not self.history: - return C - - self.update_state(opponent) - if self.num_turns_after_good_defection in [1, 2]: - return C - - current_score = compute_final_score(zip(self.history, opponent.history)) - - if (current_score[0] / ((len(self.history)) + 1)) >= 2.25: - probability = ( - ( - 0.95 - - ( - ( - (self.one_turn_after_good_defection_ratio) - + (self.two_turns_after_good_defection_ratio) - - 5 - ) - / 15 - ) - ) - + (1 / (((len(self.history)) + 1) ** 2)) - - (self.dict[opponent.history[-1]] / 4) - ) - if random.random() <= probability: - return C - self.num_turns_after_good_defection = 1 - return D - if (current_score[0] / ((len(self.history)) + 1)) >= 1.75: - probability = ( - (0.25 + ((opponent.cooperations + 1) / ((len(self.history)) + 1))) - - (self.opponent_consecutive_defections * 0.25) - + ((current_score[0] - current_score[1]) / 100) - + (4 / ((len(self.history)) + 1)) - ) - if random.random() <= probability: - return C - return D - return opponent.history[-1] - - -class SecondByGrofman(IpdPlayer): - """ - Submitted to Axelrod's second tournament by Bernard Grofman. - - This strategy has 3 phases: - - 1. First it cooperates on the first two rounds - 2. 
For rounds 3-7 inclusive, it plays the same as the opponent's last move
-    3. Thereafter, it applies the following logic, looking at its memory of the
-    last 8\* rounds (ignoring the most recent round).
-
-    - If its own previous move was C and the opponent has defected less than
-      3 times in the last 8\* rounds, cooperate
-    - If its own previous move was C and the opponent has defected 3 or
-      more times in the last 8\* rounds, defect
-    - If its own previous move was D and the opponent has defected only once
-      or not at all in the last 8\* rounds, cooperate
-    - If its own previous move was D and the opponent has defected more than
-      once in the last 8\* rounds, defect
-
-    \* The code looks at the first 7 of the last 8 rounds, ignoring the most
-    recent round.
-
-    Names:
-    - Grofman's strategy: [Axelrod1980b]_
-    - K86R: [Axelrod1980b]_
-    """
-
-    name = "Second by Grofman"
-    classifier = {
-        "memory_depth": 8,
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        # Cooperate on the first two moves
-        if len(self.history) < 2:
-            return C
-        # For rounds 3-7, play the opponent's last move
-        elif 2 <= len(self.history) <= 6:
-            return opponent.history[-1]
-        else:
-            # Note: the Fortran code behavior ignores the opponent behavior
-            # in the last round and instead looks at the first 7 of the last
-            # 8 rounds.
-            opponent_defections_last_8_rounds = opponent.history[-8:-1].count(D)
-            if self.history[-1] == C and opponent_defections_last_8_rounds <= 2:
-                return C
-            if self.history[-1] == D and opponent_defections_last_8_rounds <= 1:
-                return C
-            return D
-
-
-class SecondByKluepfel(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Charles Kluepfel
-    (K32R).
-
-    This player keeps track of the opponent's responses to own behavior:
-
-    - `cd_count` counts: Opponent cooperates as response to player defecting.
-    - `dd_count` counts: Opponent defects as response to player defecting.
-    - `cc_count` counts: Opponent cooperates as response to player cooperating.
-    - `dc_count` counts: Opponent defects as response to player cooperating.
-
-    After 26 turns, the player then tries to detect a random player. The
-    player decides that the opponent is random if
-    cd_counts >= (cd_counts+dd_counts)/2 - 0.75*sqrt(cd_counts+dd_counts) AND
-    cc_counts >= (dc_counts+cc_counts)/2 - 0.75*sqrt(dc_counts+cc_counts).
-    If the player decides that they are playing against a random player, then
-    they will always defect.
-
-    Otherwise respond to recent history using the following set of rules:
-
-    - If opponent's last three choices are the same, then respond in kind.
-    - If opponent's last two choices are the same, then respond in kind with
-      probability 90%.
-    - Otherwise if opponent's last action was to cooperate, then cooperate
-      with probability 70%.
-    - Otherwise if opponent's last action was to defect, then defect
-      with probability 60%.
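# A minimal sketch of the randomness detector, mirroring the inequalities in
# the implementation below (note the second inequality is applied to the dc
# count; hypothetical stand-alone helper).
import math

def opponent_looks_random(cd_counts, dd_counts, cc_counts, dc_counts):
    after_defect = cd_counts + dd_counts  # opponent moves after our D
    after_coop = dc_counts + cc_counts    # opponent moves after our C
    return (cd_counts >= after_defect / 2 - 0.75 * math.sqrt(after_defect)
            and dc_counts >= after_coop / 2 - 0.75 * math.sqrt(after_coop))

# A perfectly random opponent keeps all four counts roughly balanced:
assert opponent_looks_random(13, 13, 13, 13)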
-
-    Names:
-
-    - Kluepfel: [Axelrod1980b]_
-    """
-
-    name = "Second by Kluepfel"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self):
-        super().__init__()
-        self.cd_counts, self.dd_counts, self.dc_counts, self.cc_counts = 0, 0, 0, 0
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        # First update the response matrix.
-        if len(self.history) >= 2:
-            if self.history[-2] == D:
-                if opponent.history[-1] == C:
-                    self.cd_counts += 1
-                else:
-                    self.dd_counts += 1
-            else:
-                if opponent.history[-1] == C:
-                    self.cc_counts += 1
-                else:
-                    self.dc_counts += 1
-
-        # Check for randomness
-        if len(self.history) > 26:
-            if self.cd_counts >= (self.cd_counts + self.dd_counts) / 2 - 0.75 * np.sqrt(
-                self.cd_counts + self.dd_counts
-            ) and self.dc_counts >= (
-                self.dc_counts + self.cc_counts
-            ) / 2 - 0.75 * np.sqrt(
-                self.dc_counts + self.cc_counts
-            ):
-                return D
-
-        # Otherwise respond to recent history
-
-        one_move_ago, two_moves_ago, three_moves_ago = C, C, C
-        if len(opponent.history) >= 1:
-            one_move_ago = opponent.history[-1]
-        if len(opponent.history) >= 2:
-            two_moves_ago = opponent.history[-2]
-        if len(opponent.history) >= 3:
-            three_moves_ago = opponent.history[-3]
-
-        if one_move_ago == two_moves_ago and two_moves_ago == three_moves_ago:
-            return one_move_ago
-
-        r = random.random()  # Everything following is stochastic
-        if one_move_ago == two_moves_ago:
-            if r < 0.9:
-                return one_move_ago
-            else:
-                return one_move_ago.flip()
-        if one_move_ago == C:
-            if r < 0.7:
-                return one_move_ago
-            else:
-                return one_move_ago.flip()
-        if one_move_ago == D:
-            if r < 0.6:
-                return one_move_ago
-            else:
-                return one_move_ago.flip()
-
-
-class SecondByBorufsen(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Otto Borufsen
-    (K32R), and came in third in that tournament.
-
-    This player keeps track of the opponent's responses to own behavior:
-
-    - `cd_count` counts: Opponent cooperates as response to player defecting.
-    - `cc_count` counts: Opponent cooperates as response to player cooperating.
-
-    The player has a defect mode and a normal mode. In defect mode, the
-    player will always defect. In normal mode, the player obeys the following
-    ranked rules:
-
-    1. If in the last three turns, both the player/opponent defected, then
-       cooperate for a single turn.
-    2. If in the last three turns, the player/opponent acted differently from
-       each other and they're alternating, then change next defect to
-       cooperate. (Doesn't block third rule.)
-    3. Otherwise, do tit-for-tat.
-
-    Start in normal mode, but every 25 turns starting with the 27th turn,
-    re-evaluate the mode. Enter defect mode if any of the following
-    conditions hold:
-
-    - Detected random: Opponent cooperated 7-18 times since last mode
-      evaluation (or start) AND less than 70% of opponent cooperation was in
-      response to player's cooperation, i.e.
-      cc_count / (cc_count+cd_count) < 0.7
-    - Detected defective: Opponent cooperated fewer than 3 times since last
-      mode evaluation.
-
-    When switching to defect mode, defect immediately. The first two rules for
-    normal mode require that the last three turns were in normal mode. When
-    starting normal mode from defect mode, defect on first move.
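# A minimal sketch of the periodic mode re-evaluation described above,
# mirroring the count ranges used in the implementation below (hypothetical
# helper; the counts are those gathered since the previous evaluation).
def next_mode(coops_after_coop, coops_after_defect):
    coops = coops_after_coop + coops_after_defect
    if coops < 3:
        return "Defect"  # opponent looks overly defective
    if 8 <= coops <= 17 and coops_after_coop / coops < 0.7:
        return "Defect"  # opponent looks random
    return "Normal"

assert next_mode(2, 0) == "Defect"
assert next_mode(6, 6) == "Defect"   # 12 coops, only 50% after our C
assert next_mode(20, 2) == "Normal"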
-
-    Names:
-
-    - Borufsen: [Axelrod1980b]_
-    """
-
-    name = "Second by Borufsen"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self):
-        super().__init__()
-        self.cd_counts, self.cc_counts = 0, 0
-        self.mutual_defect_streak = 0
-        self.echo_streak = 0
-        self.flip_next_defect = False
-        self.mode = "Normal"
-
-    def try_return(self, to_return):
-        """
-        We put the logic to check for the `flip_next_defect` bit here,
-        and proceed like normal otherwise.
-        """
-
-        if to_return == C:
-            return C
-        # Otherwise look for flip bit.
-        if self.flip_next_defect:
-            self.flip_next_defect = False
-            return C
-        return D
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        turn = len(self.history) + 1
-
-        if turn == 1:
-            return C
-
-        # Update the response history.
-        if turn >= 3:
-            if opponent.history[-1] == C:
-                if self.history[-2] == C:
-                    self.cc_counts += 1
-                else:
-                    self.cd_counts += 1
-
-        # Check if it's time for a mode change.
-        if turn > 2 and turn % 25 == 2:
-            coming_from_defect = False
-            if self.mode == "Defect":
-                coming_from_defect = True
-
-            self.mode = "Normal"
-            coops = self.cd_counts + self.cc_counts
-
-            # Check for a defective strategy
-            if coops < 3:
-                self.mode = "Defect"
-
-            # Check for a random strategy
-            if (coops >= 8 and coops <= 17) and self.cc_counts / coops < 0.7:
-                self.mode = "Defect"
-
-            self.cd_counts, self.cc_counts = 0, 0
-
-            # If defect mode, clear flags
-            if self.mode == "Defect":
-                self.mutual_defect_streak = 0
-                self.echo_streak = 0
-                self.flip_next_defect = False
-
-            # Check this special case
-            if self.mode == "Normal" and coming_from_defect:
-                return D
-
-        # Proceed
-        if self.mode == "Defect":
-            return D
-        else:
-            assert self.mode == "Normal"
-
-        # Look for mutual defects
-        if self.history[-1] == D and opponent.history[-1] == D:
-            self.mutual_defect_streak += 1
-        else:
-            self.mutual_defect_streak = 0
-        if self.mutual_defect_streak >= 3:
-            self.mutual_defect_streak = 0
-            self.echo_streak = 0  # Reset both streaks.
-            return self.try_return(C)
-
-        # Look for echoes
-        # Fortran code defaults two turns back to C if only second turn
-        my_two_back, opp_two_back = C, C
-        if turn >= 3:
-            my_two_back = self.history[-2]
-            opp_two_back = opponent.history[-2]
-        if (
-            self.history[-1] != opponent.history[-1]
-            and self.history[-1] == opp_two_back
-            and opponent.history[-1] == my_two_back
-        ):
-            self.echo_streak += 1
-        else:
-            self.echo_streak = 0
-        if self.echo_streak >= 3:
-            self.mutual_defect_streak = 0  # Reset both streaks.
-            self.echo_streak = 0
-            self.flip_next_defect = True
-
-        # Tit-for-tat
-        return self.try_return(opponent.history[-1])
-
-
-class SecondByCave(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Rob Cave (K49R), and
-    came in fourth in that tournament.
-
-    First look for overly-defective or apparently random opponents, and defect
-    if found. That is any opponent meeting one of:
-
-    - turn > 39 and percent defects > 0.39
-    - turn > 29 and percent defects > 0.65
-    - turn > 19 and percent defects > 0.79
-
-    Otherwise, respond to cooperation with cooperation. And respond to defects
-    with either a defect (if opponent has defected at least 18 times) or with
-    a random (50/50) choice. [Cooperate on first.]
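# A minimal sketch of the screening thresholds listed above (hypothetical
# stand-alone helper; the class below embeds the same test in strategy()).
def cave_gives_up_on(turn, opponent_defections):
    perc_defects = opponent_defections / turn
    return ((turn > 39 and perc_defects > 0.39)
            or (turn > 29 and perc_defects > 0.65)
            or (turn > 19 and perc_defects > 0.79))

assert cave_gives_up_on(40, 17)      # 42.5% defections by turn 40
assert not cave_gives_up_on(30, 12)  # 40% defections is tolerated at turn 30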
-
-    Names:
-
-    - Cave: [Axelrod1980b]_
-    """
-
-    name = "Second by Cave"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        turn = len(self.history) + 1
-        if turn == 1:
-            return C
-
-        number_defects = opponent.defections
-        perc_defects = number_defects / turn
-
-        # Defect if the opponent has defected often or appears random.
-        if turn > 39 and perc_defects > 0.39:
-            return D
-        if turn > 29 and perc_defects > 0.65:
-            return D
-        if turn > 19 and perc_defects > 0.79:
-            return D
-
-        if opponent.history[-1] == D:
-            if number_defects > 17:
-                return D
-            else:
-                return random_choice(0.5)
-        else:
-            return C
-
-
-class SecondByWmAdams(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by William Adams (K44R),
-    and came in fifth in that tournament.
-
-    Count the number of opponent defections after their first move, call it
-    `c_defect`. Defect if c_defect equals 4, 7, or 9. If c_defect > 9, then
-    immediately after an opponent defection, cooperate with probability
-    (0.5) ^ (c_defect - 9), and so defect with the complementary probability.
-    Otherwise cooperate.
-
-    Names:
-
-    - WmAdams: [Axelrod1980b]_
-    """
-
-    name = "Second by WmAdams"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        if len(self.history) <= 1:
-            return C
-        number_defects = opponent.defections
-        if opponent.history[0] == D:
-            number_defects -= 1
-
-        if number_defects in [4, 7, 9]:
-            return D
-        if number_defects > 9 and opponent.history[-1] == D:
-            return random_choice((0.5) ** (number_defects - 9))
-        return C
-
-
-class SecondByGraaskampKatzen(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Jim Graaskamp and Ken
-    Katzen (K60R), and came in sixth in that tournament.
-
-    Play Tit-for-Tat at first, and track own score. At select checkpoints,
-    check for a high score. Switch to Defect Mode if:
-
-    - On move 11, score < 23
-    - On move 21, score < 53
-    - On move 31, score < 83
-    - On move 41, score < 113
-    - On move 51, score < 143
-    - On move 101, score < 293
-
-    Once in Defect Mode, defect forever.
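    Illustrative check (not part of the original code): every checkpoint
    threshold above equals 3 * move - 10, i.e. Defect Mode triggers once the
    strategy is averaging meaningfully below 3 points per move:

        for move, threshold in [(11, 23), (21, 53), (31, 83),
                                (41, 113), (51, 143), (101, 293)]:
            assert threshold == 3 * move - 10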
-
-    Names:
-
-    - GraaskampKatzen: [Axelrod1980b]_
-    """
-
-    name = "Second by GraaskampKatzen"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(["game"]),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self):
-        super().__init__()
-        self.own_score = 0
-        self.mode = "Normal"
-
-    def update_score(self, opponent: IpdPlayer):
-        game = self.match_attributes["game"]
-        last_round = (self.history[-1], opponent.history[-1])
-        self.own_score += game.score(last_round)[0]
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        if self.mode == "Defect":
-            return D
-
-        turn = len(self.history) + 1
-        if turn == 1:
-            return C
-
-        self.update_score(opponent)
-
-        if (
-            turn == 11
-            and self.own_score < 23
-            or turn == 21
-            and self.own_score < 53
-            or turn == 31
-            and self.own_score < 83
-            or turn == 41
-            and self.own_score < 113
-            or turn == 51
-            and self.own_score < 143
-            or turn == 101
-            and self.own_score < 293
-        ):
-            self.mode = "Defect"
-            return D
-
-        return opponent.history[-1]  # Tit-for-Tat
-
-
-class SecondByWeiner(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Herb Weiner (K41R),
-    and came in seventh in that tournament.
-
-    Play Tit-for-Tat with a chance for forgiveness and a defective override.
-
-    The chance for forgiveness happens only if `forgive_flag` is raised
-    (flag discussed below). If raised and `turn` is greater than `grudge`,
-    then override Tit-for-Tat with Cooperation. `grudge` is a variable that
-    starts at 0 and increases by 20 with each forgiven Defect (a Defect that
-    is overridden through the forgiveness logic). `forgive_flag` is lowered
-    whether or not the logic is overridden.
-
-    The variable `defect_padding` increments with each opponent Defect, but
-    resets to zero with each opponent Cooperate (or `forgive_flag` lowering) so
-    that it roughly counts Defects between Cooperates. Whenever the opponent
-    Cooperates, if `defect_padding` (before resetting) is odd, then we raise
-    `forgive_flag` for next turn.
-
-    Finally a defective override is assessed after forgiveness. If five or
-    more of the opponent's last twelve actions are Defects, then Defect. This
-    will overrule a forgiveness, but doesn't undo the lowering of
-    `forgive_flag`. Note that "last twelve actions" doesn't count the most
-    recent action, because the original code updates history after checking
-    for the defect override.
-
-    Names:
-
-    - Weiner: [Axelrod1980b]_
-    """
-
-    name = "Second by Weiner"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self):
-        super().__init__()
-        self.forgive_flag = False
-        self.grudge = 0
-        self.defect_padding = 0
-        self.last_twelve = [0] * 12
-        self.lt_index = 0  # Circles around last_twelve
-
-    def try_return(self, to_return):
-        """
-        We put the logic here to check for the defective override.
-        """
-
-        if np.sum(self.last_twelve) >= 5:
-            return D
-        return to_return
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        if len(opponent.history) == 0:
-            return C
-
-        # Update history, lag 1.
- if len(opponent.history) >= 2: - self.last_twelve[self.lt_index] = 0 - if opponent.history[-2] == D: - self.last_twelve[self.lt_index] = 1 - self.lt_index = (self.lt_index + 1) % 12 - - if self.forgive_flag: - self.forgive_flag = False - self.defect_padding = 0 - if self.grudge < len(self.history) + 1 and opponent.history[-1] == D: - # Then override - self.grudge += 20 - return self.try_return(C) - else: - return self.try_return(opponent.history[-1]) - else: - # See if forgive_flag should be raised - if opponent.history[-1] == D: - self.defect_padding += 1 - else: - if self.defect_padding % 2 == 1: - self.forgive_flag = True - self.defect_padding = 0 - - return self.try_return(opponent.history[-1]) - - -class SecondByHarrington(IpdPlayer): - """ - Strategy submitted to Axelrod's second tournament by Paul Harrington (K75R) - and came in eighth in that tournament. - - This strategy has three modes: Normal, Fair-weather, and Defect. These - mode names were not present in Harrington's submission. - - In Normal and Fair-weather modes, the strategy begins by: - - - Update history - - Try to detect random opponent if turn is multiple of 15 and >=30. - - Check if `burned` flag should be raised. - - Check for Fair-weather opponent if turn is 38. - - Updating history means to increment the correct cell of the `move_history`. - `move_history` is a matrix where the columns are the opponent's previous - move and the rows are indexed by the combo of this player's and the - opponent's moves two turns ago. [The upper-left cell must be all - Cooperations, but otherwise order doesn't matter.] After we enter Defect - mode, `move_history` won't be used again. - - If the turn is a multiple of 15 and >=30, then attempt to detect random. - If random is detected, enter Defect mode and defect immediately. If the - player was previously in Defect mode, then do not re-enter. The random - detection logic is a modified Pearson's Chi Squared test, with some - additional checks. [More details in `detect_random` docstrings.] - - Some of this player's moves are marked as "generous." If this player made - a generous move two turns ago and the opponent replied with a Defect, then - raise the `burned` flag. This will stop certain generous moves later. - - The player mostly plays Tit-for-Tat for the first 36 moves, then defects on - the 37th move. If the opponent cooperates on the first 36 moves, and - defects on the 37th move also, then enter Fair-weather mode and cooperate - this turn. Entering Fair-weather mode is extremely rare, since this can - only happen if the opponent cooperates for the first 36 then defects - unprovoked on the 37th. (That is, this player's first 36 moves are also - Cooperations, so there's nothing really to trigger an opponent Defection.) - - Next in Normal Mode: - - 1. Check for defect and parity streaks. - 2. Check if cooperations are scheduled. - 3. Otherwise, - - - If turn < 37, Tit-for-Tat. - - If turn = 37, defect, mark this move as generous, and schedule two - more cooperations**. - - If turn > 37, then if `burned` flag is raised, then Tit-for-Tat. - Otherwise, Tit-for-Tat with probability 1 - `prob`. And with - probability `prob`, defect, schedule two cooperations, mark this move - as generous, and increase `prob` by 5%. - - ** Scheduling two cooperations means to set `more_coop` flag to two. If in - Normal mode and no streaks are detected, then the player will cooperate and - lower this flag, until hitting zero. It's possible that the flag can be - overwritten. 
-    Notably, on the 37th turn defect this is set to two, but the 38th turn
-    Fair-weather check can overwrite it.
-
-    If the opponent's last twenty moves were defections, then defect this turn.
-    Then check for a parity streak, by flipping the parity bit (there are two
-    streaks that get tracked which are something like odd and even turns, but
-    this flip bit logic doesn't get run every turn), then incrementing the
-    parity streak that we're pointing to. If the parity streak that we're
-    pointing to is then at least `parity_limit`, then reset the streak and
-    cooperate immediately. `parity_limit` is initially set to five, but after
-    it has been hit eight times, it decreases to three. The parity streak that
-    we're pointing to also gets incremented if in normal mode and we defect but
-    not on turn 38, unless we are defecting as the result of a defect streak.
-    Note that the parity streak resets but the defect streak doesn't.
-
-    If `more_coop` >= 1, then we cooperate and lower that flag here, in Normal
-    mode after checking streaks. Still lower this flag if cooperating as the
-    result of a parity streak or in Fair-weather mode.
-
-    Then use the logic based on turn from above.
-
-    In Fair-weather mode after running the code from above, check if opponent
-    defected last turn. If so, exit Fair-weather mode, and proceed THIS TURN
-    with Normal mode. Otherwise cooperate.
-
-    In Defect mode, update the `exit_defect_meter` (originally zero) by
-    incrementing if opponent defected last turn and decreasing by three
-    otherwise. If `exit_defect_meter` then reaches 11, then set mode to Normal
-    (for future turns), cooperate and schedule two more cooperations. [Note
-    that this move is not marked generous.]
-
-    Names:
-
-    - Harrington: [Axelrod1980b]_
-    """
-
-    name = "Second by Harrington"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self):
-        super().__init__()
-        self.mode = "Normal"
-        self.recorded_defects = 0  # Count opponent defects after turn 1
-        self.exit_defect_meter = 0  # When >= 11, then exit defect mode.
-        self.coops_in_first_36 = None  # On turn 37, count cooperations in first 36
-        self.was_defective = False  # Previously in Defect mode
-
-        self.prob = 0.25  # After turn 37, probability that we'll defect
-
-        self.move_history = np.zeros([4, 2])
-
-        self.more_coop = 0  # This schedules cooperation for future turns
-        # Initial last_generous_n_turns_ago to 3 because this counts up and
-        # triggers a strategy change at 2.
-        self.last_generous_n_turns_ago = 3  # How many turns ago was a "generous" move
-        self.burned = False
-
-        self.defect_streak = 0
-        self.parity_streak = [
-            0,
-            0,
-        ]  # Counters that get (almost) alternately incremented.
-        self.parity_bit = 0  # Which parity_streak to increment
-        self.parity_limit = 5  # When a parity streak hits this limit, alter strategy.
-        self.parity_hits = 0  # Counts how many times a parity_limit was hit.
-        # After hitting parity_hits 8 times, lower parity_limit to 3.
-
-    def try_return(self, to_return, lower_flags=True, inc_parity=False):
-        """
-        This will return to_return, with some end-of-turn logic.
-        """
-
-        if lower_flags and to_return == C:
-            # In most cases when Cooperating, we want to reduce the number that
-            # are scheduled.
-            self.more_coop -= 1
-            self.last_generous_n_turns_ago += 1
-
-        if inc_parity and to_return == D:
-            # In some cases we increment the `parity_streak` that we're on when
-            # we return a Defection. In detect_parity_streak, `parity_streak`
-            # counts opponent's Defections.
-            self.parity_streak[self.parity_bit] += 1
-
-        return to_return
-
-    def calculate_chi_squared(self, turn):
-        """
-        Pearson's Chi Squared statistic = sum[ (E_i-O_i)^2 / E_i ], where O_i
-        are the observed matrix values, and E_i is calculated as the number
-        (of defects) in the row times the number in the column over (total
-        number in the matrix minus 1). Equivalently, for an independent
-        distribution, we expect the total number of recorded turns times the
-        portion in that row times the portion in that column.
-
-        In this function, the statistic is non-standard in that it excludes
-        summands where E_i <= 1.
-        """
-
-        denom = turn - 2
-
-        expected_matrix = (
-            np.outer(self.move_history.sum(axis=1), self.move_history.sum(axis=0))
-            / denom
-        )
-
-        chi_squared = 0.0
-        for i in range(4):
-            for j in range(2):
-                expect = expected_matrix[i, j]
-                if expect > 1.0:
-                    chi_squared += (expect - self.move_history[i, j]) ** 2 / expect
-
-        return chi_squared
-
-    def detect_random(self, turn):
-        """
-        We check if the top-left cell of the matrix (corresponding to all
-        Cooperations) has over 80% of the turns. In which case, we label
-        non-random.
-
-        Then we check if over 75% or under 25% of the opponent's turns are
-        Defections. If so, then we label as non-random.
-
-        Otherwise we calculate a modified Pearson's Chi Squared statistic on
-        self.history, and return True (is random) if and only if the statistic
-        is less than or equal to 3.
-        """
-
-        denom = turn - 2
-
-        if self.move_history[0, 0] / denom >= 0.8:
-            return False
-        if self.recorded_defects / denom < 0.25 or self.recorded_defects / denom > 0.75:
-            return False
-
-        if self.calculate_chi_squared(turn) > 3:
-            return False
-        return True
-
-    def detect_streak(self, last_move):
-        """
-        Return true if and only if the opponent's last twenty moves are defects.
-        """
-
-        if last_move == D:
-            self.defect_streak += 1
-        else:
-            self.defect_streak = 0
-        if self.defect_streak >= 20:
-            return True
-        return False
-
-    def detect_parity_streak(self, last_move):
-        """
-        Switch which `parity_streak` we're pointing to and increment it if the
-        opponent's last move was a Defection. Otherwise reset the flag. Then
-        return true if and only if the `parity_streak` is at least
-        `parity_limit`.
-
-        This is similar to detect_streak with alternating streaks, except that
-        these streaks get incremented elsewhere as well.
-        """
-
-        self.parity_bit = 1 - self.parity_bit  # Flip bit
-        if last_move == D:
-            self.parity_streak[self.parity_bit] += 1
-        else:
-            self.parity_streak[self.parity_bit] = 0
-        if self.parity_streak[self.parity_bit] >= self.parity_limit:
-            return True
-        return False
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        turn = len(self.history) + 1
-
-        if turn == 1:
-            return C
-
-        if self.mode == "Defect":
-            # There's a chance to exit Defect mode.
-            if opponent.history[-1] == D:
-                self.exit_defect_meter += 1
-            else:
-                self.exit_defect_meter -= 3
-            # If opponent has been mostly defecting.
-            if self.exit_defect_meter >= 11:
-                self.mode = "Normal"
-                self.was_defective = True
-                self.more_coop = 2
-                return self.try_return(to_return=C, lower_flags=False)
-
-            return self.try_return(D)
-
-        # If not Defect mode, proceed to update history and check for random,
-        # check if burned, and check if opponent's fairweather.
-
-        # If we haven't yet entered Defect mode
-        if not self.was_defective:
-            if turn > 2:
-                if opponent.history[-1] == D:
-                    self.recorded_defects += 1
-
-                # Column decided by opponent's last turn
-                history_col = 1 if opponent.history[-1] == D else 0
-                # Row is decided by opponent's move two turns ago and our move
-                # two turns ago.
-                history_row = 1 if opponent.history[-2] == D else 0
-                if self.history[-2] == D:
-                    history_row += 2
-                self.move_history[history_row, history_col] += 1
-
-            # Try to detect random opponent
-            if turn % 15 == 0 and turn > 15:
-                if self.detect_random(turn):
-                    self.mode = "Defect"
-                    return self.try_return(
-                        D, lower_flags=False
-                    )  # Lower_flags not used here.
-
-        # If generous 2 turns ago and opponent defected last turn
-        if self.last_generous_n_turns_ago == 2 and opponent.history[-1] == D:
-            self.burned = True
-
-        # Only enter Fair-weather mode if the opponent Cooperated the first 36
-        # turns then Defected on the 37th.
-        if turn == 38 and opponent.history[-1] == D and opponent.cooperations == 36:
-            self.mode = "Fair-weather"
-            return self.try_return(to_return=C, lower_flags=False)
-
-        if self.mode == "Fair-weather":
-            if opponent.history[-1] == D:
-                self.mode = "Normal"  # Post-Defect is not possible
-                # Proceed with Normal mode this turn.
-            else:
-                # Never defect against a fair-weather opponent
-                return self.try_return(C)
-
-        # Continue with Normal mode
-
-        # Check for streaks
-        if self.detect_streak(opponent.history[-1]):
-            return self.try_return(D, inc_parity=True)
-        if self.detect_parity_streak(opponent.history[-1]):
-            self.parity_streak[
-                self.parity_bit
-            ] = 0  # Reset `parity_streak` when we hit the limit.
-            self.parity_hits += 1  # Keep track of how many times we hit the limit.
-            if self.parity_hits >= 8:  # After 8 times, lower the limit.
-                self.parity_limit = 3
-            return self.try_return(
-                C, inc_parity=True
-            )  # Inc parity won't get used here.
-
-        # If we have Cooperations scheduled, then Cooperate here.
-        if self.more_coop >= 1:
-            return self.try_return(C, lower_flags=True, inc_parity=True)
-
-        if turn < 37:
-            # Tit-for-Tat
-            return self.try_return(opponent.history[-1], inc_parity=True)
-        if turn == 37:
-            # Defect once on turn 37 (if no streaks)
-            self.more_coop, self.last_generous_n_turns_ago = 2, 1
-            return self.try_return(D, lower_flags=False)
-        if self.burned or random.random() > self.prob:
-            # Tit-for-Tat with probability 1-`prob`
-            return self.try_return(opponent.history[-1], inc_parity=True)
-
-        # Otherwise Defect, Cooperate, Cooperate, and increase `prob`
-        self.prob += 0.05
-        self.more_coop, self.last_generous_n_turns_ago = 2, 1
-        return self.try_return(D, lower_flags=False)
-
-
-class SecondByTidemanAndChieruzzi(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by T. Nicolaus Tideman
-    and Paula Chieruzzi (K84R) and came in ninth in that tournament.
-
-    This strategy Cooperates if this player's score exceeds the opponent's
-    score by at least `score_to_beat`. `score_to_beat` starts at zero and
-    increases by `score_to_beat_inc` every time the opponent's last two moves
-    are a Cooperation and Defection in that order.
-    `score_to_beat_inc` itself increases by 5 every time the opponent's last
-    two moves are a Cooperation and Defection in that order.
-
-    Additionally, the strategy executes a "fresh start" if the following hold:
-
-    - The strategy would Defect by score (difference less than `score_to_beat`)
-    - The opponent did not Cooperate and Defect (in order) in the last two
-      turns.
-    - It's been at least 10 turns since the last fresh start (or since the
-      match started if there hasn't been a fresh start yet).
-
-    A "fresh start" entails two Cooperations and resetting scores,
-    `score_to_beat` and `score_to_beat_inc`.
-
-    Names:
-
-    - TidemanAndChieruzzi: [Axelrod1980b]_
-    """
-
-    name = "Second by Tideman and Chieruzzi"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": {"game"},
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        super().__init__()
-        self.current_score = 0
-        self.opponent_score = 0
-        self.last_fresh_start = 0
-        self.fresh_start = False
-        self.score_to_beat = 0
-        self.score_to_beat_inc = 0
-
-    def _fresh_start(self):
-        """Give the opponent a fresh start by forgetting the past"""
-        self.current_score = 0
-        self.opponent_score = 0
-        self.score_to_beat = 0
-        self.score_to_beat_inc = 0
-
-    def _score_last_round(self, opponent: IpdPlayer):
-        """Updates the scores for each player."""
-        # Load the default game if not supplied by a tournament.
-        game = self.match_attributes["game"]
-        last_round = (self.history[-1], opponent.history[-1])
-        scores = game.score(last_round)
-        self.current_score += scores[0]
-        self.opponent_score += scores[1]
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        current_round = len(self.history) + 1
-
-        if current_round == 1:
-            return C
-
-        # Calculate the scores.
-        self._score_last_round(opponent)
-
-        # Check if we have recently given the strategy a fresh start.
-        if self.fresh_start:
-            self._fresh_start()
-            self.last_fresh_start = current_round
-            self.fresh_start = False
-            return C  # Second cooperation
-
-        opponent_CDd = False
-
-        opponent_two_turns_ago = C  # Default value for second turn.
-        if len(opponent.history) >= 2:
-            opponent_two_turns_ago = opponent.history[-2]
-        # If opponent's last two turns are C and D in that order.
-        if opponent_two_turns_ago == C and opponent.history[-1] == D:
-            opponent_CDd = True
-            self.score_to_beat += self.score_to_beat_inc
-            self.score_to_beat_inc += 5
-
-        # Cooperate if we're beating opponent by at least `score_to_beat`
-        if self.current_score - self.opponent_score >= self.score_to_beat:
-            return C
-
-        # Wait at least ten turns for another fresh start.
-        if (not opponent_CDd) and current_round - self.last_fresh_start >= 10:
-            # 50-50 split is based off the binomial distribution.
-            N = opponent.cooperations + opponent.defections
-            # std_dev = sqrt(N*p*(1-p)) where p is 1 / 2.
-            std_deviation = (N ** (1 / 2)) / 2
-            lower = N / 2 - 3 * std_deviation
-            upper = N / 2 + 3 * std_deviation
-            if opponent.defections <= lower or opponent.defections >= upper:
-                # Opponent deserves a fresh start
-                self.fresh_start = True
-                return C  # First cooperation
-
-        return D
-
-
-class SecondByGetzler(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Abraham Getzler (K35R)
-    and came in eleventh in that tournament.
-
-    Strategy Defects with probability `flack`, where `flack` is calculated as
-    the sum over opponent Defections of 0.5 ^ (turns ago the Defection
-    happened).
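    A small standalone check (illustrative, not from the original code) that
    the update in the code below -- add 1 on a Defection, then halve -- yields
    exactly that sum; `history` here is a hypothetical sequence with True
    marking an opponent Defection:

        history = [True, False, True, True]
        flack = 0.0
        for defected in history:
            flack += 1 if defected else 0
            flack *= 0.5  # half-life of one round
        expected = sum(0.5 ** (len(history) - i)
                       for i, d in enumerate(history) if d)
        assert abs(flack - expected) < 1e-12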
- - Names: - - - Getzler: [Axelrod1980b]_ - """ - - name = "Second by Getzler" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.flack = 0.0 # The relative untrustworthiness of opponent - - def strategy(self, opponent: IpdPlayer) -> Action: - if not opponent.history: - return C - - self.flack += 1 if opponent.history[-1] == D else 0 - self.flack *= 0.5 # Defections have half-life of one round - - return random_choice(1.0 - self.flack) - - -class SecondByLeyvraz(IpdPlayer): - """ - Strategy submitted to Axelrod's second tournament by Fransois Leyvraz - (K68R) and came in twelfth in that tournament. - - The strategy uses the opponent's last three moves to decide on an action - based on the following ordered rules. - - 1. If opponent Defected last two turns, then Defect with prob 75%. - 2. If opponent Defected three turns ago, then Cooperate. - 3. If opponent Defected two turns ago, then Defect. - 4. If opponent Defected last turn, then Defect with prob 50%. - 5. Otherwise (all Cooperations), then Cooperate. - - Names: - - - Leyvraz: [Axelrod1980b]_ - """ - - name = "Second by Leyvraz" - classifier = { - "memory_depth": 3, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.prob_coop = { - (C, C, C): 1.0, - (C, C, D): 0.5, # Rule 4 - (C, D, C): 0.0, # Rule 3 - (C, D, D): 0.25, # Rule 1 - (D, C, C): 1.0, # Rule 2 - (D, C, D): 1.0, # Rule 2 - (D, D, C): 1.0, # Rule 2 - (D, D, D): 0.25, # Rule 1 - } - - def strategy(self, opponent: IpdPlayer) -> Action: - recent_history = [C, C, C] # Default to C. - for go_back in range(1, 4): - if len(opponent.history) >= go_back: - recent_history[-go_back] = opponent.history[-go_back] - - return random_choice( - self.prob_coop[(recent_history[-3], recent_history[-2], recent_history[-1])] - ) - - -class SecondByWhite(IpdPlayer): - """ - Strategy submitted to Axelrod's second tournament by Edward C White (K72R) - and came in thirteenth in that tournament. - - * If the opponent Cooperated last turn or in the first ten turns, then - Cooperate. - * Otherwise Defect if and only if: - floor(log(turn)) * opponent Defections >= turn - - Names: - - - White: [Axelrod1980b]_ - """ - - name = "Second by White" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - turn = len(self.history) + 1 - - if turn <= 10 or opponent.history[-1] == C: - return C - - if np.floor(np.log(turn)) * opponent.defections >= turn: - return D - return C - - -class SecondByBlack(IpdPlayer): - """ - Strategy submitted to Axelrod's second tournament by Paul E Black (K83R) - and came in fifteenth in that tournament. - - The strategy Cooperates for the first five turns. 
-    Then it calculates the number of opponent defects in the last five moves
-    and Cooperates with probability `prob_coop`[`number_defects`], where:
-
-        prob_coop[number_defects] = 1 - (number_defects ^ 2 - 1) / 25
-
-    (For zero defections, the table in the code pins the probability to 1.0.)
-
-    Names:
-
-    - Black: [Axelrod1980b]_
-    """
-
-    name = "Second by Black"
-    classifier = {
-        "memory_depth": 5,
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        super().__init__()
-        # Maps number of opponent defects from last five moves to own
-        # Cooperation probability
-        self.prob_coop = {0: 1.0, 1: 1.0, 2: 0.88, 3: 0.68, 4: 0.4, 5: 0.04}
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        if len(opponent.history) < 5:
-            return C
-
-        recent_history = opponent.history[-5:]
-
-        did_d = np.vectorize(lambda action: int(action == D))
-        number_defects = np.sum(did_d(recent_history))
-
-        return random_choice(self.prob_coop[number_defects])
-
-
-class SecondByRichardHufford(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Richard Hufford (K47R)
-    and came in sixteenth in that tournament.
-
-    The strategy tracks opponent "agreements", that is whenever the opponent's
-    previous move is the same as this player's move two turns ago. If the
-    opponent's first move is a Defection, this is counted as a disagreement,
-    and otherwise an agreement. From the agreement counts, two measures are
-    calculated:
-
-    - `proportion_agree`: This is the number of agreements (through opponent's
-      last turn) + 2 divided by the current turn number.
-    - `last_four_num`: The number of agreements in the last four turns. If
-      there have been fewer than four previous turns, then this is the number
-      of agreements + (4 - number of past turns).
-
-    We then use these measures to decide how to play, using these rules:
-
-    1. If `proportion_agree` > 0.9 and `last_four_num` >= 4, then Cooperate.
-    2. Otherwise if `proportion_agree` >= 0.625 and `last_four_num` >= 2, then
-       Tit-for-Tat.
-    3. Otherwise, Defect.
-
-    However, if the opponent has Cooperated the last `streak_needed` turns,
-    then the strategy deviates from the usual strategy, and instead Defects.
-    (We call such deviation an "aberration".) In the turn immediately after an
-    aberration, the strategy doesn't override, even if there's a streak of
-    Cooperations. Two turns after an aberration, the strategy: Restarts the
-    Cooperation streak (never looking before this turn); Cooperates; and
-    changes `streak_needed` to:
-
-        floor(20.0 * `num_abb_def` / `num_abb_coop`) + 1
-
-    Here `num_abb_def` is 2 + the number of times that the opponent Defected
-    in the turn after an aberration, and `num_abb_coop` is 2 + the number of
-    times that the opponent Cooperated in response to an aberration.
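    An illustrative helper (not in the original code) for the update above;
    with both counts at their initial value of 2 it reproduces the starting
    `streak_needed` of 21 used in `__init__` below:

        import math

        def new_streak_needed(num_abb_def, num_abb_coop):
            return math.floor(20.0 * num_abb_def / num_abb_coop) + 1

        assert new_streak_needed(2, 2) == 21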
- - Names: - - - RichardHufford: [Axelrod1980b]_ - """ - - name = "Second by RichardHufford" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.num_agreements = 2 - self.last_four_agreements = [1] * 4 - self.last_four_index = 0 - - self.streak_needed = 21 - self.current_streak = 2 - self.last_aberration = float("inf") - self.coop_after_ab_count = 2 - self.def_after_ab_count = 2 - - def strategy(self, opponent: IpdPlayer) -> Action: - turn = len(self.history) + 1 - if turn == 1: - return C - - # Check if opponent agreed with us. - self.last_four_index = (self.last_four_index + 1) % 4 - me_two_moves_ago = C - if turn > 2: - me_two_moves_ago = self.history[-2] - if me_two_moves_ago == opponent.history[-1]: - self.num_agreements += 1 - self.last_four_agreements[self.last_four_index] = 1 - else: - self.last_four_agreements[self.last_four_index] = 0 - - # Check if last_aberration is infinite. - # i.e Not an aberration in last two turns. - if turn < self.last_aberration: - if opponent.history[-1] == C: - self.current_streak += 1 - else: - self.current_streak = 0 - if self.current_streak >= self.streak_needed: - self.last_aberration = turn - if self.current_streak == self.streak_needed: - return D - elif turn == self.last_aberration + 2: - self.last_aberration = float("inf") - if opponent.history[-1] == C: - self.coop_after_ab_count += 1 - else: - self.def_after_ab_count += 1 - self.streak_needed = ( - np.floor(20.0 * self.def_after_ab_count / self.coop_after_ab_count) + 1 - ) - self.current_streak = 0 - return C - - proportion_agree = self.num_agreements / turn - last_four_num = np.sum(self.last_four_agreements) - if proportion_agree > 0.9 and last_four_num >= 4: - return C - elif proportion_agree >= 0.625 and last_four_num >= 2: - return opponent.history[-1] - return D - - -class SecondByYamachi(IpdPlayer): - """ - Strategy submitted to Axelrod's second tournament by Brian Yamachi (K64R) - and came in seventeenth in that tournament. - - The strategy keeps track of play history through a variable called - `count_them_us_them`, which is a dict indexed by (X, Y, Z), where X is an - opponent's move and Y and Z are the following moves by this player and the - opponent, respectively. Each turn, we look at our opponent's move two - turns ago, call X, and our move last turn, call Y. If (X, Y, C) has - occurred more often (or as often) as (X, Y, D), then Cooperate. Otherwise - Defect. [Note that this reflects likelihood of Cooperations or Defections - in opponent's previous move; we don't update `count_them_us_them` with - previous move until next turn.] - - Starting with the 41st turn, there's a possibility to override this - behavior. If `portion_defect` is between 45% and 55% (exclusive), then - Defect, where `portion_defect` equals number of opponent defects plus 0.5 - divided by the turn number (indexed by 1). When overriding this way, still - record `count_them_us_them` as though the strategy didn't override. 
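    A minimal sketch (not part of the original code) of that lookup, with
    plain "C"/"D" strings standing in for Actions and `counts` standing in
    for `count_them_us_them` (e.g. a collections.Counter):

        def choose(counts, them_two_ago, us_last):
            # Cooperate when (X, Y, C) has been seen at least as often as
            # (X, Y, D).
            if counts[them_two_ago, us_last, "C"] >= counts[them_two_ago, us_last, "D"]:
                return "C"
            return "D"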
-
-    Names:
-
-    - Yamachi: [Axelrod1980b]_
-    """
-
-    name = "Second by Yamachi"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        super().__init__()
-        self.count_them_us_them = {
-            (C, C, C): 0,
-            (C, C, D): 0,
-            (C, D, C): 0,
-            (C, D, D): 0,
-            (D, C, C): 0,
-            (D, C, D): 0,
-            (D, D, C): 0,
-            (D, D, D): 0,
-        }
-        self.mod_history = list()  # type: List[Action]
-
-    def try_return(self, to_return, opp_def):
-        """
-        Return `to_return`, unless the turn is greater than 40 AND
-        `portion_defect` is between 45% and 55%.
-
-        In this case, still record the history as `to_return` so that the
-        modified behavior doesn't affect the calculation of
-        `count_them_us_them`.
-        """
-        turn = len(self.history) + 1
-
-        self.mod_history.append(to_return)
-
-        # In later turns, check if the opponent is close to 50/50
-        # If so, then override
-        if turn > 40:
-            portion_defect = (opp_def + 0.5) / turn
-            if 0.45 < portion_defect < 0.55:
-                return D
-
-        return to_return
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        turn = len(self.history) + 1
-        if turn == 1:
-            return self.try_return(C, 0)
-
-        us_last = self.mod_history[-1]
-        them_two_ago, us_two_ago, them_three_ago = C, C, C
-        if turn >= 3:
-            them_two_ago = opponent.history[-2]
-            us_two_ago = self.mod_history[-2]
-        if turn >= 4:
-            them_three_ago = opponent.history[-3]
-
-        # Update history
-        if turn >= 3:
-            self.count_them_us_them[(them_three_ago, us_two_ago, them_two_ago)] += 1
-
-        if (
-            self.count_them_us_them[(them_two_ago, us_last, C)]
-            >= self.count_them_us_them[(them_two_ago, us_last, D)]
-        ):
-            return self.try_return(C, opponent.defections)
-        return self.try_return(D, opponent.defections)
-
-
-class SecondByColbert(FSMPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by William Colbert (K51R)
-    and came in eighteenth in that tournament.
-
-    In the first eight turns, this strategy Cooperates on all but the sixth
-    turn, in which it Defects. After that, the strategy responds to an
-    opponent Cooperation with a single Cooperation, and responds to a Defection
-    with a chain of responses: Defect, Defect, Cooperate, Cooperate. During
-    this chain, the strategy ignores opponent's moves.
-
-    Names:
-
-    - Colbert: [Axelrod1980b]_
-    """
-
-    name = "Second by Colbert"
-    classifier = {
-        "memory_depth": 4,
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        transitions = (
-            (0, C, 1, C),
-            (0, D, 1, C),  # First 8 turns are special
-            (1, C, 2, C),
-            (1, D, 2, C),
-            (2, C, 3, C),
-            (2, D, 3, C),
-            (3, C, 4, C),
-            (3, D, 4, C),
-            (4, C, 5, D),
-            (4, D, 5, D),  # Defect on 6th turn.
-            (5, C, 6, C),
-            (5, D, 6, C),
-            (6, C, 7, C),
-            (6, D, 7, C),
-            (7, C, 7, C),
-            (7, D, 8, D),
-            (8, C, 9, D),
-            (8, D, 9, D),
-            (9, C, 10, C),
-            (9, D, 10, C),
-            (10, C, 7, C),
-            (10, D, 7, C),
-        )
-
-        super().__init__(transitions=transitions, initial_state=0, initial_action=C)
-
-
-class SecondByMikkelson(FSMPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Ray Mikkelson (K66R)
-    and came in twentieth in that tournament.
-
-    The strategy keeps track of a variable called `credit`, which determines if
-    the strategy will Cooperate, in the sense that if `credit` is positive,
-    then the strategy Cooperates. `credit` is initialized to 7. After the
-    first turn, `credit` increments if the opponent Cooperated last turn, and
-    decreases by two otherwise. `credit` is capped above by 8 and below by -7.
-    [`credit` is assessed as positive or negative, after increasing based on
-    opponent's last turn.]
-
-    If `credit` is non-positive within the first ten turns, then the strategy
-    Defects and `credit` is set to 4. If `credit` is non-positive later, then
-    the strategy Defects if and only if (total # opponent Defections) / (turn#)
-    is at least 15%. [Turn # starts at 1.]
-
-    Names:
-
-    - Mikkelson: [Axelrod1980b]_
-    """
-
-    name = "Second by Mikkelson"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        super().__init__()
-        self.credit = 7
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        turn = len(self.history) + 1
-        if turn == 1:
-            return C
-
-        if opponent.history[-1] == C:
-            self.credit += 1
-            if self.credit > 8:
-                self.credit = 8
-        else:
-            self.credit -= 2
-            if self.credit < -7:
-                self.credit = -7
-
-        if turn == 2:
-            return C
-        if self.credit > 0:
-            return C
-        if turn <= 10:
-            self.credit = 4
-            return D
-        if opponent.defections / turn >= 0.15:
-            return D
-        return C
-
-
-class SecondByRowsam(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Glen Rowsam (K58R)
-    and came in 21st in that tournament.
-
-    The strategy starts in Normal mode, where it cooperates every turn. Every
-    six turns it checks the score per turn. [Rather, the score of all previous
-    turns divided by the turn number, which will be one more than the number of
-    turns scored.] If this measure is less than 2.5 (the strategy is doing
-    badly), it increases `distrust_points`. `distrust_points` is a variable
-    that starts at 0; if it ever exceeds 6 points, the strategy will enter
-    Defect mode and defect from then on. It will increase `distrust_points`
-    depending on the precise score per turn according to:
-
-    - 5 points if score per turn is less than 1.0
-    - 3 points if score per turn is less than 1.5, but at least 1.0
-    - 2 points if score per turn is less than 2.0, but at least 1.5
-    - 1 point if score per turn is less than 2.5, but at least 2.0
-
-    If `distrust_points` are increased, then the strategy defects on that turn,
-    then cooperates and defects on the next two turns. [Unless
-    `distrust_points` exceeds 6 points, in which case it will enter Defect mode
-    immediately.]
-
-    Every 18 turns in Normal mode, the strategy will decrement
-    `distrust_points` if it's at least 3. This represents a wearing off effect
-    of distrust.
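    The bands above, restated as an illustrative helper (not part of the
    original code):

        def distrust_increment(points_per_turn):
            if points_per_turn < 1.0:
                return 5
            if points_per_turn < 1.5:
                return 3
            if points_per_turn < 2.0:
                return 2
            if points_per_turn < 2.5:
                return 1
            return 0  # doing fine; no distrust added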
-
-
-    Names:
-
-    - Rowsam: [Axelrod1980b]_
-    """
-
-    name = "Second by Rowsam"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(["game"]),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        super().__init__()
-        self.mode = "Normal"
-        self.distrust_points = 0
-        self.current_score = 0
-        self.opponent_score = 0
-
-    def _score_last_round(self, opponent: IpdPlayer):
-        """Updates the scores for each player."""
-        game = self.match_attributes["game"]
-        last_round = (self.history[-1], opponent.history[-1])
-        scores = game.score(last_round)
-        self.current_score += scores[0]
-        self.opponent_score += scores[1]
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        turn = len(self.history) + 1
-        if turn > 1:
-            self._score_last_round(opponent)
-
-        if self.mode == "Defect":
-            return D
-
-        if self.mode == "Coop Def Cycle 1":
-            self.mode = "Coop Def Cycle 2"
-            return C
-
-        if self.mode == "Coop Def Cycle 2":
-            self.mode = "Normal"
-            return D
-
-        # Opportunity for distrust to cool off.
-        if turn % 18 == 0:
-            if self.distrust_points >= 3:
-                self.distrust_points -= 1
-
-        # In normal mode, only check for strategy updates every sixth turn.
-        if turn % 6 != 0:
-            return C
-
-        points_per_turn = self.current_score / turn  # Off by one
-        if points_per_turn < 1.0:
-            self.distrust_points += 5
-        elif points_per_turn < 1.5:
-            self.distrust_points += 3
-        elif points_per_turn < 2.0:
-            self.distrust_points += 2
-        elif points_per_turn < 2.5:
-            self.distrust_points += 1
-        else:
-            # Continue Cooperating
-            return C
-
-        if self.distrust_points >= 7:
-            self.mode = "Defect"
-        else:
-            # Def this time, then coop, then def.
-            self.mode = "Coop Def Cycle 1"
-        return D
-
-
-class SecondByAppold(IpdPlayer):
-    """
-    Strategy submitted to Axelrod's second tournament by Scott Appold (K88R) and
-    came in 22nd in that tournament.
-
-    Cooperates for first four turns.
-
-    After four turns, will cooperate immediately following the first time the
-    opponent defects (starting with the opponent's fourth move). Otherwise
-    will cooperate with probability equal to:
-
-    - If this strategy defected two turns ago, the portion of the time
-      (historically) that the opponent followed a defection with a cooperation.
-    - If this strategy cooperated two turns ago, the portion of the time
-      (historically) that the opponent followed a cooperation with a
-      cooperation. The opponent's first move is counted as a response to a
-      cooperation.
-
-
-    Names:
-
-    - Appold: [Axelrod1980b]_
-    """
-
-    name = "Second by Appold"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        super().__init__()
-
-        # Probability of a cooperation after an x is:
-        # opp_c_after_x / total_num_of_x.
-        self.opp_c_after_x = {C: 0, D: 1}
-        # This is the total counted, so it doesn't include the most recent.
- self.total_num_of_x = {C: 0, D: 1} - - self.first_opp_def = False - - def strategy(self, opponent: IpdPlayer) -> Action: - turn = len(self.history) + 1 - - us_two_turns_ago = C if turn <= 2 else self.history[-2] - - # Update trackers - if turn > 1: - self.total_num_of_x[us_two_turns_ago] += 1 - if turn > 1 and opponent.history[-1] == C: - self.opp_c_after_x[us_two_turns_ago] += 1 - - if turn <= 4: - return C - - if opponent.history[-1] == D and not self.first_opp_def: - self.first_opp_def = True - return C - - # Calculate the probability that the opponent cooperated last turn given - # what we know two turns ago. - prob_coop = self.opp_c_after_x[us_two_turns_ago] / self.total_num_of_x[ - us_two_turns_ago] - return random_choice(prob_coop) diff --git a/axelrod/ipd/strategies/backstabber.py b/axelrod/ipd/strategies/backstabber.py deleted file mode 100644 index 7020b7c1c..000000000 --- a/axelrod/ipd/strategies/backstabber.py +++ /dev/null @@ -1,106 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.strategy_transformers import FinalTransformer - -C, D = Action.C, Action.D - - -@FinalTransformer((D, D), name_prefix=None) # End with two defections -class BackStabber(IpdPlayer): - """ - Forgives the first 3 defections but on the fourth - will defect forever. Defects on the last 2 rounds unconditionally. - - Names: - - - Backstabber: Original name by Thomas Campbell - """ - - name = "BackStabber" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - return _backstabber_strategy(opponent) - - -@FinalTransformer((D, D), name_prefix=None) # End with two defections -class DoubleCrosser(IpdPlayer): - """ - Forgives the first 3 defections but on the fourth - will defect forever. Defects on the last 2 rounds unconditionally. - - If 8 <= current round <= 180, - if the opponent did not defect in the first 7 rounds, - the player will only defect after the opponent has defected twice in-a-row. - - Names: - - - Double Crosser: Original name by Thomas Campbell - """ - - name = "DoubleCrosser" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if _opponent_triggers_alt_strategy(opponent): - return _alt_strategy(opponent) - return _backstabber_strategy(opponent) - - -def _backstabber_strategy(opponent: IpdPlayer) -> Action: - """ - Cooperates until opponent defects a total of four times, then always - defects. - """ - if not opponent.history: - return C - if opponent.defections > 3: - return D - return C - - -def _alt_strategy(opponent: IpdPlayer) -> Action: - """ - If opponent's previous two plays were defect, then defects on next round. - Otherwise, cooperates. - """ - previous_two_plays = opponent.history[-2:] - if previous_two_plays == [D, D]: - return D - return C - - -def _opponent_triggers_alt_strategy(opponent: IpdPlayer) -> bool: - """ - If opponent did not defect in first 7 rounds and the current round is from 8 - to 180, return True. Else, return False. 
- """ - before_alt_strategy = first_n_rounds = 7 - last_round_of_alt_strategy = 180 - if _opponent_defected_in_first_n_rounds(opponent, first_n_rounds): - return False - current_round = len(opponent.history) + 1 - return before_alt_strategy < current_round <= last_round_of_alt_strategy - - -def _opponent_defected_in_first_n_rounds(opponent: IpdPlayer, first_n_rounds: int) -> bool: - """ - If opponent defected in the first N rounds, return True. Else return False. - """ - return D in opponent.history[:first_n_rounds] diff --git a/axelrod/ipd/strategies/better_and_better.py b/axelrod/ipd/strategies/better_and_better.py deleted file mode 100644 index e5ff9eb00..000000000 --- a/axelrod/ipd/strategies/better_and_better.py +++ /dev/null @@ -1,32 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -C, D = Action.C, Action.D - - -class BetterAndBetter(IpdPlayer): - """ - Defects with probability of '(1000 - current turn) / 1000'. - Therefore it is less and less likely to defect as the round goes on. - - Names: - - Better and Better: [Prison1998]_ - - """ - - name = "Better and Better" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - current_round = len(self.history) + 1 - probability = current_round / 1000 - return random_choice(probability) diff --git a/axelrod/ipd/strategies/bush_mosteller.py b/axelrod/ipd/strategies/bush_mosteller.py deleted file mode 100644 index 223dd33a5..000000000 --- a/axelrod/ipd/strategies/bush_mosteller.py +++ /dev/null @@ -1,132 +0,0 @@ -import random - -from axelrod import random_choice -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class BushMosteller(IpdPlayer): - """ - A player that is based on Bush Mosteller reinforced learning algorithm, it - decides what it will - play only depending on its own previous payoffs. - - The probability of playing C or D will be updated using a stimulus which - represents a win or a loss of value based on its previous play's payoff in - the specified probability. The more a play will be rewarded through rounds, - the more the player will be tempted to use it. 
- - Names: - - - Bush Mosteller: [Luis2008]_ - """ - - name = "Bush Mosteller" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, - c_prob: float = 0.5, - d_prob: float = 0.5, - aspiration_level_divider: float = 3.0, - learning_rate: float = 0.5, - ) -> None: - """ - Parameters - - c_prob: float, 0.5 - Probability to play C , is modified during the match - d_prob: float, 0.5 - Probability to play D , is modified during the match - aspiration_level_divider: float, 3.0 - Value that regulates the aspiration level, - isn't modified during match - learning rate [0 , 1] - Percentage of learning speed - Variables / Constants - stimulus (Var: [-1 , 1]): float - Value that impacts the changes of action probability - _aspiration_level: float - Value that impacts the stimulus changes, isn't modified during match - _init_c_prob , _init_d_prob : float - Values used to properly set up reset(), - set to original probabilities - """ - super().__init__() - self._c_prob, self._d_prob = c_prob, d_prob - self._init_c_prob, self._init_d_prob = c_prob, d_prob - self._aspiration_level = abs( - (max(self.match_attributes["game"].RPST()) / aspiration_level_divider) - ) - - self._stimulus = 0.0 - self._learning_rate = learning_rate - - def stimulus_update(self, opponent: IpdPlayer): - """ - Updates the stimulus attribute based on the opponent's history. Used by - the strategy. - - Parameters - - opponent : axelrodPlayer - The current opponent - """ - game = self.match_attributes["game"] - - last_round = (self.history[-1], opponent.history[-1]) - - scores = game.score(last_round) - - previous_play = scores[0] - - self._stimulus = (previous_play - self._aspiration_level) / abs( - (max(self.match_attributes["game"].RPST()) - self._aspiration_level) - ) - # Lowest range for stimulus - # Highest doesn't need to be tested since it is divided by the highest - # reward possible - if self._stimulus < -1: - self._stimulus = -1 - - # Updates probability following previous choice C - if self.history[-1] == C: - - if self._stimulus >= 0: - self._c_prob += ( - self._learning_rate * self._stimulus * (1 - self._c_prob) - ) - - elif self._stimulus < 0: - self._c_prob += self._learning_rate * self._stimulus * self._c_prob - - # Updates probability following previous choice D - if self.history[-1] == D: - if self._stimulus >= 0: - self._d_prob += ( - self._learning_rate * self._stimulus * (1 - self._d_prob) - ) - - elif self._stimulus < 0: - self._d_prob += self._learning_rate * self._stimulus * self._d_prob - - def strategy(self, opponent: IpdPlayer) -> Action: - - # First turn - if len(self.history) == 0: - return random_choice(self._c_prob / (self._c_prob + self._d_prob)) - - # Updating stimulus depending on his own latest choice - self.stimulus_update(opponent) - - return random_choice(self._c_prob / (self._c_prob + self._d_prob)) diff --git a/axelrod/ipd/strategies/calculator.py b/axelrod/ipd/strategies/calculator.py deleted file mode 100644 index bd206fead..000000000 --- a/axelrod/ipd/strategies/calculator.py +++ /dev/null @@ -1,55 +0,0 @@ -from axelrod.ipd._strategy_utils import detect_cycle -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -from .axelrod_first import FirstByJoss as Joss - -C, D = Action.C, Action.D - - -class Calculator(IpdPlayer): - """ - Plays like (Hard) Joss for the first 20 
rounds. If periodic behavior is - detected, defect forever. Otherwise play TFT. - - - Names: - - - Calculator: [Prison1998]_ - """ - - name = "Calculator" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.joss_instance = Joss() - - def strategy(self, opponent: IpdPlayer) -> Action: - turn = len(self.history) - if turn > 0: - self.joss_instance.history.append(self.history[-1], - opponent.history[-1]) - if turn == 20: - self.cycle = detect_cycle(opponent.history) - return self.extended_strategy(opponent) - if turn > 20: - return self.extended_strategy(opponent) - else: - play = self.joss_instance.strategy(opponent) - return play - - def extended_strategy(self, opponent: IpdPlayer) -> Action: - if self.cycle: - return D - else: - # TFT - return D if opponent.history[-1:] == [D] else C diff --git a/axelrod/ipd/strategies/cooperator.py b/axelrod/ipd/strategies/cooperator.py deleted file mode 100644 index 3b4c0c4a6..000000000 --- a/axelrod/ipd/strategies/cooperator.py +++ /dev/null @@ -1,77 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Cooperator(IpdPlayer): - """A player who only ever cooperates. - - Names: - - - Cooperator: [Axelrod1984]_ - - ALLC: [Press2012]_ - - Always cooperate: [Mittal2009]_ - """ - - name = "Cooperator" - classifier = { - "memory_depth": 0, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - return C - - -class TrickyCooperator(IpdPlayer): - """ - A cooperator that is trying to be tricky. - - Names: - - - Tricky Cooperator: Original name by Karol Langner - """ - - name = "Tricky Cooperator" - classifier = { - "memory_depth": 10, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - _min_history_required_to_try_trickiness = 3 - _max_history_depth_for_trickiness = -10 - - def strategy(self, opponent: IpdPlayer) -> Action: - """Almost always cooperates, but will try to trick the opponent by - defecting. - - Defect once in a while in order to get a better payout. - After 3 rounds, if opponent has not defected to a max history depth of - 10, defect. 
- """ - if self._has_played_enough_rounds_to_be_tricky() and self._opponents_has_cooperated_enough_to_be_tricky( - opponent - ): - return D - return C - - def _has_played_enough_rounds_to_be_tricky(self): - return len(self.history) >= self._min_history_required_to_try_trickiness - - def _opponents_has_cooperated_enough_to_be_tricky(self, opponent): - rounds_to_be_checked = opponent.history[ - self._max_history_depth_for_trickiness : - ] - return D not in rounds_to_be_checked diff --git a/axelrod/ipd/strategies/cycler.py b/axelrod/ipd/strategies/cycler.py deleted file mode 100644 index 5aec043ac..000000000 --- a/axelrod/ipd/strategies/cycler.py +++ /dev/null @@ -1,270 +0,0 @@ -import copy -import itertools -import random -from typing import List, Tuple - -from axelrod.ipd.action import Action, actions_to_str, str_to_actions -from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D -actions = (C, D) - - -class AntiCycler(IpdPlayer): - """ - A player that follows a sequence of plays that contains no cycles: - CDD CD CCD CCCD CCCCD ... - - Names: - - - Anti Cycler: Original name by Marc Harper - """ - - name = "AntiCycler" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.cycle_length = 1 - self.cycle_counter = 0 - self.first_three = self._get_first_three() - - @staticmethod - def _get_first_three() -> List[Action]: - return [C, D, D] - - def strategy(self, opponent: IpdPlayer) -> Action: - while self.first_three: - return self.first_three.pop(0) - if self.cycle_counter < self.cycle_length: - self.cycle_counter += 1 - return C - else: - self.cycle_length += 1 - self.cycle_counter = 0 - return D - - -class Cycler(IpdPlayer): - """ - A player that repeats a given sequence indefinitely. - - Names: - - - Cycler: Original name by Marc Harper - """ - - name = "Cycler" - classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, cycle: str = "CCD") -> None: - """This strategy will repeat the parameter `cycle` endlessly, - e.g. C C D C C D C C D ... 
- - Special Cases - ------------- - Cooperator is equivalent to Cycler("C") - Defector is equivalent to Cycler("D") - Alternator is equivalent to Cycler("CD") - - """ - super().__init__() - self.cycle = cycle - self.set_cycle(cycle=cycle) - - def strategy(self, opponent: IpdPlayer) -> Action: - return next(self.cycle_iter) - - def set_cycle(self, cycle: str): - """Set or change the cycle.""" - self.cycle = cycle - self.cycle_iter = itertools.cycle(str_to_actions(self.cycle)) - self.classifier["memory_depth"] = len(cycle) - 1 - - -class EvolvableCycler(Cycler, EvolvablePlayer): - """Evolvable version of Cycler.""" - - name = "EvolvableCycler" - - def __init__( - self, - cycle: str = None, - cycle_length: int = None, - mutation_probability: float = 0.2, - mutation_potency: int = 1 - ) -> None: - cycle, cycle_length = self._normalize_parameters(cycle, cycle_length) - # The following __init__ sets self.cycle = cycle - Cycler.__init__(self, cycle=cycle) - EvolvablePlayer.__init__(self) - # Overwrite init_kwargs in the case that we generated a new cycle from cycle_length - self.overwrite_init_kwargs( - cycle=cycle, - cycle_length=cycle_length) - self.mutation_probability = mutation_probability - self.mutation_potency = mutation_potency - - @classmethod - def _normalize_parameters(cls, cycle=None, cycle_length=None) -> Tuple[str, int]: - """Compute other parameters from those that may be missing, to ensure proper cloning.""" - if not cycle: - if not cycle_length: - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableCycler") - cycle = cls._generate_random_cycle(cycle_length) - cycle_length = len(cycle) - return cycle, cycle_length - - @classmethod - def _generate_random_cycle(cls, cycle_length: int) -> str: - """ - Generate a sequence of random moves - """ - return actions_to_str(random.choice(actions) for _ in range(cycle_length)) - - def mutate(self) -> EvolvablePlayer: - """ - Basic mutation which may change any random actions in the sequence. - """ - if random.random() <= self.mutation_probability: - mutated_sequence = list(str_to_actions(self.cycle)) - for _ in range(self.mutation_potency): - index_to_change = random.randint(0, len(mutated_sequence) - 1) - mutated_sequence[index_to_change] = mutated_sequence[index_to_change].flip() - cycle = actions_to_str(mutated_sequence) - else: - cycle = self.cycle - cycle, _ = self._normalize_parameters(cycle) - return self.create_new(cycle=cycle) - - def crossover(self, other) -> EvolvablePlayer: - """ - Creates and returns a new IpdPlayer instance with a single crossover point. 
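        Illustrative usage (assumes equal-length parent cycles, as produced
        here by `cycle_length`; not part of the original code):

            parent1 = EvolvableCycler(cycle_length=8)
            parent2 = EvolvableCycler(cycle_length=8)
            child = parent1.crossover(parent2)
            assert len(child.cycle) == 8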
- """ - if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") - cycle_list = crossover_lists(self.cycle, other.cycle) - cycle = "".join(cycle_list) - cycle, _ = self._normalize_parameters(cycle) - return self.create_new(cycle=cycle) - - -class CyclerDC(Cycler): - """ - Cycles D, C - - Names: - - - Cycler DC: Original name by Marc Harper - """ - - name = "Cycler DC" - classifier = copy.copy(Cycler.classifier) - classifier["memory_depth"] = 1 - - def __init__(self) -> None: - super().__init__(cycle="DC") - - -class CyclerCCD(Cycler): - """ - Cycles C, C, D - - Names: - - - Cycler CCD: Original name by Marc Harper - - Periodic player CCD: [Mittal2009]_ - """ - - name = "Cycler CCD" - classifier = copy.copy(Cycler.classifier) - classifier["memory_depth"] = 2 - - def __init__(self) -> None: - super().__init__(cycle="CCD") - - -class CyclerDDC(Cycler): - """ - Cycles D, D, C - - Names: - - - Cycler DDC: Original name by Marc Harper - - Periodic player DDC: [Mittal2009]_ - """ - - name = "Cycler DDC" - classifier = copy.copy(Cycler.classifier) - classifier["memory_depth"] = 2 - - def __init__(self) -> None: - super().__init__(cycle="DDC") - - -class CyclerCCCD(Cycler): - """ - Cycles C, C, C, D - - Names: - - - Cycler CCCD: Original name by Marc Harper - """ - - name = "Cycler CCCD" - classifier = copy.copy(Cycler.classifier) - classifier["memory_depth"] = 3 - - def __init__(self) -> None: - super().__init__(cycle="CCCD") - - -class CyclerCCCCCD(Cycler): - """ - Cycles C, C, C, C, C, D - - Names: - - - Cycler CCCD: Original name by Marc Harper - """ - - name = "Cycler CCCCCD" - classifier = copy.copy(Cycler.classifier) - classifier["memory_depth"] = 5 - - def __init__(self) -> None: - super().__init__(cycle="CCCCCD") - - -class CyclerCCCDCD(Cycler): - """ - Cycles C, C, C, D, C, D - - Names: - - - Cycler CCCDCD: Original name by Marc Harper - """ - - name = "Cycler CCCDCD" - classifier = copy.copy(Cycler.classifier) - classifier["memory_depth"] = 5 - - def __init__(self) -> None: - super().__init__(cycle="CCCDCD") diff --git a/axelrod/ipd/strategies/darwin.py b/axelrod/ipd/strategies/darwin.py deleted file mode 100644 index 59da40f05..000000000 --- a/axelrod/ipd/strategies/darwin.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -The player class in this module does not obey standard rules of the IPD (as -indicated by their classifier). We do not recommend putting a lot of time in to -optimising it. -""" -from collections import defaultdict -from typing import Optional - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Darwin(IpdPlayer): - """ - A strategy which accumulates a record (the 'genome') of what the most - favourable response in the previous round should have been, and naively - assumes that this will remain the correct response at the same round of - future trials. - - This 'genome' is preserved between opponents, rounds and repetitions of - the tournament. It becomes a characteristic of the type and so a single - version of this is shared by all instances for each loading of the class. - - As this results in information being preserved between tournaments, this - is classified as a cheating strategy! - - If no record yet exists, the opponent's response from the previous round - is returned. 
-
-    Names:
-
-    - Darwin: Original name by Paul Slavin
-    """
-
-    name = "Darwin"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "inspects_source": True,  # Checks to see if opponent is using simulated matches.
-        "long_run_time": False,
-        "makes_use_of": set(),
-        "manipulates_source": False,
-        "manipulates_state": True,  # Does not reset properly.
-    }
-
-    genome = [C]
-    valid_callers = ["play"]  # What functions may invoke our strategy.
-
-    def __init__(self) -> None:
-        self.outcomes = None  # type: Optional[dict]
-        self.response = Darwin.genome[0]
-        super().__init__()
-
-    def receive_match_attributes(self):
-        self.outcomes = self.match_attributes["game"].scores
-
-    @staticmethod
-    def foil_strategy_inspection() -> Action:
-        """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead"""
-        return C
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        trial = len(self.history)
-
-        if trial > 0:
-            assert self.outcomes is not None
-            outcome = self.outcomes[(self.history[-1], opponent.history[-1])]
-            self.mutate(outcome, trial)
-            # Update genome with selected response
-            Darwin.genome[trial - 1] = self.response
-
-        if trial < len(Darwin.genome):
-            # Return response from genome where available...
-            current = Darwin.genome[trial]
-        else:
-            # ...otherwise use Tit-for-Tat
-            Darwin.genome.append(opponent.history[-1])
-            current = opponent.history[-1]
-
-        return current
-
-    def reset(self):
-        """Reset instance properties."""
-        super().reset()
-        Darwin.genome[0] = C  # Ensure initial Cooperate
-
-    def mutate(self, outcome: tuple, trial: int) -> None:
-        """Select response according to outcome."""
-        if outcome[0] < 3 and (len(Darwin.genome) >= trial):
-            self.response = D if Darwin.genome[trial - 1] == C else C
-
-    @staticmethod
-    def reset_genome() -> None:
-        """For use in testing methods."""
-        Darwin.genome = [C]
diff --git a/axelrod/ipd/strategies/dbs.py b/axelrod/ipd/strategies/dbs.py
deleted file mode 100644
index 07c9b4279..000000000
--- a/axelrod/ipd/strategies/dbs.py
+++ /dev/null
@@ -1,441 +0,0 @@
-from axelrod.ipd.action import Action
-from axelrod.ipd.player import IpdPlayer
-
-C, D = Action.C, Action.D
-
-
-class DBS(IpdPlayer):
-    """
-    A strategy that learns the opponent's strategy and uses symbolic noise
-    detection to detect whether anomalies in a player's behavior are
-    deliberate or accidental. From the learned opponent's strategy, a tree
-    search is used to choose the best move.
-
-    Default values for the parameters are the suggested values in the article.
-    When noise increases you can try to diminish violation_threshold and
-    reject_threshold.
-
-    Names:
-
-    - Desired Belief Strategy: [Au2006]_
-    """
-
-    # These are various properties for the strategy
-    name = "DBS"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": True,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(
-        self,
-        discount_factor=0.75,
-        promotion_threshold=3,
-        violation_threshold=4,
-        reject_threshold=3,
-        tree_depth=5,
-    ):
-        """
-        Parameters
-
-        discount_factor: float, optional
-            Used when computing discounted frequencies to learn the opponent's
-            strategy. Must be between 0 and 1. The default is 0.75.
-        promotion_threshold: int, optional
-            Number of successive observations needed to promote an opponent
-            behavior to a deterministic rule. The default is 3.
-        violation_threshold: int, optional
-            Number of observations needed to consider that the opponent's
-            strategy has changed. You can lower it when noise increases. The
-            default is 4, which is good for a noise level of .1.
-        reject_threshold: int, optional
-            Number of observations before forgetting the opponent's previous
-            strategy. You can lower it when noise increases. The default is 3,
-            which is good for a noise level of .1.
-        tree_depth: int, optional
-            Depth of the tree for the tree-search algorithm. Higher depth means
-            more time to compute the move. The default is 5.
-        """
-        super().__init__()
-
-        # The opponent's behavior is represented by 3 dicts: Rd, Rc, and Rp.
-        # Its behavior is modeled by a set of rules. A rule is the move that
-        # the opponent will play (C or D or a probability to play C) after a
-        # given outcome (for instance after (C, D)).
-        # A rule can be deterministic or probabilistic:
-        # - Rc is the set of deterministic rules
-        # - Rp is the set of probabilistic rules
-        # - Rd is the default rule set which is used for initialization but also
-        # keeps track of previous policies when a change in the opponent's
-        # behavior happens, in order to have a smooth transition.
-        # - Pi is a set of rules that aggregates all above sets of rules in
-        # order to fully model the opponent's behavior.
-
-        # Default rule set is Rd.
-        # Default opponent's policy is TitForTat.
-        self.Rd = create_policy(1, 1, 0, 0)
-        # Set of current deterministic rules Rc
-        self.Rc = {}
-        # Aggregated rule set Pi
-        self.Pi = self.Rd
-        # For each rule in Rd we need to count the number of successive
-        # violations. Those counts are saved in violation_counts.
-        self.violation_counts = {}
-        self.reject_threshold = reject_threshold
-        self.violation_threshold = violation_threshold
-        self.promotion_threshold = promotion_threshold
-        self.tree_depth = tree_depth
-        # v is a violation count used to know when to clean the default rule
-        # set Rd
-        self.v = 0
-        # A discount factor for computing the probabilistic rules
-        self.alpha = discount_factor
-
-        # The probabilistic rule set Rp is not saved as an attribute, but each
-        # rule is computed only when needed. The rules are computed as
-        # discounted frequencies of the opponent's past moves. To compute the
-        # discounted frequencies, we need to keep up to date a history of what
-        # has been played following each outcome (or condition):
-        # We save it as a dict history_by_cond; keys are conditions
-        # (ex (C, C)) and values are a tuple of 2 lists (G, F)
-        # for a condition j and an iteration i in the match:
-        # G[i] = 1 if cond j was True at turn i-1 and C has been played
-        # by the opponent; else G[i] = 0
-        # F[i] = 1 if cond j was True at turn i-1; else F[i] = 0
-        # This representation makes the computing of discounted frequencies
-        # easy and efficient.
-        # The initial hypothesized policy is TitForTat.
-        self.history_by_cond = {
-            (C, C): ([1], [1]),
-            (C, D): ([1], [1]),
-            (D, C): ([0], [1]),
-            (D, D): ([0], [1]),
-        }
-
-    def should_promote(self, r_plus, promotion_threshold=3):
-        """
-        This function determines if the move r_plus is a deterministic
-        behavior of the opponent, in which case it returns True, or if r_plus
-        is due to random behavior (or noise), which would require a
-        probabilistic rule, in which case it returns False.
-
-        To do so it looks into the game history: if the last k times the
-        opponent was in the same situation as in r_plus it played the same
-        thing, then r_plus is considered a deterministic rule (where k is the
-        user-defined promotion_threshold).
-
-        Parameters
-
-        r_plus: tuple of (tuple of actions.Action, actions.Action)
-            example: ((C, C), D)
-            r_plus represents one outcome of the history, and the
-            following move played by the opponent.
-        promotion_threshold: int, optional
-            Number of successive observations needed to promote an
-            opponent behavior to a deterministic rule. Default is 3.
-        """
-        if r_plus[1] == C:
-            opposite_action = 0
-        elif r_plus[1] == D:
-            opposite_action = 1
-        k = 1
-        count = 0
-        # We iterate over the history while we do not encounter
-        # counter-examples of r_plus, i.e. while we do not encounter
-        # r_minus
-        while k < len(self.history_by_cond[r_plus[0]][0]) and not (
-            self.history_by_cond[r_plus[0]][0][1:][-k] == opposite_action
-            and self.history_by_cond[r_plus[0]][1][1:][-k] == 1
-        ):
-            # We count every occurrence of r_plus in history
-            if self.history_by_cond[r_plus[0]][1][1:][-k] == 1:
-                count += 1
-            k += 1
-        if count >= promotion_threshold:
-            return True
-        return False
-
-    def should_demote(self, r_minus, violation_threshold=4):
-        """
-        Checks if the number of successive violations of a deterministic
-        rule (in the opponent's behavior) exceeds the user-defined
-        violation_threshold.
-        """
-        return self.violation_counts[r_minus[0]] >= violation_threshold
-
-    def update_history_by_cond(self, opponent_history):
-        """
-        Updates self.history_by_cond between turns of the game.
-        """
-        two_moves_ago = (self.history[-2], opponent_history[-2])
-        for outcome, GF in self.history_by_cond.items():
-            G, F = GF
-            if outcome == two_moves_ago:
-                if opponent_history[-1] == C:
-                    G.append(1)
-                else:
-                    G.append(0)
-                F.append(1)
-            else:
-                G.append(0)
-                F.append(0)
-
-    def compute_prob_rule(self, outcome, alpha=1):
-        """
-        Uses the game history to compute the probability of the opponent
-        playing C in the outcome situation (example: outcome = (C, C)).
-        When alpha = 1, the result is approximately equal to the frequency
-        with which the opponent played C after that outcome. alpha is a
-        discount factor that gives more weight to recent events than earlier
-        ones.
-
-        Parameters
-
-        outcome: tuple of two actions.Action
-        alpha: float, optional. Discount factor. Default is 1.
-        """
-        G = self.history_by_cond[outcome][0]
-        F = self.history_by_cond[outcome][1]
-        discounted_g = 0
-        discounted_f = 0
-        alpha_k = 1
-        for g, f in zip(G[::-1], F[::-1]):
-            discounted_g += alpha_k * g
-            discounted_f += alpha_k * f
-            alpha_k = alpha * alpha_k
-        p_cond = discounted_g / discounted_f
-        return p_cond
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        # First move
-        if not self.history:
-            return C
-        if len(opponent.history) >= 2:
-            # We begin by updating history_by_cond (i.e. updating Rp)
-            self.update_history_by_cond(opponent.history)
-            two_moves_ago = (self.history[-2], opponent.history[-2])
-            # r_plus is the information of what the opponent just played,
-            # following the previous outcome two_moves_ago.
-            r_plus = (two_moves_ago, opponent.history[-1])
-            # r_minus is the opposite move, following the same outcome.
-            r_minus = (two_moves_ago, ({C, D} - {opponent.history[-1]}).pop())
-
-            # If r_plus and r_minus are not in the current set of deterministic
-            # rules, we check if r_plus should be added to it (following the
-            # rule defined in the should_promote function).
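The discounted frequency computed by compute_prob_rule above can be reproduced in isolation. A minimal sketch with a hypothetical helper name, followed by a worked example:

    def discounted_cooperation_freq(G, F, alpha=0.75):
        # Most recent round gets weight 1, the one before alpha, then alpha**2, ...
        weights = [alpha ** k for k in range(len(G))]
        num = sum(w * g for w, g in zip(weights, reversed(G)))
        den = sum(w * f for w, f in zip(weights, reversed(F)))
        return num / den

    # G = [1, 1, 0], F = [1, 1, 1]: num = 0 + 0.75 + 0.5625 = 1.3125 and
    # den = 1 + 0.75 + 0.5625 = 2.3125, so P(C | condition) is roughly 0.568.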
-            if r_plus[0] not in self.Rc.keys():
-                if self.should_promote(r_plus, self.promotion_threshold):
-                    self.Rc[r_plus[0]] = action_to_int(r_plus[1])
-                    self.violation_counts[r_plus[0]] = 0
-                    self.v = 0
-
-            # If r+ or r- in Rc
-            if r_plus[0] in self.Rc.keys():
-                to_check = C if self.Rc[r_plus[0]] == 1 else D
-                # (if r+ in Rc)
-                if r_plus[1] == to_check:
-                    # Set the violation count of r+ to 0.
-                    self.violation_counts[r_plus[0]] = 0
-                # if r- in Rc
-                elif r_minus[1] == to_check:
-                    # Increment violation count of r-.
-                    self.violation_counts[r_plus[0]] += 1
-                    # As we observe that the behavior of the opponent is
-                    # opposed to a rule modeled in Rc, we check if the number
-                    # of consecutive violations of this rule exceeds a
-                    # threshold. If it does, we clean Rc, but we keep the rules
-                    # of Rc in Rd for a smooth transition.
-                    if self.should_demote(r_minus, self.violation_threshold):
-                        self.Rd.update(self.Rc)
-                        self.Rc.clear()
-                        self.violation_counts.clear()
-                        self.v = 0
-            # r+ in Rc.
-            r_plus_in_Rc = r_plus[0] in self.Rc.keys() and self.Rc[
-                r_plus[0]
-            ] == action_to_int(r_plus[1])
-            # r- in Rd
-            r_minus_in_Rd = r_minus[0] in self.Rd.keys() and self.Rd[
-                r_minus[0]
-            ] == action_to_int(r_minus[1])
-
-            # Increment number of violations of Rd rules.
-            if r_minus_in_Rd:
-                self.v += 1
-            # If the number of violations exceeds a threshold, clean Rd.
-            if (self.v > self.reject_threshold) or (r_plus_in_Rc and r_minus_in_Rd):
-                self.Rd.clear()
-                self.v = 0
-
-        # Compute Rp for conditions that are neither in Rc nor Rd.
-        Rp = {}
-        all_cond = [(C, C), (C, D), (D, C), (D, D)]
-        for outcome in all_cond:
-            if (outcome not in self.Rc.keys()) and (outcome not in self.Rd.keys()):
-                # Compute opponent's C answer probability.
-                Rp[outcome] = self.compute_prob_rule(outcome, self.alpha)
-
-        # We aggregate the rules of Rc, Rd, and Rp in a set of rules Pi.
-        self.Pi = {}
-        # The algorithm makes sure that a rule cannot be in two different
-        # sets of rules so we do not need to check for duplicates.
-        self.Pi.update(self.Rc)
-        self.Pi.update(self.Rd)
-        self.Pi.update(Rp)
-
-        # React to the opponent's last move
-        return move_gen(
-            (self.history[-1], opponent.history[-1]),
-            self.Pi,
-            depth_search_tree=self.tree_depth,
-        )
-
-
-class Node(object):
-    """
-    Nodes used to build a tree for the tree-search procedure. The tree has
-    Deterministic and Stochastic nodes, as the opponent's strategy is learned
-    as a probability distribution.
-    """
-
-    # abstract method
-    def get_siblings(self):
-        raise NotImplementedError("subclasses must override get_siblings()!")
-
-    # abstract method
-    def is_stochastic(self):
-        raise NotImplementedError("subclasses must override is_stochastic()!")
-
-
-class StochasticNode(Node):
-    """
-    A node that has a probability pC of reaching each sibling. A
-    StochasticNode can be written (C, X) or (D, X), with X = C with
-    probability pC, else X = D.
-    """
-
-    def __init__(self, own_action, pC, depth):
-        self.pC = pC
-        self.depth = depth
-        self.own_action = own_action
-
-    def get_siblings(self):
-        """
-        Returns the sibling nodes of the current StochasticNode. There are two
-        siblings, which are DeterministicNodes; their depth is the current
-        node's depth + 1.
- """ - opponent_c_choice = DeterministicNode(self.own_action, C, self.depth + 1) - opponent_d_choice = DeterministicNode(self.own_action, D, self.depth + 1) - return opponent_c_choice, opponent_d_choice - - def is_stochastic(self): - """Returns True if self is a StochasticNode.""" - return True - - -class DeterministicNode(Node): - """ - Nodes (C, C), (C, D), (D, C), or (D, D) with deterministic choice - for siblings. - """ - - def __init__(self, action1, action2, depth): - self.action1 = action1 - self.action2 = action2 - self.depth = depth - - def get_siblings(self, policy): - """ - Returns the siblings node of the current DeterministicNode. Builds 2 - siblings (C, X) and (D, X) that are StochasticNodes. Those siblings are - of the same depth as the current node. Their probabilities pC are - defined by the policy argument. - """ - c_choice = StochasticNode(C, policy[(self.action1, self.action2)], self.depth) - d_choice = StochasticNode(D, policy[(self.action1, self.action2)], self.depth) - return c_choice, d_choice - - def is_stochastic(self): - """Returns True if self is a StochasticNode.""" - return False - - def get_value(self): - values = {(C, C): 3, (C, D): 0, (D, C): 5, (D, D): 1} - return values[(self.action1, self.action2)] - - -def create_policy(pCC, pCD, pDC, pDD): - """ - Creates a dict that represents a Policy. As defined in the reference, a - Policy is a set of (prev_move, p) where p is the probability to cooperate - after prev_move, where prev_move can be (C, C), (C, D), (D, C) or (D, D). - - Parameters - - pCC, pCD, pDC, pDD : float - Must be between 0 and 1. - """ - return {(C, C): pCC, (C, D): pCD, (D, C): pDC, (D, D): pDD} - - -def action_to_int(action): - if action == C: - return 1 - return 0 - - -def minimax_tree_search(begin_node, policy, max_depth): - """ - Tree search function (minimax search procedure) for the tree (built by - recursion) corresponding to the opponent's policy, and solves it. - Returns a tuple of two floats that are the utility of playing C, and the - utility of playing D. - """ - if begin_node.is_stochastic(): - # A stochastic node cannot have the same depth than its parent node - # hence there is no need to check that its depth is < max_depth. - siblings = begin_node.get_siblings() - # The stochastic node value is the expected value of siblings. - node_value = begin_node.pC * minimax_tree_search( - siblings[0], policy, max_depth - ) + (1 - begin_node.pC) * minimax_tree_search(siblings[1], policy, max_depth) - return node_value - else: # Deterministic node - if begin_node.depth == max_depth: - # This is an end node, we just return its outcome value. - return begin_node.get_value() - elif begin_node.depth == 0: - siblings = begin_node.get_siblings(policy) - # This returns the two max expected values, for choice C or D, - # as a tuple. - return ( - minimax_tree_search(siblings[0], policy, max_depth) - + begin_node.get_value(), - minimax_tree_search(siblings[1], policy, max_depth) - + begin_node.get_value(), - ) - elif begin_node.depth < max_depth: - siblings = begin_node.get_siblings(policy) - # The deterministic node value is the max of both siblings values - # + the score of the outcome of the node. - a = minimax_tree_search(siblings[0], policy, max_depth) - b = minimax_tree_search(siblings[1], policy, max_depth) - node_value = max(a, b) + begin_node.get_value() - return node_value - - -def move_gen(outcome, policy, depth_search_tree=5): - """ - Returns the best move considering opponent's policy and last move, - using tree-search procedure. 
- """ - current_node = DeterministicNode(outcome[0], outcome[1], depth=0) - values_of_choices = minimax_tree_search(current_node, policy, depth_search_tree) - # Returns the Action which correspond to the best choice in terms of - # expected value. In case value(C) == value(D), returns C. - actions_tuple = (C, D) - return actions_tuple[values_of_choices.index(max(values_of_choices))] diff --git a/axelrod/ipd/strategies/defector.py b/axelrod/ipd/strategies/defector.py deleted file mode 100644 index 4caafcc34..000000000 --- a/axelrod/ipd/strategies/defector.py +++ /dev/null @@ -1,61 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Defector(IpdPlayer): - """A player who only ever defects. - - Names: - - - Defector: [Axelrod1984]_ - - ALLD: [Press2012]_ - - Always defect: [Mittal2009]_ - """ - - name = "Defector" - classifier = { - "memory_depth": 0, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - return D - - -class TrickyDefector(IpdPlayer): - """A defector that is trying to be tricky. - - Names: - - - Tricky Defector: Original name by Karol Langner - """ - - name = "Tricky Defector" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - """Almost always defects, but will try to trick the opponent into - cooperating. - - Defect if opponent has cooperated at least once in the past and has - defected for the last 3 turns in a row. - """ - if opponent.history.cooperations > 0 and opponent.history[-3:] == [D] * 3: - return C - return D diff --git a/axelrod/ipd/strategies/doubler.py b/axelrod/ipd/strategies/doubler.py deleted file mode 100644 index 4614483b2..000000000 --- a/axelrod/ipd/strategies/doubler.py +++ /dev/null @@ -1,36 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Doubler(IpdPlayer): - """ - Cooperates except when the opponent has defected and - the opponent's cooperation count is less than twice their defection count. 
- - Names: - - - Doubler: [Prison1998]_ - """ - - name = "Doubler" - classifier = { - "stochastic": False, - "memory_depth": float("inf"), - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if not self.history: - return C - if ( - opponent.history[-1] == D - and opponent.cooperations <= opponent.defections * 2 - ): - return D - return C diff --git a/axelrod/ipd/strategies/finite_state_machines.py b/axelrod/ipd/strategies/finite_state_machines.py deleted file mode 100644 index b27b31a6f..000000000 --- a/axelrod/ipd/strategies/finite_state_machines.py +++ /dev/null @@ -1,1002 +0,0 @@ -import itertools -from random import randrange -from typing import Any, List, Sequence, Tuple, Union -import numpy.random as random -from numpy.random import choice -from axelrod.ipd.action import Action -from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D -actions = (C, D) -Transition = Tuple[int, Action, int, Action] - - -class SimpleFSM(object): - """Simple implementation of a finite state machine that transitions - between states based on the last round of play. - - https://en.wikipedia.org/wiki/Finite-state_machine - """ - - def __init__(self, transitions: tuple, initial_state: int) -> None: - """ - transitions is a list of the form - ((state, last_opponent_action, next_state, next_action), ...) - - TitForTat would be represented with the following table: - ((1, C, 1, C), (1, D, 1, D)) - with initial play C and initial state 1. - - """ - self._state = initial_state - self._state_transitions = { - (current_state, input_action): (next_state, output_action) - for current_state, input_action, next_state, output_action in transitions - } # type: dict - - self._raise_error_for_bad_input() - - def _raise_error_for_bad_input(self): - callable_states = set( - pair[0] for pair in self._state_transitions.values() - ) - callable_states.add(self._state) - for state in callable_states: - self._raise_error_for_bad_state(state) - - def _raise_error_for_bad_state(self, state: int): - if (state, C) not in self._state_transitions or ( - state, - D, - ) not in self._state_transitions: - raise ValueError( - "state: {} does not have values for both C and D".format(state) - ) - - @property - def state(self) -> int: - return self._state - - @state.setter - def state(self, new_state: int): - self._raise_error_for_bad_state(new_state) - self._state = new_state - - @property - def state_transitions(self) -> dict: - return self._state_transitions.copy() - - def transitions(self) -> list: - return [[x[0], x[1], y[0], y[1]] for x, y in self._state_transitions.items()] - - def move(self, opponent_action: Action) -> Action: - """Computes the response move and changes state.""" - next_state, next_action = self._state_transitions[ - (self._state, opponent_action) - ] - self._state = next_state - return next_action - - def __eq__(self, other) -> bool: - """Equality of two FSMs""" - if not isinstance(other, SimpleFSM): - return False - return (self._state, self._state_transitions) == ( - other.state, - other.state_transitions, - ) - - def num_states(self): - """Return the number of states of the machine.""" - return len(set(state for state, action in self._state_transitions)) - - -class FSMPlayer(IpdPlayer): - """Abstract base class for finite state machine players.""" - - name = 
"FSM IpdPlayer" - - classifier = { - "memory_depth": 1, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, - transitions: Tuple[Transition, ...] = ((1, C, 1, C), (1, D, 1, D)), - initial_state: int = 1, - initial_action: Action = C - ) -> None: - super().__init__() - self.initial_state = initial_state - self.initial_action = initial_action - self.fsm = SimpleFSM(transitions, initial_state) - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) == 0: - return self.initial_action - else: - return self.fsm.move(opponent.history[-1]) - - -class EvolvableFSMPlayer(FSMPlayer, EvolvablePlayer): - """Abstract base class for evolvable finite state machine players.""" - - name = "EvolvableFSMPlayer" - - classifier = { - "memory_depth": 1, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, - transitions: tuple = None, - initial_state: int = None, - initial_action: Action = None, - num_states: int = None, - mutation_probability: float = 0.1, - ) -> None: - """If transitions, initial_state, and initial_action are None - then generate random parameters using num_states.""" - transitions, initial_state, initial_action, num_states = self._normalize_parameters( - transitions, initial_state, initial_action, num_states) - FSMPlayer.__init__( - self, - transitions=transitions, - initial_state=initial_state, - initial_action=initial_action) - EvolvablePlayer.__init__(self) - self.mutation_probability = mutation_probability - self.overwrite_init_kwargs( - transitions=transitions, - initial_state=initial_state, - initial_action=initial_action, - num_states=self.num_states) - - @classmethod - def normalize_transitions(cls, transitions: Sequence[Sequence]) -> Tuple[Tuple[Any, ...], ...]: - """Translate a list of lists to a tuple of tuples.""" - normalized = [] - for t in transitions: - normalized.append(tuple(t)) - return tuple(normalized) - - @classmethod - def _normalize_parameters(cls, transitions: Tuple = None, initial_state: int = None, initial_action: Action = None, - num_states: int = None) -> Tuple[Tuple, int, Action, int]: - if not ((transitions is not None) and (initial_state is not None) and (initial_action is not None)): - if not num_states: - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableFSMPlayer") - transitions, initial_state, initial_action = cls.random_params(num_states) - transitions = cls.normalize_transitions(transitions) - num_states = len(transitions) // 2 - return transitions, initial_state, initial_action, num_states - - @property - def num_states(self) -> int: - return self.fsm.num_states() - - @classmethod - def random_params(cls, num_states: int) -> Tuple[Tuple[Transition, ...], int, Action]: - rows = [] - for j in range(num_states): - for action in actions: - next_state = randrange(num_states) - next_action = choice(actions) - row = (j, action, next_state, next_action) - rows.append(row) - initial_state = randrange(num_states) - initial_action = choice(actions) - return tuple(rows), initial_state, initial_action - - @staticmethod - def mutate_rows(rows, mutation_probability): - rows = list(rows) - randoms = random.random(len(rows)) - # Flip each value with a probability proportional to the mutation rate - for i, row in enumerate(rows): - if 
randoms[i] < mutation_probability: - row[3] = row[3].flip() - # Swap Two Nodes? - if random.random() < 0.5: - nodes = len(rows) // 2 - n1 = randrange(nodes) - n2 = randrange(nodes) - for j, row in enumerate(rows): - if row[0] == n1: - row[0] = n2 - elif row[0] == n2: - row[0] = n1 - rows.sort(key=lambda x: (x[0], 0 if x[1] == C else 1)) - return rows - - def mutate(self): - initial_action = self.initial_action - if random.random() < self.mutation_probability / 10: - initial_action = self.initial_action.flip() - initial_state = self.initial_state - if random.random() < self.mutation_probability / (10 * self.num_states): - initial_state = randrange(self.num_states) - try: - transitions = self.mutate_rows(self.fsm.transitions(), self.mutation_probability) - self.fsm = SimpleFSM(transitions, self.initial_state) - except ValueError: - # If the FSM is malformed, try again. - return self.mutate() - return self.create_new( - transitions=transitions, - initial_state=initial_state, - initial_action=initial_action, - ) - - @staticmethod - def crossover_rows(rows1, rows2): - num_states = len(rows1) // 2 - cross_point = 2 * randrange(num_states) - new_rows = copy_lists(rows1[:cross_point]) - new_rows += copy_lists(rows2[cross_point:]) - return new_rows - - def crossover(self, other): - if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") - transitions = self.crossover_rows(self.fsm.transitions(), other.fsm.transitions()) - transitions = self.normalize_transitions(transitions) - return self.create_new(transitions=transitions) - - def receive_vector(self, vector): - """ - Read a serialized vector into the set of FSM parameters (less initial - state). Then assign those FSM parameters to this class instance. - - The vector has three parts. The first is used to define the next state - (for each of the player's states - for each opponents action). - - The second part is the player's next moves (for each state - for - each opponent's actions). - - Finally, a probability to determine the player's first move. - """ - num_states = self.fsm.num_states() - state_scale = vector[:num_states * 2] - next_states = [int(s * (num_states - 1)) for s in state_scale] - actions = vector[num_states * 2: -1] - - self.initial_action = C if round(vector[-1]) == 0 else D - self.initial_state = 1 - - transitions = [] - for i, (initial_state, action) in enumerate(itertools.product(range(num_states), [C, D])): - next_action = C if round(actions[i]) == 0 else D - transitions.append([initial_state, action, next_states[i], next_action]) - transitions = self.normalize_transitions(transitions) - self.fsm = SimpleFSM(transitions, self.initial_state) - self.overwrite_init_kwargs(transitions=transitions, - initial_state=self.initial_state, - initial_action=self.initial_action) - - def create_vector_bounds(self): - """Creates the bounds for the decision variables.""" - size = len(self.fsm.transitions()) * 2 + 1 - lb = [0] * size - ub = [1] * size - return lb, ub - - -class Fortress3(FSMPlayer): - """Finite state machine player specified in http://DOI.org/10.1109/CEC.2006.1688322. - - Note that the description in http://www.graham-kendall.com/papers/lhk2011.pdf - is not correct. 
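For reference, the SimpleFSM class defined earlier in this module can be driven directly; Tit For Tat's two-transition table makes a compact check:

    tft = SimpleFSM(transitions=((1, C, 1, C), (1, D, 1, D)), initial_state=1)
    moves = [tft.move(a) for a in (C, D, D, C)]
    # moves == [C, D, D, C]: with this table the machine echoes its input.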
- - - Names: - - - Fortress 3: [Ashlock2006b]_ - """ - - name = "Fortress3" - classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (1, C, 1, D), - (1, D, 2, D), - (2, C, 1, D), - (2, D, 3, C), - (3, C, 3, C), - (3, D, 1, D), - ) - - super().__init__( - transitions=transitions, initial_state=1, initial_action=D - ) - - -class Fortress4(FSMPlayer): - """ - Finite state machine player specified in - http://DOI.org/10.1109/CEC.2006.1688322. - - Note that the description in - http://www.graham-kendall.com/papers/lhk2011.pdf is not correct. - - Names: - - - Fortress 4: [Ashlock2006b]_ - """ - - name = "Fortress4" - classifier = { - "memory_depth": 3, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (1, C, 1, D), - (1, D, 2, D), - (2, C, 1, D), - (2, D, 3, D), - (3, C, 1, D), - (3, D, 4, C), - (4, C, 4, C), - (4, D, 1, D), - ) - - super().__init__( - transitions=transitions, initial_state=1, initial_action=D - ) - - -class Predator(FSMPlayer): - """ - Finite state machine player specified in - http://DOI.org/10.1109/CEC.2006.1688322. - - Names: - - - Predator: [Ashlock2006b]_ - """ - - name = "Predator" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (0, C, 0, D), - (0, D, 1, D), - (1, C, 2, D), - (1, D, 3, D), - (2, C, 4, C), - (2, D, 3, D), - (3, C, 5, D), - (3, D, 4, C), - (4, C, 2, C), - (4, D, 6, D), - (5, C, 7, D), - (5, D, 3, D), - (6, C, 7, C), - (6, D, 7, D), - (7, C, 8, D), - (7, D, 7, D), - (8, C, 8, D), - (8, D, 6, D), - ) - - super().__init__( - transitions=transitions, initial_state=0, initial_action=C - ) - - -class Pun1(FSMPlayer): - """FSM player described in [Ashlock2006]_. - - Names: - - - Pun1: [Ashlock2006]_ - """ - - name = "Pun1" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ((1, C, 2, C), (1, D, 2, C), (2, C, 1, C), (2, D, 1, D)) - - super().__init__( - transitions=transitions, initial_state=1, initial_action=D - ) - - -class Raider(FSMPlayer): - """ - FSM player described in http://DOI.org/10.1109/FOCI.2014.7007818. - - - Names - - - Raider: [Ashlock2014]_ - """ - - name = "Raider" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (0, C, 2, D), - (0, D, 2, D), - (1, C, 1, C), - (1, D, 1, D), - (2, C, 0, D), - (2, D, 3, C), - (3, C, 0, D), - (3, D, 1, C), - ) - - super().__init__( - transitions=transitions, initial_state=0, initial_action=D - ) - - -class Ripoff(FSMPlayer): - """ - FSM player described in http://DOI.org/10.1109/TEVC.2008.920675. 
- - Names - - - Ripoff: [Ashlock2008]_ - """ - - name = "Ripoff" - classifier = { - "memory_depth": 3, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (1, C, 2, C), - (1, D, 3, C), - (2, C, 1, D), - (2, D, 3, C), - (3, C, 3, C), # Note that it's TFT in state 3 - (3, D, 3, D), - ) - - super().__init__( - transitions=transitions, initial_state=1, initial_action=D - ) - - -class UsuallyCooperates(FSMPlayer): - """ - This strategy cooperates except after a C following a D. - - Names: - - - Usually Cooperates (UC): [Ashlock2009]_ - """ - - name = "UsuallyCooperates" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ((1, C, 1, C), (1, D, 2, C), (2, C, 1, D), (2, D, 1, C)) - - super().__init__( - transitions=transitions, initial_state=1, initial_action=C - ) - - -class UsuallyDefects(FSMPlayer): - """ - This strategy defects except after a D following a C. - - Names: - - - Usually Defects (UD): [Ashlock2009]_ - """ - - name = "UsuallyDefects" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ((1, C, 2, D), (1, D, 1, D), (2, C, 1, D), (2, D, 1, C)) - - super().__init__( - transitions=transitions, initial_state=1, initial_action=D - ) - - -class SolutionB1(FSMPlayer): - """ - FSM player described in http://DOI.org/10.1109/TCIAIG.2014.2326012. - - Names - - - Solution B1: [Ashlock2015]_ - """ - - name = "SolutionB1" - classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (1, C, 2, D), - (1, D, 1, D), - (2, C, 2, C), - (2, D, 3, C), - (3, C, 3, C), - (3, D, 3, C), - ) - - super().__init__( - transitions=transitions, initial_state=1, initial_action=D - ) - - -class SolutionB5(FSMPlayer): - """ - - FSM player described in http://DOI.org/10.1109/TCIAIG.2014.2326012. - - Names - - - Solution B5: [Ashlock2015]_ - """ - - name = "SolutionB5" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (1, C, 2, C), - (1, D, 6, D), - (2, C, 2, C), - (2, D, 3, D), - (3, C, 6, C), - (3, D, 1, D), - (4, C, 3, C), - (4, D, 6, D), - (5, C, 5, D), - (5, D, 4, D), - (6, C, 3, C), - (6, D, 5, D), - ) - - super().__init__( - transitions=transitions, initial_state=1, initial_action=D - ) - - -class Thumper(FSMPlayer): - """ - FSM player described in http://DOI.org/10.1109/TEVC.2008.920675. 
- - Names - - - Thumper: [Ashlock2008]_ - """ - - name = "Thumper" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ((1, C, 1, C), (1, D, 2, D), (2, C, 1, D), (2, D, 1, D)) - - super().__init__( - transitions=transitions, initial_state=1, initial_action=C - ) - - -class EvolvedFSM4(FSMPlayer): - """ - A 4 state FSM player trained with an evolutionary algorithm. - - Names: - - - Evolved FSM 4: Original name by Marc Harper - """ - - name = "Evolved FSM 4" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (0, C, 0, C), - (0, D, 2, D), - (1, C, 3, D), - (1, D, 0, C), - (2, C, 2, D), - (2, D, 1, C), - (3, C, 3, D), - (3, D, 1, D), - ) - - super().__init__( - transitions=transitions, initial_state=0, initial_action=C - ) - - -class EvolvedFSM16(FSMPlayer): - """ - A 16 state FSM player trained with an evolutionary algorithm. - - Names: - - - Evolved FSM 16: Original name by Marc Harper - - """ - - name = "Evolved FSM 16" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (0, C, 0, C), - (0, D, 12, D), - (1, C, 3, D), - (1, D, 6, C), - (2, C, 2, D), - (2, D, 14, D), - (3, C, 3, D), - (3, D, 3, D), - (5, C, 12, D), - (5, D, 10, D), - (6, C, 5, C), - (6, D, 12, D), - (7, C, 3, D), - (7, D, 1, C), - (8, C, 5, C), - (8, D, 5, C), - (10, C, 11, D), - (10, D, 8, C), - (11, C, 15, D), - (11, D, 5, D), - (12, C, 8, C), - (12, D, 11, D), - (13, C, 13, D), - (13, D, 7, D), - (14, C, 13, D), - (14, D, 13, D), - (15, C, 15, D), - (15, D, 2, C), - ) - - super().__init__( - transitions=transitions, initial_state=0, initial_action=C - ) - - -class EvolvedFSM16Noise05(FSMPlayer): - """ - A 16 state FSM player trained with an evolutionary algorithm with - noisy matches (noise=0.05). - - Names: - - - Evolved FSM 16 Noise 05: Original name by Marc Harper - """ - - name = "Evolved FSM 16 Noise 05" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (0, C, 8, C), - (0, D, 3, D), - (1, C, 13, C), - (1, D, 15, D), - (2, C, 12, C), - (2, D, 3, D), - (3, C, 10, C), - (3, D, 3, D), - (4, C, 5, D), - (4, D, 4, D), - (5, C, 4, D), - (5, D, 10, D), - (6, C, 8, C), - (6, D, 6, D), - (8, C, 2, C), - (8, D, 4, D), - (10, C, 4, D), - (10, D, 1, D), - (11, C, 14, D), - (11, D, 13, C), - (12, C, 13, C), - (12, D, 2, C), - (13, C, 13, C), - (13, D, 6, C), - (14, C, 3, D), - (14, D, 13, D), - (15, C, 5, D), - (15, D, 11, C), - ) - - super().__init__( - transitions=transitions, initial_state=0, initial_action=C - ) - - -# Strategies trained with Moran process objectives - - -class TF1(FSMPlayer): - """ - A FSM player trained to maximize Moran fixation probabilities. 
- - Names: - - - TF1: Original name by Marc Harper - """ - - name = "TF1" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (0, C, 7, C), - (0, D, 1, C), - (1, C, 11, D), - (1, D, 11, D), - (2, C, 8, D), - (2, D, 8, C), - (3, C, 3, C), - (3, D, 12, D), - (4, C, 6, C), - (4, D, 3, C), - (5, C, 11, C), - (5, D, 8, D), - (6, C, 13, D), - (6, D, 14, C), - (7, C, 4, D), - (7, D, 2, D), - (8, C, 14, D), - (8, D, 8, D), - (9, C, 0, C), - (9, D, 10, D), - (10, C, 8, C), - (10, D, 15, C), - (11, C, 6, D), - (11, D, 5, D), - (12, C, 6, D), - (12, D, 9, D), - (13, C, 9, D), - (13, D, 8, D), - (14, C, 8, D), - (14, D, 13, D), - (15, C, 4, C), - (15, D, 5, C), - ) - - super().__init__( - transitions=transitions, initial_state=0, initial_action=C - ) - - -class TF2(FSMPlayer): - """ - A FSM player trained to maximize Moran fixation probabilities. - - Names: - - - TF2: Original name by Marc Harper - """ - - name = "TF2" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - transitions = ( - (0, C, 13, D), - (0, D, 12, D), - (1, C, 3, D), - (1, D, 4, D), - (2, C, 14, D), - (2, D, 9, D), - (3, C, 0, C), - (3, D, 1, D), - (4, C, 1, D), - (4, D, 2, D), - (7, C, 12, D), - (7, D, 2, D), - (8, C, 7, D), - (8, D, 9, D), - (9, C, 8, D), - (9, D, 0, D), - (10, C, 2, C), - (10, D, 15, C), - (11, C, 7, D), - (11, D, 13, D), - (12, C, 3, C), - (12, D, 8, D), - (13, C, 7, C), - (13, D, 10, D), - (14, C, 10, D), - (14, D, 7, D), - (15, C, 15, C), - (15, D, 11, D), - ) - - super().__init__( - transitions=transitions, initial_state=0, initial_action=C - ) - - -class TF3(FSMPlayer): - """ - A FSM player trained to maximize Moran fixation probabilities. 
-
-    Names:
-
-    - TF3: Original name by Marc Harper
-    """
-
-    name = "TF3"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        transitions = (
-            (0, C, 0, C),
-            (0, D, 3, C),
-            (1, C, 5, D),
-            (1, D, 0, C),
-            (2, C, 3, C),
-            (2, D, 2, D),
-            (3, C, 4, D),
-            (3, D, 6, D),
-            (4, C, 3, C),
-            (4, D, 1, D),
-            (5, C, 6, C),
-            (5, D, 3, D),
-            (6, C, 6, D),
-            (6, D, 6, D),
-            (7, C, 7, D),
-            (7, D, 5, C),
-        )
-
-        super().__init__(
-            transitions=transitions, initial_state=0, initial_action=C
-        )
diff --git a/axelrod/ipd/strategies/forgiver.py b/axelrod/ipd/strategies/forgiver.py
deleted file mode 100644
index fef4a608f..000000000
--- a/axelrod/ipd/strategies/forgiver.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from axelrod.ipd.action import Action
-from axelrod.ipd.player import IpdPlayer
-
-C, D = Action.C, Action.D
-
-
-class Forgiver(IpdPlayer):
-    """
-    A player that starts by cooperating but will defect if at any point
-    the opponent has defected more than 10 percent of the time.
-
-    Names:
-
-    - Forgiver: Original name by Thomas Campbell
-    """
-
-    name = "Forgiver"
-    classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        """
-        Begins by playing C, then plays D if the opponent has defected more
-        than 10 percent of the time.
-        """
-        if opponent.defections > len(opponent.history) / 10.0:
-            return D
-        return C
-
-
-class ForgivingTitForTat(IpdPlayer):
-    """
-    A player that starts by cooperating but will defect if at any point the
-    opponent has defected more than 10 percent of the time and their most
-    recent decision was to defect.
-
-    Names:
-
-    - Forgiving Tit For Tat: Original name by Thomas Campbell
-    """
-
-    name = "Forgiving Tit For Tat"
-    classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        """
-        Begins by playing C, then plays D if the opponent has defected more
-        than 10 percent of the time and their most recent decision was to
-        defect.
-        """
-        if opponent.defections > len(opponent.history) / 10:
-            return opponent.history[-1]
-        return C
diff --git a/axelrod/ipd/strategies/gambler.py b/axelrod/ipd/strategies/gambler.py
deleted file mode 100644
index 599f0cb82..000000000
--- a/axelrod/ipd/strategies/gambler.py
+++ /dev/null
@@ -1,235 +0,0 @@
-"""Stochastic variants of lookup-table-based strategies, trained with particle
-swarm algorithms.
- -For the original see: - https://gist.github.com/GDKO/60c3d0fd423598f3c4e4 -""" -import random -from typing import Any - -from axelrod.ipd.action import Action, str_to_actions, actions_to_str -from axelrod.ipd.load_data_ import load_pso_tables -from axelrod.ipd.player import IpdPlayer - -from axelrod.ipd.random_ import random_choice - -from .lookerup import EvolvableLookerUp, LookupTable, LookerUp, Plays, create_lookup_table_keys - -C, D = Action.C, Action.D -tables = load_pso_tables("pso_gambler.csv", directory="data") - - -class Gambler(LookerUp): - """ - A stochastic version of LookerUp which will select randomly an action in - some cases. - - Names: - - - Gambler: Original name by Georgios Koutsovoulos - """ - - name = "Gambler" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - actions_or_float = super(Gambler, self).strategy(opponent) - if isinstance(actions_or_float, Action): - return actions_or_float - return random_choice(actions_or_float) - - -class EvolvableGambler(Gambler, EvolvableLookerUp): - name = "EvolvableGambler" - - def __init__( - self, - lookup_dict: dict = None, - initial_actions: tuple = None, - pattern: Any = None, # pattern is str or tuple of Actions. - parameters: Plays = None, - mutation_probability: float = None - ) -> None: - EvolvableLookerUp.__init__( - self, - lookup_dict=lookup_dict, - initial_actions=initial_actions, - pattern=pattern, - parameters=parameters, - mutation_probability=mutation_probability - ) - self.pattern = list(self.pattern) - Gambler.__init__( - self, - lookup_dict=self.lookup_dict, - initial_actions=self.initial_actions, - pattern=self.pattern, - parameters=self.parameters - ) - self.overwrite_init_kwargs( - lookup_dict=self.lookup_dict, - initial_actions=self.initial_actions, - pattern=self.pattern, - parameters=self.parameters, - mutation_probability=self.mutation_probability, - ) - - # The mutate and crossover methods are mostly inherited from EvolvableLookerUp, except for the following - # modifications. - - @classmethod - def random_value(cls): - return random.random() - - @classmethod - def mutate_value(cls, value): - ep = random.uniform(-1, 1) / 4 - value += ep - if value < 0: - value = 0 - elif value > 1: - value = 1 - return value - - def receive_vector(self, vector): - """Receives a vector and updates the player's pattern. Ignores extra parameters.""" - self.pattern = vector - self_depth, op_depth, op_openings_depth = self.parameters - self._lookup = LookupTable.from_pattern(self.pattern, self_depth, op_depth, op_openings_depth) - - def create_vector_bounds(self): - """Creates the bounds for the decision variables. Ignores extra parameters.""" - size = len(self.pattern) - lb = [0.0] * size - ub = [1.0] * size - return lb, ub - - -class PSOGamblerMem1(Gambler): - """ - A 1x1x0 PSOGambler trained with pyswarm. This is the 'optimal' memory one - strategy trained against the set of short run time strategies in the - Axelrod library. 
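Since every pattern entry is a cooperation probability for one lookup key, the PSO search space is just the unit hypercube, which is what create_vector_bounds above encodes. Schematically (hypothetical values, key order illustrative):

    # Plays(self_plays=1, op_plays=1, op_openings=0) yields 2 ** 2 = 4 keys,
    # so an EvolvableGambler of that shape is optimised over [0, 1] ** 4.
    vector = [0.9, 0.1, 0.6, 0.2]   # one P(cooperate) per lookup key
    # player.receive_vector(vector) would rebuild the lookup table from it.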
- - Names: - - - PSO Gambler Mem1: Original name by Marc Harper - """ - - name = "PSO Gambler Mem1" - - def __init__(self) -> None: - pattern = tables[("PSO Gambler Mem1", 1, 1, 0)] - parameters = Plays(self_plays=1, op_plays=1, op_openings=0) - - super().__init__(parameters=parameters, pattern=pattern) - - -class PSOGambler1_1_1(Gambler): - """ - A 1x1x1 PSOGambler trained with pyswarm. - - Names: - - - PSO Gambler 1_1_1: Original name by Marc Harper - """ - - name = "PSO Gambler 1_1_1" - - def __init__(self) -> None: - pattern = tables[("PSO Gambler 1_1_1", 1, 1, 1)] - parameters = Plays(self_plays=1, op_plays=1, op_openings=1) - - super().__init__(parameters=parameters, pattern=pattern) - - -class PSOGambler2_2_2(Gambler): - """ - A 2x2x2 PSOGambler trained with a particle swarm algorithm (implemented in - pyswarm). Original version by Georgios Koutsovoulos. - - Names: - - - PSO Gambler 2_2_2: Original name by Marc Harper - """ - - name = "PSO Gambler 2_2_2" - - def __init__(self) -> None: - pattern = tables[("PSO Gambler 2_2_2", 2, 2, 2)] - parameters = Plays(self_plays=2, op_plays=2, op_openings=2) - - super().__init__(parameters=parameters, pattern=pattern) - - -class PSOGambler2_2_2_Noise05(Gambler): - """ - A 2x2x2 PSOGambler trained with pyswarm with noise=0.05. - - Names: - - - PSO Gambler 2_2_2 Noise 05: Original name by Marc Harper - """ - - name = "PSO Gambler 2_2_2 Noise 05" - - def __init__(self) -> None: - pattern = tables[("PSO Gambler 2_2_2 Noise 05", 2, 2, 2)] - parameters = Plays(self_plays=2, op_plays=2, op_openings=2) - - super().__init__(parameters=parameters, pattern=pattern) - - -class ZDMem2(Gambler): - """ - A memory two generalization of a zero determinant player. - - Names: - - - ZDMem2: Original name by Marc Harper - - Unnamed [LiS2014]_ - - """ - - name = "ZD-Mem2" - - classifier = { - "memory_depth": 2, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - pattern = [ - 11 / 12, - 4 / 11, - 7 / 9, - 1 / 10, - 5 / 6, - 3 / 11, - 7 / 9, - 1 / 10, - 2 / 3, - 1 / 11, - 7 / 9, - 1 / 10, - 3 / 4, - 2 / 11, - 7 / 9, - 1 / 10, - ] - parameters = Plays(self_plays=2, op_plays=2, op_openings=0) - - super().__init__(parameters=parameters, pattern=pattern) diff --git a/axelrod/ipd/strategies/geller.py b/axelrod/ipd/strategies/geller.py deleted file mode 100644 index 22311de3b..000000000 --- a/axelrod/ipd/strategies/geller.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -The player classes in this module do not obey standard rules of the IPD (as -indicated by their classifier). We do not recommend putting a lot of time in to -optimising them. -""" - -from axelrod.ipd._strategy_utils import inspect_strategy -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -C, D = Action.C, Action.D - - -class Geller(IpdPlayer): - """Observes what the player will do in the next round and adjust. - - If unable to do this: will play randomly. - - - This code is inspired by Matthew Williams' talk - "Cheating at rock-paper-scissors — meta-programming in Python" - given at Django Weekend Cardiff in February 2014. 
-
-    His code is here: https://github.com/mattjw/rps_metaprogramming
-    and there's some more info here: http://www.mattjw.net/2014/02/rps-metaprogramming/
-
-    This code is **way** simpler than Matt's, as in this exercise we already
-    have access to the opponent instance, so we don't need to go hunting for
-    it in the stack. Instead we can just call it to see what it's going to
-    play, and return a result based on that.
-
-    This is almost certainly cheating, and more than likely against the
-    spirit of the 'competition' :-)
-
-    Names:
-
-    - Geller: Original name by Martin Chorley (@martinjc)
-    """
-
-    name = "Geller"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": True,  # Finds out what opponent will do
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    @staticmethod
-    def foil_strategy_inspection() -> Action:
-        """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead"""
-        return random_choice(0.5)
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        """
-        Look at what the opponent will play in the next round and choose a
-        strategy that gives the least jail time, which is equivalent to
-        playing the same strategy as that which the opponent will play.
-        """
-
-        return inspect_strategy(self, opponent)
-
-
-class GellerCooperator(Geller):
-    """Observes what the player will do (like :code:`Geller`) but if unable to
-    will cooperate.
-
-    Names:
-
-    - Geller Cooperator: Original name by Karol Langner
-    """
-
-    name = "Geller Cooperator"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": True,  # Finds out what opponent will do
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    @staticmethod
-    def foil_strategy_inspection() -> Action:
-        """
-        Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead
-        """
-        return C
-
-
-class GellerDefector(Geller):
-    """Observes what the player will do (like :code:`Geller`) but if unable to
-    will defect.
-
-    Names:
-
-    - Geller Defector: Original name by Karol Langner
-    """
-
-    name = "Geller Defector"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": True,  # Finds out what opponent will do
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    @staticmethod
-    def foil_strategy_inspection() -> Action:
-        """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead"""
-        return D
diff --git a/axelrod/ipd/strategies/gobymajority.py b/axelrod/ipd/strategies/gobymajority.py
deleted file mode 100644
index dcda9e56b..000000000
--- a/axelrod/ipd/strategies/gobymajority.py
+++ /dev/null
@@ -1,246 +0,0 @@
-import copy
-from typing import Any, Dict, Union
-
-from axelrod.ipd.action import Action
-from axelrod.ipd.player import IpdPlayer
-
-C, D = Action.C, Action.D
-
-
-class GoByMajority(IpdPlayer):
-    """Submitted to Axelrod's second tournament by Gail Grisell. It came 23rd
-    and was written in 10 lines of BASIC.
-
-    A player examines the history of the opponent: if the opponent has more
-    defections than cooperations then the player defects.
-
-    In case of an equal number of defections and cooperations this player
-    will Cooperate. Passing the `soft=False` keyword argument when
-    initialising will create a HardGoByMajority which Defects in case of
-    equality.
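The decision rule just described is a three-way comparison; a minimal standalone sketch:

    def majority_move(history, soft=True):
        defections = sum(1 for move in history if move == D)
        cooperations = len(history) - defections
        if defections > cooperations:
            return D
        if defections == cooperations:
            return C if soft else D
        return C

    # majority_move([C, D]) == C, but majority_move([C, D], soft=False) == D.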
- - An optional memory attribute will limit the number of turns remembered (by - default this is 0) - - Names: - - - Go By Majority: [Axelrod1984]_ - - Grisell: [Axelrod1980b]_ - - Soft Majority: [Mittal2009]_ - """ - - name = "Go By Majority" - classifier = { - "stochastic": False, - "inspects_source": False, - "makes_use_of": set(), - "long_run_time": False, - "manipulates_source": False, - "manipulates_state": False, - "memory_depth": float("inf"), - } # type: Dict[str, Any] - - def __init__( - self, memory_depth: Union[int, float] = float("inf"), soft: bool = True - ) -> None: - """ - Parameters - ---------- - memory_depth: int >= 0 - The number of rounds to use for the calculation of the cooperation - and defection probabilities of the opponent. - soft: bool - Indicates whether to cooperate or not in the case that the - cooperation and defection probabilities are equal. - """ - - super().__init__() - self.soft = soft - self.classifier["memory_depth"] = memory_depth - if self.classifier["memory_depth"] < float("inf"): - self.memory = self.classifier["memory_depth"] - else: - self.memory = 0 - self.name = "Go By Majority" + (self.memory > 0) * (": %i" % self.memory) - if self.soft: - self.name = "Soft " + self.name - else: - self.name = "Hard " + self.name - - def __repr__(self): - return self.name - - def strategy(self, opponent: IpdPlayer) -> Action: - """This is affected by the history of the opponent. - - As long as the opponent cooperates at least as often as they defect then - the player will cooperate. If at any point the opponent has more - defections than cooperations in memory the player defects. - """ - - history = opponent.history[-self.memory :] - defections = sum([s == D for s in history]) - cooperations = sum([s == C for s in history]) - if defections > cooperations: - return D - if defections == cooperations: - if self.soft: - return C - else: - return D - return C - - -class GoByMajority40(GoByMajority): - """ - GoByMajority player with a memory of 40. - - Names: - - - Go By Majority 40: Original name by Karol Langner - """ - - name = "Go By Majority 40" - classifier = copy.copy(GoByMajority.classifier) - classifier["memory_depth"] = 40 - - def __init__(self) -> None: - super().__init__(memory_depth=40) - - -class GoByMajority20(GoByMajority): - """ - GoByMajority player with a memory of 20. - - Names: - - - Go By Majority 20: Original name by Karol Langner - """ - - name = "Go By Majority 20" - classifier = copy.copy(GoByMajority.classifier) - classifier["memory_depth"] = 20 - - def __init__(self) -> None: - super().__init__(memory_depth=20) - - -class GoByMajority10(GoByMajority): - """ - GoByMajority player with a memory of 10. - - Names: - - - Go By Majority 10: Original name by Karol Langner - """ - - name = "Go By Majority 10" - classifier = copy.copy(GoByMajority.classifier) - classifier["memory_depth"] = 10 - - def __init__(self) -> None: - super().__init__(memory_depth=10) - - -class GoByMajority5(GoByMajority): - """ - GoByMajority player with a memory of 5. - - Names: - - - Go By Majority 5: Original name by Karol Langner - """ - - name = "Go By Majority 5" - classifier = copy.copy(GoByMajority.classifier) - classifier["memory_depth"] = 5 - - def __init__(self) -> None: - super().__init__(memory_depth=5) - - -class HardGoByMajority(GoByMajority): - """A player examines the history of the opponent: if the opponent has more - defections than cooperations then the player defects. In case of equal - number of defections and cooperations this player will Defect. 
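-
-    (An editorial note on the memory window, since the slicing in the
-    inherited ``strategy`` is easy to misread: a memory of 0 means the
-    *whole* history is used, because a ``-0`` slice bound is the same as
-    ``0``)::
-
-        history = [C, D, D]
-        assert history[-0:] == history    # memory 0: unlimited window
-        assert history[-2:] == [D, D]     # memory 2: last two plays only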
- - An optional memory attribute will limit the number of turns remembered (by - default this is 0) - - Names: - - - Hard Majority: [Mittal2009]_ - """ - - name = "Hard Go By Majority" - - def __init__(self, memory_depth: Union[int, float] = float("inf")) -> None: - super().__init__(memory_depth=memory_depth, soft=False) - - -class HardGoByMajority40(HardGoByMajority): - """ - HardGoByMajority player with a memory of 40. - - Names: - - - Hard Go By Majority 40: Original name by Karol Langner - """ - - name = "Hard Go By Majority 40" - classifier = copy.copy(GoByMajority.classifier) - classifier["memory_depth"] = 40 - - def __init__(self) -> None: - super().__init__(memory_depth=40) - - -class HardGoByMajority20(HardGoByMajority): - """ - HardGoByMajority player with a memory of 20. - - Names: - - - Hard Go By Majority 20: Original name by Karol Langner - """ - - name = "Hard Go By Majority 20" - classifier = copy.copy(GoByMajority.classifier) - classifier["memory_depth"] = 20 - - def __init__(self) -> None: - super().__init__(memory_depth=20) - - -class HardGoByMajority10(HardGoByMajority): - """ - HardGoByMajority player with a memory of 10. - - Names: - - - Hard Go By Majority 10: Original name by Karol Langner - """ - - name = "Hard Go By Majority 10" - classifier = copy.copy(GoByMajority.classifier) - classifier["memory_depth"] = 10 - - def __init__(self) -> None: - super().__init__(memory_depth=10) - - -class HardGoByMajority5(HardGoByMajority): - """ - HardGoByMajority player with a memory of 5. - - Names: - - - Hard Go By Majority 5: Original name by Karol Langner - """ - - name = "Hard Go By Majority 5" - classifier = copy.copy(GoByMajority.classifier) - classifier["memory_depth"] = 5 - - def __init__(self) -> None: - super().__init__(memory_depth=5) diff --git a/axelrod/ipd/strategies/gradualkiller.py b/axelrod/ipd/strategies/gradualkiller.py deleted file mode 100644 index 40737c1d4..000000000 --- a/axelrod/ipd/strategies/gradualkiller.py +++ /dev/null @@ -1,37 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.strategy_transformers import InitialTransformer - -C, D = Action.C, Action.D - - -@InitialTransformer((D, D, D, D, D, C, C), name_prefix=None) -class GradualKiller(IpdPlayer): - """ - It begins by defecting in the first five moves, then cooperates two times. - It then defects all the time if the opponent has defected in move 6 and 7, - else cooperates all the time. - Initially designed to stop Gradual from defeating TitForTat in a 3 IpdPlayer - tournament. - - Names - - - Gradual Killer: [Prison1998]_ - """ - - # These are various properties for the strategy - name = "Gradual Killer" - classifier = { - "memory_depth": float("Inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if opponent.history[5:7] == [D, D]: - return D - return C diff --git a/axelrod/ipd/strategies/grudger.py b/axelrod/ipd/strategies/grudger.py deleted file mode 100644 index 660ff7667..000000000 --- a/axelrod/ipd/strategies/grudger.py +++ /dev/null @@ -1,319 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Grudger(IpdPlayer): - """ - A player starts by cooperating however will defect if at any point the - opponent has defected. - - This strategy came 7th in Axelrod's original tournament. 
- - Names: - - - Friedman's strategy: [Axelrod1980]_ - - Grudger: [Li2011]_ - - Grim: [Berg2015]_ - - Grim Trigger: [Banks1990]_ - - Spite: [Beaufils1997]_ - - Vengeful: [Ashlock2009]_ - """ - - name = "Grudger" - classifier = { - "memory_depth": float('inf'), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - """Begins by playing C, then plays D for the remaining rounds if the - opponent ever plays D.""" - if opponent.defections: - return D - return C - - -class ForgetfulGrudger(IpdPlayer): - """ - A player starts by cooperating however will defect if at any point the - opponent has defected, but forgets after mem_length matches. - - Names: - - - Forgetful Grudger: Original name by Geraint Palmer - """ - - name = "Forgetful Grudger" - classifier = { - "memory_depth": 10, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - """Initialised the player.""" - super().__init__() - self.mem_length = 10 - self.grudged = False - self.grudge_memory = 0 - - def strategy(self, opponent: IpdPlayer) -> Action: - """Begins by playing C, then plays D for mem_length rounds if the - opponent ever plays D.""" - if self.grudge_memory == self.mem_length: - self.grudge_memory = 0 - self.grudged = False - - if D in opponent.history[-1:]: - self.grudged = True - - if self.grudged: - self.grudge_memory += 1 - return D - return C - - -class OppositeGrudger(IpdPlayer): - """ - A player starts by defecting however will cooperate if at any point the - opponent has cooperated. - - Names: - - - Opposite Grudger: Original name by Geraint Palmer - """ - - name = "Opposite Grudger" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - """Begins by playing D, then plays C for the remaining rounds if the - opponent ever plays C.""" - if opponent.cooperations: - return C - return D - - -class Aggravater(IpdPlayer): - """ - Grudger, except that it defects on the first 3 turns - - Names - - - Aggravater: Original name by Thomas Campbell - """ - - name = "Aggravater" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - if len(opponent.history) < 3: - return D - elif opponent.defections: - return D - return C - - -class SoftGrudger(IpdPlayer): - """ - A modification of the Grudger strategy. Instead of punishing by always - defecting: punishes by playing: D, D, D, D, C, C. (Will continue to - cooperate afterwards). 
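-
-    For example (a walkthrough of the bookkeeping in ``strategy`` below): an
-    opponent defection on turn t draws a D on turn t + 1, after which the
-    stored sequence D, D, D, C, C is played out, giving the full response
-    D, D, D, D, C, C before cooperation resumes.
-
-    Names: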
- - - Soft Grudger (SGRIM): [Li2011]_ - """ - - name = "Soft Grudger" - classifier = { - "memory_depth": 6, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - """Initialised the player.""" - super().__init__() - self.grudged = False - self.grudge_memory = 0 - - def strategy(self, opponent: IpdPlayer) -> Action: - """Begins by playing C, then plays D, D, D, D, C, C against a defection - """ - if self.grudged: - strategy = [D, D, D, C, C][self.grudge_memory] - self.grudge_memory += 1 - if self.grudge_memory == 5: - self.grudge_memory = 0 - self.grudged = False - return strategy - elif D in opponent.history[-1:]: - self.grudged = True - return D - return C - - -class GrudgerAlternator(IpdPlayer): - """ - A player starts by cooperating until the first opponents defection, - then alternates D-C. - - Names: - - - c_then_per_dc: [Prison1998]_ - - Grudger Alternator: Original name by Geraint Palmer - """ - - name = "GrudgerAlternator" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - """Begins by playing C, then plays Alternator for the remaining rounds - if the opponent ever plays D.""" - if opponent.defections: - if self.history[-1] == C: - return D - return C - - -class EasyGo(IpdPlayer): - """ - A player starts by defecting however will cooperate if at any point the - opponent has defected. - - Names: - - - Easy Go: [Prison1998]_ - - Reverse Grudger (RGRIM): [Li2011]_ - - Fool Me Forever: [Harper2017]_ - """ - - name = "EasyGo" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - """Begins by playing D, then plays C for the remaining rounds if the - opponent ever plays D.""" - if opponent.defections: - return C - return D - - -class GeneralSoftGrudger(IpdPlayer): - """ - A generalization of the SoftGrudger strategy. SoftGrudger punishes by - playing: D, D, D, D, C, C. after a defection by the opponent. - GeneralSoftGrudger only punishes after its opponent defects a specified - amount of times consecutively. The punishment is in the form of a series of - defections followed by a 'penance' of a series of consecutive cooperations. - - Names: - - - General Soft Grudger: Original Name by J. 
Taylor Smith - """ - - name = "General Soft Grudger" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, n: int = 1, d: int = 4, c: int = 2) -> None: - """ - Parameters - ---------- - n: int - The number of defections by the opponent to trigger punishment - d: int - The number of defections to punish the opponent - c: int - The number of cooperations in the 'penance' stage - - Special Cases - ------------- - GeneralSoftGrudger(1,4,2) is equivalent to SoftGrudger - """ - super().__init__() - self.n = n - self.d = d - self.c = c - self.grudge = [D] * (d - 1) + [C] * c - self.grudged = False - self.grudge_memory = 0 - - def strategy(self, opponent: IpdPlayer) -> Action: - """ - Punishes after its opponent defects 'n' times consecutively. - The punishment is in the form of 'd' defections followed by a penance of - 'c' consecutive cooperations. - """ - if self.grudged: - strategy = self.grudge[self.grudge_memory] - self.grudge_memory += 1 - if self.grudge_memory == len(self.grudge): - self.grudged = False - self.grudge_memory = 0 - return strategy - elif [D] * self.n == opponent.history[-self.n :]: - self.grudged = True - return D - - return C - - def __repr__(self) -> str: - return "%s: n=%s,d=%s,c=%s" % (self.name, self.n, self.d, self.c) diff --git a/axelrod/ipd/strategies/grumpy.py b/axelrod/ipd/strategies/grumpy.py deleted file mode 100644 index a1544a2d3..000000000 --- a/axelrod/ipd/strategies/grumpy.py +++ /dev/null @@ -1,73 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Grumpy(IpdPlayer): - """ - A player that defects after a certain level of grumpiness. - Grumpiness increases when the opponent defects and decreases - when the opponent co-operates. - - Names: - - - Grumpy: Original name by Jason Young - """ - - name = "Grumpy" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, - starting_state: str = "Nice", - grumpy_threshold: int = 10, - nice_threshold: int = -10, - ) -> None: - """ - Parameters - ---------- - starting_state: str - 'Nice' or 'Grumpy' - grumpy_threshold: int - The threshold of opponent defections - cooperations to become - grumpy - nice_threshold: int - The threshold of opponent defections - cooperations to become - nice - """ - super().__init__() - self.state = starting_state - self.grumpy_threshold = grumpy_threshold - self.nice_threshold = nice_threshold - - def strategy(self, opponent: IpdPlayer) -> Action: - """A player that gets grumpier the more the opposition defects, - and nicer the more they cooperate. - - Starts off Nice, but becomes grumpy once the grumpiness threshold is - hit. Won't become nice once that grumpy threshold is hit, but must - reach a much lower threshold before it becomes nice again. 
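-
-        For example (a sketch with the default thresholds): grumpiness is
-        the opponent's defections minus cooperations, so after 15 defections
-        and 4 cooperations it is 11 > 10 and the player turns Grumpy; it only
-        turns Nice again once grumpiness falls below -10.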
-        """
-
-        grumpiness = opponent.defections - opponent.cooperations
-
-        if self.state == "Nice":
-            if grumpiness > self.grumpy_threshold:
-                self.state = "Grumpy"
-                return D
-            return C
-
-        if self.state == "Grumpy":
-            if grumpiness < self.nice_threshold:
-                self.state = "Nice"
-                return C
-            return D
diff --git a/axelrod/ipd/strategies/handshake.py b/axelrod/ipd/strategies/handshake.py
deleted file mode 100644
index c8c7e82c9..000000000
--- a/axelrod/ipd/strategies/handshake.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from typing import List
-
-from axelrod.ipd.action import Action
-from axelrod.ipd.player import IpdPlayer
-
-C, D = Action.C, Action.D
-
-
-class Handshake(IpdPlayer):
-    """Starts with C, D. If the opponent plays the same way, cooperate forever,
-    else defect forever.
-
-    Names:
-
-    - Handshake: [Robson1990]_
-    """
-
-    name = "Handshake"
-    classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self, initial_plays: List[Action] = None) -> None:
-        super().__init__()
-        if not initial_plays:
-            initial_plays = [C, D]
-        self.initial_plays = initial_plays
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        # Begin by playing the initial sequence (C, D by default)
-        index = len(self.history)
-        if index < len(self.initial_plays):
-            return self.initial_plays[index]
-        # If our opponent opened with the same initial sequence, cooperate
-        # forever. Otherwise defect forever.
-        if opponent.history[0 : len(self.initial_plays)] == self.initial_plays:
-            return C
-        return D
diff --git a/axelrod/ipd/strategies/hmm.py b/axelrod/ipd/strategies/hmm.py
deleted file mode 100644
index da76ac959..000000000
--- a/axelrod/ipd/strategies/hmm.py
+++ /dev/null
@@ -1,389 +0,0 @@
-from random import randrange
-import numpy.random as random
-from numpy.random import choice
-
-from axelrod.ipd.action import Action
-from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists, crossover_lists
-from axelrod.ipd.player import IpdPlayer
-from axelrod.ipd.random_ import random_choice, random_vector
-
-C, D = Action.C, Action.D
-
-
-def is_stochastic_matrix(m, ep=1e-8) -> bool:
-    """Checks that the matrix m (a list of lists) is a stochastic matrix."""
-    for i in range(len(m)):
-        for j in range(len(m[i])):
-            if (m[i][j] < 0) or (m[i][j] > 1):
-                return False
-        s = sum(m[i])
-        if abs(1.0 - s) > ep:
-            return False
-    return True
-
-
-def normalize_vector(vec):
-    """Rescales a vector so that its entries sum to 1."""
-    s = sum(vec)
-    vec = [v / s for v in vec]
-    return vec
-
-
-def mutate_row(row, mutation_probability):
-    """
-    Given a row of probabilities, randomly change each entry with probability
-    `mutation_probability` (a value between 0 and 1). If changing, then change
-    by a value randomly (uniformly) chosen from [-0.25, 0.25], bounded below
-    by 0 and above by 1.
-    """
-    randoms = random.random(len(row))
-    for i in range(len(row)):
-        if randoms[i] < mutation_probability:
-            ep = random.uniform(-1, 1) / 4
-            row[i] += ep
-            if row[i] < 0:
-                row[i] = 0
-            if row[i] > 1:
-                row[i] = 1
-    return row
-
-
-class SimpleHMM(object):
-    """Implementation of a basic Hidden Markov Model. We assume that the
-    transition matrix is conditioned on the opponent's last action, so there
-    are two transition matrices. Emission distributions are stored as Bernoulli
-    probabilities for each state. This is essentially a stochastic FSM.
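-
-    (An editorial note on the helper functions above, with illustrative
-    numbers: ``mutate_row`` can leave a row summing to slightly more or less
-    than 1, which is why the evolvable players re-normalise afterwards)::
-
-        row = mutate_row([0.5, 0.5], mutation_probability=1.0)
-        row = normalize_vector(row)  # rows must sum to 1 again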
- - https://en.wikipedia.org/wiki/Hidden_Markov_model - """ - - def __init__( - self, transitions_C, transitions_D, emission_probabilities, initial_state - ) -> None: - """ - Params - ------ - transitions_C and transitions_D are square stochastic matrices: - lists of lists with all values in [0, 1] and rows that sum to 1. - emission_probabilities is a vector of values in [0, 1] - initial_state is an element of range(0, len(emission_probabilities)) - """ - self.transitions_C = transitions_C - self.transitions_D = transitions_D - self.emission_probabilities = emission_probabilities - self.state = initial_state - - def is_well_formed(self) -> bool: - """ - Determines if the HMM parameters are well-formed: - - Both matrices are stochastic - - Emissions probabilities are in [0, 1] - - The initial state is valid. - """ - if not is_stochastic_matrix(self.transitions_C): - return False - if not is_stochastic_matrix(self.transitions_D): - return False - for p in self.emission_probabilities: - if (p < 0) or (p > 1): - return False - if self.state not in range(0, len(self.emission_probabilities)): - return False - return True - - def __eq__(self, other: IpdPlayer) -> bool: - """Equality of two HMMs""" - check = True - for attr in [ - "transitions_C", - "transitions_D", - "emission_probabilities", - "state", - ]: - check = check and getattr(self, attr) == getattr(other, attr) - return check - - def move(self, opponent_action: Action) -> Action: - """Changes state and computes the response action. - - Parameters - opponent_action: Axelrod.Action - The opponent's last action. - """ - num_states = len(self.emission_probabilities) - if opponent_action == C: - next_state = choice(num_states, 1, p=self.transitions_C[self.state]) - else: - next_state = choice(num_states, 1, p=self.transitions_D[self.state]) - self.state = next_state[0] - p = self.emission_probabilities[self.state] - action = random_choice(p) - return action - - -class HMMPlayer(IpdPlayer): - """ - Abstract base class for Hidden Markov Model players. 
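-
-    A rough usage sketch of the underlying model (the numbers here are
-    illustrative only, not parameters shipped with the library)::
-
-        hmm = SimpleHMM(transitions_C=[[0.9, 0.1], [0.3, 0.7]],
-                        transitions_D=[[0.5, 0.5], [0.2, 0.8]],
-                        emission_probabilities=[0.8, 0.1],
-                        initial_state=0)
-        action = hmm.move(C)  # transition on the opponent's last play, then
-                              # emit C with the new state's probability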
- - Names - - - HMM IpdPlayer: Original name by Marc Harper - """ - - name = "HMM IpdPlayer" - - classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, - transitions_C=None, - transitions_D=None, - emission_probabilities=None, - initial_state=0, - initial_action=C - ) -> None: - super().__init__() - if not transitions_C: - transitions_C = [[1]] - transitions_D = [[1]] - emission_probabilities = [0.5] # Not stochastic - initial_state = 0 - self.initial_state = initial_state - self.initial_action = initial_action - self.hmm = SimpleHMM( - copy_lists(transitions_C), copy_lists(transitions_D), list(emission_probabilities), initial_state - ) - assert self.hmm.is_well_formed() - self.state = self.hmm.state - self.classifier["stochastic"] = self.is_stochastic() - - def is_stochastic(self) -> bool: - """Determines if the player is stochastic.""" - # If the transitions matrices and emission_probabilities are all 0 or 1 - # Then the player is stochastic - values = set(self.hmm.emission_probabilities) - for m in [self.hmm.transitions_C, self.hmm.transitions_D]: - for row in m: - values.update(row) - if not values.issubset({0, 1}): - return True - return False - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) == 0: - return self.initial_action - else: - action = self.hmm.move(opponent.history[-1]) - # Record the state for testing purposes, this isn't necessary - # for the strategy to function - self.state = self.hmm.state - return action - - -class EvolvableHMMPlayer(HMMPlayer, EvolvablePlayer): - """Evolvable version of HMMPlayer.""" - name = "EvolvableHMMPlayer" - - def __init__( - self, - transitions_C=None, - transitions_D=None, - emission_probabilities=None, - initial_state=0, - initial_action=C, - num_states=None, - mutation_probability=None - ) -> None: - transitions_C, transitions_D, emission_probabilities, initial_state, initial_action, num_states, mutation_probability = self._normalize_parameters( - transitions_C, transitions_D, emission_probabilities, initial_state, initial_action, num_states, mutation_probability) - self.mutation_probability = mutation_probability - HMMPlayer.__init__(self, - transitions_C=transitions_C, - transitions_D=transitions_D, - emission_probabilities=emission_probabilities, - initial_state=initial_state, - initial_action=initial_action) - EvolvablePlayer.__init__(self) - self.overwrite_init_kwargs( - transitions_C=transitions_C, - transitions_D=transitions_D, - emission_probabilities=emission_probabilities, - initial_state=initial_state, - initial_action=initial_action, - num_states=num_states, - mutation_probability=mutation_probability - ) - - @classmethod - def _normalize_parameters(cls, transitions_C=None, transitions_D=None, emission_probabilities=None, - initial_state=None, initial_action=None, num_states=None, mutation_probability=None): - if not (transitions_C and transitions_D and emission_probabilities and (initial_state is not None) and (initial_action is not None)): - if not num_states: - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableHMMPlayer") - transitions_C, transitions_D, emission_probabilities, initial_state, initial_action = cls.random_params( - num_states) - # Normalize types of various matrices - for m in [transitions_C, transitions_D]: - for i in range(len(m)): - m[i] = list(map(float, m[i])) - 
emission_probabilities = list(map(float, emission_probabilities)) - num_states = len(emission_probabilities) - if mutation_probability is None: - mutation_probability = 10 / (num_states ** 2) - else: - mutation_probability = mutation_probability - return transitions_C, transitions_D, emission_probabilities, initial_state, initial_action, num_states, mutation_probability - - @classmethod - def random_params(cls, num_states): - transitions_C = [] - transitions_D = [] - emission_probabilities = [] - for _ in range(num_states): - transitions_C.append(random_vector(num_states)) - transitions_D.append(random_vector(num_states)) - emission_probabilities.append(random.random()) - initial_state = randrange(num_states) - initial_action = C - return transitions_C, transitions_D, emission_probabilities, initial_state, initial_action - - @property - def num_states(self): - return len(self.hmm.emission_probabilities) - - @staticmethod - def mutate_rows(rows, mutation_probability): - for i, row in enumerate(rows): - row = mutate_row(row, mutation_probability) - rows[i] = normalize_vector(row) - return rows - - def mutate(self): - transitions_C = self.mutate_rows( - self.hmm.transitions_C, self.mutation_probability) - transitions_D = self.mutate_rows( - self.hmm.transitions_D, self.mutation_probability) - emission_probabilities = mutate_row( - self.hmm.emission_probabilities, self.mutation_probability) - initial_action = self.initial_action - if random.random() < self.mutation_probability / 10: - initial_action = self.initial_action.flip() - initial_state = self.initial_state - if random.random() < self.mutation_probability / (10 * self.num_states): - initial_state = randrange(self.num_states) - return self.create_new( - transitions_C=transitions_C, - transitions_D=transitions_D, - emission_probabilities=emission_probabilities, - initial_state=initial_state, - initial_action=initial_action, - ) - - def crossover(self, other): - if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") - transitions_C = crossover_lists(self.hmm.transitions_C, other.hmm.transitions_C) - transitions_D = crossover_lists(self.hmm.transitions_D, other.hmm.transitions_D) - emission_probabilities = crossover_lists( - self.hmm.emission_probabilities, other.hmm.emission_probabilities) - return self.create_new( - transitions_C=transitions_C, - transitions_D=transitions_D, - emission_probabilities=emission_probabilities - ) - - def receive_vector(self, vector): - """ - Read a serialized vector into the set of HMM parameters (less initial - state). Then assign those HMM parameters to this class instance. - - Assert that the vector has the right number of elements for an HMMParams - class with self.num_states. - - Assume the first num_states^2 entries are the transitions_C matrix. The - next num_states^2 entries are the transitions_D matrix. Then the next - num_states entries are the emission_probabilities vector. Finally the last - entry is the initial_action. 
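-
-        For example (a sketch of the layout for num_states = 2): the vector
-        has 2 * 2 ** 2 + 2 + 1 = 11 entries; entries 0-3 form transitions_C,
-        entries 4-7 form transitions_D, entries 8-9 are the emission
-        probabilities and entry 10 encodes the initial action.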
- """ - - assert(len(vector) == 2 * self.num_states ** 2 + self.num_states + 1) - - def deserialize(vector): - matrix = [] - for i in range(self.num_states): - row = vector[self.num_states * i: self.num_states * (i + 1)] - row = normalize_vector(row) - matrix.append(row) - return matrix - - break_tc = self.num_states ** 2 - break_td = 2 * self.num_states ** 2 - break_ep = 2 * self.num_states ** 2 + self.num_states - initial_state = 0 - self.hmm = SimpleHMM( - deserialize(vector[0:break_tc]), - deserialize(vector[break_tc:break_td]), - normalize_vector(vector[break_td:break_ep]), - initial_state - ) - self.initial_action = C if round(vector[-1]) == 0 else D - self.initial_state = initial_state - - def create_vector_bounds(self): - """Creates the bounds for the decision variables.""" - vec_len = 2 * self.num_states ** 2 + self.num_states + 1 - lb = [0.0] * vec_len - ub = [1.0] * vec_len - return lb, ub - - -class EvolvedHMM5(HMMPlayer): - """ - An HMM-based player with five hidden states trained with an evolutionary - algorithm. - - Names: - - - Evolved HMM 5: Original name by Marc Harper - """ - - name = "Evolved HMM 5" - - classifier = { - "memory_depth": 5, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - initial_state = 3 - initial_action = C - t_C = [ - [1, 0, 0, 0, 0], - [0, 1, 0, 0, 0], - [0, 1, 0, 0, 0], - [0.631, 0, 0, 0.369, 0], - [0.143, 0.018, 0.118, 0, 0.721], - ] - - t_D = [ - [0, 1, 0, 0, 0], - [0, 0.487, 0.513, 0, 0], - [0, 0, 0, 0.590, 0.410], - [1, 0, 0, 0, 0], - [0, 0.287, 0.456, 0.146, 0.111], - ] - - emissions = [1, 0, 0, 1, 0.111] - super().__init__(t_C, t_D, emissions, initial_state, initial_action) diff --git a/axelrod/ipd/strategies/human.py b/axelrod/ipd/strategies/human.py deleted file mode 100644 index 8f343b93c..000000000 --- a/axelrod/ipd/strategies/human.py +++ /dev/null @@ -1,175 +0,0 @@ -from os import linesep - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from prompt_toolkit import prompt -from prompt_toolkit.validation import ValidationError, Validator - -try: # pragma: no cover - from prompt_toolkit.styles import style_from_dict - from prompt_toolkit.token import Token - - token_toolbar = Token.Toolbar - bottom_toolbar_name = "get_bottom_toolbar_tokens" - PROMPT2 = False - -except ImportError: # prompt_toolkit v2 - from prompt_toolkit.styles import Style - - style_from_dict = Style.from_dict - token_toolbar = "pygments.toolbar" - bottom_toolbar_name = "bottom_toolbar" - PROMPT2 = True - -C, D = Action.C, Action.D - -toolbar_style = style_from_dict({token_toolbar: "#ffffff bg:#333333"}) - - -class ActionValidator(Validator): - """ - A class to validate input from prompt_toolkit.prompt - Described at http://python-prompt-toolkit.readthedocs.io/en/latest/pages/building_prompts.html#input-validation - """ - - def validate(self, document) -> None: - text = document.text - - if text and text.upper() not in ["C", "D"]: - raise ValidationError(message="Action must be C or D", cursor_position=0) - - -class Human(IpdPlayer): - """ - A strategy that prompts for keyboard input rather than deriving its own - action. - - This strategy is intended to be used interactively by a user playing - against other strategies from within the rest of the library. Unlike - other strategies, it is designed to be a teaching aid rather than a - research tool. 
- """ - - name = "Human" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(["length", "game"]), - "long_run_time": True, - "inspects_source": True, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, name="human", c_symbol="C", d_symbol="D"): - """ - Parameters - ---------- - name: string - The name of the human player - c_symbol: string - A symbol to denote cooperation within the history toolbar - and prompt - d_symbol: string - A symbol to denote defection within the history toolbar - and prompt - """ - super().__init__() - self.human_name = name - self.symbols = {C: c_symbol, D: d_symbol} - - def _history_toolbar(self): - """ - A prompt-toolkit function to define the bottom toolbar. - Described at http://python-prompt-toolkit.readthedocs.io/en/latest/pages/building_prompts.html#adding-a-bottom-toolbar - """ - my_history = [self.symbols[action] for action in self.history] - opponent_history = [self.symbols[action] for action in self.history.coplays] - history = list(zip(my_history, opponent_history)) - if self.history: - content = "History ({}, opponent): {}".format(self.human_name, history) - else: - content = "" - return content - - def _status_messages(self): - """ - A method to define the messages printed to the console and - displayed in the prompt-toolkit bottom toolbar. - - The bottom toolbar is defined only if a match is in progress. - - The console print statement is either the result of the previous - turn or a message indicating that new match is starting. - - Returns - ------- - dict - mapping print or toolbar to the relevant string - """ - if self.history: - toolbar = ( - self._history_toolbar - if PROMPT2 - else lambda cli: [(token_toolbar, self._history_toolbar())] - ) - print_statement = "{}Turn {}: {} played {}, opponent played {}".format( - linesep, - len(self.history), - self.human_name, - self.symbols[self.history[-1]], - self.symbols[self.history.coplays[-1]], - ) - else: - toolbar = None - print_statement = "{}Starting new match".format(linesep) - - return {"toolbar": toolbar, "print": print_statement} - - def _get_human_input(self) -> Action: # pragma: no cover - """ - A method to prompt the user for input, validate it and display - the bottom toolbar. - - Returns - ------- - string - Uppercase C or D indicating the action to play - """ - action = prompt( - "Turn {} action [C or D] for {}: ".format( - len(self.history) + 1, self.human_name - ), - validator=ActionValidator(), - style=toolbar_style, - **{bottom_toolbar_name: self.status_messages["toolbar"]}, - ) - - return Action.from_char(action.upper()) - - def strategy(self, opponent: IpdPlayer, input_function=None): - """ - Ordinarily, the strategy prompts for keyboard input rather than - deriving its own action. - - However, it is also possible to pass a function which returns a valid - action. This is mainly used for testing purposes in order to by-pass - the need for human interaction. 
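-
-        For example (a testing sketch; ``human`` and ``opponent`` are assumed
-        to be existing player instances)::
-
-            action = human.strategy(opponent, input_function=lambda: C)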
-        """
-
-        self.status_messages = self._status_messages()
-        print(self.status_messages["print"])
-
-        if not input_function:  # pragma: no cover
-            action = self._get_human_input()
-        else:
-            action = input_function()
-
-        return action
-
-    def __repr__(self):
-        """
-        Override the default __repr__ of the class
-        """
-        return "Human: {}".format(self.human_name)
diff --git a/axelrod/ipd/strategies/hunter.py b/axelrod/ipd/strategies/hunter.py
deleted file mode 100644
index c385c6ba5..000000000
--- a/axelrod/ipd/strategies/hunter.py
+++ /dev/null
@@ -1,255 +0,0 @@
-from typing import List, Optional, Tuple
-
-from axelrod.ipd._strategy_utils import detect_cycle
-from axelrod.ipd.action import Action
-from axelrod.ipd.player import IpdPlayer
-
-C, D = Action.C, Action.D
-
-
-class DefectorHunter(IpdPlayer):
-    """A player who hunts for defectors.
-
-    Names:
-
-    - Defector Hunter: Original name by Karol Langner
-    """
-
-    name = "Defector Hunter"
-    classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        if len(self.history) >= 4 and len(opponent.history) == opponent.defections:
-            return D
-        return C
-
-
-class CooperatorHunter(IpdPlayer):
-    """A player who hunts for cooperators.
-
-    Names:
-
-    - Cooperator Hunter: Original name by Karol Langner
-    """
-
-    name = "Cooperator Hunter"
-    classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        if len(self.history) >= 4 and len(opponent.history) == opponent.cooperations:
-            return D
-        return C
-
-
-def is_alternator(history: List[Action]) -> bool:
-    for i in range(len(history) - 1):
-        if history[i] == history[i + 1]:
-            return False
-    return True
-
-
-class AlternatorHunter(IpdPlayer):
-    """A player who hunts for alternators.
-
-    Names:
-
-    - Alternator Hunter: Original name by Karol Langner
-    """
-
-    name = "Alternator Hunter"
-    classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        super().__init__()
-        self.is_alt = False
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        if len(opponent.history) < 6:
-            return C
-        if len(self.history) == 6:
-            if is_alternator(opponent.history):
-                self.is_alt = True
-        if self.is_alt:
-            return D
-        return C
-
-
-class CycleHunter(IpdPlayer):
-    """Hunts strategies that play cyclically, like any of the Cyclers,
-    Alternator, etc.
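-
-    For example (a sketch of the rule implemented in ``strategy`` below): an
-    opponent history of C, D, D, C, D, D, ... makes ``detect_cycle`` return
-    (C, D, D) and triggers permanent defection, whereas an all-C history is
-    ignored because single-action cycles fail the ``len(set(cycle)) > 1``
-    check.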
- - Names: - - - Cycle Hunter: Original name by Marc Harper - """ - - name = "Cycle Hunter" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.cycle = None # type: Optional[Tuple[Action]] - - def strategy(self, opponent: IpdPlayer) -> Action: - if self.cycle: - return D - cycle = detect_cycle(opponent.history, min_size=3) - if cycle: - if len(set(cycle)) > 1: - self.cycle = cycle - return D - return C - - -class EventualCycleHunter(CycleHunter): - """Hunts strategies that eventually play cyclically. - - Names: - - - Eventual Cycle Hunter: Original name by Marc Harper - """ - - name = "Eventual Cycle Hunter" - - def strategy(self, opponent: IpdPlayer) -> None: - if len(opponent.history) < 10: - return C - if len(opponent.history) == opponent.cooperations: - return C - if len(opponent.history) % 10 == 0: - # recheck - self.cycle = detect_cycle(opponent.history, offset=10, min_size=3) - if self.cycle: - return D - else: - return C - - -class MathConstantHunter(IpdPlayer): - """A player who hunts for mathematical constant players. - - Names: - - Math Constant Hunter: Original name by Karol Langner - """ - - name = "Math Constant Hunter" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - """ - Check whether the number of cooperations in the first and second halves - of the history are close. The variance of the uniform distribution (1/4) - is a reasonable delta but use something lower for certainty and avoiding - false positives. This approach will also detect a lot of random players. - """ - - n = len(self.history) - if n >= 8 and opponent.cooperations and opponent.defections: - start1, end1 = 0, n // 2 - start2, end2 = n // 4, 3 * n // 4 - start3, end3 = n // 2, n - count1 = opponent.history[start1:end1].count(C) + self.history[ - start1:end1 - ].count(C) - count2 = opponent.history[start2:end2].count(C) + self.history[ - start2:end2 - ].count(C) - count3 = opponent.history[start3:end3].count(C) + self.history[ - start3:end3 - ].count(C) - ratio1 = 0.5 * count1 / (end1 - start1) - ratio2 = 0.5 * count2 / (end2 - start2) - ratio3 = 0.5 * count3 / (end3 - start3) - if abs(ratio1 - ratio2) < 0.2 and abs(ratio1 - ratio3) < 0.2: - return D - return C - - -class RandomHunter(IpdPlayer): - """A player who hunts for random players. - - Names: - - - Random Hunter: Original name by Karol Langner - """ - - name = "Random Hunter" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - self.countCC = 0 - self.countDD = 0 - super().__init__() - - def strategy(self, opponent: IpdPlayer) -> Action: - """ - A random player is unpredictable, which means the conditional frequency - of cooperation after cooperation, and defection after defections, should - be close to 50%... although how close is debatable. 
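-
-        For example (a sketch with made-up counts): if the opponent
-        cooperated after 11 of this player's last 20 cooperations, the
-        conditional frequency 11 / 20 = 0.55 satisfies |0.55 - 0.5| < 0.25,
-        the acceptance band used below, and so counts as evidence of
-        randomness.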
- """ - # Update counts - if len(self.history) > 1: - if self.history[-2] == C and opponent.history[-1] == C: - self.countCC += 1 - if self.history[-2] == D and opponent.history[-1] == D: - self.countDD += 1 - - n = len(self.history) - if n > 10: - probabilities = [] - if self.cooperations > 5: - probabilities.append(self.countCC / self.cooperations) - if self.defections > 5: - probabilities.append(self.countDD / self.defections) - if probabilities and all([abs(p - 0.5) < 0.25 for p in probabilities]): - return D - return C diff --git a/axelrod/ipd/strategies/inverse.py b/axelrod/ipd/strategies/inverse.py deleted file mode 100644 index cd9baad8c..000000000 --- a/axelrod/ipd/strategies/inverse.py +++ /dev/null @@ -1,48 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -C, D = Action.C, Action.D - - -class Inverse(IpdPlayer): - """A player who defects with a probability that diminishes relative to how - long ago the opponent defected. - - Names: - - - Inverse: Original Name by Karol Langner - """ - - name = "Inverse" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - """Looks at opponent history to see if they have defected. - - If so, player defection is inversely proportional to when this occurred. - """ - - # calculate how many turns ago the opponent defected - index = next( - ( - index - for index, value in enumerate(opponent.history[::-1], start=1) - if value == D - ), - None, - ) - - if index is None: - return C - - return random_choice(1 - 1 / abs(index)) diff --git a/axelrod/ipd/strategies/lookerup.py b/axelrod/ipd/strategies/lookerup.py deleted file mode 100644 index 8bd1dcbfa..000000000 --- a/axelrod/ipd/strategies/lookerup.py +++ /dev/null @@ -1,580 +0,0 @@ -from collections import namedtuple -from itertools import product -from typing import Any, TypeVar - -import numpy.random as random -from numpy.random import choice - -from axelrod.ipd.action import Action, actions_to_str, str_to_actions -from axelrod.ipd.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_dictionaries -from axelrod.ipd.player import IpdPlayer - - -C, D = Action.C, Action.D -actions = (C, D) - -Plays = namedtuple("Plays", "self_plays, op_plays, op_openings") -Reaction = TypeVar("Reaction", Action, float) - - -class LookupTable(object): - """ - LookerUp and its children use this object to determine their next actions. - - It is an object that creates a table of all possible plays to a specified - depth and the action to be returned for each combination of plays. - The "get" method returns the appropriate response. - For the table containing:: - - .... - Plays(self_plays=(C, C), op_plays=(C, D), op_openings=(D, C): D - Plays(self_plays=(C, C), op_plays=(C, D), op_openings=(D, D): C - ... - - with: - player.history[-2:]=[C, C] and - opponent.history[-2:]=[C, D] and - opponent.history[:2]=[D, D], - calling LookupTable.get(plays=(C, C), op_plays=(C, D), op_openings=(D, D)) - will return C. - - Instantiate the table with a lookup_dict. This is - {(self_plays_tuple, op_plays_tuple, op_openings_tuple): action, ...}. - It must contain every possible - permutation with C's and D's of the above tuple. 
so:: - - good_dict = {((C,), (C,), ()): C, - ((C,), (D,), ()): C, - ((D,), (C,), ()): D, - ((D,), (D,), ()): C} - - bad_dict = {((C,), (C,), ()): C, - ((C,), (D,), ()): C, - ((D,), (C,), ()): D} - - LookupTable.from_pattern() creates an ordered list of keys for you and maps - the pattern to the keys.:: - - LookupTable.from_pattern(pattern=(C, D, D, C), - player_depth=0, op_depth=1, op_openings_depth=1 - ) - - creates the dictionary:: - - {Plays(self_plays=(), op_plays=(C), op_openings=(C)): C, - Plays(self_plays=(), op_plays=(C), op_openings=(D)): D, - Plays(self_plays=(), op_plays=(D), op_openings=(C)): D, - Plays(self_plays=(), op_plays=(D), op_openings=(D)): C,} - - and then returns a LookupTable with that dictionary. - """ - - def __init__(self, lookup_dict: dict) -> None: - self._dict = make_keys_into_plays(lookup_dict) - - sample_key = next(iter(self._dict)) - self._plays_depth = len(sample_key.self_plays) - self._op_plays_depth = len(sample_key.op_plays) - self._op_openings_depth = len(sample_key.op_openings) - self._table_depth = max( - self._plays_depth, self._op_plays_depth, self._op_openings_depth - ) - self._raise_error_for_bad_lookup_dict() - - def _raise_error_for_bad_lookup_dict(self): - if any( - len(key.self_plays) != self._plays_depth - or len(key.op_plays) != self._op_plays_depth - or len(key.op_openings) != self._op_openings_depth - for key in self._dict - ): - raise ValueError("Lookup table keys are not all the same size.") - total_key_combinations = 2 ** ( - self._plays_depth + self._op_plays_depth + self._op_openings_depth - ) - if total_key_combinations != len(self._dict): - msg = ( - "Lookup table does not have enough keys" - + " to cover all possibilities." - ) - raise ValueError(msg) - - @classmethod - def from_pattern( - cls, pattern: tuple, player_depth: int, op_depth: int, op_openings_depth: int - ): - keys = create_lookup_table_keys( - player_depth=player_depth, - op_depth=op_depth, - op_openings_depth=op_openings_depth, - ) - if len(keys) != len(pattern): - msg = "Pattern must be len: {}, but was len: {}".format( - len(keys), len(pattern) - ) - raise ValueError(msg) - input_dict = dict(zip(keys, pattern)) - return cls(input_dict) - - def get(self, plays: tuple, op_plays: tuple, op_openings: tuple) -> Any: - return self._dict[ - Plays(self_plays=plays, op_plays=op_plays, op_openings=op_openings) - ] - - @property - def player_depth(self) -> int: - return self._plays_depth - - @property - def op_depth(self) -> int: - return self._op_plays_depth - - @property - def op_openings_depth(self) -> int: - return self._op_openings_depth - - @property - def table_depth(self) -> int: - return self._table_depth - - @property - def dictionary(self) -> dict: - return self._dict.copy() - - def display( - self, sort_by: tuple = ("op_openings", "self_plays", "op_plays") - ) -> str: - """ - Returns a string for printing lookup_table info in specified order. 
- - :param sort_by: only_elements='self_plays', 'op_plays', 'op_openings' - """ - - def sorter(plays): - return tuple(actions_to_str(getattr(plays, field) for field in sort_by)) - - col_width = 11 - sorted_keys = sorted(self._dict, key=sorter) - header_line = ( - "{str_list[0]:^{width}}|" - + "{str_list[1]:^{width}}|" - + "{str_list[2]:^{width}}" - ) - display_line = header_line.replace("|", ",") + ": {str_list[3]}," - - def make_commaed_str(action_tuple): - return ", ".join(str(action) for action in action_tuple) - - line_elements = [ - ( - make_commaed_str(getattr(key, sort_by[0])), - make_commaed_str(getattr(key, sort_by[1])), - make_commaed_str(getattr(key, sort_by[2])), - self._dict[key], - ) - for key in sorted_keys - ] - header = header_line.format(str_list=sort_by, width=col_width) + "\n" - lines = [ - display_line.format(str_list=line, width=col_width) - for line in line_elements - ] - return header + "\n".join(lines) + "\n" - - def __eq__(self, other) -> bool: - if not isinstance(other, LookupTable): - return False - return self._dict == other.dictionary - - -def make_keys_into_plays(lookup_table: dict) -> dict: - """Returns a dict where all keys are Plays.""" - new_table = lookup_table.copy() - if any(not isinstance(key, Plays) for key in new_table): - new_table = {Plays(*key): value for key, value in new_table.items()} - return new_table - - -def create_lookup_table_keys( - player_depth: int, op_depth: int, op_openings_depth: int -) -> list: - """Returns a list of Plays that has all possible permutations of C's and - D's for each specified depth. the list is in order, - C < D sorted by ((player_tuple), (op_tuple), (op_openings_tuple)). - create_lookup_keys(2, 1, 0) returns:: - - [Plays(self_plays=(C, C), op_plays=(C,), op_openings=()), - Plays(self_plays=(C, C), op_plays=(D,), op_openings=()), - Plays(self_plays=(C, D), op_plays=(C,), op_openings=()), - Plays(self_plays=(C, D), op_plays=(D,), op_openings=()), - Plays(self_plays=(D, C), op_plays=(C,), op_openings=()), - Plays(self_plays=(D, C), op_plays=(D,), op_openings=()), - Plays(self_plays=(D, D), op_plays=(C,), op_openings=()), - Plays(self_plays=(D, D), op_plays=(D,), op_openings=())] - - """ - self_plays = product((C, D), repeat=player_depth) - op_plays = product((C, D), repeat=op_depth) - op_openings = product((C, D), repeat=op_openings_depth) - - iterator = product(self_plays, op_plays, op_openings) - return [Plays(*plays_tuple) for plays_tuple in iterator] - - -default_tft_lookup_table = { - Plays(self_plays=(), op_plays=(D,), op_openings=()): D, - Plays(self_plays=(), op_plays=(C,), op_openings=()): C, -} - - -class LookerUp(IpdPlayer): - """ - This strategy uses a LookupTable to decide its next action. If there is not - enough history to use the table, it calls from a list of - self.initial_actions. - - if self_depth=2, op_depth=3, op_openings_depth=5, LookerUp finds the last 2 - plays of self, the last 3 plays of opponent and the opening 5 plays of - opponent. It then looks those up on the LookupTable and returns the - appropriate action. If 5 rounds have not been played (the minimum required - for op_openings_depth), it calls from self.initial_actions. - - LookerUp can be instantiated with a dictionary. The dictionary uses - tuple(tuple, tuple, tuple) or Plays as keys. for example. 
- - - self_plays: depth=2 - - op_plays: depth=1 - - op_openings: depth=0:: - - {Plays((C, C), (C), ()): C, - Plays((C, C), (D), ()): D, - Plays((C, D), (C), ()): D, <- example below - Plays((C, D), (D), ()): D, - Plays((D, C), (C), ()): C, - Plays((D, C), (D), ()): D, - Plays((D, D), (C), ()): C, - Plays((D, D), (D), ()): D} - - From the above table, if the player last played C, D and the opponent last - played C (here the initial opponent play is ignored) then this round, - the player would play D. - - The dictionary must contain all possible permutations of C's and D's. - - LookerUp can also be instantiated with `pattern=str/tuple` of actions, and:: - - parameters=Plays( - self_plays=player_depth: int, - op_plays=op_depth: int, - op_openings=op_openings_depth: int) - - It will create keys of len=2 ** (sum(parameters)) and map the pattern to - the keys. - - initial_actions is a tuple such as (C, C, D). A table needs initial actions - equal to max(self_plays depth, opponent_plays depth, opponent_initial_plays - depth). If provided initial_actions is too long, the extra will be ignored. - If provided initial_actions is too short, the shortfall will be made up - with C's. - - Some well-known strategies can be expressed as special cases; for example - Cooperator is given by the dict (All history is ignored and always play C):: - - {Plays((), (), ()) : C} - - - Tit-For-Tat is given by (The only history that is important is the - opponent's last play.):: - - {Plays((), (D,), ()): D, - Plays((), (C,), ()): C} - - - LookerUp's LookupTable defaults to Tit-For-Tat. The initial_actions - defaults to playing C. - - Names: - - - Lookerup: Original name by Martin Jones - """ - - name = "LookerUp" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - default_tft_lookup_table = { - Plays(self_plays=(), op_plays=(D,), op_openings=()): D, - Plays(self_plays=(), op_plays=(C,), op_openings=()): C, - } - - def __init__( - self, - lookup_dict: dict = None, - initial_actions: tuple = None, - pattern: Any = None, # pattern is str or tuple of Action's. 
- parameters: Plays = None - ) -> None: - - super().__init__() - self.parameters = parameters - self.pattern = pattern - self._lookup = self._get_lookup_table(lookup_dict, pattern, parameters) - self._set_memory_depth() - self.initial_actions = self._get_initial_actions(initial_actions) - self._initial_actions_pool = list(self.initial_actions) - - @classmethod - def _get_lookup_table( - cls, lookup_dict: dict, pattern: Any, parameters: tuple - ) -> LookupTable: - if lookup_dict: - return LookupTable(lookup_dict=lookup_dict) - if pattern is not None and parameters is not None: - if isinstance(pattern, str): - pattern = str_to_actions(pattern) - self_depth, op_depth, op_openings_depth = parameters - return LookupTable.from_pattern( - pattern, self_depth, op_depth, op_openings_depth - ) - return LookupTable(default_tft_lookup_table) - - def _set_memory_depth(self): - if self._lookup.op_openings_depth == 0: - self.classifier["memory_depth"] = self._lookup.table_depth - else: - self.classifier["memory_depth"] = float("inf") - - def _get_initial_actions(self, initial_actions: tuple) -> tuple: - """Initial actions will always be cut down to table_depth.""" - table_depth = self._lookup.table_depth - if not initial_actions: - return tuple([C] * table_depth) - initial_actions_shortfall = table_depth - len(initial_actions) - if initial_actions_shortfall > 0: - return initial_actions + tuple([C] * initial_actions_shortfall) - return initial_actions[:table_depth] - - def strategy(self, opponent: IpdPlayer) -> Reaction: - turn_index = len(opponent.history) - while turn_index < len(self._initial_actions_pool): - return self._initial_actions_pool[turn_index] - - player_last_n_plays = get_last_n_plays( - player=self, depth=self._lookup.player_depth - ) - opponent_last_n_plays = get_last_n_plays( - player=opponent, depth=self._lookup.op_depth - ) - opponent_initial_plays = tuple( - opponent.history[: self._lookup.op_openings_depth] - ) - - return self._lookup.get( - player_last_n_plays, opponent_last_n_plays, opponent_initial_plays - ) - - @property - def lookup_dict(self): - return self._lookup.dictionary - - def lookup_table_display( - self, sort_by: tuple = ("op_openings", "self_plays", "op_plays") - ) -> str: - """ - Returns a string for printing lookup_table info in specified order. - - :param sort_by: only_elements='self_plays', 'op_plays', 'op_openings' - """ - return self._lookup.display(sort_by=sort_by) - - -class EvolvableLookerUp(LookerUp, EvolvablePlayer): - name = "EvolvableLookerUp" - - def __init__( - self, - lookup_dict: dict = None, - initial_actions: tuple = None, - pattern: Any = None, # pattern is str or tuple of Action's. 
- parameters: Plays = None, - mutation_probability: float = None - ) -> None: - lookup_dict, initial_actions, pattern, parameters, mutation_probability = self._normalize_parameters( - lookup_dict, initial_actions, pattern, parameters, mutation_probability - ) - LookerUp.__init__( - self, - lookup_dict=lookup_dict, - initial_actions=initial_actions, - pattern=pattern, - parameters=parameters, - ) - EvolvablePlayer.__init__(self) - self.mutation_probability = mutation_probability - self.overwrite_init_kwargs( - lookup_dict=lookup_dict, - initial_actions=initial_actions, - pattern=pattern, - parameters=parameters, - mutation_probability=mutation_probability, - ) - - @classmethod - def _normalize_parameters(cls, lookup_dict=None, initial_actions=None, pattern=None, parameters=None, - mutation_probability=None): - if lookup_dict and initial_actions: - # Compute the associated pattern and parameters - # Map the table keys to namedTuple Plays - lookup_table = cls._get_lookup_table(lookup_dict, pattern, parameters) - lookup_dict = lookup_table.dictionary - parameters = (lookup_table.player_depth, lookup_table.op_depth, lookup_table.op_openings_depth) - pattern = tuple(v for k, v in sorted(lookup_dict.items())) - elif pattern and parameters and initial_actions: - # Compute the associated lookup table - plays, op_plays, op_start_plays = parameters - lookup_table = cls._get_lookup_table(lookup_dict, pattern, parameters) - lookup_dict = lookup_table.dictionary - elif parameters: - # Generate a random pattern and (maybe) initial actions - plays, op_plays, op_start_plays = parameters - pattern, lookup_table = cls.random_params(plays, op_plays, op_start_plays) - lookup_dict = lookup_table.dictionary - if not initial_actions: - num_actions = max([plays, op_plays, op_start_plays]) - initial_actions = tuple([choice((C, D)) for _ in range(num_actions)]) - else: - raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableLookerUp") - # Normalize pattern - if isinstance(pattern, str): - pattern = str_to_actions(pattern) - pattern = tuple(pattern) - if mutation_probability is None: - plays, op_plays, op_start_plays = parameters - keys = create_lookup_table_keys(plays, op_plays, op_start_plays) - mutation_probability = 2. 
/ len(keys) - return lookup_dict, initial_actions, pattern, parameters, mutation_probability - - @classmethod - def random_value(cls): - return choice(actions) - - @classmethod - def random_params(cls, plays, op_plays, op_start_plays): - keys = create_lookup_table_keys(plays, op_plays, op_start_plays) - # To get a pattern, we just randomly pick between C and D for each key - pattern = [cls.random_value() for _ in keys] - table = dict(zip(keys, pattern)) - return pattern, LookupTable(table) - - @classmethod - def mutate_value(cls, value): - return value.flip() - - @classmethod - def mutate_table(cls, table, mutation_probability): - randoms = random.random(len(table.keys())) - # Flip each value with a probability proportional to the mutation rate - for i, (history, move) in enumerate(table.items()): - if randoms[i] < mutation_probability: - table[history] = cls.mutate_value(move) - return table - - def mutate(self): - lookup_dict = self.mutate_table(self.lookup_dict, self.mutation_probability) - # Add in starting moves - initial_actions = list(self.initial_actions) - for i in range(len(initial_actions)): - r = random.random() - if r < self.mutation_probability: - initial_actions[i] = initial_actions[i].flip() - return self.create_new( - lookup_dict=lookup_dict, - initial_actions=tuple(initial_actions), - ) - - def crossover(self, other): - if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") - lookup_dict = crossover_dictionaries(self.lookup_dict, other.lookup_dict) - return self.create_new(lookup_dict=lookup_dict) - - -class EvolvedLookerUp1_1_1(LookerUp): - """ - A 1 1 1 Lookerup trained with an evolutionary algorithm. - - Names: - - - Evolved Lookerup 1 1 1: Original name by Marc Harper - """ - - name = "EvolvedLookerUp1_1_1" - - def __init__(self) -> None: - params = Plays(self_plays=1, op_plays=1, op_openings=1) - super().__init__(parameters=params, pattern="CDDDDCDD", initial_actions=(C,)) - - -class EvolvedLookerUp2_2_2(LookerUp): - """ - A 2 2 2 Lookerup trained with an evolutionary algorithm. - - Names: - - - Evolved Lookerup 2 2 2: Original name by Marc Harper - """ - - name = "EvolvedLookerUp2_2_2" - - def __init__(self) -> None: - params = Plays(self_plays=2, op_plays=2, op_openings=2) - pattern = "CDDCDCDDCDDDCDDDDDCDCDCCCDDCCDCDDDCCCCCDDDCDDDDDDDDDCCDDCDDDCCCD" - super().__init__(parameters=params, pattern=pattern, initial_actions=(C, C)) - - -class Winner12(LookerUp): - """ - A lookup table based strategy. - - Names: - - - Winner12: [Mathieu2015]_ - """ - - name = "Winner12" - - def __init__(self) -> None: - params = Plays(self_plays=1, op_plays=2, op_openings=0) - pattern = "CDCDDCDD" - super().__init__(parameters=params, pattern=pattern, initial_actions=(C, C)) - - -class Winner21(LookerUp): - """ - A lookup table based strategy. 
-
-    Names:
-
-    - Winner21: [Mathieu2015]_
-    """
-
-    name = "Winner21"
-
-    def __init__(self) -> None:
-        params = Plays(self_plays=1, op_plays=2, op_openings=0)
-        pattern = "CDCDCDDD"
-        super().__init__(parameters=params, pattern=pattern, initial_actions=(D, C))
-
-
-def get_last_n_plays(player: IpdPlayer, depth: int) -> tuple:
-    """Returns the last N plays of player as a tuple."""
-    if depth == 0:
-        return ()
-    return tuple(player.history[-1 * depth :])
diff --git a/axelrod/ipd/strategies/mathematicalconstants.py b/axelrod/ipd/strategies/mathematicalconstants.py
deleted file mode 100644
index 7d650669b..000000000
--- a/axelrod/ipd/strategies/mathematicalconstants.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import math
-
-from axelrod.ipd.action import Action
-from axelrod.ipd.player import IpdPlayer
-
-C, D = Action.C, Action.D
-
-
-class CotoDeRatio(IpdPlayer):
-    """The player will always aim to bring the ratio of co-operations to
-    defections closer to the ratio given in a subclass.
-
-    Names:
-
-    - Co to Do Ratio: Original Name by Timothy Standen
-    """
-
-    classifier = {
-        "stochastic": False,
-        "memory_depth": float("inf"),  # Long memory
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        # Initially cooperate
-        if len(opponent.history) == 0:
-            return C
-        # Avoid initial division by zero
-        if not opponent.defections:
-            return D
-        # Otherwise compare the joint ratio to the target ratio
-        cooperations = opponent.cooperations + self.cooperations
-        defections = opponent.defections + self.defections
-        if cooperations / defections > self.ratio:
-            return D
-        return C
-
-
-class Golden(CotoDeRatio):
-    """The player will always aim to bring the ratio of co-operations to
-    defections closer to the golden mean.
-
-    Names:
-
-    - Golden: Original Name by Timothy Standen
-    """
-
-    name = r"$\phi$"
-    ratio = (1 + math.sqrt(5)) / 2
-
-
-class Pi(CotoDeRatio):
-    """The player will always aim to bring the ratio of co-operations to
-    defections closer to pi.
-
-    Names:
-
-    - Pi: Original Name by Timothy Standen
-    """
-
-    name = r"$\pi$"
-    ratio = math.pi
-
-
-class e(CotoDeRatio):
-    """The player will always aim to bring the ratio of co-operations to
-    defections closer to e.
-
-    Names:
-
-    - e: Original Name by Timothy Standen
-    """
-
-    name = "$e$"
-    ratio = math.e
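The three constants above share one decision rule and differ only in the target ratio. A minimal standalone sketch of that rule, assuming plain 'C'/'D' strings for histories and an invented helper name (this is an illustrative reconstruction, not code from the patch):

```python
import math

def coto_de_ratio_move(my_history, opp_history, ratio=(1 + math.sqrt(5)) / 2):
    """Sketch of the CotoDeRatio rule; Golden's ratio is the default target."""
    if not opp_history:              # first round: cooperate
        return "C"
    if "D" not in opp_history:       # avoid dividing by zero below
        return "D"
    cooperations = my_history.count("C") + opp_history.count("C")
    defections = my_history.count("D") + opp_history.count("D")
    # Defect when the joint C:D ratio has drifted above the target ratio
    return "D" if cooperations / defections > ratio else "C"

assert coto_de_ratio_move("", "") == "C"
assert coto_de_ratio_move("CC", "CD") == "D"   # joint ratio 3/1 > 1.618...
```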
diff --git a/axelrod/ipd/strategies/memoryone.py b/axelrod/ipd/strategies/memoryone.py
deleted file mode 100644
index 2811a5424..000000000
--- a/axelrod/ipd/strategies/memoryone.py
+++ /dev/null
@@ -1,343 +0,0 @@
-"""Memory One strategies. Note that there are Memory One strategies in other
-files, including titfortat.py and zero_determinant.py"""
-
-import warnings
-from typing import Tuple
-
-from axelrod.ipd.action import Action
-from axelrod.ipd.player import IpdPlayer
-from axelrod.ipd.random_ import random_choice
-
-C, D = Action.C, Action.D
-
-
-class MemoryOnePlayer(IpdPlayer):
-    """
-    Uses a four-vector for strategies based on the last round of play,
-    (P(C|CC), P(C|CD), P(C|DC), P(C|DD)). Win-Stay Lose-Shift is set as
-    the default player if four_vector is not given.
-    Intended to be used as an abstract base class or to at least be supplied
-    with an initializing four_vector.
-
-    Names
-
-    - Memory One: [Nowak1990]_
-    """
-
-    name = "Generic Memory One IpdPlayer"
-    classifier = {
-        "memory_depth": 1,  # Memory-one Four-Vector
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(
-        self, four_vector: Tuple[float, float, float, float] = None, initial: Action = C
-    ) -> None:
-        """
-        Parameters
-        ----------
-
-        four_vector: list or tuple of floats of length 4
-            The response probabilities to the preceding round of play
-            ( P(C|CC), P(C|CD), P(C|DC), P(C|DD) )
-        initial: C or D
-            The initial move
-
-        Special Cases
-        -------------
-
-        Alternator is equivalent to MemoryOnePlayer((0, 0, 1, 1), C)
-        Cooperator is equivalent to MemoryOnePlayer((1, 1, 1, 1), C)
-        Defector is equivalent to MemoryOnePlayer((0, 0, 0, 0), C)
-        Random is equivalent to MemoryOnePlayer((0.5, 0.5, 0.5, 0.5))
-        (with a random choice for the initial state)
-        TitForTat is equivalent to MemoryOnePlayer((1, 0, 1, 0), C)
-        WinStayLoseShift is equivalent to MemoryOnePlayer((1, 0, 0, 1), C)
-
-        See also: The remaining strategies in this file
-                  Multiple strategies in titfortat.py
-                  Grofman, Joss in axelrod_tournaments.py
-        """
-        super().__init__()
-        self._initial = initial
-        self.set_initial_four_vector(four_vector)
-
-    def set_initial_four_vector(self, four_vector):
-        if four_vector is None:
-            four_vector = (1, 0, 0, 1)
-            warnings.warn("Memory one player is set to default (1, 0, 0, 1).")
-
-        self.set_four_vector(four_vector)
-        if self.name == "Generic Memory One IpdPlayer":
-            self.name = "%s: %s" % (self.name, four_vector)
-
-    def set_four_vector(self, four_vector: Tuple[float, float, float, float]):
-        if not all(0 <= p <= 1 for p in four_vector):
-            raise ValueError(
-                "An element in the probability vector, {}, is not "
-                "between 0 and 1.".format(str(four_vector))
-            )
-
-        self._four_vector = dict(zip([(C, C), (C, D), (D, C), (D, D)], four_vector))
-        self.classifier["stochastic"] = any(0 < x < 1 for x in set(four_vector))
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        if len(opponent.history) == 0:
-            return self._initial
-        # Determine which probability to use
-        p = self._four_vector[(self.history[-1], opponent.history[-1])]
-        # Draw a random number in [0, 1] to decide
-        return random_choice(p)
-
-
-class WinStayLoseShift(MemoryOnePlayer):
-    """
-    Win-Stay Lose-Shift, also called Pavlov.
-
-    Names:
-
-    - Win Stay Lose Shift: [Nowak1993]_
-    - WSLS: [Stewart2012]_
-    - Pavlov: [Kraines1989]_
-    """
-
-    name = "Win-Stay Lose-Shift"
-    classifier = {
-        "memory_depth": 1,  # Memory-one Four-Vector
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self, initial: Action = C) -> None:
-        four_vector = (1, 0, 0, 1)
-        super().__init__(four_vector)
-        self._initial = initial
-
-
-class WinShiftLoseStay(MemoryOnePlayer):
-    """Win-Shift Lose-Stay, also called Reverse Pavlov.
- - Names: - - - WSLS: [Li2011]_ - """ - - name = "Win-Shift Lose-Stay" - classifier = { - "memory_depth": 1, # Memory-one Four-Vector - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, initial: Action = D) -> None: - four_vector = (0, 1, 1, 0) - super().__init__(four_vector) - self._initial = initial - - -class GTFT(MemoryOnePlayer): - """Generous Tit For Tat Strategy. - - Names: - - - Generous Tit For Tat: [Nowak1993]_ - - Naive peace maker: [Gaudesi2016]_ - - Soft Joss: [Gaudesi2016]_ - """ - - name = "GTFT" - classifier = { - "memory_depth": 1, # Memory-one Four-Vector - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, p: float = None) -> None: - """ - Parameters - - p, float - A parameter used to compute the four-vector - - Special Cases - - TitForTat is equivalent to GTFT(0) - """ - self.p = p - super().__init__() - - def set_initial_four_vector(self, four_vector): - pass - - def receive_match_attributes(self): - (R, P, S, T) = self.match_attributes["game"].RPST() - if self.p is None: - self.p = min(1 - (T - R) / (R - S), (R - P) / (T - P)) - four_vector = [1, self.p, 1, self.p] - self.set_four_vector(four_vector) - - def __repr__(self) -> str: - assert self.p is not None - return "%s: %s" % (self.name, round(self.p, 2)) - - -class FirmButFair(MemoryOnePlayer): - """A strategy that cooperates on the first move, and cooperates except after - receiving a sucker payoff. - - Names: - - - Firm But Fair: [Frean1994]_""" - - name = "Firm But Fair" - - def __init__(self) -> None: - four_vector = (1, 0, 1, 2 / 3) - super().__init__(four_vector) - self.set_four_vector(four_vector) - - -class StochasticCooperator(MemoryOnePlayer): - """Stochastic Cooperator. - - Names: - - - Stochastic Cooperator: [Adami2013]_ - """ - - name = "Stochastic Cooperator" - - def __init__(self) -> None: - four_vector = (0.935, 0.229, 0.266, 0.42) - super().__init__(four_vector) - self.set_four_vector(four_vector) - - -class StochasticWSLS(MemoryOnePlayer): - """ - Stochastic WSLS, similar to Generous TFT. Note that this is not the same as - Stochastic WSLS described in [Amaral2016]_, that strategy is a modification - of WSLS that learns from the performance of other strategies. - - Names: - - - Stochastic WSLS: Original name by Marc Harper - """ - - name = "Stochastic WSLS" - - def __init__(self, ep: float = 0.05) -> None: - """ - Parameters - - ep, float - A parameter used to compute the four-vector -- the probability of - cooperating when the previous round was CD or DC - - Special Cases - - WinStayLoseShift is equivalent to StochasticWSLS(0) - """ - - self.ep = ep - four_vector = (1.0 - ep, ep, ep, 1.0 - ep) - super().__init__(four_vector) - self.set_four_vector(four_vector) - - -class SoftJoss(MemoryOnePlayer): - """ - Defects with probability 0.9 when the opponent defects, otherwise - emulates Tit-For-Tat. 
- - Names: - - - Soft Joss: [Prison1998]_ - """ - - name = "Soft Joss" - - def __init__(self, q: float = 0.9) -> None: - """ - Parameters - - q, float - A parameter used to compute the four-vector - - Special Cases - - Cooperator is equivalent to SoftJoss(0) - TitForTat is equivalent to SoftJoss(1) - """ - self.q = q - four_vector = (1.0, 1 - q, 1, 1 - q) - super().__init__(four_vector) - - def __repr__(self) -> str: - return "%s: %s" % (self.name, round(self.q, 2)) - - -class ALLCorALLD(IpdPlayer): - """This strategy is at the parameter extreme of the ZD strategies (phi = 0). - It simply repeats its last move, and so mimics ALLC or ALLD after round one. - If the tournament is noisy, there will be long runs of C and D. - - For now starting choice is random of 0.6, but that was an arbitrary choice - at implementation time. - - Names: - - - ALLC or ALLD: Original name by Marc Harper - - Repeat: [Akin2015]_ - """ - - name = "ALLCorALLD" - classifier = { - "memory_depth": 1, # Memory-one Four-Vector (1, 1, 0, 0) - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) == 0: - return random_choice(0.6) - return self.history[-1] - - -class ReactivePlayer(MemoryOnePlayer): - """ - A generic reactive player. Defined by 2 probabilities conditional on the - opponent's last move: P(C|C), P(C|D). - - Names: - - - Reactive: [Nowak1989]_ - """ - - name = "Reactive IpdPlayer" - - def __init__(self, probabilities: Tuple[float, float]) -> None: - four_vector = (*probabilities, *probabilities) - super().__init__(four_vector) - self.name = "%s: %s" % (self.name, probabilities) diff --git a/axelrod/ipd/strategies/memorytwo.py b/axelrod/ipd/strategies/memorytwo.py deleted file mode 100644 index 5f3489a0d..000000000 --- a/axelrod/ipd/strategies/memorytwo.py +++ /dev/null @@ -1,259 +0,0 @@ -"""Memory Two strategies.""" - -import itertools -import warnings -from typing import Dict, Tuple - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -from .defector import Defector -from .titfortat import TitFor2Tats, TitForTat - -C, D = Action.C, Action.D - - -class MemoryTwoPlayer(IpdPlayer): - """ - Uses a sixteen-vector for strategies based on the 16 conditional probabilities - P(X | I,J,K,L) where X, I, J, K, L in [C, D] and I, J are the players last - two moves and K, L are the opponents last two moves. These conditional - probabilities are the following: - 1. P(C|CC, CC) - 2. P(C|CC, CD) - 3. P(C|CC, DC) - 4. P(C|CC, DD) - 5. P(C|CD, CC) - 6. P(C|CD, CD) - 7. P(C|CD, DC) - 8. P(C|CD, DD) - 9. P(C|DC, CC) - 10. P(C|DC, CD) - 11. P(C|DC, DC) - 12. P(C|DC, DD) - 13. P(C|DD, CC) - 14. P(C|DD, CD) - 15. P(C|DD, DC) - 16. P(C|DD, DD)) - Cooperator is set as the default player if sixteen_vector is not given. - - Names - - - Memory Two: [Hilbe2017]_ - """ - - name = "Generic Memory Two IpdPlayer" - classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, sixteen_vector: Tuple[float, ...] 
= None, initial: Action = C
-    ) -> None:
-        """
-        Parameters
-        ----------
-
-        sixteen_vector: list or tuple of floats of length 16
-            The response probabilities to the preceding round of play
-        initial: C or D
-            The initial 2 moves
-        """
-        super().__init__()
-        self._initial = initial
-        self.set_initial_sixteen_vector(sixteen_vector)
-
-    def set_initial_sixteen_vector(self, sixteen_vector):
-        if sixteen_vector is None:
-            sixteen_vector = tuple([1] * 16)
-            warnings.warn("Memory two player is set to default, Cooperator.")
-
-        self.set_sixteen_vector(sixteen_vector)
-        if self.name == "Generic Memory Two IpdPlayer":
-            self.name = "%s: %s" % (self.name, sixteen_vector)
-
-    def set_sixteen_vector(self, sixteen_vector: Tuple):
-        if not all(0 <= p <= 1 for p in sixteen_vector):
-            raise ValueError(
-                "An element in the probability vector, {}, is not "
-                "between 0 and 1.".format(str(sixteen_vector))
-            )
-
-        states = [
-            (hist[:2], hist[2:]) for hist in list(itertools.product((C, D), repeat=4))
-        ]
-
-        self._sixteen_vector = dict(
-            zip(states, sixteen_vector)
-        )  # type: Dict[tuple, float]
-        self.classifier["stochastic"] = any(0 < x < 1 for x in set(sixteen_vector))
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        if len(opponent.history) <= 1:
-            return self._initial
-        # Determine which probability to use
-        p = self._sixteen_vector[
-            (tuple(self.history[-2:]), tuple(opponent.history[-2:]))
-        ]
-        # Draw a random number in [0, 1] to decide
-        return random_choice(p)
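The sixteen-vector bookkeeping above can be restated in isolation. The following sketch mirrors the `states` construction in `set_sixteen_vector`, but uses plain 'C'/'D' strings instead of the library's Action values and an invented helper name, purely for illustration:

```python
from itertools import product

def build_sixteen_map(sixteen_vector):
    """Key: (own last two moves, opponent's last two moves); value: P(C)."""
    states = [(hist[:2], hist[2:]) for hist in product("CD", repeat=4)]
    return dict(zip(states, sixteen_vector))

# The all-ones default corresponds to Cooperator: P(C) = 1 in every state.
table = build_sixteen_map([1] * 16)
assert len(table) == 16
assert table[(("C", "D"), ("D", "D"))] == 1
```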
-
-
-class AON2(MemoryTwoPlayer):
-    """
-    AON2 is a memory two strategy introduced in [Hilbe2017]_. It belongs to the
-    AONk (all-or-none) family of strategies. These strategies were designed to
-    satisfy the three following properties:
-
-    1. Mutually Cooperative. A strategy is mutually cooperative if there are
-    histories for which the strategy prescribes to cooperate, and if it continues
-    to cooperate after rounds with mutual cooperation (provided the last k actions
-    of the focal player were actually consistent).
-
-    2. Error correcting. A strategy is error correcting after at most k rounds if,
-    after any history, it generally takes a group of players at most k + 1 rounds
-    to re-establish mutual cooperation.
-
-    3. Retaliating. A strategy is retaliating for at least k rounds if, after
-    rounds in which the focal player cooperated while the coplayer defected,
-    the strategy responds by defecting the following k rounds.
-
-    In [Hilbe2017]_ the following vectors are reported as "equivalent" to AON2
-    with their respective self-cooperation rate (note that these are not the same):
-
-    1. [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], self-cooperation
-    rate: 0.952
-    2. [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], self-cooperation
-    rate: 0.951
-    3. [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], self-cooperation
-    rate: 0.951
-    4. [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1], self-cooperation
-    rate: 0.952
-
-    AON2 is implemented using vector 1 due to its self-cooperation rate.
-
-    In essence it is a strategy that starts off by cooperating and will cooperate
-    again only after the states (CC, CC), (CD, CD), (DC, DC), (DD, DD).
-
-    Names:
-
-    - AON2: [Hilbe2017]_
-    """
-
-    name = "AON2"
-    classifier = {
-        "memory_depth": 2,
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        sixteen_vector = (1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)
-        super().__init__(sixteen_vector)
-
-
-class DelayedAON1(MemoryTwoPlayer):
-    """
-    Delayed AON1 is a memory two strategy also introduced in [Hilbe2017]_; it
-    belongs to the AONk family. Note that AON1 is equivalent to Win Stay Lose
-    Shift.
-
-    In [Hilbe2017]_ the following vectors are reported as "equivalent" to Delayed
-    AON1 with their respective self-cooperation rate (note that these are not the
-    same):
-
-    1. [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1], self-cooperation
-    rate: 0.952
-    2. [1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1], self-cooperation
-    rate: 0.970
-    3. [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1], self-cooperation
-    rate: 0.971
-
-    Delayed AON1 is implemented using vector 3 due to its self-cooperation rate.
-
-    In essence it is a strategy that starts off by cooperating and will cooperate
-    again only after the states (CC, CC), (CD, CD), (CD, DD), (DD, CD),
-    (DC, DC) and (DD, DD).
-
-    Names:
-
-    - Delayed AON1: [Hilbe2017]_
-    """
-
-    name = "Delayed AON1"
-    classifier = {
-        "memory_depth": 2,
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self) -> None:
-        sixteen_vector = (1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1)
-        super().__init__(sixteen_vector)
-
-
-class MEM2(IpdPlayer):
-    """A memory-two player that switches between TFT, TFTT, and ALLD.
-
-    Note that the reference claims that this is a memory two strategy but in
-    fact it has infinite memory. This is because the player plays as ALLD if
-    ALLD has ever been selected twice, which can only be known if the entire
-    history of play is accessible.
- - Names: - - - MEM2: [Li2014]_ - """ - - name = "MEM2" - classifier = { - "memory_depth": float("inf"), - "long_run_time": False, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.players = {"TFT": TitForTat(), "TFTT": TitFor2Tats(), "ALLD": Defector()} - self.play_as = "TFT" - self.shift_counter = 3 - self.alld_counter = 0 - - def strategy(self, opponent: IpdPlayer) -> Action: - # Update Histories - # Note that this assumes that TFT and TFTT do not use internal counters, - # Rather that they examine the actual history of play - if len(self.history) > 0: - for v in self.players.values(): - v.history.append(self.history[-1], opponent.history[-1]) - self.shift_counter -= 1 - if (self.shift_counter == 0) and (self.alld_counter < 2): - self.shift_counter = 2 - # Depending on the last two moves, play as TFT, TFTT, or ALLD - last_two = list(zip(self.history[-2:], opponent.history[-2:])) - if set(last_two) == set([(C, C)]): - self.play_as = "TFT" - elif set(last_two) == set([(C, D), (D, C)]): - self.play_as = "TFTT" - else: - self.play_as = "ALLD" - self.alld_counter += 1 - return self.players[self.play_as].strategy(opponent) diff --git a/axelrod/ipd/strategies/meta.py b/axelrod/ipd/strategies/meta.py deleted file mode 100644 index b3191186f..000000000 --- a/axelrod/ipd/strategies/meta.py +++ /dev/null @@ -1,682 +0,0 @@ -import random - -import numpy as np -from numpy.random import choice - -from axelrod.ipd.action import Action -from axelrod.ipd.classifier import Classifiers -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.strategies import TitForTat -from axelrod.ipd.strategy_transformers import NiceTransformer -from ._strategies import all_strategies -from .hunter import ( - AlternatorHunter, - CooperatorHunter, - CycleHunter, - DefectorHunter, - EventualCycleHunter, - MathConstantHunter, - RandomHunter, -) - -# Needs to be computed manually to prevent circular dependency -ordinary_strategies = [s for s in all_strategies if Classifiers.obey_axelrod(s())] - -C, D = Action.C, Action.D - - -class MetaPlayer(IpdPlayer): - """ - A generic player that has its own team of players. - - Names: - - - Meta IpdPlayer: Original name by Karol Langner - """ - - name = "Meta IpdPlayer" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": True, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, team=None): - # The default is to use all strategies available, but we need to import - # the list at runtime, since _strategies import also _this_ module - # before defining the list. - if team: - self.team = team - else: - # Needs to be computed manually to prevent circular dependency - self.team = ordinary_strategies - # Make sure we don't use any meta players to avoid infinite recursion. - self.team = [t for t in self.team if not issubclass(t, MetaPlayer)] - # Initiate all the players in our team. - self.team = [t() for t in self.team] - - super().__init__() - - # This player inherits the classifiers of its team. - # Note that memory_depth is not simply the max memory_depth of the team. 
-        for key in [
-            "stochastic",
-            "inspects_source",
-            "manipulates_source",
-            "manipulates_state",
-        ]:
-            self.classifier[key] = any(map(Classifiers[key], self.team))
-
-        for t in self.team:
-            self.classifier["makes_use_of"].update(Classifiers["makes_use_of"](t))
-
-        self._last_results = None
-
-    def receive_match_attributes(self):
-        for t in self.team:
-            t.set_match_attributes(**self.match_attributes)
-
-    def __repr__(self):
-        team_size = len(self.team)
-        return "{}: {} player{}".format(
-            self.name, team_size, "s" if team_size > 1 else ""
-        )
-
-    def update_histories(self, coplay):
-        # Update team histories.
-        for player, play in zip(self.team, self._last_results):
-            player.history.append(play, coplay)
-
-    def update_history(self, play, coplay):
-        super().update_history(play, coplay)
-        self.update_histories(coplay)
-
-    def strategy(self, opponent):
-        # Get the results of all our players.
-        results = []
-        for player in self.team:
-            play = player.strategy(opponent)
-            results.append(play)
-        self._last_results = results
-        # A subclass should just define a way to choose the result based on
-        # team results.
-        return self.meta_strategy(results, opponent)
-
-    def meta_strategy(self, results, opponent):
-        """Determine the meta result based on results of all players.
-        Override this function in child classes."""
-        return C
-
-
-class MetaMajority(MetaPlayer):
-    """A player who goes by the majority vote of all other non-meta players.
-
-    Names:
-
-    - Meta Majority: Original name by Karol Langner
-    """
-
-    name = "Meta Majority"
-
-    @staticmethod
-    def meta_strategy(results, opponent):
-        if results.count(D) > results.count(C):
-            return D
-        return C
-
-
-class MetaMinority(MetaPlayer):
-    """A player who goes by the minority vote of all other non-meta players.
-
-    Names:
-
-    - Meta Minority: Original name by Karol Langner
-    """
-
-    name = "Meta Minority"
-
-    @staticmethod
-    def meta_strategy(results, opponent):
-        if results.count(D) < results.count(C):
-            return D
-        return C
-
-
-class MetaWinner(MetaPlayer):
-    """A player who goes by the strategy of the current winner.
-
-    Names:
-
-    - Meta Winner: Original name by Karol Langner
-    """
-
-    name = "Meta Winner"
-
-    def __init__(self, team=None):
-        super().__init__(team=team)
-        # For each player, we will keep the history of proposed moves and
-        # a running score since the beginning of the game.
-        self.scores = np.zeros(len(self.team))
-        self.classifier["long_run_time"] = True
-
-    def _update_scores(self, coplay):
-        # Update the running score for each player, before determining the
-        # next move.
-        game = self.match_attributes["game"]
-        scores = []
-        for player in self.team:
-            last_round = (player.history[-1], coplay)
-            s = game.scores[last_round][0]
-            scores.append(s)
-        self.scores += np.array(scores)
-
-    def update_histories(self, coplay):
-        super().update_histories(coplay)
-        self._update_scores(coplay)
-
-    def meta_strategy(self, results, opponent):
-        # Choose an action based on the collection of scores
-        bestscore = max(self.scores)
-        beststrategies = [
-            i for (i, score) in enumerate(self.scores) if score == bestscore
-        ]
-        bestproposals = [results[i] for i in beststrategies]
-        bestresult = C if C in bestproposals else D
-        return bestresult
-
-
-NiceMetaWinner = NiceTransformer()(MetaWinner)
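The winner-selection rule in MetaWinner.meta_strategy above is compact enough to restate in isolation. A minimal sketch with plain 'C'/'D' strings and an invented helper name (an illustration, not library code):

```python
def meta_winner_choice(scores, proposals):
    """Among the proposals of the highest scorers, prefer cooperation."""
    best_score = max(scores)
    best_proposals = [
        proposals[i] for i, score in enumerate(scores) if score == best_score
    ]
    return "C" if "C" in best_proposals else "D"

# Two players tie for the best score; one of them proposes C, so play C.
assert meta_winner_choice([9, 9, 4], ["D", "C", "C"]) == "C"
assert meta_winner_choice([9, 9, 4], ["D", "D", "C"]) == "D"
```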
-
-
-class MetaWinnerEnsemble(MetaWinner):
-    """A variant of MetaWinner that chooses one of the top scoring strategies
-    at random against each opponent. Note this strategy is always stochastic
-    regardless of the team.
-
-    Names:
-
-    - Meta Winner Ensemble: Original name by Marc Harper
-    """
-
-    name = "Meta Winner Ensemble"
-
-    def meta_strategy(self, results, opponent):
-        # Sort by score
-        scores = [(score, i) for (i, score) in enumerate(self.scores)]
-        # Choose one of the best scorers at random
-        scores.sort(reverse=True)
-        prop = max(1, int(len(scores) * 0.08))
-        index = choice([i for (s, i) in scores[:prop]])
-        return results[index]
-
-
-NiceMetaWinnerEnsemble = NiceTransformer()(MetaWinnerEnsemble)
-
-
-class MetaHunter(MetaPlayer):
-    """A player who uses a selection of hunters.
-
-    Names
-
-    - Meta Hunter: Original name by Karol Langner
-    """
-
-    name = "Meta Hunter"
-    classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self):
-        # Notice that we don't include the cooperator hunter, because it leads
-        # to excessive defection and therefore bad performance against
-        # unforgiving strategies. We will stick to hunters that use defections
-        # as cues. However, a really tangible benefit comes from combining
-        # Random Hunter and Math Constant Hunter, since together they catch
-        # strategies that are lightly randomized but still quite constant
-        # (the tricky/suspicious ones).
-        team = [
-            DefectorHunter,
-            AlternatorHunter,
-            RandomHunter,
-            MathConstantHunter,
-            CycleHunter,
-            EventualCycleHunter,
-        ]
-
-        super().__init__(team=team)
-
-    @staticmethod
-    def meta_strategy(results, opponent):
-        # If any of the hunters smells prey, then defect!
-        if D in results:
-            return D
-
-        # Tit-for-tat might seem like a better default choice, but in many
-        # cases it complicates the heuristics of hunting and creates
-        # false-positives. So go ahead and use it, but only for longer
-        # histories.
-        if len(opponent.history) > 100:
-            return D if opponent.history[-1:] == [D] else C
-        else:
-            return C
-
-
-class MetaHunterAggressive(MetaPlayer):
-    """A player who uses a selection of hunters.
-
-    Names
-
-    - Meta Hunter Aggressive: Original name by Marc Harper
-    """
-
-    name = "Meta Hunter Aggressive"
-    classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self, team=None):
-        # This version uses CooperatorHunter
-        if team is None:
-            team = [
-                DefectorHunter,
-                AlternatorHunter,
-                RandomHunter,
-                MathConstantHunter,
-                CycleHunter,
-                EventualCycleHunter,
-                CooperatorHunter,
-            ]
-
-        super().__init__(team=team)
-
-    @staticmethod
-    def meta_strategy(results, opponent):
-        # If any of the hunters smells prey, then defect!
-        if D in results:
-            return D
-
-        # Tit-for-tat might seem like a better default choice, but in many
-        # cases it complicates the heuristics of hunting and creates
-        # false-positives. So go ahead and use it, but only for longer
-        # histories.
- if len(opponent.history) > 100: - return D if opponent.history[-1:] == [D] else C - else: - return C - - -class MetaMajorityMemoryOne(MetaMajority): - """MetaMajority with the team of Memory One players - - Names - - - Meta Majority Memory One: Original name by Marc Harper - """ - - name = "Meta Majority Memory One" - - def __init__(self): - team = [s for s in ordinary_strategies if Classifiers["memory_depth"](s()) <= 1] - super().__init__(team=team) - self.classifier["long_run_time"] = False - - -class MetaMajorityFiniteMemory(MetaMajority): - """MetaMajority with the team of Finite Memory Players - - Names - - - Meta Majority Finite Memory: Original name by Marc Harper - """ - - name = "Meta Majority Finite Memory" - - def __init__(self): - team = [ - s - for s in ordinary_strategies - if Classifiers["memory_depth"](s()) < float("inf") - ] - super().__init__(team=team) - - -class MetaMajorityLongMemory(MetaMajority): - """MetaMajority with the team of Long (infinite) Memory Players - - Names - - - Meta Majority Long Memory: Original name by Marc Harper - """ - - name = "Meta Majority Long Memory" - - def __init__(self): - team = [ - s - for s in ordinary_strategies - if Classifiers["memory_depth"](s()) == float("inf") - ] - super().__init__(team=team) - - -class MetaWinnerMemoryOne(MetaWinner): - """MetaWinner with the team of Memory One players - - Names - - - Meta Winner Memory Memory One: Original name by Marc Harper - """ - - name = "Meta Winner Memory One" - - def __init__(self): - team = [s for s in ordinary_strategies if Classifiers["memory_depth"](s()) <= 1] - super().__init__(team=team) - self.classifier["long_run_time"] = False - - -class MetaWinnerFiniteMemory(MetaWinner): - """MetaWinner with the team of Finite Memory Players - - Names - - - Meta Winner Finite Memory: Original name by Marc Harper - """ - - name = "Meta Winner Finite Memory" - - def __init__(self): - team = [ - s - for s in ordinary_strategies - if Classifiers["memory_depth"](s()) < float("inf") - ] - super().__init__(team=team) - - -class MetaWinnerLongMemory(MetaWinner): - """MetaWinner with the team of Long (infinite) Memory Players - - Names - - - Meta Winner Long Memory: Original name by Marc Harper - """ - - name = "Meta Winner Long Memory" - - def __init__(self): - team = [ - s - for s in ordinary_strategies - if Classifiers["memory_depth"](s()) == float("inf") - ] - super().__init__(team=team) - - -class MetaWinnerDeterministic(MetaWinner): - """Meta Winner with the team of Deterministic Players. - - Names - - - Meta Winner Deterministic: Original name by Marc Harper - """ - - name = "Meta Winner Deterministic" - - def __init__(self): - team = [s for s in ordinary_strategies if not Classifiers["stochastic"](s())] - super().__init__(team=team) - self.classifier["stochastic"] = False - - -class MetaWinnerStochastic(MetaWinner): - """Meta Winner with the team of Stochastic Players. - - Names - - - Meta Winner Stochastic: Original name by Marc Harper - """ - - name = "Meta Winner Stochastic" - - def __init__(self): - team = [s for s in ordinary_strategies if Classifiers["stochastic"](s())] - super().__init__(team=team) - - -class MetaMixer(MetaPlayer): - """A player who randomly switches between a team of players. - If no distribution is passed then the player will uniformly choose between - sub players. - - In essence this is creating a Mixed strategy. 
- - Parameters - - team : list of strategy classes, optional - Team of strategies that are to be randomly played - If none is passed will select the ordinary strategies. - distribution : list representing a probability distribution, optional - This gives the distribution from which to select the players. - If none is passed will select uniformly. - - Names - - - Meta Mixer: Original name by Vince Knight - """ - - name = "Meta Mixer" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": True, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, team=None, distribution=None): - self.distribution = distribution - super().__init__(team=team) - - def meta_strategy(self, results, opponent): - """Using the numpy.random choice function to sample with weights""" - return choice(results, p=self.distribution) - - -class NMWEDeterministic(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Deterministic Players. - - Names - - - Nice Meta Winner Ensemble Deterministic: Original name by Marc Harper - """ - - name = "NMWE Deterministic" - - def __init__(self): - team = [s for s in ordinary_strategies if not Classifiers["stochastic"](s())] - super().__init__(team=team) - self.classifier["stochastic"] = True - - -class NMWEStochastic(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Stochastic Players. - - Names - - - Nice Meta Winner Ensemble Stochastic: Original name by Marc Harper - """ - - name = "NMWE Stochastic" - - def __init__(self): - team = [s for s in ordinary_strategies if Classifiers["stochastic"](s())] - super().__init__(team=team) - - -class NMWEFiniteMemory(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Finite Memory Players. - - Names - - - Nice Meta Winner Ensemble Finite Memory: Original name by Marc Harper - """ - - name = "NMWE Finite Memory" - - def __init__(self): - team = [ - s - for s in ordinary_strategies - if Classifiers["memory_depth"](s()) < float("inf") - ] - super().__init__(team=team) - - -class NMWELongMemory(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Long Memory Players. - - Names - - - Nice Meta Winner Ensemble Long Memory: Original name by Marc Harper - """ - - name = "NMWE Long Memory" - - def __init__(self): - team = [ - s - for s in ordinary_strategies - if Classifiers["memory_depth"](s()) == float("inf") - ] - super().__init__(team=team) - - -class NMWEMemoryOne(NiceMetaWinnerEnsemble): - """Nice Meta Winner Ensemble with the team of Memory One Players. - - Names - - - Nice Meta Winner Ensemble Memory One: Original name by Marc Harper - """ - - name = "NMWE Memory One" - - def __init__(self): - team = [s for s in ordinary_strategies if Classifiers["memory_depth"](s()) <= 1] - super().__init__(team=team) - self.classifier["long_run_time"] = False - - -class MemoryDecay(MetaPlayer): - """ - A player utilizes the (default) Tit for Tat strategy for the first (default) 15 turns, - at the same time memorizing the opponent's decisions. After the 15 turns have - passed, the player calculates a 'net cooperation score' (NCS) for their opponent, - weighing decisions to Cooperate as (default) 1, and to Defect as (default) - -2. If the opponent's NCS is below 0, the player defects; otherwise, - they cooperate. 
- - The player's memories of the opponent's decisions have a random chance to be - altered (i.e., a C decision becomes D or vice versa; default probability - is 0.03) or deleted (default probability is 0.1). - - It is possible to pass a different axelrod player class to change the initial - player behavior. - - Name: Memory Decay - """ - - name = "Memory Decay" - classifier = { - "memory_depth": float("inf"), - "long_run_time": False, - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, - p_memory_delete: float = 0.1, - p_memory_alter: float = 0.03, - loss_value: float = -2, - gain_value: float = 1, - memory: list = None, - start_strategy: IpdPlayer = TitForTat, - start_strategy_duration: int = 15, - ): - super().__init__(team=[start_strategy]) - # This strategy is stochastic even if none of the team is. The - # MetaPlayer initializer will set stochastic to be False in that case. - self.classifier["stochastic"] = True - - self.p_memory_delete = p_memory_delete - self.p_memory_alter = p_memory_alter - self.loss_value = loss_value - self.gain_value = gain_value - self.memory = [] if not memory else memory - self.start_strategy_duration = start_strategy_duration - self.gloss_values = None - - def __repr__(self): - return IpdPlayer.__repr__(self) - - def gain_loss_translate(self): - """ - Translates the actions (D and C) to numeric values (loss_value and - gain_value). - """ - values = {C: self.gain_value, D: self.loss_value} - self.gloss_values = [values[action] for action in self.memory] - - def memory_alter(self): - """ - Alters memory entry, i.e. puts C if there's a D and vice versa. - """ - alter = choice(range(0, len(self.memory))) - self.memory[alter] = self.memory[alter].flip() - - def memory_delete(self): - """ - Deletes memory entry. - """ - self.memory.pop(choice(range(0, len(self.memory)))) - - def meta_strategy(self, results, opponent): - try: - self.memory.append(opponent.history[-1]) - except IndexError: - pass - if len(self.history) < self.start_strategy_duration: - return results[0] - else: - if random.random() <= self.p_memory_alter: - self.memory_alter() - if random.random() <= self.p_memory_delete: - self.memory_delete() - self.gain_loss_translate() - if sum(self.gloss_values) < 0: - return D - else: - return C diff --git a/axelrod/ipd/strategies/mindcontrol.py b/axelrod/ipd/strategies/mindcontrol.py deleted file mode 100644 index 80a68c2a4..000000000 --- a/axelrod/ipd/strategies/mindcontrol.py +++ /dev/null @@ -1,95 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class MindController(IpdPlayer): - """A player that changes the opponents strategy to cooperate. - - Names - - - Mind Controller: Original name by Karol Langner - """ - - name = "Mind Controller" - classifier = { - "memory_depth": -10, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": True, # Finds out what opponent will do - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - """ - Alters the opponents strategy method to be a lambda function which - always returns C. 
This player will then always return D to take - advantage of this - """ - - opponent.strategy = lambda opponent: C - return D - - -class MindWarper(IpdPlayer): - """ - A player that changes the opponent's strategy but blocks changes to - its own. - - Names - - - Mind Warper: Original name by Karol Langner - """ - - name = "Mind Warper" - classifier = { - "memory_depth": -10, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": True, # changes what opponent will do - "manipulates_state": False, - } - - def __setattr__(self, name: str, val: str): - if name == "strategy": - pass - else: - self.__dict__[name] = val - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - opponent.strategy = lambda opponent: C - return D - - -class MindBender(MindWarper): - """ - A player that changes the opponent's strategy by modifying the internal - dictionary. - - Names - - - Mind Bender: Original name by Karol Langner - """ - - name = "Mind Bender" - classifier = { - "memory_depth": -10, - "makes_use_of": set(), - "stochastic": False, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": True, # changes what opponent will do - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - opponent.__dict__["strategy"] = lambda opponent: C - return D diff --git a/axelrod/ipd/strategies/mindreader.py b/axelrod/ipd/strategies/mindreader.py deleted file mode 100644 index a6812f493..000000000 --- a/axelrod/ipd/strategies/mindreader.py +++ /dev/null @@ -1,108 +0,0 @@ -""" -The player classes in this module do not obey standard rules of the IPD (as -indicated by their classifier). We do not recommend putting a lot of time in to -optimising them. -""" -from axelrod.ipd._strategy_utils import inspect_strategy, look_ahead -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class MindReader(IpdPlayer): - """A player that looks ahead at what the opponent will do and decides what - to do. - - Names: - - - Mind reader: Original name by Jason Young - """ - - name = "Mind Reader" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"game"}, - "long_run_time": False, - "inspects_source": True, # Finds out what opponent will do - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def foil_strategy_inspection() -> Action: - """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" - return D - - def strategy(self, opponent: IpdPlayer) -> Action: - """ - Pretends to play the opponent a number of times before each match. - The primary purpose is to look far enough ahead to see if a defect will - be punished by the opponent. - """ - game = self.match_attributes["game"] - - best_strategy = look_ahead(self, opponent, game) - - return best_strategy - - -class ProtectedMindReader(MindReader): - """A player that looks ahead at what the opponent will do and decides what - to do. 
It is also protected from mind control strategies - - Names: - - - Protected Mind reader: Original name by Jason Young - """ - - name = "Protected Mind Reader" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"game"}, - "long_run_time": False, - "inspects_source": True, # Finds out what opponent will do - "manipulates_source": True, # Stops opponent's strategy - "manipulates_state": False, - } - - def __setattr__(self, name: str, val: str): - """Stops any other strategy altering the methods of this class """ - - if name == "strategy": - pass - else: - self.__dict__[name] = val - - -class MirrorMindReader(ProtectedMindReader): - """A player that will mirror whatever strategy it is playing against by - cheating and calling the opponent's strategy function instead of its own. - - Names: - - - Protected Mind reader: Original name by Brice Fernandes - """ - - name = "Mirror Mind Reader" - - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": True, # Reads and copies the source of the opponent - "manipulates_source": True, # Changes own source dynamically - "manipulates_state": False, - } - - @staticmethod - def foil_strategy_inspection() -> Action: - """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" - return C - - def strategy(self, opponent: IpdPlayer) -> Action: - """Will read the mind of the opponent and play the opponent's strategy. """ - return inspect_strategy(self, opponent) diff --git a/axelrod/ipd/strategies/mutual.py b/axelrod/ipd/strategies/mutual.py deleted file mode 100644 index 29966ce3e..000000000 --- a/axelrod/ipd/strategies/mutual.py +++ /dev/null @@ -1,83 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -C, D = Action.C, Action.D - - -class Desperate(IpdPlayer): - """A player that only cooperates after mutual defection. - - Names: - - - Desperate: [Berg2015]_""" - - name = "Desperate" - classifier = { - "memory_depth": 1, - "long_run_time": False, - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if not opponent.history: - return random_choice() - if self.history[-1] == D and opponent.history[-1] == D: - return C - return D - - -class Hopeless(IpdPlayer): - """A player that only defects after mutual cooperation. - - Names: - - - Hopeless: [Berg2015]_""" - - name = "Hopeless" - classifier = { - "memory_depth": 1, - "long_run_time": False, - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if not opponent.history: - return random_choice() - if self.history[-1] == C and opponent.history[-1] == C: - return D - return C - - -class Willing(IpdPlayer): - """A player that only defects after mutual defection. 
- - Names: - - - Willing: [Berg2015]_""" - - name = "Willing" - classifier = { - "memory_depth": 1, - "long_run_time": False, - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if not opponent.history: - return random_choice() - if self.history[-1] == D and opponent.history[-1] == D: - return D - return C diff --git a/axelrod/ipd/strategies/negation.py b/axelrod/ipd/strategies/negation.py deleted file mode 100644 index e8e580dda..000000000 --- a/axelrod/ipd/strategies/negation.py +++ /dev/null @@ -1,34 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -C, D = Action.C, Action.D - - -class Negation(IpdPlayer): - """ - A player starts by cooperating or defecting randomly if it's their first move, - then simply doing the opposite of the opponents last move thereafter. - - Names: - - - Negation: [PD2017]_ - """ - - name = "Negation" - classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - # Random first move - if not self.history: - return random_choice() - # Act opposite of opponent otherwise - return opponent.history[-1].flip() diff --git a/axelrod/ipd/strategies/oncebitten.py b/axelrod/ipd/strategies/oncebitten.py deleted file mode 100644 index 3618c047c..000000000 --- a/axelrod/ipd/strategies/oncebitten.py +++ /dev/null @@ -1,130 +0,0 @@ -import random - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class OnceBitten(IpdPlayer): - """ - Cooperates once when the opponent defects, but if they defect twice in a row - defaults to forgetful grudger for 10 turns defecting. - - Names: - - - Once Bitten: Original name by Holly Marissa - """ - - name = "Once Bitten" - classifier = { - "memory_depth": 12, # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.mem_length = 10 - self.grudged = False - self.grudge_memory = 0 - - def strategy(self, opponent: IpdPlayer) -> Action: - """ - Begins by playing C, then plays D for mem_length rounds if the opponent - ever plays D twice in a row. - """ - if self.grudge_memory >= self.mem_length: - self.grudge_memory = 0 - self.grudged = False - - if len(opponent.history) < 2: - return C - - if self.grudged: - self.grudge_memory += 1 - return D - elif not (C in opponent.history[-2:]): - self.grudged = True - return D - return C - - -class FoolMeOnce(IpdPlayer): - """ - Forgives one D then retaliates forever on a second D. 
- - Names: - - - Fool me once: Original name by Marc Harper - """ - - name = "Fool Me Once" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - if not opponent.history: - return C - if opponent.defections > 1: - return D - return C - - -class ForgetfulFoolMeOnce(IpdPlayer): - """ - Forgives one D then retaliates forever on a second D. Sometimes randomly - forgets the defection count, and so keeps a secondary count separate from - the standard count in IpdPlayer. - - Names: - - - Forgetful Fool Me Once: Original name by Marc Harper - """ - - name = "Forgetful Fool Me Once" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, forget_probability: float = 0.05) -> None: - """ - Parameters - ---------- - forget_probability, float - The probability of forgetting the count of opponent defections. - """ - super().__init__() - self.D_count = 0 - self._initial = C - self.forget_probability = forget_probability - - def strategy(self, opponent: IpdPlayer) -> Action: - r = random.random() - if not opponent.history: - return self._initial - if opponent.history[-1] == D: - self.D_count += 1 - if r < self.forget_probability: - self.D_count = 0 - if self.D_count > 1: - return D - return C diff --git a/axelrod/ipd/strategies/prober.py b/axelrod/ipd/strategies/prober.py deleted file mode 100644 index 5a39c722b..000000000 --- a/axelrod/ipd/strategies/prober.py +++ /dev/null @@ -1,405 +0,0 @@ -import random -from typing import List - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -Vector = List[float] - - -C, D = Action.C, Action.D - - -class CollectiveStrategy(IpdPlayer): - """Defined in [Li2009]_. 'It always cooperates in the first move and defects - in the second move. If the opponent also cooperates in the first move and - defects in the second move, CS will cooperate until the opponent defects. - Otherwise, CS will always defect.' - - Names: - - - Collective Strategy: [Li2009]_ - - """ - - name = "CollectiveStrategy" - - classifier = { - "stochastic": False, - "memory_depth": float("inf"), # Long memory - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - turn = len(self.history) - if turn == 0: - return C - if turn == 1: - return D - if opponent.defections > 1: - return D - if opponent.history[0:2] == [C, D]: - return C - return D - - -class Detective(IpdPlayer): - """ - Starts with C, D, C, C, or with the given sequence of actions. - If the opponent defects at least once in the first fixed rounds, - play as TFT forever, else defect forever. 
-
-    Names:
-
-    - Detective: [NC2019]_
-    """
-
-    name = "Detective"
-    classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self, initial_actions: List[Action] = None) -> None:
-        super().__init__()
-        if initial_actions is None:
-            self.initial_actions = [C, D, C, C]
-        else:
-            self.initial_actions = initial_actions
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        hist_size = len(self.history)
-        init_size = len(self.initial_actions)
-        if hist_size < init_size:
-            return self.initial_actions[hist_size]
-        if D not in opponent.history[:init_size]:
-            return D
-        return opponent.history[-1]  # TFT
-
-
-class Prober(IpdPlayer):
-    """
-    Plays D, C, C initially. Defects forever if opponent cooperated in moves 2
-    and 3. Otherwise plays TFT.
-
-    Names:
-
-    - Prober: [Li2011]_
-    """
-
-    name = "Prober"
-    classifier = {
-        "stochastic": False,
-        "memory_depth": float("inf"),  # Long memory
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        turn = len(self.history)
-        if turn == 0:
-            return D
-        if turn == 1:
-            return C
-        if turn == 2:
-            return C
-        if turn > 2:
-            if opponent.history[1:3] == [C, C]:
-                return D
-            else:
-                # TFT
-                return D if opponent.history[-1:] == [D] else C
-
-
-class Prober2(IpdPlayer):
-    """
-    Plays D, C, C initially. Cooperates forever if opponent played D then C
-    in moves 2 and 3. Otherwise plays TFT.
-
-    Names:
-
-    - Prober 2: [Prison1998]_
-    """
-
-    name = "Prober 2"
-    classifier = {
-        "stochastic": False,
-        "memory_depth": float("inf"),  # Long memory
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        turn = len(self.history)
-        if turn == 0:
-            return D
-        if turn == 1:
-            return C
-        if turn == 2:
-            return C
-        if turn > 2:
-            if opponent.history[1:3] == [D, C]:
-                return C
-            else:
-                # TFT
-                return D if opponent.history[-1:] == [D] else C
-
-
-class Prober3(IpdPlayer):
-    """
-    Plays D, C initially. Defects forever if opponent played C in move 2.
-    Otherwise plays TFT.
-
-    Names:
-
-    - Prober 3: [Prison1998]_
-    """
-
-    name = "Prober 3"
-    classifier = {
-        "stochastic": False,
-        "memory_depth": float("inf"),  # Long memory
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def strategy(self, opponent: IpdPlayer) -> Action:
-        turn = len(self.history)
-        if turn == 0:
-            return D
-        if turn == 1:
-            return C
-        if turn > 1:
-            if opponent.history[1] == C:
-                return D
-            else:
-                # TFT
-                return D if opponent.history[-1:] == [D] else C
-
-
-class Prober4(IpdPlayer):
-    """
-    Plays C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D initially.
-    Counts retaliating and provocative defections of the opponent.
-    If the absolute difference between the counts is less than or equal to 2,
-    defects forever.
-    Otherwise plays C for the next 5 turns and TFT for the rest of the game.
- - Names: - - - Prober 4: [Prison1998]_ - """ - - name = "Prober 4" - classifier = { - "stochastic": False, - "memory_depth": float("inf"), - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.init_sequence = [ - C, - C, - D, - C, - D, - D, - D, - C, - C, - D, - C, - D, - C, - C, - D, - C, - D, - D, - C, - D, - ] - self.just_Ds = 0 - self.unjust_Ds = 0 - self.turned_defector = False - - def strategy(self, opponent: IpdPlayer) -> Action: - if not self.history: - return self.init_sequence[0] - turn = len(self.history) - if turn < len(self.init_sequence): - if opponent.history[-1] == D: - if self.history[-1] == D: - self.just_Ds += 1 - if self.history[-1] == C: - self.unjust_Ds += 1 - return self.init_sequence[turn] - if turn == len(self.init_sequence): - diff_in_Ds = abs(self.just_Ds - self.unjust_Ds) - self.turned_defector = diff_in_Ds <= 2 - if self.turned_defector: - return D - if not self.turned_defector: - if turn < len(self.init_sequence) + 5: - return C - return D if opponent.history[-1] == D else C - - -class HardProber(IpdPlayer): - """ - Plays D, D, C, C initially. Defects forever if opponent cooperated in moves - 2 and 3. Otherwise plays TFT. - - Names: - - - Hard Prober: [Prison1998]_ - """ - - name = "Hard Prober" - classifier = { - "stochastic": False, - "memory_depth": float("inf"), # Long memory - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - turn = len(self.history) - if turn == 0: - return D - if turn == 1: - return D - if turn == 2: - return C - if turn == 3: - return C - if turn > 3: - if opponent.history[1:3] == [C, C]: - return D - else: - # TFT - return D if opponent.history[-1:] == [D] else C - - -class NaiveProber(IpdPlayer): - """ - Like tit-for-tat, but it occasionally defects with a small probability. - - Names: - - - Naive Prober: [Li2011]_ - """ - - name = "Naive Prober" - classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, p: float = 0.1) -> None: - """ - Parameters - ---------- - p, float - The probability to defect randomly - """ - super().__init__() - self.p = p - if (self.p == 0) or (self.p == 1): - self.classifier["stochastic"] = False - - def strategy(self, opponent: IpdPlayer) -> Action: - # First move - if len(self.history) == 0: - return C - # React to the opponent's last move - if opponent.history[-1] == D: - return D - # Otherwise cooperate, defect with probability 1 - self.p - choice = random_choice(1 - self.p) - return choice - - -class RemorsefulProber(NaiveProber): - """ - Like Naive Prober, but it remembers if the opponent responds to a random - defection with a defection by being remorseful and cooperating. - - For reference see: [Li2011]_. A more complete description is given in "The - Selfish Gene" (https://books.google.co.uk/books?id=ekonDAAAQBAJ): - - "Remorseful Prober remembers whether it has just spontaneously defected, and - whether the result was prompt retaliation. If so, it 'remorsefully' allows - its opponent 'one free hit' without retaliating." 
- - Names: - - - Remorseful Prober: [Li2011]_ - """ - - name = "Remorseful Prober" - classifier = { - "memory_depth": 2, # It remembers if its previous move was random - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, p: float = 0.1) -> None: - super().__init__(p) - self.probing = False - - def strategy(self, opponent: IpdPlayer) -> Action: - # First move - if len(self.history) == 0: - return C - # React to the opponent's last move - if opponent.history[-1] == D: - if self.probing: - self.probing = False - return C - return D - - # Otherwise cooperate with probability 1 - self.p - if random.random() < 1 - self.p: - self.probing = False - return C - - self.probing = True - return D diff --git a/axelrod/ipd/strategies/punisher.py b/axelrod/ipd/strategies/punisher.py deleted file mode 100644 index 1dff4b5fd..000000000 --- a/axelrod/ipd/strategies/punisher.py +++ /dev/null @@ -1,183 +0,0 @@ -from typing import List - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Punisher(IpdPlayer): - """ - A player starts by cooperating but will defect if at any point the - opponent has defected. It forgets after mem_length matches, with - 1 <= mem_length <= 20 proportional to the amount of time the opponent has - played D, punishing that player for playing D too often. - - Names: - - - Punisher: Original name by Geraint Palmer - """ - - name = "Punisher" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - """ - Initialises the player - """ - super().__init__() - self.mem_length = 1 - self.grudged = False - self.grudge_memory = 1 - - def strategy(self, opponent: IpdPlayer) -> Action: - """ - Begins by playing C, then plays D for a number of rounds proportional - to the opponent's historical '%' of playing D if the opponent ever - plays D - """ - - if self.grudge_memory >= self.mem_length: - self.grudge_memory = 0 - self.grudged = False - - if self.grudged: - self.grudge_memory += 1 - return D - - elif D in opponent.history[-1:]: - self.mem_length = (opponent.defections * 20) // len(opponent.history) - self.grudged = True - return D - - return C - - -class InversePunisher(IpdPlayer): - """ - An inverted version of Punisher. The player starts by cooperating but - will defect if at any point the opponent has defected, and forgets after - mem_length matches, with 1 <= mem_length <= 20. This time mem_length is - proportional to the amount of time the opponent has played C. - - Names: - - - Inverse Punisher: Original name by Geraint Palmer - """ - - name = "Inverse Punisher" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.mem_length = 1 - self.grudged = False - self.grudge_memory = 1 - - def strategy(self, opponent: IpdPlayer) -> Action: - """ - Begins by playing C, then plays D for a number of rounds proportional - to the opponent's historical '%' of playing C if the opponent ever plays - D.
- """ - - if self.grudge_memory >= self.mem_length: - self.grudge_memory = 0 - self.grudged = False - - if self.grudged: - self.grudge_memory += 1 - return D - elif D in opponent.history[-1:]: - self.mem_length = (opponent.cooperations * 20) // len(opponent.history) - if self.mem_length == 0: - self.mem_length += 1 - self.grudged = True - return D - return C - - -class LevelPunisher(IpdPlayer): - """ - A player starts by cooperating however, after 10 rounds - will defect if at any point the number of defections - by an opponent is greater than 20%. - - Names: - - - Level Punisher: [Eckhart2015]_ - """ - - name = "Level Punisher" - classifier = { - "memory_depth": float("inf"), # Long Memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(opponent.history) < 10: - return C - elif (len(opponent.history) - opponent.cooperations) / len( - opponent.history - ) > 0.2: - return D - else: - return C - - -class TrickyLevelPunisher(IpdPlayer): - """ - A player starts by cooperating however, after 10, 50 and 100 rounds - will defect if at any point the percentage of defections - by an opponent is greater than 20%, 10% and 5% respectively. - - Names: - - - Tricky Level Punisher: [Eckhart2015]_ - """ - - name = "Tricky Level Punisher" - classifier = { - "memory_depth": float("inf"), # Long Memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(opponent.history) == 0: - return C - if len(opponent.history) < 10: - if opponent.defections / len(opponent.history) > 0.2: - return D - if len(opponent.history) < 50: - if opponent.defections / len(opponent.history) > 0.1: - return D - if len(opponent.history) < 100: - if opponent.defections / len(opponent.history) > 0.05: - return D - return C diff --git a/axelrod/ipd/strategies/qlearner.py b/axelrod/ipd/strategies/qlearner.py deleted file mode 100644 index de233b355..000000000 --- a/axelrod/ipd/strategies/qlearner.py +++ /dev/null @@ -1,161 +0,0 @@ -import random -from collections import OrderedDict -from typing import Dict, List, Union - -from axelrod.ipd.action import Action, actions_to_str -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -Score = Union[int, float] - -C, D = Action.C, Action.D - - -class RiskyQLearner(IpdPlayer): - """A player who learns the best strategies through the q-learning - algorithm. - - This Q learner is quick to come to conclusions and doesn't care about the - future. - - Names: - - - Risky Q Learner: Original name by Geraint Palmer - """ - - name = "Risky QLearner" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - learning_rate = 0.9 - discount_rate = 0.9 - action_selection_parameter = 0.1 - memory_length = 12 - - def __init__(self) -> None: - """Initialises the player by picking a random strategy.""" - - super().__init__() - - # Set this explicitely, since the constructor of super will not pick it up - # for any subclasses that do not override methods using random calls. 
- self.classifier["stochastic"] = True - - self.prev_action = None # type: Action - self.original_prev_action = None # type: Action - self.score = 0 - self.Qs = OrderedDict({"": OrderedDict(zip([C, D], [0, 0]))}) - self.Vs = OrderedDict({"": 0}) - self.prev_state = "" - - def receive_match_attributes(self): - (R, P, S, T) = self.match_attributes["game"].RPST() - self.payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}} - - def strategy(self, opponent: IpdPlayer) -> Action: - """Runs a qlearn algorithm while the tournament is running.""" - if len(self.history) == 0: - self.prev_action = random_choice() - self.original_prev_action = self.prev_action - state = self.find_state(opponent) - reward = self.find_reward(opponent) - if state not in self.Qs: - self.Qs[state] = OrderedDict(zip([C, D], [0, 0])) - self.Vs[state] = 0 - self.perform_q_learning(self.prev_state, state, self.prev_action, reward) - action = self.select_action(state) - self.prev_state = state - self.prev_action = action - return action - - def select_action(self, state: str) -> Action: - """ - Selects the action based on the epsilon-soft policy - """ - rnd_num = random.random() - p = 1.0 - self.action_selection_parameter - if rnd_num < p: - return max(self.Qs[state], key=lambda x: self.Qs[state][x]) - return random_choice() - - def find_state(self, opponent: IpdPlayer) -> str: - """ - Finds the my_state (the opponents last n moves + - its previous proportion of playing C) as a hashable state - """ - prob = "{:.1f}".format(opponent.cooperations) - action_str = actions_to_str(opponent.history[-self.memory_length :]) - return action_str + prob - - def perform_q_learning(self, prev_state: str, state: str, action: Action, reward): - """ - Performs the qlearning algorithm - """ - self.Qs[prev_state][action] = (1.0 - self.learning_rate) * self.Qs[prev_state][ - action - ] + self.learning_rate * (reward + self.discount_rate * self.Vs[state]) - self.Vs[prev_state] = max(self.Qs[prev_state].values()) - - def find_reward(self, opponent: IpdPlayer) -> Dict[Action, Dict[Action, Score]]: - """ - Finds the reward gained on the last iteration - """ - - if len(opponent.history) == 0: - opp_prev_action = random_choice() - else: - opp_prev_action = opponent.history[-1] - return self.payoff_matrix[self.prev_action][opp_prev_action] - - -class ArrogantQLearner(RiskyQLearner): - """A player who learns the best strategies through the q-learning - algorithm. - - This Q learner jumps to quick conclusions and cares about the future. - - Names: - - - Arrogant Q Learner: Original name by Geraint Palmer - """ - - name = "Arrogant QLearner" - learning_rate = 0.9 - discount_rate = 0.1 - - -class HesitantQLearner(RiskyQLearner): - """A player who learns the best strategies through the q-learning algorithm. - - This Q learner is slower to come to conclusions and does not look ahead much. - - Names: - - - Hesitant Q Learner: Original name by Geraint Palmer - """ - - name = "Hesitant QLearner" - learning_rate = 0.1 - discount_rate = 0.9 - - -class CautiousQLearner(RiskyQLearner): - """A player who learns the best strategies through the q-learning algorithm. - - This Q learner is slower to come to conclusions and wants to look ahead - more. 
- - Names: - - - Cautious Q Learner: Original name by Geraint Palmer - """ - - name = "Cautious QLearner" - learning_rate = 0.1 - discount_rate = 0.1 diff --git a/axelrod/ipd/strategies/rand.py b/axelrod/ipd/strategies/rand.py deleted file mode 100644 index 3da79d28f..000000000 --- a/axelrod/ipd/strategies/rand.py +++ /dev/null @@ -1,46 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - - -class Random(IpdPlayer): - """A player who randomly chooses between cooperating and defecting. - - This strategy came 15th in Axelrod's original tournament. - - Names: - - - Random: [Axelrod1980]_ - - Lunatic: [Tzafestas2000]_ - """ - - name = "Random" - classifier = { - "memory_depth": 0, # Memory-one Four-Vector = (p, p, p, p) - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, p: float = 0.5) -> None: - """ - Parameters - ---------- - p, float - The probability to cooperate - - Special Cases - ------------- - Random(0) is equivalent to Defector - Random(1) is equivalent to Cooperator - """ - super().__init__() - self.p = p - if p in [0, 1]: - self.classifier["stochastic"] = False - - def strategy(self, opponent: IpdPlayer) -> Action: - return random_choice(self.p) diff --git a/axelrod/ipd/strategies/resurrection.py b/axelrod/ipd/strategies/resurrection.py deleted file mode 100644 index 3e3e1f068..000000000 --- a/axelrod/ipd/strategies/resurrection.py +++ /dev/null @@ -1,73 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Resurrection(IpdPlayer): - """ - A player starts by cooperating and defects if the number of rounds - played by the player is greater than five and the last five rounds - are defections. - - Otherwise, the strategy plays like Tit-for-tat. - - Names: - - - Resurrection: [Eckhart2015]_ - """ - - # These are various properties for the strategy - name = "Resurrection" - classifier = { - "memory_depth": 5, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) == 0: - return C - if len(self.history) >= 5 and self.history[-5:] == [D, D, D, D, D]: - return D - else: - return opponent.history[-1] - - -class DoubleResurrection(IpdPlayer): - """ - A player starts by cooperating and defects if the number of rounds - played by the player is greater than five and the last five rounds - are cooperations. - - If the last five rounds were defections, the player cooperates. 
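Random above, and many stochastic strategies in this diff, call random_choice from axelrod.ipd.random_, whose apparent contract is to return C with the given probability. A minimal stand-in for readers without the source at hand (an illustrative sketch, not the library code):

import random
from enum import Enum

class Action(Enum):
    C = "C"
    D = "D"

def random_choice(p: float = 0.5) -> Action:
    # Cooperate with probability p, defect otherwise; this is why
    # Random(0) collapses to Defector and Random(1) to Cooperator.
    return Action.C if random.random() < p else Action.D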
- - Names: - - - DoubleResurrection: [Eckhart2015]_ - """ - - name = "DoubleResurrection" - classifier = { - "memory_depth": 5, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) == 0: - return C - if len(self.history) >= 5 and self.history[-5:] == [C, C, C, C, C]: - return D - elif len(self.history) >= 5 and self.history[-5:] == [D, D, D, D, D]: - return C - else: - return opponent.history[-1] diff --git a/axelrod/ipd/strategies/retaliate.py b/axelrod/ipd/strategies/retaliate.py deleted file mode 100644 index 3e1dbf445..000000000 --- a/axelrod/ipd/strategies/retaliate.py +++ /dev/null @@ -1,196 +0,0 @@ -from collections import defaultdict - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class Retaliate(IpdPlayer): - """ - A player starts by cooperating but will retaliate once the opponent has - defected against the player's cooperation more than 10 percent as often - as the player has defected against the opponent's cooperation. - - Names: - - - Retaliate: Original name by Owen Campbell - """ - - name = "Retaliate" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "inspects_source": False, - "makes_use_of": set(), - "long_run_time": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, retaliation_threshold: float = 0.1) -> None: - """ - Uses the basic init from the IpdPlayer class and also sets the - retaliation threshold. - """ - super().__init__() - self.retaliation_threshold = retaliation_threshold - self.play_counts = defaultdict(int) # type: defaultdict - - def strategy(self, opponent: IpdPlayer) -> Action: - """ - If the opponent has played D to my C more often than x% of the time - that I've done the same to him, play D. Otherwise, play C. - """ - - if len(self.history): - last_round = (self.history[-1], opponent.history[-1]) - self.play_counts[last_round] += 1 - CD_count = self.play_counts[(C, D)] - DC_count = self.play_counts[(D, C)] - if CD_count > DC_count * self.retaliation_threshold: - return D - return C - - -class Retaliate2(Retaliate): - """ - Retaliate player with a threshold of 8 percent. - - Names: - - - Retaliate 2: Original name by Owen Campbell - """ - - name = "Retaliate 2" - - def __init__(self, retaliation_threshold: float = 0.08) -> None: - super().__init__(retaliation_threshold=retaliation_threshold) - - -class Retaliate3(Retaliate): - """ - Retaliate player with a threshold of 5 percent. - - Names: - - - Retaliate 3: Original name by Owen Campbell - """ - - name = "Retaliate 3" - - def __init__(self, retaliation_threshold: float = 0.05) -> None: - super().__init__(retaliation_threshold=retaliation_threshold) - - -class LimitedRetaliate(IpdPlayer): - """ - A player that co-operates unless the opponent defects and wins. - It will then retaliate by defecting. It stops when either it has beaten - the opponent 10 times more often than it has lost, or it reaches the - retaliation limit (20 defections).
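The bookkeeping shared by Retaliate and LimitedRetaliate is a tally of joint outcomes per round; a minimal sketch of the trigger condition (illustrative, with "C"/"D" strings in place of Action members):

from collections import defaultdict

def should_retaliate(my_moves, opp_moves, threshold=0.1):
    # Count joint (my_move, opponent_move) outcomes, then compare how often
    # the opponent defected against a cooperation with how often we did.
    play_counts = defaultdict(int)
    for pair in zip(my_moves, opp_moves):
        play_counts[pair] += 1
    return play_counts[("C", "D")] > play_counts[("D", "C")] * threshold

print(should_retaliate("CCCDC", "DDCCD"))  # True: 3 rounds of (C, D) vs 1 of (D, C)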
- - Names: - - - Limited Retaliate: Original name by Owen Campbell - """ - - name = "Limited Retaliate" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, retaliation_threshold: float = 0.1, retaliation_limit: int = 20 - ) -> None: - """ - Parameters - ---------- - retaliation_threshold, float - The threshold of the difference in defections, previous rounds of - (C, D) versus (D, C) - retaliation_limit, int - The maximum number of retaliations until the strategy returns to - cooperation - """ - super().__init__() - self.retaliating = False - self.retaliation_count = 0 - self.retaliation_threshold = retaliation_threshold - self.retaliation_limit = retaliation_limit - self.play_counts = defaultdict(int) # type: defaultdict - - def strategy(self, opponent: IpdPlayer) -> Action: - """ - If the opponent has played D to my C more often than x% of the time - that I've done the same to him, retaliate by playing D but stop doing - so once I've hit the retaliation limit. - """ - - if len(self.history): - last_round = (self.history[-1], opponent.history[-1]) - self.play_counts[last_round] += 1 - CD_count = self.play_counts[(C, D)] - DC_count = self.play_counts[(D, C)] - if CD_count > DC_count * self.retaliation_threshold: - self.retaliating = True - else: - self.retaliating = False - self.retaliation_count = 0 - - if self.retaliating: - if self.retaliation_count < self.retaliation_limit: - self.retaliation_count += 1 - return D - else: - self.retaliation_count = 0 - self.retaliating = False - - return C - - -class LimitedRetaliate2(LimitedRetaliate): - """ - LimitedRetaliate player with a threshold of 8 percent and a - retaliation limit of 15. - - Names: - - - Limited Retaliate 2: Original name by Owen Campbell - """ - - name = "Limited Retaliate 2" - - def __init__( - self, retaliation_threshold: float = 0.08, retaliation_limit: int = 15 - ) -> None: - super().__init__( - retaliation_threshold=retaliation_threshold, - retaliation_limit=retaliation_limit, - ) - - -class LimitedRetaliate3(LimitedRetaliate): - """ - LimitedRetaliate player with a threshold of 5 percent and a - retaliation limit of 20. - - Names: - - - Limited Retaliate 3: Original name by Owen Campbell - """ - - name = "Limited Retaliate 3" - - def __init__( - self, retaliation_threshold: float = 0.05, retaliation_limit: int = 20 - ) -> None: - super().__init__( - retaliation_threshold=retaliation_threshold, - retaliation_limit=retaliation_limit, - ) diff --git a/axelrod/ipd/strategies/revised_downing.py b/axelrod/ipd/strategies/revised_downing.py deleted file mode 100644 index f7d382aad..000000000 --- a/axelrod/ipd/strategies/revised_downing.py +++ /dev/null @@ -1,75 +0,0 @@ -""" -Revised Downing implemented from the Fortran source code for the second of -Axelrod's tournaments. -""" -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - -class RevisedDowning(IpdPlayer): - """ - Strategy submitted to Axelrod's second tournament by Leslie Downing. - (K59R). - - Revised Downing attempts to determine if players are cooperative or not. - If so, it cooperates with them. - - This strategy is a revision of the strategy submitted by Downing to - Axelrod's first tournament. 
- - - Names: - - Revised Downing: [Axelrod1980]_ - """ - - name = "Revised Downing" - - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.good = 1.0 - self.bad = 0.0 - self.nice1 = 0 - self.nice2 = 0 - self.total_C = 0 # note the same as self.cooperations - self.total_D = 0 # note the same as self.defections - - def strategy(self, opponent: IpdPlayer) -> Action: - round_number = len(self.history) + 1 - - if round_number == 1: - return C - - # Update various counts - if round_number > 2: - if self.history[-2] == D: - if opponent.history[-1] == C: - self.nice2 += 1 - self.total_D += 1 - self.bad = self.nice2 / self.total_D - else: - if opponent.history[-1] == C: - self.nice1 += 1 - self.total_C += 1 - self.good = self.nice1 / self.total_C - # Make a decision based on the accrued counts - c = 6.0 * self.good - 8.0 * self.bad - 2 - alt = 4.0 * self.good - 5.0 * self.bad - 1 - if c >= 0 and c >= alt: - move = C - elif (c >= 0 and c < alt) or (alt >= 0): - move = self.history[-1].flip() - else: - move = D - return move - diff --git a/axelrod/ipd/strategies/selfsteem.py b/axelrod/ipd/strategies/selfsteem.py deleted file mode 100644 index d5b5f8db8..000000000 --- a/axelrod/ipd/strategies/selfsteem.py +++ /dev/null @@ -1,53 +0,0 @@ -from math import pi, sin - -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -C, D = Action.C, Action.D - - -class SelfSteem(IpdPlayer): - """ - This strategy is based on the feeling with the same name. - It is modeled on the sine curve (f = sin(2 * pi * n / 10)), which varies - with the current iteration. - - If f > 0.95, the 'ego' of the algorithm is inflated; it always defects. - If 0.95 > abs(f) > 0.3, rational behavior; it follows the TitForTat algorithm. - If 0.3 > f > -0.3, random behavior. - If f < -0.95, the algorithm is at rock bottom; it always cooperates. - - Furthermore, the algorithm implements a retaliation policy: if the opponent - defects, the sine curve is shifted. But due to lack of further information, - this implementation does not include a sine phase change. - - Names: - - - SelfSteem: [Andre2013]_ - """ - - name = "SelfSteem" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - turns_number = len(self.history) - sine_value = sin(2 * pi * turns_number / 10) - - if sine_value > 0.95: - return D - - if abs(sine_value) < 0.95 and abs(sine_value) > 0.3: - return opponent.history[-1] - - if sine_value < 0.3 and sine_value > -0.3: - return random_choice() - - return C diff --git a/axelrod/ipd/strategies/sequence_player.py b/axelrod/ipd/strategies/sequence_player.py deleted file mode 100644 index cbff54f0e..000000000 --- a/axelrod/ipd/strategies/sequence_player.py +++ /dev/null @@ -1,111 +0,0 @@ -from types import FunctionType -from typing import Tuple - -from axelrod.ipd._strategy_utils import thue_morse_generator -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class SequencePlayer(IpdPlayer): - """Abstract base class for players that use a generated sequence to - determine their plays.
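SelfSteem's banding above can be tabulated over one 10-round sine cycle; a short sketch of the thresholds (note that in the class itself exact boundary values fall through to cooperation):

from math import pi, sin

for n in range(10):
    f = sin(2 * pi * n / 10)
    if f > 0.95:
        band = "always defect"
    elif 0.3 < abs(f) < 0.95:
        band = "tit for tat"
    elif -0.3 < f < 0.3:
        band = "random"
    else:
        band = "always cooperate"  # f <= -0.95
    print(n, round(f, 3), band)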
- - Names: - - - Sequence Player: Original name by Marc Harper - """ - - def __init__( - self, generator_function: FunctionType, generator_args: Tuple = () - ) -> None: - super().__init__() - self.sequence_generator = generator_function(*generator_args) - - @staticmethod - def meta_strategy(value: int) -> Action: - """Determines how to map the sequence value to cooperate or defect. - By default, treat values like python truth values. Override in child - classes for alternate behaviors.""" - if value == 0: - return D - else: - return C - - def strategy(self, opponent: IpdPlayer) -> Action: - # Iterate through the sequence and apply the meta strategy - for s in self.sequence_generator: - return self.meta_strategy(s) - - def __getstate__(self): - """Generator attributes are not pickleable so we remove and rebuild.""" - return_dict = self.__dict__.copy() - del return_dict["sequence_generator"] - return return_dict - - def __setstate__(self, state): - self.reset() - self._history = state["_history"] - self.match_attributes = state["match_attributes"] - for _ in self.history: - next(self.sequence_generator) - - -class ThueMorse(SequencePlayer): - """ - A player who cooperates or defects according to the Thue-Morse sequence. - The first few terms of the Thue-Morse sequence are: - 0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 . . . - - Thue-Morse sequence: http://mathworld.wolfram.com/Thue-MorseSequence.html - - Names: - - - Thue Morse: Original name by Geraint Palmer - """ - - name = "ThueMorse" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__(thue_morse_generator, (0,)) - - -class ThueMorseInverse(ThueMorse): - """A player who plays the inverse of the Thue-Morse sequence. - - Names: - - - Inverse Thue Morse: Original name by Geraint Palmer - """ - - name = "ThueMorseInverse" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super(ThueMorse, self).__init__(thue_morse_generator, (0,)) - - @staticmethod - def meta_strategy(value: int) -> Action: - # Switch the default cooperate and defect action on 0 or 1 - if value == 0: - return C - else: - return D diff --git a/axelrod/ipd/strategies/shortmem.py b/axelrod/ipd/strategies/shortmem.py deleted file mode 100644 index 38180286d..000000000 --- a/axelrod/ipd/strategies/shortmem.py +++ /dev/null @@ -1,48 +0,0 @@ -from axelrod import IpdPlayer -from axelrod.ipd.action import Action - -C, D = Action.C, Action.D - - -class ShortMem(IpdPlayer): - """ - A player starts by always cooperating for the first 10 moves. - - From the tenth round on, the player analyzes the opponent's last ten - actions and compares the numbers of defections and cooperations. If - cooperations outnumber defections by at least 30% (3 of the last 10 - moves), it cooperates. - If defections outnumber cooperations by the same margin, it defects. - Otherwise, the program follows the TitForTat algorithm.
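The thue_morse_generator consumed by the sequence players above is imported from axelrod.ipd._strategy_utils and is not part of this diff; a plausible equivalent (the n-th Thue-Morse term is the parity of set bits in the binary expansion of n) would be:

from itertools import count

def thue_morse_generator(start: int = 0):
    # Yields 0 1 1 0 1 0 0 1 ...: popcount parity of successive integers.
    for n in count(start):
        yield bin(n).count("1") % 2

gen = thue_morse_generator()
print([next(gen) for _ in range(8)])  # [0, 1, 1, 0, 1, 0, 0, 1]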
- - Names: - - - ShortMem: [Andre2013]_ - """ - - name = "ShortMem" - classifier = { - "memory_depth": float('inf'), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - if len(opponent.history) <= 10: - return C - - array = opponent.history[-10:] - C_counts = array.count(C) - D_counts = array.count(D) - - if C_counts - D_counts >= 3: - return C - elif D_counts - C_counts >= 3: - return D - else: - return opponent.history[-1] diff --git a/axelrod/ipd/strategies/stalker.py b/axelrod/ipd/strategies/stalker.py deleted file mode 100644 index 74357006a..000000000 --- a/axelrod/ipd/strategies/stalker.py +++ /dev/null @@ -1,78 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice -from axelrod.ipd.strategy_transformers import FinalTransformer - -C, D = Action.C, Action.D - - -@FinalTransformer((D,), name_prefix=None) # End with defection -class Stalker(IpdPlayer): - """ - - This is a strategy which is only influenced by the score. - Its behavior is based on three values: - the very_bad_score (all rounds in defection) - very_good_score (all rounds in cooperation) - wish_score (average of the very_bad and very_good scores) - - It starts with cooperation. - - - If current_average_score > very_good_score, it defects - - If current_average_score lies in (wish_score, very_good_score), it - cooperates - - If current_average_score > 2, it cooperates - - If current_average_score lies in (1, 2), it defects - - In the remaining case, current_average_score < 1, it behaves randomly. - - It defects in the last round. - - Names: - - - Stalker: [Andre2013]_ - """ - - name = "Stalker" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(["game", "length"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def receive_match_attributes(self): - R, P, S, T = self.match_attributes["game"].RPST() - self.very_good_score = R - self.very_bad_score = P - self.wish_score = (R + P) / 2 - self.current_score = 0 - - def score_last_round(self, opponent: IpdPlayer): - # Load the default game if not supplied by a tournament.
- game = self.match_attributes["game"] - last_round = (self.history[-1], opponent.history[-1]) - scores = game.score(last_round) - self.current_score += scores[0] - - def strategy(self, opponent: IpdPlayer) -> Action: - - if len(self.history) == 0: - return C - - self.score_last_round(opponent) - - current_average_score = self.current_score / len(self.history) - - if current_average_score > self.very_good_score: - return D - if (current_average_score > self.wish_score) and ( - current_average_score < self.very_good_score - ): - return C - if current_average_score > 2: - return C - if (current_average_score < 2) and (current_average_score > 1): - return D - return random_choice() diff --git a/axelrod/ipd/strategies/titfortat.py b/axelrod/ipd/strategies/titfortat.py deleted file mode 100644 index c96be3c11..000000000 --- a/axelrod/ipd/strategies/titfortat.py +++ /dev/null @@ -1,917 +0,0 @@ -from axelrod.ipd.action import Action, actions_to_str -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice -from axelrod.ipd.strategy_transformers import FinalTransformer, TrackHistoryTransformer - -C, D = Action.C, Action.D - - -class TitForTat(IpdPlayer): - """ - A player starts by cooperating and then mimics the previous action of the - opponent. - - This strategy was referred to as the *'simplest'* strategy submitted to - Axelrod's first tournament. It came first. - - Note that the code for this strategy is written in a fairly verbose - way. This is done so that it can serve as an example strategy for - those who might be new to Python. - - Names: - - - Rapoport's strategy: [Axelrod1980]_ - - TitForTat: [Axelrod1980]_ - """ - - # These are various properties for the strategy - name = "Tit For Tat" - classifier = { - "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - """This is the actual strategy""" - # First move - if not self.history: - return C - # React to the opponent's last move - if opponent.history[-1] == D: - return D - return C - - -class TitFor2Tats(IpdPlayer): - """A player starts by cooperating and then defects only after two defects by - opponent. - - Submitted to Axelrod's second tournament by John Maynard Smith; it came in - 24th in that tournament. - - Names: - - - Tit for two Tats: [Axelrod1984]_ - - Slow tit for two tats: Original name by Ranjini Das - - JMaynardSmith: [Axelrod1980b]_ - """ - - name = "Tit For 2 Tats" - classifier = { - "memory_depth": 2, # Long memory, memory-2 - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - return D if opponent.history[-2:] == [D, D] else C - - -class TwoTitsForTat(IpdPlayer): - """A player starts by cooperating and replies to each defect by two - defections. 
- - Names: - - - Two Tits for Tats: [Axelrod1984]_ - """ - - name = "Two Tits For Tat" - classifier = { - "memory_depth": 2, # Long memory, memory-2 - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - return D if D in opponent.history[-2:] else C - - -class DynamicTwoTitsForTat(IpdPlayer): - """ - A player starts by cooperating and then punishes its opponent's - defections with defections, but with a dynamic bias towards cooperating - based on the opponent's ratio of cooperations to total moves - (so their current probability of cooperating regardless of the - opponent's move (aka: forgiveness)). - - Names: - - - Dynamic Two Tits For Tat: Original name by Grant Garrett-Grossman. - """ - - name = "Dynamic Two Tits For Tat" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent): - # First move - if not opponent.history: - # Make sure we cooperate first turn - return C - if D in opponent.history[-2:]: - # Probability of cooperating regardless - return random_choice(opponent.cooperations / len(opponent.history)) - else: - return C - - -class Bully(IpdPlayer): - """A player that behaves opposite to Tit For Tat, including first move. - - Starts by defecting and then does the opposite of opponent's previous move. - This is the complete opposite of Tit For Tat, also called Bully in the - literature. - - Names: - - - Reverse Tit For Tat: [Nachbar1992]_ - - """ - - name = "Bully" - classifier = { - "memory_depth": 1, # Four-Vector = (0, 1, 0, 1) - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - return C if opponent.history[-1:] == [D] else D - - -class SneakyTitForTat(IpdPlayer): - """Tries defecting once and repents if punished. - - Names: - - - Sneaky Tit For Tat: Original name by Karol Langner - """ - - name = "Sneaky Tit For Tat" - classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if len(self.history) < 2: - return C - if D not in opponent.history: - return D - if opponent.history[-1] == D and self.history[-2] == D: - return C - return opponent.history[-1] - - -class SuspiciousTitForTat(IpdPlayer): - """A variant of Tit For Tat that starts off with a defection. - - Names: - - - Suspicious Tit For Tat: [Hilbe2013]_ - - Mistrust: [Beaufils1997]_ - """ - - name = "Suspicious Tit For Tat" - classifier = { - "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - return C if opponent.history[-1:] == [C] else D - - -class AntiTitForTat(IpdPlayer): - """A strategy that plays the opposite of the opponents previous move. - This is similar to Bully, except that the first move is cooperation. 
- - Names: - - - Anti Tit For Tat: [Hilbe2013]_ - - Psycho (PSYC): [Ashlock2009]_ - """ - - name = "Anti Tit For Tat" - classifier = { - "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - return D if opponent.history[-1:] == [C] else C - - -class HardTitForTat(IpdPlayer): - """A variant of Tit For Tat that uses a longer history for retaliation. - - Names: - - - Hard Tit For Tat: [PD2017]_ - """ - - name = "Hard Tit For Tat" - classifier = { - "memory_depth": 3, # memory-three - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - # Cooperate on the first move - if not opponent.history: - return C - # Defects if D in the opponent's last three moves - if D in opponent.history[-3:]: - return D - # Otherwise cooperates - return C - - -class HardTitFor2Tats(IpdPlayer): - """A variant of Tit For Two Tats that uses a longer history for - retaliation. - - Names: - - - Hard Tit For Two Tats: [Stewart2012]_ - """ - - name = "Hard Tit For 2 Tats" - classifier = { - "memory_depth": 3, # memory-three - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - # Cooperate on the first move - if not opponent.history: - return C - # Defects if two consecutive D in the opponent's last three moves - history_string = actions_to_str(opponent.history[-3:]) - if "DD" in history_string: - return D - # Otherwise cooperates - return C - - -class OmegaTFT(IpdPlayer): - """OmegaTFT modifies Tit For Tat in two ways: - - checks for deadlock loops of alternating rounds of (C, D) and (D, C), - and attempting to break them - - uses a more sophisticated retaliation mechanism that is noise tolerant - - Names: - - - OmegaTFT: [Slany2007]_ - """ - - name = "Omega TFT" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__( - self, deadlock_threshold: int = 3, randomness_threshold: int = 8 - ) -> None: - super().__init__() - self.deadlock_threshold = deadlock_threshold - self.randomness_threshold = randomness_threshold - self.randomness_counter = 0 - self.deadlock_counter = 0 - - def strategy(self, opponent: IpdPlayer) -> Action: - # Cooperate on the first move - if not self.history: - return C - # TFT on round 2 - if len(self.history) == 1: - return opponent.history[-1] - - # Are we deadlocked? 
(in a CD -> DC loop) - if self.deadlock_counter >= self.deadlock_threshold: - move = C - if self.deadlock_counter == self.deadlock_threshold: - self.deadlock_counter = self.deadlock_threshold + 1 - else: - self.deadlock_counter = 0 - else: - # Update counters - if opponent.history[-2:] == [C, C]: - self.randomness_counter -= 1 - # If the opponent's move changed, increase the counter - if opponent.history[-2] != opponent.history[-1]: - self.randomness_counter += 1 - # If the opponent's last move differed from mine, - # increase the counter - if self.history[-1] != opponent.history[-1]: - self.randomness_counter += 1 - # Compare counts to thresholds - # If randomness_counter exceeds Y, Defect for the remainder - if self.randomness_counter >= self.randomness_threshold: - move = D - else: - # TFT - move = opponent.history[-1] - # Check for deadlock - if opponent.history[-2] != opponent.history[-1]: - self.deadlock_counter += 1 - else: - self.deadlock_counter = 0 - return move - - -class OriginalGradual(IpdPlayer): - """ - A player that punishes defections with a growing number of defections - but after punishing for `punishment_limit` number of times enters a calming - state and cooperates no matter what the opponent does for two rounds. - - The `punishment_limit` is incremented whenever the opponent defects and the - strategy is not in either calming or punishing state. - - Note that `Gradual` appears in [CRISTAL-SMAC2018]_ however that version of - `Gradual` does not give the results reported in [Beaufils1997]_ which is the - paper that first introduced the strategy. For a longer discussion of this - see: https://github.com/Axelrod-Python/Axelrod/issues/1294. This is why this - strategy has been renamed to `OriginalGradual`. - - Names: - - - Gradual: [Beaufils1997]_ - """ - - name = "Original Gradual" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - - super().__init__() - self.calming = False - self.punishing = False - self.punishment_count = 0 - self.punishment_limit = 0 - - def strategy(self, opponent: IpdPlayer) -> Action: - - if self.calming: - self.calming = False - return C - - if self.punishing: - if self.punishment_count < self.punishment_limit: - self.punishment_count += 1 - return D - else: - self.calming = True - self.punishing = False - self.punishment_count = 0 - return C - - if D in opponent.history[-1:]: - self.punishing = True - self.punishment_count += 1 - self.punishment_limit += 1 - return D - - return C - - -class Gradual(IpdPlayer): - """ - Similar to OriginalGradual, this is a player that punishes defections with a - growing number of defections but after punishing for `punishment_limit` - number of times enters a calming state and cooperates no matter what the - opponent does for two rounds. - - This version of Gradual is an update of `OriginalGradual` and the difference - is that the `punishment_limit` is incremented whenever the opponent defects - (regardless of the state of the player). - - Note that this version of `Gradual` appears in [CRISTAL-SMAC2018]_ however - this version of - `Gradual` does not give the results reported in [Beaufils1997]_ which is the - paper that first introduced the strategy. For a longer discussion of this - see: https://github.com/Axelrod-Python/Axelrod/issues/1294. 
- - This version is based on https://github.com/cristal-smac/ipd/blob/master/src/strategies.py#L224 - - Names: - - - Gradual: [CRISTAL-SMAC2018]_ - """ - - name = "Gradual" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - - super().__init__() - self.calm_count = 0 - self.punish_count = 0 - - def strategy(self, opponent: IpdPlayer) -> Action: - - if len(self.history) == 0: - return C - - if self.punish_count > 0: - self.punish_count -= 1 - return D - - if self.calm_count > 0: - self.calm_count -= 1 - return C - - if opponent.history[-1] == D: - self.punish_count = opponent.defections - 1 - self.calm_count = 2 - return D - return C - - -@TrackHistoryTransformer(name_prefix=None) -class ContriteTitForTat(IpdPlayer): - """ - A player that corresponds to Tit For Tat if there is no noise. In the case - of a noisy match: if the opponent defects as a result of a noisy defection - then ContriteTitForTat will become 'contrite' until it successfully - cooperates. - - Names: - - - Contrite Tit For Tat: [Axelrod1995]_ - """ - - name = "Contrite Tit For Tat" - classifier = { - "memory_depth": 3, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self): - super().__init__() - self.contrite = False - self._recorded_history = [] - - def strategy(self, opponent: IpdPlayer) -> Action: - - if not opponent.history: - return C - - # If contrite but managed to cooperate: apologise. - if self.contrite and self.history[-1] == C: - self.contrite = False - return C - - # Check if noise provoked opponent - if self._recorded_history[-1] != self.history[-1]: # Check if noise - if self.history[-1] == D and opponent.history[-1] == C: - self.contrite = True - - return opponent.history[-1] - - -class AdaptiveTitForTat(IpdPlayer): - """ATFT - Adaptive Tit For Tat (Basic Model) - - Algorithm - - if (opponent played C in the last cycle) then - world = world + r*(1-world) - else - world = world + r*(0-world) - If (world >= 0.5) play C, else play D - - Attributes - - world : float [0.0, 1.0], set to 0.5 - continuous variable representing the world's image - 1.0 - total cooperation - 0.0 - total defection - other values - something in between of the above - updated every round, starting value shouldn't matter as long as - it's >= 0.5 - - Parameters - - rate : float [0.0, 1.0], default=0.5 - adaptation rate - r in Algorithm above - smaller value means more gradual and robust - to perturbations behaviour - - Names: - - - Adaptive Tit For Tat: [Tzafestas2000]_ - """ - - name = "Adaptive Tit For Tat" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - world = 0.5 - - def __init__(self, rate: float = 0.5) -> None: - super().__init__() - self.rate = rate - self.world = rate - - def strategy(self, opponent: IpdPlayer) -> Action: - - if len(opponent.history) == 0: - return C - - if opponent.history[-1] == C: - self.world += self.rate * (1.0 - self.world) - else: - self.world -= self.rate * self.world - - if self.world >= 0.5: - return C - - return D - - -class SpitefulTitForTat(IpdPlayer): - """ - A player starts by cooperating and then mimics 
the previous action of the - opponent until opponent defects twice in a row, at which point player - always defects - - Names: - - - Spiteful Tit For Tat: [Prison1998]_ - """ - - name = "Spiteful Tit For Tat" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self) -> None: - super().__init__() - self.retaliating = False - - def strategy(self, opponent: IpdPlayer) -> Action: - # First move - if not self.history: - return C - - if opponent.history[-2:] == [D, D]: - self.retaliating = True - - if self.retaliating: - return D - else: - # React to the opponent's last move - if opponent.history[-1] == D: - return D - return C - - -class SlowTitForTwoTats2(IpdPlayer): - """ - A player plays C twice, then if the opponent plays the same move twice, - plays that move, otherwise plays previous move. - - Names: - - - Slow Tit For Tat: [Prison1998]_ - """ - - name = "Slow Tit For Two Tats 2" - classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - - # Start with two cooperations - if len(self.history) < 2: - return C - - # Mimic if opponent plays the same move twice - if opponent.history[-2] == opponent.history[-1]: - return opponent.history[-1] - - # Otherwise play previous move - return self.history[-1] - - -@FinalTransformer((D,), name_prefix=None) -class Alexei(IpdPlayer): - """ - Plays similar to Tit-for-Tat, but always defect on last turn. - - Names: - - - Alexei: [LessWrong2011]_ - """ - - name = "Alexei" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - if not self.history: - return C - if opponent.history[-1] == D: - return D - return C - - -@FinalTransformer((D,), name_prefix=None) -class EugineNier(IpdPlayer): - """ - Plays similar to Tit-for-Tat, but with two conditions: - 1) Always Defect on Last Move - 2) If other player defects five times, switch to all defects. 
- - Names: - - - Eugine Nier: [LessWrong2011]_ - """ - - name = "EugineNier" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self): - super().__init__() - self.is_defector = False - - def strategy(self, opponent: IpdPlayer) -> Action: - if not self.history: - return C - if not (self.is_defector) and opponent.defections >= 5: - self.is_defector = True - if self.is_defector: - return D - return opponent.history[-1] - - -class NTitsForMTats(IpdPlayer): - """ - A parameterizable Tit-for-Tat, - The arguments are: - 1) M: the number of defection before retaliation - 2) N: the number of retaliations - - Names: - - - N Tit(s) For M Tat(s): Original name by Marc Harper - """ - - name = "N Tit(s) For M Tat(s)" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, N: int = 3, M: int = 2) -> None: - """ - Parameters - ---------- - N: int - Number of retaliations - M: int - Number of defection before retaliation - - Special Cases - ------------- - NTitsForMTats(1,1) is equivalent to TitForTat - NTitsForMTats(1,2) is equivalent to TitFor2Tats - NTitsForMTats(2,1) is equivalent to TwoTitsForTat - NTitsForMTats(0,*) is equivalent to Cooperator - NTitsForMTats(*,0) is equivalent to Defector - """ - super().__init__() - self.N = N - self.M = M - self.classifier["memory_depth"] = max([M, N]) - self.retaliate_count = 0 - - def strategy(self, opponent: IpdPlayer) -> Action: - # if opponent defected consecutively M times, start the retaliation - if not self.M or opponent.history[-self.M :].count(D) == self.M: - self.retaliate_count = self.N - if self.retaliate_count: - self.retaliate_count -= 1 - return D - return C - - -@FinalTransformer((D,), name_prefix=None) -class Michaelos(IpdPlayer): - """ - Plays similar to Tit-for-Tat with two exceptions: - 1) Defect on last turn. - 2) After own defection and opponent's cooperation, 50 percent of the time, - cooperate. The other 50 percent of the time, always defect for the rest of - the game. - - Names: - - - Michaelos: [LessWrong2011]_ - """ - - name = "Michaelos" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": {"length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self): - super().__init__() - self.is_defector = False - - def strategy(self, opponent: IpdPlayer) -> Action: - if not self.history: - return C - if self.is_defector: - return D - if self.history[-1] == D and opponent.history[-1] == C: - decision = random_choice() - if decision == C: - return C - else: - self.is_defector = True - return D - - return opponent.history[-1] - - -class RandomTitForTat(IpdPlayer): - """ - A player starts by cooperating and then follows by copying its - opponent (tit for tat style). From then on the player - will switch between copying its opponent and randomly - responding every other iteration. - - Name: - - - Random TitForTat: Original name by Zachary M. 
Taylor - """ - - # These are various properties for the strategy - name = "Random Tit for Tat" - classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def __init__(self, p: float = 0.5) -> None: - """ - Parameters - ---------- - p, float - The probability to cooperate - """ - super().__init__() - self.p = p - self.act_random = False - if p in [0, 1]: - self.classifier["stochastic"] = False - - def strategy(self, opponent: IpdPlayer) -> Action: - """This is the actual strategy""" - if not self.history: - return C - - if self.act_random: - self.act_random = False - return random_choice(self.p) - - self.act_random = True - return opponent.history[-1] diff --git a/axelrod/ipd/strategies/verybad.py b/axelrod/ipd/strategies/verybad.py deleted file mode 100644 index ecfb5bf72..000000000 --- a/axelrod/ipd/strategies/verybad.py +++ /dev/null @@ -1,52 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer - -C, D = Action.C, Action.D - - -class VeryBad(IpdPlayer): - """ - It cooperates in the first three rounds, and uses probability - (it implements a memory, which stores the opponent’s moves) to decide for - cooperating or defecting. - Due to a lack of information as to what that probability refers to in this - context, probability(P(X)) refers to (Count(X)/Total_Moves) in this - implementation - P(C) = Cooperations / Total_Moves - P(D) = Defections / Total_Moves = 1 - P(C) - - Names: - - - VeryBad: [Andre2013]_ - """ - - name = "VeryBad" - classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - @staticmethod - def strategy(opponent: IpdPlayer) -> Action: - total_moves = len(opponent.history) - - if total_moves < 3: - return C - - cooperations = opponent.cooperations - - cooperation_probability = cooperations / total_moves - - if cooperation_probability > 0.5: - return C - - elif cooperation_probability < 0.5: - return D - - else: - return opponent.history[-1] diff --git a/axelrod/ipd/strategies/worse_and_worse.py b/axelrod/ipd/strategies/worse_and_worse.py deleted file mode 100644 index 542a2edb2..000000000 --- a/axelrod/ipd/strategies/worse_and_worse.py +++ /dev/null @@ -1,126 +0,0 @@ -from axelrod.ipd.action import Action -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.random_ import random_choice - -C, D = Action.C, Action.D - - -class WorseAndWorse(IpdPlayer): - """ - Defects with probability of 'current turn / 1000'. Therefore - it is more and more likely to defect as the round goes on. - - Source code available at the download tab of [Prison1998]_ - - - Names: - - Worse and Worse: [Prison1998]_ - """ - - name = "Worse and Worse" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - current_round = len(self.history) + 1 - probability = 1 - current_round / 1000 - return random_choice(probability) - - -class KnowledgeableWorseAndWorse(IpdPlayer): - """ - This strategy is based on 'Worse And Worse' but will defect with probability - of 'current turn / total no. of turns'. 
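The defection probabilities in this family are simple functions of the turn counter. Mirroring WorseAndWorse's rule (a sketch of the arithmetic only):

def worse_and_worse_cooperation_probability(current_round):
    # WorseAndWorse passes 1 - current_round / 1000 to random_choice, so
    # cooperation decays linearly over the first 1000 rounds.
    return 1 - current_round / 1000

print(worse_and_worse_cooperation_probability(1))    # 0.999
print(worse_and_worse_cooperation_probability(500))  # 0.5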
- - Names: - - Knowledgeable Worse and Worse: Original name by Adam Pohl - """ - - name = "Knowledgeable Worse and Worse" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(["length"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - current_round = len(self.history) + 1 - expected_length = self.match_attributes["length"] - probability = 1 - current_round / expected_length - return random_choice(probability) - - -class WorseAndWorse2(IpdPlayer): - """ - Plays as tit for tat during the first 20 moves. - Then defects with probability (current turn - 20) / current turn. - Therefore it is more and more likely to defect as the round goes on. - - Names: - - Worse and Worse 2: [Prison1998]_ - """ - - name = "Worse and Worse 2" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - current_round = len(self.history) + 1 - - if current_round == 1: - return C - elif current_round <= 20: - return opponent.history[-1] - else: - probability = 20 / current_round - return random_choice(probability) - - -class WorseAndWorse3(IpdPlayer): - """ - Cooperates in the first turn. - Then defects with probability no. of opponent defects / (current turn - 1). - Therefore it is more likely to defect when the opponent defects for a larger - proportion of the turns. - - Names: - - Worse and Worse 3: [Prison1998]_ - """ - - name = "Worse and Worse 3" - classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def strategy(self, opponent: IpdPlayer) -> Action: - current_round = len(self.history) + 1 - - if current_round == 1: - return C - else: - probability = 1 - opponent.defections / (current_round - 1) - return random_choice(probability) diff --git a/axelrod/ipd/strategies/zero_determinant.py b/axelrod/ipd/strategies/zero_determinant.py deleted file mode 100644 index f7287f3d9..000000000 --- a/axelrod/ipd/strategies/zero_determinant.py +++ /dev/null @@ -1,256 +0,0 @@ -from axelrod.ipd.action import Action - -from .memoryone import MemoryOnePlayer - -C, D = Action.C, Action.D - - -class LRPlayer(MemoryOnePlayer): - """ - Abstraction for Linear Relation players. These players enforce a linear - difference in stationary payoffs :math:`s (S_{xy} - l) = S_{yx} - l.` - - The parameter :math:`s` is called the slope and the parameter :math:`l` the - baseline payoff. For extortionate strategies, the extortion factor - :math:`\chi` is the inverse of the slope :math:`s`. - - For the standard prisoner's dilemma where :math:`T > R > P > S` and - :math:`R > (T + S) / 2 > P`, a pair :math:`(l, s)` is enforceable iff - - .. math:: - :nowrap: - - \\begin{eqnarray} - &P &<= l <= R \\\\ - &s_{min} &= -\min\\left( \\frac{T - l}{l - S}, \\frac{l - S}{T - l}\\right) <= s <= 1 - \\end{eqnarray} - - And also that there exists :math:`\\phi` such that - - .. 
math::
-        :nowrap:
-
-        \\begin{eqnarray}
-        p_1 &= P(C|CC) &= 1 - \\phi (1 - s)(R - l) \\\\
-        p_2 &= P(C|CD) &= 1 - \\phi (s(l - S) + (T - l)) \\\\
-        p_3 &= P(C|DC) &= \\phi ((l - S) + s(T - l)) \\\\
-        p_4 &= P(C|DD) &= \\phi (1 - s)(l - P)
-        \\end{eqnarray}
-
-
-    These conditions also force :math:`\\phi >= 0`. For a given pair :math:`(l, s)`
-    there may be multiple such :math:`\\phi`.
-
-    This parameterization is Equation 14 in [Hilbe2013]_.
-    See Figure 2 of the article for a more in-depth explanation. Other game
-    parameters can alter the relations and bounds above.
-
-    Names:
-
-    - Linear Relation player: [Hilbe2013]_
-    """
-
-    name = "LinearRelation"
-    classifier = {
-        "memory_depth": 1,  # Memory-one Four-Vector
-        "stochastic": True,
-        "makes_use_of": set(["game"]),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def __init__(self, phi: float = 0.2, s: float = 0.1, l: float = 1) -> None:
-        """
-        Parameters
-
-        phi, s, l: floats
-            Parameters determining the four_vector of the LR player.
-        """
-        self.phi = phi
-        self.s = s
-        self.l = l
-        super().__init__()
-
-    def set_initial_four_vector(self, four_vector):
-        pass
-
-    def receive_match_attributes(self):
-        """
-        Parameters
-
-        phi, s, l: floats
-            Parameters used to compute the four-vector according to the
-            parameterization of the strategies below.
-        """
-
-        R, P, S, T = self.match_attributes["game"].RPST()
-        l = self.l
-        phi = self.phi
-        s = self.s
-
-        # Check parameters
-        s_min = -min((T - l) / (l - S), (l - S) / (T - l))
-        if (l < P) or (l > R) or (s > 1) or (s < s_min):
-            raise ValueError
-
-        p1 = 1 - phi * (1 - s) * (R - l)
-        p2 = 1 - phi * (s * (l - S) + (T - l))
-        p3 = phi * ((l - S) + s * (T - l))
-        p4 = phi * (1 - s) * (l - P)
-
-        four_vector = [p1, p2, p3, p4]
-        self.set_four_vector(four_vector)
-
-
-class ZDExtortion(LRPlayer):
-    """
-    An example ZD Extortion player.
-
-    Names:
-
-    - ZDExtortion: [Roemheld2013]_
-    """
-
-    name = "ZD-Extortion"
-
-    def __init__(self, phi: float = 0.2, s: float = 0.1, l: float = 1) -> None:
-        super().__init__(phi, s, l)
-
-
-class ZDExtort2(LRPlayer):
-    """
-    An Extortionate Zero Determinant Strategy with l=P.
-
-    Names:
-
-    - Extort-2: [Stewart2012]_
-    """
-
-    name = "ZD-Extort-2"
-
-    def __init__(self, phi: float = 1 / 9, s: float = 0.5) -> None:
-        # l = P will be set by receive_match_attributes
-        super().__init__(phi, s, None)
-
-    def receive_match_attributes(self):
-        (R, P, S, T) = self.match_attributes["game"].RPST()
-        self.l = P
-        super().receive_match_attributes()
-
-
-class ZDExtort2v2(LRPlayer):
-    """
-    An Extortionate Zero Determinant Strategy with l=1.
-
-
-    Names:
-
-    - EXTORT2: [Kuhn2017]_
-    """
-
-    name = "ZD-Extort-2 v2"
-
-    def __init__(self, phi: float = 1 / 8, s: float = 0.5, l: float = 1) -> None:
-        super().__init__(phi, s, l)
-
-
-class ZDExtort3(LRPlayer):
-    """
-    An extortionate strategy from Press and Dyson's paper with an extortion
-    factor of 3.
-
-    Names:
-
-    - ZDExtort3: Original name by Marc Harper
-    - Unnamed: [Press2012]_
-    """
-
-    name = "ZD-Extort3"
-
-    def __init__(self, phi: float = 3 / 26, s: float = 1 / 3, l: float = 1) -> None:
-        super().__init__(phi, s, l)
-
-
-class ZDExtort4(LRPlayer):
-    """
-    An Extortionate Zero Determinant Strategy with l=1, s=1/4.
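As a quick sanity check of this parameterization, the four-vector of ZD-Extort-2 (defined above, with l = P) can be recomputed by hand. A sketch assuming the conventional payoffs (R, P, S, T) = (3, 1, 0, 5):

    # Recompute ZD-Extort-2's memory-one four-vector from (phi, s, l).
    R, P, S, T = 3, 1, 0, 5
    phi, s, l = 1 / 9, 0.5, P  # ZD-Extort-2 sets l = P
    p1 = 1 - phi * (1 - s) * (R - l)
    p2 = 1 - phi * (s * (l - S) + (T - l))
    p3 = phi * ((l - S) + s * (T - l))
    p4 = phi * (1 - s) * (l - P)
    # The published Extort-2 vector is (8/9, 1/2, 1/3, 0).
    expected = (8 / 9, 0.5, 1 / 3, 0.0)
    assert [round(p, 10) for p in (p1, p2, p3, p4)] == [round(x, 10) for x in expected]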
TFT is the - other extreme (with l=3, s=1) - - - Names: - - - Extort 4: Original name by Marc Harper - """ - - name = "ZD-Extort-4" - - def __init__(self, phi: float = 4 / 17, s: float = 0.25, l: float = 1) -> None: - super().__init__(phi, s, l) - - -class ZDGen2(LRPlayer): - """ - A Generous Zero Determinant Strategy with l=3. - - Names: - - - GEN2: [Kuhn2017]_ - """ - - name = "ZD-GEN-2" - - def __init__(self, phi: float = 1 / 8, s: float = 0.5, l: float = 3) -> None: - super().__init__(phi, s, l) - - -class ZDGTFT2(LRPlayer): - """ - A Generous Zero Determinant Strategy with l=R. - - Names: - - - ZDGTFT-2: [Stewart2012]_ - """ - - name = "ZD-GTFT-2" - - def __init__(self, phi: float = 0.25, s: float = 0.5) -> None: - # l = R will be set by receive_match_attributes - super().__init__(phi, s, None) - - def receive_match_attributes(self): - (R, P, S, T) = self.match_attributes["game"].RPST() - self.l = R - super().receive_match_attributes() - - -class ZDMischief(LRPlayer): - """ - An example ZD Mischief player. - - Names: - - - ZDMischief: [Roemheld2013]_ - """ - - name = "ZD-Mischief" - - def __init__(self, phi: float = 0.1, s: float = 0.0, l: float = 1) -> None: - super().__init__(phi, s, l) - - -class ZDSet2(LRPlayer): - """ - A Generous Zero Determinant Strategy with l=2. - - Names: - - - SET2: [Kuhn2017]_ - """ - - name = "ZD-SET-2" - - def __init__(self, phi: float = 1 / 4, s: float = 0.0, l: float = 2) -> None: - super().__init__(phi, s, l) diff --git a/axelrod/ipd/strategy_transformers.py b/axelrod/ipd/strategy_transformers.py deleted file mode 100644 index 3f8abe8f0..000000000 --- a/axelrod/ipd/strategy_transformers.py +++ /dev/null @@ -1,679 +0,0 @@ -""" -Strategy Transformers -- class decorators that transform the behavior of any -strategy. - -See the various Meta strategies for another type of transformation. -""" - -from collections import Iterable -import copy -import inspect -from importlib import import_module -import random -from typing import Any - -from numpy.random import choice - -from axelrod.ipd.strategies.sequence_player import SequencePlayer -from .action import Action -from .player import IpdPlayer -from .random_ import random_choice - -C, D = Action.C, Action.D - -# Note: After a transformation is applied, the player's history is overwritten -# with the modified history just like in the noisy tournament case. This can -# lead to unexpected behavior, such as when FlipTransform is applied to -# Alternator. - - -def StrategyTransformerFactory(strategy_wrapper, name_prefix=None, reclassifier=None): - """Modify an existing strategy dynamically by wrapping the strategy - method with the argument `strategy_wrapper`. - - Parameters - ---------- - strategy_wrapper: function - A function of the form `strategy_wrapper(player, opponent, proposed_action, *args, **kwargs)` - Can also use a class that implements - def __call__(self, player, opponent, action) - name_prefix: string, "Transformed " - A string to prepend to the strategy and class name - reclassifier: function, - A function which will update the classifier of the strategy being - transformed - """ - - # Create a class that applies a wrapper function to the strategy method - # of a given class. We use a class here instead of a function so that the - # decorator can have arguments. 
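Before the class machinery below, a usage sketch may help: a factory-built transformer is instantiated with its arguments and then applied to a player class. The import path follows the upstream library and is an assumption while this series moves modules around:

    import axelrod as axl
    from axelrod.strategy_transformers import FlipTransformer

    # FlipTransformer (defined later in this module) flips every proposed
    # action, so a flipped Cooperator behaves like a Defector.
    FlippedCooperator = FlipTransformer()(axl.Cooperator)
    player = FlippedCooperator()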
-
-    class Decorator(object):
-        def __init__(self, *args, **kwargs):
-            self.args = args
-            self.kwargs = kwargs
-            if "name_prefix" in kwargs:
-                self.name_prefix = kwargs["name_prefix"]
-            else:
-                self.name_prefix = name_prefix
-
-        def __reduce__(self):
-            """Gives instructions on how to pickle the Decorator object."""
-            factory_args = (strategy_wrapper, name_prefix, reclassifier)
-            return (
-                DecoratorReBuilder(),
-                (factory_args, self.args, self.kwargs, self.name_prefix),
-            )
-
-        def __call__(self, PlayerClass):
-            """
-            Parameters
-            ----------
-            PlayerClass: A subclass of IpdPlayer, e.g. Cooperator
-                The IpdPlayer Class to modify
-
-            Returns
-            -------
-            new_class, class object
-                A class object that can create instances of the modified
-                PlayerClass
-            """
-
-            args = self.args
-            kwargs = self.kwargs
-            try:
-                # If "name_prefix" in kwargs remove as only want decorator
-                # arguments
-                del kwargs["name_prefix"]
-            except KeyError:
-                pass
-            try:
-                del kwargs["reclassifier"]
-            except KeyError:
-                pass
-
-            # Define the new strategy method, wrapping the existing method
-            # with `strategy_wrapper`
-            def strategy(self, opponent):
-                if strategy_wrapper == dual_wrapper:
-                    # dual_wrapper figures out strategy as if the IpdPlayer had
-                    # played the opposite actions of its current history.
-                    self._history = self.history.flip_plays()
-
-                if is_strategy_static(PlayerClass):
-                    proposed_action = PlayerClass.strategy(opponent)
-                else:
-                    proposed_action = PlayerClass.strategy(self, opponent)
-
-                if strategy_wrapper == dual_wrapper:
-                    # After dual_wrapper calls the strategy, it returns
-                    # the IpdPlayer to its original state.
-                    self._history = self.history.flip_plays()
-
-                # Apply the wrapper
-                return strategy_wrapper(
-                    self, opponent, proposed_action, *args, **kwargs
-                )
-
-            # Modify the PlayerClass name
-            new_class_name = PlayerClass.__name__
-            name = PlayerClass.name
-            name_prefix = self.name_prefix
-            if name_prefix:
-                # Modify the IpdPlayer name (class variable inherited from IpdPlayer)
-                new_class_name = "".join([name_prefix, PlayerClass.__name__])
-                # Modify the IpdPlayer name (class variable inherited from IpdPlayer)
-                name = " ".join([name_prefix, PlayerClass.name])
-
-            original_classifier = copy.deepcopy(PlayerClass.classifier)  # Copy
-            if reclassifier is not None:
-                classifier = reclassifier(original_classifier, *args, **kwargs)
-            else:
-                classifier = original_classifier
-
-            # Define the new __repr__ method to add the wrapper arguments
-            # at the end of the name
-            def __repr__(self):
-                name = PlayerClass.__repr__(self)
-                # append any transformer arguments to the name
-                prefix = ": "
-                for arg in args:
-                    try:
-                        # Action has .name but should not be made into a list
-                        if not any(isinstance(el, Action) for el in arg):
-                            arg = [player.name for player in arg]
-                    except AttributeError:
-                        pass
-                    except TypeError:
-                        pass
-                    name = "".join([name, prefix, str(arg)])
-                    prefix = ", "
-                return name
-
-            def reduce_for_decorated_class(self_):
-                """__reduce__ function for decorated class.
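This __reduce__ plumbing is what lets transformed players survive a pickle round trip. A sketch of the behaviour it provides, with the upstream import path assumed:

    import pickle

    import axelrod as axl
    from axelrod.strategy_transformers import FlipTransformer

    player = FlipTransformer()(axl.Cooperator)()
    clone = pickle.loads(pickle.dumps(player))
    assert isinstance(clone, player.__class__)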
Ensures that any - decorated class can be correctly pickled.""" - class_module = import_module(self_.__module__) - import_name = self_.__class__.__name__ - - if player_can_be_pickled(self_): - return self_.__class__, (), self_.__dict__ - - decorators = [] - state = self_.__dict__ - for class_ in self_.__class__.mro(): - import_name = class_.__name__ - if hasattr(class_, "decorator"): - decorators.insert(0, class_.decorator) - if hasattr(class_module, import_name): - # Sequence players are not directly pickleable so we need to call __getstate__ - state = class_.__getstate__(self_) - break - - return ( - StrategyReBuilder(), - (decorators, import_name, self_.__module__), - state, - ) - - # Define a new class and wrap the strategy method - # Dynamically create the new class - new_class = type( - new_class_name, - (PlayerClass,), - { - "name": name, - "original_class": PlayerClass, - "strategy": strategy, - "decorator": self, - "__repr__": __repr__, - "__module__": PlayerClass.__module__, - "classifier": classifier, - "__doc__": PlayerClass.__doc__, - "__reduce__": reduce_for_decorated_class, - }, - ) - - return new_class - - return Decorator - - -def player_can_be_pickled(player: IpdPlayer) -> bool: - """ - Returns True if pickle.dump(player) does not raise pickle.PicklingError. - """ - class_module = import_module(player.__module__) - import_name = player.__class__.__name__ - if not hasattr(class_module, import_name): - return False - # Sequence players are pickleable but not directly so (particularly if decorated). - if issubclass(player.__class__, SequencePlayer): - return False - - to_test = getattr(class_module, import_name) - return to_test == player.__class__ - - -def is_strategy_static(player_class) -> bool: - """ - Returns True if `player_class.strategy` is a `staticmethod`, else False. - """ - for class_ in player_class.mro(): - method = inspect.getattr_static(class_, "strategy", default=None) - if method is not None: - return isinstance(method, staticmethod) - - -class DecoratorReBuilder(object): - """ - An object to build an anonymous Decorator obj from a set of pickle-able - parameters. - """ - - def __call__( - self, factory_args: tuple, args: tuple, kwargs: dict, instance_name_prefix: str - ) -> Any: - - decorator_class = StrategyTransformerFactory(*factory_args) - kwargs["name_prefix"] = instance_name_prefix - return decorator_class(*args, **kwargs) - - -class StrategyReBuilder(object): - """ - An object to build a new instance of a player from an old instance - that could not normally be pickled. - """ - - def __call__(self, decorators: list, import_name: str, module_name: str) -> IpdPlayer: - - module_ = import_module(module_name) - import_class = getattr(module_, import_name) - - if hasattr(import_class, "decorator"): - return import_class() - else: - generated_class = import_class - for decorator in decorators: - generated_class = decorator(generated_class) - return generated_class() - - -def compose_transformers(t1, t2): - """Compose transformers without having to invoke the first on - a PlayerClass.""" - - class Composition(object): - def __init__(self): - self.t1 = t1 - self.t2 = t2 - - def __call__(self, PlayerClass): - return t1(t2(PlayerClass)) - - return Composition() - - -def generic_strategy_wrapper(player, opponent, proposed_action, *args, **kwargs): - """ - Strategy wrapper functions should be of the following form. 
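A usage sketch for compose_transformers above, assuming the upstream import path; when the composed object is called on a player class, t2 is applied first and t1 second:

    import axelrod as axl
    from axelrod.strategy_transformers import (
        FinalTransformer,
        InitialTransformer,
        compose_transformers,
    )

    C, D = axl.Action.C, axl.Action.D

    # Equivalent to FinalTransformer([D, D])(InitialTransformer([C, C])(cls)).
    combined = compose_transformers(
        FinalTransformer([D, D]), InitialTransformer([C, C])
    )
    Player = combined(axl.Cooperator)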
- - Parameters - ---------- - player: IpdPlayer object or subclass (self) - opponent: IpdPlayer object or subclass - proposed_action: an axelrod.Action, C or D - The proposed action by the wrapped strategy - proposed_action = IpdPlayer.strategy(...) - args, kwargs: - Any additional arguments that you need. - - Returns - ------- - action: an axelrod.Action, C or D - - """ - - # This example just passes through the proposed_action - return proposed_action - - -IdentityTransformer = StrategyTransformerFactory(generic_strategy_wrapper) - - -def flip_wrapper(player, opponent, action): - """Flips the player's original actions.""" - return action.flip() - - -FlipTransformer = StrategyTransformerFactory(flip_wrapper, name_prefix="Flipped") - - -def dual_wrapper(player, opponent: IpdPlayer, proposed_action: Action) -> Action: - """Wraps the players strategy function to produce the Dual. - - The Dual of a strategy will return the exact opposite set of moves to the - original strategy when both are faced with the same history. - - A formal definition can be found in [Ashlock2010]_. - http://doi.org/10.1109/ITW.2010.5593352 - - Parameters - ---------- - player: IpdPlayer object or subclass (self) - opponent: IpdPlayer object or subclass - proposed_action: axelrod.Action, C or D - The proposed action by the wrapped strategy - - Returns - ------- - action: an axelrod.Action, C or D - """ - - # dual_wrapper is a special case. The work of flip_play_attributes(player) - # is done in the strategy of the new PlayerClass created by DualTransformer. - # The DualTransformer is dynamically created in StrategyTransformerFactory. - - return proposed_action.flip() - - -DualTransformer = StrategyTransformerFactory(dual_wrapper, name_prefix="Dual") - - -def noisy_wrapper(player, opponent, action, noise=0.05): - """Flips the player's actions with probability: `noise`.""" - r = random.random() - if r < noise: - return action.flip() - return action - - -def noisy_reclassifier(original_classifier, noise): - """Function to reclassify the strategy""" - if noise not in (0, 1): - original_classifier["stochastic"] = True - return original_classifier - - -NoisyTransformer = StrategyTransformerFactory( - noisy_wrapper, name_prefix="Noisy", reclassifier=noisy_reclassifier -) - - -def forgiver_wrapper(player, opponent, action, p): - """If a strategy wants to defect, flip to cooperate with the given - probability.""" - if action == D: - return random_choice(p) - return C - - -def forgiver_reclassifier(original_classifier, p): - """Function to reclassify the strategy""" - if p not in (0, 1): - original_classifier["stochastic"] = True - return original_classifier - - -ForgiverTransformer = StrategyTransformerFactory( - forgiver_wrapper, name_prefix="Forgiving", reclassifier=forgiver_reclassifier -) - - -def nice_wrapper(player, opponent, action): - """Makes sure that the player doesn't defect unless the opponent has already - defected.""" - if action == D: - if opponent.defections == 0: - return C - return action - - -NiceTransformer = StrategyTransformerFactory(nice_wrapper, name_prefix="Nice") - - -def initial_sequence(player, opponent, action, initial_seq): - """Play the moves in `seq` first (must be a list), ignoring the strategy's - moves until the list is exhausted.""" - - index = len(player.history) - if index < len(initial_seq): - return initial_seq[index] - return action - - -def initial_reclassifier(original_classifier, initial_seq): - """ - If needed this extends the memory depth to be the length of the initial - sequence 
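A short usage sketch for the initial-sequence wrapper (upstream import path assumed; the class name is illustrative):

    import axelrod as axl
    from axelrod.strategy_transformers import InitialTransformer

    C, D = axl.Action.C, axl.Action.D

    # A Defector that opens with two cooperations, then plays as usual.
    PoliteDefector = InitialTransformer([C, C])(axl.Defector)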
-    """
-    original_classifier["memory_depth"] = max(
-        len(initial_seq), original_classifier["memory_depth"]
-    )
-    return original_classifier
-
-
-InitialTransformer = StrategyTransformerFactory(
-    initial_sequence, name_prefix="Initial", reclassifier=initial_reclassifier
-)
-
-
-def final_sequence(player, opponent, action, seq):
-    """Play the moves in `seq` over the final rounds of the match, overriding
-    the strategy's own moves once only `len(seq)` turns remain."""
-
-    length = player.match_attributes["length"]
-
-    if length < 0:  # default is -1
-        return action
-
-    index = length - len(player.history)
-    # If for some reason we've overrun the expected game length, just pass
-    # the intended action through
-    if len(player.history) >= length:
-        return action
-    # Check if we're near the end and need to start passing the actions
-    # from seq for the final few rounds.
-    if index <= len(seq):
-        return seq[-index]
-    return action
-
-
-def final_reclassifier(original_classifier, seq):
-    """Reclassify the strategy"""
-    original_classifier["makes_use_of"].update(["length"])
-    original_classifier["memory_depth"] = max(
-        len(seq), original_classifier["memory_depth"]
-    )
-    return original_classifier
-
-
-FinalTransformer = StrategyTransformerFactory(
-    final_sequence, name_prefix="Final", reclassifier=final_reclassifier
-)
-
-
-def history_track_wrapper(player, opponent, action):
-    """Wrapper to track a player's history in a variable `._recorded_history`."""
-    try:
-        player._recorded_history.append(action)
-    except AttributeError:
-        player._recorded_history = [action]
-    return action
-
-
-TrackHistoryTransformer = StrategyTransformerFactory(
-    history_track_wrapper, name_prefix="HistoryTracking"
-)
-
-
-def deadlock_break_wrapper(player, opponent, action):
-    """Detect and attempt to break deadlocks by cooperating."""
-    if len(player.history) < 2:
-        return action
-    last_round = (player.history[-1], opponent.history[-1])
-    penultimate_round = (player.history[-2], opponent.history[-2])
-    if (penultimate_round, last_round) == ((C, D), (D, C)) or (
-        penultimate_round,
-        last_round,
-    ) == ((D, C), (C, D)):
-        # attempt to break deadlock by Cooperating
-        return C
-    return action
-
-
-DeadlockBreakingTransformer = StrategyTransformerFactory(
-    deadlock_break_wrapper, name_prefix="DeadlockBreaking"
-)
-
-
-def grudge_wrapper(player, opponent, action, grudges):
-    """After `grudges` defections, defect forever."""
-    if opponent.defections > grudges:
-        return D
-    return action
-
-
-GrudgeTransformer = StrategyTransformerFactory(grudge_wrapper, name_prefix="Grudging")
-
-
-def apology_wrapper(player, opponent, action, myseq, opseq):
-    length = len(myseq)
-    if len(player.history) < length:
-        return action
-    if (myseq == player.history[-length:]) and (opseq == opponent.history[-length:]):
-        return C
-    return action
-
-
-ApologyTransformer = StrategyTransformerFactory(
-    apology_wrapper, name_prefix="Apologizing"
-)
-
-
-def mixed_wrapper(player, opponent, action, probability, m_player):
-    """Randomly picks a strategy to play, either from a distribution on a list
-    of players or a single player.
-
-    In essence creating a mixed strategy.
-
-    Parameters
-    ----------
-
-    probability: a float (or integer: 0 or 1) OR an iterable representing an
-        incomplete probability distribution (entries do not have to sum to
-        1). Eg: 0, 1, [.5,.5], (.5,.3)
-    m_player: a single player class or iterable representing a set of player
-        classes to mix from.
-        Eg: axelrod.TitForTat, [axelrod.Cooperator, axelrod.Defector]
-    """
-
-    # If a single probability, player is passed
-    if isinstance(probability, float) or isinstance(probability, int):
-        m_player = [m_player]
-        probability = [probability]
-
-    # If a probability distribution, players is passed
-    if isinstance(probability, Iterable) and isinstance(
-        m_player, Iterable
-    ):
-        mutate_prob = sum(probability)  # Prob of mutation
-        if mutate_prob > 0:
-            # Distribution of choice of mutation:
-            normalised_prob = [prob / mutate_prob for prob in probability]
-            if random.random() < mutate_prob:
-                p = choice(list(m_player), p=normalised_prob)()
-                p._history = player._history
-                return p.strategy(opponent)
-
-    return action
-
-
-def mixed_reclassifier(original_classifier, probability, m_player):
-    """Function to reclassify the strategy"""
-    # If a single probability, player is passed
-    if isinstance(probability, float) or isinstance(probability, int):
-        m_player = [m_player]
-        probability = [probability]
-
-    if min(probability) == max(probability) == 0:  # No probability given
-        return original_classifier
-
-    if 1 in probability:  # If all probability given to one player
-        player = m_player[probability.index(1)]
-        original_classifier["stochastic"] = player.classifier["stochastic"]
-        return original_classifier
-
-    # Otherwise: stochastic.
-    original_classifier["stochastic"] = True
-    return original_classifier
-
-
-MixedTransformer = StrategyTransformerFactory(
-    mixed_wrapper, name_prefix="Mutated", reclassifier=mixed_reclassifier
-)
-
-
-def joss_ann_wrapper(player, opponent, proposed_action, probability):
-    """Wraps the player's strategy function to produce the Joss-Ann.
-
-    The Joss-Ann of a strategy is a new strategy which has a probability of
-    choosing the move C, a probability of choosing the move D, and otherwise
-    uses the response appropriate to the original strategy.
-
-    A formal definition can be found in [Ashlock2010]_.
-    http://doi.org/10.1109/ITW.2010.5593352
-
-    Parameters
-    ----------
-
-    player: IpdPlayer object or subclass (self)
-    opponent: IpdPlayer object or subclass
-    proposed_action: axelrod.Action, C or D
-        The proposed action by the wrapped strategy
-    probability: tuple
-        a tuple or list representing a probability distribution of playing move
-        C or D (doesn't have to be complete) ie. (0, 1) or (0.2, 0.3)
-
-    Returns
-    -------
-    action: an axelrod.Action, C or D
-    """
-    if sum(probability) > 1:
-        probability = tuple([i / sum(probability) for i in probability])
-
-    remaining_probability = max(0, 1 - probability[0] - probability[1])
-    probability += (remaining_probability,)
-    options = [C, D, proposed_action]
-    action = choice(options, p=probability)
-    return action
-
-
-def jossann_reclassifier(original_classifier, probability):
-    """
-    Reclassify: note that if probabilities are (0, 1) or (1, 0) then we override
-    the original classifier.
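A usage sketch mirroring the Joss-Ann docstring above (upstream import path assumed):

    import axelrod as axl
    from axelrod.strategy_transformers import JossAnnTransformer

    # Plays C with probability 0.2, D with probability 0.3, and otherwise
    # TitForTat's own move (the remaining 0.5).
    JossAnnTFT = JossAnnTransformer((0.2, 0.3))(axl.TitForTat)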
- """ - if sum(probability) > 1: - probability = tuple([i / sum(probability) for i in probability]) - - if probability in [(1, 0), (0, 1)]: - original_classifier["stochastic"] = False - elif sum(probability) != 0: - original_classifier["stochastic"] = True - - return original_classifier - - -JossAnnTransformer = StrategyTransformerFactory( - joss_ann_wrapper, name_prefix="Joss-Ann", reclassifier=jossann_reclassifier -) - - -# Strategy wrappers as classes - - -class RetaliationWrapper(object): - """Retaliates `retaliations` times after a defection (cumulative).""" - - def __call__(self, player, opponent, action, retaliations): - if len(player.history) == 0: - self.retaliation_count = 0 - return action - if opponent.history[-1] == D: - self.retaliation_count += retaliations - 1 - return D - if self.retaliation_count == 0: - return action - if self.retaliation_count > 0: - self.retaliation_count -= 1 - return D - - -RetaliationTransformer = StrategyTransformerFactory( - RetaliationWrapper(), name_prefix="Retaliating" -) - - -class RetaliationUntilApologyWrapper(object): - """Enforces the TFT rule that the opponent pay back a defection with a - cooperation for the player to stop defecting.""" - - def __call__(self, player, opponent, action): - if len(player.history) == 0: - self.is_retaliating = False - return action - if opponent.history[-1] == D: - self.is_retaliating = True - if self.is_retaliating: - if opponent.history[-1] == C: - self.is_retaliating = False - return C - return D - return action - - -RetaliateUntilApologyTransformer = StrategyTransformerFactory( - RetaliationUntilApologyWrapper(), name_prefix="RUA" -) diff --git a/axelrod/ipd/tests/__init__.py b/axelrod/ipd/tests/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/axelrod/ipd/tests/integration/__init__.py b/axelrod/ipd/tests/integration/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/axelrod/ipd/tests/integration/test_filtering.py b/axelrod/ipd/tests/integration/test_filtering.py deleted file mode 100644 index da6a0f24c..000000000 --- a/axelrod/ipd/tests/integration/test_filtering.py +++ /dev/null @@ -1,124 +0,0 @@ -import unittest -import warnings - -import axelrod as axl -from axelrod.ipd.tests.property import strategy_lists - -from hypothesis import example, given, settings -from hypothesis.strategies import integers - - -class TestFiltersAgainstComprehensions(unittest.TestCase): - """ - Test that the results of filtering strategies via a filterset dict - match the results from using a list comprehension. 
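The equivalence these tests assert can be reproduced directly at a REPL. A sketch using the public classifier and filtering API:

    import axelrod as axl

    # Filtering via a filterset dict should match a plain comprehension.
    comprehension = set(filter(axl.Classifiers["stochastic"], axl.strategies))
    filtered = set(
        axl.filtered_strategies({"stochastic": True}, strategies=axl.strategies)
    )
    assert comprehension == filtered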
- """ - - def setUp(self) -> None: - # Ignore warnings about classifiers running on instances - warnings.simplefilter("ignore", category=UserWarning) - - def tearDown(self) -> None: - warnings.simplefilter("default", category=UserWarning) - - @given(strategies=strategy_lists(min_size=20, max_size=20)) - def test_boolean_filtering(self, strategies): - - classifiers = [ - "stochastic", - "long_run_time", - "manipulates_state", - "manipulates_source", - "inspects_source", - ] - - for classifier in classifiers: - comprehension = set(filter(axl.Classifiers[classifier], strategies)) - filterset = {classifier: True} - filtered = set(axl.filtered_strategies(filterset, strategies=strategies)) - self.assertEqual(comprehension, filtered) - - @given( - min_memory_depth=integers(min_value=1, max_value=10), - max_memory_depth=integers(min_value=1, max_value=10), - memory_depth=integers(min_value=1, max_value=10), - strategies=strategy_lists(min_size=20, max_size=20), - ) - @example( - min_memory_depth=float("inf"), - max_memory_depth=float("inf"), - memory_depth=float("inf"), - strategies=axl.short_run_time_strategies, - ) - @settings(max_examples=5) - def test_memory_depth_filtering( - self, min_memory_depth, max_memory_depth, memory_depth, strategies - ): - - min_comprehension = set( - [ - s - for s in strategies - if axl.Classifiers["memory_depth"](s) >= min_memory_depth - ] - ) - min_filterset = {"min_memory_depth": min_memory_depth} - min_filtered = set( - axl.filtered_strategies(min_filterset, strategies=strategies) - ) - self.assertEqual(min_comprehension, min_filtered) - - max_comprehension = set( - [ - s - for s in strategies - if axl.Classifiers["memory_depth"](s) <= max_memory_depth - ] - ) - max_filterset = {"max_memory_depth": max_memory_depth} - max_filtered = set( - axl.filtered_strategies(max_filterset, strategies=strategies) - ) - self.assertEqual(max_comprehension, max_filtered) - - comprehension = set( - [ - s - for s in strategies - if axl.Classifiers["memory_depth"](s) == memory_depth - ] - ) - filterset = {"memory_depth": memory_depth} - filtered = set(axl.filtered_strategies(filterset, strategies=strategies)) - self.assertEqual(comprehension, filtered) - - @given( - seed_=integers(min_value=0, max_value=4294967295), - strategies=strategy_lists(min_size=20, max_size=20), - ) - @settings(max_examples=5) - def test_makes_use_of_filtering(self, seed_, strategies): - """ - Test equivalent filtering using two approaches. - - This needs to be seeded as some players classification is random. 
- """ - classifiers = [["game"], ["length"], ["game", "length"]] - - for classifier in classifiers: - axl.seed(seed_) - comprehension = set( - [ - s - for s in strategies - if set(classifier).issubset(set(axl.Classifiers["makes_use_of"](s))) - ] - ) - - axl.seed(seed_) - filterset = {"makes_use_of": classifier} - filtered = set(axl.filtered_strategies(filterset, strategies=strategies)) - - self.assertEqual( - comprehension, filtered, msg="classifier: {}".format(classifier) - ) diff --git a/axelrod/ipd/tests/integration/test_matches.py b/axelrod/ipd/tests/integration/test_matches.py deleted file mode 100644 index 44251d233..000000000 --- a/axelrod/ipd/tests/integration/test_matches.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Tests for some expected match behaviours""" -import unittest - -import axelrod as axl -from axelrod.ipd.tests.property import strategy_lists - -from hypothesis import given, settings -from hypothesis.strategies import integers - -C, D = axl.Action.C, axl.Action.D - -deterministic_strategies = [ - s for s in axl.short_run_time_strategies if not axl.Classifiers["stochastic"](s()) -] -stochastic_strategies = [ - s for s in axl.short_run_time_strategies if axl.Classifiers["stochastic"](s()) -] - - -class TestMatchOutcomes(unittest.TestCase): - @given( - strategies=strategy_lists( - strategies=deterministic_strategies, min_size=2, max_size=2 - ), - turns=integers(min_value=1, max_value=20), - ) - @settings(max_examples=5) - def test_outcome_repeats(self, strategies, turns): - """A test that if we repeat 3 matches with deterministic and well - behaved strategies then we get the same result""" - players = [s() for s in strategies] - matches = [axl.IpdMatch(players, turns) for _ in range(3)] - self.assertEqual(matches[0].play(), matches[1].play()) - self.assertEqual(matches[1].play(), matches[2].play()) - - @given( - strategies=strategy_lists( - strategies=stochastic_strategies, min_size=2, max_size=2 - ), - turns=integers(min_value=1, max_value=20), - seed=integers(min_value=0, max_value=4294967295), - ) - @settings(max_examples=5) - def test_outcome_repeats_stochastic(self, strategies, turns, seed): - """a test to check that if a seed is set stochastic strategies give the - same result""" - results = [] - for _ in range(3): - axl.seed(seed) - players = [s() for s in strategies] - results.append(axl.IpdMatch(players, turns).play()) - - self.assertEqual(results[0], results[1]) - self.assertEqual(results[1], results[2]) - - def test_matches_with_det_player_for_stochastic_classes(self): - """A test based on a bug found in the cache. 
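The seeding discipline relied on throughout these tests can be shown in isolation. A sketch, assuming the IpdMatch name this patch series introduces:

    import axelrod as axl

    results = []
    for _ in range(2):
        axl.seed(5)  # identical seed => identical stochastic match
        players = (axl.Random(), axl.Random())
        results.append(axl.IpdMatch(players, turns=10).play())
    assert results[0] == results[1]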
- - See: https://github.com/Axelrod-Python/Axelrod/issues/779""" - p1 = axl.MemoryOnePlayer(four_vector=(0, 0, 0, 0)) - p2 = axl.MemoryOnePlayer(four_vector=(1, 0, 1, 0)) - p3 = axl.MemoryOnePlayer(four_vector=(1, 1, 1, 0)) - - m = axl.IpdMatch((p1, p2), turns=3) - self.assertEqual(m.play(), [(C, C), (D, C), (D, D)]) - - m = axl.IpdMatch((p2, p3), turns=3) - self.assertEqual(m.play(), [(C, C), (C, C), (C, C)]) - - m = axl.IpdMatch((p1, p3), turns=3) - self.assertEqual(m.play(), [(C, C), (D, C), (D, C)]) diff --git a/axelrod/ipd/tests/integration/test_names.py b/axelrod/ipd/tests/integration/test_names.py deleted file mode 100644 index 04745b778..000000000 --- a/axelrod/ipd/tests/integration/test_names.py +++ /dev/null @@ -1,13 +0,0 @@ -import unittest - -import axelrod as axl - - -class TestNames(unittest.TestCase): - def test_all_strategies_have_names(self): - names = [s.name for s in axl.all_strategies if s.name] - self.assertEqual(len(names), len(axl.all_strategies)) - - def test_all_names_are_unique(self): - names = set(s.name for s in axl.all_strategies) - self.assertEqual(len(names), len(axl.all_strategies)) diff --git a/axelrod/ipd/tests/integration/test_sample_tournaments.py b/axelrod/ipd/tests/integration/test_sample_tournaments.py deleted file mode 100644 index c8b08e2e8..000000000 --- a/axelrod/ipd/tests/integration/test_sample_tournaments.py +++ /dev/null @@ -1,70 +0,0 @@ -import unittest - -import axelrod as axl - -C, D = axl.Action.C, axl.Action.D - - -class TestSampleTournaments(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.game = axl.IpdGame() - - @classmethod - def get_test_outcome(cls, outcome, turns=10): - # Extract the name of players from the outcome tuples, - # and initiate the players by getting the classes from axelrod.ipd. - names = [out[0] for out in outcome] - players = [getattr(axl, n)() for n in names] - - # Play the tournament and build the actual outcome tuples. 
- tournament = axl.IpdTournament( - players=players, game=cls.game, turns=turns, repetitions=1 - ) - results = tournament.play(progress_bar=False) - scores = [score[0] for score in results.scores] - outcome = zip(names, scores) - - # Return the outcome sorted by score - return sorted(outcome, key=lambda k: k[1]) - - def test_defector_v_cooperator(self): - """Test: the defector viciously punishes the cooperator.""" - outcome = [("Cooperator", 0), ("Defector", 50)] - self.assertEqual(self.get_test_outcome(outcome), outcome) - - def test_defector_v_titfortat(self): - """Test: the defector does well against tit for tat.""" - outcome = [("TitForTat", 9), ("Defector", 14)] - self.assertEqual(self.get_test_outcome(outcome), outcome) - - def test_cooperator_v_titfortat(self): - """Test: the cooperator does very well WITH tit for tat.""" - outcome = [("Cooperator", 30), ("TitForTat", 30)] - self.assertEqual(self.get_test_outcome(outcome), outcome) - - def test_cooperator_v_titfortat_v_defector(self): - """Test: the defector dominates in this population.""" - outcome = [("Cooperator", 30), ("TitForTat", 39), ("Defector", 64)] - self.assertEqual(self.get_test_outcome(outcome), outcome) - - def test_cooperator_v_titfortat_v_defector_v_grudger(self): - """Test: tit for tat does better this time around.""" - outcome = [ - ("Cooperator", 60), - ("TitForTat", 69), - ("Grudger", 69), - ("Defector", 78), - ] - self.assertEqual(self.get_test_outcome(outcome), outcome) - - def test_cooperator_v_titfortat_v_defector_v_grudger_v_go_by_majority(self): - """Test: Tit for tat is doing a lot better.""" - outcome = [ - ("Cooperator", 90), - ("Defector", 92), - ("Grudger", 99), - ("GoByMajority", 99), - ("TitForTat", 99), - ] - self.assertEqual(self.get_test_outcome(outcome), outcome) diff --git a/axelrod/ipd/tests/integration/test_tournament.py b/axelrod/ipd/tests/integration/test_tournament.py deleted file mode 100644 index 8b46c3546..000000000 --- a/axelrod/ipd/tests/integration/test_tournament.py +++ /dev/null @@ -1,171 +0,0 @@ -import unittest - -import filecmp -import pathlib - -import axelrod as axl -from axelrod.ipd.load_data_ import axl_filename -from axelrod.ipd.strategy_transformers import FinalTransformer -from axelrod.ipd.tests.property import tournaments - -from hypothesis import given, settings - - -class TestTournament(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.game = axl.IpdGame() - cls.players = [ - axl.Cooperator(), - axl.TitForTat(), - axl.Defector(), - axl.Grudger(), - axl.GoByMajority(), - ] - cls.player_names = [str(p) for p in cls.players] - cls.test_name = "test" - cls.test_repetitions = 3 - - cls.expected_outcome = [ - ("Cooperator", [45, 45, 45]), - ("Defector", [52, 52, 52]), - ("Grudger", [49, 49, 49]), - ("Soft Go By Majority", [49, 49, 49]), - ("Tit For Tat", [49, 49, 49]), - ] - cls.expected_outcome.sort() - - @given( - tournaments( - strategies=axl.short_run_time_strategies, - min_size=10, - max_size=30, - min_turns=2, - max_turns=210, - min_repetitions=1, - max_repetitions=4, - ) - ) - @settings(max_examples=1) - def test_big_tournaments(self, tournament): - """A test to check that tournament runs with a sample of non-cheating - strategies.""" - path = pathlib.Path("../test_outputs/test_tournament.csv") - filename = axl_filename(path) - self.assertIsNone( - tournament.play(progress_bar=False, filename=filename, build_results=False) - ) - - def test_serial_play(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - 
game=self.game, - turns=5, - repetitions=self.test_repetitions, - ) - scores = tournament.play(progress_bar=False).scores - actual_outcome = sorted(zip(self.player_names, scores)) - self.assertEqual(actual_outcome, self.expected_outcome) - - def test_parallel_play(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=5, - repetitions=self.test_repetitions, - ) - scores = tournament.play(processes=2, progress_bar=False).scores - actual_outcome = sorted(zip(self.player_names, scores)) - self.assertEqual(actual_outcome, self.expected_outcome) - - def test_repeat_tournament_deterministic(self): - """A test to check that tournament gives same results.""" - deterministic_players = [ - s() - for s in axl.short_run_time_strategies - if not axl.Classifiers["stochastic"](s()) - ] - files = [] - for _ in range(2): - tournament = axl.IpdTournament( - name="test", - players=deterministic_players, - game=self.game, - turns=2, - repetitions=2, - ) - path = pathlib.Path("../test_outputs/stochastic_tournament_{}.csv".format(_)) - files.append(axl_filename(path)) - tournament.play(progress_bar=False, filename=files[-1], build_results=False) - self.assertTrue(filecmp.cmp(files[0], files[1])) - - def test_repeat_tournament_stochastic(self): - """ - A test to check that tournament gives same results when setting seed. - """ - files = [] - for _ in range(2): - axl.seed(0) - stochastic_players = [ - s() - for s in axl.short_run_time_strategies - if axl.Classifiers["stochastic"](s()) - ] - tournament = axl.IpdTournament( - name="test", - players=stochastic_players, - game=self.game, - turns=2, - repetitions=2, - ) - path = pathlib.Path("../test_outputs/stochastic_tournament_{}.csv".format(_)) - files.append(axl_filename(path)) - tournament.play(progress_bar=False, filename=files[-1], build_results=False) - self.assertTrue(filecmp.cmp(files[0], files[1])) - - -class TestNoisyTournament(unittest.TestCase): - def test_noisy_tournament(self): - # Defector should win for low noise - players = [axl.Cooperator(), axl.Defector()] - tournament = axl.IpdTournament(players, turns=5, repetitions=3, noise=0.0) - results = tournament.play(progress_bar=False) - self.assertEqual(results.ranked_names[0], "Defector") - - # If the noise is large enough, cooperator should win - players = [axl.Cooperator(), axl.Defector()] - tournament = axl.IpdTournament(players, turns=5, repetitions=3, noise=0.75) - results = tournament.play(progress_bar=False) - self.assertEqual(results.ranked_names[0], "Cooperator") - - -class TestProbEndTournament(unittest.TestCase): - def test_players_do_not_know_match_length(self): - """Create two players who should cooperate on last two turns if they - don't know when those last two turns are. 
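Matches in these prob_end tournaments end each turn with a fixed probability, so their lengths are geometrically distributed with mean 1/prob_end. A rough sketch, assuming IpdMatch accepts prob_end as the upstream Match does:

    import axelrod as axl

    axl.seed(0)
    lengths = [
        len(axl.IpdMatch((axl.Cooperator(), axl.Cooperator()), prob_end=0.5).play())
        for _ in range(1000)
    ]
    print(sum(lengths) / len(lengths))  # close to 1 / 0.5 = 2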
-        """
-        p1 = FinalTransformer(["D", "D"])(axl.Cooperator)()
-        p2 = FinalTransformer(["D", "D"])(axl.Cooperator)()
-        players = [p1, p2]
-        tournament = axl.IpdTournament(players, prob_end=0.5, repetitions=1)
-        results = tournament.play(progress_bar=False)
-        # Check that both players always cooperated
-        for rating in results.cooperating_rating:
-            self.assertEqual(rating, 1)
-
-    def test_matches_have_different_length(self):
-        """
-        A match between two players should have variable length across the
-        repetitions
-        """
-        p1 = axl.Cooperator()
-        p2 = axl.Cooperator()
-        p3 = axl.Cooperator()
-        players = [p1, p2, p3]
-        axl.seed(0)
-        tournament = axl.IpdTournament(players, prob_end=0.5, repetitions=2)
-        results = tournament.play(progress_bar=False)
-        # Check that match lengths are different across the repetitions
-        self.assertNotEqual(results.match_lengths[0], results.match_lengths[1])
diff --git a/axelrod/ipd/tests/property.py b/axelrod/ipd/tests/property.py
deleted file mode 100644
index 705acbdc9..000000000
--- a/axelrod/ipd/tests/property.py
+++ /dev/null
@@ -1,335 +0,0 @@
-"""
-A module for creating hypothesis based strategies for property based testing
-"""
-import itertools
-
-import axelrod as axl
-
-from hypothesis.strategies import composite, floats, integers, lists, sampled_from
-
-
-@composite
-def strategy_lists(
-    draw, strategies=axl.short_run_time_strategies, min_size=1, max_size=len(axl.strategies)
-):
-    """
-    A hypothesis decorator to return a list of strategies
-
-    Parameters
-    ----------
-    strategies : list
-        The strategies from which to sample
-    min_size : integer
-        The minimum number of strategies to include
-    max_size : integer
-        The maximum number of strategies to include
-    """
-    strategies = draw(
-        lists(sampled_from(strategies), min_size=min_size, max_size=max_size)
-    )
-    return strategies
-
-
-@composite
-def matches(
-    draw,
-    strategies=axl.short_run_time_strategies,
-    min_turns=1,
-    max_turns=200,
-    min_noise=0,
-    max_noise=1,
-):
-    """
-    A hypothesis decorator to return a random match.
-
-    Parameters
-    ----------
-    strategies : list
-        The strategies from which to sample the two players
-    min_turns : integer
-        The minimum number of turns
-    max_turns : integer
-        The maximum number of turns
-    min_noise : float
-        The minimum noise
-    max_noise : float
-        The maximum noise
-
-    Returns
-    -------
-    match : a random match
-    """
-    strategies = draw(
-        strategy_lists(strategies=strategies, min_size=2, max_size=2)
-    )
-    players = [s() for s in strategies]
-    turns = draw(integers(min_value=min_turns, max_value=max_turns))
-    noise = draw(floats(min_value=min_noise, max_value=max_noise))
-    match = axl.IpdMatch(players, turns=turns, noise=noise)
-    return match
-
-
-@composite
-def tournaments(
-    draw,
-    strategies=axl.short_run_time_strategies,
-    min_size=1,
-    max_size=10,
-    min_turns=1,
-    max_turns=200,
-    min_noise=0,
-    max_noise=1,
-    min_repetitions=1,
-    max_repetitions=20,
-):
-    """
-    A hypothesis decorator to return a tournament.
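These composites plug into hypothesis's @given; a sketch, with the module path assumed from this series' layout:

    from hypothesis import given, settings

    from axelrod.ipd.tests.property import matches

    @given(match=matches(max_turns=20))
    @settings(max_examples=5)
    def test_match_is_no_longer_than_max_turns(match):
        assert len(match.play()) <= 20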
-
-    Parameters
-    ----------
-    min_size : integer
-        The minimum number of strategies to include
-    max_size : integer
-        The maximum number of strategies to include
-    min_turns : integer
-        The minimum number of turns
-    max_turns : integer
-        The maximum number of turns
-    min_noise : float
-        The minimum noise value
-    max_noise : float
-        The maximum noise value
-    min_repetitions : integer
-        The minimum number of repetitions
-    max_repetitions : integer
-        The maximum number of repetitions
-    """
-    strategies = draw(
-        strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size)
-    )
-    players = [s() for s in strategies]
-    turns = draw(integers(min_value=min_turns, max_value=max_turns))
-    repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions))
-    noise = draw(floats(min_value=min_noise, max_value=max_noise))
-
-    tournament = axl.IpdTournament(players, turns=turns, repetitions=repetitions, noise=noise)
-    return tournament
-
-
-@composite
-def prob_end_tournaments(
-    draw,
-    strategies=axl.short_run_time_strategies,
-    min_size=1,
-    max_size=10,
-    min_prob_end=0,
-    max_prob_end=1,
-    min_noise=0,
-    max_noise=1,
-    min_repetitions=1,
-    max_repetitions=20,
-):
-    """
-    A hypothesis decorator to return a tournament.
-
-    Parameters
-    ----------
-    min_size : integer
-        The minimum number of strategies to include
-    max_size : integer
-        The maximum number of strategies to include
-    min_prob_end : float
-        The minimum probability of a match ending
-    max_prob_end : float
-        The maximum probability of a match ending
-    min_noise : float
-        The minimum noise value
-    max_noise : float
-        The maximum noise value
-    min_repetitions : integer
-        The minimum number of repetitions
-    max_repetitions : integer
-        The maximum number of repetitions
-    """
-    strategies = draw(
-        strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size)
-    )
-    players = [s() for s in strategies]
-    prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end))
-    repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions))
-    noise = draw(floats(min_value=min_noise, max_value=max_noise))
-
-    tournament = axl.IpdTournament(
-        players, prob_end=prob_end, repetitions=repetitions, noise=noise
-    )
-    return tournament
-
-
-@composite
-def spatial_tournaments(
-    draw,
-    strategies=axl.short_run_time_strategies,
-    min_size=1,
-    max_size=10,
-    min_turns=1,
-    max_turns=200,
-    min_noise=0,
-    max_noise=1,
-    min_repetitions=1,
-    max_repetitions=20,
-):
-    """
-    A hypothesis decorator to return a spatial tournament.
- - Parameters - ---------- - min_size : integer - The minimum number of strategies to include - max_size : integer - The maximum number of strategies to include - min_turns : integer - The minimum number of turns - max_turns : integer - The maximum number of turns - min_noise : float - The minimum noise value - max_noise : float - The maximum noise value - min_repetitions : integer - The minimum number of repetitions - max_repetitions : integer - The maximum number of repetitions - """ - strategies = draw( - strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size) - ) - players = [s() for s in strategies] - player_indices = list(range(len(players))) - - all_potential_edges = list(itertools.combinations(player_indices, 2)) - all_potential_edges.extend([(i, i) for i in player_indices]) # Loops - edges = draw( - lists( - sampled_from(all_potential_edges), - unique=True, - average_size=2 * len(players), - ) - ) - - # Ensure all players/nodes are connected: - node_indices = sorted(set([node for edge in edges for node in edge])) - missing_nodes = [index for index in player_indices if index not in node_indices] - for index in missing_nodes: - opponent = draw(sampled_from(player_indices)) - edges.append((index, opponent)) - - turns = draw(integers(min_value=min_turns, max_value=max_turns)) - repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) - noise = draw(floats(min_value=min_noise, max_value=max_noise)) - - tournament = axl.IpdTournament( - players, turns=turns, repetitions=repetitions, noise=noise, edges=edges - ) - return tournament - - -@composite -def prob_end_spatial_tournaments( - draw, - strategies=axl.short_run_time_strategies, - min_size=1, - max_size=10, - min_prob_end=0, - max_prob_end=1, - min_noise=0, - max_noise=1, - min_repetitions=1, - max_repetitions=20, -): - """ - A hypothesis decorator to return a probabilistic ending spatial tournament. 
-
-    Parameters
-    ----------
-    min_size : integer
-        The minimum number of strategies to include
-    max_size : integer
-        The maximum number of strategies to include
-    min_prob_end : float
-        The minimum probability of a match ending
-    max_prob_end : float
-        The maximum probability of a match ending
-    min_noise : float
-        The minimum noise value
-    max_noise : float
-        The maximum noise value
-    min_repetitions : integer
-        The minimum number of repetitions
-    max_repetitions : integer
-        The maximum number of repetitions
-    """
-    strategies = draw(
-        strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size)
-    )
-    players = [s() for s in strategies]
-    player_indices = list(range(len(players)))
-
-    all_potential_edges = list(itertools.combinations(player_indices, 2))
-    all_potential_edges.extend([(i, i) for i in player_indices])  # Loops
-    edges = draw(
-        lists(
-            sampled_from(all_potential_edges),
-            unique=True,
-            average_size=2 * len(players),
-        )
-    )
-
-    # Ensure all players/nodes are connected:
-    node_indices = sorted(set([node for edge in edges for node in edge]))
-    missing_nodes = [index for index in player_indices if index not in node_indices]
-    for index in missing_nodes:
-        opponent = draw(sampled_from(player_indices))
-        edges.append((index, opponent))
-
-    prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end))
-    repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions))
-    noise = draw(floats(min_value=min_noise, max_value=max_noise))
-
-    tournament = axl.IpdTournament(
-        players, prob_end=prob_end, repetitions=repetitions, noise=noise, edges=edges
-    )
-    return tournament
-
-
-@composite
-def games(draw, prisoners_dilemma=True, max_value=100):
-    """
-    A hypothesis decorator to return a random game.
-
-    Parameters
-    ----------
-    prisoners_dilemma : bool
-        If not True, the R, P, S, T values will be uniformly random. True by
-        default, which ensures T > R > P > S and 2R > T + S.
- max_value : the maximal payoff value - """ - - if prisoners_dilemma: - s_upper_bound = max_value - 4 # Ensures there is enough room - s = draw(integers(max_value=s_upper_bound)) - - t_lower_bound = s + 3 # Ensures there is enough room - t = draw(integers(min_value=t_lower_bound, max_value=max_value)) - - r_upper_bound = t - 1 - r_lower_bound = min(max(int((t + s) / 2), s) + 2, r_upper_bound) - r = draw(integers(min_value=r_lower_bound, max_value=r_upper_bound)) - - p_lower_bound = s + 1 - p_upper_bound = r - 1 - p = draw(integers(min_value=p_lower_bound, max_value=p_upper_bound)) - - else: - s = draw(integers(max_value=max_value)) - t = draw(integers(max_value=max_value)) - r = draw(integers(max_value=max_value)) - p = draw(integers(max_value=max_value)) - - game = axl.IpdGame(r=r, s=s, t=t, p=p) - return game diff --git a/axelrod/ipd/tests/strategies/__init__.py b/axelrod/ipd/tests/strategies/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/axelrod/ipd/tests/strategies/test_adaptive.py b/axelrod/ipd/tests/strategies/test_adaptive.py deleted file mode 100644 index 5b796ce4e..000000000 --- a/axelrod/ipd/tests/strategies/test_adaptive.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Tests for the Adaptive strategy.""" - -import axelrod as axl - -from .test_player import TestMatch, TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestAdaptive(TestPlayer): - - name = "Adaptive" - player = axl.Adaptive - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 6 + [(D, C)] * 8 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] * 6 + [(D, D)] * 8 - self.versus_test(axl.Defector(), expected_actions=actions) - - actions = [(C, C), (C, D)] * 3 + [(D, C), (D, D)] * 4 - self.versus_test(axl.Alternator(), expected_actions=actions) - - actions = [(C, C)] * 6 + [(D, C)] + [(D, D)] * 4 + [(C, D), (C, C)] - self.versus_test(axl.TitForTat(), expected_actions=actions) - - def test_scoring(self): - player = axl.Adaptive() - opponent = axl.Cooperator() - player.play(opponent) - player.play(opponent) - self.assertEqual(3, player.scores[C]) - game = axl.IpdGame(-3, 10, 10, 10) - player.set_match_attributes(game=game) - player.play(opponent) - self.assertEqual(0, player.scores[C]) diff --git a/axelrod/ipd/tests/strategies/test_adaptor.py b/axelrod/ipd/tests/strategies/test_adaptor.py deleted file mode 100644 index 740fdb252..000000000 --- a/axelrod/ipd/tests/strategies/test_adaptor.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Tests for the adaptor""" - -import unittest - -import axelrod as axl - -from .test_player import TestPlayer, test_four_vector - -C, D = axl.Action.C, axl.Action.D - - -class TestAdaptorBrief(TestPlayer): - - name = "AdaptorBrief" - player = axl.AdaptorBrief - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # No error. - actions = [(C, C), (C, C), (C, C), (C, C)] - self.versus_test( - opponent=axl.AdaptorBrief(), expected_actions=actions, seed=0 - ) - - # Error corrected. 
- actions = [(C, C), (C, D), (D, C), (C, C)] - self.versus_test( - opponent=axl.AdaptorBrief(), expected_actions=actions, seed=22 - ) - - # Error corrected, example 2 - actions = [(D, C), (C, D), (D, C), (C, D), (C, C)] - self.versus_test( - opponent=axl.AdaptorBrief(), expected_actions=actions, seed=925 - ) - - # Versus Cooperator - actions = [(C, C)] * 8 - self.versus_test( - opponent=axl.Cooperator(), expected_actions=actions, seed=0 - ) - - # Versus Defector - actions = [(C, D), (D, D), (D, D), (D, D), (D, D), (D, D), (D, D)] - self.versus_test( - opponent=axl.Defector(), expected_actions=actions, seed=0 - ) - - -class TestAdaptorLong(TestPlayer): - - name = "AdaptorLong" - player = axl.AdaptorLong - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # No error. - actions = [(C, C), (C, C), (C, C), (C, C)] - self.versus_test( - opponent=axl.AdaptorLong(), expected_actions=actions, seed=0 - ) - - # Error corrected. - actions = [(C, C), (C, D), (D, D), (C, C), (C, C)] - self.versus_test( - opponent=axl.AdaptorLong(), expected_actions=actions, seed=22 - ) - - # Versus Cooperator - actions = [(C, C)] * 8 - self.versus_test( - opponent=axl.Cooperator(), expected_actions=actions, seed=0 - ) - - # Versus Defector - actions = [(C, D), (D, D), (C, D), (D, D), (D, D), (C, D), (D, D)] - self.versus_test( - opponent=axl.Defector(), expected_actions=actions, seed=0 - ) diff --git a/axelrod/ipd/tests/strategies/test_alternator.py b/axelrod/ipd/tests/strategies/test_alternator.py deleted file mode 100644 index 64a347c33..000000000 --- a/axelrod/ipd/tests/strategies/test_alternator.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Tests for the Alternator strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestAlternator(TestPlayer): - - name = "Alternator" - player = axl.Alternator - expected_classifier = { - "memory_depth": 1, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (D, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (D, D)] * 5 - self.versus_test(axl.Defector(), expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, C]) - actions = [(C, D), (D, C)] * 5 - self.versus_test(opponent, expected_actions=actions) diff --git a/axelrod/ipd/tests/strategies/test_ann.py b/axelrod/ipd/tests/strategies/test_ann.py deleted file mode 100644 index e185577c2..000000000 --- a/axelrod/ipd/tests/strategies/test_ann.py +++ /dev/null @@ -1,152 +0,0 @@ -"""Tests for the ANN strategy.""" -import unittest - -import axelrod as axl -from axelrod.ipd.evolvable_player import InsufficientParametersError -from axelrod.ipd.load_data_ import load_weights -from axelrod.ipd.strategies.ann import split_weights - -from .test_player import TestPlayer -from .test_evolvable_player import PartialClass, TestEvolvablePlayer - - -C, D = axl.Action.C, axl.Action.D -nn_weights = load_weights() -num_features, num_hidden, weights = nn_weights["Evolved ANN 5"] - - -class TestSplitWeights(unittest.TestCase): - def test_split_weights(self): - with self.assertRaises(ValueError): - split_weights([0] * 20, 12, 10) - # Doesn't Raise - split_weights([0] * 70, 5, 10) - split_weights([0] * 
12, 10, 1) - - -class TestEvolvedANN(TestPlayer): - - name = "Evolved ANN" - player = axl.EvolvedANN - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] + [(D, D)] * 5 - self.versus_test(axl.Defector(), expected_actions=actions) - - actions = [(C, C)] * 5 - self.versus_test(axl.TitForTat(), expected_actions=actions) - - -class TestEvolvedANN5(TestPlayer): - - name = "Evolved ANN 5" - player = axl.EvolvedANN5 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] + [(D, D)] * 4 - self.versus_test(axl.Defector(), expected_actions=actions) - - -class TestEvolvedANNNoise05(TestPlayer): - - name = "Evolved ANN 5 Noise 05" - player = axl.EvolvedANNNoise05 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - -class TestEvolvableANN(unittest.TestCase): - - player_class = axl.EvolvableANN - - def test_normalized_parameters(self): - # Must specify at least one of cycle or cycle_length - self.assertRaises( - InsufficientParametersError, - self.player_class._normalize_parameters - ) - self.assertRaises( - InsufficientParametersError, - self.player_class._normalize_parameters, - weights=nn_weights["Evolved ANN 5"][2] - ) - - -class TestEvolvableANN2(TestEvolvablePlayer): - name = "EvolvableANN" - player_class = axl.EvolvableANN - parent_class = axl.ANN - parent_kwargs = ["num_features", "num_hidden", "weights"] - init_parameters = {"num_features": 17, "num_hidden": 8} - - -class TestEvolvableANN3(TestEvolvablePlayer): - name = "EvolvableANN" - player_class = axl.EvolvableANN - parent_class = axl.ANN - parent_kwargs = ["num_features", "num_hidden", "weights"] - init_parameters = { - "num_features": nn_weights["Evolved ANN 5"][0], - "num_hidden": nn_weights["Evolved ANN 5"][1], - "weights": nn_weights["Evolved ANN 5"][2] - } - - -# Substitute EvolvableANN as a regular EvolvedANN5. 
-EvolvableANNPlayerWithDefault = PartialClass( - axl.EvolvableANN, - num_features=num_features, - num_hidden=num_hidden, - weights=weights -) - - -class EvolvableANNAsANN(TestEvolvedANN5): - player = EvolvableANNPlayerWithDefault - - def test_equality_of_clone(self): - pass - - def test_equality_of_pickle_clone(self): - pass - - def test_repr(self): - pass diff --git a/axelrod/ipd/tests/strategies/test_apavlov.py b/axelrod/ipd/tests/strategies/test_apavlov.py deleted file mode 100644 index e720dce56..000000000 --- a/axelrod/ipd/tests/strategies/test_apavlov.py +++ /dev/null @@ -1,163 +0,0 @@ -"""Tests APavlov strategies.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestAPavlov2006(TestPlayer): - name = "Adaptive Pavlov 2006" - player = axl.APavlov2006 - - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 7 - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - attrs={"opponent_class": "Cooperative"}, - ) - - opponent = axl.MockPlayer(actions=[C] * 6 + [D]) - actions = [(C, C)] * 6 + [(C, D), (D, C)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Cooperative"} - ) - - actions = [(C, D)] + [(D, D)] * 6 - self.versus_test( - axl.Defector(), - expected_actions=actions, - attrs={"opponent_class": "ALLD"}, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C, D, C]) - actions = [ - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (C, C), - (C, D), - (D, C), - ] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} - ) - - opponent = axl.MockPlayer(actions=[D, D, C, D, D, C]) - actions = [(C, D), (D, D), (D, C), (C, D), (D, D), (D, C), (D, D)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "PavlovD"} - ) - - opponent = axl.MockPlayer(actions=[D, D, C, D, D, C, D]) - actions = [(C, D), (D, D), (D, C), (C, D), (D, D), (D, C), (D, D), (C, D)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "PavlovD"} - ) - - opponent = axl.MockPlayer(actions=[C, C, C, D, D, D]) - actions = [(C, C), (C, C), (C, C), (C, D), (D, D), (D, D), (D, C)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Random"} - ) - - opponent = axl.MockPlayer(actions=[D, D, D, C, C, C]) - actions = [(C, D), (D, D), (D, D), (D, C), (C, C), (C, C), (D, D)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Random"} - ) - - -class TestAPavlov2011(TestPlayer): - name = "Adaptive Pavlov 2011" - player = axl.APavlov2011 - - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - - actions = [(C, C)] * 8 - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - attrs={"opponent_class": "Cooperative"}, - ) - - actions = [(C, D)] + [(D, D)] * 9 - self.versus_test( - axl.Defector(), - expected_actions=actions, - attrs={"opponent_class": "ALLD"}, - ) - - opponent = axl.MockPlayer(actions=[C, D, D, D, D, D, D]) - actions = [(C, C), (C, D)] + [(D, D)] * 5 + [(D, C)] - self.versus_test( - opponent, 
expected_actions=actions, attrs={"opponent_class": "ALLD"} - ) - - opponent = axl.MockPlayer(actions=[C, C, D, D, D, D, D]) - actions = [(C, C), (C, C), (C, D)] + [(D, D)] * 4 + [(D, C)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "ALLD"} - ) - - opponent = axl.MockPlayer(actions=[C, D, D, C, D, D, D]) - actions = [(C, C), (C, D), (D, D), (D, C), (C, D), (D, D), (D, D), (D, C)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "ALLD"} - ) - - opponent = axl.MockPlayer(actions=[C, D, D, C, C, D, D]) - actions = [(C, C), (C, D), (D, D), (D, C), (C, C), (C, D), (C, D), (D, C)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} - ) - - opponent = axl.MockPlayer(actions=[C, D, C, D, C, D, D]) - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (C, D), (D, C)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} - ) - - opponent = axl.MockPlayer(actions=[D, D, D, C, C, C, C]) - actions = [(C, D), (D, D), (D, D), (D, C), (C, C), (C, C), (C, C), (C, D)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} - ) - - opponent = axl.MockPlayer(actions=[C, C, C, C, D, D]) - actions = [(C, C), (C, C), (C, C), (C, C), (C, D), (D, D), (D, C), (D, C)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Random"} - ) - - opponent = axl.MockPlayer(actions=[D, D, C, C, C, C]) - actions = [(C, D), (D, D), (D, C), (C, C), (C, C), (C, C), (D, D), (D, D)] - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_class": "Random"} - ) diff --git a/axelrod/ipd/tests/strategies/test_appeaser.py b/axelrod/ipd/tests/strategies/test_appeaser.py deleted file mode 100644 index ede79c1d5..000000000 --- a/axelrod/ipd/tests/strategies/test_appeaser.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Tests for the Appeaser strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestAppeaser(TestPlayer): - - name = "Appeaser" - player = axl.Appeaser - expected_classifier = { - "memory_depth": float("inf"), # Depends on internal memory. 
- "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (D, D), (C, D), (D, D), (C, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - opponent = axl.MockPlayer(actions=[C, C, D, D]) - actions = [(C, C), (C, C), (C, D), (D, D), (C, C), (C, C)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[C, C, D, D, D]) - actions = [(C, C), (C, C), (C, D), (D, D), (C, D), (D, C), (D, C)] - self.versus_test(opponent, expected_actions=actions) diff --git a/axelrod/ipd/tests/strategies/test_averagecopier.py b/axelrod/ipd/tests/strategies/test_averagecopier.py deleted file mode 100644 index ae23667e7..000000000 --- a/axelrod/ipd/tests/strategies/test_averagecopier.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Tests for the AverageCopier strategies.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestAverageCopier(TestPlayer): - - name = "Average Copier" - player = axl.AverageCopier - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Tests that if opponent has played all C then player chooses C. - actions = [(C, C)] * 10 - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) - actions = [(D, C)] + [(C, C)] * 9 - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=2) - - # Tests that if opponent has played all D then player chooses D. - actions = [(C, D)] + [(D, D)] * 9 - self.versus_test(axl.Defector(), expected_actions=actions, seed=1) - actions = [(D, D)] + [(D, D)] * 9 - self.versus_test(axl.Defector(), expected_actions=actions, seed=2) - - # Variable behaviour based on the history and stochastic - - actions = [ - (C, C), - (C, D), - (D, C), - (D, D), - (C, C), - (C, D), - (C, C), - (D, D), - (D, C), - (C, D), - ] - self.versus_test(axl.Alternator(), expected_actions=actions, seed=1) - - actions = [ - (D, C), - (C, D), - (D, C), - (C, D), - (C, C), - (D, D), - (D, C), - (D, D), - (C, C), - (D, D), - ] - self.versus_test(axl.Alternator(), expected_actions=actions, seed=2) - - opponent = axl.MockPlayer(actions=[C, C, D, D, D, D]) - actions = [ - (C, C), - (C, C), - (C, D), - (D, D), - (D, D), - (C, D), - (D, C), - (D, C), - (D, D), - (D, D), - ] - self.versus_test(opponent, expected_actions=actions, seed=1) - - opponent = axl.MockPlayer(actions=[C, C, C, D, D, D]) - actions = [ - (D, C), - (C, C), - (C, C), - (C, D), - (D, D), - (C, D), - (C, C), - (D, C), - (D, C), - (D, D), - ] - self.versus_test(opponent, expected_actions=actions, seed=2) - - -class TestNiceAverageCopier(TestPlayer): - - name = "Nice Average Copier" - player = axl.NiceAverageCopier - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Tests that if opponent has played all C then player chooses C. 
- actions = [(C, C)] * 10 - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) - - # Tests that if opponent has played all D then player chooses D. - actions = [(C, D)] + [(D, D)] * 9 - self.versus_test(axl.Defector(), expected_actions=actions, seed=1) - - # Variable behaviour based on the history and stochastic behaviour - actions = [ - (C, C), - (C, D), - (C, C), - (D, D), - (D, C), - (C, D), - (C, C), - (C, D), - (D, C), - (D, D), - ] - self.versus_test(axl.Alternator(), expected_actions=actions, seed=1) - - actions = [ - (C, C), - (C, D), - (D, C), - (D, D), - (C, C), - (C, D), - (D, C), - (D, D), - (D, C), - (C, D), - ] - self.versus_test(axl.Alternator(), expected_actions=actions, seed=2) - - opponent = axl.MockPlayer(actions=[C, C, D, D, D, D]) - actions = [ - (C, C), - (C, C), - (C, D), - (C, D), - (D, D), - (D, D), - (C, C), - (D, C), - (C, D), - (D, D), - ] - self.versus_test(opponent, expected_actions=actions, seed=1) - - opponent = axl.MockPlayer(actions=[C, C, C, D, D, D]) - actions = [ - (C, C), - (C, C), - (C, C), - (C, D), - (D, D), - (D, D), - (C, C), - (C, C), - (D, C), - (D, D), - ] - self.versus_test(opponent, expected_actions=actions, seed=2) diff --git a/axelrod/ipd/tests/strategies/test_axelrod_first.py b/axelrod/ipd/tests/strategies/test_axelrod_first.py deleted file mode 100644 index 1327757a3..000000000 --- a/axelrod/ipd/tests/strategies/test_axelrod_first.py +++ /dev/null @@ -1,810 +0,0 @@ -"""Tests for the First Axelrod strategies.""" - -import axelrod as axl - -from .test_player import TestPlayer, test_four_vector - -C, D = axl.Action.C, axl.Action.D - - -class TestFirstByDavis(TestPlayer): - - name = "First by Davis: 10" - player = axl.FirstByDavis - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Cooperates for the first ten rounds - actions = [(C, C)] * 10 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] * 10 - self.versus_test(axl.Defector(), expected_actions=actions) - - actions = [(C, C), (C, D)] * 5 - self.versus_test(axl.Alternator(), expected_actions=actions) - - # If opponent defects at any point then the player will defect forever - # (after 10 rounds) - opponent = axl.MockPlayer(actions=[C] * 10 + [D]) - actions = [(C, C)] * 10 + [(C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[C] * 15 + [D]) - actions = [(C, C)] * 15 + [(C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - -class TestFirstByDowning(TestPlayer): - - name = "First by Downing" - player = axl.FirstByDowning - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"game"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(D, C), (D, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(D, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, C, C]) - actions = [(D, D), (D, C), (D, C), (D, D)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, D, C]) - actions = [(D, D), (D, D), (D, C), (D, D)] - self.versus_test(opponent, 
expected_actions=actions) - - opponent = axl.MockPlayer(actions=[C, C, D, D, C, C]) - actions = [(D, C), (D, C), (C, D), (D, D), (D, C), (D, C), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[C, C, C, C, D, D]) - actions = [(D, C), (D, C), (C, C), (D, C), (D, D), (C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - -class TestFirstByFeld(TestPlayer): - - name = "First by Feld: 1.0, 0.5, 200" - player = axl.FirstByFeld - expected_classifier = { - "memory_depth": 200, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_cooperation_probability(self): - # Test cooperation probabilities - p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.8, rounds_of_decay=100) - self.assertEqual(1.0, p1._cooperation_probability()) - p2 = axl.Cooperator() - match = axl.IpdMatch((p1, p2), turns=50) - match.play() - self.assertEqual(0.9, p1._cooperation_probability()) - match = axl.IpdMatch((p1, p2), turns=100) - match.play() - self.assertEqual(0.8, p1._cooperation_probability()) - - # Test cooperation probabilities, second set of params - p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.5, rounds_of_decay=200) - self.assertEqual(1.0, p1._cooperation_probability()) - match = axl.IpdMatch((p1, p2), turns=100) - match.play() - self.assertEqual(0.75, p1._cooperation_probability()) - match = axl.IpdMatch((p1, p2), turns=200) - match.play() - self.assertEqual(0.5, p1._cooperation_probability()) - - def test_decay(self): - # Test beyond 200 rounds - for opponent in [axl.Cooperator(), axl.Defector()]: - player = self.player() - self.assertEqual(player._cooperation_probability(), player._start_coop_prob) - match = axl.IpdMatch((player, opponent), turns=201) - match.play() - self.assertEqual(player._cooperation_probability(), player._end_coop_prob) - - def test_strategy(self): - actions = [(C, C)] * 41 + [(D, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) - - actions = [(C, C)] * 16 + [(D, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=2) - - actions = [(C, D)] + [(D, D)] * 20 - self.versus_test(axl.Defector(), expected_actions=actions) - - -class TestFirstByGraaskamp(TestPlayer): - - name = "First by Graaskamp: 0.05" - player = axl.FirstByGraaskamp - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Test TfT in first 50 rounds followed by defection followed by 5 rounds - # of TfT - expected_attrs = { - "opponent_is_random": False, - "next_random_defection_turn": None, - } - - # Against alternator - actions = [(C, C)] + [(C, D), (D, C)] * 24 + [(C, D)] # 50 turns - actions += [(D, C)] # 51 turns - actions += [(C, D), (D, C)] * 2 + [(C, D)] # 56 turns - self.versus_test( - axl.Alternator(), expected_actions=actions, attrs=expected_attrs - ) - - # Against defector - actions = [(C, D)] + [(D, D)] * 55 # 56 turns - self.versus_test( - axl.Defector(), expected_actions=actions, attrs=expected_attrs - ) - - # Against cooperator - actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 5 - self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs=expected_attrs - ) - - # Test recognition of random player - expected_attrs = { - "opponent_is_random": False, - "next_random_defection_turn": 
None, - } - actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 5 # 56 turns - self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs=expected_attrs - ) - expected_attrs = {"opponent_is_random": False, "next_random_defection_turn": 68} - actions += [(C, C)] # 57 turns - self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs=expected_attrs - ) - - expected_attrs = { - "opponent_is_random": True, - "next_random_defection_turn": None, - } - actions = [(C, C)] + [(C, D), (D, C)] * 24 + [(C, D)] # 50 turns - actions += [(D, C)] # 51 turns - actions += [(C, D), (D, C)] * 3 # 57 turns - actions += [(D, D)] - self.versus_test( - axl.Alternator(), expected_actions=actions, attrs=expected_attrs - ) - actions += [(D, C), (D, D)] * 5 - self.versus_test( - axl.Alternator(), expected_actions=actions, attrs=expected_attrs - ) - - # Test versus TfT - expected_attrs = { - "opponent_is_random": False, - "next_random_defection_turn": None, - } - actions = [(C, C)] * 50 + [(D, C)] # 51 turns - actions += [(C, D), (D, C)] * 3 # 56 turns - actions += [(C, D), (D, C)] * 50 - self.versus_test( - axl.TitForTat(), expected_actions=actions, seed=0, attrs=expected_attrs - ) - - # Test random defections - expected_attrs = {"opponent_is_random": False, "next_random_defection_turn": 78} - actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 16 + [(D, C)] + [(C, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, seed=0, attrs=expected_attrs - ) - - expected_attrs = {"opponent_is_random": False, "next_random_defection_turn": 77} - actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 12 + [(D, C)] + [(C, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, seed=1, attrs=expected_attrs - ) - - -class TestFirstByGrofman(TestPlayer): - - name = "First by Grofman" - player = axl.FirstByGrofman - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 7 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, C), (C, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D] * 8) - actions = [(C, D), (C, D), (D, D), (C, D), (D, D), (C, D), (C, D), (D, D)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - opponent = axl.MockPlayer(actions=[D] * 8) - actions = [(C, D), (D, D), (C, D), (D, D), (C, D), (C, D), (C, D), (D, D)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - -class TestFirstByJoss(TestPlayer): - - name = "First by Joss: 0.9" - player = axl.FirstByJoss - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = {(C, C): 0.9, (C, D): 0, (D, C): 0.9, (D, D): 0} - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - actions = [(C, C), (C, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) - - actions = [(C, C), (D, C), (D, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=2) - - actions = [(C, D), (D, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions, seed=1) - - actions = [(C, D), (D, D), (D, D), (D, D)] - 
self.versus_test(axl.Defector(), expected_actions=actions, seed=2) - - -class TestFirstByNydegger(TestPlayer): - - name = "First by Nydegger" - player = axl.FirstByNydegger - expected_classifier = { - "memory_depth": 3, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_score_history(self): - """Tests many (but not all) possible combinations.""" - player = self.player() - score_map = player.score_map - score = player.score_history([C, C, C], [C, C, C], score_map) - self.assertEqual(score, 0) - score = player.score_history([D, C, C], [C, C, C], score_map) - self.assertEqual(score, 1) - score = player.score_history([C, C, C], [D, C, C], score_map) - self.assertEqual(score, 2) - score = player.score_history([D, D, C], [D, C, C], score_map) - self.assertEqual(score, 7) - score = player.score_history([C, D, C], [C, D, C], score_map) - self.assertEqual(score, 12) - score = player.score_history([D, C, D], [C, C, C], score_map) - self.assertEqual(score, 17) - score = player.score_history([D, D, D], [D, D, D], score_map) - self.assertEqual(score, 63) - - def test_strategy(self): - # Test TFT-type initial play - # Test trailing post-round 3 play - - actions = [(C, C)] * 9 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (D, D), (D, D), (C, D), (C, D), (C, D), (C, D), (C, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C), (C, D)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, C]) - actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - -class TestFirstByShubik(TestPlayer): - - name = "First by Shubik" - player = axl.FirstByShubik - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, C), (C, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, C, C]) - actions = [(C, D), (D, C), (C, C), (C, D)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C]) - actions = [(C, D), (D, C), (C, D), (D, C), (D, C), (C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, C, D, D, C]) - actions = [(C, D), (D, C), (C, D), (D, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C, D]) - actions = [ - (C, D), - (D, C), - (C, D), - (D, C), - (D, C), - (C, D), - (D, D), - (D, C), - (D, D), - (C, C), - ] - self.versus_test(opponent, expected_actions=actions) - - -class TestFirstByTullock(TestPlayer): - - name = "First by Tullock" - player = axl.FirstByTullock - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - """Cooperates for first ten rounds""" - actions = [(C, C), (C, D)] * 
5 - self.versus_test(axl.Alternator(), expected_actions=actions) - - actions = [(C, D)] * 11 + [(D, D)] * 2 - self.versus_test(axl.Defector(), expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D] * 10 + [C]) - actions = [(C, D)] * 10 + [(C, C), (D, D)] - self.versus_test(opponent, expected_actions=actions) - - # Test beyond 10 rounds - opponent = axl.MockPlayer(actions=[D] * 5 + [C] * 6) - actions = [(C, D)] * 5 + [(C, C)] * 6 + [(D, D)] * 4 - self.versus_test(opponent, expected_actions=actions, seed=20) - - opponent = axl.MockPlayer(actions=[D] * 5 + [C] * 6) - actions = [(C, D)] * 5 + [(C, C)] * 6 + [(C, D), (D, D), (D, D), (C, D)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - opponent = axl.MockPlayer(actions=[C] * 9 + [D] * 2) - actions = [(C, C)] * 9 + [(C, D)] * 2 + [(C, C), (D, C), (D, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - opponent = axl.MockPlayer(actions=[C] * 9 + [D] * 2) - actions = [(C, C)] * 9 + [(C, D)] * 2 + [(D, C), (D, C), (C, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - -class TestFirstByAnonymous(TestPlayer): - - name = "First by Anonymous" - player = axl.FirstByAnonymous - expected_classifier = { - "memory_depth": 0, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(D, C), (C, C), (C, C), (D, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) - - actions = [(C, C), (C, C), (D, C), (C, C), (C, C), (D, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=10) - - -class TestFirstBySteinAndRapoport(TestPlayer): - - name = "First by Stein and Rapoport: 0.05: (D, D)" - player = axl.FirstBySteinAndRapoport - expected_classifier = { - "memory_depth": float("inf"), - "long_run_time": False, - "stochastic": False, - "makes_use_of": {"length"}, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_init(self): - player = self.player() - self.assertEqual(player.alpha, 0.05) - self.assertFalse(player.opponent_is_random) - - player = self.player(alpha=0.5) - self.assertEqual(player.alpha, 0.5) - self.assertFalse(player.opponent_is_random) - - def test_strategy(self): - # Our IpdPlayer (SteinAndRapoport) vs Cooperator - # After 15th round (pvalue < alpha) still plays TitForTat. - # Note it always defects on the last two rounds. - opponent = axl.Cooperator() - actions = [(C, C)] * 17 + [(D, C)] * 2 - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_is_random": False} - ) - - actions = actions[:-2] + [(C, C)] * 2 - self.versus_test( - opponent, - expected_actions=actions[:-2], - match_attributes={"length": -1}, - attrs={"opponent_is_random": False}, - ) - - # SteinAndRapoport vs Defector - # After 15th round (p-value < alpha) still plays TitForTat. - opponent = axl.Defector() - actions = [(C, D)] * 4 + [(D, D)] * 15 - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_is_random": False} - ) - - # SteinAndRapoport vs Alternator - # After 15th round (p-value > alpha) starts defecting. - opponent = axl.Alternator() - actions = [(C, C), (C, D), (C, C), (C, D)] - - # On 15th round carry out chi-square test. - actions += [(D, C), (C, D)] * 5 + [(D, C)] - - # Defect throughout. 
- actions += [(D, D), (D, C), (D, D), (D, C)] - - self.versus_test( - opponent, expected_actions=actions, attrs={"opponent_is_random": True} - ) - - # The test is carried out again every 15 rounds. - # If the strategy alternates for the first 12 rounds and then cooperates - # it is no longer recognised as random. - opponent = axl.MockPlayer([C, D] * 6 + [C] * 50) - - actions = [(C, C), (C, D), (C, C), (C, D)] - # On 15th round carry out chi-square test. - actions += [(D, C), (C, D)] * 4 + [(D, C), (C, C), (D, C)] - # Defect throughout and carry out chi-square test on round 30. - # Opponent is no longer recognised as random, revert to TFT. - actions += [(D, C)] * 14 + [(C, C)] - self.versus_test( - opponent, - expected_actions=actions, - match_attributes={"length": -1}, - attrs={"opponent_is_random": False}, - ) - - -class TestFirstByTidemanAndChieruzzi(TestPlayer): - - name = "First by Tideman and Chieruzzi: (D, D)" - player = axl.FirstByTidemanAndChieruzzi - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"game", "length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Cooperator Test - opponent = axl.Cooperator() - actions = [(C, C), (C, C), (D, C), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - # Cooperator Test: does not defect if game length is unknown - opponent = axl.Cooperator() - actions = [(C, C), (C, C), (C, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, - match_attributes={"length": float("inf")}) - - # Defector Test - opponent = axl.Defector() - actions = [(C, D), (D, D), (D, D), (D, D)] - self.versus_test(opponent, expected_actions=actions) - - # Test increasing retaliation - opponent = axl.MockPlayer([D, C]) - actions = [ - (C, D), - (D, C), - (C, D), - (D, C), - (D, D), - (D, C), - (D, D), - (D, C), - (D, D), - (D, C), - ] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "is_retaliating": True, - "retaliation_length": 4, - "retaliation_remaining": 3, - }, - ) - - opponent = axl.Cycler("DDCDD") - actions = [ - (C, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - ] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "current_score": 34, - "opponent_score": 19, - "last_fresh_start": 0, - "retaliation_length": 6, - "retaliation_remaining": 2, - }, - ) - - # When the length is given this strategy will not give a fresh start - opponent = axl.Cycler("DDCDD") - actions = [ - (C, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (C, D), - (C, D), - ] - self.versus_test( - opponent, expected_actions=actions, match_attributes={"length": 50} - ) - - # When the length is not given this strategy will give a fresh start. 
- opponent = axl.Cycler("DDCDD") - actions = [ - (C, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (C, D), - (C, D), - ] - self.versus_test( - opponent, - expected_actions=actions, - match_attributes={"length": float("inf")}, - ) - - # Check standard deviation conditions. - # The opponent is similar to the one above except the stddev condition - # is not met, therefore no fresh start will be given. - opponent = axl.Cycler("DDCDDDDCDDCDCCC") - actions = [ - (C, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, C), - (C, D), - (D, C), - (D, C), - (D, C), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, D), - ] - - self.versus_test( - opponent, expected_actions=actions, attrs={"last_fresh_start": 0} - ) - - # Check the fresh start condition - opponent = axl.TitForTat() - actions = [(C, C), (C, C), (D, C), (D, D)] - self.versus_test( - opponent, expected_actions=actions, attrs={"fresh_start": False} - ) - - # check the fresh start condition: least 20 rounds since the last ‘fresh start’ - opponent = axl.Cycler("CCCCD") - actions = [ - (C, C), - (C, C), - (C, C), - (C, C), - (C, D), - (D, C), - (C, C), - (C, C), - (C, C), - (C, D), - (D, C), - (D, C), - (C, C), - (C, C), - (C, D), - (D, C), - (D, C), - (D, C), - (C, C), - (C, D), - (D, C), - (D, C), - (D, C), - (C, C), - (C, D), - (D, C), - (C, C), - (C, C), - (C, C), - (C, D), - (D, C), - (D, C), - (C, C), - (D, C), - (D, D), - ] - self.versus_test( - opponent, - expected_actions=actions, - match_attributes={"length": 35}, - attrs={ - "current_score": 110, - "opponent_score": 75, - "last_fresh_start": 24, - "retaliation_length": 2, - "retaliation_remaining": 0, - }, - ) diff --git a/axelrod/ipd/tests/strategies/test_axelrod_second.py b/axelrod/ipd/tests/strategies/test_axelrod_second.py deleted file mode 100644 index 1b89e5b36..000000000 --- a/axelrod/ipd/tests/strategies/test_axelrod_second.py +++ /dev/null @@ -1,2035 +0,0 @@ -"""Tests for the Second Axelrod strategies.""" - -import random - -import axelrod as axl - -import numpy as np - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestChampion(TestPlayer): - name = "Second by Champion" - player = axl.SecondByChampion - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Cooperates for first 10 rounds - - actions = [(C, C), (C, D)] * 5 # Cooperate for ten rounds - self.versus_test(axl.Alternator(), expected_actions=actions) - - # Mirror partner for next phase - actions += [(D, C), (C, D)] * 7 # Mirror opponent afterwards - self.versus_test(axl.Alternator(), expected_actions=actions) - - # Cooperate unless the opponent defected, has defected at least 40% of - actions_1 = actions + [(D, C), (C, D), (C, C), (C, D)] - self.versus_test(axl.Alternator(), expected_actions=actions_1, seed=0) - - actions_2 = actions + [(D, C), (C, D), (D, C), (C, D)] - self.versus_test(axl.Alternator(), expected_actions=actions_2, seed=1) - - actions_3 = actions + [(D, C), (C, D), (C, C), (C, D)] - self.versus_test(axl.Alternator(), expected_actions=actions_3, 
seed=2) - - -class TestEatherley(TestPlayer): - - name = "Second by Eatherley" - player = axl.SecondByEatherley - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Test cooperate after opponent cooperates - actions = [(C, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # If opponent only defects then probability of cooperating is 0. - actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - # Stochastic response to defect - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=actions, seed=0) - actions = [(C, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=actions, seed=1) - - opponent = axl.MockPlayer(actions=[D, C, C, D]) - actions = [(C, D), (D, C), (C, C), (C, D), (C, D)] - self.versus_test(opponent, expected_actions=actions, seed=8) - opponent = axl.MockPlayer(actions=[D, C, C, D]) - actions = [(C, D), (D, C), (C, C), (C, D), (D, D)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - -class TestTester(TestPlayer): - - name = "Second by Tester" - player = axl.SecondByTester - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Alternate after 3rd round if opponent only cooperates - actions = [(D, C)] + [(C, C), (C, C)] + [(D, C), (C, C)] * 4 - self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs={"is_TFT": False} - ) - - # Cooperate after initial defection and become TfT - actions = [(D, C), (C, D), (C, C)] - self.versus_test( - axl.Alternator(), expected_actions=actions, attrs={"is_TFT": True} - ) - - # Now play TfT - opponent = axl.MockPlayer(actions=[C, D, C, D, D, C]) - actions = [(D, C), (C, D), (C, C), (C, D), (D, D), (D, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, attrs={"is_TFT": True}) - - -class TestGladstein(TestPlayer): - - name = "Second by Gladstein" - player = axl.SecondByGladstein - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Cooperates and begins to play TFT when Alternator defects - actions = [(D, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test( - axl.Alternator(), expected_actions=actions, attrs={"patsy": False} - ) - - # Cooperation ratio will always be less than 0.5 - actions = [(D, C), (C, C), (C, C), (D, C), (C, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs={"patsy": True} - ) - - # Apologizes immediately and plays TFT - actions = [(D, D), (C, D), (D, D), (D, D), (D, D)] - self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"patsy": False} - ) - - # Ratio is 1/3 when MockPlayer defected for the first time. 
- opponent = axl.MockPlayer(actions=[C, C, C, D, D]) - actions = [(D, C), (C, C), (C, C), (D, D), (C, D)] - self.versus_test(opponent, expected_actions=actions, attrs={"patsy": False}) - - opponent = axl.AntiTitForTat() - actions = [(D, C), (C, C), (C, D), (C, D), (D, D)] - self.versus_test(opponent, expected_actions=actions, attrs={"patsy": False}) - - -class TestTranquilizer(TestPlayer): - - name = "Second by Tranquilizer" - player = axl.SecondByTranquilizer - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": {"game"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - # Test for initialised variables - - def test_init(self): - - player = axl.SecondByTranquilizer() - - self.assertEqual(player.num_turns_after_good_defection, 0) - self.assertEqual(player.opponent_consecutive_defections, 0) - self.assertEqual(player.one_turn_after_good_defection_ratio, 5) - self.assertEqual(player.two_turns_after_good_defection_ratio, 0) - self.assertEqual(player.one_turn_after_good_defection_ratio_count, 1) - self.assertEqual(player.two_turns_after_good_defection_ratio_count, 1) - - def test_strategy(self): - - opponent = axl.Bully() - actions = [(C, D), (D, D), (D, C), (C, C), (C, D), (D, D), (D, C), (C, C)] - expected_attrs = { - "num_turns_after_good_defection": 0, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 1, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) - - # Tests whether TitForTat is played given score is below 1.75 - - opponent = axl.Defector() - actions = [(C, D)] + [(D, D)] * 20 - expected_attrs = { - "num_turns_after_good_defection": 0, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 1, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) - - opponent = axl.MockPlayer([C] * 2 + [D] * 8 + [C] * 4) - actions = [(C, C), (C, C)] + [(C, D)] + [(D, D)] * 7 + [(D, C)] + [(C, C)] * 3 - expected_attrs = { - "num_turns_after_good_defection": 0, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 1, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) - - # If score is between 1.75 and 2.25, may cooperate or defect - - opponent = axl.MockPlayer(actions=[D] * 3 + [C] * 4 + [D] * 2) - actions = [(C, D)] + [(D, D)] * 2 + [(D, C)] + [(C, C)] * 3 + [(C, D)] - actions += [(C, D)] # <-- Random - expected_attrs = { - "num_turns_after_good_defection": 0, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 1, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test( - opponent, expected_actions=actions, seed=0, attrs=expected_attrs - ) - - opponent = axl.MockPlayer(actions=[D] * 3 + [C] * 4 + [D] * 2) - actions = [(C, D)] + [(D, D)] * 2 + [(D, C)] + [(C, C)] * 3 + [(C, D)] - actions += [(D, D)] # <-- Random - expected_attrs = { - "num_turns_after_good_defection": 0, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 1, 
- "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test( - opponent, expected_actions=actions, seed=17, attrs=expected_attrs - ) - - """If score is greater than 2.25 either cooperate or defect, - if turn number <= 5; cooperate""" - - opponent = axl.MockPlayer(actions=[C] * 5) - actions = [(C, C)] * 5 - expected_attrs = { - "num_turns_after_good_defection": 0, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 1, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test( - opponent, expected_actions=actions, seed=1, attrs=expected_attrs - ) - - opponent = axl.MockPlayer(actions=[C] * 5) - actions = [(C, C)] * 4 + [(D, C)] - expected_attrs = { - "num_turns_after_good_defection": 1, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 1, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test( - opponent, expected_actions=actions, seed=89, attrs=expected_attrs - ) - - """ Given score per turn is greater than 2.25, - Tranquilizer will never defect twice in a row""" - - opponent = axl.MockPlayer(actions=[C] * 6) - actions = [(C, C)] * 4 + [(D, C), (C, C)] - expected_attrs = { - "num_turns_after_good_defection": 2, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 2, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test( - opponent, expected_actions=actions, seed=89, attrs=expected_attrs - ) - - # Tests cooperation after update_state - - opponent = axl.MockPlayer(actions=[C] * 5) - actions = [(C, C)] * 4 + [(D, C)] + [(C, C)] - expected_attrs = { - "num_turns_after_good_defection": 2, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 2, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test( - opponent, expected_actions=actions, seed=89, attrs=expected_attrs - ) - - # Ensures FD1 values are calculated - - opponent = axl.MockPlayer(actions=[C] * 6) - actions = [(C, C)] * 4 + [(D, C), (C, C)] - expected_attrs = { - "num_turns_after_good_defection": 2, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 2, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test( - opponent, expected_actions=actions, seed=89, attrs=expected_attrs - ) - - # Ensures FD2 values are calculated - - opponent = axl.MockPlayer(actions=[C] * 6) - actions = [(C, C)] * 4 + [(D, C)] + [(C, C)] * 2 - expected_attrs = { - "num_turns_after_good_defection": 0, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 1.5, - "one_turn_after_good_defection_ratio_count": 2, - "two_turns_after_good_defection_ratio_count": 2, - } - self.versus_test( - opponent, expected_actions=actions, seed=89, attrs=expected_attrs - ) - - # Ensures scores are being counted - - opponent = axl.Defector() - actions = [(C, D)] + [(D, D)] * 19 - expected_attrs = { - "num_turns_after_good_defection": 0, - "one_turn_after_good_defection_ratio": 5, - "two_turns_after_good_defection_ratio": 0, - "one_turn_after_good_defection_ratio_count": 1, - "two_turns_after_good_defection_ratio_count": 1, - } - self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) - - -class TestGrofman(TestPlayer): - 
- name = "Second by Grofman" - player = axl.SecondByGrofman - expected_classifier = { - "memory_depth": 8, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Cooperate for the first two rounds - actions = [(C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # Cooperate for the first two rounds, then play tit for tat for 3-7 - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - # Demonstrate Grofman Logic - # Own previous move was C, opponent defected less than 3 times in last 8 - moregrofman_actions = [C] * 7 + [C] - opponent_actions = [C] * 6 + [D] * 2 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = list(zip(moregrofman_actions, opponent_actions)) - self.versus_test(opponent, expected_actions=actions) - - # Own previous move was C, opponent defected 3 or more times in last 8 - moregrofman_actions = ([C] * 3 + [D] * 3 + [C]) + [D] - opponent_actions = ([C] * 2 + [D] * 3 + [C] * 2) + [D] - opponent = axl.MockPlayer(actions=opponent_actions) - actions = list(zip(moregrofman_actions, opponent_actions)) - self.versus_test(opponent, expected_actions=actions) - - # Own previous move was D, opponent defected once or less in last 8 - moregrofman_actions = ([C] * 6 + [D]) + [C] - opponent_actions = ([C] * 5 + [D] * 1 + [C]) + [D] - opponent = axl.MockPlayer(actions=opponent_actions) - actions = list(zip(moregrofman_actions, opponent_actions)) - self.versus_test(opponent, expected_actions=actions) - - # Own previous move was D, opponent defected more than once in last 8 - moregrofman_actions = ([C] * 2 + [D] * 5) + [D] - opponent_actions = ([D] * 7) + [D] - opponent = axl.MockPlayer(actions=opponent_actions) - actions = list(zip(moregrofman_actions, opponent_actions)) - self.versus_test(opponent, expected_actions=actions) - - # Test to make sure logic matches Fortran (discrepancy found 8/23/2017) - opponent = axl.AntiTitForTat() - # Actions come from a match run by Axelrod Fortran using IpdPlayer('k86r') - actions = [ - (C, C), - (C, D), - (D, D), - (D, C), - (C, C), - (C, D), - (D, D), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (C, C), - ] - self.versus_test(opponent, expected_actions=actions) - - # Test to match the Fortran implementation for 30 rounds - opponent = axl.AntiTitForTat() - actions = [ - (C, C), - (C, D), - (D, D), - (D, C), - (C, C), - (C, D), - (D, D), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (C, C), - (C, D), - (C, D), - (C, D), - (C, D), - (D, D), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (C, C), - (C, D), - (C, D), - ] - self.versus_test(opponent, expected_actions=actions) - - # Test to match the Fortran implementation for 60 rounds - opponent = axl.AntiTitForTat() - actions = [ - (C, C), - (C, D), - (D, D), - (D, C), - (C, C), - (C, D), - (D, D), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (C, C), - (C, D), - (C, D), - (C, D), - (C, D), - (D, D), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (C, C), - (C, D), - (C, D), - (C, D), - (C, D), - (D, D), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (C, C), - (C, D), - (C, D), - (C, D), - (C, D), - (D, D), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (C, C), - (C, 
D), - (C, D), - (C, D), - (C, D), - (D, D), - (D, C), - ] - self.versus_test(opponent, expected_actions=actions) - - -class TestKluepfel(TestPlayer): - name = "Second by Kluepfel" - player = axl.SecondByKluepfel - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 # Cooperate forever - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # Since the opponent never plays the same move twice in a row, will respond - # in kind with 70% probability after a cooperation and 60% otherwise, after - # the first couple of turns. - actions = [ - (C, C), - (C, D), # Views first three as the same. - # A random number gets used in each of the first two. - (D, C), - (D, D), - (C, C), - (C, D), - ] - self.versus_test(axl.Alternator(), expected_actions=actions, seed=1) - - actions = [(C, C), (C, D), (C, C), (D, D), (D, C), (C, D)] - self.versus_test(axl.Alternator(), expected_actions=actions, seed=2) - - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] - self.versus_test(axl.Alternator(), expected_actions=actions, seed=3) - - # Now we have to test the detect-random logic, which doesn't pick up - # until after 26 turns. So we need a big sample. - actions = [ - (C, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, C), - (C, C), - (C, D), - (C, C), - (D, D), - (D, C), - (C, C), - (C, D), - (D, D), - (C, D), - (D, D), - (D, C), - (C, C), - (D, C), - (C, C), - (C, D), - (D, D), - (D, C), - (C, D), - (D, C), - (C, C), - (C, D), - # Successfully detects the random opponent for the remaining turns. - (D, D), - (D, D), - (D, D), - (D, C), - (D, D), - (D, C), - (D, D), - (D, C), - (D, D), - (D, C), - (D, C), - (D, D), - (D, D), - (D, C), - (D, C), - (D, C), - (D, C), - (D, D), - (D, C), - (D, C), - (D, C), - (D, C), - (D, D), - ] - self.versus_test(axl.Random(0.5), expected_actions=actions, seed=10) - - -class TestBorufsen(TestPlayer): - name = "Second by Borufsen" - player = axl.SecondByBorufsen - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 # Cooperate forever - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # Tries to cooperate every third time until detecting a defector - actions = ( - [(C, D), (D, D), (D, D), (D, D)] * 6 + [(C, D), (D, D)] + [(D, D)] * 100 - ) - self.versus_test(axl.Defector(), expected_actions=actions) - - # Alternates with additional coop, every sixth turn - # Won't be labeled as random, since 2/3 of opponent's C follow - # player's C - # `flip_next_defect` will get set on the sixth turn, which changes the - # seventh action - # Note that the first two turns of each period of six aren't - # marked as echoes, and the third isn't marked that way until the - # fourth turn. - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] * 20 - self.versus_test(axl.Alternator(), expected_actions=actions) - - # Basically does tit-for-tat against Win-Shift, Lose-Stay D - # After 26 turns, will detect random since half of opponent's C follow - # Cs - # Coming out of it, there will be a new pattern. Then random is detected - # again. 
- actions = ( - [(C, D), (D, C), (C, C)] * 8 - + [(C, D), (D, C)] - + [(D, C)] * 25 - + [(D, C)] - + [(C, C), (C, D), (D, C)] * 8 - + [(D, C)] * 25 - ) - self.versus_test(axl.WinShiftLoseStay(D), expected_actions=actions) - - -class TestCave(TestPlayer): - name = "Second by Cave" - player = axl.SecondByCave - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # It will take until turn 18 to decide to respond D to D - actions = [(C, D)] - actions += [ - (C, D), - (D, D), - (D, D), - (C, D), - (C, D), - (C, D), - (D, D), - (D, D), - (C, D), - (C, D), - (D, D), - (C, D), - (D, D), - (C, D), - (C, D), - (D, D), - (C, D), - ] # Randomly choose - actions += [(D, D)] * 30 # Defect - self.versus_test(axl.Defector(), expected_actions=actions, seed=1) - - # Highly-defective opponent - # It will take until turn 20 to decide to respond D to C - opponent_actions = [D] * 17 + [C, C, C, C] - almost_defector = axl.MockPlayer(actions=opponent_actions) - - actions = [(C, D)] - actions += [ - (C, D), - (D, D), - (D, D), - (C, D), - (C, D), - (C, D), - (D, D), - (D, D), - (C, D), - (C, D), - (D, D), - (C, D), - (D, D), - (C, D), - (C, D), - (D, D), - (C, C), - ] # Randomly choose - actions += [(C, C)] # Coop for a minute - actions += [(D, C), (D, C)] - self.versus_test(almost_defector, expected_actions=actions, seed=1) - - # Here it will take until turn 40 to detect random and defect - actions = [(C, C)] - actions += [ - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (C, C), - (C, D), - (C, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (C, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (C, C), - (C, D), - (C, C), - (C, D), - (C, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - ] # Randomly choose - actions += [ - (D, C), - (C, D), - (D, C), - ] # 17 D have come, so tit for tat for a while - actions += [(D, D), (D, C)] * 100 # Random finally detected - self.versus_test(axl.Alternator(), expected_actions=actions, seed=2) - - -class TestWmAdams(TestPlayer): - name = "Second by WmAdams" - player = axl.SecondByWmAdams - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 # Cooperate forever - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # Will ignore the first four defects - opponent_actions = [D] * 4 + [C] * 100 - defect_four = axl.MockPlayer(actions=opponent_actions) - actions = [(C, D)] * 4 + [(C, C)] * 100 - self.versus_test(defect_four, expected_actions=actions) - - actions = [ - (C, D), - (C, D), - (C, D), - (C, D), - (C, D), - (D, D), - (C, D), - (C, D), - (D, D), - (C, D), - (D, D), - (C, D), - (D, D), - (D, D), - (D, D), - (D, D), - ] - self.versus_test(axl.Defector(), expected_actions=actions, seed=1) - actions = [ - (C, D), - (C, D), - (C, D), - (C, D), - (C, D), - (D, D), - (C, D), - (C, D), - (D, D), - (C, D), - (D, D), - (D, D), - (D, D), - (C, D), - (D, D), - (D, D), - ] - self.versus_test(axl.Defector(), expected_actions=actions, seed=2) - - # After responding to the 
11th D (counted as 10 D), just start cooperating - opponent_actions = [D] * 11 + [C] * 100 - changed_man = axl.MockPlayer(actions=opponent_actions) - actions = [ - (C, D), - (C, D), - (C, D), - (C, D), - (C, D), - (D, D), - (C, D), - (C, D), - (D, D), - (C, D), - (D, D), - (C, C), - ] - actions += [(C, C)] * 99 - self.versus_test(changed_man, expected_actions=actions, seed=1) - - -class TestGraaskampKatzen(TestPlayer): - name = "Second by GraaskampKatzen" - player = axl.SecondByGraaskampKatzen - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 # Cooperate forever - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # GK does not do well against this opponent - opponent_actions = [C, D, D] * 100 - GK_Foil = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C), (C, D), (D, D)] - actions += [(D, C), (C, D), (D, D)] * 2 - actions += [(D, C)] - actions += [(D, D), (D, D), (D, C)] * 20 # Defect here on - self.versus_test(GK_Foil, expected_actions=actions) - - # Fail on second checkpoint - opponent_actions = [C] * 10 + [C, D, D] * 100 - Delayed_GK_Foil = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C)] * 10 - actions += [(C, C), (C, D), (D, D)] - actions += [(D, C), (C, D), (D, D)] * 2 - actions += [(D, C)] - actions += [(D, D), (D, D), (D, C)] * 20 # Defect here on - self.versus_test(Delayed_GK_Foil, expected_actions=actions) - - -class TestWeiner(TestPlayer): - name = "Second by Weiner" - player = axl.SecondByWeiner - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 # Cooperate forever - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, C)] - actions += [(C, D), (D, C)] # Tit-for-Tat - # Opponent's last move was a C with 1 D between - actions += [(C, D)] # Tit-for-Tat. Raise forgiveness flag. - actions += [(C, C)] # Tit-for-Tat. Use forgiveness flag. - # Opponent's last move was a C, but defect_padding counted as 0. - actions += [(C, D), (D, C)] # Tit-for-Tat - # Opponent's last move was a C with 1 D between - actions += [(C, D)] # Tit-for-Tat. Raise forgiveness flag. - actions += [(D, C)] # Tit-for-Tat. Try forgiveness flag. - # This time grudge=20, so the forgiveness flag doesn't work. - actions += [(C, D)] # Tit-for-Tat. - # This is the 5th opponent defect, won't be counted for 2 turns - actions += [(D, C)] # Tit-for-Tat. - actions += [(D, D), (D, C)] * 100 # Defect now on. - self.versus_test(axl.Alternator(), expected_actions=actions) - - # Build an opponent that will cause a wasted flag. - opponent_actions = [C, D, C, C, C, C, D, D] - Flag_Waster_1 = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C), (C, D), (D, C)] - actions += [(C, C)] # Raise flag, like in Alternator - actions += [(C, C)] # Use flag, but don't change outcome - actions += [(C, C)] - actions += [(C, D)] # Don't raise flag - actions += [(D, D)] # Don't use flag - self.versus_test(Flag_Waster_1, expected_actions=actions) - - # Demonstrate that grudge is not incremented on wasted flag. 
- opponent_actions = [C, D, C, C, C, C, D, C, D, C] - Flag_Waster_2 = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C), (C, D), (D, C)] - actions += [(C, C)] # Raise flag, like in Alternator - actions += [(C, C)] # Use flag, but don't change outcome - actions += [(C, C), (C, D), (D, C)] - actions += [(C, D)] # Raise flag - actions += [(C, C)] # Use flag to change outcome - self.versus_test(Flag_Waster_2, expected_actions=actions) - - # Show grudge passing over time - opponent_actions = [C, D, C, D, C] + [C] * 11 + [C, D, C, D, C] - Time_Passer = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C), (C, D), (D, C)] - actions += [(C, D)] # Raise flag - actions += [(C, C)] # Use flag to change outcome - actions += [(C, C)] * 11 - actions += [(C, C), (C, D), (D, C)] - actions += [(C, D)] # Raise flag - actions += [(C, C)] # Use flag to change outcome - self.versus_test(Time_Passer, expected_actions=actions) - - -class TestHarrington(TestPlayer): - name = "Second by Harrington" - player = axl.SecondByHarrington - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Build an opponent that will cooperate the first 36 turns and - # defect on the 37th turn - opponent_actions = [C] * 36 + [D] + [C] * 100 - Defect37 = axl.MockPlayer(actions=opponent_actions) - # Activate the Fair-weather flag - actions = [(C, C)] * 36 + [(D, D)] + [(C, C)] * 100 - self.versus_test( - Defect37, expected_actions=actions, attrs={"mode": "Fair-weather"} - ) - - # Defect on 37th turn to activate Fair-weather, then later defect to - # exit Fair-weather - opponent_actions = [C] * 36 + [D] + [C] * 100 + [D] + [C] * 4 - Defect37_big = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C)] * 36 + [(D, D)] + [(C, C)] * 100 - actions += [(C, D)] - # Immediately exit Fair-weather - actions += [(D, C), (C, C), (D, C), (C, C)] - self.versus_test( - Defect37_big, expected_actions=actions, seed=2, attrs={"mode": "Normal"} - ) - actions = [(C, C)] * 36 + [(D, D)] + [(C, C)] * 100 - actions += [(C, D)] - # Immediately exit Fair-weather - actions += [(D, C), (C, C), (C, C), (C, C)] - self.versus_test( - Defect37_big, expected_actions=actions, seed=1, attrs={"mode": "Normal"} - ) - - # Opponent defects on 1st turn - opponent_actions = [D] + [C] * 46 - Defect1 = axl.MockPlayer(actions=opponent_actions) - # Tit-for-Tat on the first, but no streaks, no Fair-weather flag. - actions = [(C, D), (D, C)] + [(C, C)] * 34 + [(D, C)] - # Two cooperations scheduled after the 37-turn defection - actions += [(C, C)] * 2 - # TFT twice, then random number yields a DCC combo. - actions += [(C, C)] * 2 - actions += [(D, C), (C, C), (C, C)] - # Don't draw next random number until now. Again DCC. - actions += [(D, C), (C, C), (C, C)] - self.versus_test(Defect1, expected_actions=actions, seed=2) - - # Defection on turn 37 by opponent doesn't have an effect here - opponent_actions = [D] + [C] * 35 + [D] + [C] * 10 - Defect1_37 = axl.MockPlayer(actions=opponent_actions) - actions = [(C, D), (D, C)] + [(C, C)] * 34 + [(D, D)] - actions += [(C, C)] * 2 - actions += [(C, C)] * 2 - actions += [(D, C), (C, C), (C, C)] - actions += [(D, C), (C, C), (C, C)] - self.versus_test(Defect1_37, expected_actions=actions, seed=2) - - # However a defect on turn 38 would be considered a burn. 
- opponent_actions = [D] + [C] * 36 + [D] + [C] * 9 - Defect1_38 = axl.MockPlayer(actions=opponent_actions) - # Tit-for-Tat on the first, but no streaks, no Fair-weather flag. - actions = [(C, D), (D, C)] + [(C, C)] * 34 + [(D, C)] - # Two cooperations scheduled after the 37-turn defection - actions += [(C, D), (C, C)] - # TFT from then on, since burned - actions += [(C, C)] * 8 - self.versus_test( - Defect1_38, expected_actions=actions, seed=2, attrs={"burned": True} - ) - - # Use alternator to test parity flags. - actions = [(C, C), (C, D)] - # Even streak is set to 2, one for the opponent's defect and one for - # our defect. - actions += [(D, C)] - actions += [(C, D)] - # Even streak is increased two more. - actions += [(D, C)] - actions += [(C, D)] - # Opponent's defect increments even streak to 5, so we cooperate. - actions += [(C, C)] - actions += [(C, D), (D, C), (C, D), (D, C), (C, D)] - # Another 5 streak - actions += [(C, C)] - # Repeat - actions += [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] * 3 - # Repeat. Notice that the last turn is the 37th move, but we do not - # defect. - actions += [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - # Test for parity limit shortening. - opponent_actions = [D, C] * 1000 - AsyncAlternator = axl.MockPlayer(actions=opponent_actions) - actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] * 6 - # Defect on 37th move - actions += [(D, D)] - actions += [(C, C)] - # This triggers the burned flag. We should just Tit-for-Tat from here. - actions += [(C, D)] - actions += [(D, C), (C, D), (D, C), (C, D), (C, C)] - # This is the seventh time we've hit the limit. So do it once more. - actions += [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] - # Now hit the limit sooner - actions += [(C, D), (D, C), (C, D), (C, C)] * 5 - self.versus_test( - AsyncAlternator, expected_actions=actions, attrs={"parity_limit": 3} - ) - - # Use a Defector to test the 20-defect streak - actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] - # Now the two parity flags are used - actions += [(C, D), (C, D)] - # Repeat - actions += [(D, D), (D, D), (D, D), (D, D), (C, D), (C, D)] * 2 - actions += [(D, D), (D, D)] - # 20 D have passed (the first isn't recorded) - actions += [(D, D)] * 100 - # The defect streak will always be detected from here on, because it - # doesn't reset. This logic comes before parity streaks or the turn- - # based logic. - self.versus_test( - axl.Defector(), - expected_actions=actions, - attrs={"recorded_defects": 119}, - ) - - # Detect random - expected_actions = [ - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (C, D), - (D, D), - (D, C), - (C, D), - (D, C), - (C, C), - (C, D), - (D, D), - (D, C), - (C, D), - (D, D), - (D, C), - (C, C), - (C, D), - (D, C), - (C, D), - (D, D), - (D, C), - (C, D), - (D, D), - (D, D), - (C, D), - (D, C), - (C, C), - ] - # Enter defect mode.
- expected_actions += [(D, C)] - random.seed(10) - player = self.player() - match = axl.IpdMatch((player, axl.Random()), turns=len(expected_actions)) - # The history matrix will be [[0, 2], [5, 6], [3, 6], [4, 2]] - actions = match.play() - self.assertEqual(actions, expected_actions) - self.assertAlmostEqual( - player.calculate_chi_squared(len(expected_actions)), 2.395, places=3 - ) - - # Come back out of defect mode - opponent_actions = [ - D, - C, - D, - C, - D, - D, - D, - C, - D, - C, - C, - D, - D, - C, - D, - D, - C, - C, - D, - C, - D, - D, - C, - D, - D, - D, - D, - C, - C, - C, - ] - opponent_actions += [D] * 16 - Rand_Then_Def = axl.MockPlayer(actions=opponent_actions) - actions = [ - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (C, D), - (D, D), - (D, C), - (C, D), - (D, C), - (C, C), - (C, D), - (D, D), - (D, C), - (C, D), - (D, D), - (D, C), - (C, C), - (C, D), - (D, C), - (C, D), - (D, D), - (D, C), - (C, D), - (D, D), - (D, D), - (C, D), - (D, C), - (C, C), - ] - actions += [(D, C)] - # Enter defect mode. - actions += [(D, D)] * 14 - # Mutual defect for a while, then exit Defect mode with two coops - actions += [(C, D)] * 2 - self.versus_test( - Rand_Then_Def, - expected_actions=actions, - seed=10, - attrs={"mode": "Normal", "was_defective": True}, - ) - - -class TestTidemanAndChieruzzi(TestPlayer): - name = "Second by Tideman and Chieruzzi" - player = axl.SecondByTidemanAndChieruzzi - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"game"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] + [(D, D)] * 8 - self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"score_to_beat_inc": 5} - ) - - actions = [(C, D)] + [(D, D)] * 8 - # On tenth turn, try a fresh start - actions += [(C, D), (C, D)] + [(D, D)] * 2 - self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"last_fresh_start": 11} - ) - - actions = [(C, C), (C, D)] - # Scores and score_to_beat variables are a turn behind - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={ - "current_score": 3, - "opponent_score": 3, - "score_to_beat": 0, - "score_to_beat_inc": 0, - }, - ) - actions += [(D, C), (C, D)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={ - "current_score": 8, - "opponent_score": 8, - "score_to_beat": 0, - "score_to_beat_inc": 5, - }, - ) - actions += [(D, C), (D, D)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={ - "current_score": 13, - "opponent_score": 13, - "score_to_beat": 5, - "score_to_beat_inc": 10, - }, - ) - actions += [(D, C), (D, D)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={ - "current_score": 19, - "opponent_score": 14, - "score_to_beat": 15, - "score_to_beat_inc": 15, - }, - ) - actions += [(D, C), (D, D)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={ - "current_score": 25, - "opponent_score": 15, - "score_to_beat": 30, - "score_to_beat_inc": 20, - }, - ) - - # Build an opponent who will cause us to consider a Fresh Start, but - # will fail the binomial test. 
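(The score_to_beat / score_to_beat_inc pairs asserted above follow a simple recurrence, reproduced in the minimal sketch below; the recurrence is inferred from the attrs alone, not quoted from the strategy source.)

# Sketch: at each check the threshold grows by the old increment and the
# increment grows by 5; this reproduces the (score_to_beat,
# score_to_beat_inc) pairs asserted against Alternator above.
stb, inc = 0, 0
pairs = []
for _ in range(5):
    pairs.append((stb, inc))
    stb, inc = stb + inc, inc + 5
assert pairs == [(0, 0), (0, 5), (5, 10), (15, 15), (30, 20)]

The fresh-start failure case follows.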
- opponent_actions = [C] * 5 + [D] * 5 - C5D5_player = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C)] * 5 + [(C, D)] + [(D, D)] * 3 - actions += [(D, D)] # No Defection here means no Fresh Start. - self.versus_test(C5D5_player, expected_actions=actions) - - -class TestGetzler(TestPlayer): - name = "Second by Getzler" - player = axl.SecondByGetzler - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (C, D), (D, D), (D, D), (D, D)] - self.versus_test( - axl.Defector(), - expected_actions=actions, - seed=1, - attrs={"flack": 15.0 / 16.0}, - ) - - actions = [(C, C), (C, D), (C, C), (C, D), (D, C), (C, D)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - seed=4, - attrs={"flack": 5.0 / 16.0}, - ) - - -class TestLeyvraz(TestPlayer): - name = "Second by Leyvraz" - player = axl.SecondByLeyvraz - expected_classifier = { - "memory_depth": 3, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (C, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions, seed=1) - actions = [(C, D), (D, D), (D, D), (C, D)] - self.versus_test(axl.Defector(), expected_actions=actions, seed=2) - - actions = [ - (C, D), - (C, C), - (D, C), - (C, D), - (D, C), - (D, D), - (C, D), - (D, C), - (C, D), - ] - self.versus_test( - axl.SuspiciousTitForTat(), expected_actions=actions, seed=1 - ) - - actions = [(C, C), (C, D), (D, C)] + [(D, D), (C, C)] * 3 - self.versus_test(axl.Alternator(), expected_actions=actions, seed=2) - actions = [(C, C), (C, D), (C, C)] + [(D, D), (C, C)] * 3 - self.versus_test(axl.Alternator(), expected_actions=actions, seed=3) - - -class TestWhite(TestPlayer): - name = "Second by White" - player = axl.SecondByWhite - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 30 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] * 10 + [(D, D)] * 20 - self.versus_test(axl.Defector(), expected_actions=actions) - - actions = [ - (C, D), - (C, D), - (C, C), - (C, C), - (C, C), - (C, D), - (C, C), - (C, D), - (C, C), - (C, D), - (C, C), - (C, D), - (C, D), - (D, C), - (C, D), - (D, D), - (D, C), - (C, D), - (D, D), - (D, C), - ] - self.versus_test(axl.Random(0.5), expected_actions=actions, seed=6) - actions = [ - (C, C), - (C, D), - (C, D), - (C, C), - (C, C), - (C, C), - (C, C), - (C, D), - (C, D), - (C, D), - (C, D), - (D, D), - (D, C), - (C, C), - (C, C), - (C, D), - (C, C), - (C, D), - (C, C), - (C, D), - ] - self.versus_test(axl.Random(0.5), expected_actions=actions, seed=12) - - -class TestBlack(TestPlayer): - name = "Second by Black" - player = axl.SecondByBlack - expected_classifier = { - "memory_depth": 5, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, 
- "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 30 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] * 5 - actions += [ - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (C, D), - ] - self.versus_test(axl.Defector(), expected_actions=actions, seed=1) - - actions = [(C, D)] * 5 - actions += [ - (D, D), - (C, D), - (D, D), - (D, D), - (D, D), - (C, D), - (D, D), - (D, D), - (D, D), - (D, D), - ] - self.versus_test(axl.Defector(), expected_actions=actions, seed=15) - - -class TestRichardHufford(TestPlayer): - name = "Second by RichardHufford" - player = axl.SecondByRichardHufford - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 19 + [(D, C), (C, C), (C, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs={"streak_needed": 14} - ) - - actions = [(C, C)] * 19 + [(D, C), (C, C)] - actions += [ - (C, C) - ] # This is the first Cooperation that gets counted on the new streak - actions += [(C, C)] * 13 + [(D, C), (C, C), (C, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs={"streak_needed": 11} - ) - - opponent_actions = [C] * 20 + [D] - BoredCooperator = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C)] * 19 + [(D, C), (C, D), (C, C)] - self.versus_test( - BoredCooperator, expected_actions=actions, attrs={"streak_needed": 31} - ) - - actions = [(C, D)] # "Disagreement" - actions += [(D, C)] # TFT. Disagreement - actions += [(C, C)] # TFT. - actions += [(C, D)] # TFT. Disagreement - actions += [(D, C)] # Three of last four are disagreements. - actions += [(C, C)] # TFT. Disagreement - actions += [(D, D)] # Three of last four are disagreements. Disagreement - actions += [(D, D)] # Three of last four are disagreements. - actions += [(D, D)] # Now there are 5/9 disagreements, so Defect. - self.versus_test( - axl.WinShiftLoseStay(), - expected_actions=actions, - attrs={"num_agreements": 5}, - ) - - -class TestYamachi(TestPlayer): - name = "Second by Yamachi" - player = axl.SecondByYamachi - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 100 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [ - (C, D) - ] * 2 # Also Cooperate in first two moves (until we update `count_them_us_them`.) - actions += [ - (C, D) - ] # them_three_ago defaults to C, so that (C, C, *) gets updated, then (D, C, *) get checked. - # It's actually impossible to Defect on the third move. - actions += [(D, D)] # (D, C, *) gets updated, then checked. - actions += [(C, D)] # (D, C, *) gets updated, but (D, D, *) checked. - actions += [(D, D)] * 30 # (D, D, *) gets updated and checked from here on. - self.versus_test(axl.Defector(), expected_actions=actions) - - actions = [(C, C), (C, D)] - actions += [(C, C)] # Increment (C, C, C). Check (C, C, *). Cooperate. - # Reminder that first C is default value and last C is opponent's first move. - actions += [(C, D)] # Increment (C, C, D). Check (D, C, *) = 0. Cooperate. - actions += [(C, C)] # Increment (D, C, C). Check (C, C, *) = 0. 
Cooperate. - # There is one Defection and one Cooperation in this scenario, - # but the Cooperation was due to a default value only. We can see where this is going. - actions += [(C, D)] # Increment (C, C, D). Check (D, C, *) = 1. Cooperate. - actions += [(D, C)] # Increment (D, C, C). Check (C, C, *) = -1. Defect. - actions += [ - (C, D) - ] # Increment (C, C, D). Check (D, D, *) = 0 (New). Cooperate. - actions += [(D, C)] # Increment (D, D, C). Check (C, C, *) < 0. Defect. - actions += [(C, D)] # Increment (C, C, D). Check (D, D, *) > 0. Cooperate. - actions += [(D, C), (C, D)] * 15 # This pattern continues for a while. - actions += [ - (D, C), - (D, D), - ] * 30 # Defect from turn 41 on, since near 50% Defections. - self.versus_test(axl.Alternator(), expected_actions=actions) - - # Rip-off is the most interesting interaction. - actions = [ - (C, D), - (C, C), - (C, D), - (D, C), - (C, C), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - ] - my_dict = { - (C, C, C): 1, - (C, C, D): 18, - (C, D, C): 1, - (C, D, D): 0, - (D, C, C): 1, - (D, C, D): 0, - (D, D, C): 17, - (D, D, D): 0, - } - RipoffPlayer = axl.Ripoff() - self.versus_test( - RipoffPlayer, - expected_actions=actions, - attrs={"count_them_us_them": my_dict}, - ) - self.assertEqual( - RipoffPlayer.defections, 19 - ) # Next turn, `portion_defect` = 0.4756 - - # The pattern (C, D), (D, C) will continue indefintely unless overriden. - actions += [(D, D)] # Next turn, `portion_defect` = 0.4881 - actions += [(D, D)] # Next turn, `portion_defect` = 0.5 - actions += [(D, D)] # Next turn, `portion_defect` = 0.5114 - actions += [(D, D)] # Next turn, `portion_defect` = 0.5222 - actions += [(D, D)] # Next turn, `portion_defect` = 0.5326 - actions += [(D, D)] # Next turn, `portion_defect` = 0.5426 - actions += [(D, D)] # Next turn, `portion_defect` = 0.5521 - actions += [ - (D, D), - (C, D), - (D, C), - (C, D), - ] # Takes a turn to fall back into the cycle. 
- self.versus_test(axl.Ripoff(), expected_actions=actions) - - -class TestColbert(TestPlayer): - name = "Second by Colbert" - player = axl.SecondByColbert - expected_classifier = { - "memory_depth": 4, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 5 + [(D, C)] + [(C, C)] * 30 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] * 5 + [(D, D)] + [(C, D)] * 2 - actions += [(D, D), (D, D), (C, D), (C, D)] * 20 - self.versus_test(axl.Defector(), expected_actions=actions) - - opponent_actions = [C] * 8 + [C, C, D, C, C, C, C, C] - OddBall = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C)] * 5 + [(D, C)] + [(C, C)] * 4 - actions += [(C, D)] + [(D, C), (D, C), (C, C), (C, C)] + [(C, C)] - self.versus_test(OddBall, expected_actions=actions) - - -class TestMikkelson(TestPlayer): - name = "Second by Mikkelson" - player = axl.SecondByMikkelson - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 30 - self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs={"credit": 8} - ) - - actions = [(C, D), (C, D), (C, D), (C, D)] - self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"credit": 1} - ) - # Defect then reset to 4 - actions += [(D, D)] - self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"credit": 4} - ) - # Repeat - actions += [(C, D), (D, D)] * 2 - self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"credit": 4} - ) - # With ten turns passed, keep defecting now - actions += [(C, D), (D, D)] - self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"credit": 0} - ) - # With ten turns passed, keep defecting now - actions += [(D, D)] * 30 - self.versus_test( - axl.Defector(), expected_actions=actions, attrs={"credit": -7} - ) - - actions = [(C, D), (C, D), (C, C)] - self.versus_test( - axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 3} - ) - actions += [(C, D), (C, D)] - self.versus_test( - axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 2} - ) - actions += [(D, C)] - self.versus_test( - axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 4} - ) - actions += [(C, D)] - self.versus_test( - axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 5} - ) - actions += [(C, D)] - self.versus_test( - axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 3} - ) - - opponent_actions = [C] * 100 + [D] * 10 - Change_of_Heart = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C)] * 100 + [(C, D)] * 4 - self.versus_test(Change_of_Heart, expected_actions=actions, attrs={"credit": 2}) - Change_of_Heart = axl.MockPlayer(actions=opponent_actions) - actions += [(C, D)] * 2 - self.versus_test( - Change_of_Heart, expected_actions=actions, attrs={"credit": -2} - ) - # Still Cooperate, because Defect rate is low - -class TestRowsam(TestPlayer): - name = "Second by Rowsam" - player = axl.SecondByRowsam - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set("game"), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def 
test_strategy(self): - # Should always cooperate with Cooperator - actions = [(C, C)] * 100 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # Against a Defector should eventually enter Defect mode - actions = [(C, D)] * 5 - actions += [(D, D), (C, D), (D, D)] # Do a Coop-Def cycle - self.versus_test(axl.Defector(), expected_actions=actions, attrs={ - "distrust_points": 5}) - actions += [(C, D)] * 3 # Continue for now - actions += [(D, D)] * 100 # Now Defect mode - self.versus_test(axl.Defector(), expected_actions=actions, attrs={ - "distrust_points": 10, "mode": "Defect"}) - - # Test specific score scenarios - # 5 Defects - opponent_actions = [D] * 5 + [C] * 100 - custom_opponent = axl.MockPlayer(actions=opponent_actions) - actions = [(C, D)] * 5 - actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 5, "current_score": 0}) - - # 3 Defects - opponent_actions = [D] * 3 + [C] * 100 - custom_opponent = axl.MockPlayer(actions=opponent_actions) - actions = [(C, D)] * 3 - actions += [(C, C)] * 2 - actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 3, "current_score": 6}) - - # 2 Defects - opponent_actions = [D] * 2 + [C] * 100 - custom_opponent = axl.MockPlayer(actions=opponent_actions) - actions = [(C, D)] * 2 - actions += [(C, C)] * 3 - actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 2, "current_score": 9}) - - # 1 Defect - opponent_actions = [D] * 1 + [C] * 100 - custom_opponent = axl.MockPlayer(actions=opponent_actions) - actions = [(C, D)] * 1 - actions += [(C, C)] * 4 - actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 1, "current_score": 12}) - - # Test that some distrust_points wear off. - opponent_actions = [D] * 3 + [C] * 100 - custom_opponent = axl.MockPlayer(actions=opponent_actions) - actions = [(C, D)] * 3 - actions += [(C, C)] * 2 - actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 3, "current_score": 6}) - custom_opponent = axl.MockPlayer(actions=opponent_actions) - actions += [(C, C), (D, C)] # Complete Coop-Def cycle - actions += [(C, C)] * 3 - actions += [(D, C)] - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 4, "current_score": 28}) - custom_opponent = axl.MockPlayer(actions=opponent_actions) - actions += [(C, C), (D, C)] # Complete Coop-Def cycle - actions += [(C, C)] * 4 # No defect or cycle this time. - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 3, "current_score": 50}) # One point wears off. 
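(The current_score values asserted in the scenarios above follow from the default game matrix; a minimal arithmetic check, assuming R=3 for mutual cooperation, S=0 for cooperating into a defection, and that the score attribute is tallied before the sixth turn's payoff is added.)

# Each of the n opening defects scores S=0 for us; the remaining turns up
# to turn 5 are mutual cooperation at R=3 each.
def score_after_five_turns(n_opening_defects):
    return 3 * (5 - n_opening_defects)

assert [score_after_five_turns(n) for n in (5, 3, 2, 1)] == [0, 6, 9, 12]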
- custom_opponent = axl.MockPlayer(actions=opponent_actions) - actions += [(C, C)] * 18 - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 2}) # Second point wears off - custom_opponent = axl.MockPlayer(actions=opponent_actions) - actions += [(C, C)] * 18 - self.versus_test(custom_opponent, expected_actions=actions, attrs={ - "distrust_points": 2}) # But no more - - -class TestAppold(TestPlayer): - name = "Second by Appold" - player = axl.SecondByAppold - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Should cooperate 100% of the time with the cooperator - actions = [(C, C)] * 100 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - opponent = axl.Defector() - # Cooperate always for the first 4 turns - actions = [(C, D)] * 4 - # Should cooperate because we forgive the first_opp_def after the fourth - # turn. - actions += [(C, D)] - # Own move two turns ago is C, so D. - actions += [(D, D)] - # Then defect most of the time, depending on the random number. We - # don't defect 100% of the time, because of the way that we initialize - # opp_c_after_x. - actions += [(D, D), - (C, D), - (D, D), - (D, D), # C can never be two moves after a C. - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (C, D), - (C, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (C, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (C, D), - (C, D), - (D, D), - (D, D)] - self.versus_test(opponent, expected_actions=actions, seed=1, - attrs={"first_opp_def": True}) - - # An opponent who defects for a long time, then tries cooperating - opponent_actions = [C] * 30 + [D] + [C] * 10 - MostlyCooperates = axl.MockPlayer(actions=opponent_actions) - # Cooperate always at first - actions = [(C, C)] * 30 - # The opponent defects once - actions += [(C, D)] - # But we forgive it. - actions += [(C, C)] * 10 - self.versus_test(MostlyCooperates, expected_actions=actions) - - opponent = axl.CyclerDC() - # First three opponent actions get counted as reactions to C. Fourth - # action will get counted on next turn. - actions = [(C, D), (C, C), (C, D), (C, C)] - self.versus_test(opponent, expected_actions=actions, - attrs={"opp_c_after_x": {C: 1, D: 1}, - "total_num_of_x": {C: 3, D: 1}}) - # Will cooperate 50% of the time - actions += [(C, D)] - self.versus_test(opponent, expected_actions=actions, - attrs={"opp_c_after_x": {C: 2, D: 1}, - "total_num_of_x": {C: 4, D: 1}, - "first_opp_def": False}, seed=100) - # Always cooperate, because we forgive the first defect - actions += [(C, C)] - self.versus_test(opponent, expected_actions=actions, - attrs={"first_opp_def": True}, seed=100) - - # Against a random opponent, will respond mostly randomly too.
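(The 50% figure in the CyclerDC scenario above can be checked directly from the asserted attrs; keying the probability on our own move two turns ago is a reading of those numbers and of the comments above, not a quote of the strategy source. The expected pairs against Random(0.5) then follow.)

# Sketch: cooperate with probability
# opp_c_after_x[x] / total_num_of_x[x], where x is our move two turns ago.
opp_c_after_x = {C: 2, D: 1}
total_num_of_x = {C: 4, D: 1}
x = C  # our move two turns ago
assert opp_c_after_x[x] / total_num_of_x[x] == 0.5  # "Will cooperate 50% of the time"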
- actions = [(C, C), - (C, C), - (C, D), - (C, C), - (C, C), - (C, D), - (C, C), - (C, C), - (C, C), - (D, C), - (C, D), - (D, D), - (C, D), - (C, D), - (C, C), - (C, C), - (D, C), - (C, D), - (D, D), - (C, C), - (C, D), - (C, C), - (C, C), - (C, D), - (D, C), - (C, D), - (D, D), - (C, D), - (C, C), - (D, C)] - self.versus_test(axl.Random(0.5), expected_actions=actions, seed=7) - - diff --git a/axelrod/ipd/tests/strategies/test_backstabber.py b/axelrod/ipd/tests/strategies/test_backstabber.py deleted file mode 100644 index 1f580c9cf..000000000 --- a/axelrod/ipd/tests/strategies/test_backstabber.py +++ /dev/null @@ -1,171 +0,0 @@ -"""Tests for BackStabber and DoubleCrosser.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestBackStabber(TestPlayer): - - name = "BackStabber: (D, D)" - player = axl.BackStabber - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_defects_after_four_defections(self): - # Forgives three defections - defector_actions = [(C, D), (C, D), (C, D), (C, D), (D, D), (D, D)] - self.versus_test( - axl.Defector(), - expected_actions=defector_actions, - match_attributes={"length": 200}, - ) - alternator_actions = [(C, C), (C, D)] * 4 + [(D, C), (D, D)] * 2 - self.versus_test( - axl.Alternator(), - expected_actions=alternator_actions, - match_attributes={"length": 200}, - ) - - def test_defects_on_last_two_rounds_by_match_len(self): - actions = [(C, C)] * 198 + [(D, C), (D, C)] - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - match_attributes={"length": 200}, - ) - actions = [(C, C)] * 10 + [(D, C), (D, C)] - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - match_attributes={"length": 12}, - ) - # Test that exceeds tournament length. - actions = [(C, C)] * 198 + [(D, C), (D, C), (C, C), (C, C)] - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - match_attributes={"length": 200}, - ) - # But only if the tournament is known. - actions = [(C, C)] * 202 - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - match_attributes={"length": -1}, - ) - - -class TestDoubleCrosser(TestBackStabber): - """ - Behaves like BackStabber except when its alternate strategy is triggered. - The alternate strategy is triggered when opponent did not defect in the - first 7 rounds, and 8 <= the current round <= 180. - """ - - name = "DoubleCrosser: (D, D)" - player = axl.DoubleCrosser - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"length"}, - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_when_alt_strategy_is_triggered(self): - """ - The alternate strategy is if opponent's last two plays were defect, - then defect. Otherwise cooperate. 
- """ - starting_cooperation = [C] * 7 - starting_rounds = [(C, C)] * 7 - - opponent_actions = starting_cooperation + [D, D, C, D] - expected_actions = starting_rounds + [(C, D), (C, D), (D, C), (C, D)] - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=expected_actions, - match_attributes={"length": 200}, - ) - - opponent_actions = starting_cooperation + [D, D, D, D, C, D] - expected_actions = starting_rounds + [ - (C, D), - (C, D), - (D, D), - (D, D), - (D, C), - (C, D), - ] - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=expected_actions, - match_attributes={"length": 200}, - ) - - def test_starting_defect_keeps_alt_strategy_from_triggering(self): - opponent_actions_suffix = [C, D, C, D, D] + 3 * [C] - expected_actions_suffix = [(C, C), (C, D), (C, C), (C, D), (C, D)] + 3 * [ - (D, C) - ] - - defects_on_first = [D] + [C] * 6 - defects_on_first_actions = [(C, D)] + [(C, C)] * 6 - actions = defects_on_first + opponent_actions_suffix - expected_actions = defects_on_first_actions + expected_actions_suffix - self.versus_test( - axl.MockPlayer(actions=actions), - expected_actions=expected_actions, - match_attributes={"length": 200}, - ) - - defects_in_middle = [C, C, C, D, C, C, C] - defects_in_middle_actions = [ - (C, C), - (C, C), - (C, C), - (C, D), - (C, C), - (C, C), - (C, C), - ] - actions = defects_in_middle + opponent_actions_suffix - expected_actions = defects_in_middle_actions + expected_actions_suffix - self.versus_test( - axl.MockPlayer(actions=actions), - expected_actions=expected_actions, - match_attributes={"length": 200}, - ) - - defects_on_last = [C] * 6 + [D] - defects_on_last_actions = [(C, C)] * 6 + [(C, D)] - actions = defects_on_last + opponent_actions_suffix - expected_actions = defects_on_last_actions + expected_actions_suffix - self.versus_test( - axl.MockPlayer(actions=actions), - expected_actions=expected_actions, - match_attributes={"length": 200}, - ) - - def test_alt_strategy_stops_after_round_180(self): - one_eighty_opponent_actions = [C] * 8 + [C, D] * 86 - one_eighty_expected_actions = [(C, C)] * 8 + [(C, C), (C, D)] * 86 - opponent_actions = one_eighty_opponent_actions + [C] * 6 - expected_actions = one_eighty_expected_actions + [(D, C)] * 6 - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=expected_actions, - match_attributes={"length": 200}, - ) diff --git a/axelrod/ipd/tests/strategies/test_better_and_better.py b/axelrod/ipd/tests/strategies/test_better_and_better.py deleted file mode 100644 index f07060975..000000000 --- a/axelrod/ipd/tests/strategies/test_better_and_better.py +++ /dev/null @@ -1,94 +0,0 @@ -"""Tests for the BetterAndBetter strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestBetterAndBetter(TestPlayer): - - name = "Better and Better" - player = axl.BetterAndBetter - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - """Tests that the strategy gives expected behaviour.""" - self.versus_test( - axl.Defector(), - expected_actions=[ - (D, D), - (D, D), - (D, D), - (D, D), - (C, D), - (D, D), - (D, D), - (D, D), - (D, D), - ], - seed=6, - ) - self.versus_test( - axl.Cooperator(), - expected_actions=[ - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - (D, C), - 
(D, C), - (D, C), - ], - seed=8, - ) - self.versus_test( - axl.Defector(), - expected_actions=[ - (C, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - ], - seed=1514, - ) - actions = [] - for index in range(200): - if index in [ - 64, - 79, - 91, - 99, - 100, - 107, - 111, - 119, - 124, - 127, - 137, - 141, - 144, - 154, - 192, - 196, - ]: - actions.append((C, D)) - else: - actions.append((D, D)) - self.versus_test(axl.Defector(), expected_actions=actions, seed=8) diff --git a/axelrod/ipd/tests/strategies/test_bush_mosteller.py b/axelrod/ipd/tests/strategies/test_bush_mosteller.py deleted file mode 100644 index 2cc881a57..000000000 --- a/axelrod/ipd/tests/strategies/test_bush_mosteller.py +++ /dev/null @@ -1,77 +0,0 @@ -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestBushMostellar(TestPlayer): - - name = "Bush Mosteller: 0.5, 0.5, 3.0, 0.5" - player = axl.BushMosteller - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (D, C), (D, C)] - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - attrs={"_stimulus": 1}, - seed=1, - ) - - # Making sure probabilities changes following payoffs - actions = [(C, C), (D, D)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={"_stimulus": 0.4, "_c_prob": 0.6, "_d_prob": 0.5}, - seed=1, - ) - - actions = [(C, D), (D, D), (D, D)] - self.versus_test( - axl.Defector(), - expected_actions=actions, - attrs={ - "_stimulus": -0.20000000000000004, - "_c_prob": 0.375, - "_d_prob": 0.45, - }, - seed=1, - ) - - # Testing that stimulus never goes under -1 - actions = [(C, C), (D, C), (D, C)] - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - attrs={"_stimulus": -1}, - init_kwargs={"aspiration_level_divider": 0.1}, - seed=1, - ) - - # Ensures that the player will never play C or D if his probability is equal to 0 - actions = [(C, C)] * 100 - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - init_kwargs={"d_prob": 0.0}, - seed=1, - ) - - actions = [(D, C)] * 100 - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - init_kwargs={"c_prob": 0.0}, - seed=1, - ) diff --git a/axelrod/ipd/tests/strategies/test_calculator.py b/axelrod/ipd/tests/strategies/test_calculator.py deleted file mode 100644 index 938cfc2d4..000000000 --- a/axelrod/ipd/tests/strategies/test_calculator.py +++ /dev/null @@ -1,166 +0,0 @@ -"""Tests for Calculator strategy.""" - -import axelrod as axl -from axelrod.ipd._strategy_utils import detect_cycle - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestCalculator(TestPlayer): - - name = "Calculator" - player = axl.Calculator - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_twenty_rounds_joss_then_defects_for_cyclers(self): - """Uses axelrod.strategies.axelrod_first.Joss strategy for first 20 rounds""" - seed = 2 - flip_indices = [1, 3] - twenty_alternator_actions = [C, D] * 10 - twenty_test_actions = get_joss_strategy_actions( - twenty_alternator_actions, flip_indices - ) - - expected_actions = 
twenty_test_actions + [(D, C), (D, D), (D, C), (D, D)] - self.versus_test( - axl.Alternator(), expected_actions=twenty_test_actions, seed=seed - ) - self.versus_test( - axl.Alternator(), expected_actions=expected_actions, seed=seed - ) - - def test_twenty_rounds_joss_then_tit_for_tat_for_non_cyclers(self): - """Uses axelrod.strategies.axelrod_first.Joss strategy for first 20 rounds""" - seed = 2 - flip_indices = [1, 2] - - twenty_non_cyclical_actions = [ - C, - C, - D, - C, - C, - D, - C, - C, - C, - D, - C, - C, - C, - C, - D, - C, - C, - C, - C, - C, - ] - twenty_test_actions = get_joss_strategy_actions( - twenty_non_cyclical_actions, flip_indices - ) - - subsequent_opponent_actions = [D, C, D, C, D, C, D, C] - subsequent_test_actions = [ - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - (C, D), - (D, C), - ] - - opponent_actions = twenty_non_cyclical_actions + subsequent_opponent_actions - test_actions = twenty_test_actions + subsequent_test_actions - self.versus_test( - axl.MockPlayer(actions=twenty_non_cyclical_actions), - expected_actions=twenty_test_actions, - seed=seed, - ) - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=test_actions, - seed=seed, - ) - - def test_edge_case_calculator_sees_cycles_of_size_ten(self): - seed = 3 - ten_length_cycle = [C, D, C, C, D, C, C, C, D, C] - self.assertEqual(detect_cycle((ten_length_cycle * 2)), tuple(ten_length_cycle)) - - ten_cycle_twenty_rounds = get_joss_strategy_actions( - ten_length_cycle * 2, indices_to_flip=[16] - ) - opponent_actions = ten_length_cycle * 2 + [C, D, C] - expected = ten_cycle_twenty_rounds + [(D, C), (D, D), (D, C)] - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=expected, - seed=seed, - ) - - def test_edge_case_calculator_ignores_cycles_gt_len_ten(self): - seed = 3 - eleven_length_cycle = [D, D, C, C, D, C, C, C, D, C, D] - twenty_rounds_of_eleven_len_cycle = ( - eleven_length_cycle + eleven_length_cycle[:9] - ) - twenty_rounds = get_joss_strategy_actions( - twenty_rounds_of_eleven_len_cycle, indices_to_flip=[19] - ) - - opponent_actions = twenty_rounds_of_eleven_len_cycle[:-1] + [D] + [C, D] - self.assertEqual(detect_cycle(opponent_actions), tuple(eleven_length_cycle)) - - uses_tit_for_tat_after_twenty_rounds = twenty_rounds + [(D, C), (C, D)] - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=uses_tit_for_tat_after_twenty_rounds, - seed=seed, - ) - - def test_get_joss_strategy_actions(self): - opponent = [C, D, D, C, C] - - flip_never_occurs_at_index_zero = [0] - flip_indices = [1, 2] - - without_flip = [(C, C), (C, D), (D, D), (D, C), (C, C)] - with_flip = [(C, C), (D, D), (C, D), (D, C), (C, C)] - - self.assertEqual(get_joss_strategy_actions(opponent, []), without_flip) - self.assertEqual( - get_joss_strategy_actions(opponent, flip_never_occurs_at_index_zero), - without_flip, - ) - self.assertEqual(get_joss_strategy_actions(opponent, flip_indices), with_flip) - - -def get_joss_strategy_actions(opponent_moves: list, indices_to_flip: list) -> list: - """ - Takes a list of opponent moves and returns a tuple list of [(Joss moves, opponent moves)]. - "indices_to_flip" are the indices where Joss differs from its expected TitForTat. - Joss is from axelrod.ipd.strategies.axelrod_first.
- """ - out = [] - for index, action in enumerate(opponent_moves): - previous_action = opponent_moves[index - 1] - if index == 0: - out.append((C, action)) - elif index in indices_to_flip: - out.append((previous_action.flip(), action)) - else: - out.append((previous_action, action)) - return out diff --git a/axelrod/ipd/tests/strategies/test_cooperator.py b/axelrod/ipd/tests/strategies/test_cooperator.py deleted file mode 100644 index aca2290ae..000000000 --- a/axelrod/ipd/tests/strategies/test_cooperator.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Tests for the Cooperator strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestCooperator(TestPlayer): - - name = "Cooperator" - player = axl.Cooperator - expected_classifier = { - "memory_depth": 0, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Cooperates always. - actions = [(C, C)] + [(C, D), (C, C)] * 9 - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestTrickyCooperator(TestPlayer): - - name = "Tricky Cooperator" - player = axl.TrickyCooperator - expected_classifier = { - "memory_depth": 10, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Test if it tries to trick opponent. - self.versus_test(axl.Cooperator(), [(C, C), (C, C), (C, C), (D, C), (D, C)]) - - opponent_actions = [C, C, C, C, D, D] - expected_actions = [(C, C), (C, C), (C, C), (D, C), (D, D), (C, D)] - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=expected_actions, - ) - - opponent_actions = [C, C, C, C] + [D, D] + [C] * 10 - expected_actions = ( - [(C, C), (C, C), (C, C), (D, C)] + [(D, D), (C, D)] + [(C, C)] * 10 - ) - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=expected_actions, - ) - - def test_cooperates_in_first_three_rounds(self): - against_defector = [(C, D)] * 3 - against_cooperator = [(C, C)] * 3 - against_alternator = [(C, C), (C, D), (C, C)] - self.versus_test(axl.Defector(), expected_actions=against_defector) - self.versus_test(axl.Cooperator(), expected_actions=against_cooperator) - self.versus_test(axl.Alternator(), expected_actions=against_alternator) - - def test_defects_after_three_rounds_if_opponent_only_cooperated_in_max_history_depth_ten( - self - ): - against_cooperator = [(C, C)] * 3 + [(D, C)] * 20 - self.versus_test(axl.Cooperator(), expected_actions=against_cooperator) - - def test_defects_when_opponent_has_no_defections_to_history_depth_ten(self): - opponent_actions = [D] + [C] * 10 + [D, C] - expected_actions = [(C, D)] + [(C, C)] * 10 + [(D, D), (C, C)] - self.versus_test(axl.MockPlayer(actions=opponent_actions), expected_actions) diff --git a/axelrod/ipd/tests/strategies/test_cycler.py b/axelrod/ipd/tests/strategies/test_cycler.py deleted file mode 100644 index 39f819d61..000000000 --- a/axelrod/ipd/tests/strategies/test_cycler.py +++ /dev/null @@ -1,237 +0,0 @@ -"""Tests for the Cycler strategies.""" -import unittest -import itertools -import random - -import axelrod as axl -from axelrod.ipd._strategy_utils import detect_cycle -from axelrod.ipd.action import Action, str_to_actions -from axelrod.ipd.evolvable_player import InsufficientParametersError - -from .test_player import TestPlayer -from .test_evolvable_player import 
PartialClass, TestEvolvablePlayer - -C, D = Action.C, Action.D - - -class TestAntiCycler(TestPlayer): - - name = "AntiCycler" - player = axl.AntiCycler - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_has_no_cycles(self): - test_range = 100 - player = axl.AntiCycler() - for _ in range(test_range): - player.play(axl.Cooperator()) - - contains_no_cycles = player.history - for slice_at in range(1, len(contains_no_cycles) + 1): - self.assertIsNone(detect_cycle(contains_no_cycles[:slice_at])) - - def test_strategy(self): - """Rounds are CDD CD CCD CCCD CCCCD ...""" - anticycler_rounds = [ - C, - D, - D, - C, - D, - C, - C, - D, - C, - C, - C, - D, - C, - C, - C, - C, - D, - C, - C, - C, - C, - C, - D, - ] - num_elements = len(anticycler_rounds) - against_defector = list(zip(anticycler_rounds, [D] * num_elements)) - against_cooperator = list(zip(anticycler_rounds, [C] * num_elements)) - - self.versus_test(axl.Defector(), expected_actions=against_defector) - self.versus_test(axl.Cooperator(), expected_actions=against_cooperator) - - -class TestBasicCycler(TestPlayer): - name = "Cycler: CCD" - player = axl.Cycler - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_memory_depth_is_len_cycle_minus_one(self): - len_ten = "DCDCDDCDCD" - len_five = "DCDDC" - depth_nine = axl.Cycler(cycle=len_ten) - depth_four = axl.Cycler(cycle=len_five) - self.assertEqual(axl.Classifiers["memory_depth"](depth_nine), 9) - self.assertEqual(axl.Classifiers["memory_depth"](depth_four), 4) - - def test_cycler_works_as_expected(self): - expected = [(C, D), (D, D), (D, D), (C, D)] * 2 - self.versus_test( - axl.Defector(), expected_actions=expected, init_kwargs={"cycle": "CDDC"} - ) - - def test_cycle_raises_value_error_on_bad_cycle_str(self): - self.assertRaises(ValueError, axl.Cycler, cycle="CdDC") - - -def test_cycler_factory(cycle_str): - class TestCyclerChild(TestPlayer): - - name = "Cycler %s" % cycle_str - player = getattr(axl, "Cycler%s" % cycle_str) - expected_classifier = { - "memory_depth": len(cycle_str) - 1, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - """Starts by cooperating""" - match_len = 20 - actions_generator = _get_actions_cycle_against_cooperator(cycle_str) - test_actions = [next(actions_generator) for _ in range(match_len)] - self.versus_test(axl.Cooperator(), expected_actions=test_actions) - - return TestCyclerChild - - -def _get_actions_cycle_against_cooperator(cycle_string: str): - """Converts str like 'CCDC' to an itertools.cycle against Cooperator. 
The - above example returns: itertools.cycle([(C, C), (C, C), (D, C), (C, C)])""" - cooperator_opponent_action = C - action_iterator = str_to_actions(cycle_string) - out = [(action, cooperator_opponent_action) for action in action_iterator] - return itertools.cycle(out) - - -TestCyclerDC = test_cycler_factory("DC") -TestCyclerCCD = test_cycler_factory("CCD") -TestCyclerDDC = test_cycler_factory("DDC") -TestCyclerCCCD = test_cycler_factory("CCCD") -TestCyclerCCCCCD = test_cycler_factory("CCCCCD") -TestCyclerCCCDCD = test_cycler_factory("CCCDCD") - - -class TestEvolvableCycler(unittest.TestCase): - - player_class = axl.EvolvableCycler - - def test_normalized_parameters(self): - # Must specify at least one of cycle or cycle_length - self.assertRaises( - InsufficientParametersError, self.player_class._normalize_parameters - ) - self.assertRaises( - InsufficientParametersError, - self.player_class._normalize_parameters, - cycle="", - ) - self.assertRaises( - InsufficientParametersError, - self.player_class._normalize_parameters, - cycle_length=0, - ) - - cycle = "C" * random.randint(0, 20) + "D" * random.randint(0, 20) - self.assertEqual( - self.player_class._normalize_parameters(cycle=cycle), (cycle, len(cycle)) - ) - - cycle_length = random.randint(1, 20) - random_cycle, cycle_length2 = self.player_class._normalize_parameters( - cycle_length=cycle_length - ) - self.assertEqual(len(random_cycle), cycle_length) - self.assertEqual(cycle_length, cycle_length2) - - def test_crossover_even_length(self): - cycle1 = "C" * 6 - cycle2 = "D" * 6 - cross_cycle = "CDDDDD" - - player1 = self.player_class(cycle=cycle1) - player2 = self.player_class(cycle=cycle2) - axl.seed(3) - crossed = player1.crossover(player2) - self.assertEqual(cross_cycle, crossed.cycle) - - def test_crossover_odd_length(self): - cycle1 = "C" * 7 - cycle2 = "D" * 7 - cross_cycle = "CDDDDDD" - - player1 = self.player_class(cycle=cycle1) - player2 = self.player_class(cycle=cycle2) - axl.seed(3) - crossed = player1.crossover(player2) - self.assertEqual(cross_cycle, crossed.cycle) - - -class TestEvolvableCycler2(TestEvolvablePlayer): - name = "EvolvableCycler" - player_class = axl.EvolvableCycler - parent_class = axl.Cycler - parent_kwargs = ["cycle"] - init_parameters = {"cycle_length": 100} - - -class TestEvolvableCycler3(TestEvolvablePlayer): - name = "EvolvableCycler" - player_class = axl.EvolvableCycler - parent_class = axl.Cycler - parent_kwargs = ["cycle"] - init_parameters = { - "cycle": "".join(random.choice(("C", "D")) for _ in range(50)), - "mutation_potency": 10, - } - - -# Substitute EvolvableCycler as a regular Cycler.
-EvolvableCyclerWithDefault = PartialClass(axl.EvolvableCycler, cycle="CCD") - - -class EvolvableCyclerAsCycler(TestBasicCycler): - player = EvolvableCyclerWithDefault - - def test_equality_of_clone(self): - pass - - def test_equality_of_pickle_clone(self): - pass - - def test_repr(self): - pass diff --git a/axelrod/ipd/tests/strategies/test_darwin.py b/axelrod/ipd/tests/strategies/test_darwin.py deleted file mode 100644 index 8aa0b6391..000000000 --- a/axelrod/ipd/tests/strategies/test_darwin.py +++ /dev/null @@ -1,105 +0,0 @@ -"""Tests for the Darwin PD strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestDarwin(TestPlayer): - - name = "Darwin" - player = axl.Darwin - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": True, - "manipulates_source": False, - "manipulates_state": True, - } - - @classmethod - def tearDownClass(cls): - """After all tests have run, makes sure the Darwin genome is reset.""" - cls.player.reset_genome() - super(TestDarwin, cls).tearDownClass() - - def setUp(self): - """Each test starts with a fresh genome.""" - self.player.reset_genome() - super(TestDarwin, self).setUp() - - def test_setup(self): - player = self.player() - self.assertEqual(player.genome, [C]) - self.assertEqual(player.history, []) - - def test_foil_strategy_inspection(self): - self.assertEqual(self.player().foil_strategy_inspection(), C) - - def test_strategy(self): - p1 = self.player() - p1.reset() - - self.versus_test( - axl.Cooperator(), - expected_actions=[(C, C)] * 5, - attrs={"genome": [C] * 5}, - ) - - expected_genome = [D] * 4 + [C] - self.versus_test( - axl.Defector(), - expected_actions=[(C, D)] * 5, - attrs={"genome": expected_genome}, - ) - - # uses genome - expected_actions = [(C, C)] + [(D, C)] * 3 + [(C, C)] * 2 - self.versus_test(axl.Cooperator(), expected_actions) - - def test_against_geller_and_mindreader(self): - self.versus_test( - axl.GellerCooperator(), - expected_actions=[(C, C)] * 2, - attrs={"genome": [C, C]}, - ) - - self.versus_test( - axl.MindReader(), - expected_actions=[(C, D)] * 2, - attrs={"genome": [D, C]}, - ) - - def test_reset_history_and_attributes(self): - # Overwrite this method because Darwin does not reset - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) - - p1 = self.player() - self.assertEqual(p1.genome, [D, C, C, C, D]) - p1.reset() - self.assertEqual(len(p1.history), 0) - self.assertEqual(p1.genome, [C, C, C, C, D]) - - def test_all_darwin_instances_share_one_genome(self): - p1 = self.player() - p2 = self.player() - self.assertIs(p1.genome, p2.genome) - - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) - - self.assertEqual(p2.genome, [D, C, C, C, D]) - self.assertIs(p1.genome, p2.genome) - p3 = self.player() - self.assertIs(p3.genome, p2.genome) - - def test_reset_genome(self): - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) - self.player.reset_genome() - self.assertEqual(self.player().genome, [C]) - - def equality_of_players_test(self, p1, p2, seed, opponent): - return True diff --git a/axelrod/ipd/tests/strategies/test_dbs.py b/axelrod/ipd/tests/strategies/test_dbs.py deleted file mode 100644 index 055c13ed6..000000000 --- a/axelrod/ipd/tests/strategies/test_dbs.py +++ /dev/null @@ -1,283 +0,0 @@ -"""Tests DBS strategy.""" - -import unittest - -import axelrod as axl -from axelrod.ipd.strategies 
import dbs - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestNode(unittest.TestCase): - """Test for the base Node class.""" - - node = dbs.Node() - - def test_get_siblings(self): - with self.assertRaises(NotImplementedError): - self.node.get_siblings() - - def test_is_stochastic(self): - with self.assertRaises(NotImplementedError): - self.node.is_stochastic() - - -class TestTreeSearch(unittest.TestCase): - """ - A set of tests for the tree-search functions. We test the answers of both - minimax_tree_search and move_gen functions, against a set of classic - policies (the answer being the best move to play for the next turn, - considering an incoming position (C, C), (C, D), (D, C) or (D, D)). - For each policy, we test the answer for all incoming positions. - """ - - def setUp(self): - """Initialization for tests.""" - # For each test, we check the answer against each possible - # input in self.input_pos. - self.input_pos = [(C, C), (C, D), (D, C), (D, D)] - # We define the policies against which we are going to test. - self.cooperator_policy = dbs.create_policy(1, 1, 1, 1) - self.defector_policy = dbs.create_policy(0, 0, 0, 0) - self.titForTat_policy = dbs.create_policy(1, 1, 0, 0) - self.alternator_policy = dbs.create_policy(0, 1, 0, 1) - self.grudger_policy = dbs.create_policy(1, 0, 0, 0) - self.random_policy = dbs.create_policy(0.5, 0.5, 0.5, 0.5) - - def test_minimaxTreeSearch_cooperator(self): - """ - Tests the minimax_tree_search function when playing against a - Cooperator player. Output == 0 means Cooperate, 1 means Defect. - The best (hence expected) answer to Cooperator is to defect - whatever the input position is. - """ - expected_output = [1, 1, 1, 1] - for inp, out in zip(self.input_pos, expected_output): - begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0) - values = dbs.minimax_tree_search( - begin_node, self.cooperator_policy, max_depth=5 - ) - self.assertEqual(values.index(max(values)), out) - - def test_move_gen_cooperator(self): - """ - Tests the move_gen function when playing against a Cooperator player. - """ - expected_output = [D, D, D, D] - for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.cooperator_policy, depth_search_tree=5) - self.assertEqual(out_move, out) - - def test_minimaxTreeSearch_defector(self): - """ - Tests the minimax_tree_search function when playing against a - Defector player. The best answer to Defector is to always defect. - """ - expected_output = [1, 1, 1, 1] - for inp, out in zip(self.input_pos, expected_output): - begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0) - values = dbs.minimax_tree_search( - begin_node, self.defector_policy, max_depth=5 - ) - self.assertEqual(values.index(max(values)), out) - - def test_move_gen_defector(self): - """ - Tests the move_gen function when playing against a Defector player. - """ - expected_output = [D, D, D, D] - for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.defector_policy, depth_search_tree=5) - self.assertEqual(out_move, out) - - def test_minimaxTreeSearch_titForTat(self): - """ - Tests the minimax_tree_search function when playing against a - TitForTat player. The best (hence expected) answer to TitForTat is to - cooperate whatever the input position is.
- """ - expected_output = [0, 0, 0, 0] - for inp, out in zip(self.input_pos, expected_output): - begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0) - values = dbs.minimax_tree_search( - begin_node, self.titForTat_policy, max_depth=5 - ) - self.assertEqual(values.index(max(values)), out) - - def test_last_node_titForTat(self): - """ - Test that against TitForTat, for the last move, i.e. if tree depth is 1, - the algorithm defects for all inputs. - """ - expected_output = [1, 1, 1, 1] - for inp, out in zip(self.input_pos, expected_output): - begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0) - values = dbs.minimax_tree_search( - begin_node, self.titForTat_policy, max_depth=1 - ) - self.assertEqual(values.index(max(values)), out) - - def test_move_gen_titForTat(self): - """ - Tests the move_gen function when playing against a TitForTat player. - """ - expected_output = [C, C, C, C] - for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.titForTat_policy, depth_search_tree=5) - self.assertEqual(out_move, out) - - def test_minimaxTreeSearch_alternator(self): - """ - Tests the minimax_tree_search function when playing against an - Alternator player. The best answer to Alternator is to always defect. - """ - expected_output = [1, 1, 1, 1] - for inp, out in zip(self.input_pos, expected_output): - begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0) - values = dbs.minimax_tree_search( - begin_node, self.alternator_policy, max_depth=5 - ) - self.assertEqual(values.index(max(values)), out) - - def test_move_gen_alternator(self): - """ - Tests the move_gen function when playing against an Alternator player. - """ - expected_output = [D, D, D, D] - for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.alternator_policy, depth_search_tree=5) - self.assertEqual(out_move, out) - - def test_minimaxTreeSearch_random(self): - """ - Tests the minimax_tree_search function when playing against a Random - player. The best answer to Random is to always defect. - """ - expected_output = [1, 1, 1, 1] - for inp, out in zip(self.input_pos, expected_output): - begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0) - values = dbs.minimax_tree_search( - begin_node, self.random_policy, max_depth=5 - ) - self.assertEqual(values.index(max(values)), out) - - def test_move_gen_random(self): - """ - Tests the move_gen function when playing against a Random player. - """ - expected_output = [D, D, D, D] - for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.random_policy, depth_search_tree=5) - self.assertEqual(out_move, out) - - def test_minimaxTreeSearch_grudger(self): - """ - Tests the minimax_tree_search function when playing against a - Grudger player. The best answer to Grudger is to cooperate if both - cooperated in the last round, else it's to defect. - """ - expected_output = [0, 1, 1, 1] - for inp, out in zip(self.input_pos, expected_output): - begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0) - values = dbs.minimax_tree_search( - begin_node, self.grudger_policy, max_depth=5 - ) - self.assertEqual(values.index(max(values)), out) - - def test_move_gen_grudger(self): - """ - Tests the move_gen function when playing against a Grudger player.
- """ - expected_output = [C, D, D, D] - for inp, out in zip(self.input_pos, expected_output): - out_move = dbs.move_gen(inp, self.grudger_policy, depth_search_tree=5) - self.assertEqual(out_move, out) - - -class TestDBS(TestPlayer): - name = "DBS: 0.75, 3, 4, 3, 5" - player = axl.DBS - - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": True, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - default_init_kwargs = { - "discount_factor": 0.75, - "promotion_threshold": 3, - "violation_threshold": 4, - "reject_threshold": 4, - "tree_depth": 5, - } - - # Test that DBS always cooperate against Cooperator. - actions = [(C, C)] * 7 - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - init_kwargs=default_init_kwargs, - ) - - # Test if it correctly learns Alternator strategy. - actions = [(C, C), (C, D)] * 3 + [(D, C), (C, D)] * 3 - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - init_kwargs=default_init_kwargs, - ) - - # Check that algorithms take into account a change in opponent's - # strategy. - mock_actions = [C, C, C, D, D, D, D, D, D, D] - exp_actions = [(C, C)] * 3 + [(C, D)] * 4 + [(D, D)] * 3 - self.versus_test( - opponent=axl.MockPlayer(actions=mock_actions), - expected_actions=exp_actions, - init_kwargs=default_init_kwargs, - ) - - # Check that adaptation is faster if diminishing promotion_threshold. - init_kwargs_2 = { - "discount_factor": 0.75, - "promotion_threshold": 2, - "violation_threshold": 4, - "reject_threshold": 4, - "tree_depth": 5, - } - mock_actions = [C, C, C, D, D, D, D, D, D, D] - exp_actions = [(C, C)] * 3 + [(C, D)] * 3 + [(D, D)] * 4 - self.versus_test( - opponent=axl.MockPlayer(actions=mock_actions), - expected_actions=exp_actions, - init_kwargs=init_kwargs_2, - ) - - # Check that ShouldDemote mechanism works. - # We play against Alternator for 12 turns to make the - # algorithm learn Alternator's strategy, then at turn 13 we - # change opponent to Defector, hence triggering ShouldDemote - # mechanism. For this test we use violation_threshold=3 - init_kwargs_3 = { - "discount_factor": 0.75, - "promotion_threshold": 3, - "violation_threshold": 3, - "reject_threshold": 3, - "tree_depth": 5, - } - exp_actions = [(C, C), (C, D)] * 3 + [(D, C), (C, D)] * 3 - exp_actions += [(D, D), (C, D)] * 3 + [(D, D)] - mock_actions = [C, D, C, D, C, D, C, D, C, D, C, D, D, D, D, D, D, D, D] - self.versus_test( - opponent=axl.MockPlayer(actions=mock_actions), - expected_actions=exp_actions, - init_kwargs=init_kwargs_3, - ) diff --git a/axelrod/ipd/tests/strategies/test_defector.py b/axelrod/ipd/tests/strategies/test_defector.py deleted file mode 100644 index d9dffa48d..000000000 --- a/axelrod/ipd/tests/strategies/test_defector.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Tests for the Defector strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestDefector(TestPlayer): - - name = "Defector" - player = axl.Defector - expected_classifier = { - "memory_depth": 0, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_state": False, - "manipulates_source": False, - } - - def test_strategy(self): - # Test that always defects. 
- actions = [(D, C)] + [(D, D), (D, C)] * 9 - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestTrickyDefector(TestPlayer): - - name = "Tricky Defector" - player = axl.TrickyDefector - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_cooperates_if_opponent_history_has_C_and_last_three_are_D(self): - opponent_actions = [D, C] + [D] * 5 - actions = [(D, D), (D, C)] + [(D, D)] * 3 + [(C, D)] * 2 - self.versus_test( - axl.MockPlayer(actions=opponent_actions), expected_actions=actions - ) - - def test_defects_if_opponent_never_cooperated(self): - opponent_actions = [D] * 7 - actions = [(D, D)] * 7 - self.versus_test( - axl.MockPlayer(actions=opponent_actions), expected_actions=actions - ) - - def test_defects_if_opponent_last_three_are_not_D(self): - opponent_actions = [C] + [D] * 3 + [C, D] - actions = [(D, C)] + [(D, D)] * 3 + [(C, C), (D, D)] - self.versus_test( - axl.MockPlayer(actions=opponent_actions), expected_actions=actions - ) diff --git a/axelrod/ipd/tests/strategies/test_doubler.py b/axelrod/ipd/tests/strategies/test_doubler.py deleted file mode 100644 index e3d436302..000000000 --- a/axelrod/ipd/tests/strategies/test_doubler.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Tests for the Doubler strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestDoubler(TestPlayer): - - name = "Doubler" - player = axl.Doubler - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_defects_if_opponent_last_play_is_D_and_defections_gt_two_times_cooperations( - self - ): - opponent_plays = [C] * 7 + [D] * 4 + [C] - actions = [(C, C)] * 7 + [(C, D)] * 4 + [(D, C)] - self.versus_test( - axl.MockPlayer(actions=opponent_plays), expected_actions=actions - ) - - def test_defects_if_opponent_last_play_D_and_defections_equal_two_times_cooperations( - self - ): - opponent_plays = [C] * 8 + [D] * 4 + [C] - actions = [(C, C)] * 8 + [(C, D)] * 4 + [(D, C)] - self.versus_test( - axl.MockPlayer(actions=opponent_plays), expected_actions=actions - ) - - def test_cooperates_if_opponent_last_play_is_C(self): - opponent_first_five = [D] * 5 - actions_first_five = [(C, D)] + [(D, D)] * 4 - opponent_plays = opponent_first_five + [C] + [D] - actions = actions_first_five + [(D, C)] + [(C, D)] - self.versus_test( - axl.MockPlayer(actions=opponent_plays), expected_actions=actions - ) diff --git a/axelrod/ipd/tests/strategies/test_evolvable_player.py b/axelrod/ipd/tests/strategies/test_evolvable_player.py deleted file mode 100644 index 0712c726b..000000000 --- a/axelrod/ipd/tests/strategies/test_evolvable_player.py +++ /dev/null @@ -1,213 +0,0 @@ -import unittest -import functools -import random - -import axelrod as axl -from axelrod.ipd.action import Action -from axelrod.ipd.evolvable_player import copy_lists, crossover_lists, crossover_dictionaries -from .test_player import TestPlayer - -C, D = Action.C, Action.D - - -def PartialClass(cls, **kwargs): - - class PartialedClass(cls): - __init__ = functools.partialmethod( - cls.__init__, **kwargs) - - return PartialedClass - - -class EvolvableTestOpponent(axl.EvolvablePlayer): - name = 
"EvolvableTestOpponent" - - def __init__(self, value=None): - super().__init__() - if value: - self.value = value - else: - value = random.randint(2, 100) - self.value = value - self.overwrite_init_kwargs(value=value) - - @staticmethod - def strategy(opponent): - return Action.C - - def mutate(self): - value = random.randint(2, 100) - return EvolvableTestOpponent(value) - - def crossover(self, other): - if other.__class__ != self.__class__: - raise TypeError("Crossover must be between the same player classes.") - value = self.value + other.value - return EvolvableTestOpponent(value) - - -class TestEvolvablePlayer(TestPlayer): - - player_class = EvolvableTestOpponent - parent_class = None - init_parameters = dict() - - def player(self): - return self.player_class(**self.init_parameters) - - def test_repr(self): - """Test that the representation is correct.""" - if self.__class__ != TestEvolvablePlayer: - self.assertIn(self.name, str(self.player())) - pass - - def test_initialisation(self): - """Test that the player initiates correctly.""" - if self.__class__ != TestEvolvablePlayer: - player = self.player() - self.assertEqual(len(player.history), 0) - self.assertEqual(player.cooperations, 0) - self.assertEqual(player.defections, 0) - - def test_randomization(self): - """Test that randomization on initialization produces different strategies.""" - if self.init_parameters: - return - axl.seed(0) - player1 = self.player() - axl.seed(0) - player2 = self.player() - self.assertEqual(player1, player2) - for seed_ in range(2, 20): - axl.seed(seed_) - player2 = self.player() - if player1 != player2: - return - # Should never get here unless a change breaks the test, so don't include in coverage. - self.assertFalse(True) # pragma: no cover - - def test_mutate_variations(self): - """Generate many variations to test that mutate produces different strategies.""" - if not self.init_parameters: - return - axl.seed(100) - variants_produced = False - for _ in range(2, 400): - player = self.player() - mutant = player.mutate() - if player != mutant: - variants_produced = True - self.assertTrue(variants_produced) - - def test_mutate_and_clone(self): - """Test that mutated players clone properly.""" - axl.seed(0) - player = self.player() - mutant = player.clone().mutate() - clone = mutant.clone() - self.assertEqual(clone, mutant) - - def test_crossover(self): - """Test that crossover produces different strategies.""" - for seed_ in range(20): - axl.seed(seed_) - players = [] - for _ in range(2): - player = self.player() - # Mutate to randomize - player = player.mutate() - players.append(player) - player1, player2 = players - crossed = player1.crossover(player2) - if player1 != crossed and player2 != crossed and crossed == crossed.clone(): - return - # Should never get here unless a change breaks the test, so don't include in coverage. 
- self.assertFalse(True) # pragma: no cover - - def test_crossover_mismatch(self): - other = axl.Cooperator() - player = self.player() - with self.assertRaises(TypeError): - player.crossover(other) - - def test_serialization(self): - """Serializing and deserializing should return the original player.""" - axl.seed(0) - player = self.player() - serialized = player.serialize_parameters() - deserialized_player = player.__class__.deserialize_parameters(serialized) - self.assertEqual(player, deserialized_player) - self.assertEqual(deserialized_player, deserialized_player.clone()) - - def test_serialization_csv(self): - """Serializing and deserializing should return the original player.""" - axl.seed(0) - player = self.player() - serialized = player.serialize_parameters() - s = "0, 1, {}, 3".format(serialized) - s2 = s.split(',')[2] - deserialized_player = player.__class__.deserialize_parameters(s2) - self.assertEqual(player, deserialized_player) - self.assertEqual(deserialized_player, deserialized_player.clone()) - - def behavior_test(self, player1, player2): - """Test that the evolvable player plays the same as its (nonevolvable) parent class.""" - for opponent_class in [axl.Random, axl.TitForTat, axl.Alternator]: - axl.seed(0) - opponent = opponent_class() - match = axl.IpdMatch((player1.clone(), opponent)) - results1 = match.play() - - axl.seed(0) - opponent = opponent_class() - match = axl.IpdMatch((player2.clone(), opponent)) - results2 = match.play() - - self.assertEqual(results1, results2) - - def test_behavior(self): - """Test that the evolvable player plays the same as its (nonevolvable) parent class.""" - if not self.parent_class: - return - - player = self.player_class(**self.init_parameters) - init_kwargs = {k: player.init_kwargs[k] for k in self.parent_kwargs} - parent_player = self.parent_class(**init_kwargs) - self.behavior_test(player, parent_player) - - serialized = player.serialize_parameters() - deserialized_player = player.__class__.deserialize_parameters(serialized) - self.behavior_test(deserialized_player, parent_player) - - -class TestUtilityFunctions(unittest.TestCase): - - def test_copy_lists(self): - l1 = [list(range(10)), list(range(20))] - l2 = copy_lists(l1) - self.assertIsNot(l1, l2) - - def test_crossover_lists(self): - list1 = [[0, C, 1, D], [0, D, 0, D], [1, C, 1, C], [1, D, 1, D]] - list2 = [[0, D, 1, C], [0, C, 0, C], [1, D, 1, D], [1, C, 1, C]] - - axl.seed(0) - crossed = crossover_lists(list1, list2) - self.assertEqual(crossed, list1[:3] + list2[3:]) - - axl.seed(1) - crossed = crossover_lists(list1, list2) - self.assertEqual(crossed, list1[:1] + list2[1:]) - - def test_crossover_dictionaries(self): - dict1 = {'1': 1, '2': 2, '3': 3} - dict2 = {'1': 'a', '2': 'b', '3': 'c'} - - axl.seed(0) - crossed = crossover_dictionaries(dict1, dict2) - self.assertEqual(crossed, {'1': 1, '2': 'b', '3': 'c'}) - - axl.seed(1) - crossed = crossover_dictionaries(dict1, dict2) - self.assertEqual(crossed, dict2) - diff --git a/axelrod/ipd/tests/strategies/test_finite_state_machines.py b/axelrod/ipd/tests/strategies/test_finite_state_machines.py deleted file mode 100644 index 25f192de8..000000000 --- a/axelrod/ipd/tests/strategies/test_finite_state_machines.py +++ /dev/null @@ -1,1139 +0,0 @@ -"""Tests for Finite State Machine Strategies.""" - -import unittest - -import random - -import axelrod as axl -from axelrod.ipd.compute_finite_state_machine_memory import get_memory_from_transitions -from axelrod.ipd.evolvable_player import InsufficientParametersError -from 
axelrod.ipd.strategies import EvolvableFSMPlayer, FSMPlayer, SimpleFSM - -from .test_player import TestPlayer -from .test_evolvable_player import PartialClass, TestEvolvablePlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestSimpleFSM(unittest.TestCase): - def setUp(self): - self.two_state_transition = ( - (1, C, 0, C), - (1, D, 0, D), - (0, C, 1, D), - (0, D, 1, C), - ) - - self.two_state = SimpleFSM( - transitions=self.two_state_transition, initial_state=1 - ) - - def test__eq__true(self): - new_two_state = SimpleFSM( - transitions=self.two_state_transition, initial_state=1 - ) - self.assertTrue(new_two_state.__eq__(self.two_state)) - new_two_state.move(C) - self.two_state.move(D) - self.assertTrue(new_two_state.__eq__(self.two_state)) - - def test__eq__false_by_state(self): - new_two_state = SimpleFSM( - transitions=self.two_state_transition, initial_state=0 - ) - self.assertFalse(new_two_state.__eq__(self.two_state)) - - def test__eq__false_by_transition(self): - different_transitions = ((1, C, 0, D), (1, D, 0, D), (0, C, 1, D), (0, D, 1, C)) - new_two_state = SimpleFSM(transitions=different_transitions, initial_state=1) - - self.assertFalse(new_two_state.__eq__(self.two_state)) - - def test__eq__false_by_not_SimpleFSM(self): - self.assertFalse(self.two_state.__eq__(3)) - - def test__ne__(self): - new_two_state = SimpleFSM( - transitions=self.two_state_transition, initial_state=1 - ) - self.assertFalse(new_two_state.__ne__(self.two_state)) - new_two_state.move(C) - self.assertTrue(new_two_state.__ne__(self.two_state)) - - def test_move(self): - self.assertEqual(self.two_state.move(C), C) - self.assertEqual(self.two_state.state, 0) - self.assertEqual(self.two_state.move(C), D) - self.assertEqual(self.two_state.state, 1) - - self.assertEqual(self.two_state.move(D), D) - self.assertEqual(self.two_state.state, 0) - self.assertEqual(self.two_state.move(D), C) - self.assertEqual(self.two_state.state, 1) - - def test_bad_transitions_raise_error(self): - bad_transitions = ((1, C, 0, D), (1, D, 0, D), (0, C, 1, D)) - self.assertRaises( - ValueError, SimpleFSM, transitions=bad_transitions, initial_state=1 - ) - - def test_bad_initial_state_raises_error(self): - self.assertRaises( - ValueError, - SimpleFSM, - transitions=self.two_state_transition, - initial_state=5, - ) - - def test_state_setter_raises_error_for_bad_input(self): - with self.assertRaises(ValueError) as cm: - self.two_state.state = 5 - error_msg = cm.exception.args[0] - self.assertEqual(error_msg, "state: 5 does not have values for both C and D") - - -class TestSampleFSMPlayer(TestPlayer): - """Test a few sample tables to make sure that the finite state machines are - working as intended.""" - - name = "FSM IpdPlayer: ((1, C, 1, C), (1, D, 1, D)), 1, C" - player = axl.FSMPlayer - - expected_classifier = { - "memory_depth": 1, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_cooperator(self): - """Tests that the player defined by the table for Cooperator is in fact - Cooperator.""" - cooperator_init_kwargs = { - "transitions": ((1, C, 1, C), (1, D, 1, C)), - "initial_state": 1, - "initial_action": C, - } - self.versus_test( - axl.Alternator(), - expected_actions=[(C, C), (C, D)] * 5, - init_kwargs=cooperator_init_kwargs, - ) - - def test_defector(self): - """Tests that the player defined by the table for Defector is in fact - Defector.""" - defector_init_kwargs = { - "transitions": ((1, C, 1, D), 
(1, D, 1, D)),
-            "initial_state": 1,
-            "initial_action": D,
-        }
-        self.versus_test(
-            axl.Alternator(),
-            expected_actions=[(D, C), (D, D)] * 5,
-            init_kwargs=defector_init_kwargs,
-        )
-
-    def test_tft(self):
-        """Tests that the player defined by the table for TFT is in fact
-        TFT."""
-        tft_init_kwargs = {
-            "transitions": ((1, C, 1, C), (1, D, 1, D)),
-            "initial_state": 1,
-            "initial_action": C,
-        }
-        self.versus_test(
-            axl.Alternator(),
-            expected_actions=[(C, C)] + [(C, D), (D, C)] * 5,
-            init_kwargs=tft_init_kwargs,
-        )
-
-    def test_wsls(self):
-        """Tests that the player defined by the table for WSLS is in fact
-        WSLS (also known as Pavlov)."""
-        wsls_init_kwargs = {
-            "transitions": ((1, C, 1, C), (1, D, 2, D), (2, C, 2, D), (2, D, 1, C)),
-            "initial_state": 1,
-            "initial_action": C,
-        }
-        expected = [(C, C), (C, D), (D, C), (D, D)] * 3
-        self.versus_test(
-            axl.Alternator(),
-            expected_actions=expected,
-            init_kwargs=wsls_init_kwargs,
-        )
-
-
-class TestFSMPlayer(TestPlayer):
-    name = "FSM IpdPlayer: ((1, C, 1, C), (1, D, 1, D)), 1, C"
-    player = axl.FSMPlayer
-
-    expected_classifier = {
-        "memory_depth": 1,
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def transitions_test(self, state_and_action):
-        """
-        Takes a list of [(initial_state, first_opponent_action), (next_state,
-        next_opponent_action), ...], creates the list of opponent moves and
-        the list of expected_actions implied by the FiniteStateMachine, and
-        then runs a versus_test on those two lists.
-        """
-        fsm_player = self.player()
-        transitions = fsm_player.fsm.state_transitions
-        first_opponent_move = state_and_action[0][1]
-
-        expected_actions = [(fsm_player.initial_action, first_opponent_move)]
-        opponent_actions = [first_opponent_move]
-
-        for index in range(1, len(state_and_action)):
-            current_state, last_opponent_move = state_and_action[index - 1]
-            fsm_move = transitions[(current_state, last_opponent_move)][1]
-
-            new_state, current_opponent_move = state_and_action[index]
-
-            expected_actions.append((fsm_move, current_opponent_move))
-            opponent_actions.append(current_opponent_move)
-
-            self.verify_against_finite_state_machine(
-                current_state=current_state,
-                expected_state=new_state,
-                last_opponent_move=last_opponent_move,
-                expected_move=fsm_move,
-            )
-
-        self.versus_test(
-            axl.MockPlayer(actions=opponent_actions),
-            expected_actions=expected_actions,
-        )
-
-    def verify_against_finite_state_machine(
-        self, current_state, expected_state, last_opponent_move, expected_move
-    ):
-        test_fsm = self.player().fsm
-        test_fsm.state = current_state
-        self.assertEqual(test_fsm.move(last_opponent_move), expected_move)
-        self.assertEqual(test_fsm.state, expected_state)
-
-    def test_transitions_with_default_fsm(self):
-        if self.player is axl.FSMPlayer:
-            state_action = [(1, C), (1, D)]
-            self.transitions_test(state_action)
-
-    def test_all_states_reachable(self):
-        player = self.player()
-        initial_state = player.initial_state
-        transitions = player.fsm.state_transitions
-
-        called_states = set(pair[0] for pair in transitions.values())
-        called_states.add(initial_state)
-
-        owned_states = set(pair[0] for pair in transitions.keys())
-
-        un_callable_states = owned_states.difference(called_states)
-        extra_info = "The following states are un-reachable: {}".format(
-            un_callable_states
-        )
-        self.assertEqual(un_callable_states, set(), msg=extra_info)
-
-    def test_strategy(self):
-        """
-        Regression test for init
without specifying initial state or action - """ - transitions = ( - (0, C, 0, C), - (0, D, 3, C), - (1, C, 5, D), - (1, D, 0, C), - (2, C, 3, C), - (2, D, 2, D), - (3, C, 4, D), - (3, D, 6, D), - (4, C, 3, C), - (4, D, 1, D), - (5, C, 6, C), - (5, D, 3, D), - (6, C, 6, D), - (6, D, 6, D), - (7, C, 7, D), - (7, D, 5, C), - ) - opponent = axl.MockPlayer([D, D, C, C, D]) - actions = [(C, D), (C, D), (C, C), (D, C), (C, D)] - self.versus_test( - opponent, expected_actions=actions, init_kwargs={"transitions": transitions} - ) - - def test_memory(self): - """ - Test the memory depth using implemented algorithm - """ - transitions = self.player().fsm._state_transitions - self.assertEqual(get_memory_from_transitions(transitions), self.expected_classifier["memory_depth"]) - - -class TestFortress3(TestFSMPlayer): - - name = "Fortress3" - player = axl.Fortress3 - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - """ - transitions = ( - (1, C, 1, D), - (1, D, 2, D), - (2, C, 1, D), - (2, D, 3, C), - (3, C, 3, C), - (3, D, 1, D) - ) - """ - - def test_strategy(self): - state_and_actions = [(1, C), (1, D), (2, C), (1, C)] - self.transitions_test(state_and_actions) - - state_and_actions = [(1, D), (2, D), (3, C), (3, C), (3, C), (3, D), (1, C)] * 2 - self.transitions_test(state_and_actions) - - @unittest.expectedFailure - def test_incorrect_transitions(self): - state_and_actions = [(1, C), (1, D), (1, D)] - self.transitions_test(state_and_actions) - - -class TestFortress4(TestFSMPlayer): - - name = "Fortress4" - player = axl.Fortress4 - expected_classifier = { - "memory_depth": 3, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - """ - transitions = ( - (1, C, 1, D), - (1, D, 2, D), - (2, C, 1, D), - (2, D, 3, D), - (3, C, 1, D), - (3, D, 4, C), - (4, C, 4, C), - (4, D, 1, D) - ) - """ - - def test_strategy(self): - state_and_actions = [(1, C), (1, D), (2, C)] * 3 - self.transitions_test(state_and_actions) - - state_and_actions = [(1, D), (2, D), (3, C), (1, C)] * 3 - self.transitions_test(state_and_actions) - - state_and_actions = [ - (1, D), - (2, D), - (3, D), - (4, C), - (4, C), - (4, C), - (4, C), - (4, D), - ] * 3 - self.transitions_test(state_and_actions) - - -class TestPredator(TestFSMPlayer): - - name = "Predator" - player = axl.Predator - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - """ - transitions = ( - (0, C, 0, D), - (0, D, 1, D), - (1, C, 2, D), - (1, D, 3, D), - (2, C, 4, C), - (2, D, 3, D), - (3, C, 5, D), - (3, D, 4, C), - (4, C, 2, C), - (4, D, 6, D), - (5, C, 7, D), - (5, D, 3, D), - (6, C, 7, C), - (6, D, 7, D), - (7, C, 8, D), - (7, D, 7, D), - (8, C, 8, D), - (8, D, 6, D) - ) - """ - - def test_strategy(self): - state_and_actions = [ - (0, D), - (1, C), - (2, C), - (4, C), - (2, D), - (3, D), - (4, D), - (6, C), - ] + [(7, D), (7, C), (8, C), (8, D), (6, D)] * 3 - self.transitions_test(state_and_actions) - - state_and_actions = [(0, D), (1, C), (2, D), (3, C), (5, D), (3, C), (5, C)] + [ - (7, C), - (8, D), - (6, C), - ] * 5 - self.transitions_test(state_and_actions) - - state_and_actions = ( - [(0, C), (0, D)] + [(1, D), (3, 
D), (4, D), (6, D)] + [(7, D)] * 10
-        )
-        self.transitions_test(state_and_actions)
-
-
-class TestPun1(TestFSMPlayer):
-
-    name = "Pun1"
-    player = axl.Pun1
-    expected_classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    """
-    transitions = (
-        (1, C, 2, C),
-        (1, D, 2, C),
-        (2, C, 1, C),
-        (2, D, 1, D)
-    )
-    """
-
-    def test_strategy(self):
-        state_and_actions = [(1, C), (2, D), (1, D), (2, D)] * 3
-        self.transitions_test(state_and_actions)
-
-
-class TestRaider(TestFSMPlayer):
-
-    name = "Raider"
-    player = axl.Raider
-    expected_classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    """
-    transitions = (
-        (0, C, 2, D),
-        (0, D, 2, D),
-        (1, C, 1, C),
-        (1, D, 1, D),
-        (2, C, 0, D),
-        (2, D, 3, C),
-        (3, C, 0, D),
-        (3, D, 1, C)
-    )
-    """
-
-    def test_strategy(self):
-        state_and_actions = [(0, C), (2, C), (0, D), (2, C)] * 3
-        self.transitions_test(state_and_actions)
-
-        state_and_actions = [(0, C), (2, D), (3, C)] * 3
-        self.transitions_test(state_and_actions)
-
-        state_and_actions = [(0, C), (2, D), (3, D)] + [(1, C), (1, D)] * 5
-        self.transitions_test(state_and_actions)
-
-
-class TestRipoff(TestFSMPlayer):
-
-    name = "Ripoff"
-    player = axl.Ripoff
-    expected_classifier = {
-        "memory_depth": 3,
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    """
-    transitions = (
-        (1, C, 2, C),
-        (1, D, 3, C),
-        (2, C, 1, D),
-        (2, D, 3, C),
-        (3, C, 3, C),  # Note that it's TFT in state 3
-        (3, D, 3, D)
-    )
-    """
-
-    def test_strategy(self):
-        state_and_actions = [(1, C), (2, C)] * 3 + [(1, D)] + [(3, C), (3, D)] * 5
-        self.transitions_test(state_and_actions)
-
-        state_and_actions = [(1, C), (2, D)] + [(3, D)] * 5
-        self.transitions_test(state_and_actions)
-
-
-class TestUsuallyCooperates(TestFSMPlayer):
-    name = "UsuallyCooperates"
-    player = axl.UsuallyCooperates
-    expected_classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-    """
-    transitions = (
-        (1, C, 1, C),
-        (1, D, 2, C),
-        (2, C, 1, D),
-        (2, D, 1, C)
-    )
-    """
-
-    def test_strategy(self):
-        # Never leaves state 1 if C
-        state_and_actions = [(1, C)] * 10
-        self.transitions_test(state_and_actions)
-        # Visits state 2, but then comes back
-        # Defects if the DC streak is complete. Starts the streak over
-        # either way.
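-        # (1, D) moves the machine to state 2; from state 2 either
-        # opponent reply returns it to state 1, so each pair below
-        # exercises one full round trip.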
- state_and_actions = [(1, D), (2, D)] - self.transitions_test(state_and_actions) - state_and_actions = [(1, D), (2, C)] - self.transitions_test(state_and_actions) - - -class TestUsuallyDefects(TestFSMPlayer): - name = "UsuallyDefects" - player = axl.UsuallyDefects - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - """ - transitions = ( - (1, C, 2, D), - (1, D, 1, D), - (2, C, 1, D), - (2, D, 1, C) - ) - """ - - def test_strategy(self): - # Never leaves state 1 if D - state_and_actions = [(1, D)] * 10 - self.transitions_test(state_and_actions) - # Visits state 2, but then comes back - # Cooperates if CD streak is complete. Starts streak over either way. - state_and_actions = [(1, C), (2, D)] - self.transitions_test(state_and_actions) - state_and_actions = [(1, C), (2, C)] - self.transitions_test(state_and_actions) - - -class TestSolutionB1(TestFSMPlayer): - - name = "SolutionB1" - player = axl.SolutionB1 - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - """ - transitions = ( - (1, C, 2, D), - (1, D, 1, D), - (2, C, 2, C), - (2, D, 3, C), - (3, C, 3, C), - (3, D, 3, C) - ) - """ - - def test_strategy(self): - - state_and_actions = ( - [(1, D)] * 3 + [(1, C)] + [(2, C)] * 3 + [(2, D)] + [(3, C), (3, D)] * 3 - ) - self.transitions_test(state_and_actions) - - -class TestSolutionB5(TestFSMPlayer): - - name = "SolutionB5" - player = axl.SolutionB5 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - """ - transitions = ( - (1, C, 2, C), - (1, D, 6, D), - (2, C, 2, C), - (2, D, 3, D), - (3, C, 6, C), - (3, D, 1, D), - (4, C, 3, C), - (4, D, 6, D), - (5, C, 5, D), - (5, D, 4, D), - (6, C, 3, C), - (6, D, 5, D) - ) - """ - - def test_strategy(self): - state_and_actions = ([(1, C)] + [(2, C)] * 3 + [(2, D), (3, D)]) * 2 - self.transitions_test(state_and_actions) - - state_and_actions = [(1, C), (2, D)] + [ - (3, C), - (6, D), - (5, C), - (5, D), - (4, C), - (3, C), - (6, C), - ] * 3 - self.transitions_test(state_and_actions) - - state_and_actions = [(1, D)] + [(6, D), (5, D), (4, D)] * 3 - self.transitions_test(state_and_actions) - - -class TestThumper(TestFSMPlayer): - - name = "Thumper" - player = axl.Thumper - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - """ - transitions = ( - (1, C, 1, C), - (1, D, 2, D), - (2, C, 1, D), - (2, D, 1, D) - ) - """ - - def test_strategy(self): - - state_and_actions = [(1, C)] * 3 + [(1, D), (2, C), (1, D), (2, D)] * 3 - self.transitions_test(state_and_actions) - - -class TestEvolvedFSM4(TestFSMPlayer): - - name = "Evolved FSM 4" - player = axl.EvolvedFSM4 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - """ - transitions = ( - (0, C, 0, C), - (0, D, 2, D), - (1, C, 3, D), - (1, D, 0, C), - (2, C, 2, 
D), - (2, D, 1, C), - (3, C, 3, D), - (3, D, 1, D) - ) - """ - - def test_strategy(self): - state_and_actions = [(0, C)] * 3 + [(0, D), (2, C), (2, D), (1, D)] * 3 - self.transitions_test(state_and_actions) - - state_and_actions = [ - (0, D), - (2, D), - (1, C), - (3, C), - (3, C), - (3, D), - (1, C), - (3, D), - (1, D), - ] * 3 - self.transitions_test(state_and_actions) - - -class TestEvolvedFSM16(TestFSMPlayer): - - name = "Evolved FSM 16" - player = axl.EvolvedFSM16 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - """ - FSM created by ML algorithm never called states 4 or 9, so they were deleted. - transitions = ( - (0, C, 0, C), - (0, D, 12, D), - (1, C, 3, D), - (1, D, 6, C), - (2, C, 2, D), - (2, D, 14, D), - (3, C, 3, D), - (3, D, 3, D), - - (5, C, 12, D), - (5, D, 10, D), - (6, C, 5, C), - (6, D, 12, D), - (7, C, 3, D), - (7, D, 1, C), - (8, C, 5, C), - (8, D, 5, C), - - (10, C, 11, D), - (10, D, 8, C), - (11, C, 15, D), - (11, D, 5, D), - (12, C, 8, C), - (12, D, 11, D), - (13, C, 13, D), - (13, D, 7, D), - (14, C, 13, D), - (14, D, 13, D), - (15, C, 15, D), - (15, D, 2, C) - ) - """ - - def test_strategy(self): - # finished: 0, - state_and_actions = [(0, C)] * 3 + [(0, D)] + [(12, D), (11, D), (5, C)] * 3 - self.transitions_test(state_and_actions) - - # finished: 0, 5, 10 - state_and_actions = [(0, D), (12, D), (11, D)] + [ - (5, D), - (10, C), - (11, D), - (5, D), - (10, D), - (8, C), - ] * 3 - self.transitions_test(state_and_actions) - - # finished: 0, 2, 5, 10, 11, 12, 15 - state_and_actions = ( - [ - (0, D), - (12, C), - (8, D), - (5, D), - (10, C), - (11, C), - (15, C), - (15, C), - (15, D), - ] - + [(2, C)] * 3 - + [(2, D), (14, C), (13, C)] - ) - self.transitions_test(state_and_actions) - - # finished: 0, 2, 3, 5, 10, 11, 12, 13, 14, 15 - to_state_fourteen = [(0, D), (12, D), (11, C), (15, D), (2, D)] - state_and_actions = ( - to_state_fourteen - + [(14, D), (13, C), (13, C), (13, D), (7, C)] - + [(3, D), (3, C)] * 3 - ) - self.transitions_test(state_and_actions) - - # finished: 0, 2, 3, 5, 7, 10, 11, 12, 13, 14, 15 - to_state_seven = to_state_fourteen + [(14, D), (13, D)] - state_and_actions = to_state_seven + [(7, D), (1, C)] + [(3, C)] * 5 - self.transitions_test(state_and_actions) - - # finished: 0, 1, 2, 3, 5, 10, 11, 12, 13, 14, 15 - state_and_actions = to_state_seven + [(7, D), (1, D), (6, C), (5, D), (10, C)] - self.transitions_test(state_and_actions) - - # finished: 0, 1, 2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15 - state_and_actions = to_state_seven + [ - (7, D), - (1, D), - (6, D), - (12, C), - (8, D), - (5, D), - ] - self.transitions_test(state_and_actions) - - -class TestEvolvedFSM16Noise05(TestFSMPlayer): - - name = "Evolved FSM 16 Noise 05" - player = axl.EvolvedFSM16Noise05 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - """ - FSM created by ML algorithm never called states 7 or 9, so they were deleted. 
- transitions = ( - (0, C, 8, C), - (0, D, 3, D), - (1, C, 13, C), - (1, D, 15, D), - (2, C, 12, C), - (2, D, 3, D), - (3, C, 10, C), - (3, D, 3, D), - (4, C, 5, D), - (4, D, 4, D), - (5, C, 4, D), - (5, D, 10, D), - (6, C, 8, C), - (6, D, 6, D), - - (8, C, 2, C), - (8, D, 4, D), - - (10, C, 4, D), - (10, D, 1, D), - (11, C, 14, D), - (11, D, 13, C), - (12, C, 13, C), - (12, D, 2, C), - (13, C, 13, C), - (13, D, 6, C), - (14, C, 3, D), - (14, D, 13, D), - (15, C, 5, D), - (15, D, 11, C) - ) - """ - - def test_strategy(self): - # finished: 12, 13 - state_and_actions = [ - (0, C), - (8, C), - (2, C), - (12, D), - (2, C), - (12, C), - (13, C), - (13, C), - (13, D), - ] + [(6, D)] * 3 - self.transitions_test(state_and_actions) - - # finished 2, 3, 4, 12, 13 - state_and_actions = [ - (0, C), - (8, C), - (2, D), - (3, D), - (3, D), - (3, C), - (10, C), - (4, D), - (4, D), - (4, C), - (5, D), - ] - self.transitions_test(state_and_actions) - - # finished 0, 2, 3, 4, 6, 8, 10, 12, 13 - state_and_actions = [ - (0, D), - (3, C), - (10, D), - (1, C), - (13, D), - (6, C), - (8, D), - (4, C), - (5, C), - (4, C), - (5, D), - ] - self.transitions_test(state_and_actions) - - # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 13, 15 - state_and_actions = [ - (0, D), - (3, C), - (10, D), - (1, D), - (15, C), - (5, D), - (10, D), - (1, D), - (15, D), - (11, D), - ] - self.transitions_test(state_and_actions) - - # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 13, 15 - to_state_eleven = [(0, D), (3, C), (10, D), (1, D), (15, D)] - - state_and_actions = to_state_eleven + [(11, C), (14, C), (3, C), (10, D)] - self.transitions_test(state_and_actions) - - # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 15 - state_and_actions = to_state_eleven + [(11, D)] + [(13, C)] * 3 - self.transitions_test(state_and_actions) - - # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15 - state_and_actions = to_state_eleven + [(11, C), (14, D)] + [(13, C)] * 3 - self.transitions_test(state_and_actions) - - -class TestTF1(TestFSMPlayer): - name = "TF1" - player = axl.TF1 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - -class TestTF2(TestFSMPlayer): - name = "TF2" - player = axl.TF2 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (D, D), (D, C), (C, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - -class TestTF3(TestFSMPlayer): - name = "TF3" - player = axl.TF3 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - -class TestEvolvableFSMPlayer(unittest.TestCase): - - player_class = EvolvableFSMPlayer - - def test_normalized_parameters(self): - self.assertRaises( - InsufficientParametersError, - self.player_class._normalize_parameters - ) - 
self.assertRaises(
-            InsufficientParametersError,
-            self.player_class._normalize_parameters,
-            transitions=[[0, C, 1, D], [0, D, 0, D], [1, C, 1, C], [1, D, 1, D]]
-        )
-
-    def test_init(self):
-        transitions = [[0, C, 1, D], [0, D, 0, D], [1, C, 1, C], [1, D, 1, D]]
-        player = axl.EvolvableFSMPlayer(
-            transitions=transitions,
-            initial_action=D,
-            initial_state=1
-        )
-        self.assertEqual(player.num_states, 2)
-        self.assertEqual(player.fsm.transitions(), transitions)
-        self.assertEqual(player.initial_action, D)
-        self.assertEqual(player.initial_state, 1)
-
-    def test_vector_to_instance(self):
-        num_states = 4
-        vector = [random.random() for _ in range(num_states * 4 + 1)]
-        player = axl.EvolvableFSMPlayer(num_states=num_states)
-        player.receive_vector(vector)
-        self.assertIsInstance(player, axl.EvolvableFSMPlayer)
-
-        serialized = player.serialize_parameters()
-        deserialized_player = player.__class__.deserialize_parameters(serialized)
-        self.assertEqual(player, deserialized_player)
-        self.assertEqual(deserialized_player, deserialized_player.clone())
-
-    def test_create_vector_bounds(self):
-        num_states = 4
-        player = axl.EvolvableFSMPlayer(num_states=num_states)
-        lb, ub = player.create_vector_bounds()
-        self.assertEqual(lb, [0] * (4 * num_states + 1))
-        self.assertEqual(ub, [1] * (4 * num_states + 1))
-
-
-class TestEvolvableFSMPlayer2(TestEvolvablePlayer):
-    name = "EvolvableFSMPlayer"
-    player_class = axl.EvolvableFSMPlayer
-    parent_class = FSMPlayer
-    parent_kwargs = ["transitions", "initial_action", "initial_state"]
-    init_parameters = {"num_states": 4}
-
-
-class TestEvolvableFSMPlayer3(TestEvolvablePlayer):
-    name = "EvolvableFSMPlayer"
-    player_class = axl.EvolvableFSMPlayer
-    parent_class = FSMPlayer
-    parent_kwargs = ["transitions", "initial_action", "initial_state"]
-    init_parameters = {"num_states": 16}
-
-
-class TestEvolvableFSMPlayer4(TestEvolvablePlayer):
-    name = "EvolvableFSMPlayer"
-    player_class = axl.EvolvableFSMPlayer
-    parent_class = FSMPlayer
-    parent_kwargs = ["transitions", "initial_action", "initial_state"]
-    init_parameters = {
-        "transitions": ((1, C, 1, C), (1, D, 2, D), (2, C, 2, D), (2, D, 1, C)),
-        "initial_state": 1,
-        "initial_action": C
-    }
-
-
-# Substitute EvolvableFSMPlayer as a regular FSMPlayer.
-EvolvableFSMPlayerWithDefault = PartialClass(
-    EvolvableFSMPlayer,
-    transitions=((1, C, 1, C), (1, D, 1, D)),
-    initial_state=1,
-    initial_action=C)
-
-
-class EvolvableFSMAsFSM(TestFSMPlayer):
-    player = EvolvableFSMPlayerWithDefault
-
-    def test_equality_of_clone(self):
-        pass
-
-    def test_equality_of_pickle_clone(self):
-        pass
-
-    def test_repr(self):
-        pass
diff --git a/axelrod/ipd/tests/strategies/test_forgiver.py b/axelrod/ipd/tests/strategies/test_forgiver.py
deleted file mode 100644
index e83b86d76..000000000
--- a/axelrod/ipd/tests/strategies/test_forgiver.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""Tests for the forgiver strategies."""
-
-import axelrod as axl
-
-from .test_player import TestPlayer
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestForgiver(TestPlayer):
-
-    name = "Forgiver"
-    player = axl.Forgiver
-    expected_classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_strategy(self):
-        # If the opponent has defected more than 10 percent of the time, defect.
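-        # Against Cooperator the defection rate stays at zero, so Forgiver
-        # always cooperates; against Defector it exceeds 10 percent after
-        # the first round, so Forgiver defects from then on.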
- self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 10) - - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 10) - - def test_cooperates_if_opponent_defections_is_ten_pct_and_defects_if_opponent_defections_gt_ten_pct( - self - ): - final_action_lowers_defections_to_ten_percent = [D] + [C] * 9 - expected = [(C, D)] + [(D, C)] * 9 - self.versus_test( - axl.MockPlayer(actions=final_action_lowers_defections_to_ten_percent), - expected_actions=expected * 5, - ) - - def test_never_defects_if_opponent_defections_le_ten_percent(self): - defections_always_le_ten_percent = [C] * 9 + [D] - expected = [(C, C)] * 9 + [(C, D)] - self.versus_test( - axl.MockPlayer(actions=defections_always_le_ten_percent), - expected_actions=expected * 5, - ) - - -class TestForgivingTitForTat(TestPlayer): - - name = "Forgiving Tit For Tat" - player = axl.ForgivingTitForTat - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 5) - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 5) - self.versus_test( - axl.Alternator(), expected_actions=[(C, C)] + [(C, D), (D, C)] * 5 - ) - - def test_never_defects_if_opponent_defections_le_ten_percent(self): - defections_always_le_ten_percent = [C] * 9 + [D] - expected = [(C, C)] * 9 + [(C, D)] - self.versus_test( - axl.MockPlayer(actions=defections_always_le_ten_percent), - expected_actions=expected * 5, - ) - - def test_plays_tit_for_tat_while_defections_gt_ten_percent(self): - before_tft = (18 * [C] + [D]) * 3 + [D, D, D] - only_cooperates = ([(C, C)] * 18 + [(C, D)]) * 3 + [(C, D), (C, D), (C, D)] - self.versus_test( - axl.MockPlayer(actions=before_tft), expected_actions=only_cooperates - ) - - now_alternator = before_tft + [D, C, D, C] - now_tft = only_cooperates + [(C, D), (D, C), (C, D), (D, C)] - self.versus_test( - axl.MockPlayer(actions=now_alternator), expected_actions=now_tft - ) - - def test_reverts_to_cooperator_if_defections_become_le_ten_percent(self): - four_defections = [D, D, D, D] - first_four = [(C, D)] + [(D, D)] * 3 - defections_at_ten_pct = four_defections + [C] * 36 - tft = first_four + [(D, C)] + [(C, C)] * 35 - - maintain_ten_pct = defections_at_ten_pct + ([C] * 9 + [D]) * 3 - now_cooperates = tft + ([(C, C)] * 9 + [(C, D)]) * 3 - self.versus_test( - axl.MockPlayer(actions=maintain_ten_pct), - expected_actions=now_cooperates, - ) diff --git a/axelrod/ipd/tests/strategies/test_gambler.py b/axelrod/ipd/tests/strategies/test_gambler.py deleted file mode 100755 index 31c00382a..000000000 --- a/axelrod/ipd/tests/strategies/test_gambler.py +++ /dev/null @@ -1,585 +0,0 @@ -"""Test for the Gambler strategy. Most tests come from the LookerUp test suite. 
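-The PSO player tables used below are loaded from data/pso_gambler.csv.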
-""" -import unittest - -import copy - -import random - -import axelrod as axl -from axelrod.ipd.load_data_ import load_pso_tables -from axelrod.ipd.strategies.lookerup import create_lookup_table_keys - -from .test_lookerup import convert_original_to_current -from .test_player import TestPlayer -from .test_evolvable_player import PartialClass, TestEvolvablePlayer - - -tables = load_pso_tables("pso_gambler.csv", directory="data") -C, D = axl.Action.C, axl.Action.D - - -class TestGambler(TestPlayer): - - name = "Gambler" - player = axl.Gambler - - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - expected_class_classifier = copy.copy(expected_classifier) - - def test_strategy(self): - tft_table = {((), (D,), ()): 0, ((), (C,), ()): 1} - self.versus_test( - axl.Alternator(), - expected_actions=[(C, C)] + [(C, D), (D, C)] * 5, - init_kwargs={"lookup_dict": tft_table}, - ) - - def test_stochastic_values(self): - stochastic_lookup = {((), (), ()): 0.3} - expected_actions = [(C, C), (D, C), (D, C), (C, C), (D, C)] - self.versus_test( - axl.Cooperator(), - expected_actions=expected_actions, - init_kwargs={"lookup_dict": stochastic_lookup}, - seed=1, - ) - - -class TestPSOGamblerMem1(TestPlayer): - - name = "PSO Gambler Mem1" - player = axl.PSOGamblerMem1 - - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - expected_class_classifier = copy.copy(expected_classifier) - - def test_new_data(self): - original_data = { - ("", "C", "C"): 1.0, - ("", "C", "D"): 0.52173487, - ("", "D", "C"): 0.0, - ("", "D", "D"): 0.12050939, - } - converted_original = convert_original_to_current(original_data) - self.assertEqual(self.player().lookup_dict, converted_original) - - def test_strategy(self): - vs_cooperator = [(C, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=vs_cooperator) - - def test_defects_forever_with_correct_conditions(self): - seed = 1 - opponent_actions = [D, D] + [C] * 10 - expected = [(C, D), (C, D), (D, C)] + [(D, C)] * 9 - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=expected, - seed=seed, - ) - - -class TestPSOGambler1_1_1(TestPlayer): - - name = "PSO Gambler 1_1_1" - player = axl.PSOGambler1_1_1 - - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_new_data(self): - original_data = { - ("C", "C", "C"): 1.0, - ("C", "C", "D"): 0.12304797, - ("C", "D", "C"): 0.0, - ("C", "D", "D"): 0.13581423, - ("D", "C", "C"): 1.0, - ("D", "C", "D"): 0.57740178, - ("D", "D", "C"): 0.0, - ("D", "D", "D"): 0.11886807, - } - converted_original = convert_original_to_current(original_data) - self.assertEqual(self.player().lookup_dict, converted_original) - - def test_cooperate_forever(self): - seed = 2 - opponent = [D] * 3 + [C] * 10 - expected = [(C, D), (D, D), (D, D)] + [(C, C)] * 10 - self.versus_test( - axl.MockPlayer(opponent), expected_actions=expected, seed=seed - ) - - def test_defect_forever(self): - seed = 2 - opponent_actions = [C] + [D] + [C] * 10 - expected = [(C, C), (C, D)] + [(D, C)] * 10 - self.versus_test( - axl.MockPlayer(opponent_actions), 
expected_actions=expected, seed=seed - ) - - opponent_actions = [D] + [C] * 10 - expected = [(C, D)] + [(D, C)] * 10 - self.versus_test( - axl.MockPlayer(opponent_actions), expected_actions=expected, seed=seed - ) - - -class TestPSOGambler2_2_2(TestPlayer): - - name = "PSO Gambler 2_2_2" - player = axl.PSOGambler2_2_2 - - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_new_data(self): - original_data = { - ("CC", "CC", "CC"): 1.0, - ("CC", "CC", "CD"): 1.0, - ("CC", "CC", "DC"): 0.0, - ("CC", "CC", "DD"): 0.02126434, - ("CC", "CD", "CC"): 0.0, - ("CC", "CD", "CD"): 1.0, - ("CC", "CD", "DC"): 1.0, - ("CC", "CD", "DD"): 0.0, - ("CC", "DC", "CC"): 0.0, - ("CC", "DC", "CD"): 0.0, - ("CC", "DC", "DC"): 0.0, - ("CC", "DC", "DD"): 0.0, - ("CC", "DD", "CC"): 0.0, - ("CC", "DD", "CD"): 0.0, - ("CC", "DD", "DC"): 0.0, - ("CC", "DD", "DD"): 1.0, - ("CD", "CC", "CC"): 1.0, - ("CD", "CC", "CD"): 0.95280465, - ("CD", "CC", "DC"): 0.80897541, - ("CD", "CC", "DD"): 0.0, - ("CD", "CD", "CC"): 0.0, - ("CD", "CD", "CD"): 0.0, - ("CD", "CD", "DC"): 0.0, - ("CD", "CD", "DD"): 0.65147565, - ("CD", "DC", "CC"): 0.15412392, - ("CD", "DC", "CD"): 0.24922166, - ("CD", "DC", "DC"): 0.0, - ("CD", "DC", "DD"): 0.0, - ("CD", "DD", "CC"): 0.0, - ("CD", "DD", "CD"): 0.0, - ("CD", "DD", "DC"): 0.0, - ("CD", "DD", "DD"): 0.24523149, - ("DC", "CC", "CC"): 1.0, - ("DC", "CC", "CD"): 0.0, - ("DC", "CC", "DC"): 0.0, - ("DC", "CC", "DD"): 0.43278586, - ("DC", "CD", "CC"): 1.0, - ("DC", "CD", "CD"): 0.0, - ("DC", "CD", "DC"): 0.23563137, - ("DC", "CD", "DD"): 1.0, - ("DC", "DC", "CC"): 1.0, - ("DC", "DC", "CD"): 1.0, - ("DC", "DC", "DC"): 0.00227615, - ("DC", "DC", "DD"): 0.0, - ("DC", "DD", "CC"): 0.0, - ("DC", "DD", "CD"): 0.0, - ("DC", "DD", "DC"): 0.0, - ("DC", "DD", "DD"): 1.0, - ("DD", "CC", "CC"): 0.0, - ("DD", "CC", "CD"): 0.0, - ("DD", "CC", "DC"): 0.0, - ("DD", "CC", "DD"): 0.0, - ("DD", "CD", "CC"): 0.15140743, - ("DD", "CD", "CD"): 0.0, - ("DD", "CD", "DC"): 0.0, - ("DD", "CD", "DD"): 0.0, - ("DD", "DC", "CC"): 0.0, - ("DD", "DC", "CD"): 0.0, - ("DD", "DC", "DC"): 0.0, - ("DD", "DC", "DD"): 1.0, - ("DD", "DD", "CC"): 0.0, - ("DD", "DD", "CD"): 1.0, - ("DD", "DD", "DC"): 0.77344942, - ("DD", "DD", "DD"): 0.0, - } - converted_original = convert_original_to_current(original_data) - self.assertEqual(self.player().lookup_dict, converted_original) - - def test_vs_defector(self): - expected = [(C, D), (C, D)] + [(D, D)] * 10 - self.versus_test(axl.Defector(), expected_actions=expected) - - def test_vs_cooperator(self): - expected = [(C, C)] * 10 - self.versus_test(axl.Cooperator(), expected_actions=expected) - - def test_vs_alternator(self): - seed = 1 - expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=expected, seed=seed) - - def test_vs_DCDDC(self): - seed = 2 - opponent_actions = [D, C, D, D, C] - expected = [ - (C, D), - (C, C), - (D, D), - (D, D), - (C, C), - (D, D), - (D, C), - (D, D), - (D, D), - (C, C), - ] - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=expected, - seed=seed, - ) - - new_seed = 139 # First seed with different result. 
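-        # Only the pair at index 5 differs under the new seed: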
- expected[5] = (C, D) - self.versus_test( - axl.MockPlayer(actions=opponent_actions), - expected_actions=expected, - seed=new_seed, - ) - - -class TestPSOGambler2_2_2_Noise05(TestPlayer): - name = "PSO Gambler 2_2_2 Noise 05" - player = axl.PSOGambler2_2_2_Noise05 - - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_new_data(self): - original_data = { - ("CC", "CC", "CC"): 1.0, - ("CC", "CC", "CD"): 0.0, - ("CC", "CC", "DC"): 1.0, - ("CC", "CC", "DD"): 0.63548102, - ("CC", "CD", "CC"): 1.0, - ("CC", "CD", "CD"): 1.0, - ("CC", "CD", "DC"): 1.0, - ("CC", "CD", "DD"): 0.0, - ("CC", "DC", "CC"): 0.0, - ("CC", "DC", "CD"): 1.0, - ("CC", "DC", "DC"): 0.0, - ("CC", "DC", "DD"): 0.0, - ("CC", "DD", "CC"): 1.0, - ("CC", "DD", "CD"): 0.0, - ("CC", "DD", "DC"): 0.0, - ("CC", "DD", "DD"): 0.0, - ("CD", "CC", "CC"): 1.0, - ("CD", "CC", "CD"): 1.0, - ("CD", "CC", "DC"): 0.0, - ("CD", "CC", "DD"): 0.0, - ("CD", "CD", "CC"): 0.0, - ("CD", "CD", "CD"): 0.13863175, - ("CD", "CD", "DC"): 1.0, - ("CD", "CD", "DD"): 0.7724137, - ("CD", "DC", "CC"): 0.0, - ("CD", "DC", "CD"): 1.0, - ("CD", "DC", "DC"): 0.0, - ("CD", "DC", "DD"): 0.07127653, - ("CD", "DD", "CC"): 0.0, - ("CD", "DD", "CD"): 1.0, - ("CD", "DD", "DC"): 0.28124022, - ("CD", "DD", "DD"): 0.0, - ("DC", "CC", "CC"): 0.0, - ("DC", "CC", "CD"): 0.98603825, - ("DC", "CC", "DC"): 0.0, - ("DC", "CC", "DD"): 0.0, - ("DC", "CD", "CC"): 1.0, - ("DC", "CD", "CD"): 0.06434619, - ("DC", "CD", "DC"): 1.0, - ("DC", "CD", "DD"): 1.0, - ("DC", "DC", "CC"): 1.0, - ("DC", "DC", "CD"): 0.50999729, - ("DC", "DC", "DC"): 0.00524508, - ("DC", "DC", "DD"): 1.0, - ("DC", "DD", "CC"): 1.0, - ("DC", "DD", "CD"): 1.0, - ("DC", "DD", "DC"): 1.0, - ("DC", "DD", "DD"): 1.0, - ("DD", "CC", "CC"): 0.0, - ("DD", "CC", "CD"): 1.0, - ("DD", "CC", "DC"): 0.16240799, - ("DD", "CC", "DD"): 0.0, - ("DD", "CD", "CC"): 0.0, - ("DD", "CD", "CD"): 1.0, - ("DD", "CD", "DC"): 1.0, - ("DD", "CD", "DD"): 0.0, - ("DD", "DC", "CC"): 0.0, - ("DD", "DC", "CD"): 1.0, - ("DD", "DC", "DC"): 0.87463905, - ("DD", "DC", "DD"): 0.0, - ("DD", "DD", "CC"): 0.0, - ("DD", "DD", "CD"): 1.0, - ("DD", "DD", "DC"): 0.0, - ("DD", "DD", "DD"): 0.0, - } - converted_original = convert_original_to_current(original_data) - self.assertEqual(self.player().lookup_dict, converted_original) - - def test_vs_defector(self): - expected = [(C, D), (C, D)] + [(D, D)] * 10 - self.versus_test(axl.Defector(), expected_actions=expected) - - def test_vs_cooperator(self): - expected = [(C, C)] * 10 - self.versus_test(axl.Cooperator(), expected_actions=expected) - - def test_vs_alternator(self): - seed = 2 - expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (D, D), (C, C)] - self.versus_test(axl.Alternator(), expected_actions=expected, seed=seed) - - new_seed = 1 - expected[4] = (C, C) - expected[6] = (D, C) - self.versus_test(axl.Alternator(), expected_actions=expected, seed=new_seed) - - def test_vs_DCDDC(self): - opponent_actions = [D, C, D, D, C] - - seed = 1 - expected = [ - (C, D), - (C, C), - (D, D), - (D, D), - (C, C), - (D, D), - (D, C), - (C, D), - (C, D), - ] - self.versus_test( - axl.MockPlayer(opponent_actions), expected_actions=expected, seed=seed - ) - - new_seed = 3 - expected[8] = (D, D) - self.versus_test( - axl.MockPlayer(opponent_actions), - expected_actions=expected, - seed=new_seed, - ) - - new_seed = 2 - new_expected = 
expected[:6] + [(C, C), (D, D), (D, D)] - self.versus_test( - axl.MockPlayer(opponent_actions), - expected_actions=new_expected, - seed=new_seed, - ) - - -class TestZDMem2(TestPlayer): - name = "ZD-Mem2" - player = axl.ZDMem2 - - expected_classifier = { - "memory_depth": 2, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_new_data(self): - original_data = { - ("", "CC", "CC"): 11 / 12, - ("", "CC", "CD"): 4 / 11, - ("", "CC", "DC"): 7 / 9, - ("", "CC", "DD"): 1 / 10, - ("", "CD", "CC"): 5 / 6, - ("", "CD", "CD"): 3 / 11, - ("", "CD", "DC"): 7 / 9, - ("", "CD", "DD"): 1 / 10, - ("", "DC", "CC"): 2 / 3, - ("", "DC", "CD"): 1 / 11, - ("", "DC", "DC"): 7 / 9, - ("", "DC", "DD"): 1 / 10, - ("", "DD", "CC"): 3 / 4, - ("", "DD", "CD"): 2 / 11, - ("", "DD", "DC"): 7 / 9, - ("", "DD", "DD"): 1 / 10, - } - converted_original = convert_original_to_current(original_data) - self.assertEqual(self.player().lookup_dict, converted_original) - - def test_vs_defector(self): - seed = 5 - expected = [ - (C, D), - (C, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (C, D), - (D, D), - ] - - self.versus_test(axl.Defector(), expected_actions=expected, seed=seed) - - def test_vs_cooperator(self): - seed = 5 - expected = [ - (C, C), - (C, C), - (C, C), - (C, C), - (C, C), - (D, C), - (C, C), - (D, C), - (C, C), - (C, C), - ] - - self.versus_test(axl.Cooperator(), expected_actions=expected, seed=seed) - - def test_vs_alternator(self): - seed = 2 - expected = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=expected, seed=seed) - - new_seed = 1 - expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (C, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=expected, seed=new_seed) - - -class TestEvolvableGambler(unittest.TestCase): - - def test_receive_vector(self): - plays, op_plays, op_start_plays = 1, 1, 1 - player = axl.EvolvableGambler( - parameters=(plays, op_plays, op_start_plays)) - - self.assertRaises(AttributeError, axl.EvolvableGambler.__getattribute__, - *[player, 'vector']) - - vector = [random.random() for _ in range(8)] - player.receive_vector(vector) - self.assertEqual(player.pattern, vector) - - def test_vector_to_instance(self): - plays, op_plays, op_start_plays = 1, 1, 1 - player = axl.EvolvableGambler( - parameters=(plays, op_plays, op_start_plays)) - - vector = [random.random() for _ in range(8)] - player.receive_vector(vector) - keys = create_lookup_table_keys(player_depth=plays, op_depth=op_plays, - op_openings_depth=op_start_plays) - action_dict = dict(zip(keys, vector)) - self.assertEqual(player._lookup.dictionary, action_dict) - - def test_create_vector_bounds(self): - plays, op_plays, op_start_plays = 1, 1, 1 - player = axl.EvolvableGambler( - parameters=(plays, op_plays, op_start_plays)) - lb, ub = player.create_vector_bounds() - self.assertIsInstance(lb, list) - self.assertIsInstance(ub, list) - self.assertEqual(len(lb), 8) - self.assertEqual(len(ub), 8) - - def test_mutate_value_bounds(self): - self.assertEqual(axl.EvolvableGambler.mutate_value(2), 1) - self.assertEqual(axl.EvolvableGambler.mutate_value(-2), 0) - - -class TestEvolvableGambler2(TestEvolvablePlayer): - name = "EvolvableGambler" - player_class = axl.EvolvableGambler - parent_class = axl.Gambler - parent_kwargs = ["lookup_dict"] - init_parameters = {"parameters": (1, 1, 1), - "initial_actions": (C,)} - - 
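# Aside: the receive_vector tests above rely on a fixed key ordering being
# zipped against a flat vector of probabilities. A hedged sketch of that
# mapping, with create_keys standing in for axelrod's create_lookup_table_keys:
import itertools
import random

def create_keys(player_depth, op_depth, op_openings_depth):
    actions = ("C", "D")
    return [
        (self_plays, op_plays, op_openings)
        for self_plays in itertools.product(actions, repeat=player_depth)
        for op_plays in itertools.product(actions, repeat=op_depth)
        for op_openings in itertools.product(actions, repeat=op_openings_depth)
    ]

keys = create_keys(1, 1, 1)               # 2 * 2 * 2 = 8 states
vector = [random.random() for _ in keys]  # one probability per state
lookup = dict(zip(keys, vector))          # what receive_vector effectively builds
assert len(lookup) == 8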
-class TestEvolvableGambler3(TestEvolvablePlayer): - name = "EvolvableGambler" - player_class = axl.EvolvableGambler - parent_class = axl.Gambler - parent_kwargs = ["lookup_dict"] - init_parameters = {"parameters": (3, 2, 1), - "initial_actions": (C, C, C,)} - - -class TestEvolvableGambler4(TestEvolvablePlayer): - name = "EvolvableGambler" - player_class = axl.EvolvableGambler - parent_class = axl.Gambler - parent_kwargs = ["lookup_dict"] - init_parameters = {"parameters": (2, 2, 2), - "pattern": [random.random() for _ in range(64)], - "initial_actions": (C, C,)} - - -# Substitute EvolvableGambler as a regular Gambler. -EvolvableGamblerWithDefault = PartialClass( - axl.EvolvableGambler, - pattern=tables[("PSO Gambler 2_2_2", 2, 2, 2)], - parameters=(2, 2, 2), - initial_actions=(C, C,) -) - - -class EvolvableGamblerAsGambler(TestPSOGambler2_2_2): - player = EvolvableGamblerWithDefault - - def test_equality_of_clone(self): - pass - - def test_equality_of_pickle_clone(self): - pass - - def test_repr(self): - pass diff --git a/axelrod/ipd/tests/strategies/test_geller.py b/axelrod/ipd/tests/strategies/test_geller.py deleted file mode 100644 index e2b7bdf67..000000000 --- a/axelrod/ipd/tests/strategies/test_geller.py +++ /dev/null @@ -1,132 +0,0 @@ -"""Tests for the Geller strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestGeller(TestPlayer): - - name = "Geller" - player = axl.Geller - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": True, # Finds out what opponent will do - "manipulates_state": False, - "manipulates_source": False, - } - - @classmethod - def tearDownClass(cls): - """After all tests have run, makes sure the Darwin genome is reset.""" - axl.Darwin.reset_genome() - super(TestGeller, cls).tearDownClass() - - def setUp(self): - """Each test starts with the basic Darwin genome.""" - axl.Darwin.reset_genome() - super(TestGeller, self).setUp() - - def test_foil_strategy_inspection(self): - axl.seed(2) - player = self.player() - self.assertEqual(player.foil_strategy_inspection(), D) - self.assertEqual(player.foil_strategy_inspection(), D) - self.assertEqual(player.foil_strategy_inspection(), C) - - def test_strategy(self): - """Should cooperate against cooperators and defect against defectors.""" - self.versus_test(axl.Defector(), expected_actions=[(D, D)] * 5) - self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 5) - self.versus_test(axl.Alternator(), expected_actions=[(C, C), (D, D)] * 5) - - def test_strategy_against_lookerup_players(self): - """ - Regression test for a bug discussed in - https://github.com/Axelrod-Python/Axelrod/issues/1185 - """ - self.versus_test( - axl.EvolvedLookerUp1_1_1(), expected_actions=[(C, C), (C, C)] - ) - self.versus_test( - axl.EvolvedLookerUp2_2_2(), expected_actions=[(C, C), (C, C)] - ) - - def test_returns_foil_inspection_strategy_of_opponent(self): - self.versus_test( - axl.GellerDefector(), - expected_actions=[(D, D), (D, D), (D, C), (D, C)], - seed=2, - ) - - self.versus_test(axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)]) - - self.versus_test( - axl.MindReader(), expected_actions=[(D, D), (D, D), (D, D)], seed=1 - ) - - -class TestGellerCooperator(TestGeller): - - name = "Geller Cooperator" - player = axl.GellerCooperator - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time":
False, - "inspects_source": True, # Finds out what opponent will do - "manipulates_source": False, - "manipulates_state": False, - } - - def test_foil_strategy_inspection(self): - player = self.player() - self.assertEqual(player.foil_strategy_inspection(), C) - - def test_returns_foil_inspection_strategy_of_opponent(self): - self.versus_test( - axl.GellerDefector(), expected_actions=[(D, C), (D, C), (D, C), (D, C)] - ) - - self.versus_test(axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)]) - - self.versus_test( - axl.MindReader(), expected_actions=[(D, D), (D, D), (D, D)] - ) - - -class TestGellerDefector(TestGeller): - - name = "Geller Defector" - player = axl.GellerDefector - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": True, # Finds out what opponent will do - "manipulates_source": False, - "manipulates_state": False, - } - - def test_foil_strategy_inspection(self): - player = self.player() - self.assertEqual(player.foil_strategy_inspection(), D) - - def test_returns_foil_inspection_strategy_of_opponent(self): - - self.versus_test( - axl.GellerDefector(), expected_actions=[(D, D), (D, D), (D, D), (D, D)] - ) - - self.versus_test(axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)]) - - self.versus_test( - axl.MindReader(), expected_actions=[(D, D), (D, D), (D, D)] - ) diff --git a/axelrod/ipd/tests/strategies/test_gobymajority.py b/axelrod/ipd/tests/strategies/test_gobymajority.py deleted file mode 100644 index 6cd553880..000000000 --- a/axelrod/ipd/tests/strategies/test_gobymajority.py +++ /dev/null @@ -1,179 +0,0 @@ -"""Tests for the GoByMajority strategies.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestHardGoByMajority(TestPlayer): - - name = "Hard Go By Majority" - player = axl.HardGoByMajority - default_soft = False - - expected_classifier = { - "stochastic": False, - "memory_depth": float("inf"), - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_memory_depth_infinite_soft_is_false(self): - init_kwargs = {} - if self.default_soft: - init_kwargs["soft"] = False - - opponent_actions = [C] * 50 + [D] * 100 + [C] * 52 - actions = ( - [(D, C)] - + [(C, C)] * 49 - + [(C, D)] * 50 - + [(D, D)] * 50 - + [(D, C)] * 51 - + [(C, C)] - ) - opponent = axl.MockPlayer(actions=opponent_actions) - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - def test_memory_depth_even_soft_is_false(self): - memory_depth = 4 - init_kwargs = {"memory_depth": memory_depth} - if self.default_soft: - init_kwargs["soft"] = False - - opponent = axl.MockPlayer(actions=[C] * memory_depth + [D] * memory_depth) - actions = ( - [(D, C)] - + [(C, C)] * 3 - + [(C, D)] * 2 - + [(D, D)] * 2 - + [(D, C)] * 3 - + [(C, C)] - ) - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - def test_memory_depth_odd(self): - memory_depth = 5 - init_kwargs = {"memory_depth": memory_depth} - if self.default_soft: - first_action = [(C, C)] - else: - first_action = [(D, C)] - opponent = axl.MockPlayer(actions=[C] * memory_depth + [D] * memory_depth) - actions = ( - first_action - + [(C, C)] * 4 - + [(C, D)] * 3 - + [(D, D)] * 2 - + [(D, C)] * 3 - + [(C, C)] * 2 - ) - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - def test_default_values(self): - player = 
self.player() - self.assertEqual(player.soft, self.default_soft) - self.assertEqual(player.memory, 0) - - -class TestGoByMajority(TestHardGoByMajority): - - name = "Soft Go By Majority" - player = axl.GoByMajority - default_soft = True - - def test_memory_depth_infinite_soft_is_true(self): - opponent_actions = [C] * 50 + [D] * 100 + [C] * 52 - actions = ( - [(C, C)] * 50 + [(C, D)] * 51 + [(D, D)] * 49 + [(D, C)] * 50 + [(C, C)] * 2 - ) - opponent = axl.MockPlayer(actions=opponent_actions) - self.versus_test(opponent, expected_actions=actions) - - def test_memory_depth_even_soft_is_true(self): - memory_depth = 4 - init_kwargs = {"memory_depth": memory_depth} - - opponent = axl.MockPlayer([C] * memory_depth + [D] * memory_depth) - actions = [(C, C)] * 4 + [(C, D)] * 3 + [(D, D)] + [(D, C)] * 2 + [(C, C)] * 2 - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - def test_name(self): - player = self.player(soft=True) - self.assertEqual(player.name, "Soft Go By Majority") - player = self.player(soft=False) - self.assertEqual(player.name, "Hard Go By Majority") - player = self.player(memory_depth=5) - self.assertEqual(player.name, "Soft Go By Majority: 5") - - def test_str(self): - player = self.player(soft=True) - name = str(player) - self.assertEqual(name, "Soft Go By Majority") - player = self.player(soft=False) - name = str(player) - self.assertEqual(name, "Hard Go By Majority") - player = self.player(memory_depth=5) - name = str(player) - self.assertEqual(name, "Soft Go By Majority: 5") - - -def factory_TestGoByRecentMajority(memory_depth, soft=True): - - prefix = "Hard" - prefix2 = "Hard" - if soft: - prefix = "Soft" - prefix2 = "" - - class TestGoByRecentMajority(TestPlayer): - - name = "{} Go By Majority: {}".format(prefix, memory_depth) - player = getattr(axl, "{}GoByMajority{}".format(prefix2, memory_depth)) - - expected_classifier = { - "stochastic": False, - "memory_depth": memory_depth, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # for example memory_depth=2 plays against [C, C, D, D] - # soft actions = [(C, C), (C, C), (C, D), (C, D)] - # hard actions = [(D, C), (C, C), (C, D), (D, D)] - opponent_actions = [C] * memory_depth + [D] * memory_depth - opponent = axl.MockPlayer(actions=opponent_actions) - if soft: - first_player_action = [C] - else: - first_player_action = [D] - if memory_depth % 2 == 1 or soft: - cooperations = int(memory_depth * 1.5) - else: - cooperations = int(memory_depth * 1.5) - 1 - defections = len(opponent_actions) - cooperations - 1 - player_actions = first_player_action + [C] * cooperations + [D] * defections - - actions = list(zip(player_actions, opponent_actions)) - self.versus_test(opponent, expected_actions=actions) - - return TestGoByRecentMajority - - -TestGoByMajority5 = factory_TestGoByRecentMajority(5) -TestGoByMajority10 = factory_TestGoByRecentMajority(10) -TestGoByMajority20 = factory_TestGoByRecentMajority(20) -TestGoByMajority40 = factory_TestGoByRecentMajority(40) -TestHardGoByMajority5 = factory_TestGoByRecentMajority(5, soft=False) -TestHardGoByMajority10 = factory_TestGoByRecentMajority(10, soft=False) -TestHardGoByMajority20 = factory_TestGoByRecentMajority(20, soft=False) -TestHardGoByMajority40 = factory_TestGoByRecentMajority(40, soft=False) diff --git a/axelrod/ipd/tests/strategies/test_gradualkiller.py b/axelrod/ipd/tests/strategies/test_gradualkiller.py deleted file mode 
100644 index 7d2729862..000000000 --- a/axelrod/ipd/tests/strategies/test_gradualkiller.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Tests for the Gradual Killer strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestGradualKiller(TestPlayer): - - name = "Gradual Killer: (D, D, D, D, D, C, C)" - player = axl.GradualKiller - expected_classifier = { - "memory_depth": float("Inf"), - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - first_seven = [D, D, D, D, D, C, C] - - def test_first_seven_moves_always_the_same(self): - opponent = axl.Cooperator() - actions = list(zip(self.first_seven, [C] * 7)) - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Defector() - actions = list(zip(self.first_seven, [D] * 7)) - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Alternator() - actions = list(zip(self.first_seven, [C, D] * 4)) - self.versus_test(opponent, expected_actions=actions) - - def test_effect_of_strategy_with_history_CC(self): - """Continues with C if opponent played CC on 6 and 7.""" - opponent_actions = [D] * 5 + [C, C] + [D, C] * 20 - opponent = axl.MockPlayer(actions=opponent_actions) - - start = list(zip(self.first_seven, opponent_actions[:7])) - actions = start + [(C, D), (C, C)] * 20 - - self.versus_test(opponent, expected_actions=actions) - - def test_effect_of_strategy_with_history_CD(self): - """Continues with C if opponent played CD on 6 and 7.""" - opponent_actions = [D] * 5 + [C, D] + [D, C] * 20 - opponent = axl.MockPlayer(actions=opponent_actions) - - start = list(zip(self.first_seven, opponent_actions[:7])) - actions = start + [(C, D), (C, C)] * 20 - - self.versus_test(opponent, expected_actions=actions) - - def test_effect_of_strategy_with_history_DC(self): - """Continues with C if opponent played DC on 6 and 7.""" - opponent_actions = [D] * 5 + [D, C] + [D, C] * 20 - opponent = axl.MockPlayer(actions=opponent_actions) - - start = list(zip(self.first_seven, opponent_actions[:7])) - actions = start + [(C, D), (C, C)] * 20 - - self.versus_test(opponent, expected_actions=actions) - - def test_effect_of_strategy_with_history_DD(self): - """Continues with D if opponent played DD on 6 and 7.""" - opponent_actions = [C] * 5 + [D, D] + [D, C] * 20 - opponent = axl.MockPlayer(actions=opponent_actions) - - start = list(zip(self.first_seven, opponent_actions[:7])) - actions = start + [(D, D), (D, C)] * 20 - - self.versus_test(opponent, expected_actions=actions) diff --git a/axelrod/ipd/tests/strategies/test_grudger.py b/axelrod/ipd/tests/strategies/test_grudger.py deleted file mode 100644 index 79194b5d8..000000000 --- a/axelrod/ipd/tests/strategies/test_grudger.py +++ /dev/null @@ -1,278 +0,0 @@ -"""Tests for Grudger strategies.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestGrudger(TestPlayer): - - name = "Grudger" - player = axl.Grudger - expected_classifier = { - "memory_depth": float('inf'), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent defects at any point then the player will defect forever. 
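# The rule described above, as a minimal standalone sketch (not the
# axl.Grudger class itself): defect whenever the opponent has ever defected.
def toy_grudger_move(opponent_history):
    return "D" if "D" in opponent_history else "C"

assert toy_grudger_move("CCC") == "C"
assert toy_grudger_move("CCDCC") == "D"  # a single defection is never forgiven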
- opponent = axl.Cooperator() - actions = [(C, C)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Defector() - actions = [(C, D)] + [(D, D)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent_actions = [C] * 10 + [D] + [C] * 20 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C)] * 10 + [(C, D)] + [(D, C)] * 20 - self.versus_test(opponent, expected_actions=actions) - - -class TestForgetfulGrudger(TestPlayer): - - name = "Forgetful Grudger" - player = axl.ForgetfulGrudger - expected_classifier = { - "memory_depth": 10, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent defects at any point then the player will respond with - # D ten times and then continue to check for defections. - opponent = axl.Cooperator() - actions = [(C, C)] * 20 - attrs = {"grudged": False, "mem_length": 10, "grudge_memory": 0} - self.versus_test(opponent, expected_actions=actions, attrs=attrs) - - for i in range(1, 15): - opponent = axl.Defector() - actions = [(C, D)] + [(D, D)] * i - memory = i if i <= 10 else i - 10 - attrs = {"grudged": True, "mem_length": 10, "grudge_memory": memory} - self.versus_test(opponent, expected_actions=actions, attrs=attrs) - - opponent_actions = [C] * 2 + [D] + [C] * 10 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = ([(C, C)] * 2 + [(C, D)] + [(D, C)] * 10) * 3 + [(C, C)] - attrs = {"grudged": False, "mem_length": 10, "grudge_memory": 0} - self.versus_test(opponent, expected_actions=actions, attrs=attrs) - - -class TestOppositeGrudger(TestPlayer): - - name = "Opposite Grudger" - player = axl.OppositeGrudger - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent cooperates at any point then the player will cooperate - # forever. - opponent = axl.Cooperator() - actions = [(D, C)] + [(C, C)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Defector() - actions = [(D, D)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent_actions = [C] + [D] * 30 - opponent = axl.MockPlayer(actions=opponent_actions) - expected = [(D, C)] + [(C, D)] * 30 - self.versus_test(opponent, expected_actions=expected) - - -class TestAggravater(TestPlayer): - - name = "Aggravater" - player = axl.Aggravater - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent defects at any point then the player will defect forever. - # Always defects on first three turns. 
- opponent = axl.Cooperator() - actions = [(D, C)] * 3 + [(C, C)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Defector() - actions = [(D, D)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent_actions = [C] * 10 + [D] + [C] * 20 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = [(D, C)] * 3 + [(C, C)] * 7 + [(C, D)] + [(D, C)] * 20 - self.versus_test(opponent, expected_actions=actions) - - -class TestSoftGrudger(TestPlayer): - - name = "Soft Grudger" - player = axl.SoftGrudger - expected_classifier = { - "memory_depth": 6, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent defects at any point then the player will respond with - # D, D, D, D, C, C and then continue to check for defections. - grudge_response_d = [(D, D)] * 4 + [(C, D)] * 2 - grudge_response_c = [(D, C)] * 4 + [(C, C)] * 2 - - opponent = axl.Cooperator() - actions = [(C, C)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Defector() - actions = [(C, D)] + grudge_response_d * 5 - self.versus_test(opponent, expected_actions=actions) - - opponent_actions = [C] * 10 + [D] - opponent = axl.MockPlayer(actions=opponent_actions) - actions_start = [(C, C)] * 10 + [(C, D)] - subsequent = grudge_response_c + [(C, C)] * 4 + [(C, D)] - actions = actions_start + subsequent * 5 - self.versus_test(opponent, expected_actions=actions) - - def test_reset(self): - player = self.player() - player.grudged = True - player.grudge_memory = 5 - player.reset() - self.assertFalse(player.grudged) - self.assertEqual(player.grudge_memory, 0) - - -class TestGrudgerAlternator(TestPlayer): - - name = "GrudgerAlternator" - player = axl.GrudgerAlternator - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent defects at any point then the player will alternate D C. - opponent = axl.Cooperator() - actions = [(C, C)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Defector() - actions = [(C, D)] + [(D, D), (C, D)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent_actions = [C] * 10 + [D] + [C] * 20 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = [(C, C)] * 10 + [(C, D)] + [(D, C), (C, C)] * 10 - self.versus_test(opponent, expected_actions=actions) - - -class TestEasyGo(TestPlayer): - - name = "EasyGo" - player = axl.EasyGo - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent defects at any point then the player will cooperate - # forever. 
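# EasyGo inverts Grudger's rule; a minimal standalone sketch of the behaviour
# described above (not the axl.EasyGo class itself):
def toy_easygo_move(opponent_history):
    return "C" if "D" in opponent_history else "D"

assert toy_easygo_move("CCC") == "D"  # defects until the opponent defects
assert toy_easygo_move("CCD") == "C"  # then cooperates forever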
- opponent = axl.Cooperator() - actions = [(D, C)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Defector() - actions = [(D, D)] + [(C, D)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent_actions = [C] * 10 + [D, C] * 20 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = [(D, C)] * 10 + [(D, D)] + [(C, C), (C, D)] * 19 - self.versus_test(opponent, expected_actions=actions) - - -class TestGeneralSoftGrudger(TestPlayer): - - name = "General Soft Grudger: n=1,d=4,c=2" - player = axl.GeneralSoftGrudger - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - """Test strategy with multiple initial parameters""" - - # Testing default parameters of n=1, d=4, c=2 (same as Soft Grudger) - actions = [(C, D), (D, D), (D, C), (D, C), (D, D), (C, D), (C, C), (C, C)] - self.versus_test(axl.MockPlayer(actions=[D, D, C, C]), expected_actions=actions) - - # Testing n=2, d=4, c=2 - actions = [(C, D), (C, D), (D, C), (D, C), (D, D), (D, D), (C, C), (C, C)] - self.versus_test( - axl.MockPlayer(actions=[D, D, C, C]), - expected_actions=actions, - init_kwargs={"n": 2}, - ) - - # Testing n=1, d=1, c=1 - actions = [(C, D), (D, D), (C, C), (C, C), (C, D), (D, D), (C, C), (C, C)] - self.versus_test( - axl.MockPlayer(actions=[D, D, C, C]), - expected_actions=actions, - init_kwargs={"n": 1, "d": 1, "c": 1}, - ) diff --git a/axelrod/ipd/tests/strategies/test_grumpy.py b/axelrod/ipd/tests/strategies/test_grumpy.py deleted file mode 100644 index 1fba6bbdd..000000000 --- a/axelrod/ipd/tests/strategies/test_grumpy.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Tests for the Grumpy strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestGrumpy(TestPlayer): - - name = "Grumpy: Nice, 10, -10" - player = axl.Grumpy - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_default_strategy(self): - - opponent = axl.Cooperator() - actions = [(C, C)] * 30 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Alternator() - actions = [(C, C), (C, D)] * 30 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Defector() - actions = [(C, D)] * 11 + [(D, D)] * 20 - self.versus_test(opponent, expected_actions=actions) - - opponent_actions = [D] * 11 + [C] * 22 + [D] * 11 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = ([(C, D)] * 11 + [(D, C)] * 22 + [(C, D)] * 11) * 3 - self.versus_test(opponent, expected_actions=actions) - - def test_starting_state(self): - opponent_actions = [D] * 11 + [C] * 22 + [D] * 11 - opponent = axl.MockPlayer(actions=opponent_actions) - - actions = ([(C, D)] * 11 + [(D, C)] * 22 + [(C, D)] * 11) * 3 - init_kwargs = {"starting_state": "Nice"} - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - opponent = axl.MockPlayer(actions=opponent_actions) - grumpy_starting = [(D, D)] * 11 + [(D, C)] * 22 + [(C, D)] * 11 - actions = grumpy_starting + actions - init_kwargs = {"starting_state": "Grumpy"} - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - def test_thresholds(self): 
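# Sketch of the threshold logic these cases exercise, assuming Grumpy tracks
# opponent defections minus cooperations; a standalone toy, not axl.Grumpy:
def toy_grumpy_move(state, op_history, grumpy_threshold=10, nice_threshold=-10):
    unhappiness = op_history.count("D") - op_history.count("C")
    if state == "Nice" and unhappiness > grumpy_threshold:
        state = "Grumpy"
    elif state == "Grumpy" and unhappiness < nice_threshold:
        state = "Nice"
    return state, ("D" if state == "Grumpy" else "C")

# Eleven defections push unhappiness past the default threshold of 10:
assert toy_grumpy_move("Nice", "D" * 11) == ("Grumpy", "D")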
- init_kwargs = {"grumpy_threshold": 3, "nice_threshold": -2} - opponent_actions = [D] * 4 + [C] * 7 + [D] * 3 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = ([(C, D)] * 4 + [(D, C)] * 7 + [(C, D)] * 3) * 3 - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - init_kwargs = {"grumpy_threshold": 0, "nice_threshold": -2} - opponent_actions = [D] * 1 + [C] * 4 + [D] * 3 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = ([(C, D)] * 1 + [(D, C)] * 4 + [(C, D)] * 3) * 3 - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - init_kwargs = {"grumpy_threshold": 3, "nice_threshold": 0} - opponent_actions = [D] * 4 + [C] * 5 + [D] * 1 - opponent = axl.MockPlayer(actions=opponent_actions) - actions = ([(C, D)] * 4 + [(D, C)] * 5 + [(C, D)] * 1) * 3 - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - def test_reset_state_with_non_default_init(self): - player = axl.Grumpy(starting_state="Grumpy") - player.state = "Nice" - player.reset() - self.assertEqual(player.state, "Grumpy") diff --git a/axelrod/ipd/tests/strategies/test_handshake.py b/axelrod/ipd/tests/strategies/test_handshake.py deleted file mode 100644 index a6dbacc38..000000000 --- a/axelrod/ipd/tests/strategies/test_handshake.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Tests for the Handshake strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestHandshake(TestPlayer): - - name = "Handshake" - player = axl.Handshake - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (D, D)] + [(C, C), (C, D)] * 10 - self.versus_test(axl.Alternator(), expected_actions=actions) - - actions = [(C, C), (D, C)] + [(D, C)] * 20 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - opponent = axl.MockPlayer([D, C]) - actions = [(C, D), (D, C)] + [(D, D), (D, C)] * 10 - self.versus_test(opponent, expected_actions=actions) - - actions = [(C, D), (D, D)] + [(D, D)] * 20 - self.versus_test(axl.Defector(), expected_actions=actions) diff --git a/axelrod/ipd/tests/strategies/test_headsup.py b/axelrod/ipd/tests/strategies/test_headsup.py deleted file mode 100644 index 9b9282724..000000000 --- a/axelrod/ipd/tests/strategies/test_headsup.py +++ /dev/null @@ -1,120 +0,0 @@ -"""Strategy match tests.""" - -import axelrod as axl - -from .test_player import TestMatch - -C, D = axl.Action.C, axl.Action.D - - -class TestTFTvsWSLS(TestMatch): - """Test TFT vs WSLS""" - - def test_rounds(self): - self.versus_test( - axl.TitForTat(), axl.WinStayLoseShift(), [C, C, C, C], [C, C, C, C] - ) - - -class TestTFTvSTFT(TestMatch): - """Test TFT vs Suspicious TFT""" - - def test_rounds(self): - self.versus_test( - axl.TitForTat(), - axl.SuspiciousTitForTat(), - [C, D, C, D, C, D], - [D, C, D, C, D, C], - ) - - -class TestTFTvsBully(TestMatch): - """Test TFT vs Bully""" - - def test_rounds(self): - self.versus_test( - axl.TitForTat(), axl.Bully(), [C, D, D, C, C, D], [D, D, C, C, D, D] - ) - - -class TestTF2TvsBully(TestMatch): - """Test Tit for Two Tats vs Bully""" - - def test_rounds(self): - self.versus_test( - axl.TitFor2Tats(), - axl.Bully(), - [C, C, D, D, C, C, C, D], - [D, D, D, C, C, D, D, D], - ) - - -class TestZDGTFT2vsBully(TestMatch): - """Test ZDGTFT2 vs 
Bully""" - - def test_rounds(self): - self.versus_test( - axl.ZDGTFT2(), - axl.Bully(), - [C, D, D, C, C, C], - [D, D, C, C, D, D], - seed=2, - ) - - -class TestZDExtort2vsTFT(TestMatch): - """Test ZDExtort2 vs Bully""" - - def test_rounds(self): - self.versus_test( - axl.ZDExtort2(), - axl.TitForTat(), - [C, D, D, D, D, D], - [C, C, D, D, D, D], - seed=2, - ) - - -class FoolMeOncevsBully(TestMatch): - """Test Fool Me Once vs Bully""" - - def test_rounds(self): - self.versus_test( - axl.FoolMeOnce(), - axl.Bully(), - [C, C, D, D, D, D], - [D, D, D, C, C, C], - ) - - -class FoolMeOncevsSTFT(TestMatch): - """Test Fool Me Once vs Suspicious TFT""" - - def test_rounds(self): - self.versus_test( - axl.FoolMeOnce(), axl.SuspiciousTitForTat(), [C] * 9, [D] + [C] * 8 - ) - - -class GrudgervsSTFT(TestMatch): - """Test Grudger vs Suspicious TFT""" - - def test_rounds(self): - self.versus_test( - axl.Grudger(), - axl.SuspiciousTitForTat(), - [C] + [D] * 9, - [D, C] + [D] * 8, - ) - - -class TestWSLSvsBully(TestMatch): - """Test WSLS vs Bully""" - - def test_rounds(self): - self.versus_test( - axl.WinStayLoseShift(), - axl.Bully(), - [C, D, C, C, D], - [D, D, C, D, D], - ) diff --git a/axelrod/ipd/tests/strategies/test_hmm.py b/axelrod/ipd/tests/strategies/test_hmm.py deleted file mode 100644 index 477f76732..000000000 --- a/axelrod/ipd/tests/strategies/test_hmm.py +++ /dev/null @@ -1,327 +0,0 @@ -"""Tests for Hidden Markov Model Strategies.""" - -import unittest -import random - -import axelrod as axl -from axelrod.ipd.random_ import random_vector -from axelrod.ipd.evolvable_player import InsufficientParametersError -from axelrod.ipd.strategies import ( - EvolvableHMMPlayer, - HMMPlayer, - SimpleHMM, -) -from axelrod.ipd.strategies.hmm import is_stochastic_matrix -from .test_player import TestMatch, TestPlayer -from .test_evolvable_player import PartialClass, TestEvolvablePlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestHMMPlayers(unittest.TestCase): - """Test a few sample tables to make sure that the finite state machines are - working as intended.""" - - def test_is_stochastic_matrix(self): - m = [[1, 0], [0, 1]] - self.assertTrue(is_stochastic_matrix(m)) - m = [[1, 1e-20], [0, 1]] - self.assertTrue(is_stochastic_matrix(m)) - m = [[0.6, 0.4], [0.8, 0.2]] - self.assertTrue(is_stochastic_matrix(m)) - m = [[0.6, 0.6], [0.8, 0.2]] - self.assertFalse(is_stochastic_matrix(m)) - m = [[0.6, 0.4], [0.8, 1.2]] - self.assertFalse(is_stochastic_matrix(m)) - - def test_cooperator(self): - """Tests that the player defined by the table for Cooperator is in fact - Cooperator.""" - t_C = [[1]] - t_D = [[1]] - p = [1] - player = axl.HMMPlayer( - transitions_C=t_C, - transitions_D=t_D, - emission_probabilities=p, - initial_state=0, - initial_action=C, - ) - self.assertFalse(player.is_stochastic()) - self.assertFalse(axl.Classifiers["stochastic"](player)) - opponent = axl.Alternator() - for i in range(6): - player.play(opponent) - self.assertEqual(opponent.history, [C, D] * 3) - self.assertEqual(player.history, [C] * 6) - - def test_defector(self): - """Tests that the player defined by the table for Defector is in fact - Defector.""" - t_C = [[1]] - t_D = [[1]] - p = [0] - player = axl.HMMPlayer( - transitions_C=t_C, - transitions_D=t_D, - emission_probabilities=p, - initial_state=0, - initial_action=D, - ) - self.assertFalse(player.is_stochastic()) - self.assertFalse(axl.Classifiers["stochastic"](player)) - opponent = axl.Alternator() - for i in range(6): - player.play(opponent) - 
self.assertEqual(opponent.history, [C, D] * 3) - self.assertEqual(player.history, [D] * 6) - - def test_tft(self): - """Tests that the player defined by the table for TFT is in fact - TFT.""" - t_C = [[1, 0], [1, 0]] - t_D = [[0, 1], [0, 1]] - p = [1, 0] - player = axl.HMMPlayer( - transitions_C=t_C, - transitions_D=t_D, - emission_probabilities=p, - initial_state=0, - initial_action=C, - ) - self.assertFalse(player.is_stochastic()) - self.assertFalse(axl.Classifiers["stochastic"](player)) - opponent = axl.Alternator() - for i in range(6): - player.play(opponent) - self.assertEqual(opponent.history, [C, D] * 3) - self.assertEqual(player.history, [C, C, D, C, D, C]) - - def test_wsls(self): - """Tests that the player defined by the table for WSLS is in fact - WSLS (also known as Pavlov).""" - t_C = [[1, 0], [0, 1]] - t_D = [[0, 1], [1, 0]] - p = [1, 0] - player = axl.HMMPlayer( - transitions_C=t_C, - transitions_D=t_D, - emission_probabilities=p, - initial_state=0, - initial_action=C, - ) - self.assertFalse(player.is_stochastic()) - self.assertFalse(axl.Classifiers["stochastic"](player)) - opponent = axl.Alternator() - for i in range(6): - player.play(opponent) - self.assertEqual(opponent.history, [C, D] * 3) - self.assertEqual(player.history, [C, C, D, D, C, C]) - - def test_malformed_params(self): - # Test a malformed table - t_C = [[1, 0.5], [0, 1]] - self.assertFalse(is_stochastic_matrix(t_C)) - - t_C = [[1, 0], [0, 1]] - t_D = [[0, 1], [1, 0]] - p = [1, 0] - hmm = SimpleHMM(t_C, t_C, p, 0) - self.assertTrue(hmm.is_well_formed()) - hmm = SimpleHMM(t_C, t_D, p, -1) - self.assertFalse(hmm.is_well_formed()) - t_C = [[1, -1], [0, 1]] - t_D = [[0, 1], [1, 0]] - p = [1, 0] - hmm = SimpleHMM(t_C, t_D, p, 0) - self.assertFalse(hmm.is_well_formed()) - t_C = [[1, 0], [0, 1]] - t_D = [[0, 2], [1, 0]] - p = [1, 0] - hmm = SimpleHMM(t_C, t_D, p, 0) - self.assertFalse(hmm.is_well_formed()) - t_C = [[1, 0], [0, 1]] - t_D = [[0, 1], [1, 0]] - p = [-1, 2] - hmm = SimpleHMM(t_C, t_D, p, 0) - self.assertFalse(hmm.is_well_formed()) - - -class TestHMMPlayer(TestPlayer): - - name = "HMM IpdPlayer: 0, C" - player = axl.HMMPlayer - - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_reset(self): - player = self.player( - transitions_C=[[1]], - transitions_D=[[1]], - emission_probabilities=[0], - initial_state=0, - ) - player.hmm.state = -1 - player.reset() - self.assertFalse(player.hmm.state == -1) - - -class TestEvolvedHMM5(TestPlayer): - - name = "Evolved HMM 5" - player = axl.EvolvedHMM5 - - expected_classifier = { - "memory_depth": 5, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestEvolvedHMM5vsCooperator(TestMatch): - def test_rounds(self): - self.versus_test(axl.EvolvedHMM5(), axl.Cooperator(), [C] * 5, [C] * 5) - - -class TestEvolvedHMM5vsDefector(TestMatch): - def test_rounds(self): - self.versus_test(axl.EvolvedHMM5(), axl.Defector(), [C, C, D], [D, D, D]) - - -class TestEvolvableHMMPlayer(unittest.TestCase): - - player_class = EvolvableHMMPlayer - - def test_normalized_parameters(self): - transitions_C = [[1, 0], [1, 0]] - transitions_D = [[0, 1],
[0, 1]] - emission_probabilities = [1, 0] - initial_state = 0 - initial_action = C - - self.assertRaises( - InsufficientParametersError, self.player_class._normalize_parameters - ) - self.assertRaises( - InsufficientParametersError, - self.player_class._normalize_parameters, - transitions_C=transitions_C, - transitions_D=transitions_D, - emission_probabilities=emission_probabilities, - ) - self.assertRaises( - InsufficientParametersError, - self.player_class._normalize_parameters, - initial_state=initial_state, - initial_action=initial_action, - ) - - def test_vector_to_instance(self): - num_states = 4 - vector = [] - for _ in range(2 * num_states): - vector.extend(list(random_vector(num_states))) - for _ in range(num_states + 1): - vector.append(random.random()) - player = self.player_class(num_states=num_states) - player.receive_vector(vector=vector) - self.assertIsInstance(player, self.player_class) - - def test_create_vector_bounds(self): - num_states = 4 - size = 2 * num_states ** 2 + num_states + 1 - - player = self.player_class(num_states=num_states) - lb, ub = player.create_vector_bounds() - - self.assertIsInstance(lb, list) - self.assertEqual(len(lb), size) - self.assertIsInstance(ub, list) - self.assertEqual(len(ub), size) - - -class TestEvolvableHMMPlayer2(TestEvolvablePlayer): - name = "EvolvableHMMPlayer" - player_class = EvolvableHMMPlayer - parent_class = HMMPlayer - parent_kwargs = [ - "transitions_C", - "transitions_D", - "emission_probabilities", - "initial_state", - "initial_action", - ] - init_parameters = {"num_states": 4} - - -class TestEvolvableHMMPlayer3(TestEvolvablePlayer): - name = "EvolvableHMMPlayer" - player_class = EvolvableHMMPlayer - parent_class = HMMPlayer - parent_kwargs = [ - "transitions_C", - "transitions_D", - "emission_probabilities", - "initial_state", - "initial_action", - ] - init_parameters = {"num_states": 8} - - -class TestEvolvableHMMPlayer4(TestEvolvablePlayer): - name = "EvolvableHMMPlayer" - player_class = EvolvableHMMPlayer - parent_class = HMMPlayer - parent_kwargs = [ - "transitions_C", - "transitions_D", - "emission_probabilities", - "initial_state", - "initial_action", - ] - init_parameters = { - "transitions_C": [[1, 0], [1, 0]], - "transitions_D": [[0, 1], [0, 1]], - "emission_probabilities": [1, 0], - "initial_state": 0, - "initial_action": C, - } - - -# Substitute EvolvableHMMPlayer as a regular HMMPlayer. -EvolvableHMMPlayerWithDefault = PartialClass( - EvolvableHMMPlayer, - transitions_C=[[1]], - transitions_D=[[1]], - emission_probabilities=[0.5], - initial_state=0, -) - - -class EvolvableHMMPlayerAsHMMPlayer(TestHMMPlayer): - player = EvolvableHMMPlayerWithDefault - - def test_equality_of_clone(self): - pass - - def test_equality_of_pickle_clone(self): - pass - - def test_repr(self): - pass diff --git a/axelrod/ipd/tests/strategies/test_human.py b/axelrod/ipd/tests/strategies/test_human.py deleted file mode 100644 index 14542edbb..000000000 --- a/axelrod/ipd/tests/strategies/test_human.py +++ /dev/null @@ -1,133 +0,0 @@ -from unittest import TestCase -from unittest.mock import patch - -from os import linesep - -import axelrod as axl -from axelrod.ipd.strategies.human import ActionValidator, Human -from prompt_toolkit.validation import ValidationError - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestDocument(object): - """ - A class to mimic a prompt-toolkit document having just the text attribute. 
- """ - - def __init__(self, text): - self.text = text - - -class TestActionValidator(TestCase): - def test_validator(self): - test_documents = [TestDocument(x) for x in ["C", "c", "D", "d"]] - for test_document in test_documents: - ActionValidator().validate(test_document) - - test_document = TestDocument("E") - self.assertRaises(ValidationError, ActionValidator().validate, test_document) - - -class TestHumanClass(TestPlayer): - - name = "Human: human, C, D" - player = Human - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(["length", "game"]), - "long_run_time": True, - "inspects_source": True, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_init(self): - human = Human(name="test human", c_symbol="X", d_symbol="Y") - self.assertEqual(human.human_name, "test human") - self.assertEqual(human.symbols, {C: "X", D: "Y"}) - - def test_history_toolbar(self): - human = Human() - expected_content = "" - actual_content = human._history_toolbar() - self.assertEqual(actual_content, expected_content) - - human.history.append(C, C) - expected_content = "History (human, opponent): [('C', 'C')]" - actual_content = human._history_toolbar() - self.assertIn(actual_content, expected_content) - - def test_status_messages(self): - human = Human() - expected_messages = { - "toolbar": None, - "print": "{}Starting new match".format(linesep), - } - actual_messages = human._status_messages() - self.assertEqual(actual_messages, expected_messages) - - human.history.append(C, C) - expected_print_message = "{}Turn 1: human played C, opponent played C".format( - linesep - ) - actual_messages = human._status_messages() - self.assertEqual(actual_messages["print"], expected_print_message) - self.assertIsNotNone(actual_messages["toolbar"]) - - def test_get_human_input_c(self): - with patch("axelrod.human.prompt", return_value="c") as prompt_: - actions = [(C, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - self.assertEqual( - prompt_.call_args[0], ("Turn 5 action [C or D] for human: ",) - ) - - def test_get_human_input_C(self): - with patch("axelrod.human.prompt", return_value="C") as prompt_: - actions = [(C, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - self.assertEqual( - prompt_.call_args[0], ("Turn 5 action [C or D] for human: ",) - ) - - def test_get_human_input_d(self): - with patch("axelrod.human.prompt", return_value="d") as prompt_: - actions = [(D, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - self.assertEqual( - prompt_.call_args[0], ("Turn 5 action [C or D] for human: ",) - ) - - def test_get_human_input_D(self): - with patch("axelrod.human.prompt", return_value="D") as prompt_: - actions = [(D, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - self.assertEqual( - prompt_.call_args[0], ("Turn 5 action [C or D] for human: ",) - ) - - def test_strategy(self): - human = Human() - expected_action = C - actual_action = human.strategy(axl.IpdPlayer(), lambda: C) - self.assertEqual(actual_action, expected_action) - - def test_reset_history_and_attributes(self): - """Overwrite the reset method for this strategy.""" - pass - - def test_repr(self): - human = Human() - self.assertEqual(human.__repr__(), "Human: human") - - human = Human(name="John Nash") - self.assertEqual(human.__repr__(), "Human: John Nash") - human = Human(name="John Nash", c_symbol="1", d_symbol="2") - self.assertEqual(human.__repr__(), "Human: John Nash") - - def 
equality_of_players_test(self, p1, p2, seed, opponent): - return True diff --git a/axelrod/ipd/tests/strategies/test_hunter.py b/axelrod/ipd/tests/strategies/test_hunter.py deleted file mode 100644 index 90154338a..000000000 --- a/axelrod/ipd/tests/strategies/test_hunter.py +++ /dev/null @@ -1,265 +0,0 @@ -"""Tests for the Hunter strategy.""" - -import unittest - -import axelrod as axl -from axelrod.ipd.strategies.hunter import detect_cycle - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestCycleDetection(unittest.TestCase): - def test_cycles(self): - history = [C] * 10 - self.assertEqual(detect_cycle(history), (C,)) - self.assertEqual(detect_cycle(history, min_size=2), (C, C)) - history = [C, D] * 10 - self.assertEqual(detect_cycle(history, min_size=2), (C, D)) - self.assertEqual(detect_cycle(history, min_size=3), (C, D, C, D)) - history = [C, D, C] * 10 - self.assertTrue(detect_cycle(history), (C, D, C)) - history = [C, C, D] * 10 - self.assertTrue(detect_cycle(history), (C, C, D)) - - def test_noncycles(self): - history = [C, D, C, C, D, C, C, C, D] - self.assertEqual(detect_cycle(history), None) - history = [C, C, D, C, C, D, C, C, C, D, C, C, C, C, D, C, C, C, C, C] - self.assertEqual(detect_cycle(history), None) - - -class TestDefectorHunter(TestPlayer): - - name = "Defector Hunter" - player = axl.DefectorHunter - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, D)] * 4 + [(D, D)] * 10 - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - actions = [(C, C)] * 14 - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - -class TestCooperatorHunter(TestPlayer): - - name = "Cooperator Hunter" - player = axl.CooperatorHunter - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 4 + [(D, C)] * 10 - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] * 14 - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - -class TestAlternatorHunter(TestPlayer): - - name = "Alternator Hunter" - player = axl.AlternatorHunter - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "inspects_source": False, - "makes_use_of": set(), - "long_run_time": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D)] * 3 + [(D, C), (D, D)] * 5 - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - attrs={"is_alt": True}, - ) - - actions = [(C, D)] * 14 - self.versus_test( - opponent=axl.Defector(), - expected_actions=actions, - attrs={"is_alt": False}, - ) - - def test_reset_attr(self): - p = self.player() - p.is_alt = True - p.reset() - self.assertFalse(p.is_alt) - - -class TestCycleHunter(TestPlayer): - - name = "Cycle Hunter" - player = axl.CycleHunter - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": 
False, - } - - def test_strategy(self): - player = self.player() - # Test against cyclers - for opponent in [ - axl.CyclerCCD(), - axl.CyclerCCCD(), - axl.CyclerCCCCCD(), - axl.Alternator(), - ]: - player.reset() - for i in range(30): - player.play(opponent) - self.assertEqual(player.history[-1], D) - # Test against non-cyclers - axl.seed(40) - for opponent in [ - axl.Random(), - axl.AntiCycler(), - axl.Cooperator(), - axl.Defector(), - ]: - player.reset() - for i in range(30): - player.play(opponent) - self.assertEqual(player.history[-1], C) - - def test_reset_attr(self): - p = self.player() - p.cycle = "CCDDCD" - p.reset() - self.assertEqual(p.cycle, None) - - -class TestEventualCycleHunter(TestPlayer): - - name = "Eventual Cycle Hunter" - player = axl.EventualCycleHunter - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - player = self.player() - # Test against cyclers - for opponent in [ - axl.CyclerCCD(), - axl.CyclerCCCD(), - axl.CyclerCCCCCD(), - axl.Alternator(), - ]: - player.reset() - for i in range(50): - player.play(opponent) - self.assertEqual(player.history[-1], D) - # Test against non-cyclers and cooperators - axl.seed(43) - for opponent in [ - axl.Random(), - axl.AntiCycler(), - axl.DoubleCrosser(), - axl.Cooperator(), - ]: - player.reset() - for i in range(50): - player.play(opponent) - self.assertEqual(player.history[-1], C) - - def test_reset_attr(self): - p = self.player() - p.cycle = "CCDDCD" - p.reset() - self.assertEqual(p.cycle, None) - - -class TestMathConstantHunter(TestPlayer): - - name = "Math Constant Hunter" - player = axl.MathConstantHunter - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - opponent = axl.MockPlayer([C] * 7 + [D] * 3) - actions = [(C, C)] * 7 + [(C, D)] - self.versus_test(opponent=opponent, expected_actions=actions) - - -class TestRandomHunter(TestPlayer): - - name = "Random Hunter" - player = axl.RandomHunter - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - - # We should catch the alternator here. 
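# Sketch of the counting idea behind the countCC / countDD attributes checked
# below, under the assumption that RandomHunter tallies how often the
# opponent's move repeats the player's previous move; a hypothetical helper:
def count_follow_ups(my_history, op_history):
    count_cc = count_dd = 0
    for mine, theirs in zip(my_history[:-1], op_history[1:]):
        if mine == "C" and theirs == "C":
            count_cc += 1
        elif mine == "D" and theirs == "D":
            count_dd += 1
    return count_cc, count_dd

# Ten alternator rounds answer five of our cooperations with C, matching the
# countCC of 5 asserted in the test below:
assert count_follow_ups("C" * 11, "CDCDCDCDCDC")[0] == 5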
- actions = [(C, C), (C, D)] * 5 + [(C, C), (D, D), (D, C)] - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - attrs={"countCC": 5, "countDD": 0}, - ) - - actions = [(C, D)] * 14 - self.versus_test( - opponent=axl.Defector(), - expected_actions=actions, - attrs={"countCC": 0, "countDD": 0}, - ) - - def test_reset(self): - player = self.player() - opponent = axl.Cooperator() - for _ in range(100): - player.play(opponent) - self.assertFalse(player.countCC == 0) - player.reset() - self.assertTrue(player.countCC == 0) diff --git a/axelrod/ipd/tests/strategies/test_inverse.py b/axelrod/ipd/tests/strategies/test_inverse.py deleted file mode 100644 index 3eaee2a89..000000000 --- a/axelrod/ipd/tests/strategies/test_inverse.py +++ /dev/null @@ -1,48 +0,0 @@ -"""Tests for the inverse strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestInverse(TestPlayer): - - name = "Inverse" - player = axl.Inverse - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Test that as long as the opponent has not defected the player will - # cooperate. - self.versus_test(axl.Cooperator(), expected_actions=[(C, C)]) - - # Tests that if opponent has played all D then player chooses D. - self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 9) - - expected_actions = [ - (C, D), - (D, C), - (D, C), - (D, D), - (D, C), - (C, C), - (C, C), - (C, C), - (C, D), - (D, D), - ] - self.versus_test( - axl.MockPlayer(actions=[a[1] for a in expected_actions]), - expected_actions=expected_actions, - seed=0, - ) diff --git a/axelrod/ipd/tests/strategies/test_lookerup.py b/axelrod/ipd/tests/strategies/test_lookerup.py deleted file mode 100755 index 02d567e8d..000000000 --- a/axelrod/ipd/tests/strategies/test_lookerup.py +++ /dev/null @@ -1,760 +0,0 @@ -"""Test for the Looker Up strategy.""" - -import unittest - -import copy - -import random - -import axelrod as axl -from axelrod.ipd.action import str_to_actions -from axelrod.ipd.evolvable_player import InsufficientParametersError -from axelrod.ipd.strategies.lookerup import ( - EvolvableLookerUp, - LookupTable, - Plays, - create_lookup_table_keys, - make_keys_into_plays, -) -from .test_evolvable_player import PartialClass, TestEvolvablePlayer -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestLookupTable(unittest.TestCase): - lookup_dict = { - ((C, C), (C,), ()): C, - ((C, C), (D,), ()): D, - ((C, D), (C,), ()): D, - ((C, D), (D,), ()): C, - ((D, C), (C,), ()): C, - ((D, C), (D,), ()): D, - ((D, D), (C,), ()): D, - ((D, D), (D,), ()): C, - } - - def test_init(self): - table = LookupTable(self.lookup_dict) - - self.assertEqual(table.table_depth, 2) - self.assertEqual(table.player_depth, 2) - self.assertEqual(table.op_depth, 1) - self.assertEqual(table.op_openings_depth, 0) - self.assertEqual( - table.dictionary, - { - Plays(self_plays=(C, C), op_plays=(C,), op_openings=()): C, - Plays(self_plays=(C, C), op_plays=(D,), op_openings=()): D, - Plays(self_plays=(C, D), op_plays=(C,), op_openings=()): D, - Plays(self_plays=(C, D), op_plays=(D,), op_openings=()): C, - Plays(self_plays=(D, C), op_plays=(C,), op_openings=()): C, - Plays(self_plays=(D, C), op_plays=(D,), op_openings=()): D, - Plays(self_plays=(D, D), 
op_plays=(C,), op_openings=()): D, - Plays(self_plays=(D, D), op_plays=(D,), op_openings=()): C, - }, - ) - self.assertIsInstance(next(iter(table.dictionary)), Plays) - - def test_init_raises_error_when_keys_for_lookup_dict_do_not_match(self): - lookup_dict = {((C,), (C,), ()): C, ((D, D), (D, D), ()): C} - with self.assertRaises(ValueError): - LookupTable(lookup_dict=lookup_dict) - - def test_init_raises_error_keys_do_not_cover_all_combinations(self): - lookup_dict = {((C,), (C,), ()): C, ((D,), (D,), ()): C} - with self.assertRaises(ValueError): - LookupTable(lookup_dict=lookup_dict) - - def test_from_pattern(self): - pattern = (C, D, D, C, C, D, D, C) - table = LookupTable.from_pattern( - pattern, player_depth=2, op_depth=1, op_openings_depth=0 - ) - self.assertEqual(table.dictionary, make_keys_into_plays(self.lookup_dict)) - - def test_from_pattern_raises_error_pattern_len_ne_dict_size(self): - too_big = (C,) * 17 - too_small = (C,) * 15 - just_right = (C,) * 16 - with self.assertRaises(ValueError): - LookupTable.from_pattern(too_big, 2, 2, 0) - with self.assertRaises(ValueError): - LookupTable.from_pattern(too_small, 2, 2, 0) - self.assertIsInstance( - LookupTable.from_pattern(just_right, 2, 2, 0), LookupTable - ) - - def test_dictionary_property_returns_new_dict_object(self): - table = LookupTable(lookup_dict=self.lookup_dict) - self.assertIsNot(table.dictionary, table.dictionary) - - def test_display_default(self): - table = LookupTable.from_pattern( - (C,) * 8, player_depth=2, op_depth=0, op_openings_depth=1 - ) - self.assertEqual( - table.display(), - ( - "op_openings|self_plays | op_plays \n" - + " C , C, C , : C,\n" - + " C , C, D , : C,\n" - + " C , D, C , : C,\n" - + " C , D, D , : C,\n" - + " D , C, C , : C,\n" - + " D , C, D , : C,\n" - + " D , D, C , : C,\n" - + " D , D, D , : C,\n" - ), - ) - - def test_display_assign_order(self): - table = LookupTable.from_pattern( - (C,) * 8, player_depth=0, op_depth=3, op_openings_depth=0 - ) - self.assertEqual( - table.display(sort_by=("op_openings", "op_plays", "self_plays")), - ( - "op_openings| op_plays |self_plays \n" - + " , C, C, C , : C,\n" - + " , C, C, D , : C,\n" - + " , C, D, C , : C,\n" - + " , C, D, D , : C,\n" - + " , D, C, C , : C,\n" - + " , D, C, D , : C,\n" - + " , D, D, C , : C,\n" - + " , D, D, D , : C,\n" - ), - ) - - def test_equality_true(self): - table_a = LookupTable(self.lookup_dict) - table_b = LookupTable(self.lookup_dict) - self.assertTrue(table_a.__eq__(table_b)) - - def test_equality_false(self): - table_a = LookupTable.from_pattern((C, D), 1, 0, 0) - table_b = LookupTable.from_pattern((D, C), 1, 0, 0) - table_c = LookupTable.from_pattern((C, D), 0, 1, 0) - self.assertFalse(table_a.__eq__(table_b)) - self.assertFalse(table_a.__eq__(table_c)) - self.assertFalse(table_a.__eq__(table_a.dictionary)) - - def test_not_equal(self): - table_a = LookupTable(self.lookup_dict) - table_b = LookupTable(self.lookup_dict) - not_equal = LookupTable.from_pattern((C, C), 1, 0, 0) - self.assertFalse(table_a.__ne__(table_b)) - self.assertTrue(table_a.__ne__(not_equal)) - - -class TestLookupTableHelperFunctions(unittest.TestCase): - def test_plays_equals_tuple(self): - self.assertEqual(Plays(1, 2, 3), (1, 2, 3)) - - def test_plays_assign_values(self): - self.assertEqual(Plays(op_plays=2, self_plays=1, op_openings=3), Plays(1, 2, 3)) - - def test_make_keys_into_plays(self): - old = {((C, D), (C,), ()): 1, ((D, D), (D,), ()): 2} - new = make_keys_into_plays(old) - self.assertNotIsInstance(next(iter(old)), Plays) - 
self.assertIsInstance(next(iter(new)), Plays) - self.assertTrue(new.__eq__(old)) - self.assertTrue(old.__eq__(new)) - - def test_make_keys_into_plays_always_returns_new_dict(self): - old = {Plays((C, D), (C,), ()): 1, Plays((D, D), (D,), ()): 2} - self.assertIsNot(old, make_keys_into_plays(old)) - - def test_create_lookup_table_keys(self): - expected = [ - Plays((C, C), (C,), ()), - Plays((C, C), (D,), ()), - Plays((C, D), (C,), ()), - Plays((C, D), (D,), ()), - Plays((D, C), (C,), ()), - Plays((D, C), (D,), ()), - Plays((D, D), (C,), ()), - Plays((D, D), (D,), ()), - ] - actual = create_lookup_table_keys( - player_depth=2, op_depth=1, op_openings_depth=0 - ) - self.assertEqual(actual, expected) - self.assertIsInstance(actual[0], Plays) - - -class TestLookerUp(TestPlayer): - name = "LookerUp" - player = axl.LookerUp - - expected_classifier = { - "memory_depth": 1, # Default TFT - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - expected_class_classifier = copy.copy(expected_classifier) - - def test_default_init(self): - player = self.player() - expected = {Plays((), (D,), ()): D, Plays((), (C,), ()): C} - self.assertEqual(player.lookup_dict, expected) - self.assertEqual(player.initial_actions, (C,)) - - def test_pattern_and_params_init_pattern_is_string(self): - pattern = "CCCC" - parameters = Plays(1, 1, 0) - player = axl.LookerUp(pattern=pattern, parameters=parameters) - expected_lookup_table = { - Plays((C,), (D,), ()): C, - Plays((D,), (D,), ()): C, - Plays((C,), (C,), ()): C, - Plays((D,), (C,), ()): C, - } - self.assertEqual(player.lookup_dict, expected_lookup_table) - - def test_pattern_and_params_init_pattern_is_tuple(self): - pattern = (C, C, C, C) - parameters = Plays(1, 1, 0) - player = axl.LookerUp(pattern=pattern, parameters=parameters) - expected_lookup_table = { - Plays((C,), (D,), ()): C, - Plays((D,), (D,), ()): C, - Plays((C,), (C,), ()): C, - Plays((D,), (C,), ()): C, - } - self.assertEqual(player.lookup_dict, expected_lookup_table) - - def test_pattern_and_params_init_can_still_use_regular_tuple(self): - pattern = (C, C) - parameters = (1, 0, 0) - player = axl.LookerUp(pattern=pattern, parameters=parameters) - expected_lookup_table = {Plays((C,), (), ()): C, Plays((D,), (), ()): C} - self.assertEqual(player.lookup_dict, expected_lookup_table) - - def test_pattern_and_params_init_only_happens_if_both_are_present(self): - default = {Plays((), (D,), ()): D, Plays((), (C,), ()): C} - pattern = "CC" - parameters = Plays(self_plays=0, op_plays=1, op_openings=0) - player1 = axl.LookerUp(pattern=pattern) - player2 = axl.LookerUp(parameters=parameters) - - self.assertEqual(player1.lookup_dict, default) - self.assertEqual(player2.lookup_dict, default) - - def test_lookup_table_init(self): - lookup_table = { - ((C,), (D,), ()): C, - ((D,), (D,), ()): C, - ((C,), (C,), ()): C, - ((D,), (C,), ()): C, - } - player = axl.LookerUp(lookup_dict=lookup_table) - self.assertEqual(player.lookup_dict, lookup_table) - self.assertIsInstance(next(iter(player.lookup_dict)), Plays) - - def test_lookup_table_init_supersedes_pattern_init(self): - lookup_table = { - ((C,), (D,), ()): D, - ((D,), (D,), ()): D, - ((C,), (C,), ()): D, - ((D,), (C,), ()): D, - } - pattern = "CCCCCCCC" - parameters = Plays(self_plays=1, op_plays=1, op_openings=1) - player = axl.LookerUp( - lookup_dict=lookup_table, pattern=pattern, parameters=parameters - ) - - self.assertEqual(player.lookup_dict, 
lookup_table) - - def test_init_raises_errors(self): - mismatch_dict = {((C,), (C,), ()): C, ((D, D), (D, D), ()): C} - with self.assertRaises(ValueError): - axl.LookerUp(lookup_dict=mismatch_dict) - - incomplete_lookup_dict = {((C,), (C,), ()): C, ((D,), (D,), ()): C} - with self.assertRaises(ValueError): - axl.LookerUp(lookup_dict=incomplete_lookup_dict) - - too_short_pattern = "CC" - with self.assertRaises(ValueError): - axl.LookerUp(pattern=too_short_pattern, parameters=(3, 3, 3)) - - def test_initial_actions_set_to_max_table_depth(self): - initial_actions = (D, D, D) - table_depth_one = axl.LookerUp(initial_actions=initial_actions) - self.assertEqual(table_depth_one.initial_actions, (D,)) - - def test_initial_actions_makes_up_missing_actions_with_c(self): - initial_actions = (D,) - table_depth_three = axl.LookerUp( - initial_actions=initial_actions, - pattern="CCCCCCCC", - parameters=Plays(3, 0, 0), - ) - self.assertEqual(table_depth_three.initial_actions, (D, C, C)) - - def test_set_memory_depth(self): - mem_depth_1 = axl.LookerUp(pattern="CC", parameters=Plays(1, 0, 0)) - self.assertEqual(axl.Classifiers["memory_depth"](mem_depth_1), 1) - - mem_depth_3 = axl.LookerUp(pattern="C" * 16, parameters=Plays(1, 3, 0)) - self.assertEqual(axl.Classifiers["memory_depth"](mem_depth_3), 3) - - mem_depth_inf = axl.LookerUp(pattern="CC", parameters=Plays(0, 0, 1)) - self.assertEqual(axl.Classifiers["memory_depth"](mem_depth_inf), float("inf")) - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (C, D)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - actions = [(C, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - def test_cooperator_table(self): - lookup_table = {((), (), ()): C} - actions = [(C, D)] * 5 - self.versus_test( - axl.Defector(), - expected_actions=actions, - init_kwargs={"lookup_dict": lookup_table}, - ) - - def test_defector_table_with_initial_cooperate(self): - """ - Testing a lookup table that always defects IF there is enough history. 
- """ - defector_table = { - ((C,), (D,), ()): D, - ((D,), (D,), ()): D, - ((C,), (C,), ()): D, - ((D,), (C,), ()): D, - } - actions = [(C, C)] + [(D, D), (D, C)] * 4 - self.versus_test( - axl.Alternator(), - expected_actions=actions, - init_kwargs={"lookup_dict": defector_table}, - ) - - def test_zero_tables(self): - """Test the corner case where n=0.""" - anti_tft_pattern = "DC" - parameters = Plays(self_plays=0, op_plays=1, op_openings=0) - - tft_vs_alternator = [(C, C)] + [(D, D), (C, C)] * 5 - self.versus_test( - axl.Alternator(), - expected_actions=tft_vs_alternator, - init_kwargs={"parameters": parameters, "pattern": anti_tft_pattern}, - ) - - def test_opponent_starting_moves_table(self): - """A lookup table that always repeats the opponent's first move.""" - first_move_table = {((), (), (C,)): C, ((), (), (D,)): D} - - vs_alternator = [(C, C), (C, D)] * 5 - self.versus_test( - axl.Alternator(), - expected_actions=vs_alternator, - init_kwargs={"lookup_dict": first_move_table}, - ) - - vs_initial_defector = [(C, D)] + [(D, C), (D, D)] * 10 - opponent = axl.MockPlayer(actions=[D, C]) - self.versus_test( - opponent, - expected_actions=vs_initial_defector, - init_kwargs={"lookup_dict": first_move_table}, - ) - - def test_lookup_table_display(self): - player = axl.LookerUp( - pattern="CCCC", parameters=Plays(self_plays=2, op_plays=0, op_openings=0) - ) - self.assertEqual( - player.lookup_table_display(("self_plays", "op_plays", "op_openings")), - ( - "self_plays | op_plays |op_openings\n" - + " C, C , , : C,\n" - + " C, D , , : C,\n" - + " D, C , , : C,\n" - + " D, D , , : C,\n" - ), - ) - - -class TestEvolvedLookerUp1_1_1(TestPlayer): - name = "EvolvedLookerUp1_1_1" - player = axl.EvolvedLookerUp1_1_1 - - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_new_data(self): - original_data = { - ("C", "C", "C"): C, - ("C", "C", "D"): D, - ("C", "D", "C"): D, - ("C", "D", "D"): D, - ("D", "C", "C"): D, - ("D", "C", "D"): D, - ("D", "D", "C"): C, - ("D", "D", "D"): D, - } - converted_original = convert_original_to_current(original_data) - self.assertEqual(self.player().lookup_dict, converted_original) - - def test_vs_initial_defector(self): - opponent = [D, C, C, D, D, C] - expected = [(C, D), (D, C), (C, C), (D, D), (D, D), (D, C)] - self.versus_test(axl.MockPlayer(actions=opponent), expected_actions=expected) - - def test_vs_initial_cooperator(self): - opponent = [C, D, D, C, C, D] - expected = [(C, C), (C, D), (D, D), (D, C), (D, C), (D, D)] - self.versus_test(axl.MockPlayer(actions=opponent), expected_actions=expected) - - -class TestEvolvedLookerUp2_2_2(TestPlayer): - name = "EvolvedLookerUp2_2_2" - player = axl.EvolvedLookerUp2_2_2 - - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_new_data(self): - original_data = { - ("CC", "CC", "CC"): C, - ("CC", "CC", "CD"): D, - ("CC", "CC", "DC"): C, - ("CC", "CC", "DD"): C, - ("CC", "CD", "CC"): D, - ("CC", "CD", "CD"): C, - ("CC", "CD", "DC"): C, - ("CC", "CD", "DD"): C, - ("CC", "DC", "CC"): D, - ("CC", "DC", "CD"): C, - ("CC", "DC", "DC"): D, - ("CC", "DC", "DD"): D, - ("CC", "DD", "CC"): D, - ("CC", "DD", "CD"): C, - ("CC", "DD", "DC"): C, - ("CC", "DD", "DD"): C, - ("CD", 
"CC", "CC"): D, - ("CD", "CC", "CD"): C, - ("CD", "CC", "DC"): D, - ("CD", "CC", "DD"): D, - ("CD", "CD", "CC"): D, - ("CD", "CD", "CD"): D, - ("CD", "CD", "DC"): D, - ("CD", "CD", "DD"): D, - ("CD", "DC", "CC"): D, - ("CD", "DC", "CD"): C, - ("CD", "DC", "DC"): D, - ("CD", "DC", "DD"): D, - ("CD", "DD", "CC"): D, - ("CD", "DD", "CD"): C, - ("CD", "DD", "DC"): D, - ("CD", "DD", "DD"): C, - ("DC", "CC", "CC"): D, - ("DC", "CC", "CD"): D, - ("DC", "CC", "DC"): D, - ("DC", "CC", "DD"): D, - ("DC", "CD", "CC"): C, - ("DC", "CD", "CD"): C, - ("DC", "CD", "DC"): D, - ("DC", "CD", "DD"): C, - ("DC", "DC", "CC"): C, - ("DC", "DC", "CD"): C, - ("DC", "DC", "DC"): C, - ("DC", "DC", "DD"): D, - ("DC", "DD", "CC"): D, - ("DC", "DD", "CD"): D, - ("DC", "DD", "DC"): D, - ("DC", "DD", "DD"): C, - ("DD", "CC", "CC"): C, - ("DD", "CC", "CD"): D, - ("DD", "CC", "DC"): D, - ("DD", "CC", "DD"): D, - ("DD", "CD", "CC"): D, - ("DD", "CD", "CD"): C, - ("DD", "CD", "DC"): C, - ("DD", "CD", "DD"): D, - ("DD", "DC", "CC"): C, - ("DD", "DC", "CD"): D, - ("DD", "DC", "DC"): D, - ("DD", "DC", "DD"): D, - ("DD", "DD", "CC"): D, - ("DD", "DD", "CD"): D, - ("DD", "DD", "DC"): D, - ("DD", "DD", "DD"): D, - } - converted_original = convert_original_to_current(original_data) - self.assertEqual(self.player().lookup_dict, converted_original) - - def test_vs_initial_defector(self): - opponent_actions = [D, D] + [C, D] * 3 - expected = [(C, D), (C, D)] + [(D, C), (C, D)] * 3 - self.versus_test( - axl.MockPlayer(actions=opponent_actions), expected_actions=expected - ) - - def test_vs_initial_d_c(self): - opponent_actions = [D, C] + [C, D] * 3 - expected = [(C, D), (C, C)] + [(D, C), (C, D), (C, C), (D, D), (C, C), (C, D)] - self.versus_test( - axl.MockPlayer(actions=opponent_actions), expected_actions=expected - ) - - -class TestWinner12(TestPlayer): - name = "Winner12" - player = axl.Winner12 - - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - expected_class_classifier = copy.copy(expected_classifier) - - def test_new_data(self): - original_data = { - ("", "C", "CC"): C, - ("", "C", "CD"): D, - ("", "C", "DC"): C, - ("", "C", "DD"): D, - ("", "D", "CC"): D, - ("", "D", "CD"): C, - ("", "D", "DC"): D, - ("", "D", "DD"): D, - } - converted_original = convert_original_to_current(original_data) - self.assertEqual(self.player().lookup_dict, converted_original) - - def test_strategy(self): - """Starts by cooperating twice.""" - vs_alternator = [(C, C), (C, D), (D, C), (D, D)] * 5 - self.versus_test(axl.Alternator(), expected_actions=vs_alternator) - - self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 10) - - self.versus_test( - axl.Defector(), expected_actions=([(C, D), (C, D)] + [(D, D)] * 10) - ) - - -class TestWinner21(TestPlayer): - name = "Winner21" - player = axl.Winner21 - - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - expected_class_classifier = copy.copy(expected_classifier) - - def test_new_data(self): - original_data = { - ("", "C", "CC"): C, - ("", "C", "CD"): D, - ("", "C", "DC"): C, - ("", "C", "DD"): D, - ("", "D", "CC"): C, - ("", "D", "CD"): D, - ("", "D", "DC"): D, - ("", "D", "DD"): D, - } - converted_original = convert_original_to_current(original_data) - 
self.assertEqual(self.player().lookup_dict, converted_original) - - def test_strategy(self): - """Starts by cooperating twice.""" - vs_alternator = [(D, C), (C, D)] + [(D, C), (D, D)] * 5 - self.versus_test(axl.Alternator(), expected_actions=vs_alternator) - - self.versus_test(axl.Cooperator(), expected_actions=[(D, C)] + [(C, C)] * 10) - - self.versus_test( - axl.Defector(), expected_actions=([(D, D), (C, D)] + [(D, D)] * 10) - ) - - -class TestDictConversionFunctions(unittest.TestCase): - def test_convert_key(self): - opponent_starting_plays = "" - player_last_plays = "CC" - opponent_last_plays = "D" - old_key = (opponent_starting_plays, player_last_plays, opponent_last_plays) - - new_key = Plays(self_plays=(C, C), op_plays=(D,), op_openings=()) - - self.assertEqual(new_key, convert_key(old_key)) - - def test_convert_original_to_current(self): - expected = { - Plays(self_plays=(C, C), op_plays=(D,), op_openings=()): C, - Plays(self_plays=(D,), op_plays=(D, D), op_openings=(C,)): D, - } - original = {("", "CC", "D"): C, ("C", "D", "DD"): D} - self.assertEqual(expected, convert_original_to_current(original)) - - -def convert_original_to_current(original_data: dict) -> dict: - return {convert_key(key): value for key, value in original_data.items()} - - -def convert_key(old_key: tuple) -> Plays: - opponent_start, player, opponent = old_key - return Plays( - self_plays=str_to_actions(player), - op_plays=str_to_actions(opponent), - op_openings=str_to_actions(opponent_start), - ) - - -class TestEvolvableLookerUp(unittest.TestCase): - player_class = EvolvableLookerUp - - def test_normalized_parameters(self): - initial_actions = ( - C, - C, - ) - lookup_dict = { - ((C, C), (C,), ()): C, - ((C, C), (D,), ()): D, - ((C, D), (C,), ()): D, - ((C, D), (D,), ()): C, - ((D, C), (C,), ()): C, - ((D, C), (D,), ()): D, - ((D, D), (C,), ()): D, - ((D, D), (D,), ()): C, - } - pattern = ("".join([random.choice(("C", "D")) for _ in range(8)]),) - - self.assertRaises( - InsufficientParametersError, self.player_class._normalize_parameters - ) - self.assertRaises( - InsufficientParametersError, - self.player_class._normalize_parameters, - pattern=pattern, - initial_actions=initial_actions, - ) - self.assertRaises( - InsufficientParametersError, - self.player_class._normalize_parameters, - lookup_dict=lookup_dict, - ) - - -class TestEvolvableLookerUp2(TestEvolvablePlayer): - name = "EvolvableLookerUp" - player_class = axl.EvolvableLookerUp - parent_class = axl.LookerUp - parent_kwargs = ["lookup_dict", "initial_actions"] - init_parameters = {"parameters": (1, 1, 1)} - - -class TestEvolvableLookerUp3(TestEvolvablePlayer): - name = "EvolvableLookerUp" - player_class = axl.EvolvableLookerUp - parent_class = axl.LookerUp - parent_kwargs = ["lookup_dict", "initial_actions"] - init_parameters = {"parameters": (2, 1, 3)} - - -class TestEvolvableLookerUp4(TestEvolvablePlayer): - name = "EvolvableLookerUp" - player_class = axl.EvolvableLookerUp - parent_class = axl.LookerUp - parent_kwargs = ["lookup_dict", "initial_actions"] - init_parameters = { - "parameters": (2, 2, 2), - "pattern": "".join([random.choice(("C", "D")) for _ in range(64)]), - "initial_actions": (C, C,), - } - - -class TestEvolvableLookerUp5(TestEvolvablePlayer): - name = "EvolvableLookerUp" - player_class = axl.EvolvableLookerUp - parent_class = axl.LookerUp - parent_kwargs = ["lookup_dict", "initial_actions"] - init_parameters = { - "initial_actions": (C, C,), - "lookup_dict": { - ((C, C), (C,), ()): C, - ((C, C), (D,), ()): D, - ((C, D), (C,), ()): D, 
-            ((C, D), (D,), ()): C,
-            ((D, C), (C,), ()): C,
-            ((D, C), (D,), ()): D,
-            ((D, D), (C,), ()): D,
-            ((D, D), (D,), ()): C,
-        },
-    }
-
-
-# Substitute EvolvableLookerUp as a regular LookerUp.
-EvolvableLookerUpWithDefault = PartialClass(
-    EvolvableLookerUp,
-    parameters=(0, 1, 0),
-    lookup_dict={
-        ((), (D,), ()): D,
-        ((), (C,), ()): C,
-    },
-    initial_actions=(C,),
-)
-
-
-class EvolvableLookerUpAsLookerUp(TestLookerUp):
-    player = EvolvableLookerUpWithDefault
-
-    def test_equality_of_clone(self):
-        pass
-
-    def test_equality_of_pickle_clone(self):
-        pass
-
-    def test_zero_tables(self):
-        pass
-
-    def test_repr(self):
-        pass
diff --git a/axelrod/ipd/tests/strategies/test_mathematicalconstants.py b/axelrod/ipd/tests/strategies/test_mathematicalconstants.py
deleted file mode 100644
index 64d5ec850..000000000
--- a/axelrod/ipd/tests/strategies/test_mathematicalconstants.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""Tests for the golden and other mathematical strategies."""
-
-import axelrod as axl
-
-from .test_player import TestPlayer
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestGolden(TestPlayer):
-
-    name = r"$\phi$"
-    player = axl.Golden
-    expected_classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_strategy(self):
-        actions = [(C, C), (D, D), (C, C), (D, D), (C, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
-
-        actions = [(C, C), (D, C), (D, C), (D, C), (D, C)]
-        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions)
-
-        actions = [(C, D), (C, D), (C, D), (C, D), (C, D)]
-        self.versus_test(opponent=axl.Defector(), expected_actions=actions)
-
-
-class TestPi(TestPlayer):
-
-    name = r"$\pi$"
-    player = axl.Pi
-    expected_classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_strategy(self):
-        actions = [(C, C), (D, D), (C, C), (C, D), (C, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
-
-        actions = [(C, C), (D, C), (D, C), (D, C), (D, C)]
-        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions)
-
-        actions = [(C, D), (C, D), (C, D), (C, D), (C, D)]
-        self.versus_test(opponent=axl.Defector(), expected_actions=actions)
-
-
-class Teste(TestPlayer):
-
-    name = "$e$"
-    player = axl.e
-    expected_classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_strategy(self):
-        actions = [(C, C), (D, D), (C, C), (C, D), (C, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
-
-        actions = [(C, C), (D, C), (D, C), (D, C), (D, C)]
-        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions)
-
-        actions = [(C, D), (C, D), (C, D), (C, D), (C, D)]
-        self.versus_test(opponent=axl.Defector(), expected_actions=actions)
diff --git a/axelrod/ipd/tests/strategies/test_memoryone.py b/axelrod/ipd/tests/strategies/test_memoryone.py
deleted file mode 100644
index ecbbecd75..000000000
--- a/axelrod/ipd/tests/strategies/test_memoryone.py
+++ /dev/null
@@ -1,319 +0,0 @@
-"""Tests for the Memoryone strategies."""
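-# A note for the tests below: a memory-one strategy is fully specified by a
-# "four-vector" (p_CC, p_CD, p_DC, p_DD) giving its probability of
-# cooperating after each possible (own move, opponent move) outcome of the
-# previous turn.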
-import unittest
-import warnings
-
-import axelrod as axl
-from axelrod.ipd.strategies.memoryone import MemoryOnePlayer
-
-from .test_player import TestPlayer, test_four_vector
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestGenericPlayerOne(unittest.TestCase):
-    """A class to test the naming and classification of generic memory one
-    players."""
-
-    p1 = axl.MemoryOnePlayer(four_vector=(0, 0, 0, 0))
-    p2 = axl.MemoryOnePlayer(four_vector=(1, 0, 1, 0))
-    p3 = axl.MemoryOnePlayer(four_vector=(1, 0.5, 1, 0.5))
-
-    def test_name(self):
-        self.assertEqual(self.p1.name, "Generic Memory One IpdPlayer: (0, 0, 0, 0)")
-        self.assertEqual(self.p2.name, "Generic Memory One IpdPlayer: (1, 0, 1, 0)")
-        self.assertEqual(self.p3.name, "Generic Memory One IpdPlayer: (1, 0.5, 1, 0.5)")
-
-    def test_stochastic_classification(self):
-        self.assertFalse(axl.Classifiers["stochastic"](self.p1))
-        self.assertFalse(axl.Classifiers["stochastic"](self.p2))
-        self.assertTrue(axl.Classifiers["stochastic"](self.p3))
-
-
-class TestWinStayLoseShift(TestPlayer):
-
-    name = "Win-Stay Lose-Shift: C"
-    player = axl.WinStayLoseShift
-    expected_classifier = {
-        "memory_depth": 1,
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_class_classification(self):
-        self.assertEqual(self.player.classifier, self.expected_classifier)
-
-    def test_strategy(self):
-        # Check that the player shifts whenever it does not get a good
-        # payoff (S or P).
-        actions = [(C, C), (C, D), (D, C), (D, D), (C, C)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
-
-
-class TestWinShiftLoseStay(TestPlayer):
-
-    name = "Win-Shift Lose-Stay: D"
-    player = axl.WinShiftLoseStay
-    expected_classifier = {
-        "memory_depth": 1,
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_strategy(self):
-        # Check that the player shifts whenever it does get a good payoff
-        # (R or T).
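-        # Win-Shift Lose-Stay is the inverse rule: it opens with D (note
-        # the ": D" suffix in the expected name above) and shifts after
-        # winning rather than after losing.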
- actions = [(D, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestGTFT(TestPlayer): - - name = "GTFT: 0.33" - player = axl.GTFT - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=0) - - actions = [(C, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) - - def test_four_vector(self): - (R, P, S, T) = axl.IpdGame().RPST() - p = min(1 - (T - R) / (R - S), (R - P) / (T - P)) - expected_dictionary = {(C, C): 1.0, (C, D): p, (D, C): 1.0, (D, D): p} - test_four_vector(self, expected_dictionary) - - def test_allow_for_zero_probability(self): - player = self.player(p=0) - expected = {(C, C): 1.0, (C, D): 0, (D, C): 1.0, (D, D): 0} - self.assertAlmostEqual(player._four_vector, expected) - - -class TestFirmButFair(TestPlayer): - - name = "Firm But Fair" - player = axl.FirmButFair - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = {(C, C): 1, (C, D): 0, (D, C): 1, (D, D): 2 / 3} - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - actions = [(C, D), (D, D), (D, D), (D, D), (C, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=0) - - actions = [(C, D), (D, D), (C, D), (D, D), (D, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=1) - - -class TestStochasticCooperator(TestPlayer): - - name = "Stochastic Cooperator" - player = axl.StochasticCooperator - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = { - (C, C): 0.935, - (C, D): 0.229, - (D, C): 0.266, - (D, D): 0.42, - } - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - actions = [(C, C), (D, D), (C, C), (C, D), (C, C), (D, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=15) - - actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) - - actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=3) - - actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (C, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=13) - - -class TestStochasticWSLS(TestPlayer): - - name = "Stochastic WSLS: 0.05" - player = axl.StochasticWSLS - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (D, D), (C, C), (C, D), (D, C), (D, D)] - 
self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=2)
-
-        actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=31)
-
-        actions = [(C, D), (D, C), (D, D), (C, C), (C, D), (D, C)]
-        self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2)
-
-        actions = [(C, D), (C, C), (C, D), (D, C), (D, D), (C, C)]
-        self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=31)
-
-    def test_four_vector(self):
-        player = self.player()
-        ep = player.ep
-        expected_dictionary = {
-            (C, C): 1.0 - ep,
-            (C, D): ep,
-            (D, C): ep,
-            (D, D): 1.0 - ep,
-        }
-        test_four_vector(self, expected_dictionary)
-
-
-class TestMemoryOnePlayer(unittest.TestCase):
-    def test_default_if_four_vector_not_set(self):
-        player = MemoryOnePlayer()
-        self.assertEqual(
-            player._four_vector, {(C, C): 1.0, (C, D): 0.0, (D, C): 0.0, (D, D): 1.0}
-        )
-
-    def test_warning_if_four_vector_not_set(self):
-        with warnings.catch_warnings(record=True) as warning:
-            warnings.simplefilter("always")
-            player = MemoryOnePlayer()
-
-        self.assertEqual(len(warning), 1)
-        self.assertEqual(warning[-1].category, UserWarning)
-        self.assertEqual(
-            str(warning[-1].message),
-            "Memory one player is set to default (1, 0, 0, 1).",
-        )
-
-    def test_exception_if_probability_vector_outside_valid_values(self):
-        player = MemoryOnePlayer()
-        x = 2.0
-        with self.assertRaises(ValueError):
-            player.set_four_vector([0.1, x, 0.5, 0.1])
-
-
-class TestSoftJoss(TestPlayer):
-
-    name = "Soft Joss: 0.9"
-    player = axl.SoftJoss
-    expected_classifier = {
-        "memory_depth": 1,
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_four_vector(self):
-        expected_dictionary = {(C, C): 1, (C, D): 0.1, (D, C): 1.0, (D, D): 0.1}
-        test_four_vector(self, expected_dictionary)
-
-    def test_strategy(self):
-        actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)]
-        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=2)
-
-        actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (D, C)]
-        self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5)
-
-
-class TestALLCorALLD(TestPlayer):
-
-    name = "ALLCorALLD"
-    player = axl.ALLCorALLD
-    expected_classifier = {
-        "memory_depth": 1,
-        "stochastic": True,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_strategy(self):
-        actions = [(D, C)] * 10
-        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions, seed=0)
-        actions = [(C, C)] * 10
-        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions, seed=1)
-
-
-class TestGenericReactiveStrategy(unittest.TestCase):
-    """
-    Tests for the generic Reactive Strategy, which conditions only on the
-    opponent's previous move.
- """ - - p1 = axl.ReactivePlayer(probabilities=(0, 0)) - p2 = axl.ReactivePlayer(probabilities=(1, 0)) - p3 = axl.ReactivePlayer(probabilities=(1, 0.5)) - - def test_name(self): - self.assertEqual(self.p1.name, "Reactive IpdPlayer: (0, 0)") - self.assertEqual(self.p2.name, "Reactive IpdPlayer: (1, 0)") - self.assertEqual(self.p3.name, "Reactive IpdPlayer: (1, 0.5)") - - def test_four_vector(self): - self.assertEqual( - self.p1._four_vector, {(C, D): 0.0, (D, C): 0.0, (C, C): 0.0, (D, D): 0.0} - ) - self.assertEqual( - self.p2._four_vector, {(C, D): 0.0, (D, C): 1.0, (C, C): 1.0, (D, D): 0.0} - ) - self.assertEqual( - self.p3._four_vector, {(C, D): 0.5, (D, C): 1.0, (C, C): 1.0, (D, D): 0.5} - ) - - def test_stochastic_classification(self): - self.assertFalse(axl.Classifiers["stochastic"](self.p1)) - self.assertFalse(axl.Classifiers["stochastic"](self.p2)) - self.assertTrue(axl.Classifiers["stochastic"](self.p3)) - - def test_subclass(self): - self.assertIsInstance(self.p1, MemoryOnePlayer) - self.assertIsInstance(self.p2, MemoryOnePlayer) - self.assertIsInstance(self.p3, MemoryOnePlayer) diff --git a/axelrod/ipd/tests/strategies/test_memorytwo.py b/axelrod/ipd/tests/strategies/test_memorytwo.py deleted file mode 100644 index 3f0b65a5e..000000000 --- a/axelrod/ipd/tests/strategies/test_memorytwo.py +++ /dev/null @@ -1,315 +0,0 @@ -"""Tests for the Memorytwo strategies.""" - -import unittest - -import random - -import warnings - -import axelrod as axl -from axelrod.ipd.strategies import MemoryTwoPlayer - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestGenericPlayerTwo(unittest.TestCase): - """A class to test the naming and classification of generic memory two - players.""" - - p1 = MemoryTwoPlayer( - sixteen_vector=(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) - ) - p2 = MemoryTwoPlayer( - sixteen_vector=(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0) - ) - p3 = MemoryTwoPlayer( - sixteen_vector=( - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - 0.5, - ) - ) - p4 = MemoryTwoPlayer( - sixteen_vector=(0.1, 0, 0.2, 0, 0.3, 0, 0.4, 0, 0.5, 0, 0.6, 0, 0.7, 0, 0.8, 0) - ) - - def test_name(self): - self.assertEqual( - self.p1.name, - "Generic Memory Two IpdPlayer: (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", - ) - self.assertEqual( - self.p2.name, - "Generic Memory Two IpdPlayer: (1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)", - ) - self.assertEqual( - self.p3.name, - "Generic Memory Two IpdPlayer: (0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5)", - ) - self.assertEqual( - self.p4.name, - "Generic Memory Two IpdPlayer: (0.1, 0, 0.2, 0, 0.3, 0, 0.4, 0, 0.5, 0, 0.6, 0, 0.7, 0, 0.8, 0)", - ) - - def test_deterministic_classification(self): - self.assertFalse(axl.Classifiers["stochastic"](self.p1)) - self.assertFalse(axl.Classifiers["stochastic"](self.p2)) - - def test_stochastic_classification(self): - self.assertTrue(axl.Classifiers["stochastic"](self.p3)) - self.assertTrue(axl.Classifiers["stochastic"](self.p4)) - - -class TestMemoryTwoPlayer(unittest.TestCase): - def test_default_if_four_vector_not_set(self): - player = MemoryTwoPlayer() - self.assertEqual( - player._sixteen_vector, - { - ((C, C), (C, C)): 1.0, - ((C, C), (C, D)): 1.0, - ((C, D), (C, C)): 1.0, - ((C, D), (C, D)): 1.0, - ((C, C), (D, C)): 1.0, - ((C, C), (D, D)): 1.0, - ((C, D), (D, C)): 1.0, - ((C, D), (D, D)): 1.0, - ((D, C), (C, C)): 1.0, - ((D, C), (C, D)): 1.0, - ((D, D), (C, C)): 
1.0, - ((D, D), (C, D)): 1.0, - ((D, C), (D, C)): 1.0, - ((D, C), (D, D)): 1.0, - ((D, D), (D, C)): 1.0, - ((D, D), (D, D)): 1.0, - }, - ) - - def test_exception_if_four_vector_not_set(self): - with warnings.catch_warnings(record=True) as warning: - warnings.simplefilter("always") - player = MemoryTwoPlayer() - - self.assertEqual(len(warning), 1) - self.assertEqual(warning[-1].category, UserWarning) - self.assertEqual( - str(warning[-1].message), - "Memory two player is set to default, Cooperator.", - ) - - def test_exception_if_probability_vector_outside_valid_values(self): - player = MemoryTwoPlayer() - x = 2 - with self.assertRaises(ValueError): - player.set_sixteen_vector( - [ - 0.1, - x, - 0.5, - 0.1, - 0.1, - 0.2, - 0.5, - 0.1, - 0.1, - 0.2, - 0.5, - 0.1, - 0.2, - 0.5, - 0.1, - 0.2, - 0.5, - 0.2, - ] - ) - - -class TestMemoryStochastic(TestPlayer): - name = ( - "Generic Memory Two IpdPlayer: (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1): C" - ) - player = axl.MemoryTwoPlayer - expected_classifier = { - "memory_depth": 2, # Memory-two Sixteen-Vector - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - axl.seed(0) - vector = [random.random() for _ in range(16)] - - actions = [(C, C), (C, C), (D, D), (D, C), (C, C), (C, D), (C, C)] - self.versus_test( - opponent=axl.CyclerCCD(), - expected_actions=actions, - seed=0, - init_kwargs={"sixteen_vector": vector}, - ) - - actions = [(C, C), (C, C), (C, D), (D, C), (C, C), (C, D), (C, C)] - self.versus_test( - opponent=axl.CyclerCCD(), - expected_actions=actions, - seed=1, - init_kwargs={"sixteen_vector": vector}, - ) - - actions = [(C, C), (C, C), (D, C), (D, D), (C, D), (C, C), (D, C)] - self.versus_test( - opponent=axl.TitForTat(), - expected_actions=actions, - seed=0, - init_kwargs={"sixteen_vector": vector}, - ) - - actions = [(C, C), (C, C), (C, C), (D, C), (D, D), (C, D), (C, C)] - self.versus_test( - opponent=axl.TitForTat(), - expected_actions=actions, - seed=1, - init_kwargs={"sixteen_vector": vector}, - ) - - -class TestAON2(TestPlayer): - - name = "AON2" - player = axl.AON2 - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # tests states 2, 7, 14 and 15 - actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - # tests states 4, 16 and 11 - actions = [(C, D), (C, D), (D, C), (D, D), (D, D), (C, C), (C, D)] - self.versus_test(opponent=axl.CyclerDDC(), expected_actions=actions) - - # tests states 3, 5 and 12 - actions = [(C, D), (C, C), (D, C), (D, D), (D, D), (C, D)] - self.versus_test(opponent=axl.SuspiciousTitForTat(), expected_actions=actions) - - # tests state 1 - actions = [(C, C), (C, C), (C, C), (C, C)] - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - # tests state 6 - actions = [(C, D), (C, C), (D, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions) - - -class TestDelayedAON1(TestPlayer): - - name = "Delayed AON1" - player = axl.DelayedAON1 - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - 
def test_strategy_mutually_cooperative(self): - # tests states 2, 7, 14 and 11 - actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - # tests states 1, 4 and 8 - actions = [(C, D), (C, D), (D, D), (C, C), (C, C), (C, D)] - self.versus_test( - opponent=axl.Cycler(["D", "D", "D", "C", "C"]), expected_actions=actions - ) - - # tests states 3, 5 - actions = [(C, D), (C, C), (D, C), (D, D), (C, D)] - self.versus_test(opponent=axl.SuspiciousTitForTat(), expected_actions=actions) - - -class TestMEM2(TestPlayer): - - name = "MEM2" - player = axl.MEM2 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Start with TFT - actions = [(C, C), (C, C)] - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - attrs={"play_as": "TFT", "shift_counter": 1, "alld_counter": 0}, - ) - actions = [(C, D), (D, D)] - self.versus_test( - opponent=axl.Defector(), - expected_actions=actions, - attrs={"play_as": "TFT", "shift_counter": 1, "alld_counter": 0}, - ) - # TFTT if C, D and D, C - opponent = axl.MockPlayer([D, C, D, D]) - actions = [(C, D), (D, C), (C, D), (C, D)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"play_as": "TFTT", "shift_counter": 1, "alld_counter": 0}, - ) - - opponent = axl.MockPlayer([D, C, D, D]) - actions = [ - (C, D), - (D, C), - (C, D), - (C, D), - (D, D), - (D, C), - (D, D), - (D, D), - (D, D), - (D, C), - ] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"play_as": "ALLD", "shift_counter": -1, "alld_counter": 2}, - ) diff --git a/axelrod/ipd/tests/strategies/test_meta.py b/axelrod/ipd/tests/strategies/test_meta.py deleted file mode 100644 index 4559180e8..000000000 --- a/axelrod/ipd/tests/strategies/test_meta.py +++ /dev/null @@ -1,721 +0,0 @@ -"""Tests for the various Meta strategies.""" - -import axelrod as axl - -from .test_player import TestPlayer - -from hypothesis import given, settings -from hypothesis.strategies import integers - -C, D = axl.Action.C, axl.Action.D - - -class TestMetaPlayer(TestPlayer): - """This is a test class for meta players, primarily to test the classifier - dictionary and the reset methods. 
Inherit from this class just as you would - the TestPlayer class.""" - - name = "Meta IpdPlayer" - player = axl.MetaPlayer - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": {"game"}, - "long_run_time": True, - "manipulates_source": False, - "inspects_source": False, - "manipulates_state": False, - } - - def classifier_test(self, expected_class_classifier=None): - player = self.player() - classifier = dict() - for key in [ - "stochastic", - "inspects_source", - "manipulates_source", - "manipulates_state", - ]: - classifier[key] = any(axl.Classifiers[key](t) for t in player.team) - classifier["memory_depth"] = float("inf") - - for t in player.team: - try: - classifier["makes_use_of"].update(axl.Classifiers["make_use_of"](t)) - except KeyError: - pass - - for key in classifier: - self.assertEqual( - axl.Classifiers[key](player), - classifier[key], - msg="%s - Behaviour: %s != Expected Behaviour: %s" - % (key, axl.Classifiers[key](player), classifier[key]), - ) - - def test_repr(self): - player = self.player() - team_size = len(player.team) - self.assertEqual( - str(player), - "{}: {} player{}".format( - self.name, team_size, "s" if team_size > 1 else "" - ), - ) - - @given(seed=integers(min_value=1, max_value=20000000)) - @settings(max_examples=1) - def test_clone(self, seed): - # Test that the cloned player produces identical play - player1 = self.player() - player2 = player1.clone() - self.assertEqual(len(player2.history), 0) - self.assertEqual(player2.cooperations, 0) - self.assertEqual(player2.defections, 0) - self.assertEqual(player2.state_distribution, {}) - self.assertEqual(player2.classifier, player1.classifier) - self.assertEqual(player2.match_attributes, player1.match_attributes) - - turns = 10 - for op in [ - axl.Cooperator(), - axl.Defector(), - axl.TitForTat(), - ]: - player1.reset() - player2.reset() - for p in [player1, player2]: - axl.seed(seed) - m = axl.IpdMatch((p, op), turns=turns) - m.play() - self.assertEqual(len(player1.history), turns) - self.assertEqual(player1.history, player2.history) - - -class TestMetaMajority(TestMetaPlayer): - name = "Meta Majority" - player = axl.MetaMajority - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "manipulates_source": False, - "makes_use_of": {"game", "length"}, - "inspects_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - P1 = axl.MetaMajority() - P2 = axl.IpdPlayer() - - # With more cooperators on the team than defectors, we should cooperate. - P1.team = [axl.Cooperator(), axl.Cooperator(), axl.Defector()] - self.assertEqual(P1.strategy(P2), C) - - # With more defectors, we should defect. - P1.team = [axl.Cooperator(), axl.Defector(), axl.Defector()] - self.assertEqual(P1.strategy(P2), D) - - -class TestMetaMinority(TestMetaPlayer): - name = "Meta Minority" - player = axl.MetaMinority - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "makes_use_of": {"game", "length"}, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_team(self): - team = [axl.Cooperator] - player = self.player(team=team) - self.assertEqual(len(player.team), 1) - - def test_strategy(self): - P1 = axl.MetaMinority() - P2 = axl.IpdPlayer() - - # With more cooperators on the team, we should defect. 
- P1.team = [axl.Cooperator(), axl.Cooperator(), axl.Defector()] - self.assertEqual(P1.strategy(P2), D) - - # With defectors in the majority, we will cooperate here. - P1.team = [axl.Cooperator(), axl.Defector(), axl.Defector()] - self.assertEqual(P1.strategy(P2), C) - - -class TestNiceMetaWinner(TestMetaPlayer): - name = "Nice Meta Winner" - player = axl.NiceMetaWinner - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "makes_use_of": {"game", "length"}, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - P1 = axl.NiceMetaWinner(team=[axl.Cooperator, axl.Defector]) - P2 = axl.IpdPlayer() - - # This meta player will simply choose the strategy with the highest - # current score. - P1.team[0].score = 0 - P1.team[1].score = 1 - self.assertEqual(P1.strategy(P2), C) - P1.team[0].score = 1 - P1.team[1].score = 0 - self.assertEqual(P1.strategy(P2), C) - - # If there is a tie, choose to cooperate if possible. - P1.team[0].score = 1 - P1.team[1].score = 1 - self.assertEqual(P1.strategy(P2), C) - - opponent = axl.Cooperator() - player = axl.NiceMetaWinner(team=[axl.Cooperator, axl.Defector]) - for _ in range(5): - player.play(opponent) - self.assertEqual(player.history[-1], C) - - opponent = axl.Defector() - player = axl.NiceMetaWinner(team=[axl.Defector]) - for _ in range(20): - player.play(opponent) - self.assertEqual(player.history[-1], D) - - opponent = axl.Defector() - player = axl.MetaWinner(team=[axl.Cooperator, axl.Defector]) - for _ in range(20): - player.play(opponent) - self.assertEqual(player.history[-1], D) - - -class TestNiceMetaWinnerEnsemble(TestMetaPlayer): - name = "Nice Meta Winner Ensemble" - player = axl.NiceMetaWinnerEnsemble - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": {"game", "length"}, - "long_run_time": True, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 8 - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - init_kwargs={"team": [axl.Cooperator, axl.Defector]}, - ) - actions = [(C, D)] + [(D, D)] * 7 - self.versus_test( - opponent=axl.Defector(), - expected_actions=actions, - init_kwargs={"team": [axl.Cooperator, axl.Defector]}, - ) - - -class TestMetaHunter(TestMetaPlayer): - name = "Meta Hunter" - player = axl.MetaHunter - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "long_run_time": False, - "inspects_source": False, - "makes_use_of": set(), - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # We are not using the Cooperator Hunter here, so this should lead to - # cooperation. - actions = [(C, C)] * 5 - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - # After long histories tit-for-tat should come into play. - opponent = axl.MockPlayer([C] * 100 + [D]) - actions = [(C, C)] * 100 + [(C, D)] + [(D, C)] - self.versus_test(opponent=opponent, expected_actions=actions) - - actions = [(C, C)] * 102 - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - # All these others, however, should trigger a defection for the hunter. 
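-        # Pure defection, alternation and short cycles are exactly the
-        # patterns the hunters on the default team are built to detect.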
- actions = [(C, D), (C, D), (C, D), (C, D), (D, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - actions = [ - (C, C), - (C, C), - (C, C), - (C, D), - (C, C), - (C, C), - (C, C), - (C, D), - (D, C), - ] - self.versus_test(opponent=axl.CyclerCCCD(), expected_actions=actions) - - -class TestMetaHunterAggressive(TestMetaPlayer): - name = "Meta Hunter Aggressive" - player = axl.MetaHunterAggressive - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "long_run_time": False, - "inspects_source": False, - "makes_use_of": set(), - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # We are using CooperatorHunter here, so this should lead to - # defection - actions = [(C, C)] * 4 + [(D, C)] - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - # All these others, however, should trigger a defection for the hunter. - actions = [(C, D), (C, D), (C, D), (C, D), (D, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - actions = [ - (C, C), - (C, C), - (C, C), - (C, D), - (C, C), - (C, C), - (C, C), - (C, D), - (D, C), - ] - self.versus_test(opponent=axl.CyclerCCCD(), expected_actions=actions) - - # To test the TFT action of the strategy after 100 turns, we need to - # remove two of the hunters from its team. - # It is almost impossible to identify a history which reaches 100 turns - # without triggering one of the hunters in the default team. As at - # 16-Mar-2017, none of the strategies in the library does so. 
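-        # With the reduced team below, the opponent's lone defection on
-        # turn 101 draws the tit-for-tat retaliation on turn 102.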
- team = [ - axl.DefectorHunter, - axl.AlternatorHunter, - axl.RandomHunter, - axl.CycleHunter, - axl.EventualCycleHunter, - ] - opponent = axl.MockPlayer([C] * 100 + [D]) - actions = [(C, C)] * 100 + [(C, D), (D, C)] - self.versus_test( - opponent=opponent, expected_actions=actions, init_kwargs={"team": team} - ) - - -class TestMetaMajorityMemoryOne(TestMetaPlayer): - name = "Meta Majority Memory One" - player = axl.MetaMajorityMemoryOne - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "inspects_source": False, - "long_run_time": False, - "makes_use_of": set(["game"]), - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestMetaMajorityFiniteMemory(TestMetaPlayer): - name = "Meta Majority Finite Memory" - player = axl.MetaMajorityFiniteMemory - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game"}, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestMetaMajorityLongMemory(TestMetaPlayer): - name = "Meta Majority Long Memory" - player = axl.MetaMajorityLongMemory - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game", "length"}, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=0) - - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) - - -class TestMetaWinnerMemoryOne(TestMetaPlayer): - name = "Meta Winner Memory One" - player = axl.MetaWinnerMemoryOne - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestMetaWinnerFiniteMemory(TestMetaPlayer): - name = "Meta Winner Finite Memory" - player = axl.MetaWinnerFiniteMemory - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game"}, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestMetaWinnerLongMemory(TestMetaPlayer): - name = "Meta Winner Long Memory" - player = axl.MetaWinnerLongMemory - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game", "length"}, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - 
self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestMetaWinnerDeterministic(TestMetaPlayer): - name = "Meta Winner Deterministic" - player = axl.MetaWinnerDeterministic - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game", "length"}, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestMetaWinnerStochastic(TestMetaPlayer): - name = "Meta Winner Stochastic" - player = axl.MetaWinnerStochastic - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game", "length"}, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestMetaMixer(TestMetaPlayer): - name = "Meta Mixer" - player = axl.MetaMixer - expected_classifier = { - "inspects_source": False, - "long_run_time": True, - "makes_use_of": {"game", "length"}, - "manipulates_source": False, - "manipulates_state": False, - "memory_depth": float("inf"), - "stochastic": True, - } - - def test_strategy(self): - team = [axl.TitForTat, axl.Cooperator, axl.Grudger] - distribution = [0.2, 0.5, 0.3] - - P1 = axl.MetaMixer(team=team, distribution=distribution) - P2 = axl.Cooperator() - actions = [(C, C)] * 20 - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - init_kwargs={"team": team, "distribution": distribution}, - ) - - team.append(axl.Defector) - distribution = [0.2, 0.5, 0.3, 0] # If add a defector but does not occur - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - init_kwargs={"team": team, "distribution": distribution}, - ) - - distribution = [0, 0, 0, 1] # If defector is only one that is played - actions = [(D, C)] * 20 - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - init_kwargs={"team": team, "distribution": distribution}, - ) - - def test_raise_error_in_distribution(self): - team = [axl.TitForTat, axl.Cooperator, axl.Grudger] - distribution = [0.2, 0.5, 0.5] # Not a valid probability distribution - - player = axl.MetaMixer(team=team, distribution=distribution) - opponent = axl.Cooperator() - - self.assertRaises(ValueError, player.strategy, opponent) - - -class TestNMWEDeterministic(TestMetaPlayer): - name = "NMWE Deterministic" - player = axl.NMWEDeterministic - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game", "length"}, - "manipulates_source": False, - "manipulates_state": False, - } - - # Skip this test - def classifier_test(self, expected_class_classifier=None): - pass - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestNMWEStochastic(TestMetaPlayer): - name = "NMWE Stochastic" - player = axl.NMWEStochastic - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game", "length"}, - 
"manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=20) - - -class TestNMWEFiniteMemory(TestMetaPlayer): - name = "NMWE Finite Memory" - player = axl.NMWEFiniteMemory - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game"}, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestNMWELongMemory(TestMetaPlayer): - name = "NMWE Long Memory" - player = axl.NMWELongMemory - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": True, - "inspects_source": False, - "makes_use_of": {"game", "length"}, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=10) - - -class TestNMWEMemoryOne(TestMetaPlayer): - name = "NMWE Memory One" - player = axl.NMWEMemoryOne - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "long_run_time": False, - "inspects_source": False, - "makes_use_of": {"game"}, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - -class TestMemoryDecay(TestPlayer): - name = "Memory Decay: 0.1, 0.03, -2, 1, Tit For Tat, 15" - player = axl.MemoryDecay - expected_classifier = { - "memory_depth": float("inf"), - "long_run_time": False, - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Test TitForTat behavior in first 15 turns - opponent = axl.Cooperator() - actions = list([(C, C)]) * 15 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Defector() - actions = [(C, D)] + list([(D, D)]) * 14 - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Alternator() - actions = [(C, C)] + [(C, D), (D, C)] * 7 - self.versus_test(opponent, expected_actions=actions) - - opponent_actions = [C, D, D, C, D, C, C, D, C, D, D, C, C, D, D] - opponent = axl.MockPlayer(actions=opponent_actions) - mem_actions = [C, C, D, D, C, D, C, C, D, C, D, D, C, C, D] - actions = list(zip(mem_actions, opponent_actions)) - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.Random() - actions = [(C, D), (D, D), (D, C), (C, C), (C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions, seed=0) - - # Test net-cooperation-score (NCS) based decisions in subsequent turns - opponent = axl.Cooperator() - actions = [(C, C)] * 15 + [(C, C)] - self.versus_test( - opponent, - expected_actions=actions, - seed=1, - init_kwargs={"memory": [D] * 5 + [C] * 10}, - ) - - opponent = axl.Cooperator() - actions = [(C, C)] * 15 + [(C, C)] - self.versus_test( - opponent, - expected_actions=actions, - seed=1, - init_kwargs={"memory": [D] * 4 + [C] * 11}, - ) - - # Test alternative starting strategies - opponent = axl.Cooperator() - actions = list([(D, C)]) * 15 - 
self.versus_test( - opponent, - expected_actions=actions, - init_kwargs={"start_strategy": axl.Defector}, - ) - - opponent = axl.Cooperator() - actions = list([(C, C)]) * 15 - self.versus_test( - opponent, - expected_actions=actions, - init_kwargs={"start_strategy": axl.Cooperator}, - ) - - opponent = axl.Cooperator() - actions = [(C, C)] + list([(D, C), (C, C)]) * 7 - self.versus_test( - opponent, - expected_actions=actions, - init_kwargs={"start_strategy": axl.Alternator}, - ) - - opponent = axl.Defector() - actions = [(C, D)] * 7 + [(D, D)] - self.versus_test( - opponent, - expected_actions=actions, - seed=4, - init_kwargs={ - "memory": [C] * 12, - "start_strategy": axl.Defector, - "start_strategy_duration": 0, - }, - ) diff --git a/axelrod/ipd/tests/strategies/test_mindcontrol.py b/axelrod/ipd/tests/strategies/test_mindcontrol.py deleted file mode 100644 index 3088c1aca..000000000 --- a/axelrod/ipd/tests/strategies/test_mindcontrol.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Tests for mind controllers and other wizards.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestMindController(TestPlayer): - - name = "Mind Controller" - player = axl.MindController - expected_classifier = { - "memory_depth": -10, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": True, # Finds out what opponent will do - "manipulates_state": False, - } - - def test_strategy(self): - """ Will always make opponent cooperate """ - - p1 = axl.MindController() - p2 = axl.Cooperator() - self.assertEqual(p1.strategy(p2), D) - self.assertEqual(p2.strategy(p1), C) - - def test_vs_defect(self): - """ Will force even defector to cooperate """ - - p1 = axl.MindController() - p2 = axl.Defector() - self.assertEqual(p1.strategy(p2), D) - self.assertEqual(p2.strategy(p1), C) - - def test_vs_grudger(self): - """ Will force even Grudger to forget its grudges""" - - p1 = axl.MindController() - p2 = axl.Grudger() - for _ in range(4): - p1.history.append(D, C) - p2.history.append(C, D) - self.assertEqual(p1.strategy(p2), D) - self.assertEqual(p2.strategy(p1), C) - - -class TestMindWarper(TestMindController): - - name = "Mind Warper" - player = axl.MindWarper - expected_classifier = { - "memory_depth": -10, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": True, # Finds out what opponent will do - "manipulates_state": False, - } - - def test_setattr(self): - player = self.player() - player.strategy = lambda opponent: C - - def test_strategy(self): - player = self.player() - opponent = axl.Defector() - play1 = player.strategy(opponent) - play2 = opponent.strategy(player) - self.assertEqual(play1, D) - self.assertEqual(play2, C) - - -class TestMindBender(TestMindController): - - name = "Mind Bender" - player = axl.MindBender - expected_classifier = { - "memory_depth": -10, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": True, # Finds out what opponent will do - "manipulates_state": False, - } - - def test_strategy(self): - player = self.player() - opponent = axl.Defector() - play1 = player.strategy(opponent) - play2 = opponent.strategy(player) - self.assertEqual(play1, D) - self.assertEqual(play2, C) diff --git a/axelrod/ipd/tests/strategies/test_mindreader.py b/axelrod/ipd/tests/strategies/test_mindreader.py deleted file mode 100644 index 
7c3031f06..000000000 --- a/axelrod/ipd/tests/strategies/test_mindreader.py +++ /dev/null @@ -1,172 +0,0 @@ -"""Tests for the Mindreader strategy.""" - -import axelrod as axl -from axelrod.ipd._strategy_utils import simulate_match - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestMindReader(TestPlayer): - - name = "Mind Reader" - player = axl.MindReader - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"game"}, - "long_run_time": False, - "inspects_source": True, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_foil_inspection_strategy(self): - player = self.player() - self.assertEqual(player.foil_strategy_inspection(), D) - - def test_strategy(self): - """ - Will defect against nice strategies - """ - p1 = axl.MindReader() - p2 = axl.Cooperator() - self.assertEqual(p1.strategy(p2), D) - - def test_vs_defect(self): - """ - Will defect against pure defecting strategies - """ - p1 = axl.MindReader() - p2 = axl.Defector() - self.assertEqual(p1.strategy(p2), D) - - def test_vs_grudger(self): - """ - Will keep nasty strategies happy if it can - """ - p1 = axl.MindReader() - p2 = axl.Grudger() - self.assertEqual(p1.strategy(p2), C) - - def test_vs_tit_for_tat(self): - """ - Will keep nasty strategies happy if it can - """ - p1 = axl.MindReader() - p2 = axl.TitForTat() - self.assertEqual(p1.strategy(p2), C) - - def test_simulate_matches(self): - """ - Simulates a number of matches - """ - p1 = axl.MindReader() - p2 = axl.Grudger() - simulate_match(p1, p2, C, 4) - self.assertEqual(p2.history, [C, C, C, C]) - - def test_history_is_same(self): - """ - Checks that the history is not altered by the player - """ - p1 = axl.MindReader() - p2 = axl.Grudger() - p1.history.append(C, C) - p1.history.append(C, D) - p2.history.append(C, C) - p2.history.append(D, C) - p1.strategy(p2) - self.assertEqual(p1.history, [C, C]) - self.assertEqual(p2.history, [C, D]) - - def test_vs_geller(self): - """Ensures that a recursion error does not occur """ - p1 = axl.MindReader() - p2 = axl.Geller() - p1.strategy(p2) - p2.strategy(p1) - - def test_init(self): - """Tests for init method """ - p1 = axl.MindReader() - self.assertEqual(p1.history, []) - - -class TestProtectedMindReader(TestPlayer): - - name = "Protected Mind Reader" - player = axl.ProtectedMindReader - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"game"}, - "long_run_time": False, - "inspects_source": True, # Finds out what opponent will do - "manipulates_source": True, # Stops opponent's strategy - "manipulates_state": False, - } - - def test_foil_inspection_strategy(self): - player = self.player() - self.assertEqual(player.foil_strategy_inspection(), D) - - def test_strategy(self): - """ - Will defect against nice strategies - """ - p1 = axl.ProtectedMindReader() - p2 = axl.Cooperator() - self.assertEqual(p1.strategy(p2), D) - - def test_vs_defect(self): - """ - Will defect against pure defecting strategies - """ - p1 = axl.ProtectedMindReader() - p2 = axl.Defector() - self.assertEqual(p1.strategy(p2), D) - - def tests_protected(self): - """Ensures that no other player can alter its strategy """ - - p1 = axl.ProtectedMindReader() - p2 = axl.MindController() - P3 = axl.Cooperator() - p2.strategy(p1) - self.assertEqual(p1.strategy(P3), D) - - -class TestMirrorMindReader(TestPlayer): - - name = "Mirror Mind Reader" - player = axl.MirrorMindReader - expected_classifier = { - "memory_depth": 
float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": True, # reading and copying the source of the component - "manipulates_source": True, # changing own source dynamically - "manipulates_state": False, - } - - def test_foil_inspection_strategy(self): - player = self.player() - self.assertEqual(player.foil_strategy_inspection(), C) - - def test_strategy(self): - p1 = axl.MirrorMindReader() - p2 = axl.Cooperator() - self.assertEqual(p1.strategy(p2), C) - - def test_vs_defector(self): - p1 = axl.MirrorMindReader() - p2 = axl.Defector() - self.assertEqual(p1.strategy(p2), D) - - def test_nice_with_itself(self): - p1 = axl.MirrorMindReader() - p2 = axl.MirrorMindReader() - self.assertEqual(p1.strategy(p2), C) diff --git a/axelrod/ipd/tests/strategies/test_mutual.py b/axelrod/ipd/tests/strategies/test_mutual.py deleted file mode 100644 index 921fc698d..000000000 --- a/axelrod/ipd/tests/strategies/test_mutual.py +++ /dev/null @@ -1,148 +0,0 @@ -"""Tests for strategies Desperate, Hopeless, Willing, and Grim.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestDesperate(TestPlayer): - - name = "Desperate" - player = axl.Desperate - expected_classifier = { - "memory_depth": 1, - "long_run_time": False, - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Our IpdPlayer (Desperate) vs Cooperator SEED --> 1 - opponent = axl.Cooperator() - opponent_actions = [C] * 5 - actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - # Our IpdPlayer (Desperate) vs Cooperator SEED --> 2 - opponent = axl.Cooperator() - actions = [(D, C), (D, C), (D, C), (D, C), (D, C)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - # Our IpdPlayer (Desperate) vs Defector SEED --> 1 - opponent = axl.Defector() - actions = [(C, D), (D, D), (C, D), (D, D), (C, D)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - # Our IpdPlayer (Desperate) vs Defector SEED --> 2 - opponent = axl.Defector() - actions = [(D, D), (C, D), (D, D), (C, D), (D, D)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - # Our IpdPlayer (Desperate) vs Alternator SEED --> 1 - opponent = axl.Alternator() - actions = [(C, C), (D, D), (C, C), (D, D), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - # Our IpdPlayer (Desperate) vs Alternator SEED --> 2 - opponent = axl.Alternator() - actions = [(D, C), (D, D), (C, C), (D, D), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - -class TestHopeless(TestPlayer): - - name = "Hopeless" - player = axl.Hopeless - expected_classifier = { - "memory_depth": 1, - "long_run_time": False, - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Our IpdPlayer (Hopeless) vs Cooperator SEED --> 1 - opponent = axl.Cooperator() - opponent_actions = [C] * 5 - actions = [(C, C), (D, C), (C, C), (D, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - # Our IpdPlayer (Hopeless) vs Cooperator SEED --> 2 - opponent = axl.Cooperator() - actions = [(D, C), (C, C), (D, C), (C, C), (D, C)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - # Our IpdPlayer (Hopeless) vs Defector SEED --> 1 - 
opponent = axl.Defector() - actions = [(C, D), (C, D), (C, D), (C, D), (C, D)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - # Our IpdPlayer (Hopeless) vs Defector SEED --> 2 - opponent = axl.Defector() - actions = [(D, D), (C, D), (C, D), (C, D), (C, D)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - # Our IpdPlayer (Hopeless) vs Alternator SEED --> 1 - opponent = axl.Alternator() - actions = [(C, C), (D, D), (C, C), (D, D), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - # Our IpdPlayer (Hopeless) vs Alternator SEED --> 2 - opponent = axl.Alternator() - actions = [(D, C), (C, D), (C, C), (D, D), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - -class TestWilling(TestPlayer): - - name = "Willing" - player = axl.Willing - expected_classifier = { - "memory_depth": 1, - "long_run_time": False, - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Our IpdPlayer (Willing) vs Cooperator SEED --> 1 - opponent = axl.Cooperator() - opponent_actions = [C] * 5 - actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - # Our IpdPlayer (Willing) vs Cooperator SEED --> 2 - opponent = axl.Cooperator() - actions = [(D, C), (C, C), (C, C), (C, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - # Our IpdPlayer (Willing) vs Defector SEED --> 1 - opponent = axl.Defector() - actions = [(C, D), (C, D), (C, D), (C, D), (C, D)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - # Our IpdPlayer (Willing) vs Defector SEED --> 2 - opponent = axl.Defector() - actions = [(D, D), (D, D), (D, D), (D, D), (D, D)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - # Our IpdPlayer (Willing) vs Alternator SEED --> 1 - opponent = axl.Alternator() - actions = [(C, C), (C, D), (C, C), (C, D), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - # Our IpdPlayer (Willing) vs Alternator SEED --> 2 - opponent = axl.Alternator() - actions = [(D, C), (C, D), (C, C), (C, D), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=2) diff --git a/axelrod/ipd/tests/strategies/test_negation.py b/axelrod/ipd/tests/strategies/test_negation.py deleted file mode 100644 index 8c7542aaa..000000000 --- a/axelrod/ipd/tests/strategies/test_negation.py +++ /dev/null @@ -1,39 +0,0 @@ -"""Tests for the Neg Strategy""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestNegation(TestPlayer): - - name = "Negation" - player = axl.Negation - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # First move is random. 
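The expected sequences just below pin down Negation's rule. As a standalone sketch inferred from these tests rather than taken from the library's implementation (the function name `negation_move` is hypothetical), the strategy opens at random and thereafter plays the opposite of the opponent's previous move:

```python
import random

def negation_move(opponent_history, rng=random):
    """Open randomly; afterwards play the opposite of the opponent's last move."""
    if not opponent_history:
        return rng.choice(["C", "D"])  # random first move
    return "D" if opponent_history[-1] == "C" else "C"

assert negation_move(["C"]) == "D"  # a cooperation is answered with a defection
assert negation_move(["D"]) == "C"  # and a defection with a cooperation
```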
-        actions = [(C, C), (D, D), (C, C)]
-        self.versus_test(
-            opponent=axl.Alternator(), expected_actions=actions, seed=1
-        )
-        actions = [(D, C), (D, D), (C, C)]
-        self.versus_test(
-            opponent=axl.Alternator(), expected_actions=actions, seed=2
-        )
-        actions = [(C, C), (D, C), (D, C)]
-        self.versus_test(
-            opponent=axl.Cooperator(), expected_actions=actions, seed=1
-        )
-        actions = [(D, D), (C, D), (C, D)]
-        self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=2)
diff --git a/axelrod/ipd/tests/strategies/test_oncebitten.py b/axelrod/ipd/tests/strategies/test_oncebitten.py
deleted file mode 100644
index c152bce10..000000000
--- a/axelrod/ipd/tests/strategies/test_oncebitten.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""Tests for the once bitten strategy."""
-
-import random
-
-import axelrod as axl
-
-from .test_player import TestPlayer
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestOnceBitten(TestPlayer):
-
-    name = "Once Bitten"
-    player = axl.OnceBitten
-    expected_classifier = {
-        "memory_depth": 12,
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_strategy(self):
-        """If the opponent defects twice in a row, the player becomes
-        grudged and defects for the next ten rounds before forgiving."""
-        # Become grudged if the opponent defects twice in a row
-        opponent = axl.MockPlayer([C, C, C, D])
-        actions = [(C, C), (C, C), (C, C), (C, D), (C, C)]
-        self.versus_test(
-            opponent=opponent,
-            expected_actions=actions,
-            attrs={"grudged": False, "grudge_memory": 0},
-        )
-
-        opponent = axl.MockPlayer([C, C, C, D, D, D])
-        actions = [
-            (C, C),
-            (C, C),
-            (C, C),
-            (C, D),
-            (C, D),
-            (D, D),
-            (D, C),
-            (D, C),
-            (D, C),
-            (D, D),
-            (D, D),
-        ]
-        self.versus_test(
-            opponent=opponent,
-            expected_actions=actions,
-            attrs={"grudged": True, "grudge_memory": 5},
-        )
-
-        # After 10 rounds of being grudged: forgives
-        opponent = axl.MockPlayer([C, D, D, C] + [C] * 10)
-        actions = [(C, C), (C, D), (C, D), (D, C)] + [(D, C)] * 10 + [(C, C)]
-        self.versus_test(
-            opponent=opponent,
-            expected_actions=actions,
-            attrs={"grudged": False, "grudge_memory": 0},
-        )
-
-    def test_reset(self):
-        """Check that grudged gets reset properly"""
-        p1 = self.player()
-        p2 = axl.Defector()
-        p1.play(p2)
-        p1.play(p2)
-        p1.play(p2)
-        self.assertTrue(p1.grudged)
-        p1.reset()
-        self.assertFalse(p1.grudged)
-
-
-class TestFoolMeOnce(TestPlayer):
-
-    name = "Fool Me Once"
-    player = axl.FoolMeOnce
-    expected_classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_strategy(self):
-        # If opponent defects more than once, defect forever
-        actions = [(C, C)] * 10
-        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions)
-
-        opponent = axl.MockPlayer([D] + [C] * 9)
-        actions = [(C, D)] + [(C, C)] * 9
-        self.versus_test(opponent=opponent, expected_actions=actions)
-
-        actions = [(C, D)] * 2 + [(D, D)] * 8
-        self.versus_test(opponent=axl.Defector(), expected_actions=actions)
-
-        opponent = axl.MockPlayer([D, D] + [C] * 9)
-        actions = [(C, D)] * 2 + [(D, C)] * 8
-        self.versus_test(opponent=opponent, expected_actions=actions)
-
-
-class TestForgetfulFoolMeOnce(TestPlayer):
-
-    name = "Forgetful Fool Me Once: 0.05"
-    player = axl.ForgetfulFoolMeOnce
-    expected_classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": True,
"makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Test that will forgive one D but will grudge after 2 Ds, randomly - # forgets count. - actions = [(C, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - seed=2, - attrs={"D_count": 2}, - ) - - # Sometime eventually forget count: - actions = [(C, D), (C, D)] + [(D, D)] * 18 + [(C, D)] - self.versus_test( - opponent=axl.Defector(), - expected_actions=actions, - seed=2, - attrs={"D_count": 0}, - ) diff --git a/axelrod/ipd/tests/strategies/test_player.py b/axelrod/ipd/tests/strategies/test_player.py deleted file mode 100644 index af2e7c0b5..000000000 --- a/axelrod/ipd/tests/strategies/test_player.py +++ /dev/null @@ -1,735 +0,0 @@ -import unittest -import itertools -import pickle -import random -import types -import numpy as np - -import axelrod as axl -from axelrod.ipd.player import simultaneous_play -from axelrod.ipd.tests.property import strategy_lists - -from hypothesis import given, settings -from hypothesis.strategies import integers, sampled_from - -C, D = axl.Action.C, axl.Action.D - -short_run_time_short_mem = [ - s - for s in axl.short_run_time_strategies - if axl.Classifiers["memory_depth"](s()) <= 10 -] - - -# Generic strategy functions for testing - - -def cooperate(*args): - return C - - -def defect(*args): - return D - - -# Test classifier used to create tests players -_test_classifier = { - "memory_depth": 0, - "stochastic": False, - "makes_use_of": None, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, -} - - -class ParameterisedTestPlayer(axl.IpdPlayer): - """A simple IpdPlayer class for testing init parameters""" - - name = "ParameterisedTestPlayer" - classifier = _test_classifier - - def __init__(self, arg_test1="testing1", arg_test2="testing2"): - super().__init__() - - -class TestPlayerClass(unittest.TestCase): - name = "IpdPlayer" - player = axl.IpdPlayer - classifier = {"stochastic": False} - - def test_play(self): - player1, player2 = self.player(), self.player() - player1.strategy = cooperate - player2.strategy = defect - player1.play(player2) - self.assertEqual(player1.history[0], C) - self.assertEqual(player2.history[0], D) - - # Test cooperation / defection counts - self.assertEqual(player1.cooperations, 1) - self.assertEqual(player1.defections, 0) - self.assertEqual(player2.cooperations, 0) - self.assertEqual(player2.defections, 1) - # Test state distribution - self.assertEqual(player1.state_distribution, {(C, D): 1}) - self.assertEqual(player2.state_distribution, {(D, C): 1}) - - player1.play(player2) - self.assertEqual(player1.history[-1], C) - self.assertEqual(player2.history[-1], D) - # Test cooperation / defection counts - self.assertEqual(player1.cooperations, 2) - self.assertEqual(player1.defections, 0) - self.assertEqual(player2.cooperations, 0) - self.assertEqual(player2.defections, 2) - # Test state distribution - self.assertEqual(player1.state_distribution, {(C, D): 2}) - self.assertEqual(player2.state_distribution, {(D, C): 2}) - - def test_state_distribution(self): - player1 = axl.MockPlayer([C, C, D, D, C]) - player2 = axl.MockPlayer([C, D, C, D, D]) - match = axl.IpdMatch((player1, player2), turns=5) - _ = match.play() - self.assertEqual( - player1.state_distribution, - {(C, C): 1, (C, D): 2, (D, C): 1, (D, D): 1}, - ) - self.assertEqual( - player2.state_distribution, - {(C, 
C): 1, (C, D): 1, (D, C): 2, (D, D): 1}, - ) - - def test_noisy_play(self): - axl.seed(1) - noise = 0.2 - player1, player2 = self.player(), self.player() - player1.strategy = cooperate - player2.strategy = defect - player1.play(player2, noise) - self.assertEqual(player1.history[0], D) - self.assertEqual(player2.history[0], D) - - def test_update_history(self): - player = axl.IpdPlayer() - self.assertEqual(player.history, []) - self.assertEqual(player.cooperations, 0) - self.assertEqual(player.defections, 0) - player.history.append(D, C) - self.assertEqual(player.history, [D]) - self.assertEqual(player.defections, 1) - self.assertEqual(player.cooperations, 0) - player.history.append(C, C) - self.assertEqual(player.history, [D, C]) - self.assertEqual(player.defections, 1) - self.assertEqual(player.cooperations, 1) - - def test_history_assignment(self): - player = axl.IpdPlayer() - with self.assertRaises(AttributeError): - player.history = [] - - def test_strategy(self): - self.assertRaises( - NotImplementedError, self.player().strategy, self.player() - ) - - def test_clone(self): - """Tests player cloning.""" - player1 = axl.Random(p=0.75) # 0.5 is the default - player2 = player1.clone() - turns = 50 - for op in [axl.Cooperator(), axl.Defector(), axl.TitForTat()]: - player1.reset() - player2.reset() - seed = random.randint(0, 10 ** 6) - for p in [player1, player2]: - axl.seed(seed) - m = axl.IpdMatch((p, op), turns=turns) - m.play() - self.assertEqual(len(player1.history), turns) - self.assertEqual(player1.history, player2.history) - - def test_equality(self): - """Test the equality method for some bespoke cases""" - # Check repr - p1 = axl.Cooperator() - p2 = axl.Cooperator() - self.assertEqual(p1, p2) - p1.__repr__ = lambda: "John Nash" - self.assertNotEqual(p1, p2) - - # Check attributes - p1 = axl.Cooperator() - p2 = axl.Cooperator() - p1.test = "29" - self.assertNotEqual(p1, p2) - - p1 = axl.Cooperator() - p2 = axl.Cooperator() - p2.test = "29" - self.assertNotEqual(p1, p2) - - p1.test = "29" - self.assertEqual(p1, p2) - - # Check that attributes of both players are tested. - p1.another_attribute = [1, 2, 3] - self.assertNotEqual(p1, p2) - p2.another_attribute = [1, 2, 3] - self.assertEqual(p1, p2) - - p2.another_attribute_2 = {1: 2} - self.assertNotEqual(p1, p2) - p1.another_attribute_2 = {1: 2} - self.assertEqual(p1, p2) - - def test_equality_for_numpy_array(self): - """Check numpy array attribute (a special case)""" - p1 = axl.Cooperator() - p2 = axl.Cooperator() - - p1.array = np.array([0, 1]) - p2.array = np.array([0, 1]) - self.assertEqual(p1, p2) - - p2.array = np.array([1, 0]) - self.assertNotEqual(p1, p2) - - def test_equality_for_generator(self): - """Test equality works with generator attribute and that the generator - attribute is not altered during checking of equality""" - p1 = axl.Cooperator() - p2 = axl.Cooperator() - - # Check that players are equal with generator - p1.generator = (i for i in range(10)) - p2.generator = (i for i in range(10)) - self.assertEqual(p1, p2) - - # Check state of one generator (ensure it hasn't changed) - n = next(p2.generator) - self.assertEqual(n, 0) - - # Players are no longer equal (one generator has changed) - self.assertNotEqual(p1, p2) - - # Check that internal generator object has not been changed for either - # player after latest equal check. 
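The assertions just below depend on comparing generator attributes without permanently exhausting them. One standard way to build such a comparison, offered as an illustrative assumption rather than a quote of the library's `__eq__` (the helper name `iterators_equal` is hypothetical), is to split each stream with `itertools.tee` and compare a bounded prefix:

```python
import itertools

def iterators_equal(it_a, it_b, limit=200):
    """Compare two iterators element-wise over at most `limit` items.

    tee() yields probe copies to consume, plus fresh handles that replay
    the streams so the caller can keep iterating afterwards.
    """
    probe_a, fresh_a = itertools.tee(it_a)
    probe_b, fresh_b = itertools.tee(it_b)
    pairs = itertools.islice(zip(probe_a, probe_b), limit)
    return all(x == y for x, y in pairs), fresh_a, fresh_b

equal, g1, g2 = iterators_equal((i for i in range(10)), (i for i in range(10)))
assert equal
assert next(g1) == 0  # the returned handle still starts from the beginning
```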
-        self.assertEqual(list(p1.generator), list(range(10)))
-        self.assertEqual(list(p2.generator), list(range(1, 10)))
-
-        # Check that type is generator
-        self.assertIsInstance(p2.generator, types.GeneratorType)
-
-    def test_equality_for_cycle(self):
-        """Test equality works with cycle attribute and that the cycle attribute
-        is not altered during checking of equality"""
-        # Check cycle attribute (a special case)
-        p1 = axl.Cooperator()
-        p2 = axl.Cooperator()
-
-        # Check that players are equal with cycle
-        p1.cycle = itertools.cycle(range(10))
-        p2.cycle = itertools.cycle(range(10))
-        self.assertEqual(p1, p2)
-
-        # Check the state of one cycle (ensure it hasn't changed)
-        n = next(p2.cycle)
-        self.assertEqual(n, 0)
-
-        # Players are no longer equal (one cycle has changed)
-        self.assertNotEqual(p1, p2)
-
-        # Check that internal cycle object has not been changed for either
-        # player after latest not equal check.
-        self.assertEqual(next(p1.cycle), 0)
-        self.assertEqual(next(p2.cycle), 1)
-
-        # Check that type is cycle
-        self.assertIsInstance(p2.cycle, itertools.cycle)
-
-    def test_equality_on_init(self):
-        """Test instances of all strategies are equal on init"""
-        for s in axl.strategies:
-            p1, p2 = s(), s()
-            # Check three times (so testing equality doesn't change anything)
-            self.assertEqual(p1, p2)
-            self.assertEqual(p1, p2)
-            self.assertEqual(p1, p2)
-
-    def test_equality_with_player_as_attributes(self):
-        """Test for a strange edge case where players have pointers to each
-        other"""
-        p1 = axl.Cooperator()
-        p2 = axl.Cooperator()
-
-        # If pointing at each other
-        p1.player = p2
-        p2.player = p1
-        self.assertEqual(p1, p2)
-
-        # Still checking other attributes.
-        p1.test_attribute = "29"
-        self.assertNotEqual(p1, p2)
-
-        # If pointing at same strategy instances
-        p1.player = axl.Cooperator()
-        p2.player = axl.Cooperator()
-        p2.test_attribute = "29"
-        self.assertEqual(p1, p2)
-
-        # If pointing at different strategy instances
-        p1.player = axl.Cooperator()
-        p2.player = axl.Defector()
-        self.assertNotEqual(p1, p2)
-
-        # If different strategies pointing at same strategy instances
-        p3 = axl.Defector()
-        p1.player = axl.Cooperator()
-        p3.player = axl.Cooperator()
-        self.assertNotEqual(p1, p3)
-
-    def test_init_params(self):
-        """Tests correct detection of the player's parameter signature."""
-        self.assertEqual(self.player.init_params(), {})
-        self.assertEqual(
-            ParameterisedTestPlayer.init_params(),
-            {"arg_test1": "testing1", "arg_test2": "testing2"},
-        )
-        self.assertEqual(
-            ParameterisedTestPlayer.init_params(arg_test1="other"),
-            {"arg_test1": "other", "arg_test2": "testing2"},
-        )
-        self.assertEqual(
-            ParameterisedTestPlayer.init_params(arg_test2="other"),
-            {"arg_test1": "testing1", "arg_test2": "other"},
-        )
-        self.assertEqual(
-            ParameterisedTestPlayer.init_params("other"),
-            {"arg_test1": "other", "arg_test2": "testing2"},
-        )
-
-    def test_init_kwargs(self):
-        """Tests correct caching of the player's init parameters."""
-
-        # Tests for Players with no init parameters
-
-        # Test that init_kwargs exists and is empty
-        self.assertEqual(self.player().init_kwargs, {})
-        # Test that passing a positional argument raises an error
-        self.assertRaises(TypeError, axl.IpdPlayer, "test")
-        # Test that passing a keyword argument raises an error
-        self.assertRaises(TypeError, axl.IpdPlayer, arg_test1="test")
-
-        # Tests for Players with init parameters
-
-        # Test that init_kwargs exists and contains the default values
-        self.assertEqual(
-            ParameterisedTestPlayer().init_kwargs,
-            {"arg_test1": "testing1", "arg_test2": "testing2"},
"testing2"}, - ) - # Test that passing a keyword argument successfully change the - # init_kwargs dict. - self.assertEqual( - ParameterisedTestPlayer(arg_test1="other").init_kwargs, - {"arg_test1": "other", "arg_test2": "testing2"}, - ) - self.assertEqual( - ParameterisedTestPlayer(arg_test2="other").init_kwargs, - {"arg_test1": "testing1", "arg_test2": "other"}, - ) - # Test that passing a positional argument successfully change the - # init_kwargs dict. - self.assertEqual( - ParameterisedTestPlayer("other", "other2").init_kwargs, - {"arg_test1": "other", "arg_test2": "other2"}, - ) - # Test that passing an unknown keyword argument or a spare one raises - # an error. - self.assertRaises(TypeError, ParameterisedTestPlayer, arg_test3="test") - self.assertRaises( - TypeError, ParameterisedTestPlayer, "other", "other", "other" - ) - - -class TestOpponent(axl.IpdPlayer): - """A player who only exists so we have something to test against""" - - name = "TestOpponent" - classifier = _test_classifier - - @staticmethod - def strategy(opponent): - return C - - -class TestPlayer(unittest.TestCase): - """A Test class from which other player test classes are inherited.""" - - player = TestOpponent - expected_class_classifier = None - - def test_initialisation(self): - """Test that the player initiates correctly.""" - if self.__class__ != TestPlayer: - player = self.player() - self.assertEqual(len(player.history), 0) - self.assertEqual( - player.match_attributes, - {"length": -1, "game": axl.DefaultGame, "noise": 0}, - ) - self.assertEqual(player.cooperations, 0) - self.assertEqual(player.defections, 0) - self.classifier_test(self.expected_class_classifier) - - def test_repr(self): - """Test that the representation is correct.""" - if self.__class__ != TestPlayer: - self.assertEqual(str(self.player()), self.name) - - def test_match_attributes(self): - player = self.player() - # Default - player.set_match_attributes() - t_attrs = player.match_attributes - self.assertEqual(t_attrs["length"], -1) - self.assertEqual(t_attrs["noise"], 0) - self.assertEqual(t_attrs["game"].RPST(), (3, 1, 0, 5)) - - # Common - player.set_match_attributes(length=200) - t_attrs = player.match_attributes - self.assertEqual(t_attrs["length"], 200) - self.assertEqual(t_attrs["noise"], 0) - self.assertEqual(t_attrs["game"].RPST(), (3, 1, 0, 5)) - - # Noisy - player.set_match_attributes(length=200, noise=0.5) - t_attrs = player.match_attributes - self.assertEqual(t_attrs["noise"], 0.5) - - def equality_of_players_test(self, p1, p2, seed, opponent): - a1 = opponent() - a2 = opponent() - self.assertEqual(p1, p2) - for player, op in [(p1, a1), (p2, a2)]: - axl.seed(seed) - for _ in range(10): - simultaneous_play(player, op) - self.assertEqual(p1, p2) - p1 = pickle.loads(pickle.dumps(p1)) - p2 = pickle.loads(pickle.dumps(p2)) - self.assertEqual(p1, p2) - - @given( - opponent=sampled_from(short_run_time_short_mem), - seed=integers(min_value=1, max_value=200), - ) - @settings(max_examples=1) - def test_equality_of_clone(self, seed, opponent): - p1 = self.player() - p2 = p1.clone() - self.equality_of_players_test(p1, p2, seed, opponent) - - @given( - opponent=sampled_from(axl.short_run_time_strategies), - seed=integers(min_value=1, max_value=200), - ) - @settings(max_examples=1) - def test_equality_of_pickle_clone(self, seed, opponent): - p1 = self.player() - p2 = pickle.loads(pickle.dumps(p1)) - self.equality_of_players_test(p1, p2, seed, opponent) - - def test_reset_history_and_attributes(self): - """Make sure resetting works 
correctly.""" - for opponent in [ - axl.Defector(), - axl.Random(), - axl.Alternator(), - axl.Cooperator(), - ]: - - player = self.player() - clone = player.clone() - for seed in range(10): - axl.seed(seed) - player.play(opponent) - - player.reset() - self.assertEqual(player, clone) - - def test_reset_clone(self): - """Make sure history resetting with cloning works correctly, regardless - if self.test_reset() is overwritten.""" - player = self.player() - clone = player.clone() - self.assertEqual(player, clone) - - @given(seed=integers(min_value=1, max_value=20000000)) - @settings(max_examples=1) - def test_clone(self, seed): - # Test that the cloned player produces identical play - player1 = self.player() - if player1.name in ["Darwin", "Human"]: - # Known exceptions - return - player2 = player1.clone() - self.assertEqual(len(player2.history), 0) - self.assertEqual(player2.cooperations, 0) - self.assertEqual(player2.defections, 0) - self.assertEqual(player2.state_distribution, {}) - self.assertEqual(player2.classifier, player1.classifier) - self.assertEqual(player2.match_attributes, player1.match_attributes) - - turns = 50 - r = random.random() - for op in [ - axl.Cooperator(), - axl.Defector(), - axl.TitForTat(), - axl.Random(p=r), - ]: - player1.reset() - player2.reset() - for p in [player1, player2]: - axl.seed(seed) - m = axl.IpdMatch((p, op), turns=turns) - m.play() - self.assertEqual(len(player1.history), turns) - self.assertEqual(player1.history, player2.history) - - @given( - strategies=strategy_lists( - max_size=5, strategies=short_run_time_short_mem - ), - seed=integers(min_value=1, max_value=200), - turns=integers(min_value=1, max_value=200), - ) - @settings(max_examples=1) - def test_memory_depth_upper_bound(self, strategies, seed, turns): - """ - Test that the memory depth is indeed an upper bound. - """ - - def get_memory_depth_or_zero(player): - # Some of the test strategies have no entry in the classifiers - # table, so there isn't logic to load default value of zero. - memory = axl.Classifiers["memory_depth"](player) - return memory if memory else 0 - - player = self.player() - memory = get_memory_depth_or_zero(player) - if memory < float("inf"): - for strategy in strategies: - player.reset() - opponent = strategy() - max_memory = max(memory, get_memory_depth_or_zero(opponent)) - self.assertTrue( - test_memory( - player=player, - opponent=opponent, - seed=seed, - turns=turns, - memory_length=max_memory, - ), - msg="{} failed for seed={} and opponent={}".format( - player.name, seed, opponent - ), - ) - - def versus_test( - self, - opponent, - expected_actions, - noise=None, - seed=None, - match_attributes=None, - attrs=None, - init_kwargs=None, - ): - """ - Tests a sequence of outcomes for two given players. - Parameters: - ----------- - opponent: IpdPlayer or list - An instance of a player OR a sequence of actions. If a sequence of - actions is passed, a Mock IpdPlayer is created that cycles over that - sequence. - expected_actions: List - The expected outcomes of the match (list of tuples of actions). - noise: float - Any noise to be passed to a match - seed: int - The random seed to be used - length: int - The length of the game. If `opponent` is a sequence of actions then - the length is taken to be the length of the sequence. - match_attributes: dict - The match attributes to be passed to the players. For example, - `{length:-1}` implies that the players do not know the length of the - match. 
- attrs: dict - Dictionary of internal attributes to check at the end of all plays - in player - init_kwargs: dict - A dictionary of keyword arguments to instantiate player with - """ - - turns = len(expected_actions) - if init_kwargs is None: - init_kwargs = dict() - - if seed is not None: - axl.seed(seed) - - player = self.player(**init_kwargs) - - match = axl.IpdMatch( - (player, opponent), - turns=turns, - noise=noise, - match_attributes=match_attributes, - ) - self.assertEqual(match.play(), expected_actions) - - if attrs: - player = match.players[0] - for attr, value in attrs.items(): - self.assertEqual(getattr(player, attr), value) - - def classifier_test(self, expected_class_classifier=None): - """Test that the keys in the expected_classifier dictionary give the - expected values in the player classifier dictionary. Also checks that - two particular keys (memory_depth and stochastic) are in the - dictionary.""" - player = self.player() - - # Test that player has same classifier as its class unless otherwise - # specified - if expected_class_classifier is None: - expected_class_classifier = player.classifier - actual_class_classifier = { - c: axl.Classifiers[c](player) - for c in expected_class_classifier.keys() - } - self.assertEqual(expected_class_classifier, actual_class_classifier) - - self.assertTrue( - "memory_depth" in player.classifier, - msg="memory_depth not in classifier", - ) - self.assertTrue( - "stochastic" in player.classifier, - msg="stochastic not in classifier", - ) - for key in TestOpponent.classifier: - self.assertEqual( - axl.Classifiers[key](player), - self.expected_classifier[key], - msg="%s - Behaviour: %s != Expected Behaviour: %s" - % ( - key, - axl.Classifiers[key](player), - self.expected_classifier[key], - ), - ) - - -class TestMatch(unittest.TestCase): - """Test class for heads up play between two given players. Plays an - axelrod match between the two players.""" - - def versus_test( - self, - player1, - player2, - expected_actions1, - expected_actions2, - noise=None, - seed=None, - ): - """Tests a sequence of outcomes for two given players.""" - if len(expected_actions1) != len(expected_actions2): - raise ValueError("Mismatched History lengths.") - if seed: - axl.seed(seed) - turns = len(expected_actions1) - match = axl.IpdMatch((player1, player2), turns=turns, noise=noise) - match.play() - # Test expected sequence of play. - for i, (outcome1, outcome2) in enumerate( - zip(expected_actions1, expected_actions2) - ): - player1.play(player2) - self.assertEqual(player1.history[i], outcome1) - self.assertEqual(player2.history[i], outcome2) - - def test_versus_with_incorrect_history_lengths(self): - """Test the error raised by versus_test if expected actions do not - match up""" - with self.assertRaises(ValueError): - p1, p2 = axl.Cooperator(), axl.Cooperator() - actions1 = [C, C] - actions2 = [C] - self.versus_test(p1, p2, actions1, actions2) - - -def test_four_vector(test_class, expected_dictionary): - """ - Checks that two dictionaries match -- the four-vector defining - a memory-one strategy and the given expected dictionary. - """ - player1 = test_class.player() - for key in sorted(expected_dictionary.keys(), key=str): - test_class.assertAlmostEqual( - player1._four_vector[key], expected_dictionary[key] - ) - - -def test_memory(player, opponent, memory_length, seed=0, turns=10): - """ - Checks if a player reacts to the plays of an opponent in the same way if - only the given amount of memory is used. - """ - # Play the match normally. 
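Before the body of `test_memory` resumes below, the fixed-size buffer it swaps in is worth picturing. A `collections.deque` with `maxlen` is a reasonable stand-in for the library's `LimitedHistory` (an assumption for illustration only): a strategy that consults nothing beyond such a buffer cannot tell the truncated replay from the full one, which is what makes the transcript comparison meaningful.

```python
from collections import deque

history = deque(maxlen=3)  # hypothetical stand-in for a LimitedHistory of depth 3
for play in ["C", "D", "C", "D", "C"]:
    history.append(play)   # the oldest plays fall off the left end

assert list(history) == ["C", "D", "C"]  # only the last three plays survive
```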
- axl.seed(seed) - match = axl.IpdMatch((player, opponent), turns=turns) - plays = [p[0] for p in match.play()] - - # Play with limited history. - player.reset() - opponent.reset() - player._history = axl.LimitedHistory(memory_length) - opponent._history = axl.LimitedHistory(memory_length) - axl.seed(seed) - match = axl.IpdMatch((player, opponent), turns=turns, reset=False) - limited_plays = [p[0] for p in match.play()] - - return plays == limited_plays - - -class TestMemoryTest(unittest.TestCase): - """ - Test for the memory test function. - """ - - def test_passes(self): - """ - The memory test function returns True in this case as the correct mem - length is used - """ - player = axl.TitFor2Tats() - opponent = axl.Defector() - self.assertTrue(test_memory(player, opponent, memory_length=2)) - - def test_failures(self): - """ - The memory test function returns False in this case as the incorrect mem - length is used - """ - player = axl.TitFor2Tats() - opponent = axl.Defector() - self.assertFalse(test_memory(player, opponent, memory_length=1)) diff --git a/axelrod/ipd/tests/strategies/test_prober.py b/axelrod/ipd/tests/strategies/test_prober.py deleted file mode 100644 index 771e0115a..000000000 --- a/axelrod/ipd/tests/strategies/test_prober.py +++ /dev/null @@ -1,385 +0,0 @@ -"""Tests for Prober strategies.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestCollectiveStrategy(TestPlayer): - - name = "CollectiveStrategy" - player = axl.CollectiveStrategy - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If handshake (C, D) is used cooperate until a defection occurs and - # then defect throughout - opponent = axl.MockPlayer([C, D] + [C] * 10) - actions = [(C, C), (D, D)] + [(C, C)] * 11 + [(C, D)] + [(D, C)] * 10 - self.versus_test(opponent=opponent, expected_actions=actions) - - # If handshake is not used: defect - actions = [(C, C), (D, C)] + [(D, C)] * 15 - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (D, D)] + [(D, D)] * 15 - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - -class TestDetective(TestPlayer): - - name = "Detective" - player = axl.Detective - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - self.versus_test( - opponent=axl.TitForTat(), - expected_actions=[(C, C), (D, C), (C, D)] + [(C, C)] * 15, - ) - - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=[(C, C), (D, C), (C, C), (C, C)] + [(D, C)] * 15, - ) - - self.versus_test( - opponent=axl.Defector(), - expected_actions=[(C, D), (D, D), (C, D), (C, D)] + [(D, D)] * 15, - ) - - def test_other_initial_actions(self): - self.versus_test( - opponent=axl.TitForTat(), - expected_actions=[(C, C), (C, C), (D, C)] + [(D, D)] * 15, - init_kwargs={"initial_actions": [C, C]}, - ) - - # Extreme case: no memory at all, it's simply a defector - self.versus_test( - opponent=axl.TitForTat(), - expected_actions=[(D, C)] + [(D, D)] * 15, - init_kwargs={"initial_actions": []}, - ) - - -class TestProber(TestPlayer): - - name = "Prober" - player = axl.Prober - expected_classifier = { - 
"memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Starts by playing DCC. - # Defects forever if opponent cooperated in moves 2 and 3 - actions = [(D, C), (C, C), (C, C)] + [(D, C)] * 3 - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - opponent = axl.MockPlayer([D, C, C]) - actions = [(D, D), (C, C), (C, C)] + [(D, D), (D, C), (D, C)] - self.versus_test(opponent=opponent, expected_actions=actions) - - # Otherwise it plays like TFT - actions = [(D, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - actions = [(D, D), (C, D), (C, D), (D, D), (D, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - -class TestProber2(TestPlayer): - - name = "Prober 2" - player = axl.Prober2 - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Starts by playing DCC. - # Cooperates forever if opponent played D, C in moves 2 and 3 - actions = [(D, C), (C, D), (C, C)] + [(C, D), (C, C), (C, D)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - opponent = axl.MockPlayer([D, D, C]) - actions = [(D, D), (C, D), (C, C)] + [(C, D), (C, D), (C, C)] - self.versus_test(opponent=opponent, expected_actions=actions) - - # Otherwise it plays like TFT - actions = [(D, C), (C, C), (C, C), (C, C), (C, C)] - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - actions = [(D, D), (C, D), (C, D), (D, D), (D, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - opponent = axl.MockPlayer([D, C]) - actions = [(D, D), (C, C), (C, D)] + [(D, C), (C, D), (D, C)] - self.versus_test(opponent=opponent, expected_actions=actions) - - -class TestProber3(TestPlayer): - - name = "Prober 3" - player = axl.Prober3 - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Starts by playing DC. - # Defects forever if opponent played C in move 2. 
- actions = [(D, C), (C, C)] + [(D, C), (D, C), (D, C)] - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - opponent = axl.MockPlayer([D, C]) - actions = [(D, D), (C, C)] + [(D, D), (D, C), (D, D)] - self.versus_test(opponent=opponent, expected_actions=actions) - - # Otherwise it plays like TFT - actions = [(D, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - actions = [(D, D), (C, D), (D, D), (D, D), (D, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - -class TestProber4(TestPlayer): - - name = "Prober 4" - player = axl.Prober4 - expected_classifier = { - "stochastic": False, - "memory_depth": float("inf"), - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - initial_sequence = [C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D] - - def test_strategy(self): - # Starts by playing CCDCDDDCCDCDCCDCDDCD. - # After playing the initial sequence defects forever - # if the absolute difference in the number of retaliating - # and provocative defections of the opponent is smaller or equal to 2 - provocative_histories = [ - [C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], - [C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], - [C, D, C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], - [C, C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], - [C, C, D, C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], - [D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D], - ] - - attrs = {"turned_defector": True} - for history in provocative_histories: - opponent = axl.MockPlayer(history + [C] * 5) - actions = list(zip(self.initial_sequence, history)) + [(D, C)] * 5 - self.versus_test(opponent=opponent, expected_actions=actions, attrs=attrs) - - # Otherwise cooperates for 5 rounds and plays TfT afterwards - unprovocative_histories = [ - [C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D], - [D, D, C, D, C, C, C, D, D, C, D, C, D, D, C, D, C, C, D, C], - [C, C, D, C, D, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C], - [C, C, D, C, D, D, C, C, D, C, C, C, C, C, C, D, D, D, C, C], - [C, C, C, C, D, D, C, C, D, C, C, D, D, C, D, C, D, C, C, C], - ] - - attrs = {"turned_defector": False} - for history in unprovocative_histories: - opponent = axl.MockPlayer(history + [D] * 5 + [C, C]) - actions = list(zip(self.initial_sequence, history)) + [(C, D)] * 5 - actions += [(D, C), (C, C)] - self.versus_test(opponent=opponent, expected_actions=actions, attrs=attrs) - - -class TestHardProber(TestPlayer): - - name = "Hard Prober" - player = axl.HardProber - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Starts by playing DDCC - # Defects forever if opponent played C in moves 2 and 3 - actions = [(D, C), (D, C), (C, C), (C, C)] + [(D, C), (D, C), (D, C)] - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - opponent = axl.MockPlayer([D, C, C, D]) - actions = [(D, D), (D, C), (C, C), (C, D)] + [(D, D), (D, C), (D, C)] - self.versus_test(opponent=opponent, expected_actions=actions) - - # Otherwise it plays like TFT - actions = [(D, C), (D, D), (C, C), (C, D)] + [(D, C), (C, D), (D, C)] - 
self.versus_test(opponent=axl.Alternator(), expected_actions=actions) - - actions = [(D, D), (D, D), (C, D), (C, D)] + [(D, D), (D, D), (D, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - -class TestNaiveProber(TestPlayer): - - name = "Naive Prober: 0.1" - player = axl.NaiveProber - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Always retaliate a defection - opponent = axl.MockPlayer([C, D, D, D, D]) - actions = [(C, C), (C, D), (D, D), (D, D), (D, D)] - self.versus_test(opponent=opponent, expected_actions=actions) - - def test_random_defection(self): - # Unprovoked defection with small probability - actions = [(C, C), (D, C), (D, C), (C, C), (C, C)] - self.versus_test( - opponent=axl.Cooperator(), expected_actions=actions, seed=2 - ) - - actions = [(C, C), (C, C), (C, C), (C, C), (D, C)] - self.versus_test( - opponent=axl.Cooperator(), expected_actions=actions, seed=5 - ) - - # Always defect when p is 1 - actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - init_kwargs={"p": 1}, - ) - - def test_reduction_to_TFT(self): - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - init_kwargs={"p": 0}, - ) - - -class TestRemorsefulProber(TestPlayer): - - name = "Remorseful Prober: 0.1" - player = axl.RemorsefulProber - expected_classifier = { - "memory_depth": 2, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Always retaliate a defection - actions = [(C, D)] + [(D, D)] * 10 - self.versus_test( - opponent=axl.Defector(), - expected_actions=actions, - attrs={"probing": False}, - ) - - def test_random_defection(self): - # Unprovoked defection with small probability - actions = [(C, C), (D, C), (D, C)] - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - seed=2, - attrs={"probing": True}, - ) - - actions = [(C, C), (C, C), (C, C), (C, C), (D, C)] - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - seed=5, - attrs={"probing": True}, - ) - - # Always defect when p is 1 - actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - init_kwargs={"p": 1}, - attrs={"probing": True}, - ) - - def test_remorse(self): - """After probing, if opponent retaliates, will offer a C.""" - opponent = axl.MockPlayer([C, C, D, C]) - actions = [(C, C), (D, C), (D, D), (C, C)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - seed=2, - attrs={"probing": False}, - ) - - def test_reduction_to_TFT(self): - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - init_kwargs={"p": 0}, - attrs={"probing": False}, - ) diff --git a/axelrod/ipd/tests/strategies/test_punisher.py b/axelrod/ipd/tests/strategies/test_punisher.py deleted file mode 100644 index 77a8db244..000000000 --- a/axelrod/ipd/tests/strategies/test_punisher.py +++ /dev/null @@ -1,194 +0,0 @@ -"""Tests for the Punisher strategies.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, 
axl.Action.D - - -class TestPunisher(TestPlayer): - - name = "Punisher" - player = axl.Punisher - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_init(self): - """Tests for the __init__ method.""" - player = axl.Punisher() - self.assertEqual(player.mem_length, 1) - self.assertFalse(player.grudged) - self.assertEqual(player.grudge_memory, 1) - - def test_strategy(self): - opponent = axl.Alternator() - actions = [(C, C), (C, D), (D, C)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"grudged": True, "grudge_memory": 0}, - ) - - opponent = axl.MockPlayer([C, D] + [C] * 10) - actions = [(C, C), (C, D)] + [(D, C)] * 11 - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"grudged": True, "grudge_memory": 10}, - ) - - # Eventually the grudge is dropped - opponent = axl.MockPlayer([C, D] + [C] * 10) - actions = [(C, C), (C, D)] + [(D, C)] * 11 + [(C, D)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"grudged": False, "grudge_memory": 0, "mem_length": 10}, - ) - - # Grudged again on opponent's D - opponent = axl.MockPlayer([C, D] + [C] * 11) - actions = [(C, C), (C, D)] + [(D, C)] * 11 + [(C, C), (C, D), (D, C)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"grudged": True, "grudge_memory": 0, "mem_length": 2}, - ) - - -class TestInversePunisher(TestPlayer): - - name = "Inverse Punisher" - player = axl.InversePunisher - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_init(self): - """Tests for the __init__ method.""" - player = axl.InversePunisher() - self.assertEqual(player.mem_length, 1) - self.assertFalse(player.grudged) - self.assertEqual(player.grudge_memory, 1) - - def test_strategy(self): - opponent = axl.Alternator() - actions = [(C, C), (C, D), (D, C)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"grudged": True, "grudge_memory": 0}, - ) - - opponent = axl.MockPlayer([C, D] + [C] * 10) - actions = [(C, C), (C, D)] + [(D, C)] * 11 - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"grudged": True, "grudge_memory": 10}, - ) - - # Eventually the grudge is dropped - opponent = axl.MockPlayer([C, D] + [C] * 10) - actions = [(C, C), (C, D)] + [(D, C)] * 11 + [(C, D)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"grudged": False, "grudge_memory": 0, "mem_length": 10}, - ) - - # Grudged again on opponent's D - opponent = axl.MockPlayer([C, D] + [C] * 11) - actions = [(C, C), (C, D)] + [(D, C)] * 11 + [(C, C), (C, D), (D, C)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"grudged": True, "grudge_memory": 0, "mem_length": 17}, - ) - - -class TestLevelPunisher(TestPlayer): - - name = "Level Punisher" - player = axl.LevelPunisher - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Cooperates if the turns played are less than 10. 
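The comment above and the action traces that follow imply a simple rule for Level Punisher: cooperate while fewer than ten turns have been played, then defect whenever the opponent's overall defection rate exceeds 20%. A sketch inferred from these tests, not taken from the deleted implementation itself (`level_punisher_move` is a hypothetical name):

```python
def level_punisher_move(opponent_history):
    """Cooperate for the first ten turns, then defect whenever the opponent's
    defection rate exceeds 20% (rule inferred from the expected actions)."""
    if len(opponent_history) < 10:
        return "C"
    rate = opponent_history.count("D") / len(opponent_history)
    return "D" if rate > 0.2 else "C"

assert level_punisher_move("CCCCDDCCCD") == "D"  # 3/10 = 30% defections
assert level_punisher_move("CCCCDCCCCD") == "C"  # 2/10 = 20% is not above the bar
```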
-        actions = [(C, C)] * 9
-        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions)
-
-        # After 10 rounds
-        # Check if number of defections by opponent is greater than 20%
-        opponent = axl.MockPlayer([C] * 4 + [D] * 2 + [C] * 3 + [D])
-        actions = [(C, C)] * 4 + [(C, D)] * 2 + [(C, C)] * 3 + [(C, D), (D, C)]
-        self.versus_test(opponent=opponent, expected_actions=actions)
-
-        # Check if number of defections by opponent is less than 20%
-        opponent = axl.MockPlayer([C] * 4 + [D] + [C] * 4 + [D])
-        actions = [(C, C)] * 4 + [(C, D)] + [(C, C)] * 4 + [(C, D), (C, C)]
-        self.versus_test(opponent=opponent, expected_actions=actions)
-
-
-class TestTrickyLevelPunisher(TestPlayer):
-
-    name = "Tricky Level Punisher"
-    player = axl.TrickyLevelPunisher
-    expected_classifier = {
-        "memory_depth": float("inf"),  # Long memory
-        "stochastic": False,
-        "makes_use_of": set(),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_strategy(self):
-        # Cooperates if the turns played are less than 10.
-        actions = [(C, C)] * 9
-        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions)
-
-        # After 10 rounds
-        # Check if number of defections by opponent is greater than 20%
-        opponent = axl.MockPlayer([C] * 4 + [D] * 2 + [C] * 3 + [D])
-        actions = [(C, C)] * 4 + [(C, D)] * 2 + [(C, C)] * 3 + [(C, D), (D, C)]
-        self.versus_test(opponent=opponent, expected_actions=actions)
-
-        # Check if number of defections by opponent is greater than 10%
-        opponent = axl.MockPlayer([C] * 4 + [D] + [C] * 4 + [D])
-        actions = [(C, C)] * 4 + [(C, D)] + [(C, C)] * 4 + [(C, D), (C, C)]
-        self.versus_test(opponent=opponent, expected_actions=actions)
-
-        # After 10 rounds
-        # Check if number of defections by opponent is greater than 5%
-        opponent = axl.MockPlayer([C] * 4 + [D] + [C] * 5)
-        actions = [(C, C)] * 4 + [(C, D)] + [(C, C)] * 5
-        self.versus_test(opponent=opponent, expected_actions=actions)
-
-        # Check if number of defections by opponent is less than 5%
-        opponent = axl.MockPlayer([C] * 10)
-        actions = [(C, C)] * 5
-        self.versus_test(opponent=opponent, expected_actions=actions)
diff --git a/axelrod/ipd/tests/strategies/test_qlearner.py b/axelrod/ipd/tests/strategies/test_qlearner.py
deleted file mode 100644
index 1b07a4bfb..000000000
--- a/axelrod/ipd/tests/strategies/test_qlearner.py
+++ /dev/null
@@ -1,151 +0,0 @@
-"""Tests for the QLearner strategies."""
-
-import random
-
-import axelrod as axl
-
-from .test_player import TestPlayer
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestRiskyQLearner(TestPlayer):
-
-    name = "Risky QLearner"
-    player = axl.RiskyQLearner
-    expected_classifier = {
-        "memory_depth": float("inf"),
-        "stochastic": True,
-        "makes_use_of": set(["game"]),
-        "long_run_time": False,
-        "inspects_source": False,
-        "manipulates_source": False,
-        "manipulates_state": False,
-    }
-
-    def test_payoff_matrix(self):
-        (R, P, S, T) = axl.IpdGame().RPST()
-        payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}}
-        player = self.player()
-        self.assertEqual(player.payoff_matrix, payoff_matrix)
-
-    def test_strategy(self):
-        actions = [(C, C), (D, C), (C, C), (C, C)]
-        self.versus_test(
-            opponent=axl.Cooperator(),
-            expected_actions=actions,
-            seed=5,
-            attrs={
-                "Qs": {
-                    "": {C: 0, D: 0.9},
-                    "0.0": {C: 2.7, D: 0},
-                    "C1.0": {C: 0, D: 4.5},
-                    "CC2.0": {C: 2.7, D: 0},
-                    "CCC3.0": {C: 0, D: 0},
-                },
-                "Vs": {"": 0.9, "0.0": 2.7, "C1.0": 4.5, "CC2.0": 2.7, "CCC3.0": 0},
-                "prev_state": "CCC3.0",
-            },
-        )
-
-
-class 
TestArrogantQLearner(TestPlayer): - - name = "Arrogant QLearner" - player = axl.ArrogantQLearner - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (D, C), (C, C), (C, C)] - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - seed=5, - attrs={ - "Qs": { - "": {C: 0, D: 0.9}, - "0.0": {C: 2.7, D: 0}, - "C1.0": {C: 0, D: 4.5}, - "CC2.0": {C: 2.7, D: 0}, - "CCC3.0": {C: 0, D: 0}, - }, - "Vs": {"": 0.9, "0.0": 2.7, "C1.0": 4.5, "CC2.0": 2.7, "CCC3.0": 0}, - "prev_state": "CCC3.0", - }, - ) - - -class TestHesitantQLearner(TestPlayer): - - name = "Hesitant QLearner" - player = axl.HesitantQLearner - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, D), (D, D), (C, D), (C, D)] - self.versus_test( - opponent=axl.Defector(), - expected_actions=actions, - seed=5, - attrs={ - "Qs": { - "": {C: 0, D: 0.1}, - "0.0": {C: 0, D: 0}, - "D0.0": {C: 0, D: 0.1}, - "DD0.0": {C: 0, D: 0}, - "DDD0.0": {C: 0, D: 0}, - }, - "Vs": {"": 0.1, "0.0": 0.0, "D0.0": 0.1, "DD0.0": 0.0, "DDD0.0": 0}, - "prev_state": "DDD0.0", - }, - ) - - -class TestCautiousQLearner(TestPlayer): - - name = "Cautious QLearner" - player = axl.CautiousQLearner - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, D), (D, D), (C, D), (C, D)] - self.versus_test( - opponent=axl.Defector(), - expected_actions=actions, - seed=5, - attrs={ - "Qs": { - "": {C: 0, D: 0.1}, - "0.0": {C: 0, D: 0}, - "D0.0": {C: 0, D: 0.1}, - "DD0.0": {C: 0, D: 0}, - "DDD0.0": {C: 0, D: 0}, - }, - "Vs": {"": 0.1, "0.0": 0.0, "D0.0": 0.1, "DD0.0": 0.0, "DDD0.0": 0}, - "prev_state": "DDD0.0", - }, - ) diff --git a/axelrod/ipd/tests/strategies/test_rand.py b/axelrod/ipd/tests/strategies/test_rand.py deleted file mode 100644 index 76bfb3478..000000000 --- a/axelrod/ipd/tests/strategies/test_rand.py +++ /dev/null @@ -1,46 +0,0 @@ -"""Tests for the random strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestRandom(TestPlayer): - - name = "Random: 0.5" - player = axl.Random - expected_classifier = { - "memory_depth": 0, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - """Test that strategy is randomly picked (not affected by history).""" - opponent = axl.MockPlayer() - actions = [(C, C), (D, C), (D, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=1) - - opponent = axl.MockPlayer() - actions = [(D, C), (D, C), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - opponent = axl.MockPlayer() - actions = [(D, C), (D, C), (D, C)] - self.versus_test(opponent, expected_actions=actions, init_kwargs={"p": 0}) - - opponent = axl.MockPlayer() - actions = [(C, C), (C, C), (C, C)] - 
self.versus_test(opponent, expected_actions=actions, init_kwargs={"p": 1})
-
- def test_deterministic_classification(self):
- """Test classification when p is 0 or 1"""
- for p in [0, 1]:
- player = axl.Random(p=p)
- self.assertFalse(axl.Classifiers["stochastic"](player))
diff --git a/axelrod/ipd/tests/strategies/test_resurrection.py b/axelrod/ipd/tests/strategies/test_resurrection.py
deleted file mode 100644
index c71becde8..000000000
--- a/axelrod/ipd/tests/strategies/test_resurrection.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""Tests for the Resurrection strategy."""
-
-import axelrod as axl
-
-from .test_player import TestPlayer
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestResurrection(TestPlayer):
-
- name = "Resurrection"
- player = axl.Resurrection
- expected_classifier = {
- "memory_depth": 5,
- "stochastic": False,
- "makes_use_of": set(),
- "long_run_time": False,
- "inspects_source": False,
- "manipulates_source": False,
- "manipulates_state": False,
- }
-
- def test_strategy(self):
- # Check if the turns played are greater than 5
- actions = [(C, C), (C, C), (C, C), (C, C), (C, C), (C, C), (C, C)]
- self.versus_test(axl.Cooperator(), expected_actions=actions)
-
- actions = [(C, D), (D, D), (D, D), (D, D), (D, D), (D, D), (D, D)]
- self.versus_test(axl.Defector(), expected_actions=actions)
-
- # Check for TFT behavior after 5 rounds
- actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C)]
- self.versus_test(axl.Alternator(), expected_actions=actions)
-
-
-class TestDoubleResurrection(TestPlayer):
-
- name = "DoubleResurrection"
- player = axl.DoubleResurrection
- expected_classifier = {
- "memory_depth": 5,
- "stochastic": False,
- "makes_use_of": set(),
- "long_run_time": False,
- "inspects_source": False,
- "manipulates_source": False,
- "manipulates_state": False,
- }
-
- def test_strategy(self):
- actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)]
- self.versus_test(axl.Alternator(), expected_actions=actions)
-
- actions = [(C, C), (C, C), (C, C), (C, C), (C, C), (D, C)]
- self.versus_test(axl.Cooperator(), expected_actions=actions)
-
- actions = [(C, D), (D, D), (D, D), (D, D), (D, D), (D, D), (C, D)]
- self.versus_test(axl.Defector(), expected_actions=actions)
diff --git a/axelrod/ipd/tests/strategies/test_retaliate.py b/axelrod/ipd/tests/strategies/test_retaliate.py
deleted file mode 100644
index 251438735..000000000
--- a/axelrod/ipd/tests/strategies/test_retaliate.py
+++ /dev/null
@@ -1,140 +0,0 @@
-"""Tests for the retaliate strategy."""
-
-import axelrod as axl
-
-from .test_player import TestOpponent, TestPlayer
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestRetaliate(TestPlayer):
-
- name = "Retaliate: 0.1"
- player = axl.Retaliate
- expected_classifier = {
- "memory_depth": float("inf"), # Long memory
- "stochastic": False,
- "makes_use_of": set(),
- "long_run_time": False,
- "inspects_source": False,
- "manipulates_source": False,
- "manipulates_state": False,
- }
-
- def test_strategy(self):
- # If opponent has defected more than 10 percent of the time, defect.
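# --- Editor's sketch (illustrative, not part of the deleted file): a hedged
# reading of the comment above that is consistent with the expectations
# below, assuming Retaliate defects once the opponent has defected against
# its cooperation more than threshold-times as often as the reverse; the
# helper name is hypothetical and C/D are the module's action aliases.
def _retaliate_move_sketch(own_history, opponent_history, retaliation_threshold=0.1):
    rounds = list(zip(own_history, opponent_history))
    if rounds.count((C, D)) > rounds.count((D, C)) * retaliation_threshold:
        return D  # opponent crossed the threshold: retaliate
    return C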
- opponent = axl.Cooperator() - actions = [(C, C)] * 5 - self.versus_test(opponent=opponent, expected_actions=actions) - - opponent = axl.MockPlayer([C, C, C, D, C]) - actions = [(C, C), (C, C), (C, C), (C, D), (D, C), (D, C)] - self.versus_test(opponent=opponent, expected_actions=actions) - - -class TestRetaliate2(TestPlayer): - - name = "Retaliate 2: 0.08" - player = axl.Retaliate2 - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent has defected more than 8 percent of the time, defect. - opponent = axl.MockPlayer([C] * 13 + [D]) - actions = [(C, C)] * 13 + [(C, D), (D, C)] - self.versus_test(opponent=opponent, expected_actions=actions) - - -class TestRetaliate3(TestPlayer): - - name = "Retaliate 3: 0.05" - player = axl.Retaliate3 - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent has defected more than 5 percent of the time, defect. - opponent = axl.MockPlayer([C] * 19 + [D]) - actions = [(C, C)] * 19 + [(C, D), (D, C)] - self.versus_test(opponent=opponent, expected_actions=actions) - - -class TestLimitedRetaliate(TestPlayer): - - name = "Limited Retaliate: 0.1, 20" - player = axl.LimitedRetaliate - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent has never defected, co-operate - opponent = axl.Cooperator() - actions = [(C, C)] * 5 - self.versus_test( - opponent=opponent, expected_actions=actions, attrs={"retaliating": False} - ) - - # Retaliate after a (C, D) round. 
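# --- Editor's sketch (illustrative, not part of the deleted file): the
# attrs= assertions in this class poke at LimitedRetaliate's bookkeeping.
# A hedged sketch of that state machine, assuming the same pair-count
# trigger as Retaliate plus a retaliation_limit that forces a cooling-off
# round; the helper name is hypothetical and the real strategy may order
# its checks differently.
def _limited_retaliate_move_sketch(self, opponent):
    rounds = list(zip(self.history, opponent.history))
    if rounds.count((C, D)) > rounds.count((D, C)) * self.retaliation_threshold:
        self.retaliating = True  # betrayed too often: retaliate
    else:
        self.retaliating = False
        self.retaliation_count = 0
    if self.retaliation_count >= self.retaliation_limit:
        self.retaliating = False  # limit reached: cool off for a round
        self.retaliation_count = 0
    if self.retaliating:
        self.retaliation_count += 1
        return D
    return C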
- opponent = axl.MockPlayer([C, C, C, D, C]) - actions = [(C, C), (C, C), (C, C), (C, D), (D, C), (D, C)] - self.versus_test( - opponent=opponent, expected_actions=actions, attrs={"retaliating": True} - ) - - opponent = axl.Alternator() - - # Count retaliations - actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] - self.versus_test( - opponent=opponent, expected_actions=actions, attrs={"retaliation_count": 3} - ) - opponent = axl.Alternator() - - # Cooperate if we hit the retaliation limit - actions = [(C, C), (C, D), (D, C), (D, D), (C, C)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"retaliation_count": 0}, - init_kwargs={"retaliation_limit": 2}, - ) - - # Defect again after cooperating - actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (D, D), (D, C)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"retaliation_count": 2}, - init_kwargs={"retaliation_limit": 2}, - ) - - # Different behaviour with different retaliation threshold - actions = [(C, C), (C, D), (D, C), (C, D), (C, C), (C, D), (C, C)] - self.versus_test( - opponent=opponent, - expected_actions=actions, - attrs={"retaliation_count": 0}, - init_kwargs={"retaliation_limit": 2, "retaliation_threshold": 9}, - ) diff --git a/axelrod/ipd/tests/strategies/test_revised_downing.py b/axelrod/ipd/tests/strategies/test_revised_downing.py deleted file mode 100644 index fa6897a85..000000000 --- a/axelrod/ipd/tests/strategies/test_revised_downing.py +++ /dev/null @@ -1,42 +0,0 @@ -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - -class TestRevisedDowning(TestPlayer): - - name = "Revised Downing" - player = axl.RevisedDowning - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (C, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, C, C]) - actions = [(C, D), (C, C), (C, C), (C, D)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[D, D, C]) - actions = [(C, D), (C, D), (D, C), (C, D)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[C, C, D, D, C, C]) - actions = [(C, C), (C, C), (C, D), (C, D), (D, C), (C, C), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[C, C, C, C, D, D]) - actions = [(C, C), (C, C), (C, C), (C, C), (C, D), (C, D), (C, C)] - self.versus_test(opponent, expected_actions=actions) diff --git a/axelrod/ipd/tests/strategies/test_selfsteem.py b/axelrod/ipd/tests/strategies/test_selfsteem.py deleted file mode 100644 index c0d9ec84c..000000000 --- a/axelrod/ipd/tests/strategies/test_selfsteem.py +++ /dev/null @@ -1,81 +0,0 @@ -"""Tests for the SelfSteem strategy.""" - -import random - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestSelfSteem(TestPlayer): - - name = "SelfSteem" - player = axl.SelfSteem - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - - 
# Check for f > 0.95, defect - actions = ( - [(C, C), (C, C), (D, C), (D, C), (C, C), (D, C)] + [(C, C)] * 4 + [(D, C)] - ) - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) - - # Check for f < -0.95, cooperate - actions = [(D, C), (C, C), (D, C), (D, C), (C, C), (D, C), (C, C), (C, C)] - self.versus_test( - opponent=axl.Cooperator(), expected_actions=actions, seed=0 - ) - - actions = [(D, D)] + [(D, D)] * 5 + [(D, D), (C, D), (C, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=0) - - # Check for -0.3 < f < 0.3, random - actions = ( - [(D, C), (C, C), (D, C), (D, C), (C, C), (D, C)] - + [(C, C)] * 6 - + [(D, C), (D, C)] - + [(C, C)] * 7 - ) - self.versus_test( - opponent=axl.Cooperator(), expected_actions=actions, seed=6 - ) - - actions = ( - [(D, D)] * 7 - + [(C, D), (C, D)] - + [(D, D)] * 8 - + [(C, D), (C, D), (D, D), (D, D), (D, D)] - ) - self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=5) - - # Check for 0.95 > abs(f) > 0.3, follows TitForTat - actions = ( - [(D, D)] * 5 - + [(C, D), (D, D), (C, D), (C, D), (D, D), (C, D)] - + [(D, D)] * 5 - ) - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - actions = [ - (D, C), - (C, C), - (D, C), - (D, C), - (C, C), - (D, C), - (C, C), - (C, C), - (C, C), - (C, C), - ] - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) diff --git a/axelrod/ipd/tests/strategies/test_sequence_player.py b/axelrod/ipd/tests/strategies/test_sequence_player.py deleted file mode 100644 index f0503ce95..000000000 --- a/axelrod/ipd/tests/strategies/test_sequence_player.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Tests for the Thue-Morse strategies.""" -import unittest - -import axelrod as axl -from axelrod.ipd._strategy_utils import recursive_thue_morse -from axelrod.ipd.strategies.sequence_player import SequencePlayer - -from .test_player import TestOpponent, TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestThueMoreGenerator(unittest.TestCase): - def test_sequence(self): - expected = [0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0] - for i, e in enumerate(expected): - self.assertEqual(recursive_thue_morse(i), e) - - -class TestSequencePlayer(unittest.TestCase): - def test_sequence_player(self): - """Basic test for SequencePlayer.""" - - def cooperate_gen(): - yield 1 - - player = SequencePlayer(generator_function=cooperate_gen) - opponent = TestOpponent() - self.assertEqual(C, player.strategy(opponent)) - - -class TestThueMorse(TestPlayer): - - name = "ThueMorse" - player = axl.ThueMorse - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - - thue_morse_seq = [D, C, C, D, C, D, D, C, C, D, D, C, D, C, C, D, C] - n = len(thue_morse_seq) - - actions = list(zip(thue_morse_seq, [C] * n)) - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = list(zip(thue_morse_seq, [D] * n)) - self.versus_test(axl.Defector(), expected_actions=actions) - - -class TestThueMorseInverse(TestPlayer): - - name = "ThueMorseInverse" - player = axl.ThueMorseInverse - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - inv_thue_morse_seq = [C, D, D, C, D, 
C, C, D, D, C, C, D, C, D, D, C, D] - n = len(inv_thue_morse_seq) - - actions = list(zip(inv_thue_morse_seq, [C] * n)) - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = list(zip(inv_thue_morse_seq, [D] * n)) - self.versus_test(axl.Defector(), expected_actions=actions) diff --git a/axelrod/ipd/tests/strategies/test_shortmem.py b/axelrod/ipd/tests/strategies/test_shortmem.py deleted file mode 100644 index 48dcd0138..000000000 --- a/axelrod/ipd/tests/strategies/test_shortmem.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Tests for the ShortMem strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestShortMem(TestPlayer): - - name = "ShortMem" - player = axl.ShortMem - expected_classifier = { - "memory_depth": float('inf'), - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - - # Starts by cooperating for the first ten moves. - actions = [(C, C)] * 10 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D)] * 10 - self.versus_test(axl.Defector(), expected_actions=actions) - - # Cooperate if in the last ten moves, Cooperations - Defections >= 3 - actions = [(C, C)] * 11 + [(C, D)] * 4 - self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 11 + [D] * 4), - expected_actions=actions, - ) - - # Defect if in the last ten moves, Defections - Cooperations >= 3 - actions = [(C, D)] * 11 + [(D, C)] * 4 - self.versus_test( - opponent=axl.MockPlayer(actions=[D] * 11 + [C] * 4), - expected_actions=actions, - ) - - # If neither of the above conditions are met, apply TitForTat - actions = [(C, D)] * 5 + [(C, C)] * 6 + [(C, D), (D, D), (D, D), (D, C), (C, C)] - self.versus_test( - opponent=axl.MockPlayer(actions=[D] * 5 + [C] * 6 + [D, D, D, C, C]), - expected_actions=actions, - ) - - actions = [(C, C)] * 5 + [(C, D)] * 6 + [(D, C), (C, C), (C, C), (C, D), (D, D)] - self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 5 + [D] * 6 + [C, C, C, D, D]), - expected_actions=actions, - ) diff --git a/axelrod/ipd/tests/strategies/test_stalker.py b/axelrod/ipd/tests/strategies/test_stalker.py deleted file mode 100644 index 31acdb769..000000000 --- a/axelrod/ipd/tests/strategies/test_stalker.py +++ /dev/null @@ -1,94 +0,0 @@ -"""Tests for the Stalker strategy.""" - -import axelrod as axl - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestStalker(TestPlayer): - - name = "Stalker: (D,)" - player = axl.Stalker - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(["game", "length"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C)] * 3 + [(D, C)] - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - # wish_score < current_average_score < very_good_score - actions = [(C, C)] * 7 + [(C, D), (C, D), (C, C), (C, C), (D, C)] - self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 7 + [D] * 2), expected_actions=actions - ) - - actions = [(C, C)] * 7 + [(C, D), (C, C), (D, C)] - self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 7 + [D]), expected_actions=actions - ) - - # current_average_score > 2 - actions = [(C, C)] * 9 + [(D, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # 1 < current_average_score < 2 - actions = [(C, 
C)] * 7 + [(C, D)] * 4 + [(D, D)] - self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 7 + [D] * 5), expected_actions=actions - ) - - # current_average_score < 1 - actions = ( - [(C, D)] - + [(D, D)] * 2 - + [(C, D)] * 3 - + [(D, D), (C, D), (D, D), (C, D), (D, D), (C, D), (D, D)] - ) - self.versus_test(axl.Defector(), expected_actions=actions, seed=6) - - actions = [(C, D)] * 3 + [ - (D, D), - (C, D), - (D, D), - (C, D), - (C, D), - (D, D), - (C, D), - (C, D), - (C, D), - (D, D), - ] - self.versus_test(axl.Defector(), expected_actions=actions, seed=7) - - # defect in last round - actions = [(C, C)] * 199 + [(D, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, match_attributes={"length": 200} - ) - - # Force a defection before the end of the actual match which ensures - # that current_average_score > very_good_score - actions = [(C, C)] * 3 + [(D, C)] * 3 - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - match_attributes={"length": 4}, - ) - - def test_reset(self): - axl.seed(0) - player = axl.Stalker() - m = axl.IpdMatch((player, axl.Alternator())) - m.play() - self.assertNotEqual(player.current_score, 0) - player.reset() - self.assertEqual(player.current_score, 0) diff --git a/axelrod/ipd/tests/strategies/test_titfortat.py b/axelrod/ipd/tests/strategies/test_titfortat.py deleted file mode 100644 index c71ec03a9..000000000 --- a/axelrod/ipd/tests/strategies/test_titfortat.py +++ /dev/null @@ -1,1191 +0,0 @@ -"""Tests for the tit for tat strategies.""" - -import copy - -import random - -import axelrod as axl -from axelrod.ipd.tests.property import strategy_lists - -from hypothesis import given -from hypothesis.strategies import integers - -from .test_player import TestPlayer - -C, D = axl.Action.C, axl.Action.D - - -class TestTitForTat(TestPlayer): - """ - Note that this test is referred to in the documentation as an example on - writing tests. If you modify the tests here please also modify the - documentation. 
- """ - - name = "Tit For Tat" - player = axl.TitForTat - expected_classifier = { - "memory_depth": 1, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Play against opponents - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - # This behaviour is independent of knowledge of the IpdMatch length - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - match_attributes={"length": float("inf")}, - ) - - # We can also test against random strategies - actions = [(C, D), (D, D), (D, C), (C, C), (C, D)] - self.versus_test(axl.Random(), expected_actions=actions, seed=0) - - actions = [(C, C), (C, D), (D, D), (D, C)] - self.versus_test(axl.Random(), expected_actions=actions, seed=1) - - # If you would like to test against a sequence of moves you should use - # a MockPlayer - opponent = axl.MockPlayer(actions=[C, D]) - actions = [(C, C), (C, D), (D, C), (C, D)] - self.versus_test(opponent, expected_actions=actions) - - opponent = axl.MockPlayer(actions=[C, C, D, D, C, D]) - actions = [(C, C), (C, C), (C, D), (D, D), (D, C), (C, D)] - self.versus_test(opponent, expected_actions=actions) - - -class TestTitFor2Tats(TestPlayer): - name = "Tit For 2 Tats" - player = axl.TitFor2Tats - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Will punish sequence of 2 defections but will forgive one - opponent = axl.MockPlayer(actions=[D, D, D, C, C]) - actions = [(C, D), (C, D), (D, D), (D, C), (C, C), (C, D)] - self.versus_test(opponent, expected_actions=actions) - opponent = axl.MockPlayer(actions=[C, C, D, D, C, D, D, C, C, D, D]) - actions = [ - (C, C), - (C, C), - (C, D), - (C, D), - (D, C), - (C, D), - (C, D), - (D, C), - (C, C), - (C, D), - (C, D), - ] - self.versus_test(opponent, expected_actions=actions) - - -class TestTwoTitsForTat(TestPlayer): - name = "Two Tits For Tat" - player = axl.TwoTitsForTat - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Will defect twice when last turn of opponent was defection. 
- opponent = axl.MockPlayer(actions=[D, C, C, D, C]) - actions = [(C, D), (D, C), (D, C), (C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - actions = [(C, C), (C, C)] - self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (D, D), (D, D)] - self.versus_test(opponent=axl.Defector(), expected_actions=actions) - - -class TestDynamicTwoTitsForTat(TestPlayer): - name = "Dynamic Two Tits For Tat" - player = axl.DynamicTwoTitsForTat - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Test that it is stochastic - opponent = axl.MockPlayer(actions=[D, C, D, D, C]) - actions = [(C, D), (D, C), (C, D), (D, D), (D, C)] - self.versus_test(opponent, expected_actions=actions, seed=1) - # Should respond differently with a different seed - actions = [(C, D), (D, C), (D, D), (D, D), (C, C)] - self.versus_test(opponent, expected_actions=actions, seed=2) - - # Will cooperate if opponent cooperates. - actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - # Test against defector - actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - -class TestBully(TestPlayer): - name = "Bully" - player = axl.Bully - expected_classifier = { - "memory_depth": 1, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Will do opposite of what opponent does. - actions = [(D, C), (D, D), (C, C), (D, D), (C, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - actions = [(D, C), (D, C), (D, C), (D, C), (D, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(D, D), (C, D), (C, D), (C, D), (C, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - -class TestSneakyTitForTat(TestPlayer): - name = "Sneaky Tit For Tat" - player = axl.SneakyTitForTat - expected_classifier = { - "memory_depth": float("inf"), # Long memory - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - opponent = axl.MockPlayer(actions=[C, C, C, D, C, C]) - actions = [(C, C), (C, C), (D, C), (D, D), (C, C), (C, C)] - self.versus_test(opponent, expected_actions=actions) - - # Repents if punished for a defection - actions = [(C, C), (C, D), (D, C), (C, D), (C, C)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - -class TestSuspiciousTitForTat(TestPlayer): - name = "Suspicious Tit For Tat" - player = axl.SuspiciousTitForTat - expected_classifier = { - "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Plays like TFT after the first move, repeating the opponents last - # move. 
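# --- Editor's sketch (illustrative, not part of the deleted file): the
# comment above pins the whole strategy down; the helper name is
# hypothetical and C/D are the module's action aliases.
def _suspicious_tit_for_tat_move_sketch(opponent_history):
    # Defect on the first move, then mirror the opponent's previous move.
    return D if not opponent_history else opponent_history[-1]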
- actions = [(D, C), (C, D)] * 8 - self.versus_test(axl.TitForTat(), expected_actions=actions) - - actions = [(D, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(D, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - -class TestAntiTitForTat(TestPlayer): - name = "Anti Tit For Tat" - player = axl.AntiTitForTat - expected_classifier = { - "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (D, C), (D, D), (C, D)] * 4 - self.versus_test(axl.TitForTat(), expected_actions=actions) - - -class TestHardTitForTat(TestPlayer): - name = "Hard Tit For Tat" - player = axl.HardTitForTat - expected_classifier = { - "memory_depth": 3, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - opponent = axl.MockPlayer(actions=[D, C, C, C, D, C]) - actions = [(C, D), (D, C), (D, C), (D, C), (C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - actions = [(C, C), (C, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - -class TestHardTitFor2Tats(TestPlayer): - name = "Hard Tit For 2 Tats" - player = axl.HardTitFor2Tats - expected_classifier = { - "memory_depth": 3, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Uses memory 3 to punish 2 consecutive defections - opponent = axl.MockPlayer(actions=[D, C, C, D, D, D, C]) - actions = [(C, D), (C, C), (C, C), (C, D), (C, D), (D, D), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - -class TestOmegaTFT(TestPlayer): - name = "Omega TFT: 3, 8" - player = axl.OmegaTFT - - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - player_history = [C, D, C, D, C, C, C, C, C] - opp_history = [D, C, D, C, D, C, C, C, C] - actions = list(zip(player_history, opp_history)) - self.versus_test(axl.SuspiciousTitForTat(), expected_actions=actions) - - player_history = [C, C, D, C, D, C, C, C, D, D, D, D, D, D] - opp_history = [C, D] * 7 - actions = list(zip(player_history, opp_history)) - self.versus_test(axl.Alternator(), expected_actions=actions) - - -class TestGradual(TestPlayer): - name = "Gradual" - player = axl.Gradual - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Punishes defection with a growing number of defections and calms - # the opponent with two cooperations in a row. 
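# --- Editor's sketch (illustrative, not part of the deleted file): a rough
# shape of the bookkeeping the attrs= checks below assert on, assuming
# punish_count counts down a run of defections equal to the opponent's total
# defections so far, after which calm_count pays out two cooperations; the
# helper name is hypothetical and the real strategy may order its checks
# differently.
def _gradual_move_sketch(self, opponent):
    if self.punish_count > 0:
        self.punish_count -= 1  # keep punishing
        return D
    if self.calm_count > 0:
        self.calm_count -= 1  # calm the opponent with two cooperations
        return C
    if opponent.history and opponent.history[-1] == D:
        # Punish with as many defections as the opponent has played in
        # total (this D is the first of the run), then schedule the calm.
        self.punish_count = opponent.history.count(D) - 1
        self.calm_count = 2
        return D
    return C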
- opponent = axl.MockPlayer(actions=[C]) - actions = [(C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, - ) - - opponent = axl.MockPlayer(actions=[D]) - actions = [(C, D)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, - ) - - opponent = axl.MockPlayer(actions=[D, C]) - actions = [(C, D), (D, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 2, "punish_count": 0,}, - ) - - opponent = axl.MockPlayer(actions=[D, C, C]) - actions = [(C, D), (D, C), (C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 1, "punish_count": 0,}, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C]) - actions = [(C, D), (D, C), (C, D), (C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C, C]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 0, "punish_count": 0,}, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C, C, D, C]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C), (C, D), (D, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 2, "punish_count": 2,}, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C, D, D, D]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, D), (D, D), (D, D)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 2, "punish_count": 1,}, - ) - - opponent = axl.Defector() - actions = [ - (C, D), - (D, D), # 1 defection as a response to the 1 defection by opponent - (C, D), - (C, D), - (D, D), - # starts defecting after a total of 4 defections by the opponent - (D, D), - (D, D), - (D, D), # 4 defections - (C, D), - (C, D), - (D, D), - # Start defecting after a total of 10 defections by the opponent - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), # 10 defections - (C, D), - (C, D), - (D, D), # starts defecting after 22 defections by the opponent - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), - (D, D), # 22 defections - (C, D), - (C, D), - (D, D), - (D, D), - (D, D), - (D, D), - ] - self.versus_test( - opponent, - expected_actions=actions, - attrs={"calm_count": 2, "punish_count": 42,}, - ) - - def test_specific_set_of_results(self): - """ - This tests specific reported results as discussed in - https://github.com/Axelrod-Python/Axelrod/issues/1294 - - The results there used a version of mistrust with a bug that corresponds - to a memory one player that start by defecting and only cooperates if - both players cooperated in the previous round. 
- """ - mistrust_with_bug = axl.MemoryOnePlayer(initial=D, four_vector=(1, 0, 0, 0),) - players = [ - self.player(), - axl.TitForTat(), - axl.GoByMajority(), - axl.Grudger(), - axl.WinStayLoseShift(), - axl.Prober(), - axl.Defector(), - mistrust_with_bug, - axl.Cooperator(), - axl.CyclerCCD(), - axl.CyclerDDC(), - ] - axl.seed(1) - tournament = axl.IpdTournament(players, turns=1000, repetitions=1) - results = tournament.play(progress_bar=False) - scores = [ - round(average_score_per_turn * 1000, 1) - for average_score_per_turn in results.payoff_matrix[0] - ] - expected_scores = [ - 3000.0, - 3000.0, - 3000.0, - 3000.0, - 3000.0, - 2999.0, - 983.0, - 983.0, - 3000.0, - 3596.0, - 2302.0, - ] - self.assertEqual(scores, expected_scores) - - -class TestOriginalGradual(TestPlayer): - name = "Original Gradual" - player = axl.OriginalGradual - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Punishes defection with a growing number of defections and calms - # the opponent with two cooperations in a row. - opponent = axl.MockPlayer(actions=[C]) - actions = [(C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "calming": False, - "punishing": False, - "punishment_count": 0, - "punishment_limit": 0, - }, - ) - - opponent = axl.MockPlayer(actions=[D]) - actions = [(C, D)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "calming": False, - "punishing": False, - "punishment_count": 0, - "punishment_limit": 0, - }, - ) - - opponent = axl.MockPlayer(actions=[D, C]) - actions = [(C, D), (D, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "calming": False, - "punishing": True, - "punishment_count": 1, - "punishment_limit": 1, - }, - ) - - opponent = axl.MockPlayer(actions=[D, C, C]) - actions = [(C, D), (D, C), (C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "calming": True, - "punishing": False, - "punishment_count": 0, - "punishment_limit": 1, - }, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C]) - actions = [(C, D), (D, C), (C, D), (C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "calming": False, - "punishing": False, - "punishment_count": 0, - "punishment_limit": 1, - }, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "calming": False, - "punishing": False, - "punishment_count": 0, - "punishment_limit": 1, - }, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C, C]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "calming": False, - "punishing": False, - "punishment_count": 0, - "punishment_limit": 1, - }, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C, C, D, C]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C), (C, D), (D, C)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "calming": False, - "punishing": True, - "punishment_count": 1, - "punishment_limit": 2, - }, - ) - - opponent = axl.MockPlayer(actions=[D, C, D, C, C, D, D, D]) - actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, D), (D, D), (D, D)] - self.versus_test( - opponent, - expected_actions=actions, - attrs={ - "calming": False, - "punishing": 
True,
- "punishment_count": 2,
- "punishment_limit": 2,
- },
- )
-
- def test_output_from_literature(self):
- """
- This strategy is not fully described in the literature; however, the
- scores for the strategy against a set of opponents are reported
-
- Bruno Beaufils, Jean-Paul Delahaye, Philippe Mathieu
- "Our Meeting With Gradual: A Good Strategy For The Iterated Prisoner's
- Dilemma" Proc. Artif. Life 1996
-
- This test just ensures that the strategy is as it was originally defined.
-
- See https://github.com/Axelrod-Python/Axelrod/issues/1294 for another
- discussion of this.
- """
- players = [
- axl.Cooperator(),
- axl.Defector(),
- axl.Random(),
- axl.TitForTat(),
- axl.Grudger(),
- axl.CyclerDDC(),
- axl.CyclerCCD(),
- axl.GoByMajority(),
- axl.SuspiciousTitForTat(),
- axl.Prober(),
- self.player(),
- axl.WinStayLoseShift(),
- ]
-
- axl.seed(1)
- turns = 1000
- tournament = axl.IpdTournament(players, turns=turns, repetitions=1)
- results = tournament.play(progress_bar=False)
- scores = [
- round(average_score_per_turn * 1000, 1)
- for average_score_per_turn in results.payoff_matrix[-2]
- ]
- expected_scores = [
- 3000.0,
- 915.0,
- 2763.0,
- 3000.0,
- 3000.0,
- 2219.0,
- 3472.0,
- 3000.0,
- 2996.0,
- 2999.0,
- 3000.0,
- 3000.0,
- ]
- self.assertEqual(scores, expected_scores)
-
-
-class TestContriteTitForTat(TestPlayer):
- name = "Contrite Tit For Tat"
- player = axl.ContriteTitForTat
- expected_classifier = {
- "memory_depth": 3,
- "stochastic": False,
- "makes_use_of": set(),
- "inspects_source": False,
- "manipulates_source": False,
- "manipulates_state": False,
- }
-
- deterministic_strategies = [
- s for s in axl.strategies if not axl.Classifiers["stochastic"](s())
- ]
-
- def test_init(self):
- ctft = self.player()
- self.assertFalse(ctft.contrite)
- self.assertEqual(ctft._recorded_history, [])
-
- @given(
- strategies=strategy_lists(strategies=deterministic_strategies, max_size=1),
- turns=integers(min_value=1, max_value=20),
- )
- def test_is_tit_for_tat_with_no_noise(self, strategies, turns):
- tft = axl.TitForTat()
- ctft = self.player()
- opponent = strategies[0]()
- m1 = axl.IpdMatch((tft, opponent), turns)
- m2 = axl.IpdMatch((ctft, opponent), turns)
- self.assertEqual(m1.play(), m2.play())
-
- def test_strategy_with_noise(self):
- ctft = self.player()
- opponent = axl.Defector()
- self.assertEqual(ctft.strategy(opponent), C)
- self.assertEqual(ctft._recorded_history, [C])
- ctft.reset() # Clear the recorded history
- self.assertEqual(ctft._recorded_history, [])
-
- random.seed(0)
- ctft.play(opponent, noise=0.9)
- self.assertEqual(ctft.history, [D])
- self.assertEqual(ctft._recorded_history, [C])
- self.assertEqual(opponent.history, [C])
-
- # After noise: is contrite
- ctft.play(opponent)
- self.assertEqual(ctft.history, [D, C])
- self.assertEqual(ctft._recorded_history, [C, C])
- self.assertEqual(opponent.history, [C, D])
- self.assertTrue(ctft.contrite)
-
- # Cooperates and no longer contrite
- ctft.play(opponent)
- self.assertEqual(ctft.history, [D, C, C])
- self.assertEqual(ctft._recorded_history, [C, C, C])
- self.assertEqual(opponent.history, [C, D, D])
- self.assertFalse(ctft.contrite)
-
- # Goes back to playing TFT
- ctft.play(opponent)
- self.assertEqual(ctft.history, [D, C, C, D])
- self.assertEqual(ctft._recorded_history, [C, C, C, D])
- self.assertEqual(opponent.history, [C, D, D, D])
- self.assertFalse(ctft.contrite)
-
-
-class TestAdaptiveTitForTat(TestPlayer):
- name = "Adaptive Tit For Tat: 0.5"
- player = axl.AdaptiveTitForTat
- expected_classifier 
= { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={"world": 0.34375, "rate": 0.5}, - ) - - -class TestSpitefulTitForTat(TestPlayer): - name = "Spiteful Tit For Tat" - player = axl.SpitefulTitForTat - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # Repeats last action of opponent history until 2 consecutive - # defections, then always defects - opponent = axl.MockPlayer(actions=[C, C, C, C]) - actions = [(C, C)] * 5 - self.versus_test( - opponent, expected_actions=actions, attrs={"retaliating": False} - ) - - opponent = axl.MockPlayer(actions=[C, C, C, C, D, C]) - actions = [(C, C)] * 4 + [(C, D), (D, C), (C, C)] - self.versus_test( - opponent, expected_actions=actions, attrs={"retaliating": False} - ) - - opponent = axl.MockPlayer(actions=[C, C, D, D, C]) - actions = [(C, C), (C, C), (C, D), (D, D), (D, C)] - self.versus_test( - opponent, expected_actions=actions, attrs={"retaliating": True} - ) - - -class TestSlowTitForTwoTats2(TestPlayer): - name = "Slow Tit For Two Tats 2" - player = axl.SlowTitForTwoTats2 - expected_classifier = { - "memory_depth": 2, - "stochastic": False, - "makes_use_of": set(), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - # If opponent plays the same move twice, repeats last action of - # opponent history, otherwise repeats previous move. 
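# --- Editor's sketch (illustrative, not part of the deleted file): the rule
# described above, using the module's C/D aliases; the helper name is
# hypothetical.
def _slow_tit_for_two_tats2_move_sketch(self, opponent):
    if not self.history:
        return C  # standard cooperative opening
    if len(opponent.history) >= 2 and opponent.history[-1] == opponent.history[-2]:
        return opponent.history[-1]  # two identical moves in a row: mirror them
    return self.history[-1]  # otherwise repeat our own previous move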
- opponent = axl.MockPlayer(actions=[C, C, D, D, C, D, D, C, C, D, D]) - actions = [ - (C, C), - (C, C), - (C, D), - (C, D), - (D, C), - (D, D), - (D, D), - (D, C), - (D, C), - (C, D), - (C, D), - ] - self.versus_test(opponent, expected_actions=actions) - - -class TestAlexei(TestPlayer): - """ - Tests for the Alexei strategy - """ - - name = "Alexei: (D,)" - player = axl.Alexei - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"length"}, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, C), (C, C), (C, C), (D, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions) - - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - match_attributes={"length": float("inf")}, - ) - - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (D, D)] - self.versus_test(axl.Alternator(), expected_actions=actions) - - opponent = axl.MockPlayer(actions=[C, C, D, D, C, D]) - actions = [(C, C), (C, C), (C, D), (D, D), (D, C), (D, D)] - self.versus_test(opponent, expected_actions=actions) - - -class TestEugineNier(TestPlayer): - """ - Tests for the Eugine Nier strategy - """ - - name = "EugineNier: (D,)" - player = axl.EugineNier - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": False, - "makes_use_of": {"length"}, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, C), (C, C), (D, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, attrs={"is_defector": False} - ) - - actions = [(C, C), (C, C), (C, C), (C, C)] - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - attrs={"is_defector": False}, - match_attributes={"length": float("inf")}, - ) - - # Plays TfT and defects in last round - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (D, D)] - self.versus_test( - axl.Alternator(), expected_actions=actions, attrs={"is_defector": False} - ) - - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={"is_defector": False}, - match_attributes={"length": float("inf")}, - ) - - # Becomes defector after 5 defections - opponent = axl.MockPlayer(actions=[D, C, D, D, D, D, C, C]) - actions = [(C, D), (D, C), (C, D), (D, D), (D, D), (D, D), (D, C), (D, C)] - self.versus_test(opponent, expected_actions=actions) - - -class TestNTitsForMTats(TestPlayer): - """ - Tests for the N Tit(s) For M Tat(s) strategy - """ - - name = "N Tit(s) For M Tat(s): 3, 2" - player = axl.NTitsForMTats - expected_classifier = { - "memory_depth": 3, - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - expected_class_classifier = copy.copy(expected_classifier) - - def test_strategy(self): - # TitForTat test_strategy - init_kwargs = {"N": 1, "M": 1} - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test( - axl.Alternator(), expected_actions=actions, init_kwargs=init_kwargs - ) - actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, init_kwargs=init_kwargs - ) - actions = [(C, D), (D, D), 
(D, D), (D, D), (D, D)] - self.versus_test( - axl.Defector(), expected_actions=actions, init_kwargs=init_kwargs - ) - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - match_attributes={"length": float("inf")}, - init_kwargs=init_kwargs, - ) - actions = [(C, D), (D, D), (D, C), (C, C), (C, D)] - self.versus_test( - axl.Random(), expected_actions=actions, seed=0, init_kwargs=init_kwargs - ) - actions = [(C, C), (C, D), (D, D), (D, C)] - self.versus_test( - axl.Random(), expected_actions=actions, seed=1, init_kwargs=init_kwargs - ) - opponent = axl.MockPlayer(actions=[C, D]) - actions = [(C, C), (C, D), (D, C), (C, D)] - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - opponent = axl.MockPlayer(actions=[C, C, D, D, C, D]) - actions = [(C, C), (C, C), (C, D), (D, D), (D, C), (C, D)] - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - # TitFor2Tats test_strategy - init_kwargs = {"N": 1, "M": 2} - opponent = axl.MockPlayer(actions=[D, D, D, C, C]) - actions = [(C, D), (C, D), (D, D), (D, C), (C, C), (C, D)] - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - - # TwoTitsForTat test_strategy - init_kwargs = {"N": 2, "M": 1} - opponent = axl.MockPlayer(actions=[D, C, C, D, C]) - actions = [(C, D), (D, C), (D, C), (C, D), (D, C)] - self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) - actions = [(C, C), (C, C)] - self.versus_test( - opponent=axl.Cooperator(), - expected_actions=actions, - init_kwargs=init_kwargs, - ) - actions = [(C, D), (D, D), (D, D)] - self.versus_test( - opponent=axl.Defector(), expected_actions=actions, init_kwargs=init_kwargs, - ) - - # Cooperator test_strategy - actions = [(C, C)] + [(C, D), (C, C)] * 9 - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - init_kwargs={"N": 0, "M": 1}, - ) - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - init_kwargs={"N": 0, "M": 5}, - ) - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - init_kwargs={"N": 0, "M": 0}, - ) - - # Defector test_strategy - actions = [(D, C)] + [(D, D), (D, C)] * 9 - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - init_kwargs={"N": 1, "M": 0}, - ) - self.versus_test( - opponent=axl.Alternator(), - expected_actions=actions, - init_kwargs={"N": 5, "M": 0}, - ) - - # Default init args - actions = [(C, C), (C, D), (C, D), (D, C), (D, C), (D, D), (C, C)] - opponent = axl.MockPlayer(actions=[acts[1] for acts in actions]) - self.versus_test(opponent=opponent, expected_actions=actions) - - def test_varying_memory_depth(self): - self.assertEqual(axl.Classifiers["memory_depth"](self.player(1, 1)), 1) - self.assertEqual(axl.Classifiers["memory_depth"](self.player(0, 3)), 3) - self.assertEqual(axl.Classifiers["memory_depth"](self.player(5, 3)), 5) - - -class TestMichaelos(TestPlayer): - """ - Tests for the Michaelos strategy - """ - - name = "Michaelos: (D,)" - player = axl.Michaelos - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": {"length"}, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - actions = [(C, C), (C, C), (C, C), (D, C)] - self.versus_test( - axl.Cooperator(), - expected_actions=actions, - attrs={"is_defector": False}, - seed=2, - ) - - actions = [(C, C), (C, C), (C, C), (C, C)] - 
self.versus_test( - axl.Cooperator(), - expected_actions=actions, - attrs={"is_defector": False}, - match_attributes={"length": float("inf")}, - seed=2, - ) - - actions = [(C, D), (D, D), (D, D), (D, D)] - self.versus_test( - axl.Defector(), - expected_actions=actions, - attrs={"is_defector": False}, - seed=2, - ) - - actions = [(C, D), (D, D), (D, D), (D, D)] - self.versus_test( - axl.Defector(), - expected_actions=actions, - attrs={"is_defector": False}, - match_attributes={"length": float("inf")}, - seed=2, - ) - - # Chance of becoming a defector is 50% after (D, C) occurs. - actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={"is_defector": False}, - seed=3, - ) - - actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={"is_defector": True}, - seed=2, - ) - - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (D, D), (D, C)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - attrs={"is_defector": True}, - match_attributes={"length": float("inf")}, - seed=1, - ) - - -class TestRandomTitForTat(TestPlayer): - """Tests for random tit for tat strategy.""" - - name = "Random Tit for Tat: 0.5" - player = axl.RandomTitForTat - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - """ - Test that strategy reacts to opponent, and controlled by - probability every other iteration. Also reacts randomly if no - probability input. - """ - actions = [(C, C), (C, C), (C, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 1} - ) - - actions = [(C, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions, init_kwargs={"p": 0}) - - actions = [(C, C), (C, C), (D, C), (C, C)] - self.versus_test( - axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 0} - ) - - actions = [(C, D), (D, D), (C, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions, init_kwargs={"p": 1}) - - actions = [(C, C), (C, C), (D, C), (C, C), (D, C), (C, C)] - self.versus_test(axl.Cooperator(), expected_actions=actions, seed=2) - - actions = [(C, D), (D, D), (C, D), (D, D), (D, D), (D, D)] - self.versus_test(axl.Defector(), expected_actions=actions, seed=1) - - def test_deterministic_classification(self): - """ - Test classification when probability input is 0 or 1. - Should change stochastic to false, because actions are no - longer random. 
-
- """
- for p in [0, 1]:
- player = axl.RandomTitForTat(p=p)
- self.assertFalse(axl.Classifiers["stochastic"](player))
diff --git a/axelrod/ipd/tests/strategies/test_verybad.py b/axelrod/ipd/tests/strategies/test_verybad.py
deleted file mode 100644
index b67545e97..000000000
--- a/axelrod/ipd/tests/strategies/test_verybad.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""Tests for the VeryBad strategy."""
-
-import axelrod as axl
-
-from .test_player import TestPlayer
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestVeryBad(TestPlayer):
-
- name = "VeryBad"
- player = axl.VeryBad
- expected_classifier = {
- "memory_depth": float("inf"),
- "stochastic": False,
- "makes_use_of": set(),
- "inspects_source": False,
- "manipulates_source": False,
- "manipulates_state": False,
- }
-
- def test_strategy(self):
- # axelrod.Defector -
- # cooperates for the first three, defects for the rest P(C) < .5
- self.versus_test(
- axl.Defector(), expected_actions=([(C, D)] * 3 + [(D, D)] * 7)
- )
-
- # axelrod.Cooperator -
- # cooperates for all, P(C) == 1
- self.versus_test(axl.Cooperator(), expected_actions=[(C, C)])
-
- expected_actions = [
- (C, C), # first three cooperate
- (C, D),
- (C, D),
- (D, C), # P(C) = .33
- (C, C), # P(C) = .5 (last move C)
- (C, D), # P(C) = .6
- (D, D), # P(C) = .5 (last move D)
- (D, D), # P(C) = .43
- (D, C), # P(C) = .375
- (D, D), # P(C) = .4
- ]
- mock_player = axl.MockPlayer(actions=[a[1] for a in expected_actions])
- self.versus_test(mock_player, expected_actions=expected_actions)
diff --git a/axelrod/ipd/tests/strategies/test_worse_and_worse.py b/axelrod/ipd/tests/strategies/test_worse_and_worse.py
deleted file mode 100644
index a402a5e15..000000000
--- a/axelrod/ipd/tests/strategies/test_worse_and_worse.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""Tests for the WorseAndWorse strategies."""
-
-import axelrod as axl
-
-from .test_player import TestPlayer
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestWorseAndWorse(TestPlayer):
-
- name = "Worse and Worse"
- player = axl.WorseAndWorse
- expected_classifier = {
- "memory_depth": float("inf"),
- "stochastic": True,
- "makes_use_of": set(),
- "long_run_time": False,
- "inspects_source": False,
- "manipulates_source": False,
- "manipulates_state": False,
- }
-
- def test_strategy(self):
- """Test that the strategy gives expected behaviour."""
- # 6 rounds of cooperation given this seed
- actions = [(C, C)] * 6 + [(D, C)] + [(C, C)] * 3
- self.versus_test(axl.Cooperator(), expected_actions=actions, seed=8)
-
- # 6 rounds of cooperation then a defection, no matter the opponent
- actions = [(C, D)] * 6 + [(D, D)] + [(C, D)] * 3
- self.versus_test(axl.Defector(), expected_actions=actions, seed=8)
-
-
-class TestWorseAndWorseRandom(TestPlayer):
-
- name = "Knowledgeable Worse and Worse"
- player = axl.KnowledgeableWorseAndWorse
- expected_classifier = {
- "memory_depth": float("inf"),
- "stochastic": True,
- "makes_use_of": set(["length"]),
- "long_run_time": False,
- "inspects_source": False,
- "manipulates_source": False,
- "manipulates_state": False,
- }
-
- def test_strategy(self):
- """Test that the strategy gives expected behaviour."""
- actions = [(C, C)] + [(D, C)] * 4
- self.versus_test(
- axl.Cooperator(),
- expected_actions=actions,
- match_attributes={"length": 5},
- seed=1,
- )
-
- # Test that behaviour does not depend on opponent
- actions = [(C, D)] + [(D, D)] * 4
- self.versus_test(
- axl.Defector(),
- expected_actions=actions,
- match_attributes={"length": 5},
- seed=1,
- )
-
- # Test that behaviour changes when it does not know the length.
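# --- Editor's note (illustrative, not part of the deleted file): the seed=
# arguments above pin down this stochastic strategy so the expected actions
# are reproducible. The same effect can be had directly, as other tests in
# this patch do, by seeding before constructing a match; turns=5 here is an
# arbitrary choice.
import axelrod as axl

axl.seed(1)
match = axl.IpdMatch((axl.KnowledgeableWorseAndWorse(), axl.Alternator()), turns=5)
match.play()  # deterministic given the seed above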
- actions = [(C, C), (C, D), (C, C), (C, D), (C, C)] - self.versus_test( - axl.Alternator(), - expected_actions=actions, - match_attributes={"length": -1}, - seed=1, - ) - - -class TestWorseAndWorse2(TestPlayer): - - name = "Worse and Worse 2" - player = axl.WorseAndWorse2 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - """Test that the strategy gives expected behaviour.""" - - # Test next move matches opponent - actions = [(C, C)] * 19 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - actions = [(C, C), (C, C), (C, D), (D, C)] - self.versus_test( - opponent=axl.MockPlayer(actions=[C, C, D, C]), expected_actions=actions - ) - - actions = [(C, C)] * 18 + [(C, D), (D, C)] - self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 18 + [D, C]), - expected_actions=actions, - ) - - # After round 20, strategy follows stochastic behavior given a seed - actions = [(C, C)] * 20 + [(C, D), (D, C), (C, C), (C, D)] - self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 20 + [D, C, C, D]), - expected_actions=actions, - seed=8, - ) - - actions = [(C, C)] * 20 + [(D, D), (D, C)] + [(C, C)] * 2 + [(D, C)] - self.versus_test( - opponent=axl.MockPlayer(actions=[C] * 20 + [D, C, C, C]), - expected_actions=actions, - seed=2, - ) - - -class TestWorseAndWorse3(TestPlayer): - - name = "Worse and Worse 3" - player = axl.WorseAndWorse3 - expected_classifier = { - "memory_depth": float("inf"), - "stochastic": True, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_strategy(self): - """Test that the strategy gives expected behaviour.""" - # Test that if opponent only defects, strategy also defects - actions = [(C, D)] + [(D, D)] * 4 - self.versus_test(axl.Defector(), expected_actions=actions) - - # Test that if opponent only cooperates, strategy also cooperates - actions = [(C, C)] * 5 - self.versus_test(axl.Cooperator(), expected_actions=actions) - - # Test that given a non 0/1 probability of defecting, strategy follows - # stochastic behaviour, given a seed - actions = [(C, C), (C, D), (C, C), (D, D), (C, C), (D, C)] - self.versus_test( - axl.MockPlayer(actions=[C, D, C, D, C]), - expected_actions=actions, - seed=8, - ) diff --git a/axelrod/ipd/tests/strategies/test_zero_determinant.py b/axelrod/ipd/tests/strategies/test_zero_determinant.py deleted file mode 100644 index f5a9b74bf..000000000 --- a/axelrod/ipd/tests/strategies/test_zero_determinant.py +++ /dev/null @@ -1,319 +0,0 @@ -"""Tests for the Zero Determinant strategies.""" - -import unittest - -import axelrod as axl -from axelrod.ipd.game import DefaultGame -from axelrod.ipd.strategies.zero_determinant import LRPlayer - -from .test_player import TestPlayer, test_four_vector - -C, D = axl.Action.C, axl.Action.D - - -class TestLRPlayer(unittest.TestCase): - def test_exception(self): - with self.assertRaises(ValueError): - LRPlayer(0, 0, -float("inf")) - - -class TestZDExtortion(TestPlayer): - - name = "ZD-Extortion: 0.2, 0.1, 1" - player = axl.ZDExtortion - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = 
{(C, C): 0.64, (C, D): 0.18, (D, C): 0.28, (D, D): 0} - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=3 - ) - - actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=6) - - -class TestZDExtort2(TestPlayer): - - name = "ZD-Extort-2: 0.1111111111111111, 0.5" - player = axl.ZDExtort2 - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = {(C, C): 8 / 9, (C, D): 0.5, (D, C): 1 / 3, (D, D): 0.0} - test_four_vector(self, expected_dictionary) - - def test_receive_match_attributes(self): - player = self.player() - R, P, S, T = DefaultGame.RPST() - self.assertEqual(player.l, P) - - def test_strategy(self): - actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=2 - ) - - actions = [(C, C), (C, D), (C, C), (C, D), (D, C), (C, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=31 - ) - - actions = [(C, D), (D, C), (D, D), (D, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2) - - actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=31) - - -class TestZDExtort2v2(TestPlayer): - - name = "ZD-Extort-2 v2: 0.125, 0.5, 1" - player = axl.ZDExtort2v2 - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = { - (C, C): 7 / 8, - (C, D): 7 / 16, - (D, C): 3 / 8, - (D, D): 0.0, - } - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=2 - ) - - actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5) - - -class TestZDExtort3(TestPlayer): - name = "ZD-Extort3: 0.11538461538461539, 0.3333333333333333, 1" - player = axl.ZDExtort3 - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = { - (C, C): 11 / 13, - (C, D): 1 / 2, - (D, C): 7 / 26, - (D, D): 0, - } - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)] - - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=3 - ) - - actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=6) - - -class TestZDExtort4(TestPlayer): - - name = "ZD-Extort-4: 0.23529411764705882, 0.25, 1" - player = axl.ZDExtort4 - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - 
"long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = {(C, C): 11 / 17, (C, D): 0, (D, C): 8 / 17, (D, D): 0.0} - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=2 - ) - - actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5) - - -class TestZDGen2(TestPlayer): - - name = "ZD-GEN-2: 0.125, 0.5, 3" - player = axl.ZDGen2 - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = {(C, C): 1, (C, D): 9 / 16, (D, C): 1 / 2, (D, D): 1 / 8} - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - - actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=2 - ) - - actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=31 - ) - - actions = [(C, D), (D, C), (D, D), (C, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2) - - actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=31) - - -class TestZDGTFT2(TestPlayer): - - name = "ZD-GTFT-2: 0.25, 0.5" - player = axl.ZDGTFT2 - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = {(C, C): 1.0, (C, D): 1 / 8, (D, C): 1.0, (D, D): 0.25} - test_four_vector(self, expected_dictionary) - - def test_receive_match_attributes(self): - player = self.player() - R, P, S, T = DefaultGame.RPST() - self.assertEqual(player.l, R) - - def test_strategy(self): - actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=2 - ) - - actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=31 - ) - - actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2) - - actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=31) - - -class TestZDMischief(TestPlayer): - - name = "ZD-Mischief: 0.1, 0.0, 1" - player = axl.ZDMischief - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = {(C, C): 0.8, (C, D): 0.6, (D, C): 0.1, (D, D): 0} - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=2 - ) - - actions = [(C, D), (D, C), 
(D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5) - - -class TestZDSet2(TestPlayer): - - name = "ZD-SET-2: 0.25, 0.0, 2" - player = axl.ZDSet2 - expected_classifier = { - "memory_depth": 1, - "stochastic": True, - "makes_use_of": set(["game"]), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - def test_four_vector(self): - expected_dictionary = { - (C, C): 3 / 4, - (C, D): 1 / 4, - (D, C): 1 / 2, - (D, D): 1 / 4, - } - test_four_vector(self, expected_dictionary) - - def test_strategy(self): - actions = [(C, C), (D, D), (D, C), (C, D), (C, C), (D, D)] - self.versus_test( - opponent=axl.Alternator(), expected_actions=actions, seed=2 - ) - - actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] - self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5) diff --git a/axelrod/ipd/tests/unit/__init__.py b/axelrod/ipd/tests/unit/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/axelrod/ipd/tests/unit/test_actions.py b/axelrod/ipd/tests/unit/test_actions.py deleted file mode 100644 index 1d8d8b283..000000000 --- a/axelrod/ipd/tests/unit/test_actions.py +++ /dev/null @@ -1,64 +0,0 @@ -import unittest - -import axelrod as axl -from axelrod.ipd.action import UnknownActionError, actions_to_str, str_to_actions - -C, D = axl.Action.C, axl.Action.D - - -class TestAction(unittest.TestCase): - def test_lt(self): - self.assertLess(C, D) - - def test_repr(self): - self.assertEqual(repr(C), "C") - self.assertEqual(repr(D), "D") - - def test_str(self): - self.assertEqual(str(C), "C") - self.assertEqual(str(D), "D") - - def test__eq__(self): - self.assertTrue(C == C) - self.assertTrue(D == D) - self.assertFalse(C == D) - self.assertFalse(D == C) - - def test_total_order(self): - actions = [C, D, D, C, C, C, D] - actions.sort() - self.assertEqual(actions, [C, C, C, C, D, D, D]) - - def test_flip(self): - self.assertEqual(C.flip(), D) - self.assertEqual(D.flip(), C) - - def test_from_char(self): - self.assertEqual(axl.Action.from_char("C"), C) - self.assertEqual(axl.Action.from_char("D"), D) - - def test_from_char_error(self): - self.assertRaises(UnknownActionError, axl.Action.from_char, "") - self.assertRaises(UnknownActionError, axl.Action.from_char, "c") - self.assertRaises(UnknownActionError, axl.Action.from_char, "d") - self.assertRaises(UnknownActionError, axl.Action.from_char, "A") - self.assertRaises(UnknownActionError, axl.Action.from_char, "CC") - - def test_str_to_actions(self): - self.assertEqual(str_to_actions(""), ()) - self.assertEqual(str_to_actions("C"), (C,)) - self.assertEqual(str_to_actions("CDDC"), (C, D, D, C)) - - def test_str_to_actions_fails_fast_and_raises_value_error(self): - self.assertRaises(UnknownActionError, str_to_actions, "Cc") - - def test_actions_to_str(self): - self.assertEqual(actions_to_str([]), "") - self.assertEqual(actions_to_str([C]), "C") - self.assertEqual(actions_to_str([C, D, C]), "CDC") - self.assertEqual(actions_to_str((C, C, D)), "CCD") - - def test_actions_to_str_with_iterable(self): - self.assertEqual(actions_to_str(iter([C, D, C])), "CDC") - generator = (action for action in [C, D, C]) - self.assertEqual(actions_to_str(generator), "CDC") diff --git a/axelrod/ipd/tests/unit/test_classification.py b/axelrod/ipd/tests/unit/test_classification.py deleted file mode 100644 index 3ce32ca66..000000000 --- a/axelrod/ipd/tests/unit/test_classification.py +++ /dev/null @@ -1,356 +0,0 @@ 
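# [Editor's aside -- illustration only, not part of this patch] The expected
# four-vectors in the deleted test_zero_determinant.py above all follow from
# the (phi, s, l) values embedded in each player's name, assuming the usual
# Press-Dyson parameterisation; the formula below reproduces the dictionaries
# asserted in those tests. The helper name is hypothetical.
def zd_four_vector(phi, s, l, R=3, P=1, S=0, T=5):
    """Cooperation probabilities after (C, C), (C, D), (D, C) and (D, D)."""
    p1 = 1 - phi * (1 - s) * (R - l)
    p2 = 1 - phi * (s * (l - S) + (T - l))
    p3 = phi * ((l - S) + s * (T - l))
    p4 = phi * (1 - s) * (l - P)
    return p1, p2, p3, p4

# For example, ZD-GTFT-2 (phi=0.25, s=0.5, l=R=3) gives the asserted
# {(C, C): 1.0, (C, D): 1 / 8, (D, C): 1.0, (D, D): 0.25}:
assert zd_four_vector(0.25, 0.5, 3) == (1.0, 0.125, 1.0, 0.25)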
-"""Tests for the classification.""" - -import os -import unittest -from typing import Any, Text -import warnings -import yaml - -import axelrod as axl -from axelrod.ipd.classifier import ( - Classifier, - Classifiers, - _Classifiers, - memory_depth, - rebuild_classifier_table, -) -from axelrod.ipd.player import IpdPlayer - - -class TitForTatWithEmptyClassifier(IpdPlayer): - """ - Same name as TitForTat, but with empty classifier. - """ - - # Classifiers are looked up by name, so only the name matters. - name = "Tit For Tat" - classifier = {} - - -class TitForTatWithNonTrivialInitialzer(IpdPlayer): - """ - Same name as TitForTat, but with empty classifier. - """ - - def __init__(self, x: Any): - pass # pragma: no cover - - # Classifiers are looked up by name, so only the name matters. - name = "Tit For Tat" - classifier = {} - - -class TestClassification(unittest.TestCase): - def setUp(self) -> None: - # Ignore warnings about classifiers running on instances - warnings.simplefilter("ignore", category=UserWarning) - - def tearDown(self) -> None: - warnings.simplefilter("default", category=UserWarning) - - def test_classifier_build(self): - dirname = os.path.dirname(__file__) - test_path = os.path.join(dirname, "../../../../test_outputs/classifier_test.yaml") - - # Just returns the name of the player. For testing. - name_classifier = Classifier[Text]("name", lambda player: player.name) - rebuild_classifier_table( - classifiers=[name_classifier], - players=[axl.Cooperator, axl.Defector], - path=test_path, - ) - - filename = os.path.join("../..", test_path) - with open(filename, "r") as f: - all_player_dicts = yaml.load(f, Loader=yaml.FullLoader) - - self.assertDictEqual( - all_player_dicts, - {"Cooperator": {"name": "Cooperator"}, "Defector": {"name": "Defector"}}, - ) - - def test_singletonity_of_classifiers_class(self): - classifiers_1 = _Classifiers() - classifiers_2 = _Classifiers() - - self.assertIs(classifiers_1, classifiers_2) - - def test_get_name_from_classifier(self): - # Should be able to take a string or a Classifier instance. 
-        self.assertEqual(Classifiers["memory_depth"](axl.TitForTat()), 1)
-        self.assertEqual(Classifiers[memory_depth](axl.TitForTat()), 1)
-
-    def test_classifier_works_on_non_instances(self):
-        warnings.simplefilter("default", category=UserWarning)
-        with warnings.catch_warnings(record=True) as w:
-            self.assertEqual(Classifiers["memory_depth"](axl.TitForTat), 1)
-            self.assertEqual(len(w), 1)
-
-    def test_key_error_on_unknown_classifier(self):
-        with self.assertRaises(KeyError):
-            Classifiers["invalid_key"](axl.TitForTat)
-
-    def test_will_lookup_key_in_dict(self):
-        self.assertEqual(Classifiers["memory_depth"](TitForTatWithEmptyClassifier), 1)
-
-    def test_will_lookup_key_for_classes_that_cant_init(self):
-        with self.assertRaises(Exception) as exptn:
-            Classifiers["memory_depth"](TitForTatWithNonTrivialInitializer)
-        self.assertEqual(
-            str(exptn.exception),
-            "Passed player class doesn't have a trivial initializer.",
-        )
-
-    def test_known_classifiers(self):
-        # A set of dimensions that are known to have been fully applied
-        known_keys = [
-            "stochastic",
-            "memory_depth",
-            "long_run_time",
-            "inspects_source",
-            "manipulates_source",
-            "manipulates_state",
-        ]
-
-        for s in axl.all_strategies:
-            s = s()
-            self.assertTrue(None not in [Classifiers[key](s) for key in known_keys])
-
-    def test_multiple_instances(self):
-        """Certain instances of classes of strategies will have different
-        classifiers based on the initialisation variables"""
-        P1 = axl.MemoryOnePlayer(four_vector=(0.5, 0.5, 0.5, 0.5))
-        P2 = axl.MemoryOnePlayer(four_vector=(1, 0, 0, 1))
-        self.assertNotEqual(P1.classifier, P2.classifier)
-
-        P1 = axl.FirstByJoss()
-        P2 = axl.FirstByJoss(p=0)
-        self.assertNotEqual(P1.classifier, P2.classifier)
-
-        P1 = axl.GTFT(p=1)
-        P2 = axl.GTFT(p=0.5)
-        self.assertNotEqual(P1.classifier, P2.classifier)
-
-        P1 = axl.StochasticWSLS()
-        P2 = axl.StochasticWSLS(ep=0)
-        self.assertNotEqual(P1.classifier, P2.classifier)
-
-        P1 = axl.GoByMajority(memory_depth=5)
-        P2 = axl.StochasticWSLS(ep=0.1)
-        self.assertNotEqual(P1.classifier, P2.classifier)
-
-    def test_manipulation_of_classifier(self):
-        """Test that we can change the classifier of an instance without
-        changing the classifier of the class"""
-        player = axl.Cooperator()
-        player.classifier["memory_depth"] += 1
-        self.assertNotEqual(player.classifier, axl.Cooperator.classifier)
-        player = axl.Defector()
-        player.classifier["memory_depth"] += 1
-        self.assertNotEqual(player.classifier, axl.Defector.classifier)
-
-    def test_obey_axelrod(self):
-        """A test that verifies if the obey_axelrod function works correctly"""
-        known_cheaters = [
-            axl.Darwin,
-            axl.Geller,
-            axl.GellerCooperator,
-            axl.GellerDefector,
-            axl.MindBender,
-            axl.MindController,
-            axl.MindWarper,
-            axl.MindReader,
-        ]
-
-        known_basic = [
-            axl.Alternator,
-            axl.AntiTitForTat,
-            axl.Bully,
-            axl.Cooperator,
-            axl.Defector,
-            axl.GoByMajority,
-            axl.SuspiciousTitForTat,
-            axl.TitForTat,
-            axl.WinStayLoseShift,
-        ]
-
-        known_ordinary = [
-            axl.AverageCopier,
-            axl.ForgivingTitForTat,
-            axl.GoByMajority20,
-            axl.GTFT,
-            axl.Grudger,
-            axl.Inverse,
-            axl.Random,
-        ]
-
-        for strategy in known_cheaters:
-            self.assertFalse(axl.Classifiers.obey_axelrod(strategy()), msg=strategy)
-
-        for strategy in known_basic:
-            self.assertTrue(axl.Classifiers.obey_axelrod(strategy()), msg=strategy)
-
-        for strategy in known_ordinary:
-            self.assertTrue(axl.Classifiers.obey_axelrod(strategy()), msg=strategy)
-
-    def test_is_basic(self):
-        """A test that verifies if the is_basic function works correctly"""
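# [Editor's aside -- illustrative sketch, not part of this patch] The
# partitions asserted in test_obey_axelrod above, and in the known_* lists
# that follow, can be reproduced directly from the same two predicates:
cheaters = [s for s in axl.all_strategies if not axl.Classifiers.obey_axelrod(s())]
basic = [s for s in axl.all_strategies if axl.Classifiers.is_basic(s())]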
known_cheaters = [ - axl.Darwin, - axl.Geller, - axl.GellerCooperator, - axl.GellerDefector, - axl.MindBender, - axl.MindController, - axl.MindWarper, - axl.MindReader, - ] - - known_basic = [ - axl.Alternator, - axl.AntiTitForTat, - axl.Bully, - axl.Cooperator, - axl.Defector, - axl.SuspiciousTitForTat, - axl.TitForTat, - axl.WinStayLoseShift, - ] - - known_ordinary = [ - axl.AverageCopier, - axl.ForgivingTitForTat, - axl.GoByMajority20, - axl.GTFT, - axl.Inverse, - axl.Random, - ] - - for strategy in known_cheaters: - self.assertFalse(axl.Classifiers.is_basic(strategy()), msg=strategy) - - for strategy in known_basic: - self.assertTrue(axl.Classifiers.is_basic(strategy()), msg=strategy) - - for strategy in known_ordinary: - self.assertFalse(axl.Classifiers.is_basic(strategy()), msg=strategy) - - -def str_reps(xs): - """Maps a collection of player classes to their string representations.""" - return set(map(str, [x() for x in xs])) - - -class TestStrategies(unittest.TestCase): - def setUp(self) -> None: - # Ignore warnings about classifiers running on instances. We want to - # allow this for some of the map functions. - warnings.simplefilter("ignore", category=UserWarning) - - def tearDown(self) -> None: - warnings.simplefilter("default", category=UserWarning) - - def test_strategy_list(self): - for strategy_list in [ - "all_strategies", - "demo_strategies", - "basic_strategies", - "long_run_time_strategies", - "strategies", - "ordinary_strategies", - "cheating_strategies", - ]: - self.assertTrue(hasattr(axl, strategy_list)) - - def test_lists_not_empty(self): - for strategy_list in [ - axl.all_strategies, - axl.demo_strategies, - axl.basic_strategies, - axl.long_run_time_strategies, - axl.strategies, - axl.ordinary_strategies, - axl.cheating_strategies, - ]: - self.assertTrue(len(strategy_list) > 0) - - def test_inclusion_of_strategy_lists(self): - all_strategies_set = set(axl.all_strategies) - for strategy_list in [ - axl.demo_strategies, - axl.basic_strategies, - axl.long_run_time_strategies, - axl.strategies, - axl.ordinary_strategies, - axl.cheating_strategies, - ]: - self.assertTrue( - str_reps(strategy_list).issubset(str_reps(all_strategies_set)) - ) - - strategies_set = set(axl.strategies) - for strategy_list in [ - axl.demo_strategies, - axl.basic_strategies, - axl.long_run_time_strategies, - ]: - self.assertTrue(str_reps(strategy_list).issubset(str_reps(strategies_set))) - - def test_long_run_strategies(self): - long_run_time_strategies = [ - axl.DBS, - axl.MetaMajority, - axl.MetaMajorityFiniteMemory, - axl.MetaMajorityLongMemory, - axl.MetaMinority, - axl.MetaMixer, - axl.MetaWinner, - axl.MetaWinnerDeterministic, - axl.MetaWinnerEnsemble, - axl.MetaWinnerFiniteMemory, - axl.MetaWinnerLongMemory, - axl.MetaWinnerStochastic, - axl.NMWEDeterministic, - axl.NMWEFiniteMemory, - axl.NMWELongMemory, - axl.NMWEStochastic, - axl.NiceMetaWinner, - axl.NiceMetaWinnerEnsemble, - ] - - self.assertEqual( - str_reps(long_run_time_strategies), str_reps(axl.long_run_time_strategies) - ) - self.assertTrue( - all(map(Classifiers["long_run_time"], axl.long_run_time_strategies)) - ) - - def test_short_run_strategies(self): - short_run_time_strategies = [ - s for s in axl.strategies if s not in axl.long_run_time_strategies - ] - - self.assertEqual( - str_reps(short_run_time_strategies), str_reps(axl.short_run_time_strategies) - ) - self.assertFalse( - any(map(Classifiers["long_run_time"], axl.short_run_time_strategies)) - ) - - def test_meta_inclusion(self): - 
self.assertTrue(str(axl.MetaMajority()) in str_reps(axl.strategies))
-
-        self.assertTrue(str(axl.MetaHunter()) in str_reps(axl.strategies))
-        self.assertFalse(
-            str(axl.MetaHunter()) in str_reps(axl.long_run_time_strategies)
-        )
-
-    def test_demo_strategies(self):
-        demo_strategies = [
-            axl.Cooperator,
-            axl.Defector,
-            axl.TitForTat,
-            axl.Grudger,
-            axl.Random,
-        ]
-        self.assertEqual(str_reps(demo_strategies), str_reps(axl.demo_strategies))
diff --git a/axelrod/ipd/tests/unit/test_compute_finite_state_machine_memory.py b/axelrod/ipd/tests/unit/test_compute_finite_state_machine_memory.py
deleted file mode 100644
index 7f721c0f6..000000000
--- a/axelrod/ipd/tests/unit/test_compute_finite_state_machine_memory.py
+++ /dev/null
@@ -1,350 +0,0 @@
-"""Tests for Compute FSM Memory."""
-
-import unittest
-
-import axelrod as axl
-from axelrod.ipd.compute_finite_state_machine_memory import *
-
-C, D = axl.Action.C, axl.Action.D
-
-
-class TestOrderedMemitTuple(unittest.TestCase):
-    def memits_completely_equal(self, x, y):
-        """If the state and the actions are equal."""
-        return x.state == y.state and x == y
-
-    def memit_tuple_equal(self, x_tuple, y_tuple):
-        """If the memits are the same in the same order."""
-        return self.memits_completely_equal(
-            x_tuple[0], y_tuple[0]
-        ) and self.memits_completely_equal(x_tuple[1], y_tuple[1])
-
-    def test_provided_ascending_order(self):
-        memit_c1c = Memit(C, 1, C)
-        memit_c2c = Memit(C, 2, C)
-
-        actual_tuple = ordered_memit_tuple(memit_c1c, memit_c2c)
-        expected_tuple = (memit_c1c, memit_c2c)
-
-        self.assertTrue(self.memit_tuple_equal(actual_tuple, expected_tuple))
-
-    def test_provided_descending_order(self):
-        memit_c1c = Memit(C, 1, C)
-        memit_c2c = Memit(C, 2, C)
-
-        actual_tuple = ordered_memit_tuple(memit_c2c, memit_c1c)
-        expected_tuple = (memit_c1c, memit_c2c)
-
-        self.assertTrue(self.memit_tuple_equal(actual_tuple, expected_tuple))
-
-    def test_order_on_actions(self):
-        memit_c9c = Memit(C, 9, C)
-        memit_c9d = Memit(C, 9, D)
-
-        actual_tuple = ordered_memit_tuple(memit_c9d, memit_c9c)
-        expected_tuple = (memit_c9c, memit_c9d)
-
-        self.assertTrue(self.memit_tuple_equal(actual_tuple, expected_tuple))
-
-
-class TestGetMemoryFromTransitions(unittest.TestCase):
-    def transitions_to_dict(self, transitions):
-        return {
-            (current_state, input_action): (next_state, output_action)
-            for current_state, input_action, next_state, output_action in transitions
-        }
-
-    def test_cooperator(self):
-        transitions = ((0, C, 0, C), (0, D, 0, C))
-
-        trans_dict = self.transitions_to_dict(transitions)
-        self.assertEqual(get_memory_from_transitions(trans_dict), 0)
-
-    def test_tit_for_tat(self):
-        transitions = ((0, C, 0, C), (0, D, 0, D))
-
-        trans_dict = self.transitions_to_dict(transitions)
-        self.assertEqual(get_memory_from_transitions(trans_dict), 1)
-
-    def test_two_state_memory_two(self):
-        """All D lead to state 0 and all C lead to state 1. We make it so
-        that all paths out of state 0 play Cooperator and state 1 plays
-        Defector.
-        In this case, we must know what state we're in to know how to respond
-        to the opponent's previous action, but we cannot determine that from
-        our own previous action; we must look at the opponent's action from
-        two turns ago.
-        """
-        transitions = ((0, C, 0, C), (0, D, 1, C), (1, C, 0, D), (1, D, 1, D))
-
-        trans_dict = self.transitions_to_dict(transitions)
-        self.assertEqual(get_memory_from_transitions(trans_dict), 2)
-
-    def test_two_state_tft(self):
-        """Same case as above, but this time our own last action tells us which
-        state we're in. In fact, this strategy is exactly TFT.
-        """
-        transitions = ((0, C, 0, C), (0, D, 1, D), (1, C, 0, C), (1, D, 1, D))
-
-        trans_dict = self.transitions_to_dict(transitions)
-        self.assertEqual(get_memory_from_transitions(trans_dict), 1)
-
-    def test_three_state_tft(self):
-        """Tit-for-tat again, but using three states, and a complex web of
-        transitions between them.
-        """
-        transitions = (
-            (0, C, 1, C),
-            (0, D, 1, D),
-            (1, C, 2, C),
-            (1, D, 0, D),
-            (2, C, 0, C),
-            (2, D, 2, D)
-        )
-
-        trans_dict = self.transitions_to_dict(transitions)
-        self.assertEqual(get_memory_from_transitions(trans_dict), 1)
-
-    def test_two_state_inf_memory(self):
-        """A C will cause the FSM to stay in the same state, and a D causes it
-        to change states. Will always respond to a C with a C. Will respond to
-        a D with a C in state 0, but with a D in state 1.
-        So we need to know the state to know how to respond to a D. But since
-        an arbitrarily long sequence of C/C may occur, we need infinite memory.
-        """
-        transitions = ((0, C, 0, C), (0, D, 1, C), (1, C, 1, C), (1, D, 0, D))
-
-        trans_dict = self.transitions_to_dict(transitions)
-        self.assertEqual(get_memory_from_transitions(trans_dict), float("inf"))
-
-    def test_four_state_memory_two(self):
-        """Same as the two_state_memory_two test above, but we use two copies,
-        stitched together.
-        """
-        transitions = (
-            (0, C, 0, C),
-            (0, D, 1, C),
-            (1, C, 2, D),
-            (1, D, 1, D),
-            (2, C, 2, C),
-            (2, D, 3, C),
-            (3, C, 0, D),
-            (3, D, 3, D),
-        )
-
-        trans_dict = self.transitions_to_dict(transitions)
-        self.assertEqual(get_memory_from_transitions(trans_dict), 2)
-
-    def test_tit_for_two_tat(self):
-        """This strategy does the same thing until the opponent does the same
-        action twice; then it responds in kind. In the FSM implementation, we
-        let states 1 and 2 be the cooperating states, with state 2 being the
-        state after one opponent defection. And states 3 and 4 are the
-        defecting states, with state 4 after 1 opponent cooperation.
-        The memory should be two, because if the last two moves don't match,
-        then we can look to see what we did in the last move. If they do match,
-        then we can respond in kind.
-        """
-        transitions = (
-            (1, C, 1, C),
-            (1, D, 2, C),
-            (2, C, 1, C),
-            (2, D, 3, D),
-            (3, C, 4, D),
-            (3, D, 3, D),
-            (4, C, 1, C),
-            (4, D, 3, D),
-        )
-
-        trans_dict = self.transitions_to_dict(transitions)
-        self.assertEqual(get_memory_from_transitions(trans_dict), 2)
-
-    def test_tit_for_five_tat(self):
-        """Analogous to tit for two tat above.
-        """
-        transitions = (
-            (1, C, 1, C),
-            (1, D, 2, C),
-            (2, C, 1, C),
-            (2, D, 3, C),
-            (3, C, 1, C),
-            (3, D, 4, C),
-            (4, C, 1, C),
-            (4, D, 5, C),
-            (5, C, 1, C),
-            (5, D, 6, D),
-            (6, C, 6, D),
-            (6, D, 7, D),
-            (7, C, 6, D),
-            (7, D, 8, D),
-            (8, C, 6, D),
-            (8, D, 9, D),
-            (9, C, 6, D),
-            (9, D, 10, D),
-            (10, C, 6, D),
-            (10, D, 1, C),
-        )
-
-        trans_dict = self.transitions_to_dict(transitions)
-        self.assertEqual(get_memory_from_transitions(trans_dict), 5)
-
-    def test_fortress_3(self):
-        """Tests Fortress-3, which Defects unless the opponent D twice in a row.
-        In that case it plays C, and continues to C for as long as the opponent
-        does.
-        We know we're in state 3 if our own previous move was a C. Otherwise, C
-        if and only if the opponent's previous two moves were D. [Unless we
-        were in state 3 last turn, in which case we would have C'd two turns
-        ago.]
-        So the memory should be 2.
- """ - transitions = ( - (1, C, 1, D), - (1, D, 2, D), - (2, C, 1, D), - (2, D, 3, C), - (3, C, 3, C), - (3, D, 1, D), - ) - - trans_dict = self.transitions_to_dict(transitions) - self.assertEqual(get_memory_from_transitions(trans_dict), 2) - - def test_fortress_4(self): - """Tests Fortress-4. Should have memory=3 for same logic that - Fortress-3 should have memory=2. - """ - transitions = ( - (1, C, 1, D), - (1, D, 2, D), - (2, C, 1, D), - (2, D, 3, D), - (3, C, 1, D), - (3, D, 4, C), - (4, C, 3, C), - (4, D, 1, D), - ) - - trans_dict = self.transitions_to_dict(transitions) - self.assertEqual(get_memory_from_transitions(trans_dict), 3) - - def test_complex_cooperator(self): - """Tests a cooperator with lots of states and transitions. - """ - transitions = ( - (0, C, 0, C), - (0, D, 1, C), - (1, C, 2, C), - (1, D, 3, C), - (2, C, 4, C), - (2, D, 3, C), - (3, C, 5, C), - (3, D, 4, C), - (4, C, 2, C), - (4, D, 6, C), - (5, C, 7, C), - (5, D, 3, C), - (6, C, 7, C), - (6, D, 7, C), - (7, C, 8, C), - (7, D, 7, C), - (8, C, 8, C), - (8, D, 6, C), - ) - - trans_dict = self.transitions_to_dict(transitions) - self.assertEqual(get_memory_from_transitions(trans_dict), 0) - - def test_disconnected_graph(self): - """Test two disjoint versions of Fortress3, with initial_state.""" - transitions = ( - (1, C, 1, D), - (1, D, 2, D), - (2, C, 1, D), - (2, D, 3, C), - (3, C, 3, C), - (3, D, 1, D), - (4, C, 4, D), - (4, D, 5, D), - (5, C, 4, D), - (5, D, 6, C), - (6, C, 6, C), - (6, D, 4, D), - ) - - trans_dict = self.transitions_to_dict(transitions) - self.assertEqual( - get_memory_from_transitions(trans_dict, initial_state=1), 2 - ) - - def test_transient_state(self): - """Test a setup where we a transient state (no incoming transitions) - goes into a Fortress3 (and D) if the opponent D, and goes into a - Cooperator if the opponent C. - The transient state is state 0. Fortress3 starts at state 1. And - the Cooperator is state 4. - """ - transitions = ( - (0, C, 4, C), - (0, D, 1, D), - (1, C, 1, D), - (1, D, 2, D), - (2, C, 1, D), - (2, D, 3, C), - (3, C, 3, C), - (3, D, 1, D), - (4, C, 4, C), - (4, D, 4, C), - ) - - trans_dict = self.transitions_to_dict(transitions) - # If starting in state 4, then treat like Cooperator - self.assertEqual( - get_memory_from_transitions(trans_dict, initial_state=4), 0 - ) - # Start in state 1, then a Fortress3. - self.assertEqual( - get_memory_from_transitions(trans_dict, initial_state=1), 2 - ) - - def test_infinite_memory_transient_state(self): - """A transient state at 0, which goes into either a Cooperator or a TFT. - Because an arbitrarily-long chain of C/C may exist, we would need a - infinite memory to determine which state we're in, so that we know how - to respond to a D. - """ - transitions = ( - (0, C, 1, C), - (0, D, 2, D), - (1, C, 1, C), - (1, D, 1, C), - (2, C, 2, C), - (2, D, 2, D), - ) - - trans_dict = self.transitions_to_dict(transitions) - self.assertEqual( - get_memory_from_transitions(trans_dict, initial_state=0), - float("inf"), - ) - - self.assertEqual( - get_memory_from_transitions(trans_dict, initial_state=2), 1 - ) - - def test_evolved_fsm_4(self): - """This should be infinite memory because the C/D self-loop at state 2 - and state 3. 
- """ - transitions = ( - (0, C, 0, C), - (0, D, 2, D), - (1, C, 3, D), - (1, D, 0, C), - (2, C, 2, D), - (2, D, 1, C), - (3, C, 3, D), - (3, D, 1, D), - ) - - trans_dict = self.transitions_to_dict(transitions) - self.assertEqual(get_memory_from_transitions(trans_dict), float("inf")) - diff --git a/axelrod/ipd/tests/unit/test_deterministic_cache.py b/axelrod/ipd/tests/unit/test_deterministic_cache.py deleted file mode 100644 index 2c3518030..000000000 --- a/axelrod/ipd/tests/unit/test_deterministic_cache.py +++ /dev/null @@ -1,111 +0,0 @@ -import unittest -import os -import pathlib -import pickle - -import axelrod as axl -from axelrod.ipd.load_data_ import axl_filename - -C, D = axl.Action.C, axl.Action.D - - -class TestDeterministicCache(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.test_key = (axl.TitForTat(), axl.Defector()) - cls.test_value = [(C, D), (D, D), (D, D)] - save_path = pathlib.Path("../test_outputs/test_cache_save.txt") - cls.test_save_file = axl_filename(save_path) - load_path = pathlib.Path("../test_outputs/test_cache_load.txt") - cls.test_load_file = axl_filename(load_path) - test_data_to_pickle = {("Tit For Tat", "Defector"): [(C, D), (D, D), (D, D)]} - cls.test_pickle = pickle.dumps(test_data_to_pickle) - - with open(cls.test_load_file, "wb") as f: - f.write(cls.test_pickle) - - @classmethod - def tearDownClass(cls): - os.remove(cls.test_save_file) - os.remove(cls.test_load_file) - - def setUp(self): - self.cache = axl.DeterministicCache() - - def test_basic_init(self): - self.assertTrue(self.cache.mutable) - - def test_init_from_file(self): - loaded_cache = axl.DeterministicCache(file_name=self.test_load_file) - self.assertEqual(loaded_cache[self.test_key], self.test_value) - - def test_setitem(self): - self.cache[self.test_key] = self.test_value - self.assertEqual(self.cache[self.test_key], self.test_value) - - def test_setitem_invalid_key_not_tuple(self): - invalid_key = "test" - with self.assertRaises(ValueError): - self.cache[invalid_key] = self.test_value - - def test_setitem_invalid_key_first_two_elements_not_player(self): - invalid_key = ("test", "test") - with self.assertRaises(ValueError): - self.cache[invalid_key] = self.test_value - - invalid_key = (axl.TitForTat(), "test") - with self.assertRaises(ValueError): - self.cache[invalid_key] = self.test_value - - invalid_key = ("test", axl.TitForTat()) - with self.assertRaises(ValueError): - self.cache[invalid_key] = self.test_value - - def test_setitem_invalid_key_too_many_players(self): - invalid_key = (axl.TitForTat(), axl.TitForTat(), axl.TitForTat()) - with self.assertRaises(ValueError): - self.cache[invalid_key] = self.test_value - - def test_setitem_invalid_key_stochastic_player(self): - invalid_key = (axl.Random(), axl.TitForTat()) - with self.assertRaises(ValueError): - self.cache[invalid_key] = self.test_value - - invalid_key = (axl.TitForTat(), axl.Random()) - with self.assertRaises(ValueError): - self.cache[invalid_key] = self.test_value - - def test_setitem_invalid_value_not_list(self): - with self.assertRaises(ValueError): - self.cache[self.test_key] = 5 - - def test_setitem_with_immutable_cache(self): - self.cache.mutable = False - with self.assertRaises(ValueError): - self.cache[self.test_key] = self.test_value - - def test_save(self): - self.cache[self.test_key] = self.test_value - self.cache.save(self.test_save_file) - with open(self.test_save_file, "rb") as f: - text = f.read() - self.assertEqual(text, self.test_pickle) - - def test_load(self): - 
self.cache.load(self.test_load_file)
-        self.assertEqual(self.cache[self.test_key], self.test_value)
-
-    def test_load_error_for_incorrect_format(self):
-        path = pathlib.Path("../test_outputs/test.cache")
-        filename = axl_filename(path)
-        with open(filename, "wb") as io:
-            pickle.dump(range(5), io)
-
-        with self.assertRaises(ValueError):
-            self.cache.load(filename)
-
-    def test_del_item(self):
-        self.cache[self.test_key] = self.test_value
-        self.assertTrue(self.test_key in self.cache)
-        del self.cache[self.test_key]
-        self.assertFalse(self.test_key in self.cache)
diff --git a/axelrod/ipd/tests/unit/test_ecosystem.py b/axelrod/ipd/tests/unit/test_ecosystem.py
deleted file mode 100644
index ec552b542..000000000
--- a/axelrod/ipd/tests/unit/test_ecosystem.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""Tests for the Ecosystem class."""
-
-import unittest
-
-import axelrod as axl
-
-
-class TestEcosystem(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-        cooperators = axl.IpdTournament(
-            players=[
-                axl.Cooperator(),
-                axl.Cooperator(),
-                axl.Cooperator(),
-                axl.Cooperator(),
-            ]
-        )
-        defector_wins = axl.IpdTournament(
-            players=[
-                axl.Cooperator(),
-                axl.Cooperator(),
-                axl.Cooperator(),
-                axl.Defector(),
-            ]
-        )
-        cls.res_cooperators = cooperators.play()
-        cls.res_defector_wins = defector_wins.play()
-
-    def test_default_population_sizes(self):
-        eco = axl.Ecosystem(self.res_cooperators)
-        pops = eco.population_sizes
-        self.assertEqual(eco.num_players, 4)
-        self.assertEqual(len(pops), 1)
-        self.assertEqual(len(pops[0]), 4)
-        self.assertAlmostEqual(sum(pops[0]), 1.0)
-        self.assertEqual(list(set(pops[0])), [0.25])
-
-    def test_non_default_population_sizes(self):
-        eco = axl.Ecosystem(
-            self.res_cooperators, population=[0.7, 0.25, 0.03, 0.02]
-        )
-        pops = eco.population_sizes
-        self.assertEqual(eco.num_players, 4)
-        self.assertEqual(len(pops), 1)
-        self.assertEqual(len(pops[0]), 4)
-        self.assertAlmostEqual(sum(pops[0]), 1.0)
-        self.assertEqual(pops[0], [0.7, 0.25, 0.03, 0.02])
-
-    def test_population_normalization(self):
-        eco = axl.Ecosystem(self.res_cooperators, population=[70, 25, 3, 2])
-        pops = eco.population_sizes
-        self.assertEqual(eco.num_players, 4)
-        self.assertEqual(len(pops), 1)
-        self.assertEqual(len(pops[0]), 4)
-        self.assertAlmostEqual(sum(pops[0]), 1.0)
-        self.assertEqual(pops[0], [0.7, 0.25, 0.03, 0.02])
-
-    def test_results_and_population_of_different_sizes(self):
-        self.assertRaises(
-            TypeError,
-            axl.Ecosystem,
-            self.res_cooperators,
-            population=[0.7, 0.2, 0.03, 0.1, 0.1],
-        )
-
-    def test_negative_populations(self):
-        self.assertRaises(
-            TypeError,
-            axl.Ecosystem,
-            self.res_cooperators,
-            population=[0.7, -0.2, 0.03, 0.2],
-        )
-
-    def test_fitness_function(self):
-        fitness = lambda p: 2 * p
-        eco = axl.Ecosystem(self.res_cooperators, fitness=fitness)
-        self.assertEqual(eco.fitness(10), 20)
-
-    def test_cooperators_are_stable_over_time(self):
-        eco = axl.Ecosystem(self.res_cooperators)
-        eco.reproduce(100)
-        pops = eco.population_sizes
-        self.assertEqual(len(pops), 101)
-        for p in pops:
-            self.assertEqual(len(p), 4)
-            self.assertEqual(sum(p), 1.0)
-            self.assertEqual(list(set(p)), [0.25])
-
-    def test_defector_wins_with_only_cooperators(self):
-        eco = axl.Ecosystem(self.res_defector_wins)
-        eco.reproduce(1000)
-        pops = eco.population_sizes
-        self.assertEqual(len(pops), 1001)
-        for p in pops:
-            self.assertEqual(len(p), 4)
-            self.assertAlmostEqual(sum(p), 1.0)
-        last = pops[-1]
-        self.assertAlmostEqual(last[0], 0.0)
-        self.assertAlmostEqual(last[1], 0.0)
self.assertAlmostEqual(last[2], 0.0) - self.assertAlmostEqual(last[3], 1.0) diff --git a/axelrod/ipd/tests/unit/test_eigen.py b/axelrod/ipd/tests/unit/test_eigen.py deleted file mode 100644 index 384f018ca..000000000 --- a/axelrod/ipd/tests/unit/test_eigen.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Test for eigen.py.""" - -import unittest - -import numpy -from numpy.testing import assert_array_almost_equal - -from axelrod.ipd.eigen import _normalise, principal_eigenvector - - - -class FunctionCases(unittest.TestCase): - def test_identity_matrices(self): - for size in range(2, 6): - mat = numpy.identity(size) - evector, evalue = principal_eigenvector(mat) - self.assertAlmostEqual(evalue, 1) - assert_array_almost_equal(evector, _normalise(numpy.ones(size))) - - def test_zero_matrix(self): - mat = numpy.array([[0, 0], [0, 0]]) - evector, evalue = principal_eigenvector(mat) - self.assertTrue(numpy.isnan(evalue)) - self.assertTrue(numpy.isnan(evector[0])) - self.assertTrue(numpy.isnan(evector[1])) - - def test_2x2_matrix(self): - mat = numpy.array([[2, 1], [1, 2]]) - evector, evalue = principal_eigenvector(mat) - self.assertAlmostEqual(evalue, 3) - assert_array_almost_equal(evector, numpy.dot(mat, evector) / evalue) - assert_array_almost_equal(evector, _normalise(numpy.array([1, 1]))) - - def test_3x3_matrix(self): - mat = numpy.array([[1, 2, 0], [-2, 1, 2], [1, 3, 1]]) - evector, evalue = principal_eigenvector( - mat, maximum_iterations=None, max_error=1e-10 - ) - self.assertAlmostEqual(evalue, 3) - assert_array_almost_equal(evector, numpy.dot(mat, evector) / evalue) - assert_array_almost_equal(evector, _normalise(numpy.array([0.5, 0.5, 1]))) - - def test_4x4_matrix(self): - mat = numpy.array([[2, 0, 0, 0], [1, 2, 0, 0], [0, 1, 3, 0], [0, 0, 1, 3]]) - evector, evalue = principal_eigenvector( - mat, maximum_iterations=None, max_error=1e-10 - ) - self.assertAlmostEqual(evalue, 3, places=3) - assert_array_almost_equal(evector, numpy.dot(mat, evector) / evalue) - assert_array_almost_equal( - evector, _normalise(numpy.array([0, 0, 0, 1])), decimal=4 - ) diff --git a/axelrod/ipd/tests/unit/test_filters.py b/axelrod/ipd/tests/unit/test_filters.py deleted file mode 100644 index b1c78255a..000000000 --- a/axelrod/ipd/tests/unit/test_filters.py +++ /dev/null @@ -1,170 +0,0 @@ -import unittest - -import axelrod as axl -from axelrod.ipd.player import IpdPlayer -from axelrod.ipd.strategies._filters import * - -from hypothesis import example, given, settings -from hypothesis.strategies import integers - - -class TestFilters(unittest.TestCase): - class TestStrategy(IpdPlayer): - classifier = { - "stochastic": True, - "inspects_source": False, - "memory_depth": 10, - "makes_use_of": ["game", "length"], - } - - def test_equality_filter(self): - self.assertTrue( - passes_operator_filter(self.TestStrategy, "stochastic", True, operator.eq) - ) - self.assertFalse( - passes_operator_filter(self.TestStrategy, "stochastic", False, operator.eq) - ) - self.assertTrue( - passes_operator_filter( - self.TestStrategy, "inspects_source", False, operator.eq - ) - ) - self.assertFalse( - passes_operator_filter( - self.TestStrategy, "inspects_source", True, operator.eq - ) - ) - - @given( - smaller=integers(min_value=0, max_value=9), - larger=integers(min_value=11, max_value=100), - ) - @example(smaller=0, larger=float("inf")) - @settings(max_examples=5) - def test_inequality_filter(self, smaller, larger): - self.assertTrue( - passes_operator_filter( - self.TestStrategy, "memory_depth", smaller, operator.ge - ) - ) - 
self.assertTrue( - passes_operator_filter( - self.TestStrategy, "memory_depth", larger, operator.le - ) - ) - self.assertFalse( - passes_operator_filter( - self.TestStrategy, "memory_depth", smaller, operator.le - ) - ) - self.assertFalse( - passes_operator_filter( - self.TestStrategy, "memory_depth", larger, operator.ge - ) - ) - - def test_list_filter(self): - self.assertTrue( - passes_in_list_filter(self.TestStrategy, "makes_use_of", ["game"]) - ) - self.assertTrue( - passes_in_list_filter(self.TestStrategy, "makes_use_of", ["length"]) - ) - self.assertTrue( - passes_in_list_filter(self.TestStrategy, "makes_use_of", ["game", "length"]) - ) - self.assertFalse( - passes_in_list_filter(self.TestStrategy, "makes_use_of", "test") - ) - - @given( - smaller=integers(min_value=0, max_value=9), - larger=integers(min_value=11, max_value=100), - ) - @example(smaller=0, larger=float("inf")) - @settings(max_examples=5) - def test_passes_filterset(self, smaller, larger): - - full_passing_filterset_1 = { - "stochastic": True, - "inspects_source": False, - "min_memory_depth": smaller, - "max_memory_depth": larger, - "makes_use_of": ["game", "length"], - } - - full_passing_filterset_2 = { - "stochastic": True, - "inspects_source": False, - "memory_depth": 10, - "makes_use_of": ["game", "length"], - } - - sparse_passing_filterset = { - "stochastic": True, - "inspects_source": False, - "makes_use_of": ["length"], - } - - full_failing_filterset = { - "stochastic": False, - "inspects_source": False, - "min_memory_depth": smaller, - "max_memory_depth": larger, - "makes_use_of": ["length"], - } - - sparse_failing_filterset = { - "stochastic": False, - "inspects_source": False, - "min_memory_depth": smaller, - } - - self.assertTrue(passes_filterset(self.TestStrategy, full_passing_filterset_1)) - self.assertTrue(passes_filterset(self.TestStrategy, full_passing_filterset_2)) - self.assertTrue(passes_filterset(self.TestStrategy, sparse_passing_filterset)) - self.assertFalse(passes_filterset(self.TestStrategy, full_failing_filterset)) - self.assertFalse(passes_filterset(self.TestStrategy, sparse_failing_filterset)) - - def test_filtered_strategies(self): - class StochasticTestStrategy(IpdPlayer): - classifier = { - "stochastic": True, - "memory_depth": float("inf"), - "makes_use_of": [], - } - - class MemoryDepth2TestStrategy(IpdPlayer): - classifier = {"stochastic": False, "memory_depth": 2, "makes_use_of": []} - - class UsesLengthTestStrategy(IpdPlayer): - classifier = { - "stochastic": True, - "memory_depth": float("inf"), - "makes_use_of": ["length"], - } - - strategies = [ - StochasticTestStrategy, - MemoryDepth2TestStrategy, - UsesLengthTestStrategy, - ] - - stochastic_filterset = {"stochastic": True} - - deterministic_filterset = {"stochastic": False} - - uses_length_filterset = {"stochastic": True, "makes_use_of": ["length"]} - - self.assertEqual( - axl.filtered_strategies(stochastic_filterset, strategies), - [StochasticTestStrategy, UsesLengthTestStrategy], - ) - self.assertEqual( - axl.filtered_strategies(deterministic_filterset, strategies), - [MemoryDepth2TestStrategy], - ) - self.assertEqual( - axl.filtered_strategies(uses_length_filterset, strategies), - [UsesLengthTestStrategy], - ) diff --git a/axelrod/ipd/tests/unit/test_fingerprint.py b/axelrod/ipd/tests/unit/test_fingerprint.py deleted file mode 100644 index 50895d296..000000000 --- a/axelrod/ipd/tests/unit/test_fingerprint.py +++ /dev/null @@ -1,516 +0,0 @@ -import unittest -from unittest.mock import patch - -import os -from tempfile import 
mkstemp -import matplotlib.pyplot -import numpy as np -import pathlib - -import axelrod as axl -from axelrod.ipd.fingerprint import AshlockFingerprint, Point, TransitiveFingerprint -from axelrod.ipd.load_data_ import axl_filename -from axelrod.ipd.tests.property import strategy_lists - -from hypothesis import given, settings - - -C, D = axl.Action.C, axl.Action.D - - -class RecordedMksTemp(object): - """This object records all results from RecordedMksTemp.mkstemp. It's for - testing that temp files are created and then destroyed.""" - - record = [] - - @staticmethod - def mkstemp(*args, **kwargs): - temp_file_info = mkstemp(*args, **kwargs) - RecordedMksTemp.record.append(temp_file_info) - return temp_file_info - - @staticmethod - def reset_record(): - RecordedMksTemp.record = [] - - -class TestFingerprint(unittest.TestCase): - - points_when_using_half_step = [ - (0.0, 0.0), - (0.0, 0.5), - (0.0, 1.0), - (0.5, 0.0), - (0.5, 0.5), - (0.5, 1.0), - (1.0, 0.0), - (1.0, 0.5), - (1.0, 1.0), - ] - edges_when_using_half_step = [ - (0, 1), - (0, 2), - (0, 3), - (0, 4), - (0, 5), - (0, 6), - (0, 7), - (0, 8), - (0, 9), - ] - - def test_default_init(self): - fingerprint = AshlockFingerprint(axl.WinStayLoseShift) - self.assertEqual(fingerprint.strategy, axl.WinStayLoseShift) - self.assertEqual(fingerprint.probe, axl.TitForTat) - - def test_init_with_explicit_probe(self): - fingerprint = AshlockFingerprint(axl.WinStayLoseShift, axl.Random) - self.assertEqual(fingerprint.strategy, axl.WinStayLoseShift) - self.assertEqual(fingerprint.probe, axl.Random) - - def test_init_with_instances(self): - player = axl.WinStayLoseShift() - fingerprint = AshlockFingerprint(player) - self.assertEqual(fingerprint.strategy, player) - self.assertEqual(fingerprint.probe, axl.TitForTat) - - probe = axl.Random() - fingerprint = AshlockFingerprint(axl.WinStayLoseShift, probe) - self.assertEqual(fingerprint.strategy, axl.WinStayLoseShift) - self.assertEqual(fingerprint.probe, probe) - - fingerprint = AshlockFingerprint(player, probe) - self.assertEqual(fingerprint.strategy, player) - self.assertEqual(fingerprint.probe, probe) - - def test_fingerprint_player(self): - af = AshlockFingerprint(axl.Cooperator()) - af.fingerprint(turns=5, repetitions=3, step=0.5, progress_bar=False) - - self.assertEqual(af.step, 0.5) - self.assertEqual(af.points, self.points_when_using_half_step) - self.assertEqual(af.spatial_tournament.turns, 5) - self.assertEqual(af.spatial_tournament.repetitions, 3) - self.assertEqual(af.spatial_tournament.edges, self.edges_when_using_half_step) - - # The first player is the fingerprinted one, the rest are probes. 
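# [Editor's note] The probe names asserted below follow the Ashlock
# fingerprint construction: at Point(x, y) the probe is a Joss-Ann
# transformer that plays C with probability x and D with probability y,
# otherwise deferring to the probe strategy; once x + y >= 1 the Dual of a
# Joss-Ann with parameters (1 - x, 1 - y) is used instead, which is why both
# "Joss-Ann" and "Dual Joss-Ann" names appear in the assertions.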
-        self.assertIsInstance(af.spatial_tournament.players[0], axl.Cooperator)
-        self.assertEqual(len(af.spatial_tournament.players), 10)
-        probes = af.spatial_tournament.players[1:]
-        self.assertEqual(len(probes), len(af.points))
-        self.assertEqual(
-            str(probes[0]), "Joss-Ann Tit For Tat: (0.0, 0.0)"
-        )  # x + y < 1
-        self.assertEqual(
-            str(probes[2]), "Dual Joss-Ann Tit For Tat: (1.0, 0.0)"
-        )  # x + y = 1
-        self.assertEqual(
-            str(probes[8]), "Dual Joss-Ann Tit For Tat: (0.0, 0.0)"
-        )  # x + y > 1
-
-    def test_fingerprint_explicit_probe(self):
-        af = AshlockFingerprint(axl.TitForTat(), probe=axl.Random(p=0.1))
-        af.fingerprint(turns=10, repetitions=2, step=0.5, progress_bar=False)
-
-        probes = af.spatial_tournament.players[1:]
-        self.assertEqual(
-            str(probes[0]), "Joss-Ann Random: 0.1: (0.0, 0.0)"
-        )  # x + y < 1
-        self.assertEqual(
-            str(probes[2]), "Dual Joss-Ann Random: 0.1: (1.0, 0.0)"
-        )  # x + y = 1
-        self.assertEqual(
-            str(probes[8]), "Dual Joss-Ann Random: 0.1: (0.0, 0.0)"
-        )  # x + y > 1
-
-    def test_fingerprint_interactions_cooperator(self):
-        af = AshlockFingerprint(axl.Cooperator())
-        af.fingerprint(turns=5, repetitions=3, step=0.5, progress_bar=False)
-
-        # The keys are edges between players, values are repetitions.
-        self.assertCountEqual(
-            af.interactions.keys(),
-            [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9)],
-        )
-        self.assertEqual(len(af.interactions.values()), 9)
-
-        # Each edge has 3 repetitions with 5 turns each.
-        repetitions = af.interactions.values()
-        self.assertTrue(all(len(rep) == 3 for rep in repetitions))
-        for iturn in range(3):
-            self.assertTrue(all(len(rep[iturn]) == 5 for rep in repetitions))
-
-        # Interactions are invariant for any points where y is zero, and
-        # the score should be maximum possible.
-        # IpdPlayer 1 is Point(0.0, 0.0).
-        # IpdPlayer 4 is Point(0.5, 0.0).
-        # IpdPlayer 7 is Point(1.0, 0.0).
-        for iplayer in (1, 4, 7):
-            for turns in af.interactions[(0, iplayer)]:
-                self.assertEqual(len(turns), 5)
-                self.assertTrue(all(t == (C, C) for t in turns))
-        self.assertEqual(af.data[Point(0.0, 0.0)], 3.0)
-        self.assertEqual(af.data[Point(0.5, 0.0)], 3.0)
-        self.assertEqual(af.data[Point(1.0, 0.0)], 3.0)
-
-        # IpdPlayer 3 is Point(0.0, 1.0), which means constant defection
-        # from the probe. But the Cooperator doesn't change and score is zero.
-        for turns in af.interactions[(0, 3)]:
-            self.assertEqual(len(turns), 5)
-            self.assertTrue(all(t == (C, D) for t in turns))
-        self.assertEqual(af.data[Point(0.0, 1.0)], 0.0)
-
-    def test_fingerprint_interactions_titfortat(self):
-        af = AshlockFingerprint(axl.TitForTat())
-        af.fingerprint(turns=5, repetitions=3, step=0.5, progress_bar=False)
-
-        # Tit-for-Tats will always cooperate if left to their own devices,
-        # so interactions are invariant for any points where y is zero,
-        # and the score should be maximum possible.
-        # IpdPlayer 1 is Point(0.0, 0.0).
-        # IpdPlayer 4 is Point(0.5, 0.0).
-        # IpdPlayer 7 is Point(1.0, 0.0).
-        for iplayer in (1, 4, 7):
-            for turns in af.interactions[(0, iplayer)]:
-                self.assertEqual(len(turns), 5)
-                self.assertTrue(all(t == (C, C) for t in turns))
-        self.assertEqual(af.data[Point(0.0, 0.0)], 3.0)
-        self.assertEqual(af.data[Point(0.5, 0.0)], 3.0)
-        self.assertEqual(af.data[Point(1.0, 0.0)], 3.0)
-
-        # IpdPlayer 3 is Point(0.0, 1.0) which implies defection after the
-        # first turn since Tit-for-Tat is playing, and a score of 0.8
-        # since we get zero on first turn and one point per turn later.
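# [Editor's note] Worked check of the 0.8 figure above: with the default
# game matrix (R, P, S, T) = (3, 1, 0, 5), TFT scores S = 0 on the first
# turn and P = 1 on each of the remaining four turns, so the mean over five
# turns is (0 + 4 * 1) / 5 = 0.8.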
- for turns in af.interactions[(0, 3)]: - self.assertEqual(len(turns), 5) - self.assertTrue(all(t == (D, D) for t in turns[1:])) - self.assertAlmostEqual(af.data[Point(0.0, 1.0)], 0.8) - - def test_progress_bar_fingerprint(self): - af = AshlockFingerprint(axl.TitForTat) - data = af.fingerprint(turns=10, repetitions=2, step=0.5, progress_bar=True) - self.assertEqual(sorted(data.keys()), self.points_when_using_half_step) - - @patch("axelrod.fingerprint.mkstemp", RecordedMksTemp.mkstemp) - def test_temp_file_creation(self): - - RecordedMksTemp.reset_record() - af = AshlockFingerprint(axl.TitForTat) - path = pathlib.Path("../test_outputs/test_fingerprint.csv") - filename = axl_filename(path) - - self.assertEqual(RecordedMksTemp.record, []) - - # Temp file is created and destroyed. - af.fingerprint( - turns=1, repetitions=1, step=0.5, progress_bar=False, filename=None - ) - - self.assertEqual(len(RecordedMksTemp.record), 1) - filename = RecordedMksTemp.record[0][1] - self.assertIsInstance(filename, str) - self.assertNotEqual(filename, "") - self.assertFalse(os.path.isfile(filename)) - - def test_fingerprint_with_filename(self): - path = pathlib.Path("../test_outputs/test_fingerprint.csv") - filename = axl_filename(path) - af = AshlockFingerprint(axl.TitForTat) - af.fingerprint( - turns=1, repetitions=1, step=0.5, progress_bar=False, filename=filename - ) - with open(filename, "r") as out: - data = out.read() - self.assertEqual(len(data.split("\n")), 20) - - def test_serial_fingerprint(self): - af = AshlockFingerprint(axl.TitForTat) - data = af.fingerprint(turns=10, repetitions=2, step=0.5, progress_bar=False) - edge_keys = sorted(list(af.interactions.keys())) - coord_keys = sorted(list(data.keys())) - self.assertEqual(af.step, 0.5) - self.assertEqual(edge_keys, self.edges_when_using_half_step) - self.assertEqual(coord_keys, self.points_when_using_half_step) - - def test_parallel_fingerprint(self): - af = AshlockFingerprint(axl.TitForTat) - af.fingerprint( - turns=10, repetitions=2, step=0.5, processes=2, progress_bar=False - ) - edge_keys = sorted(list(af.interactions.keys())) - coord_keys = sorted(list(af.data.keys())) - self.assertEqual(af.step, 0.5) - self.assertEqual(edge_keys, self.edges_when_using_half_step) - self.assertEqual(coord_keys, self.points_when_using_half_step) - - def test_plot_data(self): - axl.seed(0) # Fingerprinting is a random process. - af = AshlockFingerprint(axl.Cooperator()) - af.fingerprint(turns=5, repetitions=3, step=0.5, progress_bar=False) - - reshaped_data = np.array([[0.0, 0.0, 0.0], [2.0, 1.0, 2.0], [3.0, 3.0, 3.0]]) - plotted_data = af.plot().gca().images[0].get_array() - np.testing.assert_allclose(plotted_data, reshaped_data) - - def test_plot_figure(self): - af = AshlockFingerprint(axl.WinStayLoseShift, axl.TitForTat) - af.fingerprint(turns=10, repetitions=2, step=0.25, progress_bar=False) - p = af.plot() - self.assertIsInstance(p, matplotlib.pyplot.Figure) - q = af.plot(cmap="jet") - self.assertIsInstance(q, matplotlib.pyplot.Figure) - r = af.plot(interpolation="bicubic") - self.assertIsInstance(r, matplotlib.pyplot.Figure) - t = af.plot(title="Title") - self.assertIsInstance(t, matplotlib.pyplot.Figure) - u = af.plot(colorbar=False) - self.assertIsInstance(u, matplotlib.pyplot.Figure) - v = af.plot(labels=False) - self.assertIsInstance(v, matplotlib.pyplot.Figure) - - def test_wsls_fingerprint(self): - axl.seed(0) # Fingerprinting is a random process. 
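# [Editor's note] In the regression dictionaries below, each entry maps a
# probe Point(x, y) to the fingerprinted strategy's mean score per turn
# against that probe; the fixed seed above is what makes a comparison to
# two decimal places reproducible.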
- test_data = { - Point(x=0.0, y=0.0): 3.000, - Point(x=0.0, y=0.25): 1.710, - Point(x=0.0, y=0.5): 1.440, - Point(x=0.0, y=0.75): 1.080, - Point(x=0.0, y=1.0): 0.500, - Point(x=0.25, y=0.0): 3.000, - Point(x=0.25, y=0.25): 2.280, - Point(x=0.25, y=0.5): 1.670, - Point(x=0.25, y=0.75): 1.490, - Point(x=0.25, y=1.0): 0.770, - Point(x=0.5, y=0.0): 3.000, - Point(x=0.5, y=0.25): 2.740, - Point(x=0.5, y=0.5): 2.240, - Point(x=0.5, y=0.75): 1.730, - Point(x=0.5, y=1.0): 1.000, - Point(x=0.75, y=0.0): 3.000, - Point(x=0.75, y=0.25): 3.520, - Point(x=0.75, y=0.5): 2.830, - Point(x=0.75, y=0.75): 1.750, - Point(x=0.75, y=1.0): 1.250, - Point(x=1.0, y=0.0): 3.000, - Point(x=1.0, y=0.25): 4.440, - Point(x=1.0, y=0.5): 4.410, - Point(x=1.0, y=0.75): 4.440, - Point(x=1.0, y=1.0): 1.300, - } - af = axl.AshlockFingerprint(axl.WinStayLoseShift(), axl.TitForTat) - data = af.fingerprint(turns=50, repetitions=2, step=0.25, progress_bar=False) - - for key, value in data.items(): - self.assertAlmostEqual(value, test_data[key], places=2) - - def test_tft_fingerprint(self): - axl.seed(0) # Fingerprinting is a random process. - test_data = { - Point(x=0.0, y=0.0): 3.000, - Point(x=0.0, y=0.25): 1.820, - Point(x=0.0, y=0.5): 1.130, - Point(x=0.0, y=0.75): 1.050, - Point(x=0.0, y=1.0): 0.980, - Point(x=0.25, y=0.0): 3.000, - Point(x=0.25, y=0.25): 2.440, - Point(x=0.25, y=0.5): 1.770, - Point(x=0.25, y=0.75): 1.700, - Point(x=0.25, y=1.0): 1.490, - Point(x=0.5, y=0.0): 3.000, - Point(x=0.5, y=0.25): 2.580, - Point(x=0.5, y=0.5): 2.220, - Point(x=0.5, y=0.75): 2.000, - Point(x=0.5, y=1.0): 1.940, - Point(x=0.75, y=0.0): 3.000, - Point(x=0.75, y=0.25): 2.730, - Point(x=0.75, y=0.5): 2.290, - Point(x=0.75, y=0.75): 2.310, - Point(x=0.75, y=1.0): 2.130, - Point(x=1.0, y=0.0): 3.000, - Point(x=1.0, y=0.25): 2.790, - Point(x=1.0, y=0.5): 2.480, - Point(x=1.0, y=0.75): 2.310, - Point(x=1.0, y=1.0): 2.180, - } - - af = axl.AshlockFingerprint(axl.TitForTat(), axl.TitForTat) - data = af.fingerprint(turns=50, repetitions=2, step=0.25, progress_bar=False) - - for key, value in data.items(): - self.assertAlmostEqual(value, test_data[key], places=2) - - def test_majority_fingerprint(self): - axl.seed(0) # Fingerprinting is a random process. 
- test_data = { - Point(x=0.0, y=0.0): 3.000, - Point(x=0.0, y=0.25): 1.940, - Point(x=0.0, y=0.5): 1.130, - Point(x=0.0, y=0.75): 1.030, - Point(x=0.0, y=1.0): 0.980, - Point(x=0.25, y=0.0): 3.000, - Point(x=0.25, y=0.25): 2.130, - Point(x=0.25, y=0.5): 1.940, - Point(x=0.25, y=0.75): 2.060, - Point(x=0.25, y=1.0): 1.940, - Point(x=0.5, y=0.0): 3.000, - Point(x=0.5, y=0.25): 2.300, - Point(x=0.5, y=0.5): 2.250, - Point(x=0.5, y=0.75): 2.420, - Point(x=0.5, y=1.0): 2.690, - Point(x=0.75, y=0.0): 3.000, - Point(x=0.75, y=0.25): 2.400, - Point(x=0.75, y=0.5): 2.010, - Point(x=0.75, y=0.75): 2.390, - Point(x=0.75, y=1.0): 2.520, - Point(x=1.0, y=0.0): 3.000, - Point(x=1.0, y=0.25): 2.360, - Point(x=1.0, y=0.5): 1.740, - Point(x=1.0, y=0.75): 2.260, - Point(x=1.0, y=1.0): 2.260, - } - - af = axl.AshlockFingerprint(axl.GoByMajority, axl.TitForTat) - data = af.fingerprint(turns=50, repetitions=2, step=0.25, progress_bar=False) - - for key, value in data.items(): - self.assertAlmostEqual(value, test_data[key], places=2) - - @given(strategy_pair=strategy_lists(min_size=2, max_size=2)) - @settings(max_examples=5) - def test_pair_fingerprints(self, strategy_pair): - """ - A test to check that we can fingerprint - with any two given strategies or instances - """ - strategy, probe = strategy_pair - af = AshlockFingerprint(strategy, probe) - data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False) - self.assertIsInstance(data, dict) - - af = AshlockFingerprint(strategy(), probe) - data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False) - self.assertIsInstance(data, dict) - - af = AshlockFingerprint(strategy, probe()) - data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False) - self.assertIsInstance(data, dict) - - af = AshlockFingerprint(strategy(), probe()) - data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False) - self.assertIsInstance(data, dict) - - -class TestTransitiveFingerprint(unittest.TestCase): - def test_init(self): - player = axl.TitForTat() - fingerprint = axl.TransitiveFingerprint(strategy=player) - self.assertEqual(fingerprint.strategy, player) - self.assertEqual( - fingerprint.opponents, [axl.Random(p) for p in np.linspace(0, 1, 50)] - ) - - def test_init_with_opponents(self): - player = axl.TitForTat() - opponents = [s() for s in axl.demo_strategies] - fingerprint = axl.TransitiveFingerprint(strategy=player, opponents=opponents) - self.assertEqual(fingerprint.strategy, player) - self.assertEqual(fingerprint.opponents, opponents) - - def test_init_with_not_default_number(self): - player = axl.TitForTat() - number_of_opponents = 10 - fingerprint = axl.TransitiveFingerprint( - strategy=player, number_of_opponents=number_of_opponents - ) - self.assertEqual(fingerprint.strategy, player) - self.assertEqual( - fingerprint.opponents, [axl.Random(p) for p in np.linspace(0, 1, 10)] - ) - - def test_fingerprint_with_filename(self): - path = pathlib.Path("../test_outputs/test_fingerprint.csv") - filename = axl_filename(path) - strategy = axl.TitForTat() - tf = TransitiveFingerprint(strategy) - tf.fingerprint(turns=1, repetitions=1, progress_bar=False, filename=filename) - with open(filename, "r") as out: - data = out.read() - self.assertEqual(len(data.split("\n")), 102) - - def test_serial_fingerprint(self): - strategy = axl.TitForTat() - tf = TransitiveFingerprint(strategy) - path = pathlib.Path("../test_outputs/test_fingerprint.csv") - tf.fingerprint( - repetitions=1, - progress_bar=False, - 
filename=axl_filename(path), - ) - self.assertEqual(tf.data.shape, (50, 50)) - - def test_parallel_fingerprint(self): - strategy = axl.TitForTat() - tf = TransitiveFingerprint(strategy) - tf.fingerprint(repetitions=1, progress_bar=False, processes=2) - - self.assertEqual(tf.data.shape, (50, 50)) - - def test_analyse_cooperation_ratio(self): - tf = TransitiveFingerprint(axl.TitForTat) - path = pathlib.Path("../test_outputs/test_fingerprint.csv") - filename = axl_filename(path) - with open(filename, "w") as f: - f.write( - """Interaction index,Player index,Opponent index,Repetition,IpdPlayer name,Opponent name,Actions -0,0,1,0,IpdPlayer0,IpdPlayer1,CCC -0,1,0,0,IpdPlayer1,IpdPlayer0,DDD -1,0,1,1,IpdPlayer0,IpdPlayer1,CCC -1,1,0,1,IpdPlayer1,IpdPlayer0,DDD -2,0,2,0,IpdPlayer0,IpdPlayer2,CCD -2,2,0,0,IpdPlayer2,IpdPlayer0,DDD -3,0,2,1,IpdPlayer0,IpdPlayer2,CCC -3,2,0,1,IpdPlayer2,IpdPlayer0,DDD -4,0,3,0,IpdPlayer0,IpdPlayer3,CCD -4,3,0,0,IpdPlayer3,IpdPlayer0,DDD -5,0,3,1,IpdPlayer0,IpdPlayer3,DCC -5,3,0,1,IpdPlayer3,IpdPlayer0,DDD -6,0,4,2,IpdPlayer0,IpdPlayer4,DDD -6,4,0,2,IpdPlayer4,IpdPlayer0,DDD -7,0,4,3,IpdPlayer0,IpdPlayer4,DDD -7,4,0,3,IpdPlayer4,IpdPlayer0,DDD""" - ) - data = tf.analyse_cooperation_ratio(filename) - expected_data = np.array( - [[1, 1, 1], [1, 1, 1 / 2], [1 / 2, 1, 1 / 2], [0, 0, 0]] - ) - self.assertTrue(np.array_equal(data, expected_data)) - - def test_plot(self): - """ - Test that plot is created with various arguments. - """ - tf = TransitiveFingerprint(axl.TitForTat) - tf.fingerprint(turns=10, repetitions=2, progress_bar=False) - p = tf.plot() - self.assertIsInstance(p, matplotlib.pyplot.Figure) - p = tf.plot(cmap="jet") - self.assertIsInstance(p, matplotlib.pyplot.Figure) - p = tf.plot(interpolation="bicubic") - self.assertIsInstance(p, matplotlib.pyplot.Figure) - p = tf.plot(title="Title") - self.assertIsInstance(p, matplotlib.pyplot.Figure) - p = tf.plot(colorbar=False) - self.assertIsInstance(p, matplotlib.pyplot.Figure) - p = tf.plot(labels=False) - self.assertIsInstance(p, matplotlib.pyplot.Figure) - p = tf.plot(display_names=True) - self.assertIsInstance(p, matplotlib.pyplot.Figure) - - def test_plot_with_axis(self): - fig, axarr = matplotlib.pyplot.subplots(2, 2) - tf = TransitiveFingerprint(axl.TitForTat) - tf.fingerprint(turns=10, repetitions=2, progress_bar=False) - p = tf.plot(ax=axarr[0, 0]) - self.assertIsInstance(p, matplotlib.pyplot.Figure) diff --git a/axelrod/ipd/tests/unit/test_game.py b/axelrod/ipd/tests/unit/test_game.py deleted file mode 100644 index 25b4eb4e3..000000000 --- a/axelrod/ipd/tests/unit/test_game.py +++ /dev/null @@ -1,80 +0,0 @@ -import unittest - -import axelrod as axl -from axelrod.ipd.tests.property import games - -from hypothesis import given, settings -from hypothesis.strategies import integers - -C, D = axl.Action.C, axl.Action.D - - -class TestGame(unittest.TestCase): - def test_default_scores(self): - expected_scores = { - (C, D): (0, 5), - (D, C): (5, 0), - (D, D): (1, 1), - (C, C): (3, 3), - } - self.assertEqual(axl.IpdGame().scores, expected_scores) - - def test_default_RPST(self): - expected_values = (3, 1, 0, 5) - self.assertEqual(axl.IpdGame().RPST(), expected_values) - - def test_default_score(self): - game = axl.IpdGame() - self.assertEqual(game.score((C, C)), (3, 3)) - self.assertEqual(game.score((D, D)), (1, 1)) - self.assertEqual(game.score((C, D)), (0, 5)) - self.assertEqual(game.score((D, C)), (5, 0)) - - def test_default_equality(self): - self.assertEqual(axl.IpdGame(), axl.IpdGame()) - - def 
test_not_default_equality(self): - self.assertEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame(1, 2, 3, 4)) - self.assertNotEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame(1, 2, 3, 5)) - self.assertNotEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame()) - - def test_wrong_class_equality(self): - self.assertNotEqual(axl.IpdGame(), "wrong class") - - @given(r=integers(), p=integers(), s=integers(), t=integers()) - @settings(max_examples=5) - def test_random_init(self, r, p, s, t): - """Test init with random scores using the hypothesis library.""" - expected_scores = { - (C, D): (s, t), - (D, C): (t, s), - (D, D): (p, p), - (C, C): (r, r), - } - game = axl.IpdGame(r, s, t, p) - self.assertEqual(game.scores, expected_scores) - - @given(r=integers(), p=integers(), s=integers(), t=integers()) - @settings(max_examples=5) - def test_random_RPST(self, r, p, s, t): - """Test RPST method with random scores using the hypothesis library.""" - game = axl.IpdGame(r, s, t, p) - self.assertEqual(game.RPST(), (r, p, s, t)) - - @given(r=integers(), p=integers(), s=integers(), t=integers()) - @settings(max_examples=5) - def test_random_score(self, r, p, s, t): - """Test score method with random scores using the hypothesis library.""" - game = axl.IpdGame(r, s, t, p) - self.assertEqual(game.score((C, C)), (r, r)) - self.assertEqual(game.score((D, D)), (p, p)) - self.assertEqual(game.score((C, D)), (s, t)) - self.assertEqual(game.score((D, C)), (t, s)) - - @given(game=games()) - @settings(max_examples=5) - def test_random_repr(self, game): - """Test repr with random scores using the hypothesis library.""" - expected_repr = "Axelrod game: (R,P,S,T) = {}".format(game.RPST()) - self.assertEqual(expected_repr, game.__repr__()) - self.assertEqual(expected_repr, str(game)) diff --git a/axelrod/ipd/tests/unit/test_graph.py b/axelrod/ipd/tests/unit/test_graph.py deleted file mode 100644 index 1e0666ee6..000000000 --- a/axelrod/ipd/tests/unit/test_graph.py +++ /dev/null @@ -1,305 +0,0 @@ -import unittest - -from collections import defaultdict - -import axelrod as axl - - -class TestGraph(unittest.TestCase): - def assert_out_mapping(self, g, expected_out_mapping): - self.assertDictEqual(g.out_mapping, expected_out_mapping) - for node, out_dict in expected_out_mapping.items(): - self.assertListEqual(g.out_vertices(node), list(out_dict.keys())) - self.assertDictEqual(g.out_dict(node), out_dict) - - def assert_in_mapping(self, g, expected_in_mapping): - self.assertDictEqual(g.in_mapping, expected_in_mapping) - for node, in_dict in expected_in_mapping.items(): - self.assertListEqual(g.in_vertices(node), list(in_dict.keys())) - self.assertDictEqual(g.in_dict(node), in_dict) - - def test_undirected_graph_with_no_vertices(self): - g = axl.graph.Graph() - self.assertFalse(g.directed) - self.assertIsInstance(g.out_mapping, defaultdict) - self.assertIsInstance(g.in_mapping, defaultdict) - self.assertEqual(g._edges, []) - self.assertEqual(str(g), "") - - def test_directed_graph_with_no_vertices(self): - g = axl.graph.Graph(directed=True) - self.assertTrue(g.directed) - self.assertIsInstance(g.out_mapping, defaultdict) - self.assertIsInstance(g.in_mapping, defaultdict) - self.assertEqual(g._edges, []) - self.assertEqual(str(g), "") - - def test_undirected_graph_with_vertices_and_unweighted_edges(self): - g = axl.graph.Graph(edges=[[1, 2], [2, 3]]) - self.assertFalse(g.directed) - self.assertEqual(str(g), "") - - self.assertEqual(g._edges, [(1, 2), (2, 1), (2, 3), (3, 2)]) - self.assert_out_mapping(g, {1: {2: None}, 2: {1: None, 3: None}, 3: {2: 
None}}) - self.assert_in_mapping(g, {1: {2: None}, 2: {1: None, 3: None}, 3: {2: None}}) - - def test_undirected_graph_with_vertices_and_weighted_edges(self): - g = axl.graph.Graph(edges=[[1, 2, 10], [2, 3, 5]]) - self.assertFalse(g.directed) - self.assertEqual(str(g), "") - - self.assertEqual(g._edges, [(1, 2), (2, 1), (2, 3), (3, 2)]) - self.assert_out_mapping(g, {1: {2: 10}, 2: {1: 10, 3: 5}, 3: {2: 5}}) - self.assert_in_mapping(g, {1: {2: 10}, 2: {1: 10, 3: 5}, 3: {2: 5}}) - - def test_directed_graph_vertices_and_weighted_edges(self): - g = axl.graph.Graph(edges=[[1, 2, 10], [2, 3, 5]], directed=True) - self.assertTrue(g.directed) - self.assertEqual(str(g), "") - - self.assertEqual(g._edges, [(1, 2), (2, 3)]) - self.assert_out_mapping(g, {1: {2: 10}, 2: {3: 5}}) - self.assert_in_mapping(g, {2: {1: 10}, 3: {2: 5}}) - - def test_add_loops(self): - edges = [(0, 1), (0, 2), (1, 2)] - g = axl.graph.Graph(edges) - g.add_loops() - self.assertEqual( - list(sorted(g._edges)), - list( - sorted( - [ - (0, 1), - (1, 0), - (0, 2), - (2, 0), - (1, 2), - (2, 1), - (0, 0), - (1, 1), - (2, 2), - ] - ) - ), - ) - - def test_add_loops_with_existing_loop_and_using_strings(self): - """In this case there is already a loop present; also uses - strings instead of integers as the hashable.""" - edges = [("a", "b"), ("b", "a"), ("c", "c")] - g = axl.graph.Graph(edges) - g.add_loops() - self.assertEqual( - list(sorted(g._edges)), - list(sorted([("a", "b"), ("b", "a"), ("c", "c"), ("a", "a"), ("b", "b")])), - ) - - -class TestCycle(unittest.TestCase): - def test_length_1_directed(self): - g = axl.graph.cycle(1, directed=True) - self.assertEqual(g.vertices, [0]) - self.assertEqual(g.edges, [(0, 0)]) - self.assertEqual(g.directed, True) - - def test_length_1_undirected(self): - g = axl.graph.cycle(1, directed=False) - self.assertEqual(g.vertices, [0]) - self.assertEqual(g.edges, [(0, 0)]) - self.assertEqual(g.directed, False) - - def test_length_2_directed(self): - g = axl.graph.cycle(2, directed=True) - self.assertEqual(g.vertices, [0, 1]) - self.assertEqual(g.edges, [(0, 1), (1, 0)]) - - def test_length_2_undirected(self): - g = axl.graph.cycle(2, directed=False) - self.assertEqual(g.vertices, [0, 1]) - self.assertEqual(g.edges, [(0, 1), (1, 0)]) - - def test_length_3_directed(self): - g = axl.graph.cycle(3, directed=True) - self.assertEqual(g.vertices, [0, 1, 2]) - self.assertEqual(g.edges, [(0, 1), (1, 2), (2, 0)]) - - def test_length_3_undirected(self): - g = axl.graph.cycle(3, directed=False) - edges = [(0, 1), (1, 0), (1, 2), (2, 1), (2, 0), (0, 2)] - self.assertEqual(g.vertices, [0, 1, 2]) - self.assertEqual(g.edges, edges) - - def test_length_4_directed(self): - g = axl.graph.cycle(4, directed=True) - self.assertEqual(g.vertices, [0, 1, 2, 3]) - self.assertEqual(g.edges, [(0, 1), (1, 2), (2, 3), (3, 0)]) - self.assertEqual(g.out_vertices(0), [1]) - self.assertEqual(g.out_vertices(1), [2]) - self.assertEqual(g.out_vertices(2), [3]) - self.assertEqual(g.out_vertices(3), [0]) - self.assertEqual(g.in_vertices(0), [3]) - self.assertEqual(g.in_vertices(1), [0]) - self.assertEqual(g.in_vertices(2), [1]) - self.assertEqual(g.in_vertices(3), [2]) - - def test_length_4_undirected(self): - g = axl.graph.cycle(4, directed=False) - edges = [(0, 1), (1, 0), (1, 2), (2, 1), (2, 3), (3, 2), (3, 0), (0, 3)] - self.assertEqual(g.vertices, [0, 1, 2, 3]) - self.assertEqual(g.edges, edges) - for vertex, neighbors in [(0, (1, 3)), (1, (0, 2)), (2, (1, 3)), (3, (0, 2))]: - self.assertEqual(set(g.out_vertices(vertex)), 
set(neighbors)) - for vertex, neighbors in [(0, (1, 3)), (1, (0, 2)), (2, (1, 3)), (3, (0, 2))]: - self.assertEqual(set(g.in_vertices(vertex)), set(neighbors)) - - -class TestComplete(unittest.TestCase): - def test_size_2(self): - g = axl.graph.complete_graph(2, loops=False) - self.assertEqual(g.vertices, [0, 1]) - self.assertEqual(g.edges, [(0, 1), (1, 0)]) - self.assertEqual(g.directed, False) - - def test_size_3(self): - g = axl.graph.complete_graph(3, loops=False) - self.assertEqual(g.vertices, [0, 1, 2]) - edges = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1)] - self.assertEqual(g.edges, edges) - self.assertEqual(g.directed, False) - - def test_size_4(self): - g = axl.graph.complete_graph(4, loops=False) - self.assertEqual(g.vertices, [0, 1, 2, 3]) - edges = [ - (0, 1), - (1, 0), - (0, 2), - (2, 0), - (0, 3), - (3, 0), - (1, 2), - (2, 1), - (1, 3), - (3, 1), - (2, 3), - (3, 2), - ] - self.assertEqual(g.edges, edges) - self.assertEqual(g.directed, False) - for vertex, neighbors in [ - (0, (1, 2, 3)), - (1, (0, 2, 3)), - (2, (0, 1, 3)), - (3, (0, 1, 2)), - ]: - self.assertEqual(set(g.out_vertices(vertex)), set(neighbors)) - for vertex, neighbors in [ - (0, (1, 2, 3)), - (1, (0, 2, 3)), - (2, (0, 1, 3)), - (3, (0, 1, 2)), - ]: - self.assertEqual(set(g.in_vertices(vertex)), set(neighbors)) - - def test_size_2_with_loops(self): - g = axl.graph.complete_graph(2, loops=True) - self.assertEqual(g.vertices, [0, 1]) - self.assertEqual(g.edges, [(0, 1), (1, 0), (0, 0), (1, 1)]) - self.assertEqual(g.directed, False) - - def test_size_3_with_loops(self): - g = axl.graph.complete_graph(3, loops=True) - self.assertEqual(g.vertices, [0, 1, 2]) - edges = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (0, 0), (1, 1), (2, 2)] - self.assertEqual(g.edges, edges) - self.assertEqual(g.directed, False) - - def test_size_4_with_loops(self): - g = axl.graph.complete_graph(4, loops=True) - self.assertEqual(g.vertices, [0, 1, 2, 3]) - edges = [ - (0, 1), - (1, 0), - (0, 2), - (2, 0), - (0, 3), - (3, 0), - (1, 2), - (2, 1), - (1, 3), - (3, 1), - (2, 3), - (3, 2), - (0, 0), - (1, 1), - (2, 2), - (3, 3), - ] - self.assertEqual(g.edges, edges) - self.assertEqual(g.directed, False) - neighbors = range(4) - for vertex in range(4): - self.assertEqual(set(g.out_vertices(vertex)), set(neighbors)) - self.assertEqual(set(g.in_vertices(vertex)), set(neighbors)) - - -class TestAttachedComplete(unittest.TestCase): - def test_size_2(self): - g = axl.graph.attached_complete_graphs(2, loops=False) - self.assertEqual(g.vertices, ['0:0', '0:1', '1:0', '1:1']) - self.assertEqual( - g.edges, - [('0:0', '0:1'), ('0:1', '0:0'), ('1:0', '1:1'), ('1:1', '1:0'), ('0:0', '1:0'), ('1:0', '0:0')] - ) - self.assertEqual(g.directed, False) - - def test_size_3(self): - g = axl.graph.attached_complete_graphs(3, loops=False) - self.assertEqual(g.vertices, ['0:0', '0:1', '0:2', '1:0', '1:1', '1:2']) - self.assertEqual( - g.edges, - [('0:0', '0:1'), - ('0:1', '0:0'), - ('0:0', '0:2'), - ('0:2', '0:0'), - ('0:1', '0:2'), - ('0:2', '0:1'), - ('1:0', '1:1'), - ('1:1', '1:0'), - ('1:0', '1:2'), - ('1:2', '1:0'), - ('1:1', '1:2'), - ('1:2', '1:1'), - ('0:0', '1:0'), - ('1:0', '0:0')] - ) - self.assertEqual(g.directed, False) - - def test_size_3_with_loops(self): - g = axl.graph.attached_complete_graphs(3, loops=True) - self.assertEqual(g.vertices, ['0:0', '0:1', '0:2', '1:0', '1:1', '1:2']) - self.assertEqual( - g.edges, - [('0:0', '0:1'), - ('0:1', '0:0'), - ('0:0', '0:2'), - ('0:2', '0:0'), - ('0:1', '0:2'), - ('0:2', '0:1'), - ('1:0', '1:1'), - 
('1:1', '1:0'), - ('1:0', '1:2'), - ('1:2', '1:0'), - ('1:1', '1:2'), - ('1:2', '1:1'), - ('0:0', '1:0'), - ('1:0', '0:0'), - ('0:0', '0:0'), - ('0:1', '0:1'), - ('0:2', '0:2'), - ('1:0', '1:0'), - ('1:1', '1:1'), - ('1:2', '1:2')] - ) - self.assertEqual(g.directed, False) diff --git a/axelrod/ipd/tests/unit/test_history.py b/axelrod/ipd/tests/unit/test_history.py deleted file mode 100644 index 572568649..000000000 --- a/axelrod/ipd/tests/unit/test_history.py +++ /dev/null @@ -1,119 +0,0 @@ -import unittest - -from collections import Counter - -import axelrod as axl -from axelrod.ipd.history import History, LimitedHistory - -C, D = axl.Action.C, axl.Action.D - - -class TestHistory(unittest.TestCase): - def test_init(self): - h1 = History([C, C, D], [C, C, C]) - self.assertEqual(list(h1), [C, C, D]) - h1.extend([C, C], [D, D]) - self.assertEqual(list(h1), [C, C, D, C, C]) - - def test_str_list_repr(self): - h = History() - h.append(C, D) - h.append(D, C) - h.append(C, D) - self.assertEqual(str(h), "CDC") - self.assertEqual(list(h), [C, D, C]) - self.assertEqual(repr(h), "[C, D, C]") - h2 = h.flip_plays() - self.assertEqual(str(h2), "DCD") - - def test_reset(self): - h = History() - h.append(C, D) - self.assertEqual(len(h), 1) - self.assertEqual(h.cooperations, 1) - h.reset() - self.assertEqual(len(h), 0) - self.assertEqual(h.cooperations, 0) - - def test_compare(self): - h = History([C, D, C], [C, C, C]) - self.assertEqual(h, [C, D, C]) - h2 = History([C, D, C], [C, C, C]) - self.assertEqual(h, h2) - h2.reset() - self.assertNotEqual(h, h2) - - def test_copy(self): - h = History([C, D, C], [C, C, C]) - h2 = h.copy() - self.assertEqual(h, h2) - - def test_eq(self): - h = History([C, D, C], [C, C, C]) - with self.assertRaises(TypeError): - h == 2 - - def test_counts(self): - h1 = History([C, C], [C, C]) - self.assertEqual(h1.cooperations, 2) - self.assertEqual(h1.defections, 0) - h2 = History([D, D], [C, C]) - self.assertEqual(h2.cooperations, 0) - self.assertEqual(h2.defections, 2) - self.assertNotEqual(h1, h2) - h3 = History([C, C, D, D], [C, C, C, C]) - self.assertEqual(h3.cooperations, 2) - self.assertEqual(h3.defections, 2) - - def test_flip_plays(self): - player = axl.Alternator() - opponent = axl.Cooperator() - for _ in range(5): - player.play(opponent) - - self.assertEqual(player.history, [C, D, C, D, C]) - self.assertEqual(player.cooperations, 3) - self.assertEqual(player.defections, 2) - - new_distribution = Counter() - for key, val in player.state_distribution.items(): - new_key = (key[0].flip(), key[1]) - new_distribution[new_key] = val - - flipped_history = player.history.flip_plays() - self.assertEqual(flipped_history, [D, C, D, C, D]) - self.assertEqual(flipped_history.cooperations, 2) - self.assertEqual(flipped_history.defections, 3) - self.assertEqual(flipped_history.state_distribution, - new_distribution) - - # Flipping plays twice is an involution: it restores the original history - flipped_flipped_history = flipped_history.flip_plays() - self.assertEqual(flipped_flipped_history, [C, D, C, D, C]) - self.assertEqual(flipped_flipped_history.cooperations, 3) - self.assertEqual(flipped_flipped_history.defections, 2) - - -class TestLimitedHistory(unittest.TestCase): - - def test_memory_depth(self): - h = LimitedHistory(memory_depth=3) - h.append(C, C) - self.assertEqual(len(h), 1) - h.append(D, D) - self.assertEqual(len(h), 2) - h.append(C, D) - self.assertEqual(len(h), 3) - self.assertEqual(h.cooperations, 2) - self.assertEqual(h.defections, 1) - self.assertEqual(h.state_distribution, - Counter({(C, C): 1, (D, D): 1, (C,
D): 1})) - h.append(D, C) - self.assertEqual(len(h), 3) - self.assertEqual(h._plays, [D, C, D]) - self.assertEqual(h._coplays, [D, D, C]) - self.assertEqual(h.cooperations, 1) - self.assertEqual(h.defections, 2) - self.assertEqual( - h.state_distribution, - Counter({(D, D): 1, (C, D): 1, (D, C): 1, (C, C): 0})) diff --git a/axelrod/ipd/tests/unit/test_interaction_utils.py b/axelrod/ipd/tests/unit/test_interaction_utils.py deleted file mode 100644 index 6ddda717c..000000000 --- a/axelrod/ipd/tests/unit/test_interaction_utils.py +++ /dev/null @@ -1,146 +0,0 @@ -import unittest -import tempfile -from collections import Counter - -import axelrod as axl - -C, D = axl.Action.C, axl.Action.D - - -class TestMatch(unittest.TestCase): - interactions = [[(C, D), (D, C)], [(D, C), (D, C)], [(C, C), (C, D)], []] - scores = [[(0, 5), (5, 0)], [(5, 0), (5, 0)], [(3, 3), (0, 5)], []] - final_scores = [(5, 5), (10, 0), (3, 8), None] - final_score_per_turn = [(2.5, 2.5), (5, 0), (1.5, 4), None] - winners = [False, 0, 1, None] - cooperations = [(1, 1), (0, 2), (2, 1), None] - normalised_cooperations = [(0.5, 0.5), (0, 1), (1, 0.5), None] - state_distribution = [ - Counter({(C, D): 1, (D, C): 1}), - Counter({(D, C): 2}), - Counter({(C, C): 1, (C, D): 1}), - None, - ] - state_to_action_distribution = [ - [Counter({((C, D), D): 1}), Counter({((C, D), C): 1})], - [Counter({((D, C), D): 1}), Counter({((D, C), C): 1})], - [Counter({((C, C), C): 1}), Counter({((C, C), D): 1})], - None, - ] - - normalised_state_distribution = [ - Counter({(C, D): 0.5, (D, C): 0.5}), - Counter({(D, C): 1.0}), - Counter({(C, C): 0.5, (C, D): 0.5}), - None, - ] - normalised_state_to_action_distribution = [ - [Counter({((C, D), D): 1}), Counter({((C, D), C): 1})], - [Counter({((D, C), D): 1}), Counter({((D, C), C): 1})], - [Counter({((C, C), C): 1}), Counter({((C, C), D): 1})], - None, - ] - - sparklines = ["█ \n █", " \n██", "██\n█ ", None] - - def test_compute_scores(self): - for inter, score in zip(self.interactions, self.scores): - self.assertEqual(score, axl.interaction_utils.compute_scores(inter)) - - def test_compute_final_score(self): - for inter, final_score in zip(self.interactions, self.final_scores): - self.assertEqual(final_score, axl.interaction_utils.compute_final_score(inter)) - - def test_compute_final_score_per_turn(self): - for inter, final_score_per_round in zip( - self.interactions, self.final_score_per_turn - ): - self.assertEqual( - final_score_per_round, axl.interaction_utils.compute_final_score_per_turn(inter) - ) - - def test_compute_winner_index(self): - for inter, winner in zip(self.interactions, self.winners): - self.assertEqual(winner, axl.interaction_utils.compute_winner_index(inter)) - - def test_compute_cooperations(self): - for inter, coop in zip(self.interactions, self.cooperations): - self.assertEqual(coop, axl.interaction_utils.compute_cooperations(inter)) - - def test_compute_normalised_cooperations(self): - for inter, coop in zip(self.interactions, self.normalised_cooperations): - self.assertEqual(coop, axl.interaction_utils.compute_normalised_cooperation(inter)) - - def test_compute_state_distribution(self): - for inter, dist in zip(self.interactions, self.state_distribution): - self.assertEqual(dist, axl.interaction_utils.compute_state_distribution(inter)) - - def test_compute_normalised_state_distribution(self): - for inter, dist in zip(self.interactions, self.normalised_state_distribution): - self.assertEqual(dist, axl.interaction_utils.compute_normalised_state_distribution(inter)) - - def 
test_compute_state_to_action_distribution(self): - for inter, dist in zip(self.interactions, self.state_to_action_distribution): - self.assertEqual(dist, axl.interaction_utils.compute_state_to_action_distribution(inter)) - inter = [(C, D), (D, C), (C, D), (D, C), (D, D), (C, C), (C, D)] - expected_dist = [ - Counter( - { - ((C, C), C): 1, - ((D, C), C): 1, - ((C, D), D): 2, - ((D, C), D): 1, - ((D, D), C): 1, - } - ), - Counter({((C, C), D): 1, ((C, D), C): 2, ((D, C), D): 2, ((D, D), C): 1}), - ] - - self.assertEqual(expected_dist, axl.interaction_utils.compute_state_to_action_distribution(inter)) - - def test_compute_normalised_state_to_action_distribution(self): - for inter, dist in zip( - self.interactions, self.normalised_state_to_action_distribution - ): - self.assertEqual( - dist, axl.interaction_utils.compute_normalised_state_to_action_distribution(inter) - ) - inter = [(C, D), (D, C), (C, D), (D, C), (D, D), (C, C), (C, D)] - expected_dist = [ - Counter( - { - ((C, C), C): 1, - ((D, C), C): 1 / 2, - ((C, D), D): 1, - ((D, C), D): 1 / 2, - ((D, D), C): 1, - } - ), - Counter({((C, C), D): 1, ((C, D), C): 1, ((D, C), D): 1, ((D, D), C): 1}), - ] - self.assertEqual( - expected_dist, axl.interaction_utils.compute_normalised_state_to_action_distribution(inter) - ) - - def test_compute_sparklines(self): - for inter, spark in zip(self.interactions, self.sparklines): - self.assertEqual(spark, axl.interaction_utils.compute_sparklines(inter)) - - def test_read_interactions_from_file(self): - tmp_file = tempfile.NamedTemporaryFile(mode="w", delete=False) - players = [axl.Cooperator(), axl.Defector()] - tournament = axl.IpdTournament(players=players, turns=2, repetitions=3) - tournament.play(filename=tmp_file.name) - tmp_file.close() - expected_interactions = { - (0, 0): [[(C, C), (C, C)] for _ in range(3)], - (0, 1): [[(C, D), (C, D)] for _ in range(3)], - (1, 1): [[(D, D), (D, D)] for _ in range(3)], - } - interactions = axl.interaction_utils.read_interactions_from_file(tmp_file.name, progress_bar=False) - self.assertEqual(expected_interactions, interactions) - - def test_string_to_interactions(self): - string = "CDCDDD" - interactions = [(C, D), (C, D), (D, D)] - self.assertEqual(axl.interaction_utils.string_to_interactions(string), interactions) diff --git a/axelrod/ipd/tests/unit/test_load_data.py b/axelrod/ipd/tests/unit/test_load_data.py deleted file mode 100644 index a38a7c1b0..000000000 --- a/axelrod/ipd/tests/unit/test_load_data.py +++ /dev/null @@ -1,17 +0,0 @@ -import os -import pathlib -import unittest - -from axelrod.ipd.load_data_ import axl_filename - - -class TestLoadData(unittest.TestCase): - def test_axl_filename(self): - path = pathlib.Path("ipd/strategies/titfortat.py") - actual_fn = axl_filename(path) - - # First go from "unit" up to "tests", then up to "axelrod" - dirname = os.path.dirname(__file__) - expected_fn = os.path.join(dirname, "../../strategies/titfortat.py") - - self.assertTrue(os.path.samefile(actual_fn, expected_fn)) diff --git a/axelrod/ipd/tests/unit/test_match.py b/axelrod/ipd/tests/unit/test_match.py deleted file mode 100644 index 66a204b15..000000000 --- a/axelrod/ipd/tests/unit/test_match.py +++ /dev/null @@ -1,377 +0,0 @@ -import unittest - -from collections import Counter - -import axelrod as axl -from axelrod.ipd.deterministic_cache import DeterministicCache -from axelrod.ipd.tests.property import games - -from hypothesis import example, given -from hypothesis.strategies import assume, floats, integers - -C, D = axl.Action.C, axl.Action.D - - -class 
TestMatch(unittest.TestCase): - @given(turns=integers(min_value=1, max_value=200), game=games()) - @example(turns=5, game=axl.DefaultGame) - def test_init(self, turns, game): - p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.IpdMatch((p1, p2), turns, game=game) - self.assertEqual(match.result, []) - self.assertEqual(match.players, [p1, p2]) - self.assertEqual(match.turns, turns) - self.assertEqual(match.prob_end, 0) - self.assertEqual(match.noise, 0) - self.assertEqual(match.game.RPST(), game.RPST()) - - self.assertEqual(match.players[0].match_attributes["length"], turns) - self.assertEqual(match._cache, {}) - - @given(prob_end=floats(min_value=0, max_value=1), game=games()) - def test_init_with_prob_end(self, prob_end, game): - p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.IpdMatch((p1, p2), prob_end=prob_end, game=game) - self.assertEqual(match.result, []) - self.assertEqual(match.players, [p1, p2]) - self.assertEqual(match.turns, float("inf")) - self.assertEqual(match.prob_end, prob_end) - self.assertEqual(match.noise, 0) - self.assertEqual(match.game.RPST(), game.RPST()) - - self.assertEqual(match.players[0].match_attributes["length"], float("inf")) - self.assertEqual(match._cache, {}) - - @given( - prob_end=floats(min_value=0, max_value=1), - turns=integers(min_value=1, max_value=200), - game=games(), - ) - def test_init_with_prob_end_and_turns(self, turns, prob_end, game): - p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.IpdMatch((p1, p2), turns=turns, prob_end=prob_end, game=game) - self.assertEqual(match.result, []) - self.assertEqual(match.players, [p1, p2]) - self.assertEqual(match.turns, turns) - self.assertEqual(match.prob_end, prob_end) - self.assertEqual(match.noise, 0) - self.assertEqual(match.game.RPST(), game.RPST()) - - self.assertEqual(match.players[0].match_attributes["length"], float("inf")) - self.assertEqual(match._cache, {}) - - def test_default_init(self): - p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.IpdMatch((p1, p2)) - self.assertEqual(match.result, []) - self.assertEqual(match.players, [p1, p2]) - self.assertEqual(match.turns, axl.DEFAULT_TURNS) - self.assertEqual(match.prob_end, 0) - self.assertEqual(match.noise, 0) - self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) - - self.assertEqual( - match.players[0].match_attributes["length"], axl.DEFAULT_TURNS - ) - self.assertEqual(match._cache, {}) - - def test_example_prob_end(self): - """ - Test that matches have different lengths and that the cache has - recorded the outcomes - """ - p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.IpdMatch((p1, p2), prob_end=0.5) - expected_lengths = [3, 1, 5] - for seed, expected_length in zip(range(3), expected_lengths): - axl.seed(seed) - self.assertEqual(match.players[0].match_attributes["length"], float("inf")) - self.assertEqual(len(match.play()), expected_length) - self.assertEqual(match.noise, 0) - self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) - self.assertEqual(len(match._cache), 1) - self.assertEqual(match._cache[(p1, p2)], [(C, C)] * 5) - - @given(turns=integers(min_value=1, max_value=200), game=games()) - @example(turns=5, game=axl.DefaultGame) - def test_non_default_attributes(self, turns, game): - p1, p2 = axl.Cooperator(), axl.Cooperator() - match_attributes = {"length": 500, "game": game, "noise": 0.5} - match = axl.IpdMatch( - (p1, p2), turns, game=game, match_attributes=match_attributes - ) - self.assertEqual(match.players[0].match_attributes["length"], 500) -
self.assertEqual(match.players[0].match_attributes["noise"], 0.5) - - @given(turns=integers(min_value=1, max_value=200)) - @example(turns=5) - def test_len(self, turns): - p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.IpdMatch((p1, p2), turns) - self.assertEqual(len(match), turns) - - def test_len_error(self): - """ - Length is not defined if it is infinite. - """ - p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.IpdMatch((p1, p2), prob_end=0.5) - with self.assertRaises(TypeError): - len(match) - - @given(p=floats(min_value=0, max_value=1)) - def test_stochastic(self, p): - - assume(0 < p < 1) - - p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.IpdMatch((p1, p2), 5) - self.assertFalse(match._stochastic) - - match = axl.IpdMatch((p1, p2), 5, noise=p) - self.assertTrue(match._stochastic) - - p1 = axl.Random() - match = axl.IpdMatch((p1, p2), 5) - self.assertTrue(match._stochastic) - - @given(p=floats(min_value=0, max_value=1)) - def test_cache_update_required(self, p): - - assume(0 < p < 1) - - p1, p2 = axl.Cooperator(), axl.Cooperator() - match = axl.IpdMatch((p1, p2), 5, noise=p) - self.assertFalse(match._cache_update_required) - - cache = DeterministicCache() - cache.mutable = False - match = axl.IpdMatch((p1, p2), 5, deterministic_cache=cache) - self.assertFalse(match._cache_update_required) - - match = axl.IpdMatch((p1, p2), 5) - self.assertTrue(match._cache_update_required) - - p1 = axl.Random() - match = axl.IpdMatch((p1, p2), 5) - self.assertFalse(match._cache_update_required) - - def test_play(self): - cache = DeterministicCache() - players = (axl.Cooperator(), axl.Defector()) - match = axl.IpdMatch(players, 3, deterministic_cache=cache) - expected_result = [(C, D), (C, D), (C, D)] - self.assertEqual(match.play(), expected_result) - self.assertEqual( - cache[(axl.Cooperator(), axl.Defector())], expected_result - ) - - # a deliberately incorrect result so we can tell it came from the cache - expected_result = [(C, C), (D, D), (D, C), (C, C), (C, D)] - cache[(axl.Cooperator(), axl.Defector())] = expected_result - match = axl.IpdMatch(players, 3, deterministic_cache=cache) - self.assertEqual(match.play(), expected_result[:3]) - - def test_cache_grows(self): - """ - We want to make sure that if we try to use the cache for more turns than - are stored, the result is regenerated and the cache overwritten. - """ - cache = DeterministicCache() - players = (axl.Cooperator(), axl.Defector()) - match = axl.IpdMatch(players, 3, deterministic_cache=cache) - expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] - expected_result_3_turn = [(C, D), (C, D), (C, D)] - self.assertEqual(match.play(), expected_result_3_turn) - match.turns = 5 - self.assertEqual(match.play(), expected_result_5_turn) - # The cache should now hold the 5-turn result. - self.assertEqual( - cache[(axl.Cooperator(), axl.Defector())], - expected_result_5_turn - ) - - def test_cache_doesnt_shrink(self): - """ - We want to make sure that when we access the cache looking for fewer - turns than are stored, the cache is not overwritten with the - shorter result.
- """ - cache = DeterministicCache() - players = (axl.Cooperator(), axl.Defector()) - match = axl.IpdMatch(players, 5, deterministic_cache=cache) - expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] - expected_result_3_turn = [(C, D), (C, D), (C, D)] - self.assertEqual(match.play(), expected_result_5_turn) - match.turns = 3 - self.assertEqual(match.play(), expected_result_3_turn) - # The cache should still hold the 5-turn result. - self.assertEqual( - cache[(axl.Cooperator(), axl.Defector())], - expected_result_5_turn - ) - - def test_scores(self): - player1 = axl.TitForTat() - player2 = axl.Defector() - match = axl.IpdMatch((player1, player2), 3) - self.assertEqual(match.scores(), []) - match.play() - self.assertEqual(match.scores(), [(0, 5), (1, 1), (1, 1)]) - - def test_final_score(self): - player1 = axl.TitForTat() - player2 = axl.Defector() - - match = axl.IpdMatch((player1, player2), 3) - self.assertEqual(match.final_score(), None) - match.play() - self.assertEqual(match.final_score(), (2, 7)) - - match = axl.IpdMatch((player2, player1), 3) - self.assertEqual(match.final_score(), None) - match.play() - self.assertEqual(match.final_score(), (7, 2)) - - def test_final_score_per_turn(self): - turns = 3 - player1 = axl.TitForTat() - player2 = axl.Defector() - - match = axl.IpdMatch((player1, player2), turns) - self.assertEqual(match.final_score_per_turn(), None) - match.play() - self.assertEqual(match.final_score_per_turn(), (2 / turns, 7 / turns)) - - match = axl.IpdMatch((player2, player1), turns) - self.assertEqual(match.final_score_per_turn(), None) - match.play() - self.assertEqual(match.final_score_per_turn(), (7 / turns, 2 / turns)) - - def test_winner(self): - player1 = axl.TitForTat() - player2 = axl.Defector() - - match = axl.IpdMatch((player1, player2), 3) - self.assertEqual(match.winner(), None) - match.play() - self.assertEqual(match.winner(), player2) - - match = axl.IpdMatch((player2, player1), 3) - self.assertEqual(match.winner(), None) - match.play() - self.assertEqual(match.winner(), player2) - - player1 = axl.Defector() - match = axl.IpdMatch((player1, player2), 3) - self.assertEqual(match.winner(), None) - match.play() - self.assertEqual(match.winner(), False) - - def test_cooperation(self): - turns = 3 - player1 = axl.Cooperator() - player2 = axl.Alternator() - - match = axl.IpdMatch((player1, player2), turns) - self.assertEqual(match.cooperation(), None) - match.play() - self.assertEqual(match.cooperation(), (3, 2)) - - player1 = axl.Alternator() - player2 = axl.Defector() - - match = axl.IpdMatch((player1, player2), turns) - self.assertEqual(match.cooperation(), None) - match.play() - self.assertEqual(match.cooperation(), (2, 0)) - - def test_normalised_cooperation(self): - turns = 3 - player1 = axl.Cooperator() - player2 = axl.Alternator() - - match = axl.IpdMatch((player1, player2), turns) - self.assertEqual(match.normalised_cooperation(), None) - match.play() - self.assertEqual(match.normalised_cooperation(), (3 / turns, 2 / turns)) - - player1 = axl.Alternator() - player2 = axl.Defector() - - match = axl.IpdMatch((player1, player2), turns) - self.assertEqual(match.normalised_cooperation(), None) - match.play() - self.assertEqual(match.normalised_cooperation(), (2 / turns, 0 / turns)) - - def test_state_distribution(self): - turns = 3 - player1 = axl.Cooperator() - player2 = axl.Alternator() - - match = axl.IpdMatch((player1, player2), turns) - self.assertEqual(match.state_distribution(), None) - - match.play() - expected = Counter({(C, C): 2, (C, D): 1}) -
self.assertEqual(match.state_distribution(), expected) - - player1 = axl.Alternator() - player2 = axl.Defector() - - match = axl.IpdMatch((player1, player2), turns) - self.assertEqual(match.state_distribution(), None) - - match.play() - expected = Counter({(C, D): 2, (D, D): 1}) - self.assertEqual(match.state_distribution(), expected) - - def test_normalised_state_distribution(self): - turns = 3 - player1 = axl.Cooperator() - player2 = axl.Alternator() - - match = axl.IpdMatch((player1, player2), turns) - self.assertEqual(match.normalised_state_distribution(), None) - - match.play() - expected = Counter({(C, C): 2 / turns, (C, D): 1 / turns}) - self.assertEqual(match.normalised_state_distribution(), expected) - - player1 = axl.Alternator() - player2 = axl.Defector() - - match = axl.IpdMatch((player1, player2), turns) - self.assertEqual(match.normalised_state_distribution(), None) - - match.play() - expected = Counter({(C, D): 2 / turns, (D, D): 1 / turns}) - self.assertEqual(match.normalised_state_distribution(), expected) - - def test_sparklines(self): - players = (axl.Cooperator(), axl.Alternator()) - match = axl.IpdMatch(players, 4) - match.play() - expected_sparklines = "████\n█ █ " - self.assertEqual(match.sparklines(), expected_sparklines) - expected_sparklines = "XXXX\nXYXY" - self.assertEqual(match.sparklines("X", "Y"), expected_sparklines) - - -class TestSampleLength(unittest.TestCase): - def test_sample_length(self): - for seed, prob_end, expected_length in [ - (0, 0.5, 3), - (1, 0.5, 1), - (2, 0.6, 4), - (3, 0.4, 1), - ]: - axl.seed(seed) - self.assertEqual(axl.ipd.match.sample_length(prob_end), expected_length) - - def test_sample_with_0_prob(self): - self.assertEqual(axl.ipd.match.sample_length(0), float("inf")) - - def test_sample_with_1_prob(self): - self.assertEqual(axl.ipd.match.sample_length(1), 1) diff --git a/axelrod/ipd/tests/unit/test_match_generator.py b/axelrod/ipd/tests/unit/test_match_generator.py deleted file mode 100644 index 682bd4b85..000000000 --- a/axelrod/ipd/tests/unit/test_match_generator.py +++ /dev/null @@ -1,237 +0,0 @@ -import unittest - -import axelrod as axl -from axelrod.ipd.match_generator import graph_is_connected - -from hypothesis import example, given, settings -from hypothesis.strategies import floats, integers - -test_strategies = [ - axl.Cooperator, - axl.TitForTat, - axl.Defector, - axl.Grudger, - axl.GoByMajority, -] -test_turns = 100 -test_repetitions = 20 -test_game = axl.IpdGame() - - -class TestMatchGenerator(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.players = [s() for s in test_strategies] - - def test_build_single_match_params(self): - rr = axl.MatchGenerator( - players=self.players, - turns=test_turns, - game=test_game, - repetitions=test_repetitions, - ) - match_params = rr.build_single_match_params() - self.assertIsInstance(match_params, dict) - self.assertEqual(match_params["turns"], test_turns) - self.assertEqual(match_params["game"], test_game) - self.assertEqual(match_params["noise"], 0) - self.assertIsNone(match_params["prob_end"]) - - # Check that can build a match - players = [axl.Cooperator(), axl.Defector()] - match_params["players"] = players - match = axl.IpdMatch(**match_params) - self.assertIsInstance(match, axl.IpdMatch) - self.assertEqual(len(match), test_turns) - - def test_build_single_match_params_with_noise(self): - rr = axl.MatchGenerator( - players=self.players, - turns=test_turns, - game=test_game, - repetitions=test_repetitions, - noise=0.5, - ) - match_params = 
rr.build_single_match_params() - self.assertIsInstance(match_params, dict) - self.assertEqual(match_params["turns"], test_turns) - self.assertEqual(match_params["game"], test_game) - self.assertEqual(match_params["noise"], 0.5) - self.assertIsNone(match_params["prob_end"]) - - # Check that can build a match - players = [axl.Cooperator(), axl.Defector()] - match_params["players"] = players - match = axl.IpdMatch(**match_params) - self.assertIsInstance(match, axl.IpdMatch) - self.assertEqual(len(match), test_turns) - - def test_build_single_match_params_with_prob_end(self): - rr = axl.MatchGenerator( - players=self.players, - game=test_game, - repetitions=test_repetitions, - prob_end=0.5, - ) - match_params = rr.build_single_match_params() - self.assertIsInstance(match_params, dict) - self.assertIsNone(match_params["turns"]) - self.assertEqual(match_params["game"], test_game) - self.assertEqual(match_params["noise"], 0) - self.assertEqual(match_params["prob_end"], 0.5) - - # Check that can build a match - players = [axl.Cooperator(), axl.Defector()] - match_params["players"] = players - match = axl.IpdMatch(**match_params) - self.assertIsInstance(match, axl.IpdMatch) - with self.assertRaises(TypeError): - len(match) - - def test_build_single_match_params_with_prob_end_and_noise(self): - rr = axl.MatchGenerator( - players=self.players, - game=test_game, - repetitions=test_repetitions, - noise=0.5, - prob_end=0.5, - ) - match_params = rr.build_single_match_params() - self.assertIsInstance(match_params, dict) - self.assertIsNone(match_params["turns"]) - self.assertEqual(match_params["game"], rr.game) - self.assertEqual(match_params["prob_end"], 0.5) - self.assertEqual(match_params["noise"], 0.5) - - # Check that can build a match - players = [axl.Cooperator(), axl.Defector()] - match_params["players"] = players - match = axl.IpdMatch(**match_params) - self.assertIsInstance(match, axl.IpdMatch) - with self.assertRaises(TypeError): - len(match) - - def test_build_single_match_params_with_prob_end_and_turns(self): - rr = axl.MatchGenerator( - players=self.players, - game=test_game, - repetitions=test_repetitions, - turns=5, - prob_end=0.5, - ) - match_params = rr.build_single_match_params() - self.assertIsInstance(match_params, dict) - self.assertEqual(match_params["turns"], 5) - self.assertEqual(match_params["game"], test_game) - self.assertEqual(match_params["prob_end"], 0.5) - self.assertEqual(match_params["noise"], 0) - - # Check that can build a match - players = [axl.Cooperator(), axl.Defector()] - match_params["players"] = players - match = axl.IpdMatch(**match_params) - self.assertIsInstance(match, axl.IpdMatch) - self.assertIsInstance(len(match), int) - self.assertGreater(len(match), 0) - self.assertLessEqual(len(match), 10) - - def test_build_single_match_params_with_fixed_length_unknown(self): - rr = axl.MatchGenerator( - players=self.players, - game=test_game, - repetitions=test_repetitions, - turns=5, - match_attributes={"length": float("inf")}, - ) - match_params = rr.build_single_match_params() - self.assertIsInstance(match_params, dict) - self.assertEqual(match_params["turns"], 5) - self.assertEqual(match_params["game"], test_game) - self.assertEqual(match_params["prob_end"], None) - self.assertEqual(match_params["noise"], 0) - self.assertEqual(match_params["match_attributes"], {"length": float("inf")}) - - # Check that can build a match - players = [axl.Cooperator(), axl.Defector()] - match_params["players"] = players - match = axl.IpdMatch(**match_params) - 
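# Each build_single_match_params test exercises the same hand-off: the
# generator returns a kwargs dict without the players, and the caller (e.g. a
# tournament worker building its own matches) completes it. A minimal sketch,
# using only names that appear in this diff:
import axelrod as axl

gen = axl.MatchGenerator(
    players=[axl.Cooperator(), axl.Defector()],
    turns=10,
    game=axl.IpdGame(),
    repetitions=1,
)
params = gen.build_single_match_params()  # turns, game, noise, prob_end, ...
params["players"] = [axl.Cooperator(), axl.Defector()]
match = axl.IpdMatch(**params)            # ready to play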
self.assertIsInstance(match, axl.IpdMatch) - self.assertEqual(len(match), 5) - self.assertEqual(match.match_attributes, {"length": float("inf")}) - - @given(repetitions=integers(min_value=1, max_value=test_repetitions)) - @settings(max_examples=5) - @example(repetitions=test_repetitions) - def test_build_match_chunks(self, repetitions): - rr = axl.MatchGenerator( - players=self.players, - turns=test_turns, - game=test_game, - repetitions=repetitions, - ) - chunks = list(rr.build_match_chunks()) - match_definitions = [ - tuple(list(index_pair) + [repetitions]) - for (index_pair, match_params, repetitions) in chunks - ] - expected_match_definitions = [ - (i, j, repetitions) for i in range(5) for j in range(i, 5) - ] - - self.assertEqual(sorted(match_definitions), sorted(expected_match_definitions)) - - @given(repetitions=integers(min_value=1, max_value=test_repetitions)) - @settings(max_examples=5) - @example(repetitions=test_repetitions) - def test_spatial_build_match_chunks(self, repetitions): - cycle = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 1)] - rr = axl.MatchGenerator( - players=self.players, - turns=test_turns, - game=test_game, - edges=cycle, - repetitions=repetitions, - ) - chunks = list(rr.build_match_chunks()) - match_definitions = [ - tuple(list(index_pair) + [repetitions]) - for (index_pair, match_params, repetitions) in chunks - ] - expected_match_definitions = [(i, j, repetitions) for i, j in cycle] - - self.assertEqual(sorted(match_definitions), sorted(expected_match_definitions)) - - def test_len(self): - turns = 5 - repetitions = 10 - rr = axl.MatchGenerator( - players=self.players, - turns=test_turns, - game=test_game, - repetitions=test_repetitions, - ) - self.assertEqual(len(rr), len(list(rr.build_match_chunks()))) - - def test_init_with_graph_edges_not_including_all_players(self): - edges = [(0, 1), (1, 2)] - with self.assertRaises(ValueError): - axl.MatchGenerator( - players=self.players, - repetitions=3, - game=test_game, - turns=5, - edges=edges, - noise=0, - ) - - -class TestUtilityFunctions(unittest.TestCase): - def test_connected_graph(self): - edges = [(0, 0), (0, 1), (1, 1)] - players = ["Cooperator", "Defector"] - self.assertTrue(graph_is_connected(edges, players)) - - def test_unconnected_graph(self): - edges = [(0, 0), (0, 1), (1, 1)] - players = ["Cooperator", "Defector", "Alternator"] - self.assertFalse(graph_is_connected(edges, players)) diff --git a/axelrod/ipd/tests/unit/test_mock_player.py b/axelrod/ipd/tests/unit/test_mock_player.py deleted file mode 100644 index a089d5d6c..000000000 --- a/axelrod/ipd/tests/unit/test_mock_player.py +++ /dev/null @@ -1,20 +0,0 @@ -import unittest - -import axelrod as axl - -C, D = axl.Action.C, axl.Action.D - - -class TestMockPlayer(unittest.TestCase): - def test_strategy(self): - for action in [C, D]: - m = axl.MockPlayer(actions=[action]) - p2 = axl.IpdPlayer() - self.assertEqual(action, m.strategy(p2)) - - actions = [C, C, D, D, C, C] - m = axl.MockPlayer(actions=actions) - p2 = axl.IpdPlayer() - for action in actions: - self.assertEqual(action, m.strategy(p2)) - diff --git a/axelrod/ipd/tests/unit/test_moran.py b/axelrod/ipd/tests/unit/test_moran.py deleted file mode 100644 index 8e3778158..000000000 --- a/axelrod/ipd/tests/unit/test_moran.py +++ /dev/null @@ -1,561 +0,0 @@ -import unittest -import itertools -import random -from collections import Counter -import matplotlib.pyplot as plt - -import axelrod as axl -from axelrod.ipd.moran import fitness_proportionate_selection -from axelrod.ipd.tests.property import 
strategy_lists - -from hypothesis import example, given, settings - -C, D = axl.Action.C, axl.Action.D - - -class TestMoranProcess(unittest.TestCase): - def test_init(self): - players = axl.Cooperator(), axl.Defector() - mp = axl.MoranProcess(players) - self.assertEqual(mp.turns, axl.DEFAULT_TURNS) - self.assertIsNone(mp.prob_end) - self.assertIsNone(mp.game) - self.assertEqual(mp.noise, 0) - self.assertEqual(mp.initial_players, players) - self.assertEqual(mp.players, list(players)) - self.assertEqual(mp.populations, [Counter({"Cooperator": 1, "Defector": 1})]) - self.assertIsNone(mp.winning_strategy_name) - self.assertEqual(mp.mutation_rate, 0) - self.assertEqual(mp.mode, "bd") - self.assertEqual(mp.deterministic_cache, axl.DeterministicCache()) - self.assertEqual( - mp.mutation_targets, {"Cooperator": [players[1]], "Defector": [players[0]]} - ) - self.assertEqual(mp.interaction_graph._edges, [(0, 1), (1, 0)]) - self.assertEqual(mp.reproduction_graph._edges, [(0, 1), (1, 0), (0, 0), (1, 1)]) - self.assertEqual(mp.fitness_transformation, None) - self.assertEqual(mp.locations, [0, 1]) - self.assertEqual(mp.index, {0: 0, 1: 1}) - - # Test non default graph cases - players = axl.Cooperator(), axl.Defector(), axl.TitForTat() - edges = [(0, 1), (2, 0), (1, 2)] - graph = axl.graph.Graph(edges, directed=True) - mp = axl.MoranProcess(players, interaction_graph=graph) - self.assertEqual(mp.interaction_graph._edges, [(0, 1), (2, 0), (1, 2)]) - self.assertEqual( - sorted(mp.reproduction_graph._edges), - sorted([(0, 1), (2, 0), (1, 2), (0, 0), (1, 1), (2, 2)]), - ) - - mp = axl.MoranProcess(players, interaction_graph=graph, reproduction_graph=graph) - self.assertEqual(mp.interaction_graph._edges, [(0, 1), (2, 0), (1, 2)]) - self.assertEqual(mp.reproduction_graph._edges, [(0, 1), (2, 0), (1, 2)]) - - def test_set_players(self): - """Test that set players resets all players""" - players = axl.Cooperator(), axl.Defector() - mp = axl.MoranProcess(players) - players[0].history.append(C, D) - mp.set_players() - self.assertEqual(players[0].cooperations, 0) - - def test_mutate(self): - """Test that a mutated player is returned""" - players = axl.Cooperator(), axl.Defector(), axl.TitForTat() - mp = axl.MoranProcess(players, mutation_rate=0.5) - axl.seed(0) - self.assertEqual(mp.mutate(0), players[0]) - axl.seed(1) - self.assertEqual(mp.mutate(0), players[2]) - axl.seed(4) - self.assertEqual(mp.mutate(0), players[1]) - - def test_death_in_db(self): - players = axl.Cooperator(), axl.Defector(), axl.TitForTat() - mp = axl.MoranProcess(players, mutation_rate=0.5, mode="db") - axl.seed(1) - self.assertEqual(mp.death(), 0) - self.assertEqual(mp.dead, 0) - axl.seed(5) - self.assertEqual(mp.death(), 1) - self.assertEqual(mp.dead, 1) - axl.seed(2) - self.assertEqual(mp.death(), 2) - self.assertEqual(mp.dead, 2) - - def test_death_in_bd(self): - players = axl.Cooperator(), axl.Defector(), axl.TitForTat() - edges = [(0, 1), (2, 0), (1, 2)] - graph = axl.graph.Graph(edges, directed=True) - mp = axl.MoranProcess(players, mode="bd", interaction_graph=graph) - axl.seed(1) - self.assertEqual(mp.death(0), 0) - axl.seed(5) - self.assertEqual(mp.death(0), 1) - axl.seed(2) - self.assertEqual(mp.death(0), 0) - - def test_birth_in_db(self): - players = axl.Cooperator(), axl.Defector(), axl.TitForTat() - mp = axl.MoranProcess(players, mode="db") - axl.seed(1) - self.assertEqual(mp.death(), 0) - self.assertEqual(mp.birth(0), 2) - - def test_birth_in_bd(self): - players = axl.Cooperator(), axl.Defector(), axl.TitForTat() - mp = 
axl.MoranProcess(players, mode="bd") - axl.seed(1) - self.assertEqual(mp.birth(), 0) - - def test_fixation_check(self): - players = axl.Cooperator(), axl.Cooperator() - mp = axl.MoranProcess(players) - self.assertTrue(mp.fixation_check()) - players = axl.Cooperator(), axl.Defector() - mp = axl.MoranProcess(players) - self.assertFalse(mp.fixation_check()) - - def test_next(self): - players = axl.Cooperator(), axl.Defector() - mp = axl.MoranProcess(players) - self.assertIsInstance(next(mp), axl.MoranProcess) - - def test_matchup_indices(self): - players = axl.Cooperator(), axl.Defector() - mp = axl.MoranProcess(players) - self.assertEqual(mp._matchup_indices(), {(0, 1)}) - - players = axl.Cooperator(), axl.Defector(), axl.TitForTat() - edges = [(0, 1), (2, 0), (1, 2)] - graph = axl.graph.Graph(edges, directed=True) - mp = axl.MoranProcess(players, mode="bd", interaction_graph=graph) - self.assertEqual(mp._matchup_indices(), {(0, 1), (1, 2), (2, 0)}) - - def test_fps(self): - self.assertEqual(fitness_proportionate_selection([0, 0, 1]), 2) - axl.seed(1) - self.assertEqual(fitness_proportionate_selection([1, 1, 1]), 0) - self.assertEqual(fitness_proportionate_selection([1, 1, 1]), 2) - - def test_exit_condition(self): - p1, p2 = axl.Cooperator(), axl.Cooperator() - mp = axl.MoranProcess((p1, p2)) - mp.play() - self.assertEqual(len(mp), 1) - - def test_two_players(self): - p1, p2 = axl.Cooperator(), axl.Defector() - axl.seed(17) - mp = axl.MoranProcess((p1, p2)) - populations = mp.play() - self.assertEqual(len(mp), 5) - self.assertEqual(len(populations), 5) - self.assertEqual(populations, mp.populations) - self.assertEqual(mp.winning_strategy_name, str(p2)) - - def test_two_prob_end(self): - p1, p2 = axl.Random(), axl.TitForTat() - axl.seed(0) - mp = axl.MoranProcess((p1, p2), prob_end=0.5) - populations = mp.play() - self.assertEqual(len(mp), 4) - self.assertEqual(len(populations), 4) - self.assertEqual(populations, mp.populations) - self.assertEqual(mp.winning_strategy_name, str(p1)) - - def test_different_game(self): - # Possible for Cooperator to become fixed when using a different game - p1, p2 = axl.Cooperator(), axl.Defector() - axl.seed(0) - game = axl.IpdGame(r=4, p=2, s=1, t=6) - mp = axl.MoranProcess((p1, p2), turns=5, game=game) - populations = mp.play() - self.assertEqual(mp.winning_strategy_name, str(p1)) - - def test_death_birth(self): - """Two player death-birth should fixate after one round.""" - p1, p2 = axl.Cooperator(), axl.Defector() - seeds = range(0, 20) - for seed in seeds: - axl.seed(seed) - mp = axl.MoranProcess((p1, p2), mode="db") - mp.play() - self.assertIsNotNone(mp.winning_strategy_name) - # Number of populations is 2: the original and the one after the first round. 
- self.assertEqual(len(mp.populations), 2) - - def test_death_birth_outcomes(self): - """Show that birth-death and death-birth can produce different - outcomes.""" - seeds = [(1, True), (23, False)] - players = [] - N = 6 - for _ in range(N // 2): - players.append(axl.Cooperator()) - players.append(axl.Defector()) - for seed, outcome in seeds: - axl.seed(seed) - mp = axl.MoranProcess(players, mode="bd") - mp.play() - winner = mp.winning_strategy_name - axl.seed(seed) - mp = axl.MoranProcess(players, mode="db") - mp.play() - winner2 = mp.winning_strategy_name - self.assertEqual((winner == winner2), outcome) - - def test_two_random_players(self): - p1, p2 = axl.Random(p=0.5), axl.Random(p=0.25) - axl.seed(0) - mp = axl.MoranProcess((p1, p2)) - populations = mp.play() - self.assertEqual(len(mp), 2) - self.assertEqual(len(populations), 2) - self.assertEqual(populations, mp.populations) - self.assertEqual(mp.winning_strategy_name, str(p2)) - - def test_two_players_with_mutation(self): - p1, p2 = axl.Cooperator(), axl.Defector() - axl.seed(5) - mp = axl.MoranProcess((p1, p2), mutation_rate=0.2, stop_on_fixation=False) - self.assertDictEqual(mp.mutation_targets, {str(p1): [p2], str(p2): [p1]}) - # Test that mutation causes the population to alternate between - # fixations - counters = [ - Counter({"Cooperator": 2}), - Counter({"Defector": 2}), - Counter({"Cooperator": 2}), - Counter({"Defector": 2}), - ] - for counter in counters: - for _ in itertools.takewhile( - lambda x: x.population_distribution() != counter, mp - ): - pass - self.assertEqual(mp.population_distribution(), counter) - - def test_play_exception(self): - p1, p2 = axl.Cooperator(), axl.Defector() - mp = axl.MoranProcess((p1, p2), mutation_rate=0.2) - with self.assertRaises(ValueError): - mp.play() - - def test_three_players(self): - players = [axl.Cooperator(), axl.Cooperator(), axl.Defector()] - axl.seed(11) - mp = axl.MoranProcess(players) - populations = mp.play() - self.assertEqual(len(mp), 7) - self.assertEqual(len(populations), 7) - self.assertEqual(populations, mp.populations) - self.assertEqual(mp.winning_strategy_name, str(axl.Defector())) - - def test_three_players_with_mutation(self): - p1 = axl.Cooperator() - p2 = axl.Random() - p3 = axl.Defector() - players = [p1, p2, p3] - mp = axl.MoranProcess(players, mutation_rate=0.2, stop_on_fixation=False) - self.assertDictEqual( - mp.mutation_targets, - {str(p1): [p3, p2], str(p2): [p1, p3], str(p3): [p1, p2]}, - ) - # Test that mutation causes the population to alternate between - # fixations - counters = [Counter({"Cooperator": 3}), Counter({"Defector": 3})] - for counter in counters: - for _ in itertools.takewhile( - lambda x: x.population_distribution() != counter, mp - ): - pass - self.assertEqual(mp.population_distribution(), counter) - - def test_four_players(self): - players = [axl.Cooperator() for _ in range(3)] - players.append(axl.Defector()) - axl.seed(29) - mp = axl.MoranProcess(players) - populations = mp.play() - self.assertEqual(len(mp), 9) - self.assertEqual(len(populations), 9) - self.assertEqual(populations, mp.populations) - self.assertEqual(mp.winning_strategy_name, str(axl.Defector())) - - @given(strategies=strategy_lists(min_size=2, max_size=4)) - @settings(max_examples=5) - - # Two specific examples relating to cloning of strategies - @example(strategies=[axl.BackStabber, axl.MindReader]) - @example(strategies=[axl.ThueMorse, axl.MindReader]) - def test_property_players(self, strategies): - """Hypothesis test that randomly checks players""" - players = 
[s() for s in strategies] - mp = axl.MoranProcess(players) - populations = mp.play() - self.assertEqual(populations, mp.populations) - self.assertIn(mp.winning_strategy_name, [str(p) for p in players]) - - def test_reset(self): - p1, p2 = axl.Cooperator(), axl.Defector() - axl.seed(45) - mp = axl.MoranProcess((p1, p2)) - mp.play() - self.assertEqual(len(mp), 4) - self.assertEqual(len(mp.score_history), 3) - mp.reset() - self.assertEqual(len(mp), 1) - self.assertEqual(mp.winning_strategy_name, None) - self.assertEqual(mp.score_history, []) - # Check that players reset - for player, initial_player in zip(mp.players, mp.initial_players): - self.assertEqual(str(player), str(initial_player)) - - def test_constant_fitness_case(self): - # Scores between an Alternator and Defector will be: (1, 6) - axl.seed(0) - players = ( - axl.Alternator(), - axl.Alternator(), - axl.Defector(), - axl.Defector(), - ) - mp = axl.MoranProcess(players, turns=2) - winners = [] - for _ in range(100): - mp.play() - winners.append(mp.winning_strategy_name) - mp.reset() - winners = Counter(winners) - self.assertEqual(winners["Defector"], 88) - - def test_cache(self): - p1, p2 = axl.Cooperator(), axl.Defector() - mp = axl.MoranProcess((p1, p2)) - mp.play() - self.assertEqual(len(mp.deterministic_cache), 1) - - # Check that can pass a pre built cache - cache = axl.DeterministicCache() - mp = axl.MoranProcess((p1, p2), deterministic_cache=cache) - self.assertEqual(cache, mp.deterministic_cache) - - def test_iter(self): - p1, p2 = axl.Cooperator(), axl.Defector() - mp = axl.MoranProcess((p1, p2)) - self.assertEqual(mp.__iter__(), mp) - - def test_population_plot(self): - # Test that can plot on a given matplotlib axes - axl.seed(15) - players = [random.choice(axl.demo_strategies)() for _ in range(5)] - mp = axl.MoranProcess(players=players, turns=30) - mp.play() - fig, axarr = plt.subplots(2, 2) - ax = axarr[1, 0] - mp.populations_plot(ax=ax) - self.assertEqual(ax.get_xlim(), (-0.8, 16.8)) - self.assertEqual(ax.get_ylim(), (0, 5.25)) - # Run without a given axis - ax = mp.populations_plot() - self.assertEqual(ax.get_xlim(), (-0.8, 16.8)) - self.assertEqual(ax.get_ylim(), (0, 5.25)) - - def test_cooperator_can_win_with_fitness_transformation(self): - axl.seed(689) - players = ( - axl.Cooperator(), - axl.Defector(), - axl.Defector(), - axl.Defector(), - ) - w = 0.95 - fitness_transformation = lambda score: 1 - w + w * score - mp = axl.MoranProcess( - players, turns=10, fitness_transformation=fitness_transformation - ) - populations = mp.play() - self.assertEqual(mp.winning_strategy_name, "Cooperator") - - def test_atomic_mutation_fsm(self): - axl.seed(12) - players = [axl.EvolvableFSMPlayer(num_states=2, initial_state=1, initial_action=C) - for _ in range(5)] - mp = axl.MoranProcess(players, turns=10, mutation_method="atomic") - population = mp.play() - self.assertEqual( - mp.winning_strategy_name, - 'EvolvableFSMPlayer: ((0, C, 1, D), (0, D, 1, C), (1, C, 0, D), (1, D, 1, C)), 1, C, 2, 0.1') - self.assertEqual(len(mp.populations), 31) - self.assertTrue(mp.fixated) - - def test_atomic_mutation_cycler(self): - axl.seed(10) - cycle_length = 5 - players = [axl.EvolvableCycler(cycle_length=cycle_length) - for _ in range(5)] - mp = axl.MoranProcess(players, turns=10, mutation_method="atomic") - population = mp.play() - self.assertEqual(mp.winning_strategy_name, 'EvolvableCycler: CDCDD, 5, 0.2, 1') - self.assertEqual(len(mp.populations), 19) - self.assertTrue(mp.fixated) - - def test_mutation_method_exceptions(self): - axl.seed(10) 
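# mutation_method="atomic" delegates mutation to the player's own mutate(), so
# it needs EvolvablePlayer subclasses; an invalid method name raises ValueError
# and plain players raise TypeError mid-process, as this test checks below.
# A sketch of the supported path, mirroring test_atomic_mutation_cycler above:
import axelrod as axl

axl.seed(10)
players = [axl.EvolvableCycler(cycle_length=5) for _ in range(5)]
mp = axl.MoranProcess(players, turns=10, mutation_method="atomic")
mp.play()  # each birth clones the parent with an atomic mutation
print(mp.winning_strategy_name)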
- cycle_length = 5 - players = [axl.EvolvableCycler(cycle_length=cycle_length) - for _ in range(5)] - with self.assertRaises(ValueError): - axl.MoranProcess(players, turns=10, mutation_method="random") - - axl.seed(0) - players = [axl.Cycler(cycle="CD" * random.randint(2, 10)) - for _ in range(10)] - - mp = axl.MoranProcess(players, turns=10, mutation_method="atomic") - with self.assertRaises(TypeError): - for _ in range(10): - next(mp) - - -class GraphMoranProcess(unittest.TestCase): - def test_complete(self): - """A complete graph should produce the same results as the default - case.""" - seeds = range(0, 5) - players = [] - N = 6 - graph = axl.graph.complete_graph(N) - for _ in range(N // 2): - players.append(axl.Cooperator()) - players.append(axl.Defector()) - for seed in seeds: - axl.seed(seed) - mp = axl.MoranProcess(players) - mp.play() - winner = mp.winning_strategy_name - axl.seed(seed) - mp = axl.MoranProcess(players, interaction_graph=graph) - mp.play() - winner2 = mp.winning_strategy_name - self.assertEqual(winner, winner2) - - def test_cycle(self): - """A cycle should sometimes produce different results vs. the default - case.""" - seeds = [(1, True), (8, False)] - players = [] - N = 6 - graph = axl.graph.cycle(N) - for _ in range(N // 2): - players.append(axl.Cooperator()) - for _ in range(N // 2): - players.append(axl.Defector()) - for seed, outcome in seeds: - axl.seed(seed) - mp = axl.MoranProcess(players) - mp.play() - winner = mp.winning_strategy_name - axl.seed(seed) - mp = axl.MoranProcess(players, interaction_graph=graph) - mp.play() - winner2 = mp.winning_strategy_name - self.assertEqual((winner == winner2), outcome) - - def test_asymmetry(self): - """Asymmetry in interaction and reproduction should sometimes - produce different results.""" - seeds = [(1, True), (21, False)] - players = [] - N = 6 - graph1 = axl.graph.cycle(N) - graph2 = axl.graph.complete_graph(N) - for _ in range(N // 2): - players.append(axl.Cooperator()) - for _ in range(N // 2): - players.append(axl.Defector()) - for seed, outcome in seeds: - axl.seed(seed) - mp = axl.MoranProcess( - players, interaction_graph=graph1, reproduction_graph=graph2 - ) - mp.play() - winner = mp.winning_strategy_name - axl.seed(seed) - mp = axl.MoranProcess( - players, interaction_graph=graph2, reproduction_graph=graph1 - ) - mp.play() - winner2 = mp.winning_strategy_name - self.assertEqual((winner == winner2), outcome) - - def test_cycle_death_birth(self): - """Test that death-birth can have different outcomes in the graph - case.""" - seeds = [(1, True), (5, False)] - players = [] - N = 6 - graph = axl.graph.cycle(N) - for _ in range(N // 2): - players.append(axl.Cooperator()) - for _ in range(N // 2): - players.append(axl.Defector()) - for seed, outcome in seeds: - axl.seed(seed) - mp = axl.MoranProcess(players, interaction_graph=graph, mode="bd") - mp.play() - winner = mp.winning_strategy_name - axl.seed(seed) - mp = axl.MoranProcess(players, interaction_graph=graph, mode="db") - mp.play() - winner2 = mp.winning_strategy_name - self.assertEqual((winner == winner2), outcome) - - -class TestApproximateMoranProcess(unittest.TestCase): - """A suite of tests for the ApproximateMoranProcess""" - - players = [axl.Cooperator(), axl.Defector()] - cached_outcomes = {} - - counter = Counter([(0, 5)]) - pdf = axl.Pdf(counter) - cached_outcomes[("Cooperator", "Defector")] = pdf - - counter = Counter([(3, 3)]) - pdf = axl.Pdf(counter) - cached_outcomes[("Cooperator", "Cooperator")] = pdf - - counter = Counter([(1, 1)]) - pdf = 
axl.Pdf(counter)
-    cached_outcomes[("Defector", "Defector")] = pdf
-
-    amp = axl.ApproximateMoranProcess(players, cached_outcomes)
-
-    def test_init(self):
-        """Test the initialisation process"""
-        self.assertEqual(
-            set(self.amp.cached_outcomes.keys()),
-            {("Cooperator", "Defector"), ("Cooperator", "Cooperator"), ("Defector", "Defector")},
-        )
-        self.assertEqual(self.amp.players, self.players)
-        self.assertEqual(self.amp.turns, 0)
-        self.assertEqual(self.amp.noise, 0)
-
-    def test_score_all(self):
-        """Test the score_all function of the Moran process"""
-        scores = self.amp.score_all()
-        self.assertEqual(scores, [0, 5])
-        scores = self.amp.score_all()
-        self.assertEqual(scores, [0, 5])
-        scores = self.amp.score_all()
-        self.assertEqual(scores, [0, 5])
-
-    def test_getting_scores_from_cache(self):
-        """Test that reading scores from the cache works, independently of the
-        ordering of the player names."""
-        scores = self.amp._get_scores_from_cache(("Cooperator", "Defector"))
-        self.assertEqual(scores, (0, 5))
-        scores = self.amp._get_scores_from_cache(("Defector", "Cooperator"))
-        self.assertEqual(scores, (5, 0))
diff --git a/axelrod/ipd/tests/unit/test_pickling.py b/axelrod/ipd/tests/unit/test_pickling.py
deleted file mode 100644
index b588c1b81..000000000
--- a/axelrod/ipd/tests/unit/test_pickling.py
+++ /dev/null
@@ -1,394 +0,0 @@
-import unittest
-import pickle
-import random
-
-import axelrod as axl
-
-C, D = axl.Action.C, axl.Action.D
-
-
-# A set of classes to test pickling.
-
-# First set: special cases
-
-PointerToWrappedStrategy = axl.ipd.strategy_transformers.FlipTransformer()(axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator))
-
-
-class MyDefector(axl.IpdPlayer):
-    def __init__(self):
-        super(MyDefector, self).__init__()
-
-    def strategy(self, opponent):
-        return D
-
-
-PointerToWrappedClassNotInStrategies = axl.ipd.strategy_transformers.FlipTransformer()(
-    axl.ipd.strategy_transformers.FlipTransformer()(MyDefector)
-)
-
-
-@axl.ipd.strategy_transformers.InitialTransformer((D, C, D), name_prefix=None)
-@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None)
-@axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)
-@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None)
-class InterspersedDualTransformersNamePrefixAbsent(axl.Cooperator):
-    pass
-
-
-@axl.ipd.strategy_transformers.IdentityTransformer((D, D, C))
-@axl.ipd.strategy_transformers.DualTransformer()
-@axl.ipd.strategy_transformers.FlipTransformer()
-@axl.ipd.strategy_transformers.DualTransformer()
-class InterspersedDualTransformersNamePrefixPresent(axl.Cooperator):
-    pass
-
-
-@axl.ipd.strategy_transformers.FlipTransformer()
-class MyCooperator(axl.IpdPlayer):
-    def strategy(self, opponent):
-        return C
-
-
-@axl.ipd.strategy_transformers.FlipTransformer()
-@axl.ipd.strategy_transformers.FlipTransformer()
-class DoubleFlip(axl.Cooperator):
-    pass
-
-
-@axl.ipd.strategy_transformers.FlipTransformer()
-class SingleFlip(axl.Cooperator):
-    pass
-
-
-# Second set: All the transformers
-
-
-@axl.ipd.strategy_transformers.ApologyTransformer([D], [C], name_prefix=None)
-class Apology(axl.Cooperator):
-    pass
-
-
-@axl.ipd.strategy_transformers.DeadlockBreakingTransformer(name_prefix=None)
-class DeadlockBreaking(axl.Cooperator):
-    pass
-
-
-@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None)
-class Dual(axl.Cooperator):
-    pass
-
-
-@axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)
-class Flip(axl.Cooperator):
-    pass
-
-
-@axl.ipd.strategy_transformers.FinalTransformer((D, D),
name_prefix=None) -class Final(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.ForgiverTransformer(0.2, name_prefix=None) -class Forgiver(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.GrudgeTransformer(3, name_prefix=None) -class Grudge(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.InitialTransformer((C, D), name_prefix=None) -class Initial(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.JossAnnTransformer((0.2, 0.2), name_prefix=None) -class JossAnn(axl.Cooperator): - pass - - -strategies = [axl.Grudger, axl.TitForTat] -probability = [0.2, 0.3] - - -@axl.ipd.strategy_transformers.MixedTransformer(probability, strategies, name_prefix=None) -class Mixed(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.NiceTransformer(name_prefix=None) -class Nice(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.NoisyTransformer(0.2, name_prefix=None) -class Noisy(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.RetaliationTransformer(3, name_prefix=None) -class Retaliation(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.RetaliateUntilApologyTransformer(name_prefix=None) -class RetaliateUntilApology(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.TrackHistoryTransformer(name_prefix=None) -class TrackHistory(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.IdentityTransformer() -class Identity(axl.Cooperator): - pass - - -@axl.ipd.strategy_transformers.IdentityTransformer(name_prefix=None) -class TransformedThue(axl.ThueMorse): - pass - - -class MetaThue(axl.MetaPlayer): - name = "MetaThue" - - def __init__(self): - team = [axl.ThueMorse] - super().__init__(team=team) - - -TransformedMetaThue = axl.ipd.strategy_transformers.IdentityTransformer(name_prefix=None)(MetaThue) - - -transformed_no_prefix = [ - Apology, - DeadlockBreaking, - Flip, - Final, - Forgiver, - Grudge, - Initial, - JossAnn, - Mixed, - Nice, - Noisy, - Retaliation, - RetaliateUntilApology, - TrackHistory, - Dual, - Identity, -] - -transformer_instances = [ - axl.ipd.strategy_transformers.ApologyTransformer([D], [C]), - axl.ipd.strategy_transformers.DeadlockBreakingTransformer(), - axl.ipd.strategy_transformers.DualTransformer(), - axl.ipd.strategy_transformers.FlipTransformer(), - axl.ipd.strategy_transformers.FinalTransformer((D, D)), - axl.ipd.strategy_transformers.ForgiverTransformer(0.2), - axl.ipd.strategy_transformers.GrudgeTransformer(3), - axl.ipd.strategy_transformers.InitialTransformer((C, D)), - axl.ipd.strategy_transformers.JossAnnTransformer((0.2, 0.6)), - axl.ipd.strategy_transformers.MixedTransformer(probability, strategies), - axl.ipd.strategy_transformers.NiceTransformer(), - axl.ipd.strategy_transformers.NoisyTransformer(0.2), - axl.ipd.strategy_transformers.RetaliationTransformer(3), - axl.ipd.strategy_transformers.RetaliateUntilApologyTransformer(), - axl.ipd.strategy_transformers.TrackHistoryTransformer(), - axl.ipd.strategy_transformers.IdentityTransformer(), -] - - -class TestPickle(unittest.TestCase): - def assert_equals_instance_from_pickling(self, original_instance): - clone = pickle.loads(pickle.dumps(original_instance)) - self.assertEqual(clone, original_instance) - - def assert_original_equals_pickled(self, player_, turns=10): - opponents = (axl.Defector, axl.Cooperator, axl.Random, axl.CyclerCCCDCD) - for opponent_class in opponents: - # Check that player and copy play the same way. 
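The assert_original_equals_pickled helper above hinges on two properties checked throughout this file: a pickled clone compares equal to the original player, and it plays identically under the same seed. A minimal sketch of that round trip, using only calls that already appear in these tests:

import pickle

import axelrod as axl

player = axl.Cycler("DDCCDD")
clone = pickle.loads(pickle.dumps(player))
assert clone == player  # equality survives serialisation

axl.seed(0)
result_1 = axl.IpdMatch((player, axl.Defector()), turns=10).play()
axl.seed(0)
result_2 = axl.IpdMatch((clone, axl.Defector()), turns=10).play()
assert result_1 == result_2  # identical play under the same seed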
- player = player_.clone() - clone = pickle.loads(pickle.dumps(player)) - clone = clone.clone() - - opponent_1 = opponent_class() - opponent_2 = opponent_class() - - axl.seed(0) - match_1 = axl.IpdMatch((player, opponent_1), turns=turns) - result_1 = match_1.play() - - axl.seed(0) - match_2 = axl.IpdMatch((clone, opponent_2), turns=turns) - result_2 = match_2.play() - - self.assertEqual(result_1, result_2) - - # Confirm that mutated player can be pickled correctly. - self.assert_equals_instance_from_pickling(player) - - def test_parameterized_player(self): - player = axl.Cycler("DDCCDD") - self.assert_original_equals_pickled(player) - - def test_sequence_player(self): - inline_transformed_thue = axl.ipd.strategy_transformers.IdentityTransformer(name_prefix="Transformed")(axl.ThueMorse)() - for player in [axl.ThueMorse(), axl.ThueMorseInverse(), MetaThue(), TransformedMetaThue(), - inline_transformed_thue, TransformedThue(), - ]: - self.assert_equals_instance_from_pickling(player) - opponents = (axl.Defector, axl.Cooperator, axl.Random, axl.CyclerCCCDCD) - for opponent_class in opponents: - axl.seed(10) - player.reset() - opponent = opponent_class() - match_1 = axl.IpdMatch((player, opponent), turns=20) - _ = match_1.play() - self.assert_equals_instance_from_pickling(player) - - def test_final_transformer_called(self): - player = axl.Alexei() - copy = pickle.loads(pickle.dumps(player)) - match = axl.IpdMatch((player, copy), turns=3) - results = match.play() - self.assertEqual(results, [(C, C), (C, C), (D, D)]) - - def test_pickling_all_strategies(self): - for s in random.sample(axl.strategies, 50): - with self.subTest(strategy=s.name): - self.assert_original_equals_pickled(s()) - - def test_pickling_all_transformers_as_decorated_classes(self): - for s in transformed_no_prefix: - with self.subTest(strategy=s.name): - player = s() - self.assert_original_equals_pickled(player) - - def test_pickling_all_transformers_as_instance_called_on_a_class(self): - for transformer in transformer_instances: - with self.subTest(transformer=transformer): - player = transformer(axl.Cooperator)() - self.assert_original_equals_pickled(player) - - def test_created_on_the_spot_multiple_transformers(self): - player_class = axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator) - player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) - player = axl.ipd.strategy_transformers.FinalTransformer((C, D))(player_class)() - - self.assert_original_equals_pickled(player) - - def test_dual_transformer_regression_test(self): - """DualTransformer has failed when there were multiple DualTransformers. 
- It has also failed when DualTransformer was not the outermost - transformer or when other transformers were between multiple - DualTransformers.""" - player = InterspersedDualTransformersNamePrefixAbsent() - self.assert_original_equals_pickled(player) - - player = InterspersedDualTransformersNamePrefixPresent() - self.assert_original_equals_pickled(player) - - player_class = axl.WinStayLoseShift - player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) - player_class = axl.ipd.strategy_transformers.InitialTransformer((C, D))(player_class) - player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) - player_class = axl.ipd.strategy_transformers.TrackHistoryTransformer()(player_class) - - interspersed_dual_transformers = player_class() - - self.assert_original_equals_pickled(interspersed_dual_transformers) - - def test_class_and_instance_name_different_single_flip(self): - player = SingleFlip() - self.assertEqual(player.__class__.__name__, "FlippedSingleFlip") - - self.assert_original_equals_pickled(player) - - def test_class_and_instance_name_different_double_flip(self): - player = DoubleFlip() - self.assertEqual(player.__class__.__name__, "FlippedFlippedDoubleFlip") - - self.assert_original_equals_pickled(player) - - def test_class_and_instance_name_different_built_from_player_class(self): - player = MyCooperator() - class_names = [class_.__name__ for class_ in MyCooperator.mro()] - self.assertEqual( - class_names, ["FlippedMyCooperator", "MyCooperator", "IpdPlayer", "object"] - ) - - self.assert_original_equals_pickled(player) - - def test_pointer_to_class_derived_from_strategy(self): - player = PointerToWrappedStrategy() - - class_names = [class_.__name__ for class_ in player.__class__.mro()] - self.assertEqual( - class_names, - [ - "FlippedFlippedCooperator", - "FlippedCooperator", - "Cooperator", - "IpdPlayer", - "object", - ], - ) - - self.assert_original_equals_pickled(player) - - def test_pointer_to_class_derived_from_IpdPlayer(self): - player = PointerToWrappedClassNotInStrategies() - - class_names = [class_.__name__ for class_ in player.__class__.mro()] - self.assertEqual( - class_names, - [ - "FlippedFlippedMyDefector", - "FlippedMyDefector", - "MyDefector", - "IpdPlayer", - "object", - ], - ) - - self.assert_original_equals_pickled(player) - - def test_local_class_unpicklable(self): - """An unpickle-able AND transformed class will not raise an error until - it is un-pickled. 
This is different from the original class that raises - an error when it is pickled.""" - - class LocalCooperator(axl.Cooperator): - pass - - un_transformed = LocalCooperator() - - self.assertRaises(AttributeError, pickle.dumps, un_transformed) - - player = axl.ipd.strategy_transformers.FlipTransformer()(LocalCooperator)() - pickled = pickle.dumps(player) - self.assertRaises(AttributeError, pickle.loads, pickled) - - def test_with_various_name_prefixes(self): - no_prefix = Flip() - self.assertEqual(no_prefix.__class__.__name__, "Flip") - self.assert_original_equals_pickled(no_prefix) - - default_prefix = axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator)() - self.assertEqual(default_prefix.__class__.__name__, "FlippedCooperator") - self.assert_original_equals_pickled(default_prefix) - - fliptastic = axl.ipd.strategy_transformers.FlipTransformer(name_prefix="Fliptastic") - new_prefix = fliptastic(axl.Cooperator)() - self.assertEqual(new_prefix.__class__.__name__, "FliptasticCooperator") - self.assert_original_equals_pickled(new_prefix) - - def test_dynamic_class_no_name_prefix(self): - player = axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)(axl.Cooperator)() - - self.assertEqual(player.__class__.__name__, "Cooperator") - self.assert_original_equals_pickled(player) diff --git a/axelrod/ipd/tests/unit/test_plot.py b/axelrod/ipd/tests/unit/test_plot.py deleted file mode 100644 index 14287e869..000000000 --- a/axelrod/ipd/tests/unit/test_plot.py +++ /dev/null @@ -1,257 +0,0 @@ -import unittest - -import tempfile -import matplotlib -import matplotlib.pyplot as plt -import pathlib - -from numpy import mean - -import axelrod as axl -from axelrod.ipd.load_data_ import axl_filename - - -class TestPlot(unittest.TestCase): - @classmethod - def setUpClass(cls): - path = pathlib.Path("../test_outputs/test_results.csv") - cls.filename = axl_filename(path) - - cls.players = [axl.Alternator(), axl.TitForTat(), axl.Defector()] - cls.repetitions = 3 - cls.turns = 5 - - cls.test_result_set = axl.ResultSet( - cls.filename, cls.players, cls.repetitions, progress_bar=False - ) - - cls.test_result_set = axl.ResultSet( - cls.filename, cls.players, cls.repetitions, progress_bar=False - ) - cls.expected_boxplot_dataset = [ - [(17 / 5 + 9 / 5) / 2 for _ in range(3)], - [(13 / 5 + 4 / 5) / 2 for _ in range(3)], - [3 / 2 for _ in range(3)], - ] - cls.expected_boxplot_xticks_locations = [1, 2, 3, 4] - cls.expected_boxplot_xticks_labels = ["Defector", "Tit For Tat", "Alternator"] - - cls.expected_lengthplot_dataset = [ - [cls.turns for _ in range(3)], - [cls.turns for _ in range(3)], - [cls.turns for _ in range(3)], - ] - - cls.expected_payoff_dataset = [ - [0, mean([9 / 5 for _ in range(3)]), mean([17 / 5 for _ in range(3)])], - [mean([4 / 5 for _ in range(3)]), 0, mean([13 / 5 for _ in range(3)])], - [mean([2 / 5 for _ in range(3)]), mean([13 / 5 for _ in range(3)]), 0], - ] - cls.expected_winplot_dataset = ( - [[2, 2, 2], [0, 0, 0], [0, 0, 0]], - ["Defector", "Tit For Tat", "Alternator"], - ) - - cls.expected_sdvplot_dataset = ( - [ - [3, 3, 3, 1, 1, 1, 0, 0, 0], - [0, 0, 0, 0, 0, 0, -1, -1, -1], - [0, 0, 0, 0, 0, 0, -3, -3, -3], - ], - ["Defector", "Tit For Tat", "Alternator"], - ) - - def test_default_cmap(self): - cmap = axl.ipd.plot.default_cmap("0.0") - self.assertEqual(cmap, "YlGnBu") - - cmap = axl.ipd.plot.default_cmap("1.3alpha") - self.assertEqual(cmap, "YlGnBu") - - cmap = axl.ipd.plot.default_cmap("1.4.99") - self.assertEqual(cmap, "YlGnBu") - - cmap = 
axl.ipd.plot.default_cmap("1.4") - self.assertEqual(cmap, "YlGnBu") - - cmap = axl.ipd.plot.default_cmap() - self.assertEqual(cmap, "viridis") - - cmap = axl.ipd.plot.default_cmap("1.5") - self.assertEqual(cmap, "viridis") - - cmap = axl.ipd.plot.default_cmap("1.5beta") - self.assertEqual(cmap, "viridis") - - cmap = axl.ipd.plot.default_cmap("1.7") - self.assertEqual(cmap, "viridis") - - cmap = axl.ipd.plot.default_cmap("2.0") - self.assertEqual(cmap, "viridis") - - def test_init(self): - plot = axl.Plot(self.test_result_set) - self.assertEqual(plot.result_set, self.test_result_set) - - def test_init_from_resulsetfromfile(self): - tmp_file = tempfile.NamedTemporaryFile(mode="w", delete=False) - players = [axl.Cooperator(), axl.TitForTat(), axl.Defector()] - tournament = axl.IpdTournament(players=players, turns=2, repetitions=2) - tournament.play(filename=tmp_file.name, progress_bar=False) - tmp_file.close() - rs = axl.ResultSet(tmp_file.name, players, 2, progress_bar=False) - - plot = axl.Plot(rs) - self.assertEqual(plot.result_set, rs) - - def test_boxplot_dataset(self): - plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._boxplot_dataset, self.expected_boxplot_dataset) - - def test_boxplot_xticks_locations(self): - plot = axl.Plot(self.test_result_set) - self.assertEqual( - plot._boxplot_xticks_locations, self.expected_boxplot_xticks_locations - ) - - def test_boxplot_xticks_labels(self): - plot = axl.Plot(self.test_result_set) - self.assertEqual( - plot._boxplot_xticks_labels, self.expected_boxplot_xticks_labels - ) - - def test_boxplot(self): - plot = axl.Plot(self.test_result_set) - fig = plot.boxplot() - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - - def test_boxplot_with_passed_axes(self): - # Test that can plot on a given matplotlib axes - fig, axarr = plt.subplots(2, 2) - self.assertEqual(axarr[0, 1].get_ylim(), (0, 1)) - plot = axl.Plot(self.test_result_set) - plot.boxplot(ax=axarr[0, 1]) - self.assertNotEqual(axarr[0, 1].get_ylim(), (0, 1)) - - # Plot on another axes with a title - plot.boxplot(title="dummy title", ax=axarr[1, 0]) - self.assertNotEqual(axarr[1, 0].get_ylim(), (0, 1)) - self.assertEqual(axarr[1, 0].get_title(), "dummy title") - - def test_boxplot_with_title(self): - plot = axl.Plot(self.test_result_set) - fig = plot.boxplot(title="title") - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - - def test_winplot_dataset(self): - plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._winplot_dataset, self.expected_winplot_dataset) - - def test_winplot(self): - plot = axl.Plot(self.test_result_set) - fig = plot.winplot() - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - - def test_sdvplot_dataset(self): - plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._sdv_plot_dataset, self.expected_sdvplot_dataset) - - def test_sdvplot(self): - plot = axl.Plot(self.test_result_set) - fig = plot.sdvplot() - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - - def test_lengthplot_dataset(self): - plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._winplot_dataset, self.expected_winplot_dataset) - - def test_lengthplot(self): - plot = axl.Plot(self.test_result_set) - fig = plot.lengthplot() - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - - def test_pdplot(self): - plot = axl.Plot(self.test_result_set) - fig = plot.pdplot() - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - 
plt.close(fig) - - def test_payoff_dataset(self): - plot = axl.Plot(self.test_result_set) - self.assertSequenceEqual(plot._payoff_dataset, self.expected_payoff_dataset) - - def test_payoff(self): - plot = axl.Plot(self.test_result_set) - fig = plot.payoff() - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - - def test_payoff_with_title(self): - plot = axl.Plot(self.test_result_set) - fig = plot.payoff(title="dummy title") - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - - def test_payoff_with_passed_axes(self): - plot = axl.Plot(self.test_result_set) - fig, axarr = plt.subplots(2, 2) - self.assertEqual(axarr[0, 1].get_xlim(), (0, 1)) - - plot.payoff(ax=axarr[0, 1]) - self.assertNotEqual(axarr[0, 1].get_xlim(), (0, 1)) - - # Plot on another axes with a title - plot.payoff(title="dummy title", ax=axarr[1, 0]) - self.assertNotEqual(axarr[1, 0].get_xlim(), (0, 1)) - self.assertEqual(axarr[1, 0].get_xlabel(), "dummy title") - plt.close(fig) - - def test_stackplot(self): - eco = axl.Ecosystem(self.test_result_set) - eco.reproduce(100) - - plot = axl.Plot(self.test_result_set) - fig = plot.stackplot(eco) - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - fig = plot.stackplot(eco, title="dummy title") - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - fig = plot.stackplot(eco, logscale=False) - self.assertIsInstance(fig, matplotlib.pyplot.Figure) - plt.close(fig) - - def test_stackplot_with_passed_axes(self): - # Test that can plot on a given matplotlib axes - eco = axl.Ecosystem(self.test_result_set) - eco.reproduce(100) - plot = axl.Plot(self.test_result_set) - - fig, axarr = plt.subplots(2, 2) - self.assertEqual(axarr[0, 1].get_xlim(), (0, 1)) - - plot.stackplot(eco, ax=axarr[0, 1]) - self.assertNotEqual(axarr[0, 1].get_xlim(), (0, 1)) - - # Plot on another axes with a title - plot.stackplot(eco, title="dummy title", ax=axarr[1, 0]) - self.assertNotEqual(axarr[1, 0].get_xlim(), (0, 1)) - self.assertEqual(axarr[1, 0].get_title(), "dummy title") - plt.close(fig) - - def test_all_plots(self): - plot = axl.Plot(self.test_result_set) - # Test that this method does not crash. 
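The passed-axes tests above all follow one pattern: create a figure grid, hand one panel to a Plot method, and check that the panel was drawn on. A minimal sketch of that pattern, assuming a result set built the same way as this file's fixtures:

import matplotlib.pyplot as plt

import axelrod as axl

players = [axl.Alternator(), axl.TitForTat(), axl.Defector()]
results = axl.IpdTournament(players, turns=5, repetitions=3).play(progress_bar=False)
plot = axl.Plot(results)

fig, axarr = plt.subplots(2, 2)
plot.boxplot(ax=axarr[0, 1])                      # draw into one panel of the grid
plot.payoff(title="dummy title", ax=axarr[1, 0])  # title lands on that panel
plt.close(fig)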
- self.assertIsNone( - plot.save_all_plots(prefix="../test_outputs/", progress_bar=False) - ) - self.assertIsNone( - plot.save_all_plots( - prefix="../test_outputs/", title_prefix="A prefix", progress_bar=True - ) - ) diff --git a/axelrod/ipd/tests/unit/test_property.py b/axelrod/ipd/tests/unit/test_property.py deleted file mode 100644 index ac5f16d94..000000000 --- a/axelrod/ipd/tests/unit/test_property.py +++ /dev/null @@ -1,232 +0,0 @@ -import unittest - -import axelrod as axl -from axelrod.ipd.tests.property import ( - games, - matches, - prob_end_spatial_tournaments, - prob_end_tournaments, - spatial_tournaments, - strategy_lists, - tournaments, -) - -from hypothesis import given, settings - -stochastic_strategies = [s for s in axl.strategies if axl.Classifiers["stochastic"](s())] - - -class TestStrategyList(unittest.TestCase): - def test_call(self): - strategies = strategy_lists().example() - self.assertIsInstance(strategies, list) - for p in strategies: - self.assertIsInstance(p(), axl.IpdPlayer) - - @given(strategies=strategy_lists(min_size=1, max_size=50)) - @settings(max_examples=5) - def test_decorator(self, strategies): - self.assertIsInstance(strategies, list) - self.assertGreaterEqual(len(strategies), 1) - self.assertLessEqual(len(strategies), 50) - for strategy in strategies: - self.assertIsInstance(strategy(), axl.IpdPlayer) - - @given(strategies=strategy_lists(strategies=axl.basic_strategies)) - @settings(max_examples=5) - def test_decorator_with_given_strategies(self, strategies): - self.assertIsInstance(strategies, list) - basic_player_names = [str(s()) for s in axl.basic_strategies] - for strategy in strategies: - player = strategy() - self.assertIsInstance(player, axl.IpdPlayer) - self.assertIn(str(player), basic_player_names) - - -class TestMatch(unittest.TestCase): - """ - Test that the composite method works - """ - - def test_call(self): - match = matches().example() - self.assertIsInstance(match, axl.IpdMatch) - - @given(match=matches(min_turns=10, max_turns=50, min_noise=0, max_noise=1)) - @settings(max_examples=5) - def test_decorator(self, match): - self.assertIsInstance(match, axl.IpdMatch) - self.assertGreaterEqual(len(match), 10) - self.assertLessEqual(len(match), 50) - self.assertGreaterEqual(match.noise, 0) - self.assertLessEqual(match.noise, 1) - - @given(match=matches(min_turns=10, max_turns=50, min_noise=0, max_noise=0)) - @settings(max_examples=5) - def test_decorator_with_no_noise(self, match): - self.assertIsInstance(match, axl.IpdMatch) - self.assertGreaterEqual(len(match), 10) - self.assertLessEqual(len(match), 50) - self.assertEqual(match.noise, 0) - - -class TestTournament(unittest.TestCase): - def test_call(self): - tournament = tournaments().example() - self.assertIsInstance(tournament, axl.IpdTournament) - - @given( - tournament=tournaments( - min_turns=2, - max_turns=50, - min_noise=0, - max_noise=1, - min_repetitions=2, - max_repetitions=50, - max_size=3, - ) - ) - @settings(max_examples=5) - def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.IpdTournament) - self.assertLessEqual(tournament.turns, 50) - self.assertGreaterEqual(tournament.turns, 2) - self.assertLessEqual(tournament.noise, 1) - self.assertGreaterEqual(tournament.noise, 0) - self.assertLessEqual(tournament.repetitions, 50) - self.assertGreaterEqual(tournament.repetitions, 2) - - @given(tournament=tournaments(strategies=axl.basic_strategies, max_size=3)) - @settings(max_examples=5) - def test_decorator_with_given_strategies(self, tournament): - 
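Each test class in this file leans on the same hypothesis idiom: @given draws examples from a composite strategy while @settings(max_examples=5) bounds the search. A minimal standalone sketch, assuming only strategy_lists from the property module imported above:

from hypothesis import given, settings

import axelrod as axl
from axelrod.ipd.tests.property import strategy_lists


@given(strategies=strategy_lists(min_size=1, max_size=8))
@settings(max_examples=5)
def test_drawn_strategies_are_players(strategies):
    # Every drawn element is a strategy class; instantiating one yields a player.
    assert all(isinstance(s(), axl.IpdPlayer) for s in strategies)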
self.assertIsInstance(tournament, axl.IpdTournament) - basic_player_names = [str(s()) for s in axl.basic_strategies] - for p in tournament.players: - self.assertIn(str(p), basic_player_names) - - -class TestProbEndTournament(unittest.TestCase): - def test_call(self): - tournament = tournaments().example() - self.assertIsInstance(tournament, axl.IpdTournament) - - @given( - tournament=prob_end_tournaments( - min_prob_end=0, - max_prob_end=1, - min_noise=0, - max_noise=1, - min_repetitions=2, - max_repetitions=50, - max_size=3, - ) - ) - @settings(max_examples=5) - def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.IpdTournament) - self.assertLessEqual(tournament.prob_end, 1) - self.assertGreaterEqual(tournament.prob_end, 0) - self.assertLessEqual(tournament.noise, 1) - self.assertGreaterEqual(tournament.noise, 0) - self.assertLessEqual(tournament.repetitions, 50) - self.assertGreaterEqual(tournament.repetitions, 2) - - @given(tournament=prob_end_tournaments(strategies=axl.basic_strategies, max_size=3)) - @settings(max_examples=5) - def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.IpdTournament) - basic_player_names = [str(s()) for s in axl.basic_strategies] - for p in tournament.players: - self.assertIn(str(p), basic_player_names) - - -class TestSpatialTournament(unittest.TestCase): - def test_call(self): - tournament = spatial_tournaments().example() - self.assertIsInstance(tournament, axl.IpdTournament) - - @given( - tournament=spatial_tournaments( - min_turns=2, - max_turns=50, - min_noise=0, - max_noise=1, - min_repetitions=2, - max_repetitions=50, - max_size=3, - ) - ) - @settings(max_examples=5) - def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.IpdTournament) - self.assertLessEqual(tournament.turns, 50) - self.assertGreaterEqual(tournament.turns, 2) - self.assertLessEqual(tournament.noise, 1) - self.assertGreaterEqual(tournament.noise, 0) - self.assertLessEqual(tournament.repetitions, 50) - self.assertGreaterEqual(tournament.repetitions, 2) - - @given(tournament=spatial_tournaments(strategies=axl.basic_strategies, max_size=3)) - @settings(max_examples=5) - def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.IpdTournament) - basic_player_names = [str(s()) for s in axl.basic_strategies] - for p in tournament.players: - self.assertIn(str(p), basic_player_names) - - -class TestProbEndSpatialTournament(unittest.TestCase): - def test_call(self): - tournament = prob_end_spatial_tournaments().example() - self.assertIsInstance(tournament, axl.IpdTournament) - - @given( - tournament=prob_end_spatial_tournaments( - min_prob_end=0, - max_prob_end=1, - min_noise=0, - max_noise=1, - min_repetitions=2, - max_repetitions=50, - max_size=3, - ) - ) - @settings(max_examples=5) - def test_decorator(self, tournament): - self.assertIsInstance(tournament, axl.IpdTournament) - self.assertLessEqual(tournament.prob_end, 1) - self.assertGreaterEqual(tournament.prob_end, 0) - self.assertLessEqual(tournament.noise, 1) - self.assertGreaterEqual(tournament.noise, 0) - self.assertLessEqual(tournament.repetitions, 50) - self.assertGreaterEqual(tournament.repetitions, 2) - - @given( - tournament=prob_end_spatial_tournaments( - strategies=axl.basic_strategies, max_size=3 - ) - ) - @settings(max_examples=5) - def test_decorator_with_given_strategies(self, tournament): - self.assertIsInstance(tournament, axl.IpdTournament) - basic_player_names = [str(s()) for s in 
axl.basic_strategies] - for p in tournament.players: - self.assertIn(str(p), basic_player_names) - - -class TestGame(unittest.TestCase): - def test_call(self): - game = games().example() - self.assertIsInstance(game, axl.IpdGame) - - @given(game=games()) - @settings(max_examples=5) - def test_decorator(self, game): - self.assertIsInstance(game, axl.IpdGame) - r, p, s, t = game.RPST() - self.assertTrue((2 * r) > (t + s) and (t > r > p > s)) - - @given(game=games(prisoners_dilemma=False)) - @settings(max_examples=5) - def test_decorator_unconstrained(self, game): - self.assertIsInstance(game, axl.IpdGame) diff --git a/axelrod/ipd/tests/unit/test_random_.py b/axelrod/ipd/tests/unit/test_random_.py deleted file mode 100644 index fdb1d361f..000000000 --- a/axelrod/ipd/tests/unit/test_random_.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Tests for the random functions.""" - -import unittest - -import random - -from collections import Counter - -import numpy - -import axelrod as axl - -C, D = axl.Action.C, axl.Action.D - - -class TestRandom_(unittest.TestCase): - def test_return_values(self): - self.assertEqual(axl.random_choice(1), C) - self.assertEqual(axl.random_choice(0), D) - axl.seed(1) - self.assertEqual(axl.random_choice(), C) - axl.seed(2) - self.assertEqual(axl.random_choice(), D) - - def test_set_seed(self): - """Test that numpy and stdlib random seed is set by axelrod seed""" - - numpy_random_numbers = [] - stdlib_random_numbers = [] - for _ in range(2): - axl.seed(0) - numpy_random_numbers.append(numpy.random.random()) - stdlib_random_numbers.append(random.random()) - - self.assertEqual(numpy_random_numbers[0], numpy_random_numbers[1]) - self.assertEqual(stdlib_random_numbers[0], stdlib_random_numbers[1]) - - def test_seed_not_offset_by_deterministic_call(self): - """Test that when called with p = 0 or 1, the random seed is not - affected.""" - for p in [0, 1]: - axl.seed(0) - r = random.random() - axl.seed(0) - axl.random_choice(p) - self.assertEqual(r, random.random()) - - def test_random_flip(self): - self.assertEqual(C, axl.random_flip(C, 0)) - self.assertEqual(C, axl.random_flip(D, 1)) - axl.seed(0) - self.assertEqual(C, axl.random_flip(C, 0.2)) - axl.seed(1) - self.assertEqual(C, axl.random_flip(D, 0.2)) - - -class TestPdf(unittest.TestCase): - """A suite of tests for the Pdf class""" - - observations = [(C, D)] * 4 + [(C, C)] * 12 + [(D, C)] * 2 + [(D, D)] * 15 - counter = Counter(observations) - pdf = axl.Pdf(counter) - - def test_init(self): - self.assertEqual(set(self.pdf.sample_space), set(self.counter.keys())) - self.assertEqual(set(self.pdf.counts), set([4, 12, 2, 15])) - self.assertEqual(self.pdf.total, sum([4, 12, 2, 15])) - self.assertAlmostEqual(sum(self.pdf.probability), 1) - - def test_sample(self): - """Test that sample maps to correct domain""" - all_samples = [] - - axl.seed(0) - for sample in range(100): - all_samples.append(self.pdf.sample()) - - self.assertEqual(len(all_samples), 100) - self.assertEqual(set(all_samples), set(self.observations)) - - def test_seed(self): - """Test that numpy seeds the sample properly""" - - for s in range(10): - axl.seed(s) - sample = self.pdf.sample() - axl.seed(s) - self.assertEqual(sample, self.pdf.sample()) diff --git a/axelrod/ipd/tests/unit/test_resultset.py b/axelrod/ipd/tests/unit/test_resultset.py deleted file mode 100644 index fdba8c99b..000000000 --- a/axelrod/ipd/tests/unit/test_resultset.py +++ /dev/null @@ -1,1248 +0,0 @@ -import unittest -import csv -from collections import Counter -import pandas as pd -from numpy import 
mean, nanmedian, std -import pathlib - -import axelrod as axl -from axelrod.ipd.load_data_ import axl_filename -from axelrod.ipd.result_set import create_counter_dict -from axelrod.ipd.tests.property import tournaments - -from hypothesis import given, settings - -C, D = axl.Action.C, axl.Action.D - - -class TestResultSet(unittest.TestCase): - @classmethod - def setUpClass(cls): - - path = pathlib.Path("../test_outputs/test_results.csv") - cls.filename = str(axl_filename(path)) - - cls.players = [axl.Alternator(), axl.TitForTat(), axl.Defector()] - cls.repetitions = 3 - cls.turns = 5 - cls.edges = [(0, 1), (0, 2), (1, 2)] - - cls.expected_match_lengths = [ - [[0, 5, 5], [5, 0, 5], [5, 5, 0]] for _ in range(3) - ] - - cls.expected_scores = [[15, 15, 15], [17, 17, 17], [26, 26, 26]] - - cls.expected_wins = [[0, 0, 0], [0, 0, 0], [2, 2, 2]] - - cls.expected_normalised_scores = [ - [3 / 2 for _ in range(3)], - [(13 / 5 + 4 / 5) / 2 for _ in range(3)], - [(17 / 5 + 9 / 5) / 2 for _ in range(3)], - ] - - cls.expected_ranking = [2, 1, 0] - - cls.expected_ranked_names = ["Defector", "Tit For Tat", "Alternator"] - - cls.expected_null_results_matrix = [ - [[0, 0, 0], [0, 0, 0], [0, 0, 0]], - [[0, 0, 0], [0, 0, 0], [0, 0, 0]], - [[0, 0, 0], [0, 0, 0], [0, 0, 0]], - ] - - cls.expected_payoffs = [ - [[], [13 / 5 for _ in range(3)], [2 / 5 for _ in range(3)]], - [[13 / 5 for _ in range(3)], [], [4 / 5 for _ in range(3)]], - [[17 / 5 for _ in range(3)], [9 / 5 for _ in range(3)], []], - ] - - cls.expected_score_diffs = [ - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-3.0, -3.0, -3.0]], - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-1.0, -1.0, -1.0]], - [[3.0, 3.0, 3.0], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]], - ] - - cls.expected_payoff_diffs_means = [ - [0.0, 0.0, -3.0], - [0.0, 0.0, -1.0], - [3.0, 1.0, 0.0], - ] - - # Recalculating to deal with numeric imprecision - cls.expected_payoff_matrix = [ - [0, mean([13 / 5 for _ in range(3)]), mean([2 / 5 for _ in range(3)])], - [mean([13 / 5 for _ in range(3)]), 0, mean([4 / 5 for _ in range(3)])], - [mean([17 / 5 for _ in range(3)]), mean([9 / 5 for _ in range(3)]), 0], - ] - - cls.expected_payoff_stddevs = [ - [0, std([13 / 5 for _ in range(3)]), std([2 / 5 for _ in range(3)])], - [std([13 / 5 for _ in range(3)]), 0, std([4 / 5 for _ in range(3)])], - [std([17 / 5 for _ in range(3)]), std([9 / 5 for _ in range(3)]), 0], - ] - - cls.expected_cooperation = [[0, 9, 9], [9, 0, 3], [0, 0, 0]] - - cls.expected_initial_cooperation_count = [6, 6, 0] - cls.expected_initial_cooperation_rate = [1, 1, 0] - - cls.expected_normalised_cooperation = [ - [0, mean([3 / 5 for _ in range(3)]), mean([3 / 5 for _ in range(3)])], - [mean([3 / 5 for _ in range(3)]), 0, mean([1 / 5 for _ in range(3)])], - [0, 0, 0], - ] - - cls.expected_state_distribution = [ - [ - Counter(), - Counter({(D, C): 6, (C, D): 6, (C, C): 3}), - Counter({(C, D): 9, (D, D): 6}), - ], - [ - Counter({(D, C): 6, (C, D): 6, (C, C): 3}), - Counter(), - Counter({(D, D): 12, (C, D): 3}), - ], - [ - Counter({(D, C): 9, (D, D): 6}), - Counter({(D, D): 12, (D, C): 3}), - Counter(), - ], - ] - - cls.expected_normalised_state_distribution = [ - [ - Counter(), - Counter({(D, C): 0.4, (C, D): 0.4, (C, C): 0.2}), - Counter({(C, D): 0.6, (D, D): 0.4}), - ], - [ - Counter({(D, C): 0.4, (C, D): 0.4, (C, C): 0.2}), - Counter(), - Counter({(D, D): 0.8, (C, D): 0.2}), - ], - [ - Counter({(D, C): 0.6, (D, D): 0.4}), - Counter({(D, D): 0.8, (D, C): 0.2}), - Counter(), - ], - ] - - cls.expected_state_to_action_distribution = [ - [ - Counter(), - 
Counter({((C, C), D): 3, ((C, D), D): 3, ((D, C), C): 6}), - Counter({((C, D), D): 6, ((D, D), C): 6}), - ], - [ - Counter({((C, C), C): 3, ((D, C), C): 3, ((C, D), D): 6}), - Counter(), - Counter({((C, D), D): 3, ((D, D), D): 9}), - ], - [ - Counter({((D, C), D): 6, ((D, D), D): 6}), - Counter({((D, C), D): 3, ((D, D), D): 9}), - Counter(), - ], - ] - - cls.expected_normalised_state_to_action_distribution = [ - [ - Counter(), - Counter({((C, C), D): 1, ((C, D), D): 1, ((D, C), C): 1}), - Counter({((C, D), D): 1, ((D, D), C): 1}), - ], - [ - Counter({((C, C), C): 1, ((D, C), C): 1, ((C, D), D): 1}), - Counter(), - Counter({((C, D), D): 1, ((D, D), D): 1}), - ], - [ - Counter({((D, C), D): 1, ((D, D), D): 1}), - Counter({((D, C), D): 1, ((D, D), D): 1}), - Counter(), - ], - ] - - cls.expected_vengeful_cooperation = [ - [2 * element - 1 for element in row] - for row in cls.expected_normalised_cooperation - ] - - cls.expected_cooperating_rating = [18 / 30, 12 / 30, 0] - - cls.expected_good_partner_matrix = [[0, 3, 3], [3, 0, 3], [0, 0, 0]] - - cls.expected_good_partner_rating = [1.0, 1.0, 0] - - cls.expected_eigenjesus_rating = [0.5547001962252291, 0.8320502943378436, 0.0] - - cls.expected_eigenmoses_rating = [ - -0.4578520302117101, - 0.7311328098872432, - 0.5057828909101213, - ] - - def test_init(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertEqual(rs.players, self.players) - self.assertEqual(rs.num_players, len(self.players)) - - def _clear_matrix(self, matrix): - for i, row in enumerate(matrix): - for j, _ in enumerate(row): - matrix[i][j] = 0 - - def test_ne_vectors(self): - rs_1 = axl.ResultSet(self.filename, self.players, self.repetitions) - - rs_2 = axl.ResultSet(self.filename, self.players, self.repetitions) - - # A different vector - rs_2.eigenmoses_rating = (-1, -1, -1) - - self.assertNotEqual(rs_1, rs_2) - - def test_nan_vectors(self): - rs_1 = axl.ResultSet(self.filename, self.players, self.repetitions) - # Force a broken eigenmoses, by replacing vengeful_cooperation with - # zeroes. - self._clear_matrix(rs_1.vengeful_cooperation) - rs_1.eigenmoses_rating = rs_1._build_eigenmoses_rating() - - rs_2 = axl.ResultSet(self.filename, self.players, self.repetitions) - # Force a broken eigenmoses, by replacing vengeful_cooperation with - # zeroes. 
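The fixtures in this class read a pre-computed CSV; the same kind of file can be produced by playing a tournament to disk. A minimal sketch, assuming the tournament-to-CSV round trip already used by test_init_from_resulsetfromfile earlier in this patch:

import tempfile

import axelrod as axl

players = [axl.Alternator(), axl.TitForTat(), axl.Defector()]
tournament = axl.IpdTournament(players, turns=5, repetitions=3)
tmp = tempfile.NamedTemporaryFile(mode="w", delete=False)
tournament.play(filename=tmp.name, progress_bar=False)  # interactions written as CSV
tmp.close()
rs = axl.ResultSet(tmp.name, players, 3, progress_bar=False)
print(rs.ranked_names)  # the fixture above expects ["Defector", "Tit For Tat", "Alternator"]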
- self._clear_matrix(rs_2.vengeful_cooperation) - rs_2.eigenmoses_rating = rs_2._build_eigenmoses_rating() - - self.assertEqual(rs_1, rs_2) - - def test_init_multiprocessing(self): - rs = axl.ResultSet( - self.filename, - self.players, - self.repetitions, - progress_bar=False, - processes=2, - ) - self.assertEqual(rs.players, self.players) - self.assertEqual(rs.num_players, len(self.players)) - - rs = axl.ResultSet( - self.filename, - self.players, - self.repetitions, - progress_bar=False, - processes=0, - ) - self.assertEqual(rs.players, self.players) - self.assertEqual(rs.num_players, len(self.players)) - - def test_with_progress_bar(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=True - ) - self.assertTrue(rs.progress_bar) - self.assertEqual(rs.progress_bar.total, 25) - self.assertEqual(rs.progress_bar.n, rs.progress_bar.total) - - def test_match_lengths(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.match_lengths, list) - self.assertEqual(len(rs.match_lengths), rs.repetitions) - self.assertEqual(rs.match_lengths, self.expected_match_lengths) - - for rep in rs.match_lengths: - self.assertIsInstance(rep, list) - self.assertEqual(len(rep), len(self.players)) - - for i, opp in enumerate(rep): - self.assertIsInstance(opp, list) - self.assertEqual(len(opp), len(self.players)) - - for j, length in enumerate(opp): - if i == j: # Specific test for example match setup - self.assertEqual(length, 0) - else: - self.assertEqual(length, self.turns) - - def test_scores(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.scores, list) - self.assertEqual(len(rs.scores), rs.num_players) - self.assertEqual(rs.scores, self.expected_scores) - - def test_ranking(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.ranking, list) - self.assertEqual(len(rs.ranking), rs.num_players) - self.assertEqual(rs.ranking, self.expected_ranking) - - def test_ranked_names(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.ranked_names, list) - self.assertEqual(len(rs.ranked_names), rs.num_players) - self.assertEqual(rs.ranked_names, self.expected_ranked_names) - - def test_wins(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.wins, list) - self.assertEqual(len(rs.wins), rs.num_players) - self.assertEqual(rs.wins, self.expected_wins) - - def test_normalised_scores(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.normalised_scores, list) - self.assertEqual(len(rs.normalised_scores), rs.num_players) - self.assertEqual(rs.normalised_scores, self.expected_normalised_scores) - - def test_payoffs(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.payoffs, list) - self.assertEqual(len(rs.payoffs), rs.num_players) - self.assertEqual(rs.payoffs, self.expected_payoffs) - - def test_payoff_matrix(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.payoff_matrix, list) - self.assertEqual(len(rs.payoff_matrix), rs.num_players) - self.assertEqual(rs.payoff_matrix, 
self.expected_payoff_matrix) - - def test_score_diffs(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.score_diffs, list) - self.assertEqual(len(rs.score_diffs), rs.num_players) - for i, row in enumerate(rs.score_diffs): - for j, col in enumerate(row): - for k, score in enumerate(col): - self.assertAlmostEqual(score, self.expected_score_diffs[i][j][k]) - - def test_payoff_diffs_means(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.payoff_diffs_means, list) - self.assertEqual(len(rs.payoff_diffs_means), rs.num_players) - for i, row in enumerate(rs.payoff_diffs_means): - for j, col in enumerate(row): - self.assertAlmostEqual(col, self.expected_payoff_diffs_means[i][j]) - - def test_payoff_stddevs(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.payoff_stddevs, list) - self.assertEqual(len(rs.payoff_stddevs), rs.num_players) - self.assertEqual(rs.payoff_stddevs, self.expected_payoff_stddevs) - - def test_cooperation(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.cooperation, list) - self.assertEqual(len(rs.cooperation), rs.num_players) - self.assertEqual(rs.cooperation, self.expected_cooperation) - - def test_initial_cooperation_count(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.initial_cooperation_count, list) - self.assertEqual(len(rs.initial_cooperation_count), rs.num_players) - self.assertEqual( - rs.initial_cooperation_count, self.expected_initial_cooperation_count - ) - - def test_normalised_cooperation(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.normalised_cooperation, list) - self.assertEqual(len(rs.normalised_cooperation), rs.num_players) - for i, row in enumerate(rs.normalised_cooperation): - for j, col in enumerate(row): - self.assertAlmostEqual(col, self.expected_normalised_cooperation[i][j]) - - def test_initial_cooperation_rate(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.initial_cooperation_rate, list) - self.assertEqual(len(rs.initial_cooperation_rate), rs.num_players) - self.assertEqual( - rs.initial_cooperation_rate, self.expected_initial_cooperation_rate - ) - - def test_state_distribution(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.state_distribution, list) - self.assertEqual(len(rs.state_distribution), rs.num_players) - self.assertEqual(rs.state_distribution, self.expected_state_distribution) - - def test_state_normalised_distribution(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.normalised_state_distribution, list) - self.assertEqual(len(rs.normalised_state_distribution), rs.num_players) - self.assertEqual( - rs.normalised_state_distribution, - self.expected_normalised_state_distribution, - ) - - def test_state_to_action_distribution(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.state_to_action_distribution, list) - self.assertEqual(len(rs.state_to_action_distribution), 
rs.num_players) - self.assertEqual( - rs.state_to_action_distribution[1], - self.expected_state_to_action_distribution[1], - ) - - def test_normalised_state_to_action_distribution(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.normalised_state_to_action_distribution, list) - self.assertEqual( - len(rs.normalised_state_to_action_distribution), rs.num_players - ) - self.assertEqual( - rs.normalised_state_to_action_distribution, - self.expected_normalised_state_to_action_distribution, - ) - - def test_vengeful_cooperation(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.vengeful_cooperation, list) - self.assertEqual(len(rs.vengeful_cooperation), rs.num_players) - for i, row in enumerate(rs.vengeful_cooperation): - for j, col in enumerate(row): - self.assertAlmostEqual(col, self.expected_vengeful_cooperation[i][j]) - - def test_cooperating_rating(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.cooperating_rating, list) - self.assertEqual(len(rs.cooperating_rating), rs.num_players) - self.assertEqual(rs.cooperating_rating, self.expected_cooperating_rating) - - def test_good_partner_matrix(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.good_partner_matrix, list) - self.assertEqual(len(rs.good_partner_matrix), rs.num_players) - self.assertEqual(rs.good_partner_matrix, self.expected_good_partner_matrix) - - def test_good_partner_rating(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.good_partner_rating, list) - self.assertEqual(len(rs.good_partner_rating), rs.num_players) - self.assertEqual(rs.good_partner_rating, self.expected_good_partner_rating) - - def test_eigenjesus_rating(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.eigenjesus_rating, list) - self.assertEqual(len(rs.eigenjesus_rating), rs.num_players) - for j, rate in enumerate(rs.eigenjesus_rating): - self.assertAlmostEqual(rate, self.expected_eigenjesus_rating[j]) - - def test_eigenmoses_rating(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.eigenmoses_rating, list) - self.assertEqual(len(rs.eigenmoses_rating), rs.num_players) - for j, rate in enumerate(rs.eigenmoses_rating): - self.assertAlmostEqual(rate, self.expected_eigenmoses_rating[j]) - - def test_self_interaction_for_random_strategies(self): - # Based on https://github.com/Axelrod-Python/Axelrod/issues/670 - # Note that the conclusion of #670 is incorrect and only includes one of - # the copies of the strategy. 
- axl.seed(0) - players = [s() for s in axl.demo_strategies] - tournament = axl.IpdTournament(players, repetitions=2, turns=5) - results = tournament.play(progress_bar=False) - self.assertEqual(results.payoff_diffs_means[-1][-1], 0.0) - - def test_equality(self): - rs_sets = [ - axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - for _ in range(2) - ] - self.assertEqual(rs_sets[0], rs_sets[1]) - - players = [s() for s in axl.demo_strategies] - tournament = axl.IpdTournament(players, repetitions=2, turns=5) - results = tournament.play(progress_bar=False) - self.assertNotEqual(results, rs_sets[0]) - - def test_summarise(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - sd = rs.summarise() - - self.assertEqual(len(sd), len(rs.players)) - self.assertEqual([str(player.Name) for player in sd], rs.ranked_names) - self.assertEqual( - [int(player.Rank) for player in sd], list(range(len(self.players))) - ) - - ranked_median_scores = [ - list(map(nanmedian, rs.normalised_scores))[i] for i in rs.ranking - ] - self.assertEqual( - [float(player.Median_score) for player in sd], ranked_median_scores - ) - - ranked_cooperation_rating = [rs.cooperating_rating[i] for i in rs.ranking] - self.assertEqual( - [float(player.Cooperation_rating) for player in sd], - ranked_cooperation_rating, - ) - - ranked_median_wins = [nanmedian(rs.wins[i]) for i in rs.ranking] - self.assertEqual([float(player.Wins) for player in sd], ranked_median_wins) - - ranked_initial_coop_rates = [ - self.expected_initial_cooperation_rate[i] for i in rs.ranking - ] - self.assertEqual( - [float(player.Initial_C_rate) for player in sd], ranked_initial_coop_rates - ) - - for player in sd: - self.assertEqual( - player.CC_rate + player.CD_rate + player.DC_rate + player.DD_rate, 1 - ) - for rate in [ - player.CC_to_C_rate, - player.CD_to_C_rate, - player.DC_to_C_rate, - player.DD_to_C_rate, - ]: - self.assertLessEqual(rate, 1) - self.assertGreaterEqual(rate, 0) - - # When converting Action to Enum, test coverage gap exposed from example in - # docs/tutorial/getting_started/summarising_tournaments.rst - def test_summarise_regression_test(self): - players = [ - axl.Cooperator(), - axl.Defector(), - axl.TitForTat(), - axl.Grudger(), - ] - tournament = axl.IpdTournament(players, turns=10, repetitions=3) - results = tournament.play() - - summary = [ - ( - 0, - "Defector", - 2.6000000000000001, - 0.0, - 3.0, - 0.0, - 0.0, - 0.0, - 0.4000000000000001, - 0.6, - 0, - 0, - 0, - 0, - ), - ( - 1, - "Tit For Tat", - 2.3000000000000003, - 0.7, - 0.0, - 1.0, - 0.6666666666666666, - 0.03333333333333333, - 0.0, - 0.3, - 1.0, - 0, - 0, - 0, - ), - ( - 2, - "Grudger", - 2.3000000000000003, - 0.7, - 0.0, - 1.0, - 0.6666666666666666, - 0.03333333333333333, - 0.0, - 0.3, - 1.0, - 0, - 0, - 0, - ), - ( - 3, - "Cooperator", - 2.0, - 1.0, - 0.0, - 1.0, - 0.6666666666666666, - 0.3333333333333333, - 0.0, - 0.0, - 1.0, - 1.0, - 0, - 0, - ), - ] - for outer_index, player in enumerate(results.summarise()): - for inner_index, value in enumerate(player): - if isinstance(value, str): - self.assertEqual(value, summary[outer_index][inner_index]) - else: - self.assertAlmostEqual( - value, summary[outer_index][inner_index], places=3 - ) - - def test_write_summary(self): - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - rs.write_summary(filename=self.filename + ".summary") - with open(self.filename + ".summary", "r") as csvfile: - ranked_names = [] 
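test_write_summary below checks that the written CSV mirrors summarise(): one 14-column row per player, headed by a Name column. A minimal sketch of producing that summary directly, using only the calls and fields these tests exercise:

import axelrod as axl

players = [axl.Cooperator(), axl.Defector(), axl.TitForTat(), axl.Grudger()]
results = axl.IpdTournament(players, turns=10, repetitions=3).play(progress_bar=False)
for row in results.summarise():
    print(row.Rank, row.Name, row.Median_score)  # fields asserted in test_summarise
results.write_summary(filename="summary.csv")    # same table, 14 columns per row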
- csvreader = csv.reader(csvfile) - for row in csvreader: - ranked_names.append(row[1]) - self.assertEqual(len(row), 14) - self.assertEqual(ranked_names[0], "Name") - self.assertEqual(ranked_names[1:], rs.ranked_names) - - -class TestDecorator(unittest.TestCase): - def test_update_progress_bar(self): - method = lambda x: None - self.assertEqual(axl.ipd.result_set.update_progress_bar(method)(1), None) - - -class TestResultSetSpatialStructure(TestResultSet): - """ - Specific test for some spatial tournament. - """ - - @classmethod - def setUpClass(cls): - - path = pathlib.Path("../test_outputs/test_results_spatial.csv") - cls.filename = str(axl_filename(path)) - cls.players = [axl.Alternator(), axl.TitForTat(), axl.Defector()] - cls.turns = 5 - cls.edges = [(0, 1), (0, 2)] - - cls.expected_match_lengths = [ - [[0, 5, 5], [5, 0, 0], [5, 0, 0]] for _ in range(3) - ] - - cls.expected_scores = [[15, 15, 15], [13, 13, 13], [17, 17, 17]] - - cls.expected_wins = [[0, 0, 0], [0, 0, 0], [1, 1, 1]] - - cls.expected_normalised_scores = [ - [3 / 2 for _ in range(3)], - [(13 / 5) for _ in range(3)], - [(17 / 5) for _ in range(3)], - ] - - cls.expected_ranking = [2, 1, 0] - - cls.expected_ranked_names = ["Defector", "Tit For Tat", "Alternator"] - - cls.expected_null_results_matrix = [ - [[0, 0, 0], [0, 0, 0], [0, 0, 0]], - [[0, 0, 0], [0, 0, 0], [0, 0, 0]], - [[0, 0, 0], [0, 0, 0], [0, 0, 0]], - ] - - cls.expected_payoffs = [ - [[], [13 / 5 for _ in range(3)], [2 / 5 for _ in range(3)]], - [[13 / 5 for _ in range(3)], [], []], - [[17 / 5 for _ in range(3)], [], []], - ] - - cls.expected_score_diffs = [ - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-3.0, -3.0, -3.0]], - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], - [[3.0, 3.0, 3.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], - ] - - cls.expected_payoff_diffs_means = [ - [0.0, 0.0, -3.0], - [0.0, 0.0, 0.0], - [3.0, 0.0, 0.0], - ] - - # Recalculating to deal with numeric imprecision - cls.expected_payoff_matrix = [ - [0, mean([13 / 5 for _ in range(3)]), mean([2 / 5 for _ in range(3)])], - [mean([13 / 5 for _ in range(3)]), 0, 0], - [mean([17 / 5 for _ in range(3)]), 0, 0], - ] - - cls.expected_payoff_stddevs = [ - [0, std([13 / 5 for _ in range(3)]), std([2 / 5 for _ in range(3)])], - [std([13 / 5 for _ in range(3)]), 0, 0], - [std([17 / 5 for _ in range(3)]), 0, 0], - ] - - cls.expected_cooperation = [[0, 9, 9], [9, 0, 0], [0, 0, 0]] - - cls.expected_normalised_cooperation = [ - [0, mean([3 / 5 for _ in range(3)]), mean([3 / 5 for _ in range(3)])], - [mean([3 / 5 for _ in range(3)]), 0, 0], - [0, 0, 0], - ] - - cls.expected_initial_cooperation_count = [6, 3, 0] - cls.expected_initial_cooperation_rate = [1, 1, 0] - - cls.expected_vengeful_cooperation = [ - [2 * element - 1 for element in row] - for row in cls.expected_normalised_cooperation - ] - - cls.expected_cooperating_rating = [18 / 30, 9 / 15, 0] - - cls.expected_good_partner_matrix = [[0, 3, 3], [3, 0, 0], [0, 0, 0]] - - cls.expected_good_partner_rating = [1.0, 1.0, 0.0] - - cls.expected_eigenjesus_rating = [0.447213595499958, 0.894427190999916, 0.0] - - cls.expected_eigenmoses_rating = [ - -0.32929277996907086, - 0.7683498199278325, - 0.5488212999484519, - ] - - cls.expected_state_distribution = [ - [ - Counter(), - Counter({(C, C): 3, (C, D): 6, (D, C): 6}), - Counter({(C, D): 9, (D, D): 6}), - ], - [Counter({(C, C): 3, (C, D): 6, (D, C): 6}), Counter(), Counter()], - [Counter({(D, C): 9, (D, D): 6}), Counter(), Counter()], - ] - - cls.expected_normalised_state_distribution = [ - [ - Counter(), - 
Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), - Counter({(C, D): 0.6, (D, D): 0.4}), - ], - [Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), Counter(), Counter()], - [Counter({(D, C): 0.6, (D, D): 0.4}), Counter(), Counter()], - ] - - cls.expected_state_to_action_distribution = [ - [ - Counter(), - Counter({((C, C), D): 3, ((C, D), D): 3, ((D, C), C): 6}), - Counter({((C, D), D): 6, ((D, D), C): 6}), - ], - [ - Counter({((C, C), C): 3, ((D, C), C): 3, ((C, D), D): 6}), - Counter(), - Counter(), - ], - [Counter({((D, C), D): 6, ((D, D), D): 6}), Counter(), Counter()], - ] - - cls.expected_normalised_state_to_action_distribution = [ - [ - Counter(), - Counter({((C, C), D): 1.0, ((C, D), D): 1.0, ((D, C), C): 1.0}), - Counter({((C, D), D): 1.0, ((D, D), C): 1.0}), - ], - [ - Counter({((C, C), C): 1.0, ((D, C), C): 1.0, ((C, D), D): 1.0}), - Counter(), - Counter(), - ], - [Counter({((D, C), D): 1.0, ((D, D), D): 1.0}), Counter(), Counter()], - ] - - def test_match_lengths(self): - """ - Overwriting the match lengths test. This method, among other things, checks - that if two players interacted, the length of that interaction equals the - number of turns. - - Implementing this for the round robin tournament meant checking the - interactions between each strategy and the remaining strategies of the - tournament. - - In a spatial tournament we need to check that the length of the interaction - between player-nodes that are the end vertices of an edge equals the - number of turns, and is 0 otherwise. - """ - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - self.assertIsInstance(rs.match_lengths, list) - self.assertEqual(len(rs.match_lengths), rs.repetitions) - self.assertEqual(rs.match_lengths, self.expected_match_lengths) - - for rep in rs.match_lengths: - self.assertIsInstance(rep, list) - self.assertEqual(len(rep), len(self.players)) - - for i, opp in enumerate(rep): - self.assertIsInstance(opp, list) - self.assertEqual(len(opp), len(self.players)) - - for j, length in enumerate(opp): - edge = (i, j) - # Specific test for example match setup - if edge in self.edges or edge[::-1] in self.edges: - self.assertEqual(length, self.turns) - else: - self.assertEqual(length, 0) - - -class TestResultSetSpatialStructureTwo(TestResultSetSpatialStructure): - @classmethod - def setUpClass(cls): - - path = pathlib.Path("../test_outputs/test_results_spatial_two.csv") - cls.filename = str(axl_filename(path)) - cls.players = [ - axl.Alternator(), - axl.TitForTat(), - axl.Defector(), - axl.Cooperator(), - ] - cls.turns = 5 - cls.edges = [(0, 1), (2, 3)] - - cls.expected_match_lengths = [ - [[0, 5, 0, 0], [5, 0, 0, 0], [0, 0, 0, 5], [0, 0, 5, 0]] for _ in range(3) - ] - - cls.expected_scores = [ - [13.0 for _ in range(3)], - [13.0 for _ in range(3)], - [25.0 for _ in range(3)], - [0 for _ in range(3)], - ] - - cls.expected_wins = [[0, 0, 0], [0, 0, 0], [1, 1, 1], [0, 0, 0]] - - cls.expected_normalised_scores = [ - [(13 / 5) for _ in range(3)], - [(13 / 5) for _ in range(3)], - [(25 / 5) for _ in range(3)], - [0 for _ in range(3)], - ] - - cls.expected_ranking = [2, 0, 1, 3] - - cls.expected_ranked_names = [ - "Defector", - "Alternator", - "Tit For Tat", - "Cooperator", - ] - - cls.expected_null_results_matrix = [ - [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - ] - - cls.expected_payoffs = [ - [[], [13 / 5 for _ in range(3)], [], []], - [[13 / 5 for
_ in range(3)], [], [], []], - [[], [], [], [25 / 5 for _ in range(3)]], - [[], [], [0 for _ in range(3)], []], - ] - - cls.expected_score_diffs = [ - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [5.0, 5.0, 5.0]], - [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-5.0, -5.0, -5.0], [0.0, 0.0, 0.0]], - ] - - cls.expected_payoff_diffs_means = [ - [0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 5.0], - [0.0, 0.0, -5.0, 0.0], - ] - - # Recalculating to deal with numeric imprecision - cls.expected_payoff_matrix = [ - [0, mean([13 / 5 for _ in range(3)]), 0, 0], - [mean([13 / 5 for _ in range(3)]), 0, 0, 0], - [0, 0, 0, mean([25 / 5 for _ in range(3)])], - [0, 0, 0, 0], - ] - - cls.expected_payoff_stddevs = [ - [0, std([13 / 5 for _ in range(3)]), 0, 0], - [std([13 / 5 for _ in range(3)]), 0, 0, 0], - [0, 0, 0, std([25 / 5 for _ in range(3)])], - [0, 0, 0, 0], - ] - - cls.expected_cooperation = [ - [0, 9, 0, 0], - [9, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 15, 0], - ] - - cls.expected_normalised_cooperation = [ - [0.0, mean([3 / 5 for _ in range(3)]), 0.0, 0.0], - [mean([3 / 5 for _ in range(3)]), 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, mean([5 / 5 for _ in range(3)]), 0.0], - ] - - cls.expected_initial_cooperation_count = [3.0, 3.0, 0, 3.0] - cls.expected_initial_cooperation_rate = [1.0, 1.0, 0, 1.0] - - cls.expected_vengeful_cooperation = [ - [2 * element - 1 for element in row] - for row in cls.expected_normalised_cooperation - ] - - cls.expected_cooperating_rating = [18 / 30, 18 / 30, 0.0, 30 / 30] - - cls.expected_good_partner_matrix = [ - [0, 3, 0, 0], - [3, 0, 0, 0], - [0, 0, 0, 0], - [0, 0, 3, 0], - ] - - cls.expected_good_partner_rating = [1.0, 1.0, 0.0, 1.0] - - cls.expected_eigenjesus_rating = [ - 0.7071067811865476, - 0.7071067811865476, - 0.0, - 0.0, - ] - - cls.expected_eigenmoses_rating = [ - 0.48505781033492573, - 0.48505781033492573, - 0.7090603855860735, - 0.1633132292825755, - ] - - cls.expected_state_distribution = [ - [ - Counter(), - Counter({(C, C): 3, (C, D): 6, (D, C): 6}), - Counter(), - Counter(), - ], - [ - Counter({(C, C): 3, (C, D): 6, (D, C): 6}), - Counter(), - Counter(), - Counter(), - ], - [Counter(), Counter(), Counter(), Counter({(D, C): 15})], - [Counter(), Counter(), Counter({(C, D): 15}), Counter()], - ] - - cls.expected_normalised_state_distribution = [ - [ - Counter(), - Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), - Counter(), - Counter(), - ], - [ - Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), - Counter(), - Counter(), - Counter(), - ], - [Counter(), Counter(), Counter(), Counter({(D, C): 1.0})], - [Counter(), Counter(), Counter({(C, D): 1.0}), Counter()], - ] - - cls.expected_state_to_action_distribution = [ - [ - Counter(), - Counter({((C, C), D): 3, ((C, D), D): 3, ((D, C), C): 6}), - Counter(), - Counter(), - ], - [ - Counter({((C, C), C): 3, ((D, C), C): 3, ((C, D), D): 6}), - Counter(), - Counter(), - Counter(), - ], - [Counter(), Counter(), Counter(), Counter({((D, C), D): 12})], - [Counter(), Counter(), Counter({((C, D), C): 12}), Counter()], - ] - - cls.expected_normalised_state_to_action_distribution = [ - [ - Counter(), - Counter({((C, C), D): 1.0, ((C, D), D): 1.0, ((D, C), C): 1.0}), - Counter(), - Counter(), - ], - [ - Counter({((C, C), C): 1.0, ((D, C), C): 1.0, ((C, D), D): 1.0}), - Counter(), - Counter(), - Counter(), - ], - [Counter(), Counter(), Counter(), 
Counter({((D, C), D): 1.0})], - [Counter(), Counter(), Counter({((C, D), C): 1.0}), Counter()], - ] - - -class TestResultSetSpatialStructureThree(TestResultSetSpatialStructure): - @classmethod - def setUpClass(cls): - - path = pathlib.Path("../test_outputs/test_results_spatial_three.csv") - cls.filename = str(axl_filename(path)) - cls.players = [ - axl.Alternator(), - axl.TitForTat(), - axl.Defector(), - axl.Cooperator(), - ] - cls.turns = 5 - cls.edges = [(0, 0), (1, 1), (2, 2), (3, 3)] - - cls.expected_match_lengths = [ - [[5, 0, 0, 0], [0, 5, 0, 0], [0, 0, 5, 0], [0, 0, 0, 5]] for _ in range(3) - ] - - cls.expected_scores = [[0 for _ in range(3)] for _ in range(4)] - - cls.expected_wins = [[0 for _ in range(3)] for _ in range(4)] - - cls.expected_normalised_scores = [[0 for _ in range(3)] for i in range(4)] - - cls.expected_ranking = [0, 1, 2, 3] - - cls.expected_ranked_names = [ - "Alternator", - "Tit For Tat", - "Defector", - "Cooperator", - ] - - cls.expected_null_results_matrix = [ - [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - ] - - cls.expected_payoffs = [ - [[11 / 5 for _ in range(3)], [], [], []], - [[], [15 / 5 for _ in range(3)], [], []], - [[], [], [5 / 5 for _ in range(3)], []], - [[], [], [], [15 / 5 for _ in range(3)]], - ] - - cls.expected_score_diffs = [ - [[0.0 for _ in range(3)] for _ in range(4)] for _ in range(4) - ] - - cls.expected_payoff_diffs_means = [[0.0 for _ in range(4)] for _ in range(4)] - - # Recalculating to deal with numeric imprecision - cls.expected_payoff_matrix = [ - [mean([11 / 5 for _ in range(3)]), 0, 0, 0], - [0, mean([15 / 5 for _ in range(3)]), 0, 0], - [0, 0, mean([5 / 5 for _ in range(3)]), 0], - [0, 0, 0, mean([15 / 5 for _ in range(3)])], - ] - - cls.expected_payoff_stddevs = [ - [std([11 / 5 for _ in range(3)]), 0, 0, 0], - [0, std([15 / 5 for _ in range(3)]), 0, 0], - [0, 0, std([5 / 5 for _ in range(3)]), 0], - [0, 0, 0, std([15 / 5 for _ in range(3)])], - ] - - cls.expected_cooperation = [ - [9.0, 0, 0, 0], - [0, 15.0, 0, 0], - [0, 0, 0, 0], - [0, 0, 0, 15.0], - ] - - cls.expected_normalised_cooperation = [ - [mean([3 / 5 for _ in range(3)]), 0.0, 0.0, 0.0], - [0.0, mean([5 / 5 for _ in range(3)]), 0.0, 0.0], - [0.0, 0.0, 0.0, 0.0], - [0.0, 0.0, 0.0, mean([5 / 5 for _ in range(3)])], - ] - - cls.expected_initial_cooperation_count = [0, 0, 0, 0] - cls.expected_initial_cooperation_rate = [0, 0, 0, 0] - - cls.expected_vengeful_cooperation = [ - [2 * element - 1 for element in row] - for row in cls.expected_normalised_cooperation - ] - - cls.expected_cooperating_rating = [0.0 for _ in range(4)] - - cls.expected_good_partner_matrix = [[0.0 for _ in range(4)] for _ in range(4)] - - cls.expected_good_partner_rating = [0.0 for _ in range(4)] - - cls.expected_eigenjesus_rating = [ - 0.0009235301367282831, - 0.7071064796379986, - 0.0, - 0.7071064796379986, - ] - - cls.expected_eigenmoses_rating = [ - 0.4765940316018446, - 0.3985944056208427, - 0.6746133178770147, - 0.3985944056208427, - ] - - cls.expected_state_distribution = [ - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - ] - - cls.expected_normalised_state_distribution = [ - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - 
[Counter(), Counter(), Counter(), Counter()], - ] - - cls.expected_state_to_action_distribution = [ - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - ] - - cls.expected_normalised_state_to_action_distribution = [ - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - [Counter(), Counter(), Counter(), Counter()], - ] - - def test_equality(self): - """Overwriting for this particular case""" - pass - - def test_summarise(self): - """Overwriting for this particular case""" - rs = axl.ResultSet( - self.filename, self.players, self.repetitions, progress_bar=False - ) - sd = rs.summarise() - - for player in sd: - self.assertEqual(player.CC_rate, 0) - self.assertEqual(player.CD_rate, 0) - self.assertEqual(player.DC_rate, 0) - self.assertEqual(player.DD_rate, 0) - - -class TestSummary(unittest.TestCase): - """Separate test to check that summary always builds without failures""" - - @given( - tournament=tournaments(min_size=2, max_size=5, max_turns=5, max_repetitions=3) - ) - @settings(max_examples=5) - def test_summarise_without_failure(self, tournament): - results = tournament.play(progress_bar=False) - sd = results.summarise() - self.assertIsInstance(sd, list) - - for player in sd: - # round for numerical error - total_rate = round( - player.CC_rate + player.CD_rate + player.DC_rate + player.DD_rate, 3 - ) - self.assertTrue(total_rate in [0, 1]) - self.assertTrue(0 <= player.Initial_C_rate <= 1) - - -class TestCreateCounterDict(unittest.TestCase): - """Separate test for a helper function""" - - def test_basic_use(self): - key_map = {"Col 1": "Var 1", "Col 2": "Var 2"} - df = pd.DataFrame( - {"Col 1": [10, 20, 30], "Col 2": [1, 2, 0]}, index=[[5, 6, 7], [1, 2, 3]] - ) - self.assertEqual( - create_counter_dict(df, 6, 2, key_map), Counter({"Var 1": 20, "Var 2": 2}) - ) - self.assertEqual(create_counter_dict(df, 7, 3, key_map), Counter({"Var 1": 30})) diff --git a/axelrod/ipd/tests/unit/test_strategy_transformers.py b/axelrod/ipd/tests/unit/test_strategy_transformers.py deleted file mode 100644 index 988da4782..000000000 --- a/axelrod/ipd/tests/unit/test_strategy_transformers.py +++ /dev/null @@ -1,714 +0,0 @@ -import unittest - -import axelrod as axl -from axelrod.ipd.strategy_transformers import * -from axelrod.ipd.tests.strategies.test_cooperator import TestCooperator -from axelrod.ipd.tests.strategies.test_titfortat import TestTitForTat - -C, D = axl.Action.C, axl.Action.D - - -@FlipTransformer(name_prefix=None) -class CanPickle(axl.Cooperator): - pass - - -@FlipTransformer() -class CanNotPickle(axl.Cooperator): - pass - - -class TestTransformers(unittest.TestCase): - def test_player_can_be_pickled(self): - player = axl.Cooperator() - self.assertTrue(player_can_be_pickled(player)) - - player = IdentityTransformer()(axl.Cooperator)() - self.assertFalse(player_can_be_pickled(player)) - - player = CanPickle() - self.assertTrue(player_can_be_pickled(player)) - - player = CanNotPickle() - self.assertFalse(player_can_be_pickled(player)) - - def test_is_strategy_static(self): - self.assertTrue(is_strategy_static(axl.Cooperator)) - self.assertFalse(is_strategy_static(axl.Alternator)) - - def test_is_strategy_static_with_inherited_strategy(self): - class NewCooperator(axl.Cooperator): - pass - - class NewAlternator(axl.Alternator): - pass - - 
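- # A hedged sketch of the property exercised below, assuming the helper
- # walks the MRO and inspects how `strategy` is stored in each class
- # __dict__ (`_is_static_sketch` is illustrative only, not the library's
- # implementation):
- def _is_static_sketch(player_class):
-     for klass in player_class.mro():
-         method = klass.__dict__.get("strategy")
-         if method is not None:
-             # A static strategy is stored as a staticmethod object.
-             return isinstance(method, staticmethod)
-     return False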
self.assertTrue(is_strategy_static(NewCooperator)) - self.assertFalse(is_strategy_static(NewAlternator)) - - def test_DecoratorReBuilder(self): - new_prefix = "YOLO" - decorator = NoisyTransformer(0.2, name_prefix=new_prefix) - - factory_args = (noisy_wrapper, "Noisy", noisy_reclassifier) - args = decorator.args - kwargs = decorator.kwargs.copy() - - new_decorator = DecoratorReBuilder()(factory_args, args, kwargs, new_prefix) - - self.assertEqual(decorator(axl.Cooperator)(), new_decorator(axl.Cooperator)()) - - def test_StrategyReBuilder_declared_class_with_name_prefix(self): - player = CanNotPickle() - self.assertEqual(player.__class__.__name__, "FlippedCanNotPickle") - - decorators = [player.decorator] - import_name = "CanNotPickle" - module_name = player.__module__ - - new_player = StrategyReBuilder()(decorators, import_name, module_name) - - update_dict = player.__dict__.copy() - - new_player.__dict__.update(update_dict) - self.assertEqual(player, new_player) - - def test_StrategyReBuilder_dynamically_wrapped_class_with_name_prefix(self): - player = FlipTransformer()(axl.Cooperator)() - self.assertEqual(player.__class__.__name__, "FlippedCooperator") - - decorators = [player.decorator] - import_name = "Cooperator" - module_name = player.__module__ - - new_player = StrategyReBuilder()(decorators, import_name, module_name) - - update_dict = player.__dict__.copy() - - new_player.__dict__.update(update_dict) - self.assertEqual(player, new_player) - - def test_StrategyReBuilder_dynamically_wrapped_class_no_name_prefix(self): - player = IdentityTransformer()(axl.Cooperator)() - self.assertEqual(player.__class__.__name__, "Cooperator") - - decorators = [player.decorator] - import_name = "Cooperator" - module_name = player.__module__ - - new_player = StrategyReBuilder()(decorators, import_name, module_name) - - update_dict = player.__dict__.copy() - - new_player.__dict__.update(update_dict) - self.assertEqual(player, new_player) - - def test_StrategyReBuilder_many_decorators(self): - decorator_1 = IdentityTransformer() - decorator_2 = FlipTransformer() - decorator_3 = DualTransformer() - player = decorator_3(decorator_2(decorator_1(axl.Cooperator)))() - self.assertEqual(player.__class__.__name__, "DualFlippedCooperator") - - decorators = [decorator_1, decorator_2, decorator_3] - import_name = "Cooperator" - module_name = player.__module__ - - new_player = StrategyReBuilder()(decorators, import_name, module_name) - - update_dict = player.__dict__.copy() - - new_player.__dict__.update(update_dict) - self.assertEqual(player, new_player) - - def test_all_strategies(self): - # Attempt to transform each strategy to ensure that implementation - # choices (like use of super) do not cause issues - for s in axl.strategies: - opponent = axl.Cooperator() - player = IdentityTransformer()(s)() - player.play(opponent) - - def test_naming(self): - """Tests that the player and class names are properly modified.""" - cls = FlipTransformer()(axl.Cooperator) - p1 = cls() - self.assertEqual(cls.__name__, "FlippedCooperator") - self.assertEqual(p1.name, "Flipped Cooperator") - - cls = ForgiverTransformer(0.5)(axl.Alternator) - p1 = cls() - self.assertEqual(cls.__name__, "ForgivingAlternator") - self.assertEqual(p1.name, "Forgiving Alternator") - - cls = ForgiverTransformer(0.5, name_prefix="")(axl.Alternator) - p1 = cls() - self.assertEqual(cls.__name__, "Alternator") - self.assertEqual(p1.name, "Alternator") - - def test_repr(self): - """Tests that the player __repr__ is properly modified to add - Transformer's 
parameters. - """ - self.assertEqual( - str(ForgiverTransformer(0.5)(axl.Alternator)()), - "Forgiving Alternator: 0.5", - ) - self.assertEqual( - str(InitialTransformer([D, D, C])(axl.Alternator)()), - "Initial Alternator: [D, D, C]", - ) - self.assertEqual(str(FlipTransformer()(axl.Random)(0.1)), "Flipped Random: 0.1") - self.assertEqual( - str(MixedTransformer(0.3, (axl.Alternator, axl.Bully))(axl.Random)(0.1)), - "Mutated Random: 0.1: 0.3, ['Alternator', 'Bully']", - ) - - def test_doc(self): - """Test that the original docstring is present""" - player = axl.Alternator() - transformer = InitialTransformer([D, D, C])(axl.Alternator)() - self.assertEqual(player.__doc__, transformer.__doc__) - - def test_cloning(self): - """Tests that IpdPlayer.clone preserves the application of transformations. - """ - p1 = axl.Cooperator() - p2 = FlipTransformer()(axl.Cooperator)() # Defector - p3 = p2.clone() - match = axl.IpdMatch((p1, p3), turns=2) - results = match.play() - self.assertEqual(results, [(C, D), (C, D)]) - - def test_generic(self): - """Test that the generic wrapper does nothing.""" - # This is the identity transformer - transformer = StrategyTransformerFactory(generic_strategy_wrapper)() - Cooperator2 = transformer(axl.Cooperator) - p1 = Cooperator2() - p2 = axl.Cooperator() - match = axl.IpdMatch((p1, p2), turns=2) - results = match.play() - self.assertEqual(results, [(C, C), (C, C)]) - - def test_flip_transformer(self): - """Tests that FlipTransformer(Cooperator) == Defector.""" - p1 = axl.Cooperator() - p2 = FlipTransformer()(axl.Cooperator)() # Defector - match = axl.IpdMatch((p1, p2), turns=3) - results = match.play() - self.assertEqual(results, [(C, D), (C, D), (C, D)]) - - def test_dual_transformer_with_all_strategies(self): - """Tests that DualTransformer produces the opposite results when faced - with the same opponent history. - """ - for s in axl.short_run_time_strategies: - self.assert_dual_wrapper_correct(s) - - def test_dual_jossann_regression_test(self): - player_class = JossAnnTransformer((0.2, 0.3))(axl.Alternator) - self.assert_dual_wrapper_correct(player_class) - - player_class = JossAnnTransformer((0.5, 0.4))(axl.EvolvedLookerUp2_2_2) - self.assert_dual_wrapper_correct(player_class) - - def test_dual_transformer_simple_play_regression_test(self): - """DualTransformer has failed when there were multiple DualTransformers. - It has also failed when DualTransformer was not the outermost - transformer or when other transformers were between multiple - DualTransformers.""" - multiple_dual_transformers = DualTransformer()( - FlipTransformer()(DualTransformer()(axl.Cooperator)) - )() - - dual_transformer_not_first = IdentityTransformer()( - DualTransformer()(axl.Cooperator) - )() - - for _ in range(3): - multiple_dual_transformers.play(dual_transformer_not_first) - - self.assertEqual(multiple_dual_transformers.history, [D, D, D]) - self.assertEqual(dual_transformer_not_first.history, [D, D, D]) - - def test_dual_transformer_multiple_interspersed_regression_test(self): - """DualTransformer has failed when there were multiple DualTransformers. 
- It has also failed when DualTransformer was not the outermost - transformer or when other transformers were between multiple - DualTransformers.""" - dual_not_first_transformer = IdentityTransformer()( - DualTransformer()(axl.EvolvedANN) - ) - self.assert_dual_wrapper_correct(dual_not_first_transformer) - - multiple_dual_transformers = DualTransformer()( - DualTransformer()(axl.WinStayLoseShift) - ) - self.assert_dual_wrapper_correct(multiple_dual_transformers) - - def assert_dual_wrapper_correct(self, player_class): - turns = 100 - - p1 = player_class() - p2 = DualTransformer()(player_class)() - p3 = axl.CyclerCCD() # Cycles 'CCD' - - axl.seed(0) - for _ in range(turns): - p1.play(p3) - - p3.reset() - - axl.seed(0) - for _ in range(turns): - p2.play(p3) - - self.assertEqual(p1.history, [x.flip() for x in p2.history]) - - def test_jossann_transformer(self): - """Tests the JossAnn transformer.""" - probability = (1, 0) - p1 = JossAnnTransformer(probability)(axl.Defector)() - self.assertFalse(axl.Classifiers["stochastic"](p1)) - p2 = axl.Cooperator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, C, C, C, C]) - - probability = (0, 1) - p1 = JossAnnTransformer(probability)(axl.Cooperator)() - self.assertFalse(axl.Classifiers["stochastic"](p1)) - p2 = axl.Cooperator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [D, D, D, D, D]) - - probability = (0.3, 0.3) - p1 = JossAnnTransformer(probability)(axl.TitForTat)() - self.assertTrue(axl.Classifiers["stochastic"](p1)) - - p2 = axl.Cycler() - axl.seed(0) - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [D, C, C, D, D]) - - probability = (0.6, 0.6) - p1 = JossAnnTransformer(probability)(axl.Cooperator)() - self.assertTrue(axl.Classifiers["stochastic"](p1)) - p2 = axl.Cooperator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [D, C, D, D, C]) - - probability = (0, 1) - p1 = JossAnnTransformer(probability)(axl.Random) - self.assertFalse(axl.Classifiers["stochastic"](p1())) - - probability = (1, 0) - p1 = JossAnnTransformer(probability)(axl.Random) - self.assertFalse(axl.Classifiers["stochastic"](p1())) - - probability = (0.5, 0.5) - p1 = JossAnnTransformer(probability)(axl.TitForTat) - self.assertTrue(axl.Classifiers["stochastic"](p1())) - - probability = (0, 0.5) - p1 = JossAnnTransformer(probability)(axl.TitForTat) - self.assertTrue(axl.Classifiers["stochastic"](p1())) - - probability = (0, 0) - p1 = JossAnnTransformer(probability)(axl.TitForTat) - self.assertFalse(axl.Classifiers["stochastic"](p1())) - - probability = (0, 0) - p1 = JossAnnTransformer(probability)(axl.Random) - self.assertTrue(axl.Classifiers["stochastic"](p1())) - - def test_noisy_transformer(self): - """Tests that the noisy transformer does flip some moves.""" - random.seed(5) - # Cooperator to Defector - p1 = axl.Cooperator() - p2 = NoisyTransformer(0.5)(axl.Cooperator)() - self.assertTrue(axl.Classifiers["stochastic"](p2)) - for _ in range(10): - p1.play(p2) - self.assertEqual(p2.history, [C, C, C, C, C, C, D, D, C, C]) - - p2 = NoisyTransformer(0)(axl.Cooperator) - self.assertFalse(axl.Classifiers["stochastic"](p2())) - - p2 = NoisyTransformer(1)(axl.Cooperator) - self.assertFalse(axl.Classifiers["stochastic"](p2())) - - p2 = NoisyTransformer(0.3)(axl.Cooperator) - self.assertTrue(axl.Classifiers["stochastic"](p2())) - - p2 = NoisyTransformer(0)(axl.Random) - self.assertTrue(axl.Classifiers["stochastic"](p2())) - - p2 = NoisyTransformer(1)(axl.Random) - 
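- # A minimal sketch of the flip logic a noise wrapper applies, assuming
- # the proposed action is flipped with probability `noise`
- # (`_noisy_sketch` is illustrative only, not the library's wrapper):
- def _noisy_sketch(action, noise):
-     # noise=0 never flips and noise=1 always flips, so neither adds any
-     # randomness of its own; Random stays stochastic either way.
-     return action.flip() if random.random() < noise else action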
self.assertTrue(axl.Classifiers["stochastic"](p2())) - - def test_forgiving(self): - """Tests that the forgiving transformer flips some defections.""" - random.seed(10) - p1 = ForgiverTransformer(0.5)(axl.Alternator)() - self.assertTrue(axl.Classifiers["stochastic"](p1)) - p2 = axl.Defector() - for _ in range(10): - p1.play(p2) - self.assertEqual(p1.history, [C, D, C, C, D, C, C, D, C, D]) - - p1 = ForgiverTransformer(0)(axl.Alternator)() - self.assertFalse(axl.Classifiers["stochastic"](p1)) - - p1 = ForgiverTransformer(1)(axl.Alternator)() - self.assertFalse(axl.Classifiers["stochastic"](p1)) - - def test_initial_transformer(self): - """Tests the InitialTransformer.""" - p1 = axl.Cooperator() - self.assertEqual(axl.Classifiers["memory_depth"](p1), 0) - p2 = InitialTransformer([D, D])(axl.Cooperator)() - self.assertEqual(axl.Classifiers["memory_depth"](p2), 2) - for _ in range(5): - p1.play(p2) - self.assertEqual(p2.history, [D, D, C, C, C]) - - p1 = axl.Cooperator() - p2 = InitialTransformer([D, D, C, D])(axl.Cooperator)() - for _ in range(5): - p1.play(p2) - self.assertEqual(p2.history, [D, D, C, D, C]) - - p3 = InitialTransformer([D, D])(axl.Adaptive)() - self.assertEqual(axl.Classifiers["memory_depth"](p3), float("inf")) - - def test_final_transformer(self): - """Tests the FinalTransformer when tournament length is known.""" - # Final play transformer - p1 = axl.Cooperator() - p2 = FinalTransformer([D, D, D])(axl.Cooperator)() - self.assertEqual(axl.Classifiers["makes_use_of"](p2), set(["length"])) - self.assertEqual(axl.Classifiers["memory_depth"](p2), 3) - self.assertEqual(axl.Classifiers["makes_use_of"](axl.Cooperator()), set([])) - - p2.match_attributes["length"] = 6 - for _ in range(8): - p1.play(p2) - self.assertEqual(p2.history, [C, C, C, D, D, D, C, C]) - - p3 = FinalTransformer([D, D])(axl.Adaptive)() - self.assertEqual(axl.Classifiers["memory_depth"](p3), float("inf")) - - def test_final_transformer2(self): - """Tests the FinalTransformer when tournament length is not known.""" - p1 = axl.Cooperator() - p2 = FinalTransformer([D, D])(axl.Cooperator)() - for _ in range(6): - p1.play(p2) - self.assertEqual(p2.history, [C, C, C, C, C, C]) - - def test_history_track(self): - """Tests the history tracking transformer.""" - p1 = axl.Cooperator() - p2 = TrackHistoryTransformer()(axl.Random)() - for _ in range(6): - p1.play(p2) - self.assertEqual(p2.history, p2._recorded_history) - - def test_composition(self): - """Tests that transformations can be chained or composed.""" - cls1 = InitialTransformer([D, D])(axl.Cooperator) - cls2 = FinalTransformer([D, D])(cls1) - p1 = cls2() - p2 = axl.Cooperator() - p1.match_attributes["length"] = 8 - for _ in range(8): - p1.play(p2) - self.assertEqual(p1.history, [D, D, C, C, C, C, D, D]) - - cls1 = FinalTransformer([D, D])(InitialTransformer([D, D])(axl.Cooperator)) - p1 = cls1() - p2 = axl.Cooperator() - p1.match_attributes["length"] = 8 - for _ in range(8): - p1.play(p2) - self.assertEqual(p1.history, [D, D, C, C, C, C, D, D]) - - def test_compose_transformers(self): - cls1 = compose_transformers( - FinalTransformer([D, D]), InitialTransformer([D, D]) - ) - p1 = cls1(axl.Cooperator)() - p2 = axl.Cooperator() - p1.match_attributes["length"] = 8 - for _ in range(8): - p1.play(p2) - self.assertEqual(p1.history, [D, D, C, C, C, C, D, D]) - - def test_retailiation(self): - """Tests the RetaliateTransformer.""" - p1 = RetaliationTransformer(1)(axl.Cooperator)() - p2 = axl.Defector() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, 
[C, D, D, D, D]) - self.assertEqual(p2.history, [D, D, D, D, D]) - - p1 = RetaliationTransformer(1)(axl.Cooperator)() - p2 = axl.Alternator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, C, D, C, D]) - self.assertEqual(p2.history, [C, D, C, D, C]) - - TwoTitsForTat = RetaliationTransformer(2)(axl.Cooperator) - p1 = TwoTitsForTat() - p2 = axl.CyclerCCD() - for _ in range(9): - p1.play(p2) - self.assertEqual(p1.history, [C, C, C, D, D, C, D, D, C]) - self.assertEqual(p2.history, [C, C, D, C, C, D, C, C, D]) - - def test_retaliation_until_apology(self): - """Tests the RetaliateUntilApologyTransformer.""" - TFT = RetaliateUntilApologyTransformer()(axl.Cooperator) - p1 = TFT() - p2 = axl.Cooperator() - p1.play(p2) - p1.play(p2) - self.assertEqual(p1.history, [C, C]) - - p1 = TFT() - p2 = axl.Defector() - p1.play(p2) - p1.play(p2) - self.assertEqual(p1.history, [C, D]) - - random.seed(12) - p1 = TFT() - p2 = axl.Random() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, C, D, D, C]) - - def test_apology(self): - """Tests the ApologyTransformer.""" - ApologizingDefector = ApologyTransformer([D], [C])(axl.Defector) - p1 = ApologizingDefector() - p2 = axl.Cooperator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [D, C, D, C, D]) - ApologizingDefector = ApologyTransformer([D, D], [C, C])(axl.Defector) - p1 = ApologizingDefector() - p2 = axl.Cooperator() - for _ in range(6): - p1.play(p2) - self.assertEqual(p1.history, [D, D, C, D, D, C]) - - def test_mixed(self): - """Tests the MixedTransformer.""" - probability = 1 - MD = MixedTransformer(probability, axl.Cooperator)(axl.Defector) - self.assertFalse(axl.Classifiers["stochastic"](MD())) - - p1 = MD() - p2 = axl.Cooperator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, C, C, C, C]) - - probability = 0 - MD = MixedTransformer(probability, axl.Cooperator)(axl.Defector) - self.assertFalse(axl.Classifiers["stochastic"](MD())) - - p1 = MD() - p2 = axl.Cooperator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [D, D, D, D, D]) - - # Decorating with list and distribution - # Decorate a cooperator putting all weight on other strategies that are - # 'nice' - probability = [0.3, 0.2, 0] - strategies = [axl.TitForTat, axl.Grudger, axl.Defector] - MD = MixedTransformer(probability, strategies)(axl.Cooperator) - self.assertTrue(axl.Classifiers["stochastic"](MD())) - - p1 = MD() - # Against a cooperator we see that we only cooperate - p2 = axl.Cooperator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, C, C, C, C]) - - # Decorate a cooperator putting all weight on Defector - probability = (0, 0, 1) # Note: can also pass a tuple - strategies = [axl.TitForTat, axl.Grudger, axl.Defector] - MD = MixedTransformer(probability, strategies)(axl.Cooperator) - self.assertFalse(axl.Classifiers["stochastic"](MD())) - - p1 = MD() - # Against a cooperator we see that we only defect - p2 = axl.Cooperator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [D, D, D, D, D]) - - def test_deadlock(self): - """Test the DeadlockBreakingTransformer.""" - # We can induce a deadlock by altering TFT to defect first - p1 = axl.TitForTat() - p2 = InitialTransformer([D])(axl.TitForTat)() - for _ in range(4): - p1.play(p2) - self.assertEqual(p1.history, [C, D, C, D]) - self.assertEqual(p2.history, [D, C, D, C]) - - # Now let's use the transformer to break the deadlock to achieve - # mutual cooperation - p1 = axl.TitForTat() - p2 = 
DeadlockBreakingTransformer()(InitialTransformer([D])(axl.TitForTat))() - for _ in range(4): - p1.play(p2) - self.assertEqual(p1.history, [C, D, C, C]) - self.assertEqual(p2.history, [D, C, C, C]) - - def test_grudging(self): - """Test the GrudgeTransformer.""" - p1 = axl.Defector() - p2 = GrudgeTransformer(1)(axl.Cooperator)() - for _ in range(4): - p1.play(p2) - self.assertEqual(p1.history, [D, D, D, D]) - self.assertEqual(p2.history, [C, C, D, D]) - - p1 = InitialTransformer([C])(axl.Defector)() - p2 = GrudgeTransformer(2)(axl.Cooperator)() - for _ in range(8): - p1.play(p2) - self.assertEqual(p1.history, [C, D, D, D, D, D, D, D]) - self.assertEqual(p2.history, [C, C, C, C, D, D, D, D]) - - def test_nice(self): - """Tests the NiceTransformer.""" - p1 = NiceTransformer()(axl.Defector)() - p2 = axl.Defector() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, D, D, D, D]) - self.assertEqual(p2.history, [D, D, D, D, D]) - - p1 = NiceTransformer()(axl.Defector)() - p2 = axl.Alternator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, C, D, D, D]) - self.assertEqual(p2.history, [C, D, C, D, C]) - - p1 = NiceTransformer()(axl.Defector)() - p2 = axl.Cooperator() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, C, C, C, C]) - self.assertEqual(p2.history, [C, C, C, C, C]) - - def test_nilpotency(self): - """Show that some of the transformers are (sometimes) nilpotent, i.e. - that transformer(transformer(PlayerClass)) == PlayerClass""" - for transformer in [ - IdentityTransformer(), - FlipTransformer(), - TrackHistoryTransformer(), - ]: - for PlayerClass in [axl.Cooperator, axl.Defector]: - for third_player in [axl.Cooperator(), axl.Defector()]: - player = PlayerClass() - transformed = transformer(transformer(PlayerClass))() - clone = third_player.clone() - for i in range(5): - player.play(third_player) - transformed.play(clone) - self.assertEqual(player.history, transformed.history) - - def test_idempotency(self): - """Show that these transformers are idempotent, i.e. that - transformer(transformer(PlayerClass)) == transformer(PlayerClass). - That means that the transformer is a projection on the set of - strategies.""" - for transformer in [ - IdentityTransformer(), - GrudgeTransformer(1), - FinalTransformer([C]), - FinalTransformer([D]), - InitialTransformer([C]), - InitialTransformer([D]), - DeadlockBreakingTransformer(), - RetaliationTransformer(1), - RetaliateUntilApologyTransformer(), - TrackHistoryTransformer(), - ApologyTransformer([D], [C]), - ]: - for PlayerClass in [axl.Cooperator, axl.Defector]: - for third_player in [axl.Cooperator(), axl.Defector()]: - clone = third_player.clone() - player = transformer(PlayerClass)() - transformed = transformer(transformer(PlayerClass))() - for i in range(5): - player.play(third_player) - transformed.play(clone) - self.assertEqual(player.history, transformed.history) - - def test_implementation(self): - """A test that demonstrates the difference in outcomes if - FlipTransformer is applied to Alternator and CyclerCD. 
In other words, - the implementation matters, not just the outcomes.""" - # Difference between Alternator and CyclerCD - p1 = axl.Cycler(cycle="CD") - p2 = FlipTransformer()(axl.Cycler)(cycle="CD") - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, D, C, D, C]) - self.assertEqual(p2.history, [D, C, D, C, D]) - - p1 = axl.Alternator() - p2 = FlipTransformer()(axl.Alternator)() - for _ in range(5): - p1.play(p2) - self.assertEqual(p1.history, [C, D, C, D, C]) - self.assertEqual(p2.history, [D, D, D, D, D]) - - -TFT = RetaliateUntilApologyTransformer()(axl.Cooperator) - - -class TestRUAisTFT(TestTitForTat): - # This runs the 7 TFT tests when unittest is invoked - player = TFT - name = "RUA Cooperator" - expected_classifier = { - "memory_depth": 0, # really 1 - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } - - -# Test that FlipTransformer(Defector) == Cooperator -Cooperator2 = FlipTransformer()(axl.Defector) - - -class TestFlipDefector(TestCooperator): - # This runs the 7 Cooperator tests when unittest is invoked - name = "Flipped Defector" - player = Cooperator2 diff --git a/axelrod/ipd/tests/unit/test_strategy_utils.py b/axelrod/ipd/tests/unit/test_strategy_utils.py deleted file mode 100644 index 4fdf98727..000000000 --- a/axelrod/ipd/tests/unit/test_strategy_utils.py +++ /dev/null @@ -1,144 +0,0 @@ -"""Tests for the strategy utils.""" - -import unittest - -import axelrod as axl -from axelrod.ipd._strategy_utils import ( - detect_cycle, - inspect_strategy, - look_ahead, - recursive_thue_morse, - simulate_match, - thue_morse_generator, -) - -from hypothesis import given, settings -from hypothesis.strategies import integers, lists, sampled_from - -C, D = axl.Action.C, axl.Action.D - - -class TestDetectCycle(unittest.TestCase): - @given( - cycle=lists(sampled_from([C, D]), min_size=2, max_size=10), - period=integers(min_value=3, max_value=10), - ) - @settings(max_examples=5) - def test_finds_cycle(self, cycle, period): - history = cycle * period - detected = detect_cycle(history) - self.assertIsNotNone(detected) - self.assertIn("".join(map(str, detected)), "".join(map(str, (cycle)))) - - def test_no_cycle(self): - history = [C, D, C, C] - self.assertIsNone(detect_cycle(history)) - - history = [D, D, C, C, C] - self.assertIsNone(detect_cycle(history)) - - def test_regression_test_can_detect_cycle_that_is_repeated_exactly_once(self): - self.assertEqual(detect_cycle([C, D, C, D]), (C, D)) - self.assertEqual(detect_cycle([C, D, C, D, C]), (C, D)) - - def test_cycle_will_be_at_least_min_size(self): - self.assertEqual(detect_cycle([C, C, C, C], min_size=1), (C,)) - self.assertEqual(detect_cycle([C, C, C, C], min_size=2), (C, C)) - - def test_cycle_that_never_fully_repeats_returns_none(self): - cycle = [C, D, D] - to_test = cycle + cycle[:-1] - self.assertIsNone(detect_cycle(to_test)) - - def test_min_size_greater_than_two_times_history_tail_returns_none(self): - self.assertIsNone(detect_cycle([C, C, C], min_size=2)) - - def test_min_size_greater_than_two_times_max_size_has_no_effect(self): - self.assertEqual( - detect_cycle([C, C, C, C, C, C, C, C], min_size=2, max_size=3), (C, C) - ) - - def test_cycle_greater_than_max_size_returns_none(self): - self.assertEqual(detect_cycle([C, C, D] * 2, min_size=1, max_size=3), (C, C, D)) - self.assertIsNone(detect_cycle([C, C, D] * 2, min_size=1, max_size=2)) - - -class TestInspectStrategy(unittest.TestCase): - def 
test_strategies_without_countermeasures_return_their_strategy(self): - tft = axl.TitForTat() - inspector = axl.Alternator() - - tft.play(inspector) - self.assertEqual(tft.history, [C]) - self.assertEqual(inspect_strategy(inspector=inspector, opponent=tft), C) - tft.play(inspector) - self.assertEqual(tft.history, [C, C]) - self.assertEqual(inspect_strategy(inspector=inspector, opponent=tft), D) - self.assertEqual(tft.strategy(inspector), D) - - def test_strategies_with_countermeasures_return_their_countermeasures(self): - d_geller = axl.GellerDefector() - inspector = axl.Cooperator() - d_geller.play(inspector) - - self.assertEqual(inspect_strategy(inspector=inspector, opponent=d_geller), D) - self.assertEqual(d_geller.strategy(inspector), C) - - -class TestSimulateMatch(unittest.TestCase): - def test_tft_reacts_to_cooperation(self): - tft = axl.TitForTat() - inspector = axl.Alternator() - - simulate_match(inspector, tft, C, 5) - self.assertEqual(inspector.history, [C, C, C, C, C]) - self.assertEqual(tft.history, [C, C, C, C, C]) - - def test_tft_reacts_to_defection(self): - tft = axl.TitForTat() - inspector = axl.Alternator() - - simulate_match(inspector, tft, D, 5) - self.assertEqual(inspector.history, [D, D, D, D, D]) - self.assertEqual(tft.history, [C, D, D, D, D]) - - -class TestLookAhead(unittest.TestCase): - def setUp(self): - self.inspector = axl.IpdPlayer() - self.game = axl.IpdGame() - - def test_cooperator(self): - tft = axl.Cooperator() - # It always makes sense to defect here. - self.assertEqual(look_ahead(self.inspector, tft, self.game, 1), D) - self.assertEqual(look_ahead(self.inspector, tft, self.game, 2), D) - self.assertEqual(look_ahead(self.inspector, tft, self.game, 5), D) - - def test_tit_for_tat(self): - tft = axl.TitForTat() - # Cooperation should be chosen if we look ahead further than one move. 
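- # Worked numbers, assuming the default IpdGame payoffs
- # (R, S, T, P) = (3, 0, 5, 1): over a single round, defecting earns
- # T = 5 against R = 3 for cooperating; over five rounds, constant
- # cooperation earns 5 * R = 15 while constant defection earns
- # T + 4 * P = 9, so the longer horizon favours C.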
- self.assertEqual(look_ahead(self.inspector, tft, self.game, 1), D) - self.assertEqual(look_ahead(self.inspector, tft, self.game, 2), C) - self.assertEqual(look_ahead(self.inspector, tft, self.game, 5), C) - - -class TestRecursiveThueMorse(unittest.TestCase): - def test_initial_values(self): - self.assertEqual(recursive_thue_morse(0), 0) - self.assertEqual(recursive_thue_morse(1), 1) - self.assertEqual(recursive_thue_morse(2), 1) - self.assertEqual(recursive_thue_morse(3), 0) - self.assertEqual(recursive_thue_morse(4), 1) - - -class TestThueMorseGenerator(unittest.TestCase): - def test_initial_values(self): - generator = thue_morse_generator() - values = [next(generator) for i in range(5)] - self.assertEqual(values, [0, 1, 1, 0, 1]) - - def test_with_offset(self): - generator = thue_morse_generator(start=2) - values = [next(generator) for i in range(5)] - self.assertEqual(values, [1, 0, 1, 0, 0]) diff --git a/axelrod/ipd/tests/unit/test_tournament.py b/axelrod/ipd/tests/unit/test_tournament.py deleted file mode 100644 index 5770c8d7f..000000000 --- a/axelrod/ipd/tests/unit/test_tournament.py +++ /dev/null @@ -1,1070 +0,0 @@ -"""Tests for the main tournament class.""" -import unittest -from unittest.mock import MagicMock, patch - -import io -import logging -import os -import pathlib -import pickle -import warnings -from multiprocessing import Queue, cpu_count - -from axelrod.ipd.load_data_ import axl_filename -import numpy as np -import pandas as pd -from tqdm import tqdm - -import axelrod as axl -from axelrod.ipd.tests.property import ( - prob_end_tournaments, - spatial_tournaments, - strategy_lists, - tournaments, -) -from axelrod.ipd.tournament import _close_objects - -from hypothesis import example, given, settings -from hypothesis.strategies import floats, integers - -C, D = axl.Action.C, axl.Action.D - -test_strategies = [ - axl.Cooperator, - axl.TitForTat, - axl.Defector, - axl.Grudger, - axl.GoByMajority, -] -test_repetitions = 5 -test_turns = 100 - -test_prob_end = 0.5 - -test_edges = [(0, 1), (1, 2), (3, 4)] - -deterministic_strategies = [ - s for s in axl.short_run_time_strategies if not axl.Classifiers["stochastic"](s()) -] - - -class RecordedTQDM(tqdm): - """This is a tqdm.tqdm that keeps a record of every RecordedTQDM created. 
- It is used to test that progress bars were correctly created and then - closed.""" - - record = [] - - def __init__(self, *args, **kwargs): - super(RecordedTQDM, self).__init__(*args, **kwargs) - RecordedTQDM.record.append(self) - - @classmethod - def reset_record(cls): - cls.record = [] - - -class TestTournament(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.game = axl.IpdGame() - cls.players = [s() for s in test_strategies] - cls.test_name = "test" - cls.test_repetitions = test_repetitions - cls.test_turns = test_turns - - cls.expected_payoff = [ - [600, 600, 0, 600, 600], - [600, 600, 199, 600, 600], - [1000, 204, 200, 204, 204], - [600, 600, 199, 600, 600], - [600, 600, 199, 600, 600], - ] - - cls.expected_cooperation = [ - [200, 200, 200, 200, 200], - [200, 200, 1, 200, 200], - [0, 0, 0, 0, 0], - [200, 200, 1, 200, 200], - [200, 200, 1, 200, 200], - ] - - path = pathlib.Path("../test_outputs/test_tournament.csv") - cls.filename = axl_filename(path) - - def setUp(self): - self.test_tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=2, - repetitions=1, - ) - - def test_init(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=self.test_turns, - noise=0.2, - ) - self.assertEqual(len(tournament.players), len(test_strategies)) - self.assertIsInstance(tournament.players[0].match_attributes["game"], axl.IpdGame) - self.assertEqual(tournament.game.score((C, C)), (3, 3)) - self.assertEqual(tournament.turns, self.test_turns) - self.assertEqual(tournament.repetitions, 10) - self.assertEqual(tournament.name, "test") - self.assertIsInstance(tournament._logger, logging.Logger) - self.assertEqual(tournament.noise, 0.2) - anonymous_tournament = axl.IpdTournament(players=self.players) - self.assertEqual(anonymous_tournament.name, "axelrod") - - def test_init_with_match_attributes(self): - tournament = axl.IpdTournament( - players=self.players, match_attributes={"length": float("inf")} - ) - mg = tournament.match_generator - match_params = mg.build_single_match_params() - self.assertEqual(match_params["match_attributes"], {"length": float("inf")}) - - def test_warning(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=10, - repetitions=1, - ) - with warnings.catch_warnings(record=True) as w: - # Check that a warning is raised if no results set is built and no - # filename is given - results = tournament.play(build_results=False, progress_bar=False) - self.assertEqual(len(w), 1) - - with warnings.catch_warnings(record=True) as w: - # Check that no warning is raised if no results set is built and a - # filename is given - - tournament.play( - build_results=False, filename=self.filename, progress_bar=False - ) - self.assertEqual(len(w), 0) - - def test_setup_output_with_filename(self): - - self.test_tournament.setup_output(self.filename) - - self.assertEqual(self.test_tournament.filename, self.filename) - self.assertIsNone(self.test_tournament._temp_file_descriptor) - self.assertFalse(hasattr(self.test_tournament, "interactions_dict")) - - def test_setup_output_no_filename(self): - self.test_tournament.setup_output() - - self.assertIsInstance(self.test_tournament.filename, str) - self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) - self.assertFalse(hasattr(self.test_tournament, "interactions_dict")) - - os.close(self.test_tournament._temp_file_descriptor) - 
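- # A mkstemp-style temporary (the likely source of the integer descriptor
- # asserted above; an assumption, since setup_output's internals are not
- # shown here) returns an open OS-level descriptor plus a path, and both
- # need explicit cleanup: close the descriptor, then remove the file.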
os.remove(self.test_tournament.filename) - - def test_play_resets_num_interactions(self): - self.assertEqual(self.test_tournament.num_interactions, 0) - self.test_tournament.play(progress_bar=False) - self.assertEqual(self.test_tournament.num_interactions, 15) - - self.test_tournament.play(progress_bar=False) - self.assertEqual(self.test_tournament.num_interactions, 15) - - def test_play_changes_use_progress_bar(self): - self.assertTrue(self.test_tournament.use_progress_bar) - - self.test_tournament.play(progress_bar=False) - self.assertFalse(self.test_tournament.use_progress_bar) - - self.test_tournament.play(progress_bar=True) - self.assertTrue(self.test_tournament.use_progress_bar) - - def test_play_changes_temp_file_descriptor(self): - self.assertIsNone(self.test_tournament._temp_file_descriptor) - - # No file descriptor for a named file. - self.test_tournament.play(filename=self.filename, progress_bar=False) - self.assertIsNone(self.test_tournament._temp_file_descriptor) - - # Temp file creates file descriptor. - self.test_tournament.play(filename=None, progress_bar=False) - self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) - - def test_play_tempfile_removed(self): - self.test_tournament.play(filename=None, progress_bar=False) - - self.assertFalse(os.path.isfile(self.test_tournament.filename)) - - def test_play_resets_filename_and_temp_file_descriptor_each_time(self): - self.test_tournament.play(progress_bar=False) - self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) - self.assertIsInstance(self.test_tournament.filename, str) - old_filename = self.test_tournament.filename - - self.test_tournament.play(filename=self.filename, progress_bar=False) - self.assertIsNone(self.test_tournament._temp_file_descriptor) - self.assertEqual(self.test_tournament.filename, self.filename) - self.assertNotEqual(old_filename, self.test_tournament.filename) - - self.test_tournament.play(progress_bar=False) - self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) - self.assertIsInstance(self.test_tournament.filename, str) - self.assertNotEqual(old_filename, self.test_tournament.filename) - self.assertNotEqual(self.test_tournament.filename, self.filename) - - def test_get_file_objects_no_filename(self): - file, writer = self.test_tournament._get_file_objects() - self.assertIsNone(file) - self.assertIsNone(writer) - - def test_get_file_object_with_filename(self): - self.test_tournament.filename = self.filename - file_object, writer = self.test_tournament._get_file_objects() - self.assertIsInstance(file_object, io.TextIOWrapper) - self.assertEqual(writer.__class__.__name__, "writer") - file_object.close() - - def test_get_progress_bar(self): - self.test_tournament.use_progress_bar = False - pbar = self.test_tournament._get_progress_bar() - self.assertIsNone(pbar) - - self.test_tournament.use_progress_bar = True - pbar = self.test_tournament._get_progress_bar() - self.assertIsInstance(pbar, tqdm) - self.assertEqual(pbar.desc, "Playing matches") - self.assertEqual(pbar.n, 0) - self.assertEqual(pbar.total, self.test_tournament.match_generator.size) - - new_edges = [(0, 1), (1, 2), (2, 3), (3, 4)] - new_tournament = axl.IpdTournament(players=self.players, edges=new_edges) - new_tournament.use_progress_bar = True - pbar = new_tournament._get_progress_bar() - self.assertEqual(pbar.desc, "Playing matches") - self.assertEqual(pbar.n, 0) - self.assertEqual(pbar.total, len(new_edges)) - - def test_serial_play(self): - # Test that we get an instance of ResultSet - 
tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - results = tournament.play(progress_bar=False) - self.assertIsInstance(results, axl.ResultSet) - - # Test that _run_serial_repetitions is called with empty matches list - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - results = tournament.play(progress_bar=False) - self.assertEqual(tournament.num_interactions, 75) - - def test_serial_play_with_different_game(self): - # Test that a non default game is passed to the result set - game = axl.IpdGame(p=-1, r=-1, s=-1, t=-1) - tournament = axl.IpdTournament( - name=self.test_name, players=self.players, game=game, turns=1, repetitions=1 - ) - results = tournament.play(progress_bar=False) - self.assertLessEqual(np.max(results.scores), 0) - - @patch("tqdm.tqdm", RecordedTQDM) - def test_no_progress_bar_play(self): - """Test that progress bar is not created for progress_bar=False""" - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - - # Test with build results - RecordedTQDM.reset_record() - results = tournament.play(progress_bar=False) - self.assertIsInstance(results, axl.ResultSet) - # Check that no progress bar was created. - self.assertEqual(RecordedTQDM.record, []) - - # Test without build results - RecordedTQDM.reset_record() - results = tournament.play( - progress_bar=False, build_results=False, filename=self.filename - ) - self.assertIsNone(results) - self.assertEqual(RecordedTQDM.record, []) - - def assert_play_pbar_correct_total_and_finished(self, pbar, total): - self.assertEqual(pbar.desc, "Playing matches") - self.assertEqual(pbar.total, total) - self.assertEqual(pbar.n, total) - self.assertTrue(pbar.disable, True) - - @patch("tqdm.tqdm", RecordedTQDM) - def test_progress_bar_play(self): - """Test that progress bar is created by default and with True argument""" - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - - RecordedTQDM.reset_record() - results = tournament.play() - self.assertIsInstance(results, axl.ResultSet) - # Check that progress bar was created, updated and closed. - self.assertEqual(len(RecordedTQDM.record), 2) - play_pbar = RecordedTQDM.record[0] - self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) - # Check all progress bars are closed. 
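- # (tqdm marks a bar as closed by setting its `disable` attribute, which
- # is what the recorded bars are checked against below.)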
- self.assertTrue(all(pbar.disable for pbar in RecordedTQDM.record)) - - RecordedTQDM.reset_record() - results = tournament.play(progress_bar=True) - self.assertIsInstance(results, axl.ResultSet) - self.assertEqual(len(RecordedTQDM.record), 2) - play_pbar = RecordedTQDM.record[0] - self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) - - # Test without build results - RecordedTQDM.reset_record() - results = tournament.play( - progress_bar=True, build_results=False, filename=self.filename - ) - self.assertIsNone(results) - self.assertEqual(len(RecordedTQDM.record), 1) - play_pbar = RecordedTQDM.record[0] - self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) - - @patch("tqdm.tqdm", RecordedTQDM) - def test_progress_bar_play_parallel(self): - """Test that tournament plays when asking for progress bar for parallel - tournament and that progress bar is created.""" - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - - # progress_bar = False - RecordedTQDM.reset_record() - results = tournament.play(progress_bar=False, processes=2) - self.assertEqual(RecordedTQDM.record, []) - self.assertIsInstance(results, axl.ResultSet) - - # progress_bar = True - RecordedTQDM.reset_record() - results = tournament.play(progress_bar=True, processes=2) - self.assertIsInstance(results, axl.ResultSet) - - self.assertEqual(len(RecordedTQDM.record), 2) - play_pbar = RecordedTQDM.record[0] - self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) - - # progress_bar is default - RecordedTQDM.reset_record() - results = tournament.play(processes=2) - self.assertIsInstance(results, axl.ResultSet) - - self.assertEqual(len(RecordedTQDM.record), 2) - play_pbar = RecordedTQDM.record[0] - self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) - - @given( - tournament=tournaments( - min_size=2, - max_size=5, - min_turns=2, - max_turns=5, - min_repetitions=2, - max_repetitions=4, - ) - ) - @settings(max_examples=50) - @example( - tournament=axl.IpdTournament( - players=[s() for s in test_strategies], - turns=test_turns, - repetitions=test_repetitions, - ) - ) - # These two examples are to make sure #465 is fixed. - # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, - # these two examples were identified by hypothesis. 
- @example( - tournament=axl.IpdTournament( - players=[axl.BackStabber(), axl.MindReader()], turns=2, repetitions=1, - ) - ) - @example( - tournament=axl.IpdTournament( - players=[axl.BackStabber(), axl.ThueMorse()], turns=2, repetitions=1 - ) - ) - def test_property_serial_play(self, tournament): - """Test serial play using hypothesis""" - # Test that we get an instance of ResultSet - results = tournament.play(progress_bar=False) - self.assertIsInstance(results, axl.ResultSet) - self.assertEqual(results.num_players, len(tournament.players)) - self.assertEqual(results.players, [str(p) for p in tournament.players]) - - def test_parallel_play(self): - # Test that we get an instance of ResultSet - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - results = tournament.play(processes=2, progress_bar=False) - self.assertIsInstance(results, axl.ResultSet) - self.assertEqual(tournament.num_interactions, 75) - - # The following relates to #516 - players = [ - axl.Cooperator(), - axl.Defector(), - axl.BackStabber(), - axl.PSOGambler2_2_2(), - axl.ThueMorse(), - axl.DoubleCrosser(), - ] - tournament = axl.IpdTournament( - name=self.test_name, - players=players, - game=self.game, - turns=20, - repetitions=self.test_repetitions, - ) - scores = tournament.play(processes=2, progress_bar=False).scores - self.assertEqual(len(scores), len(players)) - - def test_parallel_play_with_writing_to_file(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - - results = tournament.play( - processes=2, progress_bar=False, filename=self.filename - ) - self.assertIsInstance(results, axl.ResultSet) - self.assertEqual(tournament.num_interactions, 75) - - def test_run_serial(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - tournament._write_interactions_to_file = MagicMock( - name="_write_interactions_to_file" - ) - self.assertTrue(tournament._run_serial()) - - # Get the calls made to write_interactions - calls = tournament._write_interactions_to_file.call_args_list - self.assertEqual(len(calls), 15) - - def test_run_parallel(self): - class PickleableMock(MagicMock): - def __reduce__(self): - return MagicMock, () - - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - tournament._write_interactions_to_file = PickleableMock( - name="_write_interactions_to_file" - ) - - # For test coverage purposes. This confirms PickleableMock can be - # pickled exactly once. Windows multi-processing must pickle this Mock - # exactly once during testing. 
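- # Because __reduce__ returns (MagicMock, ()), unpickling rebuilds the
- # attribute as a plain MagicMock; a plain MagicMock defines no custom
- # __reduce__, so a second pickling attempt raises PicklingError, exactly
- # as asserted below.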
- pickled = pickle.loads(pickle.dumps(tournament)) - self.assertIsInstance(pickled._write_interactions_to_file, MagicMock) - self.assertRaises(pickle.PicklingError, pickle.dumps, pickled) - - self.assertTrue(tournament._run_parallel()) - - # Get the calls made to write_interactions - calls = tournament._write_interactions_to_file.call_args_list - self.assertEqual(len(calls), 15) - - def test_n_workers(self): - max_processes = cpu_count() - - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - self.assertEqual(tournament._n_workers(processes=1), max_processes) - - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - self.assertEqual( - tournament._n_workers(processes=max_processes + 2), max_processes - ) - - @unittest.skipIf(cpu_count() < 2, "not supported on single processor machines") - def test_2_workers(self): - # This is a separate test with a skip condition because we - # cannot guarantee that the tests will always run on a machine - # with more than one processor - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - self.assertEqual(tournament._n_workers(processes=2), 2) - - def test_start_workers(self): - workers = 2 - work_queue = Queue() - done_queue = Queue() - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - chunks = tournament.match_generator.build_match_chunks() - for chunk in chunks: - work_queue.put(chunk) - tournament._start_workers(workers, work_queue, done_queue) - - stops = 0 - while stops < workers: - payoffs = done_queue.get() - if payoffs == "STOP": - stops += 1 - self.assertEqual(stops, workers) - - def test_worker(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - - work_queue = Queue() - chunks = tournament.match_generator.build_match_chunks() - count = 0 - for chunk in chunks: - work_queue.put(chunk) - count += 1 - work_queue.put("STOP") - - done_queue = Queue() - tournament._worker(work_queue, done_queue) - for r in range(count): - new_matches = done_queue.get() - for index_pair, matches in new_matches.items(): - self.assertIsInstance(index_pair, tuple) - self.assertEqual(len(matches), self.test_repetitions) - queue_stop = done_queue.get() - self.assertEqual(queue_stop, "STOP") - - def test_build_result_set(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - results = tournament.play(progress_bar=False) - self.assertIsInstance(results, axl.ResultSet) - - def test_no_build_result_set(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=axl.DEFAULT_TURNS, - repetitions=self.test_repetitions, - ) - - tournament._calculate_results = MagicMock(name="_calculate_results") - # Mocking this as it is called by play - self.assertIsNone( - tournament.play( - filename=self.filename, progress_bar=False, build_results=False - ) - ) - - # Get the calls made to write_interactions - calls = 
tournament._calculate_results.call_args_list - self.assertEqual(len(calls), 0) - - @given(turns=integers(min_value=1, max_value=200)) - @settings(max_examples=5) - @example(turns=3) - @example(turns=axl.DEFAULT_TURNS) - def test_play_matches(self, turns): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - repetitions=self.test_repetitions, - ) - - def make_chunk_generator(): - for player1_index in range(len(self.players)): - for player2_index in range(player1_index, len(self.players)): - index_pair = (player1_index, player2_index) - match_params = {"turns": turns, "game": self.game} - yield (index_pair, match_params, self.test_repetitions) - - chunk_generator = make_chunk_generator() - interactions = {} - for chunk in chunk_generator: - result = tournament._play_matches(chunk) - for index_pair, inters in result.items(): - try: - interactions[index_pair].append(inters) - except KeyError: - interactions[index_pair] = [inters] - - self.assertEqual(len(interactions), 15) - - for index_pair, inter in interactions.items(): - self.assertEqual(len(index_pair), 2) - for plays in inter: - # Check that have the expected number of repetitions - self.assertEqual(len(plays), self.test_repetitions) - for repetition in plays: - actions, results = repetition - self.assertEqual(len(actions), turns) - self.assertEqual(len(results), 10) - - # Check that matches no longer exist - self.assertEqual((len(list(chunk_generator))), 0) - - def test_match_cache_is_used(self): - """ - Create two Random players that are classified as deterministic. - As they are deterministic the cache will be used. - """ - FakeRandom = axl.Random - FakeRandom.classifier["stochastic"] = False - p1 = FakeRandom() - p2 = FakeRandom() - tournament = axl.IpdTournament((p1, p2), turns=5, repetitions=2) - results = tournament.play(progress_bar=False) - for player_scores in results.scores: - self.assertEqual(player_scores[0], player_scores[1]) - - def test_write_interactions(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=2, - repetitions=2, - ) - tournament._write_interactions_to_file = MagicMock( - name="_write_interactions_to_file" - ) - # Mocking this as it is called by play - self.assertIsNone( - tournament.play( - filename=self.filename, progress_bar=False, build_results=False - ) - ) - - # Get the calls made to write_interactions - calls = tournament._write_interactions_to_file.call_args_list - self.assertEqual(len(calls), 15) - - def test_write_to_csv_with_results(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=2, - repetitions=2, - ) - tournament.play(filename=self.filename, progress_bar=False) - df = pd.read_csv(self.filename) - path = pathlib.Path("../test_outputs/expected_test_tournament.csv") - expected_df = pd.read_csv(axl_filename(path)) - self.assertTrue(df.equals(expected_df)) - - def test_write_to_csv_without_results(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=2, - repetitions=2, - ) - tournament.play(filename=self.filename, progress_bar=False, build_results=False) - df = pd.read_csv(self.filename) - path = pathlib.Path("../test_outputs/expected_test_tournament_no_results.csv") - expected_df = pd.read_csv(axl_filename(path)) - self.assertTrue(df.equals(expected_df)) - - -class TestProbEndTournament(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.game = 
axl.IpdGame() - cls.players = [s() for s in test_strategies] - cls.test_name = "test" - cls.test_repetitions = test_repetitions - cls.test_prob_end = test_prob_end - - def test_init(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - prob_end=self.test_prob_end, - noise=0.2, - ) - self.assertEqual(tournament.match_generator.prob_end, tournament.prob_end) - self.assertEqual(len(tournament.players), len(test_strategies)) - self.assertEqual(tournament.game.score((C, C)), (3, 3)) - self.assertIsNone(tournament.turns) - self.assertEqual(tournament.repetitions, 10) - self.assertEqual(tournament.name, "test") - self.assertIsInstance(tournament._logger, logging.Logger) - self.assertEqual(tournament.noise, 0.2) - anonymous_tournament = axl.IpdTournament(players=self.players) - self.assertEqual(anonymous_tournament.name, "axelrod") - - @given( - tournament=prob_end_tournaments( - min_size=2, - max_size=5, - min_prob_end=0.1, - max_prob_end=0.9, - min_repetitions=2, - max_repetitions=4, - ) - ) - @settings(max_examples=5) - @example( - tournament=axl.IpdTournament( - players=[s() for s in test_strategies], - prob_end=0.2, - repetitions=test_repetitions, - ) - ) - # These two examples are to make sure #465 is fixed. - # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, - # these two examples were identified by hypothesis. - @example( - tournament=axl.IpdTournament( - players=[axl.BackStabber(), axl.MindReader()], prob_end=0.2, repetitions=1, - ) - ) - @example( - tournament=axl.IpdTournament( - players=[axl.ThueMorse(), axl.MindReader()], prob_end=0.2, repetitions=1, - ) - ) - def test_property_serial_play(self, tournament): - """Test serial play using hypothesis""" - # Test that we get an instance of ResultSet - results = tournament.play(progress_bar=False) - self.assertIsInstance(results, axl.ResultSet) - self.assertEqual(results.num_players, len(tournament.players)) - self.assertEqual(results.players, [str(p) for p in tournament.players]) - - -class TestSpatialTournament(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.game = axl.IpdGame() - cls.players = [s() for s in test_strategies] - cls.test_name = "test" - cls.test_repetitions = test_repetitions - cls.test_turns = test_turns - cls.test_edges = test_edges - - def test_init(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - turns=self.test_turns, - edges=self.test_edges, - noise=0.2, - ) - self.assertEqual(tournament.match_generator.edges, tournament.edges) - self.assertEqual(len(tournament.players), len(test_strategies)) - self.assertEqual(tournament.game.score((C, C)), (3, 3)) - self.assertEqual(tournament.turns, 100) - self.assertEqual(tournament.repetitions, 10) - self.assertEqual(tournament.name, "test") - self.assertIsInstance(tournament._logger, logging.Logger) - self.assertEqual(tournament.noise, 0.2) - self.assertEqual(tournament.match_generator.noise, 0.2) - anonymous_tournament = axl.IpdTournament(players=self.players) - self.assertEqual(anonymous_tournament.name, "axelrod") - - @given( - strategies=strategy_lists( - strategies=deterministic_strategies, min_size=2, max_size=2 - ), - turns=integers(min_value=1, max_value=20), - repetitions=integers(min_value=1, max_value=5), - noise=floats(min_value=0, max_value=1), - seed=integers(min_value=0, max_value=4294967295), - ) - @settings(max_examples=5) - def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): - """ 
- A test to check that a spatial tournament on the complete multigraph - gives the same results as the round robin. - """ - - players = [s() for s in strategies] - # edges - edges = [] - for i in range(0, len(players)): - for j in range(i, len(players)): - edges.append((i, j)) - - # create a round robin tournament - tournament = axl.IpdTournament( - players, repetitions=repetitions, turns=turns, noise=noise - ) - # create a complete spatial tournament - spatial_tournament = axl.IpdTournament( - players, repetitions=repetitions, turns=turns, noise=noise, edges=edges - ) - - axl.seed(seed) - results = tournament.play(progress_bar=False) - axl.seed(seed) - spatial_results = spatial_tournament.play(progress_bar=False) - - self.assertEqual(results.ranked_names, spatial_results.ranked_names) - self.assertEqual(results.num_players, spatial_results.num_players) - self.assertEqual(results.repetitions, spatial_results.repetitions) - self.assertEqual(results.payoff_diffs_means, spatial_results.payoff_diffs_means) - self.assertEqual(results.payoff_matrix, spatial_results.payoff_matrix) - self.assertEqual(results.payoff_stddevs, spatial_results.payoff_stddevs) - self.assertEqual(results.payoffs, spatial_results.payoffs) - self.assertEqual(results.cooperating_rating, spatial_results.cooperating_rating) - self.assertEqual(results.cooperation, spatial_results.cooperation) - self.assertEqual( - results.normalised_cooperation, spatial_results.normalised_cooperation - ) - self.assertEqual(results.normalised_scores, spatial_results.normalised_scores) - self.assertEqual( - results.good_partner_matrix, spatial_results.good_partner_matrix - ) - self.assertEqual( - results.good_partner_rating, spatial_results.good_partner_rating - ) - - def test_particular_tournament(self): - """A test for a tournament that has caused failures during some bug - fixing""" - players = [ - axl.Cooperator(), - axl.Defector(), - axl.TitForTat(), - axl.Grudger(), - ] - edges = [(0, 2), (0, 3), (1, 2), (1, 3)] - tournament = axl.IpdTournament(players, edges=edges) - results = tournament.play(progress_bar=False) - expected_ranked_names = ["Cooperator", "Tit For Tat", "Grudger", "Defector"] - self.assertEqual(results.ranked_names, expected_ranked_names) - - # Check that this tournament runs with noise - tournament = axl.IpdTournament(players, edges=edges, noise=0.5) - results = tournament.play(progress_bar=False) - self.assertIsInstance(results, axl.ResultSet) - - -class TestProbEndingSpatialTournament(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.game = axl.IpdGame() - cls.players = [s() for s in test_strategies] - cls.test_name = "test" - cls.test_repetitions = test_repetitions - cls.test_prob_end = test_prob_end - cls.test_edges = test_edges - - def test_init(self): - tournament = axl.IpdTournament( - name=self.test_name, - players=self.players, - game=self.game, - prob_end=self.test_prob_end, - edges=self.test_edges, - noise=0.2, - ) - self.assertEqual(tournament.match_generator.edges, tournament.edges) - self.assertEqual(len(tournament.players), len(test_strategies)) - self.assertEqual(tournament.game.score((C, C)), (3, 3)) - self.assertIsNone(tournament.turns) - self.assertEqual(tournament.repetitions, 10) - self.assertEqual(tournament.name, "test") - self.assertIsInstance(tournament._logger, logging.Logger) - self.assertEqual(tournament.noise, 0.2) - self.assertEqual(tournament.match_generator.noise, 0.2) - self.assertEqual(tournament.prob_end, self.test_prob_end) - - @given( - strategies=strategy_lists( - 
strategies=deterministic_strategies, min_size=2, max_size=2 - ), - prob_end=floats(min_value=0.1, max_value=0.9), - reps=integers(min_value=1, max_value=3), - seed=integers(min_value=0, max_value=4294967295), - ) - @settings(max_examples=5) - def test_complete_tournament(self, strategies, prob_end, seed, reps): - """ - A test to check that a spatial tournament on the complete graph - gives the same results as the round robin. - """ - players = [s() for s in strategies] - - # create a prob end round robin tournament - tournament = axl.IpdTournament(players, prob_end=prob_end, repetitions=reps) - axl.seed(seed) - results = tournament.play(progress_bar=False) - - # create a complete spatial tournament - # edges - edges = [(i, j) for i in range(len(players)) for j in range(i, len(players))] - - spatial_tournament = axl.IpdTournament( - players, prob_end=prob_end, repetitions=reps, edges=edges - ) - axl.seed(seed) - spatial_results = spatial_tournament.play(progress_bar=False) - self.assertEqual(results.match_lengths, spatial_results.match_lengths) - self.assertEqual(results.ranked_names, spatial_results.ranked_names) - self.assertEqual(results.wins, spatial_results.wins) - self.assertEqual(results.scores, spatial_results.scores) - self.assertEqual(results.cooperation, spatial_results.cooperation) - - @given( - tournament=spatial_tournaments( - strategies=axl.basic_strategies, - max_turns=1, - max_noise=0, - max_repetitions=3, - ), - seed=integers(min_value=0, max_value=4294967295), - ) - @settings(max_examples=5) - def test_one_turn_tournament(self, tournament, seed): - """ - Tests that a spatial tournament with prob_end=1 gives the same results - as the corresponding one-turn spatial tournament - """ - prob_end_tour = axl.IpdTournament( - tournament.players, - prob_end=1, - edges=tournament.edges, - repetitions=tournament.repetitions, - ) - axl.seed(seed) - prob_end_results = prob_end_tour.play(progress_bar=False) - axl.seed(seed) - one_turn_results = tournament.play(progress_bar=False) - self.assertEqual(prob_end_results.scores, one_turn_results.scores) - self.assertEqual(prob_end_results.wins, one_turn_results.wins) - self.assertEqual(prob_end_results.cooperation, one_turn_results.cooperation) - - -class TestHelperFunctions(unittest.TestCase): - def test_close_objects_with_none(self): - self.assertIsNone(_close_objects(None, None)) - - def test_close_objects_with_file_objs(self): - f1 = open("to_delete_1", "w") - f2 = open("to_delete_2", "w") - f2.close() - f2 = open("to_delete_2", "r") - - self.assertFalse(f1.closed) - self.assertFalse(f2.closed) - - _close_objects(f1, f2) - - self.assertTrue(f1.closed) - self.assertTrue(f2.closed) - - os.remove("to_delete_1") - os.remove("to_delete_2") - - def test_close_objects_with_tqdm(self): - pbar_1 = tqdm(range(5)) - pbar_2 = tqdm(total=10, desc="hi", file=io.StringIO()) - - self.assertFalse(pbar_1.disable) - self.assertFalse(pbar_2.disable) - - _close_objects(pbar_1, pbar_2) - - self.assertTrue(pbar_1.disable) - self.assertTrue(pbar_2.disable) - - def test_close_objects_with_different_objects(self): - file = open("to_delete_1", "w") - pbar = tqdm(range(5)) - num = 5 - empty = None - word = "hi" - - _close_objects(file, pbar, num, empty, word) - - self.assertTrue(pbar.disable) - self.assertTrue(file.closed) - - os.remove("to_delete_1") diff --git a/axelrod/ipd/tests/unit/test_version.py b/axelrod/ipd/tests/unit/test_version.py deleted file mode 100644 index 2f2021bcd..000000000 --- a/axelrod/ipd/tests/unit/test_version.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Tests the version
number.""" - -import unittest - -import axelrod as axl - - -class TestVersion(unittest.TestCase): - def test_version(self): - self.assertIsInstance(axl.__version__, str) diff --git a/axelrod/ipd/tournament.py b/axelrod/ipd/tournament.py deleted file mode 100644 index 7d68a9d36..000000000 --- a/axelrod/ipd/tournament.py +++ /dev/null @@ -1,497 +0,0 @@ -import csv -import logging -import os -import warnings -from collections import defaultdict -from multiprocessing import Process, Queue, cpu_count -from tempfile import mkstemp -from typing import List, Optional, Tuple - -import axelrod.ipd.interaction_utils as iu -import tqdm -from axelrod import DEFAULT_TURNS -from axelrod.ipd.action import Action, actions_to_str, str_to_actions -from axelrod.ipd.player import IpdPlayer -from axelrod.tournament import BaseTournament - -from .game import IpdGame -from .match import IpdMatch -from .match_generator import MatchGenerator -from .result_set import ResultSet - -C, D = Action.C, Action.D - - -class IpdTournament(BaseTournament): - def __init__( - self, - players: List[IpdPlayer], - name: str = "axelrod", - game: IpdGame = None, - turns: int = None, - prob_end: float = None, - repetitions: int = 10, - noise: float = 0, - edges: List[Tuple] = None, - match_attributes: dict = None, - ) -> None: - """ - Parameters - ---------- - players : list - A list of axelrodPlayer objects - name : string - A name for the tournament - game : axelrod.IpdGame - The game object used to score the tournament - turns : integer - The number of turns per match - prob_end : float - The probability of a given turn ending a match - repetitions : integer - The number of times the round robin should be repeated - noise : float - The probability that a player's intended action should be flipped - prob_end : float - The probability of a given turn ending a match - edges : list - A list of edges between players - match_attributes : dict - Mapping attribute names to values which should be passed to players. - The default is to use the correct values for turns, game and noise - but these can be overridden if desired. - """ - if game is None: - self.game = IpdGame() - else: - self.game = game - self.name = name - self.noise = noise - self.num_interactions = 0 - self.players = players - self.repetitions = repetitions - self.edges = edges - - if turns is None and prob_end is None: - turns = DEFAULT_TURNS - - self.turns = turns - self.prob_end = prob_end - self.match_generator = MatchGenerator( - players=players, - turns=turns, - game=self.game, - repetitions=self.repetitions, - prob_end=prob_end, - noise=self.noise, - edges=edges, - match_attributes=match_attributes, - ) - self._logger = logging.getLogger(__name__) - - self.use_progress_bar = True - self.filename = None # type: Optional[str] - self._temp_file_descriptor = None # type: Optional[int] - - super().__init__( - players, - name, - game, - turns, - prob_end, - repetitions, - noise, - edges, - match_attributes - ) - - def setup_output(self, filename=None): - """assign/create `filename` to `self`. If file should be deleted once - `play` is finished, assign a file descriptor. 
""" - temp_file_descriptor = None - if filename is None: - temp_file_descriptor, filename = mkstemp() - - self.filename = filename - self._temp_file_descriptor = temp_file_descriptor - - def play( - self, - build_results: bool = True, - filename: str = None, - processes: int = None, - progress_bar: bool = True, - ) -> ResultSet: - """ - Plays the tournament and passes the results to the ResultSet class - - Parameters - ---------- - build_results : bool - whether or not to build a results set - filename : string - name of output file - processes : integer - The number of processes to be used for parallel processing - progress_bar : bool - Whether or not to create a progress bar which will be updated - - Returns - ------- - axelrod.ResultSet - """ - self.num_interactions = 0 - - self.use_progress_bar = progress_bar - - self.setup_output(filename) - - if not build_results and not filename: - warnings.warn( - "IpdTournament results will not be accessible since " - "build_results=False and no filename was supplied." - ) - - if processes is None: - self._run_serial(build_results=build_results) - else: - self._run_parallel(build_results=build_results, processes=processes) - - result_set = None - if build_results: - result_set = ResultSet( - filename=self.filename, - players=[str(p) for p in self.players], - repetitions=self.repetitions, - processes=processes, - progress_bar=progress_bar, - ) - if self._temp_file_descriptor is not None: - assert self.filename is not None - os.close(self._temp_file_descriptor) - os.remove(self.filename) - - return result_set - - def _run_serial(self, build_results: bool = True) -> bool: - """Run all matches in serial.""" - - chunks = self.match_generator.build_match_chunks() - - out_file, writer = self._get_file_objects(build_results) - progress_bar = self._get_progress_bar() - - for chunk in chunks: - results = self._play_matches(chunk, build_results=build_results) - self._write_interactions_to_file(results, writer=writer) - - if self.use_progress_bar: - progress_bar.update(1) - - _close_objects(out_file, progress_bar) - - return True - - def _get_file_objects(self, build_results=True): - """Returns the file object and writer for writing results or - (None, None) if self.filename is None""" - file_obj = None - writer = None - if self.filename is not None: - file_obj = open(self.filename, "w") - writer = csv.writer(file_obj, lineterminator="\n") - - header = [ - "Interaction index", - "Player index", - "Opponent index", - "Repetition", - "Player name", - "Opponent name", - "Actions", - ] - if build_results: - header.extend( - [ - "Score", - "Score difference", - "Turns", - "Score per turn", - "Score difference per turn", - "Win", - "Initial cooperation", - "Cooperation count", - "CC count", - "CD count", - "DC count", - "DD count", - "CC to C count", - "CC to D count", - "CD to C count", - "CD to D count", - "DC to C count", - "DC to D count", - "DD to C count", - "DD to D count", - "Good partner", - ] - ) - - writer.writerow(header) - return file_obj, writer - - def _get_progress_bar(self): - if self.use_progress_bar: - return tqdm.tqdm(total=self.match_generator.size, desc="Playing matches") - return None - - def _write_interactions_to_file(self, results, writer): - """Write the interactions to csv.""" - for index_pair, interactions in results.items(): - repetition = 0 - for interaction, results in interactions: - - if results is not None: - ( - scores, - score_diffs, - turns, - score_per_turns, - score_diffs_per_turns, - initial_cooperation, - cooperations, - 
state_distribution, - state_to_action_distributions, - winner_index, - ) = results - for index, player_index in enumerate(index_pair): - opponent_index = index_pair[index - 1] - row = [self.num_interactions, player_index, opponent_index, repetition, - str(self.players[player_index]), str(self.players[opponent_index])] - history = actions_to_str([i[index] for i in interaction]) - row.append(history) - - if results is not None: - row.append(scores[index]) - row.append(score_diffs[index]) - row.append(turns) - row.append(score_per_turns[index]) - row.append(score_diffs_per_turns[index]) - row.append(int(winner_index is index)) - row.append(initial_cooperation[index]) - row.append(cooperations[index]) - - states = [(C, C), (C, D), (D, C), (D, D)] - if index == 1: - states = [s[::-1] for s in states] - for state in states: - row.append(state_distribution[state]) - for state in states: - row.append(state_to_action_distributions[index][(state, C)]) - row.append(state_to_action_distributions[index][(state, D)]) - - row.append(int(cooperations[index] >= cooperations[index - 1])) - - writer.writerow(row) - repetition += 1 - self.num_interactions += 1 - - def _run_parallel(self, processes: int = 2, build_results: bool = True) -> bool: - """ - Run all matches in parallel - - Parameters - ---------- - build_results : bool - whether or not to build a results set - processes : int - How many processes to use. - """ - # At first sight, it might seem simpler to use the multiprocessing Pool - # Class rather than Processes and Queues. However, this way is faster. - work_queue = Queue() # type: Queue - done_queue = Queue() # type: Queue - workers = self._n_workers(processes=processes) - - chunks = self.match_generator.build_match_chunks() - for chunk in chunks: - work_queue.put(chunk) - - self._start_workers(workers, work_queue, done_queue, build_results) - self._process_done_queue(workers, done_queue, build_results) - - return True - - def _n_workers(self, processes: int = 2) -> int: - """ - Determines the number of parallel processes to use. - - Returns - ------- - integer - """ - if 2 <= processes <= cpu_count(): - n_workers = processes - else: - n_workers = cpu_count() - return n_workers - - def _start_workers( - self, - workers: int, - work_queue: Queue, - done_queue: Queue, - build_results: bool = True, - ) -> bool: - """ - Initiates the sub-processes to carry out parallel processing. 
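A simplified sketch of the pattern (the method below additionally threads `build_results` through to each worker): one process per worker, plus one "STOP" sentinel on the work queue per worker so that every consumer eventually halts.

    from multiprocessing import Process, Queue

    def start_workers(workers: int, work_queue: Queue, done_queue: Queue, target) -> None:
        for _ in range(workers):
            Process(target=target, args=(work_queue, done_queue)).start()
            work_queue.put("STOP")  # one sentinel per worker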
- - Parameters - ---------- - workers : integer - The number of sub-processes to create - work_queue : multiprocessing.Queue - A queue containing an entry for each round robin to be processed - done_queue : multiprocessing.Queue - A queue containing the output dictionaries from each round robin - build_results : bool - whether or not to build a results set - """ - for worker in range(workers): - process = Process( - target=self._worker, args=(work_queue, done_queue, build_results) - ) - work_queue.put("STOP") - process.start() - return True - - def _process_done_queue( - self, workers: int, done_queue: Queue, build_results: bool = True - ): - """ - Retrieves the matches from the parallel sub-processes - - Parameters - ---------- - workers : integer - The number of sub-processes in existence - done_queue : multiprocessing.Queue - A queue containing the output dictionaries from each round robin - build_results : bool - whether or not to build a results set - """ - out_file, writer = self._get_file_objects(build_results) - progress_bar = self._get_progress_bar() - - stops = 0 - while stops < workers: - results = done_queue.get() - if results == "STOP": - stops += 1 - else: - self._write_interactions_to_file(results, writer) - - if self.use_progress_bar: - progress_bar.update(1) - - _close_objects(out_file, progress_bar) - return True - - def _worker(self, work_queue: Queue, done_queue: Queue, build_results: bool = True): - """ - The work for each parallel sub-process to execute. - - Parameters - ---------- - work_queue : multiprocessing.Queue - A queue containing an entry for each round robin to be processed - done_queue : multiprocessing.Queue - A queue containing the output dictionaries from each round robin - build_results : bool - whether or not to build a results set - """ - for chunk in iter(work_queue.get, "STOP"): - interactions = self._play_matches(chunk, build_results) - done_queue.put(interactions) - done_queue.put("STOP") - return True - - def _play_matches(self, chunk, build_results=True): - """ - Play matches in a given chunk. - - Parameters - ---------- - chunk : tuple (index pair, match_parameters, repetitions) - match_parameters are also a tuple: (turns, game, noise) - build_results : bool - whether or not to build a results set - - Returns - ------- - interactions : dictionary - Mapping player index pairs to results of matches: - - (0, 1) -> [(C, D), (D, C),...] 
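For illustration, a chunk for players 0 and 1 played 5 times might look like (a sketch; the real match_params carry whatever keyword arguments IpdMatch expects):

    ((0, 1), {"turns": 10, "game": game, "noise": 0}, 5)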
- """ - interactions = defaultdict(list) - index_pair, match_params, repetitions = chunk - p1_index, p2_index = index_pair - player1 = self.players[p1_index].clone() - player2 = self.players[p2_index].clone() - match_params["players"] = (player1, player2) - match = IpdMatch(**match_params) - for _ in range(repetitions): - match.play() - - if build_results: - results = self._calculate_results(match.result) - else: - results = None - - interactions[index_pair].append([match.result, results]) - return interactions - - def _calculate_results(self, interactions): - results = [] - - scores = iu.compute_final_score(interactions, self.game) - results.append(scores) - - score_diffs = scores[0] - scores[1], scores[1] - scores[0] - results.append(score_diffs) - - turns = len(interactions) - results.append(turns) - - score_per_turns = iu.compute_final_score_per_turn(interactions, self.game) - results.append(score_per_turns) - - score_diffs_per_turns = score_diffs[0] / turns, score_diffs[1] / turns - results.append(score_diffs_per_turns) - - initial_coops = tuple(map(bool, iu.compute_cooperations(interactions[:1]))) - results.append(initial_coops) - - cooperations = iu.compute_cooperations(interactions) - results.append(cooperations) - - state_distribution = iu.compute_state_distribution(interactions) - results.append(state_distribution) - - state_to_action_distributions = iu.compute_state_to_action_distribution( - interactions - ) - results.append(state_to_action_distributions) - - winner_index = iu.compute_winner_index(interactions, self.game) - results.append(winner_index) - - return results - - -def _close_objects(*objs): - """If the objects have a `close` method, closes them.""" - for obj in objs: - if hasattr(obj, "close"): - obj.close() From 3b4b6428fa4d3db3336e27df6f11528c2440444d Mon Sep 17 00:00:00 2001 From: "T.J. 
Gaffney" Date: Sat, 25 Apr 2020 22:27:07 -0700 Subject: [PATCH 3/7] Add adapter tests --- axelrod/__init__.py | 9 +- axelrod/base_player.py | 2 + axelrod/base_tournament.py | 2 +- axelrod/ipd_adapter.py | 297 +++- axelrod/tests/unit/test_ipd_adapter.py | 1864 ++++++++++++++++++++++++ rebuild_classifier_table.py | 2 +- 6 files changed, 2158 insertions(+), 18 deletions(-) create mode 100644 axelrod/tests/unit/test_ipd_adapter.py diff --git a/axelrod/__init__.py b/axelrod/__init__.py index 1edd170a4..8ec6e5ba4 100644 --- a/axelrod/__init__.py +++ b/axelrod/__init__.py @@ -7,15 +7,22 @@ from axelrod.action import Action from axelrod.random_ import random_choice, random_flip, seed, Pdf from axelrod.plot import Plot -from axelrod.game import DefaultGame +from axelrod.base_game import BaseGame +from axelrod.game import DefaultGame, IpdGame from axelrod.history import History, LimitedHistory +from axelrod.base_player import BasePlayer +from axelrod.player import IpdPlayer from axelrod.classifier import Classifiers from axelrod.evolvable_player import EvolvablePlayer from axelrod.mock_player import MockPlayer +from axelrod.base_match import BaseMatch +from axelrod.match import IpdMatch from axelrod.moran import MoranProcess, ApproximateMoranProcess from axelrod.strategies import * from axelrod.deterministic_cache import DeterministicCache from axelrod.match_generator import * +from axelrod.base_tournament import BaseTournament +from axelrod.tournament import IpdTournament from axelrod.result_set import ResultSet from axelrod.ecosystem import Ecosystem from axelrod.fingerprint import AshlockFingerprint, TransitiveFingerprint diff --git a/axelrod/base_player.py b/axelrod/base_player.py index 007f66e85..1fce21b63 100644 --- a/axelrod/base_player.py +++ b/axelrod/base_player.py @@ -1,9 +1,11 @@ +import inspect from typing import Optional, Tuple import axelrod as axl class BasePlayer(object): + def __init__(self): pass diff --git a/axelrod/base_tournament.py b/axelrod/base_tournament.py index 6da6ddd95..9b2852498 100644 --- a/axelrod/base_tournament.py +++ b/axelrod/base_tournament.py @@ -57,7 +57,7 @@ def play( filename: str = None, processes: int = None, progress_bar: bool = True, - ) -> axl.ResultSet: + ) -> 'ResultSet': """ Plays the tournament and passes the results to the ResultSet class diff --git a/axelrod/ipd_adapter.py b/axelrod/ipd_adapter.py index ce0f06fad..6e4a6af79 100644 --- a/axelrod/ipd_adapter.py +++ b/axelrod/ipd_adapter.py @@ -6,6 +6,8 @@ general class of games. """ +import copy +import inspect from typing import Dict, List, Tuple, Union import axelrod as axl @@ -13,39 +15,117 @@ Score = Union[int, float] -class Player(object): +class Player(axl.IpdPlayer): """Legacy players derive from this adapter.""" + def __new__(cls, *args, **kwargs): + """Caches arguments for IpdPlayer cloning.""" + obj = super().__new__(cls) + obj.init_kwargs = cls.init_params(*args, **kwargs) + return obj + + @classmethod + def init_params(cls, *args, **kwargs): + """ + Return a dictionary containing the init parameters of a strategy + (without 'self'). + Use *args and *kwargs as value if specified + and complete the rest with the default values. 
+ """ + sig = inspect.signature(cls.__init__) + # The 'self' parameter needs to be removed or the first *args will be + # assigned to it + self_param = sig.parameters.get("self") + new_params = list(sig.parameters.values()) + new_params.remove(self_param) + sig = sig.replace(parameters=new_params) + boundargs = sig.bind_partial(*args, **kwargs) + boundargs.apply_defaults() + return boundargs.arguments + def __init__(self): - """The derived class should call super().__init__(). At that point, - name and clasifiers on the derived class will be set, so we copy that to - player.""" self._player = axl.IpdPlayer() - if self.name: - self._player.name = self.name - if self.classifier: - self._player.classifier = self.classifier - def strategy(self, opponent: "BasePlayer") -> axl.Action: + def strategy(self, opponent: axl.IpdPlayer) -> axl.Action: """We expect the derived class to set this behavior.""" raise NotImplementedError() def play( - self, opponent: "BasePlayer", noise: float = 0 + self, opponent: axl.IpdPlayer, noise: float = 0 ) -> Tuple[axl.Action, axl.Action]: # We have to provide _player.play a copy of this strategy, which will # have an overwritten strategy, and possibly saved state and helper # methods. - self._player.play(opponent, noise, strategy_holder=self) - - def clone(self) -> "Player": - new_player = Player() - new_player._player = self._player.clone() + return self._player.play(opponent, noise, strategy_holder=self) + + def clone(self) -> 'Player': + """Clones the player without history, reapplying configuration + parameters as necessary.""" + + # You may be tempted to re-implement using the `copy` module + # Note that this would require a deepcopy in some cases and there may + # be significant changes required throughout the library. 
+ # Consider overriding in special cases only if necessary + cls = self.__class__ + new_player = cls(**self.init_kwargs) + new_player._player.match_attributes = copy.copy(self.match_attributes) return new_player def reset(self): self._player.reset() + def set_match_attributes(self, length: int = -1, game: 'Game' = None, + noise: float = 0) -> None: + self._player.set_match_attributes(length, game, noise) + + def update_history(self, play: axl.Action, coplay: axl.Action) -> None: + self._player.update_history(play, coplay) + + @property + def history(self): + return self._player.history + + @property + def match_attributes(self): + return self._player.match_attributes + + @match_attributes.setter + def match_attributes(self, match_attributes): + self._player.match_attributes = match_attributes + + @property + def cooperations(self): + return self._player.cooperations + + @property + def defections(self): + return self._player.defections + + @property + def name(self): + return self._player.name + + @name.setter + def name(self, name): + self._player.name = name + + @property + def classifier(self): + return self._player.classifier + + @classifier.setter + def classifier(self, classifier): + self._player.classifier = classifier + + @property + def state_distribution(self): + return self._player.state_distribution + + def __eq__(self, other: 'Player') -> bool: + if not isinstance(other, Player): + return False + return self._player == other._player + class Game(object): def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1): @@ -54,6 +134,25 @@ def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1): def score(self, pair: Tuple[axl.Action, axl.Action]) -> Tuple[Score, Score]: return self._game.score(pair) + def RPST(self) -> Tuple[Score, Score, Score, Score]: + return self._game.RPST() + + @property + def scores(self): + return self._game.scores + + @scores.setter + def scores(self, scores): + self._game.scores = scores + + def __repr__(self) -> str: + return repr(self._game) + + def __eq__(self, other: 'Game') -> bool: + if not isinstance(other, Game): + return False + return self._game == other._game + class Match(object): def __init__( @@ -86,6 +185,70 @@ def players(self) -> Tuple[axl.IpdPlayer]: def players(self, players: Tuple[axl.IpdPlayer]): self._match.players = players + @property + def result(self): + return self._match.result + + @result.setter + def result(self, result): + self._match.result = result + + @property + def noise(self): + return self._match.noise + + @noise.setter + def noise(self, noise): + self._match.noise = noise + + @property + def game(self): + return self._match.game + + @game.setter + def game(self, game): + self._match.game = game + + @property + def _cache(self): + return self._match._cache + + @_cache.setter + def _cache(self, _cache): + self._match._cache = _cache + + @property + def _cache_update_required(self): + return self._match._cache_update_required + + @property + def _stochastic(self): + return self._match._stochastic + + @property + def prob_end(self): + return self._match.prob_end + + @prob_end.setter + def prob_end(self, prob_end): + self._match.prob_end = prob_end + + @property + def turns(self): + return self._match.turns + + @turns.setter + def turns(self, turns): + self._match.turns = turns + + @property + def reset(self): + return self._match.reset + + @reset.setter + def reset(self, reset): + self._match.reset = reset + def play(self) -> List[Tuple[axl.Action]]: return self._match.play() @@ -158,3 
+321,107 @@ def play( return self._tournament.play( build_results, filename, processes, progress_bar ) + + @property + def players(self): + return self._tournament.players + + @players.setter + def players(self, players): + self._tournament.players = players + + @property + def game(self): + return self._tournament.game + + @game.setter + def game(self, game): + self._tournament.game = game + + @property + def turns(self): + return self._tournament.turns + + @turns.setter + def turns(self, turns): + self._tournament.turns = turns + + @property + def repetitions(self): + return self._tournament.repetitions + + @repetitions.setter + def repetitions(self, repetitions): + self._tournament.repetitions = repetitions + + @property + def name(self): + return self._tournament.name + + @name.setter + def name(self, name): + self._tournament.name = name + + @property + def _logger(self): + return self._tournament._logger + + @property + def noise(self): + return self._tournament.noise + + @noise.setter + def noise(self, noise): + self._tournament.noise = noise + + @property + def match_generator(self): + return self._tournament.match_generator + + @match_generator.setter + def match_generator(self, match_generator): + self._tournament.match_generator = match_generator + + @property + def _temp_file_descriptor(self): + return self._tournament._temp_file_descriptor + + @property + def num_interactions(self): + return self._tournament.num_interactions + + @num_interactions.setter + def num_interactions(self, num_interactions): + self._tournament.num_interactions = num_interactions + + @property + def use_progress_bar(self): + return self._tournament.use_progress_bar + + @use_progress_bar.setter + def use_progress_bar(self, use_progress_bar): + self._tournament.use_progress_bar = use_progress_bar + + @property + def filename(self): + return self._tournament.filename + + @filename.setter + def filename(self, filename): + self._tournament.filename = filename + + @property + def edges(self): + return self._tournament.edges + + @edges.setter + def edges(self, edges): + self._tournament.edges = edges + + @property + def prob_end(self): + return self._tournament.prob_end + + @prob_end.setter + def prob_end(self, prob_end): + self._tournament.prob_end = prob_end diff --git a/axelrod/tests/unit/test_ipd_adapter.py b/axelrod/tests/unit/test_ipd_adapter.py new file mode 100644 index 000000000..be9f5b927 --- /dev/null +++ b/axelrod/tests/unit/test_ipd_adapter.py @@ -0,0 +1,1864 @@ +"""Tests adapters defined in ipd_adapter. + +Tests that the public API (public methods and variables with accessors) matches +API on the Ipd versions of Player, Game, Match, and Tournament, by copying +relevant portions of those tests. 
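For instance, the adapter `Game` is expected to score exactly like `IpdGame` (an illustrative check, not one of the copied tests):

    import axelrod as axl

    C = axl.Action.C
    assert axl.Game().score((C, C)) == axl.IpdGame().score((C, C)) == (3, 3)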
+""" + +from collections import Counter +import io +import logging +from multiprocessing import Queue, cpu_count +import os +import pathlib +import pickle +import random +import unittest +from unittest.mock import MagicMock, patch +import warnings + +from hypothesis import example, given, settings +from hypothesis.strategies import assume, floats, integers, sampled_from +import numpy as np +import pandas as pd +from tqdm import tqdm + +import axelrod as axl +from axelrod.deterministic_cache import DeterministicCache +from axelrod.load_data_ import axl_filename +from axelrod.player import simultaneous_play +from axelrod.tests.property import ( + games, + prob_end_tournaments, + spatial_tournaments, + strategy_lists, + tournaments, +) +from axelrod.tournament import _close_objects + +C, D = axl.Action.C, axl.Action.D + +test_strategies = [ + axl.Cooperator, + axl.TitForTat, + axl.Defector, + axl.Grudger, + axl.GoByMajority, +] +test_repetitions = 5 +test_turns = 100 + +test_prob_end = 0.5 + +test_edges = [(0, 1), (1, 2), (3, 4)] + +deterministic_strategies = [ + s for s in axl.short_run_time_strategies if not axl.Classifiers["stochastic"](s()) +] + +short_run_time_short_mem = [ + s + for s in axl.short_run_time_strategies + if axl.Classifiers["memory_depth"](s()) <= 10 +] + +# Classifiers for TitForTat +_test_classifier = { + "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + +class RecordedTQDM(tqdm): + """This is a tqdm.tqdm that keeps a record of every RecordedTQDM created. + It is used to test that progress bars were correctly created and then + closed.""" + + record = [] + + def __init__(self, *args, **kwargs): + super(RecordedTQDM, self).__init__(*args, **kwargs) + RecordedTQDM.record.append(self) + + @classmethod + def reset_record(cls): + cls.record = [] + + +class TestGame(unittest.TestCase): + def test_default_scores(self): + expected_scores = { + (C, D): (0, 5), + (D, C): (5, 0), + (D, D): (1, 1), + (C, C): (3, 3), + } + self.assertEqual(axl.Game().scores, expected_scores) + + def test_default_RPST(self): + expected_values = (3, 1, 0, 5) + self.assertEqual(axl.Game().RPST(), expected_values) + + def test_default_score(self): + game = axl.Game() + self.assertEqual(game.score((C, C)), (3, 3)) + self.assertEqual(game.score((D, D)), (1, 1)) + self.assertEqual(game.score((C, D)), (0, 5)) + self.assertEqual(game.score((D, C)), (5, 0)) + + def test_default_equality(self): + self.assertEqual(axl.Game(), axl.Game()) + + def test_not_default_equality(self): + self.assertEqual(axl.Game(1, 2, 3, 4), axl.Game(1, 2, 3, 4)) + self.assertNotEqual(axl.Game(1, 2, 3, 4), axl.Game(1, 2, 3, 5)) + self.assertNotEqual(axl.Game(1, 2, 3, 4), axl.Game()) + + def test_wrong_class_equality(self): + self.assertNotEqual(axl.Game(), "wrong class") + + @given(r=integers(), p=integers(), s=integers(), t=integers()) + @settings(max_examples=5) + def test_random_init(self, r, p, s, t): + """Test init with random scores using the hypothesis library.""" + expected_scores = { + (C, D): (s, t), + (D, C): (t, s), + (D, D): (p, p), + (C, C): (r, r), + } + game = axl.Game(r, s, t, p) + self.assertEqual(game.scores, expected_scores) + + @given(r=integers(), p=integers(), s=integers(), t=integers()) + @settings(max_examples=5) + def test_random_RPST(self, r, p, s, t): + """Test RPST method with random scores using the hypothesis library.""" + game = 
axl.Game(r, s, t, p) + self.assertEqual(game.RPST(), (r, p, s, t)) + + @given(r=integers(), p=integers(), s=integers(), t=integers()) + @settings(max_examples=5) + def test_random_score(self, r, p, s, t): + """Test score method with random scores using the hypothesis library.""" + game = axl.Game(r, s, t, p) + self.assertEqual(game.score((C, C)), (r, r)) + self.assertEqual(game.score((D, D)), (p, p)) + self.assertEqual(game.score((C, D)), (s, t)) + self.assertEqual(game.score((D, C)), (t, s)) + + @given(game=games()) + @settings(max_examples=5) + def test_random_repr(self, game): + """Test repr with random scores using the hypothesis library.""" + expected_repr = "Axelrod game: (R,P,S,T) = {}".format(game.RPST()) + self.assertEqual(expected_repr, game.__repr__()) + self.assertEqual(expected_repr, str(game)) + + def test_scores_setter(self): + expected_scores = { + (C, D): (1, 2), + (D, C): (2, 1), + (D, D): (3, 3), + (C, C): (4, 4), + } + game = axl.Game() + game.scores = expected_scores + self.assertDictEqual(game.scores, expected_scores) + + + +class TestMatch(unittest.TestCase): + @given(turns=integers(min_value=1, max_value=200), game=games()) + @example(turns=5, game=axl.DefaultGame) + def test_init(self, turns, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), turns, game=game) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, turns) + self.assertEqual(match.prob_end, 0) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), game.RPST()) + + self.assertEqual(match.players[0].match_attributes["length"], turns) + self.assertEqual(match._cache, {}) + + @given(prob_end=floats(min_value=0, max_value=1), game=games()) + def test_init_with_prob_end(self, prob_end, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), prob_end=prob_end, game=game) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, float("inf")) + self.assertEqual(match.prob_end, prob_end) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), game.RPST()) + + self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual(match._cache, {}) + + @given( + prob_end=floats(min_value=0, max_value=1), + turns=integers(min_value=1, max_value=200), + game=games(), + ) + def test_init_with_prob_end_and_turns(self, turns, prob_end, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), turns=turns, prob_end=prob_end, game=game) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, turns) + self.assertEqual(match.prob_end, prob_end) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), game.RPST()) + + self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual(match._cache, {}) + + def test_default_init(self): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2)) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, axl.DEFAULT_TURNS) + self.assertEqual(match.prob_end, 0) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) + + self.assertEqual( + match.players[0].match_attributes["length"], axl.DEFAULT_TURNS + ) + self.assertEqual(match._cache, {}) + + def test_example_prob_end(self): + """ + Test that matches have diff length and 
also that cache has recorded the + outcomes + """ + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), prob_end=0.5) + expected_lengths = [3, 1, 5] + for seed, expected_length in zip(range(3), expected_lengths): + axl.seed(seed) + self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual(len(match.play()), expected_length) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) + self.assertEqual(len(match._cache), 1) + self.assertEqual(match._cache[(p1, p2)], [(C, C)] * 5) + + @given(turns=integers(min_value=1, max_value=200), game=games()) + @example(turns=5, game=axl.DefaultGame) + def test_non_default_attributes(self, turns, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match_attributes = {"length": 500, "game": game, "noise": 0.5} + match = axl.Match( + (p1, p2), turns, game=game, match_attributes=match_attributes + ) + self.assertEqual(match.players[0].match_attributes["length"], 500) + self.assertEqual(match.players[0].match_attributes["noise"], 0.5) + + @given(turns=integers(min_value=1, max_value=200)) + @example(turns=5) + def test_len(self, turns): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), turns) + self.assertEqual(len(match), turns) + + def test_len_error(self): + """ + Length is not defined if it is infinite. + """ + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), prob_end=0.5) + with self.assertRaises(TypeError): + len(match) + + @given(p=floats(min_value=0, max_value=1)) + def test_stochastic(self, p): + + assume(0 < p < 1) + + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), 5) + self.assertFalse(match._stochastic) + + match = axl.Match((p1, p2), 5, noise=p) + self.assertTrue(match._stochastic) + + p1 = axl.Random() + match = axl.Match((p1, p2), 5) + self.assertTrue(match._stochastic) + + @given(p=floats(min_value=0, max_value=1)) + def test_cache_update_required(self, p): + + assume(0 < p < 1) + + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.Match((p1, p2), 5, noise=p) + self.assertFalse(match._cache_update_required) + + cache = DeterministicCache() + cache.mutable = False + match = axl.Match((p1, p2), 5, deterministic_cache=cache) + self.assertFalse(match._cache_update_required) + + match = axl.Match((p1, p2), 5) + self.assertTrue(match._cache_update_required) + + p1 = axl.Random() + match = axl.Match((p1, p2), 5) + self.assertFalse(match._cache_update_required) + + def test_play(self): + cache = DeterministicCache() + players = (axl.Cooperator(), axl.Defector()) + match = axl.Match(players, 3, deterministic_cache=cache) + expected_result = [(C, D), (C, D), (C, D)] + self.assertEqual(match.play(), expected_result) + self.assertEqual( + cache[(axl.Cooperator(), axl.Defector())], expected_result + ) + + # a deliberately incorrect result so we can tell it came from the cache + expected_result = [(C, C), (D, D), (D, C), (C, C), (C, D)] + cache[(axl.Cooperator(), axl.Defector())] = expected_result + match = axl.Match(players, 3, deterministic_cache=cache) + self.assertEqual(match.play(), expected_result[:3]) + + def test_cache_grows(self): + """ + We want to make sure that if we try to use the cache for more turns than + what is stored, then it will instead regenerate the result and overwrite + the cache. 
+ """ + cache = DeterministicCache() + players = (axl.Cooperator(), axl.Defector()) + match = axl.Match(players, 3, deterministic_cache=cache) + expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] + expected_result_3_turn = [(C, D), (C, D), (C, D)] + self.assertEqual(match.play(), expected_result_3_turn) + match.turns = 5 + self.assertEqual(match.play(), expected_result_5_turn) + # The cache should now hold the 5-turn result.. + self.assertEqual( + cache[(axl.Cooperator(), axl.Defector())], + expected_result_5_turn + ) + + def test_cache_doesnt_shrink(self): + """ + We want to make sure that when we access the cache looking for fewer + turns than what is stored, then it will not overwrite the cache with the + shorter result. + """ + cache = DeterministicCache() + players = (axl.Cooperator(), axl.Defector()) + match = axl.Match(players, 5, deterministic_cache=cache) + expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] + expected_result_3_turn = [(C, D), (C, D), (C, D)] + self.assertEqual(match.play(), expected_result_5_turn) + match.turns = 3 + self.assertEqual(match.play(), expected_result_3_turn) + # The cache should still hold the 5. + self.assertEqual( + cache[(axl.Cooperator(), axl.Defector())], + expected_result_5_turn + ) + + def test_scores(self): + player1 = axl.TitForTat() + player2 = axl.Defector() + match = axl.Match((player1, player2), 3) + self.assertEqual(match.scores(), []) + match.play() + self.assertEqual(match.scores(), [(0, 5), (1, 1), (1, 1)]) + + def test_final_score(self): + player1 = axl.TitForTat() + player2 = axl.Defector() + + match = axl.Match((player1, player2), 3) + self.assertEqual(match.final_score(), None) + match.play() + self.assertEqual(match.final_score(), (2, 7)) + + match = axl.Match((player2, player1), 3) + self.assertEqual(match.final_score(), None) + match.play() + self.assertEqual(match.final_score(), (7, 2)) + + def test_final_score_per_turn(self): + turns = 3 + player1 = axl.TitForTat() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.final_score_per_turn(), None) + match.play() + self.assertEqual(match.final_score_per_turn(), (2 / turns, 7 / turns)) + + match = axl.Match((player2, player1), turns) + self.assertEqual(match.final_score_per_turn(), None) + match.play() + self.assertEqual(match.final_score_per_turn(), (7 / turns, 2 / turns)) + + def test_winner(self): + player1 = axl.TitForTat() + player2 = axl.Defector() + + match = axl.Match((player1, player2), 3) + self.assertEqual(match.winner(), None) + match.play() + self.assertEqual(match.winner(), player2) + + match = axl.Match((player2, player1), 3) + self.assertEqual(match.winner(), None) + match.play() + self.assertEqual(match.winner(), player2) + + player1 = axl.Defector() + match = axl.Match((player1, player2), 3) + self.assertEqual(match.winner(), None) + match.play() + self.assertEqual(match.winner(), False) + + def test_cooperation(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.cooperation(), None) + match.play() + self.assertEqual(match.cooperation(), (3, 2)) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.cooperation(), None) + match.play() + self.assertEqual(match.cooperation(), (2, 0)) + + def test_normalised_cooperation(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = 
axl.Match((player1, player2), turns) + self.assertEqual(match.normalised_cooperation(), None) + match.play() + self.assertEqual(match.normalised_cooperation(), (3 / turns, 2 / turns)) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.normalised_cooperation(), None) + match.play() + self.assertEqual(match.normalised_cooperation(), (2 / turns, 0 / turns)) + + def test_state_distribution(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.state_distribution(), None) + + match.play() + expected = Counter({(C, C): 2, (C, D): 1}) + self.assertEqual(match.state_distribution(), expected) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.state_distribution(), None) + + match.play() + expected = Counter({(C, D): 2, (D, D): 1}) + self.assertEqual(match.state_distribution(), expected) + + def test_normalised_state_distribution(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.normalised_state_distribution(), None) + + match.play() + expected = Counter({(C, C): 2 / turns, (C, D): 1 / turns}) + self.assertEqual(match.normalised_state_distribution(), expected) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.Match((player1, player2), turns) + self.assertEqual(match.normalised_state_distribution(), None) + + match.play() + expected = Counter({(C, D): 2 / turns, (D, D): 1 / turns}) + self.assertEqual(match.normalised_state_distribution(), expected) + + def test_sparklines(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players, 4) + match.play() + expected_sparklines = "████\n█ █ " + self.assertEqual(match.sparklines(), expected_sparklines) + expected_sparklines = "XXXX\nXYXY" + self.assertEqual(match.sparklines("X", "Y"), expected_sparklines) + + def test_result_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_result = [(C, C), (C, D), (D, C)] + match.result = expected_result + self.assertListEqual(match.result, expected_result) + + def test_noise_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_noise = 0.123 + match.noise = expected_noise + self.assertAlmostEqual(match.noise, expected_noise) + + def test_game_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_game = axl.Game(1, 2, 3, 4) + match.game = expected_game + self.assertEqual(match.game, expected_game) + + def test_cache_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_cache = axl.DeterministicCache() + expected_cache.mutable = False # Non-default value + match._cache = expected_cache + self.assertFalse(match._cache.mutable) + + def test_prob_end_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_prob_end = 0.123 + match.prob_end = expected_prob_end + self.assertAlmostEqual(match.prob_end, expected_prob_end) + + def test_turns_setter(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_turns = 123 + match.turns = expected_turns + self.assertEqual(match.turns, expected_turns) + + def test_reset_setter(self): + players = 
(axl.Cooperator(), axl.Alternator()) + match = axl.Match(players) + + expected_reset = False  # Non-default value + match.reset = expected_reset + self.assertFalse(match.reset) + + +class TestTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.Game() + cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_turns = test_turns + + cls.expected_payoff = [ + [600, 600, 0, 600, 600], + [600, 600, 199, 600, 600], + [1000, 204, 200, 204, 204], + [600, 600, 199, 600, 600], + [600, 600, 199, 600, 600], + ] + + cls.expected_cooperation = [ + [200, 200, 200, 200, 200], + [200, 200, 1, 200, 200], + [0, 0, 0, 0, 0], + [200, 200, 1, 200, 200], + [200, 200, 1, 200, 200], + ] + + path = pathlib.Path("test_outputs/test_tournament.csv") + cls.filename = axl_filename(path) + + def setUp(self): + self.test_tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=2, + repetitions=1, + ) + + def test_init(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=self.test_turns, + noise=0.2, + ) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertIsInstance(tournament.players[0].match_attributes["game"], axl.IpdGame) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertEqual(tournament.turns, self.test_turns) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + anonymous_tournament = axl.Tournament(players=self.players) + self.assertEqual(anonymous_tournament.name, "axelrod") + + def test_init_with_match_attributes(self): + tournament = axl.Tournament( + players=self.players, match_attributes={"length": float("inf")} + ) + mg = tournament.match_generator + match_params = mg.build_single_match_params() + self.assertEqual(match_params["match_attributes"], {"length": float("inf")}) + + def test_warning(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=10, + repetitions=1, + ) + with warnings.catch_warnings(record=True) as w: + # Check that a warning is raised if no results set is built and no + # filename is given + results = tournament.play(build_results=False, progress_bar=False) + self.assertEqual(len(w), 1) + + with warnings.catch_warnings(record=True) as w: + # Check that no warning is raised if no results set is built and a + # filename is given + + tournament.play( + build_results=False, filename=self.filename, progress_bar=False + ) + self.assertEqual(len(w), 0) + + def test_setup_output_with_filename(self): + self.test_tournament.setup_output(self.filename) + + self.assertEqual(self.test_tournament.filename, self.filename) + self.assertIsNone(self.test_tournament._temp_file_descriptor) + self.assertFalse(hasattr(self.test_tournament, "interactions_dict")) + + def test_setup_output_no_filename(self): + self.test_tournament.setup_output() + + self.assertIsInstance(self.test_tournament.filename, str) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + self.assertFalse(hasattr(self.test_tournament, "interactions_dict")) + + os.close(self.test_tournament._temp_file_descriptor) + os.remove(self.test_tournament.filename) + + def test_play_resets_num_interactions(self): + self.assertEqual(self.test_tournament.num_interactions, 0) + 
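# A round robin over the five test players includes self-interactions, + # giving 5 * (5 + 1) // 2 == 15 matches, hence the 15 asserted below. +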
self.test_tournament.play(progress_bar=False) + self.assertEqual(self.test_tournament.num_interactions, 15) + + self.test_tournament.play(progress_bar=False) + self.assertEqual(self.test_tournament.num_interactions, 15) + + def test_play_changes_use_progress_bar(self): + self.assertTrue(self.test_tournament.use_progress_bar) + + self.test_tournament.play(progress_bar=False) + self.assertFalse(self.test_tournament.use_progress_bar) + + self.test_tournament.play(progress_bar=True) + self.assertTrue(self.test_tournament.use_progress_bar) + + def test_play_changes_temp_file_descriptor(self): + self.assertIsNone(self.test_tournament._temp_file_descriptor) + + # No file descriptor for a named file. + self.test_tournament.play(filename=self.filename, progress_bar=False) + self.assertIsNone(self.test_tournament._temp_file_descriptor) + + # Temp file creates file descriptor. + self.test_tournament.play(filename=None, progress_bar=False) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + + def test_play_tempfile_removed(self): + self.test_tournament.play(filename=None, progress_bar=False) + + self.assertFalse(os.path.isfile(self.test_tournament.filename)) + + def test_play_resets_filename_and_temp_file_descriptor_each_time(self): + self.test_tournament.play(progress_bar=False) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + self.assertIsInstance(self.test_tournament.filename, str) + old_filename = self.test_tournament.filename + + self.test_tournament.play(filename=self.filename, progress_bar=False) + self.assertIsNone(self.test_tournament._temp_file_descriptor) + self.assertEqual(self.test_tournament.filename, self.filename) + self.assertNotEqual(old_filename, self.test_tournament.filename) + + self.test_tournament.play(progress_bar=False) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + self.assertIsInstance(self.test_tournament.filename, str) + self.assertNotEqual(old_filename, self.test_tournament.filename) + self.assertNotEqual(self.test_tournament.filename, self.filename) + + def test_get_file_objects_no_filename(self): + file, writer = self.test_tournament._tournament._get_file_objects() + self.assertIsNone(file) + self.assertIsNone(writer) + + def test_get_file_object_with_filename(self): + self.test_tournament.filename = self.filename + file_object, writer = self.test_tournament._tournament._get_file_objects() + self.assertIsInstance(file_object, io.TextIOWrapper) + self.assertEqual(writer.__class__.__name__, "writer") + file_object.close() + + def test_get_progress_bar(self): + self.test_tournament.use_progress_bar = False + pbar = self.test_tournament._tournament._get_progress_bar() + self.assertIsNone(pbar) + + self.test_tournament.use_progress_bar = True + pbar = self.test_tournament._tournament._get_progress_bar() + self.assertIsInstance(pbar, tqdm) + self.assertEqual(pbar.desc, "Playing matches") + self.assertEqual(pbar.n, 0) + self.assertEqual(pbar.total, self.test_tournament.match_generator.size) + + new_edges = [(0, 1), (1, 2), (2, 3), (3, 4)] + new_tournament = axl.Tournament(players=self.players, edges=new_edges) + new_tournament.use_progress_bar = True + pbar = new_tournament._tournament._get_progress_bar() + self.assertEqual(pbar.desc, "Playing matches") + self.assertEqual(pbar.n, 0) + self.assertEqual(pbar.total, len(new_edges)) + + def test_serial_play(self): + # Test that we get an instance of ResultSet + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + 
turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + + # Test that _run_serial_repetitions is called with empty matches list + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(progress_bar=False) + self.assertEqual(tournament.num_interactions, 75) + + def test_serial_play_with_different_game(self): + # Test that a non-default game is passed to the result set + game = axl.Game(p=-1, r=-1, s=-1, t=-1) + tournament = axl.Tournament( + name=self.test_name, players=self.players, game=game, turns=1, repetitions=1 + ) + results = tournament.play(progress_bar=False) + self.assertLessEqual(np.max(results.scores), 0) + + @patch("tqdm.tqdm", RecordedTQDM) + def test_no_progress_bar_play(self): + """Test that progress bar is not created for progress_bar=False""" + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + # Test with build results + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + # Check that no progress bar was created. + self.assertEqual(RecordedTQDM.record, []) + + # Test without build results + RecordedTQDM.reset_record() + results = tournament.play( + progress_bar=False, build_results=False, filename=self.filename + ) + self.assertIsNone(results) + self.assertEqual(RecordedTQDM.record, []) + + def assert_play_pbar_correct_total_and_finished(self, pbar, total): + self.assertEqual(pbar.desc, "Playing matches") + self.assertEqual(pbar.total, total) + self.assertEqual(pbar.n, total) + self.assertTrue(pbar.disable) + + @patch("tqdm.tqdm", RecordedTQDM) + def test_progress_bar_play(self): + """Test that progress bar is created by default and with True argument""" + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + RecordedTQDM.reset_record() + results = tournament.play() + self.assertIsInstance(results, axl.ResultSet) + # Check that progress bar was created, updated and closed. + self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + # Check all progress bars are closed. 
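+ # tqdm marks a closed bar by setting its disable flag to True, which + # is what the assertion below relies on.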
+ self.assertTrue(all(pbar.disable for pbar in RecordedTQDM.record)) + + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=True) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + # Test without build results + RecordedTQDM.reset_record() + results = tournament.play( + progress_bar=True, build_results=False, filename=self.filename + ) + self.assertIsNone(results) + self.assertEqual(len(RecordedTQDM.record), 1) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + @patch("tqdm.tqdm", RecordedTQDM) + def test_progress_bar_play_parallel(self): + """Test that tournament plays when asking for progress bar for parallel + tournament and that progress bar is created.""" + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + # progress_bar = False + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=False, processes=2) + self.assertEqual(RecordedTQDM.record, []) + self.assertIsInstance(results, axl.ResultSet) + + # progress_bar = True + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=True, processes=2) + self.assertIsInstance(results, axl.ResultSet) + + self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + # progress_bar is default + RecordedTQDM.reset_record() + results = tournament.play(processes=2) + self.assertIsInstance(results, axl.ResultSet) + + self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + @given( + tournament=tournaments( + min_size=2, + max_size=5, + min_turns=2, + max_turns=5, + min_repetitions=2, + max_repetitions=4, + ) + ) + @settings(max_examples=50) + @example( + tournament=axl.Tournament( + players=[s() for s in test_strategies], + turns=test_turns, + repetitions=test_repetitions, + ) + ) + # These two examples are to make sure #465 is fixed. + # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, + # these two examples were identified by hypothesis. 
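+ # (Both examples involve BackStabber, whose end-of-match defection + # depends on the match length attribute.)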
+ @example( + tournament=axl.Tournament( + players=[axl.BackStabber(), axl.MindReader()], turns=2, repetitions=1, + ) + ) + @example( + tournament=axl.Tournament( + players=[axl.BackStabber(), axl.ThueMorse()], turns=2, repetitions=1 + ) + ) + def test_property_serial_play(self, tournament): + """Test serial play using hypothesis""" + # Test that we get an instance of ResultSet + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(results.num_players, len(tournament.players)) + self.assertEqual(results.players, [str(p) for p in tournament.players]) + + def test_parallel_play(self): + # Test that we get an instance of ResultSet + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(processes=2, progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(tournament.num_interactions, 75) + + # The following relates to #516 + players = [ + axl.Cooperator(), + axl.Defector(), + axl.BackStabber(), + axl.PSOGambler2_2_2(), + axl.ThueMorse(), + axl.DoubleCrosser(), + ] + tournament = axl.Tournament( + name=self.test_name, + players=players, + game=self.game, + turns=20, + repetitions=self.test_repetitions, + ) + scores = tournament.play(processes=2, progress_bar=False).scores + self.assertEqual(len(scores), len(players)) + + def test_parallel_play_with_writing_to_file(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + results = tournament.play( + processes=2, progress_bar=False, filename=self.filename + ) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(tournament.num_interactions, 75) + + def test_run_serial(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + tournament._tournament._write_interactions_to_file = MagicMock( + name="_write_interactions_to_file" + ) + self.assertTrue(tournament._tournament._run_serial()) + + # Get the calls made to write_interactions + calls = tournament._tournament._write_interactions_to_file.call_args_list + self.assertEqual(len(calls), 15) + + def test_run_parallel(self): + class PickleableMock(MagicMock): + def __reduce__(self): + return MagicMock, () + + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + tournament._tournament._write_interactions_to_file = PickleableMock( + name="_write_interactions_to_file" + ) + + # For test coverage purposes. This confirms PickleableMock can be + # pickled exactly once. Windows multi-processing must pickle this Mock + # exactly once during testing. 
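+ # Mechanically: the first dumps() succeeds because __reduce__ swaps the + # mock for a plain MagicMock; the rebuilt MagicMock's own __reduce__ is + # just another mock returning an invalid value, so a second dumps() + # raises pickle.PicklingError, as asserted below.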
+ pickled = pickle.loads(pickle.dumps(tournament)) + self.assertIsInstance(pickled._tournament._write_interactions_to_file, MagicMock) + self.assertRaises(pickle.PicklingError, pickle.dumps, pickled) + + self.assertTrue(tournament._tournament._run_parallel()) + + # Get the calls made to write_interactions + calls = tournament._tournament._write_interactions_to_file.call_args_list + self.assertEqual(len(calls), 15) + + def test_n_workers(self): + max_processes = cpu_count() + + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + self.assertEqual(tournament._tournament._n_workers(processes=1), max_processes) + + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + self.assertEqual( + tournament._tournament._n_workers(processes=max_processes + 2), max_processes + ) + + @unittest.skipIf(cpu_count() < 2, "not supported on single processor machines") + def test_2_workers(self): + # This is a separate test with a skip condition because we + # cannot guarantee that the tests will always run on a machine + # with more than one processor + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + self.assertEqual(tournament._tournament._n_workers(processes=2), 2) + + def test_start_workers(self): + workers = 2 + work_queue = Queue() + done_queue = Queue() + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + chunks = tournament.match_generator.build_match_chunks() + for chunk in chunks: + work_queue.put(chunk) + tournament._tournament._start_workers(workers, work_queue, done_queue) + + stops = 0 + while stops < workers: + payoffs = done_queue.get() + if payoffs == "STOP": + stops += 1 + self.assertEqual(stops, workers) + + def test_worker(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + work_queue = Queue() + chunks = tournament.match_generator.build_match_chunks() + count = 0 + for chunk in chunks: + work_queue.put(chunk) + count += 1 + work_queue.put("STOP") + + done_queue = Queue() + tournament._tournament._worker(work_queue, done_queue) + for r in range(count): + new_matches = done_queue.get() + for index_pair, matches in new_matches.items(): + self.assertIsInstance(index_pair, tuple) + self.assertEqual(len(matches), self.test_repetitions) + queue_stop = done_queue.get() + self.assertEqual(queue_stop, "STOP") + + def test_build_result_set(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + + def test_no_build_result_set(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + tournament._tournament._calculate_results = MagicMock(name="_calculate_results") + # Mocking this as it is called by play + self.assertIsNone( + tournament.play( + filename=self.filename, progress_bar=False, build_results=False + ) + ) + + 
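# With build_results=False and a filename, play() only writes + # interactions to file, so _calculate_results should never run. +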
# Get the calls made to _calculate_results + calls = tournament._tournament._calculate_results.call_args_list + self.assertEqual(len(calls), 0) + + @given(turns=integers(min_value=1, max_value=200)) + @settings(max_examples=5) + @example(turns=3) + @example(turns=axl.DEFAULT_TURNS) + def test_play_matches(self, turns): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + repetitions=self.test_repetitions, + ) + + def make_chunk_generator(): + for player1_index in range(len(self.players)): + for player2_index in range(player1_index, len(self.players)): + index_pair = (player1_index, player2_index) + match_params = {"turns": turns, "game": self.game} + yield (index_pair, match_params, self.test_repetitions) + + chunk_generator = make_chunk_generator() + interactions = {} + for chunk in chunk_generator: + result = tournament._tournament._play_matches(chunk) + for index_pair, inters in result.items(): + try: + interactions[index_pair].append(inters) + except KeyError: + interactions[index_pair] = [inters] + + self.assertEqual(len(interactions), 15) + + for index_pair, inter in interactions.items(): + self.assertEqual(len(index_pair), 2) + for plays in inter: + # Check that we have the expected number of repetitions + self.assertEqual(len(plays), self.test_repetitions) + for repetition in plays: + actions, results = repetition + self.assertEqual(len(actions), turns) + self.assertEqual(len(results), 10) + + # Check that matches no longer exist + self.assertEqual((len(list(chunk_generator))), 0) + + def test_match_cache_is_used(self): + """ + Create two Random players that are classified as deterministic. + As they are deterministic the cache will be used. + """ + FakeRandom = axl.Random + FakeRandom.classifier["stochastic"] = False + p1 = FakeRandom() + p2 = FakeRandom() + tournament = axl.Tournament((p1, p2), turns=5, repetitions=2) + results = tournament.play(progress_bar=False) + for player_scores in results.scores: + self.assertEqual(player_scores[0], player_scores[1]) + + def test_write_interactions(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=2, + repetitions=2, + ) + tournament._tournament._write_interactions_to_file = MagicMock( + name="_write_interactions_to_file" + ) + # Mocking this as it is called by play + self.assertIsNone( + tournament.play( + filename=self.filename, progress_bar=False, build_results=False + ) + ) + + # Get the calls made to write_interactions + calls = tournament._tournament._write_interactions_to_file.call_args_list + self.assertEqual(len(calls), 15) + + def test_write_to_csv_with_results(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=2, + repetitions=2, + ) + tournament.play(filename=self.filename, progress_bar=False) + df = pd.read_csv(self.filename) + path = pathlib.Path("test_outputs/expected_test_tournament.csv") + expected_df = pd.read_csv(axl_filename(path)) + self.assertTrue(df.equals(expected_df)) + + def test_write_to_csv_without_results(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=2, + repetitions=2, + ) + tournament.play(filename=self.filename, progress_bar=False, build_results=False) + df = pd.read_csv(self.filename) + path = pathlib.Path("test_outputs/expected_test_tournament_no_results.csv") + expected_df = pd.read_csv(axl_filename(path)) + self.assertTrue(df.equals(expected_df)) + + def test_players_setter(self): 
+ expected_players = [axl.Cooperator(), axl.Defector()] + self.test_tournament.players = expected_players + self.assertListEqual(self.test_tournament.players, expected_players) + + def test_game_setter(self): + expected_game = axl.Game(1, 2, 3, 4) + self.test_tournament.game = expected_game + self.assertEqual(self.test_tournament.game, expected_game) + + def test_turns_setter(self): + expected_turns = 123 + self.test_tournament.turns = expected_turns + self.assertEqual(self.test_tournament.turns, expected_turns) + + def test_repetitions_setter(self): + expected_repetitions = 123 + self.test_tournament.repetitions = expected_repetitions + self.assertEqual(self.test_tournament.repetitions, expected_repetitions) + + def test_name_setter(self): + expected_name = "name_to_set" + self.test_tournament.name = expected_name + self.assertEqual(self.test_tournament.name, expected_name) + + def test_noise_setter(self): + expected_noise = 0.123 + self.test_tournament.noise = expected_noise + self.assertAlmostEqual(self.test_tournament.noise, expected_noise) + + def test_match_generator_setter(self): + expected_match_generator_turns = 123 + self.test_tournament.match_generator.turns = expected_match_generator_turns + self.assertEqual(self.test_tournament.match_generator.turns, expected_match_generator_turns) + + def test_num_interactions_setter(self): + expected_num_interactions = 123 + self.test_tournament.num_interactions = expected_num_interactions + self.assertEqual(self.test_tournament.num_interactions, expected_num_interactions) + + def test_use_progress_bar_setter(self): + expected_use_progress_bar = False + self.test_tournament.use_progress_bar = expected_use_progress_bar + self.assertFalse(self.test_tournament.use_progress_bar) + + def test_filename_setter(self): + expected_filename = "fn.txt" + self.test_tournament.filename = expected_filename + self.assertEqual(self.test_tournament.filename, expected_filename) + + +class TestProbEndTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.Game() + cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_prob_end = test_prob_end + + def test_init(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + prob_end=self.test_prob_end, + noise=0.2, + ) + self.assertEqual(tournament.match_generator.prob_end, tournament.prob_end) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertIsNone(tournament.turns) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + anonymous_tournament = axl.Tournament(players=self.players) + self.assertEqual(anonymous_tournament.name, "axelrod") + + @given( + tournament=prob_end_tournaments( + min_size=2, + max_size=5, + min_prob_end=0.1, + max_prob_end=0.9, + min_repetitions=2, + max_repetitions=4, + ) + ) + @settings(max_examples=5) + @example( + tournament=axl.Tournament( + players=[s() for s in test_strategies], + prob_end=0.2, + repetitions=test_repetitions, + ) + ) + # These two examples are to make sure #465 is fixed. + # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, + # these two examples were identified by hypothesis. 
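+ # (Both examples pair MindReader, which simulates its opponent's + # strategy, with a long-memory co-player.)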
+ @example( + tournament=axl.Tournament( + players=[axl.BackStabber(), axl.MindReader()], prob_end=0.2, repetitions=1, + ) + ) + @example( + tournament=axl.Tournament( + players=[axl.ThueMorse(), axl.MindReader()], prob_end=0.2, repetitions=1, + ) + ) + def test_property_serial_play(self, tournament): + """Test serial play using hypothesis""" + # Test that we get an instance of ResultSet + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(results.num_players, len(tournament.players)) + self.assertEqual(results.players, [str(p) for p in tournament.players]) + + def test_prob_end_setter(self): + # create a round robin tournament + players = [axl.Cooperator(), axl.Defector()] + tournament = axl.Tournament(players) + + expected_prob_end = 0.123 + tournament.prob_end = expected_prob_end + self.assertAlmostEqual(tournament.prob_end, expected_prob_end) + + +class TestSpatialTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.Game() + cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_turns = test_turns + cls.test_edges = test_edges + + def test_init(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=self.test_turns, + edges=self.test_edges, + noise=0.2, + ) + self.assertEqual(tournament.match_generator.edges, tournament.edges) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertEqual(tournament.turns, 100) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + self.assertEqual(tournament.match_generator.noise, 0.2) + anonymous_tournament = axl.Tournament(players=self.players) + self.assertEqual(anonymous_tournament.name, "axelrod") + + @given( + strategies=strategy_lists( + strategies=deterministic_strategies, min_size=2, max_size=2 + ), + turns=integers(min_value=1, max_value=20), + repetitions=integers(min_value=1, max_value=5), + noise=floats(min_value=0, max_value=1), + seed=integers(min_value=0, max_value=4294967295), + ) + @settings(max_examples=5) + def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): + """ + A test to check that a spatial tournament on the complete multigraph + gives the same results as the round robin. 
+ """ + + players = [s() for s in strategies] + # edges + edges = [] + for i in range(0, len(players)): + for j in range(i, len(players)): + edges.append((i, j)) + + # create a round robin tournament + tournament = axl.Tournament( + players, repetitions=repetitions, turns=turns, noise=noise + ) + # create a complete spatial tournament + spatial_tournament = axl.Tournament( + players, repetitions=repetitions, turns=turns, noise=noise, edges=edges + ) + + axl.seed(seed) + results = tournament.play(progress_bar=False) + axl.seed(seed) + spatial_results = spatial_tournament.play(progress_bar=False) + + self.assertEqual(results.ranked_names, spatial_results.ranked_names) + self.assertEqual(results.num_players, spatial_results.num_players) + self.assertEqual(results.repetitions, spatial_results.repetitions) + self.assertEqual(results.payoff_diffs_means, spatial_results.payoff_diffs_means) + self.assertEqual(results.payoff_matrix, spatial_results.payoff_matrix) + self.assertEqual(results.payoff_stddevs, spatial_results.payoff_stddevs) + self.assertEqual(results.payoffs, spatial_results.payoffs) + self.assertEqual(results.cooperating_rating, spatial_results.cooperating_rating) + self.assertEqual(results.cooperation, spatial_results.cooperation) + self.assertEqual( + results.normalised_cooperation, spatial_results.normalised_cooperation + ) + self.assertEqual(results.normalised_scores, spatial_results.normalised_scores) + self.assertEqual( + results.good_partner_matrix, spatial_results.good_partner_matrix + ) + self.assertEqual( + results.good_partner_rating, spatial_results.good_partner_rating + ) + + def test_particular_tournament(self): + """A test for a tournament that has caused failures during some bug + fixing""" + players = [ + axl.Cooperator(), + axl.Defector(), + axl.TitForTat(), + axl.Grudger(), + ] + edges = [(0, 2), (0, 3), (1, 2), (1, 3)] + tournament = axl.Tournament(players, edges=edges) + results = tournament.play(progress_bar=False) + expected_ranked_names = ["Cooperator", "Tit For Tat", "Grudger", "Defector"] + self.assertEqual(results.ranked_names, expected_ranked_names) + + # Check that this tournament runs with noise + tournament = axl.Tournament(players, edges=edges, noise=0.5) + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + + def test_edges_setter(self): + # create a round robin tournament + players = [axl.Cooperator(), axl.Defector()] + tournament = axl.Tournament(players) + + expected_edges = [(1, 2), (3, 4)] + tournament.edges = expected_edges + self.assertListEqual(tournament.edges, expected_edges) + + +class TestProbEndingSpatialTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.Game() + cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_prob_end = test_prob_end + cls.test_edges = test_edges + + def test_init(self): + tournament = axl.Tournament( + name=self.test_name, + players=self.players, + game=self.game, + prob_end=self.test_prob_end, + edges=self.test_edges, + noise=0.2, + ) + self.assertEqual(tournament.match_generator.edges, tournament.edges) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertIsNone(tournament.turns) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + 
self.assertEqual(tournament.match_generator.noise, 0.2) + self.assertEqual(tournament.prob_end, self.test_prob_end) + + @given( + strategies=strategy_lists( + strategies=deterministic_strategies, min_size=2, max_size=2 + ), + prob_end=floats(min_value=0.1, max_value=0.9), + reps=integers(min_value=1, max_value=3), + seed=integers(min_value=0, max_value=4294967295), + ) + @settings(max_examples=5) + def test_complete_tournament(self, strategies, prob_end, seed, reps): + """ + A test to check that a spatial tournament on the complete graph + gives the same results as the round robin. + """ + players = [s() for s in strategies] + + # create a prob end round robin tournament + tournament = axl.Tournament(players, prob_end=prob_end, repetitions=reps) + axl.seed(seed) + results = tournament.play(progress_bar=False) + + # create a complete spatial tournament + # edges + edges = [(i, j) for i in range(len(players)) for j in range(i, len(players))] + + spatial_tournament = axl.Tournament( + players, prob_end=prob_end, repetitions=reps, edges=edges + ) + axl.seed(seed) + spatial_results = spatial_tournament.play(progress_bar=False) + self.assertEqual(results.match_lengths, spatial_results.match_lengths) + self.assertEqual(results.ranked_names, spatial_results.ranked_names) + self.assertEqual(results.wins, spatial_results.wins) + self.assertEqual(results.scores, spatial_results.scores) + self.assertEqual(results.cooperation, spatial_results.cooperation) + + @given( + tournament=spatial_tournaments( + strategies=axl.basic_strategies, + max_turns=1, + max_noise=0, + max_repetitions=3, + ), + seed=integers(min_value=0, max_value=4294967295), + ) + @settings(max_examples=5) + def test_one_turn_tournament(self, tournament, seed): + """ + Tests that a tournament with prob_end=1 gives the same results as the + corresponding one-turn spatial tournament. + """ + prob_end_tour = axl.Tournament( + tournament.players, + prob_end=1, + edges=tournament.edges, + repetitions=tournament.repetitions, + ) + axl.seed(seed) + prob_end_results = prob_end_tour.play(progress_bar=False) + axl.seed(seed) + one_turn_results = tournament.play(progress_bar=False) + self.assertEqual(prob_end_results.scores, one_turn_results.scores) + self.assertEqual(prob_end_results.wins, one_turn_results.wins) + self.assertEqual(prob_end_results.cooperation, one_turn_results.cooperation) + + +class TestHelperFunctions(unittest.TestCase): + def test_close_objects_with_none(self): + self.assertIsNone(_close_objects(None, None)) + + def test_close_objects_with_file_objs(self): + f1 = open("to_delete_1", "w") + f2 = open("to_delete_2", "w") + f2.close() + f2 = open("to_delete_2", "r") + + self.assertFalse(f1.closed) + self.assertFalse(f2.closed) + + _close_objects(f1, f2) + + self.assertTrue(f1.closed) + self.assertTrue(f2.closed) + + os.remove("to_delete_1") + os.remove("to_delete_2") + + def test_close_objects_with_tqdm(self): + pbar_1 = tqdm(range(5)) + pbar_2 = tqdm(total=10, desc="hi", file=io.StringIO()) + + self.assertFalse(pbar_1.disable) + self.assertFalse(pbar_2.disable) + + _close_objects(pbar_1, pbar_2) + + self.assertTrue(pbar_1.disable) + self.assertTrue(pbar_2.disable) + + def test_close_objects_with_different_objects(self): + file = open("to_delete_1", "w") + pbar = tqdm(range(5)) + num = 5 + empty = None + word = "hi" + + _close_objects(file, pbar, num, empty, word) + + self.assertTrue(pbar.disable) + self.assertTrue(file.closed) + + os.remove("to_delete_1") + + +class TestAdapterTitForTat(axl.Player): + name = "Tit For Tat" + classifier = 
_test_classifier + + def strategy(self, opponent) -> axl.Action: + """This is the actual strategy""" + # First move + if not self.history: + return C + # React to the opponent's last move + if opponent.history[-1] == D: + return D + return C + + +def test_memory(player, opponent, memory_length, seed=0, turns=10): + """ + Checks whether a player reacts to the plays of an opponent in the same + way when only the given amount of memory is used. + """ + # Play the match normally. + axl.seed(seed) + match = axl.IpdMatch((player, opponent), turns=turns) + plays = [p[0] for p in match.play()] + + # Play with limited history. + player.reset() + opponent.reset() + player._history = axl.LimitedHistory(memory_length) + opponent._history = axl.LimitedHistory(memory_length) + axl.seed(seed) + match = axl.IpdMatch((player, opponent), turns=turns, reset=False) + limited_plays = [p[0] for p in match.play()] + + return plays == limited_plays + + +class TestPlayer(unittest.TestCase): + """Test Player on TestAdapterTitForTat.""" + + player = TestAdapterTitForTat + name = "TestAdapterTitForTat" + expected_class_classifier = _test_classifier + + def test_initialisation(self): + """Test that the player initialises correctly.""" + if self.__class__ != TestPlayer: + player = self.player() + self.assertEqual(len(player.history), 0) + self.assertEqual( + player.match_attributes, + {"length": -1, "game": axl.DefaultGame, "noise": 0}, + ) + self.assertEqual(player.cooperations, 0) + self.assertEqual(player.defections, 0) + # self.classifier_test(self.expected_class_classifier) + + def test_repr(self): + """Test that the representation is correct.""" + if self.__class__ != TestPlayer: + self.assertEqual(str(self.player()), self.name) + + def test_match_attributes(self): + player = self.player() + # Default + player.set_match_attributes() + t_attrs = player.match_attributes + self.assertEqual(t_attrs["length"], -1) + self.assertEqual(t_attrs["noise"], 0) + self.assertEqual(t_attrs["game"].RPST(), (3, 1, 0, 5)) + + # Common + player.set_match_attributes(length=200) + t_attrs = player.match_attributes + self.assertEqual(t_attrs["length"], 200) + self.assertEqual(t_attrs["noise"], 0) + self.assertEqual(t_attrs["game"].RPST(), (3, 1, 0, 5)) + + # Noisy + player.set_match_attributes(length=200, noise=0.5) + t_attrs = player.match_attributes + self.assertEqual(t_attrs["noise"], 0.5) + + def equality_of_players_test(self, p1, p2, seed, opponent): + a1 = opponent() + a2 = opponent() + self.assertEqual(p1, p2) + for player, op in [(p1, a1), (p2, a2)]: + axl.seed(seed) + for _ in range(10): + simultaneous_play(player, op) + self.assertEqual(p1, p2) + p1 = pickle.loads(pickle.dumps(p1)) + p2 = pickle.loads(pickle.dumps(p2)) + self.assertEqual(p1, p2) + + @given( + opponent=sampled_from(short_run_time_short_mem), + seed=integers(min_value=1, max_value=200), + ) + @settings(max_examples=1) + def test_equality_of_clone(self, seed, opponent): + p1 = self.player() + p2 = p1.clone() + self.equality_of_players_test(p1, p2, seed, opponent) + + @given( + opponent=sampled_from(axl.short_run_time_strategies), + seed=integers(min_value=1, max_value=200), + ) + @settings(max_examples=1) + def test_equality_of_pickle_clone(self, seed, opponent): + p1 = self.player() + p2 = pickle.loads(pickle.dumps(p1)) + self.equality_of_players_test(p1, p2, seed, opponent) + + def test_reset_history_and_attributes(self): + """Make sure resetting works correctly.""" + for opponent in [ + axl.Defector(), + axl.Random(), + axl.Alternator(), + axl.Cooperator(), + ]: + + 
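# Play several seeded turns against each opponent, then check that + # reset() returns the player to a state equal to a fresh clone. +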
player = self.player() + clone = player.clone() + for seed in range(10): + axl.seed(seed) + player.play(opponent) + + player.reset() + self.assertEqual(player, clone) + + def test_reset_clone(self): + """Make sure history resetting with cloning works correctly, regardless + of whether self.test_reset() is overridden.""" + player = self.player() + clone = player.clone() + self.assertEqual(player, clone) + + @given(seed=integers(min_value=1, max_value=20000000)) + @settings(max_examples=1) + def test_clone(self, seed): + # Test that the cloned player produces identical play + player1 = self.player() + if player1.name in ["Darwin", "Human"]: + # Known exceptions + return + player2 = player1.clone() + self.assertEqual(len(player2.history), 0) + self.assertEqual(player2.cooperations, 0) + self.assertEqual(player2.defections, 0) + self.assertEqual(player2.state_distribution, {}) + self.assertEqual(player2.classifier, player1.classifier) + self.assertEqual(player2.match_attributes, player1.match_attributes) + + turns = 50 + r = random.random() + for op in [ + axl.Cooperator(), + axl.Defector(), + axl.TitForTat(), + axl.Random(p=r), + ]: + player1.reset() + player2.reset() + for p in [player1, player2]: + axl.seed(seed) + m = axl.IpdMatch((p, op), turns=turns) + m.play() + self.assertEqual(len(player1.history), turns) + self.assertEqual(player1.history, player2.history) + + @given( + strategies=strategy_lists( + max_size=5, strategies=short_run_time_short_mem + ), + seed=integers(min_value=1, max_value=200), + turns=integers(min_value=1, max_value=200), + ) + @settings(max_examples=1) + def test_memory_depth_upper_bound(self, strategies, seed, turns): + """ + Test that the memory depth is indeed an upper bound. + """ + + def get_memory_depth_or_zero(player): + # Some of the test strategies have no entry in the classifiers + # table, so there is no logic to load a default value of zero. + memory = axl.Classifiers["memory_depth"](player) + return memory if memory else 0 + + player = self.player() + memory = get_memory_depth_or_zero(player) + if memory < float("inf"): + for strategy in strategies: + player.reset() + opponent = strategy() + max_memory = max(memory, get_memory_depth_or_zero(opponent)) + self.assertTrue( + test_memory( + player=player, + opponent=opponent, + seed=seed, + turns=turns, + memory_length=max_memory, + ), + msg="{} failed for seed={} and opponent={}".format( + player.name, seed, opponent + ), + ) diff --git a/rebuild_classifier_table.py b/rebuild_classifier_table.py index 17f6172cf..86d39b587 100644 --- a/rebuild_classifier_table.py +++ b/rebuild_classifier_table.py @@ -1,7 +1,7 @@ import os from axelrod import all_strategies -from axelrod.ipd.classifier import all_classifiers, rebuild_classifier_table +from axelrod.classifier import all_classifiers, rebuild_classifier_table if __name__ == "__main__": # Change to relative path inside axelrod folder From ff277ded0ff433aa07063833d9cf7720768085d6 Mon Sep 17 00:00:00 2001 From: "T.J. Gaffney" Date: Sat, 25 Apr 2020 23:00:59 -0700 Subject: [PATCH 4/7] Add deprecation warnings --- axelrod/ipd_adapter.py | 11 +- axelrod/tests/unit/test_ipd_adapter.py | 170 ++++++++++++++++++------- 2 files changed, 128 insertions(+), 53 deletions(-) diff --git a/axelrod/ipd_adapter.py b/axelrod/ipd_adapter.py index 6e4a6af79..2bce2883c 100644 --- a/axelrod/ipd_adapter.py +++ b/axelrod/ipd_adapter.py @@ -58,7 +58,7 @@ def play( # methods. 
return self._player.play(opponent, noise, strategy_holder=self) - def clone(self) -> 'Player': + def clone(self) -> "Player": """Clones the player without history, reapplying configuration parameters as necessary.""" @@ -74,8 +74,9 @@ def clone(self) -> 'Player': def reset(self): self._player.reset() - def set_match_attributes(self, length: int = -1, game: 'Game' = None, - noise: float = 0) -> None: + def set_match_attributes( + self, length: int = -1, game: "Game" = None, noise: float = 0 + ) -> None: self._player.set_match_attributes(length, game, noise) def update_history(self, play: axl.Action, coplay: axl.Action) -> None: @@ -121,7 +122,7 @@ def classifier(self, classifier): def state_distribution(self): return self._player.state_distribution - def __eq__(self, other: 'Player') -> bool: + def __eq__(self, other: "Player") -> bool: if not isinstance(other, Player): return False return self._player == other._player @@ -148,7 +149,7 @@ def scores(self, scores): def __repr__(self) -> str: return repr(self._game) - def __eq__(self, other: 'Game') -> bool: + def __eq__(self, other: "Game") -> bool: if not isinstance(other, Game): return False return self._game == other._game diff --git a/axelrod/tests/unit/test_ipd_adapter.py b/axelrod/tests/unit/test_ipd_adapter.py index be9f5b927..01eec6eec 100644 --- a/axelrod/tests/unit/test_ipd_adapter.py +++ b/axelrod/tests/unit/test_ipd_adapter.py @@ -53,7 +53,9 @@ test_edges = [(0, 1), (1, 2), (3, 4)] deterministic_strategies = [ - s for s in axl.short_run_time_strategies if not axl.Classifiers["stochastic"](s()) + s + for s in axl.short_run_time_strategies + if not axl.Classifiers["stochastic"](s()) ] short_run_time_short_mem = [ @@ -64,14 +66,14 @@ # Classifiers for TitForTat _test_classifier = { - "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) - "stochastic": False, - "makes_use_of": set(), - "long_run_time": False, - "inspects_source": False, - "manipulates_source": False, - "manipulates_state": False, - } + "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) 
+ "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, +} class RecordedTQDM(tqdm): @@ -172,7 +174,6 @@ def test_scores_setter(self): self.assertDictEqual(game.scores, expected_scores) - class TestMatch(unittest.TestCase): @given(turns=integers(min_value=1, max_value=200), game=games()) @example(turns=5, game=axl.DefaultGame) @@ -200,7 +201,9 @@ def test_init_with_prob_end(self, prob_end, game): self.assertEqual(match.noise, 0) self.assertEqual(match.game.RPST(), game.RPST()) - self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual( + match.players[0].match_attributes["length"], float("inf") + ) self.assertEqual(match._cache, {}) @given( @@ -218,7 +221,9 @@ def test_init_with_prob_end_and_turns(self, turns, prob_end, game): self.assertEqual(match.noise, 0) self.assertEqual(match.game.RPST(), game.RPST()) - self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual( + match.players[0].match_attributes["length"], float("inf") + ) self.assertEqual(match._cache, {}) def test_default_init(self): @@ -246,7 +251,9 @@ def test_example_prob_end(self): expected_lengths = [3, 1, 5] for seed, expected_length in zip(range(3), expected_lengths): axl.seed(seed) - self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual( + match.players[0].match_attributes["length"], float("inf") + ) self.assertEqual(len(match.play()), expected_length) self.assertEqual(match.noise, 0) self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) @@ -349,8 +356,7 @@ def test_cache_grows(self): self.assertEqual(match.play(), expected_result_5_turn) # The cache should now hold the 5-turn result.. self.assertEqual( - cache[(axl.Cooperator(), axl.Defector())], - expected_result_5_turn + cache[(axl.Cooperator(), axl.Defector())], expected_result_5_turn ) def test_cache_doesnt_shrink(self): @@ -369,8 +375,7 @@ def test_cache_doesnt_shrink(self): self.assertEqual(match.play(), expected_result_3_turn) # The cache should still hold the 5. 
self.assertEqual( - cache[(axl.Cooperator(), axl.Defector())], - expected_result_5_turn + cache[(axl.Cooperator(), axl.Defector())], expected_result_5_turn ) def test_scores(self): @@ -623,7 +628,9 @@ def test_init(self): noise=0.2, ) self.assertEqual(len(tournament.players), len(test_strategies)) - self.assertIsInstance(tournament.players[0].match_attributes["game"], axl.IpdGame) + self.assertIsInstance( + tournament.players[0].match_attributes["game"], axl.IpdGame + ) self.assertEqual(tournament.game.score((C, C)), (3, 3)) self.assertEqual(tournament.turns, self.test_turns) self.assertEqual(tournament.repetitions, 10) @@ -639,7 +646,9 @@ def test_init_with_match_attributes(self): ) mg = tournament.match_generator match_params = mg.build_single_match_params() - self.assertEqual(match_params["match_attributes"], {"length": float("inf")}) + self.assertEqual( + match_params["match_attributes"], {"length": float("inf")} + ) def test_warning(self): tournament = axl.Tournament( @@ -738,7 +747,10 @@ def test_get_file_objects_no_filename(self): def test_get_file_object_with_filename(self): self.test_tournament.filename = self.filename - file_object, writer = self.test_tournament._tournament._get_file_objects() + ( + file_object, + writer, + ) = self.test_tournament._tournament._get_file_objects() self.assertIsInstance(file_object, io.TextIOWrapper) self.assertEqual(writer.__class__.__name__, "writer") file_object.close() @@ -790,7 +802,11 @@ def test_serial_play_with_different_game(self): # Test that a non default game is passed to the result set game = axl.Game(p=-1, r=-1, s=-1, t=-1) tournament = axl.Tournament( - name=self.test_name, players=self.players, game=game, turns=1, repetitions=1 + name=self.test_name, + players=self.players, + game=game, + turns=1, + repetitions=1, ) results = tournament.play(progress_bar=False) self.assertLessEqual(np.max(results.scores), 0) @@ -924,7 +940,9 @@ def test_progress_bar_play_parallel(self): # these two examples were identified by hypothesis. @example( tournament=axl.Tournament( - players=[axl.BackStabber(), axl.MindReader()], turns=2, repetitions=1, + players=[axl.BackStabber(), axl.MindReader()], + turns=2, + repetitions=1, ) ) @example( @@ -1001,7 +1019,9 @@ def test_run_serial(self): self.assertTrue(tournament._tournament._run_serial()) # Get the calls made to write_interactions - calls = tournament._tournament._write_interactions_to_file.call_args_list + calls = ( + tournament._tournament._write_interactions_to_file.call_args_list + ) self.assertEqual(len(calls), 15) def test_run_parallel(self): @@ -1024,13 +1044,17 @@ def __reduce__(self): # pickled exactly once. Windows multi-processing must pickle this Mock # exactly once during testing. 
pickled = pickle.loads(pickle.dumps(tournament)) - self.assertIsInstance(pickled._tournament._write_interactions_to_file, MagicMock) + self.assertIsInstance( + pickled._tournament._write_interactions_to_file, MagicMock + ) self.assertRaises(pickle.PicklingError, pickle.dumps, pickled) self.assertTrue(tournament._tournament._run_parallel()) # Get the calls made to write_interactions - calls = tournament._tournament._write_interactions_to_file.call_args_list + calls = ( + tournament._tournament._write_interactions_to_file.call_args_list + ) self.assertEqual(len(calls), 15) def test_n_workers(self): @@ -1043,7 +1067,9 @@ def test_n_workers(self): turns=axl.DEFAULT_TURNS, repetitions=self.test_repetitions, ) - self.assertEqual(tournament._tournament._n_workers(processes=1), max_processes) + self.assertEqual( + tournament._tournament._n_workers(processes=1), max_processes + ) tournament = axl.Tournament( name=self.test_name, @@ -1053,10 +1079,13 @@ def test_n_workers(self): repetitions=self.test_repetitions, ) self.assertEqual( - tournament._tournament._n_workers(processes=max_processes + 2), max_processes + tournament._tournament._n_workers(processes=max_processes + 2), + max_processes, ) - @unittest.skipIf(cpu_count() < 2, "not supported on single processor machines") + @unittest.skipIf( + cpu_count() < 2, "not supported on single processor machines" + ) def test_2_workers(self): # This is a separate test with a skip condition because we # cannot guarantee that the tests will always run on a machine @@ -1140,7 +1169,9 @@ def test_no_build_result_set(self): repetitions=self.test_repetitions, ) - tournament._tournament._calculate_results = MagicMock(name="_calculate_results") + tournament._tournament._calculate_results = MagicMock( + name="_calculate_results" + ) # Mocking this as it is called by play self.assertIsNone( tournament.play( @@ -1229,7 +1260,9 @@ def test_write_interactions(self): ) # Get the calls made to write_interactions - calls = tournament._tournament._write_interactions_to_file.call_args_list + calls = ( + tournament._tournament._write_interactions_to_file.call_args_list + ) self.assertEqual(len(calls), 15) def test_write_to_csv_with_results(self): @@ -1254,9 +1287,13 @@ def test_write_to_csv_without_results(self): turns=2, repetitions=2, ) - tournament.play(filename=self.filename, progress_bar=False, build_results=False) + tournament.play( + filename=self.filename, progress_bar=False, build_results=False + ) df = pd.read_csv(self.filename) - path = pathlib.Path("test_outputs/expected_test_tournament_no_results.csv") + path = pathlib.Path( + "test_outputs/expected_test_tournament_no_results.csv" + ) expected_df = pd.read_csv(axl_filename(path)) self.assertTrue(df.equals(expected_df)) @@ -1292,13 +1329,20 @@ def test_noise_setter(self): def test_match_generator_setter(self): expected_match_generator_turns = 123 - self.test_tournament.match_generator.turns = expected_match_generator_turns - self.assertEqual(self.test_tournament.match_generator.turns, expected_match_generator_turns) + self.test_tournament.match_generator.turns = ( + expected_match_generator_turns + ) + self.assertEqual( + self.test_tournament.match_generator.turns, + expected_match_generator_turns, + ) def test_num_interactions_setter(self): expected_num_interactions = 123 self.test_tournament.num_interactions = expected_num_interactions - self.assertEqual(self.test_tournament.num_interactions, expected_num_interactions) + self.assertEqual( + self.test_tournament.num_interactions, expected_num_interactions + ) 
def test_use_progress_bar_setter(self): expected_use_progress_bar = False @@ -1328,7 +1372,9 @@ def test_init(self): prob_end=self.test_prob_end, noise=0.2, ) - self.assertEqual(tournament.match_generator.prob_end, tournament.prob_end) + self.assertEqual( + tournament.match_generator.prob_end, tournament.prob_end + ) self.assertEqual(len(tournament.players), len(test_strategies)) self.assertEqual(tournament.game.score((C, C)), (3, 3)) self.assertIsNone(tournament.turns) @@ -1362,12 +1408,16 @@ def test_init(self): # these two examples were identified by hypothesis. @example( tournament=axl.Tournament( - players=[axl.BackStabber(), axl.MindReader()], prob_end=0.2, repetitions=1, + players=[axl.BackStabber(), axl.MindReader()], + prob_end=0.2, + repetitions=1, ) ) @example( tournament=axl.Tournament( - players=[axl.ThueMorse(), axl.MindReader()], prob_end=0.2, repetitions=1, + players=[axl.ThueMorse(), axl.MindReader()], + prob_end=0.2, + repetitions=1, ) ) def test_property_serial_play(self, tournament): @@ -1429,7 +1479,9 @@ def test_init(self): seed=integers(min_value=0, max_value=4294967295), ) @settings(max_examples=5) - def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): + def test_complete_tournament( + self, strategies, turns, repetitions, noise, seed + ): """ A test to check that a spatial tournament on the complete multigraph gives the same results as the round robin. @@ -1448,7 +1500,11 @@ def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): ) # create a complete spatial tournament spatial_tournament = axl.Tournament( - players, repetitions=repetitions, turns=turns, noise=noise, edges=edges + players, + repetitions=repetitions, + turns=turns, + noise=noise, + edges=edges, ) axl.seed(seed) @@ -1459,16 +1515,23 @@ def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): self.assertEqual(results.ranked_names, spatial_results.ranked_names) self.assertEqual(results.num_players, spatial_results.num_players) self.assertEqual(results.repetitions, spatial_results.repetitions) - self.assertEqual(results.payoff_diffs_means, spatial_results.payoff_diffs_means) + self.assertEqual( + results.payoff_diffs_means, spatial_results.payoff_diffs_means + ) self.assertEqual(results.payoff_matrix, spatial_results.payoff_matrix) self.assertEqual(results.payoff_stddevs, spatial_results.payoff_stddevs) self.assertEqual(results.payoffs, spatial_results.payoffs) - self.assertEqual(results.cooperating_rating, spatial_results.cooperating_rating) + self.assertEqual( + results.cooperating_rating, spatial_results.cooperating_rating + ) self.assertEqual(results.cooperation, spatial_results.cooperation) self.assertEqual( - results.normalised_cooperation, spatial_results.normalised_cooperation + results.normalised_cooperation, + spatial_results.normalised_cooperation, + ) + self.assertEqual( + results.normalised_scores, spatial_results.normalised_scores ) - self.assertEqual(results.normalised_scores, spatial_results.normalised_scores) self.assertEqual( results.good_partner_matrix, spatial_results.good_partner_matrix ) @@ -1488,7 +1551,12 @@ def test_particular_tournament(self): edges = [(0, 2), (0, 3), (1, 2), (1, 3)] tournament = axl.Tournament(players, edges=edges) results = tournament.play(progress_bar=False) - expected_ranked_names = ["Cooperator", "Tit For Tat", "Grudger", "Defector"] + expected_ranked_names = [ + "Cooperator", + "Tit For Tat", + "Grudger", + "Defector", + ] self.assertEqual(results.ranked_names, 
expected_ranked_names) # Check that this tournament runs with noise @@ -1553,13 +1621,17 @@ def test_complete_tournament(self, strategies, prob_end, seed, reps): players = [s() for s in strategies] # create a prob end round robin tournament - tournament = axl.Tournament(players, prob_end=prob_end, repetitions=reps) + tournament = axl.Tournament( + players, prob_end=prob_end, repetitions=reps + ) axl.seed(seed) results = tournament.play(progress_bar=False) # create a complete spatial tournament # edges - edges = [(i, j) for i in range(len(players)) for j in range(i, len(players))] + edges = [ + (i, j) for i in range(len(players)) for j in range(i, len(players)) + ] spatial_tournament = axl.Tournament( players, prob_end=prob_end, repetitions=reps, edges=edges @@ -1599,7 +1671,9 @@ def test_one_turn_tournament(self, tournament, seed): one_turn_results = tournament.play(progress_bar=False) self.assertEqual(prob_end_results.scores, one_turn_results.scores) self.assertEqual(prob_end_results.wins, one_turn_results.wins) - self.assertEqual(prob_end_results.cooperation, one_turn_results.cooperation) + self.assertEqual( + prob_end_results.cooperation, one_turn_results.cooperation + ) class TestHelperFunctions(unittest.TestCase): From cebf1171451771a3cfbd14de39f5be64e6317207 Mon Sep 17 00:00:00 2001 From: "T.J. Gaffney" Date: Sat, 25 Apr 2020 23:14:51 -0700 Subject: [PATCH 5/7] Add files that have been moved back to their namespace. Were previously untracked. --- axelrod/_strategy_utils.py | 193 +++ axelrod/action.py | 102 ++ axelrod/classifier.py | 246 ++++ .../compute_finite_state_machine_memory.py | 266 ++++ axelrod/deterministic_cache.py | 175 +++ axelrod/ecosystem.py | 121 ++ axelrod/eigen.py | 90 ++ axelrod/evolvable_player.py | 88 ++ axelrod/fingerprint.py | 611 ++++++++ axelrod/game.py | 73 + axelrod/graph.py | 166 +++ axelrod/history.py | 133 ++ axelrod/interaction_utils.py | 286 ++++ axelrod/load_data_.py | 62 + axelrod/match.py | 263 ++++ axelrod/match_generator.py | 123 ++ axelrod/mock_player.py | 29 + axelrod/moran.py | 541 +++++++ axelrod/player.py | 215 +++ axelrod/plot.py | 333 +++++ axelrod/random_.py | 95 ++ axelrod/result_set.py | 788 +++++++++++ axelrod/strategy_transformers.py | 679 +++++++++ axelrod/tests/__init__.py | 0 axelrod/tests/property.py | 335 +++++ axelrod/tests/unit/__init__.py | 0 axelrod/tests/unit/test_actions.py | 64 + axelrod/tests/unit/test_classification.py | 356 +++++ ...est_compute_finite_state_machine_memory.py | 350 +++++ .../tests/unit/test_deterministic_cache.py | 111 ++ axelrod/tests/unit/test_ecosystem.py | 102 ++ axelrod/tests/unit/test_eigen.py | 52 + axelrod/tests/unit/test_filters.py | 170 +++ axelrod/tests/unit/test_fingerprint.py | 516 +++++++ axelrod/tests/unit/test_game.py | 80 ++ axelrod/tests/unit/test_graph.py | 305 ++++ axelrod/tests/unit/test_history.py | 119 ++ axelrod/tests/unit/test_interaction_utils.py | 146 ++ axelrod/tests/unit/test_load_data.py | 17 + axelrod/tests/unit/test_match.py | 377 +++++ axelrod/tests/unit/test_match_generator.py | 237 ++++ axelrod/tests/unit/test_mock_player.py | 20 + axelrod/tests/unit/test_moran.py | 561 ++++++++ axelrod/tests/unit/test_pickling.py | 394 ++++++ axelrod/tests/unit/test_plot.py | 257 ++++ axelrod/tests/unit/test_property.py | 232 +++ axelrod/tests/unit/test_random_.py | 88 ++ axelrod/tests/unit/test_resultset.py | 1248 +++++++++++++++++ .../tests/unit/test_strategy_transformers.py | 714 ++++++++++ axelrod/tests/unit/test_strategy_utils.py | 144 ++ axelrod/tests/unit/test_tournament.py | 1070 
++++++++++++++ axelrod/tests/unit/test_version.py | 10 + axelrod/tournament.py | 513 +++++++ 53 files changed, 14266 insertions(+) create mode 100644 axelrod/_strategy_utils.py create mode 100644 axelrod/action.py create mode 100644 axelrod/classifier.py create mode 100644 axelrod/compute_finite_state_machine_memory.py create mode 100644 axelrod/deterministic_cache.py create mode 100644 axelrod/ecosystem.py create mode 100644 axelrod/eigen.py create mode 100644 axelrod/evolvable_player.py create mode 100644 axelrod/fingerprint.py create mode 100644 axelrod/game.py create mode 100644 axelrod/graph.py create mode 100644 axelrod/history.py create mode 100644 axelrod/interaction_utils.py create mode 100644 axelrod/load_data_.py create mode 100644 axelrod/match.py create mode 100644 axelrod/match_generator.py create mode 100644 axelrod/mock_player.py create mode 100644 axelrod/moran.py create mode 100644 axelrod/player.py create mode 100644 axelrod/plot.py create mode 100644 axelrod/random_.py create mode 100644 axelrod/result_set.py create mode 100644 axelrod/strategy_transformers.py create mode 100644 axelrod/tests/__init__.py create mode 100644 axelrod/tests/property.py create mode 100644 axelrod/tests/unit/__init__.py create mode 100644 axelrod/tests/unit/test_actions.py create mode 100644 axelrod/tests/unit/test_classification.py create mode 100644 axelrod/tests/unit/test_compute_finite_state_machine_memory.py create mode 100644 axelrod/tests/unit/test_deterministic_cache.py create mode 100644 axelrod/tests/unit/test_ecosystem.py create mode 100644 axelrod/tests/unit/test_eigen.py create mode 100644 axelrod/tests/unit/test_filters.py create mode 100644 axelrod/tests/unit/test_fingerprint.py create mode 100644 axelrod/tests/unit/test_game.py create mode 100644 axelrod/tests/unit/test_graph.py create mode 100644 axelrod/tests/unit/test_history.py create mode 100644 axelrod/tests/unit/test_interaction_utils.py create mode 100644 axelrod/tests/unit/test_load_data.py create mode 100644 axelrod/tests/unit/test_match.py create mode 100644 axelrod/tests/unit/test_match_generator.py create mode 100644 axelrod/tests/unit/test_mock_player.py create mode 100644 axelrod/tests/unit/test_moran.py create mode 100644 axelrod/tests/unit/test_pickling.py create mode 100644 axelrod/tests/unit/test_plot.py create mode 100644 axelrod/tests/unit/test_property.py create mode 100644 axelrod/tests/unit/test_random_.py create mode 100644 axelrod/tests/unit/test_resultset.py create mode 100644 axelrod/tests/unit/test_strategy_transformers.py create mode 100644 axelrod/tests/unit/test_strategy_utils.py create mode 100644 axelrod/tests/unit/test_tournament.py create mode 100644 axelrod/tests/unit/test_version.py create mode 100644 axelrod/tournament.py diff --git a/axelrod/_strategy_utils.py b/axelrod/_strategy_utils.py new file mode 100644 index 000000000..13db18d16 --- /dev/null +++ b/axelrod/_strategy_utils.py @@ -0,0 +1,193 @@ +"""Utilities used by various strategies.""" + +import itertools +from functools import lru_cache + +from axelrod.action import Action +from axelrod.strategies.cooperator import Cooperator +from axelrod.strategies.defector import Defector + +C, D = Action.C, Action.D + + +def detect_cycle(history, min_size=1, max_size=12, offset=0): + """Detects cycles in the sequence history. + + Mainly used by hunter strategies. 
+
+    Parameters
+    ----------
+    history: sequence of C and D
+        The sequence to look for cycles within
+    min_size: int, 1
+        The minimum length of the cycle
+    max_size: int, 12
+        The maximum length of the cycle
+    offset: int, 0
+        The amount of history to skip initially
+
+    Returns
+    -------
+    Tuple of C and D
+        The cycle detected in the input history
+    """
+    history_tail = history[offset:]
+    new_max_size = min(len(history_tail) // 2, max_size)
+    for i in range(min_size, new_max_size + 1):
+        has_cycle = True
+        cycle = tuple(history_tail[:i])
+        for j, elem in enumerate(history_tail):
+            if elem != cycle[j % len(cycle)]:
+                has_cycle = False
+                break
+        if has_cycle:
+            return cycle
+    return None
+
+
+def inspect_strategy(inspector, opponent):
+    """Inspects the strategy of an opponent.
+
+    Simulates one round of play with an opponent, unless the opponent has
+    an inspection countermeasure.
+
+    Parameters
+    ----------
+    inspector: IpdPlayer
+        The player doing the inspecting
+    opponent: IpdPlayer
+        The player being inspected
+
+    Returns
+    -------
+    Action
+        The action that would be taken by the opponent.
+    """
+    if hasattr(opponent, "foil_strategy_inspection"):
+        return opponent.foil_strategy_inspection()
+    else:
+        return opponent.strategy(inspector)
+
+
+def _limited_simulate_play(player_1, player_2, h1):
+    """Simulates a player's move.
+
+    After inspecting player_2's next move (allowing player_2's strategy
+    method to set any internal variables as needed), update histories
+    for both players. Note that player_1's move is an argument.
+
+    If you need a more complete simulation, see `simulate_play` in
+    player.py. This function is specifically designed for the needs
+    of MindReader.
+
+    Parameters
+    ----------
+    player_1: IpdPlayer
+        The player whose move is already known.
+    player_2: IpdPlayer
+        The player that we want to inspect.
+    h1: Action
+        The next action for the first player.
+    """
+    h2 = inspect_strategy(player_1, player_2)
+    player_1.update_history(h1, h2)
+    player_2.update_history(h2, h1)
+
+
+def simulate_match(player_1, player_2, strategy, rounds=10):
+    """Simulates a number of rounds with a constant strategy.
+
+    Parameters
+    ----------
+    player_1: IpdPlayer
+        The player that will have a constant strategy.
+    player_2: IpdPlayer
+        The player we want to simulate.
+    strategy: Action
+        The constant strategy to use for the first player.
+    rounds: int
+        The number of rounds to play.
+    """
+    for match in range(rounds):
+        _limited_simulate_play(player_1, player_2, strategy)
+
+
+def _calculate_scores(p1, p2, game):
+    """Calculates the scores for two players based on their history.
+
+    Parameters
+    ----------
+    p1: IpdPlayer
+        The first player.
+    p2: IpdPlayer
+        The second player.
+    game: IpdGame
+        IpdGame object used to score rounds in the players' histories.
+
+    Returns
+    -------
+    int, int
+        The scores for the two input players.
+    """
+    s1, s2 = 0, 0
+    for pair in zip(p1.history, p2.history):
+        score = game.score(pair)
+        s1 += score[0]
+        s2 += score[1]
+    return s1, s2
+
+
+def look_ahead(player_1, player_2, game, rounds=10):
+    """Returns a constant action that maximizes score by looking ahead.
+
+    Parameters
+    ----------
+    player_1: IpdPlayer
+        The player that will look ahead.
+    player_2: IpdPlayer
+        The opponent that will be inspected.
+    game: IpdGame
+        The IpdGame object used to score rounds.
+    rounds: int
+        The number of rounds to look ahead.
+
+    Returns
+    -------
+    Action
+        The action that maximizes the score if it is played constantly.
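
As a quick check of detect_cycle's behaviour, a minimal sketch (assuming the module is importable as axelrod._strategy_utils once this patch is applied):

    from axelrod._strategy_utils import detect_cycle
    from axelrod.action import Action

    C, D = Action.C, Action.D

    # The shortest repeating block is returned as a tuple.
    assert detect_cycle([C, D, C, D, C, D]) == (C, D)
    # Returns None when no cycle of an allowed size fits the history.
    assert detect_cycle([C, C, D, D, C, D]) is None
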
+ """ + results = {} + possible_strategies = {C: Cooperator(), D: Defector()} + for action, player in possible_strategies.items(): + # Instead of a deepcopy, create a new opponent and replay the history to it. + opponent_ = player_2.clone() + for h in player_1.history: + _limited_simulate_play(player, opponent_, h) + + # Now play forward with the constant strategy. + simulate_match(player, opponent_, action, rounds) + results[action] = _calculate_scores(player, opponent_, game) + + return C if results[C] > results[D] else D + + +@lru_cache() +def recursive_thue_morse(n): + """The recursive definition of the Thue-Morse sequence. + + The first few terms of the Thue-Morse sequence are: + 0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 . . . + """ + + if n == 0: + return 0 + if n % 2 == 0: + return recursive_thue_morse(n / 2) + if n % 2 == 1: + return 1 - recursive_thue_morse((n - 1) / 2) + + +def thue_morse_generator(start=0): + """A generator for the Thue-Morse sequence.""" + for n in itertools.count(start): + yield recursive_thue_morse(n) diff --git a/axelrod/action.py b/axelrod/action.py new file mode 100644 index 000000000..a87b9a06b --- /dev/null +++ b/axelrod/action.py @@ -0,0 +1,102 @@ +"""Actions for the Prisoner's Dilemma and related utilities. + +For convenience in other modules you can alias the actions: + +from axelrod import Action +C, D = Action.C, Action.D +""" + +from enum import Enum +from functools import total_ordering +from typing import Iterable + + +class UnknownActionError(ValueError): + """Error indicating an unknown action was used.""" + + def __init__(self, *args): + super(UnknownActionError, self).__init__(*args) + + +@total_ordering +class Action(Enum): + """Core actions in the Prisoner's Dilemma. + + There are only two possible actions, namely Cooperate or Defect, + which are called C and D for convenience. + """ + + C = 0 # Cooperate + D = 1 # Defect + + def __lt__(self, other): + return self.value < other.value + + def __repr__(self): + return self.name + + def __str__(self): + return self.name + + def flip(self): + """Returns the opposite Action.""" + if self == Action.C: + return Action.D + return Action.C + + @classmethod + def from_char(cls, character): + """Converts a single character into an Action. + + Parameters + ---------- + character: a string of length one + + Returns + ------- + Action + The action corresponding to the input character + + + Raises + ------ + UnknownActionError + If the input string is not 'C' or 'D' + """ + if character == "C": + return cls.C + if character == "D": + return cls.D + raise UnknownActionError('Character must be "C" or "D".') + + +def str_to_actions(actions: str) -> tuple: + """Converts a string to a tuple of actions. + + Parameters + ---------- + actions: string consisting of 'C's and 'D's + + Returns + ------- + tuple + Each element corresponds to a letter from the input string. + """ + return tuple(Action.from_char(element) for element in actions) + + +def actions_to_str(actions: Iterable[Action]) -> str: + """Converts an iterable of actions into a string. + + Example: (D, D, C) would be converted to 'DDC' + + Parameters + ----------- + actions: iterable of Action + + Returns + ------- + str + A string of 'C's and 'D's. 
+ """ + return "".join(map(str, actions)) diff --git a/axelrod/classifier.py b/axelrod/classifier.py new file mode 100644 index 000000000..c68073ddd --- /dev/null +++ b/axelrod/classifier.py @@ -0,0 +1,246 @@ +import os +from typing import ( + Any, + Callable, + Generic, + List, + Optional, + Set, + Text, + Type, + TypeVar, + Union, +) +import warnings +import yaml + +from axelrod.player import IpdPlayer + +ALL_CLASSIFIERS_PATH = "data/all_classifiers.yml" + +T = TypeVar("T") + + +class Classifier(Generic[T]): + """Describes a IpdPlayer (strategy). + + User sets a name and function, f, at initialization. Through + classify_player, looks for the classifier to be set in the passed IpdPlayer + class. If not set, then passes to f for calculation. + + f must operate on the class, and not an instance. If necessary, f may + initialize an instance, but this shouldn't depend on runtime states, because + the result gets stored in a file. If a strategy's classifier depends on + runtime states, such as those created by transformers, then it can set the + field in its classifier dict, and that will take precedent over saved + values. + + Attributes + ---------- + name: An identifier for the classifier, used as a dict key in storage and in + 'classifier' dicts of IpdPlayer classes. + player_class_classifier: A function that takes in a IpdPlayer class (not an + instance) and returns a value. + """ + + def __init__( + self, name: Text, player_class_classifier: Callable[[Type[IpdPlayer]], T] + ): + self.name = name + self.player_class_classifier = player_class_classifier + + def classify_player(self, player: Type[IpdPlayer]) -> T: + """Look for this classifier in the passed player's 'classifier' dict, + otherwise pass to the player to f.""" + try: + return player.classifier[self.name] + except: + return self.player_class_classifier(player) + + +stochastic = Classifier[bool]("stochastic", lambda _: False) +memory_depth = Classifier[Union[float, int]]("memory_depth", lambda _: float("inf")) +makes_use_of = Classifier[Optional[Set[Text]]]("makes_use_of", lambda _: None) +long_run_time = Classifier[bool]("long_run_time", lambda _: False) +inspects_source = Classifier[Optional[bool]]("inspects_source", lambda _: None) +manipulates_source = Classifier[Optional[bool]]("manipulates_source", lambda _: None) +manipulates_state = Classifier[Optional[bool]]("manipulates_state", lambda _: None) + +# Should list all known classifiers. +all_classifiers = [ + stochastic, + memory_depth, + makes_use_of, + long_run_time, + inspects_source, + manipulates_source, + manipulates_state, +] + + +def rebuild_classifier_table( + classifiers: List[Classifier], + players: List[Type[IpdPlayer]], + path: Text = ALL_CLASSIFIERS_PATH, +) -> None: + """Builds the classifier table in data. + + Parameters + ---------- + classifiers: A list of classifiers to calculate on the strategies + players: A list of strategies (classes, not instances) to compute the + classifiers for. + path: Where to save the resulting yaml file. + """ + # Get absolute path + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, path) + + all_player_dicts = dict() + for p in players: + new_player_dict = dict() + for c in classifiers: + new_player_dict[c.name] = c.classify_player(p) + all_player_dicts[p.name] = new_player_dict + + with open(filename, "w") as f: + yaml.dump(all_player_dicts, f) + + +class _Classifiers(object): + """A singleton used to calculate any known classifier. 
+
+    Attributes
+    ----------
+    all_player_dicts: A local copy of the dict saved in the classifier table.
+        The keys are player names, and the values are 'classifier' dicts (keyed
+        by classifier name).
+    """
+
+    _instance = None
+    all_player_dicts = dict()
+
+    # Make this a singleton
+    def __new__(cls):
+        if cls._instance is None:
+            cls._instance = super(_Classifiers, cls).__new__(cls)
+            # When this is first created, read from the classifier table file.
+            # Get absolute path
+            dirname = os.path.dirname(__file__)
+            filename = os.path.join(dirname, ALL_CLASSIFIERS_PATH)
+            with open(filename, "r") as f:
+                cls.all_player_dicts = yaml.load(f, Loader=yaml.FullLoader)
+
+        return cls._instance
+
+    @classmethod
+    def known_classifier(cls, classifier_name: Text) -> bool:
+        """Returns True if the passed classifier_name is known."""
+        global all_classifiers
+        return classifier_name in (c.name for c in all_classifiers)
+
+    @classmethod
+    def __getitem__(
+        cls, key: Union[Classifier, Text]
+    ) -> Callable[[Union[IpdPlayer, Type[IpdPlayer]]], Any]:
+        """Looks up the classifier for the player.
+
+        Given a passed classifier key, return a function that:
+
+        Takes a player. If the classifier is found in the 'classifier' dict on
+        the player, then return that. Otherwise look for the classifier for the
+        player in the all_player_dicts. Returns None if the classifier is not
+        found in either of those.
+
+        The returned function expects IpdPlayer instances, but if an IpdPlayer
+        class is passed, then it will create an instance by calling an
+        argument-less initializer. If no such initializer exists on the class,
+        then an error will result.
+
+        Parameters
+        ----------
+        key: A classifier or classifier name that we want to calculate for the
+            player.
+
+        Returns
+        -------
+        A function that will map IpdPlayer instances (or classes) to their value
+        for this classification.
+        """
+        # Key may be the name or an instance. Convert to name.
+        if not isinstance(key, str):
+            key = key.name
+
+        if not cls.known_classifier(key):
+            raise KeyError("Unknown classifier")
+
+        def classify_player_for_this_classifier(
+            player: Union[IpdPlayer, Type[IpdPlayer]]
+        ) -> Any:
+            def try_lookup() -> Any:
+                try:
+                    player_classifiers = cls.all_player_dicts[player.name]
+                except:
+                    return None
+
+                return player_classifiers.get(key, None)
+
+            # If the passed player is not an instance, then try to initialize an
+            # instance without arguments.
+            if not isinstance(player, IpdPlayer):
+                try:
+                    player = player()
+                    warnings.warn(
+                        "Classifiers are intended to run on player instances. "
+                        "Passed player {} was initialized with default "
+                        "arguments.".format(player.name)
+                    )
+                except:
+                    # All strategies must have trivial initializers.
+                    raise Exception(
+                        "Passed player class doesn't have a trivial initializer."
+                    )
+
+            # Factory-generated players won't exist in the table. Also, some
+            # players, like Random, may change classifiers at construction time;
+            # this get() function takes a player instance, while the saved values
+            # are from operations on the player object itself.
+            if key in player.classifier:
+                return player.classifier[key]
+
+            # Try to find the name in the all_player_dicts, read from disk.
+            return try_lookup()
+
+        return classify_player_for_this_classifier
+
+    @classmethod
+    def is_basic(cls, s: Union[IpdPlayer, Type[IpdPlayer]]):
+        """
+        Defines criteria for a strategy to be considered 'basic'.
+        """
+        stochastic = cls.__getitem__("stochastic")(s)
+        depth = cls.__getitem__("memory_depth")(s)
+        inspects_source = cls.__getitem__("inspects_source")(s)
+        manipulates_source = cls.__getitem__("manipulates_source")(s)
+        manipulates_state = cls.__getitem__("manipulates_state")(s)
+        return (
+            not stochastic
+            and not inspects_source
+            and not manipulates_source
+            and not manipulates_state
+            and depth in (0, 1)
+        )

+    @classmethod
+    def obey_axelrod(cls, s: Union[IpdPlayer, Type[IpdPlayer]]):
+        """
+        A function to check if a strategy obeys Axelrod's original tournament
+        rules.
+        """
+        for c in ["inspects_source", "manipulates_source", "manipulates_state"]:
+            if cls.__getitem__(c)(s):
+                return False
+        return True
+
+
+Classifiers = _Classifiers()
diff --git a/axelrod/compute_finite_state_machine_memory.py b/axelrod/compute_finite_state_machine_memory.py
new file mode 100644
index 000000000..0f83d3494
--- /dev/null
+++ b/axelrod/compute_finite_state_machine_memory.py
@@ -0,0 +1,266 @@
+from axelrod.action import Action
+from collections import defaultdict, namedtuple
+from typing import DefaultDict, Iterator, Dict, Tuple, Set, List
+
+C, D = Action.C, Action.D
+
+Transition = namedtuple(
+    "Transition", ["state", "last_opponent_action", "next_state", "next_action"]
+)
+TransitionDict = Dict[Tuple[int, Action], Tuple[int, Action]]
+
+
+class Memit(object):
+    """
+    Memit = unit of memory.
+
+    This represents the amount of memory that we gain with each new piece of
+    history. It includes a state, the response that we make on our way into
+    that state (in_act), and the opponent's action that makes us move out of
+    that state (out_act).
+
+    For example, the finite state machine:
+    (0, C, 0, C),
+    (0, D, 1, C),
+    (1, C, 0, D),
+    (1, D, 0, D)
+
+    has the memits:
+    (C, 0, C),
+    (C, 0, D),
+    (D, 0, C),
+    (D, 0, D),
+    (C, 1, C),
+    (C, 1, D)
+    """
+
+    def __init__(self, in_act: Action, state: int, out_act: Action):
+        self.in_act = in_act
+        self.state = state
+        self.out_act = out_act
+
+    def __repr__(self) -> str:
+        return "{}, {}, {}".format(self.in_act, self.state, self.out_act)
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __eq__(self, other_memit) -> bool:
+        """True if the in and out actions are both the same."""
+        return (
+            self.in_act == other_memit.in_act
+            and self.out_act == other_memit.out_act
+        )
+
+    def __lt__(self, other_memit) -> bool:
+        return repr(self) < repr(other_memit)
+
+
+MemitPair = Tuple[Memit, Memit]
+
+
+def ordered_memit_tuple(x: Memit, y: Memit) -> tuple:
+    """Returns a tuple of x and y, sorted so that (x, y) is viewed as the
+    same as (y, x).
+    """
+    if x < y:
+        return (x, y)
+    else:
+        return (y, x)
+
+
+def transition_iterator(transitions: TransitionDict) -> Iterator[Transition]:
+    """Changes the transition dictionary into an iterator over namedtuples."""
+    for k, v in transitions.items():
+        yield Transition(k[0], k[1], v[0], v[1])
+
+
+def get_accessible_transitions(
+    transitions: TransitionDict, initial_state: int
+) -> TransitionDict:
+    """Gets all transitions from the dict that can be reached from the
+    initial_state.
+    """
+    # Initial dict of edges between states and a dict of visited status for each
+    # of the states.
+    edge_dict = defaultdict(list)  # type: DefaultDict[int, List[int]]
+    visited = dict()
+    for trans in transition_iterator(transitions):
+        visited[trans.state] = False
+        edge_dict[trans.state].append(trans.next_state)
+    # Keep track of states that can be accessed.
+    accessible_states = [initial_state]
+
+    state_queue = [initial_state]
+    visited[initial_state] = True
+    # While there are states in the queue, visit all of its children, adding
+    # each to the accessible_states. [A basic depth-first search.]
+    while len(state_queue) > 0:
+        state = state_queue.pop()
+        for successor in edge_dict[state]:
+            # Don't process the same state twice.
+            if not visited[successor]:
+                visited[successor] = True
+                state_queue.append(successor)
+                accessible_states.append(successor)
+
+    # Now for each transition in the passed TransitionDict, copy the transition
+    # to accessible_transitions if and only if the starting state is accessible,
+    # as determined above.
+    accessible_transitions = dict()
+    for trans in transition_iterator(transitions):
+        if trans.state in accessible_states:
+            accessible_transitions[
+                (trans.state, trans.last_opponent_action)
+            ] = (trans.next_state, trans.next_action)
+
+    return accessible_transitions
+
+
+def longest_path(
+    edges: DefaultDict[MemitPair, Set[MemitPair]], starting_at: MemitPair
+) -> int:
+    """Returns the number of nodes in the longest path that starts at the given
+    node. Returns infinity if a loop is encountered.
+    """
+    visited = dict()
+    for source, destinations in edges.items():
+        visited[source] = False
+        for destination in destinations:
+            visited[destination] = False
+
+    # This is what we'll recurse on. visited dict is shared between calls.
+    def recurse(at_node):
+        visited[at_node] = True
+        record = 1  # Count the nodes, not the edges.
+        for successor in edges[at_node]:
+            if visited[successor]:
+                return float("inf")
+            successor_length = recurse(successor)
+            if successor_length == float("inf"):
+                return float("inf")
+            if record < successor_length + 1:
+                record = successor_length + 1
+        return record
+
+    return recurse(starting_at)
+
+
+def get_memory_from_transitions(
+    transitions: TransitionDict,
+    initial_state: int = None,
+    all_actions: Tuple[Action, Action] = (C, D),
+) -> int:
+    """This function calculates the memory of an FSM from the transitions.
+
+    Assume that transitions are a dict with entries like
+    (state, last_opponent_action): (next_state, next_action)
+
+    We first break down the transitions into memits (see above). We also create
+    a graph of memits, where the successors to a given memit are all possible
+    memits that could occur in the memory immediately before the given memit.
+
+    Then we pair up memits with different states, but the same in and out
+    actions. These represent points in time that we can't determine which
+    state we're in. We also create a graph of memit-pairs, where a memit-pair,
+    Y, succeeds a memit-pair, X, if the two memits in X are succeeded by the
+    two memits in Y. These edges represent consecutive points in time that we
+    can't determine which state we're in.
+
+    Then for all memit-pairs that disagree, in the sense that they imply
+    different next_actions, we find the longest chain starting at that
+    memit-pair. [If a loop is encountered then this will be infinite.] We take
+    the maximum over all such memit-pairs. This represents the longest possible
+    chain of memory for which we wouldn't know what to do next. We return this.
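
As a concrete check of this definition, a sketch on one-state machines (using the import path added by this patch):

    from axelrod.compute_finite_state_machine_memory import get_memory_from_transitions
    from axelrod.action import Action

    C, D = Action.C, Action.D

    # Tit For Tat: a single state whose next action mirrors the opponent.
    tft = {(0, C): (0, C), (0, D): (0, D)}
    assert get_memory_from_transitions(tft) == 1

    # Cooperator: the next action never varies, so no memory is needed.
    coop = {(0, C): (0, C), (0, D): (0, C)}
    assert get_memory_from_transitions(coop) == 0
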
+ """ + # If initial_state is set, use this to determine which transitions are + # reachable from the initial_state and restrict to those. + if initial_state is not None: + transitions = get_accessible_transitions(transitions, initial_state) + + # Get the incoming actions for each state. + incoming_action_by_state = defaultdict( + set + ) # type: DefaultDict[int, Set[Action]] + for trans in transition_iterator(transitions): + incoming_action_by_state[trans.next_state].add(trans.next_action) + + # Keys are starting memit, and values are all possible terminal memit. + # Will walk backwards through the graph. + memit_edges = defaultdict(set) # type: DefaultDict[Memit, Set[Memit]] + for trans in transition_iterator(transitions): + # Since all actions are out-paths for each state, add all of these. + # That is to say that the opponent could do anything + for out_action in all_actions: + # More recent in action history + starting_node = Memit( + trans.next_action, trans.next_state, out_action + ) + # All incoming paths to current state + for in_action in incoming_action_by_state[trans.state]: + # Less recent in action history + ending_node = Memit( + in_action, trans.state, trans.last_opponent_action + ) + memit_edges[starting_node].add(ending_node) + + all_memits = list(memit_edges.keys()) + + pair_nodes = set() + pair_edges = defaultdict( + set + ) # type: DefaultDict[MemitPair, Set[MemitPair]] + # Loop through all pairs of memits. + for x, y in [(x, y) for x in all_memits for y in all_memits]: + if x == y and x.state == y.state: + continue + if x != y: + continue + + # If the memits match, then the strategy can't tell the difference + # between the states. We call this a pair of matched memits (or just a + # pair). + pair_nodes.add(ordered_memit_tuple(x, y)) + # When two memits in matched pair have successors that are also matched, + # then we draw an edge. This represents consecutive historical times + # that we can't tell which state we're in. + for x_successor in memit_edges[x]: + for y_successor in memit_edges[y]: + if x_successor == y_successor: + pair_edges[ordered_memit_tuple(x, y)].add( + ordered_memit_tuple(x_successor, y_successor) + ) + + # Get next_action for each memit. Used to decide if they are in conflict, + # because we only have undecidability if next_action doesn't match. + next_action_by_memit = dict() + for trans in transition_iterator(transitions): + for in_action in incoming_action_by_state[trans.state]: + memit_key = Memit( + in_action, trans.state, trans.last_opponent_action + ) + next_action_by_memit[memit_key] = trans.next_action + + # Calculate the longest path. + record = 0 + for pair in pair_nodes: + if next_action_by_memit[pair[0]] != next_action_by_memit[pair[1]]: + # longest_path is the longest chain of tied states. We add one to + # get the memory length needed to break all ties. + path_length = longest_path(pair_edges, pair) + 1 + if record < path_length: + record = path_length + + if record > 0: + return record + + # If there are no pair of tied memits (for which the next action are + # distinct), then either no memits are needed to break a tie (i.e. all + # next_actions are the same) or the first memit breaks a tie (i.e. 
memory 1)
+    next_action_set = set()
+    for trans in transition_iterator(transitions):
+        next_action_set.add(trans.next_action)
+    if len(next_action_set) == 1:
+        return 0
+    return 1
+
diff --git a/axelrod/deterministic_cache.py b/axelrod/deterministic_cache.py
new file mode 100644
index 000000000..b792986eb
--- /dev/null
+++ b/axelrod/deterministic_cache.py
@@ -0,0 +1,175 @@
+"""Tools for caching the results of deterministic matches.
+
+The cache, in most cases, can simply be treated as a dictionary:
+
+cache = DeterministicCache()
+cache[key1] = result1
+cache[key2] = result2
+...
+if some_key in cache:
+    do_something(cache[some_key])
+else:
+    ...
+"""
+
+import pickle
+from collections import UserDict
+from typing import List, Tuple
+
+from axelrod import Classifiers
+from .action import Action
+from .player import IpdPlayer
+
+CachePlayerKey = Tuple[IpdPlayer, IpdPlayer]
+CacheKey = Tuple[str, str]
+
+
+def _key_transform(key: CachePlayerKey) -> CacheKey:
+    """Convert a CachePlayerKey to a CacheKey
+
+    Parameters
+    ----------
+    key: tuple
+        A 2-tuple: (player instance, player instance)
+    """
+    return key[0].name, key[1].name
+
+
+def _is_valid_key(key: CachePlayerKey) -> bool:
+    """Validate a deterministic cache player key.
+
+    The key should always be a 2-tuple containing a pair of IpdPlayer
+    instances. Both players should be deterministic.
+
+    Parameters
+    ----------
+    key : object
+
+    Returns
+    -------
+    Boolean indicating if the key is valid
+    """
+    if not isinstance(key, tuple) or len(key) != 2:
+        return False
+
+    if not (isinstance(key[0], IpdPlayer) and isinstance(key[1], IpdPlayer)):
+        return False
+
+    if Classifiers["stochastic"](key[0]) or Classifiers["stochastic"](key[1]):
+        return False
+
+    return True
+
+
+def _is_valid_value(value: List) -> bool:
+    """Validate a deterministic cache value.
+
+    The value just needs to be a list, with any contents.
+
+    Parameters
+    ----------
+    value : object
+
+    Returns
+    -------
+    Boolean indicating if the value is valid
+    """
+    return isinstance(value, list)
+
+
+class DeterministicCache(UserDict):
+    """A class to cache the results of deterministic matches.
+
+    For matches with no noise between pairs of deterministic players, the
+    results will always be the same. We can hold the results for the longest
+    run in this class, so as to avoid repeatedly generating them in tournaments
+    of multiple repetitions. If a shorter or equal-length match is run, we can
+    use the stored results.
+
+    By also storing those cached results in a file, we can re-use the cache
+    between multiple tournaments if necessary.
+
+    The cache is a dictionary mapping pairs of IpdPlayer classes to a list of
+    resulting interactions. e.g. for a 3 turn IpdMatch between Cooperator and
+    Alternator, the dictionary entry would be:
+
+    (axelrod.Cooperator, axelrod.Alternator): [(C, C), (C, D), (C, C)]
+
+    Most of the functionality is provided by the UserDict class (which uses an
+    instance of dict as the 'data' attribute to hold the dictionary entries).
+
+    This class overrides the __init__ and __setitem__ methods in order to limit
+    and validate the keys and values to be as described above. It also adds
+    methods to save/load the cache to/from a file.
+    """
+
+    def __init__(self, file_name: str = None) -> None:
+        """Initialize a new cache.
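
Usage mirrors a plain dict, with keys validated as described above; a brief sketch, assuming DeterministicCache stays re-exported at the package top level:

    import axelrod as axl
    from axelrod.action import Action

    C, D = Action.C, Action.D

    cache = axl.DeterministicCache()
    # Both players are deterministic, so the key passes validation.
    cache[(axl.Cooperator(), axl.Alternator())] = [(C, C), (C, D), (C, C)]
    assert (axl.Cooperator(), axl.Alternator()) in cache
    cache.save("cache.pkl")  # later: axl.DeterministicCache(file_name="cache.pkl")
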
+
+        Parameters
+        ----------
+        file_name : string
+            Path to a previously saved cache file
+        """
+        super().__init__()
+        self.mutable = True
+        if file_name is not None:
+            self.load(file_name)
+
+    def __delitem__(self, key: CachePlayerKey):
+        return super().__delitem__(_key_transform(key))
+
+    def __getitem__(self, key: CachePlayerKey) -> List[Tuple[Action, Action]]:
+        return super().__getitem__(_key_transform(key))
+
+    def __contains__(self, key):
+        return super().__contains__(_key_transform(key))
+
+    def __setitem__(self, key: CachePlayerKey, value):
+        """Validate the key and value before setting them."""
+        if not self.mutable:
+            raise ValueError("Cannot update cache unless mutable is True.")
+
+        if not _is_valid_key(key):
+            raise ValueError(
+                "Key must be a tuple of 2 deterministic axelrod IpdPlayer classes"
+            )
+
+        if not _is_valid_value(value):
+            raise ValueError(
+                "Value must be a list with length equal to turns attribute"
+            )
+
+        super().__setitem__(_key_transform(key), value)
+
+    def save(self, file_name: str) -> bool:
+        """Serialise the cache dictionary to a file.
+
+        Parameters
+        ----------
+        file_name : string
+            File path to which the cache should be saved
+        """
+        with open(file_name, "wb") as io:
+            pickle.dump(self.data, io)
+        return True
+
+    def load(self, file_name: str) -> bool:
+        """Load a previously saved cache into the dictionary.
+
+        Parameters
+        ----------
+        file_name : string
+            Path to a previously saved cache file
+        """
+        with open(file_name, "rb") as io:
+            data = pickle.load(io)
+
+        if isinstance(data, dict):
+            self.data = data
+        else:
+            raise ValueError(
+                "Cache file exists but is not the correct format. "
+                "Try deleting and re-building the cache file."
+            )
+        return True
diff --git a/axelrod/ecosystem.py b/axelrod/ecosystem.py
new file mode 100644
index 000000000..1dadfff8f
--- /dev/null
+++ b/axelrod/ecosystem.py
@@ -0,0 +1,121 @@
+"""Tools for simulating population dynamics of immutable players.
+
+An ecosystem runs in the context of a previous tournament, and takes the
+results as input. That means no matches are run by the ecosystem, and a
+tournament needs to happen before it is created. For example:
+
+players = [axelrod.Cooperator(), axelrod.Defector()]
+tournament = axelrod.IpdTournament(players=players)
+results = tournament.play()
+ecosystem = axelrod.Ecosystem(results)
+ecosystem.reproduce(100)
+"""
+
+import random
+from typing import Callable, List
+
+from axelrod.result_set import ResultSet
+
+
+class Ecosystem(object):
+    """An ecosystem based on the payoff matrix from a tournament.
+
+    Attributes
+    ----------
+    num_players: int
+        The number of players
+    """
+
+    def __init__(
+        self,
+        results: ResultSet,
+        fitness: Callable[[float], float] = None,
+        population: List[int] = None,
+    ) -> None:
+        """Create a new ecosystem.
+
+        Parameters
+        ----------
+        results: ResultSet
+            The results of the tournament run beforehand to use.
+        fitness: callable
+            A function mapping a payoff to a reproduction rate.
+        population: List of ints.
+            The initial populations of the players, corresponding to the
+            payoff matrix in results.
+        """
+
+        self.results = results
+        self.num_players = self.results.num_players
+        self.payoff_matrix = self.results.payoff_matrix
+        self.payoff_stddevs = self.results.payoff_stddevs
+
+        # Population sizes will be recorded in this nested list, with each
+        # internal list containing strategy populations for a given turn.
The + # first list, representing the starting populations, will by default + # have all equal values, and all population lists will be normalized to + # one. An initial population vector can also be passed. This will be + # normalised, but must be of the correct size and have all non-negative + # values. + if population: + if min(population) < 0: + raise TypeError( + "Minimum value of population vector must be non-negative" + ) + elif len(population) != self.num_players: + raise TypeError( + "Population vector must be same size as number of players" + ) + else: + norm = sum(population) + self.population_sizes = [[p / norm for p in population]] + else: + self.population_sizes = [ + [1 / self.num_players for _ in range(self.num_players)] + ] + + # This function is quite arbitrary and probably only influences the + # kinetics for the current code. + if fitness: + self.fitness = fitness + else: + self.fitness = lambda p: p + + def reproduce(self, turns: int): + """Reproduce populations according to the payoff matrix. + + Parameters + ---------- + turns: int + The number of turns to run. + """ + for iturn in range(turns): + plist = list(range(self.num_players)) + pops = self.population_sizes[-1] + + # The unit payoff for each player in this turn is the sum of the + # payoffs obtained from playing with all other players, scaled by + # the size of the opponent's population. Note that we sample the + # normal distribution based on the payoff matrix and its standard + # deviations obtained from the iterated PD tournament run + # previously. + payoffs = [0.0 for ip in plist] + for ip in plist: + for jp in plist: + avg = self.payoff_matrix[ip][jp] + dev = self.payoff_stddevs[ip][jp] + p = random.normalvariate(avg, dev) + payoffs[ip] += p * pops[jp] + + # The fitness should determine how well a strategy reproduces. The + # new populations should be multiplied by something that is + # proportional to the fitness, but we are normalizing anyway so + # just multiply times fitness. + fitness = [self.fitness(p) for p in payoffs] + newpops = [p * f for p, f in zip(pops, fitness)] + + # Make sure the new populations are normalized to one. + norm = sum(newpops) + newpops = [p / norm for p in newpops] + + self.population_sizes.append(newpops) diff --git a/axelrod/eigen.py b/axelrod/eigen.py new file mode 100644 index 000000000..f7b6670f5 --- /dev/null +++ b/axelrod/eigen.py @@ -0,0 +1,90 @@ +""" +Compute the principal eigenvector of a matrix using power iteration. + +See also numpy.linalg.eig which calculates all the eigenvalues and +eigenvectors. +""" + +from typing import Tuple + +import numpy + + +def _normalise(nvec: numpy.ndarray) -> numpy.ndarray: + """Normalises the given numpy array.""" + with numpy.errstate(invalid="ignore"): + result = nvec / numpy.sqrt((nvec @ nvec)) + return result + + +def _squared_error(vector_1: numpy.ndarray, vector_2: numpy.ndarray) -> float: + """Computes the squared error between two numpy arrays.""" + diff = vector_1 - vector_2 + s = diff @ diff + return numpy.sqrt(s) + + +def _power_iteration(mat: numpy.array, initial: numpy.ndarray) -> numpy.ndarray: + """ + Generator of successive approximations. + + Params + ------ + mat: numpy.array + The matrix to use for multiplication iteration + initial: numpy.array, None + The initial state. 
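
Stepping back to the Ecosystem class above, a minimal end-to-end sketch (assuming the top-level Tournament and Ecosystem names remain exported by the package):

    import axelrod as axl

    players = [axl.Cooperator(), axl.Defector(), axl.TitForTat()]
    tournament = axl.Tournament(players, turns=20, repetitions=3)
    results = tournament.play(progress_bar=False)

    eco = axl.Ecosystem(results)
    eco.reproduce(100)
    print(eco.population_sizes[-1])  # final, normalised population shares
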
+
+    Yields
+    ------
+    Successive powers (mat ^ k) * initial
+    """
+
+    vec = initial
+    while True:
+        vec = _normalise(numpy.dot(mat, vec))
+        yield vec
+
+
+def principal_eigenvector(
+    mat: numpy.array, maximum_iterations=1000, max_error=1e-3
+) -> Tuple[numpy.ndarray, float]:
+    """
+    Computes the (normalised) principal eigenvector of the given matrix.
+
+    Params
+    ------
+    mat: numpy.array
+        The matrix to use for multiplication iteration
+    maximum_iterations: int, None
+        The maximum number of iterations of the approximation
+    max_error: float, 1e-3
+        Exit criterion -- error threshold of the difference of successive steps
+
+    Returns
+    -------
+    ndarray
+        Eigenvector estimate for the input matrix
+    float
+        Eigenvalue corresponding to the returned eigenvector
+    """
+
+    mat_ = numpy.array(mat)
+    size = mat_.shape[0]
+    initial = numpy.ones(size)
+
+    # Power iteration
+    if not maximum_iterations:
+        maximum_iterations = float("inf")
+    last = initial
+    for i, vector in enumerate(_power_iteration(mat, initial=initial)):
+        if i > maximum_iterations:
+            break
+        if _squared_error(vector, last) < max_error:
+            break
+        last = vector
+    # Compute the eigenvalue (Rayleigh quotient)
+    eigenvalue = ((mat_ @ vector) @ vector) / (vector @ vector)
+    # Liberate the eigenvalue from numpy
+    eigenvalue = float(eigenvalue)
+    return vector, eigenvalue
diff --git a/axelrod/evolvable_player.py b/axelrod/evolvable_player.py
new file mode 100644
index 000000000..68681b250
--- /dev/null
+++ b/axelrod/evolvable_player.py
@@ -0,0 +1,88 @@
+import base64
+from pickle import dumps, loads
+from random import randrange
+from typing import Dict, List
+from .player import IpdPlayer
+
+
+class InsufficientParametersError(Exception):
+    """Error indicating that insufficient parameters were specified to initialize an Evolvable IpdPlayer."""
+    def __init__(self, *args):
+        super().__init__(*args)
+
+
+class EvolvablePlayer(IpdPlayer):
+    """A class for a player that can evolve, for use in the Moran process or with reinforcement learning algorithms.
+
+    This is an abstract base class, not intended to be used directly.
+    """
+
+    name = "EvolvablePlayer"
+    parent_class = IpdPlayer
+    parent_kwargs = []  # type: List[str]
+
+    def overwrite_init_kwargs(self, **kwargs):
+        """Use to overwrite parameters for proper cloning and testing."""
+        for k, v in kwargs.items():
+            self.init_kwargs[k] = v
+
+    def create_new(self, **kwargs):
+        """Creates a new variant with parameters overwritten by kwargs."""
+        init_kwargs = self.init_kwargs.copy()
+        init_kwargs.update(kwargs)
+        return self.__class__(**init_kwargs)
+
+    # Serialization and deserialization. You may overwrite to obtain more human-readable serializations
+    # but you must overwrite both.
+
+    def serialize_parameters(self):
+        """Serialize parameters."""
+        pickled = dumps(self.init_kwargs)  # bytes
+        s = base64.b64encode(pickled).decode('utf8')  # string
+        return s
+
+    @classmethod
+    def deserialize_parameters(cls, serialized):
+        """Deserialize parameters to an IpdPlayer instance."""
+        init_kwargs = loads(base64.b64decode(serialized))
+        return cls(**init_kwargs)
+
+    # Optional methods for evolutionary algorithms and Moran processes.
+
+    def mutate(self):
+        """Optional method to allow IpdPlayer to produce a variant (not in place)."""
+        pass  # pragma: no cover
+
+    def crossover(self, other):
+        """Optional method to allow IpdPlayer to produce variants in combination with another player.
Returns a new + IpdPlayer.""" + pass # pragma: no cover + + # Optional methods for particle swarm algorithm. + + def receive_vector(self, vector): + """Receive a vector of params and overwrite the IpdPlayer.""" + pass # pragma: no cover + + def create_vector_bounds(self): + """Creates the bounds for the decision variables for Particle Swarm Algorithm.""" + pass # pragma: no cover + + +def copy_lists(lists: List[List]) -> List[List]: + return list(map(list, lists)) + + +def crossover_lists(list1: List, list2: List) -> List: + cross_point = randrange(len(list1)) + new_list = list(list1[:cross_point]) + list(list2[cross_point:]) + return new_list + + +def crossover_dictionaries(table1: Dict, table2: Dict) -> Dict: + keys = list(table1.keys()) + cross_point = randrange(len(keys)) + new_items = [(k, table1[k]) for k in keys[:cross_point]] + new_items += [(k, table2[k]) for k in keys[cross_point:]] + new_table = dict(new_items) + return new_table diff --git a/axelrod/fingerprint.py b/axelrod/fingerprint.py new file mode 100644 index 000000000..cd14ea414 --- /dev/null +++ b/axelrod/fingerprint.py @@ -0,0 +1,611 @@ +import os +from collections import namedtuple +from tempfile import mkstemp +from typing import Any, List, Union + +import dask.dataframe as dd +import matplotlib.pyplot as plt +import numpy as np +import tqdm +from mpl_toolkits.axes_grid1 import make_axes_locatable + +import axelrod as axl +from axelrod import IpdPlayer +from axelrod.interaction_utils import ( + compute_final_score_per_turn, + read_interactions_from_file, +) +from axelrod.strategy_transformers import DualTransformer, JossAnnTransformer + +Point = namedtuple("Point", "x y") + + +def _create_points(step: float, progress_bar: bool = True) -> List[Point]: + """Creates a set of Points over the unit square. + + A Point has coordinates (x, y). This function constructs points that are + separated by a step equal to `step`. The points are over the unit + square which implies that the number created will be (1/`step` + 1)^2. + + Parameters + ---------- + step : float + The separation between each Point. Smaller steps will produce more + Points with coordinates that will be closer together. + progress_bar : bool + Whether or not to create a progress bar which will be updated + + Returns + ---------- + points : list + of Point objects with coordinates (x, y) + """ + num = int((1 / step) // 1) + 1 + + if progress_bar: + p_bar = tqdm.tqdm(total=num ** 2, desc="Generating points") + + points = [] + for x in np.linspace(0, 1, num): + for y in np.linspace(0, 1, num): + points.append(Point(x, y)) + + if progress_bar: + p_bar.update() + + if progress_bar: + p_bar.close() + + return points + + +def _create_jossann(point: Point, probe: Any) -> IpdPlayer: + """Creates a JossAnn probe player that matches the Point. + + If the coordinates of point sums to more than 1 the parameters are + flipped and subtracted from 1 to give meaningful probabilities. We also + use the Dual of the probe. This is outlined further in [Ashlock2010]_. + + Parameters + ---------- + point : Point + probe : class or instance + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. + + Returns + ---------- + joss_ann: Joss-AnnTitForTat object + `JossAnnTransformer` with parameters that correspond to `point`. 
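
For instance, a probe at Point(0.2, 0.3) keeps x + y < 1 and so gets the plain Joss-Ann transform, while Point(0.8, 0.7) is built from the Dual; a sketch using the (private) helper above:

    import axelrod as axl
    from axelrod.fingerprint import Point, _create_jossann

    probe = _create_jossann(Point(0.2, 0.3), axl.TitForTat)
    dual_probe = _create_jossann(Point(0.8, 0.7), axl.TitForTat)
    print(probe, dual_probe)  # Joss-Ann and Dual Joss-Ann variants of Tit For Tat
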
+ """ + x, y = point + + if isinstance(probe, axl.IpdPlayer): + init_kwargs = probe.init_kwargs + probe = probe.__class__ + else: + init_kwargs = {} + + if x + y >= 1: + joss_ann = DualTransformer()(JossAnnTransformer((1 - x, 1 - y))(probe))( + **init_kwargs + ) + else: + joss_ann = JossAnnTransformer((x, y))(probe)(**init_kwargs) + return joss_ann + + +def _create_probes( + probe: Union[type, IpdPlayer], points: list, progress_bar: bool = True +) -> List[IpdPlayer]: + """Creates a set of probe strategies over the unit square. + + Constructs probe strategies that correspond to points with coordinates + (x, y). The probes are created using the `JossAnnTransformer`. + + Parameters + ---------- + probe : class or instance + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. + points : list + of Point objects with coordinates (x, y) + progress_bar : bool + Whether or not to create a progress bar which will be updated + + Returns + ---------- + probes : list + A list of `JossAnnTransformer` players with parameters that + correspond to point. + """ + if progress_bar: + points = tqdm.tqdm(points, desc="Generating probes") + probes = [_create_jossann(point, probe) for point in points] + return probes + + +def _create_edges(points: List[Point], progress_bar: bool = True) -> list: + """Creates a set of edges for a spatial tournament. + + Constructs edges that correspond to `points`. All edges begin at 0, and + connect to the index +1 of the probe. + + Parameters + ---------- + points : list + of Point objects with coordinates (x, y) + progress_bar : bool + Whether or not to create a progress bar which will be updated + + + Returns + ---------- + edges : list of tuples + A list containing tuples of length 2. All tuples will have 0 as the + first element. The second element is the index of the + corresponding probe (+1 to allow for including the Strategy). + """ + if progress_bar: + points = tqdm.tqdm(points, desc="Generating network edges") + edges = [(0, index + 1) for index, point in enumerate(points)] + return edges + + +def _generate_data(interactions: dict, points: list, edges: list) -> dict: + """Generates useful data from a spatial tournament. + + Matches interactions from `results` to their corresponding Point in + `probe_points`. + + Parameters + ---------- + interactions : dict + A dictionary mapping edges to the corresponding interactions of + those players. + points : list + of Point objects with coordinates (x, y). + edges : list of tuples + A list containing tuples of length 2. All tuples will have either 0 + or 1 as the first element. The second element is the index of the + corresponding probe (+1 to allow for including the Strategy). + + Returns + ---------- + point_scores : dict + A dictionary where the keys are Points of the form (x, y) and + the values are the mean score for the corresponding interactions. + """ + edge_scores = [ + np.mean( + [compute_final_score_per_turn(scores)[0] for scores in interactions[edge]] + ) + for edge in edges + ] + point_scores = dict(zip(points, edge_scores)) + return point_scores + + +def _reshape_data(data: dict, points: list, size: int) -> np.ndarray: + """Shape the data so that it can be plotted easily. + + Parameters + ---------- + data : dictionary + A dictionary where the keys are Points of the form (x, y) and + the values are the mean score for the corresponding interactions. + + points : list + of Point objects with coordinates (x, y). + + size : int + The number of Points in every row/column. 
+ + Returns + ---------- + plotting_data : list + 2-D numpy array of the scores, correctly shaped to ensure that the + score corresponding to Point (0, 0) is in the left hand corner ie. + the standard origin. + """ + ordered_data = [data[point] for point in points] + shaped_data = np.reshape(ordered_data, (size, size), order="F") + plotting_data = np.flipud(shaped_data) + return plotting_data + + +class AshlockFingerprint(object): + def __init__( + self, strategy: Union[type, IpdPlayer], probe: Union[type, IpdPlayer] = axl.TitForTat + ) -> None: + """ + Parameters + ---------- + strategy : class or instance + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. + probe : class or instance + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. + Default: Tit For Tat + """ + self.strategy = strategy + self.probe = probe + + def _construct_tournament_elements( + self, step: float, progress_bar: bool = True + ) -> tuple: + """Build the elements required for a spatial tournament + + Parameters + ---------- + step : float + The separation between each Point. Smaller steps will + produce more Points that will be closer together. + progress_bar : bool + Whether or not to create a progress bar which will be updated + + + Returns + ---------- + edges : list of tuples + A list containing tuples of length 2. All tuples will have either 0 + or 1 as the first element. The second element is the index of the + corresponding probe (+1 to allow for including the Strategy). + + tournament_players : list + A list containing instances of axelrodPlayer. The first item is the + original player, the rest are the probes. + + """ + self.points = _create_points(step, progress_bar=progress_bar) + edges = _create_edges(self.points, progress_bar=progress_bar) + probe_players = _create_probes( + self.probe, self.points, progress_bar=progress_bar + ) + + if isinstance(self.strategy, axl.IpdPlayer): + tournament_players = [self.strategy] + probe_players + else: + tournament_players = [self.strategy()] + probe_players + + return edges, tournament_players + + def fingerprint( + self, + turns: int = 50, + repetitions: int = 10, + step: float = 0.01, + processes: int = None, + filename: str = None, + progress_bar: bool = True, + ) -> dict: + """Build and play the spatial tournament. + + Creates the probes and their edges then builds a spatial tournament. + When the coordinates of the probe sum to more than 1, the flip_plays of the + probe is taken instead and then the Joss-Ann Transformer is applied. If + the coordinates sum to less than 1 (or equal), then only the Joss-Ann is + applied, a flip_plays is not required. + + Parameters + ---------- + turns : int, optional + The number of turns per match + repetitions : int, optional + The number of times the round robin should be repeated + step : float, optional + The separation between each Point. Smaller steps will + produce more Points that will be closer together. + processes : int, optional + The number of processes to be used for parallel processing + filename: str, optional + The name of the file for self.spatial_tournament's interactions. + if None, will auto-generate a filename. + progress_bar : bool + Whether or not to create a progress bar which will be updated + + Returns + ---------- + self.data : dict + A dictionary where the keys are coordinates of the form (x, y) and + the values are the mean score for the corresponding interactions. 
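
Putting the pieces together, a small, coarse-grained fingerprint run; parameters are kept tiny so it finishes quickly (assumes the usual top-level exports):

    import axelrod as axl

    af = axl.AshlockFingerprint(axl.WinStayLoseShift(), probe=axl.TitForTat)
    data = af.fingerprint(turns=10, repetitions=2, step=0.25, progress_bar=False)
    fig = af.plot(title="Win-Stay Lose-Shift fingerprint")
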
+ """ + + temp_file_descriptor = None + if filename is None: + temp_file_descriptor, filename = mkstemp() # type: ignore + + edges, tourn_players = self._construct_tournament_elements( + step, progress_bar=progress_bar + ) + + self.step = step + self.spatial_tournament = axl.IpdTournament( + tourn_players, turns=turns, repetitions=repetitions, edges=edges + ) + self.spatial_tournament.play( + build_results=False, + filename=filename, + processes=processes, + progress_bar=progress_bar, + ) + + self.interactions = read_interactions_from_file( + filename, progress_bar=progress_bar + ) + + if temp_file_descriptor is not None: + assert filename is not None + os.close(temp_file_descriptor) + os.remove(filename) + + self.data = _generate_data(self.interactions, self.points, edges) + return self.data + + def plot( + self, + cmap: str = "seismic", + interpolation: str = "none", + title: str = None, + colorbar: bool = True, + labels: bool = True, + ) -> plt.Figure: + """Plot the results of the spatial tournament. + + Parameters + ---------- + cmap : str, optional + A matplotlib colour map, full list can be found at + http://matplotlib.org/examples/color/colormaps_reference.html + interpolation : str, optional + A matplotlib interpolation, full list can be found at + http://matplotlib.org/examples/images_contours_and_fields/interpolation_methods.html + title : str, optional + A title for the plot + colorbar : bool, optional + Choose whether the colorbar should be included or not + labels : bool, optional + Choose whether the axis labels and ticks should be included + + Returns + ---------- + figure : matplotlib figure + A heat plot of the results of the spatial tournament + """ + size = int((1 / self.step) // 1) + 1 + plotting_data = _reshape_data(self.data, self.points, size) + fig, ax = plt.subplots() + cax = ax.imshow(plotting_data, cmap=cmap, interpolation=interpolation) + + if colorbar: + max_score = max(self.data.values()) + min_score = min(self.data.values()) + ticks = [min_score, (max_score + min_score) / 2, max_score] + fig.colorbar(cax, ticks=ticks) + + plt.xlabel("$x$") + plt.ylabel("$y$", rotation=0) + ax.tick_params(axis="both", which="both", length=0) + plt.xticks([0, len(plotting_data) - 1], ["0", "1"]) + plt.yticks([0, len(plotting_data) - 1], ["1", "0"]) + + if not labels: + plt.axis("off") + + if title is not None: + plt.title(title) + return fig + + +class TransitiveFingerprint(object): + def __init__(self, strategy, opponents=None, number_of_opponents=50): + """ + Parameters + ---------- + strategy : class or instance + A class that must be descended from axelrodPlayer or an instance of + axelrodPlayer. + opponents : list of instances + A list that contains a list of opponents + Default: A spectrum of Random players + number_of_opponents: int + The number of Random opponents + Default: 50 + """ + self.strategy = strategy + + if opponents is None: + self.opponents = [ + axl.Random(p) for p in np.linspace(0, 1, number_of_opponents) + ] + else: + self.opponents = opponents + + def fingerprint( + self, + turns: int = 50, + repetitions: int = 1000, + noise: float = None, + processes: int = None, + filename: str = None, + progress_bar: bool = True, + ) -> np.array: + """Creates a spatial tournament to run the necessary matches to obtain + fingerprint data. + + Creates the opponents and their edges then builds a spatial tournament. 
+ + Parameters + ---------- + turns : int, optional + The number of turns per match + repetitions : int, optional + The number of times the round robin should be repeated + noise : float, optional + The probability that a player's intended action should be flipped + processes : int, optional + The number of processes to be used for parallel processing + filename: str, optional + The name of the file for spatial tournament's interactions. + if None, a filename will be generated. + progress_bar : bool + Whether or not to create a progress bar which will be updated + + Returns + ---------- + self.data : np.array + A numpy array containing the mean cooperation rate against each + opponent in each turn. The ith row corresponds to the ith opponent + and the jth column the jth turn. + """ + + if isinstance(self.strategy, axl.IpdPlayer): + players = [self.strategy] + self.opponents + else: + players = [self.strategy()] + self.opponents + + temp_file_descriptor = None + if filename is None: + temp_file_descriptor, filename = mkstemp() # type: ignore + + edges = [(0, k + 1) for k in range(len(self.opponents))] + tournament = axl.IpdTournament( + players=players, + edges=edges, + turns=turns, + noise=noise, + repetitions=repetitions, + ) + tournament.play( + filename=filename, + build_results=False, + progress_bar=progress_bar, + processes=processes, + ) + + self.data = self.analyse_cooperation_ratio(filename) + + if temp_file_descriptor is not None: + assert filename is not None + os.close(temp_file_descriptor) + os.remove(filename) + + return self.data + + @staticmethod + def analyse_cooperation_ratio(filename): + """Generates the data used from the tournament + + Return an M by N array where M is the number of opponents and N is the + number of turns. + + Parameters + ---------- + filename : str + The filename of the interactions + + Returns + ---------- + self.data : np.array + A numpy array containing the mean cooperation rate against each + opponent in each turn. The ith row corresponds to the ith opponent + and the jth column the jth turn. + """ + did_c = np.vectorize(lambda actions: [int(action == "C") for action in actions]) + + cooperation_rates = {} + df = dd.read_csv(filename) + # We ignore the actions of all opponents. So we filter the dataframe to + # only include the results of the player with index `0`. + df = df[df["Player index"] == 0][["Opponent index", "Actions"]] + + for _, row in df.iterrows(): + opponent_index, player_history = row["Opponent index"], row["Actions"] + if opponent_index in cooperation_rates: + cooperation_rates[opponent_index].append(did_c(player_history)) + else: + cooperation_rates[opponent_index] = [did_c(player_history)] + + for index, rates in cooperation_rates.items(): + cooperation_rates[index] = np.mean(rates, axis=0) + + return np.array( + [cooperation_rates[index] for index in sorted(cooperation_rates)] + ) + + def plot( + self, + cmap: str = "viridis", + interpolation: str = "none", + title: str = None, + colorbar: bool = True, + labels: bool = True, + display_names: bool = False, + ax: plt.Figure = None, + ) -> plt.Figure: + """Plot the results of the spatial tournament. 
+ Parameters
+ ----------
+ cmap : str, optional
+ A matplotlib colour map, full list can be found at
+ http://matplotlib.org/examples/color/colormaps_reference.html
+ interpolation : str, optional
+ A matplotlib interpolation, full list can be found at
+ http://matplotlib.org/examples/images_contours_and_fields/interpolation_methods.html
+ title : str, optional
+ A title for the plot
+ colorbar : bool, optional
+ Choose whether the colorbar should be included or not
+ labels : bool, optional
+ Choose whether the axis labels and ticks should be included
+ display_names : bool, optional
+ Choose whether to display the names of the strategies
+ ax: matplotlib axis
+ Allows the plot to be written to a given matplotlib axis.
+ Default is None.
+
+ Returns
+ ----------
+ figure : matplotlib figure
+ A heat plot of the results of the spatial tournament
+ """
+ if ax is None:
+ fig, ax = plt.subplots()
+ else:
+ ax = ax
+
+ fig = ax.get_figure()
+ mat = ax.imshow(self.data, cmap=cmap, interpolation=interpolation)
+
+ width = len(self.data) / 2
+ height = width
+ fig.set_size_inches(width, height)
+
+ plt.xlabel("turns")
+ ax.tick_params(axis="both", which="both", length=0)
+
+ if display_names:
+ plt.yticks(
+ range(len(self.opponents)), [str(player) for player in self.opponents]
+ )
+ else:
+ plt.yticks([0, len(self.opponents) - 1], [0, 1])
+ plt.ylabel("Probability of cooperation")
+
+ if not labels:
+ plt.axis("off")
+
+ if title is not None:
+ plt.title(title)
+
+ if colorbar:
+ min_score = 0
+ max_score = 1
+ ticks = [min_score, 1 / 2, max_score]
+
+ divider = make_axes_locatable(ax)
+ cax = divider.append_axes("right", size="5%", pad=0.2)
+ cbar = fig.colorbar(mat, cax=cax, ticks=ticks)
+
+ plt.tight_layout()
+ return fig
diff --git a/axelrod/game.py b/axelrod/game.py
new file mode 100644
index 000000000..c6487e783
--- /dev/null
+++ b/axelrod/game.py
@@ -0,0 +1,73 @@
+from typing import Tuple, Union
+
+from axelrod import Action, BaseGame
+
+C, D = Action.C, Action.D
+
+Score = Union[int, float]
+
+
+class Game(): # pragma: no cover
+ def __init__(self):
+ raise DeprecationWarning("Please change path from axelrod.game.Game to axelrod.Game")
+
+
+class IpdGame(BaseGame):
+ """Container for the game matrix and scoring logic.
+
+ Attributes
+ ----------
+ scores: dict
+ Maps each pair of actions to the corresponding pair of scores.
+ """
+
+ def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> None:
+ """Create a new game object.
+
+ Parameters
+ ----------
+ r: int or float
+ Score obtained by both players for mutual cooperation.
+ s: int or float
+ Score obtained by a player for cooperating against a defector.
+ t: int or float
+ Score obtained by a player for defecting against a cooperator.
+ p: int or float
+ Score obtained by both players for mutual defection.
+ """
+ self.scores = {(C, C): (r, r), (D, D): (p, p), (C, D): (s, t), (D, C): (t, s)}
+ super().__init__()
+
+ def RPST(self) -> Tuple[Score, Score, Score, Score]:
+ """Returns game matrix values in Press and Dyson notation."""
+ R = self.scores[(C, C)][0]
+ P = self.scores[(D, D)][0]
+ S = self.scores[(C, D)][0]
+ T = self.scores[(D, C)][0]
+ return R, P, S, T
+
+ def score(self, pair: Tuple[Action, Action]) -> Tuple[Score, Score]:
+ """Returns the appropriate score for a decision pair.
+
+ Parameters
+ ----------
+ pair: tuple(Action, Action)
+ A pair of actions for two players, for example (C, C).
+
+ Returns
+ -------
+ tuple of int or float
+ Scores for the two players resulting from their actions.
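+
+ For example, with the default game, score((C, D)) returns (0, 5):
+ the cooperating player receives the sucker payoff S = 0 and the
+ defecting player the temptation payoff T = 5.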
+ """ + return self.scores[pair] + + def __repr__(self) -> str: + return "Axelrod game: (R,P,S,T) = {}".format(self.RPST()) + + def __eq__(self, other): + if not isinstance(other, IpdGame): + return False + return self.RPST() == other.RPST() + + +DefaultGame = IpdGame() diff --git a/axelrod/graph.py b/axelrod/graph.py new file mode 100644 index 000000000..9f41bde4d --- /dev/null +++ b/axelrod/graph.py @@ -0,0 +1,166 @@ +"""Weighted undirected sparse graphs. + +Original source: +https://github.com/marcharper/stationary/blob/master/stationary/utils/graph.py +""" + +from collections import defaultdict + + +class Graph(object): + """Weighted and directed graph class. + + This class is intended for the graph associated to a Markov process, + since it gives easy access to the neighbors of a particular state. + + Vertices can be any hashable Python object. + + Initialize with a list of edges: + [[node1, node2, weights], ...] + Weights can be omitted for an undirected graph. + + For efficiency, neighbors are cached in dictionaries. Undirected + graphs are implemented as directed graphs in which every edge (s, t) + has the opposite edge (t, s). + + Attributes + ---------- + directed: Boolean indicating whether the graph is directed + original_edges: the edges passed into the initializer + out_mapping: a dictionary mapping all heads to dictionaries that map + all tails to their edge weights (None means no weight) + in_mapping: a dictionary mapping all tails to dictionaries that map + all heads to their edge weights (none means to weight) + + Properties + ---------- + vertices: the set of vertices in the graph + edges: the set of current edges in the graph + """ + + def __init__(self, edges=None, directed=False): + self.directed = directed + self.original_edges = edges + self.out_mapping = defaultdict(lambda: defaultdict(float)) + self.in_mapping = defaultdict(lambda: defaultdict(float)) + self._edges = [] + if edges: + self._add_edges(edges) + + def _add_edge(self, source, target, weight=None): + if (source, target) not in self._edges: + self._edges.append((source, target)) + self.out_mapping[source][target] = weight + self.in_mapping[target][source] = weight + if ( + not self.directed + and (source != target) + and (target, source) not in self._edges + ): + self._edges.append((target, source)) + self.out_mapping[target][source] = weight + self.in_mapping[source][target] = weight + + def _add_edges(self, edges): + for edge in edges: + self._add_edge(*edge) + + def add_loops(self): + """ + Add all loops to edges + """ + self._add_edges((x, x) for x in self.vertices) + + @property + def edges(self): + return self._edges + + @property + def vertices(self): + return list(self.out_mapping.keys()) + + def out_dict(self, source): + """Returns a dictionary of the outgoing edges of source with weights.""" + return self.out_mapping[source] + + def out_vertices(self, source): + """Returns a list of the outgoing vertices.""" + return list(self.out_mapping[source].keys()) + + def in_dict(self, target): + """Returns a dictionary of the incoming edges of source with weights.""" + return self.in_mapping[target] + + def in_vertices(self, source): + """Returns a list of the outgoing vertices.""" + return list(self.in_mapping[source].keys()) + + def __repr__(self): + s = "".format(repr(self.original_edges)) + return s + + +# Example graph factories. + + +def cycle(length, directed=False): + """Produces a cycle of a specified length. 
+ + Parameters + ---------- + length: int + Number of vertices in the cycle + directed: bool, False + Is the cycle directed? + + Returns + ------- + a Graph object for the cycle + """ + edges = [(i, i + 1) for i in range(length - 1)] + edges.append((length - 1, 0)) + return Graph(edges=edges, directed=directed) + + +def complete_graph(size, loops=True, directed=False): + """ + Produces a complete graph of size `length`. + https://en.wikipedia.org/wiki/Complete_graph + + Parameters + ---------- + size: int + Number of vertices in the cycle + loops: bool, True + attach loops at each node? + directed: bool, False + Is the graph directed? + + Returns + ------- + a Graph object for the complete graph + """ + edges = [(i, j) for i in range(size) for j in range(i + 1, size)] + graph = Graph(directed=directed, edges=edges) + if loops: + graph.add_loops() + return graph + + +def attached_complete_graphs(length, loops=True, directed=False): + """Creates two complete undirected graphs of size `length` + attached by a single edge.""" + edges = [] + # Two complete graphs + for cluster in range(2): + for i in range(length): + for j in range(i + 1, length): + edges.append(("{}:{}".format(cluster, i), + "{}:{}".format(cluster, j))) + # Attach at one node + edges.append(("0:0", "1:0")) + graph = Graph(directed=directed, edges=edges) + if loops: + graph.add_loops() + + return graph diff --git a/axelrod/history.py b/axelrod/history.py new file mode 100644 index 000000000..9e21b04c7 --- /dev/null +++ b/axelrod/history.py @@ -0,0 +1,133 @@ +from collections import Counter + +from axelrod.action import Action, actions_to_str + +C, D = Action.C, Action.D + + +class History(object): + """ + History class to track the history of play and metadata including + the number of cooperations and defections, and if available, the + opponents plays and the state distribution of the history of play. + """ + + def __init__(self, plays=None, coplays=None): + """ + Parameters + ---------- + plays: + An ordered iterable of the actions of the player. + coplays: + An ordered iterable of the actions of the coplayer (aka opponent). + """ + self._plays = [] + # Coplays is tracked mainly for computation of the state distribution + # when cloning or dualing. + self._coplays = [] + self._actions = Counter() + self._state_distribution = Counter() + if plays: + self.extend(plays, coplays) + + def append(self, play, coplay): + """Appends a new (play, coplay) pair an updates metadata for + number of cooperations and defections, and the state distribution.""" + self._plays.append(play) + self._actions[play] += 1 + self._coplays.append(coplay) + self._state_distribution[(play, coplay)] += 1 + + def copy(self): + """Returns a new object with the same data.""" + return self.__class__(plays=self._plays, coplays=self._coplays) + + def flip_plays(self): + """Creates a flipped plays history for use with DualTransformer.""" + flipped_plays = [action.flip() for action in self._plays] + return self.__class__(plays=flipped_plays, coplays=self._coplays) + + def extend(self, plays, coplays): + """A function that emulates list.extend.""" + # We could repeatedly call self.append but this is more efficient. 
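+ # list.extend, Counter.update and zip() process the new plays in
+ # bulk, keeping the counters and the state distribution identical to
+ # what repeated append calls would produce.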
+ self._plays.extend(plays) + self._actions.update(plays) + self._coplays.extend(coplays) + self._state_distribution.update(zip(plays, coplays)) + + def reset(self): + """Clears all data in the History object.""" + self._plays.clear() + self._coplays.clear() + self._actions.clear() + self._state_distribution.clear() + + @property + def coplays(self): + return self._coplays + + @property + def cooperations(self): + return self._actions[C] + + @property + def defections(self): + return self._actions[D] + + @property + def state_distribution(self): + return self._state_distribution + + def __eq__(self, other): + if isinstance(other, list): + return self._plays == other + elif isinstance(other, History): + return self._plays == other._plays and self._coplays == other._coplays + raise TypeError("Cannot compare types.") + + def __getitem__(self, key): + # Passthrough keys and slice objects + return self._plays[key] + + def __str__(self): + return actions_to_str(self._plays) + + def __list__(self): + return self._plays + + def __len__(self): + return len(self._plays) + + def __repr__(self): + return repr(self.__list__()) + + +class LimitedHistory(History): + """ + History class that only tracks the last N rounds. Used for testing memory + depth. + """ + + def __init__(self, memory_depth): + """ + Parameters + ---------- + memory_depth, int: + length of history to retain + """ + super().__init__() + self.memory_depth = memory_depth + + def append(self, play, coplay): + """Appends a new (play, coplay) pair an updates metadata for + number of cooperations and defections, and the state distribution.""" + + self._plays.append(play) + self._actions[play] += 1 + if coplay: + self._coplays.append(coplay) + self._state_distribution[(play, coplay)] += 1 + if len(self._plays) > self.memory_depth: + first_play, first_coplay = self._plays.pop(0), self._coplays.pop(0) + self._actions[first_play] -= 1 + self._state_distribution[(first_play, first_coplay)] -= 1 diff --git a/axelrod/interaction_utils.py b/axelrod/interaction_utils.py new file mode 100644 index 000000000..f6416be2c --- /dev/null +++ b/axelrod/interaction_utils.py @@ -0,0 +1,286 @@ +""" +Functions to calculate results from interactions. Interactions are lists of the +form: + + [(C, D), (D, C),...] + +This is used by both the IpdMatch class and the ResultSet class which analyse +interactions. 
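+
+For example, under the default game the interaction above scores (0, 5)
+on the first turn and (5, 0) on the second, so compute_final_score
+returns (5, 5).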
+""" +from collections import Counter, defaultdict + +import pandas as pd +import tqdm +from axelrod.action import Action, str_to_actions + +from .game import IpdGame + +C, D = Action.C, Action.D + + +def compute_scores(interactions, game=None): + """Returns the scores of a given set of interactions.""" + if not game: + game = IpdGame() + return [game.score(plays) for plays in interactions] + + +def compute_final_score(interactions, game=None): + """Returns the final score of a given set of interactions.""" + scores = compute_scores(interactions, game) + if len(scores) == 0: + return None + + final_score = tuple( + sum([score[player_index] for score in scores]) for player_index in [0, 1] + ) + return final_score + + +def compute_final_score_per_turn(interactions, game=None): + """Returns the mean score per round for a set of interactions""" + scores = compute_scores(interactions, game) + num_turns = len(interactions) + + if len(scores) == 0: + return None + + final_score_per_turn = tuple( + sum([score[player_index] for score in scores]) / num_turns + for player_index in [0, 1] + ) + return final_score_per_turn + + +def compute_winner_index(interactions, game=None): + """Returns the index of the winner of the IpdMatch""" + scores = compute_final_score(interactions, game) + + if scores is not None: + if scores[0] == scores[1]: + return False # No winner + return max([0, 1], key=lambda i: scores[i]) + return None + + +def compute_cooperations(interactions): + """Returns the count of cooperations by each player for a set of + interactions""" + + if len(interactions) == 0: + return None + + cooperation = tuple( + sum([play[player_index] == C for play in interactions]) + for player_index in [0, 1] + ) + return cooperation + + +def compute_normalised_cooperation(interactions): + """Returns the count of cooperations by each player per turn for a set of + interactions""" + if len(interactions) == 0: + return None + + num_turns = len(interactions) + cooperation = compute_cooperations(interactions) + + normalised_cooperation = tuple([c / num_turns for c in cooperation]) + + return normalised_cooperation + + +def compute_state_distribution(interactions): + """ + Returns the count of each state for a set of interactions. + + Parameters + ---------- + interactions : list of tuples + A list containing the interactions of the match as shown at the top of + this file. + + Returns + ---------- + Counter(interactions) : Counter Object + Dictionary where the keys are the states and the values are the number + of times that state occurs. + """ + if not interactions: + return None + return Counter(interactions) + + +def compute_normalised_state_distribution(interactions): + """ + Returns the normalized count of each state for a set of interactions. + + Parameters + ---------- + interactions : list of tuples + A list containing the interactions of the match as shown at the top of + this file. + + Returns + ---------- + normalized_count : Counter Object + Dictionary where the keys are the states and the values are a normalized + count of the number of times that state occurs. + """ + if not interactions: + return None + + interactions_count = Counter(interactions) + total = sum(interactions_count.values(), 0) + + normalized_count = Counter( + {key: value / total for key, value in interactions_count.items()} + ) + return normalized_count + + +def compute_state_to_action_distribution(interactions): + """ + Returns a list (for each player) of counts of each state to action pair + for a set of interactions. 
A state to action pair is of the form: + + ((C, D), C) + + Implying that from a state of (C, D) (the first player having played C and + the second playing D) the player in question then played C. + + The following counter object implies that the player in question was in + state (C, D) for a total of 12 times, subsequently cooperating 4 times and + defecting 8 times. + + Counter({((C, D), C): 4, ((C, D), D): 8}) + + Parameters + ---------- + interactions : list of tuples + A list containing the interactions of the match as shown at the top of + this file. + + Returns + ---------- + state_to_C_distributions : List of Counter Object + List of Counter objects where the keys are the states and actions and + the values the counts. The + first/second Counter corresponds to the first/second player. + """ + if not interactions: + return None + + distributions = [ + Counter( + [ + (state, outcome[j]) + for state, outcome in zip(interactions, interactions[1:]) + ] + ) + for j in range(2) + ] + return distributions + + +def compute_normalised_state_to_action_distribution(interactions): + """ + Returns a list (for each player) of normalised counts of each state to action + pair for a set of interactions. A state to action pair is of the form: + + ((C, D), C) + + implying that from a state of (C, D) (the first player having played C and + the second playing D) the player in question then played C. + + The following counter object, implies that the player in question was only + ever in state (C, D), subsequently cooperating 1/3 of the time and defecting + 2/3 times. + + Counter({((C, D), C): 0.333333, ((C, D), D): 0.66666667}) + + Parameters + ---------- + interactions : list of tuples + A list containing the interactions of the match as shown at the top of + this file. + + Returns + ------- + normalised_state_to_C_distributions : List of Counter Object + List of Counter objects where the keys are the states and actions and + the values the normalized counts. The first/second Counter corresponds + to the first/second player. 
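+
+ For example, the counts Counter({((C, D), C): 4, ((C, D), D): 8})
+ normalise to Counter({((C, D), C): 1/3, ((C, D), D): 2/3}).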
+ """ + if not interactions: + return None + + distribution = compute_state_to_action_distribution(interactions) + normalized_distribution = [] + for player in range(2): + counter = {} + for state in [(C, C), (C, D), (D, C), (D, D)]: + C_count = distribution[player].get((state, C), 0) + D_count = distribution[player].get((state, D), 0) + total = C_count + D_count + if total > 0: + if C_count > 0: + counter[(state, C)] = C_count / (C_count + D_count) + if D_count > 0: + counter[(state, D)] = D_count / (C_count + D_count) + normalized_distribution.append(Counter(counter)) + return normalized_distribution + + +def sparkline(actions, c_symbol="█", d_symbol=" "): + return "".join([c_symbol if play == C else d_symbol for play in actions]) + + +def compute_sparklines(interactions, c_symbol="█", d_symbol=" "): + """Returns the sparklines for a set of interactions""" + if len(interactions) == 0: + return None + + histories = list(zip(*interactions)) + return ( + sparkline(histories[0], c_symbol, d_symbol) + + "\n" + + sparkline(histories[1], c_symbol, d_symbol) + ) + + +def read_interactions_from_file(filename, progress_bar=True): + """ + Reads a file and returns a dictionary mapping tuples of player pairs to + lists of interactions + """ + df = pd.read_csv(filename)[ + ["Interaction index", "Player index", "Opponent index", "Actions"] + ] + groupby = df.groupby("Interaction index") + if progress_bar: + groupby = tqdm.tqdm(groupby) + + pairs_to_interactions = defaultdict(list) + for _, d in tqdm.tqdm(groupby): + key = tuple(d[["Player index", "Opponent index"]].iloc[0]) + value = list(map(str_to_actions, zip(*d["Actions"]))) + pairs_to_interactions[key].append(value) + return pairs_to_interactions + + +def string_to_interactions(string): + """ + Converts a compact string representation of an interaction to an + interaction: + + 'CDCDDD' -> [(C, D), (C, D), (D, D)] + """ + interactions = [] + interactions_list = list(string) + while interactions_list: + p1action = Action.from_char(interactions_list.pop(0)) + p2action = Action.from_char(interactions_list.pop(0)) + interactions.append((p1action, p2action)) + return interactions diff --git a/axelrod/load_data_.py b/axelrod/load_data_.py new file mode 100644 index 000000000..ac29250fd --- /dev/null +++ b/axelrod/load_data_.py @@ -0,0 +1,62 @@ +import pathlib +from typing import Dict, List, Text, Tuple + +import pkg_resources + + +def axl_filename(path: pathlib.Path) -> pathlib.Path: + """Given a path under Axelrod/, return absolute filepath. + + Parameters + ---------- + axl_path + A pathlib.Path object with the relative directory under Axelrod/ + + Returns + ------- + A pathlib.Path object with the absolute directory. + """ + # We go up a dir because this code is located in Axelrod/axelrod. 
+ axl_path = pathlib.Path(__file__).resolve().parent.parent + return axl_path / path + + +def load_file(filename: str, directory: str) -> List[List[str]]: + """Loads a data file stored in the Axelrod library's data subdirectory, + likely for parameters for a strategy.""" + path = "/".join((directory, filename)) + data_bytes = pkg_resources.resource_string(__name__, path) + data = data_bytes.decode("UTF-8", "replace") + rows = [] + for line in data.split("\n"): + if line.startswith("#") or len(line) == 0: + continue + s = line.split(", ") + rows.append(s) + return rows + + +def load_weights( + filename: str = "ann_weights.csv", directory: str = "data" +) -> Dict[str, Tuple[int, int, List[float]]]: + """Load Neural Network Weights.""" + rows = load_file(filename, directory) + d = dict() + for row in rows: + name = str(row[0]) + num_features = int(row[1]) + num_hidden = int(row[2]) + weights = list(map(float, row[3:])) + d[name] = (num_features, num_hidden, weights) + return d + + +def load_pso_tables(filename="pso_gambler.csv", directory="data"): + """Load lookup tables.""" + rows = load_file(filename, directory) + d = dict() + for row in rows: + name, a, b, c, = str(row[0]), int(row[1]), int(row[2]), int(row[3]) + values = list(map(float, row[4:])) + d[(name, int(a), int(b), int(c))] = values + return d diff --git a/axelrod/match.py b/axelrod/match.py new file mode 100644 index 000000000..73ba23808 --- /dev/null +++ b/axelrod/match.py @@ -0,0 +1,263 @@ +import random +from math import ceil, log + +import axelrod.interaction_utils as iu +from axelrod import DEFAULT_TURNS +from axelrod.action import Action +from axelrod import Classifiers +from axelrod.game import IpdGame +from axelrod.base_match import BaseMatch +from .deterministic_cache import DeterministicCache + +C, D = Action.C, Action.D + + +class Match(): # pragma: no cover + def __init__(self): + raise DeprecationWarning("Please change path from axelrod.match.Match to axelrod.Match") + + +def is_stochastic(players, noise): + """Determines if a match is stochastic -- true if there is noise or if any + of the players involved is stochastic.""" + return noise or any(map(Classifiers["stochastic"], players)) + + +class IpdMatch(BaseMatch): + """The IpdMatch class conducts matches between two players.""" + + def __init__( + self, + players, + turns=None, + prob_end=None, + game=None, + deterministic_cache=None, + noise=0, + match_attributes=None, + reset=True, + ): + """ + Parameters + ---------- + players : tuple + A pair of Player objects + turns : integer + The number of turns per match + prob_end : float + The probability of a given turn ending a match + game : axelrod.IpdGame + The game object used to score the match + deterministic_cache : axelrod.DeterministicCache + A cache of resulting actions for deterministic matches + noise : float + The probability that a player's intended action should be flipped + match_attributes : dict + Mapping attribute names to values which should be passed to players. + The default is to use the correct values for turns, game and noise + but these can be overridden if desired. 
+ reset : bool
+ Whether to reset players or not
+ """
+
+ defaults = {
+ (True, True): (DEFAULT_TURNS, 0),
+ (True, False): (float("inf"), prob_end),
+ (False, True): (turns, 0),
+ (False, False): (turns, prob_end),
+ }
+ self.turns, self.prob_end = defaults[(turns is None, prob_end is None)]
+
+ self.result = []
+ self.noise = noise
+
+ if game is None:
+ self.game = IpdGame()
+ else:
+ self.game = game
+
+ if deterministic_cache is None:
+ self._cache = DeterministicCache()
+ else:
+ self._cache = deterministic_cache
+
+ if match_attributes is None:
+ known_turns = self.turns if prob_end is None else float("inf")
+ self.match_attributes = {
+ "length": known_turns,
+ "game": self.game,
+ "noise": self.noise,
+ }
+ else:
+ self.match_attributes = match_attributes
+
+ self.players = list(players)
+ self.reset = reset
+
+ super().__init__(
+ players, turns, prob_end, game, noise, match_attributes, reset
+ )
+
+ @property
+ def players(self):
+ return self._players
+
+ @players.setter
+ def players(self, players):
+ """Ensure that players are passed the match attributes"""
+ newplayers = []
+ for player in players:
+ player.set_match_attributes(**self.match_attributes)
+ newplayers.append(player)
+ self._players = newplayers
+
+ @property
+ def _stochastic(self):
+ """
+ A boolean to show whether a match between two players would be
+ stochastic.
+ """
+ return is_stochastic(self.players, self.noise)
+
+ @property
+ def _cache_update_required(self):
+ """
+ A boolean to show whether the deterministic cache should be updated.
+ """
+ return (
+ not self.noise
+ and self._cache.mutable
+ and not (any(Classifiers["stochastic"](p) for p in self.players))
+ )
+
+ def _cached_enough_turns(self, cache_key, turns):
+ """
+ Returns True iff there is an entry in self._cache for the given key
+ and it is at least turns long.
+ """
+ if cache_key not in self._cache:
+ return False
+ return len(self._cache[cache_key]) >= turns
+
+ def play(self):
+ """
+ The resulting list of actions from a match between two players.
+
+ This method determines whether the actions list can be obtained from
+ the deterministic cache and returns it from there if so. If not, it
+ calls the play method for player1 and returns the list from there.
+
+ Returns
+ -------
+ A list of the form:
+
+ e.g. for a 2 turn match between Cooperator and Defector:
+
+ [(C, D), (C, D)]
+
+ i.e. One entry per turn containing a pair of actions.
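+
+ A minimal usage sketch (player construction is illustrative):
+
+ match = IpdMatch((axl.Cooperator(), axl.Defector()), turns=2)
+ match.play() # [(C, D), (C, D)]
+ match.final_score() # (0, 10)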
+ """ + turns = min(sample_length(self.prob_end), self.turns) + cache_key = (self.players[0], self.players[1]) + + if self._stochastic or not self._cached_enough_turns(cache_key, turns): + for p in self.players: + if self.reset: + p.reset() + p.set_match_attributes(**self.match_attributes) + result = [] + for _ in range(turns): + plays = self.players[0].play(self.players[1], self.noise) + result.append(plays) + + if self._cache_update_required: + self._cache[cache_key] = result + else: + result = self._cache[cache_key][:turns] + + self.result = result + return result + + def scores(self): + """Returns the scores of the previous IpdMatch plays.""" + return iu.compute_scores(self.result, self.game) + + def final_score(self): + """Returns the final score for a IpdMatch.""" + return iu.compute_final_score(self.result, self.game) + + def final_score_per_turn(self): + """Returns the mean score per round for a IpdMatch.""" + return iu.compute_final_score_per_turn(self.result, self.game) + + def winner(self): + """Returns the winner of the IpdMatch.""" + winner_index = iu.compute_winner_index(self.result, self.game) + if winner_index is False: # No winner + return False + if winner_index is None: # No plays + return None + return self.players[winner_index] + + def cooperation(self): + """Returns the count of cooperations by each player.""" + return iu.compute_cooperations(self.result) + + def normalised_cooperation(self): + """Returns the count of cooperations by each player per turn.""" + return iu.compute_normalised_cooperation(self.result) + + def state_distribution(self): + """ + Returns the count of each state for a set of interactions. + """ + return iu.compute_state_distribution(self.result) + + def normalised_state_distribution(self): + """ + Returns the normalized count of each state for a set of interactions. + """ + return iu.compute_normalised_state_distribution(self.result) + + def sparklines(self, c_symbol="█", d_symbol=" "): + return iu.compute_sparklines(self.result, c_symbol, d_symbol) + + def __len__(self): + return self.turns + + +def sample_length(prob_end): + """ + Sample length of a game. + + This is using inverse random sample on a probability density function + given by: + + f(n) = p_end * (1 - p_end) ^ (n - 1) + + (So the probability of length n is given by f(n)) + + Which gives cumulative distribution function + : + + F(n) = 1 - (1 - p_end) ^ n + + (So the probability of length less than or equal to n is given by F(n)) + + Which gives for given x = F(n) (ie the random sample) gives n: + + n = ceil((ln(1-x)/ln(1-p_end))) + + This approach of sampling from a distribution is called inverse + transform sampling + . + + Note that this corresponds to sampling at the end of every turn whether + or not the IpdMatch ends. + """ + if prob_end == 0: + return float("inf") + if prob_end == 1: + return 1 + x = random.random() + return int(ceil(log(1 - x) / log(1 - prob_end))) diff --git a/axelrod/match_generator.py b/axelrod/match_generator.py new file mode 100644 index 000000000..d4dca9dc1 --- /dev/null +++ b/axelrod/match_generator.py @@ -0,0 +1,123 @@ +class MatchGenerator(object): + def __init__( + self, + players, + repetitions, + turns=None, + game=None, + noise=0, + prob_end=None, + edges=None, + match_attributes=None, + ): + """ + A class to generate matches. This is used by the IpdTournament class which + is in charge of playing the matches and collecting the results. 
+
+ Parameters
+ ----------
+ players : list
+ A list of axelrod.IpdPlayer objects
+ repetitions : int
+ The number of repetitions of a given match
+ turns : integer
+ The number of turns per match
+ game : axelrod.IpdGame
+ The game object used to score the match
+ noise : float, 0
+ The probability that a player's intended action should be flipped
+ prob_end : float
+ The probability of a given turn ending a match
+ edges : list
+ A list of edges between players
+ match_attributes : dict
+ Mapping attribute names to values which should be passed to players.
+ The default is to use the correct values for turns, game and noise
+ but these can be overridden if desired.
+ """
+ self.players = players
+ self.turns = turns
+ self.game = game
+ self.repetitions = repetitions
+ self.noise = noise
+ self.opponents = players
+ self.prob_end = prob_end
+ self.match_attributes = match_attributes
+
+ self.edges = edges
+ if edges is not None:
+ if not graph_is_connected(edges, players):
+ raise ValueError("The graph edges do not include all players.")
+ self.size = len(edges)
+ else:
+ n = len(self.players)
+ self.size = int(n * (n - 1) // 2 + n)
+
+ def __len__(self):
+ return self.size
+
+ def build_match_chunks(self):
+ """
+ A generator that returns player index pairs and match parameters for a
+ round robin tournament.
+
+ Yields
+ -------
+ tuples
+ ((player1 index, player2 index), match parameters, repetitions)
+ """
+ if self.edges is None:
+ edges = complete_graph(self.players)
+ else:
+ edges = self.edges
+
+ for index_pair in edges:
+ match_params = self.build_single_match_params()
+ yield (index_pair, match_params, self.repetitions)
+
+ def build_single_match_params(self):
+ """
+ Creates a single set of match parameters.
+ """
+ return {
+ "turns": self.turns,
+ "game": self.game,
+ "noise": self.noise,
+ "prob_end": self.prob_end,
+ "match_attributes": self.match_attributes,
+ }
+
+
+def complete_graph(players):
+ """
+ Return a generator of the edges of a complete graph on a set of players.
+ """
+ for player1_index, _ in enumerate(players):
+ for player2_index in range(player1_index, len(players)):
+ yield (player1_index, player2_index)
+
+
+def graph_is_connected(edges, players):
+ """
+ Test if the set of edges defines a graph in which each player is connected
+ to at least one other player. This function does not test if the graph is
+ fully connected in the sense that each node is reachable from every other
+ node.
+
+ Parameters:
+ -----------
+ edges : a list of 2-tuples
+ players : a list of player names
+
+ Returns:
+ --------
+ boolean : True if the graph is connected as specified above.
+ """
+ # Check if all players are connected.
+ player_indices = set(range(len(players)))
+ node_indices = set()
+ for edge in edges:
+ for node in edge:
+ node_indices.add(node)
+
+ return player_indices == node_indices
diff --git a/axelrod/mock_player.py b/axelrod/mock_player.py
new file mode 100644
index 000000000..b83a44bba
--- /dev/null
+++ b/axelrod/mock_player.py
@@ -0,0 +1,29 @@
+from itertools import cycle
+from typing import List
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class MockPlayer(IpdPlayer):
+ """Creates a mock player that plays a given sequence of actions. If
+ no actions are given, plays like Cooperator. Used for testing.
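+
+ For example, MockPlayer(actions=[C, D]) plays C, D, C, D, ... in a
+ repeating cycle regardless of the opponent.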
+ """ + + name = "Mock IpdPlayer" + + def __init__(self, actions: List[Action] = None) -> None: + super().__init__() + if not actions: + actions = [] + self.actions = cycle(actions) + + def strategy(self, opponent: IpdPlayer) -> Action: + # Return the next saved action, if present. + try: + action = self.actions.__next__() + return action + except StopIteration: + return C diff --git a/axelrod/moran.py b/axelrod/moran.py new file mode 100644 index 000000000..1ea3c7e40 --- /dev/null +++ b/axelrod/moran.py @@ -0,0 +1,541 @@ +"""Implementation of the Moran process on Graphs.""" + +import random +from collections import Counter +from typing import Callable, List, Optional, Set, Tuple + +import matplotlib.pyplot as plt +import numpy as np +from axelrod import EvolvablePlayer, DEFAULT_TURNS, IpdGame, IpdPlayer + +from .deterministic_cache import DeterministicCache +from .graph import Graph, complete_graph +from .match import IpdMatch +from .random_ import randrange + + +def fitness_proportionate_selection( + scores: List, fitness_transformation: Callable = None +) -> int: + """Randomly selects an individual proportionally to score. + + Parameters + ---------- + scores: Any sequence of real numbers + fitness_transformation: A function mapping a score to a (non-negative) float + + Returns + ------- + An index of the above list selected at random proportionally to the list + element divided by the total. + """ + if fitness_transformation is None: + csums = np.cumsum(scores) + else: + csums = np.cumsum([fitness_transformation(s) for s in scores]) + total = csums[-1] + r = random.random() * total + + for i, x in enumerate(csums): + if x >= r: + break + return i + + +class MoranProcess(object): + def __init__( + self, + players: List[IpdPlayer], + turns: int = DEFAULT_TURNS, + prob_end: float = None, + noise: float = 0, + game: IpdGame = None, + deterministic_cache: DeterministicCache = None, + mutation_rate: float = 0.0, + mode: str = "bd", + interaction_graph: Graph = None, + reproduction_graph: Graph = None, + fitness_transformation: Callable = None, + mutation_method="transition", + stop_on_fixation=True + ) -> None: + """ + An agent based Moran process class. In each round, each player plays a + IpdMatch with each other player. Players are assigned a fitness score by + their total score from all matches in the round. A player is chosen to + reproduce proportionally to fitness, possibly mutated, and is cloned. + The clone replaces a randomly chosen player. + + If the mutation_rate is 0, the population will eventually fixate on + exactly one player type. In this case a StopIteration exception is + raised and the play stops. If the mutation_rate is not zero, then the + process will iterate indefinitely, so mp.play() will never exit, and + you should use the class as an iterator instead. + + When a player mutates it chooses a random player type from the initial + population. This is not the only method yet emulates the common method + in the literature. + + It is possible to pass interaction graphs and reproduction graphs to the + Moran process. In this case, in each round, each player plays a + IpdMatch with each neighboring player according to the interaction graph. + Players are assigned a fitness score by their total score from all + matches in the round. A player is chosen to reproduce proportionally to + fitness, possibly mutated, and is cloned. The clone replaces a randomly + chosen neighboring player according to the reproduction graph. 
+ + Parameters + ---------- + players + turns: + The number of turns in each pairwise interaction + prob_end : + The probability of a given turn ending a match + noise: + The background noise, if any. Randomly flips plays with probability + `noise`. + game: axelrod.IpdGame + The game object used to score matches. + deterministic_cache: + A optional prebuilt deterministic cache + mutation_rate: + The rate of mutation. Replicating players are mutated with + probability `mutation_rate` + mode: + Birth-Death (bd) or Death-Birth (db) + interaction_graph: Axelrod.graph.Graph + The graph in which the replicators are arranged + reproduction_graph: Axelrod.graph.Graph + The reproduction graph, set equal to the interaction graph if not + given + fitness_transformation: + A function mapping a score to a (non-negative) float + mutation_method: + A string indicating if the mutation method should be between original types ("transition") + or based on the player's mutation method, if present ("atomic"). + stop_on_fixation: + A bool indicating if the process should stop on fixation + """ + self.turns = turns + self.prob_end = prob_end + self.game = game + self.noise = noise + self.initial_players = players # save initial population + self.players = [] # type: List + self.populations = [] # type: List + self.set_players() + self.score_history = [] # type: List + self.winning_strategy_name = None # type: Optional[str] + self.mutation_rate = mutation_rate + self.stop_on_fixation = stop_on_fixation + m = mutation_method.lower() + if m in ["atomic", "transition"]: + self.mutation_method = m + else: + raise ValueError("Invalid mutation method {}".format(mutation_method)) + assert (mutation_rate >= 0) and (mutation_rate <= 1) + assert (noise >= 0) and (noise <= 1) + mode = mode.lower() + assert mode in ["bd", "db"] + self.mode = mode + if deterministic_cache is not None: + self.deterministic_cache = deterministic_cache + else: + self.deterministic_cache = DeterministicCache() + # Build the set of mutation targets + # Determine the number of unique types (players) + keys = set([str(p) for p in players]) + # Create a dictionary mapping each type to a set of representatives + # of the other types + d = dict() + for p in players: + d[str(p)] = p + mutation_targets = dict() + for key in sorted(keys): + mutation_targets[key] = [v for (k, v) in sorted(d.items()) if k != key] + self.mutation_targets = mutation_targets + + if interaction_graph is None: + interaction_graph = complete_graph(len(players), loops=False) + if reproduction_graph is None: + reproduction_graph = Graph( + interaction_graph.edges, directed=interaction_graph.directed + ) + reproduction_graph.add_loops() + # Check equal vertices + v1 = interaction_graph.vertices + v2 = reproduction_graph.vertices + assert list(v1) == list(v2) + self.interaction_graph = interaction_graph + self.reproduction_graph = reproduction_graph + self.fitness_transformation = fitness_transformation + # Map players to graph vertices + self.locations = sorted(interaction_graph.vertices) + self.index = dict(zip(sorted(interaction_graph.vertices), range(len(players)))) + self.fixated = self.fixation_check() + + def set_players(self) -> None: + """Copy the initial players into the first population.""" + self.players = [] + for player in self.initial_players: + player.reset() + self.players.append(player) + self.populations = [self.population_distribution()] + + def mutate(self, index: int) -> IpdPlayer: + """Mutate the player at index. 
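+
+ With the "transition" method a replicating player is, with
+ probability mutation_rate, replaced by a clone of a randomly chosen
+ other type from the initial population; otherwise it is cloned
+ unchanged. With the "atomic" method the player's own mutate() is
+ used.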
+ + Parameters + ---------- + index: + The index of the player to be mutated + """ + + if self.mutation_method == "atomic": + if not issubclass(self.players[index].__class__, EvolvablePlayer): + raise TypeError("Player is not evolvable. Use a subclass of EvolvablePlayer.") + return self.players[index].mutate() + + # Assuming mutation_method == "transition" + if self.mutation_rate > 0: + # Choose another strategy at random from the initial population + r = random.random() + if r < self.mutation_rate: + s = str(self.players[index]) + j = randrange(0, len(self.mutation_targets[s])) + p = self.mutation_targets[s][j] + return p.clone() + # Just clone the player + return self.players[index].clone() + + def death(self, index: int = None) -> int: + """ + Selects the player to be removed. + + Note that the in the birth-death case, the player that is reproducing + may also be replaced. However in the death-birth case, this player will + be excluded from the choices. + + Parameters + ---------- + index: + The index of the player to be removed + """ + if index is None: + # Select a player to be replaced globally + i = randrange(0, len(self.players)) + # Record internally for use in _matchup_indices + self.dead = i + else: + # Select locally + # index is not None in this case + vertex = random.choice( + sorted(self.reproduction_graph.out_vertices(self.locations[index])) + ) + i = self.index[vertex] + return i + + def birth(self, index: int = None) -> int: + """The birth event. + + Parameters + ---------- + index: + The index of the player to be copied + """ + # Compute necessary fitnesses. + scores = self.score_all() + if index is not None: + # Death has already occurred, so remove the dead player from the + # possible choices + scores.pop(index) + # Make sure to get the correct index post-pop + j = fitness_proportionate_selection( + scores, fitness_transformation=self.fitness_transformation + ) + if j >= index: + j += 1 + else: + j = fitness_proportionate_selection( + scores, fitness_transformation=self.fitness_transformation + ) + return j + + def fixation_check(self) -> bool: + """ + Checks if the population is all of a single type + + Returns + ------- + Boolean: + True if fixation has occurred (population all of a single type) + """ + classes = set(str(p) for p in self.players) + self.fixated = False + if len(classes) == 1: + # Set the winning strategy name variable + self.winning_strategy_name = str(self.players[0]) + self.fixated = True + return self.fixated + + def __next__(self) -> object: + """ + Iterate the population: + + - play the round's matches + - chooses a player proportionally to fitness (total score) to reproduce + - mutate, if appropriate + - choose a player to be replaced + - update the population + + Returns + ------- + MoranProcess: + Returns itself with a new population + """ + # Check the exit condition, that all players are of the same type. + if self.stop_on_fixation and self.fixation_check(): + raise StopIteration + if self.mode == "bd": + # Birth then death + j = self.birth() + i = self.death(j) + elif self.mode == "db": + # Death then birth + i = self.death() + self.players[i] = None + j = self.birth(i) + # Mutate and/or replace player i with clone of player j + self.players[i] = self.mutate(j) + # Record population. + self.populations.append(self.population_distribution()) + return self + + def _matchup_indices(self) -> Set[Tuple[int, int]]: + """ + Generate the matchup pairs. 
+
+ Returns
+ -------
+ indices:
+ A set of 2 tuples of matchup pairs: the collection of all players
+ who play each other.
+ """
+ indices = set() # type: Set
+ # For death-birth we only want the neighbors of the dead node.
+ # The other calculations are unnecessary.
+ if self.mode == "db":
+ source = self.index[self.dead]
+ self.dead = None
+ sources = sorted(self.interaction_graph.out_vertices(source))
+ else:
+ # birth-death is global
+ sources = sorted(self.locations)
+ for i, source in enumerate(sources):
+ for target in sorted(self.interaction_graph.out_vertices(source)):
+ j = self.index[target]
+ if (self.players[i] is None) or (self.players[j] is None):
+ continue
+ # Don't duplicate matches
+ if ((i, j) in indices) or ((j, i) in indices):
+ continue
+ indices.add((i, j))
+ return indices
+
+ def score_all(self) -> List:
+ """Plays the next round of the process. Every player is paired up
+ against every other player and the total scores are recorded.
+
+ Returns
+ -------
+ scores:
+ List of scores for each player
+ """
+ N = len(self.players)
+ scores = [0] * N
+ for i, j in self._matchup_indices():
+ player1 = self.players[i]
+ player2 = self.players[j]
+ match = IpdMatch(
+ (player1, player2),
+ turns=self.turns,
+ prob_end=self.prob_end,
+ noise=self.noise,
+ game=self.game,
+ deterministic_cache=self.deterministic_cache,
+ )
+ match.play()
+ match_scores = match.final_score_per_turn()
+ scores[i] += match_scores[0]
+ scores[j] += match_scores[1]
+ self.score_history.append(scores)
+ return scores
+
+ def population_distribution(self) -> Counter:
+ """Returns the population distribution of the last iteration.
+
+ Returns
+ -------
+ counter:
+ The counts of each strategy in the population of the last iteration
+ """
+ player_names = [str(player) for player in self.players]
+ counter = Counter(player_names)
+ return counter
+
+ def __iter__(self) -> object:
+ """
+ Returns
+ -------
+ self
+ """
+ return self
+
+ def reset(self) -> None:
+ """Reset the process to replay."""
+ self.winning_strategy_name = None
+ self.score_history = []
+ # Reset all the players
+ self.set_players()
+
+ def play(self) -> List[Counter]:
+ """
+ Play the process out to completion. If played with mutation this will
+ not terminate.
+
+ Returns
+ -------
+ populations:
+ Returns a list of all the populations
+ """
+ if not self.stop_on_fixation or self.mutation_rate != 0:
+ raise ValueError(
+ "MoranProcess.play() will never exit if mutation_rate is "
+ "nonzero or stop_on_fixation is False. Use iteration instead."
+ )
+ while True:
+ try:
+ self.__next__()
+ except StopIteration:
+ break
+ return self.populations
+
+ def __len__(self) -> int:
+ """
+ Returns
+ -------
+ The length of the Moran process: the number of populations
+ """
+ return len(self.populations)
+
+ def populations_plot(self, ax=None):
+ """
+ Create a stackplot of the population distributions at each iteration of
+ the Moran process.
+
+ Parameters
+ ----------
+ ax: matplotlib axis
+ Allows the plot to be written to a given matplotlib axis.
+ Default is None.
+ + Returns + ----------- + A matplotlib axis object + + """ + player_names = self.populations[0].keys() + if ax is None: + _, ax = plt.subplots() + else: + ax = ax + + plot_data = [] + labels = [] + for name in player_names: + labels.append(name) + values = [counter[name] for counter in self.populations] + plot_data.append(values) + domain = range(len(values)) + + ax.stackplot(domain, plot_data, labels=labels) + ax.set_title("Moran Process Population by Iteration") + ax.set_xlabel("Iteration") + ax.set_ylabel("Number of Individuals") + ax.legend() + return ax + + +class ApproximateMoranProcess(MoranProcess): + """ + A class to approximate a Moran process based + on a distribution of potential IpdMatch outcomes. + + Instead of playing the matches, the result is sampled + from a dictionary of player tuples to distribution of match outcomes + """ + + def __init__( + self, players: List[IpdPlayer], cached_outcomes: dict, mutation_rate: float = 0 + ) -> None: + """ + Parameters + ---------- + players: + cached_outcomes: + Mapping tuples of players to instances of the moran.Pdf class. + mutation_rate: + The rate of mutation. Replicating players are mutated with + probability `mutation_rate` + """ + super(ApproximateMoranProcess, self).__init__( + players, + turns=0, + noise=0, + deterministic_cache=None, + mutation_rate=mutation_rate, + ) + self.cached_outcomes = cached_outcomes + + def score_all(self) -> List: + """Plays the next round of the process. Every player is paired up + against every other player and the total scores are obtained from the + cached outcomes. + + Returns + ------- + scores: + List of scores for each player + """ + N = len(self.players) + scores = [0] * N + for i in range(N): + for j in range(i + 1, N): + player_names = tuple([str(self.players[i]), str(self.players[j])]) + + cached_score = self._get_scores_from_cache(player_names) + scores[i] += cached_score[0] + scores[j] += cached_score[1] + self.score_history.append(scores) + return scores + + def _get_scores_from_cache(self, player_names: Tuple) -> Tuple: + """ + Retrieve the scores from the players in the cache + + Parameters + ---------- + player_names: + The names of the players + + Returns + ------- + scores: + The scores of the players in that particular match + """ + try: + match_scores = self.cached_outcomes[player_names].sample() + return match_scores + except KeyError: # If players are stored in opposite order + match_scores = self.cached_outcomes[player_names[::-1]].sample() + return match_scores[::-1] diff --git a/axelrod/player.py b/axelrod/player.py new file mode 100644 index 000000000..13dc23f96 --- /dev/null +++ b/axelrod/player.py @@ -0,0 +1,215 @@ +import copy +import inspect +import itertools +import types +from typing import Any, Dict + +import numpy as np + +from axelrod.base_player import BasePlayer +from axelrod.action import Action +from axelrod.game import DefaultGame +from axelrod.history import History +from axelrod.random_ import random_flip + +C, D = Action.C, Action.D + + +class Player(): # pragma: no cover + def __init__(self): + raise DeprecationWarning("Please change path from axelrod.player.Player to axelrod.Player") + + +def simultaneous_play(player, coplayer, noise=0): + """This pits two players against each other.""" + s1, s2 = player.strategy(coplayer), coplayer.strategy(player) + if noise: + s1 = random_flip(s1, noise) + s2 = random_flip(s2, noise) + player.update_history(s1, s2) + coplayer.update_history(s2, s1) + return s1, s2 + + +class IpdPlayer(BasePlayer): + """A class for 
a player in the tournament. + + This is an abstract base class, not intended to be used directly. + """ + + name = "IpdPlayer" + classifier = {} # type: Dict[str, Any] + def __new__(cls, *args, **kwargs): + """Caches arguments for IpdPlayer cloning.""" + obj = super().__new__(cls) + obj.init_kwargs = cls.init_params(*args, **kwargs) + return obj + + @classmethod + def init_params(cls, *args, **kwargs): + """ + Return a dictionary containing the init parameters of a strategy + (without 'self'). + Use *args and *kwargs as value if specified + and complete the rest with the default values. + """ + sig = inspect.signature(cls.__init__) + # The 'self' parameter needs to be removed or the first *args will be + # assigned to it + self_param = sig.parameters.get("self") + new_params = list(sig.parameters.values()) + new_params.remove(self_param) + sig = sig.replace(parameters=new_params) + boundargs = sig.bind_partial(*args, **kwargs) + boundargs.apply_defaults() + return boundargs.arguments + + def __init__(self): + """Initiates an empty history.""" + self._history = History() + self.classifier = copy.deepcopy(self.classifier) + self.set_match_attributes() + super().__init__() + + def __eq__(self, other): + """ + Test if two players are equal. + """ + if self.__repr__() != other.__repr__(): + return False + + for attribute in set( + list(self.__dict__.keys()) + list(other.__dict__.keys()) + ): + + value = getattr(self, attribute, None) + other_value = getattr(other, attribute, None) + + if isinstance(value, np.ndarray): + if not (np.array_equal(value, other_value)): + return False + + elif isinstance(value, types.GeneratorType) or isinstance( + value, itertools.cycle + ): + # Split the original generator so it is not touched + generator, original_value = itertools.tee(value) + other_generator, original_other_value = itertools.tee( + other_value + ) + + if isinstance(value, types.GeneratorType): + setattr(self, attribute, (ele for ele in original_value)) + setattr( + other, attribute, (ele for ele in original_other_value) + ) + else: + setattr(self, attribute, itertools.cycle(original_value)) + setattr( + other, attribute, itertools.cycle(original_other_value) + ) + + for _ in range(200): + try: + if next(generator) != next(other_generator): + return False + except StopIteration: + break + + # Code for a strange edge case where each strategy points at each + # other + elif value is other and other_value is self: + pass + else: + if value != other_value: + return False + return True + + def receive_match_attributes(self): + # Overwrite this function if your strategy needs + # to make use of match_attributes such as + # the game matrix, the number of rounds or the noise + pass + + def set_match_attributes(self, length=-1, game=None, noise=0): + if not game: + game = DefaultGame + self.match_attributes = {"length": length, "game": game, "noise": noise} + self.receive_match_attributes() + + def __repr__(self): + """The string method for the strategy. + Appends the `__init__` parameters to the strategy's name.""" + name = self.name + prefix = ": " + gen = ( + value for value in self.init_kwargs.values() if value is not None + ) + for value in gen: + try: + if issubclass(value, IpdPlayer): + value = value.name + except TypeError: + pass + name = "".join([name, prefix, str(value)]) + prefix = ", " + return name + + def __getstate__(self): + """Used for pickling. 
Override if IpdPlayer contains unpickleable attributes.""" + return self.__dict__ + + def strategy(self, opponent): + """This is a placeholder strategy.""" + raise NotImplementedError() + + def play(self, opponent, noise=0, strategy_holder=None): + """This pits two players against each other, using the passed strategy + holder, if provided.""" + if strategy_holder is None: + strategy_holder = self + return simultaneous_play(strategy_holder, opponent, noise) + + def clone(self): + """Clones the player without history, reapplying configuration + parameters as necessary.""" + + # You may be tempted to re-implement using the `copy` module + # Note that this would require a deepcopy in some cases and there may + # be significant changes required throughout the library. + # Consider overriding in special cases only if necessary + cls = self.__class__ + new_player = cls(**self.init_kwargs) + new_player.match_attributes = copy.copy(self.match_attributes) + return new_player + + def reset(self): + """Resets a player to its initial state + + This method is called at the beginning of each match (between a pair + of players) to reset a player's state to its initial starting point. + It ensures that no 'memory' of previous matches is carried forward. + """ + # This also resets the history. + self.__init__(**self.init_kwargs) + + def update_history(self, play, coplay): + self.history.append(play, coplay) + + @property + def history(self): + return self._history + + # Properties maintained for legacy API, can refactor to self.history.X + # in 5.0.0 to reduce function call overhead. + @property + def cooperations(self): + return self._history.cooperations + + @property + def defections(self): + return self._history.defections + + @property + def state_distribution(self): + return self._history.state_distribution diff --git a/axelrod/plot.py b/axelrod/plot.py new file mode 100644 index 000000000..edc596529 --- /dev/null +++ b/axelrod/plot.py @@ -0,0 +1,333 @@ +from distutils.version import LooseVersion +from typing import List, Union + +import matplotlib +import matplotlib.pyplot as plt +import matplotlib.transforms as transforms +import pathlib +import tqdm +from numpy import arange, median, nan_to_num + +from .result_set import ResultSet +from .load_data_ import axl_filename + +titleType = List[str] +namesType = List[str] +dataType = List[List[Union[int, float]]] + + +def default_cmap(version: str = "2.0") -> str: + """Sets a default matplotlib colormap based on the version.""" + if LooseVersion(version) >= "1.5": + return "viridis" + return "YlGnBu" + + +class Plot(object): + def __init__(self, result_set: ResultSet) -> None: + self.result_set = result_set + self.num_players = self.result_set.num_players + self.players = self.result_set.players + + def _violinplot( + self, + data: dataType, + names: namesType, + title: titleType = None, + ax: matplotlib.axes.SubplotBase = None, + ) -> matplotlib.figure.Figure: + """For making violinplots.""" + + if ax is None: + _, ax = plt.subplots() + else: + ax = ax + + figure = ax.get_figure() + width = max(self.num_players / 3, 12) + height = width / 2 + spacing = 4 + positions = spacing * arange(1, self.num_players + 1, 1) + figure.set_size_inches(width, height) + ax.violinplot( + data, + positions=positions, + widths=spacing / 2, + showmedians=True, + showextrema=False, + ) + ax.set_xticks(positions) + ax.set_xticklabels(names, rotation=90) + ax.set_xlim([0, spacing * (self.num_players + 1)]) + ax.tick_params(axis="both", which="both", labelsize=8) + if title: + 
ax.set_title(title) + plt.tight_layout() + return figure + + # Box and Violin plots for mean score, score differences, wins, and match + # lengths + + @property + def _boxplot_dataset(self): + return [ + list(nan_to_num(self.result_set.normalised_scores[ir])) + for ir in self.result_set.ranking + ] + + @property + def _boxplot_xticks_locations(self): + return list(range(1, len(self.result_set.ranked_names) + 2)) + + @property + def _boxplot_xticks_labels(self): + return [str(n) for n in self.result_set.ranked_names] + + def boxplot( + self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None + ) -> matplotlib.figure.Figure: + """For the specific mean score boxplot.""" + data = self._boxplot_dataset + names = self._boxplot_xticks_labels + figure = self._violinplot(data, names, title=title, ax=ax) + return figure + + @property + def _winplot_dataset(self): + # Sort wins by median + wins = self.result_set.wins + medians = map(median, wins) + medians = sorted([(m, i) for (i, m) in enumerate(medians)], reverse=True) + # Reorder and grab names + wins = [wins[x[-1]] for x in medians] + ranked_names = [str(self.players[x[-1]]) for x in medians] + return wins, ranked_names + + def winplot( + self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None + ) -> matplotlib.figure.Figure: + """Plots the distributions for the number of wins for each strategy.""" + + data, names = self._winplot_dataset + figure = self._violinplot(data, names, title=title, ax=ax) + # Expand ylim a bit + maximum = max(max(w) for w in data) + plt.ylim(-0.5, 0.5 + maximum) + return figure + + @property + def _sd_ordering(self): + return self.result_set.ranking + + @property + def _sdv_plot_dataset(self): + ordering = self._sd_ordering + diffs = [ + [score_diff for opponent in player for score_diff in opponent] + for player in self.result_set.score_diffs + ] + # Reorder and grab names + diffs = [diffs[i] for i in ordering] + ranked_names = [str(self.players[i]) for i in ordering] + return diffs, ranked_names + + def sdvplot( + self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None + ) -> matplotlib.figure.Figure: + """Score difference violin plots to visualize the distributions of how + players attain their payoffs.""" + diffs, ranked_names = self._sdv_plot_dataset + figure = self._violinplot(diffs, ranked_names, title=title, ax=ax) + return figure + + @property + def _lengthplot_dataset(self): + match_lengths = self.result_set.match_lengths + return [ + [length for rep in match_lengths for length in rep[playeri]] + for playeri in self.result_set.ranking + ] + + def lengthplot( + self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None + ) -> matplotlib.figure.Figure: + """For the specific match length boxplot.""" + data = self._lengthplot_dataset + names = self._boxplot_xticks_labels + figure = self._violinplot(data, names, title=title, ax=ax) + return figure + + @property + def _payoff_dataset(self): + pm = self.result_set.payoff_matrix + return [ + [pm[r1][r2] for r2 in self.result_set.ranking] + for r1 in self.result_set.ranking + ] + + @property + def _pdplot_dataset(self): + # Order like the sdv_plot + ordering = self._sd_ordering + pdm = self.result_set.payoff_diffs_means + # Reorder and grab names + matrix = [[pdm[r1][r2] for r2 in ordering] for r1 in ordering] + players = self.result_set.players + ranked_names = [str(players[i]) for i in ordering] + return matrix, ranked_names + + def _payoff_heatmap( + self, + data: dataType, + names: namesType, + title: titleType = 
None, + ax: matplotlib.axes.SubplotBase = None, + ) -> matplotlib.figure.Figure: + """Generic heatmap plot""" + + if ax is None: + _, ax = plt.subplots() + else: + ax = ax + + figure = ax.get_figure() + width = max(self.num_players / 4, 12) + height = width + figure.set_size_inches(width, height) + matplotlib_version = matplotlib.__version__ + cmap = default_cmap(matplotlib_version) + mat = ax.matshow(data, cmap=cmap) + ax.set_xticks(range(self.result_set.num_players)) + ax.set_yticks(range(self.result_set.num_players)) + ax.set_xticklabels(names, rotation=90) + ax.set_yticklabels(names) + ax.tick_params(axis="both", which="both", labelsize=16) + if title: + ax.set_xlabel(title) + figure.colorbar(mat, ax=ax) + plt.tight_layout() + return figure + + def pdplot( + self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None + ) -> matplotlib.figure.Figure: + """Payoff difference heatmap to visualize the distributions of how + players attain their payoffs.""" + matrix, names = self._pdplot_dataset + return self._payoff_heatmap(matrix, names, title=title, ax=ax) + + def payoff( + self, title: titleType = None, ax: matplotlib.axes.SubplotBase = None + ) -> matplotlib.figure.Figure: + """Payoff heatmap to visualize the distributions of how + players attain their payoffs.""" + data = self._payoff_dataset + names = self.result_set.ranked_names + return self._payoff_heatmap(data, names, title=title, ax=ax) + + # Ecological Plot + + def stackplot( + self, + eco, + title: titleType = None, + logscale: bool = True, + ax: matplotlib.axes.SubplotBase = None, + ) -> matplotlib.figure.Figure: + + populations = eco.population_sizes + + if ax is None: + _, ax = plt.subplots() + else: + ax = ax + + figure = ax.get_figure() + turns = range(len(populations)) + pops = [ + [populations[iturn][ir] for iturn in turns] + for ir in self.result_set.ranking + ] + ax.stackplot(turns, *pops) + + ax.yaxis.tick_left() + ax.yaxis.set_label_position("right") + ax.yaxis.labelpad = 25.0 + + ax.set_ylim([0.0, 1.0]) + ax.set_ylabel("Relative population size") + ax.set_xlabel("Turn") + if title is not None: + ax.set_title(title) + + trans = transforms.blended_transform_factory(ax.transAxes, ax.transData) + ticks = [] + for i, n in enumerate(self.result_set.ranked_names): + x = -0.01 + y = (i + 0.5) * 1 / self.result_set.num_players + ax.annotate( + n, + xy=(x, y), + xycoords=trans, + clip_on=False, + va="center", + ha="right", + fontsize=5, + ) + ticks.append(y) + ax.set_yticks(ticks) + ax.tick_params(direction="out") + ax.set_yticklabels([]) + + if logscale: + ax.set_xscale("log") + + plt.tight_layout() + return figure + + def save_all_plots( + self, + prefix: str = "axelrod", + title_prefix: str = "axelrod", + filetype: str = "svg", + progress_bar: bool = True, + ) -> None: + """ + A method to save all plots to file. + + Parameters + ---------- + + prefix : str + A prefix for the file name. This can include the directory. + Default: axelrod. + title_prefix : str + A prefix for the title of the plots (appears on the graphic). + Default: axelrod. + filetype : str + A string for the filetype to save files to: pdf, png, svg, + etc... 
+        progress_bar : bool
+            Whether or not to create a progress bar, which is updated as
+            each plot is saved.
+        """
+        plots = [
+            ("boxplot", "Payoff"),
+            ("payoff", "Payoff"),
+            ("winplot", "Wins"),
+            ("sdvplot", "Payoff differences"),
+            ("pdplot", "Payoff differences"),
+            ("lengthplot", "Length of Matches"),
+        ]
+
+        if progress_bar:
+            total = len(plots)  # Total number of plots
+            pbar = tqdm.tqdm(total=total, desc="Obtaining plots")
+
+        for method, name in plots:
+            f = getattr(self, method)(title="{} - {}".format(title_prefix, name))
+            path = pathlib.Path("{}_{}.{}".format(prefix, method, filetype))
+            f.savefig(axl_filename(path))
+            plt.close(f)
+
+            if progress_bar:
+                pbar.update()
diff --git a/axelrod/random_.py b/axelrod/random_.py
new file mode 100644
index 000000000..183c77b5f
--- /dev/null
+++ b/axelrod/random_.py
@@ -0,0 +1,95 @@
+import random
+
+import numpy as np
+from numpy.random import choice
+
+from axelrod.action import Action
+
+C, D = Action.C, Action.D
+
+
+def seed(seed_):
+    """Sets a seed for both the stdlib and numpy random generators."""
+    random.seed(seed_)
+    np.random.seed(seed_)
+
+
+def random_choice(p: float = 0.5) -> Action:
+    """
+    Return C with probability `p`, else return D
+
+    No random sample is carried out if p is 0 or 1.
+
+    Parameters
+    ----------
+    p : float
+        The probability of picking C
+
+    Returns
+    -------
+    axelrod.Action
+    """
+    if p == 0:
+        return D
+
+    if p == 1:
+        return C
+
+    r = random.random()
+    if r < p:
+        return C
+    return D
+
+
+def random_flip(action: Action, threshold: float) -> Action:
+    """
+    Return flipped action with probability `threshold`
+
+    No random sample is carried out if threshold is 0 or 1.
+
+    Parameters
+    ----------
+    action:
+        The action to flip or not
+    threshold : float
+        The probability of flipping action
+
+    Returns
+    -------
+    axelrod.Action
+    """
+    if random_choice(threshold) == C:
+        return action.flip()
+    return action
+
+
+def randrange(a: int, b: int) -> int:
+    """Python 2 / 3 compatible randrange. Returns a random integer uniformly
+    distributed in [a, b), i.e. including a but excluding b."""
+    c = b - a
+    r = c * random.random()
+    return a + int(r)
+
+
+def random_vector(size):
+    """Create a random vector of values in [0, 1] that sums to 1."""
+    vector = np.random.random(size)
+    return vector / np.sum(vector)
+
+
+class Pdf(object):
+    """A class for a probability distribution"""
+
+    def __init__(self, counter):
+        """Takes an instance of collections.Counter."""
+        self.sample_space, self.counts = zip(*counter.items())
+        self.size = len(self.sample_space)
+        self.total = sum(self.counts)
+        self.probability = [v / self.total for v in self.counts]
+
+    def sample(self):
+        """Sample from the pdf"""
+        index = choice(a=range(self.size), p=self.probability)
+        # Numpy cannot sample from a list of n dimensional objects for n > 1,
+        # need to sample an index.
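+        # For example, for Pdf(Counter({C: 3, D: 1})) the sampled index
+        # points at C with probability 0.75 and at D with probability 0.25.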
+        return self.sample_space[index]
diff --git a/axelrod/result_set.py b/axelrod/result_set.py
new file mode 100644
index 000000000..4df530ef7
--- /dev/null
+++ b/axelrod/result_set.py
@@ -0,0 +1,788 @@
+from collections import Counter, namedtuple
+import csv
+import itertools
+from multiprocessing import cpu_count
+from typing import List
+import warnings
+
+import numpy as np
+import tqdm
+from axelrod.action import Action
+
+import dask as da
+import dask.dataframe as dd
+
+from axelrod import eigen
+
+C, D = Action.C, Action.D
+
+
+def update_progress_bar(method):
+    """A decorator to update a progress bar if it exists"""
+
+    def wrapper(*args, **kwargs):
+        """Run the method and update the progress bar if it exists"""
+        output = method(*args, **kwargs)
+
+        try:
+            args[0].progress_bar.update(1)
+        except AttributeError:
+            pass
+
+        return output
+
+    return wrapper
+
+
+class ResultSet:
+    """
+    A class to hold the results of a tournament. Reads in a CSV file produced
+    by the tournament class.
+    """
+
+    def __init__(
+        self, filename, players, repetitions, processes=None, progress_bar=True
+    ):
+        """
+        Parameters
+        ----------
+        filename : string
+            The file from which to read the interactions
+        players : list
+            A list of the names of players. If not known, will be efficiently
+            read from file.
+        repetitions : int
+            The number of repetitions of each match. If not known, will be
+            efficiently read from file.
+        processes : integer
+            The number of processes to be used for parallel processing
+        progress_bar: boolean
+            Whether a progress bar will be shown.
+        """
+        self.filename = filename
+        self.players, self.repetitions = players, repetitions
+        self.num_players = len(self.players)
+
+        if progress_bar:
+            self.progress_bar = tqdm.tqdm(total=25, desc="Analysing")
+
+        df = dd.read_csv(filename)
+        dask_tasks = self._build_tasks(df)
+
+        if processes == 0:
+            processes = cpu_count()
+
+        out = self._compute_tasks(tasks=dask_tasks, processes=processes)
+
+        self._reshape_out(*out)
+
+        if progress_bar:
+            self.progress_bar.close()
+
+    def _reshape_out(
+        self,
+        mean_per_reps_player_opponent_df,
+        sum_per_player_opponent_df,
+        sum_per_player_repetition_df,
+        normalised_scores_series,
+        initial_cooperation_count_series,
+        interactions_count_series,
+    ):
+        """
+        Reshape the various pandas series objects to be of the required form
+        and set the corresponding attributes.
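+
+        For example, after reshaping, `self.payoffs` is indexed as
+        payoffs[player][opponent][repetition].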
+ """ + + self.payoffs = self._reshape_three_dim_list( + mean_per_reps_player_opponent_df["Score per turn"], + first_dimension=range(self.num_players), + second_dimension=range(self.num_players), + third_dimension=range(self.repetitions), + key_order=[2, 0, 1], + ) + + self.score_diffs = self._reshape_three_dim_list( + mean_per_reps_player_opponent_df["Score difference per turn"], + first_dimension=range(self.num_players), + second_dimension=range(self.num_players), + third_dimension=range(self.repetitions), + key_order=[2, 0, 1], + alternative=0, + ) + + self.match_lengths = self._reshape_three_dim_list( + mean_per_reps_player_opponent_df["Turns"], + first_dimension=range(self.repetitions), + second_dimension=range(self.num_players), + third_dimension=range(self.num_players), + alternative=0, + ) + + self.wins = self._reshape_two_dim_list(sum_per_player_repetition_df["Win"]) + self.scores = self._reshape_two_dim_list(sum_per_player_repetition_df["Score"]) + self.normalised_scores = self._reshape_two_dim_list(normalised_scores_series) + + self.cooperation = self._build_cooperation( + sum_per_player_opponent_df["Cooperation count"] + ) + self.good_partner_matrix = self._build_good_partner_matrix( + sum_per_player_opponent_df["Good partner"] + ) + + columns = ["CC count", "CD count", "DC count", "DD count"] + self.state_distribution = self._build_state_distribution( + sum_per_player_opponent_df[columns] + ) + self.normalised_state_distribution = self._build_normalised_state_distribution() + + columns = [ + "CC to C count", + "CC to D count", + "CD to C count", + "CD to D count", + "DC to C count", + "DC to D count", + "DD to C count", + "DD to D count", + ] + self.state_to_action_distribution = self._build_state_to_action_distribution( + sum_per_player_opponent_df[columns] + ) + self.normalised_state_to_action_distribution = ( + self._build_normalised_state_to_action_distribution() + ) + + self.initial_cooperation_count = self._build_initial_cooperation_count( + initial_cooperation_count_series + ) + self.initial_cooperation_rate = self._build_initial_cooperation_rate( + interactions_count_series + ) + self.good_partner_rating = self._build_good_partner_rating( + interactions_count_series + ) + + self.normalised_cooperation = self._build_normalised_cooperation() + self.ranking = self._build_ranking() + self.ranked_names = self._build_ranked_names() + + self.payoff_matrix = self._build_summary_matrix(self.payoffs) + self.payoff_stddevs = self._build_summary_matrix(self.payoffs, func=np.std) + + self.payoff_diffs_means = self._build_payoff_diffs_means() + self.cooperating_rating = self._build_cooperating_rating() + self.vengeful_cooperation = self._build_vengeful_cooperation() + self.eigenjesus_rating = self._build_eigenjesus_rating() + self.eigenmoses_rating = self._build_eigenmoses_rating() + + @update_progress_bar + def _reshape_three_dim_list( + self, + series, + first_dimension, + second_dimension, + third_dimension, + alternative=None, + key_order=[0, 1, 2], + ): + """ + Parameters + ---------- + series : pandas.Series + first_dimension : iterable + second_dimension : iterable + third_dimension : iterable + alternative : int + What to do if there is no entry at given position + key_order : list + Indices re-ording the dimensions to the correct keys in the + series + + Returns: + -------- + A three dimensional list across the three dimensions + """ + series_dict = series.to_dict() + output = [] + for first_index in first_dimension: + matrix = [] + for second_index in second_dimension: + 
row = [] + for third_index in third_dimension: + key = (first_index, second_index, third_index) + key = tuple([key[order] for order in key_order]) + if key in series_dict: + row.append(series_dict[key]) + elif alternative is not None: + row.append(alternative) + matrix.append(row) + output.append(matrix) + return output + + @update_progress_bar + def _reshape_two_dim_list(self, series): + """ + Parameters + ---------- + series : pandas.Series + + Returns: + -------- + A two dimensional list across repetitions and opponents + """ + series_dict = series.to_dict() + out = [ + [ + series_dict.get((player_index, repetition), 0) + for repetition in range(self.repetitions) + ] + for player_index in range(self.num_players) + ] + return out + + @update_progress_bar + def _build_cooperation(self, cooperation_series): + cooperation_dict = cooperation_series.to_dict() + cooperation = [] + for player_index in range(self.num_players): + row = [] + for opponent_index in range(self.num_players): + count = cooperation_dict.get((player_index, opponent_index), 0) + if player_index == opponent_index: + # Address double count + count = int(count / 2) + row.append(count) + cooperation.append(row) + return cooperation + + @update_progress_bar + def _build_good_partner_matrix(self, good_partner_series): + good_partner_dict = good_partner_series.to_dict() + good_partner_matrix = [] + for player_index in range(self.num_players): + row = [] + for opponent_index in range(self.num_players): + if player_index == opponent_index: + # The reduce operation implies a double count of self + # interactions. + row.append(0) + else: + row.append(good_partner_dict.get((player_index, opponent_index), 0)) + good_partner_matrix.append(row) + return good_partner_matrix + + @update_progress_bar + def _build_summary_matrix(self, attribute, func=np.mean): + matrix = [ + [0 for opponent_index in range(self.num_players)] + for player_index in range(self.num_players) + ] + + pairs = itertools.product(range(self.num_players), repeat=2) + + for player_index, opponent_index in pairs: + utilities = attribute[player_index][opponent_index] + if utilities: + matrix[player_index][opponent_index] = func(utilities) + + return matrix + + @update_progress_bar + def _build_payoff_diffs_means(self): + payoff_diffs_means = [ + [np.mean(diff) for diff in player] for player in self.score_diffs + ] + + return payoff_diffs_means + + @update_progress_bar + def _build_state_distribution(self, state_distribution_series): + state_key_map = { + "CC count": (C, C), + "CD count": (C, D), + "DC count": (D, C), + "DD count": (D, D), + } + state_distribution = [ + [ + create_counter_dict( + state_distribution_series, + player_index, + opponent_index, + state_key_map, + ) + for opponent_index in range(self.num_players) + ] + for player_index in range(self.num_players) + ] + return state_distribution + + @update_progress_bar + def _build_normalised_state_distribution(self): + """ + Returns: + -------- + norm : list + + Normalised state distribution. A list of lists of counter objects: + + Dictionary where the keys are the states and the values are a + normalized counts of the number of times that state occurs. 
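+
+            For example, a single opponent entry might be
+            Counter({(C, C): 0.75, (C, D): 0.25}) (illustrative values),
+            meaning 75% of turns against that opponent were mutual
+            cooperation.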
+ """ + normalised_state_distribution = [] + for player in self.state_distribution: + counters = [] + for counter in player: + total = sum(counter.values()) + counters.append( + Counter({key: value / total for key, value in counter.items()}) + ) + normalised_state_distribution.append(counters) + return normalised_state_distribution + + @update_progress_bar + def _build_state_to_action_distribution(self, state_to_action_distribution_series): + state_to_action_key_map = { + "CC to C count": ((C, C), C), + "CC to D count": ((C, C), D), + "CD to C count": ((C, D), C), + "CD to D count": ((C, D), D), + "DC to C count": ((D, C), C), + "DC to D count": ((D, C), D), + "DD to C count": ((D, D), C), + "DD to D count": ((D, D), D), + } + state_to_action_distribution = [ + [ + create_counter_dict( + state_to_action_distribution_series, + player_index, + opponent_index, + state_to_action_key_map, + ) + for opponent_index in range(self.num_players) + ] + for player_index in range(self.num_players) + ] + return state_to_action_distribution + + @update_progress_bar + def _build_normalised_state_to_action_distribution(self): + """ + Returns: + -------- + norm : list + + A list of lists of counter objects. + + Dictionary where the keys are the states and the values are a + normalized counts of the number of times that state goes to a given + action. + """ + normalised_state_to_action_distribution = [] + for player in self.state_to_action_distribution: + counters = [] + for counter in player: + norm_counter = Counter() + for state in [(C, C), (C, D), (D, C), (D, D)]: + total = counter[(state, C)] + counter[(state, D)] + if total > 0: + for action in [C, D]: + if counter[(state, action)] > 0: + norm_counter[(state, action)] = ( + counter[(state, action)] / total + ) + counters.append(norm_counter) + normalised_state_to_action_distribution.append(counters) + return normalised_state_to_action_distribution + + @update_progress_bar + def _build_initial_cooperation_count(self, initial_cooperation_count_series): + initial_cooperation_count_dict = initial_cooperation_count_series.to_dict() + initial_cooperation_count = [ + initial_cooperation_count_dict.get(player_index, 0) + for player_index in range(self.num_players) + ] + return initial_cooperation_count + + @update_progress_bar + def _build_normalised_cooperation(self): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + normalised_cooperation = [ + list(np.nan_to_num(row)) + for row in np.array(self.cooperation) + / sum(map(np.array, self.match_lengths)) + ] + return normalised_cooperation + + @update_progress_bar + def _build_initial_cooperation_rate(self, interactions_series): + interactions_array = np.array( + [ + interactions_series.get(player_index, 0) + for player_index in range(self.num_players) + ] + ) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + initial_cooperation_rate = list( + np.nan_to_num( + np.array(self.initial_cooperation_count) / interactions_array + ) + ) + return initial_cooperation_rate + + @update_progress_bar + def _build_ranking(self): + ranking = sorted( + range(self.num_players), + key=lambda i: -np.nanmedian(self.normalised_scores[i]), + ) + return ranking + + @update_progress_bar + def _build_ranked_names(self): + ranked_names = [str(self.players[i]) for i in self.ranking] + return ranked_names + + @update_progress_bar + def _build_eigenmoses_rating(self): + """ + Returns: + -------- + The eigenmoses rating as defined in: + http://www.scottaaronson.com/morality.pdf + """ + eigenvector, 
eigenvalue = eigen.principal_eigenvector(self.vengeful_cooperation) + + return eigenvector.tolist() + + @update_progress_bar + def _build_eigenjesus_rating(self): + """ + Returns: + -------- + The eigenjesus rating as defined in: + http://www.scottaaronson.com/morality.pdf + """ + eigenvector, eigenvalue = eigen.principal_eigenvector( + self.normalised_cooperation + ) + + return eigenvector.tolist() + + @update_progress_bar + def _build_cooperating_rating(self): + """ + Returns: + -------- + The list of cooperation ratings + List of the form: + + [ML1, ML2, ML3..., MLn] + + Where n is the number of players and MLi is a list of the form: + + [pi1, pi2, pi3, ..., pim] + + Where pij is the total number of cooperations divided by the total + number of turns over all repetitions played by player i against + player j. + """ + + plist = list(range(self.num_players)) + total_length_v_opponent = [ + zip(*[rep[player_index] for rep in self.match_lengths]) + for player_index in plist + ] + lengths = [ + [sum(e) for j, e in enumerate(row) if i != j] + for i, row in enumerate(total_length_v_opponent) + ] + + cooperation = [ + [col for j, col in enumerate(row) if i != j] + for i, row in enumerate(self.cooperation) + ] + # Max is to deal with edge cases of matches that have no turns + cooperating_rating = [ + sum(cs) / max(1, sum(ls)) for cs, ls in zip(cooperation, lengths) + ] + return cooperating_rating + + @update_progress_bar + def _build_vengeful_cooperation(self): + """ + Returns: + -------- + The vengeful cooperation matrix derived from the + normalised cooperation matrix: + + Dij = 2(Cij - 0.5) + """ + vengeful_cooperation = [ + [2 * (element - 0.5) for element in row] + for row in self.normalised_cooperation + ] + return vengeful_cooperation + + @update_progress_bar + def _build_good_partner_rating(self, interactions_series): + """ + At the end of a read of the data, build the good partner rating + attribute + """ + interactions_dict = interactions_series.to_dict() + good_partner_rating = [ + sum(self.good_partner_matrix[player]) + / max(1, interactions_dict.get(player, 0)) + for player in range(self.num_players) + ] + return good_partner_rating + + def _compute_tasks(self, tasks, processes): + """ + Compute all dask tasks + """ + if processes is None: + out = da.compute(*tasks, scheduler="single-threaded") + else: + out = da.compute(*tasks, num_workers=processes) + return out + + def _build_tasks(self, df): + """ + Returns a tuple of dask tasks + """ + groups = ["Repetition", "Player index", "Opponent index"] + columns = ["Turns", "Score per turn", "Score difference per turn"] + mean_per_reps_player_opponent_task = df.groupby(groups)[columns].mean() + + groups = ["Player index", "Opponent index"] + columns = [ + "Cooperation count", + "CC count", + "CD count", + "DC count", + "DD count", + "CC to C count", + "CC to D count", + "CD to C count", + "CD to D count", + "DC to C count", + "DC to D count", + "DD to C count", + "DD to D count", + "Good partner", + ] + sum_per_player_opponent_task = df.groupby(groups)[columns].sum() + + ignore_self_interactions_task = df["Player index"] != df["Opponent index"] + adf = df[ignore_self_interactions_task] + + groups = ["Player index", "Repetition"] + columns = ["Win", "Score"] + sum_per_player_repetition_task = adf.groupby(groups)[columns].sum() + + groups = ["Player index", "Repetition"] + column = "Score per turn" + normalised_scores_task = adf.groupby(groups)[column].mean() + + groups = ["Player index"] + column = "Initial cooperation" + 
initial_cooperation_count_task = adf.groupby(groups)[column].sum() + interactions_count_task = adf.groupby("Player index")["Player index"].count() + + return ( + mean_per_reps_player_opponent_task, + sum_per_player_opponent_task, + sum_per_player_repetition_task, + normalised_scores_task, + initial_cooperation_count_task, + interactions_count_task, + ) + + def __eq__(self, other): + """ + Check equality of results set + + Parameters + ---------- + other : axelrod.ResultSet + Another results set against which to check equality + """ + + def list_equal_with_nans(v1: List[float], v2: List[float]) -> bool: + """Matches lists, accounting for NaNs.""" + if len(v1) != len(v2): + return False + for i1, i2 in zip(v1, v2): + if np.isnan(i1) and np.isnan(i2): + continue + if i1 != i2: + return False + return True + + return all( + [ + self.wins == other.wins, + self.match_lengths == other.match_lengths, + self.scores == other.scores, + self.normalised_scores == other.normalised_scores, + self.ranking == other.ranking, + self.ranked_names == other.ranked_names, + self.payoffs == other.payoffs, + self.payoff_matrix == other.payoff_matrix, + self.payoff_stddevs == other.payoff_stddevs, + self.score_diffs == other.score_diffs, + self.payoff_diffs_means == other.payoff_diffs_means, + self.cooperation == other.cooperation, + self.normalised_cooperation == other.normalised_cooperation, + self.vengeful_cooperation == other.vengeful_cooperation, + self.cooperating_rating == other.cooperating_rating, + self.good_partner_matrix == other.good_partner_matrix, + self.good_partner_rating == other.good_partner_rating, + list_equal_with_nans(self.eigenmoses_rating, other.eigenmoses_rating), + list_equal_with_nans(self.eigenjesus_rating, other.eigenjesus_rating), + ] + ) + + def __ne__(self, other): + """ + Check inequality of results set + + Parameters + ---------- + other : axelrod.ResultSet + Another results set against which to check inequality + """ + return not self.__eq__(other) + + def summarise(self): + """ + Obtain summary of performance of each strategy: + ordered by rank, including median normalised score and cooperation + rating. + + Output + ------ + A list of the form: + + [[player name, median score, cooperation_rating],...] 
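+
+        For example, the first row might read (values illustrative only):
+
+            [0, "Tit For Tat", 2.7, 0.96, 3.0, 1.0, ...]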
+ + """ + + median_scores = map(np.nanmedian, self.normalised_scores) + median_wins = map(np.nanmedian, self.wins) + + self.player = namedtuple( + "IpdPlayer", + [ + "Rank", + "Name", + "Median_score", + "Cooperation_rating", + "Wins", + "Initial_C_rate", + "CC_rate", + "CD_rate", + "DC_rate", + "DD_rate", + "CC_to_C_rate", + "CD_to_C_rate", + "DC_to_C_rate", + "DD_to_C_rate", + ], + ) + + states = [(C, C), (C, D), (D, C), (D, D)] + state_prob = [] + for i, player in enumerate(self.normalised_state_distribution): + counts = [] + for state in states: + p = sum([opp[state] for j, opp in enumerate(player) if i != j]) + counts.append(p) + try: + counts = [c / sum(counts) for c in counts] + except ZeroDivisionError: + counts = [0 for c in counts] + state_prob.append(counts) + + state_to_C_prob = [] + for player in self.normalised_state_to_action_distribution: + rates = [] + for state in states: + counts = [ + counter[(state, C)] for counter in player if counter[(state, C)] > 0 + ] + + if len(counts) > 0: + rate = np.mean(counts) + else: + rate = 0 + + rates.append(rate) + state_to_C_prob.append(rates) + + summary_measures = list( + zip( + self.players, + median_scores, + self.cooperating_rating, + median_wins, + self.initial_cooperation_rate, + ) + ) + + summary_data = [] + for rank, i in enumerate(self.ranking): + data = list(summary_measures[i]) + state_prob[i] + state_to_C_prob[i] + summary_data.append(self.player(rank, *data)) + + return summary_data + + def write_summary(self, filename): + """ + Write a csv file containing summary data of the results of the form: + + "Rank", "Name", "Median-score-per-turn", "Cooperation-rating", "Initial_C_Rate", "Wins", "CC-Rate", "CD-Rate", "DC-Rate", "DD-rate","CC-to-C-Rate", "CD-to-C-Rate", "DC-to-C-Rate", "DD-to-C-rate" + + + Parameters + ---------- + filename : a filepath to which to write the data + """ + summary_data = self.summarise() + with open(filename, "w") as csvfile: + writer = csv.writer(csvfile, lineterminator="\n") + writer.writerow(self.player._fields) + for player in summary_data: + writer.writerow(player) + + +def create_counter_dict(df, player_index, opponent_index, key_map): + """ + Create a Counter object mapping states (corresponding to columns of df) for + players given by player_index, opponent_index. Renaming the variables with + `key_map`. Used by `ResultSet._reshape_out` + + Parameters + ---------- + df : a multiindex pandas df + player_index: int + opponent_index: int + key_map : a dict + maps cols of df to strings + + Returns + ------- + A counter dictionary + """ + counter = Counter() + if player_index != opponent_index: + if (player_index, opponent_index) in df.index: + for key, value in df.loc[player_index, opponent_index].items(): + if value > 0: + counter[key_map[key]] = value + return counter diff --git a/axelrod/strategy_transformers.py b/axelrod/strategy_transformers.py new file mode 100644 index 000000000..15e39ce29 --- /dev/null +++ b/axelrod/strategy_transformers.py @@ -0,0 +1,679 @@ +""" +Strategy Transformers -- class decorators that transform the behavior of any +strategy. + +See the various Meta strategies for another type of transformation. 
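+
+A transformer is applied by calling it on a player class. A minimal sketch,
+using FlipTransformer (defined below) and assuming Cooperator is importable
+from the top-level namespace:
+
+    import axelrod as axl
+    from axelrod.strategy_transformers import FlipTransformer
+
+    FlippedCooperator = FlipTransformer()(axl.Cooperator)
+    player = FlippedCooperator()  # plays D wherever Cooperator would play C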
+""" + +from collections import Iterable +import copy +import inspect +from importlib import import_module +import random +from typing import Any + +from numpy.random import choice + +from axelrod.strategies.sequence_player import SequencePlayer +from .action import Action +from .player import IpdPlayer +from .random_ import random_choice + +C, D = Action.C, Action.D + +# Note: After a transformation is applied, the player's history is overwritten +# with the modified history just like in the noisy tournament case. This can +# lead to unexpected behavior, such as when FlipTransform is applied to +# Alternator. + + +def StrategyTransformerFactory(strategy_wrapper, name_prefix=None, reclassifier=None): + """Modify an existing strategy dynamically by wrapping the strategy + method with the argument `strategy_wrapper`. + + Parameters + ---------- + strategy_wrapper: function + A function of the form `strategy_wrapper(player, opponent, proposed_action, *args, **kwargs)` + Can also use a class that implements + def __call__(self, player, opponent, action) + name_prefix: string, "Transformed " + A string to prepend to the strategy and class name + reclassifier: function, + A function which will update the classifier of the strategy being + transformed + """ + + # Create a class that applies a wrapper function to the strategy method + # of a given class. We use a class here instead of a function so that the + # decorator can have arguments. + + class Decorator(object): + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + if "name_prefix" in kwargs: + self.name_prefix = kwargs["name_prefix"] + else: + self.name_prefix = name_prefix + + def __reduce__(self): + """Gives instructions on how to pickle the Decorator object.""" + factory_args = (strategy_wrapper, name_prefix, reclassifier) + return ( + DecoratorReBuilder(), + (factory_args, self.args, self.kwargs, self.name_prefix), + ) + + def __call__(self, PlayerClass): + """ + Parameters + ---------- + PlayerClass: A subclass of axelrodPlayer, e.g. Cooperator + The IpdPlayer Class to modify + + Returns + ------- + new_class, class object + A class object that can create instances of the modified + PlayerClass + """ + + args = self.args + kwargs = self.kwargs + try: + # If "name_prefix" in kwargs remove as only want decorator + # arguments + del kwargs["name_prefix"] + except KeyError: + pass + try: + del kwargs["reclassifier"] + except KeyError: + pass + + # Define the new strategy method, wrapping the existing method + # with `strategy_wrapper` + def strategy(self, opponent): + if strategy_wrapper == dual_wrapper: + # dual_wrapper figures out strategy as if the IpdPlayer had + # played the opposite actions of its current history. + self._history = self.history.flip_plays() + + if is_strategy_static(PlayerClass): + proposed_action = PlayerClass.strategy(opponent) + else: + proposed_action = PlayerClass.strategy(self, opponent) + + if strategy_wrapper == dual_wrapper: + # After dual_wrapper calls the strategy, it returns + # the IpdPlayer to its original state. 
+ self._history = self.history.flip_plays() + + # Apply the wrapper + return strategy_wrapper( + self, opponent, proposed_action, *args, **kwargs + ) + + # Modify the PlayerClass name + new_class_name = PlayerClass.__name__ + name = PlayerClass.name + name_prefix = self.name_prefix + if name_prefix: + # Modify the IpdPlayer name (class variable inherited from IpdPlayer) + new_class_name = "".join([name_prefix, PlayerClass.__name__]) + # Modify the IpdPlayer name (class variable inherited from IpdPlayer) + name = " ".join([name_prefix, PlayerClass.name]) + + original_classifier = copy.deepcopy(PlayerClass.classifier) # Copy + if reclassifier is not None: + classifier = reclassifier(original_classifier, *args, **kwargs) + else: + classifier = original_classifier + + # Define the new __repr__ method to add the wrapper arguments + # at the end of the name + def __repr__(self): + name = PlayerClass.__repr__(self) + # add eventual transformers' arguments in name + prefix = ": " + for arg in args: + try: + # Action has .name but should not be made into a list + if not any(isinstance(el, Action) for el in arg): + arg = [player.name for player in arg] + except AttributeError: + pass + except TypeError: + pass + name = "".join([name, prefix, str(arg)]) + prefix = ", " + return name + + def reduce_for_decorated_class(self_): + """__reduce__ function for decorated class. Ensures that any + decorated class can be correctly pickled.""" + class_module = import_module(self_.__module__) + import_name = self_.__class__.__name__ + + if player_can_be_pickled(self_): + return self_.__class__, (), self_.__dict__ + + decorators = [] + state = self_.__dict__ + for class_ in self_.__class__.mro(): + import_name = class_.__name__ + if hasattr(class_, "decorator"): + decorators.insert(0, class_.decorator) + if hasattr(class_module, import_name): + # Sequence players are not directly pickleable so we need to call __getstate__ + state = class_.__getstate__(self_) + break + + return ( + StrategyReBuilder(), + (decorators, import_name, self_.__module__), + state, + ) + + # Define a new class and wrap the strategy method + # Dynamically create the new class + new_class = type( + new_class_name, + (PlayerClass,), + { + "name": name, + "original_class": PlayerClass, + "strategy": strategy, + "decorator": self, + "__repr__": __repr__, + "__module__": PlayerClass.__module__, + "classifier": classifier, + "__doc__": PlayerClass.__doc__, + "__reduce__": reduce_for_decorated_class, + }, + ) + + return new_class + + return Decorator + + +def player_can_be_pickled(player: IpdPlayer) -> bool: + """ + Returns True if pickle.dump(player) does not raise pickle.PicklingError. + """ + class_module = import_module(player.__module__) + import_name = player.__class__.__name__ + if not hasattr(class_module, import_name): + return False + # Sequence players are pickleable but not directly so (particularly if decorated). + if issubclass(player.__class__, SequencePlayer): + return False + + to_test = getattr(class_module, import_name) + return to_test == player.__class__ + + +def is_strategy_static(player_class) -> bool: + """ + Returns True if `player_class.strategy` is a `staticmethod`, else False. + """ + for class_ in player_class.mro(): + method = inspect.getattr_static(class_, "strategy", default=None) + if method is not None: + return isinstance(method, staticmethod) + + +class DecoratorReBuilder(object): + """ + An object to build an anonymous Decorator obj from a set of pickle-able + parameters. 
+ """ + + def __call__( + self, factory_args: tuple, args: tuple, kwargs: dict, instance_name_prefix: str + ) -> Any: + + decorator_class = StrategyTransformerFactory(*factory_args) + kwargs["name_prefix"] = instance_name_prefix + return decorator_class(*args, **kwargs) + + +class StrategyReBuilder(object): + """ + An object to build a new instance of a player from an old instance + that could not normally be pickled. + """ + + def __call__(self, decorators: list, import_name: str, module_name: str) -> IpdPlayer: + + module_ = import_module(module_name) + import_class = getattr(module_, import_name) + + if hasattr(import_class, "decorator"): + return import_class() + else: + generated_class = import_class + for decorator in decorators: + generated_class = decorator(generated_class) + return generated_class() + + +def compose_transformers(t1, t2): + """Compose transformers without having to invoke the first on + a PlayerClass.""" + + class Composition(object): + def __init__(self): + self.t1 = t1 + self.t2 = t2 + + def __call__(self, PlayerClass): + return t1(t2(PlayerClass)) + + return Composition() + + +def generic_strategy_wrapper(player, opponent, proposed_action, *args, **kwargs): + """ + Strategy wrapper functions should be of the following form. + + Parameters + ---------- + player: IpdPlayer object or subclass (self) + opponent: IpdPlayer object or subclass + proposed_action: an axelrod.Action, C or D + The proposed action by the wrapped strategy + proposed_action = IpdPlayer.strategy(...) + args, kwargs: + Any additional arguments that you need. + + Returns + ------- + action: an axelrod.Action, C or D + + """ + + # This example just passes through the proposed_action + return proposed_action + + +IdentityTransformer = StrategyTransformerFactory(generic_strategy_wrapper) + + +def flip_wrapper(player, opponent, action): + """Flips the player's original actions.""" + return action.flip() + + +FlipTransformer = StrategyTransformerFactory(flip_wrapper, name_prefix="Flipped") + + +def dual_wrapper(player, opponent: IpdPlayer, proposed_action: Action) -> Action: + """Wraps the players strategy function to produce the Dual. + + The Dual of a strategy will return the exact opposite set of moves to the + original strategy when both are faced with the same history. + + A formal definition can be found in [Ashlock2010]_. + http://doi.org/10.1109/ITW.2010.5593352 + + Parameters + ---------- + player: IpdPlayer object or subclass (self) + opponent: IpdPlayer object or subclass + proposed_action: axelrod.Action, C or D + The proposed action by the wrapped strategy + + Returns + ------- + action: an axelrod.Action, C or D + """ + + # dual_wrapper is a special case. The work of flip_play_attributes(player) + # is done in the strategy of the new PlayerClass created by DualTransformer. + # The DualTransformer is dynamically created in StrategyTransformerFactory. 
+ + return proposed_action.flip() + + +DualTransformer = StrategyTransformerFactory(dual_wrapper, name_prefix="Dual") + + +def noisy_wrapper(player, opponent, action, noise=0.05): + """Flips the player's actions with probability: `noise`.""" + r = random.random() + if r < noise: + return action.flip() + return action + + +def noisy_reclassifier(original_classifier, noise): + """Function to reclassify the strategy""" + if noise not in (0, 1): + original_classifier["stochastic"] = True + return original_classifier + + +NoisyTransformer = StrategyTransformerFactory( + noisy_wrapper, name_prefix="Noisy", reclassifier=noisy_reclassifier +) + + +def forgiver_wrapper(player, opponent, action, p): + """If a strategy wants to defect, flip to cooperate with the given + probability.""" + if action == D: + return random_choice(p) + return C + + +def forgiver_reclassifier(original_classifier, p): + """Function to reclassify the strategy""" + if p not in (0, 1): + original_classifier["stochastic"] = True + return original_classifier + + +ForgiverTransformer = StrategyTransformerFactory( + forgiver_wrapper, name_prefix="Forgiving", reclassifier=forgiver_reclassifier +) + + +def nice_wrapper(player, opponent, action): + """Makes sure that the player doesn't defect unless the opponent has already + defected.""" + if action == D: + if opponent.defections == 0: + return C + return action + + +NiceTransformer = StrategyTransformerFactory(nice_wrapper, name_prefix="Nice") + + +def initial_sequence(player, opponent, action, initial_seq): + """Play the moves in `seq` first (must be a list), ignoring the strategy's + moves until the list is exhausted.""" + + index = len(player.history) + if index < len(initial_seq): + return initial_seq[index] + return action + + +def initial_reclassifier(original_classifier, initial_seq): + """ + If needed this extends the memory depth to be the length of the initial + sequence + """ + original_classifier["memory_depth"] = max( + len(initial_seq), original_classifier["memory_depth"] + ) + return original_classifier + + +InitialTransformer = StrategyTransformerFactory( + initial_sequence, name_prefix="Initial", reclassifier=initial_reclassifier +) + + +def final_sequence(player, opponent, action, seq): + """Play the moves in `seq` first, ignoring the strategy's moves until the + list is exhausted.""" + + length = player.match_attributes["length"] + + if length < 0: # default is -1 + return action + + index = length - len(player.history) + # If for some reason we've overrun the expected game length, just pass + # the intended action through + if len(player.history) >= length: + return action + # Check if we're near the end and need to start passing the actions + # from seq for the final few rounds. 
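+    # For example, with length 10 and seq = [D, D]: on the ninth round
+    # len(player.history) is 8, so index is 2 and seq[-2] is played; on the
+    # final round index is 1 and seq[-1] is played.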
+ if index <= len(seq): + return seq[-index] + return action + + +def final_reclassifier(original_classifier, seq): + """Reclassify the strategy""" + original_classifier["makes_use_of"].update(["length"]) + original_classifier["memory_depth"] = max( + len(seq), original_classifier["memory_depth"] + ) + return original_classifier + + +FinalTransformer = StrategyTransformerFactory( + final_sequence, name_prefix="Final", reclassifier=final_reclassifier +) + + +def history_track_wrapper(player, opponent, action): + """Wrapper to track a player's history in a variable `._recorded_history`.""" + try: + player._recorded_history.append(action) + except AttributeError: + player._recorded_history = [action] + return action + + +TrackHistoryTransformer = StrategyTransformerFactory( + history_track_wrapper, name_prefix="HistoryTracking" +) + + +def deadlock_break_wrapper(player, opponent, action): + """Detect and attempt to break deadlocks by cooperating.""" + if len(player.history) < 2: + return action + last_round = (player.history[-1], opponent.history[-1]) + penultimate_round = (player.history[-2], opponent.history[-2]) + if (penultimate_round, last_round) == ((C, D), (D, C)) or ( + penultimate_round, + last_round, + ) == ((D, C), (C, D)): + # attempt to break deadlock by Cooperating + return C + return action + + +DeadlockBreakingTransformer = StrategyTransformerFactory( + deadlock_break_wrapper, name_prefix="DeadlockBreaking" +) + + +def grudge_wrapper(player, opponent, action, grudges): + """After `grudges` defections, defect forever.""" + if opponent.defections > grudges: + return D + return action + + +GrudgeTransformer = StrategyTransformerFactory(grudge_wrapper, name_prefix="Grudging") + + +def apology_wrapper(player, opponent, action, myseq, opseq): + length = len(myseq) + if len(player.history) < length: + return action + if (myseq == player.history[-length:]) and (opseq == opponent.history[-length:]): + return C + return action + + +ApologyTransformer = StrategyTransformerFactory( + apology_wrapper, name_prefix="Apologizing" +) + + +def mixed_wrapper(player, opponent, action, probability, m_player): + """Randomly picks a strategy to play, either from a distribution on a list + of players or a single player. + + In essence creating a mixed strategy. + + Parameters + ---------- + + probability: a float (or integer: 0 or 1) OR an iterable representing a + an incomplete probability distribution (entries to do not have to sum to + 1). Eg: 0, 1, [.5,.5], (.5,.3) + m_players: a single player class or iterable representing set of player + classes to mix from. 
+ Eg: axelrod.TitForTat, [axelod.Cooperator, axelrod.Defector] + """ + + # If a single probability, player is passed + if isinstance(probability, float) or isinstance(probability, int): + m_player = [m_player] + probability = [probability] + + # If a probability distribution, players is passed + if isinstance(probability, Iterable) and isinstance( + m_player, Iterable + ): + mutate_prob = sum(probability) # Prob of mutation + if mutate_prob > 0: + # Distribution of choice of mutation: + normalised_prob = [prob / mutate_prob for prob in probability] + if random.random() < mutate_prob: + p = choice(list(m_player), p=normalised_prob)() + p._history = player._history + return p.strategy(opponent) + + return action + + +def mixed_reclassifier(original_classifier, probability, m_player): + """Function to reclassify the strategy""" + # If a single probability, player is passed + if isinstance(probability, float) or isinstance(probability, int): + m_player = [m_player] + probability = [probability] + + if min(probability) == max(probability) == 0: # No probability given + return original_classifier + + if 1 in probability: # If all probability given to one player + player = m_player[probability.index(1)] + original_classifier["stochastic"] = player.classifier["stochastic"] + return original_classifier + + # Otherwise: stochastic. + original_classifier["stochastic"] = True + return original_classifier + + +MixedTransformer = StrategyTransformerFactory( + mixed_wrapper, name_prefix="Mutated", reclassifier=mixed_reclassifier +) + + +def joss_ann_wrapper(player, opponent, proposed_action, probability): + """Wraps the players strategy function to produce the Joss-Ann. + + The Joss-Ann of a strategy is a new strategy which has a probability of + choosing the move C, a probability of choosing the move D, and otherwise + uses the response appropriate to the original strategy. + + A formal definition can be found in [Ashlock2010]_. + http://doi.org/10.1109/ITW.2010.5593352 + + Parameters + ---------- + + player: IpdPlayer object or subclass (self) + opponent: IpdPlayer object or subclass + proposed_action: axelrod.Action, C or D + The proposed action by the wrapped strategy + probability: tuple + a tuple or list representing a probability distribution of playing move + C or D (doesn't have to be complete) ie. (0, 1) or (0.2, 0.3) + + Returns + ------- + action: an axelrod.Action, C or D + """ + if sum(probability) > 1: + probability = tuple([i / sum(probability) for i in probability]) + + remaining_probability = max(0, 1 - probability[0] - probability[1]) + probability += (remaining_probability,) + options = [C, D, proposed_action] + action = choice(options, p=probability) + return action + + +def jossann_reclassifier(original_classifier, probability): + """ + Reclassify: note that if probabilities are (0, 1) or (1, 0) then we override + the original classifier. 
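+
+    For example, probability (1, 0) means the Joss-Ann always plays C, so the
+    transformed strategy is deterministic whatever the original classifier
+    says.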
+ """ + if sum(probability) > 1: + probability = tuple([i / sum(probability) for i in probability]) + + if probability in [(1, 0), (0, 1)]: + original_classifier["stochastic"] = False + elif sum(probability) != 0: + original_classifier["stochastic"] = True + + return original_classifier + + +JossAnnTransformer = StrategyTransformerFactory( + joss_ann_wrapper, name_prefix="Joss-Ann", reclassifier=jossann_reclassifier +) + + +# Strategy wrappers as classes + + +class RetaliationWrapper(object): + """Retaliates `retaliations` times after a defection (cumulative).""" + + def __call__(self, player, opponent, action, retaliations): + if len(player.history) == 0: + self.retaliation_count = 0 + return action + if opponent.history[-1] == D: + self.retaliation_count += retaliations - 1 + return D + if self.retaliation_count == 0: + return action + if self.retaliation_count > 0: + self.retaliation_count -= 1 + return D + + +RetaliationTransformer = StrategyTransformerFactory( + RetaliationWrapper(), name_prefix="Retaliating" +) + + +class RetaliationUntilApologyWrapper(object): + """Enforces the TFT rule that the opponent pay back a defection with a + cooperation for the player to stop defecting.""" + + def __call__(self, player, opponent, action): + if len(player.history) == 0: + self.is_retaliating = False + return action + if opponent.history[-1] == D: + self.is_retaliating = True + if self.is_retaliating: + if opponent.history[-1] == C: + self.is_retaliating = False + return C + return D + return action + + +RetaliateUntilApologyTransformer = StrategyTransformerFactory( + RetaliationUntilApologyWrapper(), name_prefix="RUA" +) diff --git a/axelrod/tests/__init__.py b/axelrod/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/axelrod/tests/property.py b/axelrod/tests/property.py new file mode 100644 index 000000000..705acbdc9 --- /dev/null +++ b/axelrod/tests/property.py @@ -0,0 +1,335 @@ +""" +A module for creating hypothesis based strategies for property based testing +""" +import itertools + +import axelrod as axl + +from hypothesis.strategies import composite, floats, integers, lists, sampled_from + + +@composite +def strategy_lists( + draw, strategies=axl.short_run_time_strategies, min_size=1, max_size=len(axl.strategies) +): + """ + A hypothesis decorator to return a list of strategies + + Parameters + ---------- + min_size : integer + The minimum number of strategies to include + max_size : integer + The maximum number of strategies to include + """ + strategies = draw( + lists(sampled_from(strategies), min_size=min_size, max_size=max_size) + ) + return strategies + + +@composite +def matches( + draw, + strategies=axl.short_run_time_strategies, + min_turns=1, + max_turns=200, + min_noise=0, + max_noise=1, +): + """ + A hypothesis decorator to return a random match. 
+ + Parameters + ---------- + strategies : list + The strategies from which to sample the two the players + min_turns : integer + The minimum number of turns + max_turns : integer + The maximum number of turns + min_noise : float + The minimum noise + max_noise : float + The maximum noise + + Returns + ------- + match : a random match + """ + strategies = draw(strategy_lists(min_size=2, max_size=2)) + players = [s() for s in strategies] + turns = draw(integers(min_value=min_turns, max_value=max_turns)) + noise = draw(floats(min_value=min_noise, max_value=max_noise)) + match = axl.IpdMatch(players, turns=turns, noise=noise) + return match + + +@composite +def tournaments( + draw, + strategies=axl.short_run_time_strategies, + min_size=1, + max_size=10, + min_turns=1, + max_turns=200, + min_noise=0, + max_noise=1, + min_repetitions=1, + max_repetitions=20, +): + """ + A hypothesis decorator to return a tournament. + + Parameters + ---------- + min_size : integer + The minimum number of strategies to include + max_size : integer + The maximum number of strategies to include + min_turns : integer + The minimum number of turns + max_turns : integer + The maximum number of turns + min_noise : float + The minimum noise value + min_noise : float + The maximum noise value + min_repetitions : integer + The minimum number of repetitions + max_repetitions : integer + The maximum number of repetitions + """ + strategies = draw( + strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size) + ) + players = [s() for s in strategies] + turns = draw(integers(min_value=min_turns, max_value=max_turns)) + repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) + noise = draw(floats(min_value=min_noise, max_value=max_noise)) + + tournament = axl.IpdTournament(players, turns=turns, repetitions=repetitions, noise=noise) + return tournament + + +@composite +def prob_end_tournaments( + draw, + strategies=axl.short_run_time_strategies, + min_size=1, + max_size=10, + min_prob_end=0, + max_prob_end=1, + min_noise=0, + max_noise=1, + min_repetitions=1, + max_repetitions=20, +): + """ + A hypothesis decorator to return a tournament, + + Parameters + ---------- + min_size : integer + The minimum number of strategies to include + max_size : integer + The maximum number of strategies to include + min_prob_end : float + The minimum probability of a match ending + max_prob_end : float + The maximum probability of a match ending + min_noise : float + The minimum noise value + max_noise : float + The maximum noise value + min_repetitions : integer + The minimum number of repetitions + max_repetitions : integer + The maximum number of repetitions + """ + strategies = draw( + strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size) + ) + players = [s() for s in strategies] + prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end)) + repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) + noise = draw(floats(min_value=min_noise, max_value=max_noise)) + + tournament = axl.IpdTournament( + players, prob_end=prob_end, repetitions=repetitions, noise=noise + ) + return tournament + + +@composite +def spatial_tournaments( + draw, + strategies=axl.short_run_time_strategies, + min_size=1, + max_size=10, + min_turns=1, + max_turns=200, + min_noise=0, + max_noise=1, + min_repetitions=1, + max_repetitions=20, +): + """ + A hypothesis decorator to return a spatial tournament. 
+ + Parameters + ---------- + min_size : integer + The minimum number of strategies to include + max_size : integer + The maximum number of strategies to include + min_turns : integer + The minimum number of turns + max_turns : integer + The maximum number of turns + min_noise : float + The minimum noise value + max_noise : float + The maximum noise value + min_repetitions : integer + The minimum number of repetitions + max_repetitions : integer + The maximum number of repetitions + """ + strategies = draw( + strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size) + ) + players = [s() for s in strategies] + player_indices = list(range(len(players))) + + all_potential_edges = list(itertools.combinations(player_indices, 2)) + all_potential_edges.extend([(i, i) for i in player_indices]) # Loops + edges = draw( + lists( + sampled_from(all_potential_edges), + unique=True, + average_size=2 * len(players), + ) + ) + + # Ensure all players/nodes are connected: + node_indices = sorted(set([node for edge in edges for node in edge])) + missing_nodes = [index for index in player_indices if index not in node_indices] + for index in missing_nodes: + opponent = draw(sampled_from(player_indices)) + edges.append((index, opponent)) + + turns = draw(integers(min_value=min_turns, max_value=max_turns)) + repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions)) + noise = draw(floats(min_value=min_noise, max_value=max_noise)) + + tournament = axl.IpdTournament( + players, turns=turns, repetitions=repetitions, noise=noise, edges=edges + ) + return tournament + + +@composite +def prob_end_spatial_tournaments( + draw, + strategies=axl.short_run_time_strategies, + min_size=1, + max_size=10, + min_prob_end=0, + max_prob_end=1, + min_noise=0, + max_noise=1, + min_repetitions=1, + max_repetitions=20, +): + """ + A hypothesis decorator to return a probabilistic ending spatial tournament. 
+
+    Parameters
+    ----------
+    min_size : integer
+        The minimum number of strategies to include
+    max_size : integer
+        The maximum number of strategies to include
+    min_prob_end : float
+        The minimum probability of a match ending
+    max_prob_end : float
+        The maximum probability of a match ending
+    min_noise : float
+        The minimum noise value
+    max_noise : float
+        The maximum noise value
+    min_repetitions : integer
+        The minimum number of repetitions
+    max_repetitions : integer
+        The maximum number of repetitions
+    """
+    strategies = draw(
+        strategy_lists(strategies=strategies, min_size=min_size, max_size=max_size)
+    )
+    players = [s() for s in strategies]
+    player_indices = list(range(len(players)))
+
+    all_potential_edges = list(itertools.combinations(player_indices, 2))
+    all_potential_edges.extend([(i, i) for i in player_indices])  # Loops
+    edges = draw(
+        lists(
+            sampled_from(all_potential_edges),
+            unique=True,
+            average_size=2 * len(players),
+        )
+    )
+
+    # Ensure all players/nodes are connected:
+    node_indices = sorted(set([node for edge in edges for node in edge]))
+    missing_nodes = [index for index in player_indices if index not in node_indices]
+    for index in missing_nodes:
+        opponent = draw(sampled_from(player_indices))
+        edges.append((index, opponent))
+
+    prob_end = draw(floats(min_value=min_prob_end, max_value=max_prob_end))
+    repetitions = draw(integers(min_value=min_repetitions, max_value=max_repetitions))
+    noise = draw(floats(min_value=min_noise, max_value=max_noise))
+
+    tournament = axl.IpdTournament(
+        players, prob_end=prob_end, repetitions=repetitions, noise=noise, edges=edges
+    )
+    return tournament
+
+
+@composite
+def games(draw, prisoners_dilemma=True, max_value=100):
+    """
+    A hypothesis decorator to return a random game.
+
+    Parameters
+    ----------
+    prisoners_dilemma : bool
+        If not True, the R, P, S, T values will be uniformly random. Defaults
+        to True, which ensures T > R > P > S and 2R > T + S.
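+        The sampled values also leave a gap of at least one between each of
+        S < P < R < T, and keep R strictly above (T + S) / 2.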
+    max_value : integer
+        The maximal payoff value
+    """
+
+    if prisoners_dilemma:
+        s_upper_bound = max_value - 4  # Ensures there is enough room
+        s = draw(integers(max_value=s_upper_bound))
+
+        t_lower_bound = s + 3  # Ensures there is enough room
+        t = draw(integers(min_value=t_lower_bound, max_value=max_value))
+
+        r_upper_bound = t - 1
+        r_lower_bound = min(max(int((t + s) / 2), s) + 2, r_upper_bound)
+        r = draw(integers(min_value=r_lower_bound, max_value=r_upper_bound))
+
+        p_lower_bound = s + 1
+        p_upper_bound = r - 1
+        p = draw(integers(min_value=p_lower_bound, max_value=p_upper_bound))
+
+    else:
+        s = draw(integers(max_value=max_value))
+        t = draw(integers(max_value=max_value))
+        r = draw(integers(max_value=max_value))
+        p = draw(integers(max_value=max_value))
+
+    game = axl.IpdGame(r=r, s=s, t=t, p=p)
+    return game
diff --git a/axelrod/tests/unit/__init__.py b/axelrod/tests/unit/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/axelrod/tests/unit/test_actions.py b/axelrod/tests/unit/test_actions.py
new file mode 100644
index 000000000..d5ad6a338
--- /dev/null
+++ b/axelrod/tests/unit/test_actions.py
@@ -0,0 +1,64 @@
+import unittest
+
+import axelrod as axl
+from axelrod.action import UnknownActionError, actions_to_str, str_to_actions
+
+C, D = axl.Action.C, axl.Action.D
+
+
+class TestAction(unittest.TestCase):
+    def test_lt(self):
+        self.assertLess(C, D)
+
+    def test_repr(self):
+        self.assertEqual(repr(C), "C")
+        self.assertEqual(repr(D), "D")
+
+    def test_str(self):
+        self.assertEqual(str(C), "C")
+        self.assertEqual(str(D), "D")
+
+    def test__eq__(self):
+        self.assertTrue(C == C)
+        self.assertTrue(D == D)
+        self.assertFalse(C == D)
+        self.assertFalse(D == C)
+
+    def test_total_order(self):
+        actions = [C, D, D, C, C, C, D]
+        actions.sort()
+        self.assertEqual(actions, [C, C, C, C, D, D, D])
+
+    def test_flip(self):
+        self.assertEqual(C.flip(), D)
+        self.assertEqual(D.flip(), C)
+
+    def test_from_char(self):
+        self.assertEqual(axl.Action.from_char("C"), C)
+        self.assertEqual(axl.Action.from_char("D"), D)
+
+    def test_from_char_error(self):
+        self.assertRaises(UnknownActionError, axl.Action.from_char, "")
+        self.assertRaises(UnknownActionError, axl.Action.from_char, "c")
+        self.assertRaises(UnknownActionError, axl.Action.from_char, "d")
+        self.assertRaises(UnknownActionError, axl.Action.from_char, "A")
+        self.assertRaises(UnknownActionError, axl.Action.from_char, "CC")
+
+    def test_str_to_actions(self):
+        self.assertEqual(str_to_actions(""), ())
+        self.assertEqual(str_to_actions("C"), (C,))
+        self.assertEqual(str_to_actions("CDDC"), (C, D, D, C))
+
+    def test_str_to_actions_fails_fast_and_raises_value_error(self):
+        self.assertRaises(UnknownActionError, str_to_actions, "Cc")
+
+    def test_actions_to_str(self):
+        self.assertEqual(actions_to_str([]), "")
+        self.assertEqual(actions_to_str([C]), "C")
+        self.assertEqual(actions_to_str([C, D, C]), "CDC")
+        self.assertEqual(actions_to_str((C, C, D)), "CCD")
+
+    def test_actions_to_str_with_iterable(self):
+        self.assertEqual(actions_to_str(iter([C, D, C])), "CDC")
+        generator = (action for action in [C, D, C])
+        self.assertEqual(actions_to_str(generator), "CDC")
diff --git a/axelrod/tests/unit/test_classification.py b/axelrod/tests/unit/test_classification.py
new file mode 100644
index 000000000..e1f650bb9
--- /dev/null
+++ b/axelrod/tests/unit/test_classification.py
@@ -0,0 +1,356 @@
+"""Tests for the classification."""
+
+import os
+import unittest
+from typing import Any, Text
+import warnings
+import yaml
+
+import axelrod
 as axl
+from axelrod.classifier import (
+    Classifier,
+    Classifiers,
+    _Classifiers,
+    memory_depth,
+    rebuild_classifier_table,
+)
+from axelrod.player import IpdPlayer
+
+
+class TitForTatWithEmptyClassifier(IpdPlayer):
+    """
+    Same name as TitForTat, but with an empty classifier.
+    """
+
+    # Classifiers are looked up by name, so only the name matters.
+    name = "Tit For Tat"
+    classifier = {}
+
+
+class TitForTatWithNonTrivialInitializer(IpdPlayer):
+    """
+    Same name as TitForTat, but with a non-trivial initializer.
+    """
+
+    def __init__(self, x: Any):
+        pass  # pragma: no cover
+
+    # Classifiers are looked up by name, so only the name matters.
+    name = "Tit For Tat"
+    classifier = {}
+
+
+class TestClassification(unittest.TestCase):
+    def setUp(self) -> None:
+        # Ignore warnings about classifiers running on instances
+        warnings.simplefilter("ignore", category=UserWarning)
+
+    def tearDown(self) -> None:
+        warnings.simplefilter("default", category=UserWarning)
+
+    def test_classifier_build(self):
+        dirname = os.path.dirname(__file__)
+        test_path = os.path.join(dirname, "../../../test_outputs/classifier_test.yaml")
+
+        # Just returns the name of the player.  For testing.
+        name_classifier = Classifier[Text]("name", lambda player: player.name)
+        rebuild_classifier_table(
+            classifiers=[name_classifier],
+            players=[axl.Cooperator, axl.Defector],
+            path=test_path,
+        )
+
+        filename = os.path.join("../..", test_path)
+        with open(filename, "r") as f:
+            all_player_dicts = yaml.load(f, Loader=yaml.FullLoader)
+
+        self.assertDictEqual(
+            all_player_dicts,
+            {"Cooperator": {"name": "Cooperator"}, "Defector": {"name": "Defector"}},
+        )
+
+    def test_singletonity_of_classifiers_class(self):
+        classifiers_1 = _Classifiers()
+        classifiers_2 = _Classifiers()
+
+        self.assertIs(classifiers_1, classifiers_2)
+
+    def test_get_name_from_classifier(self):
+        # Should be able to take a string or a Classifier instance.
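+        # (TitForTat has a memory depth of 1, hence the expected value below.)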
+        self.assertEqual(Classifiers["memory_depth"](axl.TitForTat()), 1)
+        self.assertEqual(Classifiers[memory_depth](axl.TitForTat()), 1)
+
+    def test_classifier_works_on_non_instances(self):
+        warnings.simplefilter("default", category=UserWarning)
+        with warnings.catch_warnings(record=True) as w:
+            self.assertEqual(Classifiers["memory_depth"](axl.TitForTat), 1)
+            self.assertEqual(len(w), 1)
+
+    def test_key_error_on_unknown_classifier(self):
+        with self.assertRaises(KeyError):
+            Classifiers["invalid_key"](axl.TitForTat)
+
+    def test_will_lookup_key_in_dict(self):
+        self.assertEqual(Classifiers["memory_depth"](TitForTatWithEmptyClassifier), 1)
+
+    def test_will_lookup_key_for_classes_that_cant_init(self):
+        with self.assertRaises(Exception) as exptn:
+            Classifiers["memory_depth"](TitForTatWithNonTrivialInitializer)
+        self.assertEqual(
+            str(exptn.exception),
+            "Passed player class doesn't have a trivial initializer.",
+        )
+
+    def test_known_classifiers(self):
+        # A set of dimensions that are known to have been fully applied
+        known_keys = [
+            "stochastic",
+            "memory_depth",
+            "long_run_time",
+            "inspects_source",
+            "manipulates_source",
+            "manipulates_state",
+        ]
+
+        for s in axl.all_strategies:
+            s = s()
+            self.assertTrue(None not in [Classifiers[key](s) for key in known_keys])
+
+    def test_multiple_instances(self):
+        """Certain instances of classes of strategies will have different
+        classifiers based on the initialisation variables"""
+        P1 = axl.MemoryOnePlayer(four_vector=(0.5, 0.5, 0.5, 0.5))
+        P2 = axl.MemoryOnePlayer(four_vector=(1, 0, 0, 1))
+        self.assertNotEqual(P1.classifier, P2.classifier)
+
+        P1 = axl.FirstByJoss()
+        P2 = axl.FirstByJoss(p=0)
+        self.assertNotEqual(P1.classifier, P2.classifier)
+
+        P1 = axl.GTFT(p=1)
+        P2 = axl.GTFT(p=0.5)
+        self.assertNotEqual(P1.classifier, P2.classifier)
+
+        P1 = axl.StochasticWSLS()
+        P2 = axl.StochasticWSLS(ep=0)
+        self.assertNotEqual(P1.classifier, P2.classifier)
+
+        P1 = axl.GoByMajority(memory_depth=5)
+        P2 = axl.StochasticWSLS(ep=0.1)
+        self.assertNotEqual(P1.classifier, P2.classifier)
+
+    def test_manipulation_of_classifier(self):
+        """Test that we can change the classifier of an instance without
+        changing the classifier of the class"""
+        player = axl.Cooperator()
+        player.classifier["memory_depth"] += 1
+        self.assertNotEqual(player.classifier, axl.Cooperator.classifier)
+        player = axl.Defector()
+        player.classifier["memory_depth"] += 1
+        self.assertNotEqual(player.classifier, axl.Defector.classifier)
+
+    def test_obey_axelrod(self):
+        """A test that verifies that the obey_axelrod function works correctly"""
+        known_cheaters = [
+            axl.Darwin,
+            axl.Geller,
+            axl.GellerCooperator,
+            axl.GellerDefector,
+            axl.MindBender,
+            axl.MindController,
+            axl.MindWarper,
+            axl.MindReader,
+        ]
+
+        known_basic = [
+            axl.Alternator,
+            axl.AntiTitForTat,
+            axl.Bully,
+            axl.Cooperator,
+            axl.Defector,
+            axl.GoByMajority,
+            axl.SuspiciousTitForTat,
+            axl.TitForTat,
+            axl.WinStayLoseShift,
+        ]
+
+        known_ordinary = [
+            axl.AverageCopier,
+            axl.ForgivingTitForTat,
+            axl.GoByMajority20,
+            axl.GTFT,
+            axl.Grudger,
+            axl.Inverse,
+            axl.Random,
+        ]
+
+        for strategy in known_cheaters:
+            self.assertFalse(axl.Classifiers.obey_axelrod(strategy()), msg=strategy)
+
+        for strategy in known_basic:
+            self.assertTrue(axl.Classifiers.obey_axelrod(strategy()), msg=strategy)
+
+        for strategy in known_ordinary:
+            self.assertTrue(axl.Classifiers.obey_axelrod(strategy()), msg=strategy)
+
+    def test_is_basic(self):
+        """A test that verifies that the is_basic function works correctly"""
+        
known_cheaters = [ + axl.Darwin, + axl.Geller, + axl.GellerCooperator, + axl.GellerDefector, + axl.MindBender, + axl.MindController, + axl.MindWarper, + axl.MindReader, + ] + + known_basic = [ + axl.Alternator, + axl.AntiTitForTat, + axl.Bully, + axl.Cooperator, + axl.Defector, + axl.SuspiciousTitForTat, + axl.TitForTat, + axl.WinStayLoseShift, + ] + + known_ordinary = [ + axl.AverageCopier, + axl.ForgivingTitForTat, + axl.GoByMajority20, + axl.GTFT, + axl.Inverse, + axl.Random, + ] + + for strategy in known_cheaters: + self.assertFalse(axl.Classifiers.is_basic(strategy()), msg=strategy) + + for strategy in known_basic: + self.assertTrue(axl.Classifiers.is_basic(strategy()), msg=strategy) + + for strategy in known_ordinary: + self.assertFalse(axl.Classifiers.is_basic(strategy()), msg=strategy) + + +def str_reps(xs): + """Maps a collection of player classes to their string representations.""" + return set(map(str, [x() for x in xs])) + + +class TestStrategies(unittest.TestCase): + def setUp(self) -> None: + # Ignore warnings about classifiers running on instances. We want to + # allow this for some of the map functions. + warnings.simplefilter("ignore", category=UserWarning) + + def tearDown(self) -> None: + warnings.simplefilter("default", category=UserWarning) + + def test_strategy_list(self): + for strategy_list in [ + "all_strategies", + "demo_strategies", + "basic_strategies", + "long_run_time_strategies", + "strategies", + "ordinary_strategies", + "cheating_strategies", + ]: + self.assertTrue(hasattr(axl, strategy_list)) + + def test_lists_not_empty(self): + for strategy_list in [ + axl.all_strategies, + axl.demo_strategies, + axl.basic_strategies, + axl.long_run_time_strategies, + axl.strategies, + axl.ordinary_strategies, + axl.cheating_strategies, + ]: + self.assertTrue(len(strategy_list) > 0) + + def test_inclusion_of_strategy_lists(self): + all_strategies_set = set(axl.all_strategies) + for strategy_list in [ + axl.demo_strategies, + axl.basic_strategies, + axl.long_run_time_strategies, + axl.strategies, + axl.ordinary_strategies, + axl.cheating_strategies, + ]: + self.assertTrue( + str_reps(strategy_list).issubset(str_reps(all_strategies_set)) + ) + + strategies_set = set(axl.strategies) + for strategy_list in [ + axl.demo_strategies, + axl.basic_strategies, + axl.long_run_time_strategies, + ]: + self.assertTrue(str_reps(strategy_list).issubset(str_reps(strategies_set))) + + def test_long_run_strategies(self): + long_run_time_strategies = [ + axl.DBS, + axl.MetaMajority, + axl.MetaMajorityFiniteMemory, + axl.MetaMajorityLongMemory, + axl.MetaMinority, + axl.MetaMixer, + axl.MetaWinner, + axl.MetaWinnerDeterministic, + axl.MetaWinnerEnsemble, + axl.MetaWinnerFiniteMemory, + axl.MetaWinnerLongMemory, + axl.MetaWinnerStochastic, + axl.NMWEDeterministic, + axl.NMWEFiniteMemory, + axl.NMWELongMemory, + axl.NMWEStochastic, + axl.NiceMetaWinner, + axl.NiceMetaWinnerEnsemble, + ] + + self.assertEqual( + str_reps(long_run_time_strategies), str_reps(axl.long_run_time_strategies) + ) + self.assertTrue( + all(map(Classifiers["long_run_time"], axl.long_run_time_strategies)) + ) + + def test_short_run_strategies(self): + short_run_time_strategies = [ + s for s in axl.strategies if s not in axl.long_run_time_strategies + ] + + self.assertEqual( + str_reps(short_run_time_strategies), str_reps(axl.short_run_time_strategies) + ) + self.assertFalse( + any(map(Classifiers["long_run_time"], axl.short_run_time_strategies)) + ) + + def test_meta_inclusion(self): + 
self.assertTrue(str(axl.MetaMajority()) in str_reps(axl.strategies))
+
+        self.assertTrue(str(axl.MetaHunter()) in str_reps(axl.strategies))
+        self.assertFalse(
+            str(axl.MetaHunter()) in str_reps(axl.long_run_time_strategies)
+        )
+
+    def test_demo_strategies(self):
+        demo_strategies = [
+            axl.Cooperator,
+            axl.Defector,
+            axl.TitForTat,
+            axl.Grudger,
+            axl.Random,
+        ]
+        self.assertEqual(str_reps(demo_strategies), str_reps(axl.demo_strategies))
diff --git a/axelrod/tests/unit/test_compute_finite_state_machine_memory.py b/axelrod/tests/unit/test_compute_finite_state_machine_memory.py
new file mode 100644
index 000000000..82e828676
--- /dev/null
+++ b/axelrod/tests/unit/test_compute_finite_state_machine_memory.py
@@ -0,0 +1,350 @@
+"""Tests for Compute FSM Memory."""
+
+import unittest
+
+import axelrod as axl
+from axelrod.compute_finite_state_machine_memory import *
+
+C, D = axl.Action.C, axl.Action.D
+
+
+class TestOrderedMemitTuple(unittest.TestCase):
+    def memits_completely_equal(self, x, y):
+        """If the state and the actions are equal."""
+        return x.state == y.state and x == y
+
+    def memit_tuple_equal(self, x_tuple, y_tuple):
+        """If the memits are the same in the same order."""
+        return self.memits_completely_equal(
+            x_tuple[0], y_tuple[0]
+        ) and self.memits_completely_equal(x_tuple[1], y_tuple[1])
+
+    def test_provided_ascending_order(self):
+        memit_c1c = Memit(C, 1, C)
+        memit_c2c = Memit(C, 2, C)
+
+        actual_tuple = ordered_memit_tuple(memit_c1c, memit_c2c)
+        expected_tuple = (memit_c1c, memit_c2c)
+
+        self.assertTrue(self.memit_tuple_equal(actual_tuple, expected_tuple))
+
+    def test_provided_descending_order(self):
+        memit_c1c = Memit(C, 1, C)
+        memit_c2c = Memit(C, 2, C)
+
+        actual_tuple = ordered_memit_tuple(memit_c2c, memit_c1c)
+        expected_tuple = (memit_c1c, memit_c2c)
+
+        self.assertTrue(self.memit_tuple_equal(actual_tuple, expected_tuple))
+
+    def test_order_on_actions(self):
+        memit_c9c = Memit(C, 9, C)
+        memit_c9d = Memit(C, 9, D)
+
+        actual_tuple = ordered_memit_tuple(memit_c9d, memit_c9c)
+        expected_tuple = (memit_c9c, memit_c9d)
+
+        self.assertTrue(self.memit_tuple_equal(actual_tuple, expected_tuple))
+
+
+class TestGetMemoryFromTransitions(unittest.TestCase):
+    def transitions_to_dict(self, transitions):
+        return {
+            (current_state, input_action): (next_state, output_action)
+            for current_state, input_action, next_state, output_action in transitions
+        }
+
+    def test_cooperator(self):
+        transitions = ((0, C, 0, C), (0, D, 0, C))
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 0)
+
+    def test_tit_for_tat(self):
+        transitions = ((0, C, 0, C), (0, D, 0, D))
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 1)
+
+    def test_two_state_memory_two(self):
+        """All C lead to state 0 and all D lead to state 1.  We make it so
+        that state 0 always plays Cooperator and state 1 always plays
+        Defector.
+        In this case, we must know what state we're in to know how to respond
+        to the opponent's previous action, but we cannot determine the state
+        from our own previous action; we must look at the opponent's action
+        from two turns ago.
+        """
+        transitions = ((0, C, 0, C), (0, D, 1, C), (1, C, 0, D), (1, D, 1, D))
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 2)
+
+    def test_two_state_tft(self):
+        """Same case as above, but this time our own last action tells us which
+        state we're in.  In fact, this strategy is exactly TFT.
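+        (Whenever we play C we move to state 0, and whenever we play D we
+        move to state 1, so our own last action reveals the state.)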
+
+        """
+        transitions = ((0, C, 0, C), (0, D, 1, D), (1, C, 0, C), (1, D, 1, D))
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 1)
+
+    def test_three_state_tft(self):
+        """Tit-for-tat again, but using three states, and a complex web of
+        transitions between them.
+        """
+        transitions = (
+            (0, C, 1, C),
+            (0, D, 1, D),
+            (1, C, 2, C),
+            (1, D, 0, D),
+            (2, C, 0, C),
+            (2, D, 2, D)
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 1)
+
+    def test_two_state_inf_memory(self):
+        """A C will cause the FSM to stay in the same state, and a D will
+        cause it to change states.  It will always respond to a C with a C,
+        and will respond to a D with a C in state 0, but with a D in state 1.
+        So we need to know the state to know how to respond to a D.  But since
+        an arbitrarily long sequence of C/C may occur, we need infinite
+        memory.
+        """
+        transitions = ((0, C, 0, C), (0, D, 1, C), (1, C, 1, C), (1, D, 0, D))
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), float("inf"))
+
+    def test_four_state_memory_two(self):
+        """Same as the two_state_memory_two test above, but we use two copies,
+        stitched together.
+        """
+        transitions = (
+            (0, C, 0, C),
+            (0, D, 1, C),
+            (1, C, 2, D),
+            (1, D, 1, D),
+            (2, C, 2, C),
+            (2, D, 3, C),
+            (3, C, 0, D),
+            (3, D, 3, D),
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 2)
+
+    def test_tit_for_two_tat(self):
+        """This strategy does the same thing until the opponent plays the same
+        action twice in a row; then it responds in kind.  In the FSM
+        implementation, we let states 1 and 2 be the cooperating states, with
+        state 2 being the state after one opponent defection.  States 3 and 4
+        are the defecting states, with state 4 being the state after one
+        opponent cooperation.
+        The memory should be two, because if the opponent's last two moves
+        don't match, then we can look to see what we did on the last move.  If
+        they do match, then we can respond in kind.
+        """
+        transitions = (
+            (1, C, 1, C),
+            (1, D, 2, C),
+            (2, C, 1, C),
+            (2, D, 3, D),
+            (3, C, 4, D),
+            (3, D, 3, D),
+            (4, C, 1, C),
+            (4, D, 3, D),
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 2)
+
+    def test_tit_for_five_tat(self):
+        """Analogous to tit for two tat above.
+        """
+        transitions = (
+            (1, C, 1, C),
+            (1, D, 2, C),
+            (2, C, 1, C),
+            (2, D, 3, C),
+            (3, C, 1, C),
+            (3, D, 4, C),
+            (4, C, 1, C),
+            (4, D, 5, C),
+            (5, C, 1, C),
+            (5, D, 6, D),
+            (6, C, 6, D),
+            (6, D, 7, D),
+            (7, C, 6, D),
+            (7, D, 8, D),
+            (8, C, 6, D),
+            (8, D, 9, D),
+            (9, C, 6, D),
+            (9, D, 10, D),
+            (10, C, 6, D),
+            (10, D, 1, C),
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 5)
+
+    def test_fortress_3(self):
+        """Tests Fortress-3, which defects unless the opponent defected twice
+        in a row, in which case it cooperates and keeps cooperating for as
+        long as the opponent does.
+        We know we're in state 3 if our own previous move was a C.  Otherwise,
+        we C if and only if the opponent's previous two moves were D.  [Unless
+        we were in state 3 last turn, in which case we would have C'd two
+        turns ago.]
+        So the memory should be 2.
+
+        """
+        transitions = (
+            (1, C, 1, D),
+            (1, D, 2, D),
+            (2, C, 1, D),
+            (2, D, 3, C),
+            (3, C, 3, C),
+            (3, D, 1, D),
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 2)
+
+    def test_fortress_4(self):
+        """Tests Fortress-4.  Should have memory=3 by the same logic that
+        gives Fortress-3 memory=2.
+        """
+        transitions = (
+            (1, C, 1, D),
+            (1, D, 2, D),
+            (2, C, 1, D),
+            (2, D, 3, D),
+            (3, C, 1, D),
+            (3, D, 4, C),
+            (4, C, 3, C),
+            (4, D, 1, D),
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 3)
+
+    def test_complex_cooperator(self):
+        """Tests a cooperator with lots of states and transitions.
+        """
+        transitions = (
+            (0, C, 0, C),
+            (0, D, 1, C),
+            (1, C, 2, C),
+            (1, D, 3, C),
+            (2, C, 4, C),
+            (2, D, 3, C),
+            (3, C, 5, C),
+            (3, D, 4, C),
+            (4, C, 2, C),
+            (4, D, 6, C),
+            (5, C, 7, C),
+            (5, D, 3, C),
+            (6, C, 7, C),
+            (6, D, 7, C),
+            (7, C, 8, C),
+            (7, D, 7, C),
+            (8, C, 8, C),
+            (8, D, 6, C),
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(get_memory_from_transitions(trans_dict), 0)
+
+    def test_disconnected_graph(self):
+        """Test two disjoint copies of Fortress3, with an initial_state."""
+        transitions = (
+            (1, C, 1, D),
+            (1, D, 2, D),
+            (2, C, 1, D),
+            (2, D, 3, C),
+            (3, C, 3, C),
+            (3, D, 1, D),
+            (4, C, 4, D),
+            (4, D, 5, D),
+            (5, C, 4, D),
+            (5, D, 6, C),
+            (6, C, 6, C),
+            (6, D, 4, D),
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(
+            get_memory_from_transitions(trans_dict, initial_state=1), 2
+        )
+
+    def test_transient_state(self):
+        """Test a setup where a transient state (one with no incoming
+        transitions) goes into a Fortress3 (and plays D) if the opponent
+        defects, and goes into a Cooperator if the opponent cooperates.
+        The transient state is state 0, Fortress3 starts at state 1, and
+        the Cooperator is state 4.
+        """
+        transitions = (
+            (0, C, 4, C),
+            (0, D, 1, D),
+            (1, C, 1, D),
+            (1, D, 2, D),
+            (2, C, 1, D),
+            (2, D, 3, C),
+            (3, C, 3, C),
+            (3, D, 1, D),
+            (4, C, 4, C),
+            (4, D, 4, C),
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        # If starting in state 4, then treat like Cooperator
+        self.assertEqual(
+            get_memory_from_transitions(trans_dict, initial_state=4), 0
+        )
+        # Start in state 1, then a Fortress3.
+        self.assertEqual(
+            get_memory_from_transitions(trans_dict, initial_state=1), 2
+        )
+
+    def test_infinite_memory_transient_state(self):
+        """A transient state at 0, which goes into either a Cooperator or a
+        TFT.  Because an arbitrarily-long chain of C/C may exist, we would
+        need an infinite memory to determine which state we're in, so that we
+        know how to respond to a D.
+        """
+        transitions = (
+            (0, C, 1, C),
+            (0, D, 2, D),
+            (1, C, 1, C),
+            (1, D, 1, C),
+            (2, C, 2, C),
+            (2, D, 2, D),
+        )
+
+        trans_dict = self.transitions_to_dict(transitions)
+        self.assertEqual(
+            get_memory_from_transitions(trans_dict, initial_state=0),
+            float("inf"),
+        )
+
+        self.assertEqual(
+            get_memory_from_transitions(trans_dict, initial_state=2), 1
+        )
+
+    def test_evolved_fsm_4(self):
+        """This should be infinite memory because of the C/D self-loops at
+        state 2 and state 3.
+ """ + transitions = ( + (0, C, 0, C), + (0, D, 2, D), + (1, C, 3, D), + (1, D, 0, C), + (2, C, 2, D), + (2, D, 1, C), + (3, C, 3, D), + (3, D, 1, D), + ) + + trans_dict = self.transitions_to_dict(transitions) + self.assertEqual(get_memory_from_transitions(trans_dict), float("inf")) + diff --git a/axelrod/tests/unit/test_deterministic_cache.py b/axelrod/tests/unit/test_deterministic_cache.py new file mode 100644 index 000000000..82d11904c --- /dev/null +++ b/axelrod/tests/unit/test_deterministic_cache.py @@ -0,0 +1,111 @@ +import unittest +import os +import pathlib +import pickle + +import axelrod as axl +from axelrod.load_data_ import axl_filename + +C, D = axl.Action.C, axl.Action.D + + +class TestDeterministicCache(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.test_key = (axl.TitForTat(), axl.Defector()) + cls.test_value = [(C, D), (D, D), (D, D)] + save_path = pathlib.Path("test_outputs/test_cache_save.txt") + cls.test_save_file = axl_filename(save_path) + load_path = pathlib.Path("test_outputs/test_cache_load.txt") + cls.test_load_file = axl_filename(load_path) + test_data_to_pickle = {("Tit For Tat", "Defector"): [(C, D), (D, D), (D, D)]} + cls.test_pickle = pickle.dumps(test_data_to_pickle) + + with open(cls.test_load_file, "wb") as f: + f.write(cls.test_pickle) + + @classmethod + def tearDownClass(cls): + os.remove(cls.test_save_file) + os.remove(cls.test_load_file) + + def setUp(self): + self.cache = axl.DeterministicCache() + + def test_basic_init(self): + self.assertTrue(self.cache.mutable) + + def test_init_from_file(self): + loaded_cache = axl.DeterministicCache(file_name=self.test_load_file) + self.assertEqual(loaded_cache[self.test_key], self.test_value) + + def test_setitem(self): + self.cache[self.test_key] = self.test_value + self.assertEqual(self.cache[self.test_key], self.test_value) + + def test_setitem_invalid_key_not_tuple(self): + invalid_key = "test" + with self.assertRaises(ValueError): + self.cache[invalid_key] = self.test_value + + def test_setitem_invalid_key_first_two_elements_not_player(self): + invalid_key = ("test", "test") + with self.assertRaises(ValueError): + self.cache[invalid_key] = self.test_value + + invalid_key = (axl.TitForTat(), "test") + with self.assertRaises(ValueError): + self.cache[invalid_key] = self.test_value + + invalid_key = ("test", axl.TitForTat()) + with self.assertRaises(ValueError): + self.cache[invalid_key] = self.test_value + + def test_setitem_invalid_key_too_many_players(self): + invalid_key = (axl.TitForTat(), axl.TitForTat(), axl.TitForTat()) + with self.assertRaises(ValueError): + self.cache[invalid_key] = self.test_value + + def test_setitem_invalid_key_stochastic_player(self): + invalid_key = (axl.Random(), axl.TitForTat()) + with self.assertRaises(ValueError): + self.cache[invalid_key] = self.test_value + + invalid_key = (axl.TitForTat(), axl.Random()) + with self.assertRaises(ValueError): + self.cache[invalid_key] = self.test_value + + def test_setitem_invalid_value_not_list(self): + with self.assertRaises(ValueError): + self.cache[self.test_key] = 5 + + def test_setitem_with_immutable_cache(self): + self.cache.mutable = False + with self.assertRaises(ValueError): + self.cache[self.test_key] = self.test_value + + def test_save(self): + self.cache[self.test_key] = self.test_value + self.cache.save(self.test_save_file) + with open(self.test_save_file, "rb") as f: + text = f.read() + self.assertEqual(text, self.test_pickle) + + def test_load(self): + self.cache.load(self.test_load_file) + 
self.assertEqual(self.cache[self.test_key], self.test_value)
+
+    def test_load_error_for_incorrect_format(self):
+        path = pathlib.Path("test_outputs/test.cache")
+        filename = axl_filename(path)
+        with open(filename, "wb") as io:
+            pickle.dump(range(5), io)
+
+        with self.assertRaises(ValueError):
+            self.cache.load(filename)
+
+    def test_del_item(self):
+        self.cache[self.test_key] = self.test_value
+        self.assertTrue(self.test_key in self.cache)
+        del self.cache[self.test_key]
+        self.assertFalse(self.test_key in self.cache)
diff --git a/axelrod/tests/unit/test_ecosystem.py b/axelrod/tests/unit/test_ecosystem.py
new file mode 100644
index 000000000..ec552b542
--- /dev/null
+++ b/axelrod/tests/unit/test_ecosystem.py
@@ -0,0 +1,102 @@
+"""Tests for the Ecosystem class."""
+
+import unittest
+
+import axelrod as axl
+
+
+class TestEcosystem(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        cooperators = axl.IpdTournament(
+            players=[
+                axl.Cooperator(),
+                axl.Cooperator(),
+                axl.Cooperator(),
+                axl.Cooperator(),
+            ]
+        )
+        defector_wins = axl.IpdTournament(
+            players=[
+                axl.Cooperator(),
+                axl.Cooperator(),
+                axl.Cooperator(),
+                axl.Defector(),
+            ]
+        )
+        cls.res_cooperators = cooperators.play()
+        cls.res_defector_wins = defector_wins.play()
+
+    def test_default_population_sizes(self):
+        eco = axl.Ecosystem(self.res_cooperators)
+        pops = eco.population_sizes
+        self.assertEqual(eco.num_players, 4)
+        self.assertEqual(len(pops), 1)
+        self.assertEqual(len(pops[0]), 4)
+        self.assertAlmostEqual(sum(pops[0]), 1.0)
+        self.assertEqual(list(set(pops[0])), [0.25])
+
+    def test_non_default_population_sizes(self):
+        eco = axl.Ecosystem(
+            self.res_cooperators, population=[0.7, 0.25, 0.03, 0.02]
+        )
+        pops = eco.population_sizes
+        self.assertEqual(eco.num_players, 4)
+        self.assertEqual(len(pops), 1)
+        self.assertEqual(len(pops[0]), 4)
+        self.assertAlmostEqual(sum(pops[0]), 1.0)
+        self.assertEqual(pops[0], [0.7, 0.25, 0.03, 0.02])
+
+    def test_population_normalization(self):
+        eco = axl.Ecosystem(self.res_cooperators, population=[70, 25, 3, 2])
+        pops = eco.population_sizes
+        self.assertEqual(eco.num_players, 4)
+        self.assertEqual(len(pops), 1)
+        self.assertEqual(len(pops[0]), 4)
+        self.assertAlmostEqual(sum(pops[0]), 1.0)
+        self.assertEqual(pops[0], [0.7, 0.25, 0.03, 0.02])
+
+    def test_results_and_population_of_different_sizes(self):
+        self.assertRaises(
+            TypeError,
+            axl.Ecosystem,
+            self.res_cooperators,
+            population=[0.7, 0.2, 0.03, 0.1, 0.1],
+        )
+
+    def test_negative_populations(self):
+        self.assertRaises(
+            TypeError,
+            axl.Ecosystem,
+            self.res_cooperators,
+            population=[0.7, -0.2, 0.03, 0.2],
+        )
+
+    def test_fitness_function(self):
+        fitness = lambda p: 2 * p
+        eco = axl.Ecosystem(self.res_cooperators, fitness=fitness)
+        self.assertEqual(eco.fitness(10), 20)
+
+    def test_cooperators_are_stable_over_time(self):
+        eco = axl.Ecosystem(self.res_cooperators)
+        eco.reproduce(100)
+        pops = eco.population_sizes
+        self.assertEqual(len(pops), 101)
+        for p in pops:
+            self.assertEqual(len(p), 4)
+            self.assertEqual(sum(p), 1.0)
+            self.assertEqual(list(set(p)), [0.25])
+
+    def test_defector_wins_with_only_cooperators(self):
+        eco = axl.Ecosystem(self.res_defector_wins)
+        eco.reproduce(1000)
+        pops = eco.population_sizes
+        self.assertEqual(len(pops), 1001)
+        for p in pops:
+            self.assertEqual(len(p), 4)
+            self.assertAlmostEqual(sum(p), 1.0)
+        last = pops[-1]
+        self.assertAlmostEqual(last[0], 0.0)
+        self.assertAlmostEqual(last[1], 0.0)
+        self.assertAlmostEqual(last[2], 0.0)
+        self.assertAlmostEqual(last[3],
1.0) diff --git a/axelrod/tests/unit/test_eigen.py b/axelrod/tests/unit/test_eigen.py new file mode 100644 index 000000000..36f0bf5b9 --- /dev/null +++ b/axelrod/tests/unit/test_eigen.py @@ -0,0 +1,52 @@ +"""Test for eigen.py.""" + +import unittest + +import numpy +from numpy.testing import assert_array_almost_equal + +from axelrod.eigen import _normalise, principal_eigenvector + + + +class FunctionCases(unittest.TestCase): + def test_identity_matrices(self): + for size in range(2, 6): + mat = numpy.identity(size) + evector, evalue = principal_eigenvector(mat) + self.assertAlmostEqual(evalue, 1) + assert_array_almost_equal(evector, _normalise(numpy.ones(size))) + + def test_zero_matrix(self): + mat = numpy.array([[0, 0], [0, 0]]) + evector, evalue = principal_eigenvector(mat) + self.assertTrue(numpy.isnan(evalue)) + self.assertTrue(numpy.isnan(evector[0])) + self.assertTrue(numpy.isnan(evector[1])) + + def test_2x2_matrix(self): + mat = numpy.array([[2, 1], [1, 2]]) + evector, evalue = principal_eigenvector(mat) + self.assertAlmostEqual(evalue, 3) + assert_array_almost_equal(evector, numpy.dot(mat, evector) / evalue) + assert_array_almost_equal(evector, _normalise(numpy.array([1, 1]))) + + def test_3x3_matrix(self): + mat = numpy.array([[1, 2, 0], [-2, 1, 2], [1, 3, 1]]) + evector, evalue = principal_eigenvector( + mat, maximum_iterations=None, max_error=1e-10 + ) + self.assertAlmostEqual(evalue, 3) + assert_array_almost_equal(evector, numpy.dot(mat, evector) / evalue) + assert_array_almost_equal(evector, _normalise(numpy.array([0.5, 0.5, 1]))) + + def test_4x4_matrix(self): + mat = numpy.array([[2, 0, 0, 0], [1, 2, 0, 0], [0, 1, 3, 0], [0, 0, 1, 3]]) + evector, evalue = principal_eigenvector( + mat, maximum_iterations=None, max_error=1e-10 + ) + self.assertAlmostEqual(evalue, 3, places=3) + assert_array_almost_equal(evector, numpy.dot(mat, evector) / evalue) + assert_array_almost_equal( + evector, _normalise(numpy.array([0, 0, 0, 1])), decimal=4 + ) diff --git a/axelrod/tests/unit/test_filters.py b/axelrod/tests/unit/test_filters.py new file mode 100644 index 000000000..5b6816b53 --- /dev/null +++ b/axelrod/tests/unit/test_filters.py @@ -0,0 +1,170 @@ +import unittest + +import axelrod as axl +from axelrod.player import IpdPlayer +from axelrod.strategies._filters import * + +from hypothesis import example, given, settings +from hypothesis.strategies import integers + + +class TestFilters(unittest.TestCase): + class TestStrategy(IpdPlayer): + classifier = { + "stochastic": True, + "inspects_source": False, + "memory_depth": 10, + "makes_use_of": ["game", "length"], + } + + def test_equality_filter(self): + self.assertTrue( + passes_operator_filter(self.TestStrategy, "stochastic", True, operator.eq) + ) + self.assertFalse( + passes_operator_filter(self.TestStrategy, "stochastic", False, operator.eq) + ) + self.assertTrue( + passes_operator_filter( + self.TestStrategy, "inspects_source", False, operator.eq + ) + ) + self.assertFalse( + passes_operator_filter( + self.TestStrategy, "inspects_source", True, operator.eq + ) + ) + + @given( + smaller=integers(min_value=0, max_value=9), + larger=integers(min_value=11, max_value=100), + ) + @example(smaller=0, larger=float("inf")) + @settings(max_examples=5) + def test_inequality_filter(self, smaller, larger): + self.assertTrue( + passes_operator_filter( + self.TestStrategy, "memory_depth", smaller, operator.ge + ) + ) + self.assertTrue( + passes_operator_filter( + self.TestStrategy, "memory_depth", larger, operator.le + ) + ) + self.assertFalse( + 
passes_operator_filter( + self.TestStrategy, "memory_depth", smaller, operator.le + ) + ) + self.assertFalse( + passes_operator_filter( + self.TestStrategy, "memory_depth", larger, operator.ge + ) + ) + + def test_list_filter(self): + self.assertTrue( + passes_in_list_filter(self.TestStrategy, "makes_use_of", ["game"]) + ) + self.assertTrue( + passes_in_list_filter(self.TestStrategy, "makes_use_of", ["length"]) + ) + self.assertTrue( + passes_in_list_filter(self.TestStrategy, "makes_use_of", ["game", "length"]) + ) + self.assertFalse( + passes_in_list_filter(self.TestStrategy, "makes_use_of", "test") + ) + + @given( + smaller=integers(min_value=0, max_value=9), + larger=integers(min_value=11, max_value=100), + ) + @example(smaller=0, larger=float("inf")) + @settings(max_examples=5) + def test_passes_filterset(self, smaller, larger): + + full_passing_filterset_1 = { + "stochastic": True, + "inspects_source": False, + "min_memory_depth": smaller, + "max_memory_depth": larger, + "makes_use_of": ["game", "length"], + } + + full_passing_filterset_2 = { + "stochastic": True, + "inspects_source": False, + "memory_depth": 10, + "makes_use_of": ["game", "length"], + } + + sparse_passing_filterset = { + "stochastic": True, + "inspects_source": False, + "makes_use_of": ["length"], + } + + full_failing_filterset = { + "stochastic": False, + "inspects_source": False, + "min_memory_depth": smaller, + "max_memory_depth": larger, + "makes_use_of": ["length"], + } + + sparse_failing_filterset = { + "stochastic": False, + "inspects_source": False, + "min_memory_depth": smaller, + } + + self.assertTrue(passes_filterset(self.TestStrategy, full_passing_filterset_1)) + self.assertTrue(passes_filterset(self.TestStrategy, full_passing_filterset_2)) + self.assertTrue(passes_filterset(self.TestStrategy, sparse_passing_filterset)) + self.assertFalse(passes_filterset(self.TestStrategy, full_failing_filterset)) + self.assertFalse(passes_filterset(self.TestStrategy, sparse_failing_filterset)) + + def test_filtered_strategies(self): + class StochasticTestStrategy(IpdPlayer): + classifier = { + "stochastic": True, + "memory_depth": float("inf"), + "makes_use_of": [], + } + + class MemoryDepth2TestStrategy(IpdPlayer): + classifier = {"stochastic": False, "memory_depth": 2, "makes_use_of": []} + + class UsesLengthTestStrategy(IpdPlayer): + classifier = { + "stochastic": True, + "memory_depth": float("inf"), + "makes_use_of": ["length"], + } + + strategies = [ + StochasticTestStrategy, + MemoryDepth2TestStrategy, + UsesLengthTestStrategy, + ] + + stochastic_filterset = {"stochastic": True} + + deterministic_filterset = {"stochastic": False} + + uses_length_filterset = {"stochastic": True, "makes_use_of": ["length"]} + + self.assertEqual( + axl.filtered_strategies(stochastic_filterset, strategies), + [StochasticTestStrategy, UsesLengthTestStrategy], + ) + self.assertEqual( + axl.filtered_strategies(deterministic_filterset, strategies), + [MemoryDepth2TestStrategy], + ) + self.assertEqual( + axl.filtered_strategies(uses_length_filterset, strategies), + [UsesLengthTestStrategy], + ) diff --git a/axelrod/tests/unit/test_fingerprint.py b/axelrod/tests/unit/test_fingerprint.py new file mode 100644 index 000000000..780bfd3bb --- /dev/null +++ b/axelrod/tests/unit/test_fingerprint.py @@ -0,0 +1,516 @@ +import unittest +from unittest.mock import patch + +import os +from tempfile import mkstemp +import matplotlib.pyplot +import numpy as np +import pathlib + +import axelrod as axl +from axelrod.fingerprint import AshlockFingerprint, 
Point, TransitiveFingerprint +from axelrod.load_data_ import axl_filename +from axelrod.tests.property import strategy_lists + +from hypothesis import given, settings + + +C, D = axl.Action.C, axl.Action.D + + +class RecordedMksTemp(object): + """This object records all results from RecordedMksTemp.mkstemp. It's for + testing that temp files are created and then destroyed.""" + + record = [] + + @staticmethod + def mkstemp(*args, **kwargs): + temp_file_info = mkstemp(*args, **kwargs) + RecordedMksTemp.record.append(temp_file_info) + return temp_file_info + + @staticmethod + def reset_record(): + RecordedMksTemp.record = [] + + +class TestFingerprint(unittest.TestCase): + + points_when_using_half_step = [ + (0.0, 0.0), + (0.0, 0.5), + (0.0, 1.0), + (0.5, 0.0), + (0.5, 0.5), + (0.5, 1.0), + (1.0, 0.0), + (1.0, 0.5), + (1.0, 1.0), + ] + edges_when_using_half_step = [ + (0, 1), + (0, 2), + (0, 3), + (0, 4), + (0, 5), + (0, 6), + (0, 7), + (0, 8), + (0, 9), + ] + + def test_default_init(self): + fingerprint = AshlockFingerprint(axl.WinStayLoseShift) + self.assertEqual(fingerprint.strategy, axl.WinStayLoseShift) + self.assertEqual(fingerprint.probe, axl.TitForTat) + + def test_init_with_explicit_probe(self): + fingerprint = AshlockFingerprint(axl.WinStayLoseShift, axl.Random) + self.assertEqual(fingerprint.strategy, axl.WinStayLoseShift) + self.assertEqual(fingerprint.probe, axl.Random) + + def test_init_with_instances(self): + player = axl.WinStayLoseShift() + fingerprint = AshlockFingerprint(player) + self.assertEqual(fingerprint.strategy, player) + self.assertEqual(fingerprint.probe, axl.TitForTat) + + probe = axl.Random() + fingerprint = AshlockFingerprint(axl.WinStayLoseShift, probe) + self.assertEqual(fingerprint.strategy, axl.WinStayLoseShift) + self.assertEqual(fingerprint.probe, probe) + + fingerprint = AshlockFingerprint(player, probe) + self.assertEqual(fingerprint.strategy, player) + self.assertEqual(fingerprint.probe, probe) + + def test_fingerprint_player(self): + af = AshlockFingerprint(axl.Cooperator()) + af.fingerprint(turns=5, repetitions=3, step=0.5, progress_bar=False) + + self.assertEqual(af.step, 0.5) + self.assertEqual(af.points, self.points_when_using_half_step) + self.assertEqual(af.spatial_tournament.turns, 5) + self.assertEqual(af.spatial_tournament.repetitions, 3) + self.assertEqual(af.spatial_tournament.edges, self.edges_when_using_half_step) + + # The first player is the fingerprinted one, the rest are probes. 
+        self.assertIsInstance(af.spatial_tournament.players[0], axl.Cooperator)
+        self.assertEqual(len(af.spatial_tournament.players), 10)
+        probes = af.spatial_tournament.players[1:]
+        self.assertEqual(len(probes), len(af.points))
+        self.assertEqual(
+            str(probes[0]), "Joss-Ann Tit For Tat: (0.0, 0.0)"
+        )  # x + y < 1
+        self.assertEqual(
+            str(probes[2]), "Dual Joss-Ann Tit For Tat: (1.0, 0.0)"
+        )  # x + y = 1
+        self.assertEqual(
+            str(probes[8]), "Dual Joss-Ann Tit For Tat: (0.0, 0.0)"
+        )  # x + y > 1
+
+    def test_fingerprint_explicit_probe(self):
+        af = AshlockFingerprint(axl.TitForTat(), probe=axl.Random(p=0.1))
+        af.fingerprint(turns=10, repetitions=2, step=0.5, progress_bar=False)
+
+        probes = af.spatial_tournament.players[1:]
+        self.assertEqual(
+            str(probes[0]), "Joss-Ann Random: 0.1: (0.0, 0.0)"
+        )  # x + y < 1
+        self.assertEqual(
+            str(probes[2]), "Dual Joss-Ann Random: 0.1: (1.0, 0.0)"
+        )  # x + y = 1
+        self.assertEqual(
+            str(probes[8]), "Dual Joss-Ann Random: 0.1: (0.0, 0.0)"
+        )  # x + y > 1
+
+    def test_fingerprint_interactions_cooperator(self):
+        af = AshlockFingerprint(axl.Cooperator())
+        af.fingerprint(turns=5, repetitions=3, step=0.5, progress_bar=False)
+
+        # The keys are edges between players, values are repetitions.
+        self.assertCountEqual(
+            af.interactions.keys(),
+            [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9)],
+        )
+        self.assertEqual(len(af.interactions.values()), 9)
+
+        # Each edge has 3 repetitions with 5 turns each.
+        repetitions = af.interactions.values()
+        self.assertTrue(all(len(rep) == 3 for rep in repetitions))
+        for iturn in range(3):
+            self.assertTrue(all(len(rep[iturn]) == 5 for rep in repetitions))
+
+        # Interactions are invariant for any points where y is zero, and
+        # the score should be the maximum possible.
+        # Player 1 is Point(0.0, 0.0).
+        # Player 4 is Point(0.5, 0.0).
+        # Player 7 is Point(1.0, 0.0).
+        for iplayer in (1, 4, 7):
+            for turns in af.interactions[(0, iplayer)]:
+                self.assertEqual(len(turns), 5)
+                self.assertTrue(all(t == (C, C) for t in turns))
+        self.assertEqual(af.data[Point(0.0, 0.0)], 3.0)
+        self.assertEqual(af.data[Point(0.5, 0.0)], 3.0)
+        self.assertEqual(af.data[Point(1.0, 0.0)], 3.0)
+
+        # Player 3 is Point(0.0, 1.0), which means constant defection
+        # from the probe.  But the Cooperator doesn't change, so the score
+        # is zero.
+        for turns in af.interactions[(0, 3)]:
+            self.assertEqual(len(turns), 5)
+            self.assertTrue(all(t == (C, D) for t in turns))
+        self.assertEqual(af.data[Point(0.0, 1.0)], 0.0)
+
+    def test_fingerprint_interactions_titfortat(self):
+        af = AshlockFingerprint(axl.TitForTat())
+        af.fingerprint(turns=5, repetitions=3, step=0.5, progress_bar=False)
+
+        # Tit-for-Tats will always cooperate if left to their own devices,
+        # so interactions are invariant for any points where y is zero,
+        # and the score should be the maximum possible.
+        # Player 1 is Point(0.0, 0.0).
+        # Player 4 is Point(0.5, 0.0).
+        # Player 7 is Point(1.0, 0.0).
+        for iplayer in (1, 4, 7):
+            for turns in af.interactions[(0, iplayer)]:
+                self.assertEqual(len(turns), 5)
+                self.assertTrue(all(t == (C, C) for t in turns))
+        self.assertEqual(af.data[Point(0.0, 0.0)], 3.0)
+        self.assertEqual(af.data[Point(0.5, 0.0)], 3.0)
+        self.assertEqual(af.data[Point(1.0, 0.0)], 3.0)
+
+        # Player 3 is Point(0.0, 1.0), which implies defection after the
+        # first turn since Tit-for-Tat is playing, and a score of 0.8
+        # since we get zero on the first turn and one point per turn
+        # thereafter.
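+        # Presumably with the default game (R, P, S, T) = (3, 1, 0, 5):
+        # (0 + 1 + 1 + 1 + 1) / 5 = 0.8.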
+ for turns in af.interactions[(0, 3)]: + self.assertEqual(len(turns), 5) + self.assertTrue(all(t == (D, D) for t in turns[1:])) + self.assertAlmostEqual(af.data[Point(0.0, 1.0)], 0.8) + + def test_progress_bar_fingerprint(self): + af = AshlockFingerprint(axl.TitForTat) + data = af.fingerprint(turns=10, repetitions=2, step=0.5, progress_bar=True) + self.assertEqual(sorted(data.keys()), self.points_when_using_half_step) + + @patch("axelrod.fingerprint.mkstemp", RecordedMksTemp.mkstemp) + def test_temp_file_creation(self): + + RecordedMksTemp.reset_record() + af = AshlockFingerprint(axl.TitForTat) + path = pathlib.Path("test_outputs/test_fingerprint.csv") + filename = axl_filename(path) + + self.assertEqual(RecordedMksTemp.record, []) + + # Temp file is created and destroyed. + af.fingerprint( + turns=1, repetitions=1, step=0.5, progress_bar=False, filename=None + ) + + self.assertEqual(len(RecordedMksTemp.record), 1) + filename = RecordedMksTemp.record[0][1] + self.assertIsInstance(filename, str) + self.assertNotEqual(filename, "") + self.assertFalse(os.path.isfile(filename)) + + def test_fingerprint_with_filename(self): + path = pathlib.Path("test_outputs/test_fingerprint.csv") + filename = axl_filename(path) + af = AshlockFingerprint(axl.TitForTat) + af.fingerprint( + turns=1, repetitions=1, step=0.5, progress_bar=False, filename=filename + ) + with open(filename, "r") as out: + data = out.read() + self.assertEqual(len(data.split("\n")), 20) + + def test_serial_fingerprint(self): + af = AshlockFingerprint(axl.TitForTat) + data = af.fingerprint(turns=10, repetitions=2, step=0.5, progress_bar=False) + edge_keys = sorted(list(af.interactions.keys())) + coord_keys = sorted(list(data.keys())) + self.assertEqual(af.step, 0.5) + self.assertEqual(edge_keys, self.edges_when_using_half_step) + self.assertEqual(coord_keys, self.points_when_using_half_step) + + def test_parallel_fingerprint(self): + af = AshlockFingerprint(axl.TitForTat) + af.fingerprint( + turns=10, repetitions=2, step=0.5, processes=2, progress_bar=False + ) + edge_keys = sorted(list(af.interactions.keys())) + coord_keys = sorted(list(af.data.keys())) + self.assertEqual(af.step, 0.5) + self.assertEqual(edge_keys, self.edges_when_using_half_step) + self.assertEqual(coord_keys, self.points_when_using_half_step) + + def test_plot_data(self): + axl.seed(0) # Fingerprinting is a random process. + af = AshlockFingerprint(axl.Cooperator()) + af.fingerprint(turns=5, repetitions=3, step=0.5, progress_bar=False) + + reshaped_data = np.array([[0.0, 0.0, 0.0], [2.0, 1.0, 2.0], [3.0, 3.0, 3.0]]) + plotted_data = af.plot().gca().images[0].get_array() + np.testing.assert_allclose(plotted_data, reshaped_data) + + def test_plot_figure(self): + af = AshlockFingerprint(axl.WinStayLoseShift, axl.TitForTat) + af.fingerprint(turns=10, repetitions=2, step=0.25, progress_bar=False) + p = af.plot() + self.assertIsInstance(p, matplotlib.pyplot.Figure) + q = af.plot(cmap="jet") + self.assertIsInstance(q, matplotlib.pyplot.Figure) + r = af.plot(interpolation="bicubic") + self.assertIsInstance(r, matplotlib.pyplot.Figure) + t = af.plot(title="Title") + self.assertIsInstance(t, matplotlib.pyplot.Figure) + u = af.plot(colorbar=False) + self.assertIsInstance(u, matplotlib.pyplot.Figure) + v = af.plot(labels=False) + self.assertIsInstance(v, matplotlib.pyplot.Figure) + + def test_wsls_fingerprint(self): + axl.seed(0) # Fingerprinting is a random process. 
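+        # The expected values below depend on this seed; they would change
+        # under a different seeding scheme.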
+ test_data = { + Point(x=0.0, y=0.0): 3.000, + Point(x=0.0, y=0.25): 1.710, + Point(x=0.0, y=0.5): 1.440, + Point(x=0.0, y=0.75): 1.080, + Point(x=0.0, y=1.0): 0.500, + Point(x=0.25, y=0.0): 3.000, + Point(x=0.25, y=0.25): 2.280, + Point(x=0.25, y=0.5): 1.670, + Point(x=0.25, y=0.75): 1.490, + Point(x=0.25, y=1.0): 0.770, + Point(x=0.5, y=0.0): 3.000, + Point(x=0.5, y=0.25): 2.740, + Point(x=0.5, y=0.5): 2.240, + Point(x=0.5, y=0.75): 1.730, + Point(x=0.5, y=1.0): 1.000, + Point(x=0.75, y=0.0): 3.000, + Point(x=0.75, y=0.25): 3.520, + Point(x=0.75, y=0.5): 2.830, + Point(x=0.75, y=0.75): 1.750, + Point(x=0.75, y=1.0): 1.250, + Point(x=1.0, y=0.0): 3.000, + Point(x=1.0, y=0.25): 4.440, + Point(x=1.0, y=0.5): 4.410, + Point(x=1.0, y=0.75): 4.440, + Point(x=1.0, y=1.0): 1.300, + } + af = axl.AshlockFingerprint(axl.WinStayLoseShift(), axl.TitForTat) + data = af.fingerprint(turns=50, repetitions=2, step=0.25, progress_bar=False) + + for key, value in data.items(): + self.assertAlmostEqual(value, test_data[key], places=2) + + def test_tft_fingerprint(self): + axl.seed(0) # Fingerprinting is a random process. + test_data = { + Point(x=0.0, y=0.0): 3.000, + Point(x=0.0, y=0.25): 1.820, + Point(x=0.0, y=0.5): 1.130, + Point(x=0.0, y=0.75): 1.050, + Point(x=0.0, y=1.0): 0.980, + Point(x=0.25, y=0.0): 3.000, + Point(x=0.25, y=0.25): 2.440, + Point(x=0.25, y=0.5): 1.770, + Point(x=0.25, y=0.75): 1.700, + Point(x=0.25, y=1.0): 1.490, + Point(x=0.5, y=0.0): 3.000, + Point(x=0.5, y=0.25): 2.580, + Point(x=0.5, y=0.5): 2.220, + Point(x=0.5, y=0.75): 2.000, + Point(x=0.5, y=1.0): 1.940, + Point(x=0.75, y=0.0): 3.000, + Point(x=0.75, y=0.25): 2.730, + Point(x=0.75, y=0.5): 2.290, + Point(x=0.75, y=0.75): 2.310, + Point(x=0.75, y=1.0): 2.130, + Point(x=1.0, y=0.0): 3.000, + Point(x=1.0, y=0.25): 2.790, + Point(x=1.0, y=0.5): 2.480, + Point(x=1.0, y=0.75): 2.310, + Point(x=1.0, y=1.0): 2.180, + } + + af = axl.AshlockFingerprint(axl.TitForTat(), axl.TitForTat) + data = af.fingerprint(turns=50, repetitions=2, step=0.25, progress_bar=False) + + for key, value in data.items(): + self.assertAlmostEqual(value, test_data[key], places=2) + + def test_majority_fingerprint(self): + axl.seed(0) # Fingerprinting is a random process. 
+ test_data = { + Point(x=0.0, y=0.0): 3.000, + Point(x=0.0, y=0.25): 1.940, + Point(x=0.0, y=0.5): 1.130, + Point(x=0.0, y=0.75): 1.030, + Point(x=0.0, y=1.0): 0.980, + Point(x=0.25, y=0.0): 3.000, + Point(x=0.25, y=0.25): 2.130, + Point(x=0.25, y=0.5): 1.940, + Point(x=0.25, y=0.75): 2.060, + Point(x=0.25, y=1.0): 1.940, + Point(x=0.5, y=0.0): 3.000, + Point(x=0.5, y=0.25): 2.300, + Point(x=0.5, y=0.5): 2.250, + Point(x=0.5, y=0.75): 2.420, + Point(x=0.5, y=1.0): 2.690, + Point(x=0.75, y=0.0): 3.000, + Point(x=0.75, y=0.25): 2.400, + Point(x=0.75, y=0.5): 2.010, + Point(x=0.75, y=0.75): 2.390, + Point(x=0.75, y=1.0): 2.520, + Point(x=1.0, y=0.0): 3.000, + Point(x=1.0, y=0.25): 2.360, + Point(x=1.0, y=0.5): 1.740, + Point(x=1.0, y=0.75): 2.260, + Point(x=1.0, y=1.0): 2.260, + } + + af = axl.AshlockFingerprint(axl.GoByMajority, axl.TitForTat) + data = af.fingerprint(turns=50, repetitions=2, step=0.25, progress_bar=False) + + for key, value in data.items(): + self.assertAlmostEqual(value, test_data[key], places=2) + + @given(strategy_pair=strategy_lists(min_size=2, max_size=2)) + @settings(max_examples=5) + def test_pair_fingerprints(self, strategy_pair): + """ + A test to check that we can fingerprint + with any two given strategies or instances + """ + strategy, probe = strategy_pair + af = AshlockFingerprint(strategy, probe) + data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False) + self.assertIsInstance(data, dict) + + af = AshlockFingerprint(strategy(), probe) + data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False) + self.assertIsInstance(data, dict) + + af = AshlockFingerprint(strategy, probe()) + data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False) + self.assertIsInstance(data, dict) + + af = AshlockFingerprint(strategy(), probe()) + data = af.fingerprint(turns=2, repetitions=2, step=0.5, progress_bar=False) + self.assertIsInstance(data, dict) + + +class TestTransitiveFingerprint(unittest.TestCase): + def test_init(self): + player = axl.TitForTat() + fingerprint = axl.TransitiveFingerprint(strategy=player) + self.assertEqual(fingerprint.strategy, player) + self.assertEqual( + fingerprint.opponents, [axl.Random(p) for p in np.linspace(0, 1, 50)] + ) + + def test_init_with_opponents(self): + player = axl.TitForTat() + opponents = [s() for s in axl.demo_strategies] + fingerprint = axl.TransitiveFingerprint(strategy=player, opponents=opponents) + self.assertEqual(fingerprint.strategy, player) + self.assertEqual(fingerprint.opponents, opponents) + + def test_init_with_not_default_number(self): + player = axl.TitForTat() + number_of_opponents = 10 + fingerprint = axl.TransitiveFingerprint( + strategy=player, number_of_opponents=number_of_opponents + ) + self.assertEqual(fingerprint.strategy, player) + self.assertEqual( + fingerprint.opponents, [axl.Random(p) for p in np.linspace(0, 1, 10)] + ) + + def test_fingerprint_with_filename(self): + path = pathlib.Path("test_outputs/test_fingerprint.csv") + filename = axl_filename(path) + strategy = axl.TitForTat() + tf = TransitiveFingerprint(strategy) + tf.fingerprint(turns=1, repetitions=1, progress_bar=False, filename=filename) + with open(filename, "r") as out: + data = out.read() + self.assertEqual(len(data.split("\n")), 102) + + def test_serial_fingerprint(self): + strategy = axl.TitForTat() + tf = TransitiveFingerprint(strategy) + path = pathlib.Path("test_outputs/test_fingerprint.csv") + tf.fingerprint( + repetitions=1, + progress_bar=False, + filename=axl_filename(path), + 
) + self.assertEqual(tf.data.shape, (50, 50)) + + def test_parallel_fingerprint(self): + strategy = axl.TitForTat() + tf = TransitiveFingerprint(strategy) + tf.fingerprint(repetitions=1, progress_bar=False, processes=2) + + self.assertEqual(tf.data.shape, (50, 50)) + + def test_analyse_cooperation_ratio(self): + tf = TransitiveFingerprint(axl.TitForTat) + path = pathlib.Path("test_outputs/test_fingerprint.csv") + filename = axl_filename(path) + with open(filename, "w") as f: + f.write( + """Interaction index,Player index,Opponent index,Repetition,IpdPlayer name,Opponent name,Actions +0,0,1,0,IpdPlayer0,IpdPlayer1,CCC +0,1,0,0,IpdPlayer1,IpdPlayer0,DDD +1,0,1,1,IpdPlayer0,IpdPlayer1,CCC +1,1,0,1,IpdPlayer1,IpdPlayer0,DDD +2,0,2,0,IpdPlayer0,IpdPlayer2,CCD +2,2,0,0,IpdPlayer2,IpdPlayer0,DDD +3,0,2,1,IpdPlayer0,IpdPlayer2,CCC +3,2,0,1,IpdPlayer2,IpdPlayer0,DDD +4,0,3,0,IpdPlayer0,IpdPlayer3,CCD +4,3,0,0,IpdPlayer3,IpdPlayer0,DDD +5,0,3,1,IpdPlayer0,IpdPlayer3,DCC +5,3,0,1,IpdPlayer3,IpdPlayer0,DDD +6,0,4,2,IpdPlayer0,IpdPlayer4,DDD +6,4,0,2,IpdPlayer4,IpdPlayer0,DDD +7,0,4,3,IpdPlayer0,IpdPlayer4,DDD +7,4,0,3,IpdPlayer4,IpdPlayer0,DDD""" + ) + data = tf.analyse_cooperation_ratio(filename) + expected_data = np.array( + [[1, 1, 1], [1, 1, 1 / 2], [1 / 2, 1, 1 / 2], [0, 0, 0]] + ) + self.assertTrue(np.array_equal(data, expected_data)) + + def test_plot(self): + """ + Test that plot is created with various arguments. + """ + tf = TransitiveFingerprint(axl.TitForTat) + tf.fingerprint(turns=10, repetitions=2, progress_bar=False) + p = tf.plot() + self.assertIsInstance(p, matplotlib.pyplot.Figure) + p = tf.plot(cmap="jet") + self.assertIsInstance(p, matplotlib.pyplot.Figure) + p = tf.plot(interpolation="bicubic") + self.assertIsInstance(p, matplotlib.pyplot.Figure) + p = tf.plot(title="Title") + self.assertIsInstance(p, matplotlib.pyplot.Figure) + p = tf.plot(colorbar=False) + self.assertIsInstance(p, matplotlib.pyplot.Figure) + p = tf.plot(labels=False) + self.assertIsInstance(p, matplotlib.pyplot.Figure) + p = tf.plot(display_names=True) + self.assertIsInstance(p, matplotlib.pyplot.Figure) + + def test_plot_with_axis(self): + fig, axarr = matplotlib.pyplot.subplots(2, 2) + tf = TransitiveFingerprint(axl.TitForTat) + tf.fingerprint(turns=10, repetitions=2, progress_bar=False) + p = tf.plot(ax=axarr[0, 0]) + self.assertIsInstance(p, matplotlib.pyplot.Figure) diff --git a/axelrod/tests/unit/test_game.py b/axelrod/tests/unit/test_game.py new file mode 100644 index 000000000..60d50ac76 --- /dev/null +++ b/axelrod/tests/unit/test_game.py @@ -0,0 +1,80 @@ +import unittest + +import axelrod as axl +from axelrod.tests.property import games + +from hypothesis import given, settings +from hypothesis.strategies import integers + +C, D = axl.Action.C, axl.Action.D + + +class TestGame(unittest.TestCase): + def test_default_scores(self): + expected_scores = { + (C, D): (0, 5), + (D, C): (5, 0), + (D, D): (1, 1), + (C, C): (3, 3), + } + self.assertEqual(axl.IpdGame().scores, expected_scores) + + def test_default_RPST(self): + expected_values = (3, 1, 0, 5) + self.assertEqual(axl.IpdGame().RPST(), expected_values) + + def test_default_score(self): + game = axl.IpdGame() + self.assertEqual(game.score((C, C)), (3, 3)) + self.assertEqual(game.score((D, D)), (1, 1)) + self.assertEqual(game.score((C, D)), (0, 5)) + self.assertEqual(game.score((D, C)), (5, 0)) + + def test_default_equality(self): + self.assertEqual(axl.IpdGame(), axl.IpdGame()) + + def test_not_default_equality(self): + self.assertEqual(axl.IpdGame(1, 
2, 3, 4), axl.IpdGame(1, 2, 3, 4)) + self.assertNotEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame(1, 2, 3, 5)) + self.assertNotEqual(axl.IpdGame(1, 2, 3, 4), axl.IpdGame()) + + def test_wrong_class_equality(self): + self.assertNotEqual(axl.IpdGame(), "wrong class") + + @given(r=integers(), p=integers(), s=integers(), t=integers()) + @settings(max_examples=5) + def test_random_init(self, r, p, s, t): + """Test init with random scores using the hypothesis library.""" + expected_scores = { + (C, D): (s, t), + (D, C): (t, s), + (D, D): (p, p), + (C, C): (r, r), + } + game = axl.IpdGame(r, s, t, p) + self.assertEqual(game.scores, expected_scores) + + @given(r=integers(), p=integers(), s=integers(), t=integers()) + @settings(max_examples=5) + def test_random_RPST(self, r, p, s, t): + """Test RPST method with random scores using the hypothesis library.""" + game = axl.IpdGame(r, s, t, p) + self.assertEqual(game.RPST(), (r, p, s, t)) + + @given(r=integers(), p=integers(), s=integers(), t=integers()) + @settings(max_examples=5) + def test_random_score(self, r, p, s, t): + """Test score method with random scores using the hypothesis library.""" + game = axl.IpdGame(r, s, t, p) + self.assertEqual(game.score((C, C)), (r, r)) + self.assertEqual(game.score((D, D)), (p, p)) + self.assertEqual(game.score((C, D)), (s, t)) + self.assertEqual(game.score((D, C)), (t, s)) + + @given(game=games()) + @settings(max_examples=5) + def test_random_repr(self, game): + """Test repr with random scores using the hypothesis library.""" + expected_repr = "Axelrod game: (R,P,S,T) = {}".format(game.RPST()) + self.assertEqual(expected_repr, game.__repr__()) + self.assertEqual(expected_repr, str(game)) diff --git a/axelrod/tests/unit/test_graph.py b/axelrod/tests/unit/test_graph.py new file mode 100644 index 000000000..1e0666ee6 --- /dev/null +++ b/axelrod/tests/unit/test_graph.py @@ -0,0 +1,305 @@ +import unittest + +from collections import defaultdict + +import axelrod as axl + + +class TestGraph(unittest.TestCase): + def assert_out_mapping(self, g, expected_out_mapping): + self.assertDictEqual(g.out_mapping, expected_out_mapping) + for node, out_dict in expected_out_mapping.items(): + self.assertListEqual(g.out_vertices(node), list(out_dict.keys())) + self.assertDictEqual(g.out_dict(node), out_dict) + + def assert_in_mapping(self, g, expected_in_mapping): + self.assertDictEqual(g.in_mapping, expected_in_mapping) + for node, in_dict in expected_in_mapping.items(): + self.assertListEqual(g.in_vertices(node), list(in_dict.keys())) + self.assertDictEqual(g.in_dict(node), in_dict) + + def test_undirected_graph_with_no_vertices(self): + g = axl.graph.Graph() + self.assertFalse(g.directed) + self.assertIsInstance(g.out_mapping, defaultdict) + self.assertIsInstance(g.in_mapping, defaultdict) + self.assertEqual(g._edges, []) + self.assertEqual(str(g), "") + + def test_directed_graph_with_no_vertices(self): + g = axl.graph.Graph(directed=True) + self.assertTrue(g.directed) + self.assertIsInstance(g.out_mapping, defaultdict) + self.assertIsInstance(g.in_mapping, defaultdict) + self.assertEqual(g._edges, []) + self.assertEqual(str(g), "") + + def test_undirected_graph_with_vertices_and_unweighted_edges(self): + g = axl.graph.Graph(edges=[[1, 2], [2, 3]]) + self.assertFalse(g.directed) + self.assertEqual(str(g), "") + + self.assertEqual(g._edges, [(1, 2), (2, 1), (2, 3), (3, 2)]) + self.assert_out_mapping(g, {1: {2: None}, 2: {1: None, 3: None}, 3: {2: None}}) + self.assert_in_mapping(g, {1: {2: None}, 2: {1: None, 3: None}, 3: {2: None}}) 
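+ + # As the assertions above show, an undirected edge is stored in both directions, and an unweighted edge records a weight of None in both out_mapping and in_mapping.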
+ + def test_undirected_graph_with_vertices_and_weighted_edges(self): + g = axl.graph.Graph(edges=[[1, 2, 10], [2, 3, 5]]) + self.assertFalse(g.directed) + self.assertEqual(str(g), "") + + self.assertEqual(g._edges, [(1, 2), (2, 1), (2, 3), (3, 2)]) + self.assert_out_mapping(g, {1: {2: 10}, 2: {1: 10, 3: 5}, 3: {2: 5}}) + self.assert_in_mapping(g, {1: {2: 10}, 2: {1: 10, 3: 5}, 3: {2: 5}}) + + def test_directed_graph_vertices_and_weighted_edges(self): + g = axl.graph.Graph(edges=[[1, 2, 10], [2, 3, 5]], directed=True) + self.assertTrue(g.directed) + self.assertEqual(str(g), "") + + self.assertEqual(g._edges, [(1, 2), (2, 3)]) + self.assert_out_mapping(g, {1: {2: 10}, 2: {3: 5}}) + self.assert_in_mapping(g, {2: {1: 10}, 3: {2: 5}}) + + def test_add_loops(self): + edges = [(0, 1), (0, 2), (1, 2)] + g = axl.graph.Graph(edges) + g.add_loops() + self.assertEqual( + list(sorted(g._edges)), + list( + sorted( + [ + (0, 1), + (1, 0), + (0, 2), + (2, 0), + (1, 2), + (2, 1), + (0, 0), + (1, 1), + (2, 2), + ] + ) + ), + ) + + def test_add_loops_with_existing_loop_and_using_strings(self): + """In this case there is already a loop present; also uses + strings instead of integers as the hashable.""" + edges = [("a", "b"), ("b", "a"), ("c", "c")] + g = axl.graph.Graph(edges) + g.add_loops() + self.assertEqual( + list(sorted(g._edges)), + list(sorted([("a", "b"), ("b", "a"), ("c", "c"), ("a", "a"), ("b", "b")])), + ) + + +class TestCycle(unittest.TestCase): + def test_length_1_directed(self): + g = axl.graph.cycle(1, directed=True) + self.assertEqual(g.vertices, [0]) + self.assertEqual(g.edges, [(0, 0)]) + self.assertEqual(g.directed, True) + + def test_length_1_undirected(self): + g = axl.graph.cycle(1, directed=False) + self.assertEqual(g.vertices, [0]) + self.assertEqual(g.edges, [(0, 0)]) + self.assertEqual(g.directed, False) + + def test_length_2_directed(self): + g = axl.graph.cycle(2, directed=True) + self.assertEqual(g.vertices, [0, 1]) + self.assertEqual(g.edges, [(0, 1), (1, 0)]) + + def test_length_2_undirected(self): + g = axl.graph.cycle(2, directed=False) + self.assertEqual(g.vertices, [0, 1]) + self.assertEqual(g.edges, [(0, 1), (1, 0)]) + + def test_length_3_directed(self): + g = axl.graph.cycle(3, directed=True) + self.assertEqual(g.vertices, [0, 1, 2]) + self.assertEqual(g.edges, [(0, 1), (1, 2), (2, 0)]) + + def test_length_3_undirected(self): + g = axl.graph.cycle(3, directed=False) + edges = [(0, 1), (1, 0), (1, 2), (2, 1), (2, 0), (0, 2)] + self.assertEqual(g.vertices, [0, 1, 2]) + self.assertEqual(g.edges, edges) + + def test_length_4_directed(self): + g = axl.graph.cycle(4, directed=True) + self.assertEqual(g.vertices, [0, 1, 2, 3]) + self.assertEqual(g.edges, [(0, 1), (1, 2), (2, 3), (3, 0)]) + self.assertEqual(g.out_vertices(0), [1]) + self.assertEqual(g.out_vertices(1), [2]) + self.assertEqual(g.out_vertices(2), [3]) + self.assertEqual(g.out_vertices(3), [0]) + self.assertEqual(g.in_vertices(0), [3]) + self.assertEqual(g.in_vertices(1), [0]) + self.assertEqual(g.in_vertices(2), [1]) + self.assertEqual(g.in_vertices(3), [2]) + + def test_length_4_undirected(self): + g = axl.graph.cycle(4, directed=False) + edges = [(0, 1), (1, 0), (1, 2), (2, 1), (2, 3), (3, 2), (3, 0), (0, 3)] + self.assertEqual(g.vertices, [0, 1, 2, 3]) + self.assertEqual(g.edges, edges) + for vertex, neighbors in [(0, (1, 3)), (1, (0, 2)), (2, (1, 3)), (3, (0, 2))]: + self.assertEqual(set(g.out_vertices(vertex)), set(neighbors)) + for vertex, neighbors in [(0, (1, 3)), (1, (0, 2)), (2, (1, 3)), (3, (0, 2))]: + 
self.assertEqual(set(g.in_vertices(vertex)), set(neighbors)) + + +class TestComplete(unittest.TestCase): + def test_size_2(self): + g = axl.graph.complete_graph(2, loops=False) + self.assertEqual(g.vertices, [0, 1]) + self.assertEqual(g.edges, [(0, 1), (1, 0)]) + self.assertEqual(g.directed, False) + + def test_size_3(self): + g = axl.graph.complete_graph(3, loops=False) + self.assertEqual(g.vertices, [0, 1, 2]) + edges = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1)] + self.assertEqual(g.edges, edges) + self.assertEqual(g.directed, False) + + def test_size_4(self): + g = axl.graph.complete_graph(4, loops=False) + self.assertEqual(g.vertices, [0, 1, 2, 3]) + edges = [ + (0, 1), + (1, 0), + (0, 2), + (2, 0), + (0, 3), + (3, 0), + (1, 2), + (2, 1), + (1, 3), + (3, 1), + (2, 3), + (3, 2), + ] + self.assertEqual(g.edges, edges) + self.assertEqual(g.directed, False) + for vertex, neighbors in [ + (0, (1, 2, 3)), + (1, (0, 2, 3)), + (2, (0, 1, 3)), + (3, (0, 1, 2)), + ]: + self.assertEqual(set(g.out_vertices(vertex)), set(neighbors)) + for vertex, neighbors in [ + (0, (1, 2, 3)), + (1, (0, 2, 3)), + (2, (0, 1, 3)), + (3, (0, 1, 2)), + ]: + self.assertEqual(set(g.in_vertices(vertex)), set(neighbors)) + + def test_size_2_with_loops(self): + g = axl.graph.complete_graph(2, loops=True) + self.assertEqual(g.vertices, [0, 1]) + self.assertEqual(g.edges, [(0, 1), (1, 0), (0, 0), (1, 1)]) + self.assertEqual(g.directed, False) + + def test_size_3_with_loops(self): + g = axl.graph.complete_graph(3, loops=True) + self.assertEqual(g.vertices, [0, 1, 2]) + edges = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (0, 0), (1, 1), (2, 2)] + self.assertEqual(g.edges, edges) + self.assertEqual(g.directed, False) + + def test_size_4_with_loops(self): + g = axl.graph.complete_graph(4, loops=True) + self.assertEqual(g.vertices, [0, 1, 2, 3]) + edges = [ + (0, 1), + (1, 0), + (0, 2), + (2, 0), + (0, 3), + (3, 0), + (1, 2), + (2, 1), + (1, 3), + (3, 1), + (2, 3), + (3, 2), + (0, 0), + (1, 1), + (2, 2), + (3, 3), + ] + self.assertEqual(g.edges, edges) + self.assertEqual(g.directed, False) + neighbors = range(4) + for vertex in range(4): + self.assertEqual(set(g.out_vertices(vertex)), set(neighbors)) + self.assertEqual(set(g.in_vertices(vertex)), set(neighbors)) + + +class TestAttachedComplete(unittest.TestCase): + def test_size_2(self): + g = axl.graph.attached_complete_graphs(2, loops=False) + self.assertEqual(g.vertices, ['0:0', '0:1', '1:0', '1:1']) + self.assertEqual( + g.edges, + [('0:0', '0:1'), ('0:1', '0:0'), ('1:0', '1:1'), ('1:1', '1:0'), ('0:0', '1:0'), ('1:0', '0:0')] + ) + self.assertEqual(g.directed, False) + + def test_size_3(self): + g = axl.graph.attached_complete_graphs(3, loops=False) + self.assertEqual(g.vertices, ['0:0', '0:1', '0:2', '1:0', '1:1', '1:2']) + self.assertEqual( + g.edges, + [('0:0', '0:1'), + ('0:1', '0:0'), + ('0:0', '0:2'), + ('0:2', '0:0'), + ('0:1', '0:2'), + ('0:2', '0:1'), + ('1:0', '1:1'), + ('1:1', '1:0'), + ('1:0', '1:2'), + ('1:2', '1:0'), + ('1:1', '1:2'), + ('1:2', '1:1'), + ('0:0', '1:0'), + ('1:0', '0:0')] + ) + self.assertEqual(g.directed, False) + + def test_size_3_with_loops(self): + g = axl.graph.attached_complete_graphs(3, loops=True) + self.assertEqual(g.vertices, ['0:0', '0:1', '0:2', '1:0', '1:1', '1:2']) + self.assertEqual( + g.edges, + [('0:0', '0:1'), + ('0:1', '0:0'), + ('0:0', '0:2'), + ('0:2', '0:0'), + ('0:1', '0:2'), + ('0:2', '0:1'), + ('1:0', '1:1'), + ('1:1', '1:0'), + ('1:0', '1:2'), + ('1:2', '1:0'), + ('1:1', '1:2'), + ('1:2', '1:1'), + ('0:0', 
'1:0'), + ('1:0', '0:0'), + ('0:0', '0:0'), + ('0:1', '0:1'), + ('0:2', '0:2'), + ('1:0', '1:0'), + ('1:1', '1:1'), + ('1:2', '1:2')] + ) + self.assertEqual(g.directed, False) diff --git a/axelrod/tests/unit/test_history.py b/axelrod/tests/unit/test_history.py new file mode 100644 index 000000000..7c517958b --- /dev/null +++ b/axelrod/tests/unit/test_history.py @@ -0,0 +1,119 @@ +import unittest + +from collections import Counter + +import axelrod as axl +from axelrod.history import History, LimitedHistory + +C, D = axl.Action.C, axl.Action.D + + +class TestHistory(unittest.TestCase): + def test_init(self): + h1 = History([C, C, D], [C, C, C]) + self.assertEqual(list(h1), [C, C, D]) + h1.extend([C, C], [D, D]) + self.assertEqual(list(h1), [C, C, D, C, C]) + + def test_str_list_repr(self): + h = History() + h.append(C, D) + h.append(D, C) + h.append(C, D) + self.assertEqual(str(h), "CDC") + self.assertEqual(list(h), [C, D, C]) + self.assertEqual(repr(h), "[C, D, C]") + h2 = h.flip_plays() + self.assertEqual(str(h2), "DCD") + + def test_reset(self): + h = History() + h.append(C, D) + self.assertEqual(len(h), 1) + self.assertEqual(h.cooperations, 1) + h.reset() + self.assertEqual(len(h), 0) + self.assertEqual(h.cooperations, 0) + + def test_compare(self): + h = History([C, D, C], [C, C, C]) + self.assertEqual(h, [C, D, C]) + h2 = History([C, D, C], [C, C, C]) + self.assertEqual(h, h2) + h2.reset() + self.assertNotEqual(h, h2) + + def test_copy(self): + h = History([C, D, C], [C, C, C]) + h2 = h.copy() + self.assertEqual(h, h2) + + def test_eq(self): + h = History([C, D, C], [C, C, C]) + with self.assertRaises(TypeError): + h == 2 + + def test_counts(self): + h1 = History([C, C], [C, C]) + self.assertEqual(h1.cooperations, 2) + self.assertEqual(h1.defections, 0) + h2 = History([D, D], [C, C]) + self.assertEqual(h2.cooperations, 0) + self.assertEqual(h2.defections, 2) + self.assertNotEqual(h1, h2) + h3 = History([C, C, D, D], [C, C, C, C]) + self.assertEqual(h3.cooperations, 2) + self.assertEqual(h3.defections, 2) + + def test_flip_plays(self): + player = axl.Alternator() + opponent = axl.Cooperator() + for _ in range(5): + player.play(opponent) + + self.assertEqual(player.history, [C, D, C, D, C]) + self.assertEqual(player.cooperations, 3) + self.assertEqual(player.defections, 2) + + new_distribution = Counter() + for key, val in player.state_distribution.items(): + new_key = (key[0].flip(), key[1]) + new_distribution[new_key] = val + + flipped_history = player.history.flip_plays() + self.assertEqual(flipped_history, [D, C, D, C, D]) + self.assertEqual(flipped_history.cooperations, 2) + self.assertEqual(flipped_history.defections, 3) + self.assertEqual(flipped_history.state_distribution, + new_distribution) + + # The flip operation is an involution: flipping twice restores the original + flipped_flipped_history = flipped_history.flip_plays() + self.assertEqual(flipped_flipped_history, [C, D, C, D, C]) + self.assertEqual(flipped_flipped_history.cooperations, 3) + self.assertEqual(flipped_flipped_history.defections, 2) + + +class TestLimitedHistory(unittest.TestCase): + + def test_memory_depth(self): + h = LimitedHistory(memory_depth=3) + h.append(C, C) + self.assertEqual(len(h), 1) + h.append(D, D) + self.assertEqual(len(h), 2) + h.append(C, D) + self.assertEqual(len(h), 3) + self.assertEqual(h.cooperations, 2) + self.assertEqual(h.defections, 1) + self.assertEqual(h.state_distribution, + Counter({(C, C): 1, (D, D): 1, (C, D): 1})) + h.append(D, C) + self.assertEqual(len(h), 3) + self.assertEqual(h._plays, [D, C, D])
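+ # Only the memory_depth most recent rounds are retained: the oldest (C, C) pair has been dropped. +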
self.assertEqual(h._coplays, [D, D, C]) + self.assertEqual(h.cooperations, 1) + self.assertEqual(h.defections, 2) + self.assertEqual( + h.state_distribution, + Counter({(D, D): 1, (C, D): 1, (D, C): 1, (C, C): 0})) diff --git a/axelrod/tests/unit/test_interaction_utils.py b/axelrod/tests/unit/test_interaction_utils.py new file mode 100644 index 000000000..6ddda717c --- /dev/null +++ b/axelrod/tests/unit/test_interaction_utils.py @@ -0,0 +1,146 @@ +import unittest +import tempfile +from collections import Counter + +import axelrod as axl + +C, D = axl.Action.C, axl.Action.D + + +class TestMatch(unittest.TestCase): + interactions = [[(C, D), (D, C)], [(D, C), (D, C)], [(C, C), (C, D)], []] + scores = [[(0, 5), (5, 0)], [(5, 0), (5, 0)], [(3, 3), (0, 5)], []] + final_scores = [(5, 5), (10, 0), (3, 8), None] + final_score_per_turn = [(2.5, 2.5), (5, 0), (1.5, 4), None] + winners = [False, 0, 1, None] + cooperations = [(1, 1), (0, 2), (2, 1), None] + normalised_cooperations = [(0.5, 0.5), (0, 1), (1, 0.5), None] + state_distribution = [ + Counter({(C, D): 1, (D, C): 1}), + Counter({(D, C): 2}), + Counter({(C, C): 1, (C, D): 1}), + None, + ] + state_to_action_distribution = [ + [Counter({((C, D), D): 1}), Counter({((C, D), C): 1})], + [Counter({((D, C), D): 1}), Counter({((D, C), C): 1})], + [Counter({((C, C), C): 1}), Counter({((C, C), D): 1})], + None, + ] + + normalised_state_distribution = [ + Counter({(C, D): 0.5, (D, C): 0.5}), + Counter({(D, C): 1.0}), + Counter({(C, C): 0.5, (C, D): 0.5}), + None, + ] + normalised_state_to_action_distribution = [ + [Counter({((C, D), D): 1}), Counter({((C, D), C): 1})], + [Counter({((D, C), D): 1}), Counter({((D, C), C): 1})], + [Counter({((C, C), C): 1}), Counter({((C, C), D): 1})], + None, + ] + + sparklines = ["█ \n █", " \n██", "██\n█ ", None] + + def test_compute_scores(self): + for inter, score in zip(self.interactions, self.scores): + self.assertEqual(score, axl.interaction_utils.compute_scores(inter)) + + def test_compute_final_score(self): + for inter, final_score in zip(self.interactions, self.final_scores): + self.assertEqual(final_score, axl.interaction_utils.compute_final_score(inter)) + + def test_compute_final_score_per_turn(self): + for inter, final_score_per_round in zip( + self.interactions, self.final_score_per_turn + ): + self.assertEqual( + final_score_per_round, axl.interaction_utils.compute_final_score_per_turn(inter) + ) + + def test_compute_winner_index(self): + for inter, winner in zip(self.interactions, self.winners): + self.assertEqual(winner, axl.interaction_utils.compute_winner_index(inter)) + + def test_compute_cooperations(self): + for inter, coop in zip(self.interactions, self.cooperations): + self.assertEqual(coop, axl.interaction_utils.compute_cooperations(inter)) + + def test_compute_normalised_cooperations(self): + for inter, coop in zip(self.interactions, self.normalised_cooperations): + self.assertEqual(coop, axl.interaction_utils.compute_normalised_cooperation(inter)) + + def test_compute_state_distribution(self): + for inter, dist in zip(self.interactions, self.state_distribution): + self.assertEqual(dist, axl.interaction_utils.compute_state_distribution(inter)) + + def test_compute_normalised_state_distribution(self): + for inter, dist in zip(self.interactions, self.normalised_state_distribution): + self.assertEqual(dist, axl.interaction_utils.compute_normalised_state_distribution(inter)) + + def test_compute_state_to_action_distribution(self): + for inter, dist in zip(self.interactions, 
self.state_to_action_distribution): + self.assertEqual(dist, axl.interaction_utils.compute_state_to_action_distribution(inter)) + inter = [(C, D), (D, C), (C, D), (D, C), (D, D), (C, C), (C, D)] + expected_dist = [ + Counter( + { + ((C, C), C): 1, + ((D, C), C): 1, + ((C, D), D): 2, + ((D, C), D): 1, + ((D, D), C): 1, + } + ), + Counter({((C, C), D): 1, ((C, D), C): 2, ((D, C), D): 2, ((D, D), C): 1}), + ] + + self.assertEqual(expected_dist, axl.interaction_utils.compute_state_to_action_distribution(inter)) + + def test_compute_normalised_state_to_action_distribution(self): + for inter, dist in zip( + self.interactions, self.normalised_state_to_action_distribution + ): + self.assertEqual( + dist, axl.interaction_utils.compute_normalised_state_to_action_distribution(inter) + ) + inter = [(C, D), (D, C), (C, D), (D, C), (D, D), (C, C), (C, D)] + expected_dist = [ + Counter( + { + ((C, C), C): 1, + ((D, C), C): 1 / 2, + ((C, D), D): 1, + ((D, C), D): 1 / 2, + ((D, D), C): 1, + } + ), + Counter({((C, C), D): 1, ((C, D), C): 1, ((D, C), D): 1, ((D, D), C): 1}), + ] + self.assertEqual( + expected_dist, axl.interaction_utils.compute_normalised_state_to_action_distribution(inter) + ) + + def test_compute_sparklines(self): + for inter, spark in zip(self.interactions, self.sparklines): + self.assertEqual(spark, axl.interaction_utils.compute_sparklines(inter)) + + def test_read_interactions_from_file(self): + tmp_file = tempfile.NamedTemporaryFile(mode="w", delete=False) + players = [axl.Cooperator(), axl.Defector()] + tournament = axl.IpdTournament(players=players, turns=2, repetitions=3) + tournament.play(filename=tmp_file.name) + tmp_file.close() + expected_interactions = { + (0, 0): [[(C, C), (C, C)] for _ in range(3)], + (0, 1): [[(C, D), (C, D)] for _ in range(3)], + (1, 1): [[(D, D), (D, D)] for _ in range(3)], + } + interactions = axl.interaction_utils.read_interactions_from_file(tmp_file.name, progress_bar=False) + self.assertEqual(expected_interactions, interactions) + + def test_string_to_interactions(self): + string = "CDCDDD" + interactions = [(C, D), (C, D), (D, D)] + self.assertEqual(axl.interaction_utils.string_to_interactions(string), interactions) diff --git a/axelrod/tests/unit/test_load_data.py b/axelrod/tests/unit/test_load_data.py new file mode 100644 index 000000000..d4f92925e --- /dev/null +++ b/axelrod/tests/unit/test_load_data.py @@ -0,0 +1,17 @@ +import os +import pathlib +import unittest + +from axelrod.load_data_ import axl_filename + + +class TestLoadData(unittest.TestCase): + def test_axl_filename(self): + path = pathlib.Path("ipd/strategies/titfortat.py") + actual_fn = axl_filename(path) + + # First go from "unit" up to "tests", then up to "axelrod" + dirname = os.path.dirname(__file__) + expected_fn = os.path.join(dirname, "../../strategies/titfortat.py") + + self.assertTrue(os.path.samefile(actual_fn, expected_fn)) diff --git a/axelrod/tests/unit/test_match.py b/axelrod/tests/unit/test_match.py new file mode 100644 index 000000000..92d226242 --- /dev/null +++ b/axelrod/tests/unit/test_match.py @@ -0,0 +1,377 @@ +import unittest + +from collections import Counter + +import axelrod as axl +from axelrod.deterministic_cache import DeterministicCache +from axelrod.tests.property import games + +from hypothesis import example, given +from hypothesis.strategies import assume, floats, integers + +C, D = axl.Action.C, axl.Action.D + + +class TestMatch(unittest.TestCase): + @given(turns=integers(min_value=1, max_value=200), game=games()) + @example(turns=5, 
game=axl.DefaultGame) + def test_init(self, turns, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.IpdMatch((p1, p2), turns, game=game) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, turns) + self.assertEqual(match.prob_end, 0) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), game.RPST()) + + self.assertEqual(match.players[0].match_attributes["length"], turns) + self.assertEqual(match._cache, {}) + + @given(prob_end=floats(min_value=0, max_value=1), game=games()) + def test_init_with_prob_end(self, prob_end, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.IpdMatch((p1, p2), prob_end=prob_end, game=game) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, float("inf")) + self.assertEqual(match.prob_end, prob_end) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), game.RPST()) + + self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual(match._cache, {}) + + @given( + prob_end=floats(min_value=0, max_value=1), + turns=integers(min_value=1, max_value=200), + game=games(), + ) + def test_init_with_prob_end_and_turns(self, turns, prob_end, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.IpdMatch((p1, p2), turns=turns, prob_end=prob_end, game=game) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, turns) + self.assertEqual(match.prob_end, prob_end) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), game.RPST()) + + self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual(match._cache, {}) + + def test_default_init(self): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.IpdMatch((p1, p2)) + self.assertEqual(match.result, []) + self.assertEqual(match.players, [p1, p2]) + self.assertEqual(match.turns, axl.DEFAULT_TURNS) + self.assertEqual(match.prob_end, 0) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) + + self.assertEqual( + match.players[0].match_attributes["length"], axl.DEFAULT_TURNS + ) + self.assertEqual(match._cache, {}) + + def test_example_prob_end(self): + """ + Test that matches can have different lengths and that the cache records + the outcomes. + """ + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.IpdMatch((p1, p2), prob_end=0.5) + # With prob_end set, the match length is re-sampled on each play (see + # sample_length below), so different seeds give different lengths. + expected_lengths = [3, 1, 5] + for seed, expected_length in zip(range(3), expected_lengths): + axl.seed(seed) + self.assertEqual(match.players[0].match_attributes["length"], float("inf")) + self.assertEqual(len(match.play()), expected_length) + self.assertEqual(match.noise, 0) + self.assertEqual(match.game.RPST(), (3, 1, 0, 5)) + self.assertEqual(len(match._cache), 1) + self.assertEqual(match._cache[(p1, p2)], [(C, C)] * 5) + + @given(turns=integers(min_value=1, max_value=200), game=games()) + @example(turns=5, game=axl.DefaultGame) + def test_non_default_attributes(self, turns, game): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match_attributes = {"length": 500, "game": game, "noise": 0.5} + match = axl.IpdMatch( + (p1, p2), turns, game=game, match_attributes=match_attributes + ) + self.assertEqual(match.players[0].match_attributes["length"], 500) + self.assertEqual(match.players[0].match_attributes["noise"], 0.5) + + @given(turns=integers(min_value=1, max_value=200)) + @example(turns=5) + def test_len(self,
turns): + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.IpdMatch((p1, p2), turns) + self.assertEqual(len(match), turns) + + def test_len_error(self): + """ + Length is not defined if it is infinite. + """ + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.IpdMatch((p1, p2), prob_end=0.5) + with self.assertRaises(TypeError): + len(match) + + @given(p=floats(min_value=0, max_value=1)) + def test_stochastic(self, p): + assume(0 < p < 1) + + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.IpdMatch((p1, p2), 5) + self.assertFalse(match._stochastic) + + match = axl.IpdMatch((p1, p2), 5, noise=p) + self.assertTrue(match._stochastic) + + p1 = axl.Random() + match = axl.IpdMatch((p1, p2), 5) + self.assertTrue(match._stochastic) + + @given(p=floats(min_value=0, max_value=1)) + def test_cache_update_required(self, p): + assume(0 < p < 1) + + p1, p2 = axl.Cooperator(), axl.Cooperator() + match = axl.IpdMatch((p1, p2), 5, noise=p) + self.assertFalse(match._cache_update_required) + + cache = DeterministicCache() + cache.mutable = False + match = axl.IpdMatch((p1, p2), 5, deterministic_cache=cache) + self.assertFalse(match._cache_update_required) + + match = axl.IpdMatch((p1, p2), 5) + self.assertTrue(match._cache_update_required) + + p1 = axl.Random() + match = axl.IpdMatch((p1, p2), 5) + self.assertFalse(match._cache_update_required) + + def test_play(self): + cache = DeterministicCache() + players = (axl.Cooperator(), axl.Defector()) + match = axl.IpdMatch(players, 3, deterministic_cache=cache) + expected_result = [(C, D), (C, D), (C, D)] + self.assertEqual(match.play(), expected_result) + self.assertEqual( + cache[(axl.Cooperator(), axl.Defector())], expected_result + ) + + # A deliberately incorrect result so that we can tell it came from the cache. + expected_result = [(C, C), (D, D), (D, C), (C, C), (C, D)] + cache[(axl.Cooperator(), axl.Defector())] = expected_result + match = axl.IpdMatch(players, 3, deterministic_cache=cache) + self.assertEqual(match.play(), expected_result[:3]) + + def test_cache_grows(self): + """ + If the match is played for more turns than are stored in the cache, the + result should be regenerated and the cache overwritten. + """ + cache = DeterministicCache() + players = (axl.Cooperator(), axl.Defector()) + match = axl.IpdMatch(players, 3, deterministic_cache=cache) + expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] + expected_result_3_turn = [(C, D), (C, D), (C, D)] + self.assertEqual(match.play(), expected_result_3_turn) + match.turns = 5 + self.assertEqual(match.play(), expected_result_5_turn) + # The cache should now hold the 5-turn result. + self.assertEqual( + cache[(axl.Cooperator(), axl.Defector())], + expected_result_5_turn + ) + + def test_cache_doesnt_shrink(self): + """ + If the cache is accessed for fewer turns than are stored, it should not + be overwritten with the shorter result. + """ + cache = DeterministicCache() + players = (axl.Cooperator(), axl.Defector()) + match = axl.IpdMatch(players, 5, deterministic_cache=cache) + expected_result_5_turn = [(C, D), (C, D), (C, D), (C, D), (C, D)] + expected_result_3_turn = [(C, D), (C, D), (C, D)] + self.assertEqual(match.play(), expected_result_5_turn) + match.turns = 3 + self.assertEqual(match.play(), expected_result_3_turn) + # The cache should still hold the 5-turn result.
+ self.assertEqual( + cache[(axl.Cooperator(), axl.Defector())], + expected_result_5_turn + ) + + def test_scores(self): + player1 = axl.TitForTat() + player2 = axl.Defector() + match = axl.IpdMatch((player1, player2), 3) + self.assertEqual(match.scores(), []) + match.play() + self.assertEqual(match.scores(), [(0, 5), (1, 1), (1, 1)]) + + def test_final_score(self): + player1 = axl.TitForTat() + player2 = axl.Defector() + + match = axl.IpdMatch((player1, player2), 3) + self.assertEqual(match.final_score(), None) + match.play() + self.assertEqual(match.final_score(), (2, 7)) + + match = axl.IpdMatch((player2, player1), 3) + self.assertEqual(match.final_score(), None) + match.play() + self.assertEqual(match.final_score(), (7, 2)) + + def test_final_score_per_turn(self): + turns = 3 + player1 = axl.TitForTat() + player2 = axl.Defector() + + match = axl.IpdMatch((player1, player2), turns) + self.assertEqual(match.final_score_per_turn(), None) + match.play() + self.assertEqual(match.final_score_per_turn(), (2 / turns, 7 / turns)) + + match = axl.IpdMatch((player2, player1), turns) + self.assertEqual(match.final_score_per_turn(), None) + match.play() + self.assertEqual(match.final_score_per_turn(), (7 / turns, 2 / turns)) + + def test_winner(self): + player1 = axl.TitForTat() + player2 = axl.Defector() + + match = axl.IpdMatch((player1, player2), 3) + self.assertEqual(match.winner(), None) + match.play() + self.assertEqual(match.winner(), player2) + + match = axl.IpdMatch((player2, player1), 3) + self.assertEqual(match.winner(), None) + match.play() + self.assertEqual(match.winner(), player2) + + player1 = axl.Defector() + match = axl.IpdMatch((player1, player2), 3) + self.assertEqual(match.winner(), None) + match.play() + self.assertEqual(match.winner(), False) + + def test_cooperation(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = axl.IpdMatch((player1, player2), turns) + self.assertEqual(match.cooperation(), None) + match.play() + self.assertEqual(match.cooperation(), (3, 2)) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.IpdMatch((player1, player2), turns) + self.assertEqual(match.cooperation(), None) + match.play() + self.assertEqual(match.cooperation(), (2, 0)) + + def test_normalised_cooperation(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = axl.IpdMatch((player1, player2), turns) + self.assertEqual(match.normalised_cooperation(), None) + match.play() + self.assertEqual(match.normalised_cooperation(), (3 / turns, 2 / turns)) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.IpdMatch((player1, player2), turns) + self.assertEqual(match.normalised_cooperation(), None) + match.play() + self.assertEqual(match.normalised_cooperation(), (2 / turns, 0 / turns)) + + def test_state_distribution(self): + turns = 3 + player1 = axl.Cooperator() + player2 = axl.Alternator() + + match = axl.IpdMatch((player1, player2), turns) + self.assertEqual(match.state_distribution(), None) + + match.play() + expected = Counter({(C, C): 2, (C, D): 1}) + self.assertEqual(match.state_distribution(), expected) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.IpdMatch((player1, player2), turns) + self.assertEqual(match.state_distribution(), None) + + match.play() + expected = Counter({(C, D): 2, (D, D): 1}) + self.assertEqual(match.state_distribution(), expected) + + def test_normalised_state_distribution(self): + turns = 3 + player1 = axl.Cooperator() + player2 
= axl.Alternator() + + match = axl.IpdMatch((player1, player2), turns) + self.assertEqual(match.normalised_state_distribution(), None) + + match.play() + expected = Counter({(C, C): 2 / turns, (C, D): 1 / turns}) + self.assertEqual(match.normalised_state_distribution(), expected) + + player1 = axl.Alternator() + player2 = axl.Defector() + + match = axl.IpdMatch((player1, player2), turns) + self.assertEqual(match.normalised_state_distribution(), None) + + match.play() + expected = Counter({(C, D): 2 / turns, (D, D): 1 / turns}) + self.assertEqual(match.normalised_state_distribution(), expected) + + def test_sparklines(self): + players = (axl.Cooperator(), axl.Alternator()) + match = axl.IpdMatch(players, 4) + match.play() + expected_sparklines = "████\n█ █ " + self.assertEqual(match.sparklines(), expected_sparklines) + expected_sparklines = "XXXX\nXYXY" + self.assertEqual(match.sparklines("X", "Y"), expected_sparklines) + + +class TestSampleLength(unittest.TestCase): + def test_sample_length(self): + for seed, prob_end, expected_length in [ + (0, 0.5, 3), + (1, 0.5, 1), + (2, 0.6, 4), + (3, 0.4, 1), + ]: + axl.seed(seed) + self.assertEqual(axl.ipd.match.sample_length(prob_end), expected_length) + + def test_sample_with_0_prob(self): + self.assertEqual(axl.ipd.match.sample_length(0), float("inf")) + + def test_sample_with_1_prob(self): + self.assertEqual(axl.ipd.match.sample_length(1), 1) diff --git a/axelrod/tests/unit/test_match_generator.py b/axelrod/tests/unit/test_match_generator.py new file mode 100644 index 000000000..c4c351eaa --- /dev/null +++ b/axelrod/tests/unit/test_match_generator.py @@ -0,0 +1,237 @@ +import unittest + +import axelrod as axl +from axelrod.match_generator import graph_is_connected + +from hypothesis import example, given, settings +from hypothesis.strategies import floats, integers + +test_strategies = [ + axl.Cooperator, + axl.TitForTat, + axl.Defector, + axl.Grudger, + axl.GoByMajority, +] +test_turns = 100 +test_repetitions = 20 +test_game = axl.IpdGame() + + +class TestMatchGenerator(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.players = [s() for s in test_strategies] + + def test_build_single_match_params(self): + rr = axl.MatchGenerator( + players=self.players, + turns=test_turns, + game=test_game, + repetitions=test_repetitions, + ) + match_params = rr.build_single_match_params() + self.assertIsInstance(match_params, dict) + self.assertEqual(match_params["turns"], test_turns) + self.assertEqual(match_params["game"], test_game) + self.assertEqual(match_params["noise"], 0) + self.assertIsNone(match_params["prob_end"]) + + # Check that can build a match + players = [axl.Cooperator(), axl.Defector()] + match_params["players"] = players + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) + self.assertEqual(len(match), test_turns) + + def test_build_single_match_params_with_noise(self): + rr = axl.MatchGenerator( + players=self.players, + turns=test_turns, + game=test_game, + repetitions=test_repetitions, + noise=0.5, + ) + match_params = rr.build_single_match_params() + self.assertIsInstance(match_params, dict) + self.assertEqual(match_params["turns"], test_turns) + self.assertEqual(match_params["game"], test_game) + self.assertEqual(match_params["noise"], 0.5) + self.assertIsNone(match_params["prob_end"]) + + # Check that can build a match + players = [axl.Cooperator(), axl.Defector()] + match_params["players"] = players + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) + 
self.assertEqual(len(match), test_turns) + + def test_build_single_match_params_with_prob_end(self): + rr = axl.MatchGenerator( + players=self.players, + game=test_game, + repetitions=test_repetitions, + prob_end=0.5, + ) + match_params = rr.build_single_match_params() + self.assertIsInstance(match_params, dict) + self.assertIsNone(match_params["turns"]) + self.assertEqual(match_params["game"], test_game) + self.assertEqual(match_params["noise"], 0) + self.assertEqual(match_params["prob_end"], 0.5) + + # Check that can build a match + players = [axl.Cooperator(), axl.Defector()] + match_params["players"] = players + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) + with self.assertRaises(TypeError): + len(match) + + def test_build_single_match_params_with_prob_end_and_noise(self): + rr = axl.MatchGenerator( + players=self.players, + game=test_game, + repetitions=test_repetitions, + noise=0.5, + prob_end=0.5, + ) + match_params = rr.build_single_match_params() + self.assertIsInstance(match_params, dict) + self.assertIsNone(match_params["turns"]) + self.assertEqual(match_params["game"], rr.game) + self.assertEqual(match_params["prob_end"], 0.5) + self.assertEqual(match_params["noise"], 0.5) + + # Check that can build a match + players = [axl.Cooperator(), axl.Defector()] + match_params["players"] = players + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) + with self.assertRaises(TypeError): + len(match) + + def test_build_single_match_params_with_prob_end_and_turns(self): + rr = axl.MatchGenerator( + players=self.players, + game=test_game, + repetitions=test_repetitions, + turns=5, + prob_end=0.5, + ) + match_params = rr.build_single_match_params() + self.assertIsInstance(match_params, dict) + self.assertEqual(match_params["turns"], 5) + self.assertEqual(match_params["game"], test_game) + self.assertEqual(match_params["prob_end"], 0.5) + self.assertEqual(match_params["noise"], 0) + + # Check that can build a match + players = [axl.Cooperator(), axl.Defector()] + match_params["players"] = players + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) + self.assertIsInstance(len(match), int) + self.assertGreater(len(match), 0) + self.assertLessEqual(len(match), 10) + + def test_build_single_match_params_with_fixed_length_unknown(self): + rr = axl.MatchGenerator( + players=self.players, + game=test_game, + repetitions=test_repetitions, + turns=5, + match_attributes={"length": float("inf")}, + ) + match_params = rr.build_single_match_params() + self.assertIsInstance(match_params, dict) + self.assertEqual(match_params["turns"], 5) + self.assertEqual(match_params["game"], test_game) + self.assertEqual(match_params["prob_end"], None) + self.assertEqual(match_params["noise"], 0) + self.assertEqual(match_params["match_attributes"], {"length": float("inf")}) + + # Check that can build a match + players = [axl.Cooperator(), axl.Defector()] + match_params["players"] = players + match = axl.IpdMatch(**match_params) + self.assertIsInstance(match, axl.IpdMatch) + self.assertEqual(len(match), 5) + self.assertEqual(match.match_attributes, {"length": float("inf")}) + + @given(repetitions=integers(min_value=1, max_value=test_repetitions)) + @settings(max_examples=5) + @example(repetitions=test_repetitions) + def test_build_match_chunks(self, repetitions): + rr = axl.MatchGenerator( + players=self.players, + turns=test_turns, + game=test_game, + repetitions=repetitions, + ) + chunks = list(rr.build_match_chunks()) 
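+ # Each chunk is a (player index pair, match parameters, repetitions) triple.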
+ match_definitions = [ + tuple(list(index_pair) + [repetitions]) + for (index_pair, match_params, repetitions) in chunks + ] + expected_match_definitions = [ + (i, j, repetitions) for i in range(5) for j in range(i, 5) + ] + + self.assertEqual(sorted(match_definitions), sorted(expected_match_definitions)) + + @given(repetitions=integers(min_value=1, max_value=test_repetitions)) + @settings(max_examples=5) + @example(repetitions=test_repetitions) + def test_spatial_build_match_chunks(self, repetitions): + cycle = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 1)] + rr = axl.MatchGenerator( + players=self.players, + turns=test_turns, + game=test_game, + edges=cycle, + repetitions=repetitions, + ) + chunks = list(rr.build_match_chunks()) + match_definitions = [ + tuple(list(index_pair) + [repetitions]) + for (index_pair, match_params, repetitions) in chunks + ] + expected_match_definitions = [(i, j, repetitions) for i, j in cycle] + + self.assertEqual(sorted(match_definitions), sorted(expected_match_definitions)) + + def test_len(self): + turns = 5 + repetitions = 10 + rr = axl.MatchGenerator( + players=self.players, + turns=test_turns, + game=test_game, + repetitions=test_repetitions, + ) + self.assertEqual(len(rr), len(list(rr.build_match_chunks()))) + + def test_init_with_graph_edges_not_including_all_players(self): + edges = [(0, 1), (1, 2)] + with self.assertRaises(ValueError): + axl.MatchGenerator( + players=self.players, + repetitions=3, + game=test_game, + turns=5, + edges=edges, + noise=0, + ) + + +class TestUtilityFunctions(unittest.TestCase): + def test_connected_graph(self): + edges = [(0, 0), (0, 1), (1, 1)] + players = ["Cooperator", "Defector"] + self.assertTrue(graph_is_connected(edges, players)) + + def test_unconnected_graph(self): + edges = [(0, 0), (0, 1), (1, 1)] + players = ["Cooperator", "Defector", "Alternator"] + self.assertFalse(graph_is_connected(edges, players)) diff --git a/axelrod/tests/unit/test_mock_player.py b/axelrod/tests/unit/test_mock_player.py new file mode 100644 index 000000000..a089d5d6c --- /dev/null +++ b/axelrod/tests/unit/test_mock_player.py @@ -0,0 +1,20 @@ +import unittest + +import axelrod as axl + +C, D = axl.Action.C, axl.Action.D + + +class TestMockPlayer(unittest.TestCase): + def test_strategy(self): + for action in [C, D]: + m = axl.MockPlayer(actions=[action]) + p2 = axl.IpdPlayer() + self.assertEqual(action, m.strategy(p2)) + + actions = [C, C, D, D, C, C] + m = axl.MockPlayer(actions=actions) + p2 = axl.IpdPlayer() + for action in actions: + self.assertEqual(action, m.strategy(p2)) + diff --git a/axelrod/tests/unit/test_moran.py b/axelrod/tests/unit/test_moran.py new file mode 100644 index 000000000..58b83c0e1 --- /dev/null +++ b/axelrod/tests/unit/test_moran.py @@ -0,0 +1,561 @@ +import unittest +import itertools +import random +from collections import Counter +import matplotlib.pyplot as plt + +import axelrod as axl +from axelrod.moran import fitness_proportionate_selection +from axelrod.tests.property import strategy_lists + +from hypothesis import example, given, settings + +C, D = axl.Action.C, axl.Action.D + + +class TestMoranProcess(unittest.TestCase): + def test_init(self): + players = axl.Cooperator(), axl.Defector() + mp = axl.MoranProcess(players) + self.assertEqual(mp.turns, axl.DEFAULT_TURNS) + self.assertIsNone(mp.prob_end) + self.assertIsNone(mp.game) + self.assertEqual(mp.noise, 0) + self.assertEqual(mp.initial_players, players) + self.assertEqual(mp.players, list(players)) + self.assertEqual(mp.populations, 
[Counter({"Cooperator": 1, "Defector": 1})]) + self.assertIsNone(mp.winning_strategy_name) + self.assertEqual(mp.mutation_rate, 0) + self.assertEqual(mp.mode, "bd") + self.assertEqual(mp.deterministic_cache, axl.DeterministicCache()) + self.assertEqual( + mp.mutation_targets, {"Cooperator": [players[1]], "Defector": [players[0]]} + ) + self.assertEqual(mp.interaction_graph._edges, [(0, 1), (1, 0)]) + self.assertEqual(mp.reproduction_graph._edges, [(0, 1), (1, 0), (0, 0), (1, 1)]) + self.assertEqual(mp.fitness_transformation, None) + self.assertEqual(mp.locations, [0, 1]) + self.assertEqual(mp.index, {0: 0, 1: 1}) + + # Test non default graph cases + players = axl.Cooperator(), axl.Defector(), axl.TitForTat() + edges = [(0, 1), (2, 0), (1, 2)] + graph = axl.graph.Graph(edges, directed=True) + mp = axl.MoranProcess(players, interaction_graph=graph) + self.assertEqual(mp.interaction_graph._edges, [(0, 1), (2, 0), (1, 2)]) + self.assertEqual( + sorted(mp.reproduction_graph._edges), + sorted([(0, 1), (2, 0), (1, 2), (0, 0), (1, 1), (2, 2)]), + ) + + mp = axl.MoranProcess(players, interaction_graph=graph, reproduction_graph=graph) + self.assertEqual(mp.interaction_graph._edges, [(0, 1), (2, 0), (1, 2)]) + self.assertEqual(mp.reproduction_graph._edges, [(0, 1), (2, 0), (1, 2)]) + + def test_set_players(self): + """Test that set players resets all players""" + players = axl.Cooperator(), axl.Defector() + mp = axl.MoranProcess(players) + players[0].history.append(C, D) + mp.set_players() + self.assertEqual(players[0].cooperations, 0) + + def test_mutate(self): + """Test that a mutated player is returned""" + players = axl.Cooperator(), axl.Defector(), axl.TitForTat() + mp = axl.MoranProcess(players, mutation_rate=0.5) + axl.seed(0) + self.assertEqual(mp.mutate(0), players[0]) + axl.seed(1) + self.assertEqual(mp.mutate(0), players[2]) + axl.seed(4) + self.assertEqual(mp.mutate(0), players[1]) + + def test_death_in_db(self): + players = axl.Cooperator(), axl.Defector(), axl.TitForTat() + mp = axl.MoranProcess(players, mutation_rate=0.5, mode="db") + axl.seed(1) + self.assertEqual(mp.death(), 0) + self.assertEqual(mp.dead, 0) + axl.seed(5) + self.assertEqual(mp.death(), 1) + self.assertEqual(mp.dead, 1) + axl.seed(2) + self.assertEqual(mp.death(), 2) + self.assertEqual(mp.dead, 2) + + def test_death_in_bd(self): + players = axl.Cooperator(), axl.Defector(), axl.TitForTat() + edges = [(0, 1), (2, 0), (1, 2)] + graph = axl.graph.Graph(edges, directed=True) + mp = axl.MoranProcess(players, mode="bd", interaction_graph=graph) + axl.seed(1) + self.assertEqual(mp.death(0), 0) + axl.seed(5) + self.assertEqual(mp.death(0), 1) + axl.seed(2) + self.assertEqual(mp.death(0), 0) + + def test_birth_in_db(self): + players = axl.Cooperator(), axl.Defector(), axl.TitForTat() + mp = axl.MoranProcess(players, mode="db") + axl.seed(1) + self.assertEqual(mp.death(), 0) + self.assertEqual(mp.birth(0), 2) + + def test_birth_in_bd(self): + players = axl.Cooperator(), axl.Defector(), axl.TitForTat() + mp = axl.MoranProcess(players, mode="bd") + axl.seed(1) + self.assertEqual(mp.birth(), 0) + + def test_fixation_check(self): + players = axl.Cooperator(), axl.Cooperator() + mp = axl.MoranProcess(players) + self.assertTrue(mp.fixation_check()) + players = axl.Cooperator(), axl.Defector() + mp = axl.MoranProcess(players) + self.assertFalse(mp.fixation_check()) + + def test_next(self): + players = axl.Cooperator(), axl.Defector() + mp = axl.MoranProcess(players) + self.assertIsInstance(next(mp), axl.MoranProcess) + + def 
test_matchup_indices(self): + players = axl.Cooperator(), axl.Defector() + mp = axl.MoranProcess(players) + self.assertEqual(mp._matchup_indices(), {(0, 1)}) + + players = axl.Cooperator(), axl.Defector(), axl.TitForTat() + edges = [(0, 1), (2, 0), (1, 2)] + graph = axl.graph.Graph(edges, directed=True) + mp = axl.MoranProcess(players, mode="bd", interaction_graph=graph) + self.assertEqual(mp._matchup_indices(), {(0, 1), (1, 2), (2, 0)}) + + def test_fps(self): + self.assertEqual(fitness_proportionate_selection([0, 0, 1]), 2) + axl.seed(1) + self.assertEqual(fitness_proportionate_selection([1, 1, 1]), 0) + self.assertEqual(fitness_proportionate_selection([1, 1, 1]), 2) + + def test_exit_condition(self): + p1, p2 = axl.Cooperator(), axl.Cooperator() + mp = axl.MoranProcess((p1, p2)) + mp.play() + self.assertEqual(len(mp), 1) + + def test_two_players(self): + p1, p2 = axl.Cooperator(), axl.Defector() + axl.seed(17) + mp = axl.MoranProcess((p1, p2)) + populations = mp.play() + self.assertEqual(len(mp), 5) + self.assertEqual(len(populations), 5) + self.assertEqual(populations, mp.populations) + self.assertEqual(mp.winning_strategy_name, str(p2)) + + def test_two_prob_end(self): + p1, p2 = axl.Random(), axl.TitForTat() + axl.seed(0) + mp = axl.MoranProcess((p1, p2), prob_end=0.5) + populations = mp.play() + self.assertEqual(len(mp), 4) + self.assertEqual(len(populations), 4) + self.assertEqual(populations, mp.populations) + self.assertEqual(mp.winning_strategy_name, str(p1)) + + def test_different_game(self): + # Possible for Cooperator to become fixed when using a different game + p1, p2 = axl.Cooperator(), axl.Defector() + axl.seed(0) + game = axl.IpdGame(r=4, p=2, s=1, t=6) + mp = axl.MoranProcess((p1, p2), turns=5, game=game) + populations = mp.play() + self.assertEqual(mp.winning_strategy_name, str(p1)) + + def test_death_birth(self): + """Two player death-birth should fixate after one round.""" + p1, p2 = axl.Cooperator(), axl.Defector() + seeds = range(0, 20) + for seed in seeds: + axl.seed(seed) + mp = axl.MoranProcess((p1, p2), mode="db") + mp.play() + self.assertIsNotNone(mp.winning_strategy_name) + # Number of populations is 2: the original and the one after the first round. 
+ self.assertEqual(len(mp.populations), 2) + + def test_death_birth_outcomes(self): + """Show that birth-death and death-birth can produce different + outcomes.""" + seeds = [(1, True), (23, False)] + players = [] + N = 6 + for _ in range(N // 2): + players.append(axl.Cooperator()) + players.append(axl.Defector()) + for seed, outcome in seeds: + axl.seed(seed) + mp = axl.MoranProcess(players, mode="bd") + mp.play() + winner = mp.winning_strategy_name + axl.seed(seed) + mp = axl.MoranProcess(players, mode="db") + mp.play() + winner2 = mp.winning_strategy_name + self.assertEqual((winner == winner2), outcome) + + def test_two_random_players(self): + p1, p2 = axl.Random(p=0.5), axl.Random(p=0.25) + axl.seed(0) + mp = axl.MoranProcess((p1, p2)) + populations = mp.play() + self.assertEqual(len(mp), 2) + self.assertEqual(len(populations), 2) + self.assertEqual(populations, mp.populations) + self.assertEqual(mp.winning_strategy_name, str(p2)) + + def test_two_players_with_mutation(self): + p1, p2 = axl.Cooperator(), axl.Defector() + axl.seed(5) + mp = axl.MoranProcess((p1, p2), mutation_rate=0.2, stop_on_fixation=False) + self.assertDictEqual(mp.mutation_targets, {str(p1): [p2], str(p2): [p1]}) + # Test that mutation causes the population to alternate between + # fixations + counters = [ + Counter({"Cooperator": 2}), + Counter({"Defector": 2}), + Counter({"Cooperator": 2}), + Counter({"Defector": 2}), + ] + for counter in counters: + for _ in itertools.takewhile( + lambda x: x.population_distribution() != counter, mp + ): + pass + self.assertEqual(mp.population_distribution(), counter) + + def test_play_exception(self): + p1, p2 = axl.Cooperator(), axl.Defector() + mp = axl.MoranProcess((p1, p2), mutation_rate=0.2) + with self.assertRaises(ValueError): + mp.play() + + def test_three_players(self): + players = [axl.Cooperator(), axl.Cooperator(), axl.Defector()] + axl.seed(11) + mp = axl.MoranProcess(players) + populations = mp.play() + self.assertEqual(len(mp), 7) + self.assertEqual(len(populations), 7) + self.assertEqual(populations, mp.populations) + self.assertEqual(mp.winning_strategy_name, str(axl.Defector())) + + def test_three_players_with_mutation(self): + p1 = axl.Cooperator() + p2 = axl.Random() + p3 = axl.Defector() + players = [p1, p2, p3] + mp = axl.MoranProcess(players, mutation_rate=0.2, stop_on_fixation=False) + self.assertDictEqual( + mp.mutation_targets, + {str(p1): [p3, p2], str(p2): [p1, p3], str(p3): [p1, p2]}, + ) + # Test that mutation causes the population to alternate between + # fixations + counters = [Counter({"Cooperator": 3}), Counter({"Defector": 3})] + for counter in counters: + for _ in itertools.takewhile( + lambda x: x.population_distribution() != counter, mp + ): + pass + self.assertEqual(mp.population_distribution(), counter) + + def test_four_players(self): + players = [axl.Cooperator() for _ in range(3)] + players.append(axl.Defector()) + axl.seed(29) + mp = axl.MoranProcess(players) + populations = mp.play() + self.assertEqual(len(mp), 9) + self.assertEqual(len(populations), 9) + self.assertEqual(populations, mp.populations) + self.assertEqual(mp.winning_strategy_name, str(axl.Defector())) + + @given(strategies=strategy_lists(min_size=2, max_size=4)) + @settings(max_examples=5) + + # Two specific examples relating to cloning of strategies + @example(strategies=[axl.BackStabber, axl.MindReader]) + @example(strategies=[axl.ThueMorse, axl.MindReader]) + def test_property_players(self, strategies): + """Hypothesis test that randomly checks players""" + players = 
[s() for s in strategies] + mp = axl.MoranProcess(players) + populations = mp.play() + self.assertEqual(populations, mp.populations) + self.assertIn(mp.winning_strategy_name, [str(p) for p in players]) + + def test_reset(self): + p1, p2 = axl.Cooperator(), axl.Defector() + axl.seed(45) + mp = axl.MoranProcess((p1, p2)) + mp.play() + self.assertEqual(len(mp), 4) + self.assertEqual(len(mp.score_history), 3) + mp.reset() + self.assertEqual(len(mp), 1) + self.assertEqual(mp.winning_strategy_name, None) + self.assertEqual(mp.score_history, []) + # Check that players reset + for player, initial_player in zip(mp.players, mp.initial_players): + self.assertEqual(str(player), str(initial_player)) + + def test_constant_fitness_case(self): + # Scores between an Alternator and Defector will be: (1, 6) + axl.seed(0) + players = ( + axl.Alternator(), + axl.Alternator(), + axl.Defector(), + axl.Defector(), + ) + mp = axl.MoranProcess(players, turns=2) + winners = [] + for _ in range(100): + mp.play() + winners.append(mp.winning_strategy_name) + mp.reset() + winners = Counter(winners) + self.assertEqual(winners["Defector"], 88) + + def test_cache(self): + p1, p2 = axl.Cooperator(), axl.Defector() + mp = axl.MoranProcess((p1, p2)) + mp.play() + self.assertEqual(len(mp.deterministic_cache), 1) + + # Check that can pass a pre built cache + cache = axl.DeterministicCache() + mp = axl.MoranProcess((p1, p2), deterministic_cache=cache) + self.assertEqual(cache, mp.deterministic_cache) + + def test_iter(self): + p1, p2 = axl.Cooperator(), axl.Defector() + mp = axl.MoranProcess((p1, p2)) + self.assertEqual(mp.__iter__(), mp) + + def test_population_plot(self): + # Test that can plot on a given matplotlib axes + axl.seed(15) + players = [random.choice(axl.demo_strategies)() for _ in range(5)] + mp = axl.MoranProcess(players=players, turns=30) + mp.play() + fig, axarr = plt.subplots(2, 2) + ax = axarr[1, 0] + mp.populations_plot(ax=ax) + self.assertEqual(ax.get_xlim(), (-0.8, 16.8)) + self.assertEqual(ax.get_ylim(), (0, 5.25)) + # Run without a given axis + ax = mp.populations_plot() + self.assertEqual(ax.get_xlim(), (-0.8, 16.8)) + self.assertEqual(ax.get_ylim(), (0, 5.25)) + + def test_cooperator_can_win_with_fitness_transformation(self): + axl.seed(689) + players = ( + axl.Cooperator(), + axl.Defector(), + axl.Defector(), + axl.Defector(), + ) + w = 0.95 + fitness_transformation = lambda score: 1 - w + w * score + mp = axl.MoranProcess( + players, turns=10, fitness_transformation=fitness_transformation + ) + populations = mp.play() + self.assertEqual(mp.winning_strategy_name, "Cooperator") + + def test_atomic_mutation_fsm(self): + axl.seed(12) + players = [axl.EvolvableFSMPlayer(num_states=2, initial_state=1, initial_action=C) + for _ in range(5)] + mp = axl.MoranProcess(players, turns=10, mutation_method="atomic") + population = mp.play() + self.assertEqual( + mp.winning_strategy_name, + 'EvolvableFSMPlayer: ((0, C, 1, D), (0, D, 1, C), (1, C, 0, D), (1, D, 1, C)), 1, C, 2, 0.1') + self.assertEqual(len(mp.populations), 31) + self.assertTrue(mp.fixated) + + def test_atomic_mutation_cycler(self): + axl.seed(10) + cycle_length = 5 + players = [axl.EvolvableCycler(cycle_length=cycle_length) + for _ in range(5)] + mp = axl.MoranProcess(players, turns=10, mutation_method="atomic") + population = mp.play() + self.assertEqual(mp.winning_strategy_name, 'EvolvableCycler: CDCDD, 5, 0.2, 1') + self.assertEqual(len(mp.populations), 19) + self.assertTrue(mp.fixated) + + def test_mutation_method_exceptions(self): + axl.seed(10) 
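+ # An unrecognised mutation_method ("random") should raise ValueError at construction; the "atomic" method assumes EvolvablePlayer instances, so plain Cycler players raise TypeError once the process is stepped.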
+ cycle_length = 5 + players = [axl.EvolvableCycler(cycle_length=cycle_length) + for _ in range(5)] + with self.assertRaises(ValueError): + axl.MoranProcess(players, turns=10, mutation_method="random") + + axl.seed(0) + players = [axl.Cycler(cycle="CD" * random.randint(2, 10)) + for _ in range(10)] + + mp = axl.MoranProcess(players, turns=10, mutation_method="atomic") + with self.assertRaises(TypeError): + for _ in range(10): + next(mp) + + +class GraphMoranProcess(unittest.TestCase): + def test_complete(self): + """A complete graph should produce the same results as the default + case.""" + seeds = range(0, 5) + players = [] + N = 6 + graph = axl.graph.complete_graph(N) + for _ in range(N // 2): + players.append(axl.Cooperator()) + players.append(axl.Defector()) + for seed in seeds: + axl.seed(seed) + mp = axl.MoranProcess(players) + mp.play() + winner = mp.winning_strategy_name + axl.seed(seed) + mp = axl.MoranProcess(players, interaction_graph=graph) + mp.play() + winner2 = mp.winning_strategy_name + self.assertEqual(winner, winner2) + + def test_cycle(self): + """A cycle should sometimes produce different results vs. the default + case.""" + seeds = [(1, True), (8, False)] + players = [] + N = 6 + graph = axl.graph.cycle(N) + for _ in range(N // 2): + players.append(axl.Cooperator()) + for _ in range(N // 2): + players.append(axl.Defector()) + for seed, outcome in seeds: + axl.seed(seed) + mp = axl.MoranProcess(players) + mp.play() + winner = mp.winning_strategy_name + axl.seed(seed) + mp = axl.MoranProcess(players, interaction_graph=graph) + mp.play() + winner2 = mp.winning_strategy_name + self.assertEqual((winner == winner2), outcome) + + def test_asymmetry(self): + """Asymmetry in interaction and reproduction should sometimes + produce different results.""" + seeds = [(1, True), (21, False)] + players = [] + N = 6 + graph1 = axl.graph.cycle(N) + graph2 = axl.graph.complete_graph(N) + for _ in range(N // 2): + players.append(axl.Cooperator()) + for _ in range(N // 2): + players.append(axl.Defector()) + for seed, outcome in seeds: + axl.seed(seed) + mp = axl.MoranProcess( + players, interaction_graph=graph1, reproduction_graph=graph2 + ) + mp.play() + winner = mp.winning_strategy_name + axl.seed(seed) + mp = axl.MoranProcess( + players, interaction_graph=graph2, reproduction_graph=graph1 + ) + mp.play() + winner2 = mp.winning_strategy_name + self.assertEqual((winner == winner2), outcome) + + def test_cycle_death_birth(self): + """Test that death-birth can have different outcomes in the graph + case.""" + seeds = [(1, True), (5, False)] + players = [] + N = 6 + graph = axl.graph.cycle(N) + for _ in range(N // 2): + players.append(axl.Cooperator()) + for _ in range(N // 2): + players.append(axl.Defector()) + for seed, outcome in seeds: + axl.seed(seed) + mp = axl.MoranProcess(players, interaction_graph=graph, mode="bd") + mp.play() + winner = mp.winning_strategy_name + axl.seed(seed) + mp = axl.MoranProcess(players, interaction_graph=graph, mode="db") + mp.play() + winner2 = mp.winning_strategy_name + self.assertEqual((winner == winner2), outcome) + + +class TestApproximateMoranProcess(unittest.TestCase): + """A suite of tests for the ApproximateMoranProcess""" + + players = [axl.Cooperator(), axl.Defector()] + cached_outcomes = {} + + counter = Counter([(0, 5)]) + pdf = axl.Pdf(counter) + cached_outcomes[("Cooperator", "Defector")] = pdf + + counter = Counter([(3, 3)]) + pdf = axl.Pdf(counter) + cached_outcomes[("Cooperator", "Cooperator")] = pdf + + counter = Counter([(1, 1)]) + pdf = 
axl.Pdf(counter)
+    cached_outcomes[("Defector", "Defector")] = pdf
+
+    amp = axl.ApproximateMoranProcess(players, cached_outcomes)
+
+    def test_init(self):
+        """Test the initialisation process"""
+        self.assertEqual(
+            set(self.amp.cached_outcomes.keys()),
+            {("Cooperator", "Defector"), ("Cooperator", "Cooperator"), ("Defector", "Defector")},
+        )
+        self.assertEqual(self.amp.players, self.players)
+        self.assertEqual(self.amp.turns, 0)
+        self.assertEqual(self.amp.noise, 0)
+
+    def test_score_all(self):
+        """Test the score_all function of the Moran process"""
+        scores = self.amp.score_all()
+        self.assertEqual(scores, [0, 5])
+        scores = self.amp.score_all()
+        self.assertEqual(scores, [0, 5])
+        scores = self.amp.score_all()
+        self.assertEqual(scores, [0, 5])
+
+    def test_getting_scores_from_cache(self):
+        """Test that reading scores from the cache works (independently of the
+        ordering of the player names)."""
+        scores = self.amp._get_scores_from_cache(("Cooperator", "Defector"))
+        self.assertEqual(scores, (0, 5))
+        scores = self.amp._get_scores_from_cache(("Defector", "Cooperator"))
+        self.assertEqual(scores, (5, 0))
diff --git a/axelrod/tests/unit/test_pickling.py b/axelrod/tests/unit/test_pickling.py
new file mode 100644
index 000000000..b588c1b81
--- /dev/null
+++ b/axelrod/tests/unit/test_pickling.py
@@ -0,0 +1,394 @@
+import unittest
+import pickle
+import random
+
+import axelrod as axl
+
+C, D = axl.Action.C, axl.Action.D
+
+
+# A set of classes to test pickling.
+
+# First set: special cases
+
+PointerToWrappedStrategy = axl.ipd.strategy_transformers.FlipTransformer()(
+    axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator)
+)
+
+
+class MyDefector(axl.IpdPlayer):
+    def __init__(self):
+        super(MyDefector, self).__init__()
+
+    def strategy(self, opponent):
+        return D
+
+
+PointerToWrappedClassNotInStrategies = axl.ipd.strategy_transformers.FlipTransformer()(
+    axl.ipd.strategy_transformers.FlipTransformer()(MyDefector)
+)
+
+
+@axl.ipd.strategy_transformers.InitialTransformer((D, C, D), name_prefix=None)
+@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None)
+@axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)
+@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None)
+class InterspersedDualTransformersNamePrefixAbsent(axl.Cooperator):
+    pass
+
+
+@axl.ipd.strategy_transformers.IdentityTransformer((D, D, C))
+@axl.ipd.strategy_transformers.DualTransformer()
+@axl.ipd.strategy_transformers.FlipTransformer()
+@axl.ipd.strategy_transformers.DualTransformer()
+class InterspersedDualTransformersNamePrefixPresent(axl.Cooperator):
+    pass
+
+
+@axl.ipd.strategy_transformers.FlipTransformer()
+class MyCooperator(axl.IpdPlayer):
+    def strategy(self, opponent):
+        return C
+
+
+@axl.ipd.strategy_transformers.FlipTransformer()
+@axl.ipd.strategy_transformers.FlipTransformer()
+class DoubleFlip(axl.Cooperator):
+    pass
+
+
+@axl.ipd.strategy_transformers.FlipTransformer()
+class SingleFlip(axl.Cooperator):
+    pass
+
+
+# Second set: All the transformers
+
+
+@axl.ipd.strategy_transformers.ApologyTransformer([D], [C], name_prefix=None)
+class Apology(axl.Cooperator):
+    pass
+
+
+@axl.ipd.strategy_transformers.DeadlockBreakingTransformer(name_prefix=None)
+class DeadlockBreaking(axl.Cooperator):
+    pass
+
+
+@axl.ipd.strategy_transformers.DualTransformer(name_prefix=None)
+class Dual(axl.Cooperator):
+    pass
+
+
+@axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)
+class Flip(axl.Cooperator):
+    pass
+
+
+@axl.ipd.strategy_transformers.FinalTransformer((D, D),
name_prefix=None) +class Final(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.ForgiverTransformer(0.2, name_prefix=None) +class Forgiver(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.GrudgeTransformer(3, name_prefix=None) +class Grudge(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.InitialTransformer((C, D), name_prefix=None) +class Initial(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.JossAnnTransformer((0.2, 0.2), name_prefix=None) +class JossAnn(axl.Cooperator): + pass + + +strategies = [axl.Grudger, axl.TitForTat] +probability = [0.2, 0.3] + + +@axl.ipd.strategy_transformers.MixedTransformer(probability, strategies, name_prefix=None) +class Mixed(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.NiceTransformer(name_prefix=None) +class Nice(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.NoisyTransformer(0.2, name_prefix=None) +class Noisy(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.RetaliationTransformer(3, name_prefix=None) +class Retaliation(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.RetaliateUntilApologyTransformer(name_prefix=None) +class RetaliateUntilApology(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.TrackHistoryTransformer(name_prefix=None) +class TrackHistory(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.IdentityTransformer() +class Identity(axl.Cooperator): + pass + + +@axl.ipd.strategy_transformers.IdentityTransformer(name_prefix=None) +class TransformedThue(axl.ThueMorse): + pass + + +class MetaThue(axl.MetaPlayer): + name = "MetaThue" + + def __init__(self): + team = [axl.ThueMorse] + super().__init__(team=team) + + +TransformedMetaThue = axl.ipd.strategy_transformers.IdentityTransformer(name_prefix=None)(MetaThue) + + +transformed_no_prefix = [ + Apology, + DeadlockBreaking, + Flip, + Final, + Forgiver, + Grudge, + Initial, + JossAnn, + Mixed, + Nice, + Noisy, + Retaliation, + RetaliateUntilApology, + TrackHistory, + Dual, + Identity, +] + +transformer_instances = [ + axl.ipd.strategy_transformers.ApologyTransformer([D], [C]), + axl.ipd.strategy_transformers.DeadlockBreakingTransformer(), + axl.ipd.strategy_transformers.DualTransformer(), + axl.ipd.strategy_transformers.FlipTransformer(), + axl.ipd.strategy_transformers.FinalTransformer((D, D)), + axl.ipd.strategy_transformers.ForgiverTransformer(0.2), + axl.ipd.strategy_transformers.GrudgeTransformer(3), + axl.ipd.strategy_transformers.InitialTransformer((C, D)), + axl.ipd.strategy_transformers.JossAnnTransformer((0.2, 0.6)), + axl.ipd.strategy_transformers.MixedTransformer(probability, strategies), + axl.ipd.strategy_transformers.NiceTransformer(), + axl.ipd.strategy_transformers.NoisyTransformer(0.2), + axl.ipd.strategy_transformers.RetaliationTransformer(3), + axl.ipd.strategy_transformers.RetaliateUntilApologyTransformer(), + axl.ipd.strategy_transformers.TrackHistoryTransformer(), + axl.ipd.strategy_transformers.IdentityTransformer(), +] + + +class TestPickle(unittest.TestCase): + def assert_equals_instance_from_pickling(self, original_instance): + clone = pickle.loads(pickle.dumps(original_instance)) + self.assertEqual(clone, original_instance) + + def assert_original_equals_pickled(self, player_, turns=10): + opponents = (axl.Defector, axl.Cooperator, axl.Random, axl.CyclerCCCDCD) + for opponent_class in opponents: + # Check that player and copy play the same way. 
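For orientation, the helper above hinges on a simple round-trip: a transformer-wrapped player must survive `pickle.dumps`/`pickle.loads`, rebuilding its dynamically created class, and then behave identically. A minimal sketch of that round-trip, using only names already defined in this file:

    import pickle
    import axelrod as axl

    # Wrapping Cooperator with a transformer creates a dynamic class.
    player = axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator)()

    # Serialise and restore; the dynamic class must be reconstructed.
    clone = pickle.loads(pickle.dumps(player))

    assert clone == player  # player equality, as used by the tests below
    assert clone.__class__.__name__ == "FlippedCooperator"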
+ player = player_.clone() + clone = pickle.loads(pickle.dumps(player)) + clone = clone.clone() + + opponent_1 = opponent_class() + opponent_2 = opponent_class() + + axl.seed(0) + match_1 = axl.IpdMatch((player, opponent_1), turns=turns) + result_1 = match_1.play() + + axl.seed(0) + match_2 = axl.IpdMatch((clone, opponent_2), turns=turns) + result_2 = match_2.play() + + self.assertEqual(result_1, result_2) + + # Confirm that mutated player can be pickled correctly. + self.assert_equals_instance_from_pickling(player) + + def test_parameterized_player(self): + player = axl.Cycler("DDCCDD") + self.assert_original_equals_pickled(player) + + def test_sequence_player(self): + inline_transformed_thue = axl.ipd.strategy_transformers.IdentityTransformer(name_prefix="Transformed")(axl.ThueMorse)() + for player in [axl.ThueMorse(), axl.ThueMorseInverse(), MetaThue(), TransformedMetaThue(), + inline_transformed_thue, TransformedThue(), + ]: + self.assert_equals_instance_from_pickling(player) + opponents = (axl.Defector, axl.Cooperator, axl.Random, axl.CyclerCCCDCD) + for opponent_class in opponents: + axl.seed(10) + player.reset() + opponent = opponent_class() + match_1 = axl.IpdMatch((player, opponent), turns=20) + _ = match_1.play() + self.assert_equals_instance_from_pickling(player) + + def test_final_transformer_called(self): + player = axl.Alexei() + copy = pickle.loads(pickle.dumps(player)) + match = axl.IpdMatch((player, copy), turns=3) + results = match.play() + self.assertEqual(results, [(C, C), (C, C), (D, D)]) + + def test_pickling_all_strategies(self): + for s in random.sample(axl.strategies, 50): + with self.subTest(strategy=s.name): + self.assert_original_equals_pickled(s()) + + def test_pickling_all_transformers_as_decorated_classes(self): + for s in transformed_no_prefix: + with self.subTest(strategy=s.name): + player = s() + self.assert_original_equals_pickled(player) + + def test_pickling_all_transformers_as_instance_called_on_a_class(self): + for transformer in transformer_instances: + with self.subTest(transformer=transformer): + player = transformer(axl.Cooperator)() + self.assert_original_equals_pickled(player) + + def test_created_on_the_spot_multiple_transformers(self): + player_class = axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator) + player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) + player = axl.ipd.strategy_transformers.FinalTransformer((C, D))(player_class)() + + self.assert_original_equals_pickled(player) + + def test_dual_transformer_regression_test(self): + """DualTransformer has failed when there were multiple DualTransformers. 
+ It has also failed when DualTransformer was not the outermost + transformer or when other transformers were between multiple + DualTransformers.""" + player = InterspersedDualTransformersNamePrefixAbsent() + self.assert_original_equals_pickled(player) + + player = InterspersedDualTransformersNamePrefixPresent() + self.assert_original_equals_pickled(player) + + player_class = axl.WinStayLoseShift + player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) + player_class = axl.ipd.strategy_transformers.InitialTransformer((C, D))(player_class) + player_class = axl.ipd.strategy_transformers.DualTransformer()(player_class) + player_class = axl.ipd.strategy_transformers.TrackHistoryTransformer()(player_class) + + interspersed_dual_transformers = player_class() + + self.assert_original_equals_pickled(interspersed_dual_transformers) + + def test_class_and_instance_name_different_single_flip(self): + player = SingleFlip() + self.assertEqual(player.__class__.__name__, "FlippedSingleFlip") + + self.assert_original_equals_pickled(player) + + def test_class_and_instance_name_different_double_flip(self): + player = DoubleFlip() + self.assertEqual(player.__class__.__name__, "FlippedFlippedDoubleFlip") + + self.assert_original_equals_pickled(player) + + def test_class_and_instance_name_different_built_from_player_class(self): + player = MyCooperator() + class_names = [class_.__name__ for class_ in MyCooperator.mro()] + self.assertEqual( + class_names, ["FlippedMyCooperator", "MyCooperator", "IpdPlayer", "object"] + ) + + self.assert_original_equals_pickled(player) + + def test_pointer_to_class_derived_from_strategy(self): + player = PointerToWrappedStrategy() + + class_names = [class_.__name__ for class_ in player.__class__.mro()] + self.assertEqual( + class_names, + [ + "FlippedFlippedCooperator", + "FlippedCooperator", + "Cooperator", + "IpdPlayer", + "object", + ], + ) + + self.assert_original_equals_pickled(player) + + def test_pointer_to_class_derived_from_IpdPlayer(self): + player = PointerToWrappedClassNotInStrategies() + + class_names = [class_.__name__ for class_ in player.__class__.mro()] + self.assertEqual( + class_names, + [ + "FlippedFlippedMyDefector", + "FlippedMyDefector", + "MyDefector", + "IpdPlayer", + "object", + ], + ) + + self.assert_original_equals_pickled(player) + + def test_local_class_unpicklable(self): + """An unpickle-able AND transformed class will not raise an error until + it is un-pickled. 
This is different from the original class that raises + an error when it is pickled.""" + + class LocalCooperator(axl.Cooperator): + pass + + un_transformed = LocalCooperator() + + self.assertRaises(AttributeError, pickle.dumps, un_transformed) + + player = axl.ipd.strategy_transformers.FlipTransformer()(LocalCooperator)() + pickled = pickle.dumps(player) + self.assertRaises(AttributeError, pickle.loads, pickled) + + def test_with_various_name_prefixes(self): + no_prefix = Flip() + self.assertEqual(no_prefix.__class__.__name__, "Flip") + self.assert_original_equals_pickled(no_prefix) + + default_prefix = axl.ipd.strategy_transformers.FlipTransformer()(axl.Cooperator)() + self.assertEqual(default_prefix.__class__.__name__, "FlippedCooperator") + self.assert_original_equals_pickled(default_prefix) + + fliptastic = axl.ipd.strategy_transformers.FlipTransformer(name_prefix="Fliptastic") + new_prefix = fliptastic(axl.Cooperator)() + self.assertEqual(new_prefix.__class__.__name__, "FliptasticCooperator") + self.assert_original_equals_pickled(new_prefix) + + def test_dynamic_class_no_name_prefix(self): + player = axl.ipd.strategy_transformers.FlipTransformer(name_prefix=None)(axl.Cooperator)() + + self.assertEqual(player.__class__.__name__, "Cooperator") + self.assert_original_equals_pickled(player) diff --git a/axelrod/tests/unit/test_plot.py b/axelrod/tests/unit/test_plot.py new file mode 100644 index 000000000..d5bdf29a1 --- /dev/null +++ b/axelrod/tests/unit/test_plot.py @@ -0,0 +1,257 @@ +import unittest + +import tempfile +import matplotlib +import matplotlib.pyplot as plt +import pathlib + +from numpy import mean + +import axelrod as axl +from axelrod.load_data_ import axl_filename + + +class TestPlot(unittest.TestCase): + @classmethod + def setUpClass(cls): + path = pathlib.Path("test_outputs/test_results.csv") + cls.filename = axl_filename(path) + + cls.players = [axl.Alternator(), axl.TitForTat(), axl.Defector()] + cls.repetitions = 3 + cls.turns = 5 + + cls.test_result_set = axl.ResultSet( + cls.filename, cls.players, cls.repetitions, progress_bar=False + ) + + cls.test_result_set = axl.ResultSet( + cls.filename, cls.players, cls.repetitions, progress_bar=False + ) + cls.expected_boxplot_dataset = [ + [(17 / 5 + 9 / 5) / 2 for _ in range(3)], + [(13 / 5 + 4 / 5) / 2 for _ in range(3)], + [3 / 2 for _ in range(3)], + ] + cls.expected_boxplot_xticks_locations = [1, 2, 3, 4] + cls.expected_boxplot_xticks_labels = ["Defector", "Tit For Tat", "Alternator"] + + cls.expected_lengthplot_dataset = [ + [cls.turns for _ in range(3)], + [cls.turns for _ in range(3)], + [cls.turns for _ in range(3)], + ] + + cls.expected_payoff_dataset = [ + [0, mean([9 / 5 for _ in range(3)]), mean([17 / 5 for _ in range(3)])], + [mean([4 / 5 for _ in range(3)]), 0, mean([13 / 5 for _ in range(3)])], + [mean([2 / 5 for _ in range(3)]), mean([13 / 5 for _ in range(3)]), 0], + ] + cls.expected_winplot_dataset = ( + [[2, 2, 2], [0, 0, 0], [0, 0, 0]], + ["Defector", "Tit For Tat", "Alternator"], + ) + + cls.expected_sdvplot_dataset = ( + [ + [3, 3, 3, 1, 1, 1, 0, 0, 0], + [0, 0, 0, 0, 0, 0, -1, -1, -1], + [0, 0, 0, 0, 0, 0, -3, -3, -3], + ], + ["Defector", "Tit For Tat", "Alternator"], + ) + + def test_default_cmap(self): + cmap = axl.ipd.plot.default_cmap("0.0") + self.assertEqual(cmap, "YlGnBu") + + cmap = axl.ipd.plot.default_cmap("1.3alpha") + self.assertEqual(cmap, "YlGnBu") + + cmap = axl.ipd.plot.default_cmap("1.4.99") + self.assertEqual(cmap, "YlGnBu") + + cmap = axl.ipd.plot.default_cmap("1.4") + 
self.assertEqual(cmap, "YlGnBu")
+
+        cmap = axl.ipd.plot.default_cmap()
+        self.assertEqual(cmap, "viridis")
+
+        cmap = axl.ipd.plot.default_cmap("1.5")
+        self.assertEqual(cmap, "viridis")
+
+        cmap = axl.ipd.plot.default_cmap("1.5beta")
+        self.assertEqual(cmap, "viridis")
+
+        cmap = axl.ipd.plot.default_cmap("1.7")
+        self.assertEqual(cmap, "viridis")
+
+        cmap = axl.ipd.plot.default_cmap("2.0")
+        self.assertEqual(cmap, "viridis")
+
+    def test_init(self):
+        plot = axl.Plot(self.test_result_set)
+        self.assertEqual(plot.result_set, self.test_result_set)
+
+    def test_init_from_resultset_from_file(self):
+        tmp_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
+        players = [axl.Cooperator(), axl.TitForTat(), axl.Defector()]
+        tournament = axl.IpdTournament(players=players, turns=2, repetitions=2)
+        tournament.play(filename=tmp_file.name, progress_bar=False)
+        tmp_file.close()
+        rs = axl.ResultSet(tmp_file.name, players, 2, progress_bar=False)
+
+        plot = axl.Plot(rs)
+        self.assertEqual(plot.result_set, rs)
+
+    def test_boxplot_dataset(self):
+        plot = axl.Plot(self.test_result_set)
+        self.assertSequenceEqual(plot._boxplot_dataset, self.expected_boxplot_dataset)
+
+    def test_boxplot_xticks_locations(self):
+        plot = axl.Plot(self.test_result_set)
+        self.assertEqual(
+            plot._boxplot_xticks_locations, self.expected_boxplot_xticks_locations
+        )
+
+    def test_boxplot_xticks_labels(self):
+        plot = axl.Plot(self.test_result_set)
+        self.assertEqual(
+            plot._boxplot_xticks_labels, self.expected_boxplot_xticks_labels
+        )
+
+    def test_boxplot(self):
+        plot = axl.Plot(self.test_result_set)
+        fig = plot.boxplot()
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+
+    def test_boxplot_with_passed_axes(self):
+        # Test that we can plot on a given matplotlib axes
+        fig, axarr = plt.subplots(2, 2)
+        self.assertEqual(axarr[0, 1].get_ylim(), (0, 1))
+        plot = axl.Plot(self.test_result_set)
+        plot.boxplot(ax=axarr[0, 1])
+        self.assertNotEqual(axarr[0, 1].get_ylim(), (0, 1))
+
+        # Plot on another axes with a title
+        plot.boxplot(title="dummy title", ax=axarr[1, 0])
+        self.assertNotEqual(axarr[1, 0].get_ylim(), (0, 1))
+        self.assertEqual(axarr[1, 0].get_title(), "dummy title")
+
+    def test_boxplot_with_title(self):
+        plot = axl.Plot(self.test_result_set)
+        fig = plot.boxplot(title="title")
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+
+    def test_winplot_dataset(self):
+        plot = axl.Plot(self.test_result_set)
+        self.assertSequenceEqual(plot._winplot_dataset, self.expected_winplot_dataset)
+
+    def test_winplot(self):
+        plot = axl.Plot(self.test_result_set)
+        fig = plot.winplot()
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+
+    def test_sdvplot_dataset(self):
+        plot = axl.Plot(self.test_result_set)
+        self.assertSequenceEqual(plot._sdv_plot_dataset, self.expected_sdvplot_dataset)
+
+    def test_sdvplot(self):
+        plot = axl.Plot(self.test_result_set)
+        fig = plot.sdvplot()
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+
+    def test_lengthplot_dataset(self):
+        plot = axl.Plot(self.test_result_set)
+        self.assertSequenceEqual(
+            plot._lengthplot_dataset, self.expected_lengthplot_dataset
+        )
+
+    def test_lengthplot(self):
+        plot = axl.Plot(self.test_result_set)
+        fig = plot.lengthplot()
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+
+    def test_pdplot(self):
+        plot = axl.Plot(self.test_result_set)
+        fig = plot.pdplot()
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+
+    def test_payoff_dataset(self):
+        plot = axl.Plot(self.test_result_set)
+        self.assertSequenceEqual(plot._payoff_dataset, self.expected_payoff_dataset)
+
+    def test_payoff(self):
+        plot = axl.Plot(self.test_result_set)
+        fig = plot.payoff()
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+
+    def test_payoff_with_title(self):
+        plot = axl.Plot(self.test_result_set)
+        fig = plot.payoff(title="dummy title")
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+
+    def test_payoff_with_passed_axes(self):
+        plot = axl.Plot(self.test_result_set)
+        fig, axarr = plt.subplots(2, 2)
+        self.assertEqual(axarr[0, 1].get_xlim(), (0, 1))
+
+        plot.payoff(ax=axarr[0, 1])
+        self.assertNotEqual(axarr[0, 1].get_xlim(), (0, 1))
+
+        # Plot on another axes with a title
+        plot.payoff(title="dummy title", ax=axarr[1, 0])
+        self.assertNotEqual(axarr[1, 0].get_xlim(), (0, 1))
+        self.assertEqual(axarr[1, 0].get_xlabel(), "dummy title")
+        plt.close(fig)
+
+    def test_stackplot(self):
+        eco = axl.Ecosystem(self.test_result_set)
+        eco.reproduce(100)
+
+        plot = axl.Plot(self.test_result_set)
+        fig = plot.stackplot(eco)
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+        fig = plot.stackplot(eco, title="dummy title")
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+        fig = plot.stackplot(eco, logscale=False)
+        self.assertIsInstance(fig, matplotlib.pyplot.Figure)
+        plt.close(fig)
+
+    def test_stackplot_with_passed_axes(self):
+        # Test that we can plot on a given matplotlib axes
+        eco = axl.Ecosystem(self.test_result_set)
+        eco.reproduce(100)
+        plot = axl.Plot(self.test_result_set)
+
+        fig, axarr = plt.subplots(2, 2)
+        self.assertEqual(axarr[0, 1].get_xlim(), (0, 1))
+
+        plot.stackplot(eco, ax=axarr[0, 1])
+        self.assertNotEqual(axarr[0, 1].get_xlim(), (0, 1))
+
+        # Plot on another axes with a title
+        plot.stackplot(eco, title="dummy title", ax=axarr[1, 0])
+        self.assertNotEqual(axarr[1, 0].get_xlim(), (0, 1))
+        self.assertEqual(axarr[1, 0].get_title(), "dummy title")
+        plt.close(fig)
+
+    def test_all_plots(self):
+        plot = axl.Plot(self.test_result_set)
+        # Test that this method does not crash.
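The `*_with_passed_axes` tests above all exercise the same calling convention: each plotting method accepts an optional matplotlib `ax` and draws onto it instead of opening its own figure. A short, self-contained usage sketch under that assumption (for `payoff` the title lands on the xlabel of the supplied axes, as the test above checks):

    import matplotlib.pyplot as plt
    import axelrod as axl

    players = [axl.Cooperator(), axl.TitForTat(), axl.Defector()]
    rs = axl.IpdTournament(players, turns=5, repetitions=3).play(progress_bar=False)

    plot = axl.Plot(rs)
    fig, (left, right) = plt.subplots(1, 2)
    plot.boxplot(title="scores", ax=left)   # drawn onto the supplied axes
    plot.payoff(title="payoffs", ax=right)  # title appears as the axes' xlabel
    plt.close(fig)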
+ self.assertIsNone( + plot.save_all_plots(prefix="test_outputs/", progress_bar=False) + ) + self.assertIsNone( + plot.save_all_plots( + prefix="test_outputs/", title_prefix="A prefix", progress_bar=True + ) + ) diff --git a/axelrod/tests/unit/test_property.py b/axelrod/tests/unit/test_property.py new file mode 100644 index 000000000..7534ef513 --- /dev/null +++ b/axelrod/tests/unit/test_property.py @@ -0,0 +1,232 @@ +import unittest + +import axelrod as axl +from axelrod.tests.property import ( + games, + matches, + prob_end_spatial_tournaments, + prob_end_tournaments, + spatial_tournaments, + strategy_lists, + tournaments, +) + +from hypothesis import given, settings + +stochastic_strategies = [s for s in axl.strategies if axl.Classifiers["stochastic"](s())] + + +class TestStrategyList(unittest.TestCase): + def test_call(self): + strategies = strategy_lists().example() + self.assertIsInstance(strategies, list) + for p in strategies: + self.assertIsInstance(p(), axl.IpdPlayer) + + @given(strategies=strategy_lists(min_size=1, max_size=50)) + @settings(max_examples=5) + def test_decorator(self, strategies): + self.assertIsInstance(strategies, list) + self.assertGreaterEqual(len(strategies), 1) + self.assertLessEqual(len(strategies), 50) + for strategy in strategies: + self.assertIsInstance(strategy(), axl.IpdPlayer) + + @given(strategies=strategy_lists(strategies=axl.basic_strategies)) + @settings(max_examples=5) + def test_decorator_with_given_strategies(self, strategies): + self.assertIsInstance(strategies, list) + basic_player_names = [str(s()) for s in axl.basic_strategies] + for strategy in strategies: + player = strategy() + self.assertIsInstance(player, axl.IpdPlayer) + self.assertIn(str(player), basic_player_names) + + +class TestMatch(unittest.TestCase): + """ + Test that the composite method works + """ + + def test_call(self): + match = matches().example() + self.assertIsInstance(match, axl.IpdMatch) + + @given(match=matches(min_turns=10, max_turns=50, min_noise=0, max_noise=1)) + @settings(max_examples=5) + def test_decorator(self, match): + self.assertIsInstance(match, axl.IpdMatch) + self.assertGreaterEqual(len(match), 10) + self.assertLessEqual(len(match), 50) + self.assertGreaterEqual(match.noise, 0) + self.assertLessEqual(match.noise, 1) + + @given(match=matches(min_turns=10, max_turns=50, min_noise=0, max_noise=0)) + @settings(max_examples=5) + def test_decorator_with_no_noise(self, match): + self.assertIsInstance(match, axl.IpdMatch) + self.assertGreaterEqual(len(match), 10) + self.assertLessEqual(len(match), 50) + self.assertEqual(match.noise, 0) + + +class TestTournament(unittest.TestCase): + def test_call(self): + tournament = tournaments().example() + self.assertIsInstance(tournament, axl.IpdTournament) + + @given( + tournament=tournaments( + min_turns=2, + max_turns=50, + min_noise=0, + max_noise=1, + min_repetitions=2, + max_repetitions=50, + max_size=3, + ) + ) + @settings(max_examples=5) + def test_decorator(self, tournament): + self.assertIsInstance(tournament, axl.IpdTournament) + self.assertLessEqual(tournament.turns, 50) + self.assertGreaterEqual(tournament.turns, 2) + self.assertLessEqual(tournament.noise, 1) + self.assertGreaterEqual(tournament.noise, 0) + self.assertLessEqual(tournament.repetitions, 50) + self.assertGreaterEqual(tournament.repetitions, 2) + + @given(tournament=tournaments(strategies=axl.basic_strategies, max_size=3)) + @settings(max_examples=5) + def test_decorator_with_given_strategies(self, tournament): + self.assertIsInstance(tournament, 
axl.IpdTournament) + basic_player_names = [str(s()) for s in axl.basic_strategies] + for p in tournament.players: + self.assertIn(str(p), basic_player_names) + + +class TestProbEndTournament(unittest.TestCase): + def test_call(self): + tournament = tournaments().example() + self.assertIsInstance(tournament, axl.IpdTournament) + + @given( + tournament=prob_end_tournaments( + min_prob_end=0, + max_prob_end=1, + min_noise=0, + max_noise=1, + min_repetitions=2, + max_repetitions=50, + max_size=3, + ) + ) + @settings(max_examples=5) + def test_decorator(self, tournament): + self.assertIsInstance(tournament, axl.IpdTournament) + self.assertLessEqual(tournament.prob_end, 1) + self.assertGreaterEqual(tournament.prob_end, 0) + self.assertLessEqual(tournament.noise, 1) + self.assertGreaterEqual(tournament.noise, 0) + self.assertLessEqual(tournament.repetitions, 50) + self.assertGreaterEqual(tournament.repetitions, 2) + + @given(tournament=prob_end_tournaments(strategies=axl.basic_strategies, max_size=3)) + @settings(max_examples=5) + def test_decorator_with_given_strategies(self, tournament): + self.assertIsInstance(tournament, axl.IpdTournament) + basic_player_names = [str(s()) for s in axl.basic_strategies] + for p in tournament.players: + self.assertIn(str(p), basic_player_names) + + +class TestSpatialTournament(unittest.TestCase): + def test_call(self): + tournament = spatial_tournaments().example() + self.assertIsInstance(tournament, axl.IpdTournament) + + @given( + tournament=spatial_tournaments( + min_turns=2, + max_turns=50, + min_noise=0, + max_noise=1, + min_repetitions=2, + max_repetitions=50, + max_size=3, + ) + ) + @settings(max_examples=5) + def test_decorator(self, tournament): + self.assertIsInstance(tournament, axl.IpdTournament) + self.assertLessEqual(tournament.turns, 50) + self.assertGreaterEqual(tournament.turns, 2) + self.assertLessEqual(tournament.noise, 1) + self.assertGreaterEqual(tournament.noise, 0) + self.assertLessEqual(tournament.repetitions, 50) + self.assertGreaterEqual(tournament.repetitions, 2) + + @given(tournament=spatial_tournaments(strategies=axl.basic_strategies, max_size=3)) + @settings(max_examples=5) + def test_decorator_with_given_strategies(self, tournament): + self.assertIsInstance(tournament, axl.IpdTournament) + basic_player_names = [str(s()) for s in axl.basic_strategies] + for p in tournament.players: + self.assertIn(str(p), basic_player_names) + + +class TestProbEndSpatialTournament(unittest.TestCase): + def test_call(self): + tournament = prob_end_spatial_tournaments().example() + self.assertIsInstance(tournament, axl.IpdTournament) + + @given( + tournament=prob_end_spatial_tournaments( + min_prob_end=0, + max_prob_end=1, + min_noise=0, + max_noise=1, + min_repetitions=2, + max_repetitions=50, + max_size=3, + ) + ) + @settings(max_examples=5) + def test_decorator(self, tournament): + self.assertIsInstance(tournament, axl.IpdTournament) + self.assertLessEqual(tournament.prob_end, 1) + self.assertGreaterEqual(tournament.prob_end, 0) + self.assertLessEqual(tournament.noise, 1) + self.assertGreaterEqual(tournament.noise, 0) + self.assertLessEqual(tournament.repetitions, 50) + self.assertGreaterEqual(tournament.repetitions, 2) + + @given( + tournament=prob_end_spatial_tournaments( + strategies=axl.basic_strategies, max_size=3 + ) + ) + @settings(max_examples=5) + def test_decorator_with_given_strategies(self, tournament): + self.assertIsInstance(tournament, axl.IpdTournament) + basic_player_names = [str(s()) for s in axl.basic_strategies] + for p in 
tournament.players: + self.assertIn(str(p), basic_player_names) + + +class TestGame(unittest.TestCase): + def test_call(self): + game = games().example() + self.assertIsInstance(game, axl.IpdGame) + + @given(game=games()) + @settings(max_examples=5) + def test_decorator(self, game): + self.assertIsInstance(game, axl.IpdGame) + r, p, s, t = game.RPST() + self.assertTrue((2 * r) > (t + s) and (t > r > p > s)) + + @given(game=games(prisoners_dilemma=False)) + @settings(max_examples=5) + def test_decorator_unconstrained(self, game): + self.assertIsInstance(game, axl.IpdGame) diff --git a/axelrod/tests/unit/test_random_.py b/axelrod/tests/unit/test_random_.py new file mode 100644 index 000000000..fdb1d361f --- /dev/null +++ b/axelrod/tests/unit/test_random_.py @@ -0,0 +1,88 @@ +"""Tests for the random functions.""" + +import unittest + +import random + +from collections import Counter + +import numpy + +import axelrod as axl + +C, D = axl.Action.C, axl.Action.D + + +class TestRandom_(unittest.TestCase): + def test_return_values(self): + self.assertEqual(axl.random_choice(1), C) + self.assertEqual(axl.random_choice(0), D) + axl.seed(1) + self.assertEqual(axl.random_choice(), C) + axl.seed(2) + self.assertEqual(axl.random_choice(), D) + + def test_set_seed(self): + """Test that numpy and stdlib random seed is set by axelrod seed""" + + numpy_random_numbers = [] + stdlib_random_numbers = [] + for _ in range(2): + axl.seed(0) + numpy_random_numbers.append(numpy.random.random()) + stdlib_random_numbers.append(random.random()) + + self.assertEqual(numpy_random_numbers[0], numpy_random_numbers[1]) + self.assertEqual(stdlib_random_numbers[0], stdlib_random_numbers[1]) + + def test_seed_not_offset_by_deterministic_call(self): + """Test that when called with p = 0 or 1, the random seed is not + affected.""" + for p in [0, 1]: + axl.seed(0) + r = random.random() + axl.seed(0) + axl.random_choice(p) + self.assertEqual(r, random.random()) + + def test_random_flip(self): + self.assertEqual(C, axl.random_flip(C, 0)) + self.assertEqual(C, axl.random_flip(D, 1)) + axl.seed(0) + self.assertEqual(C, axl.random_flip(C, 0.2)) + axl.seed(1) + self.assertEqual(C, axl.random_flip(D, 0.2)) + + +class TestPdf(unittest.TestCase): + """A suite of tests for the Pdf class""" + + observations = [(C, D)] * 4 + [(C, C)] * 12 + [(D, C)] * 2 + [(D, D)] * 15 + counter = Counter(observations) + pdf = axl.Pdf(counter) + + def test_init(self): + self.assertEqual(set(self.pdf.sample_space), set(self.counter.keys())) + self.assertEqual(set(self.pdf.counts), set([4, 12, 2, 15])) + self.assertEqual(self.pdf.total, sum([4, 12, 2, 15])) + self.assertAlmostEqual(sum(self.pdf.probability), 1) + + def test_sample(self): + """Test that sample maps to correct domain""" + all_samples = [] + + axl.seed(0) + for sample in range(100): + all_samples.append(self.pdf.sample()) + + self.assertEqual(len(all_samples), 100) + self.assertEqual(set(all_samples), set(self.observations)) + + def test_seed(self): + """Test that numpy seeds the sample properly""" + + for s in range(10): + axl.seed(s) + sample = self.pdf.sample() + axl.seed(s) + self.assertEqual(sample, self.pdf.sample()) diff --git a/axelrod/tests/unit/test_resultset.py b/axelrod/tests/unit/test_resultset.py new file mode 100644 index 000000000..fc635c6b4 --- /dev/null +++ b/axelrod/tests/unit/test_resultset.py @@ -0,0 +1,1248 @@ +import unittest +import csv +from collections import Counter +import pandas as pd +from numpy import mean, nanmedian, std +import pathlib + +import axelrod as axl 
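Bridging the two files: the `Pdf` tests above rely on `axl.seed` seeding both the stdlib and numpy generators, which is what makes a reseeded `sample()` reproducible. A minimal sketch under those assumptions, using the `Pdf` API exercised above:

    from collections import Counter

    import axelrod as axl

    C, D = axl.Action.C, axl.Action.D

    # A distribution with P((C, C)) = 0.75 and P((D, D)) = 0.25.
    pdf = axl.Pdf(Counter({(C, C): 3, (D, D): 1}))

    axl.seed(0)
    first = pdf.sample()
    axl.seed(0)
    assert pdf.sample() == first  # same seed, same draw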
+from axelrod.load_data_ import axl_filename +from axelrod.result_set import create_counter_dict +from axelrod.tests.property import tournaments + +from hypothesis import given, settings + +C, D = axl.Action.C, axl.Action.D + + +class TestResultSet(unittest.TestCase): + @classmethod + def setUpClass(cls): + + path = pathlib.Path("test_outputs/test_results.csv") + cls.filename = str(axl_filename(path)) + + cls.players = [axl.Alternator(), axl.TitForTat(), axl.Defector()] + cls.repetitions = 3 + cls.turns = 5 + cls.edges = [(0, 1), (0, 2), (1, 2)] + + cls.expected_match_lengths = [ + [[0, 5, 5], [5, 0, 5], [5, 5, 0]] for _ in range(3) + ] + + cls.expected_scores = [[15, 15, 15], [17, 17, 17], [26, 26, 26]] + + cls.expected_wins = [[0, 0, 0], [0, 0, 0], [2, 2, 2]] + + cls.expected_normalised_scores = [ + [3 / 2 for _ in range(3)], + [(13 / 5 + 4 / 5) / 2 for _ in range(3)], + [(17 / 5 + 9 / 5) / 2 for _ in range(3)], + ] + + cls.expected_ranking = [2, 1, 0] + + cls.expected_ranked_names = ["Defector", "Tit For Tat", "Alternator"] + + cls.expected_null_results_matrix = [ + [[0, 0, 0], [0, 0, 0], [0, 0, 0]], + [[0, 0, 0], [0, 0, 0], [0, 0, 0]], + [[0, 0, 0], [0, 0, 0], [0, 0, 0]], + ] + + cls.expected_payoffs = [ + [[], [13 / 5 for _ in range(3)], [2 / 5 for _ in range(3)]], + [[13 / 5 for _ in range(3)], [], [4 / 5 for _ in range(3)]], + [[17 / 5 for _ in range(3)], [9 / 5 for _ in range(3)], []], + ] + + cls.expected_score_diffs = [ + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-3.0, -3.0, -3.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-1.0, -1.0, -1.0]], + [[3.0, 3.0, 3.0], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]], + ] + + cls.expected_payoff_diffs_means = [ + [0.0, 0.0, -3.0], + [0.0, 0.0, -1.0], + [3.0, 1.0, 0.0], + ] + + # Recalculating to deal with numeric imprecision + cls.expected_payoff_matrix = [ + [0, mean([13 / 5 for _ in range(3)]), mean([2 / 5 for _ in range(3)])], + [mean([13 / 5 for _ in range(3)]), 0, mean([4 / 5 for _ in range(3)])], + [mean([17 / 5 for _ in range(3)]), mean([9 / 5 for _ in range(3)]), 0], + ] + + cls.expected_payoff_stddevs = [ + [0, std([13 / 5 for _ in range(3)]), std([2 / 5 for _ in range(3)])], + [std([13 / 5 for _ in range(3)]), 0, std([4 / 5 for _ in range(3)])], + [std([17 / 5 for _ in range(3)]), std([9 / 5 for _ in range(3)]), 0], + ] + + cls.expected_cooperation = [[0, 9, 9], [9, 0, 3], [0, 0, 0]] + + cls.expected_initial_cooperation_count = [6, 6, 0] + cls.expected_initial_cooperation_rate = [1, 1, 0] + + cls.expected_normalised_cooperation = [ + [0, mean([3 / 5 for _ in range(3)]), mean([3 / 5 for _ in range(3)])], + [mean([3 / 5 for _ in range(3)]), 0, mean([1 / 5 for _ in range(3)])], + [0, 0, 0], + ] + + cls.expected_state_distribution = [ + [ + Counter(), + Counter({(D, C): 6, (C, D): 6, (C, C): 3}), + Counter({(C, D): 9, (D, D): 6}), + ], + [ + Counter({(D, C): 6, (C, D): 6, (C, C): 3}), + Counter(), + Counter({(D, D): 12, (C, D): 3}), + ], + [ + Counter({(D, C): 9, (D, D): 6}), + Counter({(D, D): 12, (D, C): 3}), + Counter(), + ], + ] + + cls.expected_normalised_state_distribution = [ + [ + Counter(), + Counter({(D, C): 0.4, (C, D): 0.4, (C, C): 0.2}), + Counter({(C, D): 0.6, (D, D): 0.4}), + ], + [ + Counter({(D, C): 0.4, (C, D): 0.4, (C, C): 0.2}), + Counter(), + Counter({(D, D): 0.8, (C, D): 0.2}), + ], + [ + Counter({(D, C): 0.6, (D, D): 0.4}), + Counter({(D, D): 0.8, (D, C): 0.2}), + Counter(), + ], + ] + + cls.expected_state_to_action_distribution = [ + [ + Counter(), + Counter({((C, C), D): 3, ((C, D), D): 3, ((D, C), C): 6}), + Counter({((C, D), 
D): 6, ((D, D), C): 6}), + ], + [ + Counter({((C, C), C): 3, ((D, C), C): 3, ((C, D), D): 6}), + Counter(), + Counter({((C, D), D): 3, ((D, D), D): 9}), + ], + [ + Counter({((D, C), D): 6, ((D, D), D): 6}), + Counter({((D, C), D): 3, ((D, D), D): 9}), + Counter(), + ], + ] + + cls.expected_normalised_state_to_action_distribution = [ + [ + Counter(), + Counter({((C, C), D): 1, ((C, D), D): 1, ((D, C), C): 1}), + Counter({((C, D), D): 1, ((D, D), C): 1}), + ], + [ + Counter({((C, C), C): 1, ((D, C), C): 1, ((C, D), D): 1}), + Counter(), + Counter({((C, D), D): 1, ((D, D), D): 1}), + ], + [ + Counter({((D, C), D): 1, ((D, D), D): 1}), + Counter({((D, C), D): 1, ((D, D), D): 1}), + Counter(), + ], + ] + + cls.expected_vengeful_cooperation = [ + [2 * element - 1 for element in row] + for row in cls.expected_normalised_cooperation + ] + + cls.expected_cooperating_rating = [18 / 30, 12 / 30, 0] + + cls.expected_good_partner_matrix = [[0, 3, 3], [3, 0, 3], [0, 0, 0]] + + cls.expected_good_partner_rating = [1.0, 1.0, 0] + + cls.expected_eigenjesus_rating = [0.5547001962252291, 0.8320502943378436, 0.0] + + cls.expected_eigenmoses_rating = [ + -0.4578520302117101, + 0.7311328098872432, + 0.5057828909101213, + ] + + def test_init(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertEqual(rs.players, self.players) + self.assertEqual(rs.num_players, len(self.players)) + + def _clear_matrix(self, matrix): + for i, row in enumerate(matrix): + for j, _ in enumerate(row): + matrix[i][j] = 0 + + def test_ne_vectors(self): + rs_1 = axl.ResultSet(self.filename, self.players, self.repetitions) + + rs_2 = axl.ResultSet(self.filename, self.players, self.repetitions) + + # A different vector + rs_2.eigenmoses_rating = (-1, -1, -1) + + self.assertNotEqual(rs_1, rs_2) + + def test_nan_vectors(self): + rs_1 = axl.ResultSet(self.filename, self.players, self.repetitions) + # Force a broken eigenmoses, by replacing vengeful_cooperation with + # zeroes. + self._clear_matrix(rs_1.vengeful_cooperation) + rs_1.eigenmoses_rating = rs_1._build_eigenmoses_rating() + + rs_2 = axl.ResultSet(self.filename, self.players, self.repetitions) + # Force a broken eigenmoses, by replacing vengeful_cooperation with + # zeroes. 
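The `expected_cooperating_rating` values above can be checked by hand: the rating appears to be total cooperations divided by total turns played, and each player here meets 2 opponents for 5 turns over 3 repetitions, i.e. 30 turns. A quick sketch of that arithmetic, assuming that formula:

    cooperation = [[0, 9, 9], [9, 0, 3], [0, 0, 0]]  # matrix from setUpClass above
    turns, repetitions, opponents = 5, 3, 2
    total_turns = turns * repetitions * opponents

    ratings = [sum(row) / total_turns for row in cooperation]
    assert ratings == [18 / 30, 12 / 30, 0]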
+ self._clear_matrix(rs_2.vengeful_cooperation) + rs_2.eigenmoses_rating = rs_2._build_eigenmoses_rating() + + self.assertEqual(rs_1, rs_2) + + def test_init_multiprocessing(self): + rs = axl.ResultSet( + self.filename, + self.players, + self.repetitions, + progress_bar=False, + processes=2, + ) + self.assertEqual(rs.players, self.players) + self.assertEqual(rs.num_players, len(self.players)) + + rs = axl.ResultSet( + self.filename, + self.players, + self.repetitions, + progress_bar=False, + processes=0, + ) + self.assertEqual(rs.players, self.players) + self.assertEqual(rs.num_players, len(self.players)) + + def test_with_progress_bar(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=True + ) + self.assertTrue(rs.progress_bar) + self.assertEqual(rs.progress_bar.total, 25) + self.assertEqual(rs.progress_bar.n, rs.progress_bar.total) + + def test_match_lengths(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.match_lengths, list) + self.assertEqual(len(rs.match_lengths), rs.repetitions) + self.assertEqual(rs.match_lengths, self.expected_match_lengths) + + for rep in rs.match_lengths: + self.assertIsInstance(rep, list) + self.assertEqual(len(rep), len(self.players)) + + for i, opp in enumerate(rep): + self.assertIsInstance(opp, list) + self.assertEqual(len(opp), len(self.players)) + + for j, length in enumerate(opp): + if i == j: # Specific test for example match setup + self.assertEqual(length, 0) + else: + self.assertEqual(length, self.turns) + + def test_scores(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.scores, list) + self.assertEqual(len(rs.scores), rs.num_players) + self.assertEqual(rs.scores, self.expected_scores) + + def test_ranking(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.ranking, list) + self.assertEqual(len(rs.ranking), rs.num_players) + self.assertEqual(rs.ranking, self.expected_ranking) + + def test_ranked_names(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.ranked_names, list) + self.assertEqual(len(rs.ranked_names), rs.num_players) + self.assertEqual(rs.ranked_names, self.expected_ranked_names) + + def test_wins(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.wins, list) + self.assertEqual(len(rs.wins), rs.num_players) + self.assertEqual(rs.wins, self.expected_wins) + + def test_normalised_scores(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.normalised_scores, list) + self.assertEqual(len(rs.normalised_scores), rs.num_players) + self.assertEqual(rs.normalised_scores, self.expected_normalised_scores) + + def test_payoffs(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.payoffs, list) + self.assertEqual(len(rs.payoffs), rs.num_players) + self.assertEqual(rs.payoffs, self.expected_payoffs) + + def test_payoff_matrix(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.payoff_matrix, list) + self.assertEqual(len(rs.payoff_matrix), rs.num_players) + self.assertEqual(rs.payoff_matrix, 
self.expected_payoff_matrix) + + def test_score_diffs(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.score_diffs, list) + self.assertEqual(len(rs.score_diffs), rs.num_players) + for i, row in enumerate(rs.score_diffs): + for j, col in enumerate(row): + for k, score in enumerate(col): + self.assertAlmostEqual(score, self.expected_score_diffs[i][j][k]) + + def test_payoff_diffs_means(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.payoff_diffs_means, list) + self.assertEqual(len(rs.payoff_diffs_means), rs.num_players) + for i, row in enumerate(rs.payoff_diffs_means): + for j, col in enumerate(row): + self.assertAlmostEqual(col, self.expected_payoff_diffs_means[i][j]) + + def test_payoff_stddevs(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.payoff_stddevs, list) + self.assertEqual(len(rs.payoff_stddevs), rs.num_players) + self.assertEqual(rs.payoff_stddevs, self.expected_payoff_stddevs) + + def test_cooperation(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.cooperation, list) + self.assertEqual(len(rs.cooperation), rs.num_players) + self.assertEqual(rs.cooperation, self.expected_cooperation) + + def test_initial_cooperation_count(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.initial_cooperation_count, list) + self.assertEqual(len(rs.initial_cooperation_count), rs.num_players) + self.assertEqual( + rs.initial_cooperation_count, self.expected_initial_cooperation_count + ) + + def test_normalised_cooperation(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.normalised_cooperation, list) + self.assertEqual(len(rs.normalised_cooperation), rs.num_players) + for i, row in enumerate(rs.normalised_cooperation): + for j, col in enumerate(row): + self.assertAlmostEqual(col, self.expected_normalised_cooperation[i][j]) + + def test_initial_cooperation_rate(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.initial_cooperation_rate, list) + self.assertEqual(len(rs.initial_cooperation_rate), rs.num_players) + self.assertEqual( + rs.initial_cooperation_rate, self.expected_initial_cooperation_rate + ) + + def test_state_distribution(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.state_distribution, list) + self.assertEqual(len(rs.state_distribution), rs.num_players) + self.assertEqual(rs.state_distribution, self.expected_state_distribution) + + def test_state_normalised_distribution(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.normalised_state_distribution, list) + self.assertEqual(len(rs.normalised_state_distribution), rs.num_players) + self.assertEqual( + rs.normalised_state_distribution, + self.expected_normalised_state_distribution, + ) + + def test_state_to_action_distribution(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.state_to_action_distribution, list) + self.assertEqual(len(rs.state_to_action_distribution), 
rs.num_players) + self.assertEqual( + rs.state_to_action_distribution[1], + self.expected_state_to_action_distribution[1], + ) + + def test_normalised_state_to_action_distribution(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.normalised_state_to_action_distribution, list) + self.assertEqual( + len(rs.normalised_state_to_action_distribution), rs.num_players + ) + self.assertEqual( + rs.normalised_state_to_action_distribution, + self.expected_normalised_state_to_action_distribution, + ) + + def test_vengeful_cooperation(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.vengeful_cooperation, list) + self.assertEqual(len(rs.vengeful_cooperation), rs.num_players) + for i, row in enumerate(rs.vengeful_cooperation): + for j, col in enumerate(row): + self.assertAlmostEqual(col, self.expected_vengeful_cooperation[i][j]) + + def test_cooperating_rating(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.cooperating_rating, list) + self.assertEqual(len(rs.cooperating_rating), rs.num_players) + self.assertEqual(rs.cooperating_rating, self.expected_cooperating_rating) + + def test_good_partner_matrix(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.good_partner_matrix, list) + self.assertEqual(len(rs.good_partner_matrix), rs.num_players) + self.assertEqual(rs.good_partner_matrix, self.expected_good_partner_matrix) + + def test_good_partner_rating(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.good_partner_rating, list) + self.assertEqual(len(rs.good_partner_rating), rs.num_players) + self.assertEqual(rs.good_partner_rating, self.expected_good_partner_rating) + + def test_eigenjesus_rating(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.eigenjesus_rating, list) + self.assertEqual(len(rs.eigenjesus_rating), rs.num_players) + for j, rate in enumerate(rs.eigenjesus_rating): + self.assertAlmostEqual(rate, self.expected_eigenjesus_rating[j]) + + def test_eigenmoses_rating(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.eigenmoses_rating, list) + self.assertEqual(len(rs.eigenmoses_rating), rs.num_players) + for j, rate in enumerate(rs.eigenmoses_rating): + self.assertAlmostEqual(rate, self.expected_eigenmoses_rating[j]) + + def test_self_interaction_for_random_strategies(self): + # Based on https://github.com/Axelrod-Python/Axelrod/issues/670 + # Note that the conclusion of #670 is incorrect and only includes one of + # the copies of the strategy. 
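The regression test that follows checks the self-interaction entry of `payoff_diffs_means`: because a round robin with self-interactions scores both copies of the strategy, every per-repetition score difference d is matched by a mirrored -d, so the mean is zero. A toy illustration with hypothetical per-repetition differences for a stochastic strategy playing its own copy:

    # Both orderings of the self-pair are recorded, so differences cancel.
    rep_diffs = [1.4, -1.4, 0.2, -0.2]
    assert sum(rep_diffs) / len(rep_diffs) == 0.0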
+ axl.seed(0) + players = [s() for s in axl.demo_strategies] + tournament = axl.IpdTournament(players, repetitions=2, turns=5) + results = tournament.play(progress_bar=False) + self.assertEqual(results.payoff_diffs_means[-1][-1], 0.0) + + def test_equality(self): + rs_sets = [ + axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + for _ in range(2) + ] + self.assertEqual(rs_sets[0], rs_sets[1]) + + players = [s() for s in axl.demo_strategies] + tournament = axl.IpdTournament(players, repetitions=2, turns=5) + results = tournament.play(progress_bar=False) + self.assertNotEqual(results, rs_sets[0]) + + def test_summarise(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + sd = rs.summarise() + + self.assertEqual(len(sd), len(rs.players)) + self.assertEqual([str(player.Name) for player in sd], rs.ranked_names) + self.assertEqual( + [int(player.Rank) for player in sd], list(range(len(self.players))) + ) + + ranked_median_scores = [ + list(map(nanmedian, rs.normalised_scores))[i] for i in rs.ranking + ] + self.assertEqual( + [float(player.Median_score) for player in sd], ranked_median_scores + ) + + ranked_cooperation_rating = [rs.cooperating_rating[i] for i in rs.ranking] + self.assertEqual( + [float(player.Cooperation_rating) for player in sd], + ranked_cooperation_rating, + ) + + ranked_median_wins = [nanmedian(rs.wins[i]) for i in rs.ranking] + self.assertEqual([float(player.Wins) for player in sd], ranked_median_wins) + + ranked_initial_coop_rates = [ + self.expected_initial_cooperation_rate[i] for i in rs.ranking + ] + self.assertEqual( + [float(player.Initial_C_rate) for player in sd], ranked_initial_coop_rates + ) + + for player in sd: + self.assertEqual( + player.CC_rate + player.CD_rate + player.DC_rate + player.DD_rate, 1 + ) + for rate in [ + player.CC_to_C_rate, + player.CD_to_C_rate, + player.DC_to_C_rate, + player.DD_to_C_rate, + ]: + self.assertLessEqual(rate, 1) + self.assertGreaterEqual(rate, 0) + + # When converting Action to Enum, test coverage gap exposed from example in + # docs/tutorial/getting_started/summarising_tournaments.rst + def test_summarise_regression_test(self): + players = [ + axl.Cooperator(), + axl.Defector(), + axl.TitForTat(), + axl.Grudger(), + ] + tournament = axl.IpdTournament(players, turns=10, repetitions=3) + results = tournament.play() + + summary = [ + ( + 0, + "Defector", + 2.6000000000000001, + 0.0, + 3.0, + 0.0, + 0.0, + 0.0, + 0.4000000000000001, + 0.6, + 0, + 0, + 0, + 0, + ), + ( + 1, + "Tit For Tat", + 2.3000000000000003, + 0.7, + 0.0, + 1.0, + 0.6666666666666666, + 0.03333333333333333, + 0.0, + 0.3, + 1.0, + 0, + 0, + 0, + ), + ( + 2, + "Grudger", + 2.3000000000000003, + 0.7, + 0.0, + 1.0, + 0.6666666666666666, + 0.03333333333333333, + 0.0, + 0.3, + 1.0, + 0, + 0, + 0, + ), + ( + 3, + "Cooperator", + 2.0, + 1.0, + 0.0, + 1.0, + 0.6666666666666666, + 0.3333333333333333, + 0.0, + 0.0, + 1.0, + 1.0, + 0, + 0, + ), + ] + for outer_index, player in enumerate(results.summarise()): + for inner_index, value in enumerate(player): + if isinstance(value, str): + self.assertEqual(value, summary[outer_index][inner_index]) + else: + self.assertAlmostEqual( + value, summary[outer_index][inner_index], places=3 + ) + + def test_write_summary(self): + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + rs.write_summary(filename=self.filename + ".summary") + with open(self.filename + ".summary", "r") as csvfile: + ranked_names = [] 
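For the spatial result sets tested below, matches are restricted to an `edges` list of player-index pairs; pairs without an edge never meet, which is why the expected match-length matrices contain zeros off the listed edges. A sketch of how such a result set is produced, assuming `axl.IpdTournament` accepts the `edges` keyword in the same way as the spatial tournaments in test_property.py:

    import axelrod as axl

    players = [axl.Alternator(), axl.TitForTat(), axl.Defector()]
    edges = [(0, 1), (0, 2)]  # Alternator meets both; TitForTat never meets Defector
    tournament = axl.IpdTournament(players, turns=5, repetitions=3, edges=edges)
    results = tournament.play(progress_bar=False)
    assert results.match_lengths[0][1][2] == 0  # no edge between players 1 and 2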
+ csvreader = csv.reader(csvfile) + for row in csvreader: + ranked_names.append(row[1]) + self.assertEqual(len(row), 14) + self.assertEqual(ranked_names[0], "Name") + self.assertEqual(ranked_names[1:], rs.ranked_names) + + +class TestDecorator(unittest.TestCase): + def test_update_progress_bar(self): + method = lambda x: None + self.assertEqual(axl.ipd.result_set.update_progress_bar(method)(1), None) + + +class TestResultSetSpatialStructure(TestResultSet): + """ + Specific test for some spatial tournament. + """ + + @classmethod + def setUpClass(cls): + + path = pathlib.Path("test_outputs/test_results_spatial.csv") + cls.filename = str(axl_filename(path)) + cls.players = [axl.Alternator(), axl.TitForTat(), axl.Defector()] + cls.turns = 5 + cls.edges = [(0, 1), (0, 2)] + + cls.expected_match_lengths = [ + [[0, 5, 5], [5, 0, 0], [5, 0, 0]] for _ in range(3) + ] + + cls.expected_scores = [[15, 15, 15], [13, 13, 13], [17, 17, 17]] + + cls.expected_wins = [[0, 0, 0], [0, 0, 0], [1, 1, 1]] + + cls.expected_normalised_scores = [ + [3 / 2 for _ in range(3)], + [(13 / 5) for _ in range(3)], + [(17 / 5) for _ in range(3)], + ] + + cls.expected_ranking = [2, 1, 0] + + cls.expected_ranked_names = ["Defector", "Tit For Tat", "Alternator"] + + cls.expected_null_results_matrix = [ + [[0, 0, 0], [0, 0, 0], [0, 0, 0]], + [[0, 0, 0], [0, 0, 0], [0, 0, 0]], + [[0, 0, 0], [0, 0, 0], [0, 0, 0]], + ] + + cls.expected_payoffs = [ + [[], [13 / 5 for _ in range(3)], [2 / 5 for _ in range(3)]], + [[13 / 5 for _ in range(3)], [], []], + [[17 / 5 for _ in range(3)], [], []], + ] + + cls.expected_score_diffs = [ + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-3.0, -3.0, -3.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], + [[3.0, 3.0, 3.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], + ] + + cls.expected_payoff_diffs_means = [ + [0.0, 0.0, -3.0], + [0.0, 0.0, 0.0], + [3.0, 0.0, 0.0], + ] + + # Recalculating to deal with numeric imprecision + cls.expected_payoff_matrix = [ + [0, mean([13 / 5 for _ in range(3)]), mean([2 / 5 for _ in range(3)])], + [mean([13 / 5 for _ in range(3)]), 0, 0], + [mean([17 / 5 for _ in range(3)]), 0, 0], + ] + + cls.expected_payoff_stddevs = [ + [0, std([13 / 5 for _ in range(3)]), std([2 / 5 for _ in range(3)])], + [std([13 / 5 for _ in range(3)]), 0, 0], + [std([17 / 5 for _ in range(3)]), 0, 0], + ] + + cls.expected_cooperation = [[0, 9, 9], [9, 0, 0], [0, 0, 0]] + + cls.expected_normalised_cooperation = [ + [0, mean([3 / 5 for _ in range(3)]), mean([3 / 5 for _ in range(3)])], + [mean([3 / 5 for _ in range(3)]), 0, 0], + [0, 0, 0], + ] + + cls.expected_initial_cooperation_count = [6, 3, 0] + cls.expected_initial_cooperation_rate = [1, 1, 0] + + cls.expected_vengeful_cooperation = [ + [2 * element - 1 for element in row] + for row in cls.expected_normalised_cooperation + ] + + cls.expected_cooperating_rating = [18 / 30, 9 / 15, 0] + + cls.expected_good_partner_matrix = [[0, 3, 3], [3, 0, 0], [0, 0, 0]] + + cls.expected_good_partner_rating = [1.0, 1.0, 0.0] + + cls.expected_eigenjesus_rating = [0.447213595499958, 0.894427190999916, 0.0] + + cls.expected_eigenmoses_rating = [ + -0.32929277996907086, + 0.7683498199278325, + 0.5488212999484519, + ] + + cls.expected_state_distribution = [ + [ + Counter(), + Counter({(C, C): 3, (C, D): 6, (D, C): 6}), + Counter({(C, D): 9, (D, D): 6}), + ], + [Counter({(C, C): 3, (C, D): 6, (D, C): 6}), Counter(), Counter()], + [Counter({(D, C): 9, (D, D): 6}), Counter(), Counter()], + ] + + cls.expected_normalised_state_distribution = [ + [ + Counter(), + 
Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), + Counter({(C, D): 0.6, (D, D): 0.4}), + ], + [Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), Counter(), Counter()], + [Counter({(D, C): 0.6, (D, D): 0.4}), Counter(), Counter()], + ] + + cls.expected_state_to_action_distribution = [ + [ + Counter(), + Counter({((C, C), D): 3, ((C, D), D): 3, ((D, C), C): 6}), + Counter({((C, D), D): 6, ((D, D), C): 6}), + ], + [ + Counter({((C, C), C): 3, ((D, C), C): 3, ((C, D), D): 6}), + Counter(), + Counter(), + ], + [Counter({((D, C), D): 6, ((D, D), D): 6}), Counter(), Counter()], + ] + + cls.expected_normalised_state_to_action_distribution = [ + [ + Counter(), + Counter({((C, C), D): 1.0, ((C, D), D): 1.0, ((D, C), C): 1.0}), + Counter({((C, D), D): 1.0, ((D, D), C): 1.0}), + ], + [ + Counter({((C, C), C): 1.0, ((D, C), C): 1.0, ((C, D), D): 1.0}), + Counter(), + Counter(), + ], + [Counter({((D, C), D): 1.0, ((D, D), D): 1.0}), Counter(), Counter()], + ] + + def test_match_lengths(self): + """ + Overwriting match lengths test. This method, among other things, checks + that if two players interacted the length of that interaction equals the + number of turns. + + Implementing this for the round robin tournament meant checking the + interactions between each strategy and the rest strategies of the + tournament. + + In a spatial tournament we need to check that: The length of interaction + of players-nodes that are end vertices of an edge is equal to the + number of turns. Otherwise it is 0. + """ + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + self.assertIsInstance(rs.match_lengths, list) + self.assertEqual(len(rs.match_lengths), rs.repetitions) + self.assertEqual(rs.match_lengths, self.expected_match_lengths) + + for rep in rs.match_lengths: + self.assertIsInstance(rep, list) + self.assertEqual(len(rep), len(self.players)) + + for i, opp in enumerate(rep): + self.assertIsInstance(opp, list) + self.assertEqual(len(opp), len(self.players)) + + for j, length in enumerate(opp): + edge = (i, j) + # Specific test for example match setup + if edge in self.edges or edge[::-1] in self.edges: + self.assertEqual(length, self.turns) + else: + self.assertEqual(length, 0) + + +class TestResultSetSpatialStructureTwo(TestResultSetSpatialStructure): + @classmethod + def setUpClass(cls): + + path = pathlib.Path("test_outputs/test_results_spatial_two.csv") + cls.filename = str(axl_filename(path)) + cls.players = [ + axl.Alternator(), + axl.TitForTat(), + axl.Defector(), + axl.Cooperator(), + ] + cls.turns = 5 + cls.edges = [(0, 1), (2, 3)] + + cls.expected_match_lengths = [ + [[0, 5, 0, 0], [5, 0, 0, 0], [0, 0, 0, 5], [0, 0, 5, 0]] for _ in range(3) + ] + + cls.expected_scores = [ + [13.0 for _ in range(3)], + [13.0 for _ in range(3)], + [25.0 for _ in range(3)], + [0 for _ in range(3)], + ] + + cls.expected_wins = [[0, 0, 0], [0, 0, 0], [1, 1, 1], [0, 0, 0]] + + cls.expected_normalised_scores = [ + [(13 / 5) for _ in range(3)], + [(13 / 5) for _ in range(3)], + [(25 / 5) for _ in range(3)], + [0 for _ in range(3)], + ] + + cls.expected_ranking = [2, 0, 1, 3] + + cls.expected_ranked_names = [ + "Defector", + "Alternator", + "Tit For Tat", + "Cooperator", + ] + + cls.expected_null_results_matrix = [ + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + ] + + cls.expected_payoffs = [ + [[], [13 / 5 for _ in range(3)], [], []], + [[13 / 5 for _ 
in range(3)], [], [], []], + [[], [], [], [25 / 5 for _ in range(3)]], + [[], [], [0 for _ in range(3)], []], + ] + + cls.expected_score_diffs = [ + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [5.0, 5.0, 5.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [-5.0, -5.0, -5.0], [0.0, 0.0, 0.0]], + ] + + cls.expected_payoff_diffs_means = [ + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 5.0], + [0.0, 0.0, -5.0, 0.0], + ] + + # Recalculating to deal with numeric imprecision + cls.expected_payoff_matrix = [ + [0, mean([13 / 5 for _ in range(3)]), 0, 0], + [mean([13 / 5 for _ in range(3)]), 0, 0, 0], + [0, 0, 0, mean([25 / 5 for _ in range(3)])], + [0, 0, 0, 0], + ] + + cls.expected_payoff_stddevs = [ + [0, std([13 / 5 for _ in range(3)]), 0, 0], + [std([13 / 5 for _ in range(3)]), 0, 0, 0], + [0, 0, 0, std([25 / 5 for _ in range(3)])], + [0, 0, 0, 0], + ] + + cls.expected_cooperation = [ + [0, 9, 0, 0], + [9, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 15, 0], + ] + + cls.expected_normalised_cooperation = [ + [0.0, mean([3 / 5 for _ in range(3)]), 0.0, 0.0], + [mean([3 / 5 for _ in range(3)]), 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, mean([5 / 5 for _ in range(3)]), 0.0], + ] + + cls.expected_initial_cooperation_count = [3.0, 3.0, 0, 3.0] + cls.expected_initial_cooperation_rate = [1.0, 1.0, 0, 1.0] + + cls.expected_vengeful_cooperation = [ + [2 * element - 1 for element in row] + for row in cls.expected_normalised_cooperation + ] + + cls.expected_cooperating_rating = [18 / 30, 18 / 30, 0.0, 30 / 30] + + cls.expected_good_partner_matrix = [ + [0, 3, 0, 0], + [3, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 3, 0], + ] + + cls.expected_good_partner_rating = [1.0, 1.0, 0.0, 1.0] + + cls.expected_eigenjesus_rating = [ + 0.7071067811865476, + 0.7071067811865476, + 0.0, + 0.0, + ] + + cls.expected_eigenmoses_rating = [ + 0.48505781033492573, + 0.48505781033492573, + 0.7090603855860735, + 0.1633132292825755, + ] + + cls.expected_state_distribution = [ + [ + Counter(), + Counter({(C, C): 3, (C, D): 6, (D, C): 6}), + Counter(), + Counter(), + ], + [ + Counter({(C, C): 3, (C, D): 6, (D, C): 6}), + Counter(), + Counter(), + Counter(), + ], + [Counter(), Counter(), Counter(), Counter({(D, C): 15})], + [Counter(), Counter(), Counter({(C, D): 15}), Counter()], + ] + + cls.expected_normalised_state_distribution = [ + [ + Counter(), + Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), + Counter(), + Counter(), + ], + [ + Counter({(C, C): 0.2, (C, D): 0.4, (D, C): 0.4}), + Counter(), + Counter(), + Counter(), + ], + [Counter(), Counter(), Counter(), Counter({(D, C): 1.0})], + [Counter(), Counter(), Counter({(C, D): 1.0}), Counter()], + ] + + cls.expected_state_to_action_distribution = [ + [ + Counter(), + Counter({((C, C), D): 3, ((C, D), D): 3, ((D, C), C): 6}), + Counter(), + Counter(), + ], + [ + Counter({((C, C), C): 3, ((D, C), C): 3, ((C, D), D): 6}), + Counter(), + Counter(), + Counter(), + ], + [Counter(), Counter(), Counter(), Counter({((D, C), D): 12})], + [Counter(), Counter(), Counter({((C, D), C): 12}), Counter()], + ] + + cls.expected_normalised_state_to_action_distribution = [ + [ + Counter(), + Counter({((C, C), D): 1.0, ((C, D), D): 1.0, ((D, C), C): 1.0}), + Counter(), + Counter(), + ], + [ + Counter({((C, C), C): 1.0, ((D, C), C): 1.0, ((C, D), D): 1.0}), + Counter(), + Counter(), + Counter(), + ], + [Counter(), Counter(), Counter(), 
Counter({((D, C), D): 1.0})], + [Counter(), Counter(), Counter({((C, D), C): 1.0}), Counter()], + ] + + +class TestResultSetSpatialStructureThree(TestResultSetSpatialStructure): + @classmethod + def setUpClass(cls): + + path = pathlib.Path("test_outputs/test_results_spatial_three.csv") + cls.filename = str(axl_filename(path)) + cls.players = [ + axl.Alternator(), + axl.TitForTat(), + axl.Defector(), + axl.Cooperator(), + ] + cls.turns = 5 + cls.edges = [(0, 0), (1, 1), (2, 2), (3, 3)] + + cls.expected_match_lengths = [ + [[5, 0, 0, 0], [0, 5, 0, 0], [0, 0, 5, 0], [0, 0, 0, 5]] for _ in range(3) + ] + + cls.expected_scores = [[0 for _ in range(3)] for _ in range(4)] + + cls.expected_wins = [[0 for _ in range(3)] for _ in range(4)] + + cls.expected_normalised_scores = [[0 for _ in range(3)] for i in range(4)] + + cls.expected_ranking = [0, 1, 2, 3] + + cls.expected_ranked_names = [ + "Alternator", + "Tit For Tat", + "Defector", + "Cooperator", + ] + + cls.expected_null_results_matrix = [ + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + ] + + cls.expected_payoffs = [ + [[11 / 5 for _ in range(3)], [], [], []], + [[], [15 / 5 for _ in range(3)], [], []], + [[], [], [5 / 5 for _ in range(3)], []], + [[], [], [], [15 / 5 for _ in range(3)]], + ] + + cls.expected_score_diffs = [ + [[0.0 for _ in range(3)] for _ in range(4)] for _ in range(4) + ] + + cls.expected_payoff_diffs_means = [[0.0 for _ in range(4)] for _ in range(4)] + + # Recalculating to deal with numeric imprecision + cls.expected_payoff_matrix = [ + [mean([11 / 5 for _ in range(3)]), 0, 0, 0], + [0, mean([15 / 5 for _ in range(3)]), 0, 0], + [0, 0, mean([5 / 5 for _ in range(3)]), 0], + [0, 0, 0, mean([15 / 5 for _ in range(3)])], + ] + + cls.expected_payoff_stddevs = [ + [std([11 / 5 for _ in range(3)]), 0, 0, 0], + [0, std([15 / 5 for _ in range(3)]), 0, 0], + [0, 0, std([5 / 5 for _ in range(3)]), 0], + [0, 0, 0, std([15 / 5 for _ in range(3)])], + ] + + cls.expected_cooperation = [ + [9.0, 0, 0, 0], + [0, 15.0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 15.0], + ] + + cls.expected_normalised_cooperation = [ + [mean([3 / 5 for _ in range(3)]), 0.0, 0.0, 0.0], + [0.0, mean([5 / 5 for _ in range(3)]), 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, mean([5 / 5 for _ in range(3)])], + ] + + cls.expected_initial_cooperation_count = [0, 0, 0, 0] + cls.expected_initial_cooperation_rate = [0, 0, 0, 0] + + cls.expected_vengeful_cooperation = [ + [2 * element - 1 for element in row] + for row in cls.expected_normalised_cooperation + ] + + cls.expected_cooperating_rating = [0.0 for _ in range(4)] + + cls.expected_good_partner_matrix = [[0.0 for _ in range(4)] for _ in range(4)] + + cls.expected_good_partner_rating = [0.0 for _ in range(4)] + + cls.expected_eigenjesus_rating = [ + 0.0009235301367282831, + 0.7071064796379986, + 0.0, + 0.7071064796379986, + ] + + cls.expected_eigenmoses_rating = [ + 0.4765940316018446, + 0.3985944056208427, + 0.6746133178770147, + 0.3985944056208427, + ] + + cls.expected_state_distribution = [ + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + ] + + cls.expected_normalised_state_distribution = [ + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + 
[Counter(), Counter(), Counter(), Counter()], + ] + + cls.expected_state_to_action_distribution = [ + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + ] + + cls.expected_normalised_state_to_action_distribution = [ + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + [Counter(), Counter(), Counter(), Counter()], + ] + + def test_equality(self): + """Overwriting for this particular case""" + pass + + def test_summarise(self): + """Overwriting for this particular case""" + rs = axl.ResultSet( + self.filename, self.players, self.repetitions, progress_bar=False + ) + sd = rs.summarise() + + for player in sd: + self.assertEqual(player.CC_rate, 0) + self.assertEqual(player.CD_rate, 0) + self.assertEqual(player.DC_rate, 0) + self.assertEqual(player.DD_rate, 0) + + +class TestSummary(unittest.TestCase): + """Separate test to check that summary always builds without failures""" + + @given( + tournament=tournaments(min_size=2, max_size=5, max_turns=5, max_repetitions=3) + ) + @settings(max_examples=5) + def test_summarise_without_failure(self, tournament): + results = tournament.play(progress_bar=False) + sd = results.summarise() + self.assertIsInstance(sd, list) + + for player in sd: + # round for numerical error + total_rate = round( + player.CC_rate + player.CD_rate + player.DC_rate + player.DD_rate, 3 + ) + self.assertTrue(total_rate in [0, 1]) + self.assertTrue(0 <= player.Initial_C_rate <= 1) + + +class TestCreateCounterDict(unittest.TestCase): + """Separate test for a helper function""" + + def test_basic_use(self): + key_map = {"Col 1": "Var 1", "Col 2": "Var 2"} + df = pd.DataFrame( + {"Col 1": [10, 20, 30], "Col 2": [1, 2, 0]}, index=[[5, 6, 7], [1, 2, 3]] + ) + self.assertEqual( + create_counter_dict(df, 6, 2, key_map), Counter({"Var 1": 20, "Var 2": 2}) + ) + self.assertEqual(create_counter_dict(df, 7, 3, key_map), Counter({"Var 1": 30})) diff --git a/axelrod/tests/unit/test_strategy_transformers.py b/axelrod/tests/unit/test_strategy_transformers.py new file mode 100644 index 000000000..dac8b66ac --- /dev/null +++ b/axelrod/tests/unit/test_strategy_transformers.py @@ -0,0 +1,714 @@ +import unittest + +import axelrod as axl +from axelrod.strategy_transformers import * +from axelrod.tests.strategies.test_cooperator import TestCooperator +from axelrod.tests.strategies.test_titfortat import TestTitForTat + +C, D = axl.Action.C, axl.Action.D + + +@FlipTransformer(name_prefix=None) +class CanPickle(axl.Cooperator): + pass + + +@FlipTransformer() +class CanNotPickle(axl.Cooperator): + pass + + +class TestTransformers(unittest.TestCase): + def test_player_can_be_pickled(self): + player = axl.Cooperator() + self.assertTrue(player_can_be_pickled(player)) + + player = IdentityTransformer()(axl.Cooperator)() + self.assertFalse(player_can_be_pickled(player)) + + player = CanPickle() + self.assertTrue(player_can_be_pickled(player)) + + player = CanNotPickle() + self.assertFalse(player_can_be_pickled(player)) + + def test_is_strategy_static(self): + self.assertTrue(is_strategy_static(axl.Cooperator)) + self.assertFalse(is_strategy_static(axl.Alternator)) + + def test_is_strategy_static_with_inherited_strategy(self): + class NewCooperator(axl.Cooperator): + pass + + class NewAlternator(axl.Alternator): + pass + + self.assertTrue(is_strategy_static(NewCooperator)) + 
self.assertFalse(is_strategy_static(NewAlternator)) + + def test_DecoratorReBuilder(self): + new_prefix = "YOLO" + decorator = NoisyTransformer(0.2, name_prefix=new_prefix) + + factory_args = (noisy_wrapper, "Noisy", noisy_reclassifier) + args = decorator.args + kwargs = decorator.kwargs.copy() + + new_decorator = DecoratorReBuilder()(factory_args, args, kwargs, new_prefix) + + self.assertEqual(decorator(axl.Cooperator)(), new_decorator(axl.Cooperator)()) + + def test_StrategyReBuilder_declared_class_with_name_prefix(self): + player = CanNotPickle() + self.assertEqual(player.__class__.__name__, "FlippedCanNotPickle") + + decorators = [player.decorator] + import_name = "CanNotPickle" + module_name = player.__module__ + + new_player = StrategyReBuilder()(decorators, import_name, module_name) + + update_dict = player.__dict__.copy() + + new_player.__dict__.update(update_dict) + self.assertEqual(player, new_player) + + def test_StrategyReBuilder_dynamically_wrapped_class_with_name_prefix(self): + player = FlipTransformer()(axl.Cooperator)() + self.assertEqual(player.__class__.__name__, "FlippedCooperator") + + decorators = [player.decorator] + import_name = "Cooperator" + module_name = player.__module__ + + new_player = StrategyReBuilder()(decorators, import_name, module_name) + + update_dict = player.__dict__.copy() + + new_player.__dict__.update(update_dict) + self.assertEqual(player, new_player) + + def test_StrategyReBuilder_dynamically_wrapped_class_no_name_prefix(self): + player = IdentityTransformer()(axl.Cooperator)() + self.assertEqual(player.__class__.__name__, "Cooperator") + + decorators = [player.decorator] + import_name = "Cooperator" + module_name = player.__module__ + + new_player = StrategyReBuilder()(decorators, import_name, module_name) + + update_dict = player.__dict__.copy() + + new_player.__dict__.update(update_dict) + self.assertEqual(player, new_player) + + def test_StrategyReBuilder_many_decorators(self): + decorator_1 = IdentityTransformer() + decorator_2 = FlipTransformer() + decorator_3 = DualTransformer() + player = decorator_3(decorator_2(decorator_1(axl.Cooperator)))() + self.assertEqual(player.__class__.__name__, "DualFlippedCooperator") + + decorators = [decorator_1, decorator_2, decorator_3] + import_name = "Cooperator" + module_name = player.__module__ + + new_player = StrategyReBuilder()(decorators, import_name, module_name) + + update_dict = player.__dict__.copy() + + new_player.__dict__.update(update_dict) + self.assertEqual(player, new_player) + + def test_all_strategies(self): + # Attempt to transform each strategy to ensure that implementation + # choices (like use of super) do not cause issues + for s in axl.strategies: + opponent = axl.Cooperator() + player = IdentityTransformer()(s)() + player.play(opponent) + + def test_naming(self): + """Tests that the player and class names are properly modified.""" + cls = FlipTransformer()(axl.Cooperator) + p1 = cls() + self.assertEqual(cls.__name__, "FlippedCooperator") + self.assertEqual(p1.name, "Flipped Cooperator") + + cls = ForgiverTransformer(0.5)(axl.Alternator) + p1 = cls() + self.assertEqual(cls.__name__, "ForgivingAlternator") + self.assertEqual(p1.name, "Forgiving Alternator") + + cls = ForgiverTransformer(0.5, name_prefix="")(axl.Alternator) + p1 = cls() + self.assertEqual(cls.__name__, "Alternator") + self.assertEqual(p1.name, "Alternator") + + def test_repr(self): + """Tests that the player __repr__ is properly modified to add + Transformer's parameters. 
+ """ + self.assertEqual( + str(ForgiverTransformer(0.5)(axl.Alternator)()), + "Forgiving Alternator: 0.5", + ) + self.assertEqual( + str(InitialTransformer([D, D, C])(axl.Alternator)()), + "Initial Alternator: [D, D, C]", + ) + self.assertEqual(str(FlipTransformer()(axl.Random)(0.1)), "Flipped Random: 0.1") + self.assertEqual( + str(MixedTransformer(0.3, (axl.Alternator, axl.Bully))(axl.Random)(0.1)), + "Mutated Random: 0.1: 0.3, ['Alternator', 'Bully']", + ) + + def test_doc(self): + """Test that the original docstring is present""" + player = axl.Alternator() + transformer = InitialTransformer([D, D, C])(axl.Alternator)() + self.assertEqual(player.__doc__, transformer.__doc__) + + def test_cloning(self): + """Tests that IpdPlayer.clone preserves the application of transformations. + """ + p1 = axl.Cooperator() + p2 = FlipTransformer()(axl.Cooperator)() # Defector + p3 = p2.clone() + match = axl.IpdMatch((p1, p3), turns=2) + results = match.play() + self.assertEqual(results, [(C, D), (C, D)]) + + def test_generic(self): + """Test that the generic wrapper does nothing.""" + # This is the identity transformer + transformer = StrategyTransformerFactory(generic_strategy_wrapper)() + Cooperator2 = transformer(axl.Cooperator) + p1 = Cooperator2() + p2 = axl.Cooperator() + match = axl.IpdMatch((p1, p2), turns=2) + results = match.play() + self.assertEqual(results, [(C, C), (C, C)]) + + def test_flip_transformer(self): + """Tests that FlipTransformer(Cooperator) == Defector.""" + p1 = axl.Cooperator() + p2 = FlipTransformer()(axl.Cooperator)() # Defector + match = axl.IpdMatch((p1, p2), turns=3) + results = match.play() + self.assertEqual(results, [(C, D), (C, D), (C, D)]) + + def test_dual_transformer_with_all_strategies(self): + """Tests that DualTransformer produces the opposite results when faced + with the same opponent history. + """ + for s in axl.short_run_time_strategies: + self.assert_dual_wrapper_correct(s) + + def test_dual_jossann_regression_test(self): + player_class = JossAnnTransformer((0.2, 0.3))(axl.Alternator) + self.assert_dual_wrapper_correct(player_class) + + player_class = JossAnnTransformer((0.5, 0.4))(axl.EvolvedLookerUp2_2_2) + self.assert_dual_wrapper_correct(player_class) + + def test_dual_transformer_simple_play_regression_test(self): + """DualTransformer has failed when there were multiple DualTransformers. + It has also failed when DualTransformer was not the outermost + transformer or when other transformers were between multiple + DualTransformers.""" + multiple_dual_transformers = DualTransformer()( + FlipTransformer()(DualTransformer()(axl.Cooperator)) + )() + + dual_transformer_not_first = IdentityTransformer()( + DualTransformer()(axl.Cooperator) + )() + + for _ in range(3): + multiple_dual_transformers.play(dual_transformer_not_first) + + self.assertEqual(multiple_dual_transformers.history, [D, D, D]) + self.assertEqual(dual_transformer_not_first.history, [D, D, D]) + + def test_dual_transformer_multiple_interspersed_regression_test(self): + """DualTransformer has failed when there were multiple DualTransformers. 
+ It has also failed when DualTransformer was not the outermost
+ transformer or when other transformers were between multiple
+ DualTransformers."""
+ dual_not_first_transformer = IdentityTransformer()(
+ DualTransformer()(axl.EvolvedANN)
+ )
+ self.assert_dual_wrapper_correct(dual_not_first_transformer)
+
+ multiple_dual_transformers = DualTransformer()(
+ DualTransformer()(axl.WinStayLoseShift)
+ )
+ self.assert_dual_wrapper_correct(multiple_dual_transformers)
+
+ def assert_dual_wrapper_correct(self, player_class):
+ turns = 100
+
+ p1 = player_class()
+ p2 = DualTransformer()(player_class)()
+ p3 = axl.CyclerCCD()  # Cycles 'CCD'
+
+ axl.seed(0)
+ for _ in range(turns):
+ p1.play(p3)
+
+ p3.reset()
+
+ axl.seed(0)
+ for _ in range(turns):
+ p2.play(p3)
+
+ self.assertEqual(p1.history, [x.flip() for x in p2.history])
+
+ def test_jossann_transformer(self):
+ """Tests the JossAnn transformer."""
+ probability = (1, 0)
+ p1 = JossAnnTransformer(probability)(axl.Defector)()
+ self.assertFalse(axl.Classifiers["stochastic"](p1))
+ p2 = axl.Cooperator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, C, C, C, C])
+
+ probability = (0, 1)
+ p1 = JossAnnTransformer(probability)(axl.Cooperator)()
+ self.assertFalse(axl.Classifiers["stochastic"](p1))
+ p2 = axl.Cooperator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, D, D, D, D])
+
+ probability = (0.3, 0.3)
+ p1 = JossAnnTransformer(probability)(axl.TitForTat)()
+ self.assertTrue(axl.Classifiers["stochastic"](p1))
+
+ p2 = axl.Cycler()
+ axl.seed(0)
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, C, C, D, D])
+
+ probability = (0.6, 0.6)
+ p1 = JossAnnTransformer(probability)(axl.Cooperator)()
+ self.assertTrue(axl.Classifiers["stochastic"](p1))
+ p2 = axl.Cooperator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, C, D, D, C])
+
+ probability = (0, 1)
+ p1 = JossAnnTransformer(probability)(axl.Random)
+ self.assertFalse(axl.Classifiers["stochastic"](p1()))
+
+ probability = (1, 0)
+ p1 = JossAnnTransformer(probability)(axl.Random)
+ self.assertFalse(axl.Classifiers["stochastic"](p1()))
+
+ probability = (0.5, 0.5)
+ p1 = JossAnnTransformer(probability)(axl.TitForTat)
+ self.assertTrue(axl.Classifiers["stochastic"](p1()))
+
+ probability = (0, 0.5)
+ p1 = JossAnnTransformer(probability)(axl.TitForTat)
+ self.assertTrue(axl.Classifiers["stochastic"](p1()))
+
+ probability = (0, 0)
+ p1 = JossAnnTransformer(probability)(axl.TitForTat)
+ self.assertFalse(axl.Classifiers["stochastic"](p1()))
+
+ probability = (0, 0)
+ p1 = JossAnnTransformer(probability)(axl.Random)
+ self.assertTrue(axl.Classifiers["stochastic"](p1()))
+
+ def test_noisy_transformer(self):
+ """Tests that the noisy transformer does flip some moves."""
+ random.seed(5)
+ # Cooperator to Defector
+ p1 = axl.Cooperator()
+ p2 = NoisyTransformer(0.5)(axl.Cooperator)()
+ self.assertTrue(axl.Classifiers["stochastic"](p2))
+ for _ in range(10):
+ p1.play(p2)
+ self.assertEqual(p2.history, [C, C, C, C, C, C, D, D, C, C])
+
+ p2 = NoisyTransformer(0)(axl.Cooperator)
+ self.assertFalse(axl.Classifiers["stochastic"](p2()))
+
+ p2 = NoisyTransformer(1)(axl.Cooperator)
+ self.assertFalse(axl.Classifiers["stochastic"](p2()))
+
+ p2 = NoisyTransformer(0.3)(axl.Cooperator)
+ self.assertTrue(axl.Classifiers["stochastic"](p2()))
+
+ p2 = NoisyTransformer(0)(axl.Random)
+ self.assertTrue(axl.Classifiers["stochastic"](p2()))
+
+ p2 = NoisyTransformer(1)(axl.Random)
+ self.assertTrue(axl.Classifiers["stochastic"](p2()))
+
+ def test_forgiving(self):
+ """Tests that the forgiving transformer flips some defections."""
+ random.seed(10)
+ p1 = ForgiverTransformer(0.5)(axl.Alternator)()
+ self.assertTrue(axl.Classifiers["stochastic"](p1))
+ p2 = axl.Defector()
+ for _ in range(10):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, D, C, C, D, C, C, D, C, D])
+
+ p1 = ForgiverTransformer(0)(axl.Alternator)()
+ self.assertFalse(axl.Classifiers["stochastic"](p1))
+
+ p1 = ForgiverTransformer(1)(axl.Alternator)()
+ self.assertFalse(axl.Classifiers["stochastic"](p1))
+
+ def test_initial_transformer(self):
+ """Tests the InitialTransformer."""
+ p1 = axl.Cooperator()
+ self.assertEqual(axl.Classifiers["memory_depth"](p1), 0)
+ p2 = InitialTransformer([D, D])(axl.Cooperator)()
+ self.assertEqual(axl.Classifiers["memory_depth"](p2), 2)
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p2.history, [D, D, C, C, C])
+
+ p1 = axl.Cooperator()
+ p2 = InitialTransformer([D, D, C, D])(axl.Cooperator)()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p2.history, [D, D, C, D, C])
+
+ p3 = InitialTransformer([D, D])(axl.Adaptive)()
+ self.assertEqual(axl.Classifiers["memory_depth"](p3), float("inf"))
+
+ def test_final_transformer(self):
+ """Tests the FinalTransformer when tournament length is known."""
+ # Final play transformer
+ p1 = axl.Cooperator()
+ p2 = FinalTransformer([D, D, D])(axl.Cooperator)()
+ self.assertEqual(axl.Classifiers["makes_use_of"](p2), set(["length"]))
+ self.assertEqual(axl.Classifiers["memory_depth"](p2), 3)
+ self.assertEqual(axl.Classifiers["makes_use_of"](axl.Cooperator()), set([]))
+
+ p2.match_attributes["length"] = 6
+ for _ in range(8):
+ p1.play(p2)
+ self.assertEqual(p2.history, [C, C, C, D, D, D, C, C])
+
+ p3 = FinalTransformer([D, D])(axl.Adaptive)()
+ self.assertEqual(axl.Classifiers["memory_depth"](p3), float("inf"))
+
+ def test_final_transformer2(self):
+ """Tests the FinalTransformer when tournament length is not known."""
+ p1 = axl.Cooperator()
+ p2 = FinalTransformer([D, D])(axl.Cooperator)()
+ for _ in range(6):
+ p1.play(p2)
+ self.assertEqual(p2.history, [C, C, C, C, C, C])
+
+ def test_history_track(self):
+ """Tests the history tracking transformer."""
+ p1 = axl.Cooperator()
+ p2 = TrackHistoryTransformer()(axl.Random)()
+ for _ in range(6):
+ p1.play(p2)
+ self.assertEqual(p2.history, p2._recorded_history)
+
+ def test_composition(self):
+ """Tests that transformations can be chained or composed."""
+ cls1 = InitialTransformer([D, D])(axl.Cooperator)
+ cls2 = FinalTransformer([D, D])(cls1)
+ p1 = cls2()
+ p2 = axl.Cooperator()
+ p1.match_attributes["length"] = 8
+ for _ in range(8):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, D, C, C, C, C, D, D])
+
+ cls1 = FinalTransformer([D, D])(InitialTransformer([D, D])(axl.Cooperator))
+ p1 = cls1()
+ p2 = axl.Cooperator()
+ p1.match_attributes["length"] = 8
+ for _ in range(8):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, D, C, C, C, C, D, D])
+
+ def test_compose_transformers(self):
+ cls1 = compose_transformers(
+ FinalTransformer([D, D]), InitialTransformer([D, D])
+ )
+ p1 = cls1(axl.Cooperator)()
+ p2 = axl.Cooperator()
+ p1.match_attributes["length"] = 8
+ for _ in range(8):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, D, C, C, C, C, D, D])
+
+ def test_retaliation(self):
+ """Tests the RetaliationTransformer."""
+ p1 = RetaliationTransformer(1)(axl.Cooperator)()
+ p2 = axl.Defector()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, D, D, D, D])
+ self.assertEqual(p2.history, [D, D, D, D, D])
+
+ p1 = RetaliationTransformer(1)(axl.Cooperator)()
+ p2 = axl.Alternator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, C, D, C, D])
+ self.assertEqual(p2.history, [C, D, C, D, C])
+
+ TwoTitsForTat = RetaliationTransformer(2)(axl.Cooperator)
+ p1 = TwoTitsForTat()
+ p2 = axl.CyclerCCD()
+ for _ in range(9):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, C, C, D, D, C, D, D, C])
+ self.assertEqual(p2.history, [C, C, D, C, C, D, C, C, D])
+
+ def test_retaliation_until_apology(self):
+ """Tests the RetaliateUntilApologyTransformer."""
+ TFT = RetaliateUntilApologyTransformer()(axl.Cooperator)
+ p1 = TFT()
+ p2 = axl.Cooperator()
+ p1.play(p2)
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, C])
+
+ p1 = TFT()
+ p2 = axl.Defector()
+ p1.play(p2)
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, D])
+
+ random.seed(12)
+ p1 = TFT()
+ p2 = axl.Random()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, C, D, D, C])
+
+ def test_apology(self):
+ """Tests the ApologyTransformer."""
+ ApologizingDefector = ApologyTransformer([D], [C])(axl.Defector)
+ p1 = ApologizingDefector()
+ p2 = axl.Cooperator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, C, D, C, D])
+ ApologizingDefector = ApologyTransformer([D, D], [C, C])(axl.Defector)
+ p1 = ApologizingDefector()
+ p2 = axl.Cooperator()
+ for _ in range(6):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, D, C, D, D, C])
+
+ def test_mixed(self):
+ """Tests the MixedTransformer."""
+ probability = 1
+ MD = MixedTransformer(probability, axl.Cooperator)(axl.Defector)
+ self.assertFalse(axl.Classifiers["stochastic"](MD()))
+
+ p1 = MD()
+ p2 = axl.Cooperator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, C, C, C, C])
+
+ probability = 0
+ MD = MixedTransformer(probability, axl.Cooperator)(axl.Defector)
+ self.assertFalse(axl.Classifiers["stochastic"](MD()))
+
+ p1 = MD()
+ p2 = axl.Cooperator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, D, D, D, D])
+
+ # Decorating with list and distribution
+ # Decorate a cooperator putting all weight on other strategies that are
+ # 'nice'
+ probability = [0.3, 0.2, 0]
+ strategies = [axl.TitForTat, axl.Grudger, axl.Defector]
+ MD = MixedTransformer(probability, strategies)(axl.Cooperator)
+ self.assertTrue(axl.Classifiers["stochastic"](MD()))
+
+ p1 = MD()
+ # Against a cooperator we see that we only cooperate
+ p2 = axl.Cooperator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, C, C, C, C])
+
+ # Decorate a cooperator putting all weight on Defector
+ probability = (0, 0, 1)  # Note: a tuple can also be passed
+ strategies = [axl.TitForTat, axl.Grudger, axl.Defector]
+ MD = MixedTransformer(probability, strategies)(axl.Cooperator)
+ self.assertFalse(axl.Classifiers["stochastic"](MD()))
+
+ p1 = MD()
+ # Against a cooperator we see that we only defect
+ p2 = axl.Cooperator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, D, D, D, D])
+
+ def test_deadlock(self):
+ """Test the DeadlockBreakingTransformer."""
+ # We can induce a deadlock by altering TFT to defect first
+ p1 = axl.TitForTat()
+ p2 = InitialTransformer([D])(axl.TitForTat)()
+ for _ in range(4):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, D, C, D])
+ self.assertEqual(p2.history, [D, C, D, C])
+
+ # Now let's use the transformer to break the deadlock to achieve
+ # mutual cooperation
+ p1 = axl.TitForTat()
+ p2 =
DeadlockBreakingTransformer()(InitialTransformer([D])(axl.TitForTat))()
+ for _ in range(4):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, D, C, C])
+ self.assertEqual(p2.history, [D, C, C, C])
+
+ def test_grudging(self):
+ """Test the GrudgeTransformer."""
+ p1 = axl.Defector()
+ p2 = GrudgeTransformer(1)(axl.Cooperator)()
+ for _ in range(4):
+ p1.play(p2)
+ self.assertEqual(p1.history, [D, D, D, D])
+ self.assertEqual(p2.history, [C, C, D, D])
+
+ p1 = InitialTransformer([C])(axl.Defector)()
+ p2 = GrudgeTransformer(2)(axl.Cooperator)()
+ for _ in range(8):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, D, D, D, D, D, D, D])
+ self.assertEqual(p2.history, [C, C, C, C, D, D, D, D])
+
+ def test_nice(self):
+ """Tests the NiceTransformer."""
+ p1 = NiceTransformer()(axl.Defector)()
+ p2 = axl.Defector()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, D, D, D, D])
+ self.assertEqual(p2.history, [D, D, D, D, D])
+
+ p1 = NiceTransformer()(axl.Defector)()
+ p2 = axl.Alternator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, C, D, D, D])
+ self.assertEqual(p2.history, [C, D, C, D, C])
+
+ p1 = NiceTransformer()(axl.Defector)()
+ p2 = axl.Cooperator()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, C, C, C, C])
+ self.assertEqual(p2.history, [C, C, C, C, C])
+
+ def test_nilpotency(self):
+ """Show that some of the transformers are (sometimes) self-inverse,
+ i.e. that transformer(transformer(PlayerClass)) plays identically to
+ PlayerClass"""
+ for transformer in [
+ IdentityTransformer(),
+ FlipTransformer(),
+ TrackHistoryTransformer(),
+ ]:
+ for PlayerClass in [axl.Cooperator, axl.Defector]:
+ for third_player in [axl.Cooperator(), axl.Defector()]:
+ player = PlayerClass()
+ transformed = transformer(transformer(PlayerClass))()
+ clone = third_player.clone()
+ for i in range(5):
+ player.play(third_player)
+ transformed.play(clone)
+ self.assertEqual(player.history, transformed.history)
+
+ def test_idempotency(self):
+ """Show that these transformers are idempotent, i.e. that
+ transformer(transformer(PlayerClass)) == transformer(PlayerClass).
+ That means that the transformer is a projection on the set of
+ strategies."""
+ for transformer in [
+ IdentityTransformer(),
+ GrudgeTransformer(1),
+ FinalTransformer([C]),
+ FinalTransformer([D]),
+ InitialTransformer([C]),
+ InitialTransformer([D]),
+ DeadlockBreakingTransformer(),
+ RetaliationTransformer(1),
+ RetaliateUntilApologyTransformer(),
+ TrackHistoryTransformer(),
+ ApologyTransformer([D], [C]),
+ ]:
+ for PlayerClass in [axl.Cooperator, axl.Defector]:
+ for third_player in [axl.Cooperator(), axl.Defector()]:
+ clone = third_player.clone()
+ player = transformer(PlayerClass)()
+ transformed = transformer(transformer(PlayerClass))()
+ for i in range(5):
+ player.play(third_player)
+ transformed.play(clone)
+ self.assertEqual(player.history, transformed.history)
+
+ def test_implementation(self):
+ """A test that demonstrates the difference in outcomes if
+ FlipTransformer is applied to Alternator and CyclerCD. In other words,
+ the implementation matters, not just the outcomes."""
+ # Difference between Alternator and CyclerCD
+ p1 = axl.Cycler(cycle="CD")
+ p2 = FlipTransformer()(axl.Cycler)(cycle="CD")
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, D, C, D, C])
+ self.assertEqual(p2.history, [D, C, D, C, D])
+
+ p1 = axl.Alternator()
+ p2 = FlipTransformer()(axl.Alternator)()
+ for _ in range(5):
+ p1.play(p2)
+ self.assertEqual(p1.history, [C, D, C, D, C])
+ self.assertEqual(p2.history, [D, D, D, D, D])
+
+
+TFT = RetaliateUntilApologyTransformer()(axl.Cooperator)
+
+
+class TestRUAisTFT(TestTitForTat):
+ # This runs the 7 TFT tests when unittest is invoked
+ player = TFT
+ name = "RUA Cooperator"
+ expected_classifier = {
+ "memory_depth": 0,  # really 1
+ "stochastic": False,
+ "makes_use_of": set(),
+ "long_run_time": False,
+ "inspects_source": False,
+ "manipulates_source": False,
+ "manipulates_state": False,
+ }
+
+
+# Test that FlipTransformer(Defector) == Cooperator
+Cooperator2 = FlipTransformer()(axl.Defector)
+
+
+class TestFlipDefector(TestCooperator):
+ # This runs the 7 Cooperator tests when unittest is invoked
+ name = "Flipped Defector"
+ player = Cooperator2
diff --git a/axelrod/tests/unit/test_strategy_utils.py b/axelrod/tests/unit/test_strategy_utils.py
new file mode 100644
index 000000000..e93710a70
--- /dev/null
+++ b/axelrod/tests/unit/test_strategy_utils.py
@@ -0,0 +1,144 @@
+"""Tests for the strategy utils."""
+
+import unittest
+
+import axelrod as axl
+from axelrod._strategy_utils import (
+ detect_cycle,
+ inspect_strategy,
+ look_ahead,
+ recursive_thue_morse,
+ simulate_match,
+ thue_morse_generator,
+)
+
+from hypothesis import given, settings
+from hypothesis.strategies import integers, lists, sampled_from
+
+C, D = axl.Action.C, axl.Action.D
+
+
+class TestDetectCycle(unittest.TestCase):
+ @given(
+ cycle=lists(sampled_from([C, D]), min_size=2, max_size=10),
+ period=integers(min_value=3, max_value=10),
+ )
+ @settings(max_examples=5)
+ def test_finds_cycle(self, cycle, period):
+ history = cycle * period
+ detected = detect_cycle(history)
+ self.assertIsNotNone(detected)
+ self.assertIn("".join(map(str, detected)), "".join(map(str, cycle)))
+
+ def test_no_cycle(self):
+ history = [C, D, C, C]
+ self.assertIsNone(detect_cycle(history))
+
+ history = [D, D, C, C, C]
+ self.assertIsNone(detect_cycle(history))
+
+ def test_regression_test_can_detect_cycle_that_is_repeated_exactly_once(self):
+ self.assertEqual(detect_cycle([C, D, C, D]), (C, D))
+ self.assertEqual(detect_cycle([C, D, C, D, C]), (C, D))
+
+ def test_cycle_will_be_at_least_min_size(self):
+ self.assertEqual(detect_cycle([C, C, C, C], min_size=1), (C,))
+ self.assertEqual(detect_cycle([C, C, C, C], min_size=2), (C, C))
+
+ def test_cycle_that_never_fully_repeats_returns_none(self):
+ cycle = [C, D, D]
+ to_test = cycle + cycle[:-1]
+ self.assertIsNone(detect_cycle(to_test))
+
+ def test_min_size_greater_than_two_times_history_tail_returns_none(self):
+ self.assertIsNone(detect_cycle([C, C, C], min_size=2))
+
+ def test_min_size_greater_than_two_times_max_size_has_no_effect(self):
+ self.assertEqual(
+ detect_cycle([C, C, C, C, C, C, C, C], min_size=2, max_size=3), (C, C)
+ )
+
+ def test_cycle_greater_than_max_size_returns_none(self):
+ self.assertEqual(detect_cycle([C, C, D] * 2, min_size=1, max_size=3), (C, C, D))
+ self.assertIsNone(detect_cycle([C, C, D] * 2, min_size=1, max_size=2))
+
+
+class TestInspectStrategy(unittest.TestCase):
+ def
test_strategies_without_countermeasures_return_their_strategy(self): + tft = axl.TitForTat() + inspector = axl.Alternator() + + tft.play(inspector) + self.assertEqual(tft.history, [C]) + self.assertEqual(inspect_strategy(inspector=inspector, opponent=tft), C) + tft.play(inspector) + self.assertEqual(tft.history, [C, C]) + self.assertEqual(inspect_strategy(inspector=inspector, opponent=tft), D) + self.assertEqual(tft.strategy(inspector), D) + + def test_strategies_with_countermeasures_return_their_countermeasures(self): + d_geller = axl.GellerDefector() + inspector = axl.Cooperator() + d_geller.play(inspector) + + self.assertEqual(inspect_strategy(inspector=inspector, opponent=d_geller), D) + self.assertEqual(d_geller.strategy(inspector), C) + + +class TestSimulateMatch(unittest.TestCase): + def test_tft_reacts_to_cooperation(self): + tft = axl.TitForTat() + inspector = axl.Alternator() + + simulate_match(inspector, tft, C, 5) + self.assertEqual(inspector.history, [C, C, C, C, C]) + self.assertEqual(tft.history, [C, C, C, C, C]) + + def test_tft_reacts_to_defection(self): + tft = axl.TitForTat() + inspector = axl.Alternator() + + simulate_match(inspector, tft, D, 5) + self.assertEqual(inspector.history, [D, D, D, D, D]) + self.assertEqual(tft.history, [C, D, D, D, D]) + + +class TestLookAhead(unittest.TestCase): + def setUp(self): + self.inspector = axl.IpdPlayer() + self.game = axl.IpdGame() + + def test_cooperator(self): + tft = axl.Cooperator() + # It always makes sense to defect here. + self.assertEqual(look_ahead(self.inspector, tft, self.game, 1), D) + self.assertEqual(look_ahead(self.inspector, tft, self.game, 2), D) + self.assertEqual(look_ahead(self.inspector, tft, self.game, 5), D) + + def test_tit_for_tat(self): + tft = axl.TitForTat() + # Cooperation should be chosen if we look ahead further than one move. 
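+ # A sketch of the arithmetic, assuming the default game matrix
+ # (R, S, T, P) = (3, 0, 5, 1): a one-turn look-ahead only sees
+ # T = 5 > R = 3, so it defects, but TFT retaliates, so over n turns
+ # constant defection earns T + (n - 1) * P against n * R for constant
+ # cooperation, e.g. 5 + 4 * 1 = 9 < 5 * 3 = 15 for n = 5.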
+ self.assertEqual(look_ahead(self.inspector, tft, self.game, 1), D) + self.assertEqual(look_ahead(self.inspector, tft, self.game, 2), C) + self.assertEqual(look_ahead(self.inspector, tft, self.game, 5), C) + + +class TestRecursiveThueMorse(unittest.TestCase): + def test_initial_values(self): + self.assertEqual(recursive_thue_morse(0), 0) + self.assertEqual(recursive_thue_morse(1), 1) + self.assertEqual(recursive_thue_morse(2), 1) + self.assertEqual(recursive_thue_morse(3), 0) + self.assertEqual(recursive_thue_morse(4), 1) + + +class TestThueMorseGenerator(unittest.TestCase): + def test_initial_values(self): + generator = thue_morse_generator() + values = [next(generator) for i in range(5)] + self.assertEqual(values, [0, 1, 1, 0, 1]) + + def test_with_offset(self): + generator = thue_morse_generator(start=2) + values = [next(generator) for i in range(5)] + self.assertEqual(values, [1, 0, 1, 0, 0]) diff --git a/axelrod/tests/unit/test_tournament.py b/axelrod/tests/unit/test_tournament.py new file mode 100644 index 000000000..a3d50db24 --- /dev/null +++ b/axelrod/tests/unit/test_tournament.py @@ -0,0 +1,1070 @@ +"""Tests for the main tournament class.""" +import unittest +from unittest.mock import MagicMock, patch + +import io +import logging +import os +import pathlib +import pickle +import warnings +from multiprocessing import Queue, cpu_count + +from axelrod.load_data_ import axl_filename +import numpy as np +import pandas as pd +from tqdm import tqdm + +import axelrod as axl +from axelrod.tests.property import ( + prob_end_tournaments, + spatial_tournaments, + strategy_lists, + tournaments, +) +from axelrod.tournament import _close_objects + +from hypothesis import example, given, settings +from hypothesis.strategies import floats, integers + +C, D = axl.Action.C, axl.Action.D + +test_strategies = [ + axl.Cooperator, + axl.TitForTat, + axl.Defector, + axl.Grudger, + axl.GoByMajority, +] +test_repetitions = 5 +test_turns = 100 + +test_prob_end = 0.5 + +test_edges = [(0, 1), (1, 2), (3, 4)] + +deterministic_strategies = [ + s for s in axl.short_run_time_strategies if not axl.Classifiers["stochastic"](s()) +] + + +class RecordedTQDM(tqdm): + """This is a tqdm.tqdm that keeps a record of every RecordedTQDM created. 
+ It is used to test that progress bars were correctly created and then
+ closed."""
+
+ record = []
+
+ def __init__(self, *args, **kwargs):
+ super(RecordedTQDM, self).__init__(*args, **kwargs)
+ RecordedTQDM.record.append(self)
+
+ @classmethod
+ def reset_record(cls):
+ cls.record = []
+
+
+class TestTournament(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.game = axl.IpdGame()
+ cls.players = [s() for s in test_strategies]
+ cls.test_name = "test"
+ cls.test_repetitions = test_repetitions
+ cls.test_turns = test_turns
+
+ cls.expected_payoff = [
+ [600, 600, 0, 600, 600],
+ [600, 600, 199, 600, 600],
+ [1000, 204, 200, 204, 204],
+ [600, 600, 199, 600, 600],
+ [600, 600, 199, 600, 600],
+ ]
+
+ cls.expected_cooperation = [
+ [200, 200, 200, 200, 200],
+ [200, 200, 1, 200, 200],
+ [0, 0, 0, 0, 0],
+ [200, 200, 1, 200, 200],
+ [200, 200, 1, 200, 200],
+ ]
+
+ path = pathlib.Path("test_outputs/test_tournament.csv")
+ cls.filename = axl_filename(path)
+
+ def setUp(self):
+ self.test_tournament = axl.IpdTournament(
+ name=self.test_name,
+ players=self.players,
+ game=self.game,
+ turns=2,
+ repetitions=1,
+ )
+
+ def test_init(self):
+ tournament = axl.IpdTournament(
+ name=self.test_name,
+ players=self.players,
+ game=self.game,
+ turns=self.test_turns,
+ noise=0.2,
+ )
+ self.assertEqual(len(tournament.players), len(test_strategies))
+ self.assertIsInstance(tournament.players[0].match_attributes["game"], axl.IpdGame)
+ self.assertEqual(tournament.game.score((C, C)), (3, 3))
+ self.assertEqual(tournament.turns, self.test_turns)
+ self.assertEqual(tournament.repetitions, 10)
+ self.assertEqual(tournament.name, "test")
+ self.assertIsInstance(tournament._logger, logging.Logger)
+ self.assertEqual(tournament.noise, 0.2)
+ anonymous_tournament = axl.IpdTournament(players=self.players)
+ self.assertEqual(anonymous_tournament.name, "axelrod")
+
+ def test_init_with_match_attributes(self):
+ tournament = axl.IpdTournament(
+ players=self.players, match_attributes={"length": float("inf")}
+ )
+ mg = tournament.match_generator
+ match_params = mg.build_single_match_params()
+ self.assertEqual(match_params["match_attributes"], {"length": float("inf")})
+
+ def test_warning(self):
+ tournament = axl.IpdTournament(
+ name=self.test_name,
+ players=self.players,
+ game=self.game,
+ turns=10,
+ repetitions=1,
+ )
+ with warnings.catch_warnings(record=True) as w:
+ # Check that a warning is raised if no results set is built and no
+ # filename is given
+ results = tournament.play(build_results=False, progress_bar=False)
+ self.assertEqual(len(w), 1)
+
+ with warnings.catch_warnings(record=True) as w:
+ # Check that no warning is raised if no results set is built and a
+ # filename is given
+ tournament.play(
+ build_results=False, filename=self.filename, progress_bar=False
+ )
+ self.assertEqual(len(w), 0)
+
+ def test_setup_output_with_filename(self):
+ self.test_tournament.setup_output(self.filename)
+
+ self.assertEqual(self.test_tournament.filename, self.filename)
+ self.assertIsNone(self.test_tournament._temp_file_descriptor)
+ self.assertFalse(hasattr(self.test_tournament, "interactions_dict"))
+
+ def test_setup_output_no_filename(self):
+ self.test_tournament.setup_output()
+
+ self.assertIsInstance(self.test_tournament.filename, str)
+ self.assertIsInstance(self.test_tournament._temp_file_descriptor, int)
+ self.assertFalse(hasattr(self.test_tournament, "interactions_dict"))
+
+ os.close(self.test_tournament._temp_file_descriptor)
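+ # setup_output() with no filename writes to a temporary file, so the
+ # descriptor is closed above and the file itself is removed below.
+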
os.remove(self.test_tournament.filename) + + def test_play_resets_num_interactions(self): + self.assertEqual(self.test_tournament.num_interactions, 0) + self.test_tournament.play(progress_bar=False) + self.assertEqual(self.test_tournament.num_interactions, 15) + + self.test_tournament.play(progress_bar=False) + self.assertEqual(self.test_tournament.num_interactions, 15) + + def test_play_changes_use_progress_bar(self): + self.assertTrue(self.test_tournament.use_progress_bar) + + self.test_tournament.play(progress_bar=False) + self.assertFalse(self.test_tournament.use_progress_bar) + + self.test_tournament.play(progress_bar=True) + self.assertTrue(self.test_tournament.use_progress_bar) + + def test_play_changes_temp_file_descriptor(self): + self.assertIsNone(self.test_tournament._temp_file_descriptor) + + # No file descriptor for a named file. + self.test_tournament.play(filename=self.filename, progress_bar=False) + self.assertIsNone(self.test_tournament._temp_file_descriptor) + + # Temp file creates file descriptor. + self.test_tournament.play(filename=None, progress_bar=False) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + + def test_play_tempfile_removed(self): + self.test_tournament.play(filename=None, progress_bar=False) + + self.assertFalse(os.path.isfile(self.test_tournament.filename)) + + def test_play_resets_filename_and_temp_file_descriptor_each_time(self): + self.test_tournament.play(progress_bar=False) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + self.assertIsInstance(self.test_tournament.filename, str) + old_filename = self.test_tournament.filename + + self.test_tournament.play(filename=self.filename, progress_bar=False) + self.assertIsNone(self.test_tournament._temp_file_descriptor) + self.assertEqual(self.test_tournament.filename, self.filename) + self.assertNotEqual(old_filename, self.test_tournament.filename) + + self.test_tournament.play(progress_bar=False) + self.assertIsInstance(self.test_tournament._temp_file_descriptor, int) + self.assertIsInstance(self.test_tournament.filename, str) + self.assertNotEqual(old_filename, self.test_tournament.filename) + self.assertNotEqual(self.test_tournament.filename, self.filename) + + def test_get_file_objects_no_filename(self): + file, writer = self.test_tournament._get_file_objects() + self.assertIsNone(file) + self.assertIsNone(writer) + + def test_get_file_object_with_filename(self): + self.test_tournament.filename = self.filename + file_object, writer = self.test_tournament._get_file_objects() + self.assertIsInstance(file_object, io.TextIOWrapper) + self.assertEqual(writer.__class__.__name__, "writer") + file_object.close() + + def test_get_progress_bar(self): + self.test_tournament.use_progress_bar = False + pbar = self.test_tournament._get_progress_bar() + self.assertIsNone(pbar) + + self.test_tournament.use_progress_bar = True + pbar = self.test_tournament._get_progress_bar() + self.assertIsInstance(pbar, tqdm) + self.assertEqual(pbar.desc, "Playing matches") + self.assertEqual(pbar.n, 0) + self.assertEqual(pbar.total, self.test_tournament.match_generator.size) + + new_edges = [(0, 1), (1, 2), (2, 3), (3, 4)] + new_tournament = axl.IpdTournament(players=self.players, edges=new_edges) + new_tournament.use_progress_bar = True + pbar = new_tournament._get_progress_bar() + self.assertEqual(pbar.desc, "Playing matches") + self.assertEqual(pbar.n, 0) + self.assertEqual(pbar.total, len(new_edges)) + + def test_serial_play(self): + # Test that we get an instance of ResultSet + 
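# (With no processes argument, play() is expected to run the matches
+ # serially.)
+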
tournament = axl.IpdTournament(
+ name=self.test_name,
+ players=self.players,
+ game=self.game,
+ turns=axl.DEFAULT_TURNS,
+ repetitions=self.test_repetitions,
+ )
+ results = tournament.play(progress_bar=False)
+ self.assertIsInstance(results, axl.ResultSet)
+
+ # Test that every interaction is played in a serial run
+ tournament = axl.IpdTournament(
+ name=self.test_name,
+ players=self.players,
+ game=self.game,
+ turns=axl.DEFAULT_TURNS,
+ repetitions=self.test_repetitions,
+ )
+ results = tournament.play(progress_bar=False)
+ self.assertEqual(tournament.num_interactions, 75)
+
+ def test_serial_play_with_different_game(self):
+ # Test that a non-default game is passed to the result set
+ game = axl.IpdGame(p=-1, r=-1, s=-1, t=-1)
+ tournament = axl.IpdTournament(
+ name=self.test_name, players=self.players, game=game, turns=1, repetitions=1
+ )
+ results = tournament.play(progress_bar=False)
+ self.assertLessEqual(np.max(results.scores), 0)
+
+ @patch("tqdm.tqdm", RecordedTQDM)
+ def test_no_progress_bar_play(self):
+ """Test that progress bar is not created for progress_bar=False"""
+ tournament = axl.IpdTournament(
+ name=self.test_name,
+ players=self.players,
+ game=self.game,
+ turns=axl.DEFAULT_TURNS,
+ repetitions=self.test_repetitions,
+ )
+
+ # Test with build results
+ RecordedTQDM.reset_record()
+ results = tournament.play(progress_bar=False)
+ self.assertIsInstance(results, axl.ResultSet)
+ # Check that no progress bar was created.
+ self.assertEqual(RecordedTQDM.record, [])
+
+ # Test without build results
+ RecordedTQDM.reset_record()
+ results = tournament.play(
+ progress_bar=False, build_results=False, filename=self.filename
+ )
+ self.assertIsNone(results)
+ self.assertEqual(RecordedTQDM.record, [])
+
+ def assert_play_pbar_correct_total_and_finished(self, pbar, total):
+ self.assertEqual(pbar.desc, "Playing matches")
+ self.assertEqual(pbar.total, total)
+ self.assertEqual(pbar.n, total)
+ self.assertTrue(pbar.disable)
+
+ @patch("tqdm.tqdm", RecordedTQDM)
+ def test_progress_bar_play(self):
+ """Test that progress bar is created by default and with True argument"""
+ tournament = axl.IpdTournament(
+ name=self.test_name,
+ players=self.players,
+ game=self.game,
+ turns=axl.DEFAULT_TURNS,
+ repetitions=self.test_repetitions,
+ )
+
+ RecordedTQDM.reset_record()
+ results = tournament.play()
+ self.assertIsInstance(results, axl.ResultSet)
+ # Check that progress bar was created, updated and closed.
+ self.assertEqual(len(RecordedTQDM.record), 2)
+ play_pbar = RecordedTQDM.record[0]
+ self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15)
+ # Check all progress bars are closed.
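+ # (Presumably the first recorded bar tracks the matches being played
+ # and the second is created while the result set is analysed.)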
+ self.assertTrue(all(pbar.disable for pbar in RecordedTQDM.record)) + + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=True) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + # Test without build results + RecordedTQDM.reset_record() + results = tournament.play( + progress_bar=True, build_results=False, filename=self.filename + ) + self.assertIsNone(results) + self.assertEqual(len(RecordedTQDM.record), 1) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + @patch("tqdm.tqdm", RecordedTQDM) + def test_progress_bar_play_parallel(self): + """Test that tournament plays when asking for progress bar for parallel + tournament and that progress bar is created.""" + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + # progress_bar = False + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=False, processes=2) + self.assertEqual(RecordedTQDM.record, []) + self.assertIsInstance(results, axl.ResultSet) + + # progress_bar = True + RecordedTQDM.reset_record() + results = tournament.play(progress_bar=True, processes=2) + self.assertIsInstance(results, axl.ResultSet) + + self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + # progress_bar is default + RecordedTQDM.reset_record() + results = tournament.play(processes=2) + self.assertIsInstance(results, axl.ResultSet) + + self.assertEqual(len(RecordedTQDM.record), 2) + play_pbar = RecordedTQDM.record[0] + self.assert_play_pbar_correct_total_and_finished(play_pbar, total=15) + + @given( + tournament=tournaments( + min_size=2, + max_size=5, + min_turns=2, + max_turns=5, + min_repetitions=2, + max_repetitions=4, + ) + ) + @settings(max_examples=50) + @example( + tournament=axl.IpdTournament( + players=[s() for s in test_strategies], + turns=test_turns, + repetitions=test_repetitions, + ) + ) + # These two examples are to make sure #465 is fixed. + # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, + # these two examples were identified by hypothesis. 
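+ # hypothesis always runs @example cases in addition to its randomly
+ # generated tournaments, so these two regressions are checked on every
+ # run.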
+ @example( + tournament=axl.IpdTournament( + players=[axl.BackStabber(), axl.MindReader()], turns=2, repetitions=1, + ) + ) + @example( + tournament=axl.IpdTournament( + players=[axl.BackStabber(), axl.ThueMorse()], turns=2, repetitions=1 + ) + ) + def test_property_serial_play(self, tournament): + """Test serial play using hypothesis""" + # Test that we get an instance of ResultSet + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(results.num_players, len(tournament.players)) + self.assertEqual(results.players, [str(p) for p in tournament.players]) + + def test_parallel_play(self): + # Test that we get an instance of ResultSet + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(processes=2, progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(tournament.num_interactions, 75) + + # The following relates to #516 + players = [ + axl.Cooperator(), + axl.Defector(), + axl.BackStabber(), + axl.PSOGambler2_2_2(), + axl.ThueMorse(), + axl.DoubleCrosser(), + ] + tournament = axl.IpdTournament( + name=self.test_name, + players=players, + game=self.game, + turns=20, + repetitions=self.test_repetitions, + ) + scores = tournament.play(processes=2, progress_bar=False).scores + self.assertEqual(len(scores), len(players)) + + def test_parallel_play_with_writing_to_file(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + results = tournament.play( + processes=2, progress_bar=False, filename=self.filename + ) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(tournament.num_interactions, 75) + + def test_run_serial(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + tournament._write_interactions_to_file = MagicMock( + name="_write_interactions_to_file" + ) + self.assertTrue(tournament._run_serial()) + + # Get the calls made to write_interactions + calls = tournament._write_interactions_to_file.call_args_list + self.assertEqual(len(calls), 15) + + def test_run_parallel(self): + class PickleableMock(MagicMock): + def __reduce__(self): + return MagicMock, () + + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + tournament._write_interactions_to_file = PickleableMock( + name="_write_interactions_to_file" + ) + + # For test coverage purposes. This confirms PickleableMock can be + # pickled exactly once. Windows multi-processing must pickle this Mock + # exactly once during testing. 
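+ # The first dumps() succeeds because __reduce__ swaps in a plain
+ # MagicMock; re-pickling the restored tournament then fails because a
+ # plain MagicMock is not itself picklable.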
+ pickled = pickle.loads(pickle.dumps(tournament)) + self.assertIsInstance(pickled._write_interactions_to_file, MagicMock) + self.assertRaises(pickle.PicklingError, pickle.dumps, pickled) + + self.assertTrue(tournament._run_parallel()) + + # Get the calls made to write_interactions + calls = tournament._write_interactions_to_file.call_args_list + self.assertEqual(len(calls), 15) + + def test_n_workers(self): + max_processes = cpu_count() + + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + self.assertEqual(tournament._n_workers(processes=1), max_processes) + + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + self.assertEqual( + tournament._n_workers(processes=max_processes + 2), max_processes + ) + + @unittest.skipIf(cpu_count() < 2, "not supported on single processor machines") + def test_2_workers(self): + # This is a separate test with a skip condition because we + # cannot guarantee that the tests will always run on a machine + # with more than one processor + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + self.assertEqual(tournament._n_workers(processes=2), 2) + + def test_start_workers(self): + workers = 2 + work_queue = Queue() + done_queue = Queue() + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + chunks = tournament.match_generator.build_match_chunks() + for chunk in chunks: + work_queue.put(chunk) + tournament._start_workers(workers, work_queue, done_queue) + + stops = 0 + while stops < workers: + payoffs = done_queue.get() + if payoffs == "STOP": + stops += 1 + self.assertEqual(stops, workers) + + def test_worker(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + work_queue = Queue() + chunks = tournament.match_generator.build_match_chunks() + count = 0 + for chunk in chunks: + work_queue.put(chunk) + count += 1 + work_queue.put("STOP") + + done_queue = Queue() + tournament._worker(work_queue, done_queue) + for r in range(count): + new_matches = done_queue.get() + for index_pair, matches in new_matches.items(): + self.assertIsInstance(index_pair, tuple) + self.assertEqual(len(matches), self.test_repetitions) + queue_stop = done_queue.get() + self.assertEqual(queue_stop, "STOP") + + def test_build_result_set(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + + def test_no_build_result_set(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=axl.DEFAULT_TURNS, + repetitions=self.test_repetitions, + ) + + tournament._calculate_results = MagicMock(name="_calculate_results") + # Mocking this as it is called by play + self.assertIsNone( + tournament.play( + filename=self.filename, progress_bar=False, build_results=False + ) + ) + + # Get the calls made to write_interactions + calls = 
+        self.assertEqual(len(calls), 0)
+
+    @given(turns=integers(min_value=1, max_value=200))
+    @settings(max_examples=5)
+    @example(turns=3)
+    @example(turns=axl.DEFAULT_TURNS)
+    def test_play_matches(self, turns):
+        tournament = axl.IpdTournament(
+            name=self.test_name,
+            players=self.players,
+            game=self.game,
+            repetitions=self.test_repetitions,
+        )
+
+        def make_chunk_generator():
+            for player1_index in range(len(self.players)):
+                for player2_index in range(player1_index, len(self.players)):
+                    index_pair = (player1_index, player2_index)
+                    match_params = {"turns": turns, "game": self.game}
+                    yield (index_pair, match_params, self.test_repetitions)
+
+        chunk_generator = make_chunk_generator()
+        interactions = {}
+        for chunk in chunk_generator:
+            result = tournament._play_matches(chunk)
+            for index_pair, inters in result.items():
+                try:
+                    interactions[index_pair].append(inters)
+                except KeyError:
+                    interactions[index_pair] = [inters]
+
+        self.assertEqual(len(interactions), 15)
+
+        for index_pair, inter in interactions.items():
+            self.assertEqual(len(index_pair), 2)
+            for plays in inter:
+                # Check that we have the expected number of repetitions
+                self.assertEqual(len(plays), self.test_repetitions)
+                for repetition in plays:
+                    actions, results = repetition
+                    self.assertEqual(len(actions), turns)
+                    self.assertEqual(len(results), 10)
+
+        # Check that matches no longer exist
+        self.assertEqual(len(list(chunk_generator)), 0)
+
+    def test_match_cache_is_used(self):
+        """
+        Create two Random players that are classified as deterministic.
+        As they are deterministic the cache will be used.
+        """
+        FakeRandom = axl.Random
+        FakeRandom.classifier["stochastic"] = False
+        p1 = FakeRandom()
+        p2 = FakeRandom()
+        tournament = axl.IpdTournament((p1, p2), turns=5, repetitions=2)
+        results = tournament.play(progress_bar=False)
+        for player_scores in results.scores:
+            self.assertEqual(player_scores[0], player_scores[1])
+
+    def test_write_interactions(self):
+        tournament = axl.IpdTournament(
+            name=self.test_name,
+            players=self.players,
+            game=self.game,
+            turns=2,
+            repetitions=2,
+        )
+        # Mocking this as it is called by play
+        tournament._write_interactions_to_file = MagicMock(
+            name="_write_interactions_to_file"
+        )
+        self.assertIsNone(
+            tournament.play(
+                filename=self.filename, progress_bar=False, build_results=False
+            )
+        )
+
+        # Get the calls made to write_interactions
+        calls = tournament._write_interactions_to_file.call_args_list
+        self.assertEqual(len(calls), 15)
+
+    def test_write_to_csv_with_results(self):
+        tournament = axl.IpdTournament(
+            name=self.test_name,
+            players=self.players,
+            game=self.game,
+            turns=2,
+            repetitions=2,
+        )
+        tournament.play(filename=self.filename, progress_bar=False)
+        df = pd.read_csv(self.filename)
+        path = pathlib.Path("test_outputs/expected_test_tournament.csv")
+        expected_df = pd.read_csv(axl_filename(path))
+        self.assertTrue(df.equals(expected_df))
+
+    def test_write_to_csv_without_results(self):
+        tournament = axl.IpdTournament(
+            name=self.test_name,
+            players=self.players,
+            game=self.game,
+            turns=2,
+            repetitions=2,
+        )
+        tournament.play(filename=self.filename, progress_bar=False, build_results=False)
+        df = pd.read_csv(self.filename)
+        path = pathlib.Path("test_outputs/expected_test_tournament_no_results.csv")
+        expected_df = pd.read_csv(axl_filename(path))
+        self.assertTrue(df.equals(expected_df))
+
+
+class TestProbEndTournament(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        cls.game = axl.IpdGame()
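+        # A bare IpdGame() uses the standard Prisoner's Dilemma payoffs,
+        # (R, P, S, T) = (3, 1, 0, 5), which the tests below rely on when
+        # asserting game.score((C, C)) == (3, 3).
+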
cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_prob_end = test_prob_end + + def test_init(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + prob_end=self.test_prob_end, + noise=0.2, + ) + self.assertEqual(tournament.match_generator.prob_end, tournament.prob_end) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertIsNone(tournament.turns) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + anonymous_tournament = axl.IpdTournament(players=self.players) + self.assertEqual(anonymous_tournament.name, "axelrod") + + @given( + tournament=prob_end_tournaments( + min_size=2, + max_size=5, + min_prob_end=0.1, + max_prob_end=0.9, + min_repetitions=2, + max_repetitions=4, + ) + ) + @settings(max_examples=5) + @example( + tournament=axl.IpdTournament( + players=[s() for s in test_strategies], + prob_end=0.2, + repetitions=test_repetitions, + ) + ) + # These two examples are to make sure #465 is fixed. + # As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465, + # these two examples were identified by hypothesis. + @example( + tournament=axl.IpdTournament( + players=[axl.BackStabber(), axl.MindReader()], prob_end=0.2, repetitions=1, + ) + ) + @example( + tournament=axl.IpdTournament( + players=[axl.ThueMorse(), axl.MindReader()], prob_end=0.2, repetitions=1, + ) + ) + def test_property_serial_play(self, tournament): + """Test serial play using hypothesis""" + # Test that we get an instance of ResultSet + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + self.assertEqual(results.num_players, len(tournament.players)) + self.assertEqual(results.players, [str(p) for p in tournament.players]) + + +class TestSpatialTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.IpdGame() + cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_turns = test_turns + cls.test_edges = test_edges + + def test_init(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=self.test_turns, + edges=self.test_edges, + noise=0.2, + ) + self.assertEqual(tournament.match_generator.edges, tournament.edges) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertEqual(tournament.turns, 100) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + self.assertEqual(tournament.match_generator.noise, 0.2) + anonymous_tournament = axl.IpdTournament(players=self.players) + self.assertEqual(anonymous_tournament.name, "axelrod") + + @given( + strategies=strategy_lists( + strategies=deterministic_strategies, min_size=2, max_size=2 + ), + turns=integers(min_value=1, max_value=20), + repetitions=integers(min_value=1, max_value=5), + noise=floats(min_value=0, max_value=1), + seed=integers(min_value=0, max_value=4294967295), + ) + @settings(max_examples=5) + def test_complete_tournament(self, strategies, turns, repetitions, noise, seed): + """ + A test to 
check that a spatial tournament on the complete multigraph + gives the same results as the round robin. + """ + + players = [s() for s in strategies] + # edges + edges = [] + for i in range(0, len(players)): + for j in range(i, len(players)): + edges.append((i, j)) + + # create a round robin tournament + tournament = axl.IpdTournament( + players, repetitions=repetitions, turns=turns, noise=noise + ) + # create a complete spatial tournament + spatial_tournament = axl.IpdTournament( + players, repetitions=repetitions, turns=turns, noise=noise, edges=edges + ) + + axl.seed(seed) + results = tournament.play(progress_bar=False) + axl.seed(seed) + spatial_results = spatial_tournament.play(progress_bar=False) + + self.assertEqual(results.ranked_names, spatial_results.ranked_names) + self.assertEqual(results.num_players, spatial_results.num_players) + self.assertEqual(results.repetitions, spatial_results.repetitions) + self.assertEqual(results.payoff_diffs_means, spatial_results.payoff_diffs_means) + self.assertEqual(results.payoff_matrix, spatial_results.payoff_matrix) + self.assertEqual(results.payoff_stddevs, spatial_results.payoff_stddevs) + self.assertEqual(results.payoffs, spatial_results.payoffs) + self.assertEqual(results.cooperating_rating, spatial_results.cooperating_rating) + self.assertEqual(results.cooperation, spatial_results.cooperation) + self.assertEqual( + results.normalised_cooperation, spatial_results.normalised_cooperation + ) + self.assertEqual(results.normalised_scores, spatial_results.normalised_scores) + self.assertEqual( + results.good_partner_matrix, spatial_results.good_partner_matrix + ) + self.assertEqual( + results.good_partner_rating, spatial_results.good_partner_rating + ) + + def test_particular_tournament(self): + """A test for a tournament that has caused failures during some bug + fixing""" + players = [ + axl.Cooperator(), + axl.Defector(), + axl.TitForTat(), + axl.Grudger(), + ] + edges = [(0, 2), (0, 3), (1, 2), (1, 3)] + tournament = axl.IpdTournament(players, edges=edges) + results = tournament.play(progress_bar=False) + expected_ranked_names = ["Cooperator", "Tit For Tat", "Grudger", "Defector"] + self.assertEqual(results.ranked_names, expected_ranked_names) + + # Check that this tournament runs with noise + tournament = axl.IpdTournament(players, edges=edges, noise=0.5) + results = tournament.play(progress_bar=False) + self.assertIsInstance(results, axl.ResultSet) + + +class TestProbEndingSpatialTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.IpdGame() + cls.players = [s() for s in test_strategies] + cls.test_name = "test" + cls.test_repetitions = test_repetitions + cls.test_prob_end = test_prob_end + cls.test_edges = test_edges + + def test_init(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + prob_end=self.test_prob_end, + edges=self.test_edges, + noise=0.2, + ) + self.assertEqual(tournament.match_generator.edges, tournament.edges) + self.assertEqual(len(tournament.players), len(test_strategies)) + self.assertEqual(tournament.game.score((C, C)), (3, 3)) + self.assertIsNone(tournament.turns) + self.assertEqual(tournament.repetitions, 10) + self.assertEqual(tournament.name, "test") + self.assertIsInstance(tournament._logger, logging.Logger) + self.assertEqual(tournament.noise, 0.2) + self.assertEqual(tournament.match_generator.noise, 0.2) + self.assertEqual(tournament.prob_end, self.test_prob_end) + + @given( + strategies=strategy_lists( + 
strategies=deterministic_strategies, min_size=2, max_size=2
+        ),
+        prob_end=floats(min_value=0.1, max_value=0.9),
+        reps=integers(min_value=1, max_value=3),
+        seed=integers(min_value=0, max_value=4294967295),
+    )
+    @settings(max_examples=5)
+    def test_complete_tournament(self, strategies, prob_end, seed, reps):
+        """
+        A test to check that a spatial tournament on the complete graph
+        gives the same results as the round robin.
+        """
+        players = [s() for s in strategies]
+
+        # create a prob end round robin tournament
+        tournament = axl.IpdTournament(players, prob_end=prob_end, repetitions=reps)
+        axl.seed(seed)
+        results = tournament.play(progress_bar=False)
+
+        # create a complete spatial tournament
+        # edges
+        edges = [(i, j) for i in range(len(players)) for j in range(i, len(players))]
+
+        spatial_tournament = axl.IpdTournament(
+            players, prob_end=prob_end, repetitions=reps, edges=edges
+        )
+        axl.seed(seed)
+        spatial_results = spatial_tournament.play(progress_bar=False)
+        self.assertEqual(results.match_lengths, spatial_results.match_lengths)
+        self.assertEqual(results.ranked_names, spatial_results.ranked_names)
+        self.assertEqual(results.wins, spatial_results.wins)
+        self.assertEqual(results.scores, spatial_results.scores)
+        self.assertEqual(results.cooperation, spatial_results.cooperation)
+
+    @given(
+        tournament=spatial_tournaments(
+            strategies=axl.basic_strategies,
+            max_turns=1,
+            max_noise=0,
+            max_repetitions=3,
+        ),
+        seed=integers(min_value=0, max_value=4294967295),
+    )
+    @settings(max_examples=5)
+    def test_one_turn_tournament(self, tournament, seed):
+        """
+        Tests that a tournament with prob_end=1 gives the same results as
+        the corresponding one-turn spatial tournament.
+        """
+        prob_end_tour = axl.IpdTournament(
+            tournament.players,
+            prob_end=1,
+            edges=tournament.edges,
+            repetitions=tournament.repetitions,
+        )
+        axl.seed(seed)
+        prob_end_results = prob_end_tour.play(progress_bar=False)
+        axl.seed(seed)
+        one_turn_results = tournament.play(progress_bar=False)
+        self.assertEqual(prob_end_results.scores, one_turn_results.scores)
+        self.assertEqual(prob_end_results.wins, one_turn_results.wins)
+        self.assertEqual(prob_end_results.cooperation, one_turn_results.cooperation)
+
+
+class TestHelperFunctions(unittest.TestCase):
+    def test_close_objects_with_none(self):
+        self.assertIsNone(_close_objects(None, None))
+
+    def test_close_objects_with_file_objs(self):
+        f1 = open("to_delete_1", "w")
+        f2 = open("to_delete_2", "w")
+        f2.close()
+        f2 = open("to_delete_2", "r")
+
+        self.assertFalse(f1.closed)
+        self.assertFalse(f2.closed)
+
+        _close_objects(f1, f2)
+
+        self.assertTrue(f1.closed)
+        self.assertTrue(f2.closed)
+
+        os.remove("to_delete_1")
+        os.remove("to_delete_2")
+
+    def test_close_objects_with_tqdm(self):
+        pbar_1 = tqdm(range(5))
+        pbar_2 = tqdm(total=10, desc="hi", file=io.StringIO())
+
+        self.assertFalse(pbar_1.disable)
+        self.assertFalse(pbar_2.disable)
+
+        _close_objects(pbar_1, pbar_2)
+
+        self.assertTrue(pbar_1.disable)
+        self.assertTrue(pbar_2.disable)
+
+    def test_close_objects_with_different_objects(self):
+        file = open("to_delete_1", "w")
+        pbar = tqdm(range(5))
+        num = 5
+        empty = None
+        word = "hi"
+
+        _close_objects(file, pbar, num, empty, word)
+
+        self.assertTrue(pbar.disable)
+        self.assertTrue(file.closed)
+
+        os.remove("to_delete_1")
diff --git a/axelrod/tests/unit/test_version.py b/axelrod/tests/unit/test_version.py
new file mode 100644
index 000000000..2f2021bcd
--- /dev/null
+++ b/axelrod/tests/unit/test_version.py
@@ -0,0 +1,10 @@
+"""Tests the version number."""
+
+import unittest
+
+import axelrod as axl
+
+
+class TestVersion(unittest.TestCase):
+    def test_version(self):
+        self.assertIsInstance(axl.__version__, str)
diff --git a/axelrod/tournament.py b/axelrod/tournament.py
new file mode 100644
index 000000000..dbe49a2bb
--- /dev/null
+++ b/axelrod/tournament.py
@@ -0,0 +1,513 @@
+import csv
+import logging
+import os
+import warnings
+from collections import defaultdict
+from multiprocessing import Process, Queue, cpu_count
+from tempfile import mkstemp
+from typing import List, Optional, Tuple
+
+import tqdm
+
+import axelrod.interaction_utils as iu
+from axelrod import DEFAULT_TURNS
+from axelrod.action import Action, actions_to_str
+from axelrod.base_tournament import BaseTournament
+from axelrod.player import IpdPlayer
+from .game import IpdGame
+from .match import IpdMatch
+from .match_generator import MatchGenerator
+from .result_set import ResultSet
+
+C, D = Action.C, Action.D
+
+
+class Tournament:  # pragma: no cover
+    def __init__(self):
+        raise DeprecationWarning("Please change path from axelrod.tournament.Tournament to axelrod.Tournament")
+
+
+class IpdTournament(BaseTournament):
+    def __init__(
+        self,
+        players: List[IpdPlayer],
+        name: str = "axelrod",
+        game: IpdGame = None,
+        turns: int = None,
+        prob_end: float = None,
+        repetitions: int = 10,
+        noise: float = 0,
+        edges: List[Tuple] = None,
+        match_attributes: dict = None,
+    ) -> None:
+        """
+        Parameters
+        ----------
+        players : list
+            A list of axelrod.IpdPlayer objects
+        name : string
+            A name for the tournament
+        game : axelrod.IpdGame
+            The game object used to score the tournament
+        turns : integer
+            The number of turns per match
+        prob_end : float
+            The probability of a given turn ending a match
+        repetitions : integer
+            The number of times the round robin should be repeated
+        noise : float
+            The probability that a player's intended action should be flipped
+        edges : list
+            A list of edges between players
+        match_attributes : dict
+            Mapping attribute names to values which should be passed to players.
+            The default is to use the correct values for turns, game and noise
+            but these can be overridden if desired.
+        """
+        if game is None:
+            self.game = IpdGame()
+        else:
+            self.game = game
+        self.name = name
+        self.noise = noise
+        self.num_interactions = 0
+        self.players = players
+        self.repetitions = repetitions
+        self.edges = edges
+
+        if turns is None and prob_end is None:
+            turns = DEFAULT_TURNS
+
+        self.turns = turns
+        self.prob_end = prob_end
+        self.match_generator = MatchGenerator(
+            players=players,
+            turns=turns,
+            game=self.game,
+            repetitions=self.repetitions,
+            prob_end=prob_end,
+            noise=self.noise,
+            edges=edges,
+            match_attributes=match_attributes,
+        )
+        self._logger = logging.getLogger(__name__)
+
+        self.use_progress_bar = True
+        self.filename = None  # type: Optional[str]
+        self._temp_file_descriptor = None  # type: Optional[int]
+
+        super().__init__(
+            players,
+            name,
+            game,
+            turns,
+            prob_end,
+            repetitions,
+            noise,
+            edges,
+            match_attributes
+        )
+
+    def setup_output(self, filename=None):
+        """Assign/create `filename` to `self`. If the file should be deleted
+        once `play` is finished, a file descriptor is also kept.
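+
+        When no filename is supplied a temporary file is created with
+        `tempfile.mkstemp`; `play` closes its descriptor and removes the
+        file once the results have been processed.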
""" + temp_file_descriptor = None + if filename is None: + temp_file_descriptor, filename = mkstemp() + + self.filename = filename + self._temp_file_descriptor = temp_file_descriptor + + def play( + self, + build_results: bool = True, + filename: str = None, + processes: int = None, + progress_bar: bool = True, + ) -> ResultSet: + """ + Plays the tournament and passes the results to the ResultSet class + + Parameters + ---------- + build_results : bool + whether or not to build a results set + filename : string + name of output file + processes : integer + The number of processes to be used for parallel processing + progress_bar : bool + Whether or not to create a progress bar which will be updated + + Returns + ------- + axelrod.ResultSet + """ + self.num_interactions = 0 + + self.use_progress_bar = progress_bar + + self.setup_output(filename) + + if not build_results and not filename: + warnings.warn( + "IpdTournament results will not be accessible since " + "build_results=False and no filename was supplied." + ) + + if processes is None: + self._run_serial(build_results=build_results) + else: + self._run_parallel(build_results=build_results, processes=processes) + + result_set = None + if build_results: + result_set = ResultSet( + filename=self.filename, + players=[str(p) for p in self.players], + repetitions=self.repetitions, + processes=processes, + progress_bar=progress_bar, + ) + if self._temp_file_descriptor is not None: + assert self.filename is not None + os.close(self._temp_file_descriptor) + os.remove(self.filename) + + return result_set + + def _run_serial(self, build_results: bool = True) -> bool: + """Run all matches in serial.""" + + chunks = self.match_generator.build_match_chunks() + + out_file, writer = self._get_file_objects(build_results) + progress_bar = self._get_progress_bar() + + for chunk in chunks: + results = self._play_matches(chunk, build_results=build_results) + self._write_interactions_to_file(results, writer=writer) + + if self.use_progress_bar: + progress_bar.update(1) + + _close_objects(out_file, progress_bar) + + return True + + def _get_file_objects(self, build_results=True): + """Returns the file object and writer for writing results or + (None, None) if self.filename is None""" + file_obj = None + writer = None + if self.filename is not None: + file_obj = open(self.filename, "w") + writer = csv.writer(file_obj, lineterminator="\n") + + header = [ + "Interaction index", + "Player index", + "Opponent index", + "Repetition", + "Player name", + "Opponent name", + "Actions", + ] + if build_results: + header.extend( + [ + "Score", + "Score difference", + "Turns", + "Score per turn", + "Score difference per turn", + "Win", + "Initial cooperation", + "Cooperation count", + "CC count", + "CD count", + "DC count", + "DD count", + "CC to C count", + "CC to D count", + "CD to C count", + "CD to D count", + "DC to C count", + "DC to D count", + "DD to C count", + "DD to D count", + "Good partner", + ] + ) + + writer.writerow(header) + return file_obj, writer + + def _get_progress_bar(self): + if self.use_progress_bar: + return tqdm.tqdm(total=self.match_generator.size, + desc="Playing matches") + return None + + def _write_interactions_to_file(self, results, writer): + """Write the interactions to csv.""" + for index_pair, interactions in results.items(): + repetition = 0 + for interaction, results in interactions: + + if results is not None: + ( + scores, + score_diffs, + turns, + score_per_turns, + score_diffs_per_turns, + initial_cooperation, + cooperations, + 
state_distribution, + state_to_action_distributions, + winner_index, + ) = results + for index, player_index in enumerate(index_pair): + opponent_index = index_pair[index - 1] + row = [self.num_interactions, player_index, opponent_index, + repetition, + str(self.players[player_index]), + str(self.players[opponent_index])] + history = actions_to_str([i[index] for i in interaction]) + row.append(history) + + if results is not None: + row.append(scores[index]) + row.append(score_diffs[index]) + row.append(turns) + row.append(score_per_turns[index]) + row.append(score_diffs_per_turns[index]) + row.append(int(winner_index is index)) + row.append(initial_cooperation[index]) + row.append(cooperations[index]) + + states = [(C, C), (C, D), (D, C), (D, D)] + if index == 1: + states = [s[::-1] for s in states] + for state in states: + row.append(state_distribution[state]) + for state in states: + row.append(state_to_action_distributions[index][ + (state, C)]) + row.append(state_to_action_distributions[index][ + (state, D)]) + + row.append( + int(cooperations[index] >= cooperations[index - 1])) + + writer.writerow(row) + repetition += 1 + self.num_interactions += 1 + + def _run_parallel(self, processes: int = 2, + build_results: bool = True) -> bool: + """ + Run all matches in parallel + + Parameters + ---------- + build_results : bool + whether or not to build a results set + processes : int + How many processes to use. + """ + # At first sight, it might seem simpler to use the multiprocessing Pool + # Class rather than Processes and Queues. However, this way is faster. + work_queue = Queue() # type: Queue + done_queue = Queue() # type: Queue + workers = self._n_workers(processes=processes) + + chunks = self.match_generator.build_match_chunks() + for chunk in chunks: + work_queue.put(chunk) + + self._start_workers(workers, work_queue, done_queue, build_results) + self._process_done_queue(workers, done_queue, build_results) + + return True + + def _n_workers(self, processes: int = 2) -> int: + """ + Determines the number of parallel processes to use. + + Returns + ------- + integer + """ + if 2 <= processes <= cpu_count(): + n_workers = processes + else: + n_workers = cpu_count() + return n_workers + + def _start_workers( + self, + workers: int, + work_queue: Queue, + done_queue: Queue, + build_results: bool = True, + ) -> bool: + """ + Initiates the sub-processes to carry out parallel processing. 
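+
+        A "STOP" sentinel is also placed on the work queue for each worker,
+        so every worker's loop in `_worker` exits once the queued chunks
+        are exhausted.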
+
+        Parameters
+        ----------
+        workers : integer
+            The number of sub-processes to create
+        work_queue : multiprocessing.Queue
+            A queue containing an entry for each round robin to be processed
+        done_queue : multiprocessing.Queue
+            A queue containing the output dictionaries from each round robin
+        build_results : bool
+            whether or not to build a results set
+        """
+        for worker in range(workers):
+            process = Process(
+                target=self._worker,
+                args=(work_queue, done_queue, build_results)
+            )
+            work_queue.put("STOP")
+            process.start()
+        return True
+
+    def _process_done_queue(
+        self, workers: int, done_queue: Queue, build_results: bool = True
+    ):
+        """
+        Retrieves the matches from the parallel sub-processes
+
+        Parameters
+        ----------
+        workers : integer
+            The number of sub-processes in existence
+        done_queue : multiprocessing.Queue
+            A queue containing the output dictionaries from each round robin
+        build_results : bool
+            whether or not to build a results set
+        """
+        out_file, writer = self._get_file_objects(build_results)
+        progress_bar = self._get_progress_bar()
+
+        stops = 0
+        while stops < workers:
+            results = done_queue.get()
+            if results == "STOP":
+                stops += 1
+            else:
+                self._write_interactions_to_file(results, writer)
+
+                if self.use_progress_bar:
+                    progress_bar.update(1)
+
+        _close_objects(out_file, progress_bar)
+        return True
+
+    def _worker(self, work_queue: Queue, done_queue: Queue,
+                build_results: bool = True):
+        """
+        The work for each parallel sub-process to execute.
+
+        Parameters
+        ----------
+        work_queue : multiprocessing.Queue
+            A queue containing an entry for each round robin to be processed
+        done_queue : multiprocessing.Queue
+            A queue containing the output dictionaries from each round robin
+        build_results : bool
+            whether or not to build a results set
+        """
+        for chunk in iter(work_queue.get, "STOP"):
+            interactions = self._play_matches(chunk, build_results)
+            done_queue.put(interactions)
+        done_queue.put("STOP")
+        return True
+
+    def _play_matches(self, chunk, build_results=True):
+        """
+        Play matches in a given chunk.
+
+        Parameters
+        ----------
+        chunk : tuple (index pair, match_params, repetitions)
+            match_params is a dict of keyword arguments for IpdMatch,
+            e.g. turns, game and noise
+        build_results : bool
+            whether or not to build a results set
+
+        Returns
+        -------
+        interactions : dictionary
+            Mapping player index pairs to the matches they played; each
+            repetition contributes a [plays, results] pair, where plays is
+            the sequence of actions, for example:
+
+            (0, 1) -> [[[(C, D), (D, C), ...], results], ...]
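+
+            A chunk for players 0 and 1 playing 10-turn matches repeated
+            3 times might look like:
+
+            ((0, 1), {"turns": 10, "game": self.game}, 3)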
+ """ + interactions = defaultdict(list) + index_pair, match_params, repetitions = chunk + p1_index, p2_index = index_pair + player1 = self.players[p1_index].clone() + player2 = self.players[p2_index].clone() + match_params["players"] = (player1, player2) + match = IpdMatch(**match_params) + for _ in range(repetitions): + match.play() + + if build_results: + results = self._calculate_results(match.result) + else: + results = None + + interactions[index_pair].append([match.result, results]) + return interactions + + def _calculate_results(self, interactions): + results = [] + + scores = iu.compute_final_score(interactions, self.game) + results.append(scores) + + score_diffs = scores[0] - scores[1], scores[1] - scores[0] + results.append(score_diffs) + + turns = len(interactions) + results.append(turns) + + score_per_turns = iu.compute_final_score_per_turn(interactions, + self.game) + results.append(score_per_turns) + + score_diffs_per_turns = score_diffs[0] / turns, score_diffs[1] / turns + results.append(score_diffs_per_turns) + + initial_coops = tuple( + map(bool, iu.compute_cooperations(interactions[:1]))) + results.append(initial_coops) + + cooperations = iu.compute_cooperations(interactions) + results.append(cooperations) + + state_distribution = iu.compute_state_distribution(interactions) + results.append(state_distribution) + + state_to_action_distributions = iu.compute_state_to_action_distribution( + interactions + ) + results.append(state_to_action_distributions) + + winner_index = iu.compute_winner_index(interactions, self.game) + results.append(winner_index) + + return results + + +def _close_objects(*objs): + """If the objects have a `close` method, closes them.""" + for obj in objs: + if hasattr(obj, "close"): + obj.close() From 6ba07b8f255d3d69553cb4c7ea1cff1df94b8638 Mon Sep 17 00:00:00 2001 From: "T.J. Gaffney" Date: Sat, 25 Apr 2020 23:17:53 -0700 Subject: [PATCH 6/7] Mildly change the docs for new naming. --- docs/tutorials/advanced/games.rst | 4 ++-- .../tutorials/getting_started/summarising_tournaments.rst | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/tutorials/advanced/games.rst b/docs/tutorials/advanced/games.rst index a2112c35a..a1db4329c 100644 --- a/docs/tutorials/advanced/games.rst +++ b/docs/tutorials/advanced/games.rst @@ -5,7 +5,7 @@ As described in :ref:`play_contexts` the default game used for the Prisoner's Dilemma is given by:: >>> import axelrod as axl - >>> pd = axl.game.Game() + >>> pd = axl.Game() >>> pd Axelrod game: (R,P,S,T) = (3, 1, 0, 5) >>> pd.RPST() @@ -27,7 +27,7 @@ These :code:`Game` objects are used to score :ref:`matches `, It is possible to run a matches, tournaments and Moran processes with a different game. 
For example here is the game of chicken:: - >>> chicken = axl.game.Game(r=0, s=-1, t=1, p=-10) + >>> chicken = axl.Game(r=0, s=-1, t=1, p=-10) >>> chicken Axelrod game: (R,P,S,T) = (0, -10, -1, 1) >>> chicken.RPST() diff --git a/docs/tutorials/getting_started/summarising_tournaments.rst b/docs/tutorials/getting_started/summarising_tournaments.rst index ebfb39b08..1bcdf9c4d 100644 --- a/docs/tutorials/getting_started/summarising_tournaments.rst +++ b/docs/tutorials/getting_started/summarising_tournaments.rst @@ -18,10 +18,10 @@ that summarises the results of the tournament:: >>> summary = results.summarise() >>> import pprint >>> pprint.pprint(summary) - [Player(Rank=0, Name='Defector', Median_score=2.6..., Cooperation_rating=0.0, Wins=3.0, Initial_C_rate=0.0, CC_rate=...), - Player(Rank=1, Name='Tit For Tat', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, CC_rate=...), - Player(Rank=2, Name='Grudger', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, CC_rate=...), - Player(Rank=3, Name='Cooperator', Median_score=2.0..., Cooperation_rating=1.0, Wins=0.0, Initial_C_rate=1.0, CC_rate=...)] + [IpdPlayer(Rank=0, Name='Defector', Median_score=2.6..., Cooperation_rating=0.0, Wins=3.0, Initial_C_rate=0.0, CC_rate=...), + IpdPlayer(Rank=1, Name='Tit For Tat', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, CC_rate=...), + IpdPlayer(Rank=2, Name='Grudger', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, CC_rate=...), + IpdPlayer(Rank=3, Name='Cooperator', Median_score=2.0..., Cooperation_rating=1.0, Wins=0.0, Initial_C_rate=1.0, CC_rate=...)] It is also possible to write this data directly to a csv file using the `write_summary` method:: From 065d16625eb9b3cff6170df48bbf6d3885063f20 Mon Sep 17 00:00:00 2001 From: "T.J. Gaffney" Date: Sat, 25 Apr 2020 23:19:44 -0700 Subject: [PATCH 7/7] Add more files that have been moved back to their namespace. Were previously untracked. 
--- axelrod/data/all_classifiers.yml | 1986 +++++++++++++++ axelrod/data/ann_weights.csv | 4 + axelrod/data/pso_gambler.csv | 6 + axelrod/strategies/__init__.py | 133 + axelrod/strategies/_filters.py | 201 ++ axelrod/strategies/_strategies.py | 508 ++++ axelrod/strategies/adaptive.py | 55 + axelrod/strategies/adaptor.py | 104 + axelrod/strategies/alternator.py | 33 + axelrod/strategies/ann.py | 350 +++ axelrod/strategies/apavlov.py | 122 + axelrod/strategies/appeaser.py | 38 + axelrod/strategies/averagecopier.py | 61 + axelrod/strategies/axelrod_first.py | 1026 ++++++++ axelrod/strategies/axelrod_second.py | 2131 +++++++++++++++++ axelrod/strategies/backstabber.py | 106 + axelrod/strategies/better_and_better.py | 32 + axelrod/strategies/bush_mosteller.py | 132 + axelrod/strategies/calculator.py | 55 + axelrod/strategies/cooperator.py | 77 + axelrod/strategies/cycler.py | 270 +++ axelrod/strategies/darwin.py | 97 + axelrod/strategies/dbs.py | 441 ++++ axelrod/strategies/defector.py | 61 + axelrod/strategies/doubler.py | 36 + axelrod/strategies/finite_state_machines.py | 1002 ++++++++ axelrod/strategies/forgiver.py | 67 + axelrod/strategies/gambler.py | 235 ++ axelrod/strategies/geller.py | 118 + axelrod/strategies/gobymajority.py | 246 ++ axelrod/strategies/gradualkiller.py | 37 + axelrod/strategies/grudger.py | 319 +++ axelrod/strategies/grumpy.py | 73 + axelrod/strategies/handshake.py | 44 + axelrod/strategies/hmm.py | 389 +++ axelrod/strategies/human.py | 175 ++ axelrod/strategies/hunter.py | 255 ++ axelrod/strategies/inverse.py | 48 + axelrod/strategies/lookerup.py | 580 +++++ axelrod/strategies/mathematicalconstants.py | 79 + axelrod/strategies/memoryone.py | 343 +++ axelrod/strategies/memorytwo.py | 259 ++ axelrod/strategies/meta.py | 682 ++++++ axelrod/strategies/mindcontrol.py | 95 + axelrod/strategies/mindreader.py | 108 + axelrod/strategies/mutual.py | 83 + axelrod/strategies/negation.py | 34 + axelrod/strategies/oncebitten.py | 130 + axelrod/strategies/prober.py | 405 ++++ axelrod/strategies/punisher.py | 183 ++ axelrod/strategies/qlearner.py | 161 ++ axelrod/strategies/rand.py | 46 + axelrod/strategies/resurrection.py | 73 + axelrod/strategies/retaliate.py | 196 ++ axelrod/strategies/revised_downing.py | 75 + axelrod/strategies/selfsteem.py | 53 + axelrod/strategies/sequence_player.py | 111 + axelrod/strategies/shortmem.py | 48 + axelrod/strategies/stalker.py | 78 + axelrod/strategies/titfortat.py | 917 +++++++ axelrod/strategies/verybad.py | 52 + axelrod/strategies/worse_and_worse.py | 126 + axelrod/strategies/zero_determinant.py | 256 ++ axelrod/tests/integration/__init__.py | 0 axelrod/tests/integration/test_filtering.py | 124 + axelrod/tests/integration/test_matches.py | 71 + axelrod/tests/integration/test_names.py | 13 + .../integration/test_sample_tournaments.py | 70 + axelrod/tests/integration/test_tournament.py | 171 ++ axelrod/tests/strategies/__init__.py | 0 axelrod/tests/strategies/test_adaptive.py | 46 + axelrod/tests/strategies/test_adaptor.py | 93 + axelrod/tests/strategies/test_alternator.py | 33 + axelrod/tests/strategies/test_ann.py | 152 ++ axelrod/tests/strategies/test_apavlov.py | 163 ++ axelrod/tests/strategies/test_appeaser.py | 37 + .../tests/strategies/test_averagecopier.py | 178 ++ .../tests/strategies/test_axelrod_first.py | 810 +++++++ .../tests/strategies/test_axelrod_second.py | 2035 ++++++++++++++++ axelrod/tests/strategies/test_backstabber.py | 171 ++ .../strategies/test_better_and_better.py | 94 + .../tests/strategies/test_bush_mosteller.py | 77 + 
axelrod/tests/strategies/test_calculator.py | 166 ++ axelrod/tests/strategies/test_cooperator.py | 79 + axelrod/tests/strategies/test_cycler.py | 237 ++ axelrod/tests/strategies/test_darwin.py | 105 + axelrod/tests/strategies/test_dbs.py | 283 +++ axelrod/tests/strategies/test_defector.py | 63 + axelrod/tests/strategies/test_doubler.py | 49 + .../tests/strategies/test_evolvable_player.py | 213 ++ .../strategies/test_finite_state_machines.py | 1139 +++++++++ axelrod/tests/strategies/test_forgiver.py | 102 + axelrod/tests/strategies/test_gambler.py | 585 +++++ axelrod/tests/strategies/test_geller.py | 132 + axelrod/tests/strategies/test_gobymajority.py | 179 ++ .../tests/strategies/test_gradualkiller.py | 76 + axelrod/tests/strategies/test_grudger.py | 278 +++ axelrod/tests/strategies/test_grumpy.py | 80 + axelrod/tests/strategies/test_handshake.py | 36 + axelrod/tests/strategies/test_headsup.py | 120 + axelrod/tests/strategies/test_hmm.py | 327 +++ axelrod/tests/strategies/test_human.py | 133 + axelrod/tests/strategies/test_hunter.py | 265 ++ axelrod/tests/strategies/test_inverse.py | 48 + axelrod/tests/strategies/test_lookerup.py | 760 ++++++ .../strategies/test_mathematicalconstants.py | 82 + axelrod/tests/strategies/test_memoryone.py | 319 +++ axelrod/tests/strategies/test_memorytwo.py | 315 +++ axelrod/tests/strategies/test_meta.py | 721 ++++++ axelrod/tests/strategies/test_mindcontrol.py | 99 + axelrod/tests/strategies/test_mindreader.py | 172 ++ axelrod/tests/strategies/test_mutual.py | 148 ++ axelrod/tests/strategies/test_negation.py | 39 + axelrod/tests/strategies/test_oncebitten.py | 142 ++ axelrod/tests/strategies/test_player.py | 735 ++++++ axelrod/tests/strategies/test_prober.py | 385 +++ axelrod/tests/strategies/test_punisher.py | 194 ++ axelrod/tests/strategies/test_qlearner.py | 151 ++ axelrod/tests/strategies/test_rand.py | 46 + axelrod/tests/strategies/test_resurrection.py | 59 + axelrod/tests/strategies/test_retaliate.py | 140 ++ .../tests/strategies/test_revised_downing.py | 42 + axelrod/tests/strategies/test_selfsteem.py | 81 + .../tests/strategies/test_sequence_player.py | 80 + axelrod/tests/strategies/test_shortmem.py | 57 + axelrod/tests/strategies/test_stalker.py | 94 + axelrod/tests/strategies/test_titfortat.py | 1191 +++++++++ axelrod/tests/strategies/test_verybad.py | 47 + .../tests/strategies/test_worse_and_worse.py | 157 ++ .../tests/strategies/test_zero_determinant.py | 319 +++ docs/tutorials/advanced/games.html | 427 ++++ 131 files changed, 32281 insertions(+) create mode 100644 axelrod/data/all_classifiers.yml create mode 100644 axelrod/data/ann_weights.csv create mode 100644 axelrod/data/pso_gambler.csv create mode 100644 axelrod/strategies/__init__.py create mode 100644 axelrod/strategies/_filters.py create mode 100644 axelrod/strategies/_strategies.py create mode 100644 axelrod/strategies/adaptive.py create mode 100644 axelrod/strategies/adaptor.py create mode 100644 axelrod/strategies/alternator.py create mode 100644 axelrod/strategies/ann.py create mode 100644 axelrod/strategies/apavlov.py create mode 100644 axelrod/strategies/appeaser.py create mode 100644 axelrod/strategies/averagecopier.py create mode 100644 axelrod/strategies/axelrod_first.py create mode 100644 axelrod/strategies/axelrod_second.py create mode 100644 axelrod/strategies/backstabber.py create mode 100644 axelrod/strategies/better_and_better.py create mode 100644 axelrod/strategies/bush_mosteller.py create mode 100644 axelrod/strategies/calculator.py create mode 100644 
axelrod/strategies/cooperator.py create mode 100644 axelrod/strategies/cycler.py create mode 100644 axelrod/strategies/darwin.py create mode 100644 axelrod/strategies/dbs.py create mode 100644 axelrod/strategies/defector.py create mode 100644 axelrod/strategies/doubler.py create mode 100644 axelrod/strategies/finite_state_machines.py create mode 100644 axelrod/strategies/forgiver.py create mode 100644 axelrod/strategies/gambler.py create mode 100644 axelrod/strategies/geller.py create mode 100644 axelrod/strategies/gobymajority.py create mode 100644 axelrod/strategies/gradualkiller.py create mode 100644 axelrod/strategies/grudger.py create mode 100644 axelrod/strategies/grumpy.py create mode 100644 axelrod/strategies/handshake.py create mode 100644 axelrod/strategies/hmm.py create mode 100644 axelrod/strategies/human.py create mode 100644 axelrod/strategies/hunter.py create mode 100644 axelrod/strategies/inverse.py create mode 100644 axelrod/strategies/lookerup.py create mode 100644 axelrod/strategies/mathematicalconstants.py create mode 100644 axelrod/strategies/memoryone.py create mode 100644 axelrod/strategies/memorytwo.py create mode 100644 axelrod/strategies/meta.py create mode 100644 axelrod/strategies/mindcontrol.py create mode 100644 axelrod/strategies/mindreader.py create mode 100644 axelrod/strategies/mutual.py create mode 100644 axelrod/strategies/negation.py create mode 100644 axelrod/strategies/oncebitten.py create mode 100644 axelrod/strategies/prober.py create mode 100644 axelrod/strategies/punisher.py create mode 100644 axelrod/strategies/qlearner.py create mode 100644 axelrod/strategies/rand.py create mode 100644 axelrod/strategies/resurrection.py create mode 100644 axelrod/strategies/retaliate.py create mode 100644 axelrod/strategies/revised_downing.py create mode 100644 axelrod/strategies/selfsteem.py create mode 100644 axelrod/strategies/sequence_player.py create mode 100644 axelrod/strategies/shortmem.py create mode 100644 axelrod/strategies/stalker.py create mode 100644 axelrod/strategies/titfortat.py create mode 100644 axelrod/strategies/verybad.py create mode 100644 axelrod/strategies/worse_and_worse.py create mode 100644 axelrod/strategies/zero_determinant.py create mode 100644 axelrod/tests/integration/__init__.py create mode 100644 axelrod/tests/integration/test_filtering.py create mode 100644 axelrod/tests/integration/test_matches.py create mode 100644 axelrod/tests/integration/test_names.py create mode 100644 axelrod/tests/integration/test_sample_tournaments.py create mode 100644 axelrod/tests/integration/test_tournament.py create mode 100644 axelrod/tests/strategies/__init__.py create mode 100644 axelrod/tests/strategies/test_adaptive.py create mode 100644 axelrod/tests/strategies/test_adaptor.py create mode 100644 axelrod/tests/strategies/test_alternator.py create mode 100644 axelrod/tests/strategies/test_ann.py create mode 100644 axelrod/tests/strategies/test_apavlov.py create mode 100644 axelrod/tests/strategies/test_appeaser.py create mode 100644 axelrod/tests/strategies/test_averagecopier.py create mode 100644 axelrod/tests/strategies/test_axelrod_first.py create mode 100644 axelrod/tests/strategies/test_axelrod_second.py create mode 100644 axelrod/tests/strategies/test_backstabber.py create mode 100644 axelrod/tests/strategies/test_better_and_better.py create mode 100644 axelrod/tests/strategies/test_bush_mosteller.py create mode 100644 axelrod/tests/strategies/test_calculator.py create mode 100644 axelrod/tests/strategies/test_cooperator.py create mode 
100644 axelrod/tests/strategies/test_cycler.py create mode 100644 axelrod/tests/strategies/test_darwin.py create mode 100644 axelrod/tests/strategies/test_dbs.py create mode 100644 axelrod/tests/strategies/test_defector.py create mode 100644 axelrod/tests/strategies/test_doubler.py create mode 100644 axelrod/tests/strategies/test_evolvable_player.py create mode 100644 axelrod/tests/strategies/test_finite_state_machines.py create mode 100644 axelrod/tests/strategies/test_forgiver.py create mode 100755 axelrod/tests/strategies/test_gambler.py create mode 100644 axelrod/tests/strategies/test_geller.py create mode 100644 axelrod/tests/strategies/test_gobymajority.py create mode 100644 axelrod/tests/strategies/test_gradualkiller.py create mode 100644 axelrod/tests/strategies/test_grudger.py create mode 100644 axelrod/tests/strategies/test_grumpy.py create mode 100644 axelrod/tests/strategies/test_handshake.py create mode 100644 axelrod/tests/strategies/test_headsup.py create mode 100644 axelrod/tests/strategies/test_hmm.py create mode 100644 axelrod/tests/strategies/test_human.py create mode 100644 axelrod/tests/strategies/test_hunter.py create mode 100644 axelrod/tests/strategies/test_inverse.py create mode 100755 axelrod/tests/strategies/test_lookerup.py create mode 100644 axelrod/tests/strategies/test_mathematicalconstants.py create mode 100644 axelrod/tests/strategies/test_memoryone.py create mode 100644 axelrod/tests/strategies/test_memorytwo.py create mode 100644 axelrod/tests/strategies/test_meta.py create mode 100644 axelrod/tests/strategies/test_mindcontrol.py create mode 100644 axelrod/tests/strategies/test_mindreader.py create mode 100644 axelrod/tests/strategies/test_mutual.py create mode 100644 axelrod/tests/strategies/test_negation.py create mode 100644 axelrod/tests/strategies/test_oncebitten.py create mode 100644 axelrod/tests/strategies/test_player.py create mode 100644 axelrod/tests/strategies/test_prober.py create mode 100644 axelrod/tests/strategies/test_punisher.py create mode 100644 axelrod/tests/strategies/test_qlearner.py create mode 100644 axelrod/tests/strategies/test_rand.py create mode 100644 axelrod/tests/strategies/test_resurrection.py create mode 100644 axelrod/tests/strategies/test_retaliate.py create mode 100644 axelrod/tests/strategies/test_revised_downing.py create mode 100644 axelrod/tests/strategies/test_selfsteem.py create mode 100644 axelrod/tests/strategies/test_sequence_player.py create mode 100644 axelrod/tests/strategies/test_shortmem.py create mode 100644 axelrod/tests/strategies/test_stalker.py create mode 100644 axelrod/tests/strategies/test_titfortat.py create mode 100644 axelrod/tests/strategies/test_verybad.py create mode 100644 axelrod/tests/strategies/test_worse_and_worse.py create mode 100644 axelrod/tests/strategies/test_zero_determinant.py create mode 100644 docs/tutorials/advanced/games.html diff --git a/axelrod/data/all_classifiers.yml b/axelrod/data/all_classifiers.yml new file mode 100644 index 000000000..870322292 --- /dev/null +++ b/axelrod/data/all_classifiers.yml @@ -0,0 +1,1986 @@ +$\phi$: + inspects_source: false + long_run_time: false + makes_use_of: &id001 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +$\pi$: + inspects_source: false + long_run_time: false + makes_use_of: *id001 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +$e$: + inspects_source: false + long_run_time: false + makes_use_of: *id001 + manipulates_source: 
false + manipulates_state: false + memory_depth: .inf + stochastic: false +ALLCorALLD: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +AON2: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: false +Adaptive: + inspects_source: false + long_run_time: false + makes_use_of: !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Adaptive Pavlov 2006: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Adaptive Pavlov 2011: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Adaptive Tit For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +AdaptorBrief: + inspects_source: false + long_run_time: false + makes_use_of: &id002 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +AdaptorLong: + inspects_source: false + long_run_time: false + makes_use_of: *id002 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Aggravater: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Alexei: + inspects_source: false + long_run_time: false + makes_use_of: !!set + length: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Alternator: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: false +Alternator Hunter: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Anti Tit For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: false +AntiCycler: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Appeaser: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Arrogant QLearner: + inspects_source: false + long_run_time: false + makes_use_of: &id003 !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Average Copier: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +BackStabber: + inspects_source: false + long_run_time: false + makes_use_of: !!set + length: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Better and Better: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false 
+ manipulates_state: false + memory_depth: .inf + stochastic: true +Bully: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: false +Bush Mosteller: + inspects_source: false + long_run_time: false + makes_use_of: !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Calculator: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Cautious QLearner: + inspects_source: false + long_run_time: false + makes_use_of: *id003 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +CollectiveStrategy: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Contrite Tit For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 3 + stochastic: false +Cooperator: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 0 + stochastic: false +Cooperator Hunter: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Cycle Hunter: + inspects_source: false + long_run_time: false + makes_use_of: &id005 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Cycler CCCCCD: + inspects_source: false + long_run_time: false + makes_use_of: &id004 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 5 + stochastic: false +Cycler CCCD: + inspects_source: false + long_run_time: false + makes_use_of: *id004 + manipulates_source: false + manipulates_state: false + memory_depth: 3 + stochastic: false +Cycler CCCDCD: + inspects_source: false + long_run_time: false + makes_use_of: *id004 + manipulates_source: false + manipulates_state: false + memory_depth: 5 + stochastic: false +Cycler CCD: + inspects_source: false + long_run_time: false + makes_use_of: *id004 + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: false +Cycler DC: + inspects_source: false + long_run_time: false + makes_use_of: *id004 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: false +Cycler DDC: + inspects_source: false + long_run_time: false + makes_use_of: *id004 + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: false +DBS: + inspects_source: false + long_run_time: true + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Darwin: + inspects_source: true + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: true + memory_depth: .inf + stochastic: false +Defector: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 0 + stochastic: false +Defector Hunter: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Delayed AON1: + 
inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: false +Desperate: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +Detective: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +DoubleCrosser: + inspects_source: false + long_run_time: false + makes_use_of: !!set + length: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +DoubleResurrection: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 5 + stochastic: false +Doubler: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Dynamic Two Tits For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +EasyGo: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +EugineNier: + inspects_source: false + long_run_time: false + makes_use_of: !!set + length: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Eventual Cycle Hunter: + inspects_source: false + long_run_time: false + makes_use_of: *id005 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Evolved ANN: + inspects_source: false + long_run_time: false + makes_use_of: &id006 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Evolved ANN 5: + inspects_source: false + long_run_time: false + makes_use_of: *id006 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Evolved ANN 5 Noise 05: + inspects_source: false + long_run_time: false + makes_use_of: *id006 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Evolved FSM 16: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Evolved FSM 16 Noise 05: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Evolved FSM 4: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Evolved HMM 5: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 5 + stochastic: true +EvolvedLookerUp1_1_1: + inspects_source: false + long_run_time: false + makes_use_of: &id007 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +EvolvedLookerUp2_2_2: + inspects_source: false + long_run_time: false + makes_use_of: *id007 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Firm But 
Fair: + inspects_source: false + long_run_time: false + makes_use_of: &id008 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +First by Anonymous: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 0 + stochastic: true +First by Davis: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +First by Downing: + inspects_source: false + long_run_time: false + makes_use_of: !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +First by Feld: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 200 + stochastic: true +First by Graaskamp: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +First by Grofman: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +First by Joss: + inspects_source: false + long_run_time: false + makes_use_of: *id008 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +First by Nydegger: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 3 + stochastic: false +First by Shubik: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +First by Stein and Rapoport: + inspects_source: false + long_run_time: false + makes_use_of: !!set + length: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +First by Tideman and Chieruzzi: + inspects_source: false + long_run_time: false + makes_use_of: !!set + game: null + length: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +First by Tullock: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Fool Me Once: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Forgetful Fool Me Once: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Forgetful Grudger: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 10 + stochastic: false +Forgiver: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Forgiving Tit For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Fortress3: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: 
false + memory_depth: 2 + stochastic: false +Fortress4: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 3 + stochastic: false +GTFT: + inspects_source: false + long_run_time: false + makes_use_of: !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +Geller: + inspects_source: true + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Geller Cooperator: + inspects_source: true + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Geller Defector: + inspects_source: true + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +General Soft Grudger: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Go By Majority: + inspects_source: false + long_run_time: false + makes_use_of: &id009 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Go By Majority 10: + inspects_source: false + long_run_time: false + makes_use_of: *id009 + manipulates_source: false + manipulates_state: false + memory_depth: 10 + stochastic: false +Go By Majority 20: + inspects_source: false + long_run_time: false + makes_use_of: *id009 + manipulates_source: false + manipulates_state: false + memory_depth: 20 + stochastic: false +Go By Majority 40: + inspects_source: false + long_run_time: false + makes_use_of: *id009 + manipulates_source: false + manipulates_state: false + memory_depth: 40 + stochastic: false +Go By Majority 5: + inspects_source: false + long_run_time: false + makes_use_of: *id009 + manipulates_source: false + manipulates_state: false + memory_depth: 5 + stochastic: false +Gradual: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Gradual Killer: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Grudger: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +GrudgerAlternator: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Grumpy: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Handshake: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Hard Go By Majority: + inspects_source: false + long_run_time: false + makes_use_of: *id009 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Hard Go By Majority 10: + inspects_source: false + long_run_time: false + makes_use_of: *id009 + manipulates_source: false + manipulates_state: false + memory_depth: 10 + stochastic: false +Hard 
Go By Majority 20: + inspects_source: false + long_run_time: false + makes_use_of: *id009 + manipulates_source: false + manipulates_state: false + memory_depth: 20 + stochastic: false +Hard Go By Majority 40: + inspects_source: false + long_run_time: false + makes_use_of: *id009 + manipulates_source: false + manipulates_state: false + memory_depth: 40 + stochastic: false +Hard Go By Majority 5: + inspects_source: false + long_run_time: false + makes_use_of: *id009 + manipulates_source: false + manipulates_state: false + memory_depth: 5 + stochastic: false +Hard Prober: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Hard Tit For 2 Tats: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 3 + stochastic: false +Hard Tit For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 3 + stochastic: false +Hesitant QLearner: + inspects_source: false + long_run_time: false + makes_use_of: *id003 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Hopeless: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +Inverse: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Inverse Punisher: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Knowledgeable Worse and Worse: + inspects_source: false + long_run_time: false + makes_use_of: !!set + length: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Level Punisher: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Limited Retaliate: + inspects_source: false + long_run_time: false + makes_use_of: &id010 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Limited Retaliate 2: + inspects_source: false + long_run_time: false + makes_use_of: *id010 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Limited Retaliate 3: + inspects_source: false + long_run_time: false + makes_use_of: *id010 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +MEM2: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Math Constant Hunter: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Memory Decay: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Hunter: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false 
+Meta Hunter Aggressive: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Meta Majority: + inspects_source: false + long_run_time: true + makes_use_of: &id011 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Majority Finite Memory: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Majority Long Memory: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Majority Memory One: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Minority: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Mixer: + inspects_source: false + long_run_time: true + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Winner: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Winner Deterministic: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Winner Ensemble: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Winner Finite Memory: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Winner Long Memory: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Winner Memory One: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Meta Winner Stochastic: + inspects_source: false + long_run_time: true + makes_use_of: *id011 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Michaelos: + inspects_source: false + long_run_time: false + makes_use_of: !!set + length: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Mind Bender: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: true + manipulates_state: false + memory_depth: -10 + stochastic: false +Mind Controller: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: true + manipulates_state: false + memory_depth: -10 + stochastic: false +Mind Reader: + inspects_source: true + long_run_time: false + makes_use_of: !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Mind Warper: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: true + manipulates_state: false + 
memory_depth: -10 + stochastic: false +Mirror Mind Reader: + inspects_source: true + long_run_time: false + makes_use_of: !!set {} + manipulates_source: true + manipulates_state: false + memory_depth: .inf + stochastic: false +N Tit(s) For M Tat(s): + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +NMWE Deterministic: + inspects_source: false + long_run_time: true + makes_use_of: &id012 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +NMWE Finite Memory: + inspects_source: false + long_run_time: true + makes_use_of: *id012 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +NMWE Long Memory: + inspects_source: false + long_run_time: true + makes_use_of: *id012 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +NMWE Memory One: + inspects_source: false + long_run_time: true + makes_use_of: *id012 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +NMWE Stochastic: + inspects_source: false + long_run_time: true + makes_use_of: *id012 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Naive Prober: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +Negation: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +Nice Average Copier: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Nice Meta Winner: + inspects_source: false + long_run_time: true + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Nice Meta Winner Ensemble: + inspects_source: false + long_run_time: true + makes_use_of: *id012 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Omega TFT: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Once Bitten: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 12 + stochastic: false +Opposite Grudger: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +PSO Gambler 1_1_1: + inspects_source: false + long_run_time: false + makes_use_of: &id013 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +PSO Gambler 2_2_2: + inspects_source: false + long_run_time: false + makes_use_of: *id013 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +PSO Gambler 2_2_2 Noise 05: + inspects_source: false + long_run_time: false + makes_use_of: *id013 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +PSO Gambler Mem1: + inspects_source: false + long_run_time: false + makes_use_of: *id013 + manipulates_source: false + manipulates_state: false + 
memory_depth: .inf + stochastic: true +Predator: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Prober: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Prober 2: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Prober 3: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Prober 4: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Protected Mind Reader: + inspects_source: true + long_run_time: false + makes_use_of: !!set + game: null + manipulates_source: true + manipulates_state: false + memory_depth: .inf + stochastic: false +Pun1: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Punisher: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Raider: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Random: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 0 + stochastic: true +Random Hunter: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Random Tit for Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +Remorseful Prober: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: true +Resurrection: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 5 + stochastic: false +Retaliate: + inspects_source: false + long_run_time: false + makes_use_of: &id014 !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Retaliate 2: + inspects_source: false + long_run_time: false + makes_use_of: *id014 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Retaliate 3: + inspects_source: false + long_run_time: false + makes_use_of: *id014 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Revised Downing: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Ripoff: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 3 + stochastic: false +Risky QLearner: + inspects_source: false + long_run_time: 
false + makes_use_of: *id003 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Appold: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Black: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 5 + stochastic: true +Second by Borufsen: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Second by Cave: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Champion: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Colbert: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 4 + stochastic: false +Second by Eatherley: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Getzler: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Gladstein: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Second by GraaskampKatzen: + inspects_source: false + long_run_time: false + makes_use_of: !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Second by Grofman: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 8 + stochastic: false +Second by Harrington: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Kluepfel: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Leyvraz: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 3 + stochastic: true +Second by Mikkelson: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Second by RichardHufford: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Second by Rowsam: + inspects_source: false + long_run_time: false + makes_use_of: !!set + a: null + e: null + g: null + m: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Second by Tester: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + 
stochastic: false +Second by Tideman and Chieruzzi: + inspects_source: false + long_run_time: false + makes_use_of: !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Second by Tranquilizer: + inspects_source: false + long_run_time: false + makes_use_of: !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Weiner: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Second by White: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Second by WmAdams: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Second by Yamachi: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +SelfSteem: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +ShortMem: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Slow Tit For Two Tats 2: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: false +Sneaky Tit For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Soft Grudger: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 6 + stochastic: false +Soft Joss: + inspects_source: false + long_run_time: false + makes_use_of: *id008 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +SolutionB1: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: false +SolutionB5: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Spiteful Tit For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Stalker: + inspects_source: false + long_run_time: false + makes_use_of: !!set + game: null + length: null + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Stochastic Cooperator: + inspects_source: false + long_run_time: false + makes_use_of: *id008 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +Stochastic WSLS: + inspects_source: false + long_run_time: false + makes_use_of: *id008 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +Suspicious Tit For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: 
false + memory_depth: 1 + stochastic: false +TF1: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +TF2: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +TF3: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +ThueMorse: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +ThueMorseInverse: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Thumper: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Tit For 2 Tats: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: false +Tit For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: false +Tricky Cooperator: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 10 + stochastic: false +Tricky Defector: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Tricky Level Punisher: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Two Tits For Tat: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: false +UsuallyCooperates: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +UsuallyDefects: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +VeryBad: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Willing: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +Win-Shift Lose-Stay: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: false +Win-Stay Lose-Shift: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: false +Winner12: + inspects_source: false + long_run_time: false + makes_use_of: *id007 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Winner21: + inspects_source: false + 
long_run_time: false + makes_use_of: *id007 + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: false +Worse and Worse: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Worse and Worse 2: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +Worse and Worse 3: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: .inf + stochastic: true +ZD-Extort-2: + inspects_source: false + long_run_time: false + makes_use_of: &id015 !!set + game: null + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +ZD-Extort-2 v2: + inspects_source: false + long_run_time: false + makes_use_of: *id015 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +ZD-Extort-4: + inspects_source: false + long_run_time: false + makes_use_of: *id015 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +ZD-Extort3: + inspects_source: false + long_run_time: false + makes_use_of: *id015 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +ZD-Extortion: + inspects_source: false + long_run_time: false + makes_use_of: *id015 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +ZD-GEN-2: + inspects_source: false + long_run_time: false + makes_use_of: *id015 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +ZD-GTFT-2: + inspects_source: false + long_run_time: false + makes_use_of: *id015 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +ZD-Mem2: + inspects_source: false + long_run_time: false + makes_use_of: !!set {} + manipulates_source: false + manipulates_state: false + memory_depth: 2 + stochastic: true +ZD-Mischief: + inspects_source: false + long_run_time: false + makes_use_of: *id015 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true +ZD-SET-2: + inspects_source: false + long_run_time: false + makes_use_of: *id015 + manipulates_source: false + manipulates_state: false + memory_depth: 1 + stochastic: true diff --git a/axelrod/data/ann_weights.csv b/axelrod/data/ann_weights.csv new file mode 100644 index 000000000..f99d7a002 --- /dev/null +++ b/axelrod/data/ann_weights.csv @@ -0,0 +1,4 @@ +# name, features, hidden_layer_size, weights... 
+Evolved ANN, 17, 10, 3.459899447605539, 2.1318404265386586, 13.17966613895038, -6.192108007790158, -0.37086706789610013, -0.3896626810316768, -0.3866099292858918, -0.5945768318137592, 8.657276286604098, -0.31582965913711214, -12.77263491874852, -3.7221902888177283, 0.7700315120556411, -33.026063114130366, -0.6766419579791423, -0.9319070818093544, -12.214457623430176, -0.7005949704263965, -0.9248704722582742, -0.30575483808901427, -0.6882322381266356, 0.492463682676912, 0.4162176461180324, 2.2349377906341243, -4.828561111548519, -1.9512825291316083, 15.234656539966528, 13.201167624183022, 2.128673097211623, 2.92411276919898, -0.7547167660636005, 1.199314577481091, 0.06760026684238629, 0.7832939406545589, -177.49932309404323, 6.89500655807993, -0.08284645051120698, -0.799724698184292, -8.207688126907367, 17.450767180301746, -19.54570476880868, 23.570589723652876, 7.443462688149636, -4.262126715956444, 51.789933932119446, -0.9595303137934914, -0.015387143246637383, -1.6996871126483133, 0.5326556419474413, -0.8690658866365495, -0.747871482057276, -0.1990663384836313, 0.7376303253285281, 19.692218213944003, -2.920745672290689, -0.2877252920885854, -5.351299019427598, 1.1722532499353777, 1.3902788805171062, 0.38849134405949914, -5.729822642840242, -4.641822939824729, 0.5691745664669388, 0.30905426295392857, -8.85980449965398, -4.732837361082687, 0.8831193558694339, -0.9520915826182503, -0.5355473390079997, -22.037852229409136, -0.9596756645256569, 0.15632020854402362, -24.710657392303975, 21.00195680553553, -6.099731246259528, -0.9426766932743909, -0.7194608598270196, 3.4908551710867917, 0.1371367081336668, -0.9960642361403127, -0.23395001734734366, 0.7974681865981025, 1.1178545864789984, 0.0309085368898055, 0.3709257972027509, -0.7705906687715782, 0.3106563294379545, -0.006111882508708552, 0.01778595332796895, 0.5677955352244695, -0.04300357455768222, -0.8378512149555155, 0.4517674874175419, 0.026356153000395066, 8.559746666840512, 60.1036518587203, -11.066045796708, 1.9547952088765457, -1.2572770384601373, -3.1565358568060065, -0.39428165742075283, -7.976304408878465, -12.979182222761667, -0.8903476091382874, -0.044046900475693686, -2290.612969360032, 3.1361101611630904, -4.744165899359072, 1.5590913900932555, 23.589552731167824, 3.696503591016147, -49.0193733334204, -0.31155127179562386, -12.893291340674148, 3.355621906094085, -1124.6168527572167, -8.611905802129927, -5.495486462660128, 9.317002862904996, -0.928102034629926, 0.9862671193437127, 71.6360157474848, -41.78727928552422, -0.08938630210606524, -25.475912160872333, -1.6296570038831701, 3.721506342123227, 1.2616295894267517, -2.16674011456336, 1.0608792593103447, -1.5348732560098246, 0.6488189032682272, -9.554624561640432, 0.9982451738644897, -3.585518099583287, 2.3410371683096507, -1.059363823436213, -36.03382554076086, 49.400216254399005, -0.09560108506061127, 22.265326907988467, -1167.4125713033582, -827.3412289305065, -0.2817962724984171, -12.585799415544116, -0.4968822378372789, 81.10836010264876, 0.7230863554676401, -3.7241902674476655, -0.9757756717170544, -68.02893974316859, -2.2396567795647564, 0.07066110212550569, -1.250920612229347, -0.046887193108303915, -0.7680639795702753, 17.937616126439604, -1.1019678164169133, 0.07411014749198457, 2.056144561078099, -0.9166833148022522, -27.361514430051557, -2.3877636883795264, 2.2300576943504535, 447.438860571402, 0.45131006835733695, 10.58847351640523, 1.9849158213808964, -1574.2103557260862, -0.7371599837780478, 0.9086264191508042, -0.7828307535556679, 0.3300464039361075, 
1.81242958242284, -3.610267708304238, -0.973651955724061, 0.1209299242691169, 2.9843573498979894, -0.11277293056015156, -0.7439895632451479, -3.8649914641316685, 0.9782080851048618, -5.053463725816831, 3.5891164827308604, -3.7958700956913, -2.8572114118106247, -0.41818946926149336, 1.4695517340149908, -0.026854015704181288, -0.02788909596479816 +Evolved ANN 5, 17, 5, -28.102635339566508, -5.270221138740612, -1.7991915039829207, -19.860573976774578, -24.60513164187047, 3.8162913045444635, 0.023473769583907095, -54.50321528122049, 10.003539037251969, -2.3346147693972115, 16.73844151591633, 0.3100810247981438, -169.33492203029917, -5.724230282870263, 0.8526201446384842, -0.22605058147685014, -17.835641849307482, -0.7043679829900225, -10.486007034199204, -3.0550187074781925, -0.6810531857496793, -4453.796912952781, 33810.53059746947, -10848.78737197365, -118.11626381871724, 93.18879667509046, -16083.650736874622, -13.725703495018438, 50.10494647994167, -7.604207807021347, 90.77373268627657, 1.595074916653774, 2.7557812139573015, -39715.109221929466, 28.904911685140643, -0.43899423549864697, 5.768080489913274, 5.295125125995148, -2.546634014137431, -238767.20745174124, -3.5528156876545527, -4.9527347193332965, 1115.2695575158439, 4.174668501799942, -14.649495628121237, -23.25321447104678, 0.5500236215768699, -20.73180030629891, 0.5694594098639599, 9.126335853167467, 3.566801760870098, 1337.557828195395, 7235.478182255781, 73866.16381328272, -857.4430471718431, -924.9591765734541, -3518.967885192465, -1.9067958478325906, -6.633454070601698, -14.521354381912227, -2885.6120623623256, 22500.261088902847, -2.151057666676581, -1132.4844452804136, -320738.9350600944, 95.16925312662866, -5656.2165411116075, -45086.65321702537, -27.563351028422737, -2.61387586136827, 371.56363217654854, -303.4084110871267, 6.983609633015089, -47.030329656598646, -32.801150369212415, 106.5993554145673, -1488.6548395800542, -76.76127924999216, 228.98568852459917, 3.1681704936401016, 7.847547722960641, -17.600408577355783, -10.176187185243615, -2.8861364567007923, -6.112911717298348, -335.90148548509376, 52.34158411280463, 1272.9680157062762, 22.881750122081684, 533.1837365484016, -0.6762316764182235, 4.4063523628928465, 4.139696013585834, 124.12127527435491, -128.06733876616389 +Evolved ANN 5 Noise 05, 17, 5, 23.884262150181755, -1818.8117704211384, -99.98875916610936, -0.03961610388820535, -6.210054298897063, 3.7052379169262717, -0.2341738759745093, -181.13859219885555, 12.13317672298101, -0.49669501894086343, -1.0749425884840627, 10.74864948953037, -0.2896667901231506, -16.22712292450597, -1.7178383009508598, 0.03221417142047274, -158.1666203488453, -14.63669956068759, 15.408582826806054, 5.844929636776999, 4.566735602054217, 3.2997530921332476, -1.9670038978333584, 11.7498294264678, 13.681065767064851, 6.221555978655614, -6.577090380820985, -5.280239152793359, -3.708452340749404, 0.9946864682020832, -4.969083580503034, 0.37099841885684537, -4.663632521892934, 0.5739444442201613, -97.24526798384726, -18.898938443720873, 2.836962511054863, 1426.696564885826, -61.660716521684535, -96.9853369128562, -35.811382478532956, 21.74704795347286, -43208.50415025884, -30.91261823654964, 556.8238912039677, -1.2029710853005269, 86.95493903070172, -10502.293376143582, -2.0466530653599544, 22.16892386331129, -155.02893455429202, -28.409090647029252, -3.391747333401208, -13.869670523479188, 13.919971713037201, 10.27621997230116, 0.8687219298082444, 5.274915939989237, -103.56226257273838, -11.47523804695477, 8.100576862445774, 
-0.5353167277104594, -9.108755782674894, 0.8155938130504109, -7.262915116612636, -0.7403289719769353, -1.7023773645551308, 1.0574026713510245, -2732.0241659942394, -5.435067280962746, -188.24943501427512, 0.37886585953090285, -67.01235198117197, -121.1206837158425, -83.84782446726592, 45.31005125030543, 11.069886195948996, -169.35773604933806, 31.675914286375498, -311.2006747809748, 49.093586385973545, -5.819783407611689, -88.52645028711315, -284.2358463873679, 0.6989482567897998, 156.74810964228283, 1.8064093223587379, 3.4228475977181265, -125.93039228074147, -25.169489233728665, 115.98479433268457, 21.27858612035085, 0.2744006209426808, 2.899956014713407, -53.98506935427509 diff --git a/axelrod/data/pso_gambler.csv b/axelrod/data/pso_gambler.csv new file mode 100644 index 000000000..3b8e1888c --- /dev/null +++ b/axelrod/data/pso_gambler.csv @@ -0,0 +1,6 @@ +# Name (string), plays (int), opp_plays(int), starting_plays(int), weights (floats) +PSO Gambler Mem1, 1, 1, 0, 1.0, 0.52173487, 0.0, 0.12050939 +PSO Gambler 1_1_1, 1, 1, 1, 1.0, 1.0, 0.12304797, 0.57740178, 0.0, 0.0, 0.13581423, 0.11886807 +# , 2, 2, 2, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.93, 0.0, 1.0, 0.67, 0.42, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.48, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.19, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.36, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 +PSO Gambler 2_2_2, 2, 2, 2, 1.0, 1.0, 1.0, 0.0, 1.0, 0.95280465, 0.0, 0.0, 0.0, 0.80897541, 0.0, 0.0, 0.02126434, 0.0, 0.43278586, 0.0, 0.0, 0.0, 1.0, 0.15140743, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.23563137, 0.0, 0.0, 0.65147565, 1.0, 0.0, 0.0, 0.15412392, 1.0, 0.0, 0.0, 0.24922166, 1.0, 0.0, 0.0, 0.0, 0.00227615, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.77344942, 1.0, 0.24523149, 1.0, 0.0 +PSO Gambler 2_2_2 Noise 05, 2, 2, 2, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.98603825, 1.0, 1.0, 0.0, 0.0, 0.16240799, 0.63548102, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.13863175, 0.06434619, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.7724137, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.50999729, 1.0, 0.0, 0.0, 0.00524508, 0.87463905, 0.0, 0.07127653, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.28124022, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0 diff --git a/axelrod/strategies/__init__.py b/axelrod/strategies/__init__.py new file mode 100644 index 000000000..1906a1950 --- /dev/null +++ b/axelrod/strategies/__init__.py @@ -0,0 +1,133 @@ +from axelrod.classifier import Classifiers +from ._strategies import * +from ._filters import passes_filterset + +# `from ._strategies import *` import the collection `strategies` +# Now import the Meta strategies. 
This cannot be done in _strategies
+# because it creates circular dependencies.
+
+from .meta import (
+    MemoryDecay,
+    MetaHunter,
+    MetaHunterAggressive,
+    MetaPlayer,
+    MetaMajority,
+    MetaMajorityMemoryOne,
+    MetaMajorityFiniteMemory,
+    MetaMajorityLongMemory,
+    MetaMinority,
+    MetaMixer,
+    MetaWinner,
+    MetaWinnerDeterministic,
+    MetaWinnerEnsemble,
+    MetaWinnerMemoryOne,
+    MetaWinnerFiniteMemory,
+    MetaWinnerLongMemory,
+    MetaWinnerStochastic,
+    NMWEDeterministic,
+    NMWEFiniteMemory,
+    NMWELongMemory,
+    NMWEMemoryOne,
+    NMWEStochastic,
+    NiceMetaWinner,
+    NiceMetaWinnerEnsemble,
+)
+
+all_strategies += [
+    MemoryDecay,
+    MetaHunter,
+    MetaHunterAggressive,
+    MetaMajority,
+    MetaMajorityMemoryOne,
+    MetaMajorityFiniteMemory,
+    MetaMajorityLongMemory,
+    MetaMinority,
+    MetaMixer,
+    MetaWinner,
+    MetaWinnerDeterministic,
+    MetaWinnerEnsemble,
+    MetaWinnerMemoryOne,
+    MetaWinnerFiniteMemory,
+    MetaWinnerLongMemory,
+    MetaWinnerStochastic,
+    NMWEDeterministic,
+    NMWEFiniteMemory,
+    NMWELongMemory,
+    NMWEMemoryOne,
+    NMWEStochastic,
+    NiceMetaWinner,
+    NiceMetaWinnerEnsemble,
+]
+
+
+# Distinguished strategy collections in addition to
+# `all_strategies` from _strategies.py
+demo_strategies = [Cooperator, Defector, TitForTat, Grudger, Random]
+axelrod_first_strategies = [
+    TitForTat,
+    FirstByTidemanAndChieruzzi,
+    FirstByNydegger,
+    FirstByGrofman,
+    FirstByShubik,
+    FirstBySteinAndRapoport,
+    Grudger,
+    FirstByDavis,
+    FirstByGraaskamp,
+    FirstByDowning,
+    FirstByFeld,
+    FirstByJoss,
+    FirstByTullock,
+    FirstByAnonymous,
+    Random,
+]
+basic_strategies = [s for s in all_strategies if Classifiers.is_basic(s())]
+strategies = [s for s in all_strategies if Classifiers.obey_axelrod(s())]
+
+long_run_time_strategies = [
+    s for s in all_strategies if Classifiers["long_run_time"](s())
+]
+short_run_time_strategies = [
+    s for s in strategies if not Classifiers["long_run_time"](s())
+]
+cheating_strategies = [s for s in all_strategies if not Classifiers.obey_axelrod(s())]
+
+ordinary_strategies = strategies  # This is legacy and will be removed
+
+
+def filtered_strategies(filterset, strategies=all_strategies):
+    """
+    Applies the filters defined in the given filterset dict and returns those
+    strategy classes which pass all of those filters from the given list of
+    strategies.
+
+    e.g.
+
+    For the filterset dict:
+        {
+            'stochastic': True,
+            'min_memory_depth': 2
+        }
+
+    the function will return a list of all stochastic strategies with a
+    memory_depth of 2 or more.
+
+    Parameters
+    ----------
+    filterset : dict
+        mapping filter name to criterion.
+        e.g.
+            {
+                'stochastic': True,
+                'min_memory_depth': 2
+            }
+    strategies : list
+        of subclasses of axelrod.IpdPlayer
+
+    Returns
+    -------
+    list
+        of subclasses of axelrod.IpdPlayer
+    """
+    return [s for s in strategies if passes_filterset(s, filterset)]
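A minimal usage sketch for `filtered_strategies` (illustrative only; it assumes the package is importable as `axelrod` and that this module's names are re-exported at the top level, as elsewhere in this patch):

    import axelrod as axl

    # Stochastic strategies that remember at least two rounds of history.
    filterset = {"stochastic": True, "min_memory_depth": 2}
    matching = axl.filtered_strategies(filterset)
    # Per the classifier table above, e.g. Remorseful Prober
    # (stochastic, memory_depth 2) passes this filterset.
    print([s.name for s in matching])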
diff --git a/axelrod/strategies/_filters.py b/axelrod/strategies/_filters.py
new file mode 100644
index 000000000..5a150dd9a
--- /dev/null
+++ b/axelrod/strategies/_filters.py
@@ -0,0 +1,201 @@
+import operator
+from collections import namedtuple
+
+from axelrod import Classifiers
+
+
+def passes_operator_filter(player, classifier_key, value, operator):
+    """
+    Tests whether a given player passes a filter for a
+    given key in its classifier dict using a given (in)equality operator.
+
+    e.g.
+
+    For the following strategy:
+
+    class ExampleStrategy(IpdPlayer):
+        classifier = {
+            'stochastic': True,
+            'inspects_source': False,
+            'memory_depth': 10,
+            'makes_use_of': ['game', 'length']
+        }
+
+    passes_operator_filter(ExampleStrategy(), 'memory_depth', 10, operator.eq)
+
+    would test whether the 'memory_depth' entry equals 10 and return True.
+
+    Parameters
+    ----------
+    player : an instance of axelrod.IpdPlayer
+    classifier_key : string
+        Defining which entry from the strategy's classifier dict is to be
+        tested (e.g. 'memory_depth').
+    value : int
+        The value against which the strategy's classifier dict entry is to
+        be tested.
+    operator : operator.le, operator.ge or operator.eq
+        Indicating whether a 'less than or equal to', 'greater than or
+        equal to' or 'equal to' test should be applied.
+
+    Returns
+    -------
+    boolean
+
+        True if the value from the strategy's classifier dictionary matches
+        the value and operator passed to the function.
+    """
+    classifier_value = Classifiers[classifier_key](player)
+    return operator(classifier_value, value)
+
+
+def passes_in_list_filter(player, classifier_key, value):
+    """
+    Tests whether all of the values in a given list exist in the list
+    returned from the given player's classifier dict for the given
+    classifier_key.
+
+    e.g.
+
+    For the following strategy:
+
+    class ExampleStrategy(IpdPlayer):
+        classifier = {
+            'stochastic': True,
+            'inspects_source': False,
+            'memory_depth': 10,
+            'makes_use_of': ['game', 'length']
+        }
+
+    passes_in_list_filter(ExampleStrategy(), 'makes_use_of', ['game'])
+
+    would test whether 'game' exists in the strategy's 'makes_use_of' entry
+    and return True.
+
+    Parameters
+    ----------
+    player : an instance of axelrod.IpdPlayer
+    classifier_key : string
+        Defining which entry from the strategy's classifier dict is to be
+        tested (e.g. 'makes_use_of').
+    value : list
+        The values against which the strategy's classifier dict entry is to
+        be tested.
+
+    Returns
+    -------
+    boolean
+    """
+    result = True
+    for entry in value:
+        if entry not in Classifiers[classifier_key](player):
+            result = False
+    return result
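A quick sketch of the two helpers defined above (illustrative only; per the classifier table earlier in this patch, `TitForTat` has memory_depth 1 and an empty makes_use_of set):

    import operator

    from axelrod import TitForTat
    from axelrod.strategies._filters import (
        passes_in_list_filter,
        passes_operator_filter,
    )

    player = TitForTat()
    passes_operator_filter(player, "memory_depth", 1, operator.eq)  # True
    passes_operator_filter(player, "memory_depth", 2, operator.ge)  # False: 1 < 2
    passes_in_list_filter(player, "makes_use_of", ["game"])         # False: set is empty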
+
+
+def passes_filterset(strategy, filterset):
+    """
+    Determines whether a given strategy meets the criteria defined in a
+    dictionary of filters.
+
+    e.g.
+
+    For the following strategy:
+
+    class ExampleStrategy(IpdPlayer):
+        classifier = {
+            'stochastic': True,
+            'inspects_source': False,
+            'memory_depth': 10,
+            'makes_use_of': ['game', 'length']
+        }
+
+    and this filterset dict:
+
+    example_filterset = {
+        'stochastic': True,
+        'memory_depth': 10
+    }
+
+    passes_filterset(ExampleStrategy, example_filterset)
+
+    would test whether both the strategy's 'stochastic' entry is True AND
+    its 'memory_depth' equals 10, and return True.
+
+    Parameters
+    ----------
+    strategy : a subclass of axelrod.IpdPlayer
+    filterset : dict
+        mapping filter name to criterion.
+        e.g.
+            {
+                'stochastic': True,
+                'min_memory_depth': 2
+            }
+
+    Returns
+    -------
+    boolean
+
+        True if the given strategy meets all the supplied criteria in the
+        filterset, otherwise False.
+
+    """
+    FilterFunction = namedtuple("FilterFunction", "function kwargs")
+
+    # A dictionary mapping filter name (from the supplied filterset) to
+    # the relevant function and arguments for that filter.
+    filter_functions = {
+        "stochastic": FilterFunction(
+            function=passes_operator_filter,
+            kwargs={"classifier_key": "stochastic", "operator": operator.eq},
+        ),
+        "long_run_time": FilterFunction(
+            function=passes_operator_filter,
+            kwargs={"classifier_key": "long_run_time", "operator": operator.eq},
+        ),
+        "manipulates_state": FilterFunction(
+            function=passes_operator_filter,
+            kwargs={"classifier_key": "manipulates_state", "operator": operator.eq},
+        ),
+        "manipulates_source": FilterFunction(
+            function=passes_operator_filter,
+            kwargs={"classifier_key": "manipulates_source", "operator": operator.eq},
+        ),
+        "inspects_source": FilterFunction(
+            function=passes_operator_filter,
+            kwargs={"classifier_key": "inspects_source", "operator": operator.eq},
+        ),
+        "memory_depth": FilterFunction(
+            function=passes_operator_filter,
+            kwargs={"classifier_key": "memory_depth", "operator": operator.eq},
+        ),
+        "min_memory_depth": FilterFunction(
+            function=passes_operator_filter,
+            kwargs={"classifier_key": "memory_depth", "operator": operator.ge},
+        ),
+        "max_memory_depth": FilterFunction(
+            function=passes_operator_filter,
+            kwargs={"classifier_key": "memory_depth", "operator": operator.le},
+        ),
+        "makes_use_of": FilterFunction(
+            function=passes_in_list_filter, kwargs={"classifier_key": "makes_use_of"}
+        ),
+    }
+
+    # A list of boolean values to record whether the strategy passed or failed
+    # each of the filters in the supplied filterset.
+    passes_filters = []
+
+    # Loop through each of the entries in the filter_functions dict and, if
+    # that filter is defined in the supplied filterset, call the relevant
+    # function and record its result in the passes_filters list.
+    for _filter, filter_function in filter_functions.items():
+
+        if filterset.get(_filter, None) is not None:
+            kwargs = filter_function.kwargs
+            kwargs["player"] = strategy()
+            kwargs["value"] = filterset[_filter]
+            passes_filters.append(filter_function.function(**kwargs))
+
+    # Return True if the strategy passed all the supplied filters
+    return all(passes_filters)
diff --git a/axelrod/strategies/_strategies.py b/axelrod/strategies/_strategies.py
new file mode 100644
index 000000000..aa772edc5
--- /dev/null
+++ b/axelrod/strategies/_strategies.py
@@ -0,0 +1,508 @@
+"""
+This file imports all the strategies into the base namespace. Note that some
+of the imports are imports of classes that make generic classes available to
+users. In these cases the imports are done separately so that they can be
+annotated so as to avoid some static testing.
For example: + + from .memoryone import ( + GTFT, + ALLCorALLD, + FirmButFair, + SoftJoss, + StochasticCooperator, + StochasticWSLS, + WinShiftLoseStay, + WinStayLoseShift, + ) + from .memoryone import ( # pylint: disable=unused-import + ReactivePlayer, + MemoryOnePlayer + ) +""" +from .adaptive import Adaptive +from .adaptor import AdaptorBrief, AdaptorLong +from .alternator import Alternator +from .ann import EvolvedANN, EvolvedANN5, EvolvedANNNoise05 +from .ann import ANN, EvolvableANN # pylint: disable=unused-import +from .apavlov import APavlov2006, APavlov2011 +from .appeaser import Appeaser +from .averagecopier import AverageCopier, NiceAverageCopier +from .axelrod_first import ( + FirstByDavis, + FirstByFeld, + FirstByGraaskamp, + FirstByGrofman, + FirstByJoss, + FirstByNydegger, + FirstByDowning, + FirstByShubik, + FirstBySteinAndRapoport, + FirstByTidemanAndChieruzzi, + FirstByTullock, + FirstByAnonymous, +) +from .axelrod_second import ( + SecondByAppold, + SecondByBlack, + SecondByBorufsen, + SecondByCave, + SecondByChampion, + SecondByColbert, + SecondByEatherley, + SecondByGetzler, + SecondByGladstein, + SecondByGraaskampKatzen, + SecondByHarrington, + SecondByKluepfel, + SecondByLeyvraz, + SecondByMikkelson, + SecondByGrofman, + SecondByTidemanAndChieruzzi, + SecondByRichardHufford, + SecondByRowsam, + SecondByTester, + SecondByTranquilizer, + SecondByWeiner, + SecondByWhite, + SecondByWmAdams, + SecondByYamachi, +) +from .backstabber import BackStabber, DoubleCrosser +from .better_and_better import BetterAndBetter +from .bush_mosteller import BushMosteller +from .calculator import Calculator +from .cooperator import Cooperator, TrickyCooperator +from .cycler import ( + AntiCycler, + CyclerCCCCCD, + CyclerCCCD, + CyclerCCCDCD, + CyclerCCD, + CyclerDC, + CyclerDDC, +) +from .cycler import Cycler, EvolvableCycler # pylint: disable=unused-import +from .darwin import Darwin +from .dbs import DBS +from .defector import Defector, TrickyDefector +from .doubler import Doubler +from .finite_state_machines import ( + TF1, + TF2, + TF3, + EvolvedFSM4, + EvolvedFSM16, + EvolvedFSM16Noise05, + Fortress3, + Fortress4, + Predator, + Pun1, + Raider, + Ripoff, + UsuallyCooperates, + UsuallyDefects, + SolutionB1, + SolutionB5, + Thumper, +) +from .finite_state_machines import ( # pylint: disable=unused-import + SimpleFSM, + EvolvableFSMPlayer, + FSMPlayer, +) +from .forgiver import Forgiver, ForgivingTitForTat +from .gambler import ( + PSOGambler1_1_1, + PSOGambler2_2_2, + PSOGambler2_2_2_Noise05, + PSOGamblerMem1, + ZDMem2, +) +from .gambler import EvolvableGambler, Gambler # pylint: disable=unused-import +from .geller import Geller, GellerCooperator, GellerDefector +from .gobymajority import ( + GoByMajority, + GoByMajority5, + GoByMajority10, + GoByMajority20, + GoByMajority40, + HardGoByMajority, + HardGoByMajority5, + HardGoByMajority10, + HardGoByMajority20, + HardGoByMajority40, +) +from .gradualkiller import GradualKiller +from .grudger import ( + Aggravater, + EasyGo, + ForgetfulGrudger, + GeneralSoftGrudger, + Grudger, + GrudgerAlternator, + OppositeGrudger, + SoftGrudger, +) +from .grumpy import Grumpy +from .handshake import Handshake +from .hmm import EvolvedHMM5 +from .hmm import SimpleHMM, EvolvableHMMPlayer, HMMPlayer # pylint: disable=unused-import +from .human import Human # pylint: disable=unused-import +from .hunter import ( + AlternatorHunter, + CooperatorHunter, + CycleHunter, + DefectorHunter, + EventualCycleHunter, + MathConstantHunter, + RandomHunter, +) +from 
.inverse import Inverse +from .lookerup import ( + EvolvedLookerUp1_1_1, + EvolvedLookerUp2_2_2, + Winner12, + Winner21, +) +from .lookerup import ( # pylint: disable=unused-import + EvolvableLookerUp, + LookerUp, +) + +from .mathematicalconstants import Golden, Pi, e +from .memoryone import ( + GTFT, + ALLCorALLD, + FirmButFair, + SoftJoss, + StochasticCooperator, + StochasticWSLS, + WinShiftLoseStay, + WinStayLoseShift, +) +from .memoryone import ( # pylint: disable=unused-import + ReactivePlayer, + MemoryOnePlayer, +) + +from .memorytwo import AON2, MEM2, DelayedAON1 +from .memorytwo import MemoryTwoPlayer # pylint: disable=unused-import + +from .mindcontrol import MindBender, MindController, MindWarper +from .mindreader import MindReader, MirrorMindReader, ProtectedMindReader +from .mutual import Desperate, Hopeless, Willing +from .negation import Negation +from .oncebitten import FoolMeOnce, ForgetfulFoolMeOnce, OnceBitten +from .prober import ( + CollectiveStrategy, + Detective, + HardProber, + NaiveProber, + Prober, + Prober2, + Prober3, + Prober4, + RemorsefulProber, +) +from .punisher import ( + InversePunisher, + LevelPunisher, + Punisher, + TrickyLevelPunisher, +) +from .qlearner import ( + ArrogantQLearner, + CautiousQLearner, + HesitantQLearner, + RiskyQLearner, +) +from .rand import Random +from .resurrection import DoubleResurrection, Resurrection +from .retaliate import ( + LimitedRetaliate, + LimitedRetaliate2, + LimitedRetaliate3, + Retaliate, + Retaliate2, + Retaliate3, +) +from .revised_downing import RevisedDowning +from .selfsteem import SelfSteem +from .sequence_player import ( # pylint: disable=unused-import + SequencePlayer, + ThueMorse, + ThueMorseInverse, +) +from .shortmem import ShortMem +from .stalker import Stalker +from .titfortat import ( + AdaptiveTitForTat, + Alexei, + AntiTitForTat, + Bully, + ContriteTitForTat, + DynamicTwoTitsForTat, + EugineNier, + Gradual, + HardTitFor2Tats, + HardTitForTat, + Michaelos, + NTitsForMTats, + OmegaTFT, + OriginalGradual, + RandomTitForTat, + SlowTitForTwoTats2, + SneakyTitForTat, + SpitefulTitForTat, + SuspiciousTitForTat, + TitFor2Tats, + TitForTat, + TwoTitsForTat, +) +from .verybad import VeryBad +from .worse_and_worse import ( + KnowledgeableWorseAndWorse, + WorseAndWorse, + WorseAndWorse2, + WorseAndWorse3, +) +from .zero_determinant import ( + ZDGTFT2, + ZDExtort2, + ZDExtort2v2, + ZDExtort3, + ZDExtort4, + ZDExtortion, + ZDGen2, + ZDMischief, + ZDSet2, +) + +# Note: Meta* strategies are handled in .__init__.py + + +all_strategies = [ + ALLCorALLD, + AON2, + APavlov2006, + APavlov2011, + Adaptive, + AdaptiveTitForTat, + AdaptorBrief, + AdaptorLong, + Aggravater, + Alexei, + Alternator, + AlternatorHunter, + AntiCycler, + AntiTitForTat, + Appeaser, + ArrogantQLearner, + AverageCopier, + BackStabber, + BetterAndBetter, + Bully, + BushMosteller, + Calculator, + CautiousQLearner, + CollectiveStrategy, + ContriteTitForTat, + Cooperator, + CooperatorHunter, + CycleHunter, + CyclerCCCCCD, + CyclerCCCD, + CyclerCCCDCD, + CyclerCCD, + CyclerDC, + CyclerDDC, + DBS, + Darwin, + Defector, + DefectorHunter, + DelayedAON1, + Desperate, + Detective, + DoubleCrosser, + DoubleResurrection, + Doubler, + DynamicTwoTitsForTat, + EasyGo, + EugineNier, + EventualCycleHunter, + EvolvedANN, + EvolvedANN5, + EvolvedANNNoise05, + EvolvedFSM16, + EvolvedFSM16Noise05, + EvolvedFSM4, + EvolvedHMM5, + EvolvedLookerUp1_1_1, + EvolvedLookerUp2_2_2, + FirmButFair, + FirstByAnonymous, + FirstByDavis, + FirstByDowning, + FirstByFeld, + 
FirstByGraaskamp, + FirstByGrofman, + FirstByJoss, + FirstByNydegger, + FirstByShubik, + FirstBySteinAndRapoport, + FirstByTidemanAndChieruzzi, + FirstByTullock, + FoolMeOnce, + ForgetfulFoolMeOnce, + ForgetfulGrudger, + Forgiver, + ForgivingTitForTat, + Fortress3, + Fortress4, + GTFT, + Geller, + GellerCooperator, + GellerDefector, + GeneralSoftGrudger, + GoByMajority, + GoByMajority10, + GoByMajority20, + GoByMajority40, + GoByMajority5, + Golden, + Gradual, + GradualKiller, + Grudger, + GrudgerAlternator, + Grumpy, + Handshake, + HardGoByMajority, + HardGoByMajority10, + HardGoByMajority20, + HardGoByMajority40, + HardGoByMajority5, + HardProber, + HardTitFor2Tats, + HardTitForTat, + HesitantQLearner, + Hopeless, + Inverse, + InversePunisher, + KnowledgeableWorseAndWorse, + LevelPunisher, + LimitedRetaliate, + LimitedRetaliate2, + LimitedRetaliate3, + MEM2, + MathConstantHunter, + Michaelos, + MindBender, + MindController, + MindReader, + MindWarper, + MirrorMindReader, + NTitsForMTats, + NaiveProber, + Negation, + NiceAverageCopier, + OmegaTFT, + OnceBitten, + OppositeGrudger, + OriginalGradual, + PSOGambler1_1_1, + PSOGambler2_2_2, + PSOGambler2_2_2_Noise05, + PSOGamblerMem1, + Pi, + Predator, + Prober, + Prober2, + Prober3, + Prober4, + ProtectedMindReader, + Pun1, + Punisher, + Raider, + Random, + RandomHunter, + RandomTitForTat, + RemorsefulProber, + Resurrection, + Retaliate, + Retaliate2, + Retaliate3, + RevisedDowning, + Ripoff, + RiskyQLearner, + SecondByAppold, + SecondByBlack, + SecondByBorufsen, + SecondByCave, + SecondByChampion, + SecondByColbert, + SecondByEatherley, + SecondByGetzler, + SecondByGladstein, + SecondByGraaskampKatzen, + SecondByGrofman, + SecondByHarrington, + SecondByKluepfel, + SecondByLeyvraz, + SecondByMikkelson, + SecondByRichardHufford, + SecondByRowsam, + SecondByTester, + SecondByTidemanAndChieruzzi, + SecondByTranquilizer, + SecondByWeiner, + SecondByWhite, + SecondByWmAdams, + SecondByYamachi, + SelfSteem, + ShortMem, + SlowTitForTwoTats2, + SneakyTitForTat, + SoftGrudger, + SoftJoss, + SolutionB1, + SolutionB5, + SpitefulTitForTat, + Stalker, + StochasticCooperator, + StochasticWSLS, + SuspiciousTitForTat, + TF1, + TF2, + TF3, + ThueMorse, + ThueMorseInverse, + Thumper, + TitFor2Tats, + TitForTat, + TrickyCooperator, + TrickyDefector, + TrickyLevelPunisher, + TwoTitsForTat, + UsuallyCooperates, + UsuallyDefects, + VeryBad, + Willing, + WinShiftLoseStay, + WinStayLoseShift, + Winner12, + Winner21, + WorseAndWorse, + WorseAndWorse2, + WorseAndWorse3, + ZDExtort2, + ZDExtort2v2, + ZDExtort3, + ZDExtort4, + ZDExtortion, + ZDGTFT2, + ZDGen2, + ZDMem2, + ZDMischief, + ZDSet2, + e, +] diff --git a/axelrod/strategies/adaptive.py b/axelrod/strategies/adaptive.py new file mode 100644 index 000000000..31aedccfc --- /dev/null +++ b/axelrod/strategies/adaptive.py @@ -0,0 +1,55 @@ +from typing import List + +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class Adaptive(IpdPlayer): + """Start with a specific sequence of C and D, then play the strategy that + has worked best, recalculated each turn. 
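+
+    For example (an illustrative sketch, assuming the default
+    (R, P, S, T) = (3, 1, 0, 5) payoffs): if the plays so far were
+    [(C, C), (C, D)], the running totals are scores[C] = 3 + 0 = 3 and
+    scores[D] = 0, so once the initial sequence is exhausted the player
+    would pick C, the action with the larger accumulated score.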
+
+    Names:
+
+    - Adaptive: [Li2011]_
+
+    """
+
+    name = "Adaptive"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(["game"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, initial_plays: List[Action] = None) -> None:
+        super().__init__()
+        if not initial_plays:
+            initial_plays = [C] * 6 + [D] * 5
+        self.initial_plays = initial_plays
+        self.scores = {C: 0, D: 0}
+
+    def score_last_round(self, opponent: IpdPlayer):
+        # Load the default game if not supplied by a tournament.
+        game = self.match_attributes["game"]
+        if len(self.history):
+            last_round = (self.history[-1], opponent.history[-1])
+            scores = game.score(last_round)
+            self.scores[last_round[0]] += scores[0]
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # Update scores from the last play
+        self.score_last_round(opponent)
+        # Begin by playing the sequence C,C,C,C,C,C,D,D,D,D,D
+        index = len(self.history)
+        if index < len(self.initial_plays):
+            return self.initial_plays[index]
+        # Play the action with the higher total score so far
+        if self.scores[C] > self.scores[D]:
+            return C
+        return D
diff --git a/axelrod/strategies/adaptor.py b/axelrod/strategies/adaptor.py
new file mode 100644
index 000000000..9eaae0be6
--- /dev/null
+++ b/axelrod/strategies/adaptor.py
@@ -0,0 +1,104 @@
+from typing import Dict, Tuple
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice
+
+from numpy import heaviside
+
+C, D = Action.C, Action.D
+
+
+class AbstractAdaptor(IpdPlayer):
+    """
+    An adaptive strategy that updates an internal state based on the last
+    round of play. Using this state the player Cooperates with a probability
+    derived from the state.
+
+    s, float:
+        the internal state, initially 0
+    perr, float:
+        an error threshold for misinterpreted moves
+    delta, a dictionary of floats:
+        additive update values for s depending on the last round's outcome
+
+    Names:
+
+    - Adaptor: [Hauert2002]_
+
+    """
+
+    name = "AbstractAdaptor"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, delta: Dict[Tuple[Action, Action], float],
+                 perr: float = 0.01) -> None:
+        super().__init__()
+        self.perr = perr
+        self.delta = delta
+        self.s = 0.
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if self.history:
+            # Update internal state from the last play
+            last_round = (self.history[-1], opponent.history[-1])
+            self.s += self.delta[last_round]
+
+        # Compute probability of Cooperation
+        p = self.perr + (1.0 - 2 * self.perr) * (
+            heaviside(self.s + 1, 1) - heaviside(self.s - 1, 1))
+        # Draw action
+        action = random_choice(p)
+        return action
+
+
+class AdaptorBrief(AbstractAdaptor):
+    """
+    An Adaptor trained on short interactions.
+
+    Names:
+
+    - AdaptorBrief: [Hauert2002]_
+
+    """
+
+    name = "AdaptorBrief"
+
+    def __init__(self) -> None:
+        delta = {
+            (C, C): 0.,  # R
+            (C, D): -1.001505,  # S
+            (D, C): 0.992107,  # T
+            (D, D): -0.638734  # P
+        }
+        super().__init__(delta=delta)
+
+
+class AdaptorLong(AbstractAdaptor):
+    """
+    An Adaptor trained on long interactions.
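+
+    As an illustrative reading of the shared AbstractAdaptor rule above
+    (not part of the original description): with perr = 0.01, a state s
+    strictly inside (-1, 1) gives a cooperation probability of
+    0.01 + 0.98 * 1 = 0.99, while a state outside that window gives 0.01;
+    each round then shifts s by the delta entry for the last joint play.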
+ + Names: + + - AdaptorLong: [Hauert2002]_ + + """ + + name = "AdaptorLong" + + def __init__(self) -> None: + delta = { + (C, C): 0., # R + (C, D): 1.888159, # S + (D, C): 1.858883, # T + (D, D): -0.995703 # P + } + super().__init__(delta=delta) diff --git a/axelrod/strategies/alternator.py b/axelrod/strategies/alternator.py new file mode 100644 index 000000000..a25c4c968 --- /dev/null +++ b/axelrod/strategies/alternator.py @@ -0,0 +1,33 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class Alternator(IpdPlayer): + """ + A player who alternates between cooperating and defecting. + + Names + + - Alternator: [Axelrod1984]_ + - Periodic player CD: [Mittal2009]_ + """ + + name = "Alternator" + classifier = { + "memory_depth": 1, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if len(self.history) == 0: + return C + if self.history[-1] == C: + return D + return C diff --git a/axelrod/strategies/ann.py b/axelrod/strategies/ann.py new file mode 100644 index 000000000..3398f5761 --- /dev/null +++ b/axelrod/strategies/ann.py @@ -0,0 +1,350 @@ +from typing import List, Tuple +import numpy as np +import numpy.random as random +from axelrod.action import Action +from axelrod.load_data_ import load_weights +from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists +from axelrod.player import IpdPlayer + + +C, D = Action.C, Action.D +nn_weights = load_weights() + +# Neural Network and Activation functions +relu = np.vectorize(lambda x: max(x, 0)) + + +def num_weights(num_features, num_hidden): + size = num_features * num_hidden + 2 * num_hidden + return size + + +def compute_features(player: IpdPlayer, opponent: IpdPlayer) -> List[int]: + """ + Compute history features for Neural Network: + * Opponent's first move is C + * Opponent's first move is D + * Opponent's second move is C + * Opponent's second move is D + * IpdPlayer's previous move is C + * IpdPlayer's previous move is D + * IpdPlayer's second previous move is C + * IpdPlayer's second previous move is D + * Opponent's previous move is C + * Opponent's previous move is D + * Opponent's second previous move is C + * Opponent's second previous move is D + * Total opponent cooperations + * Total opponent defections + * Total player cooperations + * Total player defections + * Round number + """ + if len(opponent.history) == 0: + opponent_first_c = 0 + opponent_first_d = 0 + opponent_second_c = 0 + opponent_second_d = 0 + my_previous_c = 0 + my_previous_d = 0 + my_previous2_c = 0 + my_previous2_d = 0 + opponent_previous_c = 0 + opponent_previous_d = 0 + opponent_previous2_c = 0 + opponent_previous2_d = 0 + + elif len(opponent.history) == 1: + opponent_first_c = 1 if opponent.history[0] == C else 0 + opponent_first_d = 1 if opponent.history[0] == D else 0 + opponent_second_c = 0 + opponent_second_d = 0 + my_previous_c = 1 if player.history[-1] == C else 0 + my_previous_d = 1 if player.history[-1] == D else 0 + my_previous2_c = 0 + my_previous2_d = 0 + opponent_previous_c = 1 if opponent.history[-1] == C else 0 + opponent_previous_d = 1 if opponent.history[-1] == D else 0 + opponent_previous2_c = 0 + opponent_previous2_d = 0 + + else: + opponent_first_c = 1 if opponent.history[0] == C else 0 + opponent_first_d = 1 if opponent.history[0] == D else 0 + opponent_second_c = 1 if 
opponent.history[1] == C else 0
+        opponent_second_d = 1 if opponent.history[1] == D else 0
+        my_previous_c = 1 if player.history[-1] == C else 0
+        my_previous_d = 1 if player.history[-1] == D else 0
+        my_previous2_c = 1 if player.history[-2] == C else 0
+        my_previous2_d = 1 if player.history[-2] == D else 0
+        opponent_previous_c = 1 if opponent.history[-1] == C else 0
+        opponent_previous_d = 1 if opponent.history[-1] == D else 0
+        opponent_previous2_c = 1 if opponent.history[-2] == C else 0
+        opponent_previous2_d = 1 if opponent.history[-2] == D else 0
+
+    # Remaining Features
+    total_opponent_c = opponent.cooperations
+    total_opponent_d = opponent.defections
+    total_player_c = player.cooperations
+    total_player_d = player.defections
+
+    return [
+        opponent_first_c,
+        opponent_first_d,
+        opponent_second_c,
+        opponent_second_d,
+        my_previous_c,
+        my_previous_d,
+        my_previous2_c,
+        my_previous2_d,
+        opponent_previous_c,
+        opponent_previous_d,
+        opponent_previous2_c,
+        opponent_previous2_d,
+        total_opponent_c,
+        total_opponent_d,
+        total_player_c,
+        total_player_d,
+        len(player.history),
+    ]
+
+
+def activate(
+    bias: List[float], hidden: List[float], output: List[float], inputs: List[int]
+) -> float:
+    """
+    Compute the output of the neural network:
+    output = relu(inputs * hidden_weights + bias) * output_weights
+    """
+    inputs = np.array(inputs)
+    hidden_values = bias + np.dot(hidden, inputs)
+    hidden_values = relu(hidden_values)
+    output_value = np.dot(hidden_values, output)
+    return output_value
+
+
+def split_weights(
+    weights: List[float], num_features: int, num_hidden: int
+) -> Tuple[List[List[float]], List[float], List[float]]:
+    """Splits the input vector into the NN bias weights and layer
+    parameters."""
+    # Check weights is the right length
+    expected_length = num_hidden * 2 + num_features * num_hidden
+    if expected_length != len(weights):
+        raise ValueError("NN weights array has an incorrect size.")
+
+    number_of_input_to_hidden_weights = num_features * num_hidden
+    number_of_hidden_to_output_weights = num_hidden
+
+    input2hidden = []
+    for i in range(0, number_of_input_to_hidden_weights, num_features):
+        input2hidden.append(weights[i : i + num_features])
+
+    start = number_of_input_to_hidden_weights
+    end = number_of_input_to_hidden_weights + number_of_hidden_to_output_weights
+
+    hidden2output = weights[start:end]
+    bias = weights[end:]
+    return input2hidden, hidden2output, bias
+
+
+class ANN(IpdPlayer):
+    """Artificial Neural Network based strategy.
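+
+    A rough sketch of the decision rule implemented below, in terms of the
+    helper functions defined above (illustrative only, not extra behaviour):
+
+        features = compute_features(self, opponent)   # 17 history features
+        output = activate(bias, input2hidden, hidden2output, features)
+        action = C if output > 0 else D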
+ + A single layer neural network based strategy, with the following + features: + * Opponent's first move is C + * Opponent's first move is D + * Opponent's second move is C + * Opponent's second move is D + * IpdPlayer's previous move is C + * IpdPlayer's previous move is D + * IpdPlayer's second previous move is C + * IpdPlayer's second previous move is D + * Opponent's previous move is C + * Opponent's previous move is D + * Opponent's second previous move is C + * Opponent's second previous move is D + * Total opponent cooperations + * Total opponent defections + * Total player cooperations + * Total player defections + * Round number + + Original Source: https://gist.github.com/mojones/550b32c46a8169bb3cd89d917b73111a#file-ann-strategy-test-L60 + + + Names + + - Artificial Neural Network based strategy: Original name by Martin Jones + """ + + name = "ANN" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "inspects_source": False, + "makes_use_of": set(), + "manipulates_source": False, + "manipulates_state": False, + "long_run_time": False, + } + + def __init__( + self, num_features: int, num_hidden: int, + weights: List[float] = None + ) -> None: + super().__init__() + self.num_features = num_features + self.num_hidden = num_hidden + self._process_weights(weights, num_features, num_hidden) + + def _process_weights(self, weights, num_features, num_hidden): + self.weights = list(weights) + (i2h, h2o, bias) = split_weights(weights, num_features, num_hidden) + self.input_to_hidden_layer_weights = np.array(i2h) + self.hidden_to_output_layer_weights = np.array(h2o) + self.bias_weights = np.array(bias) + + def strategy(self, opponent: IpdPlayer) -> Action: + features = compute_features(self, opponent) + output = activate( + self.bias_weights, + self.input_to_hidden_layer_weights, + self.hidden_to_output_layer_weights, + features, + ) + if output > 0: + return C + else: + return D + + +class EvolvableANN(ANN, EvolvablePlayer): + """Evolvable version of ANN.""" + name = "EvolvableANN" + + def __init__( + self, num_features: int, num_hidden: int, + weights: List[float] = None, + mutation_probability: float = None, + mutation_distance: int = 5, + ) -> None: + num_features, num_hidden, weights, mutation_probability = self._normalize_parameters( + num_features, num_hidden, weights, mutation_probability) + ANN.__init__(self, + num_features=num_features, + num_hidden=num_hidden, + weights=weights) + EvolvablePlayer.__init__(self) + self.mutation_probability = mutation_probability + self.mutation_distance = mutation_distance + self.overwrite_init_kwargs( + num_features=num_features, + num_hidden=num_hidden, + weights=weights, + mutation_probability=mutation_probability) + + @classmethod + def _normalize_parameters(cls, num_features=None, num_hidden=None, weights=None, mutation_probability=None): + if not (num_features and num_hidden): + raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableANN") + size = num_weights(num_features, num_hidden) + if not weights: + weights = [random.uniform(-1, 1) for _ in range(size)] + if mutation_probability is None: + mutation_probability = 10. 
/ size
+        return num_features, num_hidden, weights, mutation_probability
+
+    @staticmethod
+    def mutate_weights(weights, num_features, num_hidden, mutation_probability,
+                       mutation_distance):
+        size = num_weights(num_features, num_hidden)
+        randoms = random.random(size)
+        for i, r in enumerate(randoms):
+            if r < mutation_probability:
+                p = 1 + random.uniform(-1, 1) * mutation_distance
+                weights[i] *= p
+        return weights
+
+    def mutate(self):
+        weights = self.mutate_weights(
+            self.weights, self.num_features, self.num_hidden,
+            self.mutation_probability, self.mutation_distance)
+        return self.create_new(weights=weights)
+
+    def crossover(self, other):
+        if other.__class__ != self.__class__:
+            raise TypeError("Crossover must be between the same player classes.")
+        weights = crossover_lists(self.weights, other.weights)
+        return self.create_new(weights=weights)
+
+
+class EvolvedANN(ANN):
+    """
+    A strategy based on a pre-trained neural network with 17 features and a
+    hidden layer of size 10.
+
+    Trained using the `axelrod_dojo` version: 0.0.8
+    Training data is archived at doi.org/10.5281/zenodo.1306926
+
+    Names:
+
+    - Evolved ANN: Original name by Martin Jones.
+    """
+
+    name = "Evolved ANN"
+
+    def __init__(self) -> None:
+        num_features, num_hidden, weights = nn_weights["Evolved ANN"]
+        super().__init__(
+            num_features=num_features,
+            num_hidden=num_hidden,
+            weights=weights)
+
+
+class EvolvedANN5(ANN):
+    """
+    A strategy based on a pre-trained neural network with 17 features and a
+    hidden layer of size 5.
+
+    Trained using the `axelrod_dojo` version: 0.0.8
+    Training data is archived at doi.org/10.5281/zenodo.1306931
+
+    Names:
+
+    - Evolved ANN 5: Original name by Marc Harper.
+    """
+
+    name = "Evolved ANN 5"
+
+    def __init__(self) -> None:
+        num_features, num_hidden, weights = nn_weights["Evolved ANN 5"]
+        super().__init__(
+            num_features=num_features,
+            num_hidden=num_hidden,
+            weights=weights)
+
+
+class EvolvedANNNoise05(ANN):
+    """
+    A strategy based on a pre-trained neural network with a hidden layer of
+    size 5, trained with noise=0.05.
+
+    Trained using the `axelrod_dojo` version: 0.0.8
+    Training data is archived at doi.org/10.5281/zenodo.1314247.
+
+    Names:
+
+    - Evolved ANN Noise 5: Original name by Marc Harper.
+    """
+
+    name = "Evolved ANN 5 Noise 05"
+
+    def __init__(self) -> None:
+        num_features, num_hidden, weights = nn_weights["Evolved ANN 5 Noise 05"]
+        super().__init__(
+            num_features=num_features,
+            num_hidden=num_hidden,
+            weights=weights)
+
diff --git a/axelrod/strategies/apavlov.py b/axelrod/strategies/apavlov.py
new file mode 100644
index 000000000..db7ceb190
--- /dev/null
+++ b/axelrod/strategies/apavlov.py
@@ -0,0 +1,122 @@
+from typing import Optional
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class APavlov2006(IpdPlayer):
+    """
+    APavlov attempts to classify its opponent as one of five strategies:
+    Cooperative, ALLD, STFT, PavlovD, or Random. APavlov then responds in a
+    manner intended to achieve mutual cooperation or to defect against
+    uncooperative opponents.
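+
+    For example (an illustrative reading of the classification below): an
+    opponent whose last six moves were [D, C, D, C, D, C] is classed as
+    "STFT", six defections in a row as "ALLD", and an unrecognised pattern
+    as "Random", which is met with defection.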
+ + Names: + + - Adaptive Pavlov 2006: [Li2007]_ + """ + + name = "Adaptive Pavlov 2006" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + super().__init__() + self.opponent_class = None # type: Optional[str] + + def strategy(self, opponent: IpdPlayer) -> Action: + # TFT for six rounds + if len(self.history) < 6: + return D if opponent.history[-1:] == [D] else C + # Classify opponent + if len(self.history) % 6 == 0: + if opponent.history[-6:] == [C] * 6: + self.opponent_class = "Cooperative" + if opponent.history[-6:] == [D] * 6: + self.opponent_class = "ALLD" + if opponent.history[-6:] == [D, C, D, C, D, C]: + self.opponent_class = "STFT" + if opponent.history[-6:] == [D, D, C, D, D, C]: + self.opponent_class = "PavlovD" + if not self.opponent_class: + self.opponent_class = "Random" + + # Play according to classification + if self.opponent_class in ["Random", "ALLD"]: + return D + if self.opponent_class == "STFT": + if len(self.history) % 6 in [0, 1]: + return C + # TFT + if opponent.history[-1:] == [D]: + return D + if self.opponent_class == "PavlovD": + # Return D then C for the period + if len(self.history) % 6 == 0: + return D + if self.opponent_class == "Cooperative": + # TFT + if opponent.history[-1:] == [D]: + return D + return C + + +class APavlov2011(IpdPlayer): + """ + APavlov attempts to classify its opponent as one of four strategies: + Cooperative, ALLD, STFT, or Random. APavlov then responds in a manner + intended to achieve mutual cooperation or to defect against + uncooperative opponents. + + Names: + + - Adaptive Pavlov 2011: [Li2011]_ + """ + + name = "Adaptive Pavlov 2011" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + super().__init__() + self.opponent_class = None # type: Optional[str] + + def strategy(self, opponent: IpdPlayer) -> Action: + # TFT for six rounds + if len(self.history) < 6: + return D if opponent.history[-1:] == [D] else C + if len(self.history) % 6 == 0: + # Classify opponent + if opponent.history[-6:] == [C] * 6: + self.opponent_class = "Cooperative" + if opponent.history[-6:].count(D) >= 4: + self.opponent_class = "ALLD" + if opponent.history[-6:].count(D) == 3: + self.opponent_class = "STFT" + if not self.opponent_class: + self.opponent_class = "Random" + # Play according to classification + if self.opponent_class in ["Random", "ALLD"]: + return D + if self.opponent_class == "STFT": + # TFTT + return D if opponent.history[-2:] == [D, D] else C + if self.opponent_class == "Cooperative": + # TFT + return D if opponent.history[-1:] == [D] else C diff --git a/axelrod/strategies/appeaser.py b/axelrod/strategies/appeaser.py new file mode 100644 index 000000000..6e21054fa --- /dev/null +++ b/axelrod/strategies/appeaser.py @@ -0,0 +1,38 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class Appeaser(IpdPlayer): + """A player who tries to guess what the opponent wants. + + Switch the classifier every time the opponent plays D. + Start with C, switch between C and D when opponent plays D. 
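+
+    For example (illustrative): against an opponent who defects every
+    round, the resulting play is C, D, C, D, ... since each opposing D
+    flips the previous move.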
+ + Names: + + - Appeaser: Original Name by Jochen Müller + """ + + name = "Appeaser" + classifier = { + "memory_depth": float("inf"), # Depends on internal memory. + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if not len(opponent.history): + return C + else: + if opponent.history[-1] == D: + if self.history[-1] == C: + return D + else: + return C + return self.history[-1] diff --git a/axelrod/strategies/averagecopier.py b/axelrod/strategies/averagecopier.py new file mode 100644 index 000000000..95dd67900 --- /dev/null +++ b/axelrod/strategies/averagecopier.py @@ -0,0 +1,61 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.random_ import random_choice + +C, D = Action.C, Action.D + + +class AverageCopier(IpdPlayer): + """ + The player will cooperate with probability p if the opponent's cooperation + ratio is p. Starts with random decision. + + Names: + + - Average Copier: Original name by Geraint Palmer + """ + + name = "Average Copier" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if len(opponent.history) == 0: + # Randomly picks a strategy (not affected by history). + return random_choice(0.5) + p = opponent.cooperations / len(opponent.history) + return random_choice(p) + + +class NiceAverageCopier(IpdPlayer): + """ + Same as Average Copier, but always starts by cooperating. + + Names: + + - Average Copier: Original name by Owen Campbell + """ + + name = "Nice Average Copier" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if len(opponent.history) == 0: + return C + p = opponent.cooperations / len(opponent.history) + return random_choice(p) diff --git a/axelrod/strategies/axelrod_first.py b/axelrod/strategies/axelrod_first.py new file mode 100644 index 000000000..792f80f10 --- /dev/null +++ b/axelrod/strategies/axelrod_first.py @@ -0,0 +1,1026 @@ +""" +Strategies submitted to Axelrod's first tournament. All strategies in this +module are prefixed by `FirstBy` to indicate that they were submitted in +Axelrod's First tournament by the given author. + +Note that these strategies are implemented from the descriptions presented +in: + +Axelrod, R. (1980). Effective Choice in the Prisoner’s Dilemma. +Journal of Conflict Resolution, 24(1), 3–25. + +These descriptions are not always clear and/or precise and when assumptions have +been made they are explained in the strategy docstrings. +""" + +import random +from typing import Dict, List, Tuple, Optional + +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.random_ import random_choice +from axelrod.strategy_transformers import FinalTransformer +from scipy.stats import chisquare + +from .memoryone import MemoryOnePlayer + +C, D = Action.C, Action.D + + +class FirstByDavis(IpdPlayer): + """ + Submitted to Axelrod's first tournament by Morton Davis. 
+ + The description written in [Axelrod1980]_ is: + + > "A player starts by cooperating for 10 rounds then plays Grudger, + > defecting if at any point the opponent has defected." + + This strategy came 8th in Axelrod's original tournament. + + Names: + + - Davis: [Axelrod1980]_ + """ + + name = "First by Davis" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, rounds_to_cooperate: int = 10) -> None: + """ + Parameters + ---------- + rounds_to_cooperate: int, 10 + The number of rounds to cooperate initially + """ + super().__init__() + self._rounds_to_cooperate = rounds_to_cooperate + + def strategy(self, opponent: IpdPlayer) -> Action: + """Begins by playing C, then plays D for the remaining rounds if the + opponent ever plays D.""" + if len(self.history) < self._rounds_to_cooperate: + return C + if opponent.defections > 0: # Implement Grudger + return D + return C + + +class FirstByDowning(IpdPlayer): + """ + Submitted to Axelrod's first tournament by Downing + + The description written in [Axelrod1980]_ is: + + > "This rule selects its choice to maximize its own longterm expected payoff on + > the assumption that the other rule cooperates with a fixed probability which + > depends only on whether the other player cooperated or defected on the previous + > move. These two probabilities estimates are continuously updated as the game + > progresses. Initially, they are both assumed to be .5, which amounts to the + > pessimistic assumption that the other player is not responsive. This rule is + > based on an outcome maximization interpretation of human performances proposed + > by Downing (1975)." + + The Downing (1975) paper is "The Prisoner's Dilemma IpdGame as a + Problem-Solving Phenomenon" [Downing1975]_ and this is used to implement the + strategy. + + There are a number of specific points in this paper, on page 371: + + > "[...] In these strategies, O's [the opponent's] response on trial N is in + some way dependent or contingent on S's [the subject's] response on trial N- + 1. All varieties of these lag-one matching strategies can be defined by two + parameters: the conditional probability that O will choose C following C by + S, P(C_o | C_s) and the conditional probability that O will choose C + following D by S, P(C_o, D_s)." + + Throughout the paper the strategy (S) assumes that the opponent (O) is + playing a reactive strategy defined by these two conditional probabilities. + + The strategy aims to maximise the long run utility against such a strategy + and the mechanism for this is described in Appendix A (more on this later). + + One final point from the main text is, on page 372: + + > "For the various lag-one matching strategies of O, the maximizing + strategies of S will be 100% C, or 100% D, or for some strategies all S + strategies will be functionally equivalent." + + This implies that the strategy S will either always cooperate or always + defect (or be indifferent) dependent on the opponent's defining + probabilities. + + To understand the particular mechanism that describes the strategy S, we + refer to Appendix A of the paper on page 389. 
+ + The stated goal of the strategy is to maximize (using the notation of the + paper): + + EV_TOT = #CC(EV_CC) + #CD(EV_CD) + #DC(EV_DC) + #DD(EV_DD) + + This differs from the more modern literature where #CC, #CD, #DC and #DD + would imply that counts of both players playing C and C, or the first + playing C and the second D etc... + In this case the author uses an argument based on the sequence of plays by + the player (S) so #CC denotes the number of times the player plays C twice + in a row. + + On the second page of the appendix, figure 4 (page 390) + identifies an expression for EV_TOT. + A specific term is made to disappear in + the case of T - R = P - S (which is not the case for the standard + (R, P, S, T) = (3, 1, 0, 5)): + + > "Where (t - r) = (p - s), EV_TOT will be a function of alpha, beta, t, r, + p, s and N are known and V which is unknown. + + V is the total number of cooperations of the player S (this is noted earlier + in the abstract) and as such the final expression (with only V as unknown) + can be used to decide if V should indicate that S always cooperates or not. + + This final expression is used to show that EV_TOT is linear in the number of + cooperations by the player thus justifying the fact that the player will + always cooperate or defect. + + All of the above details are used to give the following interpretation of + the strategy: + + 1. On any given turn, the strategy will estimate alpha = P(C_o | C_s) and + beta = P(C_o | D_s). + 2. The strategy will calculate the expected utility of always playing C OR + always playing D against the estimated probabilities. This corresponds to: + + a. In the case of the player always cooperating: + + P_CC = alpha and P_CD = 1 - alpha + + b. In the case of the player always defecting: + + P_DC = beta and P_DD = 1 - beta + + + Using this we have: + + E_C = alpha R + (1 - alpha) S + E_D = beta T + (1 - beta) P + + Thus at every turn, the strategy will calculate those two values and + cooperate if E_C > E_D and will defect if E_C < E_D. + + In the case of E_C = E_D, the player will alternate from their previous + move. This is based on specific sentence from Axelrod's original paper: + + > "Under certain circumstances, DOWNING will even determine that the best + > strategy is to alternate cooperation and defection." + + One final important point is the early game behaviour of the strategy. It + has been noted that this strategy was implemented in a way that assumed that + alpha and beta were both 1/2: + + > "Initially, they are both assumed to be .5, which amounts to the + > pessimistic assumption that the other player is not responsive." + + Note that if alpha = beta = 1 / 2 then: + + E_C = alpha R + alpha S + E_D = alpha T + alpha P + + And from the defining properties of the Prisoner's Dilemma (T > R > P > S) + this gives: E_D > E_C. + Thus, the player opens with a defection in the first two rounds. Note that + from the Axelrod publications alone there is nothing to indicate defections + on the first two rounds, although a defection in the opening round is clear. + However there is a presentation available at + http://www.sci.brooklyn.cuny.edu/~sklar/teaching/f05/alife/notes/azhar-ipd-Oct19th.pdf + That clearly states that Downing defected in the first two rounds, thus this + is assumed to be the behaviour. Interestingly, in future tournaments this + strategy was revised to not defect on the opening two rounds. 
+
+    It is assumed that these first two rounds are used to create initial
+    estimates of
+    beta = P(C_o | D_s) and we will use the opening play of the player to
+    estimate alpha = P(C_o | C_s).
+    Thus we assume that the opponent's first play is a response to a
+    cooperation "before the match starts".
+
+    So for example, if the plays are:
+
+    [(D, C), (D, C)]
+
+    Then the opponent's first cooperation counts as a cooperation in response
+    to the non-existent cooperation of round 0. The total number of
+    cooperations in response to a cooperation is 1. We need to take into
+    account that extra phantom cooperation to estimate the probability
+    alpha=P(C_o | C_s) as 1 / 1 = 1.
+
+    This is an assumption with no clear indication from the literature.
+
+    --
+    This strategy came 10th in Axelrod's original tournament.
+
+    Names:
+
+    - Downing: [Axelrod1980]_
+    """
+
+    name = "First by Downing"
+
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": {"game"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.number_opponent_cooperations_in_response_to_C = 0
+        self.number_opponent_cooperations_in_response_to_D = 0
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        round_number = len(self.history) + 1
+
+        if round_number == 1:
+            return D
+        if round_number == 2:
+            if opponent.history[-1] == C:
+                self.number_opponent_cooperations_in_response_to_C += 1
+            return D
+
+        if self.history[-2] == C and opponent.history[-1] == C:
+            self.number_opponent_cooperations_in_response_to_C += 1
+        if self.history[-2] == D and opponent.history[-1] == C:
+            self.number_opponent_cooperations_in_response_to_D += 1
+
+        # Adding 1 to cooperations on the assumption that the first opponent
+        # move is a response to a cooperation. See docstring for more
+        # information.
+        alpha = (self.number_opponent_cooperations_in_response_to_C /
+                 (self.cooperations + 1))
+        # Adding 2 to defections on the assumption that the first two
+        # moves are defections, which may not be true in a noisy match
+        beta = (self.number_opponent_cooperations_in_response_to_D /
+                max(self.defections, 2))
+
+        R, P, S, T = self.match_attributes["game"].RPST()
+        expected_value_of_cooperating = alpha * R + (1 - alpha) * S
+        expected_value_of_defecting = beta * T + (1 - beta) * P
+
+        if expected_value_of_cooperating > expected_value_of_defecting:
+            return C
+        if expected_value_of_cooperating < expected_value_of_defecting:
+            return D
+        return self.history[-1].flip()
+
+
+class FirstByFeld(IpdPlayer):
+    """
+    Submitted to Axelrod's first tournament by Scott Feld.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule starts with tit for tat and gradually lowers its probability
+    > of cooperation following the other's cooperation to .5 by the two
+    > hundredth move. It always defects after a defection by the other."
+
+    This strategy plays Tit For Tat, always defecting if the opponent defects
+    but cooperating when the opponent cooperates with a gradually decreasing
+    probability until it is only .5. Note that the description does not
+    clearly indicate how the cooperation probability should drop. This
+    implements a linear decreasing function.
+
+    This strategy came 11th in Axelrod's original tournament.
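+
+    For example (a worked instance of the defaults below): the slope is
+    (0.5 - 1.0) / 200 = -0.0025 per round, so the cooperation probability
+    after an opponent cooperation is 0.75 at round 100 and is capped at the
+    floor of 0.5 from round 200 onwards.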
+
+    Names:
+
+    - Feld: [Axelrod1980]_
+    """
+
+    name = "First by Feld"
+    classifier = {
+        "memory_depth": 200,  # Varies actually, eventually becomes depth 1
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(
+        self,
+        start_coop_prob: float = 1.0,
+        end_coop_prob: float = 0.5,
+        rounds_of_decay: int = 200,
+    ) -> None:
+        """
+        Parameters
+        ----------
+        start_coop_prob, float
+            The initial probability to cooperate
+        end_coop_prob, float
+            The final probability to cooperate
+        rounds_of_decay, int
+            The number of rounds to linearly decrease from start_coop_prob
+            to end_coop_prob
+        """
+        super().__init__()
+        self._start_coop_prob = start_coop_prob
+        self._end_coop_prob = end_coop_prob
+        self._rounds_of_decay = rounds_of_decay
+
+    def _cooperation_probability(self) -> float:
+        """It's not clear what the interpolating function is, so we'll do
+        something simple that decreases monotonically from 1.0 to 0.5 over
+        200 rounds."""
+        diff = self._end_coop_prob - self._start_coop_prob
+        slope = diff / self._rounds_of_decay
+        rounds = len(self.history)
+        return max(self._start_coop_prob + slope * rounds, self._end_coop_prob)
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if not opponent.history:
+            return C
+        if opponent.history[-1] == D:
+            return D
+        p = self._cooperation_probability()
+        return random_choice(p)
+
+
+class FirstByGraaskamp(IpdPlayer):
+    """
+    Submitted to Axelrod's first tournament by James Graaskamp.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule plays tit for tat for 50 moves, defects on move 51, and then
+    > plays 5 more moves of tit for tat. A check is then made to see if the
+    > player seems to be RANDOM, in which case it defects from then on. A
+    > check is also made to see if the other is TIT FOR TAT, ANALOGY (a
+    > program from the preliminary tournament), and its own twin, in which
+    > case it plays tit for tat. Otherwise it randomly defects every 5 to 15
+    > moves, hoping that enough trust has been built up so that the other
+    > player will not notice these defections."
+
+    This is implemented as:
+
+    1. Plays Tit For Tat for the first 50 rounds;
+    2. Defects on round 51;
+    3. Plays 5 further rounds of Tit For Tat;
+    4. A check is then made to see if the opponent is playing randomly in
+    which case it defects for the rest of the game. This is implemented with
+    a chi squared test.
+    5. The strategy also checks to see if the opponent is playing Tit For Tat
+    or a clone of itself. If so it plays Tit For Tat. If not it cooperates
+    and randomly defects every 5 to 15 moves.
+
+    Note that there is no information about 'Analogy' available thus Step 5
+    is a "best possible" interpretation of the description in the paper.
+    Furthermore the test for the clone is implemented as checking that both
+    players have played the same moves for the entire game. This is unlikely
+    to be the original approach but no further details are available.
+
+    This strategy came 9th in Axelrod’s original tournament.
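+
+    For example (an illustrative reading of step 4): scipy.stats.chisquare
+    applied to the opponent's cooperation and defection counts returns a
+    large p-value for a roughly 50/50 split, so p_value >= alpha marks the
+    opponent as random and triggers permanent defection.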
+
+    Names:
+
+    - Graaskamp: [Axelrod1980]_
+    """
+
+    name = "First by Graaskamp"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, alpha: float = 0.05) -> None:
+        """
+        Parameters
+        ----------
+        alpha: float
+            The significance level of the p-value from the chi-squared test,
+            0.05 by default.
+        """
+        super().__init__()
+        self.alpha = alpha
+        self.opponent_is_random = False
+        self.next_random_defection_turn = None  # type: Optional[int]
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        """This is the actual strategy"""
+        # First move
+        if not self.history:
+            return C
+        # React to the opponent's last move
+        if len(self.history) < 56:
+            if opponent.history[-1] == D or len(self.history) == 50:
+                return D
+            return C
+
+        # Check if opponent plays randomly, if so, defect for the rest of the game
+        p_value = chisquare([opponent.cooperations, opponent.defections]).pvalue
+        self.opponent_is_random = (p_value >= self.alpha) or self.opponent_is_random
+
+        if self.opponent_is_random:
+            return D
+        if all(
+            opponent.history[i] == self.history[i - 1]
+            for i in range(1, len(self.history))
+        ) or opponent.history == self.history:
+            # Check if opponent plays Tit for Tat or a clone of itself.
+            if opponent.history[-1] == D:
+                return D
+            return C
+
+        if self.next_random_defection_turn is None:
+            self.next_random_defection_turn = random.randint(5, 15) + len(self.history)
+
+        if len(self.history) == self.next_random_defection_turn:
+            # Resample the next defection turn
+            self.next_random_defection_turn = random.randint(5, 15) + len(self.history)
+            return D
+        return C
+
+
+class FirstByGrofman(IpdPlayer):
+    """
+    Submitted to Axelrod's first tournament by Bernard Grofman.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "If the players did different things on the previous move, this rule
+    > cooperates with probability 2/7. Otherwise this rule always cooperates."
+
+    This strategy came 4th in Axelrod's original tournament.
+
+    Names:
+
+    - Grofman: [Axelrod1980]_
+    """
+
+    name = "First by Grofman"
+    classifier = {
+        "memory_depth": 1,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(self.history) == 0 or self.history[-1] == opponent.history[-1]:
+            return C
+        return random_choice(2 / 7)
+
+
+class FirstByJoss(MemoryOnePlayer):
+    """
+    Submitted to Axelrod's first tournament by Johann Joss.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule cooperates 90% of the time after a cooperation by the other.
+    > It always defects after a defection by the other."
+
+    This strategy came 12th in Axelrod's original tournament.
+
+    Names:
+
+    - Joss: [Axelrod1980]_
+    - Hard Joss: [Stewart2012]_
+    """
+
+    name = "First by Joss"
+
+    def __init__(self, p: float = 0.9) -> None:
+        """
+        Parameters
+        ----------
+        p, float
+            The probability of cooperating when the previous round was (C, C)
+            or (D, C), i.e. the opponent cooperated.
+        """
+        four_vector = (p, 0, p, 0)
+        self.p = p
+        super().__init__(four_vector)
+
+
+class FirstByNydegger(IpdPlayer):
+    """
+    Submitted to Axelrod's first tournament by Rudy Nydegger.
+ + The description written in [Axelrod1980]_ is: + + > "The program begins with tit for tat for the first three moves, except + > that if it was the only one to cooperate on the first move and the only one + > to defect on the second move, it defects on the third move. After the third + > move, its choice is determined from the 3 preceding outcomes in the + > following manner. Let A be the sum formed by counting the other's defection + > as 2 points and one's own as 1 point, and giving weights of 16, 4, and 1 to + > the preceding three moves in chronological order. The choice can be + > described as defecting only when A equals 1, 6, 7, 17, 22, 23, 26, 29, 30, + > 31, 33, 38, 39, 45, 49, 54, 55, 58, or 61. Thus if all three preceding moves + > are mutual defection, A = 63 and the rule cooperates. This rule was + > designed for use in laboratory experiments as a stooge which had a memory + > and appeared to be trustworthy, potentially cooperative, but not gullible + > (Nydegger, 1978)." + + The program begins with tit for tat for the first three moves, except + that if it was the only one to cooperate on the first move and the only one + to defect on the second move, it defects on the third move. After the + third move, its choice is determined from the 3 preceding outcomes in the + following manner. + + .. math:: + + A = 16 a_1 + 4 a_2 + a_3 + + Where :math:`a_i` is dependent on the outcome of the previous :math:`i` th + round. If both strategies defect, :math:`a_i=3`, if the opponent only defects: + :math:`a_i=2` and finally if it is only this strategy that defects then + :math:`a_i=1`. + + Finally this strategy defects if and only if: + + .. math:: + + A \in \{1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, 55, 58, 61\} + + Thus if all three preceding moves are mutual defection, A = 63 and the rule + cooperates. This rule was designed for use in laboratory experiments as a + stooge which had a memory and appeared to be trustworthy, potentially + cooperative, but not gullible. + + This strategy came 3rd in Axelrod's original tournament. + + Names: + + - Nydegger: [Axelrod1980]_ + """ + + name = "First by Nydegger" + classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + self.As = [1, 6, 7, 17, 22, 23, 26, 29, 30, 31, 33, 38, 39, 45, 49, 54, 55, 58, 61] + self.score_map = {(C, C): 0, (C, D): 2, (D, C): 1, (D, D): 3} + super().__init__() + + @staticmethod + def score_history( + my_history: List[Action], + opponent_history: List[Action], + score_map: Dict[Tuple[Action, Action], int], + ) -> int: + + """Implements the Nydegger formula A = 16 a_1 + 4 a_2 + a_3""" + a = 0 + for i, weight in [(-1, 16), (-2, 4), (-3, 1)]: + plays = (my_history[i], opponent_history[i]) + a += weight * score_map[plays] + return a + + def strategy(self, opponent: IpdPlayer) -> Action: + if len(self.history) == 0: + return C + if len(self.history) == 1: + # TFT + return D if opponent.history[-1] == D else C + if len(self.history) == 2: + if opponent.history[0:2] == [D, C]: + return D + else: + # TFT + return D if opponent.history[-1] == D else C + A = self.score_history(self.history[-3:], opponent.history[-3:], self.score_map) + if A in self.As: + return D + return C + + +class FirstByShubik(IpdPlayer): + """ + Submitted to Axelrod's first tournament by Martin Shubik. 
+ + The description written in [Axelrod1980]_ is: + + > "This rule cooperates until the other defects, and then defects once. If + > the other defects again after the rule's cooperation is resumed, the rule + > defects twice. In general, the length of retaliation is increased by one for + > each departure from mutual cooperation. This rule is described with its + > strategic implications in Shubik (1970). Further treatment of its is given + > in Taylor (1976). + + There is some room for interpretation as to how the strategy reacts to a + defection on the turn where it starts to cooperate once more. In Shubik + (1970) the strategy is described as: + + > "I will play my move 1 to begin with and will continue to do so, so long + > as my information shows that the other player has chosen his move 1. If my + > information tells me he has used move 2, then I will use move 2 for the + > immediate k subsequent periods, after which I will resume using move 1. If + > he uses his move 2 again after I have resumed using move 1, then I will + > switch to move 2 for the k + 1 immediately subsequent periods . . . and so + > on, increasing my retaliation by an extra period for each departure from the + > (1, 1) steady state." + + This is interpreted as: + + The player cooperates, if when it is cooperating, the opponent defects it + defects for k rounds. After k rounds it starts cooperating again and + increments the value of k if the opponent defects again. + + This strategy came 5th in Axelrod's original tournament. + + Names: + + - Shubik: [Axelrod1980]_ + """ + + name = "First by Shubik" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + super().__init__() + self.is_retaliating = False + self.retaliation_length = 0 + self.retaliation_remaining = 0 + + def _decrease_retaliation_counter(self): + """Lower the remaining owed retaliation count and flip to non-retaliate + if the count drops to zero.""" + if self.is_retaliating: + self.retaliation_remaining -= 1 + if self.retaliation_remaining == 0: + self.is_retaliating = False + + def strategy(self, opponent: IpdPlayer) -> Action: + if not opponent.history: + return C + + if self.is_retaliating: + # Are we retaliating still? + self._decrease_retaliation_counter() + return D + + if opponent.history[-1] == D and self.history[-1] == C: + # "If he uses his move 2 again after I have resumed using move 1, + # then I will switch to move 2 for the k + 1 immediately subsequent + # periods" + self.is_retaliating = True + self.retaliation_length += 1 + self.retaliation_remaining = self.retaliation_length + self._decrease_retaliation_counter() + return D + return C + + +class FirstByTullock(IpdPlayer): + """ + Submitted to Axelrod's first tournament by Gordon Tullock. + + The description written in [Axelrod1980]_ is: + + > "This rule cooperates on the first eleven moves. It then cooperates 10% + > less than the other player has cooperated on the preceding ten moves. This + > rule is based on an idea developed in Overcast and Tullock (1971). Professor + > Tullock was invited to specify how the idea could be implemented, and he did + > so out of scientific interest rather than an expectation that it would be a + > likely winner." 
+
+    This is interpreted as:
+
+    Cooperates for the first 11 rounds then randomly cooperates 10% less
+    often than the opponent has in the previous 10 rounds.
+
+    This strategy came 13th in Axelrod's original tournament.
+
+    Names:
+
+    - Tullock: [Axelrod1980]_
+    """
+
+    name = "First by Tullock"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self._rounds_to_cooperate = 11
+        self.memory_depth = self._rounds_to_cooperate
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(self.history) < self._rounds_to_cooperate:
+            return C
+        rounds = self._rounds_to_cooperate - 1
+        cooperate_count = opponent.history[-rounds:].count(C)
+        prop_cooperate = cooperate_count / rounds
+        prob_cooperate = max(0, prop_cooperate - 0.10)
+        return random_choice(prob_cooperate)
+
+
+class FirstByAnonymous(IpdPlayer):
+    """
+    Submitted to Axelrod's first tournament by a graduate student whose name
+    was withheld.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule has a probability of cooperating, P, which is initially 30%
+    > and is updated every 10 moves. P is adjusted if the other player seems
+    > random, very cooperative, or very uncooperative. P is also adjusted
+    > after move 130 if the rule has a lower score than the other player.
+    > Unfortunately, the complex process of adjustment frequently left the
+    > probability of cooperation in the 30% to 70% range, and therefore the
+    > rule appeared random to many other players."
+
+    Given the lack of detail this strategy is implemented based on the final
+    sentence of the description which is to have a cooperation probability
+    that is uniformly random in the 30 to 70% range.
+
+    Names:
+
+    - (Name withheld): [Axelrod1980]_
+    """
+
+    name = "First by Anonymous"
+    classifier = {
+        "memory_depth": 0,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def strategy(opponent: IpdPlayer) -> Action:
+        r = random.uniform(3, 7) / 10
+        return random_choice(r)
+
+
+@FinalTransformer((D, D), name_prefix=None)
+class FirstBySteinAndRapoport(IpdPlayer):
+    """
+    Submitted to Axelrod's first tournament by William Stein and Amnon
+    Rapoport.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule plays tit for tat except that it cooperates on the first
+    > four moves, it defects on the last two moves, and every fifteen moves
+    > it checks to see if the opponent seems to be playing randomly. This
+    > check uses a chi-squared test of the other's transition probabilities
+    > and also checks for alternating moves of CD and DC."
+
+    This is implemented as follows:
+
+    1. It cooperates for the first 4 moves.
+    2. It defects on the last 2 moves.
+    3. Every 15 moves it makes use of a `chi-squared test
+    <http://en.wikipedia.org/wiki/Chi-squared_test>`_ to check if the
+    opponent is playing randomly. If so it defects.
+
+    This strategy came 6th in Axelrod's original tournament.
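+
+    For example (illustrative): with the default alpha = 0.05, an opponent
+    whose cooperation and defection counts are close to 50/50 at a check
+    point yields a chi-squared p-value >= 0.05 and is treated as random,
+    after which the strategy defects (the classification is revisited at
+    each subsequent check).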
+
+    Names:
+
+    - SteinAndRapoport: [Axelrod1980]_
+    """
+
+    name = "First by Stein and Rapoport"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": {"length"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, alpha: float = 0.05) -> None:
+        """
+        Parameters
+        ----------
+        alpha: float
+            The significance level of the p-value from the chi-squared test,
+            0.05 by default.
+        """
+        super().__init__()
+        self.alpha = alpha
+        self.opponent_is_random = False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        round_number = len(self.history) + 1
+
+        # First 4 moves
+        if round_number < 5:
+            return C
+        # For the first 15 rounds play tit for tat as we do not yet know the
+        # opponent's strategy
+        elif round_number < 15:
+            return opponent.history[-1]
+
+        if round_number % 15 == 0:
+            p_value = chisquare([opponent.cooperations, opponent.defections]).pvalue
+            self.opponent_is_random = p_value >= self.alpha
+
+        if self.opponent_is_random:
+            # Defect if opponent plays randomly
+            return D
+        else:  # TitForTat if opponent plays not randomly
+            return opponent.history[-1]
+
+
+@FinalTransformer((D, D), name_prefix=None)
+class FirstByTidemanAndChieruzzi(IpdPlayer):
+    """
+    Submitted to Axelrod's first tournament by Nicolas Tideman and Paula
+    Chieruzzi.
+
+    The description written in [Axelrod1980]_ is:
+
+    > "This rule begins with cooperation and tit for tat. However, when the
+    > other player finishes his second run of defections, an extra punishment
+    > is instituted, and the number of punishing defections is increased by
+    > one with each run of the other's defections. The other player is given
+    > a fresh start if he is 10 or more points behind, if he has not just
+    > started a run of defections, if it has been at least 20 moves since a
+    > fresh start, if there are at least 10 moves remaining, and if the
+    > number of defections differs from a 50-50 random generator by at least
+    > 3.0 standard deviations. A fresh start involves two cooperations and
+    > then play as if the game had just started. The program defects
+    > automatically on the last two moves."
+
+    This is interpreted as:
+
+    1. Every run of defections played by the opponent increases the number of
+    defections that this strategy retaliates with by 1.
+
+    2. The opponent is given a ‘fresh start’ if:
+        - it is 10 points behind this strategy
+        - and it has not just started a run of defections
+        - and it has been at least 20 rounds since the last ‘fresh start’
+        - and there are more than 10 rounds remaining in the match
+        - and the total number of defections differs from a 50-50 random
+          sample by at least 3.0 standard deviations.
+
+    A ‘fresh start’ is a sequence of two cooperations followed by an
+    assumption that the game has just started (everything is forgotten).
+
+    3. The strategy defects on the last two moves.
+
+    This strategy came 2nd in Axelrod’s original tournament.
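+
+    For example (a worked instance of the 50-50 check in the code below):
+    after N = 100 opponent moves, std_dev = sqrt(100) / 2 = 5, so a fresh
+    start additionally requires the remembered defection count to lie
+    outside [50 - 15, 50 + 15] = [35, 65].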
+
+    Names:
+
+    - TidemanAndChieruzzi: [Axelrod1980]_
+    """
+
+    name = "First by Tideman and Chieruzzi"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": {"game", "length"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.is_retaliating = False
+        self.retaliation_length = 0
+        self.retaliation_remaining = 0
+        self.current_score = 0
+        self.opponent_score = 0
+        self.last_fresh_start = 0
+        self.fresh_start = False
+        self.remembered_number_of_opponent_defections = 0
+
+    def _decrease_retaliation_counter(self):
+        """Lower the remaining owed retaliation count and flip to
+        non-retaliate if the count drops to zero."""
+        if self.is_retaliating:
+            self.retaliation_remaining -= 1
+            if self.retaliation_remaining == 0:
+                self.is_retaliating = False
+
+    def _fresh_start(self):
+        """Give the opponent a fresh start by forgetting the past"""
+        self.is_retaliating = False
+        self.retaliation_length = 0
+        self.retaliation_remaining = 0
+        self.remembered_number_of_opponent_defections = 0
+
+    def _score_last_round(self, opponent: IpdPlayer):
+        """Updates the scores for each player."""
+        # Load the default game if not supplied by a tournament.
+        game = self.match_attributes["game"]
+        last_round = (self.history[-1], opponent.history[-1])
+        scores = game.score(last_round)
+        self.current_score += scores[0]
+        self.opponent_score += scores[1]
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if not opponent.history:
+            return C
+
+        if opponent.history[-1] == D:
+            self.remembered_number_of_opponent_defections += 1
+
+        # Calculate the scores.
+        self._score_last_round(opponent)
+
+        # Check if we have recently given the strategy a fresh start.
+        if self.fresh_start:
+            self.fresh_start = False
+            return C  # Second cooperation
+
+        # Check conditions to give opponent a fresh start.
+        current_round = len(self.history) + 1
+        if self.last_fresh_start == 0:
+            valid_fresh_start = True
+        # There needs to be at least 20 rounds before the next fresh start
+        else:
+            valid_fresh_start = current_round - self.last_fresh_start >= 20
+
+        if valid_fresh_start:
+            valid_points = self.current_score - self.opponent_score >= 10
+            valid_rounds = self.match_attributes["length"] - current_round >= 10
+            opponent_is_cooperating = opponent.history[-1] == C
+            if valid_points and valid_rounds and opponent_is_cooperating:
+                # 50-50 split is based on the binomial distribution.
+                N = opponent.cooperations + opponent.defections
+                # std_dev = sqrt(N*p*(1-p)) where p is 1 / 2.
+                std_deviation = (N ** (1 / 2)) / 2
+                lower = N / 2 - 3 * std_deviation
+                upper = N / 2 + 3 * std_deviation
+                if (self.remembered_number_of_opponent_defections <= lower or
+                        self.remembered_number_of_opponent_defections >= upper):
+                    # Opponent deserves a fresh start
+                    self.last_fresh_start = current_round
+                    self._fresh_start()
+                    self.fresh_start = True
+                    return C  # First cooperation
+
+        if self.is_retaliating:
+            # Are we retaliating still?
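+            # If so, consume one round of the owed retaliation run and
+            # defect again.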
+            self._decrease_retaliation_counter()
+            return D
+
+        if opponent.history[-1] == D:
+            self.is_retaliating = True
+            self.retaliation_length += 1
+            self.retaliation_remaining = self.retaliation_length
+            self._decrease_retaliation_counter()
+            return D
+
+        return C
diff --git a/axelrod/strategies/axelrod_second.py b/axelrod/strategies/axelrod_second.py
new file mode 100644
index 000000000..3e85149ec
--- /dev/null
+++ b/axelrod/strategies/axelrod_second.py
@@ -0,0 +1,2131 @@
+"""
+Strategies from Axelrod's second tournament. All strategies in this module are
+prefixed by `SecondBy` to indicate that they were submitted in Axelrod's Second
+tournament by the given author.
+"""
+
+import random
+from typing import List
+
+import numpy as np
+from axelrod.action import Action
+from axelrod.interaction_utils import compute_final_score
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice
+from axelrod.strategies.finite_state_machines import FSMPlayer
+
+C, D = Action.C, Action.D
+
+
+class SecondByChampion(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Danny Champion.
+
+    This player cooperates on the first 10 moves and plays Tit for Tat for the
+    next 15 moves. After 25 moves, the program cooperates unless all of the
+    following are true: the other player defected on the previous move, the
+    other player has cooperated less than 60% of the time, and a random number
+    between 0 and 1 is greater than the other player's cooperation rate.
+
+    Names:
+
+    - Champion: [Axelrod1980b]_
+    """
+
+    name = "Second by Champion"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        current_round = len(self.history)
+        # Cooperate for the first 10 turns
+        if current_round < 10:
+            return C
+        # Mirror partner for the next phase
+        if current_round < 25:
+            return opponent.history[-1]
+        # Now cooperate unless all of the necessary conditions are true
+        defection_prop = opponent.defections / len(opponent.history)
+        if opponent.history[-1] == D:
+            r = random.random()
+            if defection_prop >= max(0.4, r):
+                return D
+        return C
+
+
+class SecondByEatherley(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Graham Eatherley.
+
+    A player that keeps track of how many times in the game the other player
+    defected. After the other player defects, it defects with a probability
+    equal to the ratio of the other's total defections to the total moves to
+    that point.
+
+    Names:
+
+    - Eatherley: [Axelrod1980b]_
+    """
+
+    name = "Second by Eatherley"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def strategy(opponent: IpdPlayer) -> Action:
+        # Cooperate on the first move
+        if not len(opponent.history):
+            return C
+        # Reciprocate cooperation
+        if opponent.history[-1] == C:
+            return C
+        # Respond to defections with probability equal to opponent's total
+        # proportion of defections
+        defection_prop = opponent.defections / len(opponent.history)
+        return random_choice(1 - defection_prop)
+
+
+class SecondByTester(IpdPlayer):
+    """
+    Submitted to Axelrod's second tournament by David Gladstein.
+
+    This strategy is a TFT variant that attempts to exploit certain strategies. It
+    defects on the first move. If the opponent ever defects, TESTER 'apologizes' by
+    cooperating and then plays TFT for the rest of the game. Otherwise TESTER
+    alternates cooperation and defection.
+
+    This strategy came 46th in Axelrod's second tournament.
+
+    Names:
+
+    - Tester: [Axelrod1980b]_
+    """
+
+    name = "Second by Tester"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.is_TFT = False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # Defect on the first move
+        if not opponent.history:
+            return D
+        # Am I TFT?
+        if self.is_TFT:
+            return D if opponent.history[-1:] == [D] else C
+        else:
+            # Did opponent defect?
+            if opponent.history[-1] == D:
+                self.is_TFT = True
+                return C
+            if len(self.history) in [1, 2]:
+                return C
+            # Alternate C and D
+            return self.history[-1].flip()
+
+
+class SecondByGladstein(IpdPlayer):
+    """
+    Submitted to Axelrod's second tournament by David Gladstein.
+
+    This strategy is also known as Tester and is based on the reverse
+    engineering of the Fortran strategies from Axelrod's second tournament.
+
+    This strategy is a TFT variant that defects on the first round in order to
+    test the opponent's response. If the opponent ever defects, the strategy
+    'apologizes' by cooperating and then plays TFT for the rest of the game.
+    Otherwise, it defects as much as possible subject to the constraint that
+    the ratio of its defections to moves remains under 0.5, not counting the
+    first defection.
+
+    Names:
+
+    - Gladstein: [Axelrod1980b]_
+    - Tester: [Axelrod1980b]_
+    """
+
+    name = "Second by Gladstein"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        # This strategy assumes the opponent is a patsy
+        self.patsy = True
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # Defect on the first move
+        if not self.history:
+            return D
+        # Is the opponent a patsy?
+        if self.patsy:
+            # If the opponent defects, apologize and play TFT.
+            if opponent.history[-1] == D:
+                self.patsy = False
+                return C
+            # Defect once the cooperation ratio exceeds 0.5
+            cooperation_ratio = self.cooperations / len(self.history)
+            if cooperation_ratio > 0.5:
+                return D
+            return C
+        else:
+            # Play TFT
+            return opponent.history[-1]
+
+
+class SecondByTranquilizer(IpdPlayer):
+    """
+    Submitted to Axelrod's second tournament by Craig Feathers.
+
+    Description given in Axelrod's "More Effective Choice in the
+    Prisoner's Dilemma" paper: The rule normally cooperates but
+    is ready to defect if the other player defects too often.
+    Thus the rule tends to cooperate for the first dozen or two moves
+    if the other player is cooperating, but then it throws in a
+    defection. If the other player continues to cooperate, then defections
+    become more frequent. But as long as Tranquilizer is maintaining an
+    average payoff of at least 2.25 points per move, it will never defect
+    twice in succession and it will not defect more than
+    one-quarter of the time.
+
+    This implementation is based on the reverse engineering of the
+    Fortran strategy K67R from Axelrod's second tournament.
+    Reverse engineered by: Owen Campbell, Will Guo and Mansour Hakem.
+
+    The strategy starts by cooperating and has 3 states.
+
+    At the start of the strategy it updates its states:
+
+    - It counts the number of consecutive defections by the opponent.
+    - If it was in state 2 it moves to state 0 and calculates the
+      following quantities two_turns_after_good_defection_ratio and
+      two_turns_after_good_defection_ratio_count.
+
+      Formula for:
+
+      two_turns_after_good_defection_ratio:
+
+      self.two_turns_after_good_defection_ratio = (
+          ((self.two_turns_after_good_defection_ratio
+          * self.two_turns_after_good_defection_ratio_count)
+          + (3 - (3 * self.dict[opponent.history[-1]]))
+          + (2 * self.dict[self.history[-1]])
+          - ((self.dict[opponent.history[-1]]
+          * self.dict[self.history[-1]])))
+          / (self.two_turns_after_good_defection_ratio_count + 1)
+          )
+
+      two_turns_after_good_defection_ratio_count =
+      two_turns_after_good_defection_ratio_count + 1
+
+    - If it was in state 1 it moves to state 2 and calculates the
+      following quantities one_turn_after_good_defection_ratio and
+      one_turn_after_good_defection_ratio_count.
+
+      Formula for:
+
+      one_turn_after_good_defection_ratio:
+
+      self.one_turn_after_good_defection_ratio = (
+          ((self.one_turn_after_good_defection_ratio
+          * self.one_turn_after_good_defection_ratio_count)
+          + (3 - (3 * self.dict[opponent.history[-1]]))
+          + (2 * self.dict[self.history[-1]])
+          - (self.dict[opponent.history[-1]]
+          * self.dict[self.history[-1]]))
+          / (self.one_turn_after_good_defection_ratio_count + 1)
+          )
+
+      one_turn_after_good_defection_ratio_count:
+
+      one_turn_after_good_defection_ratio_count =
+      one_turn_after_good_defection_ratio_count + 1
+
+    If after this it is in state 1 or 2 then it cooperates.
+
+    If it is in state 0 it will potentially perform 1 of the 2
+    following stochastic tests:
+
+    1. If average score per turn is greater than 2.25 then it calculates a
+    value of probability:
+
+    probability = (
+        (.95 - (((self.one_turn_after_good_defection_ratio)
+        + (self.two_turns_after_good_defection_ratio) - 5) / 15))
+        + (1 / (((len(self.history))+1) ** 2))
+        - (self.dict[opponent.history[-1]] / 4)
+        )
+
+    and will cooperate if a random sampled number is less than that value of
+    probability. If it does not cooperate then the strategy moves to state 1
+    and defects.
+
+    2. If average score per turn is greater than 1.75 but less than 2.25
+    then it calculates a value of probability:
+
+    probability = (
+        (.25 + ((opponent.cooperations + 1) / ((len(self.history)) + 1)))
+        - (self.opponent_consecutive_defections * .25)
+        + ((current_score[0]
+        - current_score[1]) / 100)
+        + (4 / ((len(self.history)) + 1))
+        )
+
+    and will cooperate if a random sampled number is less than that value of
+    probability. If not, it defects.
+
+    If none of the above holds, the player simply plays tit for tat.
+
+    Tranquilizer came in 27th place in Axelrod's second tournament.
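Both ratio formulas above are one-step running averages of the same bracketed quantity, which appears to be the player's payoff from the previous round under the standard (C,C)=3, (D,C)=5, (C,D)=0, (D,D)=1 scoring. A small sketch under that reading (the helper names are hypothetical, not from the patch):

def last_round_payoff(d_self: int, d_opp: int) -> int:
    # d values use the class's dict: C -> 0, D -> 1.
    # 3 - 3*d_opp + 2*d_self - d_opp*d_self maps
    # (C,C) -> 3, (D,C) -> 5, (C,D) -> 0, (D,D) -> 1.
    return 3 - 3 * d_opp + 2 * d_self - d_opp * d_self

def fold_into_average(avg: float, count: int, value: float) -> float:
    # Fold one more observation into a running average, as both ratio updates do.
    return (avg * count + value) / (count + 1)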
+
+    Names:
+
+    - Tranquilizer: [Axelrod1980]_
+    """
+
+    name = "Second by Tranquilizer"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": {"game"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self):
+        super().__init__()
+        self.num_turns_after_good_defection = 0  # equal to FD variable
+        self.opponent_consecutive_defections = 0  # equal to S variable
+        self.one_turn_after_good_defection_ratio = 5  # equal to AD variable
+        self.two_turns_after_good_defection_ratio = 0  # equal to NO variable
+        self.one_turn_after_good_defection_ratio_count = 1  # equal to AK variable
+        self.two_turns_after_good_defection_ratio_count = 1  # equal to NK variable
+        # All above variables correspond to those in the original Fortran code
+        self.dict = {C: 0, D: 1}
+
+    def update_state(self, opponent):
+        """
+        Calculates the ratio values for the one_turn_after_good_defection_ratio,
+        two_turns_after_good_defection_ratio and the probability values,
+        and sets the value of num_turns_after_good_defection.
+        """
+        if opponent.history[-1] == D:
+            self.opponent_consecutive_defections += 1
+        else:
+            self.opponent_consecutive_defections = 0
+
+        if self.num_turns_after_good_defection == 2:
+            self.num_turns_after_good_defection = 0
+            self.two_turns_after_good_defection_ratio = (
+                (
+                    self.two_turns_after_good_defection_ratio
+                    * self.two_turns_after_good_defection_ratio_count
+                )
+                + (3 - (3 * self.dict[opponent.history[-1]]))
+                + (2 * self.dict[self.history[-1]])
+                - ((self.dict[opponent.history[-1]] * self.dict[self.history[-1]]))
+            ) / (self.two_turns_after_good_defection_ratio_count + 1)
+            self.two_turns_after_good_defection_ratio_count += 1
+        elif self.num_turns_after_good_defection == 1:
+            self.num_turns_after_good_defection = 2
+            self.one_turn_after_good_defection_ratio = (
+                (
+                    self.one_turn_after_good_defection_ratio
+                    * self.one_turn_after_good_defection_ratio_count
+                )
+                + (3 - (3 * self.dict[opponent.history[-1]]))
+                + (2 * self.dict[self.history[-1]])
+                - (self.dict[opponent.history[-1]] * self.dict[self.history[-1]])
+            ) / (self.one_turn_after_good_defection_ratio_count + 1)
+            self.one_turn_after_good_defection_ratio_count += 1
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if not self.history:
+            return C
+
+        self.update_state(opponent)
+        if self.num_turns_after_good_defection in [1, 2]:
+            return C
+
+        current_score = compute_final_score(zip(self.history, opponent.history))
+
+        if (current_score[0] / ((len(self.history)) + 1)) >= 2.25:
+            probability = (
+                (
+                    0.95
+                    - (
+                        (
+                            (self.one_turn_after_good_defection_ratio)
+                            + (self.two_turns_after_good_defection_ratio)
+                            - 5
+                        )
+                        / 15
+                    )
+                )
+                + (1 / (((len(self.history)) + 1) ** 2))
+                - (self.dict[opponent.history[-1]] / 4)
+            )
+            if random.random() <= probability:
+                return C
+            self.num_turns_after_good_defection = 1
+            return D
+        if (current_score[0] / ((len(self.history)) + 1)) >= 1.75:
+            probability = (
+                (0.25 + ((opponent.cooperations + 1) / ((len(self.history)) + 1)))
+                - (self.opponent_consecutive_defections * 0.25)
+                + ((current_score[0] - current_score[1]) / 100)
+                + (4 / ((len(self.history)) + 1))
+            )
+            if random.random() <= probability:
+                return C
+            return D
+        return opponent.history[-1]
+
+
+class SecondByGrofman(IpdPlayer):
+    """
+    Submitted to Axelrod's second tournament by Bernard Grofman.
+
+    This strategy has 3 phases:
+
+    1. First it cooperates on the first two rounds.
+    2. For rounds 3-7 inclusive, it plays the same as the opponent's last move.
+    3. Thereafter, it applies the following logic, looking at its memory of the
+    last 8\* rounds (ignoring the most recent round).
+
+    - If its own previous move was C and the opponent has defected less than
+      3 times in the last 8\* rounds, cooperate
+    - If its own previous move was C and the opponent has defected 3 or
+      more times in the last 8\* rounds, defect
+    - If its own previous move was D and the opponent has defected only once
+      or not at all in the last 8\* rounds, cooperate
+    - If its own previous move was D and the opponent has defected more than
+      once in the last 8\* rounds, defect
+
+    \* The code looks at the first 7 of the last 8 rounds, ignoring the most
+    recent round.
+
+    Names:
+
+    - Grofman's strategy: [Axelrod1980b]_
+    - K86R: [Axelrod1980b]_
+    """
+
+    name = "Second by Grofman"
+    classifier = {
+        "memory_depth": 8,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # Cooperate on the first two moves
+        if len(self.history) < 2:
+            return C
+        # For rounds 3-7, play the opponent's last move
+        elif 2 <= len(self.history) <= 6:
+            return opponent.history[-1]
+        else:
+            # Note: the Fortran code behavior ignores the opponent behavior
+            # in the last round and instead looks at the first 7 of the last
+            # 8 rounds.
+            opponent_defections_last_8_rounds = opponent.history[-8:-1].count(D)
+            if self.history[-1] == C and opponent_defections_last_8_rounds <= 2:
+                return C
+            if self.history[-1] == D and opponent_defections_last_8_rounds <= 1:
+                return C
+            return D
+
+
+class SecondByKluepfel(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Charles Kluepfel
+    (K32R).
+
+    This player keeps track of the opponent's responses to own behavior:
+
+    - `cd_count` counts: Opponent cooperates as response to player defecting.
+    - `dd_count` counts: Opponent defects as response to player defecting.
+    - `cc_count` counts: Opponent cooperates as response to player cooperating.
+    - `dc_count` counts: Opponent defects as response to player cooperating.
+
+    After 26 turns, the player then tries to detect a random player. The
+    player decides that the opponent is random if
+    cd_counts >= (cd_counts+dd_counts)/2 - 0.75*sqrt(cd_counts+dd_counts) AND
+    cc_counts >= (dc_counts+cc_counts)/2 - 0.75*sqrt(dc_counts+cc_counts).
+    If the player decides that they are playing against a random player, then
+    they will always defect.
+
+    Otherwise respond to recent history using the following set of rules:
+
+    - If opponent's last three choices are the same, then respond in kind.
+    - If opponent's last two choices are the same, then respond in kind with
+      probability 90%.
+    - Otherwise if opponent's last action was to cooperate, then cooperate
+      with probability 70%.
+    - Otherwise if opponent's last action was to defect, then defect
+      with probability 60%.
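The randomness test above compares each response count against a fair-coin mean minus 0.75 standard deviations. A standalone sketch of the two inequalities as the strategy's code below applies them (the function name is hypothetical; note the code compares `dc_counts` in the second inequality where the text says `cc_counts`):

import math

def opponent_looks_random(cd: int, dd: int, cc: int, dc: int) -> bool:
    # Responses to our defections look coin-like...
    after_d = cd >= (cd + dd) / 2 - 0.75 * math.sqrt(cd + dd)
    # ...and responses to our cooperations look coin-like too.
    after_c = dc >= (dc + cc) / 2 - 0.75 * math.sqrt(dc + cc)
    return after_d and after_c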
+
+    Names:
+
+    - Kluepfel: [Axelrod1980b]_
+    """
+
+    name = "Second by Kluepfel"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self):
+        super().__init__()
+        self.cd_counts, self.dd_counts, self.dc_counts, self.cc_counts = 0, 0, 0, 0
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # First update the response matrix.
+        if len(self.history) >= 2:
+            if self.history[-2] == D:
+                if opponent.history[-1] == C:
+                    self.cd_counts += 1
+                else:
+                    self.dd_counts += 1
+            else:
+                if opponent.history[-1] == C:
+                    self.cc_counts += 1
+                else:
+                    self.dc_counts += 1
+
+        # Check for randomness
+        if len(self.history) > 26:
+            if self.cd_counts >= (self.cd_counts + self.dd_counts) / 2 - 0.75 * np.sqrt(
+                self.cd_counts + self.dd_counts
+            ) and self.dc_counts >= (
+                self.dc_counts + self.cc_counts
+            ) / 2 - 0.75 * np.sqrt(
+                self.dc_counts + self.cc_counts
+            ):
+                return D
+
+        # Otherwise respond to recent history
+
+        one_move_ago, two_moves_ago, three_moves_ago = C, C, C
+        if len(opponent.history) >= 1:
+            one_move_ago = opponent.history[-1]
+        if len(opponent.history) >= 2:
+            two_moves_ago = opponent.history[-2]
+        if len(opponent.history) >= 3:
+            three_moves_ago = opponent.history[-3]
+
+        if one_move_ago == two_moves_ago and two_moves_ago == three_moves_ago:
+            return one_move_ago
+
+        r = random.random()  # Everything following is stochastic
+        if one_move_ago == two_moves_ago:
+            if r < 0.9:
+                return one_move_ago
+            else:
+                return one_move_ago.flip()
+        if one_move_ago == C:
+            if r < 0.7:
+                return one_move_ago
+            else:
+                return one_move_ago.flip()
+        if one_move_ago == D:
+            if r < 0.6:
+                return one_move_ago
+            else:
+                return one_move_ago.flip()
+
+
+class SecondByBorufsen(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Otto Borufsen
+    (K32R), and came in third in that tournament.
+
+    This player keeps track of the opponent's responses to own behavior:
+
+    - `cd_count` counts: Opponent cooperates as response to player defecting.
+    - `cc_count` counts: Opponent cooperates as response to player cooperating.
+
+    The player has a defect mode and a normal mode. In defect mode, the
+    player will always defect. In normal mode, the player obeys the following
+    ranked rules:
+
+    1. If in the last three turns, both the player/opponent defected, then
+       cooperate for a single turn.
+    2. If in the last three turns, the player/opponent acted differently from
+       each other and they're alternating, then change next defect to
+       cooperate. (Doesn't block third rule.)
+    3. Otherwise, do tit-for-tat.
+
+    Start in normal mode, but every 25 turns starting with the 27th turn,
+    re-evaluate the mode. Enter defect mode if any of the following
+    conditions hold:
+
+    - Detected random: Opponent cooperated 7-18 times since last mode
+      evaluation (or start) AND less than 70% of opponent cooperation was in
+      response to player's cooperation, i.e.
+      cc_count / (cc_count+cd_count) < 0.7
+    - Detect defective: Opponent cooperated fewer than 3 times since last mode
+      evaluation.
+
+    When switching to defect mode, defect immediately. The first two rules for
+    normal mode require that the last three turns were in normal mode. When
+    starting normal mode from defect mode, defect on first move.
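The every-25-turn mode re-evaluation above boils down to two counts. A minimal sketch (the function name is hypothetical; the thresholds follow the strategy's code, which checks 8 <= coops <= 17 for the 7-18 band):

def reevaluate_mode(cc_counts: int, cd_counts: int) -> str:
    coops = cc_counts + cd_counts
    if coops < 3:
        return "Defect"  # opponent looks overly defective
    if 8 <= coops <= 17 and cc_counts / coops < 0.7:
        return "Defect"  # opponent looks random
    return "Normal"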
+
+    Names:
+
+    - Borufsen: [Axelrod1980b]_
+    """
+
+    name = "Second by Borufsen"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self):
+        super().__init__()
+        self.cd_counts, self.cc_counts = 0, 0
+        self.mutual_defect_streak = 0
+        self.echo_streak = 0
+        self.flip_next_defect = False
+        self.mode = "Normal"
+
+    def try_return(self, to_return):
+        """
+        We put the logic to check for the `flip_next_defect` bit here,
+        and proceed like normal otherwise.
+        """
+
+        if to_return == C:
+            return C
+        # Otherwise look for flip bit.
+        if self.flip_next_defect:
+            self.flip_next_defect = False
+            return C
+        return D
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turn = len(self.history) + 1
+
+        if turn == 1:
+            return C
+
+        # Update the response history.
+        if turn >= 3:
+            if opponent.history[-1] == C:
+                if self.history[-2] == C:
+                    self.cc_counts += 1
+                else:
+                    self.cd_counts += 1
+
+        # Check if it's time for a mode change.
+        if turn > 2 and turn % 25 == 2:
+            coming_from_defect = False
+            if self.mode == "Defect":
+                coming_from_defect = True
+
+            self.mode = "Normal"
+            coops = self.cd_counts + self.cc_counts
+
+            # Check for a defective strategy
+            if coops < 3:
+                self.mode = "Defect"
+
+            # Check for a random strategy
+            if (coops >= 8 and coops <= 17) and self.cc_counts / coops < 0.7:
+                self.mode = "Defect"
+
+            self.cd_counts, self.cc_counts = 0, 0
+
+            # If defect mode, clear flags
+            if self.mode == "Defect":
+                self.mutual_defect_streak = 0
+                self.echo_streak = 0
+                self.flip_next_defect = False
+
+            # Check this special case
+            if self.mode == "Normal" and coming_from_defect:
+                return D
+
+        # Proceed
+        if self.mode == "Defect":
+            return D
+        else:
+            assert self.mode == "Normal"
+
+            # Look for mutual defects
+            if self.history[-1] == D and opponent.history[-1] == D:
+                self.mutual_defect_streak += 1
+            else:
+                self.mutual_defect_streak = 0
+            if self.mutual_defect_streak >= 3:
+                self.mutual_defect_streak = 0
+                self.echo_streak = 0  # Reset both streaks.
+                return self.try_return(C)
+
+            # Look for echoes
+            # Fortran code defaults two turns back to C if only second turn
+            my_two_back, opp_two_back = C, C
+            if turn >= 3:
+                my_two_back = self.history[-2]
+                opp_two_back = opponent.history[-2]
+            if (
+                self.history[-1] != opponent.history[-1]
+                and self.history[-1] == opp_two_back
+                and opponent.history[-1] == my_two_back
+            ):
+                self.echo_streak += 1
+            else:
+                self.echo_streak = 0
+            if self.echo_streak >= 3:
+                self.mutual_defect_streak = 0  # Reset both streaks.
+                self.echo_streak = 0
+                self.flip_next_defect = True
+
+            # Tit-for-tat
+            return self.try_return(opponent.history[-1])
+
+
+class SecondByCave(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Rob Cave (K49R), and
+    came in fourth in that tournament.
+
+    First look for overly-defective or apparently random opponents, and defect
+    if found. That is, any opponent meeting one of:
+
+    - turn > 39 and percent defects > 0.39
+    - turn > 29 and percent defects > 0.65
+    - turn > 19 and percent defects > 0.79
+
+    Otherwise, respond to cooperation with cooperation. And respond to defects
+    with either a defect (if opponent has defected at least 18 times) or with
+    a random (50/50) choice. [Cooperate on first.]
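Cave's write-off rule above is just three escalating thresholds on the opponent's defection rate. A compact sketch (the function name is hypothetical):

def write_off_opponent(turn: int, opponent_defections: int) -> bool:
    # Escalating defection-rate thresholds from the description above.
    perc_defects = opponent_defections / turn
    return ((turn > 39 and perc_defects > 0.39)
            or (turn > 29 and perc_defects > 0.65)
            or (turn > 19 and perc_defects > 0.79))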
+
+    Names:
+
+    - Cave: [Axelrod1980b]_
+    """
+
+    name = "Second by Cave"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turn = len(self.history) + 1
+        if turn == 1:
+            return C
+
+        number_defects = opponent.defections
+        perc_defects = number_defects / turn
+
+        # Defect if the opponent has defected often or appears random.
+        if turn > 39 and perc_defects > 0.39:
+            return D
+        if turn > 29 and perc_defects > 0.65:
+            return D
+        if turn > 19 and perc_defects > 0.79:
+            return D
+
+        if opponent.history[-1] == D:
+            if number_defects > 17:
+                return D
+            else:
+                return random_choice(0.5)
+        else:
+            return C
+
+
+class SecondByWmAdams(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by William Adams (K44R),
+    and came in fifth in that tournament.
+
+    Count the number of opponent defections after their first move, and call
+    it `c_defect`. Defect if c_defect equals 4, 7, or 9. If c_defect > 9,
+    then defect immediately after opponent defects with probability =
+    (0.5)^(c_defect-1). Otherwise cooperate.
+
+    Names:
+
+    - WmAdams: [Axelrod1980b]_
+    """
+
+    name = "Second by WmAdams"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(self.history) <= 1:
+            return C
+        number_defects = opponent.defections
+        if opponent.history[0] == D:
+            number_defects -= 1
+
+        if number_defects in [4, 7, 9]:
+            return D
+        if number_defects > 9 and opponent.history[-1] == D:
+            return random_choice((0.5) ** (number_defects - 9))
+        return C
+
+
+class SecondByGraaskampKatzen(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Jim Graaskamp and Ken
+    Katzen (K60R), and came in sixth in that tournament.
+
+    Play Tit-for-Tat at first, and track own score. At select checkpoints,
+    check for a high score. Switch to Defect Mode if:
+
+    - On move 11, score < 23
+    - On move 21, score < 53
+    - On move 31, score < 83
+    - On move 41, score < 113
+    - On move 51, score < 143
+    - On move 101, score < 293
+
+    Once in Defect Mode, defect forever.
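The checkpoint table above rises by 30 points per 10 turns, so the strategy is effectively demanding roughly 3 points per turn. A small sketch of the test (the names are hypothetical):

CHECKPOINTS = {11: 23, 21: 53, 31: 83, 41: 113, 51: 143, 101: 293}

def fails_checkpoint(turn: int, own_score: int) -> bool:
    # Defect forever once a checkpoint score is missed.
    threshold = CHECKPOINTS.get(turn)
    return threshold is not None and own_score < threshold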
+
+    Names:
+
+    - GraaskampKatzen: [Axelrod1980b]_
+    """
+
+    name = "Second by GraaskampKatzen"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": {"game"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self):
+        super().__init__()
+        self.own_score = 0
+        self.mode = "Normal"
+
+    def update_score(self, opponent: IpdPlayer):
+        game = self.match_attributes["game"]
+        last_round = (self.history[-1], opponent.history[-1])
+        self.own_score += game.score(last_round)[0]
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if self.mode == "Defect":
+            return D
+
+        turn = len(self.history) + 1
+        if turn == 1:
+            return C
+
+        self.update_score(opponent)
+
+        if (
+            turn == 11
+            and self.own_score < 23
+            or turn == 21
+            and self.own_score < 53
+            or turn == 31
+            and self.own_score < 83
+            or turn == 41
+            and self.own_score < 113
+            or turn == 51
+            and self.own_score < 143
+            or turn == 101
+            and self.own_score < 293
+        ):
+            self.mode = "Defect"
+            return D
+
+        return opponent.history[-1]  # Tit-for-Tat
+
+
+class SecondByWeiner(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Herb Weiner (K41R),
+    and came in seventh in that tournament.
+
+    Play Tit-for-Tat with a chance for forgiveness and a defective override.
+
+    The chance for forgiveness happens only if `forgive_flag` is raised
+    (flag discussed below). If raised and `turn` is greater than `grudge`,
+    then override Tit-for-Tat with Cooperation. `grudge` is a variable that
+    starts at 0 and increments by 20 with each forgiven Defect (a Defect that
+    is overridden through the forgiveness logic). `forgive_flag` is lowered
+    whether the logic is overridden or not.
+
+    The variable `defect_padding` increments with each opponent Defect, but
+    resets to zero with each opponent Cooperate (or `forgive_flag` lowering) so
+    that it roughly counts Defects between Cooperates. Whenever the opponent
+    Cooperates, if `defect_padding` (before resetting) is odd, then we raise
+    `forgive_flag` for next turn.
+
+    Finally a defective override is assessed after forgiveness. If five or
+    more of the opponent's last twelve actions are Defects, then Defect. This
+    will overrule a forgiveness, but doesn't undo the lowering of
+    `forgive_flag`. Note that "last twelve actions" doesn't count the most
+    recent action. Actually the original code updates history after checking
+    for defect override.
+
+    Names:
+
+    - Weiner: [Axelrod1980b]_
+    """
+
+    name = "Second by Weiner"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self):
+        super().__init__()
+        self.forgive_flag = False
+        self.grudge = 0
+        self.defect_padding = 0
+        self.last_twelve = [0] * 12
+        self.lt_index = 0  # Circles around last_twelve
+
+    def try_return(self, to_return):
+        """
+        We put the logic here to check for the defective override.
+        """
+
+        if np.sum(self.last_twelve) >= 5:
+            return D
+        return to_return
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(opponent.history) == 0:
+            return C
+
+        # Update history, lag 1.
+        if len(opponent.history) >= 2:
+            self.last_twelve[self.lt_index] = 0
+            if opponent.history[-2] == D:
+                self.last_twelve[self.lt_index] = 1
+            self.lt_index = (self.lt_index + 1) % 12
+
+        if self.forgive_flag:
+            self.forgive_flag = False
+            self.defect_padding = 0
+            if self.grudge < len(self.history) + 1 and opponent.history[-1] == D:
+                # Then override
+                self.grudge += 20
+                return self.try_return(C)
+            else:
+                return self.try_return(opponent.history[-1])
+        else:
+            # See if forgive_flag should be raised
+            if opponent.history[-1] == D:
+                self.defect_padding += 1
+            else:
+                if self.defect_padding % 2 == 1:
+                    self.forgive_flag = True
+                self.defect_padding = 0
+
+            return self.try_return(opponent.history[-1])
+
+
+class SecondByHarrington(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Paul Harrington (K75R)
+    and came in eighth in that tournament.
+
+    This strategy has three modes: Normal, Fair-weather, and Defect. These
+    mode names were not present in Harrington's submission.
+
+    In Normal and Fair-weather modes, the strategy begins by:
+
+    - Update history
+    - Try to detect random opponent if turn is multiple of 15 and >=30.
+    - Check if `burned` flag should be raised.
+    - Check for Fair-weather opponent if turn is 38.
+
+    Updating history means to increment the correct cell of the `move_history`.
+    `move_history` is a matrix where the columns are the opponent's previous
+    move and the rows are indexed by the combo of this player's and the
+    opponent's moves two turns ago. [The upper-left cell must be all
+    Cooperations, but otherwise order doesn't matter.] After we enter Defect
+    mode, `move_history` won't be used again.
+
+    If the turn is a multiple of 15 and >=30, then attempt to detect random.
+    If random is detected, enter Defect mode and defect immediately. If the
+    player was previously in Defect mode, then do not re-enter. The random
+    detection logic is a modified Pearson's Chi Squared test, with some
+    additional checks. [More details in `detect_random` docstrings.]
+
+    Some of this player's moves are marked as "generous." If this player made
+    a generous move two turns ago and the opponent replied with a Defect, then
+    raise the `burned` flag. This will stop certain generous moves later.
+
+    The player mostly plays Tit-for-Tat for the first 36 moves, then defects on
+    the 37th move. If the opponent cooperates on the first 36 moves, and
+    defects on the 37th move also, then enter Fair-weather mode and cooperate
+    this turn. Entering Fair-weather mode is extremely rare, since this can
+    only happen if the opponent cooperates for the first 36 then defects
+    unprovoked on the 37th. (That is, this player's first 36 moves are also
+    Cooperations, so there's nothing really to trigger an opponent Defection.)
+
+    Next in Normal Mode:
+
+    1. Check for defect and parity streaks.
+    2. Check if cooperations are scheduled.
+    3. Otherwise,
+
+    - If turn < 37, Tit-for-Tat.
+    - If turn = 37, defect, mark this move as generous, and schedule two
+      more cooperations**.
+    - If turn > 37, then if `burned` flag is raised, then Tit-for-Tat.
+      Otherwise, Tit-for-Tat with probability 1 - `prob`. And with
+      probability `prob`, defect, schedule two cooperations, mark this move
+      as generous, and increase `prob` by 5%.
+
+    ** Scheduling two cooperations means to set `more_coop` flag to two. If in
+    Normal mode and no streaks are detected, then the player will cooperate and
+    lower this flag, until hitting zero. It's possible that the flag can be
+    overwritten.
+    Notably, on the turn-37 defect this flag is set to two, but the turn-38
+    Fair-weather check can overwrite it.
+
+    If the opponent's last twenty moves were defections, then defect this turn.
+    Then check for a parity streak, by flipping the parity bit (there are two
+    streaks that get tracked which are something like odd and even turns, but
+    this flip bit logic doesn't get run every turn), then incrementing the
+    parity streak that we're pointing to. If the parity streak that we're
+    pointing to is then greater than `parity_limit` then reset the streak and
+    cooperate immediately. `parity_limit` is initially set to five, but after
+    it has been hit eight times, it decreases to three. The parity streak that
+    we're pointing to also gets incremented if in normal mode and we defect but
+    not on turn 38, unless we are defecting as the result of a defect streak.
+    Note that the parity streaks reset but the defect streak doesn't.
+
+    If `more_coop` >= 1, then we cooperate and lower that flag here, in Normal
+    mode after checking streaks. Still lower this flag if cooperating as the
+    result of a parity streak or in Fair-weather mode.
+
+    Then use the logic based on turn from above.
+
+    In Fair-Weather mode after running the code from above, check if opponent
+    defected last turn. If so, exit Fair-Weather mode, and proceed THIS TURN
+    with Normal mode. Otherwise cooperate.
+
+    In Defect mode, update the `exit_defect_meter` (originally zero) by
+    incrementing if opponent defected last turn and decreasing by three
+    otherwise. If `exit_defect_meter` is then 11, then set mode to Normal (for
+    future turns), cooperate and schedule two more cooperations. [Note that
+    this move is not marked generous.]
+
+    Names:
+
+    - Harrington: [Axelrod1980b]_
+    """
+
+    name = "Second by Harrington"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self):
+        super().__init__()
+        self.mode = "Normal"
+        self.recorded_defects = 0  # Count opponent defects after turn 1
+        self.exit_defect_meter = 0  # When >= 11, then exit defect mode.
+        self.coops_in_first_36 = None  # On turn 37, count cooperations in first 36
+        self.was_defective = False  # Previously in Defect mode
+
+        self.prob = 0.25  # After turn 37, probability that we'll defect
+
+        self.move_history = np.zeros([4, 2])
+
+        self.more_coop = 0  # This schedules cooperation for future turns
+        # Initialize last_generous_n_turns_ago to 3 because this counts up and
+        # triggers a strategy change at 2.
+        self.last_generous_n_turns_ago = 3  # How many turns ago was a "generous" move
+        self.burned = False
+
+        self.defect_streak = 0
+        self.parity_streak = [
+            0,
+            0,
+        ]  # Counters that get (almost) alternately incremented.
+        self.parity_bit = 0  # Which parity_streak to increment
+        self.parity_limit = 5  # When a parity streak hits this limit, alter strategy.
+        self.parity_hits = 0  # Counts how many times a parity_limit was hit.
+        # After hitting parity_hits 8 times, lower parity_limit to 3.
+
+    def try_return(self, to_return, lower_flags=True, inc_parity=False):
+        """
+        This will return to_return, with some end-of-turn logic.
+        """
+
+        if lower_flags and to_return == C:
+            # In most cases when Cooperating, we want to reduce the number that
+            # are scheduled.
+            self.more_coop -= 1
+            self.last_generous_n_turns_ago += 1
+
+        if inc_parity and to_return == D:
+            # In some cases we increment the `parity_streak` that we're on when
+            # we return a Defection. In detect_parity_streak, `parity_streak`
+            # counts opponent's Defections.
+            self.parity_streak[self.parity_bit] += 1
+
+        return to_return
+
+    def calculate_chi_squared(self, turn):
+        """
+        Pearson's Chi Squared statistic = sum[ (E_i-O_i)^2 / E_i ], where O_i
+        are the observed matrix values, and E_i is calculated as the number (of
+        defects) in the row times the number in the column over (total number
+        in the matrix minus 1). Equivalently, we expect (for an independent
+        distribution) the total number of recorded turns times the portion in
+        that row times the portion in that column.
+
+        In this function, the statistic is non-standard in that it excludes
+        summands where E_i <= 1.
+        """
+
+        denom = turn - 2
+
+        expected_matrix = (
+            np.outer(self.move_history.sum(axis=1), self.move_history.sum(axis=0))
+            / denom
+        )
+
+        chi_squared = 0.0
+        for i in range(4):
+            for j in range(2):
+                expect = expected_matrix[i, j]
+                if expect > 1.0:
+                    chi_squared += (expect - self.move_history[i, j]) ** 2 / expect
+
+        return chi_squared
+
+    def detect_random(self, turn):
+        """
+        We check if the top-left cell of the matrix (corresponding to all
+        Cooperations) has over 80% of the turns. In which case, we label
+        non-random.
+
+        Then we check if over 75% or under 25% of the opponent's turns are
+        Defections. If so, then we label as non-random.
+
+        Otherwise we calculate a modified Pearson's Chi Squared statistic on
+        self.history, and return True (is random) if and only if the statistic
+        is less than or equal to 3.
+        """
+
+        denom = turn - 2
+
+        if self.move_history[0, 0] / denom >= 0.8:
+            return False
+        if self.recorded_defects / denom < 0.25 or self.recorded_defects / denom > 0.75:
+            return False
+
+        if self.calculate_chi_squared(turn) > 3:
+            return False
+        return True
+
+    def detect_streak(self, last_move):
+        """
+        Return true if and only if the opponent's last twenty moves are defects.
+        """
+
+        if last_move == D:
+            self.defect_streak += 1
+        else:
+            self.defect_streak = 0
+        if self.defect_streak >= 20:
+            return True
+        return False
+
+    def detect_parity_streak(self, last_move):
+        """
+        Switch which `parity_streak` we're pointing to and increment it if the
+        opponent's last move was a Defection. Otherwise reset the flag. Then
+        return true if and only if the `parity_streak` is at least
+        `parity_limit`.
+
+        This is similar to detect_streak with alternating streaks, except that
+        these streaks get incremented elsewhere as well.
+        """
+
+        self.parity_bit = 1 - self.parity_bit  # Flip bit
+        if last_move == D:
+            self.parity_streak[self.parity_bit] += 1
+        else:
+            self.parity_streak[self.parity_bit] = 0
+        if self.parity_streak[self.parity_bit] >= self.parity_limit:
+            return True
+        return False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turn = len(self.history) + 1
+
+        if turn == 1:
+            return C
+
+        if self.mode == "Defect":
+            # There's a chance to exit Defect mode.
+            if opponent.history[-1] == D:
+                self.exit_defect_meter += 1
+            else:
+                self.exit_defect_meter -= 3
+            # If the opponent has been defecting enough, exit Defect mode.
+            if self.exit_defect_meter >= 11:
+                self.mode = "Normal"
+                self.was_defective = True
+                self.more_coop = 2
+                return self.try_return(to_return=C, lower_flags=False)
+
+            return self.try_return(D)
+
+        # If not Defect mode, proceed to update history and check for random,
+        # check if burned, and check if opponent's fairweather.
+
+        # If we haven't yet entered Defect mode
+        if not self.was_defective:
+            if turn > 2:
+                if opponent.history[-1] == D:
+                    self.recorded_defects += 1
+
+                # Column decided by opponent's last turn
+                history_col = 1 if opponent.history[-1] == D else 0
+                # Row is decided by opponent's move two turns ago and our move
+                # two turns ago.
+                history_row = 1 if opponent.history[-2] == D else 0
+                if self.history[-2] == D:
+                    history_row += 2
+                self.move_history[history_row, history_col] += 1
+
+            # Try to detect random opponent
+            if turn % 15 == 0 and turn > 15:
+                if self.detect_random(turn):
+                    self.mode = "Defect"
+                    return self.try_return(
+                        D, lower_flags=False
+                    )  # Lower_flags not used here.
+
+        # If generous 2 turns ago and opponent defected last turn
+        if self.last_generous_n_turns_ago == 2 and opponent.history[-1] == D:
+            self.burned = True
+
+        # Only enter Fair-weather mode if the opponent Cooperated the first 36
+        # turns then Defected on the 37th.
+        if turn == 38 and opponent.history[-1] == D and opponent.cooperations == 36:
+            self.mode = "Fair-weather"
+            return self.try_return(to_return=C, lower_flags=False)
+
+        if self.mode == "Fair-weather":
+            if opponent.history[-1] == D:
+                self.mode = "Normal"  # Post-Defect is not possible
+                # Proceed with Normal mode this turn.
+            else:
+                # Never defect against a fair-weather opponent
+                return self.try_return(C)
+
+        # Continue with Normal mode
+
+        # Check for streaks
+        if self.detect_streak(opponent.history[-1]):
+            return self.try_return(D, inc_parity=True)
+        if self.detect_parity_streak(opponent.history[-1]):
+            self.parity_streak[
+                self.parity_bit
+            ] = 0  # Reset `parity_streak` when we hit the limit.
+            self.parity_hits += 1  # Keep track of how many times we hit the limit.
+            if self.parity_hits >= 8:  # After 8 times, lower the limit.
+                self.parity_limit = 3
+            return self.try_return(
+                C, inc_parity=True
+            )  # Inc parity won't get used here.
+
+        # If we have Cooperations scheduled, then Cooperate here.
+        if self.more_coop >= 1:
+            return self.try_return(C, lower_flags=True, inc_parity=True)
+
+        if turn < 37:
+            # Tit-for-Tat
+            return self.try_return(opponent.history[-1], inc_parity=True)
+        if turn == 37:
+            # Defect once on turn 37 (if no streaks)
+            self.more_coop, self.last_generous_n_turns_ago = 2, 1
+            return self.try_return(D, lower_flags=False)
+        if self.burned or random.random() > self.prob:
+            # Tit-for-Tat with probability 1-`prob`
+            return self.try_return(opponent.history[-1], inc_parity=True)
+
+        # Otherwise Defect, Cooperate, Cooperate, and increase `prob`
+        self.prob += 0.05
+        self.more_coop, self.last_generous_n_turns_ago = 2, 1
+        return self.try_return(D, lower_flags=False)
+
+
+class SecondByTidemanAndChieruzzi(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by T. Nicolaus Tideman
+    and Paula Chieruzzi (K84R) and came in ninth in that tournament.
+
+    This strategy Cooperates if this player's score exceeds the opponent's
+    score by at least `score_to_beat`. `score_to_beat` starts at zero and
+    increases by `score_to_beat_inc` every time the opponent's last two moves
+    are a Cooperation and Defection in that order.
+    `score_to_beat_inc` itself increases by 5 every time the opponent's last
+    two moves are a Cooperation and Defection in that order.
+
+    Additionally, the strategy executes a "fresh start" if the following hold:
+
+    - The strategy would Defect by score (difference less than `score_to_beat`)
+    - The opponent did not Cooperate and Defect (in order) in the last two
+      turns.
+    - It's been at least 10 turns since the last fresh start. Or since the
+      match started if there hasn't been a fresh start yet.
+
+    A "fresh start" entails two Cooperations and resetting scores,
+    `score_to_beat` and `score_to_beat_inc`.
+
+    Names:
+
+    - TidemanAndChieruzzi: [Axelrod1980b]_
+    """
+
+    name = "Second by Tideman and Chieruzzi"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": {"game"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.current_score = 0
+        self.opponent_score = 0
+        self.last_fresh_start = 0
+        self.fresh_start = False
+        self.score_to_beat = 0
+        self.score_to_beat_inc = 0
+
+    def _fresh_start(self):
+        """Give the opponent a fresh start by forgetting the past."""
+        self.current_score = 0
+        self.opponent_score = 0
+        self.score_to_beat = 0
+        self.score_to_beat_inc = 0
+
+    def _score_last_round(self, opponent: IpdPlayer):
+        """Updates the scores for each player."""
+        # Load the default game if not supplied by a tournament.
+        game = self.match_attributes["game"]
+        last_round = (self.history[-1], opponent.history[-1])
+        scores = game.score(last_round)
+        self.current_score += scores[0]
+        self.opponent_score += scores[1]
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        current_round = len(self.history) + 1
+
+        if current_round == 1:
+            return C
+
+        # Calculate the scores.
+        self._score_last_round(opponent)
+
+        # Check if we have recently given the strategy a fresh start.
+        if self.fresh_start:
+            self._fresh_start()
+            self.last_fresh_start = current_round
+            self.fresh_start = False
+            return C  # Second cooperation
+
+        opponent_CDd = False
+
+        opponent_two_turns_ago = C  # Default value for second turn.
+        if len(opponent.history) >= 2:
+            opponent_two_turns_ago = opponent.history[-2]
+        # If opponent's last two turns are C and D in that order.
+        if opponent_two_turns_ago == C and opponent.history[-1] == D:
+            opponent_CDd = True
+            self.score_to_beat += self.score_to_beat_inc
+            self.score_to_beat_inc += 5
+
+        # Cooperate if we're beating opponent by at least `score_to_beat`
+        if self.current_score - self.opponent_score >= self.score_to_beat:
+            return C
+
+        # Wait at least ten turns for another fresh start.
+        if (not opponent_CDd) and current_round - self.last_fresh_start >= 10:
+            # 50-50 split is based off the binomial distribution.
+            N = opponent.cooperations + opponent.defections
+            # std_dev = sqrt(N*p*(1-p)) where p is 1 / 2.
+            std_deviation = (N ** (1 / 2)) / 2
+            lower = N / 2 - 3 * std_deviation
+            upper = N / 2 + 3 * std_deviation
+            if opponent.defections <= lower or opponent.defections >= upper:
+                # Opponent deserves a fresh start
+                self.fresh_start = True
+                return C  # First cooperation
+
+        return D
+
+
+class SecondByGetzler(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Abraham Getzler (K35R)
+    and came in eleventh in that tournament.
+
+    Strategy Defects with probability `flack`, where `flack` is calculated as
+    the sum over opponent Defections of 0.5 ^ (turns ago Defection happened).
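Getzler's `flack` is an exponentially decaying memory of opponent defections, matching the update in the class that follows. A standalone sketch (the function name and string encoding of moves are illustrative only):

def flack_after(opponent_moves: str) -> float:
    # Each opponent D adds 1; everything halves every round,
    # so a D from k turns ago contributes 0.5 ** k.
    flack = 0.0
    for move in opponent_moves:  # e.g. "CCDDC"
        flack += 1 if move == "D" else 0
        flack *= 0.5
    return flack  # probability of defecting on the next move

For example, flack_after("DD") is 0.75: the older defection contributes 0.25 and the recent one 0.5.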
+
+    Names:
+
+    - Getzler: [Axelrod1980b]_
+    """
+
+    name = "Second by Getzler"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.flack = 0.0  # The relative untrustworthiness of opponent
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if not opponent.history:
+            return C
+
+        self.flack += 1 if opponent.history[-1] == D else 0
+        self.flack *= 0.5  # Defections have half-life of one round
+
+        return random_choice(1.0 - self.flack)
+
+
+class SecondByLeyvraz(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Fransois Leyvraz
+    (K68R) and came in twelfth in that tournament.
+
+    The strategy uses the opponent's last three moves to decide on an action
+    based on the following ordered rules.
+
+    1. If opponent Defected last two turns, then Defect with prob 75%.
+    2. If opponent Defected three turns ago, then Cooperate.
+    3. If opponent Defected two turns ago, then Defect.
+    4. If opponent Defected last turn, then Defect with prob 50%.
+    5. Otherwise (all Cooperations), then Cooperate.
+
+    Names:
+
+    - Leyvraz: [Axelrod1980b]_
+    """
+
+    name = "Second by Leyvraz"
+    classifier = {
+        "memory_depth": 3,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.prob_coop = {
+            (C, C, C): 1.0,
+            (C, C, D): 0.5,  # Rule 4
+            (C, D, C): 0.0,  # Rule 3
+            (C, D, D): 0.25,  # Rule 1
+            (D, C, C): 1.0,  # Rule 2
+            (D, C, D): 1.0,  # Rule 2
+            (D, D, C): 1.0,  # Rule 2
+            (D, D, D): 0.25,  # Rule 1
+        }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        recent_history = [C, C, C]  # Default to C.
+        for go_back in range(1, 4):
+            if len(opponent.history) >= go_back:
+                recent_history[-go_back] = opponent.history[-go_back]
+
+        return random_choice(
+            self.prob_coop[(recent_history[-3], recent_history[-2], recent_history[-1])]
+        )
+
+
+class SecondByWhite(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Edward C White (K72R)
+    and came in thirteenth in that tournament.
+
+    * Cooperate in the first ten turns, or whenever the opponent Cooperated
+      last turn.
+    * Otherwise Defect if and only if:
+      floor(log(turn)) * opponent Defections >= turn
+
+    Names:
+
+    - White: [Axelrod1980b]_
+    """
+
+    name = "Second by White"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turn = len(self.history) + 1
+
+        if turn <= 10 or opponent.history[-1] == C:
+            return C
+
+        if np.floor(np.log(turn)) * opponent.defections >= turn:
+            return D
+        return C
+
+
+class SecondByBlack(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Paul E Black (K83R)
+    and came in fifteenth in that tournament.
+
+    The strategy Cooperates for the first five turns.
+    Then it calculates the number of opponent defects in the last five moves
+    and Cooperates with probability `prob_coop`[`number_defects`], where:
+
+    prob_coop[number_defects] = 1 - (number_defects ^ 2 - 1) / 25
+
+    Names:
+
+    - Black: [Axelrod1980b]_
+    """
+
+    name = "Second by Black"
+    classifier = {
+        "memory_depth": 5,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        # Maps number of opponent defects from last five moves to own
+        # Cooperation probability
+        self.prob_coop = {0: 1.0, 1: 1.0, 2: 0.88, 3: 0.68, 4: 0.4, 5: 0.04}
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(opponent.history) < 5:
+            return C
+
+        recent_history = opponent.history[-5:]
+
+        did_d = np.vectorize(lambda action: int(action == D))
+        number_defects = np.sum(did_d(recent_history))
+
+        return random_choice(self.prob_coop[number_defects])
+
+
+class SecondByRichardHufford(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Richard Hufford (K47R)
+    and came in sixteenth in that tournament.
+
+    The strategy tracks opponent "agreements", that is whenever the opponent's
+    previous move is the same as this player's move two turns ago. If the
+    opponent's first move is a Defection, this is counted as a disagreement,
+    and otherwise an agreement. From the agreement counts, two measures are
+    calculated:
+
+    - `proportion_agree`: This is the number of agreements (through opponent's
+      last turn) + 2 divided by the current turn number.
+    - `last_four_num`: The number of agreements in the last four turns. If
+      there have been fewer than four previous turns, then this is the number
+      of agreements + (4 - number of past turns).
+
+    We then use these measures to decide how to play, using these rules:
+
+    1. If `proportion_agree` > 0.9 and `last_four_num` >= 4, then Cooperate.
+    2. Otherwise if `proportion_agree` >= 0.625 and `last_four_num` >= 2, then
+       Tit-for-Tat.
+    3. Otherwise, Defect.
+
+    However, if the opponent has Cooperated the last `streak_needed` turns,
+    then the strategy deviates from the usual strategy, and instead Defects.
+    (We call such a deviation an "aberration".) In the turn immediately after
+    an aberration, the strategy doesn't override, even if there's a streak of
+    Cooperations. Two turns after an aberration, the strategy: Restarts the
+    Cooperation streak (never looking before this turn); Cooperates; and
+    changes `streak_needed` to:
+
+    floor(20.0 * `num_abb_def` / `num_abb_coop`) + 1
+
+    Here `num_abb_def` is 2 + the number of times that the opponent Defected in
+    the turn after an aberration, and `num_abb_coop` is 2 + the number of times
+    that the opponent Cooperated in response to an aberration.
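The `streak_needed` update above is a single formula. A sketch (the function name is hypothetical):

import math

def new_streak_needed(num_abb_def: int, num_abb_coop: int) -> int:
    # Recompute the cooperation-streak length that triggers an aberration.
    return int(math.floor(20.0 * num_abb_def / num_abb_coop)) + 1

With the starting counter values of 2 and 2, this gives 21, which matches the initial `streak_needed` in the class below.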
+
+    Names:
+
+    - RichardHufford: [Axelrod1980b]_
+    """
+
+    name = "Second by RichardHufford"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.num_agreements = 2
+        self.last_four_agreements = [1] * 4
+        self.last_four_index = 0
+
+        self.streak_needed = 21
+        self.current_streak = 2
+        self.last_aberration = float("inf")
+        self.coop_after_ab_count = 2
+        self.def_after_ab_count = 2
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turn = len(self.history) + 1
+        if turn == 1:
+            return C
+
+        # Check if opponent agreed with us.
+        self.last_four_index = (self.last_four_index + 1) % 4
+        me_two_moves_ago = C
+        if turn > 2:
+            me_two_moves_ago = self.history[-2]
+        if me_two_moves_ago == opponent.history[-1]:
+            self.num_agreements += 1
+            self.last_four_agreements[self.last_four_index] = 1
+        else:
+            self.last_four_agreements[self.last_four_index] = 0
+
+        # Check if last_aberration is infinite.
+        # i.e. Not an aberration in last two turns.
+        if turn < self.last_aberration:
+            if opponent.history[-1] == C:
+                self.current_streak += 1
+            else:
+                self.current_streak = 0
+            if self.current_streak >= self.streak_needed:
+                self.last_aberration = turn
+                if self.current_streak == self.streak_needed:
+                    return D
+        elif turn == self.last_aberration + 2:
+            self.last_aberration = float("inf")
+            if opponent.history[-1] == C:
+                self.coop_after_ab_count += 1
+            else:
+                self.def_after_ab_count += 1
+            self.streak_needed = (
+                np.floor(20.0 * self.def_after_ab_count / self.coop_after_ab_count) + 1
+            )
+            self.current_streak = 0
+            return C
+
+        proportion_agree = self.num_agreements / turn
+        last_four_num = np.sum(self.last_four_agreements)
+        if proportion_agree > 0.9 and last_four_num >= 4:
+            return C
+        elif proportion_agree >= 0.625 and last_four_num >= 2:
+            return opponent.history[-1]
+        return D
+
+
+class SecondByYamachi(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Brian Yamachi (K64R)
+    and came in seventeenth in that tournament.
+
+    The strategy keeps track of play history through a variable called
+    `count_them_us_them`, which is a dict indexed by (X, Y, Z), where X is an
+    opponent's move and Y and Z are the following moves by this player and the
+    opponent, respectively. Each turn, we look at our opponent's move two
+    turns ago, call it X, and our move last turn, call it Y. If (X, Y, C) has
+    occurred at least as often as (X, Y, D), then Cooperate. Otherwise
+    Defect. [Note that this reflects likelihood of Cooperations or Defections
+    in opponent's previous move; we don't update `count_them_us_them` with
+    previous move until next turn.]
+
+    Starting with the 41st turn, there's a possibility to override this
+    behavior. If `portion_defect` is between 45% and 55% (exclusive), then
+    Defect, where `portion_defect` equals number of opponent defects plus 0.5
+    divided by the turn number (indexed by 1). When overriding this way, still
+    record `count_them_us_them` as though the strategy didn't override.
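Yamachi's core decision is a two-way table lookup. A minimal sketch of the rule described above (the function name and string moves are hypothetical; the class below keys its dict with Action values instead):

def yamachi_choice(counts: dict, them_two_ago: str, us_last: str) -> str:
    # Cooperate iff (X, Y, C) has been seen at least as often as (X, Y, D).
    if counts[(them_two_ago, us_last, "C")] >= counts[(them_two_ago, us_last, "D")]:
        return "C"
    return "D"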
+
+    Names:
+
+    - Yamachi: [Axelrod1980b]_
+    """
+
+    name = "Second by Yamachi"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.count_them_us_them = {
+            (C, C, C): 0,
+            (C, C, D): 0,
+            (C, D, C): 0,
+            (C, D, D): 0,
+            (D, C, C): 0,
+            (D, C, D): 0,
+            (D, D, C): 0,
+            (D, D, D): 0,
+        }
+        self.mod_history = list()  # type: List[Action]
+
+    def try_return(self, to_return, opp_def):
+        """
+        Return `to_return`, unless the turn is greater than 40 AND
+        `portion_defect` is between 45% and 55%.
+
+        In this case, still record the history as `to_return` so that the
+        modified behavior doesn't affect the calculation of
+        `count_them_us_them`.
+        """
+        turn = len(self.history) + 1
+
+        self.mod_history.append(to_return)
+
+        # In later turns, check if the opponent is close to 50/50
+        # If so, then override
+        if turn > 40:
+            portion_defect = (opp_def + 0.5) / turn
+            if 0.45 < portion_defect < 0.55:
+                return D
+
+        return to_return
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turn = len(self.history) + 1
+        if turn == 1:
+            return self.try_return(C, 0)
+
+        us_last = self.mod_history[-1]
+        them_two_ago, us_two_ago, them_three_ago = C, C, C
+        if turn >= 3:
+            them_two_ago = opponent.history[-2]
+            us_two_ago = self.mod_history[-2]
+        if turn >= 4:
+            them_three_ago = opponent.history[-3]
+
+        # Update history
+        if turn >= 3:
+            self.count_them_us_them[(them_three_ago, us_two_ago, them_two_ago)] += 1
+
+        if (
+            self.count_them_us_them[(them_two_ago, us_last, C)]
+            >= self.count_them_us_them[(them_two_ago, us_last, D)]
+        ):
+            return self.try_return(C, opponent.defections)
+        return self.try_return(D, opponent.defections)
+
+
+class SecondByColbert(FSMPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by William Colbert (K51R)
+    and came in eighteenth in that tournament.
+
+    In the first eight turns, this strategy Cooperates on all but the sixth
+    turn, in which it Defects. After that, the strategy responds to an
+    opponent Cooperation with a single Cooperation, and responds to a Defection
+    with a chain of responses: Defect, Defect, Cooperate, Cooperate. During
+    this chain, the strategy ignores opponent's moves.
+
+    Names:
+
+    - Colbert: [Axelrod1980b]_
+    """
+
+    name = "Second by Colbert"
+    classifier = {
+        "memory_depth": 4,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        transitions = (
+            (0, C, 1, C),
+            (0, D, 1, C),  # First 8 turns are special
+            (1, C, 2, C),
+            (1, D, 2, C),
+            (2, C, 3, C),
+            (2, D, 3, C),
+            (3, C, 4, C),
+            (3, D, 4, C),
+            (4, C, 5, D),
+            (4, D, 5, D),  # Defect on 6th turn.
+            (5, C, 6, C),
+            (5, D, 6, C),
+            (6, C, 7, C),
+            (6, D, 7, C),
+            (7, C, 7, C),
+            (7, D, 8, D),
+            (8, C, 9, D),
+            (8, D, 9, D),
+            (9, C, 10, C),
+            (9, D, 10, C),
+            (10, C, 7, C),
+            (10, D, 7, C),
+        )
+
+        super().__init__(transitions=transitions, initial_state=0, initial_action=C)
+
+
+class SecondByMikkelson(FSMPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Ray Mikkelson (K66R)
+    and came in twentieth in that tournament.
+
+    The strategy keeps track of a variable called `credit`, which determines if
+    the strategy will Cooperate, in the sense that if `credit` is positive,
+    then the strategy Cooperates. `credit` is initialized to 7. After the
+    first turn, `credit` increments if the opponent Cooperated last turn, and
+    decreases by two otherwise. `credit` is capped above by 8 and below by -7.
+    [`credit` is assessed as positive or negative after increasing based on the
+    opponent's last turn.]
+
+    If `credit` is non-positive within the first ten turns, then the strategy
+    Defects and `credit` is set to 4. If `credit` is non-positive later, then
+    the strategy Defects if and only if (total # opponent Defections) / (turn #)
+    is at least 15%. [Turn # starts at 1.]
+
+    Names:
+
+    - Mikkelson: [Axelrod1980b]_
+    """
+
+    name = "Second by Mikkelson"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.credit = 7
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turn = len(self.history) + 1
+        if turn == 1:
+            return C
+
+        if opponent.history[-1] == C:
+            self.credit += 1
+            if self.credit > 8:
+                self.credit = 8
+        else:
+            self.credit -= 2
+            if self.credit < -7:
+                self.credit = -7
+
+        if turn == 2:
+            return C
+        if self.credit > 0:
+            return C
+        if turn <= 10:
+            self.credit = 4
+            return D
+        if opponent.defections / turn >= 0.15:
+            return D
+        return C
+
+
+class SecondByRowsam(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Glen Rowsam (K58R)
+    and came in 21st in that tournament.
+
+    The strategy starts in Normal mode, where it cooperates every turn. Every
+    six turns it checks the score per turn. [Rather the score of all previous
+    turns divided by the turn number, which will be one more than the number of
+    turns scored.] If this measure is less than 2.5 (the strategy is doing
+    badly), it increases `distrust_points`. `distrust_points` is a variable
+    that starts at 0; if it ever exceeds 6 points, the strategy will enter
+    Defect mode and defect from then on. It will increase `distrust_points`
+    depending on the precise score per turn according to:
+
+    - 5 points if score per turn is less than 1.0
+    - 3 points if score per turn is less than 1.5, but at least 1.0
+    - 2 points if score per turn is less than 2.0, but at least 1.5
+    - 1 point if score per turn is less than 2.5, but at least 2.0
+
+    If `distrust_points` are increased, then the strategy defects on that turn,
+    then cooperates and defects on the next two turns. [Unless
+    `distrust_points` exceeds 6 points, in which case it will enter Defect mode
+    immediately.]
+
+    Every 18 turns in Normal mode, the strategy will decrement `distrust_points`
+    if it's at least 3. This represents a wearing off effect of distrust.
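+
+    An illustrative walk-through (not part of the original description): if
+    the cumulative score is 9 at the turn-6 check, the score per turn is
+    9 / 6 = 1.5, so `distrust_points` increases by 2 and the strategy plays
+    D, then C, then D over the next three turns before returning to Normal
+    mode.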
+
+    Names:
+
+    - Rowsam: [Axelrod1980b]_
+    """
+
+    name = "Second by Rowsam"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(["game"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.mode = "Normal"
+        self.distrust_points = 0
+        self.current_score = 0
+        self.opponent_score = 0
+
+    def _score_last_round(self, opponent: IpdPlayer):
+        """Updates the scores for each player."""
+        game = self.match_attributes["game"]
+        last_round = (self.history[-1], opponent.history[-1])
+        scores = game.score(last_round)
+        self.current_score += scores[0]
+        self.opponent_score += scores[1]
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turn = len(self.history) + 1
+        if turn > 1:
+            self._score_last_round(opponent)
+
+        if self.mode == "Defect":
+            return D
+
+        if self.mode == "Coop Def Cycle 1":
+            self.mode = "Coop Def Cycle 2"
+            return C
+
+        if self.mode == "Coop Def Cycle 2":
+            self.mode = "Normal"
+            return D
+
+        # Opportunity for distrust to cool off.
+        if turn % 18 == 0:
+            if self.distrust_points >= 3:
+                self.distrust_points -= 1
+
+        # In normal mode, only check for strategy updates every sixth turn.
+        if turn % 6 != 0:
+            return C
+
+        points_per_turn = self.current_score / turn  # Off by one
+        if points_per_turn < 1.0:
+            self.distrust_points += 5
+        elif points_per_turn < 1.5:
+            self.distrust_points += 3
+        elif points_per_turn < 2.0:
+            self.distrust_points += 2
+        elif points_per_turn < 2.5:
+            self.distrust_points += 1
+        else:
+            # Continue Cooperating
+            return C
+
+        if self.distrust_points >= 7:
+            self.mode = "Defect"
+        else:
+            # Def this time, then coop, then def.
+            self.mode = "Coop Def Cycle 1"
+        return D
+
+
+class SecondByAppold(IpdPlayer):
+    """
+    Strategy submitted to Axelrod's second tournament by Scott Appold (K88R) and
+    came in 22nd in that tournament.
+
+    Cooperates for first four turns.
+
+    After four turns, will cooperate once immediately following the first time
+    the opponent defects (starting with the opponent's fourth move). Otherwise
+    will cooperate with probability equal to:
+
+    - If this strategy defected two turns ago, the portion of the time
+      (historically) that the opponent followed a defection with a cooperation.
+    - If this strategy cooperated two turns ago, the portion of the time
+      (historically) that the opponent followed a cooperation with a
+      cooperation. The opponent's first move is counted as a response to a
+      cooperation.
+
+    Names:
+
+    - Appold: [Axelrod1980b]_
+    """
+
+    name = "Second by Appold"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+
+        # Probability of a cooperation after an x is:
+        # opp_c_after_x / total_num_of_x.
+        self.opp_c_after_x = {C: 0, D: 1}
+        # This is the total counted, so it doesn't include the most recent.
+ self.total_num_of_x = {C: 0, D: 1} + + self.first_opp_def = False + + def strategy(self, opponent: IpdPlayer) -> Action: + turn = len(self.history) + 1 + + us_two_turns_ago = C if turn <= 2 else self.history[-2] + + # Update trackers + if turn > 1: + self.total_num_of_x[us_two_turns_ago] += 1 + if turn > 1 and opponent.history[-1] == C: + self.opp_c_after_x[us_two_turns_ago] += 1 + + if turn <= 4: + return C + + if opponent.history[-1] == D and not self.first_opp_def: + self.first_opp_def = True + return C + + # Calculate the probability that the opponent cooperated last turn given + # what we know two turns ago. + prob_coop = self.opp_c_after_x[us_two_turns_ago] / self.total_num_of_x[ + us_two_turns_ago] + return random_choice(prob_coop) diff --git a/axelrod/strategies/backstabber.py b/axelrod/strategies/backstabber.py new file mode 100644 index 000000000..5fdb5e900 --- /dev/null +++ b/axelrod/strategies/backstabber.py @@ -0,0 +1,106 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.strategy_transformers import FinalTransformer + +C, D = Action.C, Action.D + + +@FinalTransformer((D, D), name_prefix=None) # End with two defections +class BackStabber(IpdPlayer): + """ + Forgives the first 3 defections but on the fourth + will defect forever. Defects on the last 2 rounds unconditionally. + + Names: + + - Backstabber: Original name by Thomas Campbell + """ + + name = "BackStabber" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"length"}, + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + return _backstabber_strategy(opponent) + + +@FinalTransformer((D, D), name_prefix=None) # End with two defections +class DoubleCrosser(IpdPlayer): + """ + Forgives the first 3 defections but on the fourth + will defect forever. Defects on the last 2 rounds unconditionally. + + If 8 <= current round <= 180, + if the opponent did not defect in the first 7 rounds, + the player will only defect after the opponent has defected twice in-a-row. + + Names: + + - Double Crosser: Original name by Thomas Campbell + """ + + name = "DoubleCrosser" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"length"}, + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if _opponent_triggers_alt_strategy(opponent): + return _alt_strategy(opponent) + return _backstabber_strategy(opponent) + + +def _backstabber_strategy(opponent: IpdPlayer) -> Action: + """ + Cooperates until opponent defects a total of four times, then always + defects. + """ + if not opponent.history: + return C + if opponent.defections > 3: + return D + return C + + +def _alt_strategy(opponent: IpdPlayer) -> Action: + """ + If opponent's previous two plays were defect, then defects on next round. + Otherwise, cooperates. + """ + previous_two_plays = opponent.history[-2:] + if previous_two_plays == [D, D]: + return D + return C + + +def _opponent_triggers_alt_strategy(opponent: IpdPlayer) -> bool: + """ + If opponent did not defect in first 7 rounds and the current round is from 8 + to 180, return True. Else, return False. 
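+
+    For example (illustrative): on round 100 against an opponent who
+    cooperated throughout the first 7 rounds, this returns True and the
+    alternate strategy is used.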
+    """
+    before_alt_strategy = first_n_rounds = 7
+    last_round_of_alt_strategy = 180
+    if _opponent_defected_in_first_n_rounds(opponent, first_n_rounds):
+        return False
+    current_round = len(opponent.history) + 1
+    return before_alt_strategy < current_round <= last_round_of_alt_strategy
+
+
+def _opponent_defected_in_first_n_rounds(opponent: IpdPlayer, first_n_rounds: int) -> bool:
+    """
+    If opponent defected in the first N rounds, return True. Else return False.
+    """
+    return D in opponent.history[:first_n_rounds]
diff --git a/axelrod/strategies/better_and_better.py b/axelrod/strategies/better_and_better.py
new file mode 100644
index 000000000..2c0216517
--- /dev/null
+++ b/axelrod/strategies/better_and_better.py
@@ -0,0 +1,32 @@
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice
+
+C, D = Action.C, Action.D
+
+
+class BetterAndBetter(IpdPlayer):
+    """
+    Defects with probability (1000 - current turn) / 1000, so it becomes less
+    and less likely to defect as the match goes on.
+
+    Names:
+
+    - Better and Better: [Prison1998]_
+
+    """
+
+    name = "Better and Better"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        current_round = len(self.history) + 1
+        probability = current_round / 1000
+        return random_choice(probability)
diff --git a/axelrod/strategies/bush_mosteller.py b/axelrod/strategies/bush_mosteller.py
new file mode 100644
index 000000000..b62800926
--- /dev/null
+++ b/axelrod/strategies/bush_mosteller.py
@@ -0,0 +1,132 @@
+import random
+
+from axelrod import random_choice
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class BushMosteller(IpdPlayer):
+    """
+    A player based on the Bush-Mosteller reinforcement learning algorithm: it
+    decides what to play based only on its own previous payoffs.
+
+    The probability of playing C or D is updated using a stimulus, which
+    represents a win or a loss of value based on the payoff of its previous
+    play. The more a play is rewarded over the rounds, the more the player is
+    tempted to use it.
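+
+    As a sketch of the update rule implemented below: after receiving a
+    payoff p, the stimulus is
+
+        s = (p - aspiration_level) / abs(max(R, P, S, T) - aspiration_level)
+
+    and, when the previous play was C, the cooperation probability is updated
+    as
+
+        c_prob += learning_rate * s * (1 - c_prob)  # if s >= 0
+        c_prob += learning_rate * s * c_prob        # if s < 0
+
+    (symmetrically for d_prob after a D).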
+
+    Names:
+
+    - Bush Mosteller: [Luis2008]_
+    """
+
+    name = "Bush Mosteller"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(["game"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(
+        self,
+        c_prob: float = 0.5,
+        d_prob: float = 0.5,
+        aspiration_level_divider: float = 3.0,
+        learning_rate: float = 0.5,
+    ) -> None:
+        """
+        Parameters
+
+        c_prob: float, 0.5
+            Probability to play C; is modified during the match.
+        d_prob: float, 0.5
+            Probability to play D; is modified during the match.
+        aspiration_level_divider: float, 3.0
+            Value that regulates the aspiration level; isn't modified during
+            the match.
+        learning_rate: float, [0, 1]
+            Percentage of learning speed.
+
+        Variables / Constants
+
+        stimulus (Var: [-1, 1]): float
+            Value that impacts the changes of action probability.
+        _aspiration_level: float
+            Value that impacts the stimulus changes; isn't modified during the
+            match.
+        _init_c_prob, _init_d_prob: float
+            Values used to properly set up reset(), set to the original
+            probabilities.
+        """
+        super().__init__()
+        self._c_prob, self._d_prob = c_prob, d_prob
+        self._init_c_prob, self._init_d_prob = c_prob, d_prob
+        self._aspiration_level = abs(
+            (max(self.match_attributes["game"].RPST()) / aspiration_level_divider)
+        )
+
+        self._stimulus = 0.0
+        self._learning_rate = learning_rate
+
+    def stimulus_update(self, opponent: IpdPlayer):
+        """
+        Updates the stimulus attribute based on the opponent's history. Used by
+        the strategy.
+
+        Parameters
+
+        opponent : IpdPlayer
+            The current opponent
+        """
+        game = self.match_attributes["game"]
+
+        last_round = (self.history[-1], opponent.history[-1])
+
+        scores = game.score(last_round)
+
+        previous_play = scores[0]
+
+        self._stimulus = (previous_play - self._aspiration_level) / abs(
+            (max(self.match_attributes["game"].RPST()) - self._aspiration_level)
+        )
+        # Lowest range for stimulus.
+        # The highest doesn't need to be tested since the numerator is divided
+        # by the highest possible reward.
+        if self._stimulus < -1:
+            self._stimulus = -1
+
+        # Updates probability following previous choice C
+        if self.history[-1] == C:
+
+            if self._stimulus >= 0:
+                self._c_prob += (
+                    self._learning_rate * self._stimulus * (1 - self._c_prob)
+                )
+
+            elif self._stimulus < 0:
+                self._c_prob += self._learning_rate * self._stimulus * self._c_prob
+
+        # Updates probability following previous choice D
+        if self.history[-1] == D:
+            if self._stimulus >= 0:
+                self._d_prob += (
+                    self._learning_rate * self._stimulus * (1 - self._d_prob)
+                )
+
+            elif self._stimulus < 0:
+                self._d_prob += self._learning_rate * self._stimulus * self._d_prob
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+
+        # First turn
+        if len(self.history) == 0:
+            return random_choice(self._c_prob / (self._c_prob + self._d_prob))
+
+        # Update the stimulus based on its own latest choice.
+        self.stimulus_update(opponent)
+
+        return random_choice(self._c_prob / (self._c_prob + self._d_prob))
diff --git a/axelrod/strategies/calculator.py b/axelrod/strategies/calculator.py
new file mode 100644
index 000000000..162cf1b21
--- /dev/null
+++ b/axelrod/strategies/calculator.py
@@ -0,0 +1,55 @@
+from axelrod._strategy_utils import detect_cycle
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+from .axelrod_first import FirstByJoss as Joss
+
+C, D = Action.C, Action.D
+
+
+class Calculator(IpdPlayer):
+    """
+    Plays like (Hard) Joss for the first 20 rounds.
If periodic behavior is + detected, defect forever. Otherwise play TFT. + + + Names: + + - Calculator: [Prison1998]_ + """ + + name = "Calculator" + classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + super().__init__() + self.joss_instance = Joss() + + def strategy(self, opponent: IpdPlayer) -> Action: + turn = len(self.history) + if turn > 0: + self.joss_instance.history.append(self.history[-1], + opponent.history[-1]) + if turn == 20: + self.cycle = detect_cycle(opponent.history) + return self.extended_strategy(opponent) + if turn > 20: + return self.extended_strategy(opponent) + else: + play = self.joss_instance.strategy(opponent) + return play + + def extended_strategy(self, opponent: IpdPlayer) -> Action: + if self.cycle: + return D + else: + # TFT + return D if opponent.history[-1:] == [D] else C diff --git a/axelrod/strategies/cooperator.py b/axelrod/strategies/cooperator.py new file mode 100644 index 000000000..5a6675449 --- /dev/null +++ b/axelrod/strategies/cooperator.py @@ -0,0 +1,77 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class Cooperator(IpdPlayer): + """A player who only ever cooperates. + + Names: + + - Cooperator: [Axelrod1984]_ + - ALLC: [Press2012]_ + - Always cooperate: [Mittal2009]_ + """ + + name = "Cooperator" + classifier = { + "memory_depth": 0, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + return C + + +class TrickyCooperator(IpdPlayer): + """ + A cooperator that is trying to be tricky. + + Names: + + - Tricky Cooperator: Original name by Karol Langner + """ + + name = "Tricky Cooperator" + classifier = { + "memory_depth": 10, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + _min_history_required_to_try_trickiness = 3 + _max_history_depth_for_trickiness = -10 + + def strategy(self, opponent: IpdPlayer) -> Action: + """Almost always cooperates, but will try to trick the opponent by + defecting. + + Defect once in a while in order to get a better payout. + After 3 rounds, if opponent has not defected to a max history depth of + 10, defect. 
+ """ + if self._has_played_enough_rounds_to_be_tricky() and self._opponents_has_cooperated_enough_to_be_tricky( + opponent + ): + return D + return C + + def _has_played_enough_rounds_to_be_tricky(self): + return len(self.history) >= self._min_history_required_to_try_trickiness + + def _opponents_has_cooperated_enough_to_be_tricky(self, opponent): + rounds_to_be_checked = opponent.history[ + self._max_history_depth_for_trickiness : + ] + return D not in rounds_to_be_checked diff --git a/axelrod/strategies/cycler.py b/axelrod/strategies/cycler.py new file mode 100644 index 000000000..a892db1f4 --- /dev/null +++ b/axelrod/strategies/cycler.py @@ -0,0 +1,270 @@ +import copy +import itertools +import random +from typing import List, Tuple + +from axelrod.action import Action, actions_to_str, str_to_actions +from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_lists +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D +actions = (C, D) + + +class AntiCycler(IpdPlayer): + """ + A player that follows a sequence of plays that contains no cycles: + CDD CD CCD CCCD CCCCD ... + + Names: + + - Anti Cycler: Original name by Marc Harper + """ + + name = "AntiCycler" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + super().__init__() + self.cycle_length = 1 + self.cycle_counter = 0 + self.first_three = self._get_first_three() + + @staticmethod + def _get_first_three() -> List[Action]: + return [C, D, D] + + def strategy(self, opponent: IpdPlayer) -> Action: + while self.first_three: + return self.first_three.pop(0) + if self.cycle_counter < self.cycle_length: + self.cycle_counter += 1 + return C + else: + self.cycle_length += 1 + self.cycle_counter = 0 + return D + + +class Cycler(IpdPlayer): + """ + A player that repeats a given sequence indefinitely. + + Names: + + - Cycler: Original name by Marc Harper + """ + + name = "Cycler" + classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, cycle: str = "CCD") -> None: + """This strategy will repeat the parameter `cycle` endlessly, + e.g. C C D C C D C C D ... 
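+
+        A minimal usage sketch (hypothetical session):
+
+            player = Cycler("CCD")
+            # Successive calls to player.strategy(opponent) yield
+            # C, C, D, C, C, D, ...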
+ + Special Cases + ------------- + Cooperator is equivalent to Cycler("C") + Defector is equivalent to Cycler("D") + Alternator is equivalent to Cycler("CD") + + """ + super().__init__() + self.cycle = cycle + self.set_cycle(cycle=cycle) + + def strategy(self, opponent: IpdPlayer) -> Action: + return next(self.cycle_iter) + + def set_cycle(self, cycle: str): + """Set or change the cycle.""" + self.cycle = cycle + self.cycle_iter = itertools.cycle(str_to_actions(self.cycle)) + self.classifier["memory_depth"] = len(cycle) - 1 + + +class EvolvableCycler(Cycler, EvolvablePlayer): + """Evolvable version of Cycler.""" + + name = "EvolvableCycler" + + def __init__( + self, + cycle: str = None, + cycle_length: int = None, + mutation_probability: float = 0.2, + mutation_potency: int = 1 + ) -> None: + cycle, cycle_length = self._normalize_parameters(cycle, cycle_length) + # The following __init__ sets self.cycle = cycle + Cycler.__init__(self, cycle=cycle) + EvolvablePlayer.__init__(self) + # Overwrite init_kwargs in the case that we generated a new cycle from cycle_length + self.overwrite_init_kwargs( + cycle=cycle, + cycle_length=cycle_length) + self.mutation_probability = mutation_probability + self.mutation_potency = mutation_potency + + @classmethod + def _normalize_parameters(cls, cycle=None, cycle_length=None) -> Tuple[str, int]: + """Compute other parameters from those that may be missing, to ensure proper cloning.""" + if not cycle: + if not cycle_length: + raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableCycler") + cycle = cls._generate_random_cycle(cycle_length) + cycle_length = len(cycle) + return cycle, cycle_length + + @classmethod + def _generate_random_cycle(cls, cycle_length: int) -> str: + """ + Generate a sequence of random moves + """ + return actions_to_str(random.choice(actions) for _ in range(cycle_length)) + + def mutate(self) -> EvolvablePlayer: + """ + Basic mutation which may change any random actions in the sequence. + """ + if random.random() <= self.mutation_probability: + mutated_sequence = list(str_to_actions(self.cycle)) + for _ in range(self.mutation_potency): + index_to_change = random.randint(0, len(mutated_sequence) - 1) + mutated_sequence[index_to_change] = mutated_sequence[index_to_change].flip() + cycle = actions_to_str(mutated_sequence) + else: + cycle = self.cycle + cycle, _ = self._normalize_parameters(cycle) + return self.create_new(cycle=cycle) + + def crossover(self, other) -> EvolvablePlayer: + """ + Creates and returns a new IpdPlayer instance with a single crossover point. 
+        """
+        if other.__class__ != self.__class__:
+            raise TypeError("Crossover must be between the same player classes.")
+        cycle_list = crossover_lists(self.cycle, other.cycle)
+        cycle = "".join(cycle_list)
+        cycle, _ = self._normalize_parameters(cycle)
+        return self.create_new(cycle=cycle)
+
+
+class CyclerDC(Cycler):
+    """
+    Cycles D, C
+
+    Names:
+
+    - Cycler DC: Original name by Marc Harper
+    """
+
+    name = "Cycler DC"
+    classifier = copy.copy(Cycler.classifier)
+    classifier["memory_depth"] = 1
+
+    def __init__(self) -> None:
+        super().__init__(cycle="DC")
+
+
+class CyclerCCD(Cycler):
+    """
+    Cycles C, C, D
+
+    Names:
+
+    - Cycler CCD: Original name by Marc Harper
+    - Periodic player CCD: [Mittal2009]_
+    """
+
+    name = "Cycler CCD"
+    classifier = copy.copy(Cycler.classifier)
+    classifier["memory_depth"] = 2
+
+    def __init__(self) -> None:
+        super().__init__(cycle="CCD")
+
+
+class CyclerDDC(Cycler):
+    """
+    Cycles D, D, C
+
+    Names:
+
+    - Cycler DDC: Original name by Marc Harper
+    - Periodic player DDC: [Mittal2009]_
+    """
+
+    name = "Cycler DDC"
+    classifier = copy.copy(Cycler.classifier)
+    classifier["memory_depth"] = 2
+
+    def __init__(self) -> None:
+        super().__init__(cycle="DDC")
+
+
+class CyclerCCCD(Cycler):
+    """
+    Cycles C, C, C, D
+
+    Names:
+
+    - Cycler CCCD: Original name by Marc Harper
+    """
+
+    name = "Cycler CCCD"
+    classifier = copy.copy(Cycler.classifier)
+    classifier["memory_depth"] = 3
+
+    def __init__(self) -> None:
+        super().__init__(cycle="CCCD")
+
+
+class CyclerCCCCCD(Cycler):
+    """
+    Cycles C, C, C, C, C, D
+
+    Names:
+
+    - Cycler CCCCCD: Original name by Marc Harper
+    """
+
+    name = "Cycler CCCCCD"
+    classifier = copy.copy(Cycler.classifier)
+    classifier["memory_depth"] = 5
+
+    def __init__(self) -> None:
+        super().__init__(cycle="CCCCCD")
+
+
+class CyclerCCCDCD(Cycler):
+    """
+    Cycles C, C, C, D, C, D
+
+    Names:
+
+    - Cycler CCCDCD: Original name by Marc Harper
+    """
+
+    name = "Cycler CCCDCD"
+    classifier = copy.copy(Cycler.classifier)
+    classifier["memory_depth"] = 5
+
+    def __init__(self) -> None:
+        super().__init__(cycle="CCCDCD")
diff --git a/axelrod/strategies/darwin.py b/axelrod/strategies/darwin.py
new file mode 100644
index 000000000..a86d394e9
--- /dev/null
+++ b/axelrod/strategies/darwin.py
@@ -0,0 +1,97 @@
+"""
+The player class in this module does not obey standard rules of the IPD (as
+indicated by their classifier). We do not recommend putting a lot of time in to
+optimising it.
+"""
+from collections import defaultdict
+from typing import Optional
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class Darwin(IpdPlayer):
+    """
+    A strategy which accumulates a record (the 'genome') of what the most
+    favourable response in the previous round should have been, and naively
+    assumes that this will remain the correct response at the same round of
+    future trials.
+
+    This 'genome' is preserved between opponents, rounds and repetitions of
+    the tournament. It becomes a characteristic of the type and so a single
+    version of this is shared by all instances for each loading of the class.
+
+    As this results in information being preserved between tournaments, this
+    is classified as a cheating strategy!
+
+    If no record yet exists, the opponent's response from the previous round
+    is returned.
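+
+    An illustrative consequence of the code below: on trial t, if
+    `Darwin.genome` already holds an entry at index t it is replayed;
+    otherwise the opponent's last move is appended and played, so the first
+    visit to each depth behaves like Tit For Tat.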
+
+    Names:
+
+    - Darwin: Original name by Paul Slavin
+    """
+
+    name = "Darwin"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "inspects_source": True,  # Checks to see if opponent is using simulated matches.
+        "long_run_time": False,
+        "makes_use_of": set(),
+        "manipulates_source": False,
+        "manipulates_state": True,  # Does not reset properly.
+    }
+
+    genome = [C]
+    valid_callers = ["play"]  # What functions may invoke our strategy.
+
+    def __init__(self) -> None:
+        self.outcomes = None  # type: Optional[dict]
+        self.response = Darwin.genome[0]
+        super().__init__()
+
+    def receive_match_attributes(self):
+        self.outcomes = self.match_attributes["game"].scores
+
+    @staticmethod
+    def foil_strategy_inspection() -> Action:
+        """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead"""
+        return C
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        trial = len(self.history)
+
+        if trial > 0:
+            assert self.outcomes is not None
+            outcome = self.outcomes[(self.history[-1], opponent.history[-1])]
+            self.mutate(outcome, trial)
+            # Update genome with selected response
+            Darwin.genome[trial - 1] = self.response
+
+        if trial < len(Darwin.genome):
+            # Return response from genome where available...
+            current = Darwin.genome[trial]
+        else:
+            # ...otherwise use Tit-for-Tat
+            Darwin.genome.append(opponent.history[-1])
+            current = opponent.history[-1]
+
+        return current
+
+    def reset(self):
+        """Reset instance properties."""
+        super().reset()
+        Darwin.genome[0] = C  # Ensure initial Cooperate
+
+    def mutate(self, outcome: tuple, trial: int) -> None:
+        """Select response according to outcome."""
+        if outcome[0] < 3 and (len(Darwin.genome) >= trial):
+            self.response = D if Darwin.genome[trial - 1] == C else C
+
+    @staticmethod
+    def reset_genome() -> None:
+        """For use in testing methods."""
+        Darwin.genome = [C]
diff --git a/axelrod/strategies/dbs.py b/axelrod/strategies/dbs.py
new file mode 100644
index 000000000..346163962
--- /dev/null
+++ b/axelrod/strategies/dbs.py
@@ -0,0 +1,441 @@
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class DBS(IpdPlayer):
+    """
+    A strategy that learns the opponent's strategy and uses symbolic noise
+    detection to detect whether anomalies in the opponent's behavior are
+    deliberate or accidental. From the learned opponent's strategy, a tree
+    search is used to choose the best move.
+
+    Default values for the parameters are the suggested values in the article.
+    When noise increases you can try to lower violation_threshold and
+    reject_threshold.
+
+    Names
+
+    - Desired Belief Strategy: [Au2006]_
+    """
+
+    # These are various properties for the strategy
+    name = "DBS"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": True,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(
+        self,
+        discount_factor=0.75,
+        promotion_threshold=3,
+        violation_threshold=4,
+        reject_threshold=3,
+        tree_depth=5,
+    ):
+        """
+        Parameters
+
+        discount_factor: float, optional
+            Used when computing discounted frequencies to learn opponent's
+            strategy. Must be between 0 and 1. The default is 0.75.
+        promotion_threshold: int, optional
+            Number of successive observations needed to promote an opponent
+            behavior as a deterministic rule. The default is 3.
+        violation_threshold: int, optional
+            Number of observations needed to consider that the opponent's
+            strategy has
You can lower it when noise increases. The default is 4, + which is good for a noise level of .1. + reject_threshold: int, optional + Number of observations before forgetting opponent's previous + strategy. You can lower it when noise increases. The default is 3, + which is good for a noise level of .1. + tree_depth: int, optional + Depth of the tree for the tree-search algorithm. Higher depth means + more time to compute the move. The default is 5. + """ + super().__init__() + + # The opponent's behavior is represented by a 3 dicts: Rd, Rc, and Rp. + # Its behavior is modeled by a set of rules. A rule is the move that + # the opponent will play (C or D or a probability to play C) after a + # given outcome (for instance after (C, D)). + # A rule can be deterministic or probabilistic: + # - Rc is the set of deterministic rules + # - Rp is the set of probabilistic rules + # - Rd is the default rule set which is used for initialization but also + # keeps track of previous policies when change in the opponent behavior + # happens, in order to have a smooth transition. + # - Pi is a set of rules that aggregates all above sets of rules in + # order to fully model the opponent's behavior. + + # Default rule set is Rd. + # Default opponent's policy is TitForTat. + self.Rd = create_policy(1, 1, 0, 0) + # Set of current deterministic rules Rc + self.Rc = {} + # Aggregated rule set Pi + self.Pi = self.Rd + # For each rule in Rd we need to count the number of successive + # violations. Those counts are saved in violation_counts. + self.violation_counts = {} + self.reject_threshold = reject_threshold + self.violation_threshold = violation_threshold + self.promotion_threshold = promotion_threshold + self.tree_depth = tree_depth + # v is a violation count used to know when to clean the default rule + # set Rd + self.v = 0 + # A discount factor for computing the probabilistic rules + self.alpha = discount_factor + + # The probabilistic rule set Rp is not saved as an attribute, but each + # rule is computed only when needed. The rules are computed as + # discounted frequencies of opponent's past moves. To compute the + # discounted frequencies, we need to keep up to date an history of what + # has been played following each outcome (or condition): + # We save it as a dict history_by_cond; keys are conditions + # (ex (C, C)) and values are a tuple of 2 lists (G, F) + # for a condition j and an iteration i in the match: + # G[i] = 1 if cond j was True at turn i-1 and C has been played + # by the opponent; else G[i] = 0 + # F[i] = 1 if cond j was True at turn i-1; else F[i] = 0 + # This representation makes the computing of discounted frequencies + # easy and efficient. + # The initial hypothesized policy is TitForTat. + self.history_by_cond = { + (C, C): ([1], [1]), + (C, D): ([1], [1]), + (D, C): ([0], [1]), + (D, D): ([0], [1]), + } + + def should_promote(self, r_plus, promotion_threshold=3): + """ + This function determines if the move r_plus is a deterministic + behavior of the opponent, and then returns True, or if r_plus + is due to a random behavior (or noise) which would require a + probabilistic rule, in which case it returns False. + + To do so it looks into the game history: if the k last times + when the opponent was in the same situation than in r_plus it + played the same thing then then r_plus is considered as a + deterministic rule (where K is the user-defined promotion_threshold). 
+
+        Parameters
+
+        r_plus: tuple of (tuple of actions.Action, actions.Action)
+            example: ((C, C), D)
+            r_plus represents one outcome of the history, and the
+            following move played by the opponent.
+        promotion_threshold: int, optional
+            Number of successive observations needed to promote an
+            opponent behavior as a deterministic rule. Default is 3.
+        """
+        if r_plus[1] == C:
+            opposite_action = 0
+        elif r_plus[1] == D:
+            opposite_action = 1
+        k = 1
+        count = 0
+        # We iterate on the history, while we do not encounter
+        # counter-examples of r_plus, i.e. while we do not encounter
+        # r_minus
+        while k < len(self.history_by_cond[r_plus[0]][0]) and not (
+            self.history_by_cond[r_plus[0]][0][1:][-k] == opposite_action
+            and self.history_by_cond[r_plus[0]][1][1:][-k] == 1
+        ):
+            # We count every occurrence of r_plus in history
+            if self.history_by_cond[r_plus[0]][1][1:][-k] == 1:
+                count += 1
+            k += 1
+        if count >= promotion_threshold:
+            return True
+        return False
+
+    def should_demote(self, r_minus, violation_threshold=4):
+        """
+        Checks if the number of successive violations of a deterministic
+        rule (in the opponent's behavior) exceeds the user-defined
+        violation_threshold.
+        """
+        return self.violation_counts[r_minus[0]] >= violation_threshold
+
+    def update_history_by_cond(self, opponent_history):
+        """
+        Updates self.history_by_cond between turns of the game.
+        """
+        two_moves_ago = (self.history[-2], opponent_history[-2])
+        for outcome, GF in self.history_by_cond.items():
+            G, F = GF
+            if outcome == two_moves_ago:
+                if opponent_history[-1] == C:
+                    G.append(1)
+                else:
+                    G.append(0)
+                F.append(1)
+            else:
+                G.append(0)
+                F.append(0)
+
+    def compute_prob_rule(self, outcome, alpha=1):
+        """
+        Uses the game history to compute the probability of the opponent
+        playing C in the given outcome situation (example: outcome = (C, C)).
+        When alpha = 1, the result is approximately equal to the frequency
+        with which the opponent has played C after this outcome. alpha is a
+        discount factor that gives more weight to recent events than earlier
+        ones.
+
+        Parameters
+
+        outcome: tuple of two actions.Action
+        alpha: float, optional. Discount factor. Default is 1.
+        """
+        G = self.history_by_cond[outcome][0]
+        F = self.history_by_cond[outcome][1]
+        discounted_g = 0
+        discounted_f = 0
+        alpha_k = 1
+        for g, f in zip(G[::-1], F[::-1]):
+            discounted_g += alpha_k * g
+            discounted_f += alpha_k * f
+            alpha_k = alpha * alpha_k
+        p_cond = discounted_g / discounted_f
+        return p_cond
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # First move
+        if not self.history:
+            return C
+        if len(opponent.history) >= 2:
+            # We begin by updating history_by_cond (i.e. updating Rp)
+            self.update_history_by_cond(opponent.history)
+            two_moves_ago = (self.history[-2], opponent.history[-2])
+            # r_plus is the information of what the opponent just played,
+            # following the previous outcome two_moves_ago.
+            r_plus = (two_moves_ago, opponent.history[-1])
+            # r_minus is the opposite move, following the same outcome.
+            r_minus = (two_moves_ago, ({C, D} - {opponent.history[-1]}).pop())
+
+            # If r_plus and r_minus are not in the current set of deterministic
+            # rules, we check if r_plus should be added to it (following the
+            # rule defined in the should_promote function).
+            if r_plus[0] not in self.Rc.keys():
+                if self.should_promote(r_plus, self.promotion_threshold):
+                    self.Rc[r_plus[0]] = action_to_int(r_plus[1])
+                    self.violation_counts[r_plus[0]] = 0
+
+            # If r+ or r- in Rc
+            if r_plus[0] in self.Rc.keys():
+                to_check = C if self.Rc[r_plus[0]] == 1 else D
+                # (if r+ in Rc)
+                if r_plus[1] == to_check:
+                    # Set the violation count of r+ to 0.
+                    self.violation_counts[r_plus[0]] = 0
+                # if r- in Rc
+                elif r_minus[1] == to_check:
+                    # Increment violation count of r-.
+                    self.violation_counts[r_plus[0]] += 1
+                    # As we observe that the behavior of the opponent is
+                    # opposed to a rule modeled in Rc, we check if the number
+                    # of consecutive violations of this rule exceeds a
+                    # threshold. If it does, we clean Rc, but we keep the rules
+                    # of Rc in Rd for a smooth transition.
+                    if self.should_demote(r_minus, self.violation_threshold):
+                        self.Rd.update(self.Rc)
+                        self.Rc.clear()
+                        self.violation_counts.clear()
+                        self.v = 0
+            # r+ in Rc.
+            r_plus_in_Rc = r_plus[0] in self.Rc.keys() and self.Rc[
+                r_plus[0]
+            ] == action_to_int(r_plus[1])
+            # r- in Rd
+            r_minus_in_Rd = r_minus[0] in self.Rd.keys() and self.Rd[
+                r_minus[0]
+            ] == action_to_int(r_minus[1])
+
+            # Increment number of violations of Rd rules.
+            if r_minus_in_Rd:
+                self.v += 1
+            # If the number of violations exceeds a threshold, clean Rd.
+            if (self.v > self.reject_threshold) or (r_plus_in_Rc and r_minus_in_Rd):
+                self.Rd.clear()
+                self.v = 0
+
+            # Compute Rp for conditions that are neither in Rc nor in Rd.
+            Rp = {}
+            all_cond = [(C, C), (C, D), (D, C), (D, D)]
+            for outcome in all_cond:
+                if (outcome not in self.Rc.keys()) and (outcome not in self.Rd.keys()):
+                    # Compute opponent's C answer probability.
+                    Rp[outcome] = self.compute_prob_rule(outcome, self.alpha)
+
+            # We aggregate the rules of Rc, Rd, and Rp in a set of rules Pi.
+            self.Pi = {}
+            # The algorithm makes sure that a rule cannot be in two different
+            # sets of rules so we do not need to check for duplicates.
+            self.Pi.update(self.Rc)
+            self.Pi.update(self.Rd)
+            self.Pi.update(Rp)
+
+        # React to the opponent's last move
+        return move_gen(
+            (self.history[-1], opponent.history[-1]),
+            self.Pi,
+            depth_search_tree=self.tree_depth,
+        )
+
+
+class Node(object):
+    """
+    Nodes used to build a tree for the tree-search procedure. The tree has
+    Deterministic and Stochastic nodes, as the opponent's strategy is learned
+    as a probability distribution.
+    """
+
+    # abstract method
+    def get_siblings(self):
+        raise NotImplementedError("subclasses must override get_siblings()!")
+
+    # abstract method
+    def is_stochastic(self):
+        raise NotImplementedError("subclasses must override is_stochastic()!")
+
+
+class StochasticNode(Node):
+    """
+    A node with a probability pC of reaching its cooperation sibling. A
+    StochasticNode can be written (C, X) or (D, X), with X = C with a
+    probability pC, else X = D.
+    """
+
+    def __init__(self, own_action, pC, depth):
+        self.pC = pC
+        self.depth = depth
+        self.own_action = own_action
+
+    def get_siblings(self):
+        """
+        Returns the sibling nodes of the current StochasticNode. There are two
+        siblings, which are DeterministicNodes; their depth is equal to the
+        current node's depth + 1.
+        """
+        opponent_c_choice = DeterministicNode(self.own_action, C, self.depth + 1)
+        opponent_d_choice = DeterministicNode(self.own_action, D, self.depth + 1)
+        return opponent_c_choice, opponent_d_choice
+
+    def is_stochastic(self):
+        """Returns True if self is a StochasticNode."""
+        return True
+
+
+class DeterministicNode(Node):
+    """
+    Nodes (C, C), (C, D), (D, C), or (D, D) with deterministic choice
+    for siblings.
+    """
+
+    def __init__(self, action1, action2, depth):
+        self.action1 = action1
+        self.action2 = action2
+        self.depth = depth
+
+    def get_siblings(self, policy):
+        """
+        Returns the sibling nodes of the current DeterministicNode. Builds 2
+        siblings (C, X) and (D, X) that are StochasticNodes. Those siblings are
+        of the same depth as the current node. Their probabilities pC are
+        defined by the policy argument.
+        """
+        c_choice = StochasticNode(C, policy[(self.action1, self.action2)], self.depth)
+        d_choice = StochasticNode(D, policy[(self.action1, self.action2)], self.depth)
+        return c_choice, d_choice
+
+    def is_stochastic(self):
+        """Returns True if self is a StochasticNode."""
+        return False
+
+    def get_value(self):
+        values = {(C, C): 3, (C, D): 0, (D, C): 5, (D, D): 1}
+        return values[(self.action1, self.action2)]
+
+
+def create_policy(pCC, pCD, pDC, pDD):
+    """
+    Creates a dict that represents a Policy. As defined in the reference, a
+    Policy is a set of (prev_move, p) pairs where p is the probability of
+    cooperating after prev_move, and prev_move can be (C, C), (C, D), (D, C)
+    or (D, D).
+
+    Parameters
+
+    pCC, pCD, pDC, pDD : float
+        Must be between 0 and 1.
+    """
+    return {(C, C): pCC, (C, D): pCD, (D, C): pDC, (D, D): pDD}
+
+
+def action_to_int(action):
+    if action == C:
+        return 1
+    return 0
+
+
+def minimax_tree_search(begin_node, policy, max_depth):
+    """
+    Tree-search function (minimax search procedure) that recursively builds
+    and solves the tree corresponding to the opponent's policy. At depth 0 it
+    returns a tuple of two floats, the utility of playing C and the utility
+    of playing D; at deeper nodes it returns a single expected value.
+    """
+    if begin_node.is_stochastic():
+        # A stochastic node cannot have the same depth as its parent node,
+        # hence there is no need to check that its depth is < max_depth.
+        siblings = begin_node.get_siblings()
+        # The stochastic node value is the expected value of siblings.
+        node_value = begin_node.pC * minimax_tree_search(
+            siblings[0], policy, max_depth
+        ) + (1 - begin_node.pC) * minimax_tree_search(siblings[1], policy, max_depth)
+        return node_value
+    else:  # Deterministic node
+        if begin_node.depth == max_depth:
+            # This is an end node, we just return its outcome value.
+            return begin_node.get_value()
+        elif begin_node.depth == 0:
+            siblings = begin_node.get_siblings(policy)
+            # This returns the two max expected values, for choice C or D,
+            # as a tuple.
+            return (
+                minimax_tree_search(siblings[0], policy, max_depth)
+                + begin_node.get_value(),
+                minimax_tree_search(siblings[1], policy, max_depth)
+                + begin_node.get_value(),
+            )
+        elif begin_node.depth < max_depth:
+            siblings = begin_node.get_siblings(policy)
+            # The deterministic node value is the max of both siblings' values
+            # + the score of the outcome of the node.
+            a = minimax_tree_search(siblings[0], policy, max_depth)
+            b = minimax_tree_search(siblings[1], policy, max_depth)
+            node_value = max(a, b) + begin_node.get_value()
+            return node_value
+
+
+def move_gen(outcome, policy, depth_search_tree=5):
+    """
+    Returns the best move considering the opponent's policy and last move,
+    using the tree-search procedure.
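+
+    For example (illustrative): given the last outcome (C, C) and a learned
+    policy, the search compares the expected values of replying C versus D
+    over a tree of depth depth_search_tree and returns the better action
+    (C in case of a tie).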
+    """
+    current_node = DeterministicNode(outcome[0], outcome[1], depth=0)
+    values_of_choices = minimax_tree_search(current_node, policy, depth_search_tree)
+    # Returns the Action which corresponds to the best choice in terms of
+    # expected value. In case value(C) == value(D), returns C.
+    actions_tuple = (C, D)
+    return actions_tuple[values_of_choices.index(max(values_of_choices))]
diff --git a/axelrod/strategies/defector.py b/axelrod/strategies/defector.py
new file mode 100644
index 000000000..d771079ba
--- /dev/null
+++ b/axelrod/strategies/defector.py
@@ -0,0 +1,61 @@
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class Defector(IpdPlayer):
+    """A player who only ever defects.
+
+    Names:
+
+    - Defector: [Axelrod1984]_
+    - ALLD: [Press2012]_
+    - Always defect: [Mittal2009]_
+    """
+
+    name = "Defector"
+    classifier = {
+        "memory_depth": 0,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def strategy(opponent: IpdPlayer) -> Action:
+        return D
+
+
+class TrickyDefector(IpdPlayer):
+    """A defector that is trying to be tricky.
+
+    Names:
+
+    - Tricky Defector: Original name by Karol Langner
+    """
+
+    name = "Tricky Defector"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        """Almost always defects, but will try to trick the opponent into
+        cooperating.
+
+        Cooperates if the opponent has cooperated at least once in the past
+        and has defected for the last 3 turns in a row.
+        """
+        if opponent.history.cooperations > 0 and opponent.history[-3:] == [D] * 3:
+            return C
+        return D
diff --git a/axelrod/strategies/doubler.py b/axelrod/strategies/doubler.py
new file mode 100644
index 000000000..a53dc932b
--- /dev/null
+++ b/axelrod/strategies/doubler.py
@@ -0,0 +1,36 @@
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class Doubler(IpdPlayer):
+    """
+    Cooperates except when the opponent has defected and
+    the opponent's cooperation count is at most twice their defection count.
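+
+    For example (illustrative): if the opponent just defected and has 10
+    cooperations against 5 defections, then 10 <= 2 * 5 holds and Doubler
+    defects.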
+ + Names: + + - Doubler: [Prison1998]_ + """ + + name = "Doubler" + classifier = { + "stochastic": False, + "memory_depth": float("inf"), + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if not self.history: + return C + if ( + opponent.history[-1] == D + and opponent.cooperations <= opponent.defections * 2 + ): + return D + return C diff --git a/axelrod/strategies/finite_state_machines.py b/axelrod/strategies/finite_state_machines.py new file mode 100644 index 000000000..0b511a8fa --- /dev/null +++ b/axelrod/strategies/finite_state_machines.py @@ -0,0 +1,1002 @@ +import itertools +from random import randrange +from typing import Any, List, Sequence, Tuple, Union +import numpy.random as random +from numpy.random import choice +from axelrod.action import Action +from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D +actions = (C, D) +Transition = Tuple[int, Action, int, Action] + + +class SimpleFSM(object): + """Simple implementation of a finite state machine that transitions + between states based on the last round of play. + + https://en.wikipedia.org/wiki/Finite-state_machine + """ + + def __init__(self, transitions: tuple, initial_state: int) -> None: + """ + transitions is a list of the form + ((state, last_opponent_action, next_state, next_action), ...) + + TitForTat would be represented with the following table: + ((1, C, 1, C), (1, D, 1, D)) + with initial play C and initial state 1. + + """ + self._state = initial_state + self._state_transitions = { + (current_state, input_action): (next_state, output_action) + for current_state, input_action, next_state, output_action in transitions + } # type: dict + + self._raise_error_for_bad_input() + + def _raise_error_for_bad_input(self): + callable_states = set( + pair[0] for pair in self._state_transitions.values() + ) + callable_states.add(self._state) + for state in callable_states: + self._raise_error_for_bad_state(state) + + def _raise_error_for_bad_state(self, state: int): + if (state, C) not in self._state_transitions or ( + state, + D, + ) not in self._state_transitions: + raise ValueError( + "state: {} does not have values for both C and D".format(state) + ) + + @property + def state(self) -> int: + return self._state + + @state.setter + def state(self, new_state: int): + self._raise_error_for_bad_state(new_state) + self._state = new_state + + @property + def state_transitions(self) -> dict: + return self._state_transitions.copy() + + def transitions(self) -> list: + return [[x[0], x[1], y[0], y[1]] for x, y in self._state_transitions.items()] + + def move(self, opponent_action: Action) -> Action: + """Computes the response move and changes state.""" + next_state, next_action = self._state_transitions[ + (self._state, opponent_action) + ] + self._state = next_state + return next_action + + def __eq__(self, other) -> bool: + """Equality of two FSMs""" + if not isinstance(other, SimpleFSM): + return False + return (self._state, self._state_transitions) == ( + other.state, + other.state_transitions, + ) + + def num_states(self): + """Return the number of states of the machine.""" + return len(set(state for state, action in self._state_transitions)) + + +class FSMPlayer(IpdPlayer): + """Abstract base class for finite state machine players.""" + + name = "FSM IpdPlayer" + + 
classifier = { + "memory_depth": 1, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__( + self, + transitions: Tuple[Transition, ...] = ((1, C, 1, C), (1, D, 1, D)), + initial_state: int = 1, + initial_action: Action = C + ) -> None: + super().__init__() + self.initial_state = initial_state + self.initial_action = initial_action + self.fsm = SimpleFSM(transitions, initial_state) + + def strategy(self, opponent: IpdPlayer) -> Action: + if len(self.history) == 0: + return self.initial_action + else: + return self.fsm.move(opponent.history[-1]) + + +class EvolvableFSMPlayer(FSMPlayer, EvolvablePlayer): + """Abstract base class for evolvable finite state machine players.""" + + name = "EvolvableFSMPlayer" + + classifier = { + "memory_depth": 1, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__( + self, + transitions: tuple = None, + initial_state: int = None, + initial_action: Action = None, + num_states: int = None, + mutation_probability: float = 0.1, + ) -> None: + """If transitions, initial_state, and initial_action are None + then generate random parameters using num_states.""" + transitions, initial_state, initial_action, num_states = self._normalize_parameters( + transitions, initial_state, initial_action, num_states) + FSMPlayer.__init__( + self, + transitions=transitions, + initial_state=initial_state, + initial_action=initial_action) + EvolvablePlayer.__init__(self) + self.mutation_probability = mutation_probability + self.overwrite_init_kwargs( + transitions=transitions, + initial_state=initial_state, + initial_action=initial_action, + num_states=self.num_states) + + @classmethod + def normalize_transitions(cls, transitions: Sequence[Sequence]) -> Tuple[Tuple[Any, ...], ...]: + """Translate a list of lists to a tuple of tuples.""" + normalized = [] + for t in transitions: + normalized.append(tuple(t)) + return tuple(normalized) + + @classmethod + def _normalize_parameters(cls, transitions: Tuple = None, initial_state: int = None, initial_action: Action = None, + num_states: int = None) -> Tuple[Tuple, int, Action, int]: + if not ((transitions is not None) and (initial_state is not None) and (initial_action is not None)): + if not num_states: + raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableFSMPlayer") + transitions, initial_state, initial_action = cls.random_params(num_states) + transitions = cls.normalize_transitions(transitions) + num_states = len(transitions) // 2 + return transitions, initial_state, initial_action, num_states + + @property + def num_states(self) -> int: + return self.fsm.num_states() + + @classmethod + def random_params(cls, num_states: int) -> Tuple[Tuple[Transition, ...], int, Action]: + rows = [] + for j in range(num_states): + for action in actions: + next_state = randrange(num_states) + next_action = choice(actions) + row = (j, action, next_state, next_action) + rows.append(row) + initial_state = randrange(num_states) + initial_action = choice(actions) + return tuple(rows), initial_state, initial_action + + @staticmethod + def mutate_rows(rows, mutation_probability): + rows = list(rows) + randoms = random.random(len(rows)) + # Flip each value with a probability proportional to the mutation rate + for i, row in enumerate(rows): + if randoms[i] < 
mutation_probability: + row[3] = row[3].flip() + # Swap Two Nodes? + if random.random() < 0.5: + nodes = len(rows) // 2 + n1 = randrange(nodes) + n2 = randrange(nodes) + for j, row in enumerate(rows): + if row[0] == n1: + row[0] = n2 + elif row[0] == n2: + row[0] = n1 + rows.sort(key=lambda x: (x[0], 0 if x[1] == C else 1)) + return rows + + def mutate(self): + initial_action = self.initial_action + if random.random() < self.mutation_probability / 10: + initial_action = self.initial_action.flip() + initial_state = self.initial_state + if random.random() < self.mutation_probability / (10 * self.num_states): + initial_state = randrange(self.num_states) + try: + transitions = self.mutate_rows(self.fsm.transitions(), self.mutation_probability) + self.fsm = SimpleFSM(transitions, self.initial_state) + except ValueError: + # If the FSM is malformed, try again. + return self.mutate() + return self.create_new( + transitions=transitions, + initial_state=initial_state, + initial_action=initial_action, + ) + + @staticmethod + def crossover_rows(rows1, rows2): + num_states = len(rows1) // 2 + cross_point = 2 * randrange(num_states) + new_rows = copy_lists(rows1[:cross_point]) + new_rows += copy_lists(rows2[cross_point:]) + return new_rows + + def crossover(self, other): + if other.__class__ != self.__class__: + raise TypeError("Crossover must be between the same player classes.") + transitions = self.crossover_rows(self.fsm.transitions(), other.fsm.transitions()) + transitions = self.normalize_transitions(transitions) + return self.create_new(transitions=transitions) + + def receive_vector(self, vector): + """ + Read a serialized vector into the set of FSM parameters (less initial + state). Then assign those FSM parameters to this class instance. + + The vector has three parts. The first is used to define the next state + (for each of the player's states - for each opponents action). + + The second part is the player's next moves (for each state - for + each opponent's actions). + + Finally, a probability to determine the player's first move. + """ + num_states = self.fsm.num_states() + state_scale = vector[:num_states * 2] + next_states = [int(s * (num_states - 1)) for s in state_scale] + actions = vector[num_states * 2: -1] + + self.initial_action = C if round(vector[-1]) == 0 else D + self.initial_state = 1 + + transitions = [] + for i, (initial_state, action) in enumerate(itertools.product(range(num_states), [C, D])): + next_action = C if round(actions[i]) == 0 else D + transitions.append([initial_state, action, next_states[i], next_action]) + transitions = self.normalize_transitions(transitions) + self.fsm = SimpleFSM(transitions, self.initial_state) + self.overwrite_init_kwargs(transitions=transitions, + initial_state=self.initial_state, + initial_action=self.initial_action) + + def create_vector_bounds(self): + """Creates the bounds for the decision variables.""" + size = len(self.fsm.transitions()) * 2 + 1 + lb = [0] * size + ub = [1] * size + return lb, ub + + +class Fortress3(FSMPlayer): + """Finite state machine player specified in http://DOI.org/10.1109/CEC.2006.1688322. + + Note that the description in http://www.graham-kendall.com/papers/lhk2011.pdf + is not correct. 
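+
+    As a reading aid (not part of the original description): each transition
+    tuple below reads (state, last_opponent_action, next_state, next_action);
+    e.g. (1, C, 1, D) means that in state 1, if the opponent just played C,
+    the player stays in state 1 and plays D.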
+ + + Names: + + - Fortress 3: [Ashlock2006b]_ + """ + + name = "Fortress3" + classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (1, C, 1, D), + (1, D, 2, D), + (2, C, 1, D), + (2, D, 3, C), + (3, C, 3, C), + (3, D, 1, D), + ) + + super().__init__( + transitions=transitions, initial_state=1, initial_action=D + ) + + +class Fortress4(FSMPlayer): + """ + Finite state machine player specified in + http://DOI.org/10.1109/CEC.2006.1688322. + + Note that the description in + http://www.graham-kendall.com/papers/lhk2011.pdf is not correct. + + Names: + + - Fortress 4: [Ashlock2006b]_ + """ + + name = "Fortress4" + classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (1, C, 1, D), + (1, D, 2, D), + (2, C, 1, D), + (2, D, 3, D), + (3, C, 1, D), + (3, D, 4, C), + (4, C, 4, C), + (4, D, 1, D), + ) + + super().__init__( + transitions=transitions, initial_state=1, initial_action=D + ) + + +class Predator(FSMPlayer): + """ + Finite state machine player specified in + http://DOI.org/10.1109/CEC.2006.1688322. + + Names: + + - Predator: [Ashlock2006b]_ + """ + + name = "Predator" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (0, C, 0, D), + (0, D, 1, D), + (1, C, 2, D), + (1, D, 3, D), + (2, C, 4, C), + (2, D, 3, D), + (3, C, 5, D), + (3, D, 4, C), + (4, C, 2, C), + (4, D, 6, D), + (5, C, 7, D), + (5, D, 3, D), + (6, C, 7, C), + (6, D, 7, D), + (7, C, 8, D), + (7, D, 7, D), + (8, C, 8, D), + (8, D, 6, D), + ) + + super().__init__( + transitions=transitions, initial_state=0, initial_action=C + ) + + +class Pun1(FSMPlayer): + """FSM player described in [Ashlock2006]_. + + Names: + + - Pun1: [Ashlock2006]_ + """ + + name = "Pun1" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ((1, C, 2, C), (1, D, 2, C), (2, C, 1, C), (2, D, 1, D)) + + super().__init__( + transitions=transitions, initial_state=1, initial_action=D + ) + + +class Raider(FSMPlayer): + """ + FSM player described in http://DOI.org/10.1109/FOCI.2014.7007818. + + + Names + + - Raider: [Ashlock2014]_ + """ + + name = "Raider" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (0, C, 2, D), + (0, D, 2, D), + (1, C, 1, C), + (1, D, 1, D), + (2, C, 0, D), + (2, D, 3, C), + (3, C, 0, D), + (3, D, 1, C), + ) + + super().__init__( + transitions=transitions, initial_state=0, initial_action=D + ) + + +class Ripoff(FSMPlayer): + """ + FSM player described in http://DOI.org/10.1109/TEVC.2008.920675. 
+ + Names + + - Ripoff: [Ashlock2008]_ + """ + + name = "Ripoff" + classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (1, C, 2, C), + (1, D, 3, C), + (2, C, 1, D), + (2, D, 3, C), + (3, C, 3, C), # Note that it's TFT in state 3 + (3, D, 3, D), + ) + + super().__init__( + transitions=transitions, initial_state=1, initial_action=D + ) + + +class UsuallyCooperates(FSMPlayer): + """ + This strategy cooperates except after a C following a D. + + Names: + + - Usually Cooperates (UC): [Ashlock2009]_ + """ + + name = "UsuallyCooperates" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ((1, C, 1, C), (1, D, 2, C), (2, C, 1, D), (2, D, 1, C)) + + super().__init__( + transitions=transitions, initial_state=1, initial_action=C + ) + + +class UsuallyDefects(FSMPlayer): + """ + This strategy defects except after a D following a C. + + Names: + + - Usually Defects (UD): [Ashlock2009]_ + """ + + name = "UsuallyDefects" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ((1, C, 2, D), (1, D, 1, D), (2, C, 1, D), (2, D, 1, C)) + + super().__init__( + transitions=transitions, initial_state=1, initial_action=D + ) + + +class SolutionB1(FSMPlayer): + """ + FSM player described in http://DOI.org/10.1109/TCIAIG.2014.2326012. + + Names + + - Solution B1: [Ashlock2015]_ + """ + + name = "SolutionB1" + classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (1, C, 2, D), + (1, D, 1, D), + (2, C, 2, C), + (2, D, 3, C), + (3, C, 3, C), + (3, D, 3, C), + ) + + super().__init__( + transitions=transitions, initial_state=1, initial_action=D + ) + + +class SolutionB5(FSMPlayer): + """ + + FSM player described in http://DOI.org/10.1109/TCIAIG.2014.2326012. + + Names + + - Solution B5: [Ashlock2015]_ + """ + + name = "SolutionB5" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (1, C, 2, C), + (1, D, 6, D), + (2, C, 2, C), + (2, D, 3, D), + (3, C, 6, C), + (3, D, 1, D), + (4, C, 3, C), + (4, D, 6, D), + (5, C, 5, D), + (5, D, 4, D), + (6, C, 3, C), + (6, D, 5, D), + ) + + super().__init__( + transitions=transitions, initial_state=1, initial_action=D + ) + + +class Thumper(FSMPlayer): + """ + FSM player described in http://DOI.org/10.1109/TEVC.2008.920675. 
+ + Names + + - Thumper: [Ashlock2008]_ + """ + + name = "Thumper" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ((1, C, 1, C), (1, D, 2, D), (2, C, 1, D), (2, D, 1, D)) + + super().__init__( + transitions=transitions, initial_state=1, initial_action=C + ) + + +class EvolvedFSM4(FSMPlayer): + """ + A 4 state FSM player trained with an evolutionary algorithm. + + Names: + + - Evolved FSM 4: Original name by Marc Harper + """ + + name = "Evolved FSM 4" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (0, C, 0, C), + (0, D, 2, D), + (1, C, 3, D), + (1, D, 0, C), + (2, C, 2, D), + (2, D, 1, C), + (3, C, 3, D), + (3, D, 1, D), + ) + + super().__init__( + transitions=transitions, initial_state=0, initial_action=C + ) + + +class EvolvedFSM16(FSMPlayer): + """ + A 16 state FSM player trained with an evolutionary algorithm. + + Names: + + - Evolved FSM 16: Original name by Marc Harper + + """ + + name = "Evolved FSM 16" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (0, C, 0, C), + (0, D, 12, D), + (1, C, 3, D), + (1, D, 6, C), + (2, C, 2, D), + (2, D, 14, D), + (3, C, 3, D), + (3, D, 3, D), + (5, C, 12, D), + (5, D, 10, D), + (6, C, 5, C), + (6, D, 12, D), + (7, C, 3, D), + (7, D, 1, C), + (8, C, 5, C), + (8, D, 5, C), + (10, C, 11, D), + (10, D, 8, C), + (11, C, 15, D), + (11, D, 5, D), + (12, C, 8, C), + (12, D, 11, D), + (13, C, 13, D), + (13, D, 7, D), + (14, C, 13, D), + (14, D, 13, D), + (15, C, 15, D), + (15, D, 2, C), + ) + + super().__init__( + transitions=transitions, initial_state=0, initial_action=C + ) + + +class EvolvedFSM16Noise05(FSMPlayer): + """ + A 16 state FSM player trained with an evolutionary algorithm with + noisy matches (noise=0.05). + + Names: + + - Evolved FSM 16 Noise 05: Original name by Marc Harper + """ + + name = "Evolved FSM 16 Noise 05" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (0, C, 8, C), + (0, D, 3, D), + (1, C, 13, C), + (1, D, 15, D), + (2, C, 12, C), + (2, D, 3, D), + (3, C, 10, C), + (3, D, 3, D), + (4, C, 5, D), + (4, D, 4, D), + (5, C, 4, D), + (5, D, 10, D), + (6, C, 8, C), + (6, D, 6, D), + (8, C, 2, C), + (8, D, 4, D), + (10, C, 4, D), + (10, D, 1, D), + (11, C, 14, D), + (11, D, 13, C), + (12, C, 13, C), + (12, D, 2, C), + (13, C, 13, C), + (13, D, 6, C), + (14, C, 3, D), + (14, D, 13, D), + (15, C, 5, D), + (15, D, 11, C), + ) + + super().__init__( + transitions=transitions, initial_state=0, initial_action=C + ) + + +# Strategies trained with Moran process objectives + + +class TF1(FSMPlayer): + """ + A FSM player trained to maximize Moran fixation probabilities. 
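+
+    As a rough sketch of how such an objective can be evaluated with the
+    library's Moran process machinery (the population makeup and turn count
+    here are illustrative assumptions, not the actual training setup):
+
+        import axelrod as axl
+
+        players = [TF1()] + [axl.TitForTat() for _ in range(3)]
+        mp = axl.MoranProcess(players, turns=100)
+        mp.play()  # run birth-death updates until fixation
+        print(mp.winning_strategy_name)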
+ + Names: + + - TF1: Original name by Marc Harper + """ + + name = "TF1" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (0, C, 7, C), + (0, D, 1, C), + (1, C, 11, D), + (1, D, 11, D), + (2, C, 8, D), + (2, D, 8, C), + (3, C, 3, C), + (3, D, 12, D), + (4, C, 6, C), + (4, D, 3, C), + (5, C, 11, C), + (5, D, 8, D), + (6, C, 13, D), + (6, D, 14, C), + (7, C, 4, D), + (7, D, 2, D), + (8, C, 14, D), + (8, D, 8, D), + (9, C, 0, C), + (9, D, 10, D), + (10, C, 8, C), + (10, D, 15, C), + (11, C, 6, D), + (11, D, 5, D), + (12, C, 6, D), + (12, D, 9, D), + (13, C, 9, D), + (13, D, 8, D), + (14, C, 8, D), + (14, D, 13, D), + (15, C, 4, C), + (15, D, 5, C), + ) + + super().__init__( + transitions=transitions, initial_state=0, initial_action=C + ) + + +class TF2(FSMPlayer): + """ + A FSM player trained to maximize Moran fixation probabilities. + + Names: + + - TF2: Original name by Marc Harper + """ + + name = "TF2" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (0, C, 13, D), + (0, D, 12, D), + (1, C, 3, D), + (1, D, 4, D), + (2, C, 14, D), + (2, D, 9, D), + (3, C, 0, C), + (3, D, 1, D), + (4, C, 1, D), + (4, D, 2, D), + (7, C, 12, D), + (7, D, 2, D), + (8, C, 7, D), + (8, D, 9, D), + (9, C, 8, D), + (9, D, 0, D), + (10, C, 2, C), + (10, D, 15, C), + (11, C, 7, D), + (11, D, 13, D), + (12, C, 3, C), + (12, D, 8, D), + (13, C, 7, C), + (13, D, 10, D), + (14, C, 10, D), + (14, D, 7, D), + (15, C, 15, C), + (15, D, 11, D), + ) + + super().__init__( + transitions=transitions, initial_state=0, initial_action=C + ) + + +class TF3(FSMPlayer): + """ + A FSM player trained to maximize Moran fixation probabilities. 
+ + Names: + + - TF3: Original name by Marc Harper + """ + + name = "TF3" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + transitions = ( + (0, C, 0, C), + (0, D, 3, C), + (1, C, 5, D), + (1, D, 0, C), + (2, C, 3, C), + (2, D, 2, D), + (3, C, 4, D), + (3, D, 6, D), + (4, C, 3, C), + (4, D, 1, D), + (5, C, 6, C), + (5, D, 3, D), + (6, C, 6, D), + (6, D, 6, D), + (7, C, 7, D), + (7, D, 5, C), + ) + + super().__init__( + transitions=transitions, initial_state=0, initial_action=C + ) diff --git a/axelrod/strategies/forgiver.py b/axelrod/strategies/forgiver.py new file mode 100644 index 000000000..2f10fce4a --- /dev/null +++ b/axelrod/strategies/forgiver.py @@ -0,0 +1,67 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class Forgiver(IpdPlayer): + """ + A player starts by cooperating however will defect if at any point + the opponent has defected more than 10 percent of the time + + Names: + + - Forgiver: Original name by Thomas Campbell + """ + + name = "Forgiver" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + """ + Begins by playing C, then plays D if the opponent has defected more + than 10 percent of the time. + """ + if opponent.defections > len(opponent.history) / 10.0: + return D + return C + + +class ForgivingTitForTat(IpdPlayer): + """ + A player starts by cooperating however will defect if at any point, the + opponent has defected more than 10 percent of the time, and their most + recent decision was defect. + + Names: + + - Forgiving Tit For Tat: Original name by Thomas Campbell + """ + + name = "Forgiving Tit For Tat" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + """ + Begins by playing C, then plays D if the opponent has defected more than + 10 percent of the time and their most recent decision was defect. + """ + if opponent.defections > len(opponent.history) / 10: + return opponent.history[-1] + return C diff --git a/axelrod/strategies/gambler.py b/axelrod/strategies/gambler.py new file mode 100644 index 000000000..07d1ff470 --- /dev/null +++ b/axelrod/strategies/gambler.py @@ -0,0 +1,235 @@ +"""Stochastic variants of Lookup table based-strategies, trained with particle +swarm algorithms. + +For the original see: + https://gist.github.com/GDKO/60c3d0fd423598f3c4e4 +""" +import random +from typing import Any + +from axelrod.action import Action, str_to_actions, actions_to_str +from axelrod.load_data_ import load_pso_tables +from axelrod.player import IpdPlayer + +from axelrod.random_ import random_choice + +from .lookerup import EvolvableLookerUp, LookupTable, LookerUp, Plays, create_lookup_table_keys + +C, D = Action.C, Action.D +tables = load_pso_tables("pso_gambler.csv", directory="data") + + +class Gambler(LookerUp): + """ + A stochastic version of LookerUp which will select randomly an action in + some cases. 
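+
+    Where a LookerUp table entry is an Action, a Gambler entry may instead be
+    a probability of cooperating, resolved via random_choice. A toy sketch of
+    that final step (the 0.75 is an illustrative value, not a trained one):
+
+        p = 0.75  # table entry: cooperate with probability 0.75
+        action = random_choice(p)  # C with probability p, else D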
+ + Names: + + - Gambler: Original name by Georgios Koutsovoulos + """ + + name = "Gambler" + classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + actions_or_float = super(Gambler, self).strategy(opponent) + if isinstance(actions_or_float, Action): + return actions_or_float + return random_choice(actions_or_float) + + +class EvolvableGambler(Gambler, EvolvableLookerUp): + name = "EvolvableGambler" + + def __init__( + self, + lookup_dict: dict = None, + initial_actions: tuple = None, + pattern: Any = None, # pattern is str or tuple of Actions. + parameters: Plays = None, + mutation_probability: float = None + ) -> None: + EvolvableLookerUp.__init__( + self, + lookup_dict=lookup_dict, + initial_actions=initial_actions, + pattern=pattern, + parameters=parameters, + mutation_probability=mutation_probability + ) + self.pattern = list(self.pattern) + Gambler.__init__( + self, + lookup_dict=self.lookup_dict, + initial_actions=self.initial_actions, + pattern=self.pattern, + parameters=self.parameters + ) + self.overwrite_init_kwargs( + lookup_dict=self.lookup_dict, + initial_actions=self.initial_actions, + pattern=self.pattern, + parameters=self.parameters, + mutation_probability=self.mutation_probability, + ) + + # The mutate and crossover methods are mostly inherited from EvolvableLookerUp, except for the following + # modifications. + + @classmethod + def random_value(cls): + return random.random() + + @classmethod + def mutate_value(cls, value): + ep = random.uniform(-1, 1) / 4 + value += ep + if value < 0: + value = 0 + elif value > 1: + value = 1 + return value + + def receive_vector(self, vector): + """Receives a vector and updates the player's pattern. Ignores extra parameters.""" + self.pattern = vector + self_depth, op_depth, op_openings_depth = self.parameters + self._lookup = LookupTable.from_pattern(self.pattern, self_depth, op_depth, op_openings_depth) + + def create_vector_bounds(self): + """Creates the bounds for the decision variables. Ignores extra parameters.""" + size = len(self.pattern) + lb = [0.0] * size + ub = [1.0] * size + return lb, ub + + +class PSOGamblerMem1(Gambler): + """ + A 1x1x0 PSOGambler trained with pyswarm. This is the 'optimal' memory one + strategy trained against the set of short run time strategies in the + Axelrod library. + + Names: + + - PSO Gambler Mem1: Original name by Marc Harper + """ + + name = "PSO Gambler Mem1" + + def __init__(self) -> None: + pattern = tables[("PSO Gambler Mem1", 1, 1, 0)] + parameters = Plays(self_plays=1, op_plays=1, op_openings=0) + + super().__init__(parameters=parameters, pattern=pattern) + + +class PSOGambler1_1_1(Gambler): + """ + A 1x1x1 PSOGambler trained with pyswarm. + + Names: + + - PSO Gambler 1_1_1: Original name by Marc Harper + """ + + name = "PSO Gambler 1_1_1" + + def __init__(self) -> None: + pattern = tables[("PSO Gambler 1_1_1", 1, 1, 1)] + parameters = Plays(self_plays=1, op_plays=1, op_openings=1) + + super().__init__(parameters=parameters, pattern=pattern) + + +class PSOGambler2_2_2(Gambler): + """ + A 2x2x2 PSOGambler trained with a particle swarm algorithm (implemented in + pyswarm). Original version by Georgios Koutsovoulos. 
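+
+    The EvolvableGambler vector interface above is the hook for this kind of
+    training; a minimal sketch, assuming pyswarm's pso and a user-supplied
+    objective function score(vector):
+
+        from pyswarm import pso
+
+        player = EvolvableGambler(parameters=Plays(2, 2, 2))
+        lb, ub = player.create_vector_bounds()
+        best_vector, best_score = pso(score, lb, ub)
+        player.receive_vector(best_vector)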
+ + Names: + + - PSO Gambler 2_2_2: Original name by Marc Harper + """ + + name = "PSO Gambler 2_2_2" + + def __init__(self) -> None: + pattern = tables[("PSO Gambler 2_2_2", 2, 2, 2)] + parameters = Plays(self_plays=2, op_plays=2, op_openings=2) + + super().__init__(parameters=parameters, pattern=pattern) + + +class PSOGambler2_2_2_Noise05(Gambler): + """ + A 2x2x2 PSOGambler trained with pyswarm with noise=0.05. + + Names: + + - PSO Gambler 2_2_2 Noise 05: Original name by Marc Harper + """ + + name = "PSO Gambler 2_2_2 Noise 05" + + def __init__(self) -> None: + pattern = tables[("PSO Gambler 2_2_2 Noise 05", 2, 2, 2)] + parameters = Plays(self_plays=2, op_plays=2, op_openings=2) + + super().__init__(parameters=parameters, pattern=pattern) + + +class ZDMem2(Gambler): + """ + A memory two generalization of a zero determinant player. + + Names: + + - ZDMem2: Original name by Marc Harper + - Unnamed [LiS2014]_ + + """ + + name = "ZD-Mem2" + + classifier = { + "memory_depth": 2, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + pattern = [ + 11 / 12, + 4 / 11, + 7 / 9, + 1 / 10, + 5 / 6, + 3 / 11, + 7 / 9, + 1 / 10, + 2 / 3, + 1 / 11, + 7 / 9, + 1 / 10, + 3 / 4, + 2 / 11, + 7 / 9, + 1 / 10, + ] + parameters = Plays(self_plays=2, op_plays=2, op_openings=0) + + super().__init__(parameters=parameters, pattern=pattern) diff --git a/axelrod/strategies/geller.py b/axelrod/strategies/geller.py new file mode 100644 index 000000000..e90f92e9d --- /dev/null +++ b/axelrod/strategies/geller.py @@ -0,0 +1,118 @@ +""" +The player classes in this module do not obey standard rules of the IPD (as +indicated by their classifier). We do not recommend putting a lot of time in to +optimising them. +""" + +from axelrod._strategy_utils import inspect_strategy +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.random_ import random_choice + +C, D = Action.C, Action.D + + +class Geller(IpdPlayer): + """Observes what the player will do in the next round and adjust. + + If unable to do this: will play randomly. + + + This code is inspired by Matthew Williams' talk + "Cheating at rock-paper-scissors — meta-programming in Python" + given at Django Weekend Cardiff in February 2014. + + His code is here: https://github.com/mattjw/rps_metaprogramming + and there's some more info here: http://www.mattjw.net/2014/02/rps-metaprogramming/ + + This code is **way** simpler than Matt's, as in this exercise we already + have access to the opponent instance, so don't need to go + hunting for it in the stack. 
Instead we can just call it to
+    see what it's going to play, and return a result based on that.
+
+    This is almost certainly cheating, and more than likely against the
+    spirit of the 'competition' :-)
+
+    Names:
+
+    - Geller: Original name by Martin Chorley (@martinjc)
+    """
+
+    name = "Geller"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": True,  # Finds out what opponent will do
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def foil_strategy_inspection() -> Action:
+        """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead"""
+        return random_choice(0.5)
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        """
+        Look at what the opponent will play in the next round and choose a
+        strategy that gives the least jail time, which is equivalent to
+        playing the same strategy as that which the opponent will play.
+        """
+
+        return inspect_strategy(self, opponent)
+
+
+class GellerCooperator(Geller):
+    """Observes what the player will do (like :code:`Geller`) but if unable to
+    will cooperate.
+
+    Names:
+
+    - Geller Cooperator: Original name by Karol Langner
+    """
+
+    name = "Geller Cooperator"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": True,  # Finds out what opponent will do
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def foil_strategy_inspection() -> Action:
+        """
+        Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead
+        """
+        return C
+
+
+class GellerDefector(Geller):
+    """Observes what the player will do (like :code:`Geller`) but if unable to
+    will defect.
+
+    Names:
+
+    - Geller Defector: Original name by Karol Langner
+    """
+
+    name = "Geller Defector"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": True,  # Finds out what opponent will do
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def foil_strategy_inspection() -> Action:
+        """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead"""
+        return D
diff --git a/axelrod/strategies/gobymajority.py b/axelrod/strategies/gobymajority.py
new file mode 100644
index 000000000..ed6ac9dfa
--- /dev/null
+++ b/axelrod/strategies/gobymajority.py
@@ -0,0 +1,246 @@
+import copy
+from typing import Any, Dict, Union
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class GoByMajority(IpdPlayer):
+    """Submitted to Axelrod's second tournament by Gail Grisell. It came 23rd
+    and was written in 10 lines of BASIC.
+
+    A player examines the history of the opponent: if the opponent has more
+    defections than cooperations then the player defects.
+
+    In case of an equal number of defections and cooperations this player
+    will Cooperate. Passing the `soft=False` keyword argument when
+    initialising will create a HardGoByMajority which Defects in case of
+    equality.
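+
+    For example, if the remembered history holds two defections and two
+    cooperations, the soft variant cooperates while the hard variant defects.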
+ + An optional memory attribute will limit the number of turns remembered (by + default this is 0) + + Names: + + - Go By Majority: [Axelrod1984]_ + - Grisell: [Axelrod1980b]_ + - Soft Majority: [Mittal2009]_ + """ + + name = "Go By Majority" + classifier = { + "stochastic": False, + "inspects_source": False, + "makes_use_of": set(), + "long_run_time": False, + "manipulates_source": False, + "manipulates_state": False, + "memory_depth": float("inf"), + } # type: Dict[str, Any] + + def __init__( + self, memory_depth: Union[int, float] = float("inf"), soft: bool = True + ) -> None: + """ + Parameters + ---------- + memory_depth: int >= 0 + The number of rounds to use for the calculation of the cooperation + and defection probabilities of the opponent. + soft: bool + Indicates whether to cooperate or not in the case that the + cooperation and defection probabilities are equal. + """ + + super().__init__() + self.soft = soft + self.classifier["memory_depth"] = memory_depth + if self.classifier["memory_depth"] < float("inf"): + self.memory = self.classifier["memory_depth"] + else: + self.memory = 0 + self.name = "Go By Majority" + (self.memory > 0) * (": %i" % self.memory) + if self.soft: + self.name = "Soft " + self.name + else: + self.name = "Hard " + self.name + + def __repr__(self): + return self.name + + def strategy(self, opponent: IpdPlayer) -> Action: + """This is affected by the history of the opponent. + + As long as the opponent cooperates at least as often as they defect then + the player will cooperate. If at any point the opponent has more + defections than cooperations in memory the player defects. + """ + + history = opponent.history[-self.memory :] + defections = sum([s == D for s in history]) + cooperations = sum([s == C for s in history]) + if defections > cooperations: + return D + if defections == cooperations: + if self.soft: + return C + else: + return D + return C + + +class GoByMajority40(GoByMajority): + """ + GoByMajority player with a memory of 40. + + Names: + + - Go By Majority 40: Original name by Karol Langner + """ + + name = "Go By Majority 40" + classifier = copy.copy(GoByMajority.classifier) + classifier["memory_depth"] = 40 + + def __init__(self) -> None: + super().__init__(memory_depth=40) + + +class GoByMajority20(GoByMajority): + """ + GoByMajority player with a memory of 20. + + Names: + + - Go By Majority 20: Original name by Karol Langner + """ + + name = "Go By Majority 20" + classifier = copy.copy(GoByMajority.classifier) + classifier["memory_depth"] = 20 + + def __init__(self) -> None: + super().__init__(memory_depth=20) + + +class GoByMajority10(GoByMajority): + """ + GoByMajority player with a memory of 10. + + Names: + + - Go By Majority 10: Original name by Karol Langner + """ + + name = "Go By Majority 10" + classifier = copy.copy(GoByMajority.classifier) + classifier["memory_depth"] = 10 + + def __init__(self) -> None: + super().__init__(memory_depth=10) + + +class GoByMajority5(GoByMajority): + """ + GoByMajority player with a memory of 5. + + Names: + + - Go By Majority 5: Original name by Karol Langner + """ + + name = "Go By Majority 5" + classifier = copy.copy(GoByMajority.classifier) + classifier["memory_depth"] = 5 + + def __init__(self) -> None: + super().__init__(memory_depth=5) + + +class HardGoByMajority(GoByMajority): + """A player examines the history of the opponent: if the opponent has more + defections than cooperations then the player defects. In case of equal + number of defections and cooperations this player will Defect. 
+ + An optional memory attribute will limit the number of turns remembered (by + default this is 0) + + Names: + + - Hard Majority: [Mittal2009]_ + """ + + name = "Hard Go By Majority" + + def __init__(self, memory_depth: Union[int, float] = float("inf")) -> None: + super().__init__(memory_depth=memory_depth, soft=False) + + +class HardGoByMajority40(HardGoByMajority): + """ + HardGoByMajority player with a memory of 40. + + Names: + + - Hard Go By Majority 40: Original name by Karol Langner + """ + + name = "Hard Go By Majority 40" + classifier = copy.copy(GoByMajority.classifier) + classifier["memory_depth"] = 40 + + def __init__(self) -> None: + super().__init__(memory_depth=40) + + +class HardGoByMajority20(HardGoByMajority): + """ + HardGoByMajority player with a memory of 20. + + Names: + + - Hard Go By Majority 20: Original name by Karol Langner + """ + + name = "Hard Go By Majority 20" + classifier = copy.copy(GoByMajority.classifier) + classifier["memory_depth"] = 20 + + def __init__(self) -> None: + super().__init__(memory_depth=20) + + +class HardGoByMajority10(HardGoByMajority): + """ + HardGoByMajority player with a memory of 10. + + Names: + + - Hard Go By Majority 10: Original name by Karol Langner + """ + + name = "Hard Go By Majority 10" + classifier = copy.copy(GoByMajority.classifier) + classifier["memory_depth"] = 10 + + def __init__(self) -> None: + super().__init__(memory_depth=10) + + +class HardGoByMajority5(HardGoByMajority): + """ + HardGoByMajority player with a memory of 5. + + Names: + + - Hard Go By Majority 5: Original name by Karol Langner + """ + + name = "Hard Go By Majority 5" + classifier = copy.copy(GoByMajority.classifier) + classifier["memory_depth"] = 5 + + def __init__(self) -> None: + super().__init__(memory_depth=5) diff --git a/axelrod/strategies/gradualkiller.py b/axelrod/strategies/gradualkiller.py new file mode 100644 index 000000000..72e7001a3 --- /dev/null +++ b/axelrod/strategies/gradualkiller.py @@ -0,0 +1,37 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.strategy_transformers import InitialTransformer + +C, D = Action.C, Action.D + + +@InitialTransformer((D, D, D, D, D, C, C), name_prefix=None) +class GradualKiller(IpdPlayer): + """ + It begins by defecting in the first five moves, then cooperates two times. + It then defects all the time if the opponent has defected in move 6 and 7, + else cooperates all the time. + Initially designed to stop Gradual from defeating TitForTat in a 3 IpdPlayer + tournament. + + Names + + - Gradual Killer: [Prison1998]_ + """ + + # These are various properties for the strategy + name = "Gradual Killer" + classifier = { + "memory_depth": float("Inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if opponent.history[5:7] == [D, D]: + return D + return C diff --git a/axelrod/strategies/grudger.py b/axelrod/strategies/grudger.py new file mode 100644 index 000000000..854b8e38a --- /dev/null +++ b/axelrod/strategies/grudger.py @@ -0,0 +1,319 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class Grudger(IpdPlayer): + """ + A player starts by cooperating however will defect if at any point the + opponent has defected. + + This strategy came 7th in Axelrod's original tournament. 
+ + Names: + + - Friedman's strategy: [Axelrod1980]_ + - Grudger: [Li2011]_ + - Grim: [Berg2015]_ + - Grim Trigger: [Banks1990]_ + - Spite: [Beaufils1997]_ + - Vengeful: [Ashlock2009]_ + """ + + name = "Grudger" + classifier = { + "memory_depth": float('inf'), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + """Begins by playing C, then plays D for the remaining rounds if the + opponent ever plays D.""" + if opponent.defections: + return D + return C + + +class ForgetfulGrudger(IpdPlayer): + """ + A player starts by cooperating however will defect if at any point the + opponent has defected, but forgets after mem_length matches. + + Names: + + - Forgetful Grudger: Original name by Geraint Palmer + """ + + name = "Forgetful Grudger" + classifier = { + "memory_depth": 10, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + """Initialised the player.""" + super().__init__() + self.mem_length = 10 + self.grudged = False + self.grudge_memory = 0 + + def strategy(self, opponent: IpdPlayer) -> Action: + """Begins by playing C, then plays D for mem_length rounds if the + opponent ever plays D.""" + if self.grudge_memory == self.mem_length: + self.grudge_memory = 0 + self.grudged = False + + if D in opponent.history[-1:]: + self.grudged = True + + if self.grudged: + self.grudge_memory += 1 + return D + return C + + +class OppositeGrudger(IpdPlayer): + """ + A player starts by defecting however will cooperate if at any point the + opponent has cooperated. + + Names: + + - Opposite Grudger: Original name by Geraint Palmer + """ + + name = "Opposite Grudger" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + """Begins by playing D, then plays C for the remaining rounds if the + opponent ever plays C.""" + if opponent.cooperations: + return C + return D + + +class Aggravater(IpdPlayer): + """ + Grudger, except that it defects on the first 3 turns + + Names + + - Aggravater: Original name by Thomas Campbell + """ + + name = "Aggravater" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + if len(opponent.history) < 3: + return D + elif opponent.defections: + return D + return C + + +class SoftGrudger(IpdPlayer): + """ + A modification of the Grudger strategy. Instead of punishing by always + defecting: punishes by playing: D, D, D, D, C, C. (Will continue to + cooperate afterwards). 
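+
+    For example, a lone opponent defection triggers the six-move response
+    D, D, D, D, C, C, after which the player returns to cooperating.
+
+    Names: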
+ + - Soft Grudger (SGRIM): [Li2011]_ + """ + + name = "Soft Grudger" + classifier = { + "memory_depth": 6, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + """Initialised the player.""" + super().__init__() + self.grudged = False + self.grudge_memory = 0 + + def strategy(self, opponent: IpdPlayer) -> Action: + """Begins by playing C, then plays D, D, D, D, C, C against a defection + """ + if self.grudged: + strategy = [D, D, D, C, C][self.grudge_memory] + self.grudge_memory += 1 + if self.grudge_memory == 5: + self.grudge_memory = 0 + self.grudged = False + return strategy + elif D in opponent.history[-1:]: + self.grudged = True + return D + return C + + +class GrudgerAlternator(IpdPlayer): + """ + A player starts by cooperating until the first opponents defection, + then alternates D-C. + + Names: + + - c_then_per_dc: [Prison1998]_ + - Grudger Alternator: Original name by Geraint Palmer + """ + + name = "GrudgerAlternator" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + """Begins by playing C, then plays Alternator for the remaining rounds + if the opponent ever plays D.""" + if opponent.defections: + if self.history[-1] == C: + return D + return C + + +class EasyGo(IpdPlayer): + """ + A player starts by defecting however will cooperate if at any point the + opponent has defected. + + Names: + + - Easy Go: [Prison1998]_ + - Reverse Grudger (RGRIM): [Li2011]_ + - Fool Me Forever: [Harper2017]_ + """ + + name = "EasyGo" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + """Begins by playing D, then plays C for the remaining rounds if the + opponent ever plays D.""" + if opponent.defections: + return C + return D + + +class GeneralSoftGrudger(IpdPlayer): + """ + A generalization of the SoftGrudger strategy. SoftGrudger punishes by + playing: D, D, D, D, C, C. after a defection by the opponent. + GeneralSoftGrudger only punishes after its opponent defects a specified + amount of times consecutively. The punishment is in the form of a series of + defections followed by a 'penance' of a series of consecutive cooperations. + + Names: + + - General Soft Grudger: Original Name by J. 
Taylor Smith + """ + + name = "General Soft Grudger" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, n: int = 1, d: int = 4, c: int = 2) -> None: + """ + Parameters + ---------- + n: int + The number of defections by the opponent to trigger punishment + d: int + The number of defections to punish the opponent + c: int + The number of cooperations in the 'penance' stage + + Special Cases + ------------- + GeneralSoftGrudger(1,4,2) is equivalent to SoftGrudger + """ + super().__init__() + self.n = n + self.d = d + self.c = c + self.grudge = [D] * (d - 1) + [C] * c + self.grudged = False + self.grudge_memory = 0 + + def strategy(self, opponent: IpdPlayer) -> Action: + """ + Punishes after its opponent defects 'n' times consecutively. + The punishment is in the form of 'd' defections followed by a penance of + 'c' consecutive cooperations. + """ + if self.grudged: + strategy = self.grudge[self.grudge_memory] + self.grudge_memory += 1 + if self.grudge_memory == len(self.grudge): + self.grudged = False + self.grudge_memory = 0 + return strategy + elif [D] * self.n == opponent.history[-self.n :]: + self.grudged = True + return D + + return C + + def __repr__(self) -> str: + return "%s: n=%s,d=%s,c=%s" % (self.name, self.n, self.d, self.c) diff --git a/axelrod/strategies/grumpy.py b/axelrod/strategies/grumpy.py new file mode 100644 index 000000000..8730148cb --- /dev/null +++ b/axelrod/strategies/grumpy.py @@ -0,0 +1,73 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class Grumpy(IpdPlayer): + """ + A player that defects after a certain level of grumpiness. + Grumpiness increases when the opponent defects and decreases + when the opponent co-operates. + + Names: + + - Grumpy: Original name by Jason Young + """ + + name = "Grumpy" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__( + self, + starting_state: str = "Nice", + grumpy_threshold: int = 10, + nice_threshold: int = -10, + ) -> None: + """ + Parameters + ---------- + starting_state: str + 'Nice' or 'Grumpy' + grumpy_threshold: int + The threshold of opponent defections - cooperations to become + grumpy + nice_threshold: int + The threshold of opponent defections - cooperations to become + nice + """ + super().__init__() + self.state = starting_state + self.grumpy_threshold = grumpy_threshold + self.nice_threshold = nice_threshold + + def strategy(self, opponent: IpdPlayer) -> Action: + """A player that gets grumpier the more the opposition defects, + and nicer the more they cooperate. + + Starts off Nice, but becomes grumpy once the grumpiness threshold is + hit. Won't become nice once that grumpy threshold is hit, but must + reach a much lower threshold before it becomes nice again. 
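+
+        With the default thresholds, the opponent must accumulate eleven more
+        defections than cooperations before the player turns Grumpy, and must
+        then reach eleven more cooperations than defections before it turns
+        Nice again.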
+        """
+
+        grumpiness = opponent.defections - opponent.cooperations
+
+        if self.state == "Nice":
+            if grumpiness > self.grumpy_threshold:
+                self.state = "Grumpy"
+                return D
+            return C
+
+        if self.state == "Grumpy":
+            if grumpiness < self.nice_threshold:
+                self.state = "Nice"
+                return C
+            return D
diff --git a/axelrod/strategies/handshake.py b/axelrod/strategies/handshake.py
new file mode 100644
index 000000000..0b6da3a49
--- /dev/null
+++ b/axelrod/strategies/handshake.py
@@ -0,0 +1,44 @@
+from typing import List
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class Handshake(IpdPlayer):
+    """Starts with C, D. If the opponent plays the same way, cooperate forever,
+    else defect forever.
+
+    Names:
+
+    - Handshake: [Robson1990]_
+    """
+
+    name = "Handshake"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, initial_plays: List[Action] = None) -> None:
+        super().__init__()
+        if not initial_plays:
+            initial_plays = [C, D]
+        self.initial_plays = initial_plays
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # Begin by playing the sequence C, D
+        index = len(self.history)
+        if index < len(self.initial_plays):
+            return self.initial_plays[index]
+        # If our opponent played [C, D] on the first two moves, cooperate
+        # forever. Otherwise defect forever.
+        if opponent.history[0 : len(self.initial_plays)] == self.initial_plays:
+            return C
+        return D
diff --git a/axelrod/strategies/hmm.py b/axelrod/strategies/hmm.py
new file mode 100644
index 000000000..59f7e9942
--- /dev/null
+++ b/axelrod/strategies/hmm.py
@@ -0,0 +1,389 @@
+from random import randrange
+import numpy.random as random
+from numpy.random import choice
+
+from axelrod.action import Action
+from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, copy_lists, crossover_lists
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice, random_vector
+
+C, D = Action.C, Action.D
+
+
+def is_stochastic_matrix(m, ep=1e-8) -> bool:
+    """Checks that the matrix m (a list of lists) is a stochastic matrix."""
+    for i in range(len(m)):
+        for j in range(len(m[i])):
+            if (m[i][j] < 0) or (m[i][j] > 1):
+                return False
+        s = sum(m[i])
+        if abs(1.0 - s) > ep:
+            return False
+    return True
+
+
+def normalize_vector(vec):
+    s = sum(vec)
+    vec = [v / s for v in vec]
+    return vec
+
+
+def mutate_row(row, mutation_probability):
+    """
+    Given a row of probabilities, randomly change each entry with probability
+    `mutation_probability` (a value between 0 and 1). If changing, then change
+    by a value randomly (uniformly) chosen from [-0.25, 0.25] bounded by 0 and
+    100%.
+    """
+    randoms = random.random(len(row))
+    for i in range(len(row)):
+        if randoms[i] < mutation_probability:
+            ep = random.uniform(-1, 1) / 4
+            row[i] += ep
+            if row[i] < 0:
+                row[i] = 0
+            if row[i] > 1:
+                row[i] = 1
+    return row
+
+
+class SimpleHMM(object):
+    """Implementation of a basic Hidden Markov Model. We assume that the
+    transition matrix is conditioned on the opponent's last action, so there
+    are two transition matrices. Emission distributions are stored as Bernoulli
+    probabilities for each state. This is essentially a stochastic FSM.
+ + https://en.wikipedia.org/wiki/Hidden_Markov_model + """ + + def __init__( + self, transitions_C, transitions_D, emission_probabilities, initial_state + ) -> None: + """ + Params + ------ + transitions_C and transitions_D are square stochastic matrices: + lists of lists with all values in [0, 1] and rows that sum to 1. + emission_probabilities is a vector of values in [0, 1] + initial_state is an element of range(0, len(emission_probabilities)) + """ + self.transitions_C = transitions_C + self.transitions_D = transitions_D + self.emission_probabilities = emission_probabilities + self.state = initial_state + + def is_well_formed(self) -> bool: + """ + Determines if the HMM parameters are well-formed: + - Both matrices are stochastic + - Emissions probabilities are in [0, 1] + - The initial state is valid. + """ + if not is_stochastic_matrix(self.transitions_C): + return False + if not is_stochastic_matrix(self.transitions_D): + return False + for p in self.emission_probabilities: + if (p < 0) or (p > 1): + return False + if self.state not in range(0, len(self.emission_probabilities)): + return False + return True + + def __eq__(self, other: IpdPlayer) -> bool: + """Equality of two HMMs""" + check = True + for attr in [ + "transitions_C", + "transitions_D", + "emission_probabilities", + "state", + ]: + check = check and getattr(self, attr) == getattr(other, attr) + return check + + def move(self, opponent_action: Action) -> Action: + """Changes state and computes the response action. + + Parameters + opponent_action: Axelrod.Action + The opponent's last action. + """ + num_states = len(self.emission_probabilities) + if opponent_action == C: + next_state = choice(num_states, 1, p=self.transitions_C[self.state]) + else: + next_state = choice(num_states, 1, p=self.transitions_D[self.state]) + self.state = next_state[0] + p = self.emission_probabilities[self.state] + action = random_choice(p) + return action + + +class HMMPlayer(IpdPlayer): + """ + Abstract base class for Hidden Markov Model players. 
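+
+    A two-state sketch of the expected parameter shapes (the values here are
+    hypothetical, chosen only for illustration):
+
+        player = HMMPlayer(
+            transitions_C=[[0.9, 0.1], [0.3, 0.7]],
+            transitions_D=[[0.2, 0.8], [0.1, 0.9]],
+            emission_probabilities=[0.8, 0.1],
+            initial_state=0,
+            initial_action=C,
+        )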
+ + Names + + - HMM IpdPlayer: Original name by Marc Harper + """ + + name = "HMM IpdPlayer" + + classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__( + self, + transitions_C=None, + transitions_D=None, + emission_probabilities=None, + initial_state=0, + initial_action=C + ) -> None: + super().__init__() + if not transitions_C: + transitions_C = [[1]] + transitions_D = [[1]] + emission_probabilities = [0.5] # Not stochastic + initial_state = 0 + self.initial_state = initial_state + self.initial_action = initial_action + self.hmm = SimpleHMM( + copy_lists(transitions_C), copy_lists(transitions_D), list(emission_probabilities), initial_state + ) + assert self.hmm.is_well_formed() + self.state = self.hmm.state + self.classifier["stochastic"] = self.is_stochastic() + + def is_stochastic(self) -> bool: + """Determines if the player is stochastic.""" + # If the transitions matrices and emission_probabilities are all 0 or 1 + # Then the player is stochastic + values = set(self.hmm.emission_probabilities) + for m in [self.hmm.transitions_C, self.hmm.transitions_D]: + for row in m: + values.update(row) + if not values.issubset({0, 1}): + return True + return False + + def strategy(self, opponent: IpdPlayer) -> Action: + if len(self.history) == 0: + return self.initial_action + else: + action = self.hmm.move(opponent.history[-1]) + # Record the state for testing purposes, this isn't necessary + # for the strategy to function + self.state = self.hmm.state + return action + + +class EvolvableHMMPlayer(HMMPlayer, EvolvablePlayer): + """Evolvable version of HMMPlayer.""" + name = "EvolvableHMMPlayer" + + def __init__( + self, + transitions_C=None, + transitions_D=None, + emission_probabilities=None, + initial_state=0, + initial_action=C, + num_states=None, + mutation_probability=None + ) -> None: + transitions_C, transitions_D, emission_probabilities, initial_state, initial_action, num_states, mutation_probability = self._normalize_parameters( + transitions_C, transitions_D, emission_probabilities, initial_state, initial_action, num_states, mutation_probability) + self.mutation_probability = mutation_probability + HMMPlayer.__init__(self, + transitions_C=transitions_C, + transitions_D=transitions_D, + emission_probabilities=emission_probabilities, + initial_state=initial_state, + initial_action=initial_action) + EvolvablePlayer.__init__(self) + self.overwrite_init_kwargs( + transitions_C=transitions_C, + transitions_D=transitions_D, + emission_probabilities=emission_probabilities, + initial_state=initial_state, + initial_action=initial_action, + num_states=num_states, + mutation_probability=mutation_probability + ) + + @classmethod + def _normalize_parameters(cls, transitions_C=None, transitions_D=None, emission_probabilities=None, + initial_state=None, initial_action=None, num_states=None, mutation_probability=None): + if not (transitions_C and transitions_D and emission_probabilities and (initial_state is not None) and (initial_action is not None)): + if not num_states: + raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableHMMPlayer") + transitions_C, transitions_D, emission_probabilities, initial_state, initial_action = cls.random_params( + num_states) + # Normalize types of various matrices + for m in [transitions_C, transitions_D]: + for i in range(len(m)): + m[i] = list(map(float, m[i])) + 
emission_probabilities = list(map(float, emission_probabilities)) + num_states = len(emission_probabilities) + if mutation_probability is None: + mutation_probability = 10 / (num_states ** 2) + else: + mutation_probability = mutation_probability + return transitions_C, transitions_D, emission_probabilities, initial_state, initial_action, num_states, mutation_probability + + @classmethod + def random_params(cls, num_states): + transitions_C = [] + transitions_D = [] + emission_probabilities = [] + for _ in range(num_states): + transitions_C.append(random_vector(num_states)) + transitions_D.append(random_vector(num_states)) + emission_probabilities.append(random.random()) + initial_state = randrange(num_states) + initial_action = C + return transitions_C, transitions_D, emission_probabilities, initial_state, initial_action + + @property + def num_states(self): + return len(self.hmm.emission_probabilities) + + @staticmethod + def mutate_rows(rows, mutation_probability): + for i, row in enumerate(rows): + row = mutate_row(row, mutation_probability) + rows[i] = normalize_vector(row) + return rows + + def mutate(self): + transitions_C = self.mutate_rows( + self.hmm.transitions_C, self.mutation_probability) + transitions_D = self.mutate_rows( + self.hmm.transitions_D, self.mutation_probability) + emission_probabilities = mutate_row( + self.hmm.emission_probabilities, self.mutation_probability) + initial_action = self.initial_action + if random.random() < self.mutation_probability / 10: + initial_action = self.initial_action.flip() + initial_state = self.initial_state + if random.random() < self.mutation_probability / (10 * self.num_states): + initial_state = randrange(self.num_states) + return self.create_new( + transitions_C=transitions_C, + transitions_D=transitions_D, + emission_probabilities=emission_probabilities, + initial_state=initial_state, + initial_action=initial_action, + ) + + def crossover(self, other): + if other.__class__ != self.__class__: + raise TypeError("Crossover must be between the same player classes.") + transitions_C = crossover_lists(self.hmm.transitions_C, other.hmm.transitions_C) + transitions_D = crossover_lists(self.hmm.transitions_D, other.hmm.transitions_D) + emission_probabilities = crossover_lists( + self.hmm.emission_probabilities, other.hmm.emission_probabilities) + return self.create_new( + transitions_C=transitions_C, + transitions_D=transitions_D, + emission_probabilities=emission_probabilities + ) + + def receive_vector(self, vector): + """ + Read a serialized vector into the set of HMM parameters (less initial + state). Then assign those HMM parameters to this class instance. + + Assert that the vector has the right number of elements for an HMMParams + class with self.num_states. + + Assume the first num_states^2 entries are the transitions_C matrix. The + next num_states^2 entries are the transitions_D matrix. Then the next + num_states entries are the emission_probabilities vector. Finally the last + entry is the initial_action. 
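+
+        For example, with num_states = 2 the vector has 2 * 2 ** 2 + 2 + 1 = 11
+        entries: indices 0-3 fill transitions_C, 4-7 fill transitions_D, 8-9
+        are the emission probabilities and index 10 sets the initial action.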
+ """ + + assert(len(vector) == 2 * self.num_states ** 2 + self.num_states + 1) + + def deserialize(vector): + matrix = [] + for i in range(self.num_states): + row = vector[self.num_states * i: self.num_states * (i + 1)] + row = normalize_vector(row) + matrix.append(row) + return matrix + + break_tc = self.num_states ** 2 + break_td = 2 * self.num_states ** 2 + break_ep = 2 * self.num_states ** 2 + self.num_states + initial_state = 0 + self.hmm = SimpleHMM( + deserialize(vector[0:break_tc]), + deserialize(vector[break_tc:break_td]), + normalize_vector(vector[break_td:break_ep]), + initial_state + ) + self.initial_action = C if round(vector[-1]) == 0 else D + self.initial_state = initial_state + + def create_vector_bounds(self): + """Creates the bounds for the decision variables.""" + vec_len = 2 * self.num_states ** 2 + self.num_states + 1 + lb = [0.0] * vec_len + ub = [1.0] * vec_len + return lb, ub + + +class EvolvedHMM5(HMMPlayer): + """ + An HMM-based player with five hidden states trained with an evolutionary + algorithm. + + Names: + + - Evolved HMM 5: Original name by Marc Harper + """ + + name = "Evolved HMM 5" + + classifier = { + "memory_depth": 5, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + initial_state = 3 + initial_action = C + t_C = [ + [1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 1, 0, 0, 0], + [0.631, 0, 0, 0.369, 0], + [0.143, 0.018, 0.118, 0, 0.721], + ] + + t_D = [ + [0, 1, 0, 0, 0], + [0, 0.487, 0.513, 0, 0], + [0, 0, 0, 0.590, 0.410], + [1, 0, 0, 0, 0], + [0, 0.287, 0.456, 0.146, 0.111], + ] + + emissions = [1, 0, 0, 1, 0.111] + super().__init__(t_C, t_D, emissions, initial_state, initial_action) diff --git a/axelrod/strategies/human.py b/axelrod/strategies/human.py new file mode 100644 index 000000000..ddf367b8d --- /dev/null +++ b/axelrod/strategies/human.py @@ -0,0 +1,175 @@ +from os import linesep + +from axelrod.action import Action +from axelrod.player import IpdPlayer +from prompt_toolkit import prompt +from prompt_toolkit.validation import ValidationError, Validator + +try: # pragma: no cover + from prompt_toolkit.styles import style_from_dict + from prompt_toolkit.token import Token + + token_toolbar = Token.Toolbar + bottom_toolbar_name = "get_bottom_toolbar_tokens" + PROMPT2 = False + +except ImportError: # prompt_toolkit v2 + from prompt_toolkit.styles import Style + + style_from_dict = Style.from_dict + token_toolbar = "pygments.toolbar" + bottom_toolbar_name = "bottom_toolbar" + PROMPT2 = True + +C, D = Action.C, Action.D + +toolbar_style = style_from_dict({token_toolbar: "#ffffff bg:#333333"}) + + +class ActionValidator(Validator): + """ + A class to validate input from prompt_toolkit.prompt + Described at http://python-prompt-toolkit.readthedocs.io/en/latest/pages/building_prompts.html#input-validation + """ + + def validate(self, document) -> None: + text = document.text + + if text and text.upper() not in ["C", "D"]: + raise ValidationError(message="Action must be C or D", cursor_position=0) + + +class Human(IpdPlayer): + """ + A strategy that prompts for keyboard input rather than deriving its own + action. + + This strategy is intended to be used interactively by a user playing + against other strategies from within the rest of the library. Unlike + other strategies, it is designed to be a teaching aid rather than a + research tool. 
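+
+    A minimal interactive sketch (assuming the library's Match object is
+    available as usual):
+
+        import axelrod as axl
+
+        me = axl.Human(name="me")
+        match = axl.Match((me, axl.TitForTat()), turns=3)
+        match.play()  # prompts for C or D each turn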
+ """ + + name = "Human" + classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(["length", "game"]), + "long_run_time": True, + "inspects_source": True, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, name="human", c_symbol="C", d_symbol="D"): + """ + Parameters + ---------- + name: string + The name of the human player + c_symbol: string + A symbol to denote cooperation within the history toolbar + and prompt + d_symbol: string + A symbol to denote defection within the history toolbar + and prompt + """ + super().__init__() + self.human_name = name + self.symbols = {C: c_symbol, D: d_symbol} + + def _history_toolbar(self): + """ + A prompt-toolkit function to define the bottom toolbar. + Described at http://python-prompt-toolkit.readthedocs.io/en/latest/pages/building_prompts.html#adding-a-bottom-toolbar + """ + my_history = [self.symbols[action] for action in self.history] + opponent_history = [self.symbols[action] for action in self.history.coplays] + history = list(zip(my_history, opponent_history)) + if self.history: + content = "History ({}, opponent): {}".format(self.human_name, history) + else: + content = "" + return content + + def _status_messages(self): + """ + A method to define the messages printed to the console and + displayed in the prompt-toolkit bottom toolbar. + + The bottom toolbar is defined only if a match is in progress. + + The console print statement is either the result of the previous + turn or a message indicating that new match is starting. + + Returns + ------- + dict + mapping print or toolbar to the relevant string + """ + if self.history: + toolbar = ( + self._history_toolbar + if PROMPT2 + else lambda cli: [(token_toolbar, self._history_toolbar())] + ) + print_statement = "{}Turn {}: {} played {}, opponent played {}".format( + linesep, + len(self.history), + self.human_name, + self.symbols[self.history[-1]], + self.symbols[self.history.coplays[-1]], + ) + else: + toolbar = None + print_statement = "{}Starting new match".format(linesep) + + return {"toolbar": toolbar, "print": print_statement} + + def _get_human_input(self) -> Action: # pragma: no cover + """ + A method to prompt the user for input, validate it and display + the bottom toolbar. + + Returns + ------- + string + Uppercase C or D indicating the action to play + """ + action = prompt( + "Turn {} action [C or D] for {}: ".format( + len(self.history) + 1, self.human_name + ), + validator=ActionValidator(), + style=toolbar_style, + **{bottom_toolbar_name: self.status_messages["toolbar"]}, + ) + + return Action.from_char(action.upper()) + + def strategy(self, opponent: IpdPlayer, input_function=None): + """ + Ordinarily, the strategy prompts for keyboard input rather than + deriving its own action. + + However, it is also possible to pass a function which returns a valid + action. This is mainly used for testing purposes in order to by-pass + the need for human interaction. 
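+
+        For example, a test can bypass the prompt with:
+
+            action = player.strategy(opponent, input_function=lambda: C)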
+        """
+
+        self.status_messages = self._status_messages()
+        print(self.status_messages["print"])
+
+        if not input_function:  # pragma: no cover
+            action = self._get_human_input()
+        else:
+            action = input_function()
+
+        return action
+
+    def __repr__(self):
+        """
+        Override the default __repr__ of the class
+        """
+        return "Human: {}".format(self.human_name)
diff --git a/axelrod/strategies/hunter.py b/axelrod/strategies/hunter.py
new file mode 100644
index 000000000..65fbe7c7a
--- /dev/null
+++ b/axelrod/strategies/hunter.py
@@ -0,0 +1,255 @@
+from typing import List, Optional, Tuple
+
+from axelrod._strategy_utils import detect_cycle
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class DefectorHunter(IpdPlayer):
+    """A player who hunts for defectors.
+
+    Names:
+
+    - Defector Hunter: Original name by Karol Langner
+    """
+
+    name = "Defector Hunter"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(self.history) >= 4 and len(opponent.history) == opponent.defections:
+            return D
+        return C
+
+
+class CooperatorHunter(IpdPlayer):
+    """A player who hunts for cooperators.
+
+    Names:
+
+    - Cooperator Hunter: Original name by Karol Langner
+    """
+
+    name = "Cooperator Hunter"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(self.history) >= 4 and len(opponent.history) == opponent.cooperations:
+            return D
+        return C
+
+
+def is_alternator(history: List[Action]) -> bool:
+    for i in range(len(history) - 1):
+        if history[i] == history[i + 1]:
+            return False
+    return True
+
+
+class AlternatorHunter(IpdPlayer):
+    """A player who hunts for alternators.
+
+    Names:
+
+    - Alternator Hunter: Original name by Karol Langner
+    """
+
+    name = "Alternator Hunter"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.is_alt = False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(opponent.history) < 6:
+            return C
+        if len(self.history) == 6:
+            if is_alternator(opponent.history):
+                self.is_alt = True
+        if self.is_alt:
+            return D
+        return C
+
+
+class CycleHunter(IpdPlayer):
+    """Hunts strategies that play cyclically, like any of the Cyclers,
+    Alternator, etc.
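+
+    Cycle detection uses detect_cycle from axelrod._strategy_utils; for
+    example, detect_cycle([C, D, C, C, D, C], min_size=3) recovers the
+    repeating block (C, D, C).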
+
+    Names:
+
+    - Cycle Hunter: Original name by Marc Harper
+    """
+
+    name = "Cycle Hunter"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.cycle = None  # type: Optional[Tuple[Action]]
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if self.cycle:
+            return D
+        cycle = detect_cycle(opponent.history, min_size=3)
+        if cycle:
+            if len(set(cycle)) > 1:
+                self.cycle = cycle
+                return D
+        return C
+
+
+class EventualCycleHunter(CycleHunter):
+    """Hunts strategies that eventually play cyclically.
+
+    Names:
+
+    - Eventual Cycle Hunter: Original name by Marc Harper
+    """
+
+    name = "Eventual Cycle Hunter"
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(opponent.history) < 10:
+            return C
+        if len(opponent.history) == opponent.cooperations:
+            return C
+        if len(opponent.history) % 10 == 0:
+            # recheck
+            self.cycle = detect_cycle(opponent.history, offset=10, min_size=3)
+        if self.cycle:
+            return D
+        else:
+            return C
+
+
+class MathConstantHunter(IpdPlayer):
+    """A player who hunts for mathematical constant players.
+
+    Names:
+
+    - Math Constant Hunter: Original name by Karol Langner
+    """
+
+    name = "Math Constant Hunter"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        """
+        Check whether the numbers of cooperations in the first and second
+        halves of the history are close. The variance of a fair coin flip
+        (1/4) gives a reasonable scale for the delta, but use something lower
+        for certainty and to avoid false positives. This approach will also
+        detect a lot of random players.
+        """
+
+        n = len(self.history)
+        if n >= 8 and opponent.cooperations and opponent.defections:
+            start1, end1 = 0, n // 2
+            start2, end2 = n // 4, 3 * n // 4
+            start3, end3 = n // 2, n
+            count1 = opponent.history[start1:end1].count(C) + self.history[
+                start1:end1
+            ].count(C)
+            count2 = opponent.history[start2:end2].count(C) + self.history[
+                start2:end2
+            ].count(C)
+            count3 = opponent.history[start3:end3].count(C) + self.history[
+                start3:end3
+            ].count(C)
+            ratio1 = 0.5 * count1 / (end1 - start1)
+            ratio2 = 0.5 * count2 / (end2 - start2)
+            ratio3 = 0.5 * count3 / (end3 - start3)
+            if abs(ratio1 - ratio2) < 0.2 and abs(ratio1 - ratio3) < 0.2:
+                return D
+        return C
+
+
+class RandomHunter(IpdPlayer):
+    """A player who hunts for random players.
+
+    Names:
+
+    - Random Hunter: Original name by Karol Langner
+    """
+
+    name = "Random Hunter"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        self.countCC = 0
+        self.countDD = 0
+        super().__init__()
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        """
+        A random player is unpredictable, which means the conditional frequency
+        of cooperation after cooperation, and defection after defections, should
+        be close to 50%... although how close is debatable.
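+
+        For illustration (hypothetical counts): if after 30 turns the opponent
+        responded C to 12 of this player's 20 cooperations, the conditional
+        frequency is 12 / 20 = 0.6; since |0.6 - 0.5| < 0.25, the opponent
+        still looks random and the hunter defects.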
+        """
+        # Update counts
+        if len(self.history) > 1:
+            if self.history[-2] == C and opponent.history[-1] == C:
+                self.countCC += 1
+            if self.history[-2] == D and opponent.history[-1] == D:
+                self.countDD += 1
+
+        n = len(self.history)
+        if n > 10:
+            probabilities = []
+            if self.cooperations > 5:
+                probabilities.append(self.countCC / self.cooperations)
+            if self.defections > 5:
+                probabilities.append(self.countDD / self.defections)
+            if probabilities and all([abs(p - 0.5) < 0.25 for p in probabilities]):
+                return D
+        return C
diff --git a/axelrod/strategies/inverse.py b/axelrod/strategies/inverse.py
new file mode 100644
index 000000000..8f7d566d1
--- /dev/null
+++ b/axelrod/strategies/inverse.py
@@ -0,0 +1,48 @@
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice
+
+C, D = Action.C, Action.D
+
+
+class Inverse(IpdPlayer):
+    """A player who defects with a probability that diminishes relative to how
+    long ago the opponent defected.
+
+    Names:
+
+    - Inverse: Original Name by Karol Langner
+    """
+
+    name = "Inverse"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def strategy(opponent: IpdPlayer) -> Action:
+        """Looks at opponent history to see if they have defected.
+
+        If so, player defection is inversely proportional to when this occurred.
+        """
+
+        # calculate how many turns ago the opponent defected
+        index = next(
+            (
+                index
+                for index, value in enumerate(opponent.history[::-1], start=1)
+                if value == D
+            ),
+            None,
+        )
+
+        if index is None:
+            return C
+
+        return random_choice(1 - 1 / abs(index))
diff --git a/axelrod/strategies/lookerup.py b/axelrod/strategies/lookerup.py
new file mode 100644
index 000000000..69555fea7
--- /dev/null
+++ b/axelrod/strategies/lookerup.py
@@ -0,0 +1,580 @@
+from collections import namedtuple
+from itertools import product
+from typing import Any, TypeVar
+
+import numpy.random as random
+from numpy.random import choice
+
+from axelrod.action import Action, actions_to_str, str_to_actions
+from axelrod.evolvable_player import EvolvablePlayer, InsufficientParametersError, crossover_dictionaries
+from axelrod.player import IpdPlayer
+
+
+C, D = Action.C, Action.D
+actions = (C, D)
+
+Plays = namedtuple("Plays", "self_plays, op_plays, op_openings")
+Reaction = TypeVar("Reaction", Action, float)
+
+
+class LookupTable(object):
+    """
+    LookerUp and its children use this object to determine their next actions.
+
+    It is an object that creates a table of all possible plays to a specified
+    depth and the action to be returned for each combination of plays.
+    The "get" method returns the appropriate response.
+    For the table containing::
+
+        ....
+        Plays(self_plays=(C, C), op_plays=(C, D), op_openings=(D, C)): D
+        Plays(self_plays=(C, C), op_plays=(C, D), op_openings=(D, D)): C
+        ...
+
+    with:
+    player.history[-2:]=[C, C] and
+    opponent.history[-2:]=[C, D] and
+    opponent.history[:2]=[D, D],
+    calling LookupTable.get(plays=(C, C), op_plays=(C, D), op_openings=(D, D))
+    will return C.
+
+    Instantiate the table with a lookup_dict. This is
+    {(self_plays_tuple, op_plays_tuple, op_openings_tuple): action, ...}.
+    It must contain every possible
+    permutation with C's and D's of the above tuple.
so:: + + good_dict = {((C,), (C,), ()): C, + ((C,), (D,), ()): C, + ((D,), (C,), ()): D, + ((D,), (D,), ()): C} + + bad_dict = {((C,), (C,), ()): C, + ((C,), (D,), ()): C, + ((D,), (C,), ()): D} + + LookupTable.from_pattern() creates an ordered list of keys for you and maps + the pattern to the keys.:: + + LookupTable.from_pattern(pattern=(C, D, D, C), + player_depth=0, op_depth=1, op_openings_depth=1 + ) + + creates the dictionary:: + + {Plays(self_plays=(), op_plays=(C), op_openings=(C)): C, + Plays(self_plays=(), op_plays=(C), op_openings=(D)): D, + Plays(self_plays=(), op_plays=(D), op_openings=(C)): D, + Plays(self_plays=(), op_plays=(D), op_openings=(D)): C,} + + and then returns a LookupTable with that dictionary. + """ + + def __init__(self, lookup_dict: dict) -> None: + self._dict = make_keys_into_plays(lookup_dict) + + sample_key = next(iter(self._dict)) + self._plays_depth = len(sample_key.self_plays) + self._op_plays_depth = len(sample_key.op_plays) + self._op_openings_depth = len(sample_key.op_openings) + self._table_depth = max( + self._plays_depth, self._op_plays_depth, self._op_openings_depth + ) + self._raise_error_for_bad_lookup_dict() + + def _raise_error_for_bad_lookup_dict(self): + if any( + len(key.self_plays) != self._plays_depth + or len(key.op_plays) != self._op_plays_depth + or len(key.op_openings) != self._op_openings_depth + for key in self._dict + ): + raise ValueError("Lookup table keys are not all the same size.") + total_key_combinations = 2 ** ( + self._plays_depth + self._op_plays_depth + self._op_openings_depth + ) + if total_key_combinations != len(self._dict): + msg = ( + "Lookup table does not have enough keys" + + " to cover all possibilities." + ) + raise ValueError(msg) + + @classmethod + def from_pattern( + cls, pattern: tuple, player_depth: int, op_depth: int, op_openings_depth: int + ): + keys = create_lookup_table_keys( + player_depth=player_depth, + op_depth=op_depth, + op_openings_depth=op_openings_depth, + ) + if len(keys) != len(pattern): + msg = "Pattern must be len: {}, but was len: {}".format( + len(keys), len(pattern) + ) + raise ValueError(msg) + input_dict = dict(zip(keys, pattern)) + return cls(input_dict) + + def get(self, plays: tuple, op_plays: tuple, op_openings: tuple) -> Any: + return self._dict[ + Plays(self_plays=plays, op_plays=op_plays, op_openings=op_openings) + ] + + @property + def player_depth(self) -> int: + return self._plays_depth + + @property + def op_depth(self) -> int: + return self._op_plays_depth + + @property + def op_openings_depth(self) -> int: + return self._op_openings_depth + + @property + def table_depth(self) -> int: + return self._table_depth + + @property + def dictionary(self) -> dict: + return self._dict.copy() + + def display( + self, sort_by: tuple = ("op_openings", "self_plays", "op_plays") + ) -> str: + """ + Returns a string for printing lookup_table info in specified order. 
+ + :param sort_by: only_elements='self_plays', 'op_plays', 'op_openings' + """ + + def sorter(plays): + return tuple(actions_to_str(getattr(plays, field) for field in sort_by)) + + col_width = 11 + sorted_keys = sorted(self._dict, key=sorter) + header_line = ( + "{str_list[0]:^{width}}|" + + "{str_list[1]:^{width}}|" + + "{str_list[2]:^{width}}" + ) + display_line = header_line.replace("|", ",") + ": {str_list[3]}," + + def make_commaed_str(action_tuple): + return ", ".join(str(action) for action in action_tuple) + + line_elements = [ + ( + make_commaed_str(getattr(key, sort_by[0])), + make_commaed_str(getattr(key, sort_by[1])), + make_commaed_str(getattr(key, sort_by[2])), + self._dict[key], + ) + for key in sorted_keys + ] + header = header_line.format(str_list=sort_by, width=col_width) + "\n" + lines = [ + display_line.format(str_list=line, width=col_width) + for line in line_elements + ] + return header + "\n".join(lines) + "\n" + + def __eq__(self, other) -> bool: + if not isinstance(other, LookupTable): + return False + return self._dict == other.dictionary + + +def make_keys_into_plays(lookup_table: dict) -> dict: + """Returns a dict where all keys are Plays.""" + new_table = lookup_table.copy() + if any(not isinstance(key, Plays) for key in new_table): + new_table = {Plays(*key): value for key, value in new_table.items()} + return new_table + + +def create_lookup_table_keys( + player_depth: int, op_depth: int, op_openings_depth: int +) -> list: + """Returns a list of Plays that has all possible permutations of C's and + D's for each specified depth. the list is in order, + C < D sorted by ((player_tuple), (op_tuple), (op_openings_tuple)). + create_lookup_keys(2, 1, 0) returns:: + + [Plays(self_plays=(C, C), op_plays=(C,), op_openings=()), + Plays(self_plays=(C, C), op_plays=(D,), op_openings=()), + Plays(self_plays=(C, D), op_plays=(C,), op_openings=()), + Plays(self_plays=(C, D), op_plays=(D,), op_openings=()), + Plays(self_plays=(D, C), op_plays=(C,), op_openings=()), + Plays(self_plays=(D, C), op_plays=(D,), op_openings=()), + Plays(self_plays=(D, D), op_plays=(C,), op_openings=()), + Plays(self_plays=(D, D), op_plays=(D,), op_openings=())] + + """ + self_plays = product((C, D), repeat=player_depth) + op_plays = product((C, D), repeat=op_depth) + op_openings = product((C, D), repeat=op_openings_depth) + + iterator = product(self_plays, op_plays, op_openings) + return [Plays(*plays_tuple) for plays_tuple in iterator] + + +default_tft_lookup_table = { + Plays(self_plays=(), op_plays=(D,), op_openings=()): D, + Plays(self_plays=(), op_plays=(C,), op_openings=()): C, +} + + +class LookerUp(IpdPlayer): + """ + This strategy uses a LookupTable to decide its next action. If there is not + enough history to use the table, it calls from a list of + self.initial_actions. + + if self_depth=2, op_depth=3, op_openings_depth=5, LookerUp finds the last 2 + plays of self, the last 3 plays of opponent and the opening 5 plays of + opponent. It then looks those up on the LookupTable and returns the + appropriate action. If 5 rounds have not been played (the minimum required + for op_openings_depth), it calls from self.initial_actions. + + LookerUp can be instantiated with a dictionary. The dictionary uses + tuple(tuple, tuple, tuple) or Plays as keys. for example. 
+ + - self_plays: depth=2 + - op_plays: depth=1 + - op_openings: depth=0:: + + {Plays((C, C), (C), ()): C, + Plays((C, C), (D), ()): D, + Plays((C, D), (C), ()): D, <- example below + Plays((C, D), (D), ()): D, + Plays((D, C), (C), ()): C, + Plays((D, C), (D), ()): D, + Plays((D, D), (C), ()): C, + Plays((D, D), (D), ()): D} + + From the above table, if the player last played C, D and the opponent last + played C (here the initial opponent play is ignored) then this round, + the player would play D. + + The dictionary must contain all possible permutations of C's and D's. + + LookerUp can also be instantiated with `pattern=str/tuple` of actions, and:: + + parameters=Plays( + self_plays=player_depth: int, + op_plays=op_depth: int, + op_openings=op_openings_depth: int) + + It will create keys of len=2 ** (sum(parameters)) and map the pattern to + the keys. + + initial_actions is a tuple such as (C, C, D). A table needs initial actions + equal to max(self_plays depth, opponent_plays depth, opponent_initial_plays + depth). If provided initial_actions is too long, the extra will be ignored. + If provided initial_actions is too short, the shortfall will be made up + with C's. + + Some well-known strategies can be expressed as special cases; for example + Cooperator is given by the dict (All history is ignored and always play C):: + + {Plays((), (), ()) : C} + + + Tit-For-Tat is given by (The only history that is important is the + opponent's last play.):: + + {Plays((), (D,), ()): D, + Plays((), (C,), ()): C} + + + LookerUp's LookupTable defaults to Tit-For-Tat. The initial_actions + defaults to playing C. + + Names: + + - Lookerup: Original name by Martin Jones + """ + + name = "LookerUp" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + default_tft_lookup_table = { + Plays(self_plays=(), op_plays=(D,), op_openings=()): D, + Plays(self_plays=(), op_plays=(C,), op_openings=()): C, + } + + def __init__( + self, + lookup_dict: dict = None, + initial_actions: tuple = None, + pattern: Any = None, # pattern is str or tuple of Action's. 
+ parameters: Plays = None + ) -> None: + + super().__init__() + self.parameters = parameters + self.pattern = pattern + self._lookup = self._get_lookup_table(lookup_dict, pattern, parameters) + self._set_memory_depth() + self.initial_actions = self._get_initial_actions(initial_actions) + self._initial_actions_pool = list(self.initial_actions) + + @classmethod + def _get_lookup_table( + cls, lookup_dict: dict, pattern: Any, parameters: tuple + ) -> LookupTable: + if lookup_dict: + return LookupTable(lookup_dict=lookup_dict) + if pattern is not None and parameters is not None: + if isinstance(pattern, str): + pattern = str_to_actions(pattern) + self_depth, op_depth, op_openings_depth = parameters + return LookupTable.from_pattern( + pattern, self_depth, op_depth, op_openings_depth + ) + return LookupTable(default_tft_lookup_table) + + def _set_memory_depth(self): + if self._lookup.op_openings_depth == 0: + self.classifier["memory_depth"] = self._lookup.table_depth + else: + self.classifier["memory_depth"] = float("inf") + + def _get_initial_actions(self, initial_actions: tuple) -> tuple: + """Initial actions will always be cut down to table_depth.""" + table_depth = self._lookup.table_depth + if not initial_actions: + return tuple([C] * table_depth) + initial_actions_shortfall = table_depth - len(initial_actions) + if initial_actions_shortfall > 0: + return initial_actions + tuple([C] * initial_actions_shortfall) + return initial_actions[:table_depth] + + def strategy(self, opponent: IpdPlayer) -> Reaction: + turn_index = len(opponent.history) + while turn_index < len(self._initial_actions_pool): + return self._initial_actions_pool[turn_index] + + player_last_n_plays = get_last_n_plays( + player=self, depth=self._lookup.player_depth + ) + opponent_last_n_plays = get_last_n_plays( + player=opponent, depth=self._lookup.op_depth + ) + opponent_initial_plays = tuple( + opponent.history[: self._lookup.op_openings_depth] + ) + + return self._lookup.get( + player_last_n_plays, opponent_last_n_plays, opponent_initial_plays + ) + + @property + def lookup_dict(self): + return self._lookup.dictionary + + def lookup_table_display( + self, sort_by: tuple = ("op_openings", "self_plays", "op_plays") + ) -> str: + """ + Returns a string for printing lookup_table info in specified order. + + :param sort_by: only_elements='self_plays', 'op_plays', 'op_openings' + """ + return self._lookup.display(sort_by=sort_by) + + +class EvolvableLookerUp(LookerUp, EvolvablePlayer): + name = "EvolvableLookerUp" + + def __init__( + self, + lookup_dict: dict = None, + initial_actions: tuple = None, + pattern: Any = None, # pattern is str or tuple of Action's. 
+ parameters: Plays = None, + mutation_probability: float = None + ) -> None: + lookup_dict, initial_actions, pattern, parameters, mutation_probability = self._normalize_parameters( + lookup_dict, initial_actions, pattern, parameters, mutation_probability + ) + LookerUp.__init__( + self, + lookup_dict=lookup_dict, + initial_actions=initial_actions, + pattern=pattern, + parameters=parameters, + ) + EvolvablePlayer.__init__(self) + self.mutation_probability = mutation_probability + self.overwrite_init_kwargs( + lookup_dict=lookup_dict, + initial_actions=initial_actions, + pattern=pattern, + parameters=parameters, + mutation_probability=mutation_probability, + ) + + @classmethod + def _normalize_parameters(cls, lookup_dict=None, initial_actions=None, pattern=None, parameters=None, + mutation_probability=None): + if lookup_dict and initial_actions: + # Compute the associated pattern and parameters + # Map the table keys to namedTuple Plays + lookup_table = cls._get_lookup_table(lookup_dict, pattern, parameters) + lookup_dict = lookup_table.dictionary + parameters = (lookup_table.player_depth, lookup_table.op_depth, lookup_table.op_openings_depth) + pattern = tuple(v for k, v in sorted(lookup_dict.items())) + elif pattern and parameters and initial_actions: + # Compute the associated lookup table + plays, op_plays, op_start_plays = parameters + lookup_table = cls._get_lookup_table(lookup_dict, pattern, parameters) + lookup_dict = lookup_table.dictionary + elif parameters: + # Generate a random pattern and (maybe) initial actions + plays, op_plays, op_start_plays = parameters + pattern, lookup_table = cls.random_params(plays, op_plays, op_start_plays) + lookup_dict = lookup_table.dictionary + if not initial_actions: + num_actions = max([plays, op_plays, op_start_plays]) + initial_actions = tuple([choice((C, D)) for _ in range(num_actions)]) + else: + raise InsufficientParametersError("Insufficient Parameters to instantiate EvolvableLookerUp") + # Normalize pattern + if isinstance(pattern, str): + pattern = str_to_actions(pattern) + pattern = tuple(pattern) + if mutation_probability is None: + plays, op_plays, op_start_plays = parameters + keys = create_lookup_table_keys(plays, op_plays, op_start_plays) + mutation_probability = 2. 
/ len(keys) + return lookup_dict, initial_actions, pattern, parameters, mutation_probability + + @classmethod + def random_value(cls): + return choice(actions) + + @classmethod + def random_params(cls, plays, op_plays, op_start_plays): + keys = create_lookup_table_keys(plays, op_plays, op_start_plays) + # To get a pattern, we just randomly pick between C and D for each key + pattern = [cls.random_value() for _ in keys] + table = dict(zip(keys, pattern)) + return pattern, LookupTable(table) + + @classmethod + def mutate_value(cls, value): + return value.flip() + + @classmethod + def mutate_table(cls, table, mutation_probability): + randoms = random.random(len(table.keys())) + # Flip each value with a probability proportional to the mutation rate + for i, (history, move) in enumerate(table.items()): + if randoms[i] < mutation_probability: + table[history] = cls.mutate_value(move) + return table + + def mutate(self): + lookup_dict = self.mutate_table(self.lookup_dict, self.mutation_probability) + # Add in starting moves + initial_actions = list(self.initial_actions) + for i in range(len(initial_actions)): + r = random.random() + if r < self.mutation_probability: + initial_actions[i] = initial_actions[i].flip() + return self.create_new( + lookup_dict=lookup_dict, + initial_actions=tuple(initial_actions), + ) + + def crossover(self, other): + if other.__class__ != self.__class__: + raise TypeError("Crossover must be between the same player classes.") + lookup_dict = crossover_dictionaries(self.lookup_dict, other.lookup_dict) + return self.create_new(lookup_dict=lookup_dict) + + +class EvolvedLookerUp1_1_1(LookerUp): + """ + A 1 1 1 Lookerup trained with an evolutionary algorithm. + + Names: + + - Evolved Lookerup 1 1 1: Original name by Marc Harper + """ + + name = "EvolvedLookerUp1_1_1" + + def __init__(self) -> None: + params = Plays(self_plays=1, op_plays=1, op_openings=1) + super().__init__(parameters=params, pattern="CDDDDCDD", initial_actions=(C,)) + + +class EvolvedLookerUp2_2_2(LookerUp): + """ + A 2 2 2 Lookerup trained with an evolutionary algorithm. + + Names: + + - Evolved Lookerup 2 2 2: Original name by Marc Harper + """ + + name = "EvolvedLookerUp2_2_2" + + def __init__(self) -> None: + params = Plays(self_plays=2, op_plays=2, op_openings=2) + pattern = "CDDCDCDDCDDDCDDDDDCDCDCCCDDCCDCDDDCCCCCDDDCDDDDDDDDDCCDDCDDDCCCD" + super().__init__(parameters=params, pattern=pattern, initial_actions=(C, C)) + + +class Winner12(LookerUp): + """ + A lookup table based strategy. + + Names: + + - Winner12: [Mathieu2015]_ + """ + + name = "Winner12" + + def __init__(self) -> None: + params = Plays(self_plays=1, op_plays=2, op_openings=0) + pattern = "CDCDDCDD" + super().__init__(parameters=params, pattern=pattern, initial_actions=(C, C)) + + +class Winner21(LookerUp): + """ + A lookup table based strategy. 
+
+    Names:
+
+    - Winner21: [Mathieu2015]_
+    """
+
+    name = "Winner21"
+
+    def __init__(self) -> None:
+        params = Plays(self_plays=1, op_plays=2, op_openings=0)
+        pattern = "CDCDCDDD"
+        super().__init__(parameters=params, pattern=pattern, initial_actions=(D, C))
+
+
+def get_last_n_plays(player: IpdPlayer, depth: int) -> tuple:
+    """Returns the last N plays of player as a tuple."""
+    if depth == 0:
+        return ()
+    return tuple(player.history[-1 * depth :])
diff --git a/axelrod/strategies/mathematicalconstants.py b/axelrod/strategies/mathematicalconstants.py
new file mode 100644
index 000000000..839e957c8
--- /dev/null
+++ b/axelrod/strategies/mathematicalconstants.py
@@ -0,0 +1,79 @@
+import math
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class CotoDeRatio(IpdPlayer):
+    """The player will always aim to bring the ratio of co-operations to
+    defections closer to the ratio as given in a sub class
+
+    Names:
+
+    - Co to Do Ratio: Original Name by Timothy Standen
+    """
+
+    classifier = {
+        "stochastic": False,
+        "memory_depth": float("inf"),  # Long memory
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # Initially cooperate
+        if len(opponent.history) == 0:
+            return C
+        # Avoid initial division by zero
+        if not opponent.defections:
+            return D
+        # Otherwise compare the ratio to the subclass's target ratio
+        cooperations = opponent.cooperations + self.cooperations
+        defections = opponent.defections + self.defections
+        if cooperations / defections > self.ratio:
+            return D
+        return C
+
+
+class Golden(CotoDeRatio):
+    """The player will always aim to bring the ratio of co-operations to
+    defections closer to the golden mean
+
+    Names:
+
+    - Golden: Original Name by Timothy Standen
+    """
+
+    name = r"$\phi$"
+    ratio = (1 + math.sqrt(5)) / 2
+
+
+class Pi(CotoDeRatio):
+    """The player will always aim to bring the ratio of co-operations to
+    defections closer to pi
+
+    Names:
+
+    - Pi: Original Name by Timothy Standen
+    """
+
+    name = r"$\pi$"
+    ratio = math.pi
+
+
+class e(CotoDeRatio):
+    """The player will always aim to bring the ratio of co-operations to
+    defections closer to e
+
+    Names:
+
+    - e: Original Name by Timothy Standen
+    """
+
+    name = "$e$"
+    ratio = math.e
diff --git a/axelrod/strategies/memoryone.py b/axelrod/strategies/memoryone.py
new file mode 100644
index 000000000..c5bad7526
--- /dev/null
+++ b/axelrod/strategies/memoryone.py
@@ -0,0 +1,343 @@
+"""Memory One strategies. Note that there are Memory One strategies in other
+files, including titfortat.py and zero_determinant.py"""
+
+import warnings
+from typing import Tuple
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice
+
+C, D = Action.C, Action.D
+
+
+class MemoryOnePlayer(IpdPlayer):
+    """
+    Uses a four-vector for strategies based on the last round of play,
+    (P(C|CC), P(C|CD), P(C|DC), P(C|DD)). Win-Stay Lose-Shift is set as
+    the default player if four_vector is not given.
+    Intended to be used as an abstract base class or to at least be supplied
+    with an initializing four_vector.
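+
+    For example (illustrative), ``MemoryOnePlayer((1, 0, 1, 0))`` reproduces
+    Tit For Tat: it cooperates with certainty whenever the opponent's last
+    move was C and defects whenever it was D.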
+ + Names + + - Memory One: [Nowak1990]_ + """ + + name = "Generic Memory One IpdPlayer" + classifier = { + "memory_depth": 1, # Memory-one Four-Vector + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__( + self, four_vector: Tuple[float, float, float, float] = None, initial: Action = C + ) -> None: + """ + Parameters + ---------- + + fourvector: list or tuple of floats of length 4 + The response probabilities to the preceding round of play + ( P(C|CC), P(C|CD), P(C|DC), P(C|DD) ) + initial: C or D + The initial move + + Special Cases + ------------- + + Alternator is equivalent to MemoryOnePlayer((0, 0, 1, 1), C) + Cooperator is equivalent to MemoryOnePlayer((1, 1, 1, 1), C) + Defector is equivalent to MemoryOnePlayer((0, 0, 0, 0), C) + Random is equivalent to MemoryOnePlayer((0.5, 0.5, 0.5, 0.5)) + (with a random choice for the initial state) + TitForTat is equivalent to MemoryOnePlayer((1, 0, 1, 0), C) + WinStayLoseShift is equivalent to MemoryOnePlayer((1, 0, 0, 1), C) + + See also: The remaining strategies in this file + Multiple strategies in titfortat.py + Grofman, Joss in axelrod_tournaments.py + """ + super().__init__() + self._initial = initial + self.set_initial_four_vector(four_vector) + + def set_initial_four_vector(self, four_vector): + if four_vector is None: + four_vector = (1, 0, 0, 1) + warnings.warn("Memory one player is set to default (1, 0, 0, 1).") + + self.set_four_vector(four_vector) + if self.name == "Generic Memory One IpdPlayer": + self.name = "%s: %s" % (self.name, four_vector) + + def set_four_vector(self, four_vector: Tuple[float, float, float, float]): + if not all(0 <= p <= 1 for p in four_vector): + raise ValueError( + "An element in the probability vector, {}, is not " + "between 0 and 1.".format(str(four_vector)) + ) + + self._four_vector = dict(zip([(C, C), (C, D), (D, C), (D, D)], four_vector)) + self.classifier["stochastic"] = any(0 < x < 1 for x in set(four_vector)) + + def strategy(self, opponent: IpdPlayer) -> Action: + if len(opponent.history) == 0: + return self._initial + # Determine which probability to use + p = self._four_vector[(self.history[-1], opponent.history[-1])] + # Draw a random number in [0, 1] to decide + return random_choice(p) + + +class WinStayLoseShift(MemoryOnePlayer): + """ + Win-Stay Lose-Shift, also called Pavlov. + + Names: + + - Win Stay Lose Shift: [Nowak1993]_ + - WSLS: [Stewart2012]_ + - Pavlov: [Kraines1989]_ + """ + + name = "Win-Stay Lose-Shift" + classifier = { + "memory_depth": 1, # Memory-one Four-Vector + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, initial: Action = C) -> None: + four_vector = (1, 0, 0, 1) + super().__init__(four_vector) + self._initial = initial + + +class WinShiftLoseStay(MemoryOnePlayer): + """Win-Shift Lose-Stay, also called Reverse Pavlov. 
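+
+    Its four-vector (0, 1, 1, 0) is the elementwise complement of Win-Stay
+    Lose-Shift's (1, 0, 0, 1), and its default opening move is D.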
+
+    Names:
+
+    - WSLS: [Li2011]_
+    """
+
+    name = "Win-Shift Lose-Stay"
+    classifier = {
+        "memory_depth": 1,  # Memory-one Four-Vector
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, initial: Action = D) -> None:
+        four_vector = (0, 1, 1, 0)
+        super().__init__(four_vector)
+        self._initial = initial
+
+
+class GTFT(MemoryOnePlayer):
+    """Generous Tit For Tat Strategy.
+
+    Names:
+
+    - Generous Tit For Tat: [Nowak1993]_
+    - Naive peace maker: [Gaudesi2016]_
+    - Soft Joss: [Gaudesi2016]_
+    """
+
+    name = "GTFT"
+    classifier = {
+        "memory_depth": 1,  # Memory-one Four-Vector
+        "stochastic": True,
+        "makes_use_of": set(["game"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, p: float = None) -> None:
+        """
+        Parameters
+
+        p, float
+            A parameter used to compute the four-vector
+
+        Special Cases
+
+        TitForTat is equivalent to GTFT(0)
+        """
+        self.p = p
+        super().__init__()
+
+    def set_initial_four_vector(self, four_vector):
+        pass
+
+    def receive_match_attributes(self):
+        (R, P, S, T) = self.match_attributes["game"].RPST()
+        if self.p is None:
+            self.p = min(1 - (T - R) / (R - S), (R - P) / (T - P))
+        four_vector = [1, self.p, 1, self.p]
+        self.set_four_vector(four_vector)
+
+    def __repr__(self) -> str:
+        assert self.p is not None
+        return "%s: %s" % (self.name, round(self.p, 2))
+
+
+class FirmButFair(MemoryOnePlayer):
+    """A strategy that cooperates on the first move, and cooperates except after
+    receiving a sucker payoff.
+
+    Names:
+
+    - Firm But Fair: [Frean1994]_"""
+
+    name = "Firm But Fair"
+
+    def __init__(self) -> None:
+        four_vector = (1, 0, 1, 2 / 3)
+        super().__init__(four_vector)
+
+
+class StochasticCooperator(MemoryOnePlayer):
+    """Stochastic Cooperator.
+
+    Names:
+
+    - Stochastic Cooperator: [Adami2013]_
+    """
+
+    name = "Stochastic Cooperator"
+
+    def __init__(self) -> None:
+        four_vector = (0.935, 0.229, 0.266, 0.42)
+        super().__init__(four_vector)
+
+
+class StochasticWSLS(MemoryOnePlayer):
+    """
+    Stochastic WSLS, similar to Generous TFT. Note that this is not the same as
+    Stochastic WSLS described in [Amaral2016]_, that strategy is a modification
+    of WSLS that learns from the performance of other strategies.
+
+    Names:
+
+    - Stochastic WSLS: Original name by Marc Harper
+    """
+
+    name = "Stochastic WSLS"
+
+    def __init__(self, ep: float = 0.05) -> None:
+        """
+        Parameters
+
+        ep, float
+            A parameter used to compute the four-vector -- the probability of
+            cooperating when the previous round was CD or DC
+
+        Special Cases
+
+        WinStayLoseShift is equivalent to StochasticWSLS(0)
+        """
+
+        self.ep = ep
+        four_vector = (1.0 - ep, ep, ep, 1.0 - ep)
+        super().__init__(four_vector)
+
+
+class SoftJoss(MemoryOnePlayer):
+    """
+    Defects with probability 0.9 when the opponent defects, otherwise
+    emulates Tit-For-Tat.
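+
+    More precisely, with the default q = 0.9 the four-vector is
+    (1, 0.1, 1, 0.1): always cooperate after an opponent cooperation, and
+    cooperate with probability 1 - q after an opponent defection.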
+
+    Names:
+
+    - Soft Joss: [Prison1998]_
+    """
+
+    name = "Soft Joss"
+
+    def __init__(self, q: float = 0.9) -> None:
+        """
+        Parameters
+
+        q, float
+            A parameter used to compute the four-vector
+
+        Special Cases
+
+        Cooperator is equivalent to SoftJoss(0)
+        TitForTat is equivalent to SoftJoss(1)
+        """
+        self.q = q
+        four_vector = (1.0, 1 - q, 1, 1 - q)
+        super().__init__(four_vector)
+
+    def __repr__(self) -> str:
+        return "%s: %s" % (self.name, round(self.q, 2))
+
+
+class ALLCorALLD(IpdPlayer):
+    """This strategy is at the parameter extreme of the ZD strategies (phi = 0).
+    It simply repeats its last move, and so mimics ALLC or ALLD after round one.
+    If the tournament is noisy, there will be long runs of C and D.
+
+    The starting move is random: C with probability 0.6, an arbitrary choice
+    made at implementation time.
+
+    Names:
+
+    - ALLC or ALLD: Original name by Marc Harper
+    - Repeat: [Akin2015]_
+    """
+
+    name = "ALLCorALLD"
+    classifier = {
+        "memory_depth": 1,  # Memory-one Four-Vector (1, 1, 0, 0)
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(self.history) == 0:
+            return random_choice(0.6)
+        return self.history[-1]
+
+
+class ReactivePlayer(MemoryOnePlayer):
+    """
+    A generic reactive player. Defined by 2 probabilities conditional on the
+    opponent's last move: P(C|C), P(C|D).
+
+    Names:
+
+    - Reactive: [Nowak1989]_
+    """
+
+    name = "Reactive IpdPlayer"
+
+    def __init__(self, probabilities: Tuple[float, float]) -> None:
+        four_vector = (*probabilities, *probabilities)
+        super().__init__(four_vector)
+        self.name = "%s: %s" % (self.name, probabilities)
diff --git a/axelrod/strategies/memorytwo.py b/axelrod/strategies/memorytwo.py
new file mode 100644
index 000000000..ab1f91214
--- /dev/null
+++ b/axelrod/strategies/memorytwo.py
@@ -0,0 +1,259 @@
+"""Memory Two strategies."""
+
+import itertools
+import warnings
+from typing import Dict, Tuple
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice
+
+from .defector import Defector
+from .titfortat import TitFor2Tats, TitForTat
+
+C, D = Action.C, Action.D
+
+
+class MemoryTwoPlayer(IpdPlayer):
+    """
+    Uses a sixteen-vector for strategies based on the 16 conditional probabilities
+    P(X | I,J,K,L) where X, I, J, K, L in [C, D] and I, J are the player's last
+    two moves and K, L are the opponent's last two moves. These conditional
+    probabilities are the following:
+    1. P(C|CC, CC)
+    2. P(C|CC, CD)
+    3. P(C|CC, DC)
+    4. P(C|CC, DD)
+    5. P(C|CD, CC)
+    6. P(C|CD, CD)
+    7. P(C|CD, DC)
+    8. P(C|CD, DD)
+    9. P(C|DC, CC)
+    10. P(C|DC, CD)
+    11. P(C|DC, DC)
+    12. P(C|DC, DD)
+    13. P(C|DD, CC)
+    14. P(C|DD, CD)
+    15. P(C|DD, DC)
+    16. P(C|DD, DD)
+    Cooperator is set as the default player if sixteen_vector is not given.
+
+    Names
+
+    - Memory Two: [Hilbe2017]_
+    """
+
+    name = "Generic Memory Two IpdPlayer"
+    classifier = {
+        "memory_depth": 2,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(
+        self, sixteen_vector: Tuple[float, ...]
= None, initial: Action = C
+    ) -> None:
+        """
+        Parameters
+        ----------
+
+        sixteen_vector: list or tuple of floats of length 16
+            The response probabilities to the preceding round of play
+        initial: C or D
+            The initial 2 moves
+        """
+        super().__init__()
+        self._initial = initial
+        self.set_initial_sixteen_vector(sixteen_vector)
+
+    def set_initial_sixteen_vector(self, sixteen_vector):
+        if sixteen_vector is None:
+            sixteen_vector = tuple([1] * 16)
+            warnings.warn("Memory two player is set to default, Cooperator.")
+
+        self.set_sixteen_vector(sixteen_vector)
+        if self.name == "Generic Memory Two IpdPlayer":
+            self.name = "%s: %s" % (self.name, sixteen_vector)
+
+    def set_sixteen_vector(self, sixteen_vector: Tuple):
+        if not all(0 <= p <= 1 for p in sixteen_vector):
+            raise ValueError(
+                "An element in the probability vector, {}, is not "
+                "between 0 and 1.".format(str(sixteen_vector))
+            )
+
+        states = [
+            (hist[:2], hist[2:]) for hist in list(itertools.product((C, D), repeat=4))
+        ]
+
+        self._sixteen_vector = dict(
+            zip(states, sixteen_vector)
+        )  # type: Dict[tuple, float]
+        self.classifier["stochastic"] = any(0 < x < 1 for x in set(sixteen_vector))
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(opponent.history) <= 1:
+            return self._initial
+        # Determine which probability to use
+        p = self._sixteen_vector[
+            (tuple(self.history[-2:]), tuple(opponent.history[-2:]))
+        ]
+        # Draw a random number in [0, 1] to decide
+        return random_choice(p)
+
+
+class AON2(MemoryTwoPlayer):
+    """
+    AON2 is a memory two strategy introduced in [Hilbe2017]_. It belongs to the
+    AONk (all-or-none) family of strategies. These strategies were designed to
+    satisfy the three following properties:
+
+    1. Mutually Cooperative. A strategy is mutually cooperative if there are
+    histories for which the strategy prescribes to cooperate, and if it continues
+    to cooperate after rounds with mutual cooperation (provided the last k actions
+    of the focal player were actually consistent).
+
+    2. Error correcting. A strategy is error correcting after at most k rounds if,
+    after any history, it generally takes a group of players at most k + 1 rounds
+    to re-establish mutual cooperation.
+
+    3. Retaliating. A strategy is retaliating for at least k rounds if, after
+    rounds in which the focal player cooperated while the coplayer defected,
+    the strategy responds by defecting the following k rounds.
+
+    In [Hilbe2017]_ the following vectors are reported as "equivalent" to AON2,
+    with their respective self-cooperation rates (note that the vectors
+    themselves are not the same):
+
+    1. [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], self-cooperation
+    rate: 0.952
+    2. [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], self-cooperation
+    rate: 0.951
+    3. [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], self-cooperation
+    rate: 0.951
+    4. [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1], self-cooperation
+    rate: 0.952
+
+    AON2 is implemented using vector 1 due to its self-cooperation rate.
+
+    In essence it is a strategy that starts off by cooperating and will cooperate
+    again only after the states (CC, CC), (CD, CD), (DC, DC), (DD, DD).
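+
+    (These are exactly the four states to which vector 1 above assigns
+    probability 1; every other state prescribes defection.)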
+
+    Names:
+
+    - AON2: [Hilbe2017]_
+    """
+
+    name = "AON2"
+    classifier = {
+        "memory_depth": 2,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        sixteen_vector = (1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)
+        super().__init__(sixteen_vector)
+
+
+class DelayedAON1(MemoryTwoPlayer):
+    """
+    Delayed AON1 is a memory two strategy also introduced in [Hilbe2017]_; it
+    belongs to the AONk family. Note that AON1 is equivalent to Win Stay Lose
+    Shift.
+
+    In [Hilbe2017]_ the following vectors are reported as "equivalent" to
+    Delayed AON1, with their respective self-cooperation rates (note that the
+    vectors themselves are not the same):
+
+    1. [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1], self-cooperation
+    rate: 0.952
+    2. [1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1], self-cooperation
+    rate: 0.970
+    3. [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1], self-cooperation
+    rate: 0.971
+
+    Delayed AON1 is implemented using vector 3 due to its self-cooperation rate.
+
+    In essence it is a strategy that starts off by cooperating and will cooperate
+    again only after the states (CC, CC), (CD, CD), (CD, DD), (DD, CD),
+    (DC, DC) and (DD, DD).
+
+    Names:
+
+    - Delayed AON1: [Hilbe2017]_
+    """
+
+    name = "Delayed AON1"
+    classifier = {
+        "memory_depth": 2,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        sixteen_vector = (1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1)
+        super().__init__(sixteen_vector)
+
+
+class MEM2(IpdPlayer):
+    """A memory-two player that switches between TFT, TFTT, and ALLD.
+
+    Note that the reference claims that this is a memory two strategy but in
+    fact it has infinite memory. This is because the player plays as ALLD if
+    ALLD has ever been selected twice, which can only be known if the entire
+    history of play is accessible.
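+
+    The sub-strategy is re-selected every other round (after an initial
+    three-round window): mutual cooperation in the last two rounds selects
+    TFT, a (C, D) / (D, C) exchange selects TFTT, and anything else selects
+    ALLD; once ALLD has been selected twice the player stays with ALLD for
+    good.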
+ + Names: + + - MEM2: [Li2014]_ + """ + + name = "MEM2" + classifier = { + "memory_depth": float("inf"), + "long_run_time": False, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + super().__init__() + self.players = {"TFT": TitForTat(), "TFTT": TitFor2Tats(), "ALLD": Defector()} + self.play_as = "TFT" + self.shift_counter = 3 + self.alld_counter = 0 + + def strategy(self, opponent: IpdPlayer) -> Action: + # Update Histories + # Note that this assumes that TFT and TFTT do not use internal counters, + # Rather that they examine the actual history of play + if len(self.history) > 0: + for v in self.players.values(): + v.history.append(self.history[-1], opponent.history[-1]) + self.shift_counter -= 1 + if (self.shift_counter == 0) and (self.alld_counter < 2): + self.shift_counter = 2 + # Depending on the last two moves, play as TFT, TFTT, or ALLD + last_two = list(zip(self.history[-2:], opponent.history[-2:])) + if set(last_two) == set([(C, C)]): + self.play_as = "TFT" + elif set(last_two) == set([(C, D), (D, C)]): + self.play_as = "TFTT" + else: + self.play_as = "ALLD" + self.alld_counter += 1 + return self.players[self.play_as].strategy(opponent) diff --git a/axelrod/strategies/meta.py b/axelrod/strategies/meta.py new file mode 100644 index 000000000..49d02cfae --- /dev/null +++ b/axelrod/strategies/meta.py @@ -0,0 +1,682 @@ +import random + +import numpy as np +from numpy.random import choice + +from axelrod.action import Action +from axelrod.classifier import Classifiers +from axelrod.player import IpdPlayer +from axelrod.strategies import TitForTat +from axelrod.strategy_transformers import NiceTransformer +from ._strategies import all_strategies +from .hunter import ( + AlternatorHunter, + CooperatorHunter, + CycleHunter, + DefectorHunter, + EventualCycleHunter, + MathConstantHunter, + RandomHunter, +) + +# Needs to be computed manually to prevent circular dependency +ordinary_strategies = [s for s in all_strategies if Classifiers.obey_axelrod(s())] + +C, D = Action.C, Action.D + + +class MetaPlayer(IpdPlayer): + """ + A generic player that has its own team of players. + + Names: + + - Meta IpdPlayer: Original name by Karol Langner + """ + + name = "Meta IpdPlayer" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(), + "long_run_time": True, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, team=None): + # The default is to use all strategies available, but we need to import + # the list at runtime, since _strategies import also _this_ module + # before defining the list. + if team: + self.team = team + else: + # Needs to be computed manually to prevent circular dependency + self.team = ordinary_strategies + # Make sure we don't use any meta players to avoid infinite recursion. + self.team = [t for t in self.team if not issubclass(t, MetaPlayer)] + # Initiate all the players in our team. + self.team = [t() for t in self.team] + + super().__init__() + + # This player inherits the classifiers of its team. + # Note that memory_depth is not simply the max memory_depth of the team. 
+        for key in [
+            "stochastic",
+            "inspects_source",
+            "manipulates_source",
+            "manipulates_state",
+        ]:
+            self.classifier[key] = any(map(Classifiers[key], self.team))
+
+        for t in self.team:
+            self.classifier["makes_use_of"].update(Classifiers["makes_use_of"](t))
+
+        self._last_results = None
+
+    def receive_match_attributes(self):
+        for t in self.team:
+            t.set_match_attributes(**self.match_attributes)
+
+    def __repr__(self):
+        team_size = len(self.team)
+        return "{}: {} player{}".format(
+            self.name, team_size, "s" if team_size > 1 else ""
+        )
+
+    def update_histories(self, coplay):
+        # Update team histories.
+        for player, play in zip(self.team, self._last_results):
+            player.history.append(play, coplay)
+
+    def update_history(self, play, coplay):
+        super().update_history(play, coplay)
+        self.update_histories(coplay)
+
+    def strategy(self, opponent):
+        # Get the results of all our players.
+        results = []
+        for player in self.team:
+            play = player.strategy(opponent)
+            results.append(play)
+        self._last_results = results
+        # A subclass should just define a way to choose the result based on
+        # team results.
+        return self.meta_strategy(results, opponent)
+
+    def meta_strategy(self, results, opponent):
+        """Determine the meta result based on results of all players.
+        Override this function in child classes."""
+        return C
+
+
+class MetaMajority(MetaPlayer):
+    """A player who goes by the majority vote of all other non-meta players.
+
+    Names:
+
+    - Meta Majority: Original name by Karol Langner
+    """
+
+    name = "Meta Majority"
+
+    @staticmethod
+    def meta_strategy(results, opponent):
+        if results.count(D) > results.count(C):
+            return D
+        return C
+
+
+class MetaMinority(MetaPlayer):
+    """A player who goes by the minority vote of all other non-meta players.
+
+    Names:
+
+    - Meta Minority: Original name by Karol Langner
+    """
+
+    name = "Meta Minority"
+
+    @staticmethod
+    def meta_strategy(results, opponent):
+        if results.count(D) < results.count(C):
+            return D
+        return C
+
+
+class MetaWinner(MetaPlayer):
+    """A player who goes by the strategy of the current winner.
+
+    Names:
+
+    - Meta Winner: Original name by Karol Langner
+    """
+
+    name = "Meta Winner"
+
+    def __init__(self, team=None):
+        super().__init__(team=team)
+        # For each player, we will keep the history of proposed moves and
+        # a running score since the beginning of the game.
+        self.scores = np.zeros(len(self.team))
+        self.classifier["long_run_time"] = True
+
+    def _update_scores(self, coplay):
+        # Update the running score for each player, before determining the
+        # next move.
+        game = self.match_attributes["game"]
+        scores = []
+        for player in self.team:
+            last_round = (player.history[-1], coplay)
+            s = game.scores[last_round][0]
+            scores.append(s)
+        self.scores += np.array(scores)
+
+    def update_histories(self, coplay):
+        super().update_histories(coplay)
+        self._update_scores(coplay)
+
+    def meta_strategy(self, results, opponent):
+        # Choose an action based on the collection of scores
+        bestscore = max(self.scores)
+        beststrategies = [
+            i for (i, score) in enumerate(self.scores) if score == bestscore
+        ]
+        bestproposals = [results[i] for i in beststrategies]
+        bestresult = C if C in bestproposals else D
+        return bestresult
+
+
+NiceMetaWinner = NiceTransformer()(MetaWinner)
+
+
+class MetaWinnerEnsemble(MetaWinner):
+    """A variant of MetaWinner that chooses one of the top scoring strategies
+    at random against each opponent. Note this strategy is always stochastic
+    regardless of the team.
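+
+    Concretely, each turn it samples uniformly from the proposals of roughly
+    the top 8% of team members by running score (always at least one member).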
+ + Names: + + - Meta Winner Ensemble: Original name by Marc Harper + """ + + name = "Meta Winner Ensemble" + + def meta_strategy(self, results, opponent): + # Sort by score + scores = [(score, i) for (i, score) in enumerate(self.scores)] + # Choose one of the best scorers at random + scores.sort(reverse=True) + prop = max(1, int(len(scores) * 0.08)) + index = choice([i for (s, i) in scores[:prop]]) + return results[index] + + +NiceMetaWinnerEnsemble = NiceTransformer()(MetaWinnerEnsemble) + + +class MetaHunter(MetaPlayer): + """A player who uses a selection of hunters. + + Names + + - Meta Hunter: Original name by Karol Langner + """ + + name = "Meta Hunter" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self): + # Notice that we don't include the cooperator hunter, because it leads + # to excessive defection and therefore bad performance against + # unforgiving strategies. We will stick to hunters that use defections + # as cues. However, a really tangible benefit comes from combining + # Random Hunter and Math Constant Hunter, since together they catch + # strategies that are lightly randomized but still quite constant + # (the tricky/suspicious ones). + team = [ + DefectorHunter, + AlternatorHunter, + RandomHunter, + MathConstantHunter, + CycleHunter, + EventualCycleHunter, + ] + + super().__init__(team=team) + + @staticmethod + def meta_strategy(results, opponent): + # If any of the hunters smells prey, then defect! + if D in results: + return D + + # Tit-for-tat might seem like a better default choice, but in many + # cases it complicates the heuristics of hunting and creates + # false-positives. So go ahead and use it, but only for longer + # histories. + if len(opponent.history) > 100: + return D if opponent.history[-1:] == [D] else C + else: + return C + + +class MetaHunterAggressive(MetaPlayer): + """A player who uses a selection of hunters. + + Names + + - Meta Hunter Aggressive: Original name by Marc Harper + """ + + name = "Meta Hunter Aggressive" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, team=None): + # This version uses CooperatorHunter + if team is None: + team = [ + DefectorHunter, + AlternatorHunter, + RandomHunter, + MathConstantHunter, + CycleHunter, + EventualCycleHunter, + CooperatorHunter, + ] + + super().__init__(team=team) + + @staticmethod + def meta_strategy(results, opponent): + # If any of the hunters smells prey, then defect! + if D in results: + return D + + # Tit-for-tat might seem like a better default choice, but in many + # cases it complicates the heuristics of hunting and creates + # false-positives. So go ahead and use it, but only for longer + # histories. 
+ if len(opponent.history) > 100: + return D if opponent.history[-1:] == [D] else C + else: + return C + + +class MetaMajorityMemoryOne(MetaMajority): + """MetaMajority with the team of Memory One players + + Names + + - Meta Majority Memory One: Original name by Marc Harper + """ + + name = "Meta Majority Memory One" + + def __init__(self): + team = [s for s in ordinary_strategies if Classifiers["memory_depth"](s()) <= 1] + super().__init__(team=team) + self.classifier["long_run_time"] = False + + +class MetaMajorityFiniteMemory(MetaMajority): + """MetaMajority with the team of Finite Memory Players + + Names + + - Meta Majority Finite Memory: Original name by Marc Harper + """ + + name = "Meta Majority Finite Memory" + + def __init__(self): + team = [ + s + for s in ordinary_strategies + if Classifiers["memory_depth"](s()) < float("inf") + ] + super().__init__(team=team) + + +class MetaMajorityLongMemory(MetaMajority): + """MetaMajority with the team of Long (infinite) Memory Players + + Names + + - Meta Majority Long Memory: Original name by Marc Harper + """ + + name = "Meta Majority Long Memory" + + def __init__(self): + team = [ + s + for s in ordinary_strategies + if Classifiers["memory_depth"](s()) == float("inf") + ] + super().__init__(team=team) + + +class MetaWinnerMemoryOne(MetaWinner): + """MetaWinner with the team of Memory One players + + Names + + - Meta Winner Memory Memory One: Original name by Marc Harper + """ + + name = "Meta Winner Memory One" + + def __init__(self): + team = [s for s in ordinary_strategies if Classifiers["memory_depth"](s()) <= 1] + super().__init__(team=team) + self.classifier["long_run_time"] = False + + +class MetaWinnerFiniteMemory(MetaWinner): + """MetaWinner with the team of Finite Memory Players + + Names + + - Meta Winner Finite Memory: Original name by Marc Harper + """ + + name = "Meta Winner Finite Memory" + + def __init__(self): + team = [ + s + for s in ordinary_strategies + if Classifiers["memory_depth"](s()) < float("inf") + ] + super().__init__(team=team) + + +class MetaWinnerLongMemory(MetaWinner): + """MetaWinner with the team of Long (infinite) Memory Players + + Names + + - Meta Winner Long Memory: Original name by Marc Harper + """ + + name = "Meta Winner Long Memory" + + def __init__(self): + team = [ + s + for s in ordinary_strategies + if Classifiers["memory_depth"](s()) == float("inf") + ] + super().__init__(team=team) + + +class MetaWinnerDeterministic(MetaWinner): + """Meta Winner with the team of Deterministic Players. + + Names + + - Meta Winner Deterministic: Original name by Marc Harper + """ + + name = "Meta Winner Deterministic" + + def __init__(self): + team = [s for s in ordinary_strategies if not Classifiers["stochastic"](s())] + super().__init__(team=team) + self.classifier["stochastic"] = False + + +class MetaWinnerStochastic(MetaWinner): + """Meta Winner with the team of Stochastic Players. + + Names + + - Meta Winner Stochastic: Original name by Marc Harper + """ + + name = "Meta Winner Stochastic" + + def __init__(self): + team = [s for s in ordinary_strategies if Classifiers["stochastic"](s())] + super().__init__(team=team) + + +class MetaMixer(MetaPlayer): + """A player who randomly switches between a team of players. + If no distribution is passed then the player will uniformly choose between + sub players. + + In essence this is creating a Mixed strategy. 
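+
+    For example (an illustrative sketch; assumes TitForTat and Defector are
+    importable)::
+
+        player = MetaMixer(team=[TitForTat, Defector], distribution=[0.8, 0.2])
+
+    plays the move proposed by Tit For Tat on roughly 80% of turns.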
+ + Parameters + + team : list of strategy classes, optional + Team of strategies that are to be randomly played + If none is passed will select the ordinary strategies. + distribution : list representing a probability distribution, optional + This gives the distribution from which to select the players. + If none is passed will select uniformly. + + Names + + - Meta Mixer: Original name by Vince Knight + """ + + name = "Meta Mixer" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(), + "long_run_time": True, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, team=None, distribution=None): + self.distribution = distribution + super().__init__(team=team) + + def meta_strategy(self, results, opponent): + """Using the numpy.random choice function to sample with weights""" + return choice(results, p=self.distribution) + + +class NMWEDeterministic(NiceMetaWinnerEnsemble): + """Nice Meta Winner Ensemble with the team of Deterministic Players. + + Names + + - Nice Meta Winner Ensemble Deterministic: Original name by Marc Harper + """ + + name = "NMWE Deterministic" + + def __init__(self): + team = [s for s in ordinary_strategies if not Classifiers["stochastic"](s())] + super().__init__(team=team) + self.classifier["stochastic"] = True + + +class NMWEStochastic(NiceMetaWinnerEnsemble): + """Nice Meta Winner Ensemble with the team of Stochastic Players. + + Names + + - Nice Meta Winner Ensemble Stochastic: Original name by Marc Harper + """ + + name = "NMWE Stochastic" + + def __init__(self): + team = [s for s in ordinary_strategies if Classifiers["stochastic"](s())] + super().__init__(team=team) + + +class NMWEFiniteMemory(NiceMetaWinnerEnsemble): + """Nice Meta Winner Ensemble with the team of Finite Memory Players. + + Names + + - Nice Meta Winner Ensemble Finite Memory: Original name by Marc Harper + """ + + name = "NMWE Finite Memory" + + def __init__(self): + team = [ + s + for s in ordinary_strategies + if Classifiers["memory_depth"](s()) < float("inf") + ] + super().__init__(team=team) + + +class NMWELongMemory(NiceMetaWinnerEnsemble): + """Nice Meta Winner Ensemble with the team of Long Memory Players. + + Names + + - Nice Meta Winner Ensemble Long Memory: Original name by Marc Harper + """ + + name = "NMWE Long Memory" + + def __init__(self): + team = [ + s + for s in ordinary_strategies + if Classifiers["memory_depth"](s()) == float("inf") + ] + super().__init__(team=team) + + +class NMWEMemoryOne(NiceMetaWinnerEnsemble): + """Nice Meta Winner Ensemble with the team of Memory One Players. + + Names + + - Nice Meta Winner Ensemble Memory One: Original name by Marc Harper + """ + + name = "NMWE Memory One" + + def __init__(self): + team = [s for s in ordinary_strategies if Classifiers["memory_depth"](s()) <= 1] + super().__init__(team=team) + self.classifier["long_run_time"] = False + + +class MemoryDecay(MetaPlayer): + """ + A player utilizes the (default) Tit for Tat strategy for the first (default) 15 turns, + at the same time memorizing the opponent's decisions. After the 15 turns have + passed, the player calculates a 'net cooperation score' (NCS) for their opponent, + weighing decisions to Cooperate as (default) 1, and to Defect as (default) + -2. If the opponent's NCS is below 0, the player defects; otherwise, + they cooperate. 
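+
+    For example (illustrative), a remembered history of [C, C, D] scores
+    1 + 1 - 2 = 0 under the default weights; this is not below 0, so the
+    player cooperates.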
+ + The player's memories of the opponent's decisions have a random chance to be + altered (i.e., a C decision becomes D or vice versa; default probability + is 0.03) or deleted (default probability is 0.1). + + It is possible to pass a different axelrod player class to change the initial + player behavior. + + Name: Memory Decay + """ + + name = "Memory Decay" + classifier = { + "memory_depth": float("inf"), + "long_run_time": False, + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__( + self, + p_memory_delete: float = 0.1, + p_memory_alter: float = 0.03, + loss_value: float = -2, + gain_value: float = 1, + memory: list = None, + start_strategy: IpdPlayer = TitForTat, + start_strategy_duration: int = 15, + ): + super().__init__(team=[start_strategy]) + # This strategy is stochastic even if none of the team is. The + # MetaPlayer initializer will set stochastic to be False in that case. + self.classifier["stochastic"] = True + + self.p_memory_delete = p_memory_delete + self.p_memory_alter = p_memory_alter + self.loss_value = loss_value + self.gain_value = gain_value + self.memory = [] if not memory else memory + self.start_strategy_duration = start_strategy_duration + self.gloss_values = None + + def __repr__(self): + return IpdPlayer.__repr__(self) + + def gain_loss_translate(self): + """ + Translates the actions (D and C) to numeric values (loss_value and + gain_value). + """ + values = {C: self.gain_value, D: self.loss_value} + self.gloss_values = [values[action] for action in self.memory] + + def memory_alter(self): + """ + Alters memory entry, i.e. puts C if there's a D and vice versa. + """ + alter = choice(range(0, len(self.memory))) + self.memory[alter] = self.memory[alter].flip() + + def memory_delete(self): + """ + Deletes memory entry. + """ + self.memory.pop(choice(range(0, len(self.memory)))) + + def meta_strategy(self, results, opponent): + try: + self.memory.append(opponent.history[-1]) + except IndexError: + pass + if len(self.history) < self.start_strategy_duration: + return results[0] + else: + if random.random() <= self.p_memory_alter: + self.memory_alter() + if random.random() <= self.p_memory_delete: + self.memory_delete() + self.gain_loss_translate() + if sum(self.gloss_values) < 0: + return D + else: + return C diff --git a/axelrod/strategies/mindcontrol.py b/axelrod/strategies/mindcontrol.py new file mode 100644 index 000000000..74033b5a5 --- /dev/null +++ b/axelrod/strategies/mindcontrol.py @@ -0,0 +1,95 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class MindController(IpdPlayer): + """A player that changes the opponents strategy to cooperate. + + Names + + - Mind Controller: Original name by Karol Langner + """ + + name = "Mind Controller" + classifier = { + "memory_depth": -10, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": True, # Finds out what opponent will do + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + """ + Alters the opponents strategy method to be a lambda function which + always returns C. This player will then always return D to take + advantage of this + """ + + opponent.strategy = lambda opponent: C + return D + + +class MindWarper(IpdPlayer): + """ + A player that changes the opponent's strategy but blocks changes to + its own. 
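+
+    It does this by overriding ``__setattr__`` so that any assignment to its
+    own ``strategy`` attribute is silently ignored.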
+ + Names + + - Mind Warper: Original name by Karol Langner + """ + + name = "Mind Warper" + classifier = { + "memory_depth": -10, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": True, # changes what opponent will do + "manipulates_state": False, + } + + def __setattr__(self, name: str, val: str): + if name == "strategy": + pass + else: + self.__dict__[name] = val + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + opponent.strategy = lambda opponent: C + return D + + +class MindBender(MindWarper): + """ + A player that changes the opponent's strategy by modifying the internal + dictionary. + + Names + + - Mind Bender: Original name by Karol Langner + """ + + name = "Mind Bender" + classifier = { + "memory_depth": -10, + "makes_use_of": set(), + "stochastic": False, + "long_run_time": False, + "inspects_source": False, + "manipulates_source": True, # changes what opponent will do + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + opponent.__dict__["strategy"] = lambda opponent: C + return D diff --git a/axelrod/strategies/mindreader.py b/axelrod/strategies/mindreader.py new file mode 100644 index 000000000..ccda24ccf --- /dev/null +++ b/axelrod/strategies/mindreader.py @@ -0,0 +1,108 @@ +""" +The player classes in this module do not obey standard rules of the IPD (as +indicated by their classifier). We do not recommend putting a lot of time in to +optimising them. +""" +from axelrod._strategy_utils import inspect_strategy, look_ahead +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class MindReader(IpdPlayer): + """A player that looks ahead at what the opponent will do and decides what + to do. + + Names: + + - Mind reader: Original name by Jason Young + """ + + name = "Mind Reader" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"game"}, + "long_run_time": False, + "inspects_source": True, # Finds out what opponent will do + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def foil_strategy_inspection() -> Action: + """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" + return D + + def strategy(self, opponent: IpdPlayer) -> Action: + """ + Pretends to play the opponent a number of times before each match. + The primary purpose is to look far enough ahead to see if a defect will + be punished by the opponent. + """ + game = self.match_attributes["game"] + + best_strategy = look_ahead(self, opponent, game) + + return best_strategy + + +class ProtectedMindReader(MindReader): + """A player that looks ahead at what the opponent will do and decides what + to do. 
It is also protected from mind control strategies + + Names: + + - Protected Mind reader: Original name by Jason Young + """ + + name = "Protected Mind Reader" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"game"}, + "long_run_time": False, + "inspects_source": True, # Finds out what opponent will do + "manipulates_source": True, # Stops opponent's strategy + "manipulates_state": False, + } + + def __setattr__(self, name: str, val: str): + """Stops any other strategy altering the methods of this class """ + + if name == "strategy": + pass + else: + self.__dict__[name] = val + + +class MirrorMindReader(ProtectedMindReader): + """A player that will mirror whatever strategy it is playing against by + cheating and calling the opponent's strategy function instead of its own. + + Names: + + - Protected Mind reader: Original name by Brice Fernandes + """ + + name = "Mirror Mind Reader" + + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": True, # Reads and copies the source of the opponent + "manipulates_source": True, # Changes own source dynamically + "manipulates_state": False, + } + + @staticmethod + def foil_strategy_inspection() -> Action: + """Foils _strategy_utils.inspect_strategy and _strategy_utils.look_ahead""" + return C + + def strategy(self, opponent: IpdPlayer) -> Action: + """Will read the mind of the opponent and play the opponent's strategy. """ + return inspect_strategy(self, opponent) diff --git a/axelrod/strategies/mutual.py b/axelrod/strategies/mutual.py new file mode 100644 index 000000000..e06fbda0e --- /dev/null +++ b/axelrod/strategies/mutual.py @@ -0,0 +1,83 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.random_ import random_choice + +C, D = Action.C, Action.D + + +class Desperate(IpdPlayer): + """A player that only cooperates after mutual defection. + + Names: + + - Desperate: [Berg2015]_""" + + name = "Desperate" + classifier = { + "memory_depth": 1, + "long_run_time": False, + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if not opponent.history: + return random_choice() + if self.history[-1] == D and opponent.history[-1] == D: + return C + return D + + +class Hopeless(IpdPlayer): + """A player that only defects after mutual cooperation. + + Names: + + - Hopeless: [Berg2015]_""" + + name = "Hopeless" + classifier = { + "memory_depth": 1, + "long_run_time": False, + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if not opponent.history: + return random_choice() + if self.history[-1] == C and opponent.history[-1] == C: + return D + return C + + +class Willing(IpdPlayer): + """A player that only defects after mutual defection. 
+ + Names: + + - Willing: [Berg2015]_""" + + name = "Willing" + classifier = { + "memory_depth": 1, + "long_run_time": False, + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if not opponent.history: + return random_choice() + if self.history[-1] == D and opponent.history[-1] == D: + return D + return C diff --git a/axelrod/strategies/negation.py b/axelrod/strategies/negation.py new file mode 100644 index 000000000..d6d7379e0 --- /dev/null +++ b/axelrod/strategies/negation.py @@ -0,0 +1,34 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.random_ import random_choice + +C, D = Action.C, Action.D + + +class Negation(IpdPlayer): + """ + A player starts by cooperating or defecting randomly if it's their first move, + then simply doing the opposite of the opponents last move thereafter. + + Names: + + - Negation: [PD2017]_ + """ + + name = "Negation" + classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + # Random first move + if not self.history: + return random_choice() + # Act opposite of opponent otherwise + return opponent.history[-1].flip() diff --git a/axelrod/strategies/oncebitten.py b/axelrod/strategies/oncebitten.py new file mode 100644 index 000000000..aee84f5c3 --- /dev/null +++ b/axelrod/strategies/oncebitten.py @@ -0,0 +1,130 @@ +import random + +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class OnceBitten(IpdPlayer): + """ + Cooperates once when the opponent defects, but if they defect twice in a row + defaults to forgetful grudger for 10 turns defecting. + + Names: + + - Once Bitten: Original name by Holly Marissa + """ + + name = "Once Bitten" + classifier = { + "memory_depth": 12, # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + super().__init__() + self.mem_length = 10 + self.grudged = False + self.grudge_memory = 0 + + def strategy(self, opponent: IpdPlayer) -> Action: + """ + Begins by playing C, then plays D for mem_length rounds if the opponent + ever plays D twice in a row. + """ + if self.grudge_memory >= self.mem_length: + self.grudge_memory = 0 + self.grudged = False + + if len(opponent.history) < 2: + return C + + if self.grudged: + self.grudge_memory += 1 + return D + elif not (C in opponent.history[-2:]): + self.grudged = True + return D + return C + + +class FoolMeOnce(IpdPlayer): + """ + Forgives one D then retaliates forever on a second D. + + Names: + + - Fool me once: Original name by Marc Harper + """ + + name = "Fool Me Once" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + if not opponent.history: + return C + if opponent.defections > 1: + return D + return C + + +class ForgetfulFoolMeOnce(IpdPlayer): + """ + Forgives one D then retaliates forever on a second D. 
Sometimes randomly + forgets the defection count, and so keeps a secondary count separate from + the standard count in IpdPlayer. + + Names: + + - Forgetful Fool Me Once: Original name by Marc Harper + """ + + name = "Forgetful Fool Me Once" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, forget_probability: float = 0.05) -> None: + """ + Parameters + ---------- + forget_probability, float + The probability of forgetting the count of opponent defections. + """ + super().__init__() + self.D_count = 0 + self._initial = C + self.forget_probability = forget_probability + + def strategy(self, opponent: IpdPlayer) -> Action: + r = random.random() + if not opponent.history: + return self._initial + if opponent.history[-1] == D: + self.D_count += 1 + if r < self.forget_probability: + self.D_count = 0 + if self.D_count > 1: + return D + return C diff --git a/axelrod/strategies/prober.py b/axelrod/strategies/prober.py new file mode 100644 index 000000000..3ed58644a --- /dev/null +++ b/axelrod/strategies/prober.py @@ -0,0 +1,405 @@ +import random +from typing import List + +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.random_ import random_choice + +Vector = List[float] + + +C, D = Action.C, Action.D + + +class CollectiveStrategy(IpdPlayer): + """Defined in [Li2009]_. 'It always cooperates in the first move and defects + in the second move. If the opponent also cooperates in the first move and + defects in the second move, CS will cooperate until the opponent defects. + Otherwise, CS will always defect.' + + Names: + + - Collective Strategy: [Li2009]_ + + """ + + name = "CollectiveStrategy" + + classifier = { + "stochastic": False, + "memory_depth": float("inf"), # Long memory + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + turn = len(self.history) + if turn == 0: + return C + if turn == 1: + return D + if opponent.defections > 1: + return D + if opponent.history[0:2] == [C, D]: + return C + return D + + +class Detective(IpdPlayer): + """ + Starts with C, D, C, C, or with the given sequence of actions. + If the opponent defects at least once in the first fixed rounds, + play as TFT forever, else defect forever. + + Names: + + - Detective: [NC2019]_ + """ + + name = "Detective" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, initial_actions: List[Action] = None) -> None: + super().__init__() + if initial_actions is None: + self.initial_actions = [C, D, C, C] + else: + self.initial_actions = initial_actions + + def strategy(self, opponent: IpdPlayer) -> Action: + hist_size = len(self.history) + init_size = len(self.initial_actions) + if hist_size < init_size: + return self.initial_actions[hist_size] + if D not in opponent.history[:init_size]: + return D + return opponent.history[-1] # TFT + + +class Prober(IpdPlayer): + """ + Plays D, C, C initially. Defects forever if opponent cooperated in moves 2 + and 3. Otherwise plays TFT. 
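+
+    For example (an illustrative trace, not from the original source):
+    against Cooperator the opening is D, C, C; the opponent cooperated in
+    rounds 2 and 3, so Prober defects for the remainder of the match.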
+ + Names: + + - Prober: [Li2011]_ + """ + + name = "Prober" + classifier = { + "stochastic": False, + "memory_depth": float("inf"), # Long memory + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + turn = len(self.history) + if turn == 0: + return D + if turn == 1: + return C + if turn == 2: + return C + if turn > 2: + if opponent.history[1:3] == [C, C]: + return D + else: + # TFT + return D if opponent.history[-1:] == [D] else C + + +class Prober2(IpdPlayer): + """ + Plays D, C, C initially. Cooperates forever if opponent played D then C + in moves 2 and 3. Otherwise plays TFT. + + Names: + + - Prober 2: [Prison1998]_ + """ + + name = "Prober 2" + classifier = { + "stochastic": False, + "memory_depth": float("inf"), # Long memory + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + turn = len(self.history) + if turn == 0: + return D + if turn == 1: + return C + if turn == 2: + return C + if turn > 2: + if opponent.history[1:3] == [D, C]: + return C + else: + # TFT + return D if opponent.history[-1:] == [D] else C + + +class Prober3(IpdPlayer): + """ + Plays D, C initially. Defects forever if opponent played C in moves 2. + Otherwise plays TFT. + + Names: + + - Prober 3: [Prison1998]_ + """ + + name = "Prober 3" + classifier = { + "stochastic": False, + "memory_depth": float("inf"), # Long memory + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + turn = len(self.history) + if turn == 0: + return D + if turn == 1: + return C + if turn > 1: + if opponent.history[1] == C: + return D + else: + # TFT + return D if opponent.history[-1:] == [D] else C + + +class Prober4(IpdPlayer): + """ + Plays C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D initially. + Counts retaliating and provocative defections of the opponent. + If the absolute difference between the counts is smaller or equal to 2, + defects forever. + Otherwise plays C for the next 5 turns and TFT for the rest of the game. 
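+
+    (Here a retaliating defection is taken to be one played in reply to this
+    player's own defection, and a provocative defection one played in reply
+    to its cooperation, matching the counting in the implementation below.)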
+
+    Names:
+
+    - Prober 4: [Prison1998]_
+    """
+
+    name = "Prober 4"
+    classifier = {
+        "stochastic": False,
+        "memory_depth": float("inf"),
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.init_sequence = [
+            C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D,
+        ]
+        self.just_Ds = 0
+        self.unjust_Ds = 0
+        self.turned_defector = False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if not self.history:
+            return self.init_sequence[0]
+        turn = len(self.history)
+        if turn < len(self.init_sequence):
+            if opponent.history[-1] == D:
+                if self.history[-1] == D:
+                    self.just_Ds += 1
+                if self.history[-1] == C:
+                    self.unjust_Ds += 1
+            return self.init_sequence[turn]
+        if turn == len(self.init_sequence):
+            diff_in_Ds = abs(self.just_Ds - self.unjust_Ds)
+            self.turned_defector = diff_in_Ds <= 2
+        if self.turned_defector:
+            return D
+        if not self.turned_defector:
+            if turn < len(self.init_sequence) + 5:
+                return C
+            return D if opponent.history[-1] == D else C
+
+
+class HardProber(IpdPlayer):
+    """
+    Plays D, D, C, C initially. Defects forever if opponent cooperated in moves
+    2 and 3. Otherwise plays TFT.
+
+    Names:
+
+    - Hard Prober: [Prison1998]_
+    """
+
+    name = "Hard Prober"
+    classifier = {
+        "stochastic": False,
+        "memory_depth": float("inf"),  # Long memory
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turn = len(self.history)
+        if turn == 0:
+            return D
+        if turn == 1:
+            return D
+        if turn == 2:
+            return C
+        if turn == 3:
+            return C
+        if turn > 3:
+            if opponent.history[1:3] == [C, C]:
+                return D
+            else:
+                # TFT
+                return D if opponent.history[-1:] == [D] else C
+
+
+class NaiveProber(IpdPlayer):
+    """
+    Like tit-for-tat, but it occasionally defects with a small probability.
+
+    Names:
+
+    - Naive Prober: [Li2011]_
+    """
+
+    name = "Naive Prober"
+    classifier = {
+        "memory_depth": 1,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, p: float = 0.1) -> None:
+        """
+        Parameters
+        ----------
+        p, float
+            The probability to defect randomly
+        """
+        super().__init__()
+        self.p = p
+        if (self.p == 0) or (self.p == 1):
+            self.classifier["stochastic"] = False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # First move
+        if len(self.history) == 0:
+            return C
+        # React to the opponent's last move
+        if opponent.history[-1] == D:
+            return D
+        # Otherwise cooperate with probability 1 - self.p (i.e. defect with
+        # probability self.p)
+        choice = random_choice(1 - self.p)
+        return choice
+
+
+class RemorsefulProber(NaiveProber):
+    """
+    Like Naive Prober, but it remembers if the opponent responds to a random
+    defection with a defection by being remorseful and cooperating.
+
+    For reference see: [Li2011]_. A more complete description is given in "The
+    Selfish Gene" (https://books.google.co.uk/books?id=ekonDAAAQBAJ):
+
+    "Remorseful Prober remembers whether it has just spontaneously defected, and
+    whether the result was prompt retaliation. If so, it 'remorsefully' allows
+    its opponent 'one free hit' without retaliating."
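+
+    In this implementation that means a single opponent defection that
+    follows the player's own random defection is answered with cooperation
+    rather than retaliation.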
+
+    Names:
+
+    - Remorseful Prober: [Li2011]_
+    """
+
+    name = "Remorseful Prober"
+    classifier = {
+        "memory_depth": 2,  # It remembers if its previous move was random
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, p: float = 0.1) -> None:
+        super().__init__(p)
+        self.probing = False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # First move
+        if len(self.history) == 0:
+            return C
+        # React to the opponent's last move
+        if opponent.history[-1] == D:
+            if self.probing:
+                self.probing = False
+                return C
+            return D
+
+        # Otherwise cooperate with probability 1 - self.p
+        if random.random() < 1 - self.p:
+            self.probing = False
+            return C
+
+        self.probing = True
+        return D
diff --git a/axelrod/strategies/punisher.py b/axelrod/strategies/punisher.py
new file mode 100644
index 000000000..30ff7831a
--- /dev/null
+++ b/axelrod/strategies/punisher.py
@@ -0,0 +1,183 @@
+from typing import List
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class Punisher(IpdPlayer):
+    """
+    A player that starts by cooperating, but will defect if at any point the
+    opponent has defected. It forgets after mem_length matches, with
+    1 <= mem_length <= 20 proportional to the fraction of rounds in which the
+    opponent has played D, punishing that player for playing D too often.
+
+    Names:
+
+    - Punisher: Original name by Geraint Palmer
+    """
+
+    name = "Punisher"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        """
+        Initialises the player
+        """
+        super().__init__()
+        self.mem_length = 1
+        self.grudged = False
+        self.grudge_memory = 1
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        """
+        Begins by playing C, then plays D for an amount of rounds proportional
+        to the opponent's historical '%' of playing D if the opponent ever
+        plays D
+        """
+
+        if self.grudge_memory >= self.mem_length:
+            self.grudge_memory = 0
+            self.grudged = False
+
+        if self.grudged:
+            self.grudge_memory += 1
+            return D
+
+        elif D in opponent.history[-1:]:
+            self.mem_length = (opponent.defections * 20) // len(opponent.history)
+            self.grudged = True
+            return D
+
+        return C
+
+
+class InversePunisher(IpdPlayer):
+    """
+    An inverted version of Punisher. The player starts by cooperating, but
+    will defect if at any point the opponent has defected, and forgets after
+    mem_length matches, with 1 <= mem_length <= 20. This time mem_length is
+    proportional to the amount of time the opponent has played C.
+
+    Names:
+
+    - Inverse Punisher: Original name by Geraint Palmer
+    """
+
+    name = "Inverse Punisher"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.mem_length = 1
+        self.grudged = False
+        self.grudge_memory = 1
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        """
+        Begins by playing C, then plays D for an amount of rounds proportional
+        to the opponent's historical '%' of playing C if the opponent ever
+        plays D.
+        """
+
+        if self.grudge_memory >= self.mem_length:
+            self.grudge_memory = 0
+            self.grudged = False
+
+        if self.grudged:
+            self.grudge_memory += 1
+            return D
+        elif D in opponent.history[-1:]:
+            self.mem_length = (opponent.cooperations * 20) // len(opponent.history)
+            if self.mem_length == 0:
+                self.mem_length += 1
+            self.grudged = True
+            return D
+        return C
+
+
+class LevelPunisher(IpdPlayer):
+    """
+    A player starts by cooperating but, after 10 rounds, will defect if at
+    any point the proportion of defections by the opponent is greater than
+    20%.
+
+    Names:
+
+    - Level Punisher: [Eckhart2015]_
+    """
+
+    name = "Level Punisher"
+    classifier = {
+        "memory_depth": float("inf"),  # Long Memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(opponent.history) < 10:
+            return C
+        elif (len(opponent.history) - opponent.cooperations) / len(
+            opponent.history
+        ) > 0.2:
+            return D
+        else:
+            return C
+
+
+class TrickyLevelPunisher(IpdPlayer):
+    """
+    A player starts by cooperating, then defects if the opponent's proportion
+    of defections exceeds 20% within the first 10 rounds, 10% within the
+    first 50 rounds and 5% within the first 100 rounds. From round 100
+    onwards it always cooperates.
+
+    Names:
+
+    - Tricky Level Punisher: [Eckhart2015]_
+    """
+
+    name = "Tricky Level Punisher"
+    classifier = {
+        "memory_depth": float("inf"),  # Long Memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(opponent.history) == 0:
+            return C
+        if len(opponent.history) < 10:
+            if opponent.defections / len(opponent.history) > 0.2:
+                return D
+        if len(opponent.history) < 50:
+            if opponent.defections / len(opponent.history) > 0.1:
+                return D
+        if len(opponent.history) < 100:
+            if opponent.defections / len(opponent.history) > 0.05:
+                return D
+        return C
diff --git a/axelrod/strategies/qlearner.py b/axelrod/strategies/qlearner.py
new file mode 100644
index 000000000..72387d010
--- /dev/null
+++ b/axelrod/strategies/qlearner.py
@@ -0,0 +1,161 @@
+import random
+from collections import OrderedDict
+from typing import Dict, List, Union
+
+from axelrod.action import Action, actions_to_str
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice
+
+Score = Union[int, float]
+
+C, D = Action.C, Action.D
+
+
+class RiskyQLearner(IpdPlayer):
+    """A player who learns the best strategies through the q-learning
+    algorithm.
+
+    This Q learner is quick to come to conclusions and doesn't care about the
+    future.
+
+    Names:
+
+    - Risky Q Learner: Original name by Geraint Palmer
+    """
+
+    name = "Risky QLearner"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": True,
+        "makes_use_of": set(["game"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+    learning_rate = 0.9
+    discount_rate = 0.9
+    action_selection_parameter = 0.1
+    memory_length = 12
+
+    def __init__(self) -> None:
+        """Initialises the player by picking a random strategy."""
+
+        super().__init__()
+
+        # Set this explicitly, since the constructor of super will not pick it up
+        # for any subclasses that do not override methods using random calls.
+ self.classifier["stochastic"] = True + + self.prev_action = None # type: Action + self.original_prev_action = None # type: Action + self.score = 0 + self.Qs = OrderedDict({"": OrderedDict(zip([C, D], [0, 0]))}) + self.Vs = OrderedDict({"": 0}) + self.prev_state = "" + + def receive_match_attributes(self): + (R, P, S, T) = self.match_attributes["game"].RPST() + self.payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}} + + def strategy(self, opponent: IpdPlayer) -> Action: + """Runs a qlearn algorithm while the tournament is running.""" + if len(self.history) == 0: + self.prev_action = random_choice() + self.original_prev_action = self.prev_action + state = self.find_state(opponent) + reward = self.find_reward(opponent) + if state not in self.Qs: + self.Qs[state] = OrderedDict(zip([C, D], [0, 0])) + self.Vs[state] = 0 + self.perform_q_learning(self.prev_state, state, self.prev_action, reward) + action = self.select_action(state) + self.prev_state = state + self.prev_action = action + return action + + def select_action(self, state: str) -> Action: + """ + Selects the action based on the epsilon-soft policy + """ + rnd_num = random.random() + p = 1.0 - self.action_selection_parameter + if rnd_num < p: + return max(self.Qs[state], key=lambda x: self.Qs[state][x]) + return random_choice() + + def find_state(self, opponent: IpdPlayer) -> str: + """ + Finds the my_state (the opponents last n moves + + its previous proportion of playing C) as a hashable state + """ + prob = "{:.1f}".format(opponent.cooperations) + action_str = actions_to_str(opponent.history[-self.memory_length :]) + return action_str + prob + + def perform_q_learning(self, prev_state: str, state: str, action: Action, reward): + """ + Performs the qlearning algorithm + """ + self.Qs[prev_state][action] = (1.0 - self.learning_rate) * self.Qs[prev_state][ + action + ] + self.learning_rate * (reward + self.discount_rate * self.Vs[state]) + self.Vs[prev_state] = max(self.Qs[prev_state].values()) + + def find_reward(self, opponent: IpdPlayer) -> Dict[Action, Dict[Action, Score]]: + """ + Finds the reward gained on the last iteration + """ + + if len(opponent.history) == 0: + opp_prev_action = random_choice() + else: + opp_prev_action = opponent.history[-1] + return self.payoff_matrix[self.prev_action][opp_prev_action] + + +class ArrogantQLearner(RiskyQLearner): + """A player who learns the best strategies through the q-learning + algorithm. + + This Q learner jumps to quick conclusions and cares about the future. + + Names: + + - Arrogant Q Learner: Original name by Geraint Palmer + """ + + name = "Arrogant QLearner" + learning_rate = 0.9 + discount_rate = 0.1 + + +class HesitantQLearner(RiskyQLearner): + """A player who learns the best strategies through the q-learning algorithm. + + This Q learner is slower to come to conclusions and does not look ahead much. + + Names: + + - Hesitant Q Learner: Original name by Geraint Palmer + """ + + name = "Hesitant QLearner" + learning_rate = 0.1 + discount_rate = 0.9 + + +class CautiousQLearner(RiskyQLearner): + """A player who learns the best strategies through the q-learning algorithm. + + This Q learner is slower to come to conclusions and wants to look ahead + more. 
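+
+    For reference, the four Q learners differ only in their class constants,
+    given here as (learning_rate, discount_rate): Risky (0.9, 0.9), Arrogant
+    (0.9, 0.1), Hesitant (0.1, 0.9) and Cautious (0.1, 0.1).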
+ + Names: + + - Cautious Q Learner: Original name by Geraint Palmer + """ + + name = "Cautious QLearner" + learning_rate = 0.1 + discount_rate = 0.1 diff --git a/axelrod/strategies/rand.py b/axelrod/strategies/rand.py new file mode 100644 index 000000000..57dd1b182 --- /dev/null +++ b/axelrod/strategies/rand.py @@ -0,0 +1,46 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.random_ import random_choice + + +class Random(IpdPlayer): + """A player who randomly chooses between cooperating and defecting. + + This strategy came 15th in Axelrod's original tournament. + + Names: + + - Random: [Axelrod1980]_ + - Lunatic: [Tzafestas2000]_ + """ + + name = "Random" + classifier = { + "memory_depth": 0, # Memory-one Four-Vector = (p, p, p, p) + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, p: float = 0.5) -> None: + """ + Parameters + ---------- + p, float + The probability to cooperate + + Special Cases + ------------- + Random(0) is equivalent to Defector + Random(1) is equivalent to Cooperator + """ + super().__init__() + self.p = p + if p in [0, 1]: + self.classifier["stochastic"] = False + + def strategy(self, opponent: IpdPlayer) -> Action: + return random_choice(self.p) diff --git a/axelrod/strategies/resurrection.py b/axelrod/strategies/resurrection.py new file mode 100644 index 000000000..5595ccbc0 --- /dev/null +++ b/axelrod/strategies/resurrection.py @@ -0,0 +1,73 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class Resurrection(IpdPlayer): + """ + A player starts by cooperating and defects if the number of rounds + played by the player is greater than five and the last five rounds + are defections. + + Otherwise, the strategy plays like Tit-for-tat. + + Names: + + - Resurrection: [Eckhart2015]_ + """ + + # These are various properties for the strategy + name = "Resurrection" + classifier = { + "memory_depth": 5, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if len(self.history) == 0: + return C + if len(self.history) >= 5 and self.history[-5:] == [D, D, D, D, D]: + return D + else: + return opponent.history[-1] + + +class DoubleResurrection(IpdPlayer): + """ + A player starts by cooperating and defects if the number of rounds + played by the player is greater than five and the last five rounds + are cooperations. + + If the last five rounds were defections, the player cooperates. 
+
+    Names:
+
+    - DoubleResurrection: [Eckhart2015]_
+    """
+
+    name = "DoubleResurrection"
+    classifier = {
+        "memory_depth": 5,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if len(self.history) == 0:
+            return C
+        if len(self.history) >= 5 and self.history[-5:] == [C, C, C, C, C]:
+            return D
+        elif len(self.history) >= 5 and self.history[-5:] == [D, D, D, D, D]:
+            return C
+        else:
+            return opponent.history[-1]
diff --git a/axelrod/strategies/retaliate.py b/axelrod/strategies/retaliate.py
new file mode 100644
index 000000000..ad30b67f1
--- /dev/null
+++ b/axelrod/strategies/retaliate.py
@@ -0,0 +1,196 @@
+from collections import defaultdict
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class Retaliate(IpdPlayer):
+    """
+    A player that starts by cooperating, but retaliates once the number of
+    rounds in which the opponent has defected against its cooperation exceeds
+    10 percent of the number of rounds in which it has defected against the
+    opponent's cooperation.
+
+    Names:
+
+    - Retaliate: Original name by Owen Campbell
+    """
+
+    name = "Retaliate"
+    classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "inspects_source": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, retaliation_threshold: float = 0.1) -> None:
+        """
+        Uses the basic init from the IpdPlayer class and sets the retaliation
+        threshold.
+        """
+        super().__init__()
+        self.retaliation_threshold = retaliation_threshold
+        self.play_counts = defaultdict(int)  # type: defaultdict
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        """
+        If the opponent has played D to my C more often than x% of the time
+        that I've done the same to him, play D. Otherwise, play C.
+        """
+
+        if len(self.history):
+            last_round = (self.history[-1], opponent.history[-1])
+            self.play_counts[last_round] += 1
+        CD_count = self.play_counts[(C, D)]
+        DC_count = self.play_counts[(D, C)]
+        if CD_count > DC_count * self.retaliation_threshold:
+            return D
+        return C
+
+
+class Retaliate2(Retaliate):
+    """
+    Retaliate player with a threshold of 8 percent.
+
+    Names:
+
+    - Retaliate 2: Original name by Owen Campbell
+    """
+
+    name = "Retaliate 2"
+
+    def __init__(self, retaliation_threshold: float = 0.08) -> None:
+        super().__init__(retaliation_threshold=retaliation_threshold)
+
+
+class Retaliate3(Retaliate):
+    """
+    Retaliate player with a threshold of 5 percent.
+
+    Names:
+
+    - Retaliate 3: Original name by Owen Campbell
+    """
+
+    name = "Retaliate 3"
+
+    def __init__(self, retaliation_threshold: float = 0.05) -> None:
+        super().__init__(retaliation_threshold=retaliation_threshold)
+
+
+class LimitedRetaliate(IpdPlayer):
+    """
+    A player that cooperates unless the opponent defects and wins. It will
+    then retaliate by defecting. It stops when either it has beaten the
+    opponent 10 times more often than it has lost, or it reaches the
+    retaliation limit (20 defections).
+ + Names: + + - Limited Retaliate: Original name by Owen Campbell + """ + + name = "Limited Retaliate" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__( + self, retaliation_threshold: float = 0.1, retaliation_limit: int = 20 + ) -> None: + """ + Parameters + ---------- + retaliation_threshold, float + The threshold of the difference in defections, previous rounds of + (C, D) versus (D, C) + retaliation_limit, int + The maximum number of retaliations until the strategy returns to + cooperation + """ + super().__init__() + self.retaliating = False + self.retaliation_count = 0 + self.retaliation_threshold = retaliation_threshold + self.retaliation_limit = retaliation_limit + self.play_counts = defaultdict(int) # type: defaultdict + + def strategy(self, opponent: IpdPlayer) -> Action: + """ + If the opponent has played D to my C more often than x% of the time + that I've done the same to him, retaliate by playing D but stop doing + so once I've hit the retaliation limit. + """ + + if len(self.history): + last_round = (self.history[-1], opponent.history[-1]) + self.play_counts[last_round] += 1 + CD_count = self.play_counts[(C, D)] + DC_count = self.play_counts[(D, C)] + if CD_count > DC_count * self.retaliation_threshold: + self.retaliating = True + else: + self.retaliating = False + self.retaliation_count = 0 + + if self.retaliating: + if self.retaliation_count < self.retaliation_limit: + self.retaliation_count += 1 + return D + else: + self.retaliation_count = 0 + self.retaliating = False + + return C + + +class LimitedRetaliate2(LimitedRetaliate): + """ + LimitedRetaliate player with a threshold of 8 percent and a + retaliation limit of 15. + + Names: + + - Limited Retaliate 2: Original name by Owen Campbell + """ + + name = "Limited Retaliate 2" + + def __init__( + self, retaliation_threshold: float = 0.08, retaliation_limit: int = 15 + ) -> None: + super().__init__( + retaliation_threshold=retaliation_threshold, + retaliation_limit=retaliation_limit, + ) + + +class LimitedRetaliate3(LimitedRetaliate): + """ + LimitedRetaliate player with a threshold of 5 percent and a + retaliation limit of 20. + + Names: + + - Limited Retaliate 3: Original name by Owen Campbell + """ + + name = "Limited Retaliate 3" + + def __init__( + self, retaliation_threshold: float = 0.05, retaliation_limit: int = 20 + ) -> None: + super().__init__( + retaliation_threshold=retaliation_threshold, + retaliation_limit=retaliation_limit, + ) diff --git a/axelrod/strategies/revised_downing.py b/axelrod/strategies/revised_downing.py new file mode 100644 index 000000000..0155df07c --- /dev/null +++ b/axelrod/strategies/revised_downing.py @@ -0,0 +1,75 @@ +""" +Revised Downing implemented from the Fortran source code for the second of +Axelrod's tournaments. +""" +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + +class RevisedDowning(IpdPlayer): + """ + Strategy submitted to Axelrod's second tournament by Leslie Downing. + (K59R). + + Revised Downing attempts to determine if players are cooperative or not. + If so, it cooperates with them. + + This strategy is a revision of the strategy submitted by Downing to + Axelrod's first tournament. 
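+
+    Concretely, it maintains empirical estimates of the probability that the
+    opponent cooperates after this player's cooperation (``good``) and after
+    its defection (``bad``), and picks the move favoured by a linear
+    comparison of those estimates (see the implementation below).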
+
+    Names:
+
+    - Revised Downing: [Axelrod1980]_
+    """
+
+    name = "Revised Downing"
+
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.good = 1.0
+        self.bad = 0.0
+        self.nice1 = 0
+        self.nice2 = 0
+        self.total_C = 0  # note the same as self.cooperations
+        self.total_D = 0  # note the same as self.defections
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        round_number = len(self.history) + 1
+
+        if round_number == 1:
+            return C
+
+        # Update various counts
+        if round_number > 2:
+            if self.history[-2] == D:
+                if opponent.history[-1] == C:
+                    self.nice2 += 1
+                self.total_D += 1
+                self.bad = self.nice2 / self.total_D
+            else:
+                if opponent.history[-1] == C:
+                    self.nice1 += 1
+                self.total_C += 1
+                self.good = self.nice1 / self.total_C
+        # Make a decision based on the accrued counts
+        c = 6.0 * self.good - 8.0 * self.bad - 2
+        alt = 4.0 * self.good - 5.0 * self.bad - 1
+        if c >= 0 and c >= alt:
+            move = C
+        elif (c >= 0 and c < alt) or (alt >= 0):
+            move = self.history[-1].flip()
+        else:
+            move = D
+        return move
diff --git a/axelrod/strategies/selfsteem.py b/axelrod/strategies/selfsteem.py
new file mode 100644
index 000000000..ffd49347d
--- /dev/null
+++ b/axelrod/strategies/selfsteem.py
@@ -0,0 +1,53 @@
+from math import pi, sin
+
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice
+
+C, D = Action.C, Action.D
+
+
+class SelfSteem(IpdPlayer):
+    """
+    This strategy is based on the feeling with the same name.
+    It is modeled on the sine curve f = sin(2 * pi * n / 10), which varies
+    with the current iteration.
+
+    If f > 0.95, the 'ego' of the algorithm is inflated; it always defects.
+    If 0.95 > abs(f) > 0.3, it behaves rationally, following Tit For Tat.
+    If 0.3 > f > -0.3, it behaves randomly.
+    If f < -0.95, the algorithm is at rock bottom; it always cooperates.
+
+    Furthermore, the algorithm implements a retaliation policy: if the
+    opponent defects, the sine curve is shifted. Due to lack of further
+    information, this implementation does not include a sine phase change.
+
+    Names:
+
+    - SelfSteem: [Andre2013]_
+    """
+
+    name = "SelfSteem"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        turns_number = len(self.history)
+        sine_value = sin(2 * pi * turns_number / 10)
+
+        if sine_value > 0.95:
+            return D
+
+        if abs(sine_value) < 0.95 and abs(sine_value) > 0.3:
+            return opponent.history[-1]
+
+        if sine_value < 0.3 and sine_value > -0.3:
+            return random_choice()
+
+        return C
diff --git a/axelrod/strategies/sequence_player.py b/axelrod/strategies/sequence_player.py
new file mode 100644
index 000000000..e5a1ae1fd
--- /dev/null
+++ b/axelrod/strategies/sequence_player.py
@@ -0,0 +1,111 @@
+from types import FunctionType
+from typing import Tuple
+
+from axelrod._strategy_utils import thue_morse_generator
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+
+C, D = Action.C, Action.D
+
+
+class SequencePlayer(IpdPlayer):
+    """Abstract base class for players that use a generated sequence to
+    determine their plays.
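+
+    A minimal illustrative subclass (hypothetical, not part of the library;
+    name and classifier omitted for brevity) would play from an all-ones
+    generator and therefore always cooperate::
+
+        def ones():
+            while True:
+                yield 1
+
+        class AlwaysOne(SequencePlayer):
+            def __init__(self) -> None:
+                super().__init__(ones, ())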
+ + Names: + + - Sequence IpdPlayer: Original name by Marc Harper + """ + + def __init__( + self, generator_function: FunctionType, generator_args: Tuple = () + ) -> None: + super().__init__() + self.sequence_generator = generator_function(*generator_args) + + @staticmethod + def meta_strategy(value: int) -> Action: + """Determines how to map the sequence value to cooperate or defect. + By default, treat values like python truth values. Override in child + classes for alternate behaviors.""" + if value == 0: + return D + else: + return C + + def strategy(self, opponent: IpdPlayer) -> Action: + # Iterate through the sequence and apply the meta strategy + for s in self.sequence_generator: + return self.meta_strategy(s) + + def __getstate__(self): + """Generator attributes are not pickleable so we remove and rebuild.""" + return_dict = self.__dict__.copy() + del return_dict["sequence_generator"] + return return_dict + + def __setstate__(self, state): + self.reset() + self._history = state["_history"] + self.match_attributes = state["match_attributes"] + for _ in self.history: + next(self.sequence_generator) + + +class ThueMorse(SequencePlayer): + """ + A player who cooperates or defects according to the Thue-Morse sequence. + The first few terms of the Thue-Morse sequence are: + 0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 . . . + + Thue-Morse sequence: http://mathworld.wolfram.com/Thue-MorseSequence.html + + Names: + + - Thue Morse: Original name by Geraint Palmer + """ + + name = "ThueMorse" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + super().__init__(thue_morse_generator, (0,)) + + +class ThueMorseInverse(ThueMorse): + """ A player who plays the inverse of the Thue-Morse sequence. + + Names: + + - Inverse Thue Morse: Original name by Geraint Palmer + """ + + name = "ThueMorseInverse" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + super(ThueMorse, self).__init__(thue_morse_generator, (0,)) + + @staticmethod + def meta_strategy(value: int) -> Action: + # Switch the default cooperate and defect action on 0 or 1 + if value == 0: + return C + else: + return D diff --git a/axelrod/strategies/shortmem.py b/axelrod/strategies/shortmem.py new file mode 100644 index 000000000..4d022d875 --- /dev/null +++ b/axelrod/strategies/shortmem.py @@ -0,0 +1,48 @@ +from axelrod import IpdPlayer +from axelrod.action import Action + +C, D = Action.C, Action.D + + +class ShortMem(IpdPlayer): + """ + A player starts by always cooperating for the first 10 moves. + + From the tenth round on, the player analyzes the last ten actions, and + compare the number of defects and cooperates of the opponent, based in + percentage. If cooperation occurs 30% more than defection, it will + cooperate. + If defection occurs 30% more than cooperation, the program will defect. + Otherwise, the program follows the TitForTat algorithm. 
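+
+    As an illustrative check against the implementation: seven cooperations
+    and three defections in the last ten moves give a difference of 4 >= 3,
+    so the player cooperates.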
+
+    Names:
+
+    - ShortMem: [Andre2013]_
+    """
+
+    name = "ShortMem"
+    classifier = {
+        "memory_depth": float('inf'),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def strategy(opponent: IpdPlayer) -> Action:
+        if len(opponent.history) <= 10:
+            return C
+
+        array = opponent.history[-10:]
+        C_counts = array.count(C)
+        D_counts = array.count(D)
+
+        if C_counts - D_counts >= 3:
+            return C
+        elif D_counts - C_counts >= 3:
+            return D
+        else:
+            return opponent.history[-1]
diff --git a/axelrod/strategies/stalker.py b/axelrod/strategies/stalker.py
new file mode 100644
index 000000000..0339afee5
--- /dev/null
+++ b/axelrod/strategies/stalker.py
@@ -0,0 +1,78 @@
+from axelrod.action import Action
+from axelrod.player import IpdPlayer
+from axelrod.random_ import random_choice
+from axelrod.strategy_transformers import FinalTransformer
+
+C, D = Action.C, Action.D
+
+
+@FinalTransformer((D,), name_prefix=None)  # End with defection
+class Stalker(IpdPlayer):
+    """
+
+    This is a strategy which is only influenced by the score.
+    Its behavior is based on three values:
+    the very_bad_score (all rounds in defection),
+    the very_good_score (all rounds in cooperation),
+    and the wish_score (average of the very_bad and very_good scores).
+
+    It starts with cooperation.
+
+    - If current_average_score > very_good_score, it defects
+    - If current_average_score lies in (wish_score, very_good_score) it
+      cooperates
+    - If current_average_score > 2, it cooperates
+    - If current_average_score lies in (1, 2), it defects
+    - In the remaining case, current_average_score < 1, it behaves randomly
+    - It defects in the last round
+
+    Names:
+
+    - Stalker: [Andre2013]_
+    """
+
+    name = "Stalker"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(["game", "length"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def receive_match_attributes(self):
+        R, P, S, T = self.match_attributes["game"].RPST()
+        self.very_good_score = R
+        self.very_bad_score = P
+        self.wish_score = (R + P) / 2
+        self.current_score = 0
+
+    def score_last_round(self, opponent: IpdPlayer):
+        # Load the default game if not supplied by a tournament.
+ game = self.match_attributes["game"] + last_round = (self.history[-1], opponent.history[-1]) + scores = game.score(last_round) + self.current_score += scores[0] + + def strategy(self, opponent: IpdPlayer) -> Action: + + if len(self.history) == 0: + return C + + self.score_last_round(opponent) + + current_average_score = self.current_score / len(self.history) + + if current_average_score > self.very_good_score: + return D + if (current_average_score > self.wish_score) and ( + current_average_score < self.very_good_score + ): + return C + if current_average_score > 2: + return C + if (current_average_score < 2) and (current_average_score > 1): + return D + return random_choice() diff --git a/axelrod/strategies/titfortat.py b/axelrod/strategies/titfortat.py new file mode 100644 index 000000000..1e5eb29f4 --- /dev/null +++ b/axelrod/strategies/titfortat.py @@ -0,0 +1,917 @@ +from axelrod.action import Action, actions_to_str +from axelrod.player import IpdPlayer +from axelrod.random_ import random_choice +from axelrod.strategy_transformers import FinalTransformer, TrackHistoryTransformer + +C, D = Action.C, Action.D + + +class TitForTat(IpdPlayer): + """ + A player starts by cooperating and then mimics the previous action of the + opponent. + + This strategy was referred to as the *'simplest'* strategy submitted to + Axelrod's first tournament. It came first. + + Note that the code for this strategy is written in a fairly verbose + way. This is done so that it can serve as an example strategy for + those who might be new to Python. + + Names: + + - Rapoport's strategy: [Axelrod1980]_ + - TitForTat: [Axelrod1980]_ + """ + + # These are various properties for the strategy + name = "Tit For Tat" + classifier = { + "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + """This is the actual strategy""" + # First move + if not self.history: + return C + # React to the opponent's last move + if opponent.history[-1] == D: + return D + return C + + +class TitFor2Tats(IpdPlayer): + """A player starts by cooperating and then defects only after two defects by + opponent. + + Submitted to Axelrod's second tournament by John Maynard Smith; it came in + 24th in that tournament. + + Names: + + - Tit for two Tats: [Axelrod1984]_ + - Slow tit for two tats: Original name by Ranjini Das + - JMaynardSmith: [Axelrod1980b]_ + """ + + name = "Tit For 2 Tats" + classifier = { + "memory_depth": 2, # Long memory, memory-2 + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + return D if opponent.history[-2:] == [D, D] else C + + +class TwoTitsForTat(IpdPlayer): + """A player starts by cooperating and replies to each defect by two + defections. 
+ + Names: + + - Two Tits for Tats: [Axelrod1984]_ + """ + + name = "Two Tits For Tat" + classifier = { + "memory_depth": 2, # Long memory, memory-2 + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + return D if D in opponent.history[-2:] else C + + +class DynamicTwoTitsForTat(IpdPlayer): + """ + A player starts by cooperating and then punishes its opponent's + defections with defections, but with a dynamic bias towards cooperating + based on the opponent's ratio of cooperations to total moves + (so their current probability of cooperating regardless of the + opponent's move (aka: forgiveness)). + + Names: + + - Dynamic Two Tits For Tat: Original name by Grant Garrett-Grossman. + """ + + name = "Dynamic Two Tits For Tat" + classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent): + # First move + if not opponent.history: + # Make sure we cooperate first turn + return C + if D in opponent.history[-2:]: + # Probability of cooperating regardless + return random_choice(opponent.cooperations / len(opponent.history)) + else: + return C + + +class Bully(IpdPlayer): + """A player that behaves opposite to Tit For Tat, including first move. + + Starts by defecting and then does the opposite of opponent's previous move. + This is the complete opposite of Tit For Tat, also called Bully in the + literature. + + Names: + + - Reverse Tit For Tat: [Nachbar1992]_ + + """ + + name = "Bully" + classifier = { + "memory_depth": 1, # Four-Vector = (0, 1, 0, 1) + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + return C if opponent.history[-1:] == [D] else D + + +class SneakyTitForTat(IpdPlayer): + """Tries defecting once and repents if punished. + + Names: + + - Sneaky Tit For Tat: Original name by Karol Langner + """ + + name = "Sneaky Tit For Tat" + classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + if len(self.history) < 2: + return C + if D not in opponent.history: + return D + if opponent.history[-1] == D and self.history[-2] == D: + return C + return opponent.history[-1] + + +class SuspiciousTitForTat(IpdPlayer): + """A variant of Tit For Tat that starts off with a defection. + + Names: + + - Suspicious Tit For Tat: [Hilbe2013]_ + - Mistrust: [Beaufils1997]_ + """ + + name = "Suspicious Tit For Tat" + classifier = { + "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + return C if opponent.history[-1:] == [C] else D + + +class AntiTitForTat(IpdPlayer): + """A strategy that plays the opposite of the opponents previous move. + This is similar to Bully, except that the first move is cooperation. 
+
+    Names:
+
+    - Anti Tit For Tat: [Hilbe2013]_
+    - Psycho (PSYC): [Ashlock2009]_
+    """
+
+    name = "Anti Tit For Tat"
+    classifier = {
+        "memory_depth": 1,  # Four-Vector = (0., 1., 0., 1.)
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def strategy(opponent: IpdPlayer) -> Action:
+        return D if opponent.history[-1:] == [C] else C
+
+
+class HardTitForTat(IpdPlayer):
+    """A variant of Tit For Tat that uses a longer history for retaliation.
+
+    Names:
+
+    - Hard Tit For Tat: [PD2017]_
+    """
+
+    name = "Hard Tit For Tat"
+    classifier = {
+        "memory_depth": 3,  # memory-three
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def strategy(opponent: IpdPlayer) -> Action:
+        # Cooperate on the first move
+        if not opponent.history:
+            return C
+        # Defect if D is in the opponent's last three moves
+        if D in opponent.history[-3:]:
+            return D
+        # Otherwise cooperate
+        return C
+
+
+class HardTitFor2Tats(IpdPlayer):
+    """A variant of Tit For Two Tats that uses a longer history for
+    retaliation.
+
+    Names:
+
+    - Hard Tit For Two Tats: [Stewart2012]_
+    """
+
+    name = "Hard Tit For 2 Tats"
+    classifier = {
+        "memory_depth": 3,  # memory-three
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    @staticmethod
+    def strategy(opponent: IpdPlayer) -> Action:
+        # Cooperate on the first move
+        if not opponent.history:
+            return C
+        # Defect if the opponent played two consecutive Ds in their last
+        # three moves
+        history_string = actions_to_str(opponent.history[-3:])
+        if "DD" in history_string:
+            return D
+        # Otherwise cooperate
+        return C
+
+
+class OmegaTFT(IpdPlayer):
+    """OmegaTFT modifies Tit For Tat in two ways:
+    - it checks for deadlock loops of alternating rounds of (C, D) and
+      (D, C) and attempts to break them
+    - it uses a more sophisticated retaliation mechanism that is noise
+      tolerant
+
+    Names:
+
+    - OmegaTFT: [Slany2007]_
+    """
+
+    name = "Omega TFT"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(
+        self, deadlock_threshold: int = 3, randomness_threshold: int = 8
+    ) -> None:
+        super().__init__()
+        self.deadlock_threshold = deadlock_threshold
+        self.randomness_threshold = randomness_threshold
+        self.randomness_counter = 0
+        self.deadlock_counter = 0
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # Cooperate on the first move
+        if not self.history:
+            return C
+        # TFT on round 2
+        if len(self.history) == 1:
+            return opponent.history[-1]
+
+        # Are we deadlocked? (in a CD -> DC loop)
+        if self.deadlock_counter >= self.deadlock_threshold:
+            move = C
+            if self.deadlock_counter == self.deadlock_threshold:
+                self.deadlock_counter = self.deadlock_threshold + 1
+            else:
+                self.deadlock_counter = 0
+        else:
+            # Update counters
+            if opponent.history[-2:] == [C, C]:
+                self.randomness_counter -= 1
+            # If the opponent's move changed, increase the counter
+            if opponent.history[-2] != opponent.history[-1]:
+                self.randomness_counter += 1
+            # If the opponent's last move differed from mine,
+            # increase the counter
+            if self.history[-1] != opponent.history[-1]:
+                self.randomness_counter += 1
+            # Compare counts to thresholds
+            # If the randomness counter reaches its threshold, defect for
+            # the remainder of the match
+            if self.randomness_counter >= self.randomness_threshold:
+                move = D
+            else:
+                # TFT
+                move = opponent.history[-1]
+                # Check for deadlock
+                if opponent.history[-2] != opponent.history[-1]:
+                    self.deadlock_counter += 1
+                else:
+                    self.deadlock_counter = 0
+        return move
+
+
+class OriginalGradual(IpdPlayer):
+    """
+    A player that punishes defections with a growing number of defections,
+    but after punishing `punishment_limit` times it enters a calming state
+    and cooperates for two rounds no matter what the opponent does.
+
+    The `punishment_limit` is incremented whenever the opponent defects and
+    the strategy is in neither the calming nor the punishing state.
+
+    Note that a strategy named `Gradual` appears in [CRISTAL-SMAC2018]_;
+    however, that version of `Gradual` does not give the results reported in
+    [Beaufils1997]_, the paper that first introduced the strategy. For a
+    longer discussion of this see:
+    https://github.com/Axelrod-Python/Axelrod/issues/1294. This is why this
+    strategy has been renamed to `OriginalGradual`.
+
+    Names:
+
+    - Gradual: [Beaufils1997]_
+    """
+
+    name = "Original Gradual"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+
+        super().__init__()
+        self.calming = False
+        self.punishing = False
+        self.punishment_count = 0
+        self.punishment_limit = 0
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+
+        if self.calming:
+            self.calming = False
+            return C
+
+        if self.punishing:
+            if self.punishment_count < self.punishment_limit:
+                self.punishment_count += 1
+                return D
+            else:
+                self.calming = True
+                self.punishing = False
+                self.punishment_count = 0
+                return C
+
+        if D in opponent.history[-1:]:
+            self.punishing = True
+            self.punishment_count += 1
+            self.punishment_limit += 1
+            return D
+
+        return C
+
+
+class Gradual(IpdPlayer):
+    """
+    Similar to OriginalGradual, this is a player that punishes defections
+    with a growing number of defections, but after punishing
+    `punishment_limit` times it enters a calming state and cooperates for
+    two rounds no matter what the opponent does.
+
+    This version of Gradual is an update of `OriginalGradual`; the
+    difference is that the `punishment_limit` is incremented whenever the
+    opponent defects (regardless of the state of the player).
+
+    Note that this version of `Gradual` appears in [CRISTAL-SMAC2018]_;
+    however, it does not give the results reported in [Beaufils1997]_, the
+    paper that first introduced the strategy. For a longer discussion of
+    this see: https://github.com/Axelrod-Python/Axelrod/issues/1294.
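+
+    Concretely, each time a punishment is triggered this implementation
+    defects for as many rounds as the opponent has defected in total so
+    far, and then cooperates for two calming rounds.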
+ + This version is based on https://github.com/cristal-smac/ipd/blob/master/src/strategies.py#L224 + + Names: + + - Gradual: [CRISTAL-SMAC2018]_ + """ + + name = "Gradual" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self) -> None: + + super().__init__() + self.calm_count = 0 + self.punish_count = 0 + + def strategy(self, opponent: IpdPlayer) -> Action: + + if len(self.history) == 0: + return C + + if self.punish_count > 0: + self.punish_count -= 1 + return D + + if self.calm_count > 0: + self.calm_count -= 1 + return C + + if opponent.history[-1] == D: + self.punish_count = opponent.defections - 1 + self.calm_count = 2 + return D + return C + + +@TrackHistoryTransformer(name_prefix=None) +class ContriteTitForTat(IpdPlayer): + """ + A player that corresponds to Tit For Tat if there is no noise. In the case + of a noisy match: if the opponent defects as a result of a noisy defection + then ContriteTitForTat will become 'contrite' until it successfully + cooperates. + + Names: + + - Contrite Tit For Tat: [Axelrod1995]_ + """ + + name = "Contrite Tit For Tat" + classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self): + super().__init__() + self.contrite = False + self._recorded_history = [] + + def strategy(self, opponent: IpdPlayer) -> Action: + + if not opponent.history: + return C + + # If contrite but managed to cooperate: apologise. + if self.contrite and self.history[-1] == C: + self.contrite = False + return C + + # Check if noise provoked opponent + if self._recorded_history[-1] != self.history[-1]: # Check if noise + if self.history[-1] == D and opponent.history[-1] == C: + self.contrite = True + + return opponent.history[-1] + + +class AdaptiveTitForTat(IpdPlayer): + """ATFT - Adaptive Tit For Tat (Basic Model) + + Algorithm + + if (opponent played C in the last cycle) then + world = world + r*(1-world) + else + world = world + r*(0-world) + If (world >= 0.5) play C, else play D + + Attributes + + world : float [0.0, 1.0], set to 0.5 + continuous variable representing the world's image + 1.0 - total cooperation + 0.0 - total defection + other values - something in between of the above + updated every round, starting value shouldn't matter as long as + it's >= 0.5 + + Parameters + + rate : float [0.0, 1.0], default=0.5 + adaptation rate - r in Algorithm above + smaller value means more gradual and robust + to perturbations behaviour + + Names: + + - Adaptive Tit For Tat: [Tzafestas2000]_ + """ + + name = "Adaptive Tit For Tat" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + world = 0.5 + + def __init__(self, rate: float = 0.5) -> None: + super().__init__() + self.rate = rate + self.world = rate + + def strategy(self, opponent: IpdPlayer) -> Action: + + if len(opponent.history) == 0: + return C + + if opponent.history[-1] == C: + self.world += self.rate * (1.0 - self.world) + else: + self.world -= self.rate * self.world + + if self.world >= 0.5: + return C + + return D + + +class SpitefulTitForTat(IpdPlayer): + """ + A player starts by cooperating and then mimics 
the previous action of the
+    opponent until the opponent defects twice in a row, at which point the
+    player always defects.
+
+    Names:
+
+    - Spiteful Tit For Tat: [Prison1998]_
+    """
+
+    name = "Spiteful Tit For Tat"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.retaliating = False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # First move
+        if not self.history:
+            return C
+
+        if opponent.history[-2:] == [D, D]:
+            self.retaliating = True
+
+        if self.retaliating:
+            return D
+        else:
+            # React to the opponent's last move
+            if opponent.history[-1] == D:
+                return D
+            return C
+
+
+class SlowTitForTwoTats2(IpdPlayer):
+    """
+    A player that plays C twice, then mimics the opponent's move if the
+    opponent has played the same move twice in a row, and otherwise repeats
+    its own previous move.
+
+    Names:
+
+    - Slow Tit For Tat: [Prison1998]_
+    """
+
+    name = "Slow Tit For Two Tats 2"
+    classifier = {
+        "memory_depth": 2,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+
+        # Start with two cooperations
+        if len(self.history) < 2:
+            return C
+
+        # Mimic if opponent plays the same move twice
+        if opponent.history[-2] == opponent.history[-1]:
+            return opponent.history[-1]
+
+        # Otherwise play previous move
+        return self.history[-1]
+
+
+@FinalTransformer((D,), name_prefix=None)
+class Alexei(IpdPlayer):
+    """
+    Plays similarly to Tit-for-Tat, but always defects on the last turn.
+
+    Names:
+
+    - Alexei: [LessWrong2011]_
+    """
+
+    name = "Alexei"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": {"length"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if not self.history:
+            return C
+        if opponent.history[-1] == D:
+            return D
+        return C
+
+
+@FinalTransformer((D,), name_prefix=None)
+class EugineNier(IpdPlayer):
+    """
+    Plays similarly to Tit-for-Tat, but with two conditions:
+    1) Always defects on the last move.
+    2) If the other player defects five times, switches to always defecting.
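+    (The five defections are counted cumulatively over the match; they
+    need not be consecutive.)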
+
+    Names:
+
+    - Eugine Nier: [LessWrong2011]_
+    """
+
+    name = "EugineNier"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": {"length"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self):
+        super().__init__()
+        self.is_defector = False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if not self.history:
+            return C
+        if not (self.is_defector) and opponent.defections >= 5:
+            self.is_defector = True
+        if self.is_defector:
+            return D
+        return opponent.history[-1]
+
+
+class NTitsForMTats(IpdPlayer):
+    """
+    A parameterizable Tit-for-Tat.
+    The arguments are:
+    1) M: the number of defections needed to trigger retaliation
+    2) N: the number of retaliations
+
+    Names:
+
+    - N Tit(s) For M Tat(s): Original name by Marc Harper
+    """
+
+    name = "N Tit(s) For M Tat(s)"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, N: int = 3, M: int = 2) -> None:
+        """
+        Parameters
+        ----------
+        N: int
+            Number of retaliations
+        M: int
+            Number of defections needed to trigger retaliation
+
+        Special Cases
+        -------------
+        NTitsForMTats(1,1) is equivalent to TitForTat
+        NTitsForMTats(1,2) is equivalent to TitFor2Tats
+        NTitsForMTats(2,1) is equivalent to TwoTitsForTat
+        NTitsForMTats(0,*) is equivalent to Cooperator
+        NTitsForMTats(*,0) is equivalent to Defector
+        """
+        super().__init__()
+        self.N = N
+        self.M = M
+        self.classifier["memory_depth"] = max([M, N])
+        self.retaliate_count = 0
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        # If the opponent defected M consecutive times, start retaliating
+        if not self.M or opponent.history[-self.M :].count(D) == self.M:
+            self.retaliate_count = self.N
+        if self.retaliate_count:
+            self.retaliate_count -= 1
+            return D
+        return C
+
+
+@FinalTransformer((D,), name_prefix=None)
+class Michaelos(IpdPlayer):
+    """
+    Plays similarly to Tit-for-Tat with two exceptions:
+    1) Defects on the last turn.
+    2) After its own defection and the opponent's cooperation, cooperates
+    50 percent of the time. The other 50 percent of the time, it defects
+    for the rest of the game.
+
+    Names:
+
+    - Michaelos: [LessWrong2011]_
+    """
+
+    name = "Michaelos"
+    classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": {"length"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self):
+        super().__init__()
+        self.is_defector = False
+
+    def strategy(self, opponent: IpdPlayer) -> Action:
+        if not self.history:
+            return C
+        if self.is_defector:
+            return D
+        if self.history[-1] == D and opponent.history[-1] == C:
+            decision = random_choice()
+            if decision == C:
+                return C
+            else:
+                self.is_defector = True
+                return D
+
+        return opponent.history[-1]
+
+
+class RandomTitForTat(IpdPlayer):
+    """
+    A player that starts by cooperating and then copies its opponent
+    (tit for tat style). From then on it alternates between copying its
+    opponent and responding randomly.
+
+    Names:
+
+    - Random TitForTat: Original name by Zachary M.
Taylor + """ + + # These are various properties for the strategy + name = "Random Tit for Tat" + classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def __init__(self, p: float = 0.5) -> None: + """ + Parameters + ---------- + p, float + The probability to cooperate + """ + super().__init__() + self.p = p + self.act_random = False + if p in [0, 1]: + self.classifier["stochastic"] = False + + def strategy(self, opponent: IpdPlayer) -> Action: + """This is the actual strategy""" + if not self.history: + return C + + if self.act_random: + self.act_random = False + return random_choice(self.p) + + self.act_random = True + return opponent.history[-1] diff --git a/axelrod/strategies/verybad.py b/axelrod/strategies/verybad.py new file mode 100644 index 000000000..6fe374f92 --- /dev/null +++ b/axelrod/strategies/verybad.py @@ -0,0 +1,52 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer + +C, D = Action.C, Action.D + + +class VeryBad(IpdPlayer): + """ + It cooperates in the first three rounds, and uses probability + (it implements a memory, which stores the opponent’s moves) to decide for + cooperating or defecting. + Due to a lack of information as to what that probability refers to in this + context, probability(P(X)) refers to (Count(X)/Total_Moves) in this + implementation + P(C) = Cooperations / Total_Moves + P(D) = Defections / Total_Moves = 1 - P(C) + + Names: + + - VeryBad: [Andre2013]_ + """ + + name = "VeryBad" + classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + @staticmethod + def strategy(opponent: IpdPlayer) -> Action: + total_moves = len(opponent.history) + + if total_moves < 3: + return C + + cooperations = opponent.cooperations + + cooperation_probability = cooperations / total_moves + + if cooperation_probability > 0.5: + return C + + elif cooperation_probability < 0.5: + return D + + else: + return opponent.history[-1] diff --git a/axelrod/strategies/worse_and_worse.py b/axelrod/strategies/worse_and_worse.py new file mode 100644 index 000000000..0e1bae7b2 --- /dev/null +++ b/axelrod/strategies/worse_and_worse.py @@ -0,0 +1,126 @@ +from axelrod.action import Action +from axelrod.player import IpdPlayer +from axelrod.random_ import random_choice + +C, D = Action.C, Action.D + + +class WorseAndWorse(IpdPlayer): + """ + Defects with probability of 'current turn / 1000'. Therefore + it is more and more likely to defect as the round goes on. + + Source code available at the download tab of [Prison1998]_ + + + Names: + - Worse and Worse: [Prison1998]_ + """ + + name = "Worse and Worse" + classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + current_round = len(self.history) + 1 + probability = 1 - current_round / 1000 + return random_choice(probability) + + +class KnowledgeableWorseAndWorse(IpdPlayer): + """ + This strategy is based on 'Worse And Worse' but will defect with probability + of 'current turn / total no. of turns'. 
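+    For example, on turn 50 of a 100 turn match it defects with
+    probability one half.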
+ + Names: + - Knowledgeable Worse and Worse: Original name by Adam Pohl + """ + + name = "Knowledgeable Worse and Worse" + classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(["length"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + current_round = len(self.history) + 1 + expected_length = self.match_attributes["length"] + probability = 1 - current_round / expected_length + return random_choice(probability) + + +class WorseAndWorse2(IpdPlayer): + """ + Plays as tit for tat during the first 20 moves. + Then defects with probability (current turn - 20) / current turn. + Therefore it is more and more likely to defect as the round goes on. + + Names: + - Worse and Worse 2: [Prison1998]_ + """ + + name = "Worse and Worse 2" + classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + current_round = len(self.history) + 1 + + if current_round == 1: + return C + elif current_round <= 20: + return opponent.history[-1] + else: + probability = 20 / current_round + return random_choice(probability) + + +class WorseAndWorse3(IpdPlayer): + """ + Cooperates in the first turn. + Then defects with probability no. of opponent defects / (current turn - 1). + Therefore it is more likely to defect when the opponent defects for a larger + proportion of the turns. + + Names: + - Worse and Worse 3: [Prison1998]_ + """ + + name = "Worse and Worse 3" + classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def strategy(self, opponent: IpdPlayer) -> Action: + current_round = len(self.history) + 1 + + if current_round == 1: + return C + else: + probability = 1 - opponent.defections / (current_round - 1) + return random_choice(probability) diff --git a/axelrod/strategies/zero_determinant.py b/axelrod/strategies/zero_determinant.py new file mode 100644 index 000000000..aaacfb130 --- /dev/null +++ b/axelrod/strategies/zero_determinant.py @@ -0,0 +1,256 @@ +from axelrod.action import Action + +from .memoryone import MemoryOnePlayer + +C, D = Action.C, Action.D + + +class LRPlayer(MemoryOnePlayer): + """ + Abstraction for Linear Relation players. These players enforce a linear + difference in stationary payoffs :math:`s (S_{xy} - l) = S_{yx} - l.` + + The parameter :math:`s` is called the slope and the parameter :math:`l` the + baseline payoff. For extortionate strategies, the extortion factor + :math:`\chi` is the inverse of the slope :math:`s`. + + For the standard prisoner's dilemma where :math:`T > R > P > S` and + :math:`R > (T + S) / 2 > P`, a pair :math:`(l, s)` is enforceable iff + + .. math:: + :nowrap: + + \\begin{eqnarray} + &P &<= l <= R \\\\ + &s_{min} &= -\min\\left( \\frac{T - l}{l - S}, \\frac{l - S}{T - l}\\right) <= s <= 1 + \\end{eqnarray} + + And also that there exists :math:`\\phi` such that + + .. 
math::
+        :nowrap:
+
+        \\begin{eqnarray}
+        p_1 &= P(C|CC) &= 1 - \\phi (1 - s)(R - l) \\\\
+        p_2 &= P(C|CD) &= 1 - \\phi (s(l - S) + (T - l)) \\\\
+        p_3 &= P(C|DC) &= \\phi ((l - S) + s(T - l)) \\\\
+        p_4 &= P(C|DD) &= \\phi (1 - s)(l - P)
+        \\end{eqnarray}
+
+    These conditions also force :math:`\\phi >= 0`. For a given pair
+    :math:`(l, s)` there may be multiple such :math:`\\phi`.
+
+    This parameterization is Equation 14 in [Hilbe2013]_.
+    See Figure 2 of the article for a more in-depth explanation. Other game
+    parameters can alter the relations and bounds above.
+
+    Names:
+
+    - Linear Relation player: [Hilbe2013]_
+    """
+
+    name = "LinearRelation"
+    classifier = {
+        "memory_depth": 1,  # Memory-one Four-Vector
+        "stochastic": True,
+        "makes_use_of": set(["game"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def __init__(self, phi: float = 0.2, s: float = 0.1, l: float = 1) -> None:
+        """
+        Parameters
+
+        phi, s, l: floats
+            Parameters determining the four_vector of the LR player.
+        """
+        self.phi = phi
+        self.s = s
+        self.l = l
+        super().__init__()
+
+    def set_initial_four_vector(self, four_vector):
+        pass
+
+    def receive_match_attributes(self):
+        """
+        Parameters
+
+        phi, s, l: floats
+            Parameters used to compute the four-vector according to the
+            parameterization of the strategies below.
+        """
+
+        R, P, S, T = self.match_attributes["game"].RPST()
+        l = self.l
+        phi = self.phi
+        s = self.s
+
+        # Check that the parameters are enforceable
+        s_min = -min((T - l) / (l - S), (l - S) / (T - l))
+        if (l < P) or (l > R) or (s > 1) or (s < s_min):
+            raise ValueError
+
+        p1 = 1 - phi * (1 - s) * (R - l)
+        p2 = 1 - phi * (s * (l - S) + (T - l))
+        p3 = phi * ((l - S) + s * (T - l))
+        p4 = phi * (1 - s) * (l - P)
+
+        four_vector = [p1, p2, p3, p4]
+        self.set_four_vector(four_vector)
+
+
+class ZDExtortion(LRPlayer):
+    """
+    An example ZD Extortion player.
+
+    Names:
+
+    - ZDExtortion: [Roemheld2013]_
+    """
+
+    name = "ZD-Extortion"
+
+    def __init__(self, phi: float = 0.2, s: float = 0.1, l: float = 1) -> None:
+        super().__init__(phi, s, l)
+
+
+class ZDExtort2(LRPlayer):
+    """
+    An Extortionate Zero Determinant Strategy with l=P.
+
+    Names:
+
+    - Extort-2: [Stewart2012]_
+    """
+
+    name = "ZD-Extort-2"
+
+    def __init__(self, phi: float = 1 / 9, s: float = 0.5) -> None:
+        # l = P will be set by receive_match_attributes
+        super().__init__(phi, s, None)
+
+    def receive_match_attributes(self):
+        (R, P, S, T) = self.match_attributes["game"].RPST()
+        self.l = P
+        super().receive_match_attributes()
+
+
+class ZDExtort2v2(LRPlayer):
+    """
+    An Extortionate Zero Determinant Strategy with l=1.
+
+    Names:
+
+    - EXTORT2: [Kuhn2017]_
+    """
+
+    name = "ZD-Extort-2 v2"
+
+    def __init__(self, phi: float = 1 / 8, s: float = 0.5, l: float = 1) -> None:
+        super().__init__(phi, s, l)
+
+
+class ZDExtort3(LRPlayer):
+    """
+    An extortionate strategy from Press and Dyson's paper with an extortion
+    factor of 3.
+
+    Names:
+
+    - ZDExtort3: Original name by Marc Harper
+    - Unnamed: [Press2012]_
+    """
+
+    name = "ZD-Extort3"
+
+    def __init__(self, phi: float = 3 / 26, s: float = 1 / 3, l: float = 1) -> None:
+        super().__init__(phi, s, l)
+
+
+class ZDExtort4(LRPlayer):
+    """
+    An Extortionate Zero Determinant Strategy with l=1, s=1/4.
TFT is the + other extreme (with l=3, s=1) + + + Names: + + - Extort 4: Original name by Marc Harper + """ + + name = "ZD-Extort-4" + + def __init__(self, phi: float = 4 / 17, s: float = 0.25, l: float = 1) -> None: + super().__init__(phi, s, l) + + +class ZDGen2(LRPlayer): + """ + A Generous Zero Determinant Strategy with l=3. + + Names: + + - GEN2: [Kuhn2017]_ + """ + + name = "ZD-GEN-2" + + def __init__(self, phi: float = 1 / 8, s: float = 0.5, l: float = 3) -> None: + super().__init__(phi, s, l) + + +class ZDGTFT2(LRPlayer): + """ + A Generous Zero Determinant Strategy with l=R. + + Names: + + - ZDGTFT-2: [Stewart2012]_ + """ + + name = "ZD-GTFT-2" + + def __init__(self, phi: float = 0.25, s: float = 0.5) -> None: + # l = R will be set by receive_match_attributes + super().__init__(phi, s, None) + + def receive_match_attributes(self): + (R, P, S, T) = self.match_attributes["game"].RPST() + self.l = R + super().receive_match_attributes() + + +class ZDMischief(LRPlayer): + """ + An example ZD Mischief player. + + Names: + + - ZDMischief: [Roemheld2013]_ + """ + + name = "ZD-Mischief" + + def __init__(self, phi: float = 0.1, s: float = 0.0, l: float = 1) -> None: + super().__init__(phi, s, l) + + +class ZDSet2(LRPlayer): + """ + A Generous Zero Determinant Strategy with l=2. + + Names: + + - SET2: [Kuhn2017]_ + """ + + name = "ZD-SET-2" + + def __init__(self, phi: float = 1 / 4, s: float = 0.0, l: float = 2) -> None: + super().__init__(phi, s, l) diff --git a/axelrod/tests/integration/__init__.py b/axelrod/tests/integration/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/axelrod/tests/integration/test_filtering.py b/axelrod/tests/integration/test_filtering.py new file mode 100644 index 000000000..bce495b76 --- /dev/null +++ b/axelrod/tests/integration/test_filtering.py @@ -0,0 +1,124 @@ +import unittest +import warnings + +import axelrod as axl +from axelrod.tests.property import strategy_lists + +from hypothesis import example, given, settings +from hypothesis.strategies import integers + + +class TestFiltersAgainstComprehensions(unittest.TestCase): + """ + Test that the results of filtering strategies via a filterset dict + match the results from using a list comprehension. 
+ """ + + def setUp(self) -> None: + # Ignore warnings about classifiers running on instances + warnings.simplefilter("ignore", category=UserWarning) + + def tearDown(self) -> None: + warnings.simplefilter("default", category=UserWarning) + + @given(strategies=strategy_lists(min_size=20, max_size=20)) + def test_boolean_filtering(self, strategies): + + classifiers = [ + "stochastic", + "long_run_time", + "manipulates_state", + "manipulates_source", + "inspects_source", + ] + + for classifier in classifiers: + comprehension = set(filter(axl.Classifiers[classifier], strategies)) + filterset = {classifier: True} + filtered = set(axl.filtered_strategies(filterset, strategies=strategies)) + self.assertEqual(comprehension, filtered) + + @given( + min_memory_depth=integers(min_value=1, max_value=10), + max_memory_depth=integers(min_value=1, max_value=10), + memory_depth=integers(min_value=1, max_value=10), + strategies=strategy_lists(min_size=20, max_size=20), + ) + @example( + min_memory_depth=float("inf"), + max_memory_depth=float("inf"), + memory_depth=float("inf"), + strategies=axl.short_run_time_strategies, + ) + @settings(max_examples=5) + def test_memory_depth_filtering( + self, min_memory_depth, max_memory_depth, memory_depth, strategies + ): + + min_comprehension = set( + [ + s + for s in strategies + if axl.Classifiers["memory_depth"](s) >= min_memory_depth + ] + ) + min_filterset = {"min_memory_depth": min_memory_depth} + min_filtered = set( + axl.filtered_strategies(min_filterset, strategies=strategies) + ) + self.assertEqual(min_comprehension, min_filtered) + + max_comprehension = set( + [ + s + for s in strategies + if axl.Classifiers["memory_depth"](s) <= max_memory_depth + ] + ) + max_filterset = {"max_memory_depth": max_memory_depth} + max_filtered = set( + axl.filtered_strategies(max_filterset, strategies=strategies) + ) + self.assertEqual(max_comprehension, max_filtered) + + comprehension = set( + [ + s + for s in strategies + if axl.Classifiers["memory_depth"](s) == memory_depth + ] + ) + filterset = {"memory_depth": memory_depth} + filtered = set(axl.filtered_strategies(filterset, strategies=strategies)) + self.assertEqual(comprehension, filtered) + + @given( + seed_=integers(min_value=0, max_value=4294967295), + strategies=strategy_lists(min_size=20, max_size=20), + ) + @settings(max_examples=5) + def test_makes_use_of_filtering(self, seed_, strategies): + """ + Test equivalent filtering using two approaches. + + This needs to be seeded as some players classification is random. 
+ """ + classifiers = [["game"], ["length"], ["game", "length"]] + + for classifier in classifiers: + axl.seed(seed_) + comprehension = set( + [ + s + for s in strategies + if set(classifier).issubset(set(axl.Classifiers["makes_use_of"](s))) + ] + ) + + axl.seed(seed_) + filterset = {"makes_use_of": classifier} + filtered = set(axl.filtered_strategies(filterset, strategies=strategies)) + + self.assertEqual( + comprehension, filtered, msg="classifier: {}".format(classifier) + ) diff --git a/axelrod/tests/integration/test_matches.py b/axelrod/tests/integration/test_matches.py new file mode 100644 index 000000000..b8b70aabe --- /dev/null +++ b/axelrod/tests/integration/test_matches.py @@ -0,0 +1,71 @@ +"""Tests for some expected match behaviours""" +import unittest + +import axelrod as axl +from axelrod.tests.property import strategy_lists + +from hypothesis import given, settings +from hypothesis.strategies import integers + +C, D = axl.Action.C, axl.Action.D + +deterministic_strategies = [ + s for s in axl.short_run_time_strategies if not axl.Classifiers["stochastic"](s()) +] +stochastic_strategies = [ + s for s in axl.short_run_time_strategies if axl.Classifiers["stochastic"](s()) +] + + +class TestMatchOutcomes(unittest.TestCase): + @given( + strategies=strategy_lists( + strategies=deterministic_strategies, min_size=2, max_size=2 + ), + turns=integers(min_value=1, max_value=20), + ) + @settings(max_examples=5) + def test_outcome_repeats(self, strategies, turns): + """A test that if we repeat 3 matches with deterministic and well + behaved strategies then we get the same result""" + players = [s() for s in strategies] + matches = [axl.IpdMatch(players, turns) for _ in range(3)] + self.assertEqual(matches[0].play(), matches[1].play()) + self.assertEqual(matches[1].play(), matches[2].play()) + + @given( + strategies=strategy_lists( + strategies=stochastic_strategies, min_size=2, max_size=2 + ), + turns=integers(min_value=1, max_value=20), + seed=integers(min_value=0, max_value=4294967295), + ) + @settings(max_examples=5) + def test_outcome_repeats_stochastic(self, strategies, turns, seed): + """a test to check that if a seed is set stochastic strategies give the + same result""" + results = [] + for _ in range(3): + axl.seed(seed) + players = [s() for s in strategies] + results.append(axl.IpdMatch(players, turns).play()) + + self.assertEqual(results[0], results[1]) + self.assertEqual(results[1], results[2]) + + def test_matches_with_det_player_for_stochastic_classes(self): + """A test based on a bug found in the cache. 
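+
+        The memory-one players below are given four-vectors containing only
+        0s and 1s, so their play is deterministic even though the strategy
+        class is stochastic in general.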
+ + See: https://github.com/Axelrod-Python/Axelrod/issues/779""" + p1 = axl.MemoryOnePlayer(four_vector=(0, 0, 0, 0)) + p2 = axl.MemoryOnePlayer(four_vector=(1, 0, 1, 0)) + p3 = axl.MemoryOnePlayer(four_vector=(1, 1, 1, 0)) + + m = axl.IpdMatch((p1, p2), turns=3) + self.assertEqual(m.play(), [(C, C), (D, C), (D, D)]) + + m = axl.IpdMatch((p2, p3), turns=3) + self.assertEqual(m.play(), [(C, C), (C, C), (C, C)]) + + m = axl.IpdMatch((p1, p3), turns=3) + self.assertEqual(m.play(), [(C, C), (D, C), (D, C)]) diff --git a/axelrod/tests/integration/test_names.py b/axelrod/tests/integration/test_names.py new file mode 100644 index 000000000..04745b778 --- /dev/null +++ b/axelrod/tests/integration/test_names.py @@ -0,0 +1,13 @@ +import unittest + +import axelrod as axl + + +class TestNames(unittest.TestCase): + def test_all_strategies_have_names(self): + names = [s.name for s in axl.all_strategies if s.name] + self.assertEqual(len(names), len(axl.all_strategies)) + + def test_all_names_are_unique(self): + names = set(s.name for s in axl.all_strategies) + self.assertEqual(len(names), len(axl.all_strategies)) diff --git a/axelrod/tests/integration/test_sample_tournaments.py b/axelrod/tests/integration/test_sample_tournaments.py new file mode 100644 index 000000000..099cdb070 --- /dev/null +++ b/axelrod/tests/integration/test_sample_tournaments.py @@ -0,0 +1,70 @@ +import unittest + +import axelrod as axl + +C, D = axl.Action.C, axl.Action.D + + +class TestSampleTournaments(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.IpdGame() + + @classmethod + def get_test_outcome(cls, outcome, turns=10): + # Extract the name of players from the outcome tuples, + # and initiate the players by getting the classes from axelrod. + names = [out[0] for out in outcome] + players = [getattr(axl, n)() for n in names] + + # Play the tournament and build the actual outcome tuples. 
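+        # With repetitions=1 each entry of results.scores is a one-element
+        # list, hence the score[0] lookup below.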
+ tournament = axl.IpdTournament( + players=players, game=cls.game, turns=turns, repetitions=1 + ) + results = tournament.play(progress_bar=False) + scores = [score[0] for score in results.scores] + outcome = zip(names, scores) + + # Return the outcome sorted by score + return sorted(outcome, key=lambda k: k[1]) + + def test_defector_v_cooperator(self): + """Test: the defector viciously punishes the cooperator.""" + outcome = [("Cooperator", 0), ("Defector", 50)] + self.assertEqual(self.get_test_outcome(outcome), outcome) + + def test_defector_v_titfortat(self): + """Test: the defector does well against tit for tat.""" + outcome = [("TitForTat", 9), ("Defector", 14)] + self.assertEqual(self.get_test_outcome(outcome), outcome) + + def test_cooperator_v_titfortat(self): + """Test: the cooperator does very well WITH tit for tat.""" + outcome = [("Cooperator", 30), ("TitForTat", 30)] + self.assertEqual(self.get_test_outcome(outcome), outcome) + + def test_cooperator_v_titfortat_v_defector(self): + """Test: the defector dominates in this population.""" + outcome = [("Cooperator", 30), ("TitForTat", 39), ("Defector", 64)] + self.assertEqual(self.get_test_outcome(outcome), outcome) + + def test_cooperator_v_titfortat_v_defector_v_grudger(self): + """Test: tit for tat does better this time around.""" + outcome = [ + ("Cooperator", 60), + ("TitForTat", 69), + ("Grudger", 69), + ("Defector", 78), + ] + self.assertEqual(self.get_test_outcome(outcome), outcome) + + def test_cooperator_v_titfortat_v_defector_v_grudger_v_go_by_majority(self): + """Test: Tit for tat is doing a lot better.""" + outcome = [ + ("Cooperator", 90), + ("Defector", 92), + ("Grudger", 99), + ("GoByMajority", 99), + ("TitForTat", 99), + ] + self.assertEqual(self.get_test_outcome(outcome), outcome) diff --git a/axelrod/tests/integration/test_tournament.py b/axelrod/tests/integration/test_tournament.py new file mode 100644 index 000000000..d5df8a02c --- /dev/null +++ b/axelrod/tests/integration/test_tournament.py @@ -0,0 +1,171 @@ +import unittest + +import filecmp +import pathlib + +import axelrod as axl +from axelrod.load_data_ import axl_filename +from axelrod.strategy_transformers import FinalTransformer +from axelrod.tests.property import tournaments + +from hypothesis import given, settings + + +class TestTournament(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.game = axl.IpdGame() + cls.players = [ + axl.Cooperator(), + axl.TitForTat(), + axl.Defector(), + axl.Grudger(), + axl.GoByMajority(), + ] + cls.player_names = [str(p) for p in cls.players] + cls.test_name = "test" + cls.test_repetitions = 3 + + cls.expected_outcome = [ + ("Cooperator", [45, 45, 45]), + ("Defector", [52, 52, 52]), + ("Grudger", [49, 49, 49]), + ("Soft Go By Majority", [49, 49, 49]), + ("Tit For Tat", [49, 49, 49]), + ] + cls.expected_outcome.sort() + + @given( + tournaments( + strategies=axl.short_run_time_strategies, + min_size=10, + max_size=30, + min_turns=2, + max_turns=210, + min_repetitions=1, + max_repetitions=4, + ) + ) + @settings(max_examples=1) + def test_big_tournaments(self, tournament): + """A test to check that tournament runs with a sample of non-cheating + strategies.""" + path = pathlib.Path("test_outputs/test_tournament.csv") + filename = axl_filename(path) + self.assertIsNone( + tournament.play(progress_bar=False, filename=filename, build_results=False) + ) + + def test_serial_play(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=5, + 
repetitions=self.test_repetitions, + ) + scores = tournament.play(progress_bar=False).scores + actual_outcome = sorted(zip(self.player_names, scores)) + self.assertEqual(actual_outcome, self.expected_outcome) + + def test_parallel_play(self): + tournament = axl.IpdTournament( + name=self.test_name, + players=self.players, + game=self.game, + turns=5, + repetitions=self.test_repetitions, + ) + scores = tournament.play(processes=2, progress_bar=False).scores + actual_outcome = sorted(zip(self.player_names, scores)) + self.assertEqual(actual_outcome, self.expected_outcome) + + def test_repeat_tournament_deterministic(self): + """A test to check that tournament gives same results.""" + deterministic_players = [ + s() + for s in axl.short_run_time_strategies + if not axl.Classifiers["stochastic"](s()) + ] + files = [] + for _ in range(2): + tournament = axl.IpdTournament( + name="test", + players=deterministic_players, + game=self.game, + turns=2, + repetitions=2, + ) + path = pathlib.Path("test_outputs/stochastic_tournament_{}.csv".format(_)) + files.append(axl_filename(path)) + tournament.play(progress_bar=False, filename=files[-1], build_results=False) + self.assertTrue(filecmp.cmp(files[0], files[1])) + + def test_repeat_tournament_stochastic(self): + """ + A test to check that tournament gives same results when setting seed. + """ + files = [] + for _ in range(2): + axl.seed(0) + stochastic_players = [ + s() + for s in axl.short_run_time_strategies + if axl.Classifiers["stochastic"](s()) + ] + tournament = axl.IpdTournament( + name="test", + players=stochastic_players, + game=self.game, + turns=2, + repetitions=2, + ) + path = pathlib.Path("test_outputs/stochastic_tournament_{}.csv".format(_)) + files.append(axl_filename(path)) + tournament.play(progress_bar=False, filename=files[-1], build_results=False) + self.assertTrue(filecmp.cmp(files[0], files[1])) + + +class TestNoisyTournament(unittest.TestCase): + def test_noisy_tournament(self): + # Defector should win for low noise + players = [axl.Cooperator(), axl.Defector()] + tournament = axl.IpdTournament(players, turns=5, repetitions=3, noise=0.0) + results = tournament.play(progress_bar=False) + self.assertEqual(results.ranked_names[0], "Defector") + + # If the noise is large enough, cooperator should win + players = [axl.Cooperator(), axl.Defector()] + tournament = axl.IpdTournament(players, turns=5, repetitions=3, noise=0.75) + results = tournament.play(progress_bar=False) + self.assertEqual(results.ranked_names[0], "Cooperator") + + +class TestProbEndTournament(unittest.TestCase): + def test_players_do_not_know_match_length(self): + """Create two players who should cooperate on last two turns if they + don't know when those last two turns are. 
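+        (With prob_end set, the match length is sampled at random, so the
+        players cannot anticipate the final two turns.)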
+        """
+        p1 = FinalTransformer(["D", "D"])(axl.Cooperator)()
+        p2 = FinalTransformer(["D", "D"])(axl.Cooperator)()
+        players = [p1, p2]
+        tournament = axl.IpdTournament(players, prob_end=0.5, repetitions=1)
+        results = tournament.play(progress_bar=False)
+        # Check that both players always cooperated
+        for rating in results.cooperating_rating:
+            self.assertEqual(rating, 1)
+
+    def test_matches_have_different_length(self):
+        """
+        A match between two players should have variable length across the
+        repetitions.
+        """
+        p1 = axl.Cooperator()
+        p2 = axl.Cooperator()
+        p3 = axl.Cooperator()
+        players = [p1, p2, p3]
+        axl.seed(0)
+        tournament = axl.IpdTournament(players, prob_end=0.5, repetitions=2)
+        results = tournament.play(progress_bar=False)
+        # Check that the match lengths differ across the repetitions
+        self.assertNotEqual(results.match_lengths[0], results.match_lengths[1])
diff --git a/axelrod/tests/strategies/__init__.py b/axelrod/tests/strategies/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/axelrod/tests/strategies/test_adaptive.py b/axelrod/tests/strategies/test_adaptive.py new file mode 100644 index 000000000..5b796ce4e --- /dev/null +++ b/axelrod/tests/strategies/test_adaptive.py @@ -0,0 +1,46 @@
+"""Tests for the Adaptive strategy."""
+
+import axelrod as axl
+
+from .test_player import TestMatch, TestPlayer
+
+C, D = axl.Action.C, axl.Action.D
+
+
+class TestAdaptive(TestPlayer):
+
+    name = "Adaptive"
+    player = axl.Adaptive
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(["game"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C)] * 6 + [(D, C)] * 8
+        self.versus_test(axl.Cooperator(), expected_actions=actions)
+
+        actions = [(C, D)] * 6 + [(D, D)] * 8
+        self.versus_test(axl.Defector(), expected_actions=actions)
+
+        actions = [(C, C), (C, D)] * 3 + [(D, C), (D, D)] * 4
+        self.versus_test(axl.Alternator(), expected_actions=actions)
+
+        actions = [(C, C)] * 6 + [(D, C)] + [(D, D)] * 4 + [(C, D), (C, C)]
+        self.versus_test(axl.TitForTat(), expected_actions=actions)
+
+    def test_scoring(self):
+        player = axl.Adaptive()
+        opponent = axl.Cooperator()
+        player.play(opponent)
+        player.play(opponent)
+        self.assertEqual(3, player.scores[C])
+        game = axl.IpdGame(-3, 10, 10, 10)
+        player.set_match_attributes(game=game)
+        player.play(opponent)
+        self.assertEqual(0, player.scores[C])
diff --git a/axelrod/tests/strategies/test_adaptor.py b/axelrod/tests/strategies/test_adaptor.py new file mode 100644 index 000000000..740fdb252 --- /dev/null +++ b/axelrod/tests/strategies/test_adaptor.py @@ -0,0 +1,93 @@
+"""Tests for the Adaptor strategies."""
+
+import unittest
+
+import axelrod as axl
+
+from .test_player import TestPlayer, test_four_vector
+
+C, D = axl.Action.C, axl.Action.D
+
+
+class TestAdaptorBrief(TestPlayer):
+
+    name = "AdaptorBrief"
+    player = axl.AdaptorBrief
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        # No error.
+        actions = [(C, C), (C, C), (C, C), (C, C)]
+        self.versus_test(
+            opponent=axl.AdaptorBrief(), expected_actions=actions, seed=0
+        )
+
+        # Error corrected.
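+        # (With seed=22 the players briefly fall out of mutual cooperation
+        # and recover it within two rounds.)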
+ actions = [(C, C), (C, D), (D, C), (C, C)] + self.versus_test( + opponent=axl.AdaptorBrief(), expected_actions=actions, seed=22 + ) + + # Error corrected, example 2 + actions = [(D, C), (C, D), (D, C), (C, D), (C, C)] + self.versus_test( + opponent=axl.AdaptorBrief(), expected_actions=actions, seed=925 + ) + + # Versus Cooperator + actions = [(C, C)] * 8 + self.versus_test( + opponent=axl.Cooperator(), expected_actions=actions, seed=0 + ) + + # Versus Defector + actions = [(C, D), (D, D), (D, D), (D, D), (D, D), (D, D), (D, D)] + self.versus_test( + opponent=axl.Defector(), expected_actions=actions, seed=0 + ) + + +class TestAdaptorLong(TestPlayer): + + name = "AdaptorLong" + player = axl.AdaptorLong + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # No error. + actions = [(C, C), (C, C), (C, C), (C, C)] + self.versus_test( + opponent=axl.AdaptorLong(), expected_actions=actions, seed=0 + ) + + # Error corrected. + actions = [(C, C), (C, D), (D, D), (C, C), (C, C)] + self.versus_test( + opponent=axl.AdaptorLong(), expected_actions=actions, seed=22 + ) + + # Versus Cooperator + actions = [(C, C)] * 8 + self.versus_test( + opponent=axl.Cooperator(), expected_actions=actions, seed=0 + ) + + # Versus Defector + actions = [(C, D), (D, D), (C, D), (D, D), (D, D), (C, D), (D, D)] + self.versus_test( + opponent=axl.Defector(), expected_actions=actions, seed=0 + ) diff --git a/axelrod/tests/strategies/test_alternator.py b/axelrod/tests/strategies/test_alternator.py new file mode 100644 index 000000000..64a347c33 --- /dev/null +++ b/axelrod/tests/strategies/test_alternator.py @@ -0,0 +1,33 @@ +"""Tests for the Alternator strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestAlternator(TestPlayer): + + name = "Alternator" + player = axl.Alternator + expected_classifier = { + "memory_depth": 1, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (D, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D)] * 5 + self.versus_test(axl.Defector(), expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, C]) + actions = [(C, D), (D, C)] * 5 + self.versus_test(opponent, expected_actions=actions) diff --git a/axelrod/tests/strategies/test_ann.py b/axelrod/tests/strategies/test_ann.py new file mode 100644 index 000000000..3a63d2131 --- /dev/null +++ b/axelrod/tests/strategies/test_ann.py @@ -0,0 +1,152 @@ +"""Tests for the ANN strategy.""" +import unittest + +import axelrod as axl +from axelrod.evolvable_player import InsufficientParametersError +from axelrod.load_data_ import load_weights +from axelrod.strategies.ann import split_weights + +from .test_player import TestPlayer +from .test_evolvable_player import PartialClass, TestEvolvablePlayer + + +C, D = axl.Action.C, axl.Action.D +nn_weights = load_weights() +num_features, num_hidden, weights = nn_weights["Evolved ANN 5"] + + +class TestSplitWeights(unittest.TestCase): + def test_split_weights(self): + with self.assertRaises(ValueError): + split_weights([0] * 20, 12, 10) + # Doesn't Raise + split_weights([0] * 70, 5, 10) + split_weights([0] * 12, 10, 1) + + +class 
TestEvolvedANN(TestPlayer): + + name = "Evolved ANN" + player = axl.EvolvedANN + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D)] + [(D, D)] * 5 + self.versus_test(axl.Defector(), expected_actions=actions) + + actions = [(C, C)] * 5 + self.versus_test(axl.TitForTat(), expected_actions=actions) + + +class TestEvolvedANN5(TestPlayer): + + name = "Evolved ANN 5" + player = axl.EvolvedANN5 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D)] + [(D, D)] * 4 + self.versus_test(axl.Defector(), expected_actions=actions) + + +class TestEvolvedANNNoise05(TestPlayer): + + name = "Evolved ANN 5 Noise 05" + player = axl.EvolvedANNNoise05 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + +class TestEvolvableANN(unittest.TestCase): + + player_class = axl.EvolvableANN + + def test_normalized_parameters(self): + # Must specify at least one of cycle or cycle_length + self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters + ) + self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters, + weights=nn_weights["Evolved ANN 5"][2] + ) + + +class TestEvolvableANN2(TestEvolvablePlayer): + name = "EvolvableANN" + player_class = axl.EvolvableANN + parent_class = axl.ANN + parent_kwargs = ["num_features", "num_hidden", "weights"] + init_parameters = {"num_features": 17, "num_hidden": 8} + + +class TestEvolvableANN3(TestEvolvablePlayer): + name = "EvolvableANN" + player_class = axl.EvolvableANN + parent_class = axl.ANN + parent_kwargs = ["num_features", "num_hidden", "weights"] + init_parameters = { + "num_features": nn_weights["Evolved ANN 5"][0], + "num_hidden": nn_weights["Evolved ANN 5"][1], + "weights": nn_weights["Evolved ANN 5"][2] + } + + +# Substitute EvolvableANN as a regular EvolvedANN5. 
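+# PartialClass fixes the evolved network parameters as default keyword
+# arguments, so the evolvable player can be run through the EvolvedANN5
+# test suite below.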
+EvolvableANNPlayerWithDefault = PartialClass( + axl.EvolvableANN, + num_features=num_features, + num_hidden=num_hidden, + weights=weights +) + + +class EvolvableANNAsANN(TestEvolvedANN5): + player = EvolvableANNPlayerWithDefault + + def test_equality_of_clone(self): + pass + + def test_equality_of_pickle_clone(self): + pass + + def test_repr(self): + pass diff --git a/axelrod/tests/strategies/test_apavlov.py b/axelrod/tests/strategies/test_apavlov.py new file mode 100644 index 000000000..e720dce56 --- /dev/null +++ b/axelrod/tests/strategies/test_apavlov.py @@ -0,0 +1,163 @@ +"""Tests APavlov strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestAPavlov2006(TestPlayer): + name = "Adaptive Pavlov 2006" + player = axl.APavlov2006 + + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 7 + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + attrs={"opponent_class": "Cooperative"}, + ) + + opponent = axl.MockPlayer(actions=[C] * 6 + [D]) + actions = [(C, C)] * 6 + [(C, D), (D, C)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "Cooperative"} + ) + + actions = [(C, D)] + [(D, D)] * 6 + self.versus_test( + axl.Defector(), + expected_actions=actions, + attrs={"opponent_class": "ALLD"}, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C, D, C]) + actions = [ + (C, D), + (D, C), + (C, D), + (D, C), + (C, D), + (D, C), + (C, D), + (C, C), + (C, D), + (D, C), + ] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} + ) + + opponent = axl.MockPlayer(actions=[D, D, C, D, D, C]) + actions = [(C, D), (D, D), (D, C), (C, D), (D, D), (D, C), (D, D)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "PavlovD"} + ) + + opponent = axl.MockPlayer(actions=[D, D, C, D, D, C, D]) + actions = [(C, D), (D, D), (D, C), (C, D), (D, D), (D, C), (D, D), (C, D)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "PavlovD"} + ) + + opponent = axl.MockPlayer(actions=[C, C, C, D, D, D]) + actions = [(C, C), (C, C), (C, C), (C, D), (D, D), (D, D), (D, C)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "Random"} + ) + + opponent = axl.MockPlayer(actions=[D, D, D, C, C, C]) + actions = [(C, D), (D, D), (D, D), (D, C), (C, C), (C, C), (D, D)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "Random"} + ) + + +class TestAPavlov2011(TestPlayer): + name = "Adaptive Pavlov 2011" + player = axl.APavlov2011 + + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + + actions = [(C, C)] * 8 + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + attrs={"opponent_class": "Cooperative"}, + ) + + actions = [(C, D)] + [(D, D)] * 9 + self.versus_test( + axl.Defector(), + expected_actions=actions, + attrs={"opponent_class": "ALLD"}, + ) + + opponent = axl.MockPlayer(actions=[C, D, D, D, D, D, D]) + actions = [(C, C), (C, D)] + [(D, D)] * 5 + [(D, C)] + self.versus_test( + opponent, expected_actions=actions, 
attrs={"opponent_class": "ALLD"} + ) + + opponent = axl.MockPlayer(actions=[C, C, D, D, D, D, D]) + actions = [(C, C), (C, C), (C, D)] + [(D, D)] * 4 + [(D, C)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "ALLD"} + ) + + opponent = axl.MockPlayer(actions=[C, D, D, C, D, D, D]) + actions = [(C, C), (C, D), (D, D), (D, C), (C, D), (D, D), (D, D), (D, C)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "ALLD"} + ) + + opponent = axl.MockPlayer(actions=[C, D, D, C, C, D, D]) + actions = [(C, C), (C, D), (D, D), (D, C), (C, C), (C, D), (C, D), (D, C)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} + ) + + opponent = axl.MockPlayer(actions=[C, D, C, D, C, D, D]) + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (C, D), (D, C)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} + ) + + opponent = axl.MockPlayer(actions=[D, D, D, C, C, C, C]) + actions = [(C, D), (D, D), (D, D), (D, C), (C, C), (C, C), (C, C), (C, D)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "STFT"} + ) + + opponent = axl.MockPlayer(actions=[C, C, C, C, D, D]) + actions = [(C, C), (C, C), (C, C), (C, C), (C, D), (D, D), (D, C), (D, C)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "Random"} + ) + + opponent = axl.MockPlayer(actions=[D, D, C, C, C, C]) + actions = [(C, D), (D, D), (D, C), (C, C), (C, C), (C, C), (D, D), (D, D)] + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_class": "Random"} + ) diff --git a/axelrod/tests/strategies/test_appeaser.py b/axelrod/tests/strategies/test_appeaser.py new file mode 100644 index 000000000..ede79c1d5 --- /dev/null +++ b/axelrod/tests/strategies/test_appeaser.py @@ -0,0 +1,37 @@ +"""Tests for the Appeaser strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestAppeaser(TestPlayer): + + name = "Appeaser" + player = axl.Appeaser + expected_classifier = { + "memory_depth": float("inf"), # Depends on internal memory. 
+ "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D), (C, D), (D, D), (C, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + opponent = axl.MockPlayer(actions=[C, C, D, D]) + actions = [(C, C), (C, C), (C, D), (D, D), (C, C), (C, C)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[C, C, D, D, D]) + actions = [(C, C), (C, C), (C, D), (D, D), (C, D), (D, C), (D, C)] + self.versus_test(opponent, expected_actions=actions) diff --git a/axelrod/tests/strategies/test_averagecopier.py b/axelrod/tests/strategies/test_averagecopier.py new file mode 100644 index 000000000..ae23667e7 --- /dev/null +++ b/axelrod/tests/strategies/test_averagecopier.py @@ -0,0 +1,178 @@ +"""Tests for the AverageCopier strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestAverageCopier(TestPlayer): + + name = "Average Copier" + player = axl.AverageCopier + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Tests that if opponent has played all C then player chooses C. + actions = [(C, C)] * 10 + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) + actions = [(D, C)] + [(C, C)] * 9 + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=2) + + # Tests that if opponent has played all D then player chooses D. + actions = [(C, D)] + [(D, D)] * 9 + self.versus_test(axl.Defector(), expected_actions=actions, seed=1) + actions = [(D, D)] + [(D, D)] * 9 + self.versus_test(axl.Defector(), expected_actions=actions, seed=2) + + # Variable behaviour based on the history and stochastic + + actions = [ + (C, C), + (C, D), + (D, C), + (D, D), + (C, C), + (C, D), + (C, C), + (D, D), + (D, C), + (C, D), + ] + self.versus_test(axl.Alternator(), expected_actions=actions, seed=1) + + actions = [ + (D, C), + (C, D), + (D, C), + (C, D), + (C, C), + (D, D), + (D, C), + (D, D), + (C, C), + (D, D), + ] + self.versus_test(axl.Alternator(), expected_actions=actions, seed=2) + + opponent = axl.MockPlayer(actions=[C, C, D, D, D, D]) + actions = [ + (C, C), + (C, C), + (C, D), + (D, D), + (D, D), + (C, D), + (D, C), + (D, C), + (D, D), + (D, D), + ] + self.versus_test(opponent, expected_actions=actions, seed=1) + + opponent = axl.MockPlayer(actions=[C, C, C, D, D, D]) + actions = [ + (D, C), + (C, C), + (C, C), + (C, D), + (D, D), + (C, D), + (C, C), + (D, C), + (D, C), + (D, D), + ] + self.versus_test(opponent, expected_actions=actions, seed=2) + + +class TestNiceAverageCopier(TestPlayer): + + name = "Nice Average Copier" + player = axl.NiceAverageCopier + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Tests that if opponent has played all C then player chooses C. 
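+        # (The opening move here is cooperation, so unlike Average Copier
+        # above a single seed suffices.)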
+ actions = [(C, C)] * 10 + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) + + # Tests that if opponent has played all D then player chooses D. + actions = [(C, D)] + [(D, D)] * 9 + self.versus_test(axl.Defector(), expected_actions=actions, seed=1) + + # Variable behaviour based on the history and stochastic behaviour + actions = [ + (C, C), + (C, D), + (C, C), + (D, D), + (D, C), + (C, D), + (C, C), + (C, D), + (D, C), + (D, D), + ] + self.versus_test(axl.Alternator(), expected_actions=actions, seed=1) + + actions = [ + (C, C), + (C, D), + (D, C), + (D, D), + (C, C), + (C, D), + (D, C), + (D, D), + (D, C), + (C, D), + ] + self.versus_test(axl.Alternator(), expected_actions=actions, seed=2) + + opponent = axl.MockPlayer(actions=[C, C, D, D, D, D]) + actions = [ + (C, C), + (C, C), + (C, D), + (C, D), + (D, D), + (D, D), + (C, C), + (D, C), + (C, D), + (D, D), + ] + self.versus_test(opponent, expected_actions=actions, seed=1) + + opponent = axl.MockPlayer(actions=[C, C, C, D, D, D]) + actions = [ + (C, C), + (C, C), + (C, C), + (C, D), + (D, D), + (D, D), + (C, C), + (C, C), + (D, C), + (D, D), + ] + self.versus_test(opponent, expected_actions=actions, seed=2) diff --git a/axelrod/tests/strategies/test_axelrod_first.py b/axelrod/tests/strategies/test_axelrod_first.py new file mode 100644 index 000000000..1327757a3 --- /dev/null +++ b/axelrod/tests/strategies/test_axelrod_first.py @@ -0,0 +1,810 @@ +"""Tests for the First Axelrod strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer, test_four_vector + +C, D = axl.Action.C, axl.Action.D + + +class TestFirstByDavis(TestPlayer): + + name = "First by Davis: 10" + player = axl.FirstByDavis + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Cooperates for the first ten rounds + actions = [(C, C)] * 10 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D)] * 10 + self.versus_test(axl.Defector(), expected_actions=actions) + + actions = [(C, C), (C, D)] * 5 + self.versus_test(axl.Alternator(), expected_actions=actions) + + # If opponent defects at any point then the player will defect forever + # (after 10 rounds) + opponent = axl.MockPlayer(actions=[C] * 10 + [D]) + actions = [(C, C)] * 10 + [(C, D), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[C] * 15 + [D]) + actions = [(C, C)] * 15 + [(C, D), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + +class TestFirstByDowning(TestPlayer): + + name = "First by Downing" + player = axl.FirstByDowning + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"game"}, + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(D, C), (D, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, C, C]) + actions = [(D, D), (D, C), (D, C), (D, D)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, D, C]) + actions = [(D, D), (D, D), (D, C), (D, D)] + self.versus_test(opponent, expected_actions=actions) + + opponent = 
axl.MockPlayer(actions=[C, C, D, D, C, C]) + actions = [(D, C), (D, C), (C, D), (D, D), (D, C), (D, C), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[C, C, C, C, D, D]) + actions = [(D, C), (D, C), (C, C), (D, C), (D, D), (C, D), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + +class TestFirstByFeld(TestPlayer): + + name = "First by Feld: 1.0, 0.5, 200" + player = axl.FirstByFeld + expected_classifier = { + "memory_depth": 200, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_cooperation_probability(self): + # Test cooperation probabilities + p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.8, rounds_of_decay=100) + self.assertEqual(1.0, p1._cooperation_probability()) + p2 = axl.Cooperator() + match = axl.IpdMatch((p1, p2), turns=50) + match.play() + self.assertEqual(0.9, p1._cooperation_probability()) + match = axl.IpdMatch((p1, p2), turns=100) + match.play() + self.assertEqual(0.8, p1._cooperation_probability()) + + # Test cooperation probabilities, second set of params + p1 = self.player(start_coop_prob=1.0, end_coop_prob=0.5, rounds_of_decay=200) + self.assertEqual(1.0, p1._cooperation_probability()) + match = axl.IpdMatch((p1, p2), turns=100) + match.play() + self.assertEqual(0.75, p1._cooperation_probability()) + match = axl.IpdMatch((p1, p2), turns=200) + match.play() + self.assertEqual(0.5, p1._cooperation_probability()) + + def test_decay(self): + # Test beyond 200 rounds + for opponent in [axl.Cooperator(), axl.Defector()]: + player = self.player() + self.assertEqual(player._cooperation_probability(), player._start_coop_prob) + match = axl.IpdMatch((player, opponent), turns=201) + match.play() + self.assertEqual(player._cooperation_probability(), player._end_coop_prob) + + def test_strategy(self): + actions = [(C, C)] * 41 + [(D, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) + + actions = [(C, C)] * 16 + [(D, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=2) + + actions = [(C, D)] + [(D, D)] * 20 + self.versus_test(axl.Defector(), expected_actions=actions) + + +class TestFirstByGraaskamp(TestPlayer): + + name = "First by Graaskamp: 0.05" + player = axl.FirstByGraaskamp + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Test TfT in first 50 rounds followed by defection followed by 5 rounds + # of TfT + expected_attrs = { + "opponent_is_random": False, + "next_random_defection_turn": None, + } + + # Against alternator + actions = [(C, C)] + [(C, D), (D, C)] * 24 + [(C, D)] # 50 turns + actions += [(D, C)] # 51 turns + actions += [(C, D), (D, C)] * 2 + [(C, D)] # 56 turns + self.versus_test( + axl.Alternator(), expected_actions=actions, attrs=expected_attrs + ) + + # Against defector + actions = [(C, D)] + [(D, D)] * 55 # 56 turns + self.versus_test( + axl.Defector(), expected_actions=actions, attrs=expected_attrs + ) + + # Against cooperator + actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 5 + self.versus_test( + axl.Cooperator(), expected_actions=actions, attrs=expected_attrs + ) + + # Test recognition of random player + expected_attrs = { + "opponent_is_random": False, + "next_random_defection_turn": None, + } + actions = [(C, C)] * 50 + [(D, 
C)] + [(C, C)] * 5 # 56 turns + self.versus_test( + axl.Cooperator(), expected_actions=actions, attrs=expected_attrs + ) + expected_attrs = {"opponent_is_random": False, "next_random_defection_turn": 68} + actions += [(C, C)] # 57 turns + self.versus_test( + axl.Cooperator(), expected_actions=actions, attrs=expected_attrs + ) + + expected_attrs = { + "opponent_is_random": True, + "next_random_defection_turn": None, + } + actions = [(C, C)] + [(C, D), (D, C)] * 24 + [(C, D)] # 50 turns + actions += [(D, C)] # 51 turns + actions += [(C, D), (D, C)] * 3 # 57 turns + actions += [(D, D)] + self.versus_test( + axl.Alternator(), expected_actions=actions, attrs=expected_attrs + ) + actions += [(D, C), (D, D)] * 5 + self.versus_test( + axl.Alternator(), expected_actions=actions, attrs=expected_attrs + ) + + # Test versus TfT + expected_attrs = { + "opponent_is_random": False, + "next_random_defection_turn": None, + } + actions = [(C, C)] * 50 + [(D, C)] # 51 turns + actions += [(C, D), (D, C)] * 3 # 56 turns + actions += [(C, D), (D, C)] * 50 + self.versus_test( + axl.TitForTat(), expected_actions=actions, seed=0, attrs=expected_attrs + ) + + # Test random defections + expected_attrs = {"opponent_is_random": False, "next_random_defection_turn": 78} + actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 16 + [(D, C)] + [(C, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, seed=0, attrs=expected_attrs + ) + + expected_attrs = {"opponent_is_random": False, "next_random_defection_turn": 77} + actions = [(C, C)] * 50 + [(D, C)] + [(C, C)] * 12 + [(D, C)] + [(C, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, seed=1, attrs=expected_attrs + ) + + +class TestFirstByGrofman(TestPlayer): + + name = "First by Grofman" + player = axl.FirstByGrofman + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 7 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D] * 8) + actions = [(C, D), (C, D), (D, D), (C, D), (D, D), (C, D), (C, D), (D, D)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + opponent = axl.MockPlayer(actions=[D] * 8) + actions = [(C, D), (D, D), (C, D), (D, D), (C, D), (C, D), (C, D), (D, D)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + +class TestFirstByJoss(TestPlayer): + + name = "First by Joss: 0.9" + player = axl.FirstByJoss + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = {(C, C): 0.9, (C, D): 0, (D, C): 0.9, (D, D): 0} + test_four_vector(self, expected_dictionary) + + def test_strategy(self): + actions = [(C, C), (C, C), (C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) + + actions = [(C, C), (D, C), (D, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=2) + + actions = [(C, D), (D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions, seed=1) + + actions = [(C, D), (D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions, 
seed=2) + + +class TestFirstByNydegger(TestPlayer): + + name = "First by Nydegger" + player = axl.FirstByNydegger + expected_classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_score_history(self): + """Tests many (but not all) possible combinations.""" + player = self.player() + score_map = player.score_map + score = player.score_history([C, C, C], [C, C, C], score_map) + self.assertEqual(score, 0) + score = player.score_history([D, C, C], [C, C, C], score_map) + self.assertEqual(score, 1) + score = player.score_history([C, C, C], [D, C, C], score_map) + self.assertEqual(score, 2) + score = player.score_history([D, D, C], [D, C, C], score_map) + self.assertEqual(score, 7) + score = player.score_history([C, D, C], [C, D, C], score_map) + self.assertEqual(score, 12) + score = player.score_history([D, C, D], [C, C, C], score_map) + self.assertEqual(score, 17) + score = player.score_history([D, D, D], [D, D, D], score_map) + self.assertEqual(score, 63) + + def test_strategy(self): + # Test TFT-type initial play + # Test trailing post-round 3 play + + actions = [(C, C)] * 9 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D), (D, D), (C, D), (C, D), (C, D), (C, D), (C, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C), (C, D)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, C]) + actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C), (D, D), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + +class TestFirstByShubik(TestPlayer): + + name = "First by Shubik" + player = axl.FirstByShubik + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, C, C]) + actions = [(C, D), (D, C), (C, C), (C, D)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C]) + actions = [(C, D), (D, C), (C, D), (D, C), (D, C), (C, D), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, C, D, D, C]) + actions = [(C, D), (D, C), (C, D), (D, D), (D, C), (C, D), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C, D]) + actions = [ + (C, D), + (D, C), + (C, D), + (D, C), + (D, C), + (C, D), + (D, D), + (D, C), + (D, D), + (C, C), + ] + self.versus_test(opponent, expected_actions=actions) + + +class TestFirstByTullock(TestPlayer): + + name = "First by Tullock" + player = axl.FirstByTullock + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """Cooperates for first ten rounds""" + actions = [(C, C), (C, D)] * 5 + self.versus_test(axl.Alternator(), 
expected_actions=actions) + + actions = [(C, D)] * 11 + [(D, D)] * 2 + self.versus_test(axl.Defector(), expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D] * 10 + [C]) + actions = [(C, D)] * 10 + [(C, C), (D, D)] + self.versus_test(opponent, expected_actions=actions) + + # Test beyond 10 rounds + opponent = axl.MockPlayer(actions=[D] * 5 + [C] * 6) + actions = [(C, D)] * 5 + [(C, C)] * 6 + [(D, D)] * 4 + self.versus_test(opponent, expected_actions=actions, seed=20) + + opponent = axl.MockPlayer(actions=[D] * 5 + [C] * 6) + actions = [(C, D)] * 5 + [(C, C)] * 6 + [(C, D), (D, D), (D, D), (C, D)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + opponent = axl.MockPlayer(actions=[C] * 9 + [D] * 2) + actions = [(C, C)] * 9 + [(C, D)] * 2 + [(C, C), (D, C), (D, C), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + opponent = axl.MockPlayer(actions=[C] * 9 + [D] * 2) + actions = [(C, C)] * 9 + [(C, D)] * 2 + [(D, C), (D, C), (C, C), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + +class TestFirstByAnonymous(TestPlayer): + + name = "First by Anonymous" + player = axl.FirstByAnonymous + expected_classifier = { + "memory_depth": 0, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(D, C), (C, C), (C, C), (D, C), (C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) + + actions = [(C, C), (C, C), (D, C), (C, C), (C, C), (D, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=10) + + +class TestFirstBySteinAndRapoport(TestPlayer): + + name = "First by Stein and Rapoport: 0.05: (D, D)" + player = axl.FirstBySteinAndRapoport + expected_classifier = { + "memory_depth": float("inf"), + "long_run_time": False, + "stochastic": False, + "makes_use_of": {"length"}, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_init(self): + player = self.player() + self.assertEqual(player.alpha, 0.05) + self.assertFalse(player.opponent_is_random) + + player = self.player(alpha=0.5) + self.assertEqual(player.alpha, 0.5) + self.assertFalse(player.opponent_is_random) + + def test_strategy(self): + # Our IpdPlayer (SteinAndRapoport) vs Cooperator + # After 15th round (pvalue < alpha) still plays TitForTat. + # Note it always defects on the last two rounds. + opponent = axl.Cooperator() + actions = [(C, C)] * 17 + [(D, C)] * 2 + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_is_random": False} + ) + + actions = actions[:-2] + [(C, C)] * 2 + self.versus_test( + opponent, + expected_actions=actions[:-2], + match_attributes={"length": -1}, + attrs={"opponent_is_random": False}, + ) + + # SteinAndRapoport vs Defector + # After 15th round (p-value < alpha) still plays TitForTat. + opponent = axl.Defector() + actions = [(C, D)] * 4 + [(D, D)] * 15 + self.versus_test( + opponent, expected_actions=actions, attrs={"opponent_is_random": False} + ) + + # SteinAndRapoport vs Alternator + # After 15th round (p-value > alpha) starts defecting. + opponent = axl.Alternator() + actions = [(C, C), (C, D), (C, C), (C, D)] + + # On 15th round carry out chi-square test. + actions += [(D, C), (C, D)] * 5 + [(D, C)] + + # Defect throughout. 
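+        # (The chi-squared test appears to consider only the opponent's
+        # aggregate C/D counts, so an alternator's even 50-50 record is
+        # indistinguishable from a random player's and the p-value stays
+        # above alpha.)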
+        actions += [(D, D), (D, C), (D, D), (D, C)]
+
+        self.versus_test(
+            opponent, expected_actions=actions, attrs={"opponent_is_random": True}
+        )
+
+        # The test is carried out again every 15 rounds.
+        # If the strategy alternates for the first 12 rounds and then cooperates
+        # it is no longer recognised as random.
+        opponent = axl.MockPlayer([C, D] * 6 + [C] * 50)
+
+        actions = [(C, C), (C, D), (C, C), (C, D)]
+        # On 15th round carry out chi-square test.
+        actions += [(D, C), (C, D)] * 4 + [(D, C), (C, C), (D, C)]
+        # Defect throughout and carry out chi-square test on round 30.
+        # Opponent is no longer recognised as random, revert to TFT.
+        actions += [(D, C)] * 14 + [(C, C)]
+        self.versus_test(
+            opponent,
+            expected_actions=actions,
+            match_attributes={"length": -1},
+            attrs={"opponent_is_random": False},
+        )
+
+
+class TestFirstByTidemanAndChieruzzi(TestPlayer):
+
+    name = "First by Tideman and Chieruzzi: (D, D)"
+    player = axl.FirstByTidemanAndChieruzzi
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": {"game", "length"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        # Cooperator Test
+        opponent = axl.Cooperator()
+        actions = [(C, C), (C, C), (D, C), (D, C)]
+        self.versus_test(opponent, expected_actions=actions)
+
+        # Cooperator Test: does not defect if game length is unknown
+        opponent = axl.Cooperator()
+        actions = [(C, C), (C, C), (C, C), (C, C)]
+        self.versus_test(opponent, expected_actions=actions,
+                         match_attributes={"length": float("inf")})
+
+        # Defector Test
+        opponent = axl.Defector()
+        actions = [(C, D), (D, D), (D, D), (D, D)]
+        self.versus_test(opponent, expected_actions=actions)
+
+        # Test increasing retaliation
+        opponent = axl.MockPlayer([D, C])
+        actions = [
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, C),
+        ]
+        self.versus_test(
+            opponent,
+            expected_actions=actions,
+            attrs={
+                "is_retaliating": True,
+                "retaliation_length": 4,
+                "retaliation_remaining": 3,
+            },
+        )
+
+        opponent = axl.Cycler("DDCDD")
+        actions = [
+            (C, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+        ]
+        self.versus_test(
+            opponent,
+            expected_actions=actions,
+            attrs={
+                "current_score": 34,
+                "opponent_score": 19,
+                "last_fresh_start": 0,
+                "retaliation_length": 6,
+                "retaliation_remaining": 2,
+            },
+        )
+
+        # When the length is given this strategy will not give a fresh start
+        opponent = axl.Cycler("DDCDD")
+        actions = [
+            (C, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (C, D),
+            (C, D),
+        ]
+        self.versus_test(
+            opponent, expected_actions=actions, match_attributes={"length": 50}
+        )
+
+        # When the length is not given this strategy will give a fresh start.
+        opponent = axl.Cycler("DDCDD")
+        actions = [
+            (C, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (C, D),
+            (C, D),
+        ]
+        self.versus_test(
+            opponent,
+            expected_actions=actions,
+            match_attributes={"length": float("inf")},
+        )
+
+        # Check standard deviation conditions.
+        # The opponent is similar to the one above except the stddev condition
+        # is not met, therefore no fresh start will be given.
+        opponent = axl.Cycler("DDCDDDDCDDCDCCC")
+        actions = [
+            (C, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (D, C),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, D),
+        ]
+
+        self.versus_test(
+            opponent, expected_actions=actions, attrs={"last_fresh_start": 0}
+        )
+
+        # Check the fresh start condition
+        opponent = axl.TitForTat()
+        actions = [(C, C), (C, C), (D, C), (D, D)]
+        self.versus_test(
+            opponent, expected_actions=actions, attrs={"fresh_start": False}
+        )
+
+        # Check the fresh start condition: at least 20 rounds since the last
+        # 'fresh start'.
+        opponent = axl.Cycler("CCCCD")
+        actions = [
+            (C, C),
+            (C, C),
+            (C, C),
+            (C, C),
+            (C, D),
+            (D, C),
+            (C, C),
+            (C, C),
+            (C, C),
+            (C, D),
+            (D, C),
+            (D, C),
+            (C, C),
+            (C, C),
+            (C, D),
+            (D, C),
+            (D, C),
+            (D, C),
+            (C, C),
+            (C, D),
+            (D, C),
+            (D, C),
+            (D, C),
+            (C, C),
+            (C, D),
+            (D, C),
+            (C, C),
+            (C, C),
+            (C, C),
+            (C, D),
+            (D, C),
+            (D, C),
+            (C, C),
+            (D, C),
+            (D, D),
+        ]
+        self.versus_test(
+            opponent,
+            expected_actions=actions,
+            match_attributes={"length": 35},
+            attrs={
+                "current_score": 110,
+                "opponent_score": 75,
+                "last_fresh_start": 24,
+                "retaliation_length": 2,
+                "retaliation_remaining": 0,
+            },
+        )
diff --git a/axelrod/tests/strategies/test_axelrod_second.py b/axelrod/tests/strategies/test_axelrod_second.py
new file mode 100644
index 000000000..1b89e5b36
--- /dev/null
+++ b/axelrod/tests/strategies/test_axelrod_second.py
@@ -0,0 +1,2035 @@
+"""Tests for the Second Axelrod strategies."""
+
+import random
+
+import axelrod as axl
+
+import numpy as np
+
+from .test_player import TestPlayer
+
+C, D = axl.Action.C, axl.Action.D
+
+
+class TestChampion(TestPlayer):
+    name = "Second by Champion"
+    player = axl.SecondByChampion
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        # Cooperates for first 10 rounds
+
+        actions = [(C, C), (C, D)] * 5  # Cooperate for ten rounds
+        self.versus_test(axl.Alternator(), expected_actions=actions)
+
+        # Mirror partner for next phase
+        actions += [(D, C), (C, D)] * 7  # Mirror opponent afterwards
+        self.versus_test(axl.Alternator(), expected_actions=actions)
+
+        # Cooperate unless the opponent defected, has defected at least 40% of
+        # the time, and with a random choice
+        actions_1 = actions + [(D, C), (C, D), (C, C), (C, D)]
+        self.versus_test(axl.Alternator(), expected_actions=actions_1, seed=0)
+
+        actions_2 = actions + [(D, C), (C, D), (D, C), (C, D)]
+        self.versus_test(axl.Alternator(), expected_actions=actions_2, seed=1)
+
+        actions_3 = actions + [(D, C), (C, D), (C, C), (C, D)]
+        self.versus_test(axl.Alternator(), expected_actions=actions_3, seed=2)
+
+
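+# A hand-check sketch for the expected-action sequences in this file, using
+# only the API already exercised elsewhere in this patch (versus_test drives
+# the same match machinery after seeding the random number generator, which
+# is why the stochastic strategies here pin a seed per expected sequence):
+#
+#     import random
+#     import axelrod as axl
+#
+#     random.seed(1)
+#     match = axl.IpdMatch((axl.SecondByChampion(), axl.Alternator()), turns=20)
+#     interactions = match.play()  # list of (player, opponent) action pairs
+
+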
+class TestEatherley(TestPlayer): + + name = "Second by Eatherley" + player = axl.SecondByEatherley + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Test cooperate after opponent cooperates + actions = [(C, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + # If opponent only defects then probability of cooperating is 0. + actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + # Stochastic response to defect + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions, seed=0) + actions = [(C, C), (C, D), (C, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions, seed=1) + + opponent = axl.MockPlayer(actions=[D, C, C, D]) + actions = [(C, D), (D, C), (C, C), (C, D), (C, D)] + self.versus_test(opponent, expected_actions=actions, seed=8) + opponent = axl.MockPlayer(actions=[D, C, C, D]) + actions = [(C, D), (D, C), (C, C), (C, D), (D, D)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + +class TestTester(TestPlayer): + + name = "Second by Tester" + player = axl.SecondByTester + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Alternate after 3rd round if opponent only cooperates + actions = [(D, C)] + [(C, C), (C, C)] + [(D, C), (C, C)] * 4 + self.versus_test( + axl.Cooperator(), expected_actions=actions, attrs={"is_TFT": False} + ) + + # Cooperate after initial defection and become TfT + actions = [(D, C), (C, D), (C, C)] + self.versus_test( + axl.Alternator(), expected_actions=actions, attrs={"is_TFT": True} + ) + + # Now play TfT + opponent = axl.MockPlayer(actions=[C, D, C, D, D, C]) + actions = [(D, C), (C, D), (C, C), (C, D), (D, D), (D, C), (C, C)] + self.versus_test(opponent, expected_actions=actions, attrs={"is_TFT": True}) + + +class TestGladstein(TestPlayer): + + name = "Second by Gladstein" + player = axl.SecondByGladstein + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Cooperates and begins to play TFT when Alternator defects + actions = [(D, C), (C, D), (C, C), (C, D), (D, C)] + self.versus_test( + axl.Alternator(), expected_actions=actions, attrs={"patsy": False} + ) + + # Cooperation ratio will always be less than 0.5 + actions = [(D, C), (C, C), (C, C), (D, C), (C, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, attrs={"patsy": True} + ) + + # Apologizes immediately and plays TFT + actions = [(D, D), (C, D), (D, D), (D, D), (D, D)] + self.versus_test( + axl.Defector(), expected_actions=actions, attrs={"patsy": False} + ) + + # Ratio is 1/3 when MockPlayer defected for the first time. 
+        opponent = axl.MockPlayer(actions=[C, C, C, D, D])
+        actions = [(D, C), (C, C), (C, C), (D, D), (C, D)]
+        self.versus_test(opponent, expected_actions=actions, attrs={"patsy": False})
+
+        opponent = axl.AntiTitForTat()
+        actions = [(D, C), (C, C), (C, D), (C, D), (D, D)]
+        self.versus_test(opponent, expected_actions=actions, attrs={"patsy": False})
+
+
+class TestTranquilizer(TestPlayer):
+
+    name = "Second by Tranquilizer"
+    player = axl.SecondByTranquilizer
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": {"game"},
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    # Test for initialised variables
+
+    def test_init(self):
+
+        player = axl.SecondByTranquilizer()
+
+        self.assertEqual(player.num_turns_after_good_defection, 0)
+        self.assertEqual(player.opponent_consecutive_defections, 0)
+        self.assertEqual(player.one_turn_after_good_defection_ratio, 5)
+        self.assertEqual(player.two_turns_after_good_defection_ratio, 0)
+        self.assertEqual(player.one_turn_after_good_defection_ratio_count, 1)
+        self.assertEqual(player.two_turns_after_good_defection_ratio_count, 1)
+
+    def test_strategy(self):
+
+        opponent = axl.Bully()
+        actions = [(C, D), (D, D), (D, C), (C, C), (C, D), (D, D), (D, C), (C, C)]
+        expected_attrs = {
+            "num_turns_after_good_defection": 0,
+            "one_turn_after_good_defection_ratio": 5,
+            "two_turns_after_good_defection_ratio": 0,
+            "one_turn_after_good_defection_ratio_count": 1,
+            "two_turns_after_good_defection_ratio_count": 1,
+        }
+        self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs)
+
+        # Tests whether TitForTat is played given score is below 1.75
+
+        opponent = axl.Defector()
+        actions = [(C, D)] + [(D, D)] * 20
+        expected_attrs = {
+            "num_turns_after_good_defection": 0,
+            "one_turn_after_good_defection_ratio": 5,
+            "two_turns_after_good_defection_ratio": 0,
+            "one_turn_after_good_defection_ratio_count": 1,
+            "two_turns_after_good_defection_ratio_count": 1,
+        }
+        self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs)
+
+        opponent = axl.MockPlayer([C] * 2 + [D] * 8 + [C] * 4)
+        actions = [(C, C), (C, C)] + [(C, D)] + [(D, D)] * 7 + [(D, C)] + [(C, C)] * 3
+        expected_attrs = {
+            "num_turns_after_good_defection": 0,
+            "one_turn_after_good_defection_ratio": 5,
+            "two_turns_after_good_defection_ratio": 0,
+            "one_turn_after_good_defection_ratio_count": 1,
+            "two_turns_after_good_defection_ratio_count": 1,
+        }
+        self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs)
+
+        # If score is between 1.75 and 2.25, may cooperate or defect
+
+        opponent = axl.MockPlayer(actions=[D] * 3 + [C] * 4 + [D] * 2)
+        actions = [(C, D)] + [(D, D)] * 2 + [(D, C)] + [(C, C)] * 3 + [(C, D)]
+        actions += [(C, D)]  # <-- Random
+        expected_attrs = {
+            "num_turns_after_good_defection": 0,
+            "one_turn_after_good_defection_ratio": 5,
+            "two_turns_after_good_defection_ratio": 0,
+            "one_turn_after_good_defection_ratio_count": 1,
+            "two_turns_after_good_defection_ratio_count": 1,
+        }
+        self.versus_test(
+            opponent, expected_actions=actions, seed=0, attrs=expected_attrs
+        )
+
+        opponent = axl.MockPlayer(actions=[D] * 3 + [C] * 4 + [D] * 2)
+        actions = [(C, D)] + [(D, D)] * 2 + [(D, C)] + [(C, C)] * 3 + [(C, D)]
+        actions += [(D, D)]  # <-- Random
+        expected_attrs = {
+            "num_turns_after_good_defection": 0,
+            "one_turn_after_good_defection_ratio": 5,
+            "two_turns_after_good_defection_ratio": 0,
+            "one_turn_after_good_defection_ratio_count": 1,
+ "two_turns_after_good_defection_ratio_count": 1, + } + self.versus_test( + opponent, expected_actions=actions, seed=17, attrs=expected_attrs + ) + + """If score is greater than 2.25 either cooperate or defect, + if turn number <= 5; cooperate""" + + opponent = axl.MockPlayer(actions=[C] * 5) + actions = [(C, C)] * 5 + expected_attrs = { + "num_turns_after_good_defection": 0, + "one_turn_after_good_defection_ratio": 5, + "two_turns_after_good_defection_ratio": 0, + "one_turn_after_good_defection_ratio_count": 1, + "two_turns_after_good_defection_ratio_count": 1, + } + self.versus_test( + opponent, expected_actions=actions, seed=1, attrs=expected_attrs + ) + + opponent = axl.MockPlayer(actions=[C] * 5) + actions = [(C, C)] * 4 + [(D, C)] + expected_attrs = { + "num_turns_after_good_defection": 1, + "one_turn_after_good_defection_ratio": 5, + "two_turns_after_good_defection_ratio": 0, + "one_turn_after_good_defection_ratio_count": 1, + "two_turns_after_good_defection_ratio_count": 1, + } + self.versus_test( + opponent, expected_actions=actions, seed=89, attrs=expected_attrs + ) + + """ Given score per turn is greater than 2.25, + Tranquilizer will never defect twice in a row""" + + opponent = axl.MockPlayer(actions=[C] * 6) + actions = [(C, C)] * 4 + [(D, C), (C, C)] + expected_attrs = { + "num_turns_after_good_defection": 2, + "one_turn_after_good_defection_ratio": 5, + "two_turns_after_good_defection_ratio": 0, + "one_turn_after_good_defection_ratio_count": 2, + "two_turns_after_good_defection_ratio_count": 1, + } + self.versus_test( + opponent, expected_actions=actions, seed=89, attrs=expected_attrs + ) + + # Tests cooperation after update_state + + opponent = axl.MockPlayer(actions=[C] * 5) + actions = [(C, C)] * 4 + [(D, C)] + [(C, C)] + expected_attrs = { + "num_turns_after_good_defection": 2, + "one_turn_after_good_defection_ratio": 5, + "two_turns_after_good_defection_ratio": 0, + "one_turn_after_good_defection_ratio_count": 2, + "two_turns_after_good_defection_ratio_count": 1, + } + self.versus_test( + opponent, expected_actions=actions, seed=89, attrs=expected_attrs + ) + + # Ensures FD1 values are calculated + + opponent = axl.MockPlayer(actions=[C] * 6) + actions = [(C, C)] * 4 + [(D, C), (C, C)] + expected_attrs = { + "num_turns_after_good_defection": 2, + "one_turn_after_good_defection_ratio": 5, + "two_turns_after_good_defection_ratio": 0, + "one_turn_after_good_defection_ratio_count": 2, + "two_turns_after_good_defection_ratio_count": 1, + } + self.versus_test( + opponent, expected_actions=actions, seed=89, attrs=expected_attrs + ) + + # Ensures FD2 values are calculated + + opponent = axl.MockPlayer(actions=[C] * 6) + actions = [(C, C)] * 4 + [(D, C)] + [(C, C)] * 2 + expected_attrs = { + "num_turns_after_good_defection": 0, + "one_turn_after_good_defection_ratio": 5, + "two_turns_after_good_defection_ratio": 1.5, + "one_turn_after_good_defection_ratio_count": 2, + "two_turns_after_good_defection_ratio_count": 2, + } + self.versus_test( + opponent, expected_actions=actions, seed=89, attrs=expected_attrs + ) + + # Ensures scores are being counted + + opponent = axl.Defector() + actions = [(C, D)] + [(D, D)] * 19 + expected_attrs = { + "num_turns_after_good_defection": 0, + "one_turn_after_good_defection_ratio": 5, + "two_turns_after_good_defection_ratio": 0, + "one_turn_after_good_defection_ratio_count": 1, + "two_turns_after_good_defection_ratio_count": 1, + } + self.versus_test(opponent, expected_actions=actions, attrs=expected_attrs) + + +class TestGrofman(TestPlayer): + 
+ name = "Second by Grofman" + player = axl.SecondByGrofman + expected_classifier = { + "memory_depth": 8, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Cooperate for the first two rounds + actions = [(C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + # Cooperate for the first two rounds, then play tit for tat for 3-7 + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + # Demonstrate Grofman Logic + # Own previous move was C, opponent defected less than 3 times in last 8 + moregrofman_actions = [C] * 7 + [C] + opponent_actions = [C] * 6 + [D] * 2 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = list(zip(moregrofman_actions, opponent_actions)) + self.versus_test(opponent, expected_actions=actions) + + # Own previous move was C, opponent defected 3 or more times in last 8 + moregrofman_actions = ([C] * 3 + [D] * 3 + [C]) + [D] + opponent_actions = ([C] * 2 + [D] * 3 + [C] * 2) + [D] + opponent = axl.MockPlayer(actions=opponent_actions) + actions = list(zip(moregrofman_actions, opponent_actions)) + self.versus_test(opponent, expected_actions=actions) + + # Own previous move was D, opponent defected once or less in last 8 + moregrofman_actions = ([C] * 6 + [D]) + [C] + opponent_actions = ([C] * 5 + [D] * 1 + [C]) + [D] + opponent = axl.MockPlayer(actions=opponent_actions) + actions = list(zip(moregrofman_actions, opponent_actions)) + self.versus_test(opponent, expected_actions=actions) + + # Own previous move was D, opponent defected more than once in last 8 + moregrofman_actions = ([C] * 2 + [D] * 5) + [D] + opponent_actions = ([D] * 7) + [D] + opponent = axl.MockPlayer(actions=opponent_actions) + actions = list(zip(moregrofman_actions, opponent_actions)) + self.versus_test(opponent, expected_actions=actions) + + # Test to make sure logic matches Fortran (discrepancy found 8/23/2017) + opponent = axl.AntiTitForTat() + # Actions come from a match run by Axelrod Fortran using IpdPlayer('k86r') + actions = [ + (C, C), + (C, D), + (D, D), + (D, C), + (C, C), + (C, D), + (D, D), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (C, C), + ] + self.versus_test(opponent, expected_actions=actions) + + # Test to match the Fortran implementation for 30 rounds + opponent = axl.AntiTitForTat() + actions = [ + (C, C), + (C, D), + (D, D), + (D, C), + (C, C), + (C, D), + (D, D), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (C, C), + (C, D), + (C, D), + (C, D), + (C, D), + (D, D), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (C, C), + (C, D), + (C, D), + ] + self.versus_test(opponent, expected_actions=actions) + + # Test to match the Fortran implementation for 60 rounds + opponent = axl.AntiTitForTat() + actions = [ + (C, C), + (C, D), + (D, D), + (D, C), + (C, C), + (C, D), + (D, D), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (C, C), + (C, D), + (C, D), + (C, D), + (C, D), + (D, D), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (C, C), + (C, D), + (C, D), + (C, D), + (C, D), + (D, D), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (C, C), + (C, D), + (C, D), + (C, D), + (C, D), + (D, D), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (C, C), + (C, 
D),
+            (C, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (D, C),
+        ]
+        self.versus_test(opponent, expected_actions=actions)
+
+
+class TestKluepfel(TestPlayer):
+    name = "Second by Kluepfel"
+    player = axl.SecondByKluepfel
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C)] * 100  # Cooperate forever
+        self.versus_test(axl.Cooperator(), expected_actions=actions)
+
+        # Since the opponent never plays the same move twice in a row, the
+        # player will respond in kind with probability 70% after a
+        # cooperation and 60% otherwise, after the first couple of turns
+        actions = [
+            (C, C),
+            (C, D),  # Views first three as the same.
+            # A random number gets used in each of the first two.
+            (D, C),
+            (D, D),
+            (C, C),
+            (C, D),
+        ]
+        self.versus_test(axl.Alternator(), expected_actions=actions, seed=1)
+
+        actions = [(C, C), (C, D), (C, C), (D, D), (D, C), (C, D)]
+        self.versus_test(axl.Alternator(), expected_actions=actions, seed=2)
+
+        actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (C, C)]
+        self.versus_test(axl.Alternator(), expected_actions=actions, seed=3)
+
+        # Now we have to test the detect-random logic, which doesn't pick up
+        # until after 26 turns. So we need a big sample.
+        actions = [
+            (C, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (C, C),
+            (C, D),
+            (C, C),
+            (D, D),
+            (D, C),
+            (C, C),
+            (C, D),
+            (D, D),
+            (C, D),
+            (D, D),
+            (D, C),
+            (C, C),
+            (D, C),
+            (C, C),
+            (C, D),
+            (D, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, C),
+            (C, D),
+            # Successfully detects the random opponent for the remaining turns.
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, C),
+            (D, D),
+            (D, C),
+            (D, C),
+            (D, D),
+            (D, D),
+            (D, C),
+            (D, C),
+            (D, C),
+            (D, C),
+            (D, D),
+            (D, C),
+            (D, C),
+            (D, C),
+            (D, C),
+            (D, D),
+        ]
+        self.versus_test(axl.Random(0.5), expected_actions=actions, seed=10)
+
+
+class TestBorufsen(TestPlayer):
+    name = "Second by Borufsen"
+    player = axl.SecondByBorufsen
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C)] * 100  # Cooperate forever
+        self.versus_test(axl.Cooperator(), expected_actions=actions)
+
+        # Tries to cooperate every third time until detecting a defector
+        actions = (
+            [(C, D), (D, D), (D, D), (D, D)] * 6 + [(C, D), (D, D)] + [(D, D)] * 100
+        )
+        self.versus_test(axl.Defector(), expected_actions=actions)
+
+        # Alternates with additional coop, every sixth turn
+        # Won't be labeled as random, since 2/3 of opponent's C follow
+        # player's C
+        # `flip_next_defect` will get set on the sixth turn, which changes the
+        # seventh action
+        # Note that the first two turns of each period of six aren't
+        # marked as echoes, and the third isn't marked that way until the
+        # fourth turn.
+        actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] * 20
+        self.versus_test(axl.Alternator(), expected_actions=actions)
+
+        # Basically does tit-for-tat against Win-Shift, Lose-Stay D
+        # After 26 turns, will detect random since half of opponent's C follow
+        # Cs
+        # Coming out of it, there will be a new pattern. Then random is
+        # detected again.
+        actions = (
+            [(C, D), (D, C), (C, C)] * 8
+            + [(C, D), (D, C)]
+            + [(D, C)] * 25
+            + [(D, C)]
+            + [(C, C), (C, D), (D, C)] * 8
+            + [(D, C)] * 25
+        )
+        self.versus_test(axl.WinShiftLoseStay(D), expected_actions=actions)
+
+
+class TestCave(TestPlayer):
+    name = "Second by Cave"
+    player = axl.SecondByCave
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C)] * 100
+        self.versus_test(axl.Cooperator(), expected_actions=actions)
+
+        # It will take until turn 18 to decide to respond D->D
+        actions = [(C, D)]
+        actions += [
+            (C, D),
+            (D, D),
+            (D, D),
+            (C, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (D, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, D),
+        ]  # Randomly choose
+        actions += [(D, D)] * 30  # Defect
+        self.versus_test(axl.Defector(), expected_actions=actions, seed=1)
+
+        # Highly-defective opponent
+        # It will take until turn 20 to decide to respond D to C
+        opponent_actions = [D] * 17 + [C, C, C, C]
+        almost_defector = axl.MockPlayer(actions=opponent_actions)
+
+        actions = [(C, D)]
+        actions += [
+            (C, D),
+            (D, D),
+            (D, D),
+            (C, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (D, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, C),
+        ]  # Randomly choose
+        actions += [(C, C)]  # Coop for a minute
+        actions += [(D, C), (D, C)]
+        self.versus_test(almost_defector, expected_actions=actions, seed=1)
+
+        # Here it will take until turn 40 to detect random and defect
+        actions = [(C, C)]
+        actions += [
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (C, C),
+            (C, D),
+            (C, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (C, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (C, C),
+            (C, D),
+            (C, C),
+            (C, D),
+            (C, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+        ]  # Randomly choose
+        actions += [
+            (D, C),
+            (C, D),
+            (D, C),
+        ]  # 17 D have come, so tit for tat for a while
+        actions += [(D, D), (D, C)] * 100  # Random finally detected
+        self.versus_test(axl.Alternator(), expected_actions=actions, seed=2)
+
+
+class TestWmAdams(TestPlayer):
+    name = "Second by WmAdams"
+    player = axl.SecondByWmAdams
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C)] * 100  # Cooperate forever
+        self.versus_test(axl.Cooperator(), expected_actions=actions)
+
+        # Will ignore the first four defects
+        opponent_actions = [D] * 4 + [C] * 100
+        defect_four = axl.MockPlayer(actions=opponent_actions)
+        actions = [(C, D)] * 4 + [(C, C)] * 100
+        self.versus_test(defect_four, expected_actions=actions)
+
+        actions = [
+            (C, D),
+            (C, D),
+            (C, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (D, D),
+        ]
+        self.versus_test(axl.Defector(), expected_actions=actions, seed=1)
+        actions = [
+            (C, D),
+            (C, D),
+            (C, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (D, D),
+            (D, D),
+            (D, D),
+            (C, D),
+            (D, D),
+            (D, D),
+        ]
+        self.versus_test(axl.Defector(), expected_actions=actions, seed=2)
+
+        # After responding to the 11th D (counted as 10 D), just start
+        # cooperating
+        opponent_actions = [D] * 11 + [C] * 100
+        changed_man = axl.MockPlayer(actions=opponent_actions)
+        actions = [
+            (C, D),
+            (C, D),
+            (C, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (C, D),
+            (D, D),
+            (C, D),
+            (D, D),
+            (C, C),
+        ]
+        actions += [(C, C)] * 99
+        self.versus_test(changed_man, expected_actions=actions, seed=1)
+
+
+class TestGraaskampKatzen(TestPlayer):
+    name = "Second by GraaskampKatzen"
+    player = axl.SecondByGraaskampKatzen
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(["game"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C)] * 100  # Cooperate forever
+        self.versus_test(axl.Cooperator(), expected_actions=actions)
+
+        # GK does not do well against this opponent
+        opponent_actions = [C, D, D] * 100
+        GK_Foil = axl.MockPlayer(actions=opponent_actions)
+        actions = [(C, C), (C, D), (D, D)]
+        actions += [(D, C), (C, D), (D, D)] * 2
+        actions += [(D, C)]
+        actions += [(D, D), (D, D), (D, C)] * 20  # Defect here on
+        self.versus_test(GK_Foil, expected_actions=actions)
+
+        # Fail on second checkpoint
+        opponent_actions = [C] * 10 + [C, D, D] * 100
+        Delayed_GK_Foil = axl.MockPlayer(actions=opponent_actions)
+        actions = [(C, C)] * 10
+        actions += [(C, C), (C, D), (D, D)]
+        actions += [(D, C), (C, D), (D, D)] * 2
+        actions += [(D, C)]
+        actions += [(D, D), (D, D), (D, C)] * 20  # Defect here on
+        self.versus_test(Delayed_GK_Foil, expected_actions=actions)
+
+
+class TestWeiner(TestPlayer):
+    name = "Second by Weiner"
+    player = axl.SecondByWeiner
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C)] * 100  # Cooperate forever
+        self.versus_test(axl.Cooperator(), expected_actions=actions)
+
+        actions = [(C, C)]
+        actions += [(C, D), (D, C)]  # Tit-for-Tat
+        # Opponent's last move was a C with 1 D between
+        actions += [(C, D)]  # Tit-for-Tat. Raise forgiveness flag.
+        actions += [(C, C)]  # Tit-for-Tat. Use forgiveness flag.
+        # Opponent's last move was a C, but defect_padding counted as 0.
+        actions += [(C, D), (D, C)]  # Tit-for-Tat
+        # Opponent's last move was a C with 1 D between
+        actions += [(C, D)]  # Tit-for-Tat. Raise forgiveness flag.
+        actions += [(D, C)]  # Tit-for-Tat. Try forgiveness flag.
+        # This time grudge=20, so the forgiveness flag doesn't work.
+        actions += [(C, D)]  # Tit-for-Tat.
+        # This is the 5th opponent defect, won't be counted for 2 turns
+        actions += [(D, C)]  # Tit-for-Tat.
+        actions += [(D, D), (D, C)] * 100  # Defect now on.
+        self.versus_test(axl.Alternator(), expected_actions=actions)
+
+        # Build an opponent that will cause a wasted flag.
+        opponent_actions = [C, D, C, C, C, C, D, D]
+        Flag_Waster_1 = axl.MockPlayer(actions=opponent_actions)
+        actions = [(C, C), (C, D), (D, C)]
+        actions += [(C, C)]  # Raise flag, like in Alternator
+        actions += [(C, C)]  # Use flag, but don't change outcome
+        actions += [(C, C)]
+        actions += [(C, D)]  # Don't raise flag
+        actions += [(D, D)]  # Don't use flag
+        self.versus_test(Flag_Waster_1, expected_actions=actions)
+
+        # Demonstrate that grudge is not incremented on wasted flag.
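+        # (A "wasted" flag is one that gets raised but then has no outcome
+        # to change because the opponent cooperates anyway, as with
+        # Flag_Waster_1 above.)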
+ opponent_actions = [C, D, C, C, C, C, D, C, D, C] + Flag_Waster_2 = axl.MockPlayer(actions=opponent_actions) + actions = [(C, C), (C, D), (D, C)] + actions += [(C, C)] # Raise flag, like in Alternator + actions += [(C, C)] # Use flag, but don't change outcome + actions += [(C, C), (C, D), (D, C)] + actions += [(C, D)] # Raise flag + actions += [(C, C)] # Use flag to change outcome + self.versus_test(Flag_Waster_2, expected_actions=actions) + + # Show grudge passing over time + opponent_actions = [C, D, C, D, C] + [C] * 11 + [C, D, C, D, C] + Time_Passer = axl.MockPlayer(actions=opponent_actions) + actions = [(C, C), (C, D), (D, C)] + actions += [(C, D)] # Raise flag + actions += [(C, C)] # Use flag to change outcome + actions += [(C, C)] * 11 + actions += [(C, C), (C, D), (D, C)] + actions += [(C, D)] # Raise flag + actions += [(C, C)] # Use flag to change outcome + self.versus_test(Time_Passer, expected_actions=actions) + + +class TestHarrington(TestPlayer): + name = "Second by Harrington" + player = axl.SecondByHarrington + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Build an opponent that will cooperate the first 36 turns and + # defect on the 37th turn + opponent_actions = [C] * 36 + [D] + [C] * 100 + Defect37 = axl.MockPlayer(actions=opponent_actions) + # Activate the Fair-weather flag + actions = [(C, C)] * 36 + [(D, D)] + [(C, C)] * 100 + self.versus_test( + Defect37, expected_actions=actions, attrs={"mode": "Fair-weather"} + ) + + # Defect on 37th turn to activate Fair-weather, then later defect to + # exit Fair-weather + opponent_actions = [C] * 36 + [D] + [C] * 100 + [D] + [C] * 4 + Defect37_big = axl.MockPlayer(actions=opponent_actions) + actions = [(C, C)] * 36 + [(D, D)] + [(C, C)] * 100 + actions += [(C, D)] + # Immediately exit Fair-weather + actions += [(D, C), (C, C), (D, C), (C, C)] + self.versus_test( + Defect37_big, expected_actions=actions, seed=2, attrs={"mode": "Normal"} + ) + actions = [(C, C)] * 36 + [(D, D)] + [(C, C)] * 100 + actions += [(C, D)] + # Immediately exit Fair-weather + actions += [(D, C), (C, C), (C, C), (C, C)] + self.versus_test( + Defect37_big, expected_actions=actions, seed=1, attrs={"mode": "Normal"} + ) + + # Opponent defects on 1st turn + opponent_actions = [D] + [C] * 46 + Defect1 = axl.MockPlayer(actions=opponent_actions) + # Tit-for-Tat on the first, but no streaks, no Fair-weather flag. + actions = [(C, D), (D, C)] + [(C, C)] * 34 + [(D, C)] + # Two cooperations scheduled after the 37-turn defection + actions += [(C, C)] * 2 + # TFT twice, then random number yields a DCC combo. + actions += [(C, C)] * 2 + actions += [(D, C), (C, C), (C, C)] + # Don't draw next random number until now. Again DCC. + actions += [(D, C), (C, C), (C, C)] + self.versus_test(Defect1, expected_actions=actions, seed=2) + + # Defection on turn 37 by opponent doesn't have an effect here + opponent_actions = [D] + [C] * 35 + [D] + [C] * 10 + Defect1_37 = axl.MockPlayer(actions=opponent_actions) + actions = [(C, D), (D, C)] + [(C, C)] * 34 + [(D, D)] + actions += [(C, C)] * 2 + actions += [(C, C)] * 2 + actions += [(D, C), (C, C), (C, C)] + actions += [(D, C), (C, C), (C, C)] + self.versus_test(Defect1_37, expected_actions=actions, seed=2) + + # However a defect on turn 38 would be considered a burn. 
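+        # (A burn appears to cancel the scheduled turn-37-style probing
+        # defections for good; the expectation below falls back to plain
+        # Tit-for-Tat and is asserted via the `burned` attribute.)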
+        opponent_actions = [D] + [C] * 36 + [D] + [C] * 9
+        Defect1_38 = axl.MockPlayer(actions=opponent_actions)
+        # Tit-for-Tat on the first, but no streaks, no Fair-weather flag.
+        actions = [(C, D), (D, C)] + [(C, C)] * 34 + [(D, C)]
+        # Two cooperations scheduled after the 37-turn defection
+        actions += [(C, D), (C, C)]
+        # TFT from then on, since burned
+        actions += [(C, C)] * 8
+        self.versus_test(
+            Defect1_38, expected_actions=actions, seed=2, attrs={"burned": True}
+        )
+
+        # Use alternator to test parity flags.
+        actions = [(C, C), (C, D)]
+        # Even streak is set to 2, one for the opponent's defect and one for
+        # our defect.
+        actions += [(D, C)]
+        actions += [(C, D)]
+        # Even streak is increased two more.
+        actions += [(D, C)]
+        actions += [(C, D)]
+        # Opponent's defect increments even streak to 5, so we cooperate.
+        actions += [(C, C)]
+        actions += [(C, D), (D, C), (C, D), (D, C), (C, D)]
+        # Another 5 streak
+        actions += [(C, C)]
+        # Repeat
+        actions += [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] * 3
+        # Repeat. Notice that the last turn is the 37th move, but we do not
+        # defect.
+        actions += [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)]
+        self.versus_test(axl.Alternator(), expected_actions=actions)
+
+        # Test for parity limit shortening.
+        opponent_actions = [D, C] * 1000
+        AsyncAlternator = axl.MockPlayer(actions=opponent_actions)
+        actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] * 6
+        # Defect on 37th move
+        actions += [(D, D)]
+        actions += [(C, C)]
+        # This triggers the burned flag. We should just Tit-for-Tat from here.
+        actions += [(C, D)]
+        actions += [(D, C), (C, D), (D, C), (C, D), (C, C)]
+        # This is the seventh time we've hit the limit. So do it once more.
+        actions += [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)]
+        # Now hit the limit sooner
+        actions += [(C, D), (D, C), (C, D), (C, C)] * 5
+        self.versus_test(
+            AsyncAlternator, expected_actions=actions, attrs={"parity_limit": 3}
+        )
+
+        # Use a Defector to test the 20-defect streak
+        actions = [(C, D), (D, D), (D, D), (D, D), (D, D)]
+        # Now the two parity flags are used
+        actions += [(C, D), (C, D)]
+        # Repeat
+        actions += [(D, D), (D, D), (D, D), (D, D), (C, D), (C, D)] * 2
+        actions += [(D, D), (D, D)]
+        # 20 D have passed (the first isn't recorded)
+        actions += [(D, D)] * 100
+        # The defect streak will always be detected from here on, because it
+        # doesn't reset. This logic comes before parity streaks or the turn-
+        # based logic.
+        self.versus_test(
+            axl.Defector(),
+            expected_actions=actions,
+            attrs={"recorded_defects": 119},
+        )
+
+        # Detect random
+        expected_actions = [
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (C, D),
+            (D, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, C),
+            (C, D),
+            (D, D),
+            (D, C),
+            (C, D),
+            (D, D),
+            (D, C),
+            (C, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, D),
+            (D, C),
+            (C, D),
+            (D, D),
+            (D, D),
+            (C, D),
+            (D, C),
+            (C, C),
+        ]
+        # Enter defect mode.
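+        # (Random detection relies on calculate_chi_squared over the 4-row
+        # history matrix noted below; a small statistic, roughly 2.395 here,
+        # suggests the opponent's responses do not track our previous move,
+        # so defect mode is entered.)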
+ expected_actions += [(D, C)] + random.seed(10) + player = self.player() + match = axl.IpdMatch((player, axl.Random()), turns=len(expected_actions)) + # The history matrix will be [[0, 2], [5, 6], [3, 6], [4, 2]] + actions = match.play() + self.assertEqual(actions, expected_actions) + self.assertAlmostEqual( + player.calculate_chi_squared(len(expected_actions)), 2.395, places=3 + ) + + # Come back out of defect mode + opponent_actions = [ + D, + C, + D, + C, + D, + D, + D, + C, + D, + C, + C, + D, + D, + C, + D, + D, + C, + C, + D, + C, + D, + D, + C, + D, + D, + D, + D, + C, + C, + C, + ] + opponent_actions += [D] * 16 + Rand_Then_Def = axl.MockPlayer(actions=opponent_actions) + actions = [ + (C, D), + (D, C), + (C, D), + (D, C), + (C, D), + (C, D), + (D, D), + (D, C), + (C, D), + (D, C), + (C, C), + (C, D), + (D, D), + (D, C), + (C, D), + (D, D), + (D, C), + (C, C), + (C, D), + (D, C), + (C, D), + (D, D), + (D, C), + (C, D), + (D, D), + (D, D), + (C, D), + (D, C), + (C, C), + ] + actions += [(D, C)] + # Enter defect mode. + actions += [(D, D)] * 14 + # Mutual defect for a while, then exit Defect mode with two coops + actions += [(C, D)] * 2 + self.versus_test( + Rand_Then_Def, + expected_actions=actions, + seed=10, + attrs={"mode": "Normal", "was_defective": True}, + ) + + +class TestTidemanAndChieruzzi(TestPlayer): + name = "Second by Tideman and Chieruzzi" + player = axl.SecondByTidemanAndChieruzzi + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"game"}, + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 100 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D)] + [(D, D)] * 8 + self.versus_test( + axl.Defector(), expected_actions=actions, attrs={"score_to_beat_inc": 5} + ) + + actions = [(C, D)] + [(D, D)] * 8 + # On tenth turn, try a fresh start + actions += [(C, D), (C, D)] + [(D, D)] * 2 + self.versus_test( + axl.Defector(), expected_actions=actions, attrs={"last_fresh_start": 11} + ) + + actions = [(C, C), (C, D)] + # Scores and score_to_beat variables are a turn behind + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={ + "current_score": 3, + "opponent_score": 3, + "score_to_beat": 0, + "score_to_beat_inc": 0, + }, + ) + actions += [(D, C), (C, D)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={ + "current_score": 8, + "opponent_score": 8, + "score_to_beat": 0, + "score_to_beat_inc": 5, + }, + ) + actions += [(D, C), (D, D)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={ + "current_score": 13, + "opponent_score": 13, + "score_to_beat": 5, + "score_to_beat_inc": 10, + }, + ) + actions += [(D, C), (D, D)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={ + "current_score": 19, + "opponent_score": 14, + "score_to_beat": 15, + "score_to_beat_inc": 15, + }, + ) + actions += [(D, C), (D, D)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={ + "current_score": 25, + "opponent_score": 15, + "score_to_beat": 30, + "score_to_beat_inc": 20, + }, + ) + + # Build an opponent who will cause us to consider a Fresh Start, but + # will fail the binomial test. 
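+        # (Five defections in ten rounds sit exactly at the mean of a
+        # fair-coin binomial, so the deviation the test looks for is absent
+        # and no Fresh Start is granted.)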
+ opponent_actions = [C] * 5 + [D] * 5 + C5D5_player = axl.MockPlayer(actions=opponent_actions) + actions = [(C, C)] * 5 + [(C, D)] + [(D, D)] * 3 + actions += [(D, D)] # No Defection here means no Fresh Start. + self.versus_test(C5D5_player, expected_actions=actions) + + +class TestGetzler(TestPlayer): + name = "Second by Getzler" + player = axl.SecondByGetzler + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 100 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (C, D), (D, D), (D, D), (D, D)] + self.versus_test( + axl.Defector(), + expected_actions=actions, + seed=1, + attrs={"flack": 15.0 / 16.0}, + ) + + actions = [(C, C), (C, D), (C, C), (C, D), (D, C), (C, D)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + seed=4, + attrs={"flack": 5.0 / 16.0}, + ) + + +class TestLeyvraz(TestPlayer): + name = "Second by Leyvraz" + player = axl.SecondByLeyvraz + expected_classifier = { + "memory_depth": 3, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 100 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (C, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions, seed=1) + actions = [(C, D), (D, D), (D, D), (C, D)] + self.versus_test(axl.Defector(), expected_actions=actions, seed=2) + + actions = [ + (C, D), + (C, C), + (D, C), + (C, D), + (D, C), + (D, D), + (C, D), + (D, C), + (C, D), + ] + self.versus_test( + axl.SuspiciousTitForTat(), expected_actions=actions, seed=1 + ) + + actions = [(C, C), (C, D), (D, C)] + [(D, D), (C, C)] * 3 + self.versus_test(axl.Alternator(), expected_actions=actions, seed=2) + actions = [(C, C), (C, D), (C, C)] + [(D, D), (C, C)] * 3 + self.versus_test(axl.Alternator(), expected_actions=actions, seed=3) + + +class TestWhite(TestPlayer): + name = "Second by White" + player = axl.SecondByWhite + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 30 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D)] * 10 + [(D, D)] * 20 + self.versus_test(axl.Defector(), expected_actions=actions) + + actions = [ + (C, D), + (C, D), + (C, C), + (C, C), + (C, C), + (C, D), + (C, C), + (C, D), + (C, C), + (C, D), + (C, C), + (C, D), + (C, D), + (D, C), + (C, D), + (D, D), + (D, C), + (C, D), + (D, D), + (D, C), + ] + self.versus_test(axl.Random(0.5), expected_actions=actions, seed=6) + actions = [ + (C, C), + (C, D), + (C, D), + (C, C), + (C, C), + (C, C), + (C, C), + (C, D), + (C, D), + (C, D), + (C, D), + (D, D), + (D, C), + (C, C), + (C, C), + (C, D), + (C, C), + (C, D), + (C, C), + (C, D), + ] + self.versus_test(axl.Random(0.5), expected_actions=actions, seed=12) + + +class TestBlack(TestPlayer): + name = "Second by Black" + player = axl.SecondByBlack + expected_classifier = { + "memory_depth": 5, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, 
+ "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 30 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D)] * 5 + actions += [ + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (C, D), + ] + self.versus_test(axl.Defector(), expected_actions=actions, seed=1) + + actions = [(C, D)] * 5 + actions += [ + (D, D), + (C, D), + (D, D), + (D, D), + (D, D), + (C, D), + (D, D), + (D, D), + (D, D), + (D, D), + ] + self.versus_test(axl.Defector(), expected_actions=actions, seed=15) + + +class TestRichardHufford(TestPlayer): + name = "Second by RichardHufford" + player = axl.SecondByRichardHufford + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 19 + [(D, C), (C, C), (C, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, attrs={"streak_needed": 14} + ) + + actions = [(C, C)] * 19 + [(D, C), (C, C)] + actions += [ + (C, C) + ] # This is the first Cooperation that gets counted on the new streak + actions += [(C, C)] * 13 + [(D, C), (C, C), (C, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, attrs={"streak_needed": 11} + ) + + opponent_actions = [C] * 20 + [D] + BoredCooperator = axl.MockPlayer(actions=opponent_actions) + actions = [(C, C)] * 19 + [(D, C), (C, D), (C, C)] + self.versus_test( + BoredCooperator, expected_actions=actions, attrs={"streak_needed": 31} + ) + + actions = [(C, D)] # "Disagreement" + actions += [(D, C)] # TFT. Disagreement + actions += [(C, C)] # TFT. + actions += [(C, D)] # TFT. Disagreement + actions += [(D, C)] # Three of last four are disagreements. + actions += [(C, C)] # TFT. Disagreement + actions += [(D, D)] # Three of last four are disagreements. Disagreement + actions += [(D, D)] # Three of last four are disagreements. + actions += [(D, D)] # Now there are 5/9 disagreements, so Defect. + self.versus_test( + axl.WinShiftLoseStay(), + expected_actions=actions, + attrs={"num_agreements": 5}, + ) + + +class TestYamachi(TestPlayer): + name = "Second by Yamachi" + player = axl.SecondByYamachi + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 100 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [ + (C, D) + ] * 2 # Also Cooperate in first two moves (until we update `count_them_us_them`.) + actions += [ + (C, D) + ] # them_three_ago defaults to C, so that (C, C, *) gets updated, then (D, C, *) get checked. + # It's actually impossible to Defect on the third move. + actions += [(D, D)] # (D, C, *) gets updated, then checked. + actions += [(C, D)] # (D, C, *) gets updated, but (D, D, *) checked. + actions += [(D, D)] * 30 # (D, D, *) gets updated and checked from here on. + self.versus_test(axl.Defector(), expected_actions=actions) + + actions = [(C, C), (C, D)] + actions += [(C, C)] # Increment (C, C, C). Check (C, C, *). Cooperate. + # Reminder that first C is default value and last C is opponent's first move. + actions += [(C, D)] # Increment (C, C, D). Check (D, C, *) = 0. Cooperate. + actions += [(C, C)] # Increment (D, C, C). Check (C, C, *) = 0. 
Cooperate.
+        # There is one Defection and one Cooperation in this scenario,
+        # but the Cooperation was due to a default value only. We can see where this is going.
+        actions += [(C, D)]  # Increment (C, C, D). Check (D, C, *) = 1. Cooperate.
+        actions += [(D, C)]  # Increment (D, C, C). Check (C, C, *) = -1. Defect.
+        actions += [
+            (C, D)
+        ]  # Increment (C, C, D). Check (D, D, *) = 0 (New). Cooperate.
+        actions += [(D, C)]  # Increment (D, D, C). Check (C, C, *) < 0. Defect.
+        actions += [(C, D)]  # Increment (C, C, D). Check (D, D, *) > 0. Cooperate.
+        actions += [(D, C), (C, D)] * 15  # This pattern continues for a while.
+        actions += [
+            (D, C),
+            (D, D),
+        ] * 30  # Defect from turn 41 on, since near 50% Defections.
+        self.versus_test(axl.Alternator(), expected_actions=actions)
+
+        # Rip-off is the most interesting interaction.
+        actions = [
+            (C, D),
+            (C, C),
+            (C, D),
+            (D, C),
+            (C, C),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+        ]
+        my_dict = {
+            (C, C, C): 1,
+            (C, C, D): 18,
+            (C, D, C): 1,
+            (C, D, D): 0,
+            (D, C, C): 1,
+            (D, C, D): 0,
+            (D, D, C): 17,
+            (D, D, D): 0,
+        }
+        RipoffPlayer = axl.Ripoff()
+        self.versus_test(
+            RipoffPlayer,
+            expected_actions=actions,
+            attrs={"count_them_us_them": my_dict},
+        )
+        self.assertEqual(
+            RipoffPlayer.defections, 19
+        )  # Next turn, `portion_defect` = 0.4756
+
+        # The pattern (C, D), (D, C) will continue indefinitely unless overridden.
+        actions += [(D, D)]  # Next turn, `portion_defect` = 0.4881
+        actions += [(D, D)]  # Next turn, `portion_defect` = 0.5
+        actions += [(D, D)]  # Next turn, `portion_defect` = 0.5114
+        actions += [(D, D)]  # Next turn, `portion_defect` = 0.5222
+        actions += [(D, D)]  # Next turn, `portion_defect` = 0.5326
+        actions += [(D, D)]  # Next turn, `portion_defect` = 0.5426
+        actions += [(D, D)]  # Next turn, `portion_defect` = 0.5521
+        actions += [
+            (D, D),
+            (C, D),
+            (D, C),
+            (C, D),
+        ]  # Takes a turn to fall back into the cycle.
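+        # (Reading aid: `portion_defect` appears to be a smoothed fraction of
+        # the opponent's defections to date; once it crosses 0.5 Yamachi
+        # defects, which is why the (C, D)/(D, C) cycle above gives way to
+        # mutual defection before settling back into the cycle.)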
+        self.versus_test(axl.Ripoff(), expected_actions=actions)
+
+
+class TestColbert(TestPlayer):
+    name = "Second by Colbert"
+    player = axl.SecondByColbert
+    expected_classifier = {
+        "memory_depth": 4,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C)] * 5 + [(D, C)] + [(C, C)] * 30
+        self.versus_test(axl.Cooperator(), expected_actions=actions)
+
+        actions = [(C, D)] * 5 + [(D, D)] + [(C, D)] * 2
+        actions += [(D, D), (D, D), (C, D), (C, D)] * 20
+        self.versus_test(axl.Defector(), expected_actions=actions)
+
+        opponent_actions = [C] * 8 + [C, C, D, C, C, C, C, C]
+        OddBall = axl.MockPlayer(actions=opponent_actions)
+        actions = [(C, C)] * 5 + [(D, C)] + [(C, C)] * 4
+        actions += [(C, D)] + [(D, C), (D, C), (C, C), (C, C)] + [(C, C)]
+        self.versus_test(OddBall, expected_actions=actions)
+
+
+class TestMikkelson(TestPlayer):
+    name = "Second by Mikkelson"
+    player = axl.SecondByMikkelson
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C)] * 30
+        self.versus_test(
+            axl.Cooperator(), expected_actions=actions, attrs={"credit": 8}
+        )
+
+        actions = [(C, D), (C, D), (C, D), (C, D)]
+        self.versus_test(
+            axl.Defector(), expected_actions=actions, attrs={"credit": 1}
+        )
+        # Defect then reset to 4
+        actions += [(D, D)]
+        self.versus_test(
+            axl.Defector(), expected_actions=actions, attrs={"credit": 4}
+        )
+        # Repeat
+        actions += [(C, D), (D, D)] * 2
+        self.versus_test(
+            axl.Defector(), expected_actions=actions, attrs={"credit": 4}
+        )
+        # With ten turns passed, keep defecting now
+        actions += [(C, D), (D, D)]
+        self.versus_test(
+            axl.Defector(), expected_actions=actions, attrs={"credit": 0}
+        )
+        # With ten turns passed, keep defecting now
+        actions += [(D, D)] * 30
+        self.versus_test(
+            axl.Defector(), expected_actions=actions, attrs={"credit": -7}
+        )
+
+        actions = [(C, D), (C, D), (C, C)]
+        self.versus_test(
+            axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 3}
+        )
+        actions += [(C, D), (C, D)]
+        self.versus_test(
+            axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 2}
+        )
+        actions += [(D, C)]
+        self.versus_test(
+            axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 4}
+        )
+        actions += [(C, D)]
+        self.versus_test(
+            axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 5}
+        )
+        actions += [(C, D)]
+        self.versus_test(
+            axl.Cycler("DDC"), expected_actions=actions, attrs={"credit": 3}
+        )
+
+        opponent_actions = [C] * 100 + [D] * 10
+        Change_of_Heart = axl.MockPlayer(actions=opponent_actions)
+        actions = [(C, C)] * 100 + [(C, D)] * 4
+        self.versus_test(
+            Change_of_Heart, expected_actions=actions, attrs={"credit": 2}
+        )
+        Change_of_Heart = axl.MockPlayer(actions=opponent_actions)
+        actions += [(C, D)] * 2
+        self.versus_test(
+            Change_of_Heart, expected_actions=actions, attrs={"credit": -2}
+        )
+        # Still Cooperate, because Defect rate is low.
+
+
+class TestRowsam(TestPlayer):
+    name = "Second by Rowsam"
+    player = axl.SecondByRowsam
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": False,
+        "makes_use_of": set(["game"]),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def 
test_strategy(self): + # Should always cooperate with Cooperator + actions = [(C, C)] * 100 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + # Against a Defector should eventually enter Defect mode + actions = [(C, D)] * 5 + actions += [(D, D), (C, D), (D, D)] # Do a Coop-Def cycle + self.versus_test(axl.Defector(), expected_actions=actions, attrs={ + "distrust_points": 5}) + actions += [(C, D)] * 3 # Continue for now + actions += [(D, D)] * 100 # Now Defect mode + self.versus_test(axl.Defector(), expected_actions=actions, attrs={ + "distrust_points": 10, "mode": "Defect"}) + + # Test specific score scenarios + # 5 Defects + opponent_actions = [D] * 5 + [C] * 100 + custom_opponent = axl.MockPlayer(actions=opponent_actions) + actions = [(C, D)] * 5 + actions += [(D, C)] + self.versus_test(custom_opponent, expected_actions=actions, attrs={ + "distrust_points": 5, "current_score": 0}) + + # 3 Defects + opponent_actions = [D] * 3 + [C] * 100 + custom_opponent = axl.MockPlayer(actions=opponent_actions) + actions = [(C, D)] * 3 + actions += [(C, C)] * 2 + actions += [(D, C)] + self.versus_test(custom_opponent, expected_actions=actions, attrs={ + "distrust_points": 3, "current_score": 6}) + + # 2 Defects + opponent_actions = [D] * 2 + [C] * 100 + custom_opponent = axl.MockPlayer(actions=opponent_actions) + actions = [(C, D)] * 2 + actions += [(C, C)] * 3 + actions += [(D, C)] + self.versus_test(custom_opponent, expected_actions=actions, attrs={ + "distrust_points": 2, "current_score": 9}) + + # 1 Defect + opponent_actions = [D] * 1 + [C] * 100 + custom_opponent = axl.MockPlayer(actions=opponent_actions) + actions = [(C, D)] * 1 + actions += [(C, C)] * 4 + actions += [(D, C)] + self.versus_test(custom_opponent, expected_actions=actions, attrs={ + "distrust_points": 1, "current_score": 12}) + + # Test that some distrust_points wear off. + opponent_actions = [D] * 3 + [C] * 100 + custom_opponent = axl.MockPlayer(actions=opponent_actions) + actions = [(C, D)] * 3 + actions += [(C, C)] * 2 + actions += [(D, C)] + self.versus_test(custom_opponent, expected_actions=actions, attrs={ + "distrust_points": 3, "current_score": 6}) + custom_opponent = axl.MockPlayer(actions=opponent_actions) + actions += [(C, C), (D, C)] # Complete Coop-Def cycle + actions += [(C, C)] * 3 + actions += [(D, C)] + self.versus_test(custom_opponent, expected_actions=actions, attrs={ + "distrust_points": 4, "current_score": 28}) + custom_opponent = axl.MockPlayer(actions=opponent_actions) + actions += [(C, C), (D, C)] # Complete Coop-Def cycle + actions += [(C, C)] * 4 # No defect or cycle this time. + self.versus_test(custom_opponent, expected_actions=actions, attrs={ + "distrust_points": 3, "current_score": 50}) # One point wears off. 
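+        # (Reading aid: with no new defections, roughly every 18 further
+        # mutual cooperations appear to erode one distrust point, as the next
+        # two blocks check, though the count never drains completely here.)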
+        custom_opponent = axl.MockPlayer(actions=opponent_actions)
+        actions += [(C, C)] * 18
+        self.versus_test(custom_opponent, expected_actions=actions, attrs={
+            "distrust_points": 2})  # Second point wears off
+        custom_opponent = axl.MockPlayer(actions=opponent_actions)
+        actions += [(C, C)] * 18
+        self.versus_test(custom_opponent, expected_actions=actions, attrs={
+            "distrust_points": 2})  # But no more
+
+
+class TestAppold(TestPlayer):
+    name = "Second by Appold"
+    player = axl.SecondByAppold
+    expected_classifier = {
+        "memory_depth": float("inf"),
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        # Should cooperate 100% of the time with the cooperator
+        actions = [(C, C)] * 100
+        self.versus_test(axl.Cooperator(), expected_actions=actions)
+
+        opponent = axl.Defector()
+        # Cooperate always for the first 4 turns
+        actions = [(C, D)] * 4
+        # Should cooperate because we forgive the first_opp_def after the fourth
+        # turn.
+        actions += [(C, D)]
+        # Own move two turns ago is C, so D.
+        actions += [(D, D)]
+        # Then defect most of the time, depending on the random number. We
+        # don't defect 100% of the time, because of the way that we initialize
+        # opp_c_after_x.
+        actions += [(D, D),
+                    (C, D),
+                    (D, D),
+                    (D, D),  # C can never be two moves after a C.
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (C, D),
+                    (C, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (C, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (D, D),
+                    (C, D),
+                    (C, D),
+                    (D, D),
+                    (D, D)]
+        self.versus_test(opponent, expected_actions=actions, seed=1,
+                         attrs={"first_opp_def": True})
+
+        # An opponent who defects for a long time, then tries cooperating
+        opponent_actions = [C] * 30 + [D] + [C] * 10
+        MostlyCooperates = axl.MockPlayer(actions=opponent_actions)
+        # Cooperate always at first
+        actions = [(C, C)] * 30
+        # The opponent defects once
+        actions += [(C, D)]
+        # But we forgive it.
+        actions += [(C, C)] * 10
+        self.versus_test(MostlyCooperates, expected_actions=actions)
+
+        opponent = axl.CyclerDC()
+        # First three opponent actions get counted as reactions to C. Fourth
+        # action will get counted on the next turn.
+        actions = [(C, D), (C, C), (C, D), (C, C)]
+        self.versus_test(opponent, expected_actions=actions,
+                         attrs={"opp_c_after_x": {C: 1, D: 1},
+                                "total_num_of_x": {C: 3, D: 1}})
+        # Will cooperate 50% of the time
+        actions += [(C, D)]
+        self.versus_test(opponent, expected_actions=actions,
+                         attrs={"opp_c_after_x": {C: 2, D: 1},
+                                "total_num_of_x": {C: 4, D: 1},
+                                "first_opp_def": False}, seed=100)
+        # Always cooperate, because we forgive the first defect
+        actions += [(C, C)]
+        self.versus_test(opponent, expected_actions=actions,
+                         attrs={"first_opp_def": True}, seed=100)
+
+        # Against a random opponent, will respond mostly randomly too.
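+        # (With no stable pattern to learn, opp_c_after_x stays near 50% for
+        # both C and D, so the cooperation probability hovers near a coin
+        # flip; the exact sequence below is pinned down by seed=7.)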
+ actions = [(C, C), + (C, C), + (C, D), + (C, C), + (C, C), + (C, D), + (C, C), + (C, C), + (C, C), + (D, C), + (C, D), + (D, D), + (C, D), + (C, D), + (C, C), + (C, C), + (D, C), + (C, D), + (D, D), + (C, C), + (C, D), + (C, C), + (C, C), + (C, D), + (D, C), + (C, D), + (D, D), + (C, D), + (C, C), + (D, C)] + self.versus_test(axl.Random(0.5), expected_actions=actions, seed=7) + + diff --git a/axelrod/tests/strategies/test_backstabber.py b/axelrod/tests/strategies/test_backstabber.py new file mode 100644 index 000000000..1f580c9cf --- /dev/null +++ b/axelrod/tests/strategies/test_backstabber.py @@ -0,0 +1,171 @@ +"""Tests for BackStabber and DoubleCrosser.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestBackStabber(TestPlayer): + + name = "BackStabber: (D, D)" + player = axl.BackStabber + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"length"}, + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_defects_after_four_defections(self): + # Forgives three defections + defector_actions = [(C, D), (C, D), (C, D), (C, D), (D, D), (D, D)] + self.versus_test( + axl.Defector(), + expected_actions=defector_actions, + match_attributes={"length": 200}, + ) + alternator_actions = [(C, C), (C, D)] * 4 + [(D, C), (D, D)] * 2 + self.versus_test( + axl.Alternator(), + expected_actions=alternator_actions, + match_attributes={"length": 200}, + ) + + def test_defects_on_last_two_rounds_by_match_len(self): + actions = [(C, C)] * 198 + [(D, C), (D, C)] + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + match_attributes={"length": 200}, + ) + actions = [(C, C)] * 10 + [(D, C), (D, C)] + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + match_attributes={"length": 12}, + ) + # Test that exceeds tournament length. + actions = [(C, C)] * 198 + [(D, C), (D, C), (C, C), (C, C)] + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + match_attributes={"length": 200}, + ) + # But only if the tournament is known. + actions = [(C, C)] * 202 + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + match_attributes={"length": -1}, + ) + + +class TestDoubleCrosser(TestBackStabber): + """ + Behaves like BackStabber except when its alternate strategy is triggered. + The alternate strategy is triggered when opponent did not defect in the + first 7 rounds, and 8 <= the current round <= 180. + """ + + name = "DoubleCrosser: (D, D)" + player = axl.DoubleCrosser + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"length"}, + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_when_alt_strategy_is_triggered(self): + """ + The alternate strategy is if opponent's last two plays were defect, + then defect. Otherwise cooperate. 
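+        In other words, while the alternate strategy is active it follows a
+        Tit For Two Tats rule: defect only after two consecutive opponent
+        defections.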
+ """ + starting_cooperation = [C] * 7 + starting_rounds = [(C, C)] * 7 + + opponent_actions = starting_cooperation + [D, D, C, D] + expected_actions = starting_rounds + [(C, D), (C, D), (D, C), (C, D)] + self.versus_test( + axl.MockPlayer(actions=opponent_actions), + expected_actions=expected_actions, + match_attributes={"length": 200}, + ) + + opponent_actions = starting_cooperation + [D, D, D, D, C, D] + expected_actions = starting_rounds + [ + (C, D), + (C, D), + (D, D), + (D, D), + (D, C), + (C, D), + ] + self.versus_test( + axl.MockPlayer(actions=opponent_actions), + expected_actions=expected_actions, + match_attributes={"length": 200}, + ) + + def test_starting_defect_keeps_alt_strategy_from_triggering(self): + opponent_actions_suffix = [C, D, C, D, D] + 3 * [C] + expected_actions_suffix = [(C, C), (C, D), (C, C), (C, D), (C, D)] + 3 * [ + (D, C) + ] + + defects_on_first = [D] + [C] * 6 + defects_on_first_actions = [(C, D)] + [(C, C)] * 6 + actions = defects_on_first + opponent_actions_suffix + expected_actions = defects_on_first_actions + expected_actions_suffix + self.versus_test( + axl.MockPlayer(actions=actions), + expected_actions=expected_actions, + match_attributes={"length": 200}, + ) + + defects_in_middle = [C, C, C, D, C, C, C] + defects_in_middle_actions = [ + (C, C), + (C, C), + (C, C), + (C, D), + (C, C), + (C, C), + (C, C), + ] + actions = defects_in_middle + opponent_actions_suffix + expected_actions = defects_in_middle_actions + expected_actions_suffix + self.versus_test( + axl.MockPlayer(actions=actions), + expected_actions=expected_actions, + match_attributes={"length": 200}, + ) + + defects_on_last = [C] * 6 + [D] + defects_on_last_actions = [(C, C)] * 6 + [(C, D)] + actions = defects_on_last + opponent_actions_suffix + expected_actions = defects_on_last_actions + expected_actions_suffix + self.versus_test( + axl.MockPlayer(actions=actions), + expected_actions=expected_actions, + match_attributes={"length": 200}, + ) + + def test_alt_strategy_stops_after_round_180(self): + one_eighty_opponent_actions = [C] * 8 + [C, D] * 86 + one_eighty_expected_actions = [(C, C)] * 8 + [(C, C), (C, D)] * 86 + opponent_actions = one_eighty_opponent_actions + [C] * 6 + expected_actions = one_eighty_expected_actions + [(D, C)] * 6 + self.versus_test( + axl.MockPlayer(actions=opponent_actions), + expected_actions=expected_actions, + match_attributes={"length": 200}, + ) diff --git a/axelrod/tests/strategies/test_better_and_better.py b/axelrod/tests/strategies/test_better_and_better.py new file mode 100644 index 000000000..f07060975 --- /dev/null +++ b/axelrod/tests/strategies/test_better_and_better.py @@ -0,0 +1,94 @@ +"""Tests for the BetterAndBetter strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestBetterAndBetter(TestPlayer): + + name = "Better and Better" + player = axl.BetterAndBetter + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """Tests that the strategy gives expected behaviour.""" + self.versus_test( + axl.Defector(), + expected_actions=[ + (D, D), + (D, D), + (D, D), + (D, D), + (C, D), + (D, D), + (D, D), + (D, D), + (D, D), + ], + seed=6, + ) + self.versus_test( + axl.Cooperator(), + expected_actions=[ + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, C), + (D, 
C), + ], + seed=8, + ) + self.versus_test( + axl.Defector(), + expected_actions=[ + (C, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + ], + seed=1514, + ) + actions = [] + for index in range(200): + if index in [ + 64, + 79, + 91, + 99, + 100, + 107, + 111, + 119, + 124, + 127, + 137, + 141, + 144, + 154, + 192, + 196, + ]: + actions.append((C, D)) + else: + actions.append((D, D)) + self.versus_test(axl.Defector(), expected_actions=actions, seed=8) diff --git a/axelrod/tests/strategies/test_bush_mosteller.py b/axelrod/tests/strategies/test_bush_mosteller.py new file mode 100644 index 000000000..2cc881a57 --- /dev/null +++ b/axelrod/tests/strategies/test_bush_mosteller.py @@ -0,0 +1,77 @@ +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestBushMostellar(TestPlayer): + + name = "Bush Mosteller: 0.5, 0.5, 3.0, 0.5" + player = axl.BushMosteller + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (D, C), (D, C)] + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + attrs={"_stimulus": 1}, + seed=1, + ) + + # Making sure probabilities changes following payoffs + actions = [(C, C), (D, D)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={"_stimulus": 0.4, "_c_prob": 0.6, "_d_prob": 0.5}, + seed=1, + ) + + actions = [(C, D), (D, D), (D, D)] + self.versus_test( + axl.Defector(), + expected_actions=actions, + attrs={ + "_stimulus": -0.20000000000000004, + "_c_prob": 0.375, + "_d_prob": 0.45, + }, + seed=1, + ) + + # Testing that stimulus never goes under -1 + actions = [(C, C), (D, C), (D, C)] + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + attrs={"_stimulus": -1}, + init_kwargs={"aspiration_level_divider": 0.1}, + seed=1, + ) + + # Ensures that the player will never play C or D if his probability is equal to 0 + actions = [(C, C)] * 100 + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + init_kwargs={"d_prob": 0.0}, + seed=1, + ) + + actions = [(D, C)] * 100 + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + init_kwargs={"c_prob": 0.0}, + seed=1, + ) diff --git a/axelrod/tests/strategies/test_calculator.py b/axelrod/tests/strategies/test_calculator.py new file mode 100644 index 000000000..3b2dc66c8 --- /dev/null +++ b/axelrod/tests/strategies/test_calculator.py @@ -0,0 +1,166 @@ +"""Tests for Calculator strategy.""" + +import axelrod as axl +from axelrod._strategy_utils import detect_cycle + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestCalculator(TestPlayer): + + name = "Calculator" + player = axl.Calculator + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_twenty_rounds_joss_then_defects_for_cyclers(self): + """Uses axelrod.strategies.axelrod_first.Joss strategy for first 20 rounds""" + seed = 2 + flip_indices = [1, 3] + twenty_alternator_actions = [C, D] * 10 + twenty_test_actions = get_joss_strategy_actions( + twenty_alternator_actions, flip_indices + ) + + expected_actions = twenty_test_actions + [(D, C), (D, D), (D, C), (D, D)] + 
self.versus_test(
+            axl.Alternator(), expected_actions=twenty_test_actions, seed=seed
+        )
+        self.versus_test(
+            axl.Alternator(), expected_actions=expected_actions, seed=seed
+        )
+
+    def test_twenty_rounds_joss_then_tit_for_tat_for_non_cyclers(self):
+        """Uses axelrod.strategies.axelrod_first.Joss strategy for the first 20 rounds"""
+        seed = 2
+        flip_indices = [1, 2]
+
+        twenty_non_cyclical_actions = [
+            C,
+            C,
+            D,
+            C,
+            C,
+            D,
+            C,
+            C,
+            C,
+            D,
+            C,
+            C,
+            C,
+            C,
+            D,
+            C,
+            C,
+            C,
+            C,
+            C,
+        ]
+        twenty_test_actions = get_joss_strategy_actions(
+            twenty_non_cyclical_actions, flip_indices
+        )
+
+        subsequent_opponent_actions = [D, C, D, C, D, C, D, C]
+        subsequent_test_actions = [
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+            (C, D),
+            (D, C),
+        ]
+
+        opponent_actions = twenty_non_cyclical_actions + subsequent_opponent_actions
+        test_actions = twenty_test_actions + subsequent_test_actions
+        self.versus_test(
+            axl.MockPlayer(actions=twenty_non_cyclical_actions),
+            expected_actions=twenty_test_actions,
+            seed=seed,
+        )
+        self.versus_test(
+            axl.MockPlayer(actions=opponent_actions),
+            expected_actions=test_actions,
+            seed=seed,
+        )
+
+    def test_edge_case_calculator_sees_cycles_of_size_ten(self):
+        seed = 3
+        ten_length_cycle = [C, D, C, C, D, C, C, C, D, C]
+        self.assertEqual(detect_cycle((ten_length_cycle * 2)), tuple(ten_length_cycle))
+
+        ten_cycle_twenty_rounds = get_joss_strategy_actions(
+            ten_length_cycle * 2, indices_to_flip=[16]
+        )
+        opponent_actions = ten_length_cycle * 2 + [C, D, C]
+        expected = ten_cycle_twenty_rounds + [(D, C), (D, D), (D, C)]
+        self.versus_test(
+            axl.MockPlayer(actions=opponent_actions),
+            expected_actions=expected,
+            seed=seed,
+        )
+
+    def test_edge_case_calculator_ignores_cycles_gt_len_ten(self):
+        seed = 3
+        eleven_length_cycle = [D, D, C, C, D, C, C, C, D, C, D]
+        twenty_rounds_of_eleven_len_cycle = (
+            eleven_length_cycle + eleven_length_cycle[:9]
+        )
+        twenty_rounds = get_joss_strategy_actions(
+            twenty_rounds_of_eleven_len_cycle, indices_to_flip=[19]
+        )
+
+        opponent_actions = twenty_rounds_of_eleven_len_cycle[:-1] + [D] + [C, D]
+        self.assertEqual(detect_cycle(opponent_actions), tuple(eleven_length_cycle))
+
+        uses_tit_for_tat_after_twenty_rounds = twenty_rounds + [(D, C), (C, D)]
+        self.versus_test(
+            axl.MockPlayer(actions=opponent_actions),
+            expected_actions=uses_tit_for_tat_after_twenty_rounds,
+            seed=seed,
+        )
+
+    def test_get_joss_strategy_actions(self):
+        opponent = [C, D, D, C, C]
+
+        flip_never_occurs_at_index_zero = [0]
+        flip_indices = [1, 2]
+
+        without_flip = [(C, C), (C, D), (D, D), (D, C), (C, C)]
+        with_flip = [(C, C), (D, D), (C, D), (D, C), (C, C)]
+
+        self.assertEqual(get_joss_strategy_actions(opponent, []), without_flip)
+        self.assertEqual(
+            get_joss_strategy_actions(opponent, flip_never_occurs_at_index_zero),
+            without_flip,
+        )
+        self.assertEqual(get_joss_strategy_actions(opponent, flip_indices), with_flip)
+
+
+def get_joss_strategy_actions(opponent_moves: list, indices_to_flip: list) -> list:
+    """
+    Takes a list of opponent moves and returns a list of tuples [(Joss move, opponent move)].
+    "indices_to_flip" are the indices where Joss differs from its expected TitForTat.
+    Joss is from axelrod.strategies.axelrod_first.
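+    (Joss is a Tit For Tat variant that occasionally defects after an
+    opponent cooperation; the flipped indices stand in for those random
+    deviations here.)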
+ """ + out = [] + for index, action in enumerate(opponent_moves): + previous_action = opponent_moves[index - 1] + if index == 0: + out.append((C, action)) + elif index in indices_to_flip: + out.append((previous_action.flip(), action)) + else: + out.append((previous_action, action)) + return out diff --git a/axelrod/tests/strategies/test_cooperator.py b/axelrod/tests/strategies/test_cooperator.py new file mode 100644 index 000000000..aca2290ae --- /dev/null +++ b/axelrod/tests/strategies/test_cooperator.py @@ -0,0 +1,79 @@ +"""Tests for the Cooperator strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestCooperator(TestPlayer): + + name = "Cooperator" + player = axl.Cooperator + expected_classifier = { + "memory_depth": 0, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Cooperates always. + actions = [(C, C)] + [(C, D), (C, C)] * 9 + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestTrickyCooperator(TestPlayer): + + name = "Tricky Cooperator" + player = axl.TrickyCooperator + expected_classifier = { + "memory_depth": 10, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Test if it tries to trick opponent. + self.versus_test(axl.Cooperator(), [(C, C), (C, C), (C, C), (D, C), (D, C)]) + + opponent_actions = [C, C, C, C, D, D] + expected_actions = [(C, C), (C, C), (C, C), (D, C), (D, D), (C, D)] + self.versus_test( + axl.MockPlayer(actions=opponent_actions), + expected_actions=expected_actions, + ) + + opponent_actions = [C, C, C, C] + [D, D] + [C] * 10 + expected_actions = ( + [(C, C), (C, C), (C, C), (D, C)] + [(D, D), (C, D)] + [(C, C)] * 10 + ) + self.versus_test( + axl.MockPlayer(actions=opponent_actions), + expected_actions=expected_actions, + ) + + def test_cooperates_in_first_three_rounds(self): + against_defector = [(C, D)] * 3 + against_cooperator = [(C, C)] * 3 + against_alternator = [(C, C), (C, D), (C, C)] + self.versus_test(axl.Defector(), expected_actions=against_defector) + self.versus_test(axl.Cooperator(), expected_actions=against_cooperator) + self.versus_test(axl.Alternator(), expected_actions=against_alternator) + + def test_defects_after_three_rounds_if_opponent_only_cooperated_in_max_history_depth_ten( + self + ): + against_cooperator = [(C, C)] * 3 + [(D, C)] * 20 + self.versus_test(axl.Cooperator(), expected_actions=against_cooperator) + + def test_defects_when_opponent_has_no_defections_to_history_depth_ten(self): + opponent_actions = [D] + [C] * 10 + [D, C] + expected_actions = [(C, D)] + [(C, C)] * 10 + [(D, D), (C, C)] + self.versus_test(axl.MockPlayer(actions=opponent_actions), expected_actions) diff --git a/axelrod/tests/strategies/test_cycler.py b/axelrod/tests/strategies/test_cycler.py new file mode 100644 index 000000000..83de4f135 --- /dev/null +++ b/axelrod/tests/strategies/test_cycler.py @@ -0,0 +1,237 @@ +"""Tests for the Cycler strategies.""" +import unittest +import itertools +import random + +import axelrod as axl +from axelrod._strategy_utils import detect_cycle +from axelrod.action import Action, str_to_actions +from axelrod.evolvable_player import InsufficientParametersError + +from .test_player import TestPlayer +from .test_evolvable_player import PartialClass, TestEvolvablePlayer + +C, D = 
Action.C, Action.D + + +class TestAntiCycler(TestPlayer): + + name = "AntiCycler" + player = axl.AntiCycler + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_has_no_cycles(self): + test_range = 100 + player = axl.AntiCycler() + for _ in range(test_range): + player.play(axl.Cooperator()) + + contains_no_cycles = player.history + for slice_at in range(1, len(contains_no_cycles) + 1): + self.assertIsNone(detect_cycle(contains_no_cycles[:slice_at])) + + def test_strategy(self): + """Rounds are CDD CD CCD CCCD CCCCD ...""" + anticycler_rounds = [ + C, + D, + D, + C, + D, + C, + C, + D, + C, + C, + C, + D, + C, + C, + C, + C, + D, + C, + C, + C, + C, + C, + D, + ] + num_elements = len(anticycler_rounds) + against_defector = list(zip(anticycler_rounds, [D] * num_elements)) + against_cooperator = list(zip(anticycler_rounds, [C] * num_elements)) + + self.versus_test(axl.Defector(), expected_actions=against_defector) + self.versus_test(axl.Cooperator(), expected_actions=against_cooperator) + + +class TestBasicCycler(TestPlayer): + name = "Cycler: CCD" + player = axl.Cycler + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_memory_depth_is_len_cycle_minus_one(self): + len_ten = "DCDCDDCDCD" + len_five = "DCDDC" + depth_nine = axl.Cycler(cycle=len_ten) + depth_four = axl.Cycler(cycle=len_five) + self.assertEqual(axl.Classifiers["memory_depth"](depth_nine), 9) + self.assertEqual(axl.Classifiers["memory_depth"](depth_four), 4) + + def test_cycler_works_as_expected(self): + expected = [(C, D), (D, D), (D, D), (C, D)] * 2 + self.versus_test( + axl.Defector(), expected_actions=expected, init_kwargs={"cycle": "CDDC"} + ) + + def test_cycle_raises_value_error_on_bad_cycle_str(self): + self.assertRaises(ValueError, axl.Cycler, cycle="CdDC") + + +def test_cycler_factory(cycle_str): + class TestCyclerChild(TestPlayer): + + name = "Cycler %s" % cycle_str + player = getattr(axl, "Cycler%s" % cycle_str) + expected_classifier = { + "memory_depth": len(cycle_str) - 1, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """Starts by cooperating""" + match_len = 20 + actions_generator = _get_actions_cycle_against_cooperator(cycle_str) + test_actions = [next(actions_generator) for _ in range(match_len)] + self.versus_test(axl.Cooperator(), expected_actions=test_actions) + + return TestCyclerChild + + +def _get_actions_cycle_against_cooperator(cycle_string: str): + """Converts str like 'CCDC' to an itertools.cycle against Cooperator. 
The + above example returns: itertools.cycle([(C, C), (C, C), (D, C), (C, C)])""" + cooperator_opponent_action = C + action_iterator = str_to_actions(cycle_string) + out = [(action, cooperator_opponent_action) for action in action_iterator] + return itertools.cycle(out) + + +TestCyclerDC = test_cycler_factory("DC") +TestCyclerCCD = test_cycler_factory("CCD") +TestCyclerDDC = test_cycler_factory("DDC") +TestCyclerCCCD = test_cycler_factory("CCCD") +TestCyclerCCCCCD = test_cycler_factory("CCCCCD") +TestCyclerCCCDCD = test_cycler_factory("CCCDCD") + + +class TestEvolvableCycler(unittest.TestCase): + + player_class = axl.EvolvableCycler + + def test_normalized_parameters(self): + # Must specify at least one of cycle or cycle_length + self.assertRaises( + InsufficientParametersError, self.player_class._normalize_parameters + ) + self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters, + cycle="", + ) + self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters, + cycle_length=0, + ) + + cycle = "C" * random.randint(0, 20) + "D" * random.randint(0, 20) + self.assertEqual( + self.player_class._normalize_parameters(cycle=cycle), (cycle, len(cycle)) + ) + + cycle_length = random.randint(1, 20) + random_cycle, cycle_length2 = self.player_class._normalize_parameters( + cycle_length=cycle_length + ) + self.assertEqual(len(random_cycle), cycle_length) + self.assertEqual(cycle_length, cycle_length2) + + def test_crossover_even_length(self): + cycle1 = "C" * 6 + cycle2 = "D" * 6 + cross_cycle = "CDDDDD" + + player1 = self.player_class(cycle=cycle1) + player2 = self.player_class(cycle=cycle2) + axl.seed(3) + crossed = player1.crossover(player2) + self.assertEqual(cross_cycle, crossed.cycle) + + def test_crossover_odd_length(self): + cycle1 = "C" * 7 + cycle2 = "D" * 7 + cross_cycle = "CDDDDDD" + + player1 = self.player_class(cycle=cycle1) + player2 = self.player_class(cycle=cycle2) + axl.seed(3) + crossed = player1.crossover(player2) + self.assertEqual(cross_cycle, crossed.cycle) + + +class TestEvolvableCycler2(TestEvolvablePlayer): + name = "EvolvableCycler" + player_class = axl.EvolvableCycler + parent_class = axl.Cycler + parent_kwargs = ["cycle"] + init_parameters = {"cycle_length": 100} + + +class TestEvolvableCycler3(TestEvolvablePlayer): + name = "EvolvableCycler" + player_class = axl.EvolvableCycler + parent_class = axl.Cycler + parent_kwargs = ["cycle"] + init_parameters = { + "cycle": "".join(random.choice(("C", "D")) for _ in range(50)), + "mutation_potency": 10, + } + + +# Substitute EvolvedCycler as a regular Cycler. 
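+# (PartialClass pins cycle="CCD" so the evolvable class can be driven
+# through the plain Cycler test suite below; the equality, pickle and repr
+# checks are skipped, presumably because the evolvable variant carries
+# extra randomised init state that those generic tests do not cover.)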
+EvolvableCyclerWithDefault = PartialClass(axl.EvolvableCycler, cycle="CCD") + + +class EvolvableCyclerAsCycler(TestBasicCycler): + player = EvolvableCyclerWithDefault + + def test_equality_of_clone(self): + pass + + def test_equality_of_pickle_clone(self): + pass + + def test_repr(self): + pass diff --git a/axelrod/tests/strategies/test_darwin.py b/axelrod/tests/strategies/test_darwin.py new file mode 100644 index 000000000..8aa0b6391 --- /dev/null +++ b/axelrod/tests/strategies/test_darwin.py @@ -0,0 +1,105 @@ +"""Tests for the Darwin PD strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestDarwin(TestPlayer): + + name = "Darwin" + player = axl.Darwin + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": True, + "manipulates_source": False, + "manipulates_state": True, + } + + @classmethod + def tearDownClass(cls): + """After all tests have run, makes sure the Darwin genome is reset.""" + cls.player.reset_genome() + super(TestDarwin, cls).tearDownClass() + + def setUp(self): + """Each test starts with a fresh genome.""" + self.player.reset_genome() + super(TestDarwin, self).setUp() + + def test_setup(self): + player = self.player() + self.assertEqual(player.genome, [C]) + self.assertEqual(player.history, []) + + def test_foil_strategy_inspection(self): + self.assertEqual(self.player().foil_strategy_inspection(), C) + + def test_strategy(self): + p1 = self.player() + p1.reset() + + self.versus_test( + axl.Cooperator(), + expected_actions=[(C, C)] * 5, + attrs={"genome": [C] * 5}, + ) + + expected_genome = [D] * 4 + [C] + self.versus_test( + axl.Defector(), + expected_actions=[(C, D)] * 5, + attrs={"genome": expected_genome}, + ) + + # uses genome + expected_actions = [(C, C)] + [(D, C)] * 3 + [(C, C)] * 2 + self.versus_test(axl.Cooperator(), expected_actions) + + def test_against_geller_and_mindreader(self): + self.versus_test( + axl.GellerCooperator(), + expected_actions=[(C, C)] * 2, + attrs={"genome": [C, C]}, + ) + + self.versus_test( + axl.MindReader(), + expected_actions=[(C, D)] * 2, + attrs={"genome": [D, C]}, + ) + + def test_reset_history_and_attributes(self): + # Overwrite this method because Darwin does not reset + self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) + + p1 = self.player() + self.assertEqual(p1.genome, [D, C, C, C, D]) + p1.reset() + self.assertEqual(len(p1.history), 0) + self.assertEqual(p1.genome, [C, C, C, C, D]) + + def test_all_darwin_instances_share_one_genome(self): + p1 = self.player() + p2 = self.player() + self.assertIs(p1.genome, p2.genome) + + self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) + + self.assertEqual(p2.genome, [D, C, C, C, D]) + self.assertIs(p1.genome, p2.genome) + p3 = self.player() + self.assertIs(p3.genome, p2.genome) + + def test_reset_genome(self): + self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4) + self.player.reset_genome() + self.assertEqual(self.player().genome, [C]) + + def equality_of_players_test(self, p1, p2, seed, opponent): + return True diff --git a/axelrod/tests/strategies/test_dbs.py b/axelrod/tests/strategies/test_dbs.py new file mode 100644 index 000000000..93d61e41a --- /dev/null +++ b/axelrod/tests/strategies/test_dbs.py @@ -0,0 +1,283 @@ +"""Tests DBS strategy.""" + +import unittest + +import axelrod as axl +from axelrod.strategies import dbs + +from .test_player 
import TestPlayer
+
+C, D = axl.Action.C, axl.Action.D
+
+
+class TestNode(unittest.TestCase):
+    """Tests for the base Node class."""
+
+    node = dbs.Node()
+
+    def test_get_siblings(self):
+        with self.assertRaises(NotImplementedError):
+            self.node.get_siblings()
+
+    def test_is_stochastic(self):
+        with self.assertRaises(NotImplementedError):
+            self.node.is_stochastic()
+
+
+class TestTreeSearch(unittest.TestCase):
+    """
+    A set of tests for the tree-search functions. We test the answers of both
+    the minimax_tree_search and move_gen functions against a set of classic
+    policies (the answer being the best move to play for the next turn,
+    considering an incoming position (C, C), (C, D), (D, C) or (D, D)).
+    For each policy, we test the answer for all incoming positions.
+    """
+
+    def setUp(self):
+        """Initialization for tests."""
+        # For each test, we check the answer against each possible
+        # input, as listed in self.input_pos.
+        self.input_pos = [(C, C), (C, D), (D, C), (D, D)]
+        # We define the policies against which we are going to test.
+        self.cooperator_policy = dbs.create_policy(1, 1, 1, 1)
+        self.defector_policy = dbs.create_policy(0, 0, 0, 0)
+        self.titForTat_policy = dbs.create_policy(1, 1, 0, 0)
+        self.alternator_policy = dbs.create_policy(0, 1, 0, 1)
+        self.grudger_policy = dbs.create_policy(1, 0, 0, 0)
+        self.random_policy = dbs.create_policy(0.5, 0.5, 0.5, 0.5)
+
+    def test_minimaxTreeSearch_cooperator(self):
+        """
+        Tests the minimax_tree_search function when playing against a
+        Cooperator player. Output == 0 means Cooperate, 1 means Defect.
+        The best (hence expected) answer to Cooperator is to defect
+        whatever the input position is.
+        """
+        expected_output = [1, 1, 1, 1]
+        for inp, out in zip(self.input_pos, expected_output):
+            begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0)
+            values = dbs.minimax_tree_search(
+                begin_node, self.cooperator_policy, max_depth=5
+            )
+            self.assertEqual(values.index(max(values)), out)
+
+    def test_move_gen_cooperator(self):
+        """
+        Tests the move_gen function when playing against a Cooperator player.
+        """
+        expected_output = [D, D, D, D]
+        for inp, out in zip(self.input_pos, expected_output):
+            out_move = dbs.move_gen(inp, self.cooperator_policy, depth_search_tree=5)
+            self.assertEqual(out_move, out)
+
+    def test_minimaxTreeSearch_defector(self):
+        """
+        Tests the minimax_tree_search function when playing against a
+        Defector player. The best answer to Defector is to always defect.
+        """
+        expected_output = [1, 1, 1, 1]
+        for inp, out in zip(self.input_pos, expected_output):
+            begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0)
+            values = dbs.minimax_tree_search(
+                begin_node, self.defector_policy, max_depth=5
+            )
+            self.assertEqual(values.index(max(values)), out)
+
+    def test_move_gen_defector(self):
+        """
+        Tests the move_gen function when playing against a Defector player.
+        """
+        expected_output = [D, D, D, D]
+        for inp, out in zip(self.input_pos, expected_output):
+            out_move = dbs.move_gen(inp, self.defector_policy, depth_search_tree=5)
+            self.assertEqual(out_move, out)
+
+    def test_minimaxTreeSearch_titForTat(self):
+        """
+        Tests the minimax_tree_search function when playing against a
+        TitForTat player. The best (hence expected) answer to TitForTat is to
+        cooperate whatever the input position is.
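+        (Defecting against TitForTat invites retaliation on the following
+        move, so over a deep enough search horizon cooperation scores
+        higher.)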
+        """
+        expected_output = [0, 0, 0, 0]
+        for inp, out in zip(self.input_pos, expected_output):
+            begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0)
+            values = dbs.minimax_tree_search(
+                begin_node, self.titForTat_policy, max_depth=5
+            )
+            self.assertEqual(values.index(max(values)), out)
+
+    def test_last_node_titForTat(self):
+        """
+        Test that against TitForTat, for the last move, i.e. if tree depth is 1,
+        the algorithm defects for all inputs.
+        """
+        expected_output = [1, 1, 1, 1]
+        for inp, out in zip(self.input_pos, expected_output):
+            begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0)
+            values = dbs.minimax_tree_search(
+                begin_node, self.titForTat_policy, max_depth=1
+            )
+            self.assertEqual(values.index(max(values)), out)
+
+    def test_move_gen_titForTat(self):
+        """
+        Tests the move_gen function when playing against a TitForTat player.
+        """
+        expected_output = [C, C, C, C]
+        for inp, out in zip(self.input_pos, expected_output):
+            out_move = dbs.move_gen(inp, self.titForTat_policy, depth_search_tree=5)
+            self.assertEqual(out_move, out)
+
+    def test_minimaxTreeSearch_alternator(self):
+        """
+        Tests the minimax_tree_search function when playing against an
+        Alternator player. The best answer to Alternator is to always defect.
+        """
+        expected_output = [1, 1, 1, 1]
+        for inp, out in zip(self.input_pos, expected_output):
+            begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0)
+            values = dbs.minimax_tree_search(
+                begin_node, self.alternator_policy, max_depth=5
+            )
+            self.assertEqual(values.index(max(values)), out)
+
+    def test_move_gen_alternator(self):
+        """
+        Tests the move_gen function when playing against an Alternator player.
+        """
+        expected_output = [D, D, D, D]
+        for inp, out in zip(self.input_pos, expected_output):
+            out_move = dbs.move_gen(inp, self.alternator_policy, depth_search_tree=5)
+            self.assertEqual(out_move, out)
+
+    def test_minimaxTreeSearch_random(self):
+        """
+        Tests the minimax_tree_search function when playing against a Random
+        player. The best answer to Random is to always defect.
+        """
+        expected_output = [1, 1, 1, 1]
+        for inp, out in zip(self.input_pos, expected_output):
+            begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0)
+            values = dbs.minimax_tree_search(
+                begin_node, self.random_policy, max_depth=5
+            )
+            self.assertEqual(values.index(max(values)), out)
+
+    def test_move_gen_random(self):
+        """
+        Tests the move_gen function when playing against a Random player.
+        """
+        expected_output = [D, D, D, D]
+        for inp, out in zip(self.input_pos, expected_output):
+            out_move = dbs.move_gen(inp, self.random_policy, depth_search_tree=5)
+            self.assertEqual(out_move, out)
+
+    def test_minimaxTreeSearch_grudger(self):
+        """
+        Tests the minimax_tree_search function when playing against a
+        Grudger player. The best answer to Grudger is to cooperate if both
+        cooperated in the last round; otherwise defect.
+        """
+        expected_output = [0, 1, 1, 1]
+        for inp, out in zip(self.input_pos, expected_output):
+            begin_node = dbs.DeterministicNode(inp[0], inp[1], depth=0)
+            values = dbs.minimax_tree_search(
+                begin_node, self.grudger_policy, max_depth=5
+            )
+            self.assertEqual(values.index(max(values)), out)
+
+    def test_move_gen_grudger(self):
+        """
+        Tests the move_gen function when playing against a Grudger player.
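+        The expected best replies mirror the tree-search test above:
+        cooperate only from mutual cooperation.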
+ """ + expected_output = [C, D, D, D] + for inp, out in zip(self.input_pos, expected_output): + out_move = dbs.move_gen(inp, self.grudger_policy, depth_search_tree=5) + self.assertEqual(out_move, out) + + +class TestDBS(TestPlayer): + name = "DBS: 0.75, 3, 4, 3, 5" + player = axl.DBS + + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": True, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + default_init_kwargs = { + "discount_factor": 0.75, + "promotion_threshold": 3, + "violation_threshold": 4, + "reject_threshold": 4, + "tree_depth": 5, + } + + # Test that DBS always cooperate against Cooperator. + actions = [(C, C)] * 7 + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + init_kwargs=default_init_kwargs, + ) + + # Test if it correctly learns Alternator strategy. + actions = [(C, C), (C, D)] * 3 + [(D, C), (C, D)] * 3 + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + init_kwargs=default_init_kwargs, + ) + + # Check that algorithms take into account a change in opponent's + # strategy. + mock_actions = [C, C, C, D, D, D, D, D, D, D] + exp_actions = [(C, C)] * 3 + [(C, D)] * 4 + [(D, D)] * 3 + self.versus_test( + opponent=axl.MockPlayer(actions=mock_actions), + expected_actions=exp_actions, + init_kwargs=default_init_kwargs, + ) + + # Check that adaptation is faster if diminishing promotion_threshold. + init_kwargs_2 = { + "discount_factor": 0.75, + "promotion_threshold": 2, + "violation_threshold": 4, + "reject_threshold": 4, + "tree_depth": 5, + } + mock_actions = [C, C, C, D, D, D, D, D, D, D] + exp_actions = [(C, C)] * 3 + [(C, D)] * 3 + [(D, D)] * 4 + self.versus_test( + opponent=axl.MockPlayer(actions=mock_actions), + expected_actions=exp_actions, + init_kwargs=init_kwargs_2, + ) + + # Check that ShouldDemote mechanism works. + # We play against Alternator for 12 turns to make the + # algorithm learn Alternator's strategy, then at turn 13 we + # change opponent to Defector, hence triggering ShouldDemote + # mechanism. For this test we use violation_threshold=3 + init_kwargs_3 = { + "discount_factor": 0.75, + "promotion_threshold": 3, + "violation_threshold": 3, + "reject_threshold": 3, + "tree_depth": 5, + } + exp_actions = [(C, C), (C, D)] * 3 + [(D, C), (C, D)] * 3 + exp_actions += [(D, D), (C, D)] * 3 + [(D, D)] + mock_actions = [C, D, C, D, C, D, C, D, C, D, C, D, D, D, D, D, D, D, D] + self.versus_test( + opponent=axl.MockPlayer(actions=mock_actions), + expected_actions=exp_actions, + init_kwargs=init_kwargs_3, + ) diff --git a/axelrod/tests/strategies/test_defector.py b/axelrod/tests/strategies/test_defector.py new file mode 100644 index 000000000..d9dffa48d --- /dev/null +++ b/axelrod/tests/strategies/test_defector.py @@ -0,0 +1,63 @@ +"""Tests for the Defector strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestDefector(TestPlayer): + + name = "Defector" + player = axl.Defector + expected_classifier = { + "memory_depth": 0, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_state": False, + "manipulates_source": False, + } + + def test_strategy(self): + # Test that always defects. 
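+        # (Only the first column, Defector's own moves, is under test; the
+        # second column just tracks Alternator's C/D cycle.)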
+ actions = [(D, C)] + [(D, D), (D, C)] * 9 + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestTrickyDefector(TestPlayer): + + name = "Tricky Defector" + player = axl.TrickyDefector + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_cooperates_if_opponent_history_has_C_and_last_three_are_D(self): + opponent_actions = [D, C] + [D] * 5 + actions = [(D, D), (D, C)] + [(D, D)] * 3 + [(C, D)] * 2 + self.versus_test( + axl.MockPlayer(actions=opponent_actions), expected_actions=actions + ) + + def test_defects_if_opponent_never_cooperated(self): + opponent_actions = [D] * 7 + actions = [(D, D)] * 7 + self.versus_test( + axl.MockPlayer(actions=opponent_actions), expected_actions=actions + ) + + def test_defects_if_opponent_last_three_are_not_D(self): + opponent_actions = [C] + [D] * 3 + [C, D] + actions = [(D, C)] + [(D, D)] * 3 + [(C, C), (D, D)] + self.versus_test( + axl.MockPlayer(actions=opponent_actions), expected_actions=actions + ) diff --git a/axelrod/tests/strategies/test_doubler.py b/axelrod/tests/strategies/test_doubler.py new file mode 100644 index 000000000..e3d436302 --- /dev/null +++ b/axelrod/tests/strategies/test_doubler.py @@ -0,0 +1,49 @@ +"""Tests for the Doubler strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestDoubler(TestPlayer): + + name = "Doubler" + player = axl.Doubler + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_defects_if_opponent_last_play_is_D_and_defections_gt_two_times_cooperations( + self + ): + opponent_plays = [C] * 7 + [D] * 4 + [C] + actions = [(C, C)] * 7 + [(C, D)] * 4 + [(D, C)] + self.versus_test( + axl.MockPlayer(actions=opponent_plays), expected_actions=actions + ) + + def test_defects_if_opponent_last_play_D_and_defections_equal_two_times_cooperations( + self + ): + opponent_plays = [C] * 8 + [D] * 4 + [C] + actions = [(C, C)] * 8 + [(C, D)] * 4 + [(D, C)] + self.versus_test( + axl.MockPlayer(actions=opponent_plays), expected_actions=actions + ) + + def test_cooperates_if_opponent_last_play_is_C(self): + opponent_first_five = [D] * 5 + actions_first_five = [(C, D)] + [(D, D)] * 4 + opponent_plays = opponent_first_five + [C] + [D] + actions = actions_first_five + [(D, C)] + [(C, D)] + self.versus_test( + axl.MockPlayer(actions=opponent_plays), expected_actions=actions + ) diff --git a/axelrod/tests/strategies/test_evolvable_player.py b/axelrod/tests/strategies/test_evolvable_player.py new file mode 100644 index 000000000..de6f9623f --- /dev/null +++ b/axelrod/tests/strategies/test_evolvable_player.py @@ -0,0 +1,213 @@ +import unittest +import functools +import random + +import axelrod as axl +from axelrod.action import Action +from axelrod.evolvable_player import copy_lists, crossover_lists, crossover_dictionaries +from .test_player import TestPlayer + +C, D = Action.C, Action.D + + +def PartialClass(cls, **kwargs): + + class PartialedClass(cls): + __init__ = functools.partialmethod( + cls.__init__, **kwargs) + + return PartialedClass + + +class EvolvableTestOpponent(axl.EvolvablePlayer): + name = "EvolvableTestOpponent" + + def __init__(self, 
value=None): + super().__init__() + if value: + self.value = value + else: + value = random.randint(2, 100) + self.value = value + self.overwrite_init_kwargs(value=value) + + @staticmethod + def strategy(opponent): + return Action.C + + def mutate(self): + value = random.randint(2, 100) + return EvolvableTestOpponent(value) + + def crossover(self, other): + if other.__class__ != self.__class__: + raise TypeError("Crossover must be between the same player classes.") + value = self.value + other.value + return EvolvableTestOpponent(value) + + +class TestEvolvablePlayer(TestPlayer): + + player_class = EvolvableTestOpponent + parent_class = None + init_parameters = dict() + + def player(self): + return self.player_class(**self.init_parameters) + + def test_repr(self): + """Test that the representation is correct.""" + if self.__class__ != TestEvolvablePlayer: + self.assertIn(self.name, str(self.player())) + pass + + def test_initialisation(self): + """Test that the player initiates correctly.""" + if self.__class__ != TestEvolvablePlayer: + player = self.player() + self.assertEqual(len(player.history), 0) + self.assertEqual(player.cooperations, 0) + self.assertEqual(player.defections, 0) + + def test_randomization(self): + """Test that randomization on initialization produces different strategies.""" + if self.init_parameters: + return + axl.seed(0) + player1 = self.player() + axl.seed(0) + player2 = self.player() + self.assertEqual(player1, player2) + for seed_ in range(2, 20): + axl.seed(seed_) + player2 = self.player() + if player1 != player2: + return + # Should never get here unless a change breaks the test, so don't include in coverage. + self.assertFalse(True) # pragma: no cover + + def test_mutate_variations(self): + """Generate many variations to test that mutate produces different strategies.""" + if not self.init_parameters: + return + axl.seed(100) + variants_produced = False + for _ in range(2, 400): + player = self.player() + mutant = player.mutate() + if player != mutant: + variants_produced = True + self.assertTrue(variants_produced) + + def test_mutate_and_clone(self): + """Test that mutated players clone properly.""" + axl.seed(0) + player = self.player() + mutant = player.clone().mutate() + clone = mutant.clone() + self.assertEqual(clone, mutant) + + def test_crossover(self): + """Test that crossover produces different strategies.""" + for seed_ in range(20): + axl.seed(seed_) + players = [] + for _ in range(2): + player = self.player() + # Mutate to randomize + player = player.mutate() + players.append(player) + player1, player2 = players + crossed = player1.crossover(player2) + if player1 != crossed and player2 != crossed and crossed == crossed.clone(): + return + # Should never get here unless a change breaks the test, so don't include in coverage. 
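+        # Unconditional failure: reaching this line means no seed produced a
+        # crossover distinct from both parents that also survives cloning.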
+ self.assertFalse(True) # pragma: no cover + + def test_crossover_mismatch(self): + other = axl.Cooperator() + player = self.player() + with self.assertRaises(TypeError): + player.crossover(other) + + def test_serialization(self): + """Serializing and deserializing should return the original player.""" + axl.seed(0) + player = self.player() + serialized = player.serialize_parameters() + deserialized_player = player.__class__.deserialize_parameters(serialized) + self.assertEqual(player, deserialized_player) + self.assertEqual(deserialized_player, deserialized_player.clone()) + + def test_serialization_csv(self): + """Serializing and deserializing should return the original player.""" + axl.seed(0) + player = self.player() + serialized = player.serialize_parameters() + s = "0, 1, {}, 3".format(serialized) + s2 = s.split(',')[2] + deserialized_player = player.__class__.deserialize_parameters(s2) + self.assertEqual(player, deserialized_player) + self.assertEqual(deserialized_player, deserialized_player.clone()) + + def behavior_test(self, player1, player2): + """Test that the evolvable player plays the same as its (nonevolvable) parent class.""" + for opponent_class in [axl.Random, axl.TitForTat, axl.Alternator]: + axl.seed(0) + opponent = opponent_class() + match = axl.IpdMatch((player1.clone(), opponent)) + results1 = match.play() + + axl.seed(0) + opponent = opponent_class() + match = axl.IpdMatch((player2.clone(), opponent)) + results2 = match.play() + + self.assertEqual(results1, results2) + + def test_behavior(self): + """Test that the evolvable player plays the same as its (nonevolvable) parent class.""" + if not self.parent_class: + return + + player = self.player_class(**self.init_parameters) + init_kwargs = {k: player.init_kwargs[k] for k in self.parent_kwargs} + parent_player = self.parent_class(**init_kwargs) + self.behavior_test(player, parent_player) + + serialized = player.serialize_parameters() + deserialized_player = player.__class__.deserialize_parameters(serialized) + self.behavior_test(deserialized_player, parent_player) + + +class TestUtilityFunctions(unittest.TestCase): + + def test_copy_lists(self): + l1 = [list(range(10)), list(range(20))] + l2 = copy_lists(l1) + self.assertIsNot(l1, l2) + + def test_crossover_lists(self): + list1 = [[0, C, 1, D], [0, D, 0, D], [1, C, 1, C], [1, D, 1, D]] + list2 = [[0, D, 1, C], [0, C, 0, C], [1, D, 1, D], [1, C, 1, C]] + + axl.seed(0) + crossed = crossover_lists(list1, list2) + self.assertEqual(crossed, list1[:3] + list2[3:]) + + axl.seed(1) + crossed = crossover_lists(list1, list2) + self.assertEqual(crossed, list1[:1] + list2[1:]) + + def test_crossover_dictionaries(self): + dict1 = {'1': 1, '2': 2, '3': 3} + dict2 = {'1': 'a', '2': 'b', '3': 'c'} + + axl.seed(0) + crossed = crossover_dictionaries(dict1, dict2) + self.assertEqual(crossed, {'1': 1, '2': 'b', '3': 'c'}) + + axl.seed(1) + crossed = crossover_dictionaries(dict1, dict2) + self.assertEqual(crossed, dict2) + diff --git a/axelrod/tests/strategies/test_finite_state_machines.py b/axelrod/tests/strategies/test_finite_state_machines.py new file mode 100644 index 000000000..309ef7cfe --- /dev/null +++ b/axelrod/tests/strategies/test_finite_state_machines.py @@ -0,0 +1,1139 @@ +"""Tests for Finite State Machine Strategies.""" + +import unittest + +import random + +import axelrod as axl +from axelrod.compute_finite_state_machine_memory import get_memory_from_transitions +from axelrod.evolvable_player import InsufficientParametersError +from axelrod.strategies import 
EvolvableFSMPlayer, FSMPlayer, SimpleFSM + +from .test_player import TestPlayer +from .test_evolvable_player import PartialClass, TestEvolvablePlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestSimpleFSM(unittest.TestCase): + def setUp(self): + self.two_state_transition = ( + (1, C, 0, C), + (1, D, 0, D), + (0, C, 1, D), + (0, D, 1, C), + ) + + self.two_state = SimpleFSM( + transitions=self.two_state_transition, initial_state=1 + ) + + def test__eq__true(self): + new_two_state = SimpleFSM( + transitions=self.two_state_transition, initial_state=1 + ) + self.assertTrue(new_two_state.__eq__(self.two_state)) + new_two_state.move(C) + self.two_state.move(D) + self.assertTrue(new_two_state.__eq__(self.two_state)) + + def test__eq__false_by_state(self): + new_two_state = SimpleFSM( + transitions=self.two_state_transition, initial_state=0 + ) + self.assertFalse(new_two_state.__eq__(self.two_state)) + + def test__eq__false_by_transition(self): + different_transitions = ((1, C, 0, D), (1, D, 0, D), (0, C, 1, D), (0, D, 1, C)) + new_two_state = SimpleFSM(transitions=different_transitions, initial_state=1) + + self.assertFalse(new_two_state.__eq__(self.two_state)) + + def test__eq__false_by_not_SimpleFSM(self): + self.assertFalse(self.two_state.__eq__(3)) + + def test__ne__(self): + new_two_state = SimpleFSM( + transitions=self.two_state_transition, initial_state=1 + ) + self.assertFalse(new_two_state.__ne__(self.two_state)) + new_two_state.move(C) + self.assertTrue(new_two_state.__ne__(self.two_state)) + + def test_move(self): + self.assertEqual(self.two_state.move(C), C) + self.assertEqual(self.two_state.state, 0) + self.assertEqual(self.two_state.move(C), D) + self.assertEqual(self.two_state.state, 1) + + self.assertEqual(self.two_state.move(D), D) + self.assertEqual(self.two_state.state, 0) + self.assertEqual(self.two_state.move(D), C) + self.assertEqual(self.two_state.state, 1) + + def test_bad_transitions_raise_error(self): + bad_transitions = ((1, C, 0, D), (1, D, 0, D), (0, C, 1, D)) + self.assertRaises( + ValueError, SimpleFSM, transitions=bad_transitions, initial_state=1 + ) + + def test_bad_initial_state_raises_error(self): + self.assertRaises( + ValueError, + SimpleFSM, + transitions=self.two_state_transition, + initial_state=5, + ) + + def test_state_setter_raises_error_for_bad_input(self): + with self.assertRaises(ValueError) as cm: + self.two_state.state = 5 + error_msg = cm.exception.args[0] + self.assertEqual(error_msg, "state: 5 does not have values for both C and D") + + +class TestSampleFSMPlayer(TestPlayer): + """Test a few sample tables to make sure that the finite state machines are + working as intended.""" + + name = "FSM IpdPlayer: ((1, C, 1, C), (1, D, 1, D)), 1, C" + player = axl.FSMPlayer + + expected_classifier = { + "memory_depth": 1, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_cooperator(self): + """Tests that the player defined by the table for Cooperator is in fact + Cooperator.""" + cooperator_init_kwargs = { + "transitions": ((1, C, 1, C), (1, D, 1, C)), + "initial_state": 1, + "initial_action": C, + } + self.versus_test( + axl.Alternator(), + expected_actions=[(C, C), (C, D)] * 5, + init_kwargs=cooperator_init_kwargs, + ) + + def test_defector(self): + """Tests that the player defined by the table for Defector is in fact + Defector.""" + defector_init_kwargs = { + "transitions": ((1, C, 1, D), (1, D, 1, D)), + 
"initial_state": 1, + "initial_action": D, + } + self.versus_test( + axl.Alternator(), + expected_actions=[(D, C), (D, D)] * 5, + init_kwargs=defector_init_kwargs, + ) + + def test_tft(self): + """Tests that the player defined by the table for TFT is in fact + TFT.""" + tft_init_kwargs = { + "transitions": ((1, C, 1, C), (1, D, 1, D)), + "initial_state": 1, + "initial_action": C, + } + self.versus_test( + axl.Alternator(), + expected_actions=[(C, C)] + [(C, D), (D, C)] * 5, + init_kwargs=tft_init_kwargs, + ) + + def test_wsls(self): + """Tests that the player defined by the table for TFT is in fact + WSLS (also known as Pavlov.""" + wsls_init_kwargs = { + "transitions": ((1, C, 1, C), (1, D, 2, D), (2, C, 2, D), (2, D, 1, C)), + "initial_state": 1, + "initial_action": C, + } + expected = [(C, C), (C, D), (D, C), (D, D)] * 3 + self.versus_test( + axl.Alternator(), + expected_actions=expected, + init_kwargs=wsls_init_kwargs, + ) + + +class TestFSMPlayer(TestPlayer): + name = "FSM IpdPlayer: ((1, C, 1, C), (1, D, 1, D)), 1, C" + player = axl.FSMPlayer + + expected_classifier = { + "memory_depth": 1, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def transitions_test(self, state_and_action): + """ + takes a list of [(initial_state, first_opponent_action), (next_state, + next_opponent_action), ...] and creates a list of opponent moves, and a + list of expected_actions based on the FiniteStateMachine. Then creates + a versus_test of those two lists. + """ + fsm_player = self.player() + transitions = fsm_player.fsm.state_transitions + first_opponent_move = state_and_action[0][1] + + expected_actions = [(fsm_player.initial_action, first_opponent_move)] + opponent_actions = [first_opponent_move] + + for index in range(1, len(state_and_action)): + current_state, last_opponent_move = state_and_action[index - 1] + fsm_move = transitions[(current_state, last_opponent_move)][1] + + new_state, current_opponent_move = state_and_action[index] + + expected_actions.append((fsm_move, current_opponent_move)) + opponent_actions.append(current_opponent_move) + + self.verify_against_finite_state_machine( + current_state=current_state, + expected_state=new_state, + last_opponent_move=last_opponent_move, + expected_move=fsm_move, + ) + + self.versus_test( + axl.MockPlayer(actions=opponent_actions), + expected_actions=expected_actions, + ) + + def verify_against_finite_state_machine( + self, current_state, expected_state, last_opponent_move, expected_move + ): + test_fsm = self.player().fsm + test_fsm.state = current_state + self.assertEqual(test_fsm.move(last_opponent_move), expected_move) + self.assertEqual(test_fsm.state, expected_state) + + def test_transitions_with_default_fsm(self): + if self.player is axl.FSMPlayer: + state_action = [(1, C), (1, D)] + self.transitions_test(state_action) + + def test_all_states_reachable(self): + player = self.player() + initial_state = player.initial_state + transitions = player.fsm.state_transitions + + called_states = set(pair[0] for pair in transitions.values()) + called_states.add(initial_state) + + owned_states = set(pair[0] for pair in transitions.keys()) + + un_callable_states = owned_states.difference(called_states) + extra_info = "The following states are un-reachable: {}".format( + un_callable_states + ) + self.assertEqual(un_callable_states, set(), msg=extra_info) + + def test_strategy(self): + """ + Regression test for init without specifying 
initial state or action + """ + transitions = ( + (0, C, 0, C), + (0, D, 3, C), + (1, C, 5, D), + (1, D, 0, C), + (2, C, 3, C), + (2, D, 2, D), + (3, C, 4, D), + (3, D, 6, D), + (4, C, 3, C), + (4, D, 1, D), + (5, C, 6, C), + (5, D, 3, D), + (6, C, 6, D), + (6, D, 6, D), + (7, C, 7, D), + (7, D, 5, C), + ) + opponent = axl.MockPlayer([D, D, C, C, D]) + actions = [(C, D), (C, D), (C, C), (D, C), (C, D)] + self.versus_test( + opponent, expected_actions=actions, init_kwargs={"transitions": transitions} + ) + + def test_memory(self): + """ + Test the memory depth using implemented algorithm + """ + transitions = self.player().fsm._state_transitions + self.assertEqual(get_memory_from_transitions(transitions), self.expected_classifier["memory_depth"]) + + +class TestFortress3(TestFSMPlayer): + + name = "Fortress3" + player = axl.Fortress3 + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + """ + transitions = ( + (1, C, 1, D), + (1, D, 2, D), + (2, C, 1, D), + (2, D, 3, C), + (3, C, 3, C), + (3, D, 1, D) + ) + """ + + def test_strategy(self): + state_and_actions = [(1, C), (1, D), (2, C), (1, C)] + self.transitions_test(state_and_actions) + + state_and_actions = [(1, D), (2, D), (3, C), (3, C), (3, C), (3, D), (1, C)] * 2 + self.transitions_test(state_and_actions) + + @unittest.expectedFailure + def test_incorrect_transitions(self): + state_and_actions = [(1, C), (1, D), (1, D)] + self.transitions_test(state_and_actions) + + +class TestFortress4(TestFSMPlayer): + + name = "Fortress4" + player = axl.Fortress4 + expected_classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + """ + transitions = ( + (1, C, 1, D), + (1, D, 2, D), + (2, C, 1, D), + (2, D, 3, D), + (3, C, 1, D), + (3, D, 4, C), + (4, C, 4, C), + (4, D, 1, D) + ) + """ + + def test_strategy(self): + state_and_actions = [(1, C), (1, D), (2, C)] * 3 + self.transitions_test(state_and_actions) + + state_and_actions = [(1, D), (2, D), (3, C), (1, C)] * 3 + self.transitions_test(state_and_actions) + + state_and_actions = [ + (1, D), + (2, D), + (3, D), + (4, C), + (4, C), + (4, C), + (4, C), + (4, D), + ] * 3 + self.transitions_test(state_and_actions) + + +class TestPredator(TestFSMPlayer): + + name = "Predator" + player = axl.Predator + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + """ + transitions = ( + (0, C, 0, D), + (0, D, 1, D), + (1, C, 2, D), + (1, D, 3, D), + (2, C, 4, C), + (2, D, 3, D), + (3, C, 5, D), + (3, D, 4, C), + (4, C, 2, C), + (4, D, 6, D), + (5, C, 7, D), + (5, D, 3, D), + (6, C, 7, C), + (6, D, 7, D), + (7, C, 8, D), + (7, D, 7, D), + (8, C, 8, D), + (8, D, 6, D) + ) + """ + + def test_strategy(self): + state_and_actions = [ + (0, D), + (1, C), + (2, C), + (4, C), + (2, D), + (3, D), + (4, D), + (6, C), + ] + [(7, D), (7, C), (8, C), (8, D), (6, D)] * 3 + self.transitions_test(state_and_actions) + + state_and_actions = [(0, D), (1, C), (2, D), (3, C), (5, D), (3, C), (5, C)] + [ + (7, C), + (8, D), + (6, C), + ] * 5 + self.transitions_test(state_and_actions) + + state_and_actions = ( + [(0, C), (0, D)] + [(1, D), (3, D), (4, D), (6, 
D)] + [(7, D)] * 10 + ) + self.transitions_test(state_and_actions) + + +class TestPun1(TestFSMPlayer): + + name = "Pun1" + player = axl.Pun1 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + """ + transitions = ( + (1, C, 2, C), + (1, D, 2, C), + (2, C, 1, C), + (2, D, 1, D) + ) + """ + + def test_strategy(self): + state_and_actions = [(1, C), (2, D), (1, D), (2, D)] * 3 + self.transitions_test(state_and_actions) + + +class TestRaider(TestFSMPlayer): + + name = "Raider" + player = axl.Raider + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + """ + transitions = ( + (0, C, 2, D), + (0, D, 2, D), + (1, C, 1, C), + (1, D, 1, D), + (2, C, 0, D), + (2, D, 3, C), + (3, C, 0, D), + (3, D, 1, C) + ) + """ + + def test_strategy(self): + state_and_actions = [(0, C), (2, C), (0, D), (2, C)] * 3 + self.transitions_test(state_and_actions) + + state_and_actions = [(0, C), (2, D), (3, C)] * 3 + self.transitions_test(state_and_actions) + + state_and_actions = [(0, C), (2, D), (3, D)] + [(1, C), (1, D)] * 5 + self.transitions_test(state_and_actions) + + +class TestRipoff(TestFSMPlayer): + + name = "Ripoff" + player = axl.Ripoff + expected_classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + """ + transitions = ( + (1, C, 2, C), + (1, D, 3, C), + (2, C, 1, D), + (2, D, 3, C), + (3, C, 3, C), # Note that it's TFT in state 3 + (3, D, 3, D) + ) + """ + + def test_strategy(self): + state_and_actions = [(1, C), (2, C)] * 3 + [(1, D)] + [(3, C), (3, D)] * 5 + self.transitions_test(state_and_actions) + + state_and_actions = [(1, C), (2, D)] + [(3, D)] * 5 + self.transitions_test(state_and_actions) + + +class TestUsuallyCooperates(TestFSMPlayer): + name = "UsuallyCooperates" + player = axl.UsuallyCooperates + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + """ + transitions = ( + (1, C, 1, C), + (1, D, 2, C), + (2, C, 1, D), + (2, D, 1, C) + ) + """ + + def test_strategy(self): + # Never leaves state 1 if C + state_and_actions = [(1, C)] * 10 + self.transitions_test(state_and_actions) + # Visits state 2, but then comes back + # Defaults if DC streak is complete. Starts streak over either way. 
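+        # That is, the player defects once the D-then-C streak completes;
+        # either opponent move in state 2 sends the machine back to state 1.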
+ state_and_actions = [(1, D), (2, D)] + self.transitions_test(state_and_actions) + state_and_actions = [(1, D), (2, C)] + self.transitions_test(state_and_actions) + + +class TestUsuallyDefects(TestFSMPlayer): + name = "UsuallyDefects" + player = axl.UsuallyDefects + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + """ + transitions = ( + (1, C, 2, D), + (1, D, 1, D), + (2, C, 1, D), + (2, D, 1, C) + ) + """ + + def test_strategy(self): + # Never leaves state 1 if D + state_and_actions = [(1, D)] * 10 + self.transitions_test(state_and_actions) + # Visits state 2, but then comes back + # Cooperates if CD streak is complete. Starts streak over either way. + state_and_actions = [(1, C), (2, D)] + self.transitions_test(state_and_actions) + state_and_actions = [(1, C), (2, C)] + self.transitions_test(state_and_actions) + + +class TestSolutionB1(TestFSMPlayer): + + name = "SolutionB1" + player = axl.SolutionB1 + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + """ + transitions = ( + (1, C, 2, D), + (1, D, 1, D), + (2, C, 2, C), + (2, D, 3, C), + (3, C, 3, C), + (3, D, 3, C) + ) + """ + + def test_strategy(self): + + state_and_actions = ( + [(1, D)] * 3 + [(1, C)] + [(2, C)] * 3 + [(2, D)] + [(3, C), (3, D)] * 3 + ) + self.transitions_test(state_and_actions) + + +class TestSolutionB5(TestFSMPlayer): + + name = "SolutionB5" + player = axl.SolutionB5 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + """ + transitions = ( + (1, C, 2, C), + (1, D, 6, D), + (2, C, 2, C), + (2, D, 3, D), + (3, C, 6, C), + (3, D, 1, D), + (4, C, 3, C), + (4, D, 6, D), + (5, C, 5, D), + (5, D, 4, D), + (6, C, 3, C), + (6, D, 5, D) + ) + """ + + def test_strategy(self): + state_and_actions = ([(1, C)] + [(2, C)] * 3 + [(2, D), (3, D)]) * 2 + self.transitions_test(state_and_actions) + + state_and_actions = [(1, C), (2, D)] + [ + (3, C), + (6, D), + (5, C), + (5, D), + (4, C), + (3, C), + (6, C), + ] * 3 + self.transitions_test(state_and_actions) + + state_and_actions = [(1, D)] + [(6, D), (5, D), (4, D)] * 3 + self.transitions_test(state_and_actions) + + +class TestThumper(TestFSMPlayer): + + name = "Thumper" + player = axl.Thumper + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + """ + transitions = ( + (1, C, 1, C), + (1, D, 2, D), + (2, C, 1, D), + (2, D, 1, D) + ) + """ + + def test_strategy(self): + + state_and_actions = [(1, C)] * 3 + [(1, D), (2, C), (1, D), (2, D)] * 3 + self.transitions_test(state_and_actions) + + +class TestEvolvedFSM4(TestFSMPlayer): + + name = "Evolved FSM 4" + player = axl.EvolvedFSM4 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + """ + transitions = ( + (0, C, 0, C), + (0, D, 2, D), + (1, C, 3, D), + (1, D, 0, C), + (2, C, 2, 
D), + (2, D, 1, C), + (3, C, 3, D), + (3, D, 1, D) + ) + """ + + def test_strategy(self): + state_and_actions = [(0, C)] * 3 + [(0, D), (2, C), (2, D), (1, D)] * 3 + self.transitions_test(state_and_actions) + + state_and_actions = [ + (0, D), + (2, D), + (1, C), + (3, C), + (3, C), + (3, D), + (1, C), + (3, D), + (1, D), + ] * 3 + self.transitions_test(state_and_actions) + + +class TestEvolvedFSM16(TestFSMPlayer): + + name = "Evolved FSM 16" + player = axl.EvolvedFSM16 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + """ + FSM created by ML algorithm never called states 4 or 9, so they were deleted. + transitions = ( + (0, C, 0, C), + (0, D, 12, D), + (1, C, 3, D), + (1, D, 6, C), + (2, C, 2, D), + (2, D, 14, D), + (3, C, 3, D), + (3, D, 3, D), + + (5, C, 12, D), + (5, D, 10, D), + (6, C, 5, C), + (6, D, 12, D), + (7, C, 3, D), + (7, D, 1, C), + (8, C, 5, C), + (8, D, 5, C), + + (10, C, 11, D), + (10, D, 8, C), + (11, C, 15, D), + (11, D, 5, D), + (12, C, 8, C), + (12, D, 11, D), + (13, C, 13, D), + (13, D, 7, D), + (14, C, 13, D), + (14, D, 13, D), + (15, C, 15, D), + (15, D, 2, C) + ) + """ + + def test_strategy(self): + # finished: 0, + state_and_actions = [(0, C)] * 3 + [(0, D)] + [(12, D), (11, D), (5, C)] * 3 + self.transitions_test(state_and_actions) + + # finished: 0, 5, 10 + state_and_actions = [(0, D), (12, D), (11, D)] + [ + (5, D), + (10, C), + (11, D), + (5, D), + (10, D), + (8, C), + ] * 3 + self.transitions_test(state_and_actions) + + # finished: 0, 2, 5, 10, 11, 12, 15 + state_and_actions = ( + [ + (0, D), + (12, C), + (8, D), + (5, D), + (10, C), + (11, C), + (15, C), + (15, C), + (15, D), + ] + + [(2, C)] * 3 + + [(2, D), (14, C), (13, C)] + ) + self.transitions_test(state_and_actions) + + # finished: 0, 2, 3, 5, 10, 11, 12, 13, 14, 15 + to_state_fourteen = [(0, D), (12, D), (11, C), (15, D), (2, D)] + state_and_actions = ( + to_state_fourteen + + [(14, D), (13, C), (13, C), (13, D), (7, C)] + + [(3, D), (3, C)] * 3 + ) + self.transitions_test(state_and_actions) + + # finished: 0, 2, 3, 5, 7, 10, 11, 12, 13, 14, 15 + to_state_seven = to_state_fourteen + [(14, D), (13, D)] + state_and_actions = to_state_seven + [(7, D), (1, C)] + [(3, C)] * 5 + self.transitions_test(state_and_actions) + + # finished: 0, 1, 2, 3, 5, 10, 11, 12, 13, 14, 15 + state_and_actions = to_state_seven + [(7, D), (1, D), (6, C), (5, D), (10, C)] + self.transitions_test(state_and_actions) + + # finished: 0, 1, 2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15 + state_and_actions = to_state_seven + [ + (7, D), + (1, D), + (6, D), + (12, C), + (8, D), + (5, D), + ] + self.transitions_test(state_and_actions) + + +class TestEvolvedFSM16Noise05(TestFSMPlayer): + + name = "Evolved FSM 16 Noise 05" + player = axl.EvolvedFSM16Noise05 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + """ + FSM created by ML algorithm never called states 7 or 9, so they were deleted. 
+ transitions = ( + (0, C, 8, C), + (0, D, 3, D), + (1, C, 13, C), + (1, D, 15, D), + (2, C, 12, C), + (2, D, 3, D), + (3, C, 10, C), + (3, D, 3, D), + (4, C, 5, D), + (4, D, 4, D), + (5, C, 4, D), + (5, D, 10, D), + (6, C, 8, C), + (6, D, 6, D), + + (8, C, 2, C), + (8, D, 4, D), + + (10, C, 4, D), + (10, D, 1, D), + (11, C, 14, D), + (11, D, 13, C), + (12, C, 13, C), + (12, D, 2, C), + (13, C, 13, C), + (13, D, 6, C), + (14, C, 3, D), + (14, D, 13, D), + (15, C, 5, D), + (15, D, 11, C) + ) + """ + + def test_strategy(self): + # finished: 12, 13 + state_and_actions = [ + (0, C), + (8, C), + (2, C), + (12, D), + (2, C), + (12, C), + (13, C), + (13, C), + (13, D), + ] + [(6, D)] * 3 + self.transitions_test(state_and_actions) + + # finished 2, 3, 4, 12, 13 + state_and_actions = [ + (0, C), + (8, C), + (2, D), + (3, D), + (3, D), + (3, C), + (10, C), + (4, D), + (4, D), + (4, C), + (5, D), + ] + self.transitions_test(state_and_actions) + + # finished 0, 2, 3, 4, 6, 8, 10, 12, 13 + state_and_actions = [ + (0, D), + (3, C), + (10, D), + (1, C), + (13, D), + (6, C), + (8, D), + (4, C), + (5, C), + (4, C), + (5, D), + ] + self.transitions_test(state_and_actions) + + # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 13, 15 + state_and_actions = [ + (0, D), + (3, C), + (10, D), + (1, D), + (15, C), + (5, D), + (10, D), + (1, D), + (15, D), + (11, D), + ] + self.transitions_test(state_and_actions) + + # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 13, 15 + to_state_eleven = [(0, D), (3, C), (10, D), (1, D), (15, D)] + + state_and_actions = to_state_eleven + [(11, C), (14, C), (3, C), (10, D)] + self.transitions_test(state_and_actions) + + # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 15 + state_and_actions = to_state_eleven + [(11, D)] + [(13, C)] * 3 + self.transitions_test(state_and_actions) + + # finished 0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15 + state_and_actions = to_state_eleven + [(11, C), (14, D)] + [(13, C)] * 3 + self.transitions_test(state_and_actions) + + +class TestTF1(TestFSMPlayer): + name = "TF1" + player = axl.TF1 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + +class TestTF2(TestFSMPlayer): + name = "TF2" + player = axl.TF2 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (D, D), (D, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + +class TestTF3(TestFSMPlayer): + name = "TF3" + player = axl.TF3 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + +class TestEvolvableFSMPlayer(unittest.TestCase): + + player_class = EvolvableFSMPlayer + + def test_normalized_parameters(self): + self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters + ) + 
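+        # Passing transitions alone is still insufficient: an initial state
+        # and initial action are required as well.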
self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters, + transitions=[[0, C, 1, D], [0, D, 0, D], [1, C, 1, C], [1, D, 1, D]] + ) + + def test_init(self): + transitions = [[0, C, 1, D], [0, D, 0, D], [1, C, 1, C], [1, D, 1, D]] + player = axl.EvolvableFSMPlayer( + transitions=transitions, + initial_action=D, + initial_state=1 + ) + self.assertEqual(player.num_states, 2) + self.assertEqual(player.fsm.transitions(), transitions) + self.assertEqual(player.initial_action, D) + self.assertEqual(player.initial_state, 1) + + def test_vector_to_instance(self): + num_states = 4 + vector = [random.random() for _ in range(num_states * 4 + 1)] + player = axl.EvolvableFSMPlayer(num_states=num_states) + player.receive_vector(vector) + self.assertIsInstance(player, axl.EvolvableFSMPlayer) + + serialized = player.serialize_parameters() + deserialized_player = player.__class__.deserialize_parameters(serialized) + self.assertEqual(player, deserialized_player) + self.assertEqual(deserialized_player, deserialized_player.clone()) + + def test_create_vector_bounds(self): + num_states = 4 + player = axl.EvolvableFSMPlayer(num_states=num_states) + lb, ub = player.create_vector_bounds() + self.assertEqual(lb, [0] * (4 * num_states + 1)) + self.assertEqual(ub, [1] * (4 * num_states + 1)) + + +class TestEvolvableFSMPlayer2(TestEvolvablePlayer): + name = "EvolvableFSMPlayer" + player_class = axl.EvolvableFSMPlayer + parent_class = FSMPlayer + parent_kwargs = ["transitions", "initial_action", "initial_state"] + init_parameters = {"num_states": 4} + + +class TestEvolvableFSMPlayer3(TestEvolvablePlayer): + name = "EvolvableFSMPlayer" + player_class = axl.EvolvableFSMPlayer + parent_class = FSMPlayer + parent_kwargs = ["transitions", "initial_action", "initial_state"] + init_parameters = {"num_states": 16} + + +class TestEvolvableFSMPlayer4(TestEvolvablePlayer): + name = "EvolvableFSMPlayer" + player_class = axl.EvolvableFSMPlayer + parent_class = FSMPlayer + parent_kwargs = ["transitions", "initial_action", "initial_state"] + init_parameters = { + "transitions": ((1, C, 1, C), (1, D, 2, D), (2, C, 2, D), (2, D, 1, C)), + "initial_state": 1, + "initial_action": C + } + + +# Substitute EvolvedFSMPlayer as a regular FSMPlayer. +EvolvableFSMPlayerWithDefault = PartialClass( + EvolvableFSMPlayer, + transitions=((1, C, 1, C), (1, D, 1, D)), + initial_state=1, + initial_action=C) + + +class EvolvableFSMAsFSM(TestFSMPlayer): + player = EvolvableFSMPlayerWithDefault + + def test_equality_of_clone(self): + pass + + def test_equality_of_pickle_clone(self): + pass + + def test_repr(self): + pass diff --git a/axelrod/tests/strategies/test_forgiver.py b/axelrod/tests/strategies/test_forgiver.py new file mode 100644 index 000000000..e83b86d76 --- /dev/null +++ b/axelrod/tests/strategies/test_forgiver.py @@ -0,0 +1,102 @@ +"""Tests for the forgiver strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestForgiver(TestPlayer): + + name = "Forgiver" + player = axl.Forgiver + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent has defected more than 10 percent of the time, defect. 
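+        # Against Cooperator the defection ratio stays at zero, so Forgiver
+        # cooperates on every turn.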
+ self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 10) + + self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 10) + + def test_cooperates_if_opponent_defections_is_ten_pct_and_defects_if_opponent_defections_gt_ten_pct( + self + ): + final_action_lowers_defections_to_ten_percent = [D] + [C] * 9 + expected = [(C, D)] + [(D, C)] * 9 + self.versus_test( + axl.MockPlayer(actions=final_action_lowers_defections_to_ten_percent), + expected_actions=expected * 5, + ) + + def test_never_defects_if_opponent_defections_le_ten_percent(self): + defections_always_le_ten_percent = [C] * 9 + [D] + expected = [(C, C)] * 9 + [(C, D)] + self.versus_test( + axl.MockPlayer(actions=defections_always_le_ten_percent), + expected_actions=expected * 5, + ) + + +class TestForgivingTitForTat(TestPlayer): + + name = "Forgiving Tit For Tat" + player = axl.ForgivingTitForTat + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 5) + self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 5) + self.versus_test( + axl.Alternator(), expected_actions=[(C, C)] + [(C, D), (D, C)] * 5 + ) + + def test_never_defects_if_opponent_defections_le_ten_percent(self): + defections_always_le_ten_percent = [C] * 9 + [D] + expected = [(C, C)] * 9 + [(C, D)] + self.versus_test( + axl.MockPlayer(actions=defections_always_le_ten_percent), + expected_actions=expected * 5, + ) + + def test_plays_tit_for_tat_while_defections_gt_ten_percent(self): + before_tft = (18 * [C] + [D]) * 3 + [D, D, D] + only_cooperates = ([(C, C)] * 18 + [(C, D)]) * 3 + [(C, D), (C, D), (C, D)] + self.versus_test( + axl.MockPlayer(actions=before_tft), expected_actions=only_cooperates + ) + + now_alternator = before_tft + [D, C, D, C] + now_tft = only_cooperates + [(C, D), (D, C), (C, D), (D, C)] + self.versus_test( + axl.MockPlayer(actions=now_alternator), expected_actions=now_tft + ) + + def test_reverts_to_cooperator_if_defections_become_le_ten_percent(self): + four_defections = [D, D, D, D] + first_four = [(C, D)] + [(D, D)] * 3 + defections_at_ten_pct = four_defections + [C] * 36 + tft = first_four + [(D, C)] + [(C, C)] * 35 + + maintain_ten_pct = defections_at_ten_pct + ([C] * 9 + [D]) * 3 + now_cooperates = tft + ([(C, C)] * 9 + [(C, D)]) * 3 + self.versus_test( + axl.MockPlayer(actions=maintain_ten_pct), + expected_actions=now_cooperates, + ) diff --git a/axelrod/tests/strategies/test_gambler.py b/axelrod/tests/strategies/test_gambler.py new file mode 100755 index 000000000..2749526a3 --- /dev/null +++ b/axelrod/tests/strategies/test_gambler.py @@ -0,0 +1,585 @@ +"""Test for the Gambler strategy. Most tests come from the LookerUp test suite. 
+""" +import unittest + +import copy + +import random + +import axelrod as axl +from axelrod.load_data_ import load_pso_tables +from axelrod.strategies.lookerup import create_lookup_table_keys + +from .test_lookerup import convert_original_to_current +from .test_player import TestPlayer +from .test_evolvable_player import PartialClass, TestEvolvablePlayer + + +tables = load_pso_tables("pso_gambler.csv", directory="data") +C, D = axl.Action.C, axl.Action.D + + +class TestGambler(TestPlayer): + + name = "Gambler" + player = axl.Gambler + + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + expected_class_classifier = copy.copy(expected_classifier) + + def test_strategy(self): + tft_table = {((), (D,), ()): 0, ((), (C,), ()): 1} + self.versus_test( + axl.Alternator(), + expected_actions=[(C, C)] + [(C, D), (D, C)] * 5, + init_kwargs={"lookup_dict": tft_table}, + ) + + def test_stochastic_values(self): + stochastic_lookup = {((), (), ()): 0.3} + expected_actions = [(C, C), (D, C), (D, C), (C, C), (D, C)] + self.versus_test( + axl.Cooperator(), + expected_actions=expected_actions, + init_kwargs={"lookup_dict": stochastic_lookup}, + seed=1, + ) + + +class TestPSOGamblerMem1(TestPlayer): + + name = "PSO Gambler Mem1" + player = axl.PSOGamblerMem1 + + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + expected_class_classifier = copy.copy(expected_classifier) + + def test_new_data(self): + original_data = { + ("", "C", "C"): 1.0, + ("", "C", "D"): 0.52173487, + ("", "D", "C"): 0.0, + ("", "D", "D"): 0.12050939, + } + converted_original = convert_original_to_current(original_data) + self.assertEqual(self.player().lookup_dict, converted_original) + + def test_strategy(self): + vs_cooperator = [(C, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=vs_cooperator) + + def test_defects_forever_with_correct_conditions(self): + seed = 1 + opponent_actions = [D, D] + [C] * 10 + expected = [(C, D), (C, D), (D, C)] + [(D, C)] * 9 + self.versus_test( + axl.MockPlayer(actions=opponent_actions), + expected_actions=expected, + seed=seed, + ) + + +class TestPSOGambler1_1_1(TestPlayer): + + name = "PSO Gambler 1_1_1" + player = axl.PSOGambler1_1_1 + + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_new_data(self): + original_data = { + ("C", "C", "C"): 1.0, + ("C", "C", "D"): 0.12304797, + ("C", "D", "C"): 0.0, + ("C", "D", "D"): 0.13581423, + ("D", "C", "C"): 1.0, + ("D", "C", "D"): 0.57740178, + ("D", "D", "C"): 0.0, + ("D", "D", "D"): 0.11886807, + } + converted_original = convert_original_to_current(original_data) + self.assertEqual(self.player().lookup_dict, converted_original) + + def test_cooperate_forever(self): + seed = 2 + opponent = [D] * 3 + [C] * 10 + expected = [(C, D), (D, D), (D, D)] + [(C, C)] * 10 + self.versus_test( + axl.MockPlayer(opponent), expected_actions=expected, seed=seed + ) + + def test_defect_forever(self): + seed = 2 + opponent_actions = [C] + [D] + [C] * 10 + expected = [(C, C), (C, D)] + [(D, C)] * 10 + self.versus_test( + axl.MockPlayer(opponent_actions), 
expected_actions=expected, seed=seed + ) + + opponent_actions = [D] + [C] * 10 + expected = [(C, D)] + [(D, C)] * 10 + self.versus_test( + axl.MockPlayer(opponent_actions), expected_actions=expected, seed=seed + ) + + +class TestPSOGambler2_2_2(TestPlayer): + + name = "PSO Gambler 2_2_2" + player = axl.PSOGambler2_2_2 + + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_new_data(self): + original_data = { + ("CC", "CC", "CC"): 1.0, + ("CC", "CC", "CD"): 1.0, + ("CC", "CC", "DC"): 0.0, + ("CC", "CC", "DD"): 0.02126434, + ("CC", "CD", "CC"): 0.0, + ("CC", "CD", "CD"): 1.0, + ("CC", "CD", "DC"): 1.0, + ("CC", "CD", "DD"): 0.0, + ("CC", "DC", "CC"): 0.0, + ("CC", "DC", "CD"): 0.0, + ("CC", "DC", "DC"): 0.0, + ("CC", "DC", "DD"): 0.0, + ("CC", "DD", "CC"): 0.0, + ("CC", "DD", "CD"): 0.0, + ("CC", "DD", "DC"): 0.0, + ("CC", "DD", "DD"): 1.0, + ("CD", "CC", "CC"): 1.0, + ("CD", "CC", "CD"): 0.95280465, + ("CD", "CC", "DC"): 0.80897541, + ("CD", "CC", "DD"): 0.0, + ("CD", "CD", "CC"): 0.0, + ("CD", "CD", "CD"): 0.0, + ("CD", "CD", "DC"): 0.0, + ("CD", "CD", "DD"): 0.65147565, + ("CD", "DC", "CC"): 0.15412392, + ("CD", "DC", "CD"): 0.24922166, + ("CD", "DC", "DC"): 0.0, + ("CD", "DC", "DD"): 0.0, + ("CD", "DD", "CC"): 0.0, + ("CD", "DD", "CD"): 0.0, + ("CD", "DD", "DC"): 0.0, + ("CD", "DD", "DD"): 0.24523149, + ("DC", "CC", "CC"): 1.0, + ("DC", "CC", "CD"): 0.0, + ("DC", "CC", "DC"): 0.0, + ("DC", "CC", "DD"): 0.43278586, + ("DC", "CD", "CC"): 1.0, + ("DC", "CD", "CD"): 0.0, + ("DC", "CD", "DC"): 0.23563137, + ("DC", "CD", "DD"): 1.0, + ("DC", "DC", "CC"): 1.0, + ("DC", "DC", "CD"): 1.0, + ("DC", "DC", "DC"): 0.00227615, + ("DC", "DC", "DD"): 0.0, + ("DC", "DD", "CC"): 0.0, + ("DC", "DD", "CD"): 0.0, + ("DC", "DD", "DC"): 0.0, + ("DC", "DD", "DD"): 1.0, + ("DD", "CC", "CC"): 0.0, + ("DD", "CC", "CD"): 0.0, + ("DD", "CC", "DC"): 0.0, + ("DD", "CC", "DD"): 0.0, + ("DD", "CD", "CC"): 0.15140743, + ("DD", "CD", "CD"): 0.0, + ("DD", "CD", "DC"): 0.0, + ("DD", "CD", "DD"): 0.0, + ("DD", "DC", "CC"): 0.0, + ("DD", "DC", "CD"): 0.0, + ("DD", "DC", "DC"): 0.0, + ("DD", "DC", "DD"): 1.0, + ("DD", "DD", "CC"): 0.0, + ("DD", "DD", "CD"): 1.0, + ("DD", "DD", "DC"): 0.77344942, + ("DD", "DD", "DD"): 0.0, + } + converted_original = convert_original_to_current(original_data) + self.assertEqual(self.player().lookup_dict, converted_original) + + def test_vs_defector(self): + expected = [(C, D), (C, D)] + [(D, D)] * 10 + self.versus_test(axl.Defector(), expected_actions=expected) + + def test_vs_cooperator(self): + expected = [(C, C)] * 10 + self.versus_test(axl.Cooperator(), expected_actions=expected) + + def test_vs_alternator(self): + seed = 1 + expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (D, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=expected, seed=seed) + + def test_vs_DCDDC(self): + seed = 2 + opponent_actions = [D, C, D, D, C] + expected = [ + (C, D), + (C, C), + (D, D), + (D, D), + (C, C), + (D, D), + (D, C), + (D, D), + (D, D), + (C, C), + ] + self.versus_test( + axl.MockPlayer(actions=opponent_actions), + expected_actions=expected, + seed=seed, + ) + + new_seed = 139 # First seed with different result. 
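+        # Under the new seed only the sixth outcome changes: the player
+        # cooperates where it previously defected.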
+ expected[5] = (C, D) + self.versus_test( + axl.MockPlayer(actions=opponent_actions), + expected_actions=expected, + seed=new_seed, + ) + + +class TestPSOGambler2_2_2_Noise05(TestPlayer): + name = "PSO Gambler 2_2_2 Noise 05" + player = axl.PSOGambler2_2_2_Noise05 + + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_new_data(self): + original_data = { + ("CC", "CC", "CC"): 1.0, + ("CC", "CC", "CD"): 0.0, + ("CC", "CC", "DC"): 1.0, + ("CC", "CC", "DD"): 0.63548102, + ("CC", "CD", "CC"): 1.0, + ("CC", "CD", "CD"): 1.0, + ("CC", "CD", "DC"): 1.0, + ("CC", "CD", "DD"): 0.0, + ("CC", "DC", "CC"): 0.0, + ("CC", "DC", "CD"): 1.0, + ("CC", "DC", "DC"): 0.0, + ("CC", "DC", "DD"): 0.0, + ("CC", "DD", "CC"): 1.0, + ("CC", "DD", "CD"): 0.0, + ("CC", "DD", "DC"): 0.0, + ("CC", "DD", "DD"): 0.0, + ("CD", "CC", "CC"): 1.0, + ("CD", "CC", "CD"): 1.0, + ("CD", "CC", "DC"): 0.0, + ("CD", "CC", "DD"): 0.0, + ("CD", "CD", "CC"): 0.0, + ("CD", "CD", "CD"): 0.13863175, + ("CD", "CD", "DC"): 1.0, + ("CD", "CD", "DD"): 0.7724137, + ("CD", "DC", "CC"): 0.0, + ("CD", "DC", "CD"): 1.0, + ("CD", "DC", "DC"): 0.0, + ("CD", "DC", "DD"): 0.07127653, + ("CD", "DD", "CC"): 0.0, + ("CD", "DD", "CD"): 1.0, + ("CD", "DD", "DC"): 0.28124022, + ("CD", "DD", "DD"): 0.0, + ("DC", "CC", "CC"): 0.0, + ("DC", "CC", "CD"): 0.98603825, + ("DC", "CC", "DC"): 0.0, + ("DC", "CC", "DD"): 0.0, + ("DC", "CD", "CC"): 1.0, + ("DC", "CD", "CD"): 0.06434619, + ("DC", "CD", "DC"): 1.0, + ("DC", "CD", "DD"): 1.0, + ("DC", "DC", "CC"): 1.0, + ("DC", "DC", "CD"): 0.50999729, + ("DC", "DC", "DC"): 0.00524508, + ("DC", "DC", "DD"): 1.0, + ("DC", "DD", "CC"): 1.0, + ("DC", "DD", "CD"): 1.0, + ("DC", "DD", "DC"): 1.0, + ("DC", "DD", "DD"): 1.0, + ("DD", "CC", "CC"): 0.0, + ("DD", "CC", "CD"): 1.0, + ("DD", "CC", "DC"): 0.16240799, + ("DD", "CC", "DD"): 0.0, + ("DD", "CD", "CC"): 0.0, + ("DD", "CD", "CD"): 1.0, + ("DD", "CD", "DC"): 1.0, + ("DD", "CD", "DD"): 0.0, + ("DD", "DC", "CC"): 0.0, + ("DD", "DC", "CD"): 1.0, + ("DD", "DC", "DC"): 0.87463905, + ("DD", "DC", "DD"): 0.0, + ("DD", "DD", "CC"): 0.0, + ("DD", "DD", "CD"): 1.0, + ("DD", "DD", "DC"): 0.0, + ("DD", "DD", "DD"): 0.0, + } + converted_original = convert_original_to_current(original_data) + self.assertEqual(self.player().lookup_dict, converted_original) + + def test_vs_defector(self): + expected = [(C, D), (C, D)] + [(D, D)] * 10 + self.versus_test(axl.Defector(), expected_actions=expected) + + def test_vs_cooperator(self): + expected = [(C, C)] * 10 + self.versus_test(axl.Cooperator(), expected_actions=expected) + + def test_vs_alternator(self): + seed = 2 + expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (D, D), (C, C)] + self.versus_test(axl.Alternator(), expected_actions=expected, seed=seed) + + new_seed = 1 + expected[4] = (C, C) + expected[6] = (D, C) + self.versus_test(axl.Alternator(), expected_actions=expected, seed=new_seed) + + def test_vs_DCDDC(self): + opponent_actions = [D, C, D, D, C] + + seed = 1 + expected = [ + (C, D), + (C, C), + (D, D), + (D, D), + (C, C), + (D, D), + (D, C), + (C, D), + (C, D), + ] + self.versus_test( + axl.MockPlayer(opponent_actions), expected_actions=expected, seed=seed + ) + + new_seed = 3 + expected[8] = (D, D) + self.versus_test( + axl.MockPlayer(opponent_actions), + expected_actions=expected, + seed=new_seed, + ) + + new_seed = 2 + new_expected = 
expected[:6] + [(C, C), (D, D), (D, D)] + self.versus_test( + axl.MockPlayer(opponent_actions), + expected_actions=new_expected, + seed=new_seed, + ) + + +class TestZDMem2(TestPlayer): + name = "ZD-Mem2" + player = axl.ZDMem2 + + expected_classifier = { + "memory_depth": 2, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_new_data(self): + original_data = { + ("", "CC", "CC"): 11 / 12, + ("", "CC", "CD"): 4 / 11, + ("", "CC", "DC"): 7 / 9, + ("", "CC", "DD"): 1 / 10, + ("", "CD", "CC"): 5 / 6, + ("", "CD", "CD"): 3 / 11, + ("", "CD", "DC"): 7 / 9, + ("", "CD", "DD"): 1 / 10, + ("", "DC", "CC"): 2 / 3, + ("", "DC", "CD"): 1 / 11, + ("", "DC", "DC"): 7 / 9, + ("", "DC", "DD"): 1 / 10, + ("", "DD", "CC"): 3 / 4, + ("", "DD", "CD"): 2 / 11, + ("", "DD", "DC"): 7 / 9, + ("", "DD", "DD"): 1 / 10, + } + converted_original = convert_original_to_current(original_data) + self.assertEqual(self.player().lookup_dict, converted_original) + + def test_vs_defector(self): + seed = 5 + expected = [ + (C, D), + (C, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (C, D), + (D, D), + ] + + self.versus_test(axl.Defector(), expected_actions=expected, seed=seed) + + def test_vs_cooperator(self): + seed = 5 + expected = [ + (C, C), + (C, C), + (C, C), + (C, C), + (C, C), + (D, C), + (C, C), + (D, C), + (C, C), + (C, C), + ] + + self.versus_test(axl.Cooperator(), expected_actions=expected, seed=seed) + + def test_vs_alternator(self): + seed = 2 + expected = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=expected, seed=seed) + + new_seed = 1 + expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=expected, seed=new_seed) + + +class TestEvolvableGambler(unittest.TestCase): + + def test_receive_vector(self): + plays, op_plays, op_start_plays = 1, 1, 1 + player = axl.EvolvableGambler( + parameters=(plays, op_plays, op_start_plays)) + + self.assertRaises(AttributeError, axl.EvolvableGambler.__getattribute__, + *[player, 'vector']) + + vector = [random.random() for _ in range(8)] + player.receive_vector(vector) + self.assertEqual(player.pattern, vector) + + def test_vector_to_instance(self): + plays, op_plays, op_start_plays = 1, 1, 1 + player = axl.EvolvableGambler( + parameters=(plays, op_plays, op_start_plays)) + + vector = [random.random() for _ in range(8)] + player.receive_vector(vector) + keys = create_lookup_table_keys(player_depth=plays, op_depth=op_plays, + op_openings_depth=op_start_plays) + action_dict = dict(zip(keys, vector)) + self.assertEqual(player._lookup.dictionary, action_dict) + + def test_create_vector_bounds(self): + plays, op_plays, op_start_plays = 1, 1, 1 + player = axl.EvolvableGambler( + parameters=(plays, op_plays, op_start_plays)) + lb, ub = player.create_vector_bounds() + self.assertIsInstance(lb, list) + self.assertIsInstance(ub, list) + self.assertEqual(len(lb), 8) + self.assertEqual(len(ub), 8) + + def test_mutate_value_bounds(self): + self.assertEqual(axl.EvolvableGambler.mutate_value(2), 1) + self.assertEqual(axl.EvolvableGambler.mutate_value(-2), 0) + + +class TestEvolvableGambler2(TestEvolvablePlayer): + name = "EvolvableGambler" + player_class = axl.EvolvableGambler + parent_class = axl.Gambler + parent_kwargs = ["lookup_dict"] + init_parameters = {"parameters": (1, 1, 1), + "initial_actions": (C,)} + + 
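+# The lookup table behind an EvolvableGambler has one entry per key, so its
+# pattern length is 2 ** (plays + op_plays + op_start_plays). A minimal
+# sketch of the arithmetic, reusing create_lookup_table_keys (imported at the
+# top of this module); the (2, 2, 2) numbers are illustrative:
+#
+#     keys = create_lookup_table_keys(
+#         player_depth=2, op_depth=2, op_openings_depth=2
+#     )
+#     assert len(keys) == 2 ** (2 + 2 + 2)  # 64, matching the 64-element
+#                                           # pattern used further below
+#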
+class TestEvolvableGambler3(TestEvolvablePlayer): + name = "EvolvableGambler" + player_class = axl.EvolvableGambler + parent_class = axl.Gambler + parent_kwargs = ["lookup_dict"] + init_parameters = {"parameters": (3, 2, 1), + "initial_actions": (C, C, C,)} + + +class TestEvolvableGambler4(TestEvolvablePlayer): + name = "EvolvableGambler" + player_class = axl.EvolvableGambler + parent_class = axl.Gambler + parent_kwargs = ["lookup_dict"] + init_parameters = {"parameters": (2, 2, 2), + "pattern": [random.random() for _ in range(64)], + "initial_actions": (C, C,)} + + +# Substitute EvolvableHMMPlayer as a regular HMMPlayer. +EvolvableGamblerWithDefault = PartialClass( + axl.EvolvableGambler, + pattern=tables[("PSO Gambler 2_2_2", 2, 2, 2)], + parameters=(2, 2, 2), + initial_actions=(C, C,) +) + + +class EvolvableGamblerAsGambler(TestPSOGambler2_2_2): + player = EvolvableGamblerWithDefault + + def test_equality_of_clone(self): + pass + + def test_equality_of_pickle_clone(self): + pass + + def test_repr(self): + pass diff --git a/axelrod/tests/strategies/test_geller.py b/axelrod/tests/strategies/test_geller.py new file mode 100644 index 000000000..e2b7bdf67 --- /dev/null +++ b/axelrod/tests/strategies/test_geller.py @@ -0,0 +1,132 @@ +"""Tests for the Geller strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestGeller(TestPlayer): + + name = "Geller" + player = axl.Geller + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": True, # Finds out what opponent will do + "manipulates_state": False, + "manipulates_source": False, + } + + @classmethod + def tearDownClass(cls): + """After all tests have run, makes sure the Darwin genome is reset.""" + axl.Darwin.reset_genome() + super(TestGeller, cls).tearDownClass() + + def setUp(self): + """Each test starts with the basic Darwin genome.""" + axl.Darwin.reset_genome() + super(TestGeller, self).setUp() + + def test_foil_strategy_inspection(self): + axl.seed(2) + player = self.player() + self.assertEqual(player.foil_strategy_inspection(), D) + self.assertEqual(player.foil_strategy_inspection(), D) + self.assertEqual(player.foil_strategy_inspection(), C) + + def test_strategy(self): + """Should cooperate against cooperators and defect against defectors.""" + self.versus_test(axl.Defector(), expected_actions=[(D, D)] * 5) + self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 5) + self.versus_test(axl.Alternator(), expected_actions=[(C, C), (D, D)] * 5) + + def test_strategy_against_lookerup_players(self): + """ + Regression test for a bug discussed in + https://github.com/Axelrod-Python/Axelrod/issues/1185 + """ + self.versus_test( + axl.EvolvedLookerUp1_1_1(), expected_actions=[(C, C), (C, C)] + ) + self.versus_test( + axl.EvolvedLookerUp2_2_2(), expected_actions=[(C, C), (C, C)] + ) + + def test_returns_foil_inspection_strategy_of_opponent(self): + self.versus_test( + axl.GellerDefector(), + expected_actions=[(D, D), (D, D), (D, C), (D, C)], + seed=2, + ) + + self.versus_test(axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)]) + + self.versus_test( + axl.MindReader(), expected_actions=[(D, D), (D, D), (D, D)], seed=1 + ) + + +class TestGellerCooperator(TestGeller): + + name = "Geller Cooperator" + player = axl.GellerCooperator + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + 
"inspects_source": True, # Finds out what opponent will do + "manipulates_source": False, + "manipulates_state": False, + } + + def test_foil_strategy_inspection(self): + player = self.player() + self.assertEqual(player.foil_strategy_inspection(), C) + + def test_returns_foil_inspection_strategy_of_opponent(self): + self.versus_test( + axl.GellerDefector(), expected_actions=[(D, C), (D, C), (D, C), (D, C)] + ) + + self.versus_test(axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)]) + + self.versus_test( + axl.MindReader(), expected_actions=[(D, D), (D, D), (D, D)] + ) + + +class TestGellerDefector(TestGeller): + + name = "Geller Defector" + player = axl.GellerDefector + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": True, # Finds out what opponent will do + "manipulates_source": False, + "manipulates_state": False, + } + + def test_foil_strategy_inspection(self): + player = self.player() + self.assertEqual(player.foil_strategy_inspection(), D) + + def test_returns_foil_inspection_strategy_of_opponent(self): + + self.versus_test( + axl.GellerDefector(), expected_actions=[(D, D), (D, D), (D, D), (D, D)] + ) + + self.versus_test(axl.Darwin(), expected_actions=[(C, C), (C, C), (C, C)]) + + self.versus_test( + axl.MindReader(), expected_actions=[(D, D), (D, D), (D, D)] + ) diff --git a/axelrod/tests/strategies/test_gobymajority.py b/axelrod/tests/strategies/test_gobymajority.py new file mode 100644 index 000000000..6cd553880 --- /dev/null +++ b/axelrod/tests/strategies/test_gobymajority.py @@ -0,0 +1,179 @@ +"""Tests for the GoByMajority strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestHardGoByMajority(TestPlayer): + + name = "Hard Go By Majority" + player = axl.HardGoByMajority + default_soft = False + + expected_classifier = { + "stochastic": False, + "memory_depth": float("inf"), + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_memory_depth_infinite_soft_is_false(self): + init_kwargs = {} + if self.default_soft: + init_kwargs["soft"] = False + + opponent_actions = [C] * 50 + [D] * 100 + [C] * 52 + actions = ( + [(D, C)] + + [(C, C)] * 49 + + [(C, D)] * 50 + + [(D, D)] * 50 + + [(D, C)] * 51 + + [(C, C)] + ) + opponent = axl.MockPlayer(actions=opponent_actions) + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + def test_memory_depth_even_soft_is_false(self): + memory_depth = 4 + init_kwargs = {"memory_depth": memory_depth} + if self.default_soft: + init_kwargs["soft"] = False + + opponent = axl.MockPlayer(actions=[C] * memory_depth + [D] * memory_depth) + actions = ( + [(D, C)] + + [(C, C)] * 3 + + [(C, D)] * 2 + + [(D, D)] * 2 + + [(D, C)] * 3 + + [(C, C)] + ) + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + def test_memory_depth_odd(self): + memory_depth = 5 + init_kwargs = {"memory_depth": memory_depth} + if self.default_soft: + first_action = [(C, C)] + else: + first_action = [(D, C)] + opponent = axl.MockPlayer(actions=[C] * memory_depth + [D] * memory_depth) + actions = ( + first_action + + [(C, C)] * 4 + + [(C, D)] * 3 + + [(D, D)] * 2 + + [(D, C)] * 3 + + [(C, C)] * 2 + ) + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + def test_default_values(self): + player = self.player() + 
self.assertEqual(player.soft, self.default_soft) + self.assertEqual(player.memory, 0) + + +class TestGoByMajority(TestHardGoByMajority): + + name = "Soft Go By Majority" + player = axl.GoByMajority + default_soft = True + + def test_memory_depth_infinite_soft_is_true(self): + opponent_actions = [C] * 50 + [D] * 100 + [C] * 52 + actions = ( + [(C, C)] * 50 + [(C, D)] * 51 + [(D, D)] * 49 + [(D, C)] * 50 + [(C, C)] * 2 + ) + opponent = axl.MockPlayer(actions=opponent_actions) + self.versus_test(opponent, expected_actions=actions) + + def test_memory_depth_even_soft_is_true(self): + memory_depth = 4 + init_kwargs = {"memory_depth": memory_depth} + + opponent = axl.MockPlayer([C] * memory_depth + [D] * memory_depth) + actions = [(C, C)] * 4 + [(C, D)] * 3 + [(D, D)] + [(D, C)] * 2 + [(C, C)] * 2 + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + def test_name(self): + player = self.player(soft=True) + self.assertEqual(player.name, "Soft Go By Majority") + player = self.player(soft=False) + self.assertEqual(player.name, "Hard Go By Majority") + player = self.player(memory_depth=5) + self.assertEqual(player.name, "Soft Go By Majority: 5") + + def test_str(self): + player = self.player(soft=True) + name = str(player) + self.assertEqual(name, "Soft Go By Majority") + player = self.player(soft=False) + name = str(player) + self.assertEqual(name, "Hard Go By Majority") + player = self.player(memory_depth=5) + name = str(player) + self.assertEqual(name, "Soft Go By Majority: 5") + + +def factory_TestGoByRecentMajority(memory_depth, soft=True): + + prefix = "Hard" + prefix2 = "Hard" + if soft: + prefix = "Soft" + prefix2 = "" + + class TestGoByRecentMajority(TestPlayer): + + name = "{} Go By Majority: {}".format(prefix, memory_depth) + player = getattr(axl, "{}GoByMajority{}".format(prefix2, memory_depth)) + + expected_classifier = { + "stochastic": False, + "memory_depth": memory_depth, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # for example memory_depth=2 plays against [C, C, D, D] + # soft actions = [(C, C), (C, C), (C, D), (C, D)] + # hard actions = [(D, C), (C, C), (C, D), (D, D)] + opponent_actions = [C] * memory_depth + [D] * memory_depth + opponent = axl.MockPlayer(actions=opponent_actions) + if soft: + first_player_action = [C] + else: + first_player_action = [D] + if memory_depth % 2 == 1 or soft: + cooperations = int(memory_depth * 1.5) + else: + cooperations = int(memory_depth * 1.5) - 1 + defections = len(opponent_actions) - cooperations - 1 + player_actions = first_player_action + [C] * cooperations + [D] * defections + + actions = list(zip(player_actions, opponent_actions)) + self.versus_test(opponent, expected_actions=actions) + + return TestGoByRecentMajority + + +TestGoByMajority5 = factory_TestGoByRecentMajority(5) +TestGoByMajority10 = factory_TestGoByRecentMajority(10) +TestGoByMajority20 = factory_TestGoByRecentMajority(20) +TestGoByMajority40 = factory_TestGoByRecentMajority(40) +TestHardGoByMajority5 = factory_TestGoByRecentMajority(5, soft=False) +TestHardGoByMajority10 = factory_TestGoByRecentMajority(10, soft=False) +TestHardGoByMajority20 = factory_TestGoByRecentMajority(20, soft=False) +TestHardGoByMajority40 = factory_TestGoByRecentMajority(40, soft=False) diff --git a/axelrod/tests/strategies/test_gradualkiller.py b/axelrod/tests/strategies/test_gradualkiller.py new file mode 100644 index 
000000000..7d2729862 --- /dev/null +++ b/axelrod/tests/strategies/test_gradualkiller.py @@ -0,0 +1,76 @@ +"""Tests for the Gradual Killer strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestGradualKiller(TestPlayer): + + name = "Gradual Killer: (D, D, D, D, D, C, C)" + player = axl.GradualKiller + expected_classifier = { + "memory_depth": float("Inf"), + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + first_seven = [D, D, D, D, D, C, C] + + def test_first_seven_moves_always_the_same(self): + opponent = axl.Cooperator() + actions = list(zip(self.first_seven, [C] * 7)) + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Defector() + actions = list(zip(self.first_seven, [D] * 7)) + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Alternator() + actions = list(zip(self.first_seven, [C, D] * 4)) + self.versus_test(opponent, expected_actions=actions) + + def test_effect_of_strategy_with_history_CC(self): + """Continues with C if opponent played CC on 6 and 7.""" + opponent_actions = [D] * 5 + [C, C] + [D, C] * 20 + opponent = axl.MockPlayer(actions=opponent_actions) + + start = list(zip(self.first_seven, opponent_actions[:7])) + actions = start + [(C, D), (C, C)] * 20 + + self.versus_test(opponent, expected_actions=actions) + + def test_effect_of_strategy_with_history_CD(self): + """Continues with C if opponent played CD on 6 and 7.""" + opponent_actions = [D] * 5 + [C, D] + [D, C] * 20 + opponent = axl.MockPlayer(actions=opponent_actions) + + start = list(zip(self.first_seven, opponent_actions[:7])) + actions = start + [(C, D), (C, C)] * 20 + + self.versus_test(opponent, expected_actions=actions) + + def test_effect_of_strategy_with_history_DC(self): + """Continues with C if opponent played DC on 6 and 7.""" + opponent_actions = [D] * 5 + [D, C] + [D, C] * 20 + opponent = axl.MockPlayer(actions=opponent_actions) + + start = list(zip(self.first_seven, opponent_actions[:7])) + actions = start + [(C, D), (C, C)] * 20 + + self.versus_test(opponent, expected_actions=actions) + + def test_effect_of_strategy_with_history_DD(self): + """Continues with D if opponent played DD on 6 and 7.""" + opponent_actions = [C] * 5 + [D, D] + [D, C] * 20 + opponent = axl.MockPlayer(actions=opponent_actions) + + start = list(zip(self.first_seven, opponent_actions[:7])) + actions = start + [(D, D), (D, C)] * 20 + + self.versus_test(opponent, expected_actions=actions) diff --git a/axelrod/tests/strategies/test_grudger.py b/axelrod/tests/strategies/test_grudger.py new file mode 100644 index 000000000..79194b5d8 --- /dev/null +++ b/axelrod/tests/strategies/test_grudger.py @@ -0,0 +1,278 @@ +"""Tests for Grudger strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestGrudger(TestPlayer): + + name = "Grudger" + player = axl.Grudger + expected_classifier = { + "memory_depth": float('inf'), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent defects at any point then the player will defect forever. 
+ opponent = axl.Cooperator() + actions = [(C, C)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Defector() + actions = [(C, D)] + [(D, D)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent_actions = [C] * 10 + [D] + [C] * 20 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = [(C, C)] * 10 + [(C, D)] + [(D, C)] * 20 + self.versus_test(opponent, expected_actions=actions) + + +class TestForgetfulGrudger(TestPlayer): + + name = "Forgetful Grudger" + player = axl.ForgetfulGrudger + expected_classifier = { + "memory_depth": 10, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent defects at any point then the player will respond with + # D ten times and then continue to check for defections. + opponent = axl.Cooperator() + actions = [(C, C)] * 20 + attrs = {"grudged": False, "mem_length": 10, "grudge_memory": 0} + self.versus_test(opponent, expected_actions=actions, attrs=attrs) + + for i in range(1, 15): + opponent = axl.Defector() + actions = [(C, D)] + [(D, D)] * i + memory = i if i <= 10 else i - 10 + attrs = {"grudged": True, "mem_length": 10, "grudge_memory": memory} + self.versus_test(opponent, expected_actions=actions, attrs=attrs) + + opponent_actions = [C] * 2 + [D] + [C] * 10 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = ([(C, C)] * 2 + [(C, D)] + [(D, C)] * 10) * 3 + [(C, C)] + attrs = {"grudged": False, "mem_length": 10, "grudge_memory": 0} + self.versus_test(opponent, expected_actions=actions, attrs=attrs) + + +class TestOppositeGrudger(TestPlayer): + + name = "Opposite Grudger" + player = axl.OppositeGrudger + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent cooperates at any point then the player will cooperate + # forever. + opponent = axl.Cooperator() + actions = [(D, C)] + [(C, C)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Defector() + actions = [(D, D)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent_actions = [C] + [D] * 30 + opponent = axl.MockPlayer(actions=opponent_actions) + expected = [(D, C)] + [(C, D)] * 30 + self.versus_test(opponent, expected_actions=expected) + + +class TestAggravater(TestPlayer): + + name = "Aggravater" + player = axl.Aggravater + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent defects at any point then the player will defect forever. + # Always defects on first three turns. 
+ opponent = axl.Cooperator() + actions = [(D, C)] * 3 + [(C, C)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Defector() + actions = [(D, D)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent_actions = [C] * 10 + [D] + [C] * 20 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = [(D, C)] * 3 + [(C, C)] * 7 + [(C, D)] + [(D, C)] * 20 + self.versus_test(opponent, expected_actions=actions) + + +class TestSoftGrudger(TestPlayer): + + name = "Soft Grudger" + player = axl.SoftGrudger + expected_classifier = { + "memory_depth": 6, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent defects at any point then the player will respond with + # D, D, D, D, C, C and then continue to check for defections. + grudge_response_d = [(D, D)] * 4 + [(C, D)] * 2 + grudge_response_c = [(D, C)] * 4 + [(C, C)] * 2 + + opponent = axl.Cooperator() + actions = [(C, C)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Defector() + actions = [(C, D)] + grudge_response_d * 5 + self.versus_test(opponent, expected_actions=actions) + + opponent_actions = [C] * 10 + [D] + opponent = axl.MockPlayer(actions=opponent_actions) + actions_start = [(C, C)] * 10 + [(C, D)] + subsequent = grudge_response_c + [(C, C)] * 4 + [(C, D)] + actions = actions_start + subsequent * 5 + self.versus_test(opponent, expected_actions=actions) + + def test_reset(self): + player = self.player() + player.grudged = True + player.grudge_memory = 5 + player.reset() + self.assertFalse(player.grudged) + self.assertEqual(player.grudge_memory, 0) + + +class TestGrudgerAlternator(TestPlayer): + + name = "GrudgerAlternator" + player = axl.GrudgerAlternator + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent defects at any point then the player will alternate D C. + opponent = axl.Cooperator() + actions = [(C, C)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Defector() + actions = [(C, D)] + [(D, D), (C, D)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent_actions = [C] * 10 + [D] + [C] * 20 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = [(C, C)] * 10 + [(C, D)] + [(D, C), (C, C)] * 10 + self.versus_test(opponent, expected_actions=actions) + + +class TestEasyGo(TestPlayer): + + name = "EasyGo" + player = axl.EasyGo + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent defects at any point then the player will cooperate + # forever. 
+ opponent = axl.Cooperator() + actions = [(D, C)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Defector() + actions = [(D, D)] + [(C, D)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent_actions = [C] * 10 + [D, C] * 20 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = [(D, C)] * 10 + [(D, D)] + [(C, C), (C, D)] * 19 + self.versus_test(opponent, expected_actions=actions) + + +class TestGeneralSoftGrudger(TestPlayer): + + name = "General Soft Grudger: n=1,d=4,c=2" + player = axl.GeneralSoftGrudger + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """Test strategy with multiple initial parameters""" + + # Testing default parameters of n=1, d=4, c=2 (same as Soft Grudger) + actions = [(C, D), (D, D), (D, C), (D, C), (D, D), (C, D), (C, C), (C, C)] + self.versus_test(axl.MockPlayer(actions=[D, D, C, C]), expected_actions=actions) + + # Testing n=2, d=4, c=2 + actions = [(C, D), (C, D), (D, C), (D, C), (D, D), (D, D), (C, C), (C, C)] + self.versus_test( + axl.MockPlayer(actions=[D, D, C, C]), + expected_actions=actions, + init_kwargs={"n": 2}, + ) + + # Testing n=1, d=1, c=1 + actions = [(C, D), (D, D), (C, C), (C, C), (C, D), (D, D), (C, C), (C, C)] + self.versus_test( + axl.MockPlayer(actions=[D, D, C, C]), + expected_actions=actions, + init_kwargs={"n": 1, "d": 1, "c": 1}, + ) diff --git a/axelrod/tests/strategies/test_grumpy.py b/axelrod/tests/strategies/test_grumpy.py new file mode 100644 index 000000000..1fba6bbdd --- /dev/null +++ b/axelrod/tests/strategies/test_grumpy.py @@ -0,0 +1,80 @@ +"""Tests for the Grumpy strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestGrumpy(TestPlayer): + + name = "Grumpy: Nice, 10, -10" + player = axl.Grumpy + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_default_strategy(self): + + opponent = axl.Cooperator() + actions = [(C, C)] * 30 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Alternator() + actions = [(C, C), (C, D)] * 30 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Defector() + actions = [(C, D)] * 11 + [(D, D)] * 20 + self.versus_test(opponent, expected_actions=actions) + + opponent_actions = [D] * 11 + [C] * 22 + [D] * 11 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = ([(C, D)] * 11 + [(D, C)] * 22 + [(C, D)] * 11) * 3 + self.versus_test(opponent, expected_actions=actions) + + def test_starting_state(self): + opponent_actions = [D] * 11 + [C] * 22 + [D] * 11 + opponent = axl.MockPlayer(actions=opponent_actions) + + actions = ([(C, D)] * 11 + [(D, C)] * 22 + [(C, D)] * 11) * 3 + init_kwargs = {"starting_state": "Nice"} + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + opponent = axl.MockPlayer(actions=opponent_actions) + grumpy_starting = [(D, D)] * 11 + [(D, C)] * 22 + [(C, D)] * 11 + actions = grumpy_starting + actions + init_kwargs = {"starting_state": "Grumpy"} + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + def test_thresholds(self): + init_kwargs = 
{"grumpy_threshold": 3, "nice_threshold": -2} + opponent_actions = [D] * 4 + [C] * 7 + [D] * 3 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = ([(C, D)] * 4 + [(D, C)] * 7 + [(C, D)] * 3) * 3 + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + init_kwargs = {"grumpy_threshold": 0, "nice_threshold": -2} + opponent_actions = [D] * 1 + [C] * 4 + [D] * 3 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = ([(C, D)] * 1 + [(D, C)] * 4 + [(C, D)] * 3) * 3 + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + init_kwargs = {"grumpy_threshold": 3, "nice_threshold": 0} + opponent_actions = [D] * 4 + [C] * 5 + [D] * 1 + opponent = axl.MockPlayer(actions=opponent_actions) + actions = ([(C, D)] * 4 + [(D, C)] * 5 + [(C, D)] * 1) * 3 + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + def test_reset_state_with_non_default_init(self): + player = axl.Grumpy(starting_state="Grumpy") + player.state = "Nice" + player.reset() + self.assertEqual(player.state, "Grumpy") diff --git a/axelrod/tests/strategies/test_handshake.py b/axelrod/tests/strategies/test_handshake.py new file mode 100644 index 000000000..a6dbacc38 --- /dev/null +++ b/axelrod/tests/strategies/test_handshake.py @@ -0,0 +1,36 @@ +"""Tests for the Handshake strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestHandshake(TestPlayer): + + name = "Handshake" + player = axl.Handshake + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (D, D)] + [(C, C), (C, D)] * 10 + self.versus_test(axl.Alternator(), expected_actions=actions) + + actions = [(C, C), (D, C)] + [(D, C)] * 20 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + opponent = axl.MockPlayer([D, C]) + actions = [(C, D), (D, C)] + [(D, D), (D, C)] * 10 + self.versus_test(opponent, expected_actions=actions) + + actions = [(C, D), (D, D)] + [(D, D)] * 20 + self.versus_test(axl.Defector(), expected_actions=actions) diff --git a/axelrod/tests/strategies/test_headsup.py b/axelrod/tests/strategies/test_headsup.py new file mode 100644 index 000000000..9b9282724 --- /dev/null +++ b/axelrod/tests/strategies/test_headsup.py @@ -0,0 +1,120 @@ +"""Strategy match tests.""" + +import axelrod as axl + +from .test_player import TestMatch + +C, D = axl.Action.C, axl.Action.D + + +class TestTFTvsWSLS(TestMatch): + """Test TFT vs WSLS""" + + def test_rounds(self): + self.versus_test( + axl.TitForTat(), axl.WinStayLoseShift(), [C, C, C, C], [C, C, C, C] + ) + + +class TestTFTvSTFT(TestMatch): + """Test TFT vs Suspicious TFT""" + + def test_rounds(self): + self.versus_test( + axl.TitForTat(), + axl.SuspiciousTitForTat(), + [C, D, C, D, C, D], + [D, C, D, C, D, C], + ) + + +class TestTFTvsBully(TestMatch): + """Test TFT vs Bully""" + + def test_rounds(self): + self.versus_test( + axl.TitForTat(), axl.Bully(), [C, D, D, C, C, D], [D, D, C, C, D, D] + ) + + +class TestTF2TvsBully(TestMatch): + """Test Tit for Two Tats vs Bully""" + + def test_rounds(self): + self.versus_test( + axl.TitFor2Tats(), + axl.Bully(), + [C, C, D, D, C, C, C, D], + [D, D, D, C, C, D, D, D], + ) + + +class TestZDGTFT2vsBully(TestMatch): + """Test ZDGTFT2 vs Bully""" + + def test_rounds(self): + 
self.versus_test( + axl.ZDGTFT2(), + axl.Bully(), + [C, D, D, C, C, C], + [D, D, C, C, D, D], + seed=2, + ) + + +class TestZDExtort2vsTFT(TestMatch): + """Test ZDExtort2 vs TFT""" + + def test_rounds(self): + self.versus_test( + axl.ZDExtort2(), + axl.TitForTat(), + [C, D, D, D, D, D], + [C, C, D, D, D, D], + seed=2, + ) + + +class FoolMeOncevsBully(TestMatch): + """Test Fool Me Once vs Bully""" + + def test_rounds(self): + self.versus_test( + axl.FoolMeOnce(), + axl.Bully(), + [C, C, D, D, D, D], + [D, D, D, C, C, C], + ) + + +class FoolMeOncevsSTFT(TestMatch): + """Test Fool Me Once vs Suspicious TFT""" + + def test_rounds(self): + self.versus_test( + axl.FoolMeOnce(), axl.SuspiciousTitForTat(), [C] * 9, [D] + [C] * 8 + ) + + +class GrudgervsSTFT(TestMatch): + """Test Grudger vs Suspicious TFT""" + + def test_rounds(self): + self.versus_test( + axl.Grudger(), + axl.SuspiciousTitForTat(), + [C] + [D] * 9, + [D, C] + [D] * 8, + ) + + +class TestWSLSvsBully(TestMatch): + """Test WSLS vs Bully""" + + def test_rounds(self): + self.versus_test( + axl.WinStayLoseShift(), + axl.Bully(), + [C, D, C, C, D], + [D, D, C, D, D], + ) diff --git a/axelrod/tests/strategies/test_hmm.py b/axelrod/tests/strategies/test_hmm.py new file mode 100644 index 000000000..2c54eedb7 --- /dev/null +++ b/axelrod/tests/strategies/test_hmm.py @@ -0,0 +1,327 @@ +"""Tests for Hidden Markov Model Strategies.""" + +import unittest +import random + +import axelrod as axl +from axelrod.random_ import random_vector +from axelrod.evolvable_player import InsufficientParametersError +from axelrod.strategies import ( + EvolvableHMMPlayer, + HMMPlayer, + SimpleHMM, +) +from axelrod.strategies.hmm import is_stochastic_matrix +from .test_player import TestMatch, TestPlayer +from .test_evolvable_player import PartialClass, TestEvolvablePlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestHMMPlayers(unittest.TestCase): + """Test a few sample tables to make sure that the hidden Markov model + players are working as intended.""" + + def test_is_stochastic_matrix(self): + m = [[1, 0], [0, 1]] + self.assertTrue(is_stochastic_matrix(m)) + m = [[1, 1e-20], [0, 1]] + self.assertTrue(is_stochastic_matrix(m)) + m = [[0.6, 0.4], [0.8, 0.2]] + self.assertTrue(is_stochastic_matrix(m)) + m = [[0.6, 0.6], [0.8, 0.2]] + self.assertFalse(is_stochastic_matrix(m)) + m = [[0.6, 0.4], [0.8, 1.2]] + self.assertFalse(is_stochastic_matrix(m)) + + def test_cooperator(self): + """Tests that the player defined by the table for Cooperator is in fact + Cooperator.""" + t_C = [[1]] + t_D = [[1]] + p = [1] + player = axl.HMMPlayer( + transitions_C=t_C, + transitions_D=t_D, + emission_probabilities=p, + initial_state=0, + initial_action=C, + ) + self.assertFalse(player.is_stochastic()) + self.assertFalse(axl.Classifiers["stochastic"](player)) + opponent = axl.Alternator() + for i in range(6): + player.play(opponent) + self.assertEqual(opponent.history, [C, D] * 3) + self.assertEqual(player.history, [C] * 6) + + def test_defector(self): + """Tests that the player defined by the table for Defector is in fact + Defector.""" + t_C = [[1]] + t_D = [[1]] + p = [0] + player = axl.HMMPlayer( + transitions_C=t_C, + transitions_D=t_D, + emission_probabilities=p, + initial_state=0, + initial_action=D, + ) + self.assertFalse(player.is_stochastic()) + self.assertFalse(axl.Classifiers["stochastic"](player)) + opponent = axl.Alternator() + for i in range(6): + player.play(opponent) + self.assertEqual(opponent.history, [C, D] * 3) + self.assertEqual(player.history, [D] * 6) + + 
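# How to read the HMM tables in the next two tests: row i of + # transitions_C (or transitions_D) gives the distribution over the next + # state when the opponent plays C (or D) in state i, and + # emission_probabilities[i] is the probability of playing C from state i. + # TFT needs two states: state 0 ("opponent cooperated") always emits C, + # and state 1 ("opponent defected") always emits D. + 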
def test_tft(self): + """Tests that the player defined by the table for TFT is in fact + TFT.""" + t_C = [[1, 0], [1, 0]] + t_D = [[0, 1], [0, 1]] + p = [1, 0] + player = axl.HMMPlayer( + transitions_C=t_C, + transitions_D=t_D, + emission_probabilities=p, + initial_state=0, + initial_action=C, + ) + self.assertFalse(player.is_stochastic()) + self.assertFalse(axl.Classifiers["stochastic"](player)) + opponent = axl.Alternator() + for i in range(6): + player.play(opponent) + self.assertEqual(opponent.history, [C, D] * 3) + self.assertEqual(player.history, [C, C, D, C, D, C]) + + def test_wsls(self): + """Tests that the player defined by the table for WSLS is in fact + WSLS (also known as Pavlov).""" + t_C = [[1, 0], [0, 1]] + t_D = [[0, 1], [1, 0]] + p = [1, 0] + player = axl.HMMPlayer( + transitions_C=t_C, + transitions_D=t_D, + emission_probabilities=p, + initial_state=0, + initial_action=C, + ) + self.assertFalse(player.is_stochastic()) + self.assertFalse(axl.Classifiers["stochastic"](player)) + opponent = axl.Alternator() + for i in range(6): + player.play(opponent) + self.assertEqual(opponent.history, [C, D] * 3) + self.assertEqual(player.history, [C, C, D, D, C, C]) + + def test_malformed_params(self): + # Test a malformed table + t_C = [[1, 0.5], [0, 1]] + self.assertFalse(is_stochastic_matrix(t_C)) + + t_C = [[1, 0], [0, 1]] + t_D = [[0, 1], [1, 0]] + p = [1, 0] + hmm = SimpleHMM(t_C, t_C, p, 0) + self.assertTrue(hmm.is_well_formed()) + hmm = SimpleHMM(t_C, t_D, p, -1) + self.assertFalse(hmm.is_well_formed()) + t_C = [[1, -1], [0, 1]] + t_D = [[0, 1], [1, 0]] + p = [1, 0] + hmm = SimpleHMM(t_C, t_D, p, 0) + self.assertFalse(hmm.is_well_formed()) + t_C = [[1, 0], [0, 1]] + t_D = [[0, 2], [1, 0]] + p = [1, 0] + hmm = SimpleHMM(t_C, t_D, p, 0) + self.assertFalse(hmm.is_well_formed()) + t_C = [[1, 0], [0, 1]] + t_D = [[0, 1], [1, 0]] + p = [-1, 2] + hmm = SimpleHMM(t_C, t_D, p, 0) + self.assertFalse(hmm.is_well_formed()) + + +class TestHMMPlayer(TestPlayer): + + name = "HMM IpdPlayer: 0, C" + player = axl.HMMPlayer + + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_reset(self): + player = self.player( + transitions_C=[[1]], + transitions_D=[[1]], + emission_probabilities=[0], + initial_state=0, + ) + player.hmm.state = -1 + player.reset() + self.assertFalse(player.hmm.state == -1) + + +class TestEvolvedHMM5(TestPlayer): + + name = "Evolved HMM 5" + player = axl.EvolvedHMM5 + + expected_classifier = { + "memory_depth": 5, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestEvolvedHMM5vsCooperator(TestMatch): + def test_rounds(self): + self.versus_test(axl.EvolvedHMM5(), axl.Cooperator(), [C] * 5, [C] * 5) + + +class TestEvolvedHMM5vsDefector(TestMatch): + def test_rounds(self): + self.versus_test(axl.EvolvedHMM5(), axl.Defector(), [C, C, D], [D, D, D]) + + +class TestEvolvableHMMPlayer(unittest.TestCase): + + player_class = EvolvableHMMPlayer + + def test_normalized_parameters(self): + transitions_C = [[1, 0], [1, 0]] + transitions_D = [[0, 1], [0, 1]] + emission_probabilities = [1, 0] + initial_state = 0 + initial_action = C + + 
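# Each call below omits part of the HMM specification, so + # _normalize_parameters should raise InsufficientParametersError. + 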
self.assertRaises( + InsufficientParametersError, self.player_class._normalize_parameters + ) + self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters, + transitions_C=transitions_C, + transitions_D=transitions_D, + emission_probabilities=emission_probabilities, + ) + self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters, + initial_state=initial_state, + initial_action=initial_action, + ) + + def test_vector_to_instance(self): + num_states = 4 + vector = [] + for _ in range(2 * num_states): + vector.extend(list(random_vector(num_states))) + for _ in range(num_states + 1): + vector.append(random.random()) + player = self.player_class(num_states=num_states) + player.receive_vector(vector=vector) + self.assertIsInstance(player, self.player_class) + + def test_create_vector_bounds(self): + num_states = 4 + size = 2 * num_states ** 2 + num_states + 1 + + player = self.player_class(num_states=num_states) + lb, ub = player.create_vector_bounds() + + self.assertIsInstance(lb, list) + self.assertEqual(len(lb), size) + self.assertIsInstance(ub, list) + self.assertEqual(len(ub), size) + + +class TestEvolvableHMMPlayer2(TestEvolvablePlayer): + name = "EvolvableHMMPlayer" + player_class = EvolvableHMMPlayer + parent_class = HMMPlayer + parent_kwargs = [ + "transitions_C", + "transitions_D", + "emission_probabilities", + "initial_state", + "initial_action", + ] + init_parameters = {"num_states": 4} + + +class TestEvolvableHMMPlayer3(TestEvolvablePlayer): + name = "EvolvableHMMPlayer" + player_class = EvolvableHMMPlayer + parent_class = HMMPlayer + parent_kwargs = [ + "transitions_C", + "transitions_D", + "emission_probabilities", + "initial_state", + "initial_action", + ] + init_parameters = {"num_states": 8} + + +class TestEvolvableHMMPlayer4(TestEvolvablePlayer): + name = "EvolvableHMMPlayer" + player_class = EvolvableHMMPlayer + parent_class = HMMPlayer + parent_kwargs = [ + "transitions_C", + "transitions_D", + "emission_probabilities", + "initial_state", + "initial_action", + ] + init_parameters = { + "transitions_C": [[1, 0], [1, 0]], + "transitions_D": [[0, 1], [0, 1]], + "emission_probabilities": [1, 0], + "initial_state": 0, + "initial_action": C, + } + + +# Substitute EvolvableHMMPlayer as a regular HMMPlayer. +EvolvableHMMPlayerWithDefault = PartialClass( + EvolvableHMMPlayer, + transitions_C=[[1]], + transitions_D=[[1]], + emission_probabilities=[0.5], + initial_state=0, +) + + +class EvolvableHMMPlayerAsHMMPlayer(TestHMMPlayer): + player = EvolvableHMMPlayerWithDefault + + def test_equality_of_clone(self): + pass + + def test_equality_of_pickle_clone(self): + pass + + def test_repr(self): + pass diff --git a/axelrod/tests/strategies/test_human.py b/axelrod/tests/strategies/test_human.py new file mode 100644 index 000000000..d8e6877e5 --- /dev/null +++ b/axelrod/tests/strategies/test_human.py @@ -0,0 +1,133 @@ +from unittest import TestCase +from unittest.mock import patch + +from os import linesep + +import axelrod as axl +from axelrod.strategies.human import ActionValidator, Human +from prompt_toolkit.validation import ValidationError + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestDocument(object): + """ + A class to mimic a prompt-toolkit document having just the text attribute. 
+ """ + + def __init__(self, text): + self.text = text + + +class TestActionValidator(TestCase): + def test_validator(self): + test_documents = [TestDocument(x) for x in ["C", "c", "D", "d"]] + for test_document in test_documents: + ActionValidator().validate(test_document) + + test_document = TestDocument("E") + self.assertRaises(ValidationError, ActionValidator().validate, test_document) + + +class TestHumanClass(TestPlayer): + + name = "Human: human, C, D" + player = Human + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(["length", "game"]), + "long_run_time": True, + "inspects_source": True, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_init(self): + human = Human(name="test human", c_symbol="X", d_symbol="Y") + self.assertEqual(human.human_name, "test human") + self.assertEqual(human.symbols, {C: "X", D: "Y"}) + + def test_history_toolbar(self): + human = Human() + expected_content = "" + actual_content = human._history_toolbar() + self.assertEqual(actual_content, expected_content) + + human.history.append(C, C) + expected_content = "History (human, opponent): [('C', 'C')]" + actual_content = human._history_toolbar() + self.assertIn(actual_content, expected_content) + + def test_status_messages(self): + human = Human() + expected_messages = { + "toolbar": None, + "print": "{}Starting new match".format(linesep), + } + actual_messages = human._status_messages() + self.assertEqual(actual_messages, expected_messages) + + human.history.append(C, C) + expected_print_message = "{}Turn 1: human played C, opponent played C".format( + linesep + ) + actual_messages = human._status_messages() + self.assertEqual(actual_messages["print"], expected_print_message) + self.assertIsNotNone(actual_messages["toolbar"]) + + def test_get_human_input_c(self): + with patch("axelrod.human.prompt", return_value="c") as prompt_: + actions = [(C, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + self.assertEqual( + prompt_.call_args[0], ("Turn 5 action [C or D] for human: ",) + ) + + def test_get_human_input_C(self): + with patch("axelrod.human.prompt", return_value="C") as prompt_: + actions = [(C, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + self.assertEqual( + prompt_.call_args[0], ("Turn 5 action [C or D] for human: ",) + ) + + def test_get_human_input_d(self): + with patch("axelrod.human.prompt", return_value="d") as prompt_: + actions = [(D, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + self.assertEqual( + prompt_.call_args[0], ("Turn 5 action [C or D] for human: ",) + ) + + def test_get_human_input_D(self): + with patch("axelrod.human.prompt", return_value="D") as prompt_: + actions = [(D, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + self.assertEqual( + prompt_.call_args[0], ("Turn 5 action [C or D] for human: ",) + ) + + def test_strategy(self): + human = Human() + expected_action = C + actual_action = human.strategy(axl.IpdPlayer(), lambda: C) + self.assertEqual(actual_action, expected_action) + + def test_reset_history_and_attributes(self): + """Overwrite the reset method for this strategy.""" + pass + + def test_repr(self): + human = Human() + self.assertEqual(human.__repr__(), "Human: human") + + human = Human(name="John Nash") + self.assertEqual(human.__repr__(), "Human: John Nash") + human = Human(name="John Nash", c_symbol="1", d_symbol="2") + self.assertEqual(human.__repr__(), "Human: John Nash") + + def 
equality_of_players_test(self, p1, p2, seed, opponent): + return True diff --git a/axelrod/tests/strategies/test_hunter.py b/axelrod/tests/strategies/test_hunter.py new file mode 100644 index 000000000..e5b067be7 --- /dev/null +++ b/axelrod/tests/strategies/test_hunter.py @@ -0,0 +1,265 @@ +"""Tests for the Hunter strategy.""" + +import unittest + +import axelrod as axl +from axelrod.strategies.hunter import detect_cycle + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestCycleDetection(unittest.TestCase): + def test_cycles(self): + history = [C] * 10 + self.assertEqual(detect_cycle(history), (C,)) + self.assertEqual(detect_cycle(history, min_size=2), (C, C)) + history = [C, D] * 10 + self.assertEqual(detect_cycle(history, min_size=2), (C, D)) + self.assertEqual(detect_cycle(history, min_size=3), (C, D, C, D)) + history = [C, D, C] * 10 + self.assertEqual(detect_cycle(history), (C, D, C)) + history = [C, C, D] * 10 + self.assertEqual(detect_cycle(history), (C, C, D)) + + def test_noncycles(self): + history = [C, D, C, C, D, C, C, C, D] + self.assertEqual(detect_cycle(history), None) + history = [C, C, D, C, C, D, C, C, C, D, C, C, C, C, D, C, C, C, C, C] + self.assertEqual(detect_cycle(history), None) + + +class TestDefectorHunter(TestPlayer): + + name = "Defector Hunter" + player = axl.DefectorHunter + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, D)] * 4 + [(D, D)] * 10 + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + actions = [(C, C)] * 14 + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + +class TestCooperatorHunter(TestPlayer): + + name = "Cooperator Hunter" + player = axl.CooperatorHunter + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 4 + [(D, C)] * 10 + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + actions = [(C, D)] * 14 + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + +class TestAlternatorHunter(TestPlayer): + + name = "Alternator Hunter" + player = axl.AlternatorHunter + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "inspects_source": False, + "makes_use_of": set(), + "long_run_time": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D)] * 3 + [(D, C), (D, D)] * 5 + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + attrs={"is_alt": True}, + ) + + actions = [(C, D)] * 14 + self.versus_test( + opponent=axl.Defector(), + expected_actions=actions, + attrs={"is_alt": False}, + ) + + def test_reset_attr(self): + p = self.player() + p.is_alt = True + p.reset() + self.assertFalse(p.is_alt) + + +class TestCycleHunter(TestPlayer): + + name = "Cycle Hunter" + player = axl.CycleHunter + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def 
test_strategy(self): + player = self.player() + # Test against cyclers + for opponent in [ + axl.CyclerCCD(), + axl.CyclerCCCD(), + axl.CyclerCCCCCD(), + axl.Alternator(), + ]: + player.reset() + for i in range(30): + player.play(opponent) + self.assertEqual(player.history[-1], D) + # Test against non-cyclers + axl.seed(40) + for opponent in [ + axl.Random(), + axl.AntiCycler(), + axl.Cooperator(), + axl.Defector(), + ]: + player.reset() + for i in range(30): + player.play(opponent) + self.assertEqual(player.history[-1], C) + + def test_reset_attr(self): + p = self.player() + p.cycle = "CCDDCD" + p.reset() + self.assertEqual(p.cycle, None) + + +class TestEventualCycleHunter(TestPlayer): + + name = "Eventual Cycle Hunter" + player = axl.EventualCycleHunter + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + player = self.player() + # Test against cyclers + for opponent in [ + axl.CyclerCCD(), + axl.CyclerCCCD(), + axl.CyclerCCCCCD(), + axl.Alternator(), + ]: + player.reset() + for i in range(50): + player.play(opponent) + self.assertEqual(player.history[-1], D) + # Test against non-cyclers and cooperators + axl.seed(43) + for opponent in [ + axl.Random(), + axl.AntiCycler(), + axl.DoubleCrosser(), + axl.Cooperator(), + ]: + player.reset() + for i in range(50): + player.play(opponent) + self.assertEqual(player.history[-1], C) + + def test_reset_attr(self): + p = self.player() + p.cycle = "CCDDCD" + p.reset() + self.assertEqual(p.cycle, None) + + +class TestMathConstantHunter(TestPlayer): + + name = "Math Constant Hunter" + player = axl.MathConstantHunter + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + opponent = axl.MockPlayer([C] * 7 + [D] * 3) + actions = [(C, C)] * 7 + [(C, D)] + self.versus_test(opponent=opponent, expected_actions=actions) + + +class TestRandomHunter(TestPlayer): + + name = "Random Hunter" + player = axl.RandomHunter + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + + # We should catch the alternator here. 
+ actions = [(C, C), (C, D)] * 5 + [(C, C), (D, D), (D, C)] + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + attrs={"countCC": 5, "countDD": 0}, + ) + + actions = [(C, D)] * 14 + self.versus_test( + opponent=axl.Defector(), + expected_actions=actions, + attrs={"countCC": 0, "countDD": 0}, + ) + + def test_reset(self): + player = self.player() + opponent = axl.Cooperator() + for _ in range(100): + player.play(opponent) + self.assertFalse(player.countCC == 0) + player.reset() + self.assertTrue(player.countCC == 0) diff --git a/axelrod/tests/strategies/test_inverse.py b/axelrod/tests/strategies/test_inverse.py new file mode 100644 index 000000000..3eaee2a89 --- /dev/null +++ b/axelrod/tests/strategies/test_inverse.py @@ -0,0 +1,48 @@ +"""Tests for the inverse strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestInverse(TestPlayer): + + name = "Inverse" + player = axl.Inverse + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Test that as long as the opponent has not defected the player will + # cooperate. + self.versus_test(axl.Cooperator(), expected_actions=[(C, C)]) + + # Tests that if opponent has played all D then player chooses D. + self.versus_test(axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 9) + + expected_actions = [ + (C, D), + (D, C), + (D, C), + (D, D), + (D, C), + (C, C), + (C, C), + (C, C), + (C, D), + (D, D), + ] + self.versus_test( + axl.MockPlayer(actions=[a[1] for a in expected_actions]), + expected_actions=expected_actions, + seed=0, + ) diff --git a/axelrod/tests/strategies/test_lookerup.py b/axelrod/tests/strategies/test_lookerup.py new file mode 100755 index 000000000..024328c93 --- /dev/null +++ b/axelrod/tests/strategies/test_lookerup.py @@ -0,0 +1,760 @@ +"""Test for the Looker Up strategy.""" + +import unittest + +import copy + +import random + +import axelrod as axl +from axelrod.action import str_to_actions +from axelrod.evolvable_player import InsufficientParametersError +from axelrod.strategies.lookerup import ( + EvolvableLookerUp, + LookupTable, + Plays, + create_lookup_table_keys, + make_keys_into_plays, +) +from .test_evolvable_player import PartialClass, TestEvolvablePlayer +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestLookupTable(unittest.TestCase): + lookup_dict = { + ((C, C), (C,), ()): C, + ((C, C), (D,), ()): D, + ((C, D), (C,), ()): D, + ((C, D), (D,), ()): C, + ((D, C), (C,), ()): C, + ((D, C), (D,), ()): D, + ((D, D), (C,), ()): D, + ((D, D), (D,), ()): C, + } + + def test_init(self): + table = LookupTable(self.lookup_dict) + + self.assertEqual(table.table_depth, 2) + self.assertEqual(table.player_depth, 2) + self.assertEqual(table.op_depth, 1) + self.assertEqual(table.op_openings_depth, 0) + self.assertEqual( + table.dictionary, + { + Plays(self_plays=(C, C), op_plays=(C,), op_openings=()): C, + Plays(self_plays=(C, C), op_plays=(D,), op_openings=()): D, + Plays(self_plays=(C, D), op_plays=(C,), op_openings=()): D, + Plays(self_plays=(C, D), op_plays=(D,), op_openings=()): C, + Plays(self_plays=(D, C), op_plays=(C,), op_openings=()): C, + Plays(self_plays=(D, C), op_plays=(D,), op_openings=()): D, + Plays(self_plays=(D, D), op_plays=(C,), op_openings=()): D, + 
Plays(self_plays=(D, D), op_plays=(D,), op_openings=()): C, + }, + ) + self.assertIsInstance(next(iter(table.dictionary)), Plays) + + def test_init_raises_error_when_keys_for_lookup_dict_do_not_match(self): + lookup_dict = {((C,), (C,), ()): C, ((D, D), (D, D), ()): C} + with self.assertRaises(ValueError): + LookupTable(lookup_dict=lookup_dict) + + def test_init_raises_error_keys_do_not_cover_all_combinations(self): + lookup_dict = {((C,), (C,), ()): C, ((D,), (D,), ()): C} + with self.assertRaises(ValueError): + LookupTable(lookup_dict=lookup_dict) + + def test_from_pattern(self): + pattern = (C, D, D, C, C, D, D, C) + table = LookupTable.from_pattern( + pattern, player_depth=2, op_depth=1, op_openings_depth=0 + ) + self.assertEqual(table.dictionary, make_keys_into_plays(self.lookup_dict)) + + def test_from_pattern_raises_error_pattern_len_ne_dict_size(self): + too_big = (C,) * 17 + too_small = (C,) * 15 + just_right = (C,) * 16 + with self.assertRaises(ValueError): + LookupTable.from_pattern(too_big, 2, 2, 0) + with self.assertRaises(ValueError): + LookupTable.from_pattern(too_small, 2, 2, 0) + self.assertIsInstance( + LookupTable.from_pattern(just_right, 2, 2, 0), LookupTable + ) + + def test_dictionary_property_returns_new_dict_object(self): + table = LookupTable(lookup_dict=self.lookup_dict) + self.assertIsNot(table.dictionary, table.dictionary) + + def test_display_default(self): + table = LookupTable.from_pattern( + (C,) * 8, player_depth=2, op_depth=0, op_openings_depth=1 + ) + self.assertEqual( + table.display(), + ( + "op_openings|self_plays | op_plays \n" + + " C , C, C , : C,\n" + + " C , C, D , : C,\n" + + " C , D, C , : C,\n" + + " C , D, D , : C,\n" + + " D , C, C , : C,\n" + + " D , C, D , : C,\n" + + " D , D, C , : C,\n" + + " D , D, D , : C,\n" + ), + ) + + def test_display_assign_order(self): + table = LookupTable.from_pattern( + (C,) * 8, player_depth=0, op_depth=3, op_openings_depth=0 + ) + self.assertEqual( + table.display(sort_by=("op_openings", "op_plays", "self_plays")), + ( + "op_openings| op_plays |self_plays \n" + + " , C, C, C , : C,\n" + + " , C, C, D , : C,\n" + + " , C, D, C , : C,\n" + + " , C, D, D , : C,\n" + + " , D, C, C , : C,\n" + + " , D, C, D , : C,\n" + + " , D, D, C , : C,\n" + + " , D, D, D , : C,\n" + ), + ) + + def test_equality_true(self): + table_a = LookupTable(self.lookup_dict) + table_b = LookupTable(self.lookup_dict) + self.assertTrue(table_a.__eq__(table_b)) + + def test_equality_false(self): + table_a = LookupTable.from_pattern((C, D), 1, 0, 0) + table_b = LookupTable.from_pattern((D, C), 1, 0, 0) + table_c = LookupTable.from_pattern((C, D), 0, 1, 0) + self.assertFalse(table_a.__eq__(table_b)) + self.assertFalse(table_a.__eq__(table_c)) + self.assertFalse(table_a.__eq__(table_a.dictionary)) + + def test_not_equal(self): + table_a = LookupTable(self.lookup_dict) + table_b = LookupTable(self.lookup_dict) + not_equal = LookupTable.from_pattern((C, C), 1, 0, 0) + self.assertFalse(table_a.__ne__(table_b)) + self.assertTrue(table_a.__ne__(not_equal)) + + +class TestLookupTableHelperFunctions(unittest.TestCase): + def test_plays_equals_tuple(self): + self.assertEqual(Plays(1, 2, 3), (1, 2, 3)) + + def test_plays_assign_values(self): + self.assertEqual(Plays(op_plays=2, self_plays=1, op_openings=3), Plays(1, 2, 3)) + + def test_make_keys_into_plays(self): + old = {((C, D), (C,), ()): 1, ((D, D), (D,), ()): 2} + new = make_keys_into_plays(old) + self.assertNotIsInstance(next(iter(old)), Plays) + self.assertIsInstance(next(iter(new)), Plays) + 
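# Plays is a namedtuple, so the converted keys still compare equal to the + # original plain-tuple keys even though their types differ. + 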
self.assertTrue(new.__eq__(old)) + self.assertTrue(old.__eq__(new)) + + def test_make_keys_into_plays_always_returns_new_dict(self): + old = {Plays((C, D), (C,), ()): 1, Plays((D, D), (D,), ()): 2} + self.assertIsNot(old, make_keys_into_plays(old)) + + def test_create_lookup_table_keys(self): + expected = [ + Plays((C, C), (C,), ()), + Plays((C, C), (D,), ()), + Plays((C, D), (C,), ()), + Plays((C, D), (D,), ()), + Plays((D, C), (C,), ()), + Plays((D, C), (D,), ()), + Plays((D, D), (C,), ()), + Plays((D, D), (D,), ()), + ] + actual = create_lookup_table_keys( + player_depth=2, op_depth=1, op_openings_depth=0 + ) + self.assertEqual(actual, expected) + self.assertIsInstance(actual[0], Plays) + + +class TestLookerUp(TestPlayer): + name = "LookerUp" + player = axl.LookerUp + + expected_classifier = { + "memory_depth": 1, # Default TFT + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + expected_class_classifier = copy.copy(expected_classifier) + + def test_default_init(self): + player = self.player() + expected = {Plays((), (D,), ()): D, Plays((), (C,), ()): C} + self.assertEqual(player.lookup_dict, expected) + self.assertEqual(player.initial_actions, (C,)) + + def test_pattern_and_params_init_pattern_is_string(self): + pattern = "CCCC" + parameters = Plays(1, 1, 0) + player = axl.LookerUp(pattern=pattern, parameters=parameters) + expected_lookup_table = { + Plays((C,), (D,), ()): C, + Plays((D,), (D,), ()): C, + Plays((C,), (C,), ()): C, + Plays((D,), (C,), ()): C, + } + self.assertEqual(player.lookup_dict, expected_lookup_table) + + def test_pattern_and_params_init_pattern_is_tuple(self): + pattern = (C, C, C, C) + parameters = Plays(1, 1, 0) + player = axl.LookerUp(pattern=pattern, parameters=parameters) + expected_lookup_table = { + Plays((C,), (D,), ()): C, + Plays((D,), (D,), ()): C, + Plays((C,), (C,), ()): C, + Plays((D,), (C,), ()): C, + } + self.assertEqual(player.lookup_dict, expected_lookup_table) + + def test_pattern_and_params_init_can_still_use_regular_tuple(self): + pattern = (C, C) + parameters = (1, 0, 0) + player = axl.LookerUp(pattern=pattern, parameters=parameters) + expected_lookup_table = {Plays((C,), (), ()): C, Plays((D,), (), ()): C} + self.assertEqual(player.lookup_dict, expected_lookup_table) + + def test_pattern_and_params_init_only_happens_if_both_are_present(self): + default = {Plays((), (D,), ()): D, Plays((), (C,), ()): C} + pattern = "CC" + parameters = Plays(self_plays=0, op_plays=1, op_openings=0) + player1 = axl.LookerUp(pattern=pattern) + player2 = axl.LookerUp(parameters=parameters) + + self.assertEqual(player1.lookup_dict, default) + self.assertEqual(player2.lookup_dict, default) + + def test_lookup_table_init(self): + lookup_table = { + ((C,), (D,), ()): C, + ((D,), (D,), ()): C, + ((C,), (C,), ()): C, + ((D,), (C,), ()): C, + } + player = axl.LookerUp(lookup_dict=lookup_table) + self.assertEqual(player.lookup_dict, lookup_table) + self.assertIsInstance(next(iter(player.lookup_dict)), Plays) + + def test_lookup_table_init_supersedes_pattern_init(self): + lookup_table = { + ((C,), (D,), ()): D, + ((D,), (D,), ()): D, + ((C,), (C,), ()): D, + ((D,), (C,), ()): D, + } + pattern = "CCCCCCCC" + parameters = Plays(self_plays=1, op_plays=1, op_openings=1) + player = axl.LookerUp( + lookup_dict=lookup_table, pattern=pattern, parameters=parameters + ) + + self.assertEqual(player.lookup_dict, lookup_table) + + def test_init_raises_errors(self): + 
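# Three invalid set-ups follow: keys of mixed depths, a table that does + # not cover every combination of plays, and a pattern that is too short + # for the given parameters. + 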
mismatch_dict = {((C,), (C,), ()): C, ((D, D), (D, D), ()): C} + with self.assertRaises(ValueError): + axl.LookerUp(lookup_dict=mismatch_dict) + + incomplete_lookup_dict = {((C,), (C,), ()): C, ((D,), (D,), ()): C} + with self.assertRaises(ValueError): + axl.LookerUp(lookup_dict=incomplete_lookup_dict) + + too_short_pattern = "CC" + with self.assertRaises(ValueError): + axl.LookerUp(pattern=too_short_pattern, parameters=(3, 3, 3)) + + def test_initial_actions_set_to_max_table_depth(self): + initial_actions = (D, D, D) + table_depth_one = axl.LookerUp(initial_actions=initial_actions) + self.assertEqual(table_depth_one.initial_actions, (D,)) + + def test_initial_actions_makes_up_missing_actions_with_c(self): + initial_actions = (D,) + table_depth_three = axl.LookerUp( + initial_actions=initial_actions, + pattern="CCCCCCCC", + parameters=Plays(3, 0, 0), + ) + self.assertEqual(table_depth_three.initial_actions, (D, C, C)) + + def test_set_memory_depth(self): + mem_depth_1 = axl.LookerUp(pattern="CC", parameters=Plays(1, 0, 0)) + self.assertEqual(axl.Classifiers["memory_depth"](mem_depth_1), 1) + + mem_depth_3 = axl.LookerUp(pattern="C" * 16, parameters=Plays(1, 3, 0)) + self.assertEqual(axl.Classifiers["memory_depth"](mem_depth_3), 3) + + mem_depth_inf = axl.LookerUp(pattern="CC", parameters=Plays(0, 0, 1)) + self.assertEqual(axl.Classifiers["memory_depth"](mem_depth_inf), float("inf")) + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (C, D)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + actions = [(C, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + def test_cooperator_table(self): + lookup_table = {((), (), ()): C} + actions = [(C, D)] * 5 + self.versus_test( + axl.Defector(), + expected_actions=actions, + init_kwargs={"lookup_dict": lookup_table}, + ) + + def test_defector_table_with_initial_cooperate(self): + """ + Testing a lookup table that always defects IF there is enough history. 
+ """ + defector_table = { + ((C,), (D,), ()): D, + ((D,), (D,), ()): D, + ((C,), (C,), ()): D, + ((D,), (C,), ()): D, + } + actions = [(C, C)] + [(D, D), (D, C)] * 4 + self.versus_test( + axl.Alternator(), + expected_actions=actions, + init_kwargs={"lookup_dict": defector_table}, + ) + + def test_zero_tables(self): + """Test the corner case where n=0.""" + anti_tft_pattern = "DC" + parameters = Plays(self_plays=0, op_plays=1, op_openings=0) + + tft_vs_alternator = [(C, C)] + [(D, D), (C, C)] * 5 + self.versus_test( + axl.Alternator(), + expected_actions=tft_vs_alternator, + init_kwargs={"parameters": parameters, "pattern": anti_tft_pattern}, + ) + + def test_opponent_starting_moves_table(self): + """A lookup table that always repeats the opponent's first move.""" + first_move_table = {((), (), (C,)): C, ((), (), (D,)): D} + + vs_alternator = [(C, C), (C, D)] * 5 + self.versus_test( + axl.Alternator(), + expected_actions=vs_alternator, + init_kwargs={"lookup_dict": first_move_table}, + ) + + vs_initial_defector = [(C, D)] + [(D, C), (D, D)] * 10 + opponent = axl.MockPlayer(actions=[D, C]) + self.versus_test( + opponent, + expected_actions=vs_initial_defector, + init_kwargs={"lookup_dict": first_move_table}, + ) + + def test_lookup_table_display(self): + player = axl.LookerUp( + pattern="CCCC", parameters=Plays(self_plays=2, op_plays=0, op_openings=0) + ) + self.assertEqual( + player.lookup_table_display(("self_plays", "op_plays", "op_openings")), + ( + "self_plays | op_plays |op_openings\n" + + " C, C , , : C,\n" + + " C, D , , : C,\n" + + " D, C , , : C,\n" + + " D, D , , : C,\n" + ), + ) + + +class TestEvolvedLookerUp1_1_1(TestPlayer): + name = "EvolvedLookerUp1_1_1" + player = axl.EvolvedLookerUp1_1_1 + + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_new_data(self): + original_data = { + ("C", "C", "C"): C, + ("C", "C", "D"): D, + ("C", "D", "C"): D, + ("C", "D", "D"): D, + ("D", "C", "C"): D, + ("D", "C", "D"): D, + ("D", "D", "C"): C, + ("D", "D", "D"): D, + } + converted_original = convert_original_to_current(original_data) + self.assertEqual(self.player().lookup_dict, converted_original) + + def test_vs_initial_defector(self): + opponent = [D, C, C, D, D, C] + expected = [(C, D), (D, C), (C, C), (D, D), (D, D), (D, C)] + self.versus_test(axl.MockPlayer(actions=opponent), expected_actions=expected) + + def test_vs_initial_cooperator(self): + opponent = [C, D, D, C, C, D] + expected = [(C, C), (C, D), (D, D), (D, C), (D, C), (D, D)] + self.versus_test(axl.MockPlayer(actions=opponent), expected_actions=expected) + + +class TestEvolvedLookerUp2_2_2(TestPlayer): + name = "EvolvedLookerUp2_2_2" + player = axl.EvolvedLookerUp2_2_2 + + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_new_data(self): + original_data = { + ("CC", "CC", "CC"): C, + ("CC", "CC", "CD"): D, + ("CC", "CC", "DC"): C, + ("CC", "CC", "DD"): C, + ("CC", "CD", "CC"): D, + ("CC", "CD", "CD"): C, + ("CC", "CD", "DC"): C, + ("CC", "CD", "DD"): C, + ("CC", "DC", "CC"): D, + ("CC", "DC", "CD"): C, + ("CC", "DC", "DC"): D, + ("CC", "DC", "DD"): D, + ("CC", "DD", "CC"): D, + ("CC", "DD", "CD"): C, + ("CC", "DD", "DC"): C, + ("CC", "DD", "DD"): C, + ("CD", 
"CC", "CC"): D, + ("CD", "CC", "CD"): C, + ("CD", "CC", "DC"): D, + ("CD", "CC", "DD"): D, + ("CD", "CD", "CC"): D, + ("CD", "CD", "CD"): D, + ("CD", "CD", "DC"): D, + ("CD", "CD", "DD"): D, + ("CD", "DC", "CC"): D, + ("CD", "DC", "CD"): C, + ("CD", "DC", "DC"): D, + ("CD", "DC", "DD"): D, + ("CD", "DD", "CC"): D, + ("CD", "DD", "CD"): C, + ("CD", "DD", "DC"): D, + ("CD", "DD", "DD"): C, + ("DC", "CC", "CC"): D, + ("DC", "CC", "CD"): D, + ("DC", "CC", "DC"): D, + ("DC", "CC", "DD"): D, + ("DC", "CD", "CC"): C, + ("DC", "CD", "CD"): C, + ("DC", "CD", "DC"): D, + ("DC", "CD", "DD"): C, + ("DC", "DC", "CC"): C, + ("DC", "DC", "CD"): C, + ("DC", "DC", "DC"): C, + ("DC", "DC", "DD"): D, + ("DC", "DD", "CC"): D, + ("DC", "DD", "CD"): D, + ("DC", "DD", "DC"): D, + ("DC", "DD", "DD"): C, + ("DD", "CC", "CC"): C, + ("DD", "CC", "CD"): D, + ("DD", "CC", "DC"): D, + ("DD", "CC", "DD"): D, + ("DD", "CD", "CC"): D, + ("DD", "CD", "CD"): C, + ("DD", "CD", "DC"): C, + ("DD", "CD", "DD"): D, + ("DD", "DC", "CC"): C, + ("DD", "DC", "CD"): D, + ("DD", "DC", "DC"): D, + ("DD", "DC", "DD"): D, + ("DD", "DD", "CC"): D, + ("DD", "DD", "CD"): D, + ("DD", "DD", "DC"): D, + ("DD", "DD", "DD"): D, + } + converted_original = convert_original_to_current(original_data) + self.assertEqual(self.player().lookup_dict, converted_original) + + def test_vs_initial_defector(self): + opponent_actions = [D, D] + [C, D] * 3 + expected = [(C, D), (C, D)] + [(D, C), (C, D)] * 3 + self.versus_test( + axl.MockPlayer(actions=opponent_actions), expected_actions=expected + ) + + def test_vs_initial_d_c(self): + opponent_actions = [D, C] + [C, D] * 3 + expected = [(C, D), (C, C)] + [(D, C), (C, D), (C, C), (D, D), (C, C), (C, D)] + self.versus_test( + axl.MockPlayer(actions=opponent_actions), expected_actions=expected + ) + + +class TestWinner12(TestPlayer): + name = "Winner12" + player = axl.Winner12 + + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + expected_class_classifier = copy.copy(expected_classifier) + + def test_new_data(self): + original_data = { + ("", "C", "CC"): C, + ("", "C", "CD"): D, + ("", "C", "DC"): C, + ("", "C", "DD"): D, + ("", "D", "CC"): D, + ("", "D", "CD"): C, + ("", "D", "DC"): D, + ("", "D", "DD"): D, + } + converted_original = convert_original_to_current(original_data) + self.assertEqual(self.player().lookup_dict, converted_original) + + def test_strategy(self): + """Starts by cooperating twice.""" + vs_alternator = [(C, C), (C, D), (D, C), (D, D)] * 5 + self.versus_test(axl.Alternator(), expected_actions=vs_alternator) + + self.versus_test(axl.Cooperator(), expected_actions=[(C, C)] * 10) + + self.versus_test( + axl.Defector(), expected_actions=([(C, D), (C, D)] + [(D, D)] * 10) + ) + + +class TestWinner21(TestPlayer): + name = "Winner21" + player = axl.Winner21 + + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + expected_class_classifier = copy.copy(expected_classifier) + + def test_new_data(self): + original_data = { + ("", "C", "CC"): C, + ("", "C", "CD"): D, + ("", "C", "DC"): C, + ("", "C", "DD"): D, + ("", "D", "CC"): C, + ("", "D", "CD"): D, + ("", "D", "DC"): D, + ("", "D", "DD"): D, + } + converted_original = convert_original_to_current(original_data) + 
self.assertEqual(self.player().lookup_dict, converted_original) + + def test_strategy(self): + """Starts by cooperating twice.""" + vs_alternator = [(D, C), (C, D)] + [(D, C), (D, D)] * 5 + self.versus_test(axl.Alternator(), expected_actions=vs_alternator) + + self.versus_test(axl.Cooperator(), expected_actions=[(D, C)] + [(C, C)] * 10) + + self.versus_test( + axl.Defector(), expected_actions=([(D, D), (C, D)] + [(D, D)] * 10) + ) + + +class TestDictConversionFunctions(unittest.TestCase): + def test_convert_key(self): + opponent_starting_plays = "" + player_last_plays = "CC" + opponent_last_plays = "D" + old_key = (opponent_starting_plays, player_last_plays, opponent_last_plays) + + new_key = Plays(self_plays=(C, C), op_plays=(D,), op_openings=()) + + self.assertEqual(new_key, convert_key(old_key)) + + def test_convert_original_to_current(self): + expected = { + Plays(self_plays=(C, C), op_plays=(D,), op_openings=()): C, + Plays(self_plays=(D,), op_plays=(D, D), op_openings=(C,)): D, + } + original = {("", "CC", "D"): C, ("C", "D", "DD"): D} + self.assertEqual(expected, convert_original_to_current(original)) + + +def convert_original_to_current(original_data: dict) -> dict: + return {convert_key(key): value for key, value in original_data.items()} + + +def convert_key(old_key: tuple) -> Plays: + opponent_start, player, opponent = old_key + return Plays( + self_plays=str_to_actions(player), + op_plays=str_to_actions(opponent), + op_openings=str_to_actions(opponent_start), + ) + + +class TestEvolvableLookerUp(unittest.TestCase): + player_class = EvolvableLookerUp + + def test_normalized_parameters(self): + initial_actions = ( + C, + C, + ) + lookup_dict = { + ((C, C), (C,), ()): C, + ((C, C), (D,), ()): D, + ((C, D), (C,), ()): D, + ((C, D), (D,), ()): C, + ((D, C), (C,), ()): C, + ((D, C), (D,), ()): D, + ((D, D), (C,), ()): D, + ((D, D), (D,), ()): C, + } + pattern = ("".join([random.choice(("C", "D")) for _ in range(8)]),) + + self.assertRaises( + InsufficientParametersError, self.player_class._normalize_parameters + ) + self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters, + pattern=pattern, + initial_actions=initial_actions, + ) + self.assertRaises( + InsufficientParametersError, + self.player_class._normalize_parameters, + lookup_dict=lookup_dict, + ) + + +class TestEvolvableLookerUp2(TestEvolvablePlayer): + name = "EvolvableLookerUp" + player_class = axl.EvolvableLookerUp + parent_class = axl.LookerUp + parent_kwargs = ["lookup_dict", "initial_actions"] + init_parameters = {"parameters": (1, 1, 1)} + + +class TestEvolvableLookerUp3(TestEvolvablePlayer): + name = "EvolvableLookerUp" + player_class = axl.EvolvableLookerUp + parent_class = axl.LookerUp + parent_kwargs = ["lookup_dict", "initial_actions"] + init_parameters = {"parameters": (2, 1, 3)} + + +class TestEvolvableLookerUp4(TestEvolvablePlayer): + name = "EvolvableLookerUp" + player_class = axl.EvolvableLookerUp + parent_class = axl.LookerUp + parent_kwargs = ["lookup_dict", "initial_actions"] + init_parameters = { + "parameters": (2, 2, 2), + "pattern": "".join([random.choice(("C", "D")) for _ in range(64)]), + "initial_actions": (C, C,), + } + + +class TestEvolvableLookerUp5(TestEvolvablePlayer): + name = "EvolvableLookerUp" + player_class = axl.EvolvableLookerUp + parent_class = axl.LookerUp + parent_kwargs = ["lookup_dict", "initial_actions"] + init_parameters = { + "initial_actions": (C, C,), + "lookup_dict": { + ((C, C), (C,), ()): C, + ((C, C), (D,), ()): D, + ((C, D), (C,), ()): D, 
+            ((C, D), (D,), ()): C,
+            ((D, C), (C,), ()): C,
+            ((D, C), (D,), ()): D,
+            ((D, D), (C,), ()): D,
+            ((D, D), (D,), ()): C,
+        },
+    }
+
+
+# Substitute EvolvableLookerUp for a regular LookerUp.
+EvolvableLookerUpWithDefault = PartialClass(
+    EvolvableLookerUp,
+    parameters=(0, 1, 0),
+    lookup_dict={
+        ((), (D,), ()): D,
+        ((), (C,), ()): C,
+    },
+    initial_actions=(C,),
+)
+
+
+class EvolvableLookerUpAsLookerUp(TestLookerUp):
+    player = EvolvableLookerUpWithDefault
+
+    def test_equality_of_clone(self):
+        pass
+
+    def test_equality_of_pickle_clone(self):
+        pass
+
+    def test_zero_tables(self):
+        pass
+
+    def test_repr(self):
+        pass
diff --git a/axelrod/tests/strategies/test_mathematicalconstants.py b/axelrod/tests/strategies/test_mathematicalconstants.py
new file mode 100644
index 000000000..64d5ec850
--- /dev/null
+++ b/axelrod/tests/strategies/test_mathematicalconstants.py
@@ -0,0 +1,82 @@
+"""Tests for the Golden ratio and other mathematical constant strategies."""
+
+import axelrod as axl
+
+from .test_player import TestPlayer
+
+C, D = axl.Action.C, axl.Action.D
+
+
+class TestGolden(TestPlayer):
+
+    name = r"$\phi$"
+    player = axl.Golden
+    expected_classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C), (D, D), (C, C), (D, D), (C, C)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
+
+        actions = [(C, C), (D, C), (D, C), (D, C), (D, C)]
+        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions)
+
+        actions = [(C, D), (C, D), (C, D), (C, D), (C, D)]
+        self.versus_test(opponent=axl.Defector(), expected_actions=actions)
+
+
+class TestPi(TestPlayer):
+
+    name = r"$\pi$"
+    player = axl.Pi
+    expected_classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C), (D, D), (C, C), (C, D), (C, C)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
+
+        actions = [(C, C), (D, C), (D, C), (D, C), (D, C)]
+        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions)
+
+        actions = [(C, D), (C, D), (C, D), (C, D), (C, D)]
+        self.versus_test(opponent=axl.Defector(), expected_actions=actions)
+
+
+class Teste(TestPlayer):
+
+    name = "$e$"
+    player = axl.e
+    expected_classifier = {
+        "memory_depth": float("inf"),  # Long memory
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C), (D, D), (C, C), (C, D), (C, C)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
+
+        actions = [(C, C), (D, C), (D, C), (D, C), (D, C)]
+        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions)
+
+        actions = [(C, D), (C, D), (C, D), (C, D), (C, D)]
+        self.versus_test(opponent=axl.Defector(), expected_actions=actions)
diff --git a/axelrod/tests/strategies/test_memoryone.py b/axelrod/tests/strategies/test_memoryone.py
new file mode 100644
index 000000000..3c8dab63e
--- /dev/null
+++ b/axelrod/tests/strategies/test_memoryone.py
@@ -0,0 +1,319 @@
+"""Tests for the Memoryone strategies."""
+import unittest
+import warnings
+
+import axelrod as axl
+from axelrod.strategies.memoryone import MemoryOnePlayer
+
+from .test_player import TestPlayer, test_four_vector
+
+C, D = axl.Action.C, axl.Action.D
+
+
+class TestGenericPlayerOne(unittest.TestCase):
+    """A class to test the naming and classification of generic memory one
+    players."""
+
+    p1 = axl.MemoryOnePlayer(four_vector=(0, 0, 0, 0))
+    p2 = axl.MemoryOnePlayer(four_vector=(1, 0, 1, 0))
+    p3 = axl.MemoryOnePlayer(four_vector=(1, 0.5, 1, 0.5))
+
+    def test_name(self):
+        self.assertEqual(self.p1.name, "Generic Memory One IpdPlayer: (0, 0, 0, 0)")
+        self.assertEqual(self.p2.name, "Generic Memory One IpdPlayer: (1, 0, 1, 0)")
+        self.assertEqual(self.p3.name, "Generic Memory One IpdPlayer: (1, 0.5, 1, 0.5)")
+
+    def test_stochastic_classification(self):
+        self.assertFalse(axl.Classifiers["stochastic"](self.p1))
+        self.assertFalse(axl.Classifiers["stochastic"](self.p2))
+        self.assertTrue(axl.Classifiers["stochastic"](self.p3))
+
+
+class TestWinStayLoseShift(TestPlayer):
+
+    name = "Win-Stay Lose-Shift: C"
+    player = axl.WinStayLoseShift
+    expected_classifier = {
+        "memory_depth": 1,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_class_classification(self):
+        self.assertEqual(self.player.classifier, self.expected_classifier)
+
+    def test_strategy(self):
+        # Check that the player shifts when it does not get the best payoff.
+        actions = [(C, C), (C, D), (D, C), (D, D), (C, C)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
+
+
+class TestWinShiftLoseStay(TestPlayer):
+
+    name = "Win-Shift Lose-Stay: D"
+    player = axl.WinShiftLoseStay
+    expected_classifier = {
+        "memory_depth": 1,
+        "stochastic": False,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        # Check that the player shifts on a win and stays on a loss.
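+        # Illustrative sketch, not code taken from the library: expressed as
+        # memory-one four-vectors over the states (CC, CD, DC, DD),
+        # Win-Shift Lose-Stay is the pointwise complement of
+        # Win-Stay Lose-Shift's (1, 0, 0, 1):
+        #
+        #     wsls = {(C, C): 1, (C, D): 0, (D, C): 0, (D, D): 1}
+        #     win_shift_lose_stay = {k: 1 - v for k, v in wsls.items()}
+        #
+        # which matches the (D, C) -> C and (C, C) -> D transitions asserted
+        # below.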
+        actions = [(D, C), (C, D), (C, C), (D, D), (D, C)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
+
+
+class TestGTFT(TestPlayer):
+
+    name = "GTFT: 0.33"
+    player = axl.GTFT
+    expected_classifier = {
+        "memory_depth": 1,
+        "stochastic": True,
+        "makes_use_of": set(["game"]),
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C), (C, D), (D, C), (C, D), (D, C)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=0)
+
+        actions = [(C, C), (C, D), (C, C), (C, D), (D, C)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1)
+
+    def test_four_vector(self):
+        (R, P, S, T) = axl.IpdGame().RPST()
+        p = min(1 - (T - R) / (R - S), (R - P) / (T - P))
+        expected_dictionary = {(C, C): 1.0, (C, D): p, (D, C): 1.0, (D, D): p}
+        test_four_vector(self, expected_dictionary)
+
+    def test_allow_for_zero_probability(self):
+        player = self.player(p=0)
+        expected = {(C, C): 1.0, (C, D): 0, (D, C): 1.0, (D, D): 0}
+        self.assertEqual(player._four_vector, expected)
+
+
+class TestFirmButFair(TestPlayer):
+
+    name = "Firm But Fair"
+    player = axl.FirmButFair
+    expected_classifier = {
+        "memory_depth": 1,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_four_vector(self):
+        expected_dictionary = {(C, C): 1, (C, D): 0, (D, C): 1, (D, D): 2 / 3}
+        test_four_vector(self, expected_dictionary)
+
+    def test_strategy(self):
+
+        actions = [(C, C), (C, D), (D, C), (C, D), (D, C)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions)
+
+        actions = [(C, D), (D, D), (D, D), (D, D), (C, D)]
+        self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=0)
+
+        actions = [(C, D), (D, D), (C, D), (D, D), (D, D)]
+        self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=1)
+
+
+class TestStochasticCooperator(TestPlayer):
+
+    name = "Stochastic Cooperator"
+    player = axl.StochasticCooperator
+    expected_classifier = {
+        "memory_depth": 1,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_four_vector(self):
+        expected_dictionary = {
+            (C, C): 0.935,
+            (C, D): 0.229,
+            (D, C): 0.266,
+            (D, D): 0.42,
+        }
+        test_four_vector(self, expected_dictionary)
+
+    def test_strategy(self):
+        actions = [(C, C), (D, D), (C, C), (C, D), (C, C), (D, D)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=15)
+
+        actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1)
+
+        actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=3)
+
+        actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (C, D)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=13)
+
+
+class TestStochasticWSLS(TestPlayer):
+
+    name = "Stochastic WSLS: 0.05"
+    player = axl.StochasticWSLS
+    expected_classifier = {
+        "memory_depth": 1,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(C, C), (D, D), (C, C), (C, D), (D, C), (D, D)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=2)
+
+        actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=31)

+        actions = [(C, D), (D, C), (D, D), (C, C), (C, D), (D, C)]
+        self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2)
+
+        actions = [(C, D), (C, C), (C, D), (D, C), (D, D), (C, C)]
+        self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=31)
+
+    def test_four_vector(self):
+        player = self.player()
+        ep = player.ep
+        expected_dictionary = {
+            (C, C): 1.0 - ep,
+            (C, D): ep,
+            (D, C): ep,
+            (D, D): 1.0 - ep,
+        }
+        test_four_vector(self, expected_dictionary)
+
+
+class TestMemoryOnePlayer(unittest.TestCase):
+    def test_default_if_four_vector_not_set(self):
+        player = MemoryOnePlayer()
+        self.assertEqual(
+            player._four_vector, {(C, C): 1.0, (C, D): 0.0, (D, C): 0.0, (D, D): 1.0}
+        )
+
+    def test_warning_if_four_vector_not_set(self):
+        with warnings.catch_warnings(record=True) as warning:
+            warnings.simplefilter("always")
+            player = MemoryOnePlayer()
+
+        self.assertEqual(len(warning), 1)
+        self.assertEqual(warning[-1].category, UserWarning)
+        self.assertEqual(
+            str(warning[-1].message),
+            "Memory one player is set to default (1, 0, 0, 1).",
+        )
+
+    def test_exception_if_probability_vector_outside_valid_values(self):
+        player = MemoryOnePlayer()
+        x = 2.0
+        with self.assertRaises(ValueError):
+            player.set_four_vector([0.1, x, 0.5, 0.1])
+
+
+class TestSoftJoss(TestPlayer):
+
+    name = "Soft Joss: 0.9"
+    player = axl.SoftJoss
+    expected_classifier = {
+        "memory_depth": 1,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_four_vector(self):
+        expected_dictionary = {(C, C): 1, (C, D): 0.1, (D, C): 1.0, (D, D): 0.1}
+        test_four_vector(self, expected_dictionary)
+
+    def test_strategy(self):
+        actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)]
+        self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=2)
+
+        actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (D, C)]
+        self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5)
+
+
+class TestALLCorALLD(TestPlayer):
+
+    name = "ALLCorALLD"
+    player = axl.ALLCorALLD
+    expected_classifier = {
+        "memory_depth": 1,
+        "stochastic": True,
+        "makes_use_of": set(),
+        "long_run_time": False,
+        "inspects_source": False,
+        "manipulates_source": False,
+        "manipulates_state": False,
+    }
+
+    def test_strategy(self):
+        actions = [(D, C)] * 10
+        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions, seed=0)
+        actions = [(C, C)] * 10
+        self.versus_test(opponent=axl.Cooperator(), expected_actions=actions, seed=1)
+
+
+class TestGenericReactiveStrategy(unittest.TestCase):
+    """
+    Tests for the generic Reactive strategy.
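+
+    A reactive player conditions only on the opponent's last move: it
+    cooperates with probability p after an opponent cooperation and with
+    probability q after an opponent defection, giving the four-vector
+    (p, q, p, q) over the states (CC, CD, DC, DD), as the test_four_vector
+    assertions below spell out.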
+ """ + + p1 = axl.ReactivePlayer(probabilities=(0, 0)) + p2 = axl.ReactivePlayer(probabilities=(1, 0)) + p3 = axl.ReactivePlayer(probabilities=(1, 0.5)) + + def test_name(self): + self.assertEqual(self.p1.name, "Reactive IpdPlayer: (0, 0)") + self.assertEqual(self.p2.name, "Reactive IpdPlayer: (1, 0)") + self.assertEqual(self.p3.name, "Reactive IpdPlayer: (1, 0.5)") + + def test_four_vector(self): + self.assertEqual( + self.p1._four_vector, {(C, D): 0.0, (D, C): 0.0, (C, C): 0.0, (D, D): 0.0} + ) + self.assertEqual( + self.p2._four_vector, {(C, D): 0.0, (D, C): 1.0, (C, C): 1.0, (D, D): 0.0} + ) + self.assertEqual( + self.p3._four_vector, {(C, D): 0.5, (D, C): 1.0, (C, C): 1.0, (D, D): 0.5} + ) + + def test_stochastic_classification(self): + self.assertFalse(axl.Classifiers["stochastic"](self.p1)) + self.assertFalse(axl.Classifiers["stochastic"](self.p2)) + self.assertTrue(axl.Classifiers["stochastic"](self.p3)) + + def test_subclass(self): + self.assertIsInstance(self.p1, MemoryOnePlayer) + self.assertIsInstance(self.p2, MemoryOnePlayer) + self.assertIsInstance(self.p3, MemoryOnePlayer) diff --git a/axelrod/tests/strategies/test_memorytwo.py b/axelrod/tests/strategies/test_memorytwo.py new file mode 100644 index 000000000..ca52a0a4b --- /dev/null +++ b/axelrod/tests/strategies/test_memorytwo.py @@ -0,0 +1,315 @@ +"""Tests for the Memorytwo strategies.""" + +import unittest + +import random + +import warnings + +import axelrod as axl +from axelrod.strategies import MemoryTwoPlayer + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestGenericPlayerTwo(unittest.TestCase): + """A class to test the naming and classification of generic memory two + players.""" + + p1 = MemoryTwoPlayer( + sixteen_vector=(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + ) + p2 = MemoryTwoPlayer( + sixteen_vector=(1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0) + ) + p3 = MemoryTwoPlayer( + sixteen_vector=( + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + 0.5, + ) + ) + p4 = MemoryTwoPlayer( + sixteen_vector=(0.1, 0, 0.2, 0, 0.3, 0, 0.4, 0, 0.5, 0, 0.6, 0, 0.7, 0, 0.8, 0) + ) + + def test_name(self): + self.assertEqual( + self.p1.name, + "Generic Memory Two IpdPlayer: (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)", + ) + self.assertEqual( + self.p2.name, + "Generic Memory Two IpdPlayer: (1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0)", + ) + self.assertEqual( + self.p3.name, + "Generic Memory Two IpdPlayer: (0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5)", + ) + self.assertEqual( + self.p4.name, + "Generic Memory Two IpdPlayer: (0.1, 0, 0.2, 0, 0.3, 0, 0.4, 0, 0.5, 0, 0.6, 0, 0.7, 0, 0.8, 0)", + ) + + def test_deterministic_classification(self): + self.assertFalse(axl.Classifiers["stochastic"](self.p1)) + self.assertFalse(axl.Classifiers["stochastic"](self.p2)) + + def test_stochastic_classification(self): + self.assertTrue(axl.Classifiers["stochastic"](self.p3)) + self.assertTrue(axl.Classifiers["stochastic"](self.p4)) + + +class TestMemoryTwoPlayer(unittest.TestCase): + def test_default_if_four_vector_not_set(self): + player = MemoryTwoPlayer() + self.assertEqual( + player._sixteen_vector, + { + ((C, C), (C, C)): 1.0, + ((C, C), (C, D)): 1.0, + ((C, D), (C, C)): 1.0, + ((C, D), (C, D)): 1.0, + ((C, C), (D, C)): 1.0, + ((C, C), (D, D)): 1.0, + ((C, D), (D, C)): 1.0, + ((C, D), (D, D)): 1.0, + ((D, C), (C, C)): 1.0, + ((D, C), (C, D)): 1.0, + ((D, D), (C, C)): 1.0, + ((D, D), (C, 
D)): 1.0, + ((D, C), (D, C)): 1.0, + ((D, C), (D, D)): 1.0, + ((D, D), (D, C)): 1.0, + ((D, D), (D, D)): 1.0, + }, + ) + + def test_exception_if_four_vector_not_set(self): + with warnings.catch_warnings(record=True) as warning: + warnings.simplefilter("always") + player = MemoryTwoPlayer() + + self.assertEqual(len(warning), 1) + self.assertEqual(warning[-1].category, UserWarning) + self.assertEqual( + str(warning[-1].message), + "Memory two player is set to default, Cooperator.", + ) + + def test_exception_if_probability_vector_outside_valid_values(self): + player = MemoryTwoPlayer() + x = 2 + with self.assertRaises(ValueError): + player.set_sixteen_vector( + [ + 0.1, + x, + 0.5, + 0.1, + 0.1, + 0.2, + 0.5, + 0.1, + 0.1, + 0.2, + 0.5, + 0.1, + 0.2, + 0.5, + 0.1, + 0.2, + 0.5, + 0.2, + ] + ) + + +class TestMemoryStochastic(TestPlayer): + name = ( + "Generic Memory Two IpdPlayer: (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1): C" + ) + player = axl.MemoryTwoPlayer + expected_classifier = { + "memory_depth": 2, # Memory-two Sixteen-Vector + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + axl.seed(0) + vector = [random.random() for _ in range(16)] + + actions = [(C, C), (C, C), (D, D), (D, C), (C, C), (C, D), (C, C)] + self.versus_test( + opponent=axl.CyclerCCD(), + expected_actions=actions, + seed=0, + init_kwargs={"sixteen_vector": vector}, + ) + + actions = [(C, C), (C, C), (C, D), (D, C), (C, C), (C, D), (C, C)] + self.versus_test( + opponent=axl.CyclerCCD(), + expected_actions=actions, + seed=1, + init_kwargs={"sixteen_vector": vector}, + ) + + actions = [(C, C), (C, C), (D, C), (D, D), (C, D), (C, C), (D, C)] + self.versus_test( + opponent=axl.TitForTat(), + expected_actions=actions, + seed=0, + init_kwargs={"sixteen_vector": vector}, + ) + + actions = [(C, C), (C, C), (C, C), (D, C), (D, D), (C, D), (C, C)] + self.versus_test( + opponent=axl.TitForTat(), + expected_actions=actions, + seed=1, + init_kwargs={"sixteen_vector": vector}, + ) + + +class TestAON2(TestPlayer): + + name = "AON2" + player = axl.AON2 + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # tests states 2, 7, 14 and 15 + actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + # tests states 4, 16 and 11 + actions = [(C, D), (C, D), (D, C), (D, D), (D, D), (C, C), (C, D)] + self.versus_test(opponent=axl.CyclerDDC(), expected_actions=actions) + + # tests states 3, 5 and 12 + actions = [(C, D), (C, C), (D, C), (D, D), (D, D), (C, D)] + self.versus_test(opponent=axl.SuspiciousTitForTat(), expected_actions=actions) + + # tests state 1 + actions = [(C, C), (C, C), (C, C), (C, C)] + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + # tests state 6 + actions = [(C, D), (C, C), (D, D), (C, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions) + + +class TestDelayedAON1(TestPlayer): + + name = "Delayed AON1" + player = axl.DelayedAON1 + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def 
test_strategy_mutually_cooperative(self): + # tests states 2, 7, 14 and 11 + actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + # tests states 1, 4 and 8 + actions = [(C, D), (C, D), (D, D), (C, C), (C, C), (C, D)] + self.versus_test( + opponent=axl.Cycler(["D", "D", "D", "C", "C"]), expected_actions=actions + ) + + # tests states 3, 5 + actions = [(C, D), (C, C), (D, C), (D, D), (C, D)] + self.versus_test(opponent=axl.SuspiciousTitForTat(), expected_actions=actions) + + +class TestMEM2(TestPlayer): + + name = "MEM2" + player = axl.MEM2 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Start with TFT + actions = [(C, C), (C, C)] + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + attrs={"play_as": "TFT", "shift_counter": 1, "alld_counter": 0}, + ) + actions = [(C, D), (D, D)] + self.versus_test( + opponent=axl.Defector(), + expected_actions=actions, + attrs={"play_as": "TFT", "shift_counter": 1, "alld_counter": 0}, + ) + # TFTT if C, D and D, C + opponent = axl.MockPlayer([D, C, D, D]) + actions = [(C, D), (D, C), (C, D), (C, D)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"play_as": "TFTT", "shift_counter": 1, "alld_counter": 0}, + ) + + opponent = axl.MockPlayer([D, C, D, D]) + actions = [ + (C, D), + (D, C), + (C, D), + (C, D), + (D, D), + (D, C), + (D, D), + (D, D), + (D, D), + (D, C), + ] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"play_as": "ALLD", "shift_counter": -1, "alld_counter": 2}, + ) diff --git a/axelrod/tests/strategies/test_meta.py b/axelrod/tests/strategies/test_meta.py new file mode 100644 index 000000000..4559180e8 --- /dev/null +++ b/axelrod/tests/strategies/test_meta.py @@ -0,0 +1,721 @@ +"""Tests for the various Meta strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer + +from hypothesis import given, settings +from hypothesis.strategies import integers + +C, D = axl.Action.C, axl.Action.D + + +class TestMetaPlayer(TestPlayer): + """This is a test class for meta players, primarily to test the classifier + dictionary and the reset methods. 
Inherit from this class just as you would + the TestPlayer class.""" + + name = "Meta IpdPlayer" + player = axl.MetaPlayer + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": {"game"}, + "long_run_time": True, + "manipulates_source": False, + "inspects_source": False, + "manipulates_state": False, + } + + def classifier_test(self, expected_class_classifier=None): + player = self.player() + classifier = dict() + for key in [ + "stochastic", + "inspects_source", + "manipulates_source", + "manipulates_state", + ]: + classifier[key] = any(axl.Classifiers[key](t) for t in player.team) + classifier["memory_depth"] = float("inf") + + for t in player.team: + try: + classifier["makes_use_of"].update(axl.Classifiers["make_use_of"](t)) + except KeyError: + pass + + for key in classifier: + self.assertEqual( + axl.Classifiers[key](player), + classifier[key], + msg="%s - Behaviour: %s != Expected Behaviour: %s" + % (key, axl.Classifiers[key](player), classifier[key]), + ) + + def test_repr(self): + player = self.player() + team_size = len(player.team) + self.assertEqual( + str(player), + "{}: {} player{}".format( + self.name, team_size, "s" if team_size > 1 else "" + ), + ) + + @given(seed=integers(min_value=1, max_value=20000000)) + @settings(max_examples=1) + def test_clone(self, seed): + # Test that the cloned player produces identical play + player1 = self.player() + player2 = player1.clone() + self.assertEqual(len(player2.history), 0) + self.assertEqual(player2.cooperations, 0) + self.assertEqual(player2.defections, 0) + self.assertEqual(player2.state_distribution, {}) + self.assertEqual(player2.classifier, player1.classifier) + self.assertEqual(player2.match_attributes, player1.match_attributes) + + turns = 10 + for op in [ + axl.Cooperator(), + axl.Defector(), + axl.TitForTat(), + ]: + player1.reset() + player2.reset() + for p in [player1, player2]: + axl.seed(seed) + m = axl.IpdMatch((p, op), turns=turns) + m.play() + self.assertEqual(len(player1.history), turns) + self.assertEqual(player1.history, player2.history) + + +class TestMetaMajority(TestMetaPlayer): + name = "Meta Majority" + player = axl.MetaMajority + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "manipulates_source": False, + "makes_use_of": {"game", "length"}, + "inspects_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + P1 = axl.MetaMajority() + P2 = axl.IpdPlayer() + + # With more cooperators on the team than defectors, we should cooperate. + P1.team = [axl.Cooperator(), axl.Cooperator(), axl.Defector()] + self.assertEqual(P1.strategy(P2), C) + + # With more defectors, we should defect. + P1.team = [axl.Cooperator(), axl.Defector(), axl.Defector()] + self.assertEqual(P1.strategy(P2), D) + + +class TestMetaMinority(TestMetaPlayer): + name = "Meta Minority" + player = axl.MetaMinority + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "makes_use_of": {"game", "length"}, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_team(self): + team = [axl.Cooperator] + player = self.player(team=team) + self.assertEqual(len(player.team), 1) + + def test_strategy(self): + P1 = axl.MetaMinority() + P2 = axl.IpdPlayer() + + # With more cooperators on the team, we should defect. 
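+        # Illustrative sketch, not the library's exact implementation:
+        # MetaMinority polls its team and sides with the minority, roughly
+        #
+        #     votes = [member.strategy(opponent) for member in self.team]
+        #     return D if votes.count(C) > votes.count(D) else C
+        #
+        # so a cooperating majority yields D and a defecting majority yields
+        # C, as asserted below.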
+ P1.team = [axl.Cooperator(), axl.Cooperator(), axl.Defector()] + self.assertEqual(P1.strategy(P2), D) + + # With defectors in the majority, we will cooperate here. + P1.team = [axl.Cooperator(), axl.Defector(), axl.Defector()] + self.assertEqual(P1.strategy(P2), C) + + +class TestNiceMetaWinner(TestMetaPlayer): + name = "Nice Meta Winner" + player = axl.NiceMetaWinner + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "makes_use_of": {"game", "length"}, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + P1 = axl.NiceMetaWinner(team=[axl.Cooperator, axl.Defector]) + P2 = axl.IpdPlayer() + + # This meta player will simply choose the strategy with the highest + # current score. + P1.team[0].score = 0 + P1.team[1].score = 1 + self.assertEqual(P1.strategy(P2), C) + P1.team[0].score = 1 + P1.team[1].score = 0 + self.assertEqual(P1.strategy(P2), C) + + # If there is a tie, choose to cooperate if possible. + P1.team[0].score = 1 + P1.team[1].score = 1 + self.assertEqual(P1.strategy(P2), C) + + opponent = axl.Cooperator() + player = axl.NiceMetaWinner(team=[axl.Cooperator, axl.Defector]) + for _ in range(5): + player.play(opponent) + self.assertEqual(player.history[-1], C) + + opponent = axl.Defector() + player = axl.NiceMetaWinner(team=[axl.Defector]) + for _ in range(20): + player.play(opponent) + self.assertEqual(player.history[-1], D) + + opponent = axl.Defector() + player = axl.MetaWinner(team=[axl.Cooperator, axl.Defector]) + for _ in range(20): + player.play(opponent) + self.assertEqual(player.history[-1], D) + + +class TestNiceMetaWinnerEnsemble(TestMetaPlayer): + name = "Nice Meta Winner Ensemble" + player = axl.NiceMetaWinnerEnsemble + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": {"game", "length"}, + "long_run_time": True, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C)] * 8 + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + init_kwargs={"team": [axl.Cooperator, axl.Defector]}, + ) + actions = [(C, D)] + [(D, D)] * 7 + self.versus_test( + opponent=axl.Defector(), + expected_actions=actions, + init_kwargs={"team": [axl.Cooperator, axl.Defector]}, + ) + + +class TestMetaHunter(TestMetaPlayer): + name = "Meta Hunter" + player = axl.MetaHunter + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "long_run_time": False, + "inspects_source": False, + "makes_use_of": set(), + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # We are not using the Cooperator Hunter here, so this should lead to + # cooperation. + actions = [(C, C)] * 5 + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + # After long histories tit-for-tat should come into play. + opponent = axl.MockPlayer([C] * 100 + [D]) + actions = [(C, C)] * 100 + [(C, D)] + [(D, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + actions = [(C, C)] * 102 + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + # All these others, however, should trigger a defection for the hunter. 
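+        # Sketch of the intent, not the exact source: MetaHunter defects as
+        # soon as any hunter on its team fires, roughly
+        #
+        #     if D in (hunter.strategy(opponent) for hunter in self.team):
+        #         return D
+        #
+        # and otherwise cooperates, with a Tit For Tat fallback once the
+        # history is long enough (the 100-turn case above).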
+ actions = [(C, D), (C, D), (C, D), (C, D), (D, D)] + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + actions = [ + (C, C), + (C, C), + (C, C), + (C, D), + (C, C), + (C, C), + (C, C), + (C, D), + (D, C), + ] + self.versus_test(opponent=axl.CyclerCCCD(), expected_actions=actions) + + +class TestMetaHunterAggressive(TestMetaPlayer): + name = "Meta Hunter Aggressive" + player = axl.MetaHunterAggressive + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "long_run_time": False, + "inspects_source": False, + "makes_use_of": set(), + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # We are using CooperatorHunter here, so this should lead to + # defection + actions = [(C, C)] * 4 + [(D, C)] + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + # All these others, however, should trigger a defection for the hunter. + actions = [(C, D), (C, D), (C, D), (C, D), (D, D)] + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + actions = [ + (C, C), + (C, C), + (C, C), + (C, D), + (C, C), + (C, C), + (C, C), + (C, D), + (D, C), + ] + self.versus_test(opponent=axl.CyclerCCCD(), expected_actions=actions) + + # To test the TFT action of the strategy after 100 turns, we need to + # remove two of the hunters from its team. + # It is almost impossible to identify a history which reaches 100 turns + # without triggering one of the hunters in the default team. As at + # 16-Mar-2017, none of the strategies in the library does so. 
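+        # Note, an assumption about the default team rather than something
+        # stated in this patch: the explicit team below appears to drop
+        # CooperatorHunter and MathConstantHunter, so a 100-turn cooperative
+        # history no longer triggers a hunt and the Tit For Tat fallback
+        # becomes reachable.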
+ team = [ + axl.DefectorHunter, + axl.AlternatorHunter, + axl.RandomHunter, + axl.CycleHunter, + axl.EventualCycleHunter, + ] + opponent = axl.MockPlayer([C] * 100 + [D]) + actions = [(C, C)] * 100 + [(C, D), (D, C)] + self.versus_test( + opponent=opponent, expected_actions=actions, init_kwargs={"team": team} + ) + + +class TestMetaMajorityMemoryOne(TestMetaPlayer): + name = "Meta Majority Memory One" + player = axl.MetaMajorityMemoryOne + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "inspects_source": False, + "long_run_time": False, + "makes_use_of": set(["game"]), + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestMetaMajorityFiniteMemory(TestMetaPlayer): + name = "Meta Majority Finite Memory" + player = axl.MetaMajorityFiniteMemory + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game"}, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestMetaMajorityLongMemory(TestMetaPlayer): + name = "Meta Majority Long Memory" + player = axl.MetaMajorityLongMemory + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game", "length"}, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=0) + + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=1) + + +class TestMetaWinnerMemoryOne(TestMetaPlayer): + name = "Meta Winner Memory One" + player = axl.MetaWinnerMemoryOne + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestMetaWinnerFiniteMemory(TestMetaPlayer): + name = "Meta Winner Finite Memory" + player = axl.MetaWinnerFiniteMemory + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game"}, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestMetaWinnerLongMemory(TestMetaPlayer): + name = "Meta Winner Long Memory" + player = axl.MetaWinnerLongMemory + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game", "length"}, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] + 
self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestMetaWinnerDeterministic(TestMetaPlayer): + name = "Meta Winner Deterministic" + player = axl.MetaWinnerDeterministic + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game", "length"}, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestMetaWinnerStochastic(TestMetaPlayer): + name = "Meta Winner Stochastic" + player = axl.MetaWinnerStochastic + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game", "length"}, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestMetaMixer(TestMetaPlayer): + name = "Meta Mixer" + player = axl.MetaMixer + expected_classifier = { + "inspects_source": False, + "long_run_time": True, + "makes_use_of": {"game", "length"}, + "manipulates_source": False, + "manipulates_state": False, + "memory_depth": float("inf"), + "stochastic": True, + } + + def test_strategy(self): + team = [axl.TitForTat, axl.Cooperator, axl.Grudger] + distribution = [0.2, 0.5, 0.3] + + P1 = axl.MetaMixer(team=team, distribution=distribution) + P2 = axl.Cooperator() + actions = [(C, C)] * 20 + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + init_kwargs={"team": team, "distribution": distribution}, + ) + + team.append(axl.Defector) + distribution = [0.2, 0.5, 0.3, 0] # If add a defector but does not occur + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + init_kwargs={"team": team, "distribution": distribution}, + ) + + distribution = [0, 0, 0, 1] # If defector is only one that is played + actions = [(D, C)] * 20 + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + init_kwargs={"team": team, "distribution": distribution}, + ) + + def test_raise_error_in_distribution(self): + team = [axl.TitForTat, axl.Cooperator, axl.Grudger] + distribution = [0.2, 0.5, 0.5] # Not a valid probability distribution + + player = axl.MetaMixer(team=team, distribution=distribution) + opponent = axl.Cooperator() + + self.assertRaises(ValueError, player.strategy, opponent) + + +class TestNMWEDeterministic(TestMetaPlayer): + name = "NMWE Deterministic" + player = axl.NMWEDeterministic + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game", "length"}, + "manipulates_source": False, + "manipulates_state": False, + } + + # Skip this test + def classifier_test(self, expected_class_classifier=None): + pass + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestNMWEStochastic(TestMetaPlayer): + name = "NMWE Stochastic" + player = axl.NMWEStochastic + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game", "length"}, + 
"manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=20) + + +class TestNMWEFiniteMemory(TestMetaPlayer): + name = "NMWE Finite Memory" + player = axl.NMWEFiniteMemory + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game"}, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestNMWELongMemory(TestMetaPlayer): + name = "NMWE Long Memory" + player = axl.NMWELongMemory + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": True, + "inspects_source": False, + "makes_use_of": {"game", "length"}, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions, seed=10) + + +class TestNMWEMemoryOne(TestMetaPlayer): + name = "NMWE Memory One" + player = axl.NMWEMemoryOne + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "long_run_time": False, + "inspects_source": False, + "makes_use_of": {"game"}, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (C, C), (D, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + +class TestMemoryDecay(TestPlayer): + name = "Memory Decay: 0.1, 0.03, -2, 1, Tit For Tat, 15" + player = axl.MemoryDecay + expected_classifier = { + "memory_depth": float("inf"), + "long_run_time": False, + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Test TitForTat behavior in first 15 turns + opponent = axl.Cooperator() + actions = list([(C, C)]) * 15 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Defector() + actions = [(C, D)] + list([(D, D)]) * 14 + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Alternator() + actions = [(C, C)] + [(C, D), (D, C)] * 7 + self.versus_test(opponent, expected_actions=actions) + + opponent_actions = [C, D, D, C, D, C, C, D, C, D, D, C, C, D, D] + opponent = axl.MockPlayer(actions=opponent_actions) + mem_actions = [C, C, D, D, C, D, C, C, D, C, D, D, C, C, D] + actions = list(zip(mem_actions, opponent_actions)) + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.Random() + actions = [(C, D), (D, D), (D, C), (C, C), (C, D), (D, C)] + self.versus_test(opponent, expected_actions=actions, seed=0) + + # Test net-cooperation-score (NCS) based decisions in subsequent turns + opponent = axl.Cooperator() + actions = [(C, C)] * 15 + [(C, C)] + self.versus_test( + opponent, + expected_actions=actions, + seed=1, + init_kwargs={"memory": [D] * 5 + [C] * 10}, + ) + + opponent = axl.Cooperator() + actions = [(C, C)] * 15 + [(C, C)] + self.versus_test( + opponent, + expected_actions=actions, + seed=1, + init_kwargs={"memory": [D] * 4 + [C] * 11}, + ) + + # Test alternative starting strategies + opponent = axl.Cooperator() + actions = list([(D, C)]) * 15 + 
self.versus_test( + opponent, + expected_actions=actions, + init_kwargs={"start_strategy": axl.Defector}, + ) + + opponent = axl.Cooperator() + actions = list([(C, C)]) * 15 + self.versus_test( + opponent, + expected_actions=actions, + init_kwargs={"start_strategy": axl.Cooperator}, + ) + + opponent = axl.Cooperator() + actions = [(C, C)] + list([(D, C), (C, C)]) * 7 + self.versus_test( + opponent, + expected_actions=actions, + init_kwargs={"start_strategy": axl.Alternator}, + ) + + opponent = axl.Defector() + actions = [(C, D)] * 7 + [(D, D)] + self.versus_test( + opponent, + expected_actions=actions, + seed=4, + init_kwargs={ + "memory": [C] * 12, + "start_strategy": axl.Defector, + "start_strategy_duration": 0, + }, + ) diff --git a/axelrod/tests/strategies/test_mindcontrol.py b/axelrod/tests/strategies/test_mindcontrol.py new file mode 100644 index 000000000..3088c1aca --- /dev/null +++ b/axelrod/tests/strategies/test_mindcontrol.py @@ -0,0 +1,99 @@ +"""Tests for mind controllers and other wizards.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestMindController(TestPlayer): + + name = "Mind Controller" + player = axl.MindController + expected_classifier = { + "memory_depth": -10, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": True, # Finds out what opponent will do + "manipulates_state": False, + } + + def test_strategy(self): + """ Will always make opponent cooperate """ + + p1 = axl.MindController() + p2 = axl.Cooperator() + self.assertEqual(p1.strategy(p2), D) + self.assertEqual(p2.strategy(p1), C) + + def test_vs_defect(self): + """ Will force even defector to cooperate """ + + p1 = axl.MindController() + p2 = axl.Defector() + self.assertEqual(p1.strategy(p2), D) + self.assertEqual(p2.strategy(p1), C) + + def test_vs_grudger(self): + """ Will force even Grudger to forget its grudges""" + + p1 = axl.MindController() + p2 = axl.Grudger() + for _ in range(4): + p1.history.append(D, C) + p2.history.append(C, D) + self.assertEqual(p1.strategy(p2), D) + self.assertEqual(p2.strategy(p1), C) + + +class TestMindWarper(TestMindController): + + name = "Mind Warper" + player = axl.MindWarper + expected_classifier = { + "memory_depth": -10, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": True, # Finds out what opponent will do + "manipulates_state": False, + } + + def test_setattr(self): + player = self.player() + player.strategy = lambda opponent: C + + def test_strategy(self): + player = self.player() + opponent = axl.Defector() + play1 = player.strategy(opponent) + play2 = opponent.strategy(player) + self.assertEqual(play1, D) + self.assertEqual(play2, C) + + +class TestMindBender(TestMindController): + + name = "Mind Bender" + player = axl.MindBender + expected_classifier = { + "memory_depth": -10, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": True, # Finds out what opponent will do + "manipulates_state": False, + } + + def test_strategy(self): + player = self.player() + opponent = axl.Defector() + play1 = player.strategy(opponent) + play2 = opponent.strategy(player) + self.assertEqual(play1, D) + self.assertEqual(play2, C) diff --git a/axelrod/tests/strategies/test_mindreader.py b/axelrod/tests/strategies/test_mindreader.py new file mode 100644 index 000000000..21a78ec6a --- /dev/null +++ 
b/axelrod/tests/strategies/test_mindreader.py @@ -0,0 +1,172 @@ +"""Tests for the Mindreader strategy.""" + +import axelrod as axl +from axelrod._strategy_utils import simulate_match + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestMindReader(TestPlayer): + + name = "Mind Reader" + player = axl.MindReader + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"game"}, + "long_run_time": False, + "inspects_source": True, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_foil_inspection_strategy(self): + player = self.player() + self.assertEqual(player.foil_strategy_inspection(), D) + + def test_strategy(self): + """ + Will defect against nice strategies + """ + p1 = axl.MindReader() + p2 = axl.Cooperator() + self.assertEqual(p1.strategy(p2), D) + + def test_vs_defect(self): + """ + Will defect against pure defecting strategies + """ + p1 = axl.MindReader() + p2 = axl.Defector() + self.assertEqual(p1.strategy(p2), D) + + def test_vs_grudger(self): + """ + Will keep nasty strategies happy if it can + """ + p1 = axl.MindReader() + p2 = axl.Grudger() + self.assertEqual(p1.strategy(p2), C) + + def test_vs_tit_for_tat(self): + """ + Will keep nasty strategies happy if it can + """ + p1 = axl.MindReader() + p2 = axl.TitForTat() + self.assertEqual(p1.strategy(p2), C) + + def test_simulate_matches(self): + """ + Simulates a number of matches + """ + p1 = axl.MindReader() + p2 = axl.Grudger() + simulate_match(p1, p2, C, 4) + self.assertEqual(p2.history, [C, C, C, C]) + + def test_history_is_same(self): + """ + Checks that the history is not altered by the player + """ + p1 = axl.MindReader() + p2 = axl.Grudger() + p1.history.append(C, C) + p1.history.append(C, D) + p2.history.append(C, C) + p2.history.append(D, C) + p1.strategy(p2) + self.assertEqual(p1.history, [C, C]) + self.assertEqual(p2.history, [C, D]) + + def test_vs_geller(self): + """Ensures that a recursion error does not occur """ + p1 = axl.MindReader() + p2 = axl.Geller() + p1.strategy(p2) + p2.strategy(p1) + + def test_init(self): + """Tests for init method """ + p1 = axl.MindReader() + self.assertEqual(p1.history, []) + + +class TestProtectedMindReader(TestPlayer): + + name = "Protected Mind Reader" + player = axl.ProtectedMindReader + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"game"}, + "long_run_time": False, + "inspects_source": True, # Finds out what opponent will do + "manipulates_source": True, # Stops opponent's strategy + "manipulates_state": False, + } + + def test_foil_inspection_strategy(self): + player = self.player() + self.assertEqual(player.foil_strategy_inspection(), D) + + def test_strategy(self): + """ + Will defect against nice strategies + """ + p1 = axl.ProtectedMindReader() + p2 = axl.Cooperator() + self.assertEqual(p1.strategy(p2), D) + + def test_vs_defect(self): + """ + Will defect against pure defecting strategies + """ + p1 = axl.ProtectedMindReader() + p2 = axl.Defector() + self.assertEqual(p1.strategy(p2), D) + + def tests_protected(self): + """Ensures that no other player can alter its strategy """ + + p1 = axl.ProtectedMindReader() + p2 = axl.MindController() + P3 = axl.Cooperator() + p2.strategy(p1) + self.assertEqual(p1.strategy(P3), D) + + +class TestMirrorMindReader(TestPlayer): + + name = "Mirror Mind Reader" + player = axl.MirrorMindReader + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + 
"makes_use_of": set(), + "long_run_time": False, + "inspects_source": True, # reading and copying the source of the component + "manipulates_source": True, # changing own source dynamically + "manipulates_state": False, + } + + def test_foil_inspection_strategy(self): + player = self.player() + self.assertEqual(player.foil_strategy_inspection(), C) + + def test_strategy(self): + p1 = axl.MirrorMindReader() + p2 = axl.Cooperator() + self.assertEqual(p1.strategy(p2), C) + + def test_vs_defector(self): + p1 = axl.MirrorMindReader() + p2 = axl.Defector() + self.assertEqual(p1.strategy(p2), D) + + def test_nice_with_itself(self): + p1 = axl.MirrorMindReader() + p2 = axl.MirrorMindReader() + self.assertEqual(p1.strategy(p2), C) diff --git a/axelrod/tests/strategies/test_mutual.py b/axelrod/tests/strategies/test_mutual.py new file mode 100644 index 000000000..921fc698d --- /dev/null +++ b/axelrod/tests/strategies/test_mutual.py @@ -0,0 +1,148 @@ +"""Tests for strategies Desperate, Hopeless, Willing, and Grim.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestDesperate(TestPlayer): + + name = "Desperate" + player = axl.Desperate + expected_classifier = { + "memory_depth": 1, + "long_run_time": False, + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Our IpdPlayer (Desperate) vs Cooperator SEED --> 1 + opponent = axl.Cooperator() + opponent_actions = [C] * 5 + actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + # Our IpdPlayer (Desperate) vs Cooperator SEED --> 2 + opponent = axl.Cooperator() + actions = [(D, C), (D, C), (D, C), (D, C), (D, C)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + # Our IpdPlayer (Desperate) vs Defector SEED --> 1 + opponent = axl.Defector() + actions = [(C, D), (D, D), (C, D), (D, D), (C, D)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + # Our IpdPlayer (Desperate) vs Defector SEED --> 2 + opponent = axl.Defector() + actions = [(D, D), (C, D), (D, D), (C, D), (D, D)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + # Our IpdPlayer (Desperate) vs Alternator SEED --> 1 + opponent = axl.Alternator() + actions = [(C, C), (D, D), (C, C), (D, D), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + # Our IpdPlayer (Desperate) vs Alternator SEED --> 2 + opponent = axl.Alternator() + actions = [(D, C), (D, D), (C, C), (D, D), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + +class TestHopeless(TestPlayer): + + name = "Hopeless" + player = axl.Hopeless + expected_classifier = { + "memory_depth": 1, + "long_run_time": False, + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Our IpdPlayer (Hopeless) vs Cooperator SEED --> 1 + opponent = axl.Cooperator() + opponent_actions = [C] * 5 + actions = [(C, C), (D, C), (C, C), (D, C), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + # Our IpdPlayer (Hopeless) vs Cooperator SEED --> 2 + opponent = axl.Cooperator() + actions = [(D, C), (C, C), (D, C), (C, C), (D, C)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + # Our IpdPlayer (Hopeless) vs Defector SEED --> 1 + opponent = axl.Defector() + actions = [(C, D), (C, D), 
(C, D), (C, D), (C, D)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + # Our IpdPlayer (Hopeless) vs Defector SEED --> 2 + opponent = axl.Defector() + actions = [(D, D), (C, D), (C, D), (C, D), (C, D)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + # Our IpdPlayer (Hopeless) vs Alternator SEED --> 1 + opponent = axl.Alternator() + actions = [(C, C), (D, D), (C, C), (D, D), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + # Our IpdPlayer (Hopeless) vs Alternator SEED --> 2 + opponent = axl.Alternator() + actions = [(D, C), (C, D), (C, C), (D, D), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + +class TestWilling(TestPlayer): + + name = "Willing" + player = axl.Willing + expected_classifier = { + "memory_depth": 1, + "long_run_time": False, + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Our IpdPlayer (Willing) vs Cooperator SEED --> 1 + opponent = axl.Cooperator() + opponent_actions = [C] * 5 + actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + # Our IpdPlayer (Willing) vs Cooperator SEED --> 2 + opponent = axl.Cooperator() + actions = [(D, C), (C, C), (C, C), (C, C), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + # Our IpdPlayer (Willing) vs Defector SEED --> 1 + opponent = axl.Defector() + actions = [(C, D), (C, D), (C, D), (C, D), (C, D)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + # Our IpdPlayer (Willing) vs Defector SEED --> 2 + opponent = axl.Defector() + actions = [(D, D), (D, D), (D, D), (D, D), (D, D)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + # Our IpdPlayer (Willing) vs Alternator SEED --> 1 + opponent = axl.Alternator() + actions = [(C, C), (C, D), (C, C), (C, D), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + # Our IpdPlayer (Willing) vs Alternator SEED --> 2 + opponent = axl.Alternator() + actions = [(D, C), (C, D), (C, C), (C, D), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=2) diff --git a/axelrod/tests/strategies/test_negation.py b/axelrod/tests/strategies/test_negation.py new file mode 100644 index 000000000..8c7542aaa --- /dev/null +++ b/axelrod/tests/strategies/test_negation.py @@ -0,0 +1,39 @@ +"""Tests for the Neg Strategy""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestNegation(TestPlayer): + + name = "Negation" + player = axl.Negation + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # First move is random. 
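+        # Illustrative sketch, assuming the library's Action.flip helper:
+        # after the random opening move, Negation plays the opposite of the
+        # opponent's previous move, roughly
+        #
+        #     return opponent.history[-1].flip()
+        #
+        # which the alternating outcomes below reflect.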
+ actions = [(C, C), (D, D), (C, C)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=1 + ) + actions = [(D, C), (D, D), (C, C)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) + actions = [(C, C), (D, C), (D, C)] + self.versus_test( + opponent=axl.Cooperator(), expected_actions=actions, seed=1 + ) + actions = [(D, D), (C, D), (C, D)] + self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=2) diff --git a/axelrod/tests/strategies/test_oncebitten.py b/axelrod/tests/strategies/test_oncebitten.py new file mode 100644 index 000000000..c152bce10 --- /dev/null +++ b/axelrod/tests/strategies/test_oncebitten.py @@ -0,0 +1,142 @@ +"""Tests for the once bitten strategy.""" + +import random + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestOnceBitten(TestPlayer): + + name = "Once Bitten" + player = axl.OnceBitten + expected_classifier = { + "memory_depth": 12, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """If opponent defects at any point then the player will defect + forever.""" + # Become grudged if the opponent defects twice in a row + opponent = axl.MockPlayer([C, C, C, D]) + actions = [(C, C), (C, C), (C, C), (C, D), (C, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": False, "grudge_memory": 0}, + ) + + opponent = axl.MockPlayer([C, C, C, D, D, D]) + actions = [ + (C, C), + (C, C), + (C, C), + (C, D), + (C, D), + (D, D), + (D, C), + (D, C), + (D, C), + (D, D), + (D, D), + ] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": True, "grudge_memory": 5}, + ) + + # After 10 rounds of being grudged: forgives + opponent = axl.MockPlayer([C, D, D, C] + [C] * 10) + actions = [(C, C), (C, D), (C, D), (D, C)] + [(D, C)] * 10 + [(C, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": False, "grudge_memory": 0}, + ) + + def test_reset(self): + """Check that grudged gets reset properly""" + p1 = self.player() + p2 = axl.Defector() + p1.play(p2) + p1.play(p2) + p1.play(p2) + self.assertTrue(p1.grudged) + p1.reset() + self.assertFalse(p1.grudged) + + +class TestFoolMeOnce(TestPlayer): + + name = "Fool Me Once" + player = axl.FoolMeOnce + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent defects more than once, defect forever + actions = [(C, C)] * 10 + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + opponent = axl.MockPlayer([D] + [C] * 9) + actions = [(C, D)] + [(C, C)] * 9 + self.versus_test(opponent=opponent, expected_actions=actions) + + actions = [(C, D)] * 2 + [(D, D)] * 8 + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + opponent = axl.MockPlayer([D, D] + [C] * 9) + actions = [(C, D)] * 2 + [(D, C)] * 8 + self.versus_test(opponent=opponent, expected_actions=actions) + + +class TestForgetfulFoolMeOnce(TestPlayer): + + name = "Forgetful Fool Me Once: 0.05" + player = axl.ForgetfulFoolMeOnce + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": 
set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Test that will forgive one D but will grudge after 2 Ds, randomly + # forgets count. + actions = [(C, C), (C, D), (C, C), (C, D), (D, C)] + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + seed=2, + attrs={"D_count": 2}, + ) + + # Sometime eventually forget count: + actions = [(C, D), (C, D)] + [(D, D)] * 18 + [(C, D)] + self.versus_test( + opponent=axl.Defector(), + expected_actions=actions, + seed=2, + attrs={"D_count": 0}, + ) diff --git a/axelrod/tests/strategies/test_player.py b/axelrod/tests/strategies/test_player.py new file mode 100644 index 000000000..dba3d29d8 --- /dev/null +++ b/axelrod/tests/strategies/test_player.py @@ -0,0 +1,735 @@ +import unittest +import itertools +import pickle +import random +import types +import numpy as np + +import axelrod as axl +from axelrod.player import simultaneous_play +from axelrod.tests.property import strategy_lists + +from hypothesis import given, settings +from hypothesis.strategies import integers, sampled_from + +C, D = axl.Action.C, axl.Action.D + +short_run_time_short_mem = [ + s + for s in axl.short_run_time_strategies + if axl.Classifiers["memory_depth"](s()) <= 10 +] + + +# Generic strategy functions for testing + + +def cooperate(*args): + return C + + +def defect(*args): + return D + + +# Test classifier used to create tests players +_test_classifier = { + "memory_depth": 0, + "stochastic": False, + "makes_use_of": None, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, +} + + +class ParameterisedTestPlayer(axl.IpdPlayer): + """A simple IpdPlayer class for testing init parameters""" + + name = "ParameterisedTestPlayer" + classifier = _test_classifier + + def __init__(self, arg_test1="testing1", arg_test2="testing2"): + super().__init__() + + +class TestPlayerClass(unittest.TestCase): + name = "IpdPlayer" + player = axl.IpdPlayer + classifier = {"stochastic": False} + + def test_play(self): + player1, player2 = self.player(), self.player() + player1.strategy = cooperate + player2.strategy = defect + player1.play(player2) + self.assertEqual(player1.history[0], C) + self.assertEqual(player2.history[0], D) + + # Test cooperation / defection counts + self.assertEqual(player1.cooperations, 1) + self.assertEqual(player1.defections, 0) + self.assertEqual(player2.cooperations, 0) + self.assertEqual(player2.defections, 1) + # Test state distribution + self.assertEqual(player1.state_distribution, {(C, D): 1}) + self.assertEqual(player2.state_distribution, {(D, C): 1}) + + player1.play(player2) + self.assertEqual(player1.history[-1], C) + self.assertEqual(player2.history[-1], D) + # Test cooperation / defection counts + self.assertEqual(player1.cooperations, 2) + self.assertEqual(player1.defections, 0) + self.assertEqual(player2.cooperations, 0) + self.assertEqual(player2.defections, 2) + # Test state distribution + self.assertEqual(player1.state_distribution, {(C, D): 2}) + self.assertEqual(player2.state_distribution, {(D, C): 2}) + + def test_state_distribution(self): + player1 = axl.MockPlayer([C, C, D, D, C]) + player2 = axl.MockPlayer([C, D, C, D, D]) + match = axl.IpdMatch((player1, player2), turns=5) + _ = match.play() + self.assertEqual( + player1.state_distribution, + {(C, C): 1, (C, D): 2, (D, C): 1, (D, D): 1}, + ) + self.assertEqual( + player2.state_distribution, + {(C, C): 1, (C, D): 1, (D, C): 2, (D, D): 1}, 
+ ) + + def test_noisy_play(self): + axl.seed(1) + noise = 0.2 + player1, player2 = self.player(), self.player() + player1.strategy = cooperate + player2.strategy = defect + player1.play(player2, noise) + self.assertEqual(player1.history[0], D) + self.assertEqual(player2.history[0], D) + + def test_update_history(self): + player = axl.IpdPlayer() + self.assertEqual(player.history, []) + self.assertEqual(player.cooperations, 0) + self.assertEqual(player.defections, 0) + player.history.append(D, C) + self.assertEqual(player.history, [D]) + self.assertEqual(player.defections, 1) + self.assertEqual(player.cooperations, 0) + player.history.append(C, C) + self.assertEqual(player.history, [D, C]) + self.assertEqual(player.defections, 1) + self.assertEqual(player.cooperations, 1) + + def test_history_assignment(self): + player = axl.IpdPlayer() + with self.assertRaises(AttributeError): + player.history = [] + + def test_strategy(self): + self.assertRaises( + NotImplementedError, self.player().strategy, self.player() + ) + + def test_clone(self): + """Tests player cloning.""" + player1 = axl.Random(p=0.75) # 0.5 is the default + player2 = player1.clone() + turns = 50 + for op in [axl.Cooperator(), axl.Defector(), axl.TitForTat()]: + player1.reset() + player2.reset() + seed = random.randint(0, 10 ** 6) + for p in [player1, player2]: + axl.seed(seed) + m = axl.IpdMatch((p, op), turns=turns) + m.play() + self.assertEqual(len(player1.history), turns) + self.assertEqual(player1.history, player2.history) + + def test_equality(self): + """Test the equality method for some bespoke cases""" + # Check repr + p1 = axl.Cooperator() + p2 = axl.Cooperator() + self.assertEqual(p1, p2) + p1.__repr__ = lambda: "John Nash" + self.assertNotEqual(p1, p2) + + # Check attributes + p1 = axl.Cooperator() + p2 = axl.Cooperator() + p1.test = "29" + self.assertNotEqual(p1, p2) + + p1 = axl.Cooperator() + p2 = axl.Cooperator() + p2.test = "29" + self.assertNotEqual(p1, p2) + + p1.test = "29" + self.assertEqual(p1, p2) + + # Check that attributes of both players are tested. + p1.another_attribute = [1, 2, 3] + self.assertNotEqual(p1, p2) + p2.another_attribute = [1, 2, 3] + self.assertEqual(p1, p2) + + p2.another_attribute_2 = {1: 2} + self.assertNotEqual(p1, p2) + p1.another_attribute_2 = {1: 2} + self.assertEqual(p1, p2) + + def test_equality_for_numpy_array(self): + """Check numpy array attribute (a special case)""" + p1 = axl.Cooperator() + p2 = axl.Cooperator() + + p1.array = np.array([0, 1]) + p2.array = np.array([0, 1]) + self.assertEqual(p1, p2) + + p2.array = np.array([1, 0]) + self.assertNotEqual(p1, p2) + + def test_equality_for_generator(self): + """Test equality works with generator attribute and that the generator + attribute is not altered during checking of equality""" + p1 = axl.Cooperator() + p2 = axl.Cooperator() + + # Check that players are equal with generator + p1.generator = (i for i in range(10)) + p2.generator = (i for i in range(10)) + self.assertEqual(p1, p2) + + # Check state of one generator (ensure it hasn't changed) + n = next(p2.generator) + self.assertEqual(n, 0) + + # Players are no longer equal (one generator has changed) + self.assertNotEqual(p1, p2) + + # Check that internal generator object has not been changed for either + # player after latest equal check. 
+ self.assertEqual(list(p1.generator), list(range(10))) + self.assertEqual(list(p2.generator), list(range(1, 10))) + + # Check that type is generator + self.assertIsInstance(p2.generator, types.GeneratorType) + + def test_equality_for_cycle(self): + """Test equality works with cycle attribute and that the cycle attribute + is not altered during checking of equality""" + # Check cycle attribute (a special case) + p1 = axl.Cooperator() + p2 = axl.Cooperator() + + # Check that players are equal with cycle + p1.cycle = itertools.cycle(range(10)) + p2.cycle = itertools.cycle(range(10)) + self.assertEqual(p1, p2) + + # Check state of one generator (ensure it hasn't changed) + n = next(p2.cycle) + self.assertEqual(n, 0) + + # Players are no longer equal (one generator has changed) + self.assertNotEqual(p1, p2) + + # Check that internal cycle object has not been changed for either + # player after latest not equal check. + self.assertEqual(next(p1.cycle), 0) + self.assertEqual(next(p2.cycle), 1) + + # Check that type is cycle + self.assertIsInstance(p2.cycle, itertools.cycle) + + def test_equality_on_init(self): + """Test instances of all strategies are equal on init""" + for s in axl.strategies: + p1, p2 = s(), s() + # Check three times (so testing equality doesn't change anything) + self.assertEqual(p1, p2) + self.assertEqual(p1, p2) + self.assertEqual(p1, p2) + + def test_equality_with_player_as_attributes(self): + """Test for a strange edge case where players have pointers to each + other""" + p1 = axl.Cooperator() + p2 = axl.Cooperator() + + # If pointing at each other + p1.player = p2 + p2.player = p1 + self.assertEqual(p1, p2) + + # Still checking other attributes. + p1.test_attribute = "29" + self.assertNotEqual(p1, p2) + + # If pointing at same strategy instances + p1.player = axl.Cooperator() + p2.player = axl.Cooperator() + p2.test_attribute = "29" + self.assertEqual(p1, p2) + + # If pointing at different strategy instances + p1.player = axl.Cooperator() + p2.player = axl.Defector() + self.assertNotEqual(p1, p2) + + # If different strategies pointing at same strategy instances + p3 = axl.Defector() + p1.player = axl.Cooperator() + p3.player = axl.Cooperator() + self.assertNotEqual(p1, p3) + + def test_init_params(self): + """Tests player correct parameters signature detection.""" + self.assertEqual(self.player.init_params(), {}) + self.assertEqual( + ParameterisedTestPlayer.init_params(), + {"arg_test1": "testing1", "arg_test2": "testing2"}, + ) + self.assertEqual( + ParameterisedTestPlayer.init_params(arg_test1="other"), + {"arg_test1": "other", "arg_test2": "testing2"}, + ) + self.assertEqual( + ParameterisedTestPlayer.init_params(arg_test2="other"), + {"arg_test1": "testing1", "arg_test2": "other"}, + ) + self.assertEqual( + ParameterisedTestPlayer.init_params("other"), + {"arg_test1": "other", "arg_test2": "testing2"}, + ) + + def test_init_kwargs(self): + """Tests player correct parameters caching.""" + + # Tests for Players with no init parameters + + # Test that init_kwargs exist and are empty + self.assertEqual(self.player().init_kwargs, {}) + # Test that passing a positional argument raises an error + self.assertRaises(TypeError, axl.IpdPlayer, "test") + # Test that passing a keyword argument raises an error + self.assertRaises(TypeError, axl.IpdPlayer, arg_test1="test") + + # Tests for Players with init parameters + + # Test that init_kwargs exist and contains default values + self.assertEqual( + ParameterisedTestPlayer().init_kwargs, + {"arg_test1": "testing1", "arg_test2": 
"testing2"}, + ) + # Test that passing a keyword argument successfully change the + # init_kwargs dict. + self.assertEqual( + ParameterisedTestPlayer(arg_test1="other").init_kwargs, + {"arg_test1": "other", "arg_test2": "testing2"}, + ) + self.assertEqual( + ParameterisedTestPlayer(arg_test2="other").init_kwargs, + {"arg_test1": "testing1", "arg_test2": "other"}, + ) + # Test that passing a positional argument successfully change the + # init_kwargs dict. + self.assertEqual( + ParameterisedTestPlayer("other", "other2").init_kwargs, + {"arg_test1": "other", "arg_test2": "other2"}, + ) + # Test that passing an unknown keyword argument or a spare one raises + # an error. + self.assertRaises(TypeError, ParameterisedTestPlayer, arg_test3="test") + self.assertRaises( + TypeError, ParameterisedTestPlayer, "other", "other", "other" + ) + + +class TestOpponent(axl.IpdPlayer): + """A player who only exists so we have something to test against""" + + name = "TestOpponent" + classifier = _test_classifier + + @staticmethod + def strategy(opponent): + return C + + +class TestPlayer(unittest.TestCase): + """A Test class from which other player test classes are inherited.""" + + player = TestOpponent + expected_class_classifier = None + + def test_initialisation(self): + """Test that the player initiates correctly.""" + if self.__class__ != TestPlayer: + player = self.player() + self.assertEqual(len(player.history), 0) + self.assertEqual( + player.match_attributes, + {"length": -1, "game": axl.DefaultGame, "noise": 0}, + ) + self.assertEqual(player.cooperations, 0) + self.assertEqual(player.defections, 0) + self.classifier_test(self.expected_class_classifier) + + def test_repr(self): + """Test that the representation is correct.""" + if self.__class__ != TestPlayer: + self.assertEqual(str(self.player()), self.name) + + def test_match_attributes(self): + player = self.player() + # Default + player.set_match_attributes() + t_attrs = player.match_attributes + self.assertEqual(t_attrs["length"], -1) + self.assertEqual(t_attrs["noise"], 0) + self.assertEqual(t_attrs["game"].RPST(), (3, 1, 0, 5)) + + # Common + player.set_match_attributes(length=200) + t_attrs = player.match_attributes + self.assertEqual(t_attrs["length"], 200) + self.assertEqual(t_attrs["noise"], 0) + self.assertEqual(t_attrs["game"].RPST(), (3, 1, 0, 5)) + + # Noisy + player.set_match_attributes(length=200, noise=0.5) + t_attrs = player.match_attributes + self.assertEqual(t_attrs["noise"], 0.5) + + def equality_of_players_test(self, p1, p2, seed, opponent): + a1 = opponent() + a2 = opponent() + self.assertEqual(p1, p2) + for player, op in [(p1, a1), (p2, a2)]: + axl.seed(seed) + for _ in range(10): + simultaneous_play(player, op) + self.assertEqual(p1, p2) + p1 = pickle.loads(pickle.dumps(p1)) + p2 = pickle.loads(pickle.dumps(p2)) + self.assertEqual(p1, p2) + + @given( + opponent=sampled_from(short_run_time_short_mem), + seed=integers(min_value=1, max_value=200), + ) + @settings(max_examples=1) + def test_equality_of_clone(self, seed, opponent): + p1 = self.player() + p2 = p1.clone() + self.equality_of_players_test(p1, p2, seed, opponent) + + @given( + opponent=sampled_from(axl.short_run_time_strategies), + seed=integers(min_value=1, max_value=200), + ) + @settings(max_examples=1) + def test_equality_of_pickle_clone(self, seed, opponent): + p1 = self.player() + p2 = pickle.loads(pickle.dumps(p1)) + self.equality_of_players_test(p1, p2, seed, opponent) + + def test_reset_history_and_attributes(self): + """Make sure resetting works 
correctly.""" + for opponent in [ + axl.Defector(), + axl.Random(), + axl.Alternator(), + axl.Cooperator(), + ]: + + player = self.player() + clone = player.clone() + for seed in range(10): + axl.seed(seed) + player.play(opponent) + + player.reset() + self.assertEqual(player, clone) + + def test_reset_clone(self): + """Make sure history resetting with cloning works correctly, regardless + if self.test_reset() is overwritten.""" + player = self.player() + clone = player.clone() + self.assertEqual(player, clone) + + @given(seed=integers(min_value=1, max_value=20000000)) + @settings(max_examples=1) + def test_clone(self, seed): + # Test that the cloned player produces identical play + player1 = self.player() + if player1.name in ["Darwin", "Human"]: + # Known exceptions + return + player2 = player1.clone() + self.assertEqual(len(player2.history), 0) + self.assertEqual(player2.cooperations, 0) + self.assertEqual(player2.defections, 0) + self.assertEqual(player2.state_distribution, {}) + self.assertEqual(player2.classifier, player1.classifier) + self.assertEqual(player2.match_attributes, player1.match_attributes) + + turns = 50 + r = random.random() + for op in [ + axl.Cooperator(), + axl.Defector(), + axl.TitForTat(), + axl.Random(p=r), + ]: + player1.reset() + player2.reset() + for p in [player1, player2]: + axl.seed(seed) + m = axl.IpdMatch((p, op), turns=turns) + m.play() + self.assertEqual(len(player1.history), turns) + self.assertEqual(player1.history, player2.history) + + @given( + strategies=strategy_lists( + max_size=5, strategies=short_run_time_short_mem + ), + seed=integers(min_value=1, max_value=200), + turns=integers(min_value=1, max_value=200), + ) + @settings(max_examples=1) + def test_memory_depth_upper_bound(self, strategies, seed, turns): + """ + Test that the memory depth is indeed an upper bound. + """ + + def get_memory_depth_or_zero(player): + # Some of the test strategies have no entry in the classifiers + # table, so there isn't logic to load default value of zero. + memory = axl.Classifiers["memory_depth"](player) + return memory if memory else 0 + + player = self.player() + memory = get_memory_depth_or_zero(player) + if memory < float("inf"): + for strategy in strategies: + player.reset() + opponent = strategy() + max_memory = max(memory, get_memory_depth_or_zero(opponent)) + self.assertTrue( + test_memory( + player=player, + opponent=opponent, + seed=seed, + turns=turns, + memory_length=max_memory, + ), + msg="{} failed for seed={} and opponent={}".format( + player.name, seed, opponent + ), + ) + + def versus_test( + self, + opponent, + expected_actions, + noise=None, + seed=None, + match_attributes=None, + attrs=None, + init_kwargs=None, + ): + """ + Tests a sequence of outcomes for two given players. + Parameters: + ----------- + opponent: IpdPlayer or list + An instance of a player OR a sequence of actions. If a sequence of + actions is passed, a Mock IpdPlayer is created that cycles over that + sequence. + expected_actions: List + The expected outcomes of the match (list of tuples of actions). + noise: float + Any noise to be passed to a match + seed: int + The random seed to be used + length: int + The length of the game. If `opponent` is a sequence of actions then + the length is taken to be the length of the sequence. + match_attributes: dict + The match attributes to be passed to the players. For example, + `{length:-1}` implies that the players do not know the length of the + match. 
+ attrs: dict + Dictionary of internal attributes to check at the end of all plays + in player + init_kwargs: dict + A dictionary of keyword arguments to instantiate player with + """ + + turns = len(expected_actions) + if init_kwargs is None: + init_kwargs = dict() + + if seed is not None: + axl.seed(seed) + + player = self.player(**init_kwargs) + + match = axl.IpdMatch( + (player, opponent), + turns=turns, + noise=noise, + match_attributes=match_attributes, + ) + self.assertEqual(match.play(), expected_actions) + + if attrs: + player = match.players[0] + for attr, value in attrs.items(): + self.assertEqual(getattr(player, attr), value) + + def classifier_test(self, expected_class_classifier=None): + """Test that the keys in the expected_classifier dictionary give the + expected values in the player classifier dictionary. Also checks that + two particular keys (memory_depth and stochastic) are in the + dictionary.""" + player = self.player() + + # Test that player has same classifier as its class unless otherwise + # specified + if expected_class_classifier is None: + expected_class_classifier = player.classifier + actual_class_classifier = { + c: axl.Classifiers[c](player) + for c in expected_class_classifier.keys() + } + self.assertEqual(expected_class_classifier, actual_class_classifier) + + self.assertTrue( + "memory_depth" in player.classifier, + msg="memory_depth not in classifier", + ) + self.assertTrue( + "stochastic" in player.classifier, + msg="stochastic not in classifier", + ) + for key in TestOpponent.classifier: + self.assertEqual( + axl.Classifiers[key](player), + self.expected_classifier[key], + msg="%s - Behaviour: %s != Expected Behaviour: %s" + % ( + key, + axl.Classifiers[key](player), + self.expected_classifier[key], + ), + ) + + +class TestMatch(unittest.TestCase): + """Test class for heads up play between two given players. Plays an + axelrod match between the two players.""" + + def versus_test( + self, + player1, + player2, + expected_actions1, + expected_actions2, + noise=None, + seed=None, + ): + """Tests a sequence of outcomes for two given players.""" + if len(expected_actions1) != len(expected_actions2): + raise ValueError("Mismatched History lengths.") + if seed: + axl.seed(seed) + turns = len(expected_actions1) + match = axl.IpdMatch((player1, player2), turns=turns, noise=noise) + match.play() + # Test expected sequence of play. + for i, (outcome1, outcome2) in enumerate( + zip(expected_actions1, expected_actions2) + ): + player1.play(player2) + self.assertEqual(player1.history[i], outcome1) + self.assertEqual(player2.history[i], outcome2) + + def test_versus_with_incorrect_history_lengths(self): + """Test the error raised by versus_test if expected actions do not + match up""" + with self.assertRaises(ValueError): + p1, p2 = axl.Cooperator(), axl.Cooperator() + actions1 = [C, C] + actions2 = [C] + self.versus_test(p1, p2, actions1, actions2) + + +def test_four_vector(test_class, expected_dictionary): + """ + Checks that two dictionaries match -- the four-vector defining + a memory-one strategy and the given expected dictionary. + """ + player1 = test_class.player() + for key in sorted(expected_dictionary.keys(), key=str): + test_class.assertAlmostEqual( + player1._four_vector[key], expected_dictionary[key] + ) + + +def test_memory(player, opponent, memory_length, seed=0, turns=10): + """ + Checks if a player reacts to the plays of an opponent in the same way if + only the given amount of memory is used. + """ + # Play the match normally. 
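+ # A sketch of the technique used in this function: play the match once + # with full histories, then replay with both players' histories capped by + # axl.LimitedHistory(memory_length); a strategy whose declared memory + # depth is honest should produce the identical sequence of plays both ways.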
+ axl.seed(seed) + match = axl.IpdMatch((player, opponent), turns=turns) + plays = [p[0] for p in match.play()] + + # Play with limited history. + player.reset() + opponent.reset() + player._history = axl.LimitedHistory(memory_length) + opponent._history = axl.LimitedHistory(memory_length) + axl.seed(seed) + match = axl.IpdMatch((player, opponent), turns=turns, reset=False) + limited_plays = [p[0] for p in match.play()] + + return plays == limited_plays + + +class TestMemoryTest(unittest.TestCase): + """ + Test for the memory test function. + """ + + def test_passes(self): + """ + The memory test function returns True in this case as the correct mem + length is used + """ + player = axl.TitFor2Tats() + opponent = axl.Defector() + self.assertTrue(test_memory(player, opponent, memory_length=2)) + + def test_failures(self): + """ + The memory test function returns False in this case as the incorrect mem + length is used + """ + player = axl.TitFor2Tats() + opponent = axl.Defector() + self.assertFalse(test_memory(player, opponent, memory_length=1)) diff --git a/axelrod/tests/strategies/test_prober.py b/axelrod/tests/strategies/test_prober.py new file mode 100644 index 000000000..771e0115a --- /dev/null +++ b/axelrod/tests/strategies/test_prober.py @@ -0,0 +1,385 @@ +"""Tests for Prober strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestCollectiveStrategy(TestPlayer): + + name = "CollectiveStrategy" + player = axl.CollectiveStrategy + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If handshake (C, D) is used cooperate until a defection occurs and + # then defect throughout + opponent = axl.MockPlayer([C, D] + [C] * 10) + actions = [(C, C), (D, D)] + [(C, C)] * 11 + [(C, D)] + [(D, C)] * 10 + self.versus_test(opponent=opponent, expected_actions=actions) + + # If handshake is not used: defect + actions = [(C, C), (D, C)] + [(D, C)] * 15 + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D)] + [(D, D)] * 15 + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + +class TestDetective(TestPlayer): + + name = "Detective" + player = axl.Detective + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + self.versus_test( + opponent=axl.TitForTat(), + expected_actions=[(C, C), (D, C), (C, D)] + [(C, C)] * 15, + ) + + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=[(C, C), (D, C), (C, C), (C, C)] + [(D, C)] * 15, + ) + + self.versus_test( + opponent=axl.Defector(), + expected_actions=[(C, D), (D, D), (C, D), (C, D)] + [(D, D)] * 15, + ) + + def test_other_initial_actions(self): + self.versus_test( + opponent=axl.TitForTat(), + expected_actions=[(C, C), (C, C), (D, C)] + [(D, D)] * 15, + init_kwargs={"initial_actions": [C, C]}, + ) + + # Extreme case: no memory at all, it's simply a defector + self.versus_test( + opponent=axl.TitForTat(), + expected_actions=[(D, C)] + [(D, D)] * 15, + init_kwargs={"initial_actions": []}, + ) + + +class TestProber(TestPlayer): + + name = "Prober" + player = axl.Prober + expected_classifier = { + "memory_depth": 
float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Starts by playing DCC. + # Defects forever if opponent cooperated in moves 2 and 3 + actions = [(D, C), (C, C), (C, C)] + [(D, C)] * 3 + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + opponent = axl.MockPlayer([D, C, C]) + actions = [(D, D), (C, C), (C, C)] + [(D, D), (D, C), (D, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + # Otherwise it plays like TFT + actions = [(D, C), (C, D), (C, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + actions = [(D, D), (C, D), (C, D), (D, D), (D, D)] + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + +class TestProber2(TestPlayer): + + name = "Prober 2" + player = axl.Prober2 + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Starts by playing DCC. + # Cooperates forever if opponent played D, C in moves 2 and 3 + actions = [(D, C), (C, D), (C, C)] + [(C, D), (C, C), (C, D)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + opponent = axl.MockPlayer([D, D, C]) + actions = [(D, D), (C, D), (C, C)] + [(C, D), (C, D), (C, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + # Otherwise it plays like TFT + actions = [(D, C), (C, C), (C, C), (C, C), (C, C)] + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + actions = [(D, D), (C, D), (C, D), (D, D), (D, D)] + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + opponent = axl.MockPlayer([D, C]) + actions = [(D, D), (C, C), (C, D)] + [(D, C), (C, D), (D, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + +class TestProber3(TestPlayer): + + name = "Prober 3" + player = axl.Prober3 + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Starts by playing DC. + # Defects forever if opponent played C in move 2. + actions = [(D, C), (C, C)] + [(D, C), (D, C), (D, C)] + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + opponent = axl.MockPlayer([D, C]) + actions = [(D, D), (C, C)] + [(D, D), (D, C), (D, D)] + self.versus_test(opponent=opponent, expected_actions=actions) + + # Otherwise it plays like TFT + actions = [(D, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + actions = [(D, D), (C, D), (D, D), (D, D), (D, D)] + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + +class TestProber4(TestPlayer): + + name = "Prober 4" + player = axl.Prober4 + expected_classifier = { + "stochastic": False, + "memory_depth": float("inf"), + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + initial_sequence = [C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D] + + def test_strategy(self): + # Starts by playing CCDCDDDCCDCDCCDCDDCD. 
+ # After playing the initial sequence defects forever + # if the absolute difference in the number of retaliating + # and provocative defections of the opponent is smaller or equal to 2 + provocative_histories = [ + [C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, D, C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, C, D, C, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D, D], + ] + + attrs = {"turned_defector": True} + for history in provocative_histories: + opponent = axl.MockPlayer(history + [C] * 5) + actions = list(zip(self.initial_sequence, history)) + [(D, C)] * 5 + self.versus_test(opponent=opponent, expected_actions=actions, attrs=attrs) + + # Otherwise cooperates for 5 rounds and plays TfT afterwards + unprovocative_histories = [ + [C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D], + [D, D, C, D, C, C, C, D, D, C, D, C, D, D, C, D, C, C, D, C], + [C, C, D, C, D, D, C, C, C, C, C, C, C, C, C, C, C, C, C, C], + [C, C, D, C, D, D, C, C, D, C, C, C, C, C, C, D, D, D, C, C], + [C, C, C, C, D, D, C, C, D, C, C, D, D, C, D, C, D, C, C, C], + ] + + attrs = {"turned_defector": False} + for history in unprovocative_histories: + opponent = axl.MockPlayer(history + [D] * 5 + [C, C]) + actions = list(zip(self.initial_sequence, history)) + [(C, D)] * 5 + actions += [(D, C), (C, C)] + self.versus_test(opponent=opponent, expected_actions=actions, attrs=attrs) + + +class TestHardProber(TestPlayer): + + name = "Hard Prober" + player = axl.HardProber + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Starts by playing DDCC + # Defects forever if opponent played C in moves 2 and 3 + actions = [(D, C), (D, C), (C, C), (C, C)] + [(D, C), (D, C), (D, C)] + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + opponent = axl.MockPlayer([D, C, C, D]) + actions = [(D, D), (D, C), (C, C), (C, D)] + [(D, D), (D, C), (D, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + # Otherwise it plays like TFT + actions = [(D, C), (D, D), (C, C), (C, D)] + [(D, C), (C, D), (D, C)] + self.versus_test(opponent=axl.Alternator(), expected_actions=actions) + + actions = [(D, D), (D, D), (C, D), (C, D)] + [(D, D), (D, D), (D, D)] + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + +class TestNaiveProber(TestPlayer): + + name = "Naive Prober: 0.1" + player = axl.NaiveProber + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Always retaliate a defection + opponent = axl.MockPlayer([C, D, D, D, D]) + actions = [(C, C), (C, D), (D, D), (D, D), (D, D)] + self.versus_test(opponent=opponent, expected_actions=actions) + + def test_random_defection(self): + # Unprovoked defection with small probability + actions = [(C, C), (D, C), (D, C), (C, C), (C, C)] + self.versus_test( + opponent=axl.Cooperator(), expected_actions=actions, seed=2 + ) + + actions = [(C, C), (C, C), (C, C), (C, C), (D, C)] + self.versus_test( + 
opponent=axl.Cooperator(), expected_actions=actions, seed=5 + ) + + # Always defect when p is 1 + actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + init_kwargs={"p": 1}, + ) + + def test_reduction_to_TFT(self): + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + init_kwargs={"p": 0}, + ) + + +class TestRemorsefulProber(TestPlayer): + + name = "Remorseful Prober: 0.1" + player = axl.RemorsefulProber + expected_classifier = { + "memory_depth": 2, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Always retaliate a defection + actions = [(C, D)] + [(D, D)] * 10 + self.versus_test( + opponent=axl.Defector(), + expected_actions=actions, + attrs={"probing": False}, + ) + + def test_random_defection(self): + # Unprovoked defection with small probability + actions = [(C, C), (D, C), (D, C)] + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + seed=2, + attrs={"probing": True}, + ) + + actions = [(C, C), (C, C), (C, C), (C, C), (D, C)] + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + seed=5, + attrs={"probing": True}, + ) + + # Always defect when p is 1 + actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + init_kwargs={"p": 1}, + attrs={"probing": True}, + ) + + def test_remorse(self): + """After probing, if opponent retaliates, will offer a C.""" + opponent = axl.MockPlayer([C, C, D, C]) + actions = [(C, C), (D, C), (D, D), (C, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + seed=2, + attrs={"probing": False}, + ) + + def test_reduction_to_TFT(self): + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + init_kwargs={"p": 0}, + attrs={"probing": False}, + ) diff --git a/axelrod/tests/strategies/test_punisher.py b/axelrod/tests/strategies/test_punisher.py new file mode 100644 index 000000000..77a8db244 --- /dev/null +++ b/axelrod/tests/strategies/test_punisher.py @@ -0,0 +1,194 @@ +"""Tests for the Punisher strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestPunisher(TestPlayer): + + name = "Punisher" + player = axl.Punisher + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_init(self): + """Tests for the __init__ method.""" + player = axl.Punisher() + self.assertEqual(player.mem_length, 1) + self.assertFalse(player.grudged) + self.assertEqual(player.grudge_memory, 1) + + def test_strategy(self): + opponent = axl.Alternator() + actions = [(C, C), (C, D), (D, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": True, "grudge_memory": 0}, + ) + + opponent = axl.MockPlayer([C, D] + [C] * 10) + actions = [(C, C), (C, D)] + [(D, C)] * 11 + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": True, "grudge_memory": 10}, + ) + + # Eventually the grudge is dropped + opponent = axl.MockPlayer([C, D] + [C] * 10) + actions = 
[(C, C), (C, D)] + [(D, C)] * 11 + [(C, D)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": False, "grudge_memory": 0, "mem_length": 10}, + ) + + # Grudged again on opponent's D + opponent = axl.MockPlayer([C, D] + [C] * 11) + actions = [(C, C), (C, D)] + [(D, C)] * 11 + [(C, C), (C, D), (D, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": True, "grudge_memory": 0, "mem_length": 2}, + ) + + +class TestInversePunisher(TestPlayer): + + name = "Inverse Punisher" + player = axl.InversePunisher + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_init(self): + """Tests for the __init__ method.""" + player = axl.InversePunisher() + self.assertEqual(player.mem_length, 1) + self.assertFalse(player.grudged) + self.assertEqual(player.grudge_memory, 1) + + def test_strategy(self): + opponent = axl.Alternator() + actions = [(C, C), (C, D), (D, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": True, "grudge_memory": 0}, + ) + + opponent = axl.MockPlayer([C, D] + [C] * 10) + actions = [(C, C), (C, D)] + [(D, C)] * 11 + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": True, "grudge_memory": 10}, + ) + + # Eventually the grudge is dropped + opponent = axl.MockPlayer([C, D] + [C] * 10) + actions = [(C, C), (C, D)] + [(D, C)] * 11 + [(C, D)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": False, "grudge_memory": 0, "mem_length": 10}, + ) + + # Grudged again on opponent's D + opponent = axl.MockPlayer([C, D] + [C] * 11) + actions = [(C, C), (C, D)] + [(D, C)] * 11 + [(C, C), (C, D), (D, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"grudged": True, "grudge_memory": 0, "mem_length": 17}, + ) + + +class TestLevelPunisher(TestPlayer): + + name = "Level Punisher" + player = axl.LevelPunisher + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Cooperates if the turns played are less than 10. + actions = [(C, C)] * 9 + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + # After 10 rounds + # Check if number of defections by opponent is greater than 20% + opponent = axl.MockPlayer([C] * 4 + [D] * 2 + [C] * 3 + [D]) + actions = [(C, C)] * 4 + [(C, D)] * 2 + [(C, C)] * 3 + [(C, D), (D, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + # Check if number of defections by opponent is less than 20% + opponent = axl.MockPlayer([C] * 4 + [D] + [C] * 4 + [D]) + actions = [(C, C)] * 4 + [(C, D)] + [(C, C)] * 4 + [(C, D), (C, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + +class TestTrickyLevelPunisher(TestPlayer): + + name = "Tricky Level Punisher" + player = axl.TrickyLevelPunisher + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Cooperates if the turns played are less than 10.
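+ # (the 9 rounds of mutual cooperation below stay under that 10-turn + # threshold, so no defection is expected)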
+ actions = [(C, C)] * 9 + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + # After 10 rounds + # Check if number of defections by opponent is greater than 20% + opponent = axl.MockPlayer([C] * 4 + [D] * 2 + [C] * 3 + [D]) + actions = [(C, C)] * 4 + [(C, D)] * 2 + [(C, C)] * 3 + [(C, D), (D, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + # Check if number of defections by opponent is greater than 10% + opponent = axl.MockPlayer([C] * 4 + [D] + [C] * 4 + [D]) + actions = [(C, C)] * 4 + [(C, D)] + [(C, C)] * 4 + [(C, D), (C, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + # After 10 rounds + # Check if number of defections by opponent is greater than 5% + opponent = axl.MockPlayer([C] * 4 + [D] + [C] * 5) + actions = [(C, C)] * 4 + [(C, D)] + [(C, C)] * 5 + self.versus_test(opponent=opponent, expected_actions=actions) + + # Check if number of defections by opponent is less than 5% + opponent = axl.MockPlayer([C] * 10) + actions = [(C, C)] * 5 + self.versus_test(opponent=opponent, expected_actions=actions) diff --git a/axelrod/tests/strategies/test_qlearner.py b/axelrod/tests/strategies/test_qlearner.py new file mode 100644 index 000000000..1b07a4bfb --- /dev/null +++ b/axelrod/tests/strategies/test_qlearner.py @@ -0,0 +1,151 @@ +"""Tests for the QLearner strategies.""" + +import random + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestRiskyQLearner(TestPlayer): + + name = "Risky QLearner" + player = axl.RiskyQLearner + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_payoff_matrix(self): + (R, P, S, T) = axl.IpdGame().RPST() + payoff_matrix = {C: {C: R, D: S}, D: {C: T, D: P}} + player = self.player() + self.assertEqual(player.payoff_matrix, payoff_matrix) + + def test_strategy(self): + actions = [(C, C), (D, C), (C, C), (C, C)] + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + seed=5, + attrs={ + "Qs": { + "": {C: 0, D: 0.9}, + "0.0": {C: 2.7, D: 0}, + "C1.0": {C: 0, D: 4.5}, + "CC2.0": {C: 2.7, D: 0}, + "CCC3.0": {C: 0, D: 0}, + }, + "Vs": {"": 0.9, "0.0": 2.7, "C1.0": 4.5, "CC2.0": 2.7, "CCC3.0": 0}, + "prev_state": "CCC3.0", + }, + ) + + +class TestArrogantQLearner(TestPlayer): + + name = "Arrogant QLearner" + player = axl.ArrogantQLearner + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (D, C), (C, C), (C, C)] + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + seed=5, + attrs={ + "Qs": { + "": {C: 0, D: 0.9}, + "0.0": {C: 2.7, D: 0}, + "C1.0": {C: 0, D: 4.5}, + "CC2.0": {C: 2.7, D: 0}, + "CCC3.0": {C: 0, D: 0}, + }, + "Vs": {"": 0.9, "0.0": 2.7, "C1.0": 4.5, "CC2.0": 2.7, "CCC3.0": 0}, + "prev_state": "CCC3.0", + }, + ) + + +class TestHesitantQLearner(TestPlayer): + + name = "Hesitant QLearner" + player = axl.HesitantQLearner + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": 
False, + } + + def test_strategy(self): + actions = [(C, D), (D, D), (C, D), (C, D)] + self.versus_test( + opponent=axl.Defector(), + expected_actions=actions, + seed=5, + attrs={ + "Qs": { + "": {C: 0, D: 0.1}, + "0.0": {C: 0, D: 0}, + "D0.0": {C: 0, D: 0.1}, + "DD0.0": {C: 0, D: 0}, + "DDD0.0": {C: 0, D: 0}, + }, + "Vs": {"": 0.1, "0.0": 0.0, "D0.0": 0.1, "DD0.0": 0.0, "DDD0.0": 0}, + "prev_state": "DDD0.0", + }, + ) + + +class TestCautiousQLearner(TestPlayer): + + name = "Cautious QLearner" + player = axl.CautiousQLearner + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, D), (D, D), (C, D), (C, D)] + self.versus_test( + opponent=axl.Defector(), + expected_actions=actions, + seed=5, + attrs={ + "Qs": { + "": {C: 0, D: 0.1}, + "0.0": {C: 0, D: 0}, + "D0.0": {C: 0, D: 0.1}, + "DD0.0": {C: 0, D: 0}, + "DDD0.0": {C: 0, D: 0}, + }, + "Vs": {"": 0.1, "0.0": 0.0, "D0.0": 0.1, "DD0.0": 0.0, "DDD0.0": 0}, + "prev_state": "DDD0.0", + }, + ) diff --git a/axelrod/tests/strategies/test_rand.py b/axelrod/tests/strategies/test_rand.py new file mode 100644 index 000000000..76bfb3478 --- /dev/null +++ b/axelrod/tests/strategies/test_rand.py @@ -0,0 +1,46 @@ +"""Tests for the random strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestRandom(TestPlayer): + + name = "Random: 0.5" + player = axl.Random + expected_classifier = { + "memory_depth": 0, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """Test that strategy is randomly picked (not affected by history).""" + opponent = axl.MockPlayer() + actions = [(C, C), (D, C), (D, C), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=1) + + opponent = axl.MockPlayer() + actions = [(D, C), (D, C), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + opponent = axl.MockPlayer() + actions = [(D, C), (D, C), (D, C)] + self.versus_test(opponent, expected_actions=actions, init_kwargs={"p": 0}) + + opponent = axl.MockPlayer() + actions = [(C, C), (C, C), (C, C)] + self.versus_test(opponent, expected_actions=actions, init_kwargs={"p": 1}) + + def test_deterministic_classification(self): + """Test classification when p is 0 or 1""" + for p in [0, 1]: + player = axl.Random(p=p) + self.assertFalse(axl.Classifiers["stochastic"](player)) diff --git a/axelrod/tests/strategies/test_resurrection.py b/axelrod/tests/strategies/test_resurrection.py new file mode 100644 index 000000000..c71becde8 --- /dev/null +++ b/axelrod/tests/strategies/test_resurrection.py @@ -0,0 +1,59 @@ +"""Test for the Resurrection strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class Resurrection(TestPlayer): + + name = "Resurrection" + player = axl.Resurrection + expected_classifier = { + "memory_depth": 5, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Check if the turns played are greater than 5 + actions = [(C, C), (C, C), (C, C), (C, C), (C, C), (C, C), (C, C)] + 
self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D), (D, D), (D, D), (D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + # Check for TFT behavior after 5 rounds + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + +class TestDoubleResurrection(TestPlayer): + + name = "DoubleResurrection" + player = axl.DoubleResurrection + expected_classifier = { + "memory_depth": 5, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + actions = [(C, C), (C, C), (C, C), (C, C), (C, C), (D, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D), (D, D), (D, D), (D, D), (D, D), (C, D)] + self.versus_test(axl.Defector(), expected_actions=actions) diff --git a/axelrod/tests/strategies/test_retaliate.py b/axelrod/tests/strategies/test_retaliate.py new file mode 100644 index 000000000..251438735 --- /dev/null +++ b/axelrod/tests/strategies/test_retaliate.py @@ -0,0 +1,140 @@ +"""Tests for the retaliate strategy.""" + +import axelrod as axl + +from .test_player import TestOpponent, TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestRetaliate(TestPlayer): + + name = "Retaliate: 0.1" + player = axl.Retaliate + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent has defected more than 10 percent of the time, defect. + opponent = axl.Cooperator() + actions = [(C, C)] * 5 + self.versus_test(opponent=opponent, expected_actions=actions) + + opponent = axl.MockPlayer([C, C, C, D, C]) + actions = [(C, C), (C, C), (C, C), (C, D), (D, C), (D, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + +class TestRetaliate2(TestPlayer): + + name = "Retaliate 2: 0.08" + player = axl.Retaliate2 + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent has defected more than 8 percent of the time, defect. + opponent = axl.MockPlayer([C] * 13 + [D]) + actions = [(C, C)] * 13 + [(C, D), (D, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + +class TestRetaliate3(TestPlayer): + + name = "Retaliate 3: 0.05" + player = axl.Retaliate3 + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent has defected more than 5 percent of the time, defect. 
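+ # In the sequence below a single opponent defection after 19 cooperations + # is already enough to trigger retaliation on the following round.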
+ opponent = axl.MockPlayer([C] * 19 + [D]) + actions = [(C, C)] * 19 + [(C, D), (D, C)] + self.versus_test(opponent=opponent, expected_actions=actions) + + +class TestLimitedRetaliate(TestPlayer): + + name = "Limited Retaliate: 0.1, 20" + player = axl.LimitedRetaliate + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent has never defected, co-operate + opponent = axl.Cooperator() + actions = [(C, C)] * 5 + self.versus_test( + opponent=opponent, expected_actions=actions, attrs={"retaliating": False} + ) + + # Retaliate after a (C, D) round. + opponent = axl.MockPlayer([C, C, C, D, C]) + actions = [(C, C), (C, C), (C, C), (C, D), (D, C), (D, C)] + self.versus_test( + opponent=opponent, expected_actions=actions, attrs={"retaliating": True} + ) + + opponent = axl.Alternator() + + # Count retaliations + actions = [(C, C), (C, D), (D, C), (D, D), (D, C)] + self.versus_test( + opponent=opponent, expected_actions=actions, attrs={"retaliation_count": 3} + ) + opponent = axl.Alternator() + + # Cooperate if we hit the retaliation limit + actions = [(C, C), (C, D), (D, C), (D, D), (C, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"retaliation_count": 0}, + init_kwargs={"retaliation_limit": 2}, + ) + + # Defect again after cooperating + actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (D, D), (D, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"retaliation_count": 2}, + init_kwargs={"retaliation_limit": 2}, + ) + + # Different behaviour with different retaliation threshold + actions = [(C, C), (C, D), (D, C), (C, D), (C, C), (C, D), (C, C)] + self.versus_test( + opponent=opponent, + expected_actions=actions, + attrs={"retaliation_count": 0}, + init_kwargs={"retaliation_limit": 2, "retaliation_threshold": 9}, + ) diff --git a/axelrod/tests/strategies/test_revised_downing.py b/axelrod/tests/strategies/test_revised_downing.py new file mode 100644 index 000000000..fa6897a85 --- /dev/null +++ b/axelrod/tests/strategies/test_revised_downing.py @@ -0,0 +1,42 @@ +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + +class TestRevisedDowning(TestPlayer): + + name = "Revised Downing" + player = axl.RevisedDowning + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (C, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, C, C]) + actions = [(C, D), (C, C), (C, C), (C, D)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[D, D, C]) + actions = [(C, D), (C, D), (D, C), (C, D)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[C, C, D, D, C, C]) + actions = [(C, C), (C, C), (C, D), (C, D), (D, C), (C, C), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[C, C, C, C, D, D]) + actions = [(C, C), (C, C), (C, C), (C, C), (C, D), (C, D), (C, C)] + 
self.versus_test(opponent, expected_actions=actions) diff --git a/axelrod/tests/strategies/test_selfsteem.py b/axelrod/tests/strategies/test_selfsteem.py new file mode 100644 index 000000000..c0d9ec84c --- /dev/null +++ b/axelrod/tests/strategies/test_selfsteem.py @@ -0,0 +1,81 @@ +"""Tests for the SelfSteem strategy.""" + +import random + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestSelfSteem(TestPlayer): + + name = "SelfSteem" + player = axl.SelfSteem + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + + # Check for f > 0.95, defect + actions = ( + [(C, C), (C, C), (D, C), (D, C), (C, C), (D, C)] + [(C, C)] * 4 + [(D, C)] + ) + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=1) + + # Check for f < -0.95, cooperate + actions = [(D, C), (C, C), (D, C), (D, C), (C, C), (D, C), (C, C), (C, C)] + self.versus_test( + opponent=axl.Cooperator(), expected_actions=actions, seed=0 + ) + + actions = [(D, D)] + [(D, D)] * 5 + [(D, D), (C, D), (C, D)] + self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=0) + + # Check for -0.3 < f < 0.3, random + actions = ( + [(D, C), (C, C), (D, C), (D, C), (C, C), (D, C)] + + [(C, C)] * 6 + + [(D, C), (D, C)] + + [(C, C)] * 7 + ) + self.versus_test( + opponent=axl.Cooperator(), expected_actions=actions, seed=6 + ) + + actions = ( + [(D, D)] * 7 + + [(C, D), (C, D)] + + [(D, D)] * 8 + + [(C, D), (C, D), (D, D), (D, D), (D, D)] + ) + self.versus_test(opponent=axl.Defector(), expected_actions=actions, seed=5) + + # Check for 0.95 > abs(f) > 0.3, follows TitForTat + actions = ( + [(D, D)] * 5 + + [(C, D), (D, D), (C, D), (C, D), (D, D), (C, D)] + + [(D, D)] * 5 + ) + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + actions = [ + (D, C), + (C, C), + (D, C), + (D, C), + (C, C), + (D, C), + (C, C), + (C, C), + (C, C), + (C, C), + ] + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) diff --git a/axelrod/tests/strategies/test_sequence_player.py b/axelrod/tests/strategies/test_sequence_player.py new file mode 100644 index 000000000..2a3c655ef --- /dev/null +++ b/axelrod/tests/strategies/test_sequence_player.py @@ -0,0 +1,80 @@ +"""Tests for the Thue-Morse strategies.""" +import unittest + +import axelrod as axl +from axelrod._strategy_utils import recursive_thue_morse +from axelrod.strategies.sequence_player import SequencePlayer + +from .test_player import TestOpponent, TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestThueMoreGenerator(unittest.TestCase): + def test_sequence(self): + expected = [0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0] + for i, e in enumerate(expected): + self.assertEqual(recursive_thue_morse(i), e) + + +class TestSequencePlayer(unittest.TestCase): + def test_sequence_player(self): + """Basic test for SequencePlayer.""" + + def cooperate_gen(): + yield 1 + + player = SequencePlayer(generator_function=cooperate_gen) + opponent = TestOpponent() + self.assertEqual(C, player.strategy(opponent)) + + +class TestThueMorse(TestPlayer): + + name = "ThueMorse" + player = axl.ThueMorse + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def 
test_strategy(self): + + thue_morse_seq = [D, C, C, D, C, D, D, C, C, D, D, C, D, C, C, D, C] + n = len(thue_morse_seq) + + actions = list(zip(thue_morse_seq, [C] * n)) + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = list(zip(thue_morse_seq, [D] * n)) + self.versus_test(axl.Defector(), expected_actions=actions) + + +class TestThueMorseInverse(TestPlayer): + + name = "ThueMorseInverse" + player = axl.ThueMorseInverse + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + inv_thue_morse_seq = [C, D, D, C, D, C, C, D, D, C, C, D, C, D, D, C, D] + n = len(inv_thue_morse_seq) + + actions = list(zip(inv_thue_morse_seq, [C] * n)) + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = list(zip(inv_thue_morse_seq, [D] * n)) + self.versus_test(axl.Defector(), expected_actions=actions) diff --git a/axelrod/tests/strategies/test_shortmem.py b/axelrod/tests/strategies/test_shortmem.py new file mode 100644 index 000000000..48dcd0138 --- /dev/null +++ b/axelrod/tests/strategies/test_shortmem.py @@ -0,0 +1,57 @@ +"""Tests for the ShortMem strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestShortMem(TestPlayer): + + name = "ShortMem" + player = axl.ShortMem + expected_classifier = { + "memory_depth": float('inf'), + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + + # Starts by cooperating for the first ten moves. + actions = [(C, C)] * 10 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D)] * 10 + self.versus_test(axl.Defector(), expected_actions=actions) + + # Cooperate if in the last ten moves, Cooperations - Defections >= 3 + actions = [(C, C)] * 11 + [(C, D)] * 4 + self.versus_test( + opponent=axl.MockPlayer(actions=[C] * 11 + [D] * 4), + expected_actions=actions, + ) + + # Defect if in the last ten moves, Defections - Cooperations >= 3 + actions = [(C, D)] * 11 + [(D, C)] * 4 + self.versus_test( + opponent=axl.MockPlayer(actions=[D] * 11 + [C] * 4), + expected_actions=actions, + ) + + # If neither of the above conditions are met, apply TitForTat + actions = [(C, D)] * 5 + [(C, C)] * 6 + [(C, D), (D, D), (D, D), (D, C), (C, C)] + self.versus_test( + opponent=axl.MockPlayer(actions=[D] * 5 + [C] * 6 + [D, D, D, C, C]), + expected_actions=actions, + ) + + actions = [(C, C)] * 5 + [(C, D)] * 6 + [(D, C), (C, C), (C, C), (C, D), (D, D)] + self.versus_test( + opponent=axl.MockPlayer(actions=[C] * 5 + [D] * 6 + [C, C, C, D, D]), + expected_actions=actions, + ) diff --git a/axelrod/tests/strategies/test_stalker.py b/axelrod/tests/strategies/test_stalker.py new file mode 100644 index 000000000..31acdb769 --- /dev/null +++ b/axelrod/tests/strategies/test_stalker.py @@ -0,0 +1,94 @@ +"""Tests for the Stalker strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestStalker(TestPlayer): + + name = "Stalker: (D,)" + player = axl.Stalker + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(["game", "length"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, 
+ } + + def test_strategy(self): + actions = [(C, C)] * 3 + [(D, C)] + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + # wish_score < current_average_score < very_good_score + actions = [(C, C)] * 7 + [(C, D), (C, D), (C, C), (C, C), (D, C)] + self.versus_test( + opponent=axl.MockPlayer(actions=[C] * 7 + [D] * 2), expected_actions=actions + ) + + actions = [(C, C)] * 7 + [(C, D), (C, C), (D, C)] + self.versus_test( + opponent=axl.MockPlayer(actions=[C] * 7 + [D]), expected_actions=actions + ) + + # current_average_score > 2 + actions = [(C, C)] * 9 + [(D, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + # 1 < current_average_score < 2 + actions = [(C, C)] * 7 + [(C, D)] * 4 + [(D, D)] + self.versus_test( + opponent=axl.MockPlayer(actions=[C] * 7 + [D] * 5), expected_actions=actions + ) + + # current_average_score < 1 + actions = ( + [(C, D)] + + [(D, D)] * 2 + + [(C, D)] * 3 + + [(D, D), (C, D), (D, D), (C, D), (D, D), (C, D), (D, D)] + ) + self.versus_test(axl.Defector(), expected_actions=actions, seed=6) + + actions = [(C, D)] * 3 + [ + (D, D), + (C, D), + (D, D), + (C, D), + (C, D), + (D, D), + (C, D), + (C, D), + (C, D), + (D, D), + ] + self.versus_test(axl.Defector(), expected_actions=actions, seed=7) + + # defect in last round + actions = [(C, C)] * 199 + [(D, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, match_attributes={"length": 200} + ) + + # Force a defection before the end of the actual match which ensures + # that current_average_score > very_good_score + actions = [(C, C)] * 3 + [(D, C)] * 3 + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + match_attributes={"length": 4}, + ) + + def test_reset(self): + axl.seed(0) + player = axl.Stalker() + m = axl.IpdMatch((player, axl.Alternator())) + m.play() + self.assertNotEqual(player.current_score, 0) + player.reset() + self.assertEqual(player.current_score, 0) diff --git a/axelrod/tests/strategies/test_titfortat.py b/axelrod/tests/strategies/test_titfortat.py new file mode 100644 index 000000000..28b5e36a0 --- /dev/null +++ b/axelrod/tests/strategies/test_titfortat.py @@ -0,0 +1,1191 @@ +"""Tests for the tit for tat strategies.""" + +import copy + +import random + +import axelrod as axl +from axelrod.tests.property import strategy_lists + +from hypothesis import given +from hypothesis.strategies import integers + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestTitForTat(TestPlayer): + """ + Note that this test is referred to in the documentation as an example on + writing tests. If you modify the tests here please also modify the + documentation. 
+ """ + + name = "Tit For Tat" + player = axl.TitForTat + expected_classifier = { + "memory_depth": 1, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Play against opponents + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + # This behaviour is independent of knowledge of the IpdMatch length + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + match_attributes={"length": float("inf")}, + ) + + # We can also test against random strategies + actions = [(C, D), (D, D), (D, C), (C, C), (C, D)] + self.versus_test(axl.Random(), expected_actions=actions, seed=0) + + actions = [(C, C), (C, D), (D, D), (D, C)] + self.versus_test(axl.Random(), expected_actions=actions, seed=1) + + # If you would like to test against a sequence of moves you should use + # a MockPlayer + opponent = axl.MockPlayer(actions=[C, D]) + actions = [(C, C), (C, D), (D, C), (C, D)] + self.versus_test(opponent, expected_actions=actions) + + opponent = axl.MockPlayer(actions=[C, C, D, D, C, D]) + actions = [(C, C), (C, C), (C, D), (D, D), (D, C), (C, D)] + self.versus_test(opponent, expected_actions=actions) + + +class TestTitFor2Tats(TestPlayer): + name = "Tit For 2 Tats" + player = axl.TitFor2Tats + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Will punish sequence of 2 defections but will forgive one + opponent = axl.MockPlayer(actions=[D, D, D, C, C]) + actions = [(C, D), (C, D), (D, D), (D, C), (C, C), (C, D)] + self.versus_test(opponent, expected_actions=actions) + opponent = axl.MockPlayer(actions=[C, C, D, D, C, D, D, C, C, D, D]) + actions = [ + (C, C), + (C, C), + (C, D), + (C, D), + (D, C), + (C, D), + (C, D), + (D, C), + (C, C), + (C, D), + (C, D), + ] + self.versus_test(opponent, expected_actions=actions) + + +class TestTwoTitsForTat(TestPlayer): + name = "Two Tits For Tat" + player = axl.TwoTitsForTat + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Will defect twice when last turn of opponent was defection. 
+ opponent = axl.MockPlayer(actions=[D, C, C, D, C]) + actions = [(C, D), (D, C), (D, C), (C, D), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + actions = [(C, C), (C, C)] + self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D), (D, D)] + self.versus_test(opponent=axl.Defector(), expected_actions=actions) + + +class TestDynamicTwoTitsForTat(TestPlayer): + name = "Dynamic Two Tits For Tat" + player = axl.DynamicTwoTitsForTat + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Test that it is stochastic + opponent = axl.MockPlayer(actions=[D, C, D, D, C]) + actions = [(C, D), (D, C), (C, D), (D, D), (D, C)] + self.versus_test(opponent, expected_actions=actions, seed=1) + # Should respond differently with a different seed + actions = [(C, D), (D, C), (D, D), (D, D), (C, C)] + self.versus_test(opponent, expected_actions=actions, seed=2) + + # Will cooperate if opponent cooperates. + actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + # Test against defector + actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + +class TestBully(TestPlayer): + name = "Bully" + player = axl.Bully + expected_classifier = { + "memory_depth": 1, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Will do opposite of what opponent does. + actions = [(D, C), (D, D), (C, C), (D, D), (C, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + actions = [(D, C), (D, C), (D, C), (D, C), (D, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(D, D), (C, D), (C, D), (C, D), (C, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + +class TestSneakyTitForTat(TestPlayer): + name = "Sneaky Tit For Tat" + player = axl.SneakyTitForTat + expected_classifier = { + "memory_depth": float("inf"), # Long memory + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + opponent = axl.MockPlayer(actions=[C, C, C, D, C, C]) + actions = [(C, C), (C, C), (D, C), (D, D), (C, C), (C, C)] + self.versus_test(opponent, expected_actions=actions) + + # Repents if punished for a defection + actions = [(C, C), (C, D), (D, C), (C, D), (C, C)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + +class TestSuspiciousTitForTat(TestPlayer): + name = "Suspicious Tit For Tat" + player = axl.SuspiciousTitForTat + expected_classifier = { + "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Plays like TFT after the first move, repeating the opponents last + # move. 
+ actions = [(D, C), (C, D)] * 8 + self.versus_test(axl.TitForTat(), expected_actions=actions) + + actions = [(D, C), (C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + +class TestAntiTitForTat(TestPlayer): + name = "Anti Tit For Tat" + player = axl.AntiTitForTat + expected_classifier = { + "memory_depth": 1, # Four-Vector = (1.,0.,1.,0.) + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (D, C), (D, D), (C, D)] * 4 + self.versus_test(axl.TitForTat(), expected_actions=actions) + + +class TestHardTitForTat(TestPlayer): + name = "Hard Tit For Tat" + player = axl.HardTitForTat + expected_classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + opponent = axl.MockPlayer(actions=[D, C, C, C, D, C]) + actions = [(C, D), (D, C), (D, C), (D, C), (C, D), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + actions = [(C, C), (C, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + +class TestHardTitFor2Tats(TestPlayer): + name = "Hard Tit For 2 Tats" + player = axl.HardTitFor2Tats + expected_classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Uses memory 3 to punish 2 consecutive defections + opponent = axl.MockPlayer(actions=[D, C, C, D, D, D, C]) + actions = [(C, D), (C, C), (C, C), (C, D), (C, D), (D, D), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + +class TestOmegaTFT(TestPlayer): + name = "Omega TFT: 3, 8" + player = axl.OmegaTFT + + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + player_history = [C, D, C, D, C, C, C, C, C] + opp_history = [D, C, D, C, D, C, C, C, C] + actions = list(zip(player_history, opp_history)) + self.versus_test(axl.SuspiciousTitForTat(), expected_actions=actions) + + player_history = [C, C, D, C, D, C, C, C, D, D, D, D, D, D] + opp_history = [C, D] * 7 + actions = list(zip(player_history, opp_history)) + self.versus_test(axl.Alternator(), expected_actions=actions) + + +class TestGradual(TestPlayer): + name = "Gradual" + player = axl.Gradual + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Punishes defection with a growing number of defections and calms + # the opponent with two cooperations in a row. 
+ opponent = axl.MockPlayer(actions=[C]) + actions = [(C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 0, "punish_count": 0,}, + ) + + opponent = axl.MockPlayer(actions=[D]) + actions = [(C, D)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 0, "punish_count": 0,}, + ) + + opponent = axl.MockPlayer(actions=[D, C]) + actions = [(C, D), (D, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 2, "punish_count": 0,}, + ) + + opponent = axl.MockPlayer(actions=[D, C, C]) + actions = [(C, D), (D, C), (C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 1, "punish_count": 0,}, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C]) + actions = [(C, D), (D, C), (C, D), (C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 0, "punish_count": 0,}, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C]) + actions = [(C, D), (D, C), (C, D), (C, C), (C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 0, "punish_count": 0,}, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C, C]) + actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 0, "punish_count": 0,}, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C, C, D, C]) + actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C), (C, D), (D, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 2, "punish_count": 2,}, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C, D, D, D]) + actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, D), (D, D), (D, D)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 2, "punish_count": 1,}, + ) + + opponent = axl.Defector() + actions = [ + (C, D), + (D, D), # 1 defection as a response to the 1 defection by opponent + (C, D), + (C, D), + (D, D), + # starts defecting after a total of 4 defections by the opponent + (D, D), + (D, D), + (D, D), # 4 defections + (C, D), + (C, D), + (D, D), + # Start defecting after a total of 10 defections by the opponent + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), # 10 defections + (C, D), + (C, D), + (D, D), # starts defecting after 22 defections by the opponent + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), + (D, D), # 22 defections + (C, D), + (C, D), + (D, D), + (D, D), + (D, D), + (D, D), + ] + self.versus_test( + opponent, + expected_actions=actions, + attrs={"calm_count": 2, "punish_count": 42,}, + ) + + def test_specific_set_of_results(self): + """ + This tests specific reported results as discussed in + https://github.com/Axelrod-Python/Axelrod/issues/1294 + + The results there used a version of mistrust with a bug that corresponds + to a memory one player that start by defecting and only cooperates if + both players cooperated in the previous round. 
+ """ + mistrust_with_bug = axl.MemoryOnePlayer(initial=D, four_vector=(1, 0, 0, 0),) + players = [ + self.player(), + axl.TitForTat(), + axl.GoByMajority(), + axl.Grudger(), + axl.WinStayLoseShift(), + axl.Prober(), + axl.Defector(), + mistrust_with_bug, + axl.Cooperator(), + axl.CyclerCCD(), + axl.CyclerDDC(), + ] + axl.seed(1) + tournament = axl.IpdTournament(players, turns=1000, repetitions=1) + results = tournament.play(progress_bar=False) + scores = [ + round(average_score_per_turn * 1000, 1) + for average_score_per_turn in results.payoff_matrix[0] + ] + expected_scores = [ + 3000.0, + 3000.0, + 3000.0, + 3000.0, + 3000.0, + 2999.0, + 983.0, + 983.0, + 3000.0, + 3596.0, + 2302.0, + ] + self.assertEqual(scores, expected_scores) + + +class TestOriginalGradual(TestPlayer): + name = "Original Gradual" + player = axl.OriginalGradual + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Punishes defection with a growing number of defections and calms + # the opponent with two cooperations in a row. + opponent = axl.MockPlayer(actions=[C]) + actions = [(C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={ + "calming": False, + "punishing": False, + "punishment_count": 0, + "punishment_limit": 0, + }, + ) + + opponent = axl.MockPlayer(actions=[D]) + actions = [(C, D)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={ + "calming": False, + "punishing": False, + "punishment_count": 0, + "punishment_limit": 0, + }, + ) + + opponent = axl.MockPlayer(actions=[D, C]) + actions = [(C, D), (D, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={ + "calming": False, + "punishing": True, + "punishment_count": 1, + "punishment_limit": 1, + }, + ) + + opponent = axl.MockPlayer(actions=[D, C, C]) + actions = [(C, D), (D, C), (C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={ + "calming": True, + "punishing": False, + "punishment_count": 0, + "punishment_limit": 1, + }, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C]) + actions = [(C, D), (D, C), (C, D), (C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={ + "calming": False, + "punishing": False, + "punishment_count": 0, + "punishment_limit": 1, + }, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C]) + actions = [(C, D), (D, C), (C, D), (C, C), (C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={ + "calming": False, + "punishing": False, + "punishment_count": 0, + "punishment_limit": 1, + }, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C, C]) + actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={ + "calming": False, + "punishing": False, + "punishment_count": 0, + "punishment_limit": 1, + }, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C, C, D, C]) + actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, C), (C, D), (D, C)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={ + "calming": False, + "punishing": True, + "punishment_count": 1, + "punishment_limit": 2, + }, + ) + + opponent = axl.MockPlayer(actions=[D, C, D, C, C, D, D, D]) + actions = [(C, D), (D, C), (C, D), (C, C), (C, C), (C, D), (D, D), (D, D)] + self.versus_test( + opponent, + expected_actions=actions, + attrs={ + "calming": False, + "punishing": 
True, + "punishment_count": 2, + "punishment_limit": 2, + }, + ) + + def test_output_from_literature(self): + """ + This strategy is not fully described in the literature, however the + scores for the strategy against a set of opponents is reported + + Bruno Beaufils, Jean-Paul Delahaye, Philippe Mathie + "Our Meeting With Gradual: A Good Strategy For The Iterated Prisoner's + Dilemma" Proc. Artif. Life 1996 + + This test just ensures that the strategy is as was originally defined. + + See https://github.com/Axelrod-Python/Axelrod/issues/1294 for another + discussion of this. + """ + players = [ + axl.Cooperator(), + axl.Defector(), + axl.Random(), + axl.TitForTat(), + axl.Grudger(), + axl.CyclerDDC(), + axl.CyclerCCD(), + axl.GoByMajority(), + axl.SuspiciousTitForTat(), + axl.Prober(), + self.player(), + axl.WinStayLoseShift(), + ] + + axl.seed(1) + turns = 1000 + tournament = axl.IpdTournament(players, turns=turns, repetitions=1) + results = tournament.play(progress_bar=False) + scores = [ + round(average_score_per_turn * 1000, 1) + for average_score_per_turn in results.payoff_matrix[-2] + ] + expected_scores = [ + 3000.0, + 915.0, + 2763.0, + 3000.0, + 3000.0, + 2219.0, + 3472.0, + 3000.0, + 2996.0, + 2999.0, + 3000.0, + 3000.0, + ] + self.assertEqual(scores, expected_scores) + + +class TestContriteTitForTat(TestPlayer): + name = "Contrite Tit For Tat" + player = axl.ContriteTitForTat + expected_classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + deterministic_strategies = [ + s for s in axl.strategies if not axl.Classifiers["stochastic"](s()) + ] + + def test_init(self): + ctft = self.player() + self.assertFalse(ctft.contrite, False) + self.assertEqual(ctft._recorded_history, []) + + @given( + strategies=strategy_lists(strategies=deterministic_strategies, max_size=1), + turns=integers(min_value=1, max_value=20), + ) + def test_is_tit_for_tat_with_no_noise(self, strategies, turns): + tft = axl.TitForTat() + ctft = self.player() + opponent = strategies[0]() + m1 = axl.IpdMatch((tft, opponent), turns) + m2 = axl.IpdMatch((ctft, opponent), turns) + self.assertEqual(m1.play(), m2.play()) + + def test_strategy_with_noise(self): + ctft = self.player() + opponent = axl.Defector() + self.assertEqual(ctft.strategy(opponent), C) + self.assertEqual(ctft._recorded_history, [C]) + ctft.reset() # Clear the recorded history + self.assertEqual(ctft._recorded_history, []) + + random.seed(0) + ctft.play(opponent, noise=0.9) + self.assertEqual(ctft.history, [D]) + self.assertEqual(ctft._recorded_history, [C]) + self.assertEqual(opponent.history, [C]) + + # After noise: is contrite + ctft.play(opponent) + self.assertEqual(ctft.history, [D, C]) + self.assertEqual(ctft._recorded_history, [C, C]) + self.assertEqual(opponent.history, [C, D]) + self.assertTrue(ctft.contrite) + + # Cooperates and no longer contrite + ctft.play(opponent) + self.assertEqual(ctft.history, [D, C, C]) + self.assertEqual(ctft._recorded_history, [C, C, C]) + self.assertEqual(opponent.history, [C, D, D]) + self.assertFalse(ctft.contrite) + + # Goes back to playing tft + ctft.play(opponent) + self.assertEqual(ctft.history, [D, C, C, D]) + self.assertEqual(ctft._recorded_history, [C, C, C, D]) + self.assertEqual(opponent.history, [C, D, D, D]) + self.assertFalse(ctft.contrite) + + +class TestAdaptiveTitForTat(TestPlayer): + name = "Adaptive Tit For Tat: 0.5" + player = axl.AdaptiveTitForTat + expected_classifier 
= { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={"world": 0.34375, "rate": 0.5}, + ) + + +class TestSpitefulTitForTat(TestPlayer): + name = "Spiteful Tit For Tat" + player = axl.SpitefulTitForTat + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # Repeats last action of opponent history until 2 consecutive + # defections, then always defects + opponent = axl.MockPlayer(actions=[C, C, C, C]) + actions = [(C, C)] * 5 + self.versus_test( + opponent, expected_actions=actions, attrs={"retaliating": False} + ) + + opponent = axl.MockPlayer(actions=[C, C, C, C, D, C]) + actions = [(C, C)] * 4 + [(C, D), (D, C), (C, C)] + self.versus_test( + opponent, expected_actions=actions, attrs={"retaliating": False} + ) + + opponent = axl.MockPlayer(actions=[C, C, D, D, C]) + actions = [(C, C), (C, C), (C, D), (D, D), (D, C)] + self.versus_test( + opponent, expected_actions=actions, attrs={"retaliating": True} + ) + + +class TestSlowTitForTwoTats2(TestPlayer): + name = "Slow Tit For Two Tats 2" + player = axl.SlowTitForTwoTats2 + expected_classifier = { + "memory_depth": 2, + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # If opponent plays the same move twice, repeats last action of + # opponent history, otherwise repeats previous move. 
+ opponent = axl.MockPlayer(actions=[C, C, D, D, C, D, D, C, C, D, D]) + actions = [ + (C, C), + (C, C), + (C, D), + (C, D), + (D, C), + (D, D), + (D, D), + (D, C), + (D, C), + (C, D), + (C, D), + ] + self.versus_test(opponent, expected_actions=actions) + + +class TestAlexei(TestPlayer): + """ + Tests for the Alexei strategy + """ + + name = "Alexei: (D,)" + player = axl.Alexei + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"length"}, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, C), (C, C), (C, C), (D, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, D), (D, D), (D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions) + + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + match_attributes={"length": float("inf")}, + ) + + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (D, D)] + self.versus_test(axl.Alternator(), expected_actions=actions) + + opponent = axl.MockPlayer(actions=[C, C, D, D, C, D]) + actions = [(C, C), (C, C), (C, D), (D, D), (D, C), (D, D)] + self.versus_test(opponent, expected_actions=actions) + + +class TestEugineNier(TestPlayer): + """ + Tests for the Eugine Nier strategy + """ + + name = "EugineNier: (D,)" + player = axl.EugineNier + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": {"length"}, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, C), (C, C), (D, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, attrs={"is_defector": False} + ) + + actions = [(C, C), (C, C), (C, C), (C, C)] + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + attrs={"is_defector": False}, + match_attributes={"length": float("inf")}, + ) + + # Plays TfT and defects in last round + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (D, D)] + self.versus_test( + axl.Alternator(), expected_actions=actions, attrs={"is_defector": False} + ) + + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={"is_defector": False}, + match_attributes={"length": float("inf")}, + ) + + # Becomes defector after 5 defections + opponent = axl.MockPlayer(actions=[D, C, D, D, D, D, C, C]) + actions = [(C, D), (D, C), (C, D), (D, D), (D, D), (D, D), (D, C), (D, C)] + self.versus_test(opponent, expected_actions=actions) + + +class TestNTitsForMTats(TestPlayer): + """ + Tests for the N Tit(s) For M Tat(s) strategy + """ + + name = "N Tit(s) For M Tat(s): 3, 2" + player = axl.NTitsForMTats + expected_classifier = { + "memory_depth": 3, + "stochastic": False, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + expected_class_classifier = copy.copy(expected_classifier) + + def test_strategy(self): + # TitForTat test_strategy + init_kwargs = {"N": 1, "M": 1} + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test( + axl.Alternator(), expected_actions=actions, init_kwargs=init_kwargs + ) + actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, init_kwargs=init_kwargs + ) + actions = [(C, D), (D, D), 
(D, D), (D, D), (D, D)] + self.versus_test( + axl.Defector(), expected_actions=actions, init_kwargs=init_kwargs + ) + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + match_attributes={"length": float("inf")}, + init_kwargs=init_kwargs, + ) + actions = [(C, D), (D, D), (D, C), (C, C), (C, D)] + self.versus_test( + axl.Random(), expected_actions=actions, seed=0, init_kwargs=init_kwargs + ) + actions = [(C, C), (C, D), (D, D), (D, C)] + self.versus_test( + axl.Random(), expected_actions=actions, seed=1, init_kwargs=init_kwargs + ) + opponent = axl.MockPlayer(actions=[C, D]) + actions = [(C, C), (C, D), (D, C), (C, D)] + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + opponent = axl.MockPlayer(actions=[C, C, D, D, C, D]) + actions = [(C, C), (C, C), (C, D), (D, D), (D, C), (C, D)] + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + # TitFor2Tats test_strategy + init_kwargs = {"N": 1, "M": 2} + opponent = axl.MockPlayer(actions=[D, D, D, C, C]) + actions = [(C, D), (C, D), (D, D), (D, C), (C, C), (C, D)] + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + + # TwoTitsForTat test_strategy + init_kwargs = {"N": 2, "M": 1} + opponent = axl.MockPlayer(actions=[D, C, C, D, C]) + actions = [(C, D), (D, C), (D, C), (C, D), (D, C)] + self.versus_test(opponent, expected_actions=actions, init_kwargs=init_kwargs) + actions = [(C, C), (C, C)] + self.versus_test( + opponent=axl.Cooperator(), + expected_actions=actions, + init_kwargs=init_kwargs, + ) + actions = [(C, D), (D, D), (D, D)] + self.versus_test( + opponent=axl.Defector(), expected_actions=actions, init_kwargs=init_kwargs, + ) + + # Cooperator test_strategy + actions = [(C, C)] + [(C, D), (C, C)] * 9 + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + init_kwargs={"N": 0, "M": 1}, + ) + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + init_kwargs={"N": 0, "M": 5}, + ) + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + init_kwargs={"N": 0, "M": 0}, + ) + + # Defector test_strategy + actions = [(D, C)] + [(D, D), (D, C)] * 9 + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + init_kwargs={"N": 1, "M": 0}, + ) + self.versus_test( + opponent=axl.Alternator(), + expected_actions=actions, + init_kwargs={"N": 5, "M": 0}, + ) + + # Default init args + actions = [(C, C), (C, D), (C, D), (D, C), (D, C), (D, D), (C, C)] + opponent = axl.MockPlayer(actions=[acts[1] for acts in actions]) + self.versus_test(opponent=opponent, expected_actions=actions) + + def test_varying_memory_depth(self): + self.assertEqual(axl.Classifiers["memory_depth"](self.player(1, 1)), 1) + self.assertEqual(axl.Classifiers["memory_depth"](self.player(0, 3)), 3) + self.assertEqual(axl.Classifiers["memory_depth"](self.player(5, 3)), 5) + + +class TestMichaelos(TestPlayer): + """ + Tests for the Michaelos strategy + """ + + name = "Michaelos: (D,)" + player = axl.Michaelos + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": {"length"}, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + actions = [(C, C), (C, C), (C, C), (D, C)] + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + attrs={"is_defector": False}, + seed=2, + ) + + actions = [(C, C), (C, C), (C, C), (C, C)] + 
self.versus_test( + axl.Cooperator(), + expected_actions=actions, + attrs={"is_defector": False}, + match_attributes={"length": float("inf")}, + seed=2, + ) + + actions = [(C, D), (D, D), (D, D), (D, D)] + self.versus_test( + axl.Defector(), + expected_actions=actions, + attrs={"is_defector": False}, + seed=2, + ) + + actions = [(C, D), (D, D), (D, D), (D, D)] + self.versus_test( + axl.Defector(), + expected_actions=actions, + attrs={"is_defector": False}, + match_attributes={"length": float("inf")}, + seed=2, + ) + + # Chance of becoming a defector is 50% after (D, C) occurs. + actions = [(C, C), (C, D), (D, C), (C, D), (D, C)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={"is_defector": False}, + seed=3, + ) + + actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={"is_defector": True}, + seed=2, + ) + + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (D, D), (D, C)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + attrs={"is_defector": True}, + match_attributes={"length": float("inf")}, + seed=1, + ) + + +class TestRandomTitForTat(TestPlayer): + """Tests for random tit for tat strategy.""" + + name = "Random Tit for Tat: 0.5" + player = axl.RandomTitForTat + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """ + Test that strategy reacts to opponent, and controlled by + probability every other iteration. Also reacts randomly if no + probability input. + """ + actions = [(C, C), (C, C), (C, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 1} + ) + + actions = [(C, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions, init_kwargs={"p": 0}) + + actions = [(C, C), (C, C), (D, C), (C, C)] + self.versus_test( + axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 0} + ) + + actions = [(C, D), (D, D), (C, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions, init_kwargs={"p": 1}) + + actions = [(C, C), (C, C), (D, C), (C, C), (D, C), (C, C)] + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=2) + + actions = [(C, D), (D, D), (C, D), (D, D), (D, D), (D, D)] + self.versus_test(axl.Defector(), expected_actions=actions, seed=1) + + def test_deterministic_classification(self): + """ + Test classification when probability input is 0 or 1. + Should change stochastic to false, because actions are no + longer random. 
+ + """ + for p in [0, 1]: + player = axl.RandomTitForTat(p=p) + self.assertFalse(axl.Classifiers["stochastic"](player)) diff --git a/axelrod/tests/strategies/test_verybad.py b/axelrod/tests/strategies/test_verybad.py new file mode 100644 index 000000000..b67545e97 --- /dev/null +++ b/axelrod/tests/strategies/test_verybad.py @@ -0,0 +1,47 @@ +"""Tests for the VeryBad strategy.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestVeryBad(TestPlayer): + + name = "VeryBad" + player = axl.VeryBad + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": False, + "makes_use_of": set(), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + # axelrod.Defector - + # cooperates for the first three, defects for the rest P(C) < .5 + self.versus_test( + axl.Defector(), expected_actions=([(C, D)] * 3 + [(D, D)] * 7) + ) + + # axelrod.Cooperator - + # cooperate for all, P(C) == 1 + self.versus_test(axl.Cooperator(), expected_actions=[(C, C)]) + + expected_actions = [ + (C, C), # first three cooperate + (C, D), + (C, D), + (D, C), # P(C) = .33 + (C, C), # P(C) = .5 (last move C) + (C, D), # P(C) = .6 + (D, D), # P(C) = .5 (last move D) + (D, D), # P(C) = .43 + (D, C), # P(C) = .375 + (D, D), # P(C) = .4 + ] + mock_player = axl.MockPlayer(actions=[a[1] for a in expected_actions]) + self.versus_test(mock_player, expected_actions=expected_actions) diff --git a/axelrod/tests/strategies/test_worse_and_worse.py b/axelrod/tests/strategies/test_worse_and_worse.py new file mode 100644 index 000000000..a402a5e15 --- /dev/null +++ b/axelrod/tests/strategies/test_worse_and_worse.py @@ -0,0 +1,157 @@ +"""Tests for the WorseAndWorse strategies.""" + +import axelrod as axl + +from .test_player import TestPlayer + +C, D = axl.Action.C, axl.Action.D + + +class TestWorseAndWorse(TestPlayer): + + name = "Worse and Worse" + player = axl.WorseAndWorse + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """Test that the strategy gives expected behaviour.""" + # 6 Rounds Cooperate given seed + actions = [(C, C)] * 6 + [(D, C)] + [(C, C)] * 3 + self.versus_test(axl.Cooperator(), expected_actions=actions, seed=8) + + # 6 Rounds Cooperate and Defect no matter oponent + actions = [(C, D)] * 6 + [(D, D)] + [(C, D)] * 3 + self.versus_test(axl.Defector(), expected_actions=actions, seed=8) + + +class TestWorseAndWorseRandom(TestPlayer): + + name = "Knowledgeable Worse and Worse" + player = axl.KnowledgeableWorseAndWorse + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(["length"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """Test that the strategy gives expected behaviour.""" + actions = [(C, C)] + [(D, C)] * 4 + self.versus_test( + axl.Cooperator(), + expected_actions=actions, + match_attributes={"length": 5}, + seed=1, + ) + + # Test that behaviour does not depend on opponent + actions = [(C, D)] + [(D, D)] * 4 + self.versus_test( + axl.Defector(), + expected_actions=actions, + match_attributes={"length": 5}, + seed=1, + ) + + # Test that behaviour changes when does not know length. 
+ actions = [(C, C), (C, D), (C, C), (C, D), (C, C)] + self.versus_test( + axl.Alternator(), + expected_actions=actions, + match_attributes={"length": -1}, + seed=1, + ) + + +class TestWorseAndWorse2(TestPlayer): + + name = "Worse and Worse 2" + player = axl.WorseAndWorse2 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """Test that the strategy gives expected behaviour.""" + + # Test next move matches opponent + actions = [(C, C)] * 19 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + actions = [(C, C), (C, C), (C, D), (D, C)] + self.versus_test( + opponent=axl.MockPlayer(actions=[C, C, D, C]), expected_actions=actions + ) + + actions = [(C, C)] * 18 + [(C, D), (D, C)] + self.versus_test( + opponent=axl.MockPlayer(actions=[C] * 18 + [D, C]), + expected_actions=actions, + ) + + # After round 20, strategy follows stochastic behavior given a seed + actions = [(C, C)] * 20 + [(C, D), (D, C), (C, C), (C, D)] + self.versus_test( + opponent=axl.MockPlayer(actions=[C] * 20 + [D, C, C, D]), + expected_actions=actions, + seed=8, + ) + + actions = [(C, C)] * 20 + [(D, D), (D, C)] + [(C, C)] * 2 + [(D, C)] + self.versus_test( + opponent=axl.MockPlayer(actions=[C] * 20 + [D, C, C, C]), + expected_actions=actions, + seed=2, + ) + + +class TestWorseAndWorse3(TestPlayer): + + name = "Worse and Worse 3" + player = axl.WorseAndWorse3 + expected_classifier = { + "memory_depth": float("inf"), + "stochastic": True, + "makes_use_of": set(), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_strategy(self): + """Test that the strategy gives expected behaviour.""" + # Test that if opponent only defects, strategy also defects + actions = [(C, D)] + [(D, D)] * 4 + self.versus_test(axl.Defector(), expected_actions=actions) + + # Test that if opponent only cooperates, strategy also cooperates + actions = [(C, C)] * 5 + self.versus_test(axl.Cooperator(), expected_actions=actions) + + # Test that given a non 0/1 probability of defecting, strategy follows + # stochastic behaviour, given a seed + actions = [(C, C), (C, D), (C, C), (D, D), (C, C), (D, C)] + self.versus_test( + axl.MockPlayer(actions=[C, D, C, D, C]), + expected_actions=actions, + seed=8, + ) diff --git a/axelrod/tests/strategies/test_zero_determinant.py b/axelrod/tests/strategies/test_zero_determinant.py new file mode 100644 index 000000000..615ca27fd --- /dev/null +++ b/axelrod/tests/strategies/test_zero_determinant.py @@ -0,0 +1,319 @@ +"""Tests for the Zero Determinant strategies.""" + +import unittest + +import axelrod as axl +from axelrod.game import DefaultGame +from axelrod.strategies.zero_determinant import LRPlayer + +from .test_player import TestPlayer, test_four_vector + +C, D = axl.Action.C, axl.Action.D + + +class TestLRPlayer(unittest.TestCase): + def test_exception(self): + with self.assertRaises(ValueError): + LRPlayer(0, 0, -float("inf")) + + +class TestZDExtortion(TestPlayer): + + name = "ZD-Extortion: 0.2, 0.1, 1" + player = axl.ZDExtortion + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = {(C, C): 0.64, (C, D): 
0.18, (D, C): 0.28, (D, D): 0} + test_four_vector(self, expected_dictionary) + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=3 + ) + + actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=6) + + +class TestZDExtort2(TestPlayer): + + name = "ZD-Extort-2: 0.1111111111111111, 0.5" + player = axl.ZDExtort2 + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = {(C, C): 8 / 9, (C, D): 0.5, (D, C): 1 / 3, (D, D): 0.0} + test_four_vector(self, expected_dictionary) + + def test_receive_match_attributes(self): + player = self.player() + R, P, S, T = DefaultGame.RPST() + self.assertEqual(player.l, P) + + def test_strategy(self): + actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) + + actions = [(C, C), (C, D), (C, C), (C, D), (D, C), (C, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=31 + ) + + actions = [(C, D), (D, C), (D, D), (D, C), (C, D), (C, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2) + + actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (C, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=31) + + +class TestZDExtort2v2(TestPlayer): + + name = "ZD-Extort-2 v2: 0.125, 0.5, 1" + player = axl.ZDExtort2v2 + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = { + (C, C): 7 / 8, + (C, D): 7 / 16, + (D, C): 3 / 8, + (D, D): 0.0, + } + test_four_vector(self, expected_dictionary) + + def test_strategy(self): + actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) + + actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5) + + +class TestZDExtort3(TestPlayer): + name = "ZD-Extort3: 0.11538461538461539, 0.3333333333333333, 1" + player = axl.ZDExtort3 + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = { + (C, C): 11 / 13, + (C, D): 1 / 2, + (D, C): 7 / 26, + (D, D): 0, + } + test_four_vector(self, expected_dictionary) + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)] + + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=3 + ) + + actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] + + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=6) + + +class TestZDExtort4(TestPlayer): + + name = "ZD-Extort-4: 0.23529411764705882, 0.25, 1" + player = axl.ZDExtort4 + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + 
"inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = {(C, C): 11 / 17, (C, D): 0, (D, C): 8 / 17, (D, D): 0.0} + test_four_vector(self, expected_dictionary) + + def test_strategy(self): + actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) + + actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5) + + +class TestZDGen2(TestPlayer): + + name = "ZD-GEN-2: 0.125, 0.5, 3" + player = axl.ZDGen2 + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = {(C, C): 1, (C, D): 9 / 16, (D, C): 1 / 2, (D, D): 1 / 8} + test_four_vector(self, expected_dictionary) + + def test_strategy(self): + + actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) + + actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=31 + ) + + actions = [(C, D), (D, C), (D, D), (C, C), (C, D), (C, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2) + + actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (C, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=31) + + +class TestZDGTFT2(TestPlayer): + + name = "ZD-GTFT-2: 0.25, 0.5" + player = axl.ZDGTFT2 + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(["game"]), + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = {(C, C): 1.0, (C, D): 1 / 8, (D, C): 1.0, (D, D): 0.25} + test_four_vector(self, expected_dictionary) + + def test_receive_match_attributes(self): + player = self.player() + R, P, S, T = DefaultGame.RPST() + self.assertEqual(player.l, R) + + def test_strategy(self): + actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) + + actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=31 + ) + + actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=2) + + actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (D, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=31) + + +class TestZDMischief(TestPlayer): + + name = "ZD-Mischief: 0.1, 0.0, 1" + player = axl.ZDMischief + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = {(C, C): 0.8, (C, D): 0.6, (D, C): 0.1, (D, D): 0} + test_four_vector(self, expected_dictionary) + + def test_strategy(self): + actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) + + actions = [(C, D), (D, C), (D, D), (D, C), (D, D), 
(D, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5) + + +class TestZDSet2(TestPlayer): + + name = "ZD-SET-2: 0.25, 0.0, 2" + player = axl.ZDSet2 + expected_classifier = { + "memory_depth": 1, + "stochastic": True, + "makes_use_of": set(["game"]), + "long_run_time": False, + "inspects_source": False, + "manipulates_source": False, + "manipulates_state": False, + } + + def test_four_vector(self): + expected_dictionary = { + (C, C): 3 / 4, + (C, D): 1 / 4, + (D, C): 1 / 2, + (D, D): 1 / 4, + } + test_four_vector(self, expected_dictionary) + + def test_strategy(self): + actions = [(C, C), (D, D), (D, C), (C, D), (C, C), (D, D)] + self.versus_test( + opponent=axl.Alternator(), expected_actions=actions, seed=2 + ) + + actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)] + self.versus_test(opponent=axl.CyclerDC(), expected_actions=actions, seed=5) diff --git a/docs/tutorials/advanced/games.html b/docs/tutorials/advanced/games.html new file mode 100644 index 000000000..54738a1bc --- /dev/null +++ b/docs/tutorials/advanced/games.html @@ -0,0 +1,427 @@ + + + + + + +Using and playing different stage games + + + +
+

Using and playing different stage games

+
+

As described in :ref:`play_contexts`, the default game used for the Prisoner's Dilemma is given by:

+
+

+
+>>> import axelrod as axl
+>>> pd = axl.game.Game()
+>>> pd
+Axelrod game: (R,P,S,T) = (3, 1, 0, 5)
+>>> pd.RPST()
+(3, 1, 0, 5)
+
+

These Game objects are used to score :ref:`matches <creating_matches>`, :ref:`tournaments <creating_tournaments>` and :ref:`Moran processes <moran-process>`:

+
+

+
+>>> pd.score((axl.Action.C, axl.Action.C))
+(3, 3)
+>>> pd.score((axl.Action.C, axl.Action.D))
+(0, 5)
+>>> pd.score((axl.Action.D, axl.Action.C))
+(5, 0)
+>>> pd.score((axl.Action.D, axl.Action.D))
+(1, 1)
+
+

It is possible to run matches, tournaments and Moran processes with a different game. For example, here is the game of chicken:

+
+>>> chicken = axl.game.Game(r=0, s=-1, t=1, p=-10)
+>>> chicken
+Axelrod game: (R,P,S,T) = (0, -10, -1, 1)
+>>> chicken.RPST()
+(0, -10, -1, 1)
+
+
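
A single match can be scored with this game as well. The following is a minimal sketch, assuming axl.Match accepts the same game keyword as axl.Tournament: under chicken, Cooperator against Defector scores (S, T) = (-1, 1) in every round:

+
+>>> # a sketch: assumes axl.Match accepts the game keyword
+>>> match = axl.Match((axl.Cooperator(), axl.Defector()), turns=3, game=chicken)
+>>> match.play()
+[(C, D), (C, D), (C, D)]
+>>> match.final_score()
+(-3, 3)
+
+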

Here is a simple tournament run with this game:

+
+>>> players = [axl.Cooperator(), axl.Defector(), axl.TitForTat()]
+>>> tournament = axl.Tournament(players, game=chicken)
+>>> results = tournament.play()
+>>> results.ranked_names
+['Cooperator', 'Defector', 'Tit For Tat']
+
+

The default Prisoner's Dilemma has different results:

+
+>>> tournament = axl.Tournament(players)
+>>> results = tournament.play()
+>>> results.ranked_names
+['Defector', 'Tit For Tat', 'Cooperator']
+
+
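
A Moran process can also take the game. This is a minimal sketch, assuming axl.MoranProcess accepts the same game keyword; calling mp.play() would then run the (stochastic) process with the chicken payoffs, so no output is asserted here:

+
+>>> # a sketch: assumes axl.MoranProcess accepts the game keyword
+>>> mp = axl.MoranProcess(players, game=chicken)
+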
+
+