diff --git a/.circleci/config.yml b/.circleci/config.yml
index 37c8e66f4..5c33bdeb6 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -22,13 +22,13 @@ commands:
rasp_build_deps:
description: Install RASP build deps
parameters:
- miniconda2:
+ miniconda3:
type: string
steps:
- run:
name: Install build deps
command: |
- curl << parameters.miniconda2 >> -o ~/miniconda.sh
+ curl << parameters.miniconda3 >> -o ~/miniconda.sh
bash ~/miniconda.sh -b -p $HOME/miniconda
source $HOME/miniconda/bin/activate
conda init
@@ -54,7 +54,7 @@ commands:
- run:
name: Clone submodules
command: |
- git submodule update --force --recursive --init --remote
+ git submodule update --force --recursive --init
- run:
name: Build RASP
command: |
@@ -83,15 +83,17 @@ commands:
name: Installing SDKs
command: |
mv ~/.bashrc ~/.bashrc.bk
+ sudo apt-get update
+ sudo apt-get install bc
+ sudo apt-get install unzip
+ sudo apt-get install zip
curl -s "https://get.sdkman.io" | bash
source "$HOME/.sdkman/bin/sdkman-init.sh"
sdk version
- sdk install java 8.0.252.hs-adpt
+ sdk install java 8.0.292.hs-adpt
sdk install scala
sdk install maven
- sdk install spark 2.4.4
- sudo apt-get update
- sudo apt-get install bc
+ sdk install spark 3.1.1
- run:
name: Build preprocessing package
command: |
@@ -100,18 +102,18 @@
name: Run script
command: |
# gather data and store as pickle
- coverage run ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.offline_gym "$CONFIG"
+ coverage run ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.offline_gym_random "$CONFIG"
# run through timeline operator
coverage run --append ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.timeline_operator "$CONFIG"
# train on logged data
coverage run --append ./reagent/workflow/cli.py run reagent.workflow.training.identify_and_train_network "$CONFIG"
- # evaluate torchscript on gym environment
+ # evaluate on gym environment
coverage run --append ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.evaluate_gym "$CONFIG"
- run:
name: Save coverage results
command: |
- coverage report
- coverage xml
+ coverage report -i
+ coverage xml -i
bash <(curl -s https://codecov.io/bash)
- run:
name: Save test results
@@ -128,39 +130,56 @@ commands:
pip_install:
description: Pip install requirements
parameters:
- install_gym:
+ e2e_test:
type: boolean
- default: true
+ default: false
is_ubuntu_gpu:
type: boolean
default: false
steps:
- - when:
- condition: << parameters.is_ubuntu_gpu >>
- steps:
- - run:
- command: |
- pyenv global 3.7.0
- run:
+ # ubuntu-2004-cuda-11.4:202110-01 image (the image we are using)
+ # has python3.9 by default. However, we need to use python3.8
+ # for tests. Therefore, we need to install python3.8 first.
command: |
- pip install --upgrade pip
- pip install --upgrade tox wheel setuptools
+ # upgrade pyenv
+ rm -rf /opt/circleci/.pyenv
+ curl https://pyenv.run | bash
+ # retry at most 5 times to avoid transient failure
+ for i in 1 2 3 4 5; do pyenv install -v 3.8.1 && break || sleep 15; done
+ pyenv global 3.8.1
+ sudo apt update
+ pip install --upgrade pip --progress-bar off
+ pip install --upgrade cmake wheel setuptools --progress-bar off
+ sudo apt install swig
+ pip install tox==3.20.1 --progress-bar off
- when:
- condition: << parameters.install_gym >>
+ # If e2e_test is true, we run end-2-end tests which involve spark pipeline operations
+ # and python-based training / evaluation. 
pip-install and tests will be triggered by + # config.yml (this file) + # If e2e_test is false, we run python unit tests using tox, which installs + # virtual environments by tox.ini + condition: << parameters.e2e_test >> steps: + # when/unless clauses act as if ... else ... + # if is_ubuntu_gpu is True, we install cuda-supported pytorch + # otherwise, we install cpu-supported pytorch - when: condition: << parameters.is_ubuntu_gpu >> steps: - run: + # pip install .[gym,test] will trigger to install packages specified in setup.cfg + # "-e" option will activate the development mode (a symlink to the code in ReAgent + # will be created in site-packages directory) command: | - pip install -e .[gym,test] - pip install torch==1.5.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html + pip install -e .[gym,test,torchrec_gpu] --pre --extra-index-url https://download.pytorch.org/whl/cu113 -f https://download.pytorch.org/whl/torchrec/ --progress-bar off - unless: condition: << parameters.is_ubuntu_gpu >> steps: - run: command: | - sudo pip install -e .[gym,test] + pip install -e .[gym,test,torchrec_cpu] --pre --extra-index-url https://download.pytorch.org/whl/cpu -f https://download.pytorch.org/whl/nightly/torchrec_nightly_cpu/ --progress-bar off + run_unittest: description: Run unittests, coverage and save results @@ -169,119 +188,279 @@ commands: type: string steps: - run: + no_output_timeout: 30m command: | - tox -e << parameters.tox_env >> + tox -v -e << parameters.tox_env >> bash <(curl -s https://codecov.io/bash) - - run: python setup.py bdist_wheel + - run: python setup.py -q bdist_wheel - store_artifacts: path: dist/reagent-0.1-py3-none-any.whl destination: reagent-0.1-py3-none-any.whl - store_test_results: - path: .tox/py37/log/ + path: .tox/py38/log/ + run_interrogate: + description: Install and run interrogate + steps: + - run: + name: Install interrogate + command: | + pip install interrogate --progress-bar off + - run: + name: Run interrogate on reagent code base + command: | + interrogate -piImvv -f 15 reagent/ +jobs: + misc_unittest: + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small.multi + environment: + - CUDA_LAUNCH_BLOCKING: 1 + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: true + - run_unittest: + tox_env: circleci_misc_unittest + gym_cpu_unittest: + machine: + image: ubuntu-2004:202111-02 + resource_class: xlarge + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: false + - run_unittest: + tox_env: circleci_gym_cpu_unittest -jobs: - gpu_unittest: + gym_replay_buffer_cpu_unittest_1: + machine: + image: ubuntu-2004:202111-02 + resource_class: xlarge + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: false + - run_unittest: + tox_env: circleci_gym_replay_buffer_1_cpu_unittest + + gym_replay_buffer_cpu_unittest_2: machine: - image: ubuntu-1604-cuda-10.1:201909-23 - resource_class: gpu.medium + image: ubuntu-2004:202111-02 + resource_class: xlarge + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: false + - run_unittest: + tox_env: circleci_gym_replay_buffer_2_cpu_unittest + + gym_gpu_unittest: + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small.multi environment: - CUDA_LAUNCH_BLOCKING: 1 steps: - checkout_merge - pip_install: - install_gym: false + e2e_test: false is_ubuntu_gpu: true - run_unittest: - tox_env: circleci_unittest + tox_env: circleci_gym_gpu_unittest - gym_unittest: + 
gym_replay_buffer_gpu_unittest_1: machine: - image: ubuntu-1604-cuda-10.1:201909-23 - resource_class: gpu.medium + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small.multi environment: - CUDA_LAUNCH_BLOCKING: 1 steps: - checkout_merge - pip_install: - install_gym: false + e2e_test: false is_ubuntu_gpu: true - run_unittest: - tox_env: circleci_gym_unittest + tox_env: circleci_gym_replay_buffer_1_gpu_unittest + + gym_replay_buffer_gpu_unittest_2: + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small.multi + environment: + - CUDA_LAUNCH_BLOCKING: 1 + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: true + - run_unittest: + tox_env: circleci_gym_replay_buffer_2_gpu_unittest dqn_cartpole_e2e: - docker: - - image: circleci/python:3.7 - resource_class: large + machine: + image: ubuntu-2004:202111-02 + resource_class: xlarge environment: - - BASH_ENV: ~/.bashrc - CONFIG: reagent/workflow/sample_configs/discrete_dqn_cartpole_offline.yaml steps: - checkout_merge - pip_install: - install_gym: true + e2e_test: true is_ubuntu_gpu: false - end_to_end_test + ranking_unittest: + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small.multi + environment: + - CUDA_LAUNCH_BLOCKING: 1 + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: true + - run_unittest: + tox_env: circleci_ranking_unittest + + training_unittest: + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small.multi + environment: + - CUDA_LAUNCH_BLOCKING: 1 + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: true + - run_unittest: + tox_env: circleci_training_unittest + + prediction_unittest: + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small.multi + environment: + - CUDA_LAUNCH_BLOCKING: 1 + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: true + - run_unittest: + tox_env: circleci_prediction_unittest + + world_model_unittest: + machine: + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small.multi + environment: + - CUDA_LAUNCH_BLOCKING: 1 + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: true + - run_unittest: + tox_env: circleci_world_model_unittest + + lite_api_unittest: + machine: + image: ubuntu-2004:202111-02 + resource_class: xlarge + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: false + - run_unittest: + tox_env: circleci_lite_api_unittest + + mab_unittest: + machine: + image: ubuntu-2004:202111-02 + resource_class: medium + steps: + - checkout_merge + - pip_install: + e2e_test: false + is_ubuntu_gpu: false + - run_unittest: + tox_env: circleci_mab_unittest + sac_pendulum_e2e: - docker: - - image: circleci/python:3.7 - resource_class: large + machine: + image: ubuntu-2004:202111-02 + resource_class: xlarge environment: - - BASH_ENV: ~/.bashrc - CONFIG: reagent/workflow/sample_configs/sac_pendulum_offline.yaml steps: - checkout_merge - pip_install: - install_gym: true + e2e_test: true is_ubuntu_gpu: false - end_to_end_test sac_pendulum_e2e_gpu: machine: - image: ubuntu-1604-cuda-10.1:201909-23 - resource_class: gpu.medium + image: ubuntu-2004-cuda-11.4:202110-01 + resource_class: gpu.nvidia.small.multi environment: - CONFIG: reagent/workflow/sample_configs/sac_pendulum_offline.yaml steps: - checkout_merge - pip_install: - install_gym: true + e2e_test: true is_ubuntu_gpu: true - end_to_end_test 
rasp_test_linux: docker: - image: cimg/base:2020.01 - resource_class: large + resource_class: xlarge steps: - checkout_merge - rasp_build_deps: - miniconda2: https://repo.anaconda.com/miniconda/Miniconda2-latest-Linux-x86_64.sh + miniconda3: https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh - install_libtorch: source: https://download.pytorch.org/libtorch/nightly/cpu/libtorch-cxx11-abi-shared-with-deps-latest.zip - rasp_build_test - rasp_test_mac: - macos: - xcode: 11.3.0 + docstring_coverage: + docker: + - image: circleci/python:3.8 + resource_class: small steps: - checkout_merge - - rasp_build_deps: - miniconda2: https://repo.anaconda.com/miniconda/Miniconda2-latest-MacOSX-x86_64.sh - - install_libtorch: - source: https://download.pytorch.org/libtorch/nightly/cpu/libtorch-macos-latest.zip - - rasp_build_test + - run_interrogate workflows: build: jobs: + - lite_api_unittest + - mab_unittest + - ranking_unittest + - training_unittest + - prediction_unittest + - world_model_unittest - dqn_cartpole_e2e - sac_pendulum_e2e - sac_pendulum_e2e_gpu - - gpu_unittest - - gym_unittest + - misc_unittest + - gym_cpu_unittest + - gym_gpu_unittest + - gym_replay_buffer_cpu_unittest_1 + - gym_replay_buffer_cpu_unittest_2 + - gym_replay_buffer_gpu_unittest_1 + - gym_replay_buffer_gpu_unittest_2 - rasp_test_linux - - rasp_test_mac + - docstring_coverage diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 000000000..df1e87f74 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,4 @@ +ignore: + # These are more experimental stuffs + - "reagent/ope/**/*" + - "reagent/training/gradient_free/**/*" diff --git a/README.md b/README.md index 5bd38dbc9..001e51fe0 100644 --- a/README.md +++ b/README.md @@ -1,38 +1,77 @@ ![Banner](logo/reagent_banner.png) ### Applied Reinforcement Learning @ Facebook +[![Support Ukraine](https://img.shields.io/badge/Support-Ukraine-FFD500?style=flat&labelColor=005BBB)](https://opensource.fb.com/support-ukraine) [![License](https://img.shields.io/badge/license-BSD%203--Clause-brightgreen)](LICENSE) -[![CircleCI](https://circleci.com/gh/facebookresearch/ReAgent/tree/master.svg?style=shield)](https://circleci.com/gh/facebookresearch/ReAgent/tree/master) -[![codecov](https://codecov.io/gh/facebookresearch/ReAgent/branch/master/graph/badge.svg)](https://codecov.io/gh/facebookresearch/ReAgent) +[![CircleCI](https://circleci.com/gh/facebookresearch/ReAgent/tree/main.svg?style=shield)](https://circleci.com/gh/facebookresearch/ReAgent/tree/main) +[![codecov](https://codecov.io/gh/facebookresearch/ReAgent/branch/main/graph/badge.svg)](https://codecov.io/gh/facebookresearch/ReAgent) --- -#### Overview -ReAgent is an open source end-to-end platform for applied reinforcement learning (RL) developed and used at Facebook. ReAgent is built in Python and uses PyTorch for modeling and training and TorchScript for model serving. The platform contains workflows to train popular deep RL algorithms and includes data preprocessing, feature transformation, distributed training, counterfactual policy evaluation, and optimized serving. For more detailed information about ReAgent see the white paper [here](https://research.fb.com/publications/horizon-facebooks-open-source-applied-reinforcement-learning-platform/). +### Overview +ReAgent is an open source end-to-end platform for applied reinforcement learning (RL) developed and used at Facebook. ReAgent is built in Python and uses PyTorch for modeling and training and TorchScript for model serving. 
The platform contains workflows to train popular deep RL algorithms and includes data preprocessing, feature transformation, distributed training, counterfactual policy evaluation, and optimized serving. For more detailed information about ReAgent see the release post [here](https://research.fb.com/publications/horizon-facebooks-open-source-applied-reinforcement-learning-platform/) and white paper [here](https://arxiv.org/abs/1811.00260). The platform was once named "Horizon" but we have adopted the name "ReAgent" recently to emphasize its broader scope in decision making and reasoning.
-#### Algorithms Supported
+### Algorithms Supported
+
+Classic Off-Policy algorithms:
- Discrete-Action [DQN](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf)
- Parametric-Action DQN
- [Double DQN](https://arxiv.org/abs/1509.06461), [Dueling DQN](https://arxiv.org/abs/1511.06581), [Dueling Double DQN](https://arxiv.org/abs/1710.02298)
- Distributional RL: [C51](https://arxiv.org/abs/1707.06887) and [QR-DQN](https://arxiv.org/abs/1710.10044)
- [Twin Delayed DDPG](https://arxiv.org/abs/1802.09477) (TD3)
- [Soft Actor-Critic](https://arxiv.org/abs/1801.01290) (SAC)
+- [Critic Regularized Regression](https://arxiv.org/abs/2006.15134) (CRR)
+- [Proximal Policy Optimization Algorithms](https://arxiv.org/abs/1707.06347) (PPO)
+
+RL for recommender systems:
+- [Seq2Slate](https://arxiv.org/abs/1810.02019)
+- [SlateQ](https://arxiv.org/abs/1905.12767)
+
+Counterfactual Evaluation:
+- [Doubly Robust](https://arxiv.org/abs/1612.01205) (for bandits)
+- [Doubly Robust](https://arxiv.org/abs/1511.03722) (for sequential decisions)
+- [MAGIC](https://arxiv.org/abs/1604.00923)
+
+Multi-Arm and Contextual Bandits:
+- [UCB1](https://www.cs.bham.ac.uk/internal/courses/robotics/lectures/ucb1.pdf)
+- [MetricUCB](https://arxiv.org/abs/0809.4882)
+- [Thompson Sampling](https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf)
+- [LinUCB](https://arxiv.org/abs/1003.0146)
+
+
+Others:
+- [Cross-Entropy Method](http://web.mit.edu/6.454/www/www_fall_2003/gew/CEtutorial.pdf)
+- [Synthetic Return for Credit Assignment](https://arxiv.org/abs/2102.12425)
-#### Installation
+
+### Installation
ReAgent can be installed via Docker or manually. Detailed instructions on how to install ReAgent can be found [here](docs/installation.rst).
-#### Usage
-Detailed instructions on how to use ReAgent Models can be found [here](docs/usage.rst).
+### Tutorial
+ReAgent is designed for large-scale, distributed recommendation/optimization tasks where we don’t have access to a simulator.
+In this environment, it is typically better to train offline on batches of data, and release new policies slowly over time.
+Because the policy updates slowly and in batches, we use off-policy algorithms. To test a new policy without deploying it,
+we rely on counter-factual policy evaluation (CPE), a set of techniques for estimating the value of a policy based on the actions of another policy.
+
+We also have a set of tools to facilitate applying RL in real-world applications:
+- Domain Analysis Tool, which analyzes state/action feature importance and identifies whether the problem is suitable for applying batch RL
+- Behavior Cloning, which clones from the logging policy to bootstrap the learning policy safely
+
+Detailed instructions on how to use ReAgent can be found [here](docs/usage.rst).
-The ReAgent Serving Platform (RASP) tutorial is available [here](docs/rasp_tutorial.rst).
-#### License
+### License
ReAgent is released under a BSD 3-Clause license. 
Find out more about it [here](LICENSE). -#### Citing +[Terms of Use](https://opensource.facebook.com/legal/terms) | [Privacy Policy](https://opensource.facebook.com/legal/privacy) | Copyright © 2022 Meta Platforms, Inc + + +### Citing +``` @article{gauci2018horizon, title={Horizon: Facebook's Open Source Applied Reinforcement Learning Platform}, author={Gauci, Jason and Conti, Edoardo and Liang, Yitao and Virochsiri, Kittipat and Chen, Zhengxing and He, Yuchen and Kaden, Zachary and Narayanan, Vivek and Ye, Xiaohui}, journal={arXiv preprint arXiv:1811.00260}, year={2018} } +``` diff --git a/docs/api/ml.rl.evaluation.rst b/docs/api/ml.rl.evaluation.rst deleted file mode 100644 index 24492e79c..000000000 --- a/docs/api/ml.rl.evaluation.rst +++ /dev/null @@ -1,78 +0,0 @@ -ml.rl.evaluation package -======================== - -Submodules ----------- - -ml.rl.evaluation.cpe module ---------------------------- - -.. automodule:: ml.rl.evaluation.cpe - :members: - :undoc-members: - :show-inheritance: - -ml.rl.evaluation.doubly\_robust\_estimator module -------------------------------------------------- - -.. automodule:: ml.rl.evaluation.doubly_robust_estimator - :members: - :undoc-members: - :show-inheritance: - -ml.rl.evaluation.evaluation\_data\_page module ----------------------------------------------- - -.. automodule:: ml.rl.evaluation.evaluation_data_page - :members: - :undoc-members: - :show-inheritance: - -ml.rl.evaluation.evaluator module ---------------------------------- - -.. automodule:: ml.rl.evaluation.evaluator - :members: - :undoc-members: - :show-inheritance: - -ml.rl.evaluation.ranking\_evaluator module ------------------------------------------- - -.. automodule:: ml.rl.evaluation.ranking_evaluator - :members: - :undoc-members: - :show-inheritance: - -ml.rl.evaluation.sequential\_doubly\_robust\_estimator module -------------------------------------------------------------- - -.. automodule:: ml.rl.evaluation.sequential_doubly_robust_estimator - :members: - :undoc-members: - :show-inheritance: - -ml.rl.evaluation.weighted\_sequential\_doubly\_robust\_estimator module ------------------------------------------------------------------------ - -.. automodule:: ml.rl.evaluation.weighted_sequential_doubly_robust_estimator - :members: - :undoc-members: - :show-inheritance: - -ml.rl.evaluation.world\_model\_evaluator module ------------------------------------------------ - -.. automodule:: ml.rl.evaluation.world_model_evaluator - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ml.rl.evaluation - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.models.rst b/docs/api/ml.rl.models.rst deleted file mode 100644 index 4d1804ca3..000000000 --- a/docs/api/ml.rl.models.rst +++ /dev/null @@ -1,150 +0,0 @@ -ml.rl.models package -==================== - -Submodules ----------- - -ml.rl.models.actor module -------------------------- - -.. automodule:: ml.rl.models.actor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.base module ------------------------- - -.. automodule:: ml.rl.models.base - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.bcq module ------------------------ - -.. automodule:: ml.rl.models.bcq - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.categorical\_dqn module ------------------------------------- - -.. 
automodule:: ml.rl.models.categorical_dqn - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.cem\_planner module --------------------------------- - -.. automodule:: ml.rl.models.cem_planner - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.convolutional\_network module ------------------------------------------- - -.. automodule:: ml.rl.models.convolutional_network - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.dqn module ------------------------ - -.. automodule:: ml.rl.models.dqn - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.dueling\_q\_network module ---------------------------------------- - -.. automodule:: ml.rl.models.dueling_q_network - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.dueling\_quantile\_dqn module ------------------------------------------- - -.. automodule:: ml.rl.models.dueling_quantile_dqn - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.example\_sequence\_model module --------------------------------------------- - -.. automodule:: ml.rl.models.example_sequence_model - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.fully\_connected\_network module ---------------------------------------------- - -.. automodule:: ml.rl.models.fully_connected_network - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.mdn\_rnn module ----------------------------- - -.. automodule:: ml.rl.models.mdn_rnn - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.no\_soft\_update\_embedding module ------------------------------------------------ - -.. automodule:: ml.rl.models.no_soft_update_embedding - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.parametric\_dqn module ------------------------------------ - -.. automodule:: ml.rl.models.critic - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.quantile\_dqn module ---------------------------------- - -.. automodule:: ml.rl.models.quantile_dqn - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.seq2slate module ------------------------------ - -.. automodule:: ml.rl.models.seq2slate - :members: - :undoc-members: - :show-inheritance: - -ml.rl.models.world\_model module --------------------------------- - -.. automodule:: ml.rl.models.world_model - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ml.rl.models - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.prediction.rst b/docs/api/ml.rl.prediction.rst deleted file mode 100644 index bae5ed6de..000000000 --- a/docs/api/ml.rl.prediction.rst +++ /dev/null @@ -1,30 +0,0 @@ -ml.rl.prediction package -======================== - -Submodules ----------- - -ml.rl.prediction.dqn\_torch\_predictor module ---------------------------------------------- - -.. automodule:: ml.rl.prediction.dqn_torch_predictor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.prediction.predictor\_wrapper module ------------------------------------------- - -.. automodule:: ml.rl.prediction.predictor_wrapper - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. 
automodule:: ml.rl.prediction - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.preprocessing.rst b/docs/api/ml.rl.preprocessing.rst deleted file mode 100644 index b29faaefa..000000000 --- a/docs/api/ml.rl.preprocessing.rst +++ /dev/null @@ -1,78 +0,0 @@ -ml.rl.preprocessing package -=========================== - -Submodules ----------- - -ml.rl.preprocessing.batch\_preprocessor module ----------------------------------------------- - -.. automodule:: ml.rl.preprocessing.batch_preprocessor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.preprocessing.feature\_extractor module ---------------------------------------------- - -.. automodule:: ml.rl.preprocessing.feature_extractor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.preprocessing.identify\_types module ------------------------------------------- - -.. automodule:: ml.rl.preprocessing.identify_types - :members: - :undoc-members: - :show-inheritance: - -ml.rl.preprocessing.normalization module ----------------------------------------- - -.. automodule:: ml.rl.preprocessing.normalization - :members: - :undoc-members: - :show-inheritance: - -ml.rl.preprocessing.postprocessor module ----------------------------------------- - -.. automodule:: ml.rl.preprocessing.postprocessor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.preprocessing.preprocessor module ---------------------------------------- - -.. automodule:: ml.rl.preprocessing.preprocessor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.preprocessing.preprocessor\_net module --------------------------------------------- - -.. automodule:: ml.rl.preprocessing.preprocessor_net - :members: - :undoc-members: - :show-inheritance: - -ml.rl.preprocessing.sparse\_to\_dense module --------------------------------------------- - -.. automodule:: ml.rl.preprocessing.sparse_to_dense - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ml.rl.preprocessing - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.readers.rst b/docs/api/ml.rl.readers.rst deleted file mode 100644 index f8ddeaf66..000000000 --- a/docs/api/ml.rl.readers.rst +++ /dev/null @@ -1,46 +0,0 @@ -ml.rl.readers package -===================== - -Submodules ----------- - -ml.rl.readers.base module -------------------------- - -.. automodule:: ml.rl.readers.base - :members: - :undoc-members: - :show-inheritance: - -ml.rl.readers.data\_streamer module ------------------------------------ - -.. automodule:: ml.rl.readers.data_streamer - :members: - :undoc-members: - :show-inheritance: - -ml.rl.readers.json\_dataset\_reader module ------------------------------------------- - -.. automodule:: ml.rl.readers.json_dataset_reader - :members: - :undoc-members: - :show-inheritance: - -ml.rl.readers.nparray\_reader module ------------------------------------- - -.. automodule:: ml.rl.readers.nparray_reader - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ml.rl.readers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.rst b/docs/api/ml.rl.rst deleted file mode 100644 index c4010bd46..000000000 --- a/docs/api/ml.rl.rst +++ /dev/null @@ -1,84 +0,0 @@ -ml.rl package -============= - -Subpackages ------------ - -.. 
toctree:: - - ml.rl.evaluation - ml.rl.models - ml.rl.prediction - ml.rl.preprocessing - ml.rl.readers - ml.rl.simulators - ml.rl.training - ml.rl.workflow - -Submodules ----------- - -ml.rl.caffe\_utils module -------------------------- - -.. automodule:: ml.rl.caffe_utils - :members: - :undoc-members: - :show-inheritance: - -ml.rl.debug\_on\_error module ------------------------------ - -.. automodule:: ml.rl.debug_on_error - :members: - :undoc-members: - :show-inheritance: - -ml.rl.json\_serialize module ----------------------------- - -.. automodule:: ml.rl.json_serialize - :members: - :undoc-members: - :show-inheritance: - -ml.rl.parameters module ------------------------ - -.. automodule:: ml.rl.parameters - :members: - :undoc-members: - :show-inheritance: - -ml.rl.tensorboardX module -------------------------- - -.. automodule:: ml.rl.tensorboardX - :members: - :undoc-members: - :show-inheritance: - -ml.rl.torch\_utils module -------------------------- - -.. automodule:: ml.rl.torch_utils - :members: - :undoc-members: - :show-inheritance: - -ml.rl.types module ------------------- - -.. automodule:: ml.rl.types - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ml.rl - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.training.gradient_free.rst b/docs/api/ml.rl.training.gradient_free.rst deleted file mode 100644 index 0629b4b81..000000000 --- a/docs/api/ml.rl.training.gradient_free.rst +++ /dev/null @@ -1,30 +0,0 @@ -ml.rl.training.gradient\_free package -===================================== - -Submodules ----------- - -ml.rl.training.gradient\_free.es\_worker module ------------------------------------------------ - -.. automodule:: ml.rl.training.gradient_free.es_worker - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.gradient\_free.evolution\_pool module ----------------------------------------------------- - -.. automodule:: ml.rl.training.gradient_free.evolution_pool - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ml.rl.training.gradient_free - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.training.ranking.rst b/docs/api/ml.rl.training.ranking.rst deleted file mode 100644 index 5477af1b9..000000000 --- a/docs/api/ml.rl.training.ranking.rst +++ /dev/null @@ -1,30 +0,0 @@ -ml.rl.training.ranking package -============================== - -Submodules ----------- - -ml.rl.training.ranking.ranking\_trainer module ----------------------------------------------- - -.. automodule:: ml.rl.training.ranking.ranking_trainer - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.ranking.seq2slate\_trainer module ------------------------------------------------- - -.. automodule:: ml.rl.training.ranking.seq2slate_trainer - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ml.rl.training.ranking - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.training.rst b/docs/api/ml.rl.training.rst deleted file mode 100644 index 57785f36b..000000000 --- a/docs/api/ml.rl.training.rst +++ /dev/null @@ -1,159 +0,0 @@ -ml.rl.training package -====================== - -Subpackages ------------ - -.. toctree:: - - ml.rl.training.gradient_free - ml.rl.training.ranking - ml.rl.training.world_model - -Submodules ----------- - -ml.rl.training.c51\_trainer module ----------------------------------- - -.. 
automodule:: ml.rl.training.c51_trainer - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.cem\_trainer module ----------------------------------- - -.. automodule:: ml.rl.training.cem_trainer - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.dqn\_predictor module ------------------------------------- - -.. automodule:: ml.rl.training.dqn_predictor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.dqn\_trainer module ----------------------------------- - -.. automodule:: ml.rl.training.dqn_trainer - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.dqn\_trainer\_base module ----------------------------------------- - -.. automodule:: ml.rl.training.dqn_trainer_base - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.imitator\_training module ----------------------------------------- - -.. automodule:: ml.rl.training.imitator_training - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.loss\_reporter module ------------------------------------- - -.. automodule:: ml.rl.training.loss_reporter - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.off\_policy\_predictor module --------------------------------------------- - -.. automodule:: ml.rl.training.off_policy_predictor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.on\_policy\_predictor module -------------------------------------------- - -.. automodule:: ml.rl.training.on_policy_predictor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.parametric\_dqn\_trainer module ----------------------------------------------- - -.. automodule:: ml.rl.training.parametric_dqn_trainer - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.qrdqn\_trainer module ------------------------------------- - -.. automodule:: ml.rl.training.qrdqn_trainer - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.rl\_dataset module ---------------------------------- - -.. automodule:: ml.rl.training.rl_dataset - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.rl\_trainer\_pytorch module ------------------------------------------- - -.. automodule:: ml.rl.training.rl_trainer_pytorch - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.sac\_trainer module ----------------------------------- - -.. automodule:: ml.rl.training.sac_trainer - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.sandboxed\_predictor module ------------------------------------------- - -.. automodule:: ml.rl.training.sandboxed_predictor - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.td3\_trainer module ----------------------------------- - -.. automodule:: ml.rl.training.td3_trainer - :members: - :undoc-members: - :show-inheritance: - -ml.rl.training.training\_data\_page module ------------------------------------------- - -.. automodule:: ml.rl.training.training_data_page - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. 
automodule:: ml.rl.training - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.training.world_model.rst b/docs/api/ml.rl.training.world_model.rst deleted file mode 100644 index dd2c0fd6e..000000000 --- a/docs/api/ml.rl.training.world_model.rst +++ /dev/null @@ -1,22 +0,0 @@ -ml.rl.training.world\_model package -=================================== - -Submodules ----------- - -ml.rl.training.world\_model.mdnrnn\_trainer module --------------------------------------------------- - -.. automodule:: ml.rl.training.world_model.mdnrnn_trainer - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ml.rl.training.world_model - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rl.workflow.rst b/docs/api/ml.rl.workflow.rst deleted file mode 100644 index 056aaa3eb..000000000 --- a/docs/api/ml.rl.workflow.rst +++ /dev/null @@ -1,78 +0,0 @@ -ml.rl.workflow package -====================== - -Submodules ----------- - -ml.rl.workflow.base\_workflow module ------------------------------------- - -.. automodule:: ml.rl.workflow.base_workflow - :members: - :undoc-members: - :show-inheritance: - -ml.rl.workflow.create\_normalization\_metadata module ------------------------------------------------------ - -.. automodule:: ml.rl.workflow.create_normalization_metadata - :members: - :undoc-members: - :show-inheritance: - -ml.rl.workflow.dqn\_workflow module ------------------------------------ - -.. automodule:: ml.rl.workflow.dqn_workflow - :members: - :undoc-members: - :show-inheritance: - -ml.rl.workflow.helpers module ------------------------------ - -.. automodule:: ml.rl.workflow.helpers - :members: - :undoc-members: - :show-inheritance: - -ml.rl.workflow.page\_handler module ------------------------------------ - -.. automodule:: ml.rl.workflow.page_handler - :members: - :undoc-members: - :show-inheritance: - -ml.rl.workflow.parametric\_dqn\_workflow module ------------------------------------------------ - -.. automodule:: ml.rl.workflow.parametric_dqn_workflow - :members: - :undoc-members: - :show-inheritance: - -ml.rl.workflow.preprocess\_handler module ------------------------------------------ - -.. automodule:: ml.rl.workflow.preprocess_handler - :members: - :undoc-members: - :show-inheritance: - -ml.rl.workflow.transitional module ----------------------------------- - -.. automodule:: ml.rl.workflow.transitional - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: ml.rl.workflow - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/ml.rst b/docs/api/ml.rst deleted file mode 100644 index b1b1beeda..000000000 --- a/docs/api/ml.rst +++ /dev/null @@ -1,17 +0,0 @@ -ml package -========== - -Subpackages ------------ - -.. toctree:: - - ml.rl - -Module contents ---------------- - -.. automodule:: ml - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/modules.rst b/docs/api/modules.rst index 25b2afbc7..3b064fef7 100644 --- a/docs/api/modules.rst +++ b/docs/api/modules.rst @@ -1,7 +1,7 @@ -ml -== +reagent +======= .. toctree:: :maxdepth: 4 - ml + reagent diff --git a/docs/api/reagent.core.rst b/docs/api/reagent.core.rst new file mode 100644 index 000000000..cd5688e38 --- /dev/null +++ b/docs/api/reagent.core.rst @@ -0,0 +1,189 @@ +reagent.core package +==================== + +Submodules +---------- + +reagent.core.aggregators module +------------------------------- + +.. 
automodule:: reagent.core.aggregators + :members: + :undoc-members: + :show-inheritance: + +reagent.core.base\_dataclass module +----------------------------------- + +.. automodule:: reagent.core.base_dataclass + :members: + :undoc-members: + :show-inheritance: + +reagent.core.configuration module +--------------------------------- + +.. automodule:: reagent.core.configuration + :members: + :undoc-members: + :show-inheritance: + +reagent.core.dataclasses module +------------------------------- + +.. automodule:: reagent.core.dataclasses + :members: + :undoc-members: + :show-inheritance: + +reagent.core.debug\_on\_error module +------------------------------------ + +.. automodule:: reagent.core.debug_on_error + :members: + :undoc-members: + :show-inheritance: + +reagent.core.fb\_checker module +------------------------------- + +.. automodule:: reagent.core.fb_checker + :members: + :undoc-members: + :show-inheritance: + +reagent.core.multiprocess\_utils module +--------------------------------------- + +.. automodule:: reagent.core.multiprocess_utils + :members: + :undoc-members: + :show-inheritance: + +reagent.core.observers module +----------------------------- + +.. automodule:: reagent.core.observers + :members: + :undoc-members: + :show-inheritance: + +reagent.core.oss\_tensorboard\_logger module +-------------------------------------------- + +.. automodule:: reagent.core.oss_tensorboard_logger + :members: + :undoc-members: + :show-inheritance: + +reagent.core.parameters module +------------------------------ + +.. automodule:: reagent.core.parameters + :members: + :undoc-members: + :show-inheritance: + +reagent.core.parameters\_seq2slate module +----------------------------------------- + +.. automodule:: reagent.core.parameters_seq2slate + :members: + :undoc-members: + :show-inheritance: + +reagent.core.registry\_meta module +---------------------------------- + +.. automodule:: reagent.core.registry_meta + :members: + :undoc-members: + :show-inheritance: + +reagent.core.report\_utils module +--------------------------------- + +.. automodule:: reagent.core.report_utils + :members: + :undoc-members: + :show-inheritance: + +reagent.core.result\_registries module +-------------------------------------- + +.. automodule:: reagent.core.result_registries + :members: + :undoc-members: + :show-inheritance: + +reagent.core.result\_types module +--------------------------------- + +.. automodule:: reagent.core.result_types + :members: + :undoc-members: + :show-inheritance: + +reagent.core.running\_stats module +---------------------------------- + +.. automodule:: reagent.core.running_stats + :members: + :undoc-members: + :show-inheritance: + +reagent.core.tagged\_union module +--------------------------------- + +.. automodule:: reagent.core.tagged_union + :members: + :undoc-members: + :show-inheritance: + +reagent.core.tensorboardX module +-------------------------------- + +.. automodule:: reagent.core.tensorboardX + :members: + :undoc-members: + :show-inheritance: + +reagent.core.torch\_utils module +-------------------------------- + +.. automodule:: reagent.core.torch_utils + :members: + :undoc-members: + :show-inheritance: + +reagent.core.tracker module +--------------------------- + +.. automodule:: reagent.core.tracker + :members: + :undoc-members: + :show-inheritance: + +reagent.core.types module +------------------------- + +.. automodule:: reagent.core.types + :members: + :undoc-members: + :show-inheritance: + +reagent.core.utils module +------------------------- + +.. 
automodule:: reagent.core.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.core + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.data.rst b/docs/api/reagent.data.rst new file mode 100644 index 000000000..4f9daabf2 --- /dev/null +++ b/docs/api/reagent.data.rst @@ -0,0 +1,53 @@ +reagent.data package +==================== + +Submodules +---------- + +reagent.data.data\_fetcher module +--------------------------------- + +.. automodule:: reagent.data.data_fetcher + :members: + :undoc-members: + :show-inheritance: + +reagent.data.manual\_data\_module module +---------------------------------------- + +.. automodule:: reagent.data.manual_data_module + :members: + :undoc-members: + :show-inheritance: + +reagent.data.oss\_data\_fetcher module +-------------------------------------- + +.. automodule:: reagent.data.oss_data_fetcher + :members: + :undoc-members: + :show-inheritance: + +reagent.data.reagent\_data\_module module +----------------------------------------- + +.. automodule:: reagent.data.reagent_data_module + :members: + :undoc-members: + :show-inheritance: + +reagent.data.spark\_utils module +-------------------------------- + +.. automodule:: reagent.data.spark_utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.data + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.evaluation.feature_importance.rst b/docs/api/reagent.evaluation.feature_importance.rst new file mode 100644 index 000000000..40e947641 --- /dev/null +++ b/docs/api/reagent.evaluation.feature_importance.rst @@ -0,0 +1,29 @@ +reagent.evaluation.feature\_importance package +============================================== + +Submodules +---------- + +reagent.evaluation.feature\_importance.feature\_importance\_base module +----------------------------------------------------------------------- + +.. automodule:: reagent.evaluation.feature_importance.feature_importance_base + :members: + :undoc-members: + :show-inheritance: + +reagent.evaluation.feature\_importance.feature\_importance\_perturbation module +------------------------------------------------------------------------------- + +.. automodule:: reagent.evaluation.feature_importance.feature_importance_perturbation + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.evaluation.feature_importance + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.evaluation.rst b/docs/api/reagent.evaluation.rst new file mode 100644 index 000000000..c481ad1df --- /dev/null +++ b/docs/api/reagent.evaluation.rst @@ -0,0 +1,85 @@ +reagent.evaluation package +========================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.evaluation.feature_importance + +Submodules +---------- + +reagent.evaluation.cpe module +----------------------------- + +.. automodule:: reagent.evaluation.cpe + :members: + :undoc-members: + :show-inheritance: + +reagent.evaluation.doubly\_robust\_estimator module +--------------------------------------------------- + +.. automodule:: reagent.evaluation.doubly_robust_estimator + :members: + :undoc-members: + :show-inheritance: + +reagent.evaluation.evaluation\_data\_page module +------------------------------------------------ + +.. 
automodule:: reagent.evaluation.evaluation_data_page + :members: + :undoc-members: + :show-inheritance: + +reagent.evaluation.evaluator module +----------------------------------- + +.. automodule:: reagent.evaluation.evaluator + :members: + :undoc-members: + :show-inheritance: + +reagent.evaluation.ope\_adapter module +-------------------------------------- + +.. automodule:: reagent.evaluation.ope_adapter + :members: + :undoc-members: + :show-inheritance: + +reagent.evaluation.sequential\_doubly\_robust\_estimator module +--------------------------------------------------------------- + +.. automodule:: reagent.evaluation.sequential_doubly_robust_estimator + :members: + :undoc-members: + :show-inheritance: + +reagent.evaluation.weighted\_sequential\_doubly\_robust\_estimator module +------------------------------------------------------------------------- + +.. automodule:: reagent.evaluation.weighted_sequential_doubly_robust_estimator + :members: + :undoc-members: + :show-inheritance: + +reagent.evaluation.world\_model\_evaluator module +------------------------------------------------- + +.. automodule:: reagent.evaluation.world_model_evaluator + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.evaluation + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.agents.rst b/docs/api/reagent.gym.agents.rst new file mode 100644 index 000000000..a258a3e54 --- /dev/null +++ b/docs/api/reagent.gym.agents.rst @@ -0,0 +1,29 @@ +reagent.gym.agents package +========================== + +Submodules +---------- + +reagent.gym.agents.agent module +------------------------------- + +.. automodule:: reagent.gym.agents.agent + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.agents.post\_step module +------------------------------------ + +.. automodule:: reagent.gym.agents.post_step + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.agents + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.datasets.rst b/docs/api/reagent.gym.datasets.rst new file mode 100644 index 000000000..0a8afa63c --- /dev/null +++ b/docs/api/reagent.gym.datasets.rst @@ -0,0 +1,29 @@ +reagent.gym.datasets package +============================ + +Submodules +---------- + +reagent.gym.datasets.episodic\_dataset module +--------------------------------------------- + +.. automodule:: reagent.gym.datasets.episodic_dataset + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.datasets.replay\_buffer\_dataset module +--------------------------------------------------- + +.. automodule:: reagent.gym.datasets.replay_buffer_dataset + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.datasets + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.envs.dynamics.rst b/docs/api/reagent.gym.envs.dynamics.rst new file mode 100644 index 000000000..cd96f226b --- /dev/null +++ b/docs/api/reagent.gym.envs.dynamics.rst @@ -0,0 +1,21 @@ +reagent.gym.envs.dynamics package +================================= + +Submodules +---------- + +reagent.gym.envs.dynamics.linear\_dynamics module +------------------------------------------------- + +.. automodule:: reagent.gym.envs.dynamics.linear_dynamics + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: reagent.gym.envs.dynamics + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.envs.functionality.rst b/docs/api/reagent.gym.envs.functionality.rst new file mode 100644 index 000000000..36a3261fd --- /dev/null +++ b/docs/api/reagent.gym.envs.functionality.rst @@ -0,0 +1,21 @@ +reagent.gym.envs.functionality package +====================================== + +Submodules +---------- + +reagent.gym.envs.functionality.possible\_actions\_mask\_tester module +--------------------------------------------------------------------- + +.. automodule:: reagent.gym.envs.functionality.possible_actions_mask_tester + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.envs.functionality + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.envs.pomdp.rst b/docs/api/reagent.gym.envs.pomdp.rst new file mode 100644 index 000000000..ab7ff4a46 --- /dev/null +++ b/docs/api/reagent.gym.envs.pomdp.rst @@ -0,0 +1,45 @@ +reagent.gym.envs.pomdp package +============================== + +Submodules +---------- + +reagent.gym.envs.pomdp.pocman module +------------------------------------ + +.. automodule:: reagent.gym.envs.pomdp.pocman + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.pomdp.state\_embed\_env module +----------------------------------------------- + +.. automodule:: reagent.gym.envs.pomdp.state_embed_env + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.pomdp.string\_game module +------------------------------------------ + +.. automodule:: reagent.gym.envs.pomdp.string_game + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.pomdp.string\_game\_v1 module +---------------------------------------------- + +.. automodule:: reagent.gym.envs.pomdp.string_game_v1 + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.envs.pomdp + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.envs.rst b/docs/api/reagent.gym.envs.rst new file mode 100644 index 000000000..fcb21edff --- /dev/null +++ b/docs/api/reagent.gym.envs.rst @@ -0,0 +1,80 @@ +reagent.gym.envs package +======================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.gym.envs.dynamics + reagent.gym.envs.functionality + reagent.gym.envs.pomdp + reagent.gym.envs.wrappers + +Submodules +---------- + +reagent.gym.envs.changing\_arms module +-------------------------------------- + +.. automodule:: reagent.gym.envs.changing_arms + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.env\_wrapper module +------------------------------------ + +.. automodule:: reagent.gym.envs.env_wrapper + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.gym module +--------------------------- + +.. automodule:: reagent.gym.envs.gym + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.oracle\_pvm module +----------------------------------- + +.. automodule:: reagent.gym.envs.oracle_pvm + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.recsim module +------------------------------ + +.. automodule:: reagent.gym.envs.recsim + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.toy\_vm module +------------------------------- + +.. 
automodule:: reagent.gym.envs.toy_vm + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.utils module +----------------------------- + +.. automodule:: reagent.gym.envs.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.envs + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.envs.wrappers.rst b/docs/api/reagent.gym.envs.wrappers.rst new file mode 100644 index 000000000..432cb6200 --- /dev/null +++ b/docs/api/reagent.gym.envs.wrappers.rst @@ -0,0 +1,29 @@ +reagent.gym.envs.wrappers package +================================= + +Submodules +---------- + +reagent.gym.envs.wrappers.recsim module +--------------------------------------- + +.. automodule:: reagent.gym.envs.wrappers.recsim + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.envs.wrappers.simple\_minigrid module +------------------------------------------------- + +.. automodule:: reagent.gym.envs.wrappers.simple_minigrid + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.envs.wrappers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.policies.rst b/docs/api/reagent.gym.policies.rst new file mode 100644 index 000000000..4b83b925a --- /dev/null +++ b/docs/api/reagent.gym.policies.rst @@ -0,0 +1,46 @@ +reagent.gym.policies package +============================ + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.gym.policies.samplers + reagent.gym.policies.scorers + +Submodules +---------- + +reagent.gym.policies.policy module +---------------------------------- + +.. automodule:: reagent.gym.policies.policy + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.policies.predictor\_policies module +----------------------------------------------- + +.. automodule:: reagent.gym.policies.predictor_policies + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.policies.random\_policies module +-------------------------------------------- + +.. automodule:: reagent.gym.policies.random_policies + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.policies + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.policies.samplers.rst b/docs/api/reagent.gym.policies.samplers.rst new file mode 100644 index 000000000..66e352234 --- /dev/null +++ b/docs/api/reagent.gym.policies.samplers.rst @@ -0,0 +1,37 @@ +reagent.gym.policies.samplers package +===================================== + +Submodules +---------- + +reagent.gym.policies.samplers.continuous\_sampler module +-------------------------------------------------------- + +.. automodule:: reagent.gym.policies.samplers.continuous_sampler + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.policies.samplers.discrete\_sampler module +------------------------------------------------------ + +.. automodule:: reagent.gym.policies.samplers.discrete_sampler + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.policies.samplers.top\_k\_sampler module +---------------------------------------------------- + +.. automodule:: reagent.gym.policies.samplers.top_k_sampler + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: reagent.gym.policies.samplers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.policies.scorers.rst b/docs/api/reagent.gym.policies.scorers.rst new file mode 100644 index 000000000..be0476bdd --- /dev/null +++ b/docs/api/reagent.gym.policies.scorers.rst @@ -0,0 +1,37 @@ +reagent.gym.policies.scorers package +==================================== + +Submodules +---------- + +reagent.gym.policies.scorers.continuous\_scorer module +------------------------------------------------------ + +.. automodule:: reagent.gym.policies.scorers.continuous_scorer + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.policies.scorers.discrete\_scorer module +---------------------------------------------------- + +.. automodule:: reagent.gym.policies.scorers.discrete_scorer + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.policies.scorers.slate\_q\_scorer module +---------------------------------------------------- + +.. automodule:: reagent.gym.policies.scorers.slate_q_scorer + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.policies.scorers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.preprocessors.rst b/docs/api/reagent.gym.preprocessors.rst new file mode 100644 index 000000000..4dd921fc7 --- /dev/null +++ b/docs/api/reagent.gym.preprocessors.rst @@ -0,0 +1,37 @@ +reagent.gym.preprocessors package +================================= + +Submodules +---------- + +reagent.gym.preprocessors.default\_preprocessors module +------------------------------------------------------- + +.. automodule:: reagent.gym.preprocessors.default_preprocessors + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.preprocessors.replay\_buffer\_inserters module +---------------------------------------------------------- + +.. automodule:: reagent.gym.preprocessors.replay_buffer_inserters + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.preprocessors.trainer\_preprocessor module +------------------------------------------------------ + +.. automodule:: reagent.gym.preprocessors.trainer_preprocessor + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.preprocessors + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.rst b/docs/api/reagent.gym.rst new file mode 100644 index 000000000..429c432ee --- /dev/null +++ b/docs/api/reagent.gym.rst @@ -0,0 +1,51 @@ +reagent.gym package +=================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.gym.agents + reagent.gym.datasets + reagent.gym.envs + reagent.gym.policies + reagent.gym.preprocessors + reagent.gym.runners + reagent.gym.tests + +Submodules +---------- + +reagent.gym.normalizers module +------------------------------ + +.. automodule:: reagent.gym.normalizers + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.types module +------------------------ + +.. automodule:: reagent.gym.types + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.utils module +------------------------ + +.. automodule:: reagent.gym.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: reagent.gym + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.runners.rst b/docs/api/reagent.gym.runners.rst new file mode 100644 index 000000000..b75534983 --- /dev/null +++ b/docs/api/reagent.gym.runners.rst @@ -0,0 +1,21 @@ +reagent.gym.runners package +=========================== + +Submodules +---------- + +reagent.gym.runners.gymrunner module +------------------------------------ + +.. automodule:: reagent.gym.runners.gymrunner + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.runners + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.tests.preprocessors.rst b/docs/api/reagent.gym.tests.preprocessors.rst new file mode 100644 index 000000000..a4cffc535 --- /dev/null +++ b/docs/api/reagent.gym.tests.preprocessors.rst @@ -0,0 +1,29 @@ +reagent.gym.tests.preprocessors package +======================================= + +Submodules +---------- + +reagent.gym.tests.preprocessors.test\_default\_preprocessors module +------------------------------------------------------------------- + +.. automodule:: reagent.gym.tests.preprocessors.test_default_preprocessors + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.tests.preprocessors.test\_replay\_buffer\_inserters module +---------------------------------------------------------------------- + +.. automodule:: reagent.gym.tests.preprocessors.test_replay_buffer_inserters + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.gym.tests.preprocessors + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.gym.tests.rst b/docs/api/reagent.gym.tests.rst new file mode 100644 index 000000000..0d38650ac --- /dev/null +++ b/docs/api/reagent.gym.tests.rst @@ -0,0 +1,77 @@ +reagent.gym.tests package +========================= + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.gym.tests.preprocessors + +Submodules +---------- + +reagent.gym.tests.test\_gym module +---------------------------------- + +.. automodule:: reagent.gym.tests.test_gym + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.tests.test\_gym\_datasets module +-------------------------------------------- + +.. automodule:: reagent.gym.tests.test_gym_datasets + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.tests.test\_gym\_offline module +------------------------------------------- + +.. automodule:: reagent.gym.tests.test_gym_offline + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.tests.test\_gym\_replay\_buffer module +-------------------------------------------------- + +.. automodule:: reagent.gym.tests.test_gym_replay_buffer + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.tests.test\_linear\_dynamics module +----------------------------------------------- + +.. automodule:: reagent.gym.tests.test_linear_dynamics + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.tests.test\_pomdp module +------------------------------------ + +.. automodule:: reagent.gym.tests.test_pomdp + :members: + :undoc-members: + :show-inheritance: + +reagent.gym.tests.test\_world\_model module +------------------------------------------- + +.. automodule:: reagent.gym.tests.test_world_model + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: reagent.gym.tests + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.lite.rst b/docs/api/reagent.lite.rst new file mode 100644 index 000000000..8fd9bc125 --- /dev/null +++ b/docs/api/reagent.lite.rst @@ -0,0 +1,21 @@ +reagent.lite package +==================== + +Submodules +---------- + +reagent.lite.optimizer module +----------------------------- + +.. automodule:: reagent.lite.optimizer + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.lite + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.mab.rst b/docs/api/reagent.mab.rst new file mode 100644 index 000000000..26aac0810 --- /dev/null +++ b/docs/api/reagent.mab.rst @@ -0,0 +1,45 @@ +reagent.mab package +=================== + +Submodules +---------- + +reagent.mab.mab\_algorithm module +--------------------------------- + +.. automodule:: reagent.mab.mab_algorithm + :members: + :undoc-members: + :show-inheritance: + +reagent.mab.simulation module +----------------------------- + +.. automodule:: reagent.mab.simulation + :members: + :undoc-members: + :show-inheritance: + +reagent.mab.thompson\_sampling module +------------------------------------- + +.. automodule:: reagent.mab.thompson_sampling + :members: + :undoc-members: + :show-inheritance: + +reagent.mab.ucb module +---------------------- + +.. automodule:: reagent.mab.ucb + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.mab + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.model_managers.actor_critic.rst b/docs/api/reagent.model_managers.actor_critic.rst new file mode 100644 index 000000000..2cce3c9d5 --- /dev/null +++ b/docs/api/reagent.model_managers.actor_critic.rst @@ -0,0 +1,29 @@ +reagent.model\_managers.actor\_critic package +============================================= + +Submodules +---------- + +reagent.model\_managers.actor\_critic.sac module +------------------------------------------------ + +.. automodule:: reagent.model_managers.actor_critic.sac + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.actor\_critic.td3 module +------------------------------------------------ + +.. automodule:: reagent.model_managers.actor_critic.td3 + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.model_managers.actor_critic + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.model_managers.discrete.rst b/docs/api/reagent.model_managers.discrete.rst new file mode 100644 index 000000000..d00c38665 --- /dev/null +++ b/docs/api/reagent.model_managers.discrete.rst @@ -0,0 +1,45 @@ +reagent.model\_managers.discrete package +======================================== + +Submodules +---------- + +reagent.model\_managers.discrete.discrete\_c51dqn module +-------------------------------------------------------- + +.. automodule:: reagent.model_managers.discrete.discrete_c51dqn + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.discrete.discrete\_crr module +----------------------------------------------------- + +.. automodule:: reagent.model_managers.discrete.discrete_crr + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.discrete.discrete\_dqn module +----------------------------------------------------- + +.. 
automodule:: reagent.model_managers.discrete.discrete_dqn + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.discrete.discrete\_qrdqn module +------------------------------------------------------- + +.. automodule:: reagent.model_managers.discrete.discrete_qrdqn + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.model_managers.discrete + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.model_managers.model_based.rst b/docs/api/reagent.model_managers.model_based.rst new file mode 100644 index 000000000..d4b62ac82 --- /dev/null +++ b/docs/api/reagent.model_managers.model_based.rst @@ -0,0 +1,45 @@ +reagent.model\_managers.model\_based package +============================================ + +Submodules +---------- + +reagent.model\_managers.model\_based.cross\_entropy\_method module +------------------------------------------------------------------ + +.. automodule:: reagent.model_managers.model_based.cross_entropy_method + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.model\_based.seq2reward\_model module +------------------------------------------------------------- + +.. automodule:: reagent.model_managers.model_based.seq2reward_model + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.model\_based.synthetic\_reward module +------------------------------------------------------------- + +.. automodule:: reagent.model_managers.model_based.synthetic_reward + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.model\_based.world\_model module +-------------------------------------------------------- + +.. automodule:: reagent.model_managers.model_based.world_model + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.model_managers.model_based + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.model_managers.parametric.rst b/docs/api/reagent.model_managers.parametric.rst new file mode 100644 index 000000000..18a65d103 --- /dev/null +++ b/docs/api/reagent.model_managers.parametric.rst @@ -0,0 +1,21 @@ +reagent.model\_managers.parametric package +========================================== + +Submodules +---------- + +reagent.model\_managers.parametric.parametric\_dqn module +--------------------------------------------------------- + +.. automodule:: reagent.model_managers.parametric.parametric_dqn + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.model_managers.parametric + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.model_managers.policy_gradient.rst b/docs/api/reagent.model_managers.policy_gradient.rst new file mode 100644 index 000000000..95a5fd9a3 --- /dev/null +++ b/docs/api/reagent.model_managers.policy_gradient.rst @@ -0,0 +1,29 @@ +reagent.model\_managers.policy\_gradient package +================================================ + +Submodules +---------- + +reagent.model\_managers.policy\_gradient.ppo module +--------------------------------------------------- + +.. automodule:: reagent.model_managers.policy_gradient.ppo + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.policy\_gradient.reinforce module +--------------------------------------------------------- + +.. 
automodule:: reagent.model_managers.policy_gradient.reinforce + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.model_managers.policy_gradient + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.model_managers.ranking.rst b/docs/api/reagent.model_managers.ranking.rst new file mode 100644 index 000000000..b50a5a021 --- /dev/null +++ b/docs/api/reagent.model_managers.ranking.rst @@ -0,0 +1,21 @@ +reagent.model\_managers.ranking package +======================================= + +Submodules +---------- + +reagent.model\_managers.ranking.slate\_q module +----------------------------------------------- + +.. automodule:: reagent.model_managers.ranking.slate_q + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.model_managers.ranking + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.model_managers.rst b/docs/api/reagent.model_managers.rst new file mode 100644 index 000000000..a0c37a272 --- /dev/null +++ b/docs/api/reagent.model_managers.rst @@ -0,0 +1,82 @@ +reagent.model\_managers package +=============================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.model_managers.actor_critic + reagent.model_managers.discrete + reagent.model_managers.model_based + reagent.model_managers.parametric + reagent.model_managers.policy_gradient + reagent.model_managers.ranking + +Submodules +---------- + +reagent.model\_managers.actor\_critic\_base module +-------------------------------------------------- + +.. automodule:: reagent.model_managers.actor_critic_base + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.discrete\_dqn\_base module +-------------------------------------------------- + +.. automodule:: reagent.model_managers.discrete_dqn_base + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.model\_manager module +--------------------------------------------- + +.. automodule:: reagent.model_managers.model_manager + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.parametric\_dqn\_base module +---------------------------------------------------- + +.. automodule:: reagent.model_managers.parametric_dqn_base + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.slate\_q\_base module +--------------------------------------------- + +.. automodule:: reagent.model_managers.slate_q_base + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.union module +------------------------------------ + +.. automodule:: reagent.model_managers.union + :members: + :undoc-members: + :show-inheritance: + +reagent.model\_managers.world\_model\_base module +------------------------------------------------- + +.. automodule:: reagent.model_managers.world_model_base + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.model_managers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.model_utils.rst b/docs/api/reagent.model_utils.rst new file mode 100644 index 000000000..16fd4b441 --- /dev/null +++ b/docs/api/reagent.model_utils.rst @@ -0,0 +1,21 @@ +reagent.model\_utils package +============================ + +Submodules +---------- + +reagent.model\_utils.seq2slate\_utils module +-------------------------------------------- + +.. 
automodule:: reagent.model_utils.seq2slate_utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.model_utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.models.rst b/docs/api/reagent.models.rst new file mode 100644 index 000000000..f9bb062c5 --- /dev/null +++ b/docs/api/reagent.models.rst @@ -0,0 +1,189 @@ +reagent.models package +====================== + +Submodules +---------- + +reagent.models.actor module +--------------------------- + +.. automodule:: reagent.models.actor + :members: + :undoc-members: + :show-inheritance: + +reagent.models.base module +-------------------------- + +.. automodule:: reagent.models.base + :members: + :undoc-members: + :show-inheritance: + +reagent.models.bcq module +------------------------- + +.. automodule:: reagent.models.bcq + :members: + :undoc-members: + :show-inheritance: + +reagent.models.categorical\_dqn module +-------------------------------------- + +.. automodule:: reagent.models.categorical_dqn + :members: + :undoc-members: + :show-inheritance: + +reagent.models.cem\_planner module +---------------------------------- + +.. automodule:: reagent.models.cem_planner + :members: + :undoc-members: + :show-inheritance: + +reagent.models.containers module +-------------------------------- + +.. automodule:: reagent.models.containers + :members: + :undoc-members: + :show-inheritance: + +reagent.models.convolutional\_network module +-------------------------------------------- + +.. automodule:: reagent.models.convolutional_network + :members: + :undoc-members: + :show-inheritance: + +reagent.models.critic module +---------------------------- + +.. automodule:: reagent.models.critic + :members: + :undoc-members: + :show-inheritance: + +reagent.models.dqn module +------------------------- + +.. automodule:: reagent.models.dqn + :members: + :undoc-members: + :show-inheritance: + +reagent.models.dueling\_q\_network module +----------------------------------------- + +.. automodule:: reagent.models.dueling_q_network + :members: + :undoc-members: + :show-inheritance: + +reagent.models.embedding\_bag\_concat module +-------------------------------------------- + +.. automodule:: reagent.models.embedding_bag_concat + :members: + :undoc-members: + :show-inheritance: + +reagent.models.fully\_connected\_network module +----------------------------------------------- + +.. automodule:: reagent.models.fully_connected_network + :members: + :undoc-members: + :show-inheritance: + +reagent.models.linear\_regression module +---------------------------------------- + +.. automodule:: reagent.models.linear_regression + :members: + :undoc-members: + :show-inheritance: + +reagent.models.mdn\_rnn module +------------------------------ + +.. automodule:: reagent.models.mdn_rnn + :members: + :undoc-members: + :show-inheritance: + +reagent.models.mlp\_scorer module +--------------------------------- + +.. automodule:: reagent.models.mlp_scorer + :members: + :undoc-members: + :show-inheritance: + +reagent.models.model\_feature\_config\_provider module +------------------------------------------------------ + +.. automodule:: reagent.models.model_feature_config_provider + :members: + :undoc-members: + :show-inheritance: + +reagent.models.no\_soft\_update\_embedding module +------------------------------------------------- + +.. 
automodule:: reagent.models.no_soft_update_embedding + :members: + :undoc-members: + :show-inheritance: + +reagent.models.seq2reward\_model module +--------------------------------------- + +.. automodule:: reagent.models.seq2reward_model + :members: + :undoc-members: + :show-inheritance: + +reagent.models.seq2slate module +------------------------------- + +.. automodule:: reagent.models.seq2slate + :members: + :undoc-members: + :show-inheritance: + +reagent.models.seq2slate\_reward module +--------------------------------------- + +.. automodule:: reagent.models.seq2slate_reward + :members: + :undoc-members: + :show-inheritance: + +reagent.models.synthetic\_reward module +--------------------------------------- + +.. automodule:: reagent.models.synthetic_reward + :members: + :undoc-members: + :show-inheritance: + +reagent.models.world\_model module +---------------------------------- + +.. automodule:: reagent.models.world_model + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.categorical_dqn.rst b/docs/api/reagent.net_builder.categorical_dqn.rst new file mode 100644 index 000000000..3b2621d46 --- /dev/null +++ b/docs/api/reagent.net_builder.categorical_dqn.rst @@ -0,0 +1,21 @@ +reagent.net\_builder.categorical\_dqn package +============================================= + +Submodules +---------- + +reagent.net\_builder.categorical\_dqn.categorical module +-------------------------------------------------------- + +.. automodule:: reagent.net_builder.categorical_dqn.categorical + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.net_builder.categorical_dqn + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.continuous_actor.rst b/docs/api/reagent.net_builder.continuous_actor.rst new file mode 100644 index 000000000..fc3c3eebb --- /dev/null +++ b/docs/api/reagent.net_builder.continuous_actor.rst @@ -0,0 +1,37 @@ +reagent.net\_builder.continuous\_actor package +============================================== + +Submodules +---------- + +reagent.net\_builder.continuous\_actor.dirichlet\_fully\_connected module +------------------------------------------------------------------------- + +.. automodule:: reagent.net_builder.continuous_actor.dirichlet_fully_connected + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.continuous\_actor.fully\_connected module +-------------------------------------------------------------- + +.. automodule:: reagent.net_builder.continuous_actor.fully_connected + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.continuous\_actor.gaussian\_fully\_connected module +------------------------------------------------------------------------ + +.. automodule:: reagent.net_builder.continuous_actor.gaussian_fully_connected + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: reagent.net_builder.continuous_actor + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.discrete_actor.rst b/docs/api/reagent.net_builder.discrete_actor.rst new file mode 100644 index 000000000..3b796654b --- /dev/null +++ b/docs/api/reagent.net_builder.discrete_actor.rst @@ -0,0 +1,21 @@ +reagent.net\_builder.discrete\_actor package +============================================ + +Submodules +---------- + +reagent.net\_builder.discrete\_actor.fully\_connected module +------------------------------------------------------------ + +.. automodule:: reagent.net_builder.discrete_actor.fully_connected + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.net_builder.discrete_actor + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.discrete_dqn.rst b/docs/api/reagent.net_builder.discrete_dqn.rst new file mode 100644 index 000000000..cbfa80b9c --- /dev/null +++ b/docs/api/reagent.net_builder.discrete_dqn.rst @@ -0,0 +1,37 @@ +reagent.net\_builder.discrete\_dqn package +========================================== + +Submodules +---------- + +reagent.net\_builder.discrete\_dqn.dueling module +------------------------------------------------- + +.. automodule:: reagent.net_builder.discrete_dqn.dueling + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.discrete\_dqn.fully\_connected module +---------------------------------------------------------- + +.. automodule:: reagent.net_builder.discrete_dqn.fully_connected + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.discrete\_dqn.fully\_connected\_with\_embedding module +--------------------------------------------------------------------------- + +.. automodule:: reagent.net_builder.discrete_dqn.fully_connected_with_embedding + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.net_builder.discrete_dqn + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.parametric_dqn.rst b/docs/api/reagent.net_builder.parametric_dqn.rst new file mode 100644 index 000000000..2f9196fc8 --- /dev/null +++ b/docs/api/reagent.net_builder.parametric_dqn.rst @@ -0,0 +1,21 @@ +reagent.net\_builder.parametric\_dqn package +============================================ + +Submodules +---------- + +reagent.net\_builder.parametric\_dqn.fully\_connected module +------------------------------------------------------------ + +.. automodule:: reagent.net_builder.parametric_dqn.fully_connected + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.net_builder.parametric_dqn + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.quantile_dqn.rst b/docs/api/reagent.net_builder.quantile_dqn.rst new file mode 100644 index 000000000..d80e2909b --- /dev/null +++ b/docs/api/reagent.net_builder.quantile_dqn.rst @@ -0,0 +1,29 @@ +reagent.net\_builder.quantile\_dqn package +========================================== + +Submodules +---------- + +reagent.net\_builder.quantile\_dqn.dueling\_quantile module +----------------------------------------------------------- + +.. automodule:: reagent.net_builder.quantile_dqn.dueling_quantile + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.quantile\_dqn.quantile module +-------------------------------------------------- + +.. 
automodule:: reagent.net_builder.quantile_dqn.quantile + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.net_builder.quantile_dqn + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.rst b/docs/api/reagent.net_builder.rst new file mode 100644 index 000000000..bdc63d20b --- /dev/null +++ b/docs/api/reagent.net_builder.rst @@ -0,0 +1,118 @@ +reagent.net\_builder package +============================ + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.net_builder.categorical_dqn + reagent.net_builder.continuous_actor + reagent.net_builder.discrete_actor + reagent.net_builder.discrete_dqn + reagent.net_builder.parametric_dqn + reagent.net_builder.quantile_dqn + reagent.net_builder.slate_ranking + reagent.net_builder.slate_reward + reagent.net_builder.synthetic_reward + reagent.net_builder.value + +Submodules +---------- + +reagent.net\_builder.categorical\_dqn\_net\_builder module +---------------------------------------------------------- + +.. automodule:: reagent.net_builder.categorical_dqn_net_builder + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.continuous\_actor\_net\_builder module +----------------------------------------------------------- + +.. automodule:: reagent.net_builder.continuous_actor_net_builder + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.discrete\_actor\_net\_builder module +--------------------------------------------------------- + +.. automodule:: reagent.net_builder.discrete_actor_net_builder + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.discrete\_dqn\_net\_builder module +------------------------------------------------------- + +.. automodule:: reagent.net_builder.discrete_dqn_net_builder + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.parametric\_dqn\_net\_builder module +--------------------------------------------------------- + +.. automodule:: reagent.net_builder.parametric_dqn_net_builder + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.quantile\_dqn\_net\_builder module +------------------------------------------------------- + +.. automodule:: reagent.net_builder.quantile_dqn_net_builder + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.slate\_ranking\_net\_builder module +-------------------------------------------------------- + +.. automodule:: reagent.net_builder.slate_ranking_net_builder + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.slate\_reward\_net\_builder module +------------------------------------------------------- + +.. automodule:: reagent.net_builder.slate_reward_net_builder + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.synthetic\_reward\_net\_builder module +----------------------------------------------------------- + +.. automodule:: reagent.net_builder.synthetic_reward_net_builder + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.unions module +---------------------------------- + +.. automodule:: reagent.net_builder.unions + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.value\_net\_builder module +----------------------------------------------- + +.. automodule:: reagent.net_builder.value_net_builder + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: reagent.net_builder + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.slate_ranking.rst b/docs/api/reagent.net_builder.slate_ranking.rst new file mode 100644 index 000000000..4a5d1f4a3 --- /dev/null +++ b/docs/api/reagent.net_builder.slate_ranking.rst @@ -0,0 +1,29 @@ +reagent.net\_builder.slate\_ranking package +=========================================== + +Submodules +---------- + +reagent.net\_builder.slate\_ranking.slate\_ranking\_scorer module +----------------------------------------------------------------- + +.. automodule:: reagent.net_builder.slate_ranking.slate_ranking_scorer + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.slate\_ranking.slate\_ranking\_transformer module +---------------------------------------------------------------------- + +.. automodule:: reagent.net_builder.slate_ranking.slate_ranking_transformer + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.net_builder.slate_ranking + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.slate_reward.rst b/docs/api/reagent.net_builder.slate_reward.rst new file mode 100644 index 000000000..3103e7bce --- /dev/null +++ b/docs/api/reagent.net_builder.slate_reward.rst @@ -0,0 +1,29 @@ +reagent.net\_builder.slate\_reward package +========================================== + +Submodules +---------- + +reagent.net\_builder.slate\_reward.slate\_reward\_gru module +------------------------------------------------------------ + +.. automodule:: reagent.net_builder.slate_reward.slate_reward_gru + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.slate\_reward.slate\_reward\_transformer module +-------------------------------------------------------------------- + +.. automodule:: reagent.net_builder.slate_reward.slate_reward_transformer + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.net_builder.slate_reward + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.synthetic_reward.rst b/docs/api/reagent.net_builder.synthetic_reward.rst new file mode 100644 index 000000000..b723b66f5 --- /dev/null +++ b/docs/api/reagent.net_builder.synthetic_reward.rst @@ -0,0 +1,45 @@ +reagent.net\_builder.synthetic\_reward package +============================================== + +Submodules +---------- + +reagent.net\_builder.synthetic\_reward.ngram\_synthetic\_reward module +---------------------------------------------------------------------- + +.. automodule:: reagent.net_builder.synthetic_reward.ngram_synthetic_reward + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.synthetic\_reward.sequence\_synthetic\_reward module +------------------------------------------------------------------------- + +.. automodule:: reagent.net_builder.synthetic_reward.sequence_synthetic_reward + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.synthetic\_reward.single\_step\_synthetic\_reward module +----------------------------------------------------------------------------- + +.. automodule:: reagent.net_builder.synthetic_reward.single_step_synthetic_reward + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.synthetic\_reward.transformer\_synthetic\_reward module +---------------------------------------------------------------------------- + +.. 
automodule:: reagent.net_builder.synthetic_reward.transformer_synthetic_reward + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.net_builder.synthetic_reward + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.net_builder.value.rst b/docs/api/reagent.net_builder.value.rst new file mode 100644 index 000000000..8e9d46303 --- /dev/null +++ b/docs/api/reagent.net_builder.value.rst @@ -0,0 +1,29 @@ +reagent.net\_builder.value package +================================== + +Submodules +---------- + +reagent.net\_builder.value.fully\_connected module +-------------------------------------------------- + +.. automodule:: reagent.net_builder.value.fully_connected + :members: + :undoc-members: + :show-inheritance: + +reagent.net\_builder.value.seq2reward\_rnn module +------------------------------------------------- + +.. automodule:: reagent.net_builder.value.seq2reward_rnn + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.net_builder.value + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.ope.datasets.rst b/docs/api/reagent.ope.datasets.rst new file mode 100644 index 000000000..591d5a642 --- /dev/null +++ b/docs/api/reagent.ope.datasets.rst @@ -0,0 +1,21 @@ +reagent.ope.datasets package +============================ + +Submodules +---------- + +reagent.ope.datasets.logged\_dataset module +------------------------------------------- + +.. automodule:: reagent.ope.datasets.logged_dataset + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.ope.datasets + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.ope.estimators.rst b/docs/api/reagent.ope.estimators.rst new file mode 100644 index 000000000..e3696edbe --- /dev/null +++ b/docs/api/reagent.ope.estimators.rst @@ -0,0 +1,53 @@ +reagent.ope.estimators package +============================== + +Submodules +---------- + +reagent.ope.estimators.contextual\_bandits\_estimators module +------------------------------------------------------------- + +.. automodule:: reagent.ope.estimators.contextual_bandits_estimators + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.estimators.estimator module +--------------------------------------- + +.. automodule:: reagent.ope.estimators.estimator + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.estimators.sequential\_estimators module +---------------------------------------------------- + +.. automodule:: reagent.ope.estimators.sequential_estimators + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.estimators.slate\_estimators module +----------------------------------------------- + +.. automodule:: reagent.ope.estimators.slate_estimators + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.estimators.types module +----------------------------------- + +.. automodule:: reagent.ope.estimators.types + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.ope.estimators + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.ope.rst b/docs/api/reagent.ope.rst new file mode 100644 index 000000000..ab6855d72 --- /dev/null +++ b/docs/api/reagent.ope.rst @@ -0,0 +1,32 @@ +reagent.ope package +=================== + +Subpackages +----------- + +.. 
toctree:: + :maxdepth: 4 + + reagent.ope.datasets + reagent.ope.estimators + reagent.ope.test + reagent.ope.trainers + +Submodules +---------- + +reagent.ope.utils module +------------------------ + +.. automodule:: reagent.ope.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.ope + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.ope.test.rst b/docs/api/reagent.ope.test.rst new file mode 100644 index 000000000..a1ba489e7 --- /dev/null +++ b/docs/api/reagent.ope.test.rst @@ -0,0 +1,69 @@ +reagent.ope.test package +======================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.ope.test.unit_tests + +Submodules +---------- + +reagent.ope.test.cartpole module +-------------------------------- + +.. automodule:: reagent.ope.test.cartpole + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.test.envs module +---------------------------- + +.. automodule:: reagent.ope.test.envs + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.test.gridworld module +--------------------------------- + +.. automodule:: reagent.ope.test.gridworld + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.test.mslr\_slate module +----------------------------------- + +.. automodule:: reagent.ope.test.mslr_slate + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.test.multiclass\_bandits module +------------------------------------------- + +.. automodule:: reagent.ope.test.multiclass_bandits + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.test.yandex\_web\_search module +------------------------------------------- + +.. automodule:: reagent.ope.test.yandex_web_search + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.ope.test + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.ope.test.unit_tests.rst b/docs/api/reagent.ope.test.unit_tests.rst new file mode 100644 index 000000000..4c273fd8a --- /dev/null +++ b/docs/api/reagent.ope.test.unit_tests.rst @@ -0,0 +1,45 @@ +reagent.ope.test.unit\_tests package +==================================== + +Submodules +---------- + +reagent.ope.test.unit\_tests.test\_contextual\_bandit\_estimators module +------------------------------------------------------------------------ + +.. automodule:: reagent.ope.test.unit_tests.test_contextual_bandit_estimators + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.test.unit\_tests.test\_slate\_estimators module +----------------------------------------------------------- + +.. automodule:: reagent.ope.test.unit_tests.test_slate_estimators + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.test.unit\_tests.test\_types module +----------------------------------------------- + +.. automodule:: reagent.ope.test.unit_tests.test_types + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.test.unit\_tests.test\_utils module +----------------------------------------------- + +.. automodule:: reagent.ope.test.unit_tests.test_utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: reagent.ope.test.unit_tests + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.ope.trainers.rst b/docs/api/reagent.ope.trainers.rst new file mode 100644 index 000000000..e0f01acde --- /dev/null +++ b/docs/api/reagent.ope.trainers.rst @@ -0,0 +1,29 @@ +reagent.ope.trainers package +============================ + +Submodules +---------- + +reagent.ope.trainers.linear\_trainers module +-------------------------------------------- + +.. automodule:: reagent.ope.trainers.linear_trainers + :members: + :undoc-members: + :show-inheritance: + +reagent.ope.trainers.rl\_tabular\_trainers module +------------------------------------------------- + +.. automodule:: reagent.ope.trainers.rl_tabular_trainers + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.ope.trainers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.optimizer.rst b/docs/api/reagent.optimizer.rst new file mode 100644 index 000000000..e63c75bff --- /dev/null +++ b/docs/api/reagent.optimizer.rst @@ -0,0 +1,77 @@ +reagent.optimizer package +========================= + +Submodules +---------- + +reagent.optimizer.optimizer module +---------------------------------- + +.. automodule:: reagent.optimizer.optimizer + :members: + :undoc-members: + :show-inheritance: + +reagent.optimizer.scheduler module +---------------------------------- + +.. automodule:: reagent.optimizer.scheduler + :members: + :undoc-members: + :show-inheritance: + +reagent.optimizer.scheduler\_union module +----------------------------------------- + +.. automodule:: reagent.optimizer.scheduler_union + :members: + :undoc-members: + :show-inheritance: + +reagent.optimizer.soft\_update module +------------------------------------- + +.. automodule:: reagent.optimizer.soft_update + :members: + :undoc-members: + :show-inheritance: + +reagent.optimizer.uninferrable\_optimizers module +------------------------------------------------- + +.. automodule:: reagent.optimizer.uninferrable_optimizers + :members: + :undoc-members: + :show-inheritance: + +reagent.optimizer.uninferrable\_schedulers module +------------------------------------------------- + +.. automodule:: reagent.optimizer.uninferrable_schedulers + :members: + :undoc-members: + :show-inheritance: + +reagent.optimizer.union module +------------------------------ + +.. automodule:: reagent.optimizer.union + :members: + :undoc-members: + :show-inheritance: + +reagent.optimizer.utils module +------------------------------ + +.. automodule:: reagent.optimizer.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.optimizer + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.prediction.ranking.rst b/docs/api/reagent.prediction.ranking.rst new file mode 100644 index 000000000..7f2738486 --- /dev/null +++ b/docs/api/reagent.prediction.ranking.rst @@ -0,0 +1,21 @@ +reagent.prediction.ranking package +================================== + +Submodules +---------- + +reagent.prediction.ranking.predictor\_wrapper module +---------------------------------------------------- + +.. automodule:: reagent.prediction.ranking.predictor_wrapper + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. 
automodule:: reagent.prediction.ranking + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.prediction.rst b/docs/api/reagent.prediction.rst new file mode 100644 index 000000000..b1aa4584a --- /dev/null +++ b/docs/api/reagent.prediction.rst @@ -0,0 +1,30 @@ +reagent.prediction package +========================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.prediction.ranking + reagent.prediction.synthetic_reward + +Submodules +---------- + +reagent.prediction.predictor\_wrapper module +-------------------------------------------- + +.. automodule:: reagent.prediction.predictor_wrapper + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.prediction + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.prediction.synthetic_reward.rst b/docs/api/reagent.prediction.synthetic_reward.rst new file mode 100644 index 000000000..4ff4d8cad --- /dev/null +++ b/docs/api/reagent.prediction.synthetic_reward.rst @@ -0,0 +1,21 @@ +reagent.prediction.synthetic\_reward package +============================================ + +Submodules +---------- + +reagent.prediction.synthetic\_reward.synthetic\_reward\_predictor\_wrapper module +--------------------------------------------------------------------------------- + +.. automodule:: reagent.prediction.synthetic_reward.synthetic_reward_predictor_wrapper + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.prediction.synthetic_reward + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.preprocessing.rst b/docs/api/reagent.preprocessing.rst new file mode 100644 index 000000000..b2e80139f --- /dev/null +++ b/docs/api/reagent.preprocessing.rst @@ -0,0 +1,85 @@ +reagent.preprocessing package +============================= + +Submodules +---------- + +reagent.preprocessing.batch\_preprocessor module +------------------------------------------------ + +.. automodule:: reagent.preprocessing.batch_preprocessor + :members: + :undoc-members: + :show-inheritance: + +reagent.preprocessing.identify\_types module +-------------------------------------------- + +.. automodule:: reagent.preprocessing.identify_types + :members: + :undoc-members: + :show-inheritance: + +reagent.preprocessing.normalization module +------------------------------------------ + +.. automodule:: reagent.preprocessing.normalization + :members: + :undoc-members: + :show-inheritance: + +reagent.preprocessing.postprocessor module +------------------------------------------ + +.. automodule:: reagent.preprocessing.postprocessor + :members: + :undoc-members: + :show-inheritance: + +reagent.preprocessing.preprocessor module +----------------------------------------- + +.. automodule:: reagent.preprocessing.preprocessor + :members: + :undoc-members: + :show-inheritance: + +reagent.preprocessing.sparse\_preprocessor module +------------------------------------------------- + +.. automodule:: reagent.preprocessing.sparse_preprocessor + :members: + :undoc-members: + :show-inheritance: + +reagent.preprocessing.sparse\_to\_dense module +---------------------------------------------- + +.. automodule:: reagent.preprocessing.sparse_to_dense + :members: + :undoc-members: + :show-inheritance: + +reagent.preprocessing.transforms module +--------------------------------------- + +.. 
automodule:: reagent.preprocessing.transforms + :members: + :undoc-members: + :show-inheritance: + +reagent.preprocessing.types module +---------------------------------- + +.. automodule:: reagent.preprocessing.types + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.preprocessing + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.publishers.rst b/docs/api/reagent.publishers.rst new file mode 100644 index 000000000..fa73ff042 --- /dev/null +++ b/docs/api/reagent.publishers.rst @@ -0,0 +1,45 @@ +reagent.publishers package +========================== + +Submodules +---------- + +reagent.publishers.file\_system\_publisher module +------------------------------------------------- + +.. automodule:: reagent.publishers.file_system_publisher + :members: + :undoc-members: + :show-inheritance: + +reagent.publishers.model\_publisher module +------------------------------------------ + +.. automodule:: reagent.publishers.model_publisher + :members: + :undoc-members: + :show-inheritance: + +reagent.publishers.no\_publishing module +---------------------------------------- + +.. automodule:: reagent.publishers.no_publishing + :members: + :undoc-members: + :show-inheritance: + +reagent.publishers.union module +------------------------------- + +.. automodule:: reagent.publishers.union + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.publishers + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.replay_memory.rst b/docs/api/reagent.replay_memory.rst new file mode 100644 index 000000000..69388ac84 --- /dev/null +++ b/docs/api/reagent.replay_memory.rst @@ -0,0 +1,45 @@ +reagent.replay\_memory package +============================== + +Submodules +---------- + +reagent.replay\_memory.circular\_replay\_buffer module +------------------------------------------------------ + +.. automodule:: reagent.replay_memory.circular_replay_buffer + :members: + :undoc-members: + :show-inheritance: + +reagent.replay\_memory.prioritized\_replay\_buffer module +--------------------------------------------------------- + +.. automodule:: reagent.replay_memory.prioritized_replay_buffer + :members: + :undoc-members: + :show-inheritance: + +reagent.replay\_memory.sum\_tree module +--------------------------------------- + +.. automodule:: reagent.replay_memory.sum_tree + :members: + :undoc-members: + :show-inheritance: + +reagent.replay\_memory.utils module +----------------------------------- + +.. automodule:: reagent.replay_memory.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.replay_memory + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.reporting.rst b/docs/api/reagent.reporting.rst new file mode 100644 index 000000000..2de181fb4 --- /dev/null +++ b/docs/api/reagent.reporting.rst @@ -0,0 +1,101 @@ +reagent.reporting package +========================= + +Submodules +---------- + +reagent.reporting.actor\_critic\_reporter module +------------------------------------------------ + +.. automodule:: reagent.reporting.actor_critic_reporter + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.compound\_reporter module +------------------------------------------- + +.. 
automodule:: reagent.reporting.compound_reporter + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.discrete\_crr\_reporter module +------------------------------------------------ + +.. automodule:: reagent.reporting.discrete_crr_reporter + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.discrete\_dqn\_reporter module +------------------------------------------------ + +.. automodule:: reagent.reporting.discrete_dqn_reporter + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.parametric\_dqn\_reporter module +-------------------------------------------------- + +.. automodule:: reagent.reporting.parametric_dqn_reporter + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.reporter\_base module +--------------------------------------- + +.. automodule:: reagent.reporting.reporter_base + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.reward\_network\_reporter module +-------------------------------------------------- + +.. automodule:: reagent.reporting.reward_network_reporter + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.seq2reward\_reporter module +--------------------------------------------- + +.. automodule:: reagent.reporting.seq2reward_reporter + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.slate\_q\_reporter module +------------------------------------------- + +.. automodule:: reagent.reporting.slate_q_reporter + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.td3\_reporter module +-------------------------------------- + +.. automodule:: reagent.reporting.td3_reporter + :members: + :undoc-members: + :show-inheritance: + +reagent.reporting.world\_model\_reporter module +----------------------------------------------- + +.. automodule:: reagent.reporting.world_model_reporter + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.reporting + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.rst b/docs/api/reagent.rst new file mode 100644 index 000000000..24ff47a8f --- /dev/null +++ b/docs/api/reagent.rst @@ -0,0 +1,39 @@ +reagent package +=============== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.core + reagent.data + reagent.evaluation + reagent.gym + reagent.lite + reagent.mab + reagent.model_managers + reagent.model_utils + reagent.models + reagent.net_builder + reagent.ope + reagent.optimizer + reagent.prediction + reagent.preprocessing + reagent.publishers + reagent.replay_memory + reagent.reporting + reagent.samplers + reagent.scripts + reagent.training + reagent.validators + reagent.workflow + +Module contents +--------------- + +.. automodule:: reagent + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/ml.rl.simulators.rst b/docs/api/reagent.samplers.rst similarity index 54% rename from docs/api/ml.rl.simulators.rst rename to docs/api/reagent.samplers.rst index fda5ba238..e23910da9 100644 --- a/docs/api/ml.rl.simulators.rst +++ b/docs/api/reagent.samplers.rst @@ -1,22 +1,21 @@ -ml.rl.simulators package +reagent.samplers package ======================== Submodules ---------- -ml.rl.simulators.recsim module ------------------------------- +reagent.samplers.frechet module +------------------------------- -.. automodule:: ml.rl.simulators.recsim +.. automodule:: reagent.samplers.frechet :members: :undoc-members: :show-inheritance: - Module contents --------------- -.. 
automodule:: ml.rl.simulators +.. automodule:: reagent.samplers :members: :undoc-members: :show-inheritance: diff --git a/docs/api/reagent.scripts.rst b/docs/api/reagent.scripts.rst new file mode 100644 index 000000000..505b192fe --- /dev/null +++ b/docs/api/reagent.scripts.rst @@ -0,0 +1,21 @@ +reagent.scripts package +======================= + +Submodules +---------- + +reagent.scripts.hparam\_tuning module +------------------------------------- + +.. automodule:: reagent.scripts.hparam_tuning + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.scripts + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.training.cb.rst b/docs/api/reagent.training.cb.rst new file mode 100644 index 000000000..6484d74cd --- /dev/null +++ b/docs/api/reagent.training.cb.rst @@ -0,0 +1,21 @@ +reagent.training.cb package +=========================== + +Submodules +---------- + +reagent.training.cb.linucb\_trainer module +------------------------------------------ + +.. automodule:: reagent.training.cb.linucb_trainer + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.training.cb + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.training.cfeval.rst b/docs/api/reagent.training.cfeval.rst new file mode 100644 index 000000000..1523b3a61 --- /dev/null +++ b/docs/api/reagent.training.cfeval.rst @@ -0,0 +1,21 @@ +reagent.training.cfeval package +=============================== + +Submodules +---------- + +reagent.training.cfeval.bandit\_reward\_network\_trainer module +--------------------------------------------------------------- + +.. automodule:: reagent.training.cfeval.bandit_reward_network_trainer + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.training.cfeval + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.training.gradient_free.rst b/docs/api/reagent.training.gradient_free.rst new file mode 100644 index 000000000..05091b401 --- /dev/null +++ b/docs/api/reagent.training.gradient_free.rst @@ -0,0 +1,37 @@ +reagent.training.gradient\_free package +======================================= + +Submodules +---------- + +reagent.training.gradient\_free.ars\_util module +------------------------------------------------ + +.. automodule:: reagent.training.gradient_free.ars_util + :members: + :undoc-members: + :show-inheritance: + +reagent.training.gradient\_free.es\_worker module +------------------------------------------------- + +.. automodule:: reagent.training.gradient_free.es_worker + :members: + :undoc-members: + :show-inheritance: + +reagent.training.gradient\_free.evolution\_pool module +------------------------------------------------------ + +.. automodule:: reagent.training.gradient_free.evolution_pool + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.training.gradient_free + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.training.ranking.rst b/docs/api/reagent.training.ranking.rst new file mode 100644 index 000000000..e0a5935cb --- /dev/null +++ b/docs/api/reagent.training.ranking.rst @@ -0,0 +1,53 @@ +reagent.training.ranking package +================================ + +Submodules +---------- + +reagent.training.ranking.helper module +-------------------------------------- + +.. 
automodule:: reagent.training.ranking.helper + :members: + :undoc-members: + :show-inheritance: + +reagent.training.ranking.seq2slate\_attn\_trainer module +-------------------------------------------------------- + +.. automodule:: reagent.training.ranking.seq2slate_attn_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.ranking.seq2slate\_sim\_trainer module +------------------------------------------------------- + +.. automodule:: reagent.training.ranking.seq2slate_sim_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.ranking.seq2slate\_tf\_trainer module +------------------------------------------------------ + +.. automodule:: reagent.training.ranking.seq2slate_tf_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.ranking.seq2slate\_trainer module +-------------------------------------------------- + +.. automodule:: reagent.training.ranking.seq2slate_trainer + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.training.ranking + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.training.rst b/docs/api/reagent.training.rst new file mode 100644 index 000000000..936b48823 --- /dev/null +++ b/docs/api/reagent.training.rst @@ -0,0 +1,177 @@ +reagent.training package +======================== + +Subpackages +----------- + +.. toctree:: + :maxdepth: 4 + + reagent.training.cb + reagent.training.cfeval + reagent.training.gradient_free + reagent.training.ranking + reagent.training.world_model + +Submodules +---------- + +reagent.training.c51\_trainer module +------------------------------------ + +.. automodule:: reagent.training.c51_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.cem\_trainer module +------------------------------------ + +.. automodule:: reagent.training.cem_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.discrete\_crr\_trainer module +---------------------------------------------- + +.. automodule:: reagent.training.discrete_crr_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.dqn\_trainer module +------------------------------------ + +.. automodule:: reagent.training.dqn_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.dqn\_trainer\_base module +------------------------------------------ + +.. automodule:: reagent.training.dqn_trainer_base + :members: + :undoc-members: + :show-inheritance: + +reagent.training.imitator\_training module +------------------------------------------ + +.. automodule:: reagent.training.imitator_training + :members: + :undoc-members: + :show-inheritance: + +reagent.training.multi\_stage\_trainer module +--------------------------------------------- + +.. automodule:: reagent.training.multi_stage_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.parameters module +---------------------------------- + +.. automodule:: reagent.training.parameters + :members: + :undoc-members: + :show-inheritance: + +reagent.training.parametric\_dqn\_trainer module +------------------------------------------------ + +.. automodule:: reagent.training.parametric_dqn_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.ppo\_trainer module +------------------------------------ + +.. 
automodule:: reagent.training.ppo_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.qrdqn\_trainer module +-------------------------------------- + +.. automodule:: reagent.training.qrdqn_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.reagent\_lightning\_module module +-------------------------------------------------- + +.. automodule:: reagent.training.reagent_lightning_module + :members: + :undoc-members: + :show-inheritance: + +reagent.training.reinforce\_trainer module +------------------------------------------ + +.. automodule:: reagent.training.reinforce_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.reward\_network\_trainer module +------------------------------------------------ + +.. automodule:: reagent.training.reward_network_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.rl\_trainer\_pytorch module +-------------------------------------------- + +.. automodule:: reagent.training.rl_trainer_pytorch + :members: + :undoc-members: + :show-inheritance: + +reagent.training.sac\_trainer module +------------------------------------ + +.. automodule:: reagent.training.sac_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.slate\_q\_trainer module +----------------------------------------- + +.. automodule:: reagent.training.slate_q_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.td3\_trainer module +------------------------------------ + +.. automodule:: reagent.training.td3_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.utils module +----------------------------- + +.. automodule:: reagent.training.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.training + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.training.world_model.rst b/docs/api/reagent.training.world_model.rst new file mode 100644 index 000000000..4cb650daa --- /dev/null +++ b/docs/api/reagent.training.world_model.rst @@ -0,0 +1,37 @@ +reagent.training.world\_model package +===================================== + +Submodules +---------- + +reagent.training.world\_model.compress\_model\_trainer module +------------------------------------------------------------- + +.. automodule:: reagent.training.world_model.compress_model_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.world\_model.mdnrnn\_trainer module +---------------------------------------------------- + +.. automodule:: reagent.training.world_model.mdnrnn_trainer + :members: + :undoc-members: + :show-inheritance: + +reagent.training.world\_model.seq2reward\_trainer module +-------------------------------------------------------- + +.. automodule:: reagent.training.world_model.seq2reward_trainer + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.training.world_model + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.validators.rst b/docs/api/reagent.validators.rst new file mode 100644 index 000000000..47409c746 --- /dev/null +++ b/docs/api/reagent.validators.rst @@ -0,0 +1,37 @@ +reagent.validators package +========================== + +Submodules +---------- + +reagent.validators.model\_validator module +------------------------------------------ + +.. 
automodule:: reagent.validators.model_validator + :members: + :undoc-members: + :show-inheritance: + +reagent.validators.no\_validation module +---------------------------------------- + +.. automodule:: reagent.validators.no_validation + :members: + :undoc-members: + :show-inheritance: + +reagent.validators.union module +------------------------------- + +.. automodule:: reagent.validators.union + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.validators + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/api/reagent.workflow.rst b/docs/api/reagent.workflow.rst new file mode 100644 index 000000000..b213ddced --- /dev/null +++ b/docs/api/reagent.workflow.rst @@ -0,0 +1,77 @@ +reagent.workflow package +======================== + +Submodules +---------- + +reagent.workflow.cli module +--------------------------- + +.. automodule:: reagent.workflow.cli + :members: + :undoc-members: + :show-inheritance: + +reagent.workflow.env module +--------------------------- + +.. automodule:: reagent.workflow.env + :members: + :undoc-members: + :show-inheritance: + +reagent.workflow.gym\_batch\_rl module +-------------------------------------- + +.. automodule:: reagent.workflow.gym_batch_rl + :members: + :undoc-members: + :show-inheritance: + +reagent.workflow.identify\_types\_flow module +--------------------------------------------- + +.. automodule:: reagent.workflow.identify_types_flow + :members: + :undoc-members: + :show-inheritance: + +reagent.workflow.training module +-------------------------------- + +.. automodule:: reagent.workflow.training + :members: + :undoc-members: + :show-inheritance: + +reagent.workflow.training\_reports module +----------------------------------------- + +.. automodule:: reagent.workflow.training_reports + :members: + :undoc-members: + :show-inheritance: + +reagent.workflow.types module +----------------------------- + +.. automodule:: reagent.workflow.types + :members: + :undoc-members: + :show-inheritance: + +reagent.workflow.utils module +----------------------------- + +.. automodule:: reagent.workflow.utils + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: reagent.workflow + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/build.sh b/docs/build.sh old mode 100755 new mode 100644 index 91f35cf00..557e53a51 --- a/docs/build.sh +++ b/docs/build.sh @@ -1 +1,2 @@ -sphinx-build -b html -E -v . ~/github/HorizonDocs +#!/bin/bash +rm -rf api/* && rm -rf ~/github/HorizonDocs && sphinx-build -b html -E -v . ~/github/HorizonDocs diff --git a/docs/conf.py b/docs/conf.py index bcfbc01f1..f565f01e6 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,7 +22,7 @@ project = "ReAgent" -copyright = "2019, Facebook Inc." +copyright = "2022, Meta Platforms, Inc" author = "ReAgent Team" # The full version, including alpha/beta/rc tags @@ -60,8 +60,6 @@ "pandas", "sklearn", "reagent.test", - "onnx", - "xgboost", ] # -- Options for HTML output ------------------------------------------------- diff --git a/docs/index.rst b/docs/index.rst index 228d79489..31e5d6a95 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,9 +10,12 @@ ReAgent: Applied Reinforcement Learning Platform ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. image:: https://circleci.com/gh/facebookresearch/ReAgent/tree/master.svg?style=svg - :target: https://circleci.com/gh/facebookresearch/ReAgent/tree/master +.. 
image:: https://img.shields.io/badge/Support-Ukraine-FFD500?style=flat&labelColor=005BBB + :alt: Support Ukraine - Help Provide Humanitarian Aid to Ukraine. + :target: https://opensource.fb.com/support-ukraine + +.. image:: https://circleci.com/gh/facebookresearch/ReAgent/tree/main.svg?style=svg + :target: https://circleci.com/gh/facebookresearch/ReAgent/tree/main -------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -22,8 +25,9 @@ Overview ReAgent is an open source end-to-end platform for applied reinforcement learning (RL) developed and used at Facebook. ReAgent is built in Python and uses PyTorch for modeling and training and TorchScript for model serving. The platform contains workflows to train popular deep RL algorithms and includes data preprocessing, feature transformation, distributed training, -counterfactual policy evaluation, and optimized serving. For more detailed information about ReAgent see the white -paper here: `Platform `_. +counterfactual policy evaluation, and optimized serving. For more detailed information about ReAgent, please read +`release post `_ +and `white paper `_. The source code is available here: `Source code `_. @@ -32,6 +36,7 @@ The platform was once named "Horizon" but we have adopted the name "ReAgent" rec Algorithms Supported ~~~~~~~~~~~~~~~~~~~~ +Classic Off-Policy algorithms: * Discrete-Action `DQN `_ * Parametric-Action DQN @@ -39,6 +44,33 @@ Algorithms Supported * Distributional RL `C51 `_\ , `QR-DQN `_ * `Twin Delayed DDPG `_ (TD3) * `Soft Actor-Critic `_ (SAC) +* `Critic Regularized Regression `_ (CRR) +* `Proximal Policy Optimization Algorithms `_ (PPO) + +RL for recommender systems: + +* `Seq2Slate `_ +* `SlateQ `_ + +Counterfactual Evaluation: + +* `Doubly Robust `_ (for bandits) +* `Doubly Robust `_ (for sequential decisions) +* `MAGIC `_ + +Multi-Arm and Contextual Bandits: + +* `UCB1 `_ +* `MetricUCB `_ +* `Thompson Sampling `_ +* `LinUCB `_ + + +Others: + +* `Cross-Entropy Method `_ +* `Synthetic Return for Credit Assignment `_ + Installation ~~~~~~~~~~~~~~~~~~~ @@ -46,27 +78,42 @@ Installation ReAgent can be installed via. Docker or manually. Detailed instructions on how to install ReAgent can be found here: :ref:`installation`. -Usage + +Tutorial ~~~~~~~~~~~~ +ReAgent is designed for large-scale, distributed recommendation/optimization tasks where we don’t have access to a simulator. +In this environment, it is typically better to train offline on batches of data, and release new policies slowly over time. +Because the policy updates slowly and in batches, we use off-policy algorithms. To test a new policy without deploying it, +we rely on counter-factual policy evaluation (CPE), a set of techniques for estimating a policy based on the actions of another policy. -The ReAgent Serving Platform (RASP) tutorial covers serving and training models and is available here: :ref:`rasp_tutorial`. +We also have a set of tools to facilitate applying RL in real-world applications: + + +* Domain Analysis Tool, which analyzes state/action feature importance and identifies whether the problem is a suitable for applying batch RL +* Behavior Cloning, which clones from the logging policy to bootstrap the learning policy safely Detailed instructions on how to use ReAgent can be found here: :ref:`usage`. + License ~~~~~~~~~~~~~~ -ReAgent is released under a BSD license. Find out more about it here: :ref:`license`. 
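Referring back to the counterfactual policy evaluation (CPE) techniques described in the Tutorial section above, a minimal sketch of the simplest such estimator, inverse propensity scoring (IPS), is shown below; the function and variable names are illustrative only and are not part of the ReAgent API.

.. code-block:: python

    import numpy as np

    def ips_estimate(rewards, logged_propensities, target_propensities):
        """Average reward the target policy would have obtained, estimated
        purely from data logged by a different (logging) policy."""
        weights = np.asarray(target_propensities) / np.asarray(logged_propensities)
        return float(np.mean(weights * np.asarray(rewards)))

    # Example: three logged steps with observed rewards and action propensities.
    value = ips_estimate(
        rewards=[1.0, 0.0, 1.0],
        logged_propensities=[0.5, 0.25, 0.5],
        target_propensities=[0.9, 0.1, 0.8],
    )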
+| ReAgent is released under a BSD license. Find out more about it here: :ref:`license`. +| Terms of Use - ``_ +| Privacy Policy - ``_ +| Copyright © 2022 Meta Platforms, Inc Citing ~~~~~~ -@article{gauci2018horizon, - title={Horizon: Facebook's Open Source Applied Reinforcement Learning Platform}, - author={Gauci, Jason and Conti, Edoardo and Liang, Yitao and Virochsiri, Kittipat and Chen, Zhengxing and He, Yuchen and Kaden, Zachary and Narayanan, Vivek and Ye, Xiaohui}, - journal={arXiv preprint arXiv:1811.00260}, - year={2018} -} +Cite our work by: +:: + @article{gauci2018horizon, + title={Horizon: Facebook's Open Source Applied Reinforcement Learning Platform}, + author={Gauci, Jason and Conti, Edoardo and Liang, Yitao and Virochsiri, Kittipat and Chen, Zhengxing and He, Yuchen and Kaden, Zachary and Narayanan, Vivek and Ye, Xiaohui}, + journal={arXiv preprint arXiv:1811.00260}, + year={2018} + } Table of Contents ~~~~~~~~~~~~~~~~~~~~~ @@ -75,26 +122,32 @@ Table of Contents :caption: Getting Started Installation - Tutorial Usage + RASP (Not Actively Maintained) .. toctree:: :caption: Advanced Topics - Distributed Training Continuous Integration .. toctree:: :caption: Package Reference - Evaluation - Models - Prediction - Preprocessing - Readers - Simulators - Training - Workflow + Core + Data + Gym + Evaluation + Lite + MAB + Model Managers + Model Utils + Net Builders + Optimizers + Models + Prediction + Preprocessing + Training + Workflow All Modules .. toctree:: diff --git a/docs/installation.rst b/docs/installation.rst index 0398a4d9b..9e661653f 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -7,7 +7,7 @@ ReAgent CLI & Python API ^^^^^^^^^^^^^^^^^^^^^^^^ We have CLI to launch training & Python API to use programmatically, e.g., in your own script or Jupyter Notebook. -To install this component, you will need to have Python 3.7+ installed on your system. +To install this component, you will need to have Python 3.8+ installed on your system. If you don't have that, you can either install it via `pyenv `_ or `conda `_. To verify that you have the right version, type the following command on your shell: @@ -24,6 +24,9 @@ Once you make sure you have the right version, you can simply clone this repo an cd ReAgent pip install ".[gym]" + # install nightly torch (change cpu to cu102 if fit) + pip install --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + If you don't want need gym dependencies, you can remove :code:`[gym]` To verify your setup please run `tox `_. @@ -46,7 +49,7 @@ To build from source, you'll need JDK, Scala, & Maven. We will use `SDKMAN! `_. +If there is anything not kept up-to-date in this tutorial, please always refer to the latest code. + + Quick Start ----------- - We have set up `Click `_ commands to run our RL workflow. The basic usage pattern is .. 
code-block:: @@ -37,7 +40,7 @@ To train a batch RL model, run the following commands: # set the config export CONFIG=reagent/workflow/sample_configs/discrete_dqn_cartpole_offline.yaml # gather some random transitions (can replace with your own) - ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.offline_gym $CONFIG + ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.offline_gym_random $CONFIG # convert data to timeline format ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.timeline_operator $CONFIG # train model based on timeline data @@ -92,7 +95,7 @@ In particular, the following Click command runs 150 episodes of ``CartPole-v0`` .. code-block:: - ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.offline_gym $CONFIG + ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.offline_gym_random $CONFIG The command essentially performs the following pseudo-code: @@ -231,6 +234,7 @@ To train the model, we first save our Spark table to Parquet format, and use `Pe input_table_spec=input_table_spec, # description of Spark table sample_range=train_sample_range, # what percentage of data to use for training reward_options=reward_options, # config to calculate rewards + data_fetcher=data_fetcher, # Controller for fetching data ) # train_dataset now points to a Parquet @@ -239,7 +243,7 @@ Now we are ready to train a model by running: .. code-block:: # make preprocessor from the normalization parameters of Step 3 - batch_preprocessor = manager.build_batch_preprocessor() + batch_preprocessor = manager.build_batch_preprocessor(use_gpu) # read preprocessed data data_reader = petastorm.make_batch_reader(train_dataset.parquet_url) diff --git a/preprocessing/pom.xml b/preprocessing/pom.xml index 0e3dc67a0..fdb8c4975 100644 --- a/preprocessing/pom.xml +++ b/preprocessing/pom.xml @@ -37,13 +37,13 @@ provided - 2.11.7 + 2.12.10 - 2.11 + 2.12 - 2.3.2 + 3.1.1 @@ -55,7 +55,7 @@ scalatest_${scala.binary.version} - 2.2.6 + 3.2.5 test @@ -69,7 +69,7 @@ jacoco-maven-plugin - 0.8.5 + 0.8.6 test @@ -97,7 +97,7 @@ scalacheck_${scala.binary.version} - 1.13.5 + 1.14.1 test @@ -293,7 +293,7 @@ scala-maven-plugin - 3.2.2 + 4.4.1 diff --git a/preprocessing/src/main/scala/com/facebook/spark/rl/Timeline.scala b/preprocessing/src/main/scala/com/facebook/spark/rl/Timeline.scala index db6260854..abae6a57e 100644 --- a/preprocessing/src/main/scala/com/facebook/spark/rl/Timeline.scala +++ b/preprocessing/src/main/scala/com/facebook/spark/rl/Timeline.scala @@ -128,10 +128,18 @@ object Timeline { sqlContext: SQLContext, config: TimelineConfiguration ): Unit = { - var filterTerminal = "HAVING next_state_features IS NOT NULL"; + var filterTerminal = "WHERE next_state_features IS NOT NULL"; if (config.addTerminalStateRow) { filterTerminal = ""; } + var filterTimeLimit = ""; + if (config.timeWindowLimit != None) { + if (filterTerminal == "") { + filterTimeLimit = s"WHERE time_since_first <= ${config.timeWindowLimit.get}"; + } else { + filterTimeLimit = s" AND time_since_first <= ${config.timeWindowLimit.get}"; + } + } val actionDataType = Helper.getDataTypes(sqlContext, config.inputTableName, List("action"))("action") @@ -193,23 +201,6 @@ object Timeline { case (acc, (k, v)) => s"${acc}, a.${k}" } - val timeLimitedSourceTable = config.timeWindowLimit - .map { timeLimit => - s""" - , time_limited_source_table AS ( - SELECT - *, - sequence_number - FIRST(sequence_number) OVER ( - PARTITION BY mdp_id - ORDER BY mdp_id, sequence_number - ) AS time_since_first - FROM source_table - HAVING 
time_since_first <= ${timeLimit} - ) - """.stripMargin - } - .getOrElse("") - val sourceTable = s""" WITH ${mdpFilter} source_table AS ( @@ -225,15 +216,8 @@ object Timeline { ${joinClause} a.ds BETWEEN '${config.startDs}' AND '${config.endDs}' ) - ${timeLimitedSourceTable} """.stripMargin - val sourceTableName = config.timeWindowLimit - .map { _ => - "time_limited_source_table" - } - .getOrElse("source_table") - val rewardColumnsQuery = rewardColumnDataTypes.foldLeft("") { case (acc, (k, v)) => s"${acc}, ${k}" } @@ -253,53 +237,59 @@ object Timeline { } val sqlCommand = s""" - ${sourceTable} + ${sourceTable}, + joined_table AS ( + SELECT + mdp_id, + state_features, + action, + LEAD(action) OVER ( + PARTITION BY + mdp_id + ORDER BY + mdp_id, + sequence_number + ) AS next_action, + action_probability + ${rewardColumnsQuery}, + LEAD(state_features) OVER ( + PARTITION BY + mdp_id + ORDER BY + mdp_id, + sequence_number + ) AS next_state_features, + sequence_number, + ROW_NUMBER() OVER ( + PARTITION BY + mdp_id + ORDER BY + mdp_id, + sequence_number + ) AS sequence_number_ordinal, + COALESCE(LEAD(sequence_number) OVER ( + PARTITION BY + mdp_id + ORDER BY + mdp_id, + sequence_number + ), sequence_number) - sequence_number AS time_diff, + sequence_number - FIRST(sequence_number) OVER ( + PARTITION BY + mdp_id + ORDER BY + mdp_id, + sequence_number + ) AS time_since_first + ${timelineJoinColumnsQuery} + FROM source_table + CLUSTER BY HASH(mdp_id, sequence_number) + ) SELECT - mdp_id, - state_features, - action, - LEAD(action) OVER ( - PARTITION BY - mdp_id - ORDER BY - mdp_id, - sequence_number - ) AS next_action, - action_probability - ${rewardColumnsQuery}, - LEAD(state_features) OVER ( - PARTITION BY - mdp_id - ORDER BY - mdp_id, - sequence_number - ) AS next_state_features, - sequence_number, - ROW_NUMBER() OVER ( - PARTITION BY - mdp_id - ORDER BY - mdp_id, - sequence_number - ) AS sequence_number_ordinal, - COALESCE(LEAD(sequence_number) OVER ( - PARTITION BY - mdp_id - ORDER BY - mdp_id, - sequence_number - ), sequence_number) - sequence_number AS time_diff, - sequence_number - FIRST(sequence_number) OVER ( - PARTITION BY - mdp_id - ORDER BY - mdp_id, - sequence_number - ) AS time_since_first - ${timelineJoinColumnsQuery} - FROM ${sourceTableName} + * + FROM joined_table ${filterTerminal} - CLUSTER BY HASH(mdp_id, sequence_number) + ${filterTimeLimit} """.stripMargin log.info("Executing query: ") log.info(sqlCommand) diff --git a/preprocessing/src/test/scala/com/facebook/spark/common/testutil/PipelineTester.scala b/preprocessing/src/test/scala/com/facebook/spark/common/testutil/PipelineTester.scala index 365791b5c..17c1bb526 100644 --- a/preprocessing/src/test/scala/com/facebook/spark/common/testutil/PipelineTester.scala +++ b/preprocessing/src/test/scala/com/facebook/spark/common/testutil/PipelineTester.scala @@ -12,12 +12,13 @@ import org.apache.spark.sql.functions.col import org.apache.spark.sql._ import org.apache.spark.{SparkConf, SparkContext} import org.apache.spark.sql.types._ -import org.scalatest.{BeforeAndAfterAll, FunSuiteLike, Suite} +import org.scalatest.{BeforeAndAfterAll, Suite} +import org.scalatest.funsuite.AnyFunSuiteLike import scala.collection.mutable import scala.math.abs -trait PipelineTester extends FunSuiteLike with BeforeAndAfterAll with TestLogging { this: Suite => +trait PipelineTester extends AnyFunSuiteLike with BeforeAndAfterAll with TestLogging { this: Suite => @transient private var _sparkContext: SparkContext = _ def sparkContext: SparkContext = 
_sparkContext diff --git a/preprocessing/src/test/scala/com/facebook/spark/common/testutil/TestLogging.scala b/preprocessing/src/test/scala/com/facebook/spark/common/testutil/TestLogging.scala index b39ec1f5d..5d146be5a 100644 --- a/preprocessing/src/test/scala/com/facebook/spark/common/testutil/TestLogging.scala +++ b/preprocessing/src/test/scala/com/facebook/spark/common/testutil/TestLogging.scala @@ -10,8 +10,8 @@ import org.scalatest._ import scala.collection.JavaConversions._ import scala.util.Try -trait TestLogging extends BeforeAndAfterAll with BeforeAndAfterEach with TestLogger { - this: Suite => +trait TestLogging extends BeforeAndAfterAll with BeforeAndAfterEach with TestLogger with TestSuiteMixin { + this: TestSuite => private val logLayout = new EnhancedPatternLayout("%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n") @@ -72,6 +72,7 @@ trait TestLogging extends BeforeAndAfterAll with BeforeAndAfterEach with TestLog val scopes = test.scopes val text = test.text val tags = test.tags + val pos = test.pos } super.withFixture(wrappedTest) diff --git a/pyproject.toml b/pyproject.toml index bf54bd474..257c07f49 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,8 @@ [build-system] requires = [ - "setuptools >= 35.0.2", - "setuptools_scm >= 2.0.0, <3" + "setuptools >= 42", + "setuptools_scm[toml] >= 3.4", + "wheel" ] build-backend = "setuptools.build_meta" +[tool.setuptools_scm] diff --git a/reagent/core/__init__.py b/reagent/core/__init__.py new file mode 100644 index 000000000..40539064a --- /dev/null +++ b/reagent/core/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/core/aggregators.py b/reagent/core/aggregators.py index ebb2b1142..af20f8ba9 100644 --- a/reagent/core/aggregators.py +++ b/reagent/core/aggregators.py @@ -3,12 +3,12 @@ import logging from collections import deque -from typing import Callable, Deque, Dict, List, Optional +from typing import Any, Callable, Deque, Dict, List, Optional import numpy as np import torch +from reagent.core.tensorboardX import SummaryWriterContext from reagent.core.tracker import Aggregator -from reagent.tensorboardX import SummaryWriterContext logger = logging.getLogger(__name__) @@ -105,6 +105,31 @@ def aggregate(self, values): self.values.append(mean) +class ListAggregator(Aggregator): + def __init__(self, key: str): + super().__init__(key) + self.values: Optional[Any] = [] + + def aggregate(self, values): + self.values.extend(values) + + +class LastEpochListAggregator(TensorAggregator): + def __init__(self, key: str): + super().__init__(key) + self.values: List = [] + self.epoch_values: List = [] + + def aggregate(self, values): + flattened = torch.flatten(values).tolist() + self.values.extend(flattened) + + def flush(self): + if self.values: + self.epoch_values = self.values + self.values = [] + + class FunctionsByActionAggregator(TensorAggregator): """ Aggregating the input by action, using the given functions. The input is diff --git a/reagent/core/base_dataclass.py b/reagent/core/base_dataclass.py new file mode 100644 index 000000000..90a62cc75 --- /dev/null +++ b/reagent/core/base_dataclass.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +""" +We should revisit this at some point. Config classes shouldn't subclass from this. 
+""" +import dataclasses +from typing import cast + + +class BaseDataClass: + def _replace(self, **kwargs): + return cast(type(self), dataclasses.replace(self, **kwargs)) diff --git a/reagent/core/configuration.py b/reagent/core/configuration.py index e6f55e389..146fe40d6 100644 --- a/reagent/core/configuration.py +++ b/reagent/core/configuration.py @@ -1,15 +1,16 @@ #!/usr/bin/python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import functools -from dataclasses import MISSING, Field, fields -from inspect import Parameter, isclass, signature +from dataclasses import Field, fields, MISSING +from inspect import isclass, Parameter, signature from typing import List, Optional, Type, Union from reagent.core.dataclasses import dataclass from torch import nn -BLACKLIST_TYPES = [nn.Module] +BLOCKLIST_TYPES = [nn.Module] def _get_param_annotation(p): @@ -36,9 +37,9 @@ def _get_param_annotation(p): def make_config_class( func, - whitelist: Optional[List[str]] = None, - blacklist: Optional[List[str]] = None, - blacklist_types: List[Type] = BLACKLIST_TYPES, + allowlist: Optional[List[str]] = None, + blocklist: Optional[List[str]] = None, + blocklist_types: List[Type] = BLOCKLIST_TYPES, ): """ Create a decorator to create dataclass with the arguments of `func` as fields. @@ -46,18 +47,18 @@ def make_config_class( you must use `dataclass.field(default_factory=default_factory)` as default. In that case, the func has to be wrapped with @resolve_defaults below. - `whitelist` & `blacklist` are mutually exclusive. + `allowlist` & `blocklist` are mutually exclusive. """ parameters = signature(func).parameters assert ( - whitelist is None or blacklist is None - ), "whitelist & blacklist are mutually exclusive" + allowlist is None or blocklist is None + ), "allowlist & blocklist are mutually exclusive" - blacklist_set = set(blacklist or []) + blocklist_set = set(blocklist or []) - def _is_type_blacklisted(t): + def _is_type_blocklisted(t): if getattr(t, "__origin__", None) is Union: assert len(t.__args__) == 2 and t.__args__[1] == type( None @@ -66,28 +67,28 @@ def _is_type_blacklisted(t): if hasattr(t, "__origin__"): t = t.__origin__ assert isclass(t), f"{t} is not a class." - return any(issubclass(t, blacklist_type) for blacklist_type in blacklist_types) + return any(issubclass(t, blocklist_type) for blocklist_type in blocklist_types) def _is_valid_param(p): - if p.name in blacklist_set: + if p.name in blocklist_set: return False if p.annotation == Parameter.empty and p.default == Parameter.empty: return False ptype = _get_param_annotation(p) - if _is_type_blacklisted(ptype): + if _is_type_blocklisted(ptype): return False return True - whitelist = whitelist or [p.name for p in parameters.values() if _is_valid_param(p)] + allowlist = allowlist or [p.name for p in parameters.values() if _is_valid_param(p)] def wrapper(config_cls): # Add __annotations__ for dataclass config_cls.__annotations__ = { field_name: _get_param_annotation(parameters[field_name]) - for field_name in whitelist + for field_name in allowlist } # Set default values - for field_name in whitelist: + for field_name in allowlist: default = parameters[field_name].default if default != Parameter.empty: setattr(config_cls, field_name, default) @@ -142,7 +143,7 @@ def wrapper(*args, **kwargs): return wrapper -def param_hash(p): +def param_hash(p) -> int: """ Use this to make parameters hashable. This is required because __hash__() is not inherited when subclass redefines __eq__(). 
We only need this when diff --git a/reagent/core/dataclasses.py b/reagent/core/dataclasses.py index 8de20c4f6..2d89f4b74 100644 --- a/reagent/core/dataclasses.py +++ b/reagent/core/dataclasses.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 - +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import dataclasses import logging @@ -7,12 +7,13 @@ # Redirection to make import simpler from dataclasses import field # noqa -from typing import TYPE_CHECKING, Optional +from typing import Any, Optional, TYPE_CHECKING import pydantic +from reagent.core.fb_checker import IS_FB_ENVIRONMENT -try: +if IS_FB_ENVIRONMENT: import fblearner.flow.api # noqa """ @@ -20,9 +21,7 @@ validator. This necessary to avoid pydantic complaining about validators. """ USE_VANILLA_DATACLASS = True - -except ImportError: - +else: USE_VANILLA_DATACLASS = False @@ -44,10 +43,9 @@ logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -logger.info(f"USE_VANILLA_DATACLASS: {USE_VANILLA_DATACLASS}") -logger.info(f"ARBITRARY_TYPES_ALLOWED: {ARBITRARY_TYPES_ALLOWED}") +logger.debug(f"USE_VANILLA_DATACLASS: {USE_VANILLA_DATACLASS}") +logger.debug(f"ARBITRARY_TYPES_ALLOWED: {ARBITRARY_TYPES_ALLOWED}") if TYPE_CHECKING: @@ -58,9 +56,7 @@ else: - def dataclass( - _cls: Optional[pydantic.typing.AnyType] = None, *, config=None, **kwargs - ): + def dataclass(_cls: Optional[Any] = None, *, config=None, **kwargs): def wrap(cls): # We don't want to look at parent class if "__post_init__" in cls.__dict__: @@ -72,7 +68,7 @@ def wrap(cls): if USE_VANILLA_DATACLASS: try: post_init_post_parse = cls.__dict__["__post_init_post_parse__"] - logger.info( + logger.debug( f"Setting {cls.__name__}.__post_init__ to its " "__post_init_post_parse__" ) diff --git a/reagent/debug_on_error.py b/reagent/core/debug_on_error.py similarity index 97% rename from reagent/debug_on_error.py rename to reagent/core/debug_on_error.py index 3383d01a2..ad1a77c96 100644 --- a/reagent/debug_on_error.py +++ b/reagent/core/debug_on_error.py @@ -4,15 +4,15 @@ import sys -def start(): +def start() -> None: def info(type, value, tb): if hasattr(sys, "ps1") or not sys.stderr.isatty(): # we are in interactive mode or we don't have a tty-like # device, so we call the default hook sys.__excepthook__(type, value, tb) else: - import traceback import pdb + import traceback # we are NOT in interactive mode, print the exception... traceback.print_exception(type, value, tb) diff --git a/reagent/core/fb_checker.py b/reagent/core/fb_checker.py new file mode 100644 index 000000000..6928575c5 --- /dev/null +++ b/reagent/core/fb_checker.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
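# Illustrative usage sketch for make_config_class from reagent/core/configuration.py
# above, showing how the renamed allowlist/blocklist arguments are typically
# exercised. The function and class names below are hypothetical, not from this patch.
from reagent.core.configuration import make_config_class


def build_trainer(learning_rate: float = 1e-3, gamma: float = 0.99, use_gpu: bool = False):
    ...


@make_config_class(build_trainer, blocklist=["use_gpu"])
class BuildTrainerConfig:
    pass


# BuildTrainerConfig now exposes `learning_rate` and `gamma` fields with the
# defaults above; `use_gpu` is excluded because it appears in the blocklist.
config = BuildTrainerConfig(learning_rate=5e-4)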
+import importlib.util +import os + + +def is_fb_environment() -> bool: + if importlib.util.find_spec("fblearner") is not None: + if not bool(int(os.environ.get("FORCE_OSS_ENVIRONMENT", False))): + return True + return False + + +IS_FB_ENVIRONMENT: bool = is_fb_environment() diff --git a/reagent/core/multiprocess_utils.py b/reagent/core/multiprocess_utils.py index d26ad85f3..bc018338a 100644 --- a/reagent/core/multiprocess_utils.py +++ b/reagent/core/multiprocess_utils.py @@ -12,7 +12,7 @@ def deserialize_and_run( serialized_args: List[bytes], serialized_kwargs: Dict[str, bytes], *args, - **kwargs + **kwargs, ) -> bytes: fn: Callable = cloudpickle.loads(serialized_fn) d_args: List[Any] = [] diff --git a/reagent/core/observers.py b/reagent/core/observers.py index 4fe1c6cbb..d2bc6f294 100644 --- a/reagent/core/observers.py +++ b/reagent/core/observers.py @@ -4,6 +4,7 @@ import logging from typing import Any, Dict, Iterable, List, Optional +from reagent.core.tensorboardX import SummaryWriterContext from reagent.core.tracker import Aggregator, Observer @@ -58,6 +59,16 @@ def reset(self): self.values = [] +class TensorBoardScalarObserver(Observer): + def __init__(self, key: str, logging_key: Optional[str]): + super().__init__(observing_keys=[key]) + self.key = key + self.logging_key = logging_key or key + + def update(self, key: str, value): + SummaryWriterContext.add_scalar(self.logging_key, value) + + class IntervalAggregatingObserver(Observer): def __init__( self, @@ -78,14 +89,14 @@ def update(self, key: str, value): if key == "epoch_end": self.flush() return - self.intermediate_values.append(value) self.iteration += 1 - # pyre-fixme[6]: Expected `int` for 1st param but got `Optional[int]`. if self.interval and self.iteration % self.interval == 0: logger.info( - f"Interval Agg. Update: {self.key}; iteration {self.iteration}; " - f"aggregator: {self.aggregator.__class__.__name__}" + "Aggregating values over the recent interval for %s at iteration %s; aggregator: %s", + self.key, + self.iteration, + self.aggregator.__class__.__name__, ) self.aggregator(self.key, self.intermediate_values) self.intermediate_values = [] @@ -101,3 +112,4 @@ def flush(self): if self.intermediate_values: self.aggregator(self.key, self.intermediate_values) self.intermediate_values = [] + self.aggregator.flush() diff --git a/reagent/core/oss_tensorboard_logger.py b/reagent/core/oss_tensorboard_logger.py new file mode 100644 index 000000000..808f6c632 --- /dev/null +++ b/reagent/core/oss_tensorboard_logger.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
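# Usage sketch for reagent/core/fb_checker.py introduced above: the
# FORCE_OSS_ENVIRONMENT variable is read once at import time, so it must be
# set before the first `reagent` import in order to force the OSS code path
# even when `fblearner` happens to be importable.
import os

os.environ["FORCE_OSS_ENVIRONMENT"] = "1"

from reagent.core.fb_checker import IS_FB_ENVIRONMENT

assert IS_FB_ENVIRONMENT is False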
+from typing import Dict, List, Optional, Tuple, Union + +import torch +from pytorch_lightning.loggers import TensorBoardLogger +from pytorch_lightning.utilities import rank_zero_only + + +class LocalCacheLogger: + @staticmethod + def store_metrics( + tb_logger, + metrics: Dict[ + str, Union[float, torch.Tensor, Dict[str, Union[float, torch.Tensor]]] + ], + step: Optional[int] = None, + ) -> None: + for plot_name, plot_value_or_dict in metrics.items(): + if isinstance(plot_value_or_dict, dict): + if plot_name not in tb_logger.line_plot_buffer: + tb_logger.line_plot_buffer[plot_name] = {} + for line_name, plot_value in plot_value_or_dict.items(): + LocalCacheLogger._add_point( + tb_logger, plot_name, line_name, plot_value, step + ) + else: + LocalCacheLogger._add_point( + tb_logger, plot_name, "", plot_value_or_dict, step + ) + + @staticmethod + def _add_point( + tb_logger, + plot_name: str, + line_name: str, + plot_value: Union[float, torch.Tensor], + step: Optional[int], + ) -> None: + """Adds a point to a multi-line plot given the plot name, the line name, and optionally the step (x coordinate).""" + if isinstance(plot_value, torch.Tensor): + plot_value = plot_value.item() + + if step is None: + if ( + plot_name in tb_logger.line_plot_buffer + and line_name in tb_logger.line_plot_buffer[plot_name] + ): + x = tb_logger.line_plot_buffer[plot_name][line_name][-1][0] + 1.0 + else: + x = 0.0 + else: + x = float(step) + + LocalCacheLogger._create_plots_and_append( + tb_logger.line_plot_buffer, plot_name, line_name, x, plot_value + ) + + if len(tb_logger.line_plot_buffer[plot_name][line_name]) >= 50: + mean = float( + torch.mean( + torch.FloatTensor( + [ + float(p[1]) + for p in tb_logger.line_plot_buffer[plot_name][line_name] + ] + ) + ).item() + ) + LocalCacheLogger._create_plots_and_append( + tb_logger.line_plot_aggregated, plot_name, line_name, x, mean + ) + tb_logger.line_plot_buffer[plot_name][line_name].clear() + + @staticmethod + def _create_plots_and_append( + plot_store: Dict[str, Dict[str, List[Tuple[float, float]]]], + plot_name: str, + line_name: str, + x: int, + y: float, + ) -> None: + if plot_name in plot_store and line_name in plot_store[plot_name]: + plot_store[plot_name][line_name].append((x, y)) + elif plot_name in plot_store: + plot_store[plot_name][line_name] = [(x, y)] + else: + plot_store[plot_name] = {line_name: [(x, y)]} + + +class OssTensorboardLogger(TensorBoardLogger): + """Wrapper around ManifoldTensorBoardLogger that collects the plot data in memory and can flush to create fblearner plot objects.""" + + def __init__( + self, + save_dir: str, + name: Optional[str] = "default", + version: Optional[Union[int, str]] = None, + log_graph: bool = False, + default_hp_metric: bool = True, + prefix: str = "", + **kwargs, + ) -> None: + super().__init__( + save_dir, + name, + version, + log_graph, + default_hp_metric, + prefix, + **kwargs, + ) + self.line_plot_aggregated: Dict[str, Dict[str, List[Tuple[float, float]]]] = {} + self.line_plot_buffer: Dict[str, Dict[str, List[Tuple[float, float]]]] = {} + + @rank_zero_only + def log_metrics( + self, + metrics: Dict[ + str, Union[float, torch.Tensor, Dict[str, Union[float, torch.Tensor]]] + ], + step: Optional[int] = None, + ) -> None: + """Log a set of metrics. 
A metric is either a scalar or a set of scalars that will be plotted together""" + super().log_metrics(metrics, step) + LocalCacheLogger.store_metrics(self, metrics, step) + + def clear_local_data(self) -> None: + # We don't call clear here because it's a lot of data and someone else probably owns it + self.line_plot_aggregated = {} + self.line_plot_buffer = {} diff --git a/reagent/parameters.py b/reagent/core/parameters.py similarity index 65% rename from reagent/parameters.py rename to reagent/core/parameters.py index 8a61e3c61..45f43957c 100644 --- a/reagent/parameters.py +++ b/reagent/core/parameters.py @@ -1,13 +1,17 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import enum from typing import Dict, List, Optional +from reagent.core.base_dataclass import BaseDataClass from reagent.core.configuration import param_hash from reagent.core.dataclasses import dataclass, field -from reagent.optimizer.union import Optimizer__Union -from reagent.parameters_seq2slate import LearningMethod, RewardClamp -from reagent.types import BaseDataClass +from reagent.core.parameters_seq2slate import ( + IPSClamp, + LearningMethod, + SimulationParameters, +) # For TD3 and SAC: actions are normalized in this range for training and @@ -15,6 +19,29 @@ CONTINUOUS_TRAINING_ACTION_RANGE = (-1.0, 1.0) +class ProblemDomain(enum.Enum): + CONTINUOUS_ACTION = "continuous_action" + DISCRETE_ACTION = "discrete_action" + PARAMETRIC_ACTION = "parametric_action" + + # I don't think the data generated for these 2 types are generic + SEQ_TO_REWARD = "seq2reward" + MDN_RNN = "mdn_rnn" + + +class SlateOptMethod(enum.Enum): + GREEDY = "greedy" + TOP_K = "top_k" + EXACT = "exact" + + +@dataclass(frozen=True) +class SlateOptParameters(BaseDataClass): + __hash__ = param_hash + + method: SlateOptMethod = SlateOptMethod.TOP_K + + @dataclass(frozen=True) class RLParameters(BaseDataClass): __hash__ = param_hash @@ -25,7 +52,7 @@ class RLParameters(BaseDataClass): maxq_learning: bool = True reward_boost: Optional[Dict[str, float]] = None temperature: float = 0.01 - softmax_policy: bool = True + softmax_policy: bool = False use_seq_num_diff_as_time_diff: bool = False q_network_loss: str = "mse" set_missing_value_to_zero: bool = False @@ -45,17 +72,30 @@ class MDNRNNTrainerParameters(BaseDataClass): hidden_size: int = 64 num_hidden_layers: int = 2 - minibatch_size: int = 16 learning_rate: float = 0.001 num_gaussians: int = 5 - train_data_percentage: float = 60.0 - validation_data_percentage: float = 20.0 - test_data_percentage: float = 20.0 # weight in calculating world-model loss reward_loss_weight: float = 1.0 next_state_loss_weight: float = 1.0 not_terminal_loss_weight: float = 1.0 fit_only_one_next_step: bool = False + action_dim: int = 2 + action_names: Optional[List[str]] = None + multi_steps: int = 1 + + +@dataclass(frozen=True) +class Seq2RewardTrainerParameters(BaseDataClass): + __hash__ = param_hash + + learning_rate: float = 0.001 + multi_steps: int = 1 + action_names: List[str] = field(default_factory=lambda: []) + compress_model_learning_rate: float = 0.001 + gamma: float = 1.0 + view_q_value: bool = False + step_predict_net_size: int = 64 + reward_boost: Optional[Dict[str, float]] = None @dataclass(frozen=True) @@ -112,7 +152,7 @@ class NormalizationParameters(BaseDataClass): class NormalizationKey(object): - """ Keys for dictionaries of NormalizationData """ + """Keys for dictionaries of NormalizationData""" STATE = "state" ACTION = "action" @@ -126,18 +166,31 @@ class 
NormalizationData(BaseDataClass): dense_normalization_parameters: Dict[int, NormalizationParameters] +@dataclass(frozen=True) +class ConvNetParameters(BaseDataClass): + conv_dims: List[int] + conv_height_kernels: List[int] + pool_types: List[str] + pool_kernel_sizes: List[int] + conv_width_kernels: Optional[List[int]] = None + + ################################################# # RL Ranking parameters # ################################################# @dataclass(frozen=True) class TransformerParameters(BaseDataClass): - num_heads: int + num_heads: int = 1 + dim_model: int = 64 + dim_feedforward: int = 32 + num_stacked_layers: int = 2 + state_embed_dim: Optional[int] = None + + +@dataclass(frozen=True) +class GRUParameters(BaseDataClass): dim_model: int - dim_feedforward: int num_stacked_layers: int - optimizer: Optimizer__Union = field( - default_factory=Optimizer__Union.default(lr=1e-4, amsgrad=True) - ) @dataclass(frozen=True) @@ -145,26 +198,18 @@ class BaselineParameters(BaseDataClass): dim_feedforward: int num_stacked_layers: int warmup_num_batches: int = 0 - optimizer: Optimizer__Union = field( - default_factory=Optimizer__Union.default(lr=1e-4, amsgrad=True) - ) @dataclass(frozen=True) -class Seq2SlateTransformerParameters(BaseDataClass): - transformer: TransformerParameters - baseline: Optional[BaselineParameters] - on_policy: bool - learning_method: LearningMethod - importance_sampling_clamp_max: Optional[float] = None - simulation_reward_clamp: Optional[RewardClamp] = None - # penalize sequences far away from prod - simulation_distance_penalty: Optional[float] = None +class Seq2SlateParameters(BaseDataClass): + on_policy: bool = True + learning_method: LearningMethod = LearningMethod.REINFORCEMENT_LEARNING + ips_clamp: Optional[IPSClamp] = None + simulation: Optional[SimulationParameters] = None @dataclass(frozen=True) class RankingParameters(BaseDataClass): - minibatch_size: int - max_src_seq_len: int - max_tgt_seq_len: int - greedy_serving: bool + max_src_seq_len: int = 0 + max_tgt_seq_len: int = 0 + greedy_serving: bool = False diff --git a/reagent/core/parameters_seq2slate.py b/reagent/core/parameters_seq2slate.py new file mode 100644 index 000000000..800c61160 --- /dev/null +++ b/reagent/core/parameters_seq2slate.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from enum import Enum +from typing import Dict, Optional + +from reagent.core.dataclasses import dataclass +from reagent.core.types import BaseDataClass + + +class LearningMethod(Enum): + TEACHER_FORCING = "teacher_forcing" + REINFORCEMENT_LEARNING = "reinforcement_learning" + PAIRWISE_ATTENTION = "pairwise_attention" + SIMULATION = "simulation" + + @property + def expect_slate_wise_reward(self) -> bool: + return self in ( + LearningMethod.REINFORCEMENT_LEARNING, + LearningMethod.SIMULATION, + ) + + +@dataclass(frozen=True) +class RewardClamp: + clamp_min: Optional[float] = None + clamp_max: Optional[float] = None + + +class IPSClampMethod(Enum): + # set tgt_propensity / log_propensity <= clamp_max + UNIVERSAL = "universal" + + # set tgt_propensity / log_propensity = 0 if >= clamp_max + # Bottou et. 
al JMLR 2013 (Counterfactual Reasoning and Learning Systems) + AGGRESSIVE = "aggressive" + + +@dataclass(frozen=True) +class IPSClamp(BaseDataClass): + clamp_method: IPSClampMethod + clamp_max: float + + +@dataclass(frozen=True) +class SimulationParameters(BaseDataClass): + reward_name_weight: Dict[str, float] + reward_name_power: Dict[str, float] + reward_name_path: Dict[str, str] + reward_clamp: Optional[RewardClamp] = None + # penalize sequences far away from prod + distance_penalty: Optional[float] = None diff --git a/reagent/core/registry_meta.py b/reagent/core/registry_meta.py index b8bef96b7..70ee7e95a 100644 --- a/reagent/core/registry_meta.py +++ b/reagent/core/registry_meta.py @@ -1,45 +1,74 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import abc import logging +import os from typing import Dict, Optional, Type from reagent.core.dataclasses import dataclass -from reagent.core.tagged_union import INTERNAL_TAGGED_UNION, TaggedUnion +from reagent.core.fb_checker import IS_FB_ENVIRONMENT logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) + + +def skip_frozen_registry_check() -> bool: + # returns True if SKIP_FROZEN_REGISTRY_CHECK env var is set to non-NULL + return bool(int(os.environ.get("SKIP_FROZEN_REGISTRY_CHECK", 0))) class RegistryMeta(abc.ABCMeta): + """ + A metaclass used to auto-fill union classes for FBLearner. + It automatically keeps track of all the subclasses and uses them to fill the union + class (by calling the fill_union() method). + After a union class is filled, the registry gets frozen and new members can't be added. + If environment variable SKIP_FROZEN_REGISTRY_CHECK=1 is set, we log a warning instead of + raising an exception when a new member is attempted to be added to the registry. + """ + def __init__(cls, name, bases, attrs): if not hasattr(cls, "REGISTRY"): # Put REGISTRY on cls. This only happens once on the base class - logger.info("Adding REGISTRY to type {}".format(name)) + logger.debug("Adding REGISTRY to type {}".format(name)) cls.REGISTRY: Dict[str, Type] = {} cls.REGISTRY_NAME = name cls.REGISTRY_FROZEN = False - assert not cls.REGISTRY_FROZEN, ( - f"{cls.REGISTRY_NAME} has been used to fill a union. " - "Please rearrange your import orders" - ) - - if not cls.__abstractmethods__ and name != cls.REGISTRY_NAME: - # Only register fully-defined classes - logger.info(f"Registering {name} to {cls.REGISTRY_NAME}") - if hasattr(cls, "__registry_name__"): - registry_name = cls.__registry_name__ - logger.info(f"Using {registry_name} instead of {name}") - name = registry_name - assert name not in cls.REGISTRY - cls.REGISTRY[name] = cls + if cls.REGISTRY_FROZEN: + # trying to add to a frozen registry + if skip_frozen_registry_check(): + logger.warning( + f"{cls.REGISTRY_NAME} has been used to fill a union and is now frozen. " + "Since environment variable SKIP_FROZEN_REGISTRY_CHECK was set, " + f"no exception was raised, but {name} wasn't added to the registry" + ) + else: + raise RuntimeError( + f"{cls.REGISTRY_NAME} has been used to fill a union and is now frozen, " + f"so {name} can't be added to the registry. " + "Please rearrange your import orders. Or set environment variable " + "SKIP_FROZEN_REGISTRY_CHECK=1 to replace this error with a warning if you " + f"don't need the {name} to be added to the registry (e.g. 
if you're running the " + "code in an interactive mode or are developing custom FBL workflows that don't " + "rely on ReAgent union classes)" + ) else: - logger.info( - f"Not Registering {name} to {cls.REGISTRY_NAME}. Abstract " - f"method {list(cls.__abstractmethods__)} are not implemented." - ) + if not cls.__abstractmethods__ and name != cls.REGISTRY_NAME: + # Only register fully-defined classes + logger.debug(f"Registering {name} to {cls.REGISTRY_NAME}") + if hasattr(cls, "__registry_name__"): + registry_name = cls.__registry_name__ + logger.debug(f"Using {registry_name} instead of {name}") + name = registry_name + assert name not in cls.REGISTRY, f"{name} in REGISTRY {cls.REGISTRY}" + cls.REGISTRY[name] = cls + else: + logger.debug( + f"Not Registering {name} to {cls.REGISTRY_NAME}. Abstract " + f"methods {list(cls.__abstractmethods__)} are not implemented." + ) return super().__init__(name, bases, attrs) def fill_union(cls): @@ -53,7 +82,7 @@ def make_union_instance(inst, instance_class=None): union.make_union_instance = make_union_instance - if not INTERNAL_TAGGED_UNION: + if not IS_FB_ENVIRONMENT: # OSS TaggedUnion union.__annotations__ = { name: Optional[t] for name, t in cls.REGISTRY.items() @@ -67,3 +96,12 @@ def make_union_instance(inst, instance_class=None): return union return wrapper + + +def wrap_oss_with_dataclass(union): + if not IS_FB_ENVIRONMENT: + # OSS TaggedUnion + return dataclass(frozen=True)(union) + else: + # FBL TaggedUnion + return union diff --git a/reagent/workflow/result_registries.py b/reagent/core/result_registries.py similarity index 100% rename from reagent/workflow/result_registries.py rename to reagent/core/result_registries.py diff --git a/reagent/workflow/result_types.py b/reagent/core/result_types.py similarity index 81% rename from reagent/workflow/result_types.py rename to reagent/core/result_types.py index a22bb6bfa..14509b68b 100644 --- a/reagent/workflow/result_types.py +++ b/reagent/core/result_types.py @@ -2,7 +2,7 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from reagent.core.dataclasses import dataclass -from reagent.workflow.result_registries import PublishingResult, ValidationResult +from reagent.core.result_registries import PublishingResult, ValidationResult @dataclass diff --git a/reagent/core/running_stats.py b/reagent/core/running_stats.py new file mode 100644 index 000000000..2201fb7e9 --- /dev/null +++ b/reagent/core/running_stats.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import math + + +class RunningStats: + """Running statistics for elements in a stream + + Can take single values or iterables + + 1. Implements Welford's algorithm for computing a running mean + and standard deviation + 2. 
Min-Heap to find top-k where k < capacity (kwarg) + Methods: + mean - returns the mean + std - returns the std + meanfull- returns the mean and std of the mean + topk(k) - returns the kth highest value for k < capacity + """ + + def __init__(self, lst=None, capacity: int = 1000) -> None: + self.k = 0 + self.running_mean = 0 + self.sum_squares = 0 + self.__call__(lst) + + def update(self, x) -> None: + if x is None: + return + self.k += 1 + newM = self.running_mean + (x - self.running_mean) * 1.0 / self.k + newS = self.sum_squares + (x - self.running_mean) * (x - newM) + self.running_mean, self.sum_squares = newM, newS + + def consume(self, lst) -> None: + lst = iter(lst) + for x in lst: + self.update(x) + + def __call__(self, x) -> None: + if hasattr(x, "__iter__"): + self.consume(x) + else: + self.update(x) + + @property + def mean(self) -> int: + return self.running_mean + + @property + def meanfull(self): + return self.mean, self.std / math.sqrt(self.k) + + @property + def std(self) -> float: + if self.k == 1: + return 0 + return math.sqrt(self.sum_squares / (self.k - 1)) + + def __repr__(self) -> str: + return "".format(self.mean, self.std) diff --git a/reagent/core/tagged_union.py b/reagent/core/tagged_union.py index ac37597a3..3f09c0c48 100644 --- a/reagent/core/tagged_union.py +++ b/reagent/core/tagged_union.py @@ -1,10 +1,10 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -try: - from fblearner.flow.core.types_lib.union import TaggedUnion as FlowTaggedUnion +from reagent.core.fb_checker import IS_FB_ENVIRONMENT - INTERNAL_TAGGED_UNION = True +if IS_FB_ENVIRONMENT: + from fblearner.flow.core.types_lib.union import TaggedUnion as FlowTaggedUnion class TaggedUnion(FlowTaggedUnion): @classmethod @@ -24,13 +24,10 @@ def pydantic_validate(cls, v): raise ValueError(f"Unknown key {key}") return cls(**{key: cls.__annotations__[key](**v[key])}) - -except ImportError: +else: from dataclasses import fields - INTERNAL_TAGGED_UNION = False - class TaggedUnion: """ Assuming that subclasses are pydantic's dataclass. All the fields must be Optional @@ -42,5 +39,7 @@ def value(self): selected_fields = [ field.name for field in fields(self) if getattr(self, field.name, None) ] - assert len(selected_fields) == 1, f"Expecting one selected field" + assert ( + len(selected_fields) == 1 + ), f"{self} Expecting one selected field, got {selected_fields}" return getattr(self, selected_fields[0]) diff --git a/reagent/tensorboardX.py b/reagent/core/tensorboardX.py similarity index 100% rename from reagent/tensorboardX.py rename to reagent/core/tensorboardX.py diff --git a/reagent/core/torch_utils.py b/reagent/core/torch_utils.py new file mode 100644 index 000000000..055ff1103 --- /dev/null +++ b/reagent/core/torch_utils.py @@ -0,0 +1,303 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from io import BytesIO +from typing import Dict, List + +import numpy as np +import torch +from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor + + +def dict_to_tensor(batch: Dict[str, np.ndarray], device: str = "cpu"): + return {k: torch.tensor(v).to(device) for k, v in batch.items()} + + +def rescale_torch_tensor( + tensor: torch.Tensor, + new_min: torch.Tensor, + new_max: torch.Tensor, + prev_min: torch.Tensor, + prev_max: torch.Tensor, +): + """ + Rescale column values in N X M torch tensor to be in new range. 
+ Each column m in input tensor will be rescaled from range + [prev_min[m], prev_max[m]] to [new_min[m], new_max[m]] + """ + assert tensor.shape[1] == new_min.shape[1] == new_max.shape[1] + assert tensor.shape[1] == prev_min.shape[1] == prev_max.shape[1] + prev_range = prev_max - prev_min + new_range = new_max - new_min + return ((tensor - prev_min) / prev_range) * new_range + new_min + + +def stack(mems): + """ + Stack a list of tensors + Could use torch.stack here but torch.stack is much slower + than torch.cat + view + Submitted an issue for investigation: + https://github.com/pytorch/pytorch/issues/22462 + + FIXME: Remove this function after the issue above is resolved + """ + shape = (-1, *mems[0].shape) + return torch.cat(mems).view(*shape) + + +def export_module_to_buffer(module) -> BytesIO: + # traced_script_module = torch.jit.trace(module, module.input_prototype()) + write_buffer = BytesIO() + torch.jit.save(module, write_buffer) + return write_buffer + + +def softmax(x, temperature): + """Compute softmax values for each sets of scores in x.""" + x = x / temperature + return torch.nn.functional.softmax(x, dim=1) + + +def masked_softmax(x, mask: float, temperature): + """Compute softmax values for each sets of scores in x.""" + x = x / temperature + mask_min_x = x - ((1.0 - mask) * 1e20) + mask_min_x -= torch.max(mask_min_x, dim=1, keepdim=True)[0] + e_x = torch.exp(mask_min_x) + e_x *= mask + out = e_x / e_x.sum(dim=1, keepdim=True) + + # Set NaN values to 0 (NaN happens when a full mask row is passed in) + out[out != out] = 0 + return out + + +def gather(data, index_2d): + """ + Gather data alongs the second dim. Assume data is 3d with shape (batch_size, dim1, dim2), + and index_2d's shape is (batch_size, dim1). + output[i][j] = data[i][index_2d[i][j]] + + This function does not require data, output, or index_2d having the same shape, which + is mandated by torch.gather. + """ + batch_size = data.shape[0] + data_dim = data.shape[2] + index_len = index_2d.shape[1] + device = data.device + res = data[ + torch.arange(batch_size, device=device).repeat_interleave( + # index_len has to be moved to the device explicitly, otherwise + # error will throw during jit.trace + torch.tensor([index_len], device=device) + ), + index_2d.flatten(), + ].view(batch_size, index_len, data_dim) + return res + + +def get_device(model): + return next(model.parameters()).device + + +def split_sequence_keyed_jagged_tensor( + x: KeyedJaggedTensor, num_steps: int +) -> List[KeyedJaggedTensor]: + """ + Input: + x (KeyedJaggedTensor): represents a batch of sequential sparse data. 
+ Analogous to a batch of sequential dense data with shape: + batch_size x num_steps x num_dense_feature + + Return: + Split data into individual steps and return a list of KeyedJaggedTensor + (the length of the list equals to num_steps) + + Example: + Input KeyedJaggedTensor (x): + x = KeyedJaggedTensor( + keys=["Key0", "Key1", "Key2"], + values=[V0, V1, V2, V3, V4, V5, V6, V7, V8, V9] + lengths=[2, 0, 1, 1, 1, 1, 3, 0, 0, 1, 0, 0] + ) + which represents a minibatch of 2 data points with three keys and two steps: + data0_step0 data0_step1 data1_step0 data1_step1 + "Key0" [V0,V1] None [V2] [V3] + "Key1" [V4] [V5] [V6,V7,V8] None + "Key2" None [V9] None None + + It will be split and returned as a list of two KeyedJaggedTensor: + [ + # step 0 + KeyedJaggedTensor( + keys=["Key0", "Key1", "Key2"], + values=[V0, V1, V2, V4, V6, V7, V8] + lengths=[2, 1, 1, 3, 0, 0] + ), + # step 1 + KeyedJaggedTensor( + keys=["Key0", "Key1", "Key2"], + values=[V3, V5, V9] + lengths=[0, 1, 1, 0, 1, 0] + ) + ] + """ + keys = x.keys() + has_weights = x._weights is not None + split_dict = {} + for i in range(num_steps): + split_dict[i] = {} + for key in keys: + keyed_x: JaggedTensor = x[key] + weights = keyed_x._weights + values = keyed_x.values() + lengths = keyed_x.lengths() + + # Because len(lengths) == batch_size * num_steps + assert len(lengths) % num_steps == 0 + + splitted_values = torch.split(values, lengths.tolist()) + if has_weights: + # pyre-fixme[6]: For 1st param expected `Tensor` but got `Optional[Tensor]`. + splitted_weights = torch.split(weights, lengths.tolist()) + for i in range(num_steps): + split_dict[i][key] = ( + lengths[i::num_steps], + torch.cat(splitted_values[i::num_steps]), + torch.cat(splitted_weights[i::num_steps]) if has_weights else None, + ) + + result: List[KeyedJaggedTensor] = [] + for i in range(num_steps): + result.append( + KeyedJaggedTensor( + keys=keys, + lengths=torch.cat([split_dict[i][k][0] for k in keys]), + values=torch.cat([split_dict[i][k][1] for k in keys]), + weights=torch.cat([split_dict[i][k][2] for k in keys]) + if has_weights + else None, + ) + ) + return result + + +def reorder_data_kjt(x: KeyedJaggedTensor, indices: torch.Tensor): + """ + Reorder the data for each key in a KeyedJaggedTensor + + Input: + indices: Long tensor represents the order of returned data for each key + + Example: + Input KeyedJaggedTensor (x): + x = KeyedJaggedTensor( + keys=["Key0", "Key1"], + values=[V0, V1, V2, V3, V4, V5, V6] + lengths=[2, 0, 1, 1, 1, 2] + ) + which represents data: + data0 data1 data2 + "Key0" [V0,V1] None [V2] + "Key1" [V3] [V4] [V5,V6] + + If we wish to order data as [data2, data1, data0], then this function will return + data0 data1 data2 + "Key0" [V2] None [V0, V1] + "Key1" [V5,V6] [V4] [V3] + """ + num_keys = len(x.keys()) + num_data = len(indices) + assert ( + len(x.lengths()) == num_keys * num_data + ), "The num of data indicated by input arg indices does not match with input KeyedJaggedTensor" + + acc_lengths_per_key = torch.cumsum(torch.tensor(x.length_per_key()), dim=0) + values_per_key = torch.tensor_split(x.values(), acc_lengths_per_key)[:-1] + val_lens_per_key = torch.chunk(x.lengths(), num_keys) + splitted_vals_per_key = [ + torch.tensor_split(x, torch.cumsum(y, dim=0))[:-1] + for x, y in zip(values_per_key, val_lens_per_key) + ] + + # Reorder values, lengths, and weights *WITHIN each key* + reordered_vals = torch.cat( + [torch.cat([x[y] for y in indices.tolist()]) for x in splitted_vals_per_key] + ) + reordered_lengths = torch.cat([x[indices] for x in 
val_lens_per_key]) + if x.weights_or_none() is not None: + weights_per_key = torch.tensor_split(x.weights(), acc_lengths_per_key)[:-1] + splitted_weights_per_key = [ + torch.tensor_split(x, torch.cumsum(y, dim=0))[:-1] + for x, y in zip(weights_per_key, val_lens_per_key) + ] + reordered_weights = torch.cat( + [ + torch.cat([x[y] for y in indices.tolist()]) + for x in splitted_weights_per_key + ] + ) + else: + reordered_weights = None + + res = KeyedJaggedTensor( + keys=x.keys(), + lengths=reordered_lengths, + values=reordered_vals, + weights=reordered_weights, + ) + return res + + +def shift_kjt_by_one(x: KeyedJaggedTensor): + """ + Shift the data by one for each key in a KeyedJaggedTensor + The last data will then always have no value + + Example: + Input KeyedJaggedTensor (x): + x = KeyedJaggedTensor( + keys=["Key0", "Key1"], + values=[V0, V1, V2, V3, V4, V5, V6] + lengths=[2, 0, 1, 1, 1, 2] + ) + which represents data: + data0 data1 data2 + "Key0" [V0,V1] None [V2] + "Key1" [V3] [V4] [V5,V6] + + If we wish to shift data by one, then this function will return + data0 data1 data2 + "Key0" None [V2] None + "Key1" [V4] [V5,V6] None + """ + num_keys = len(x.keys()) + acc_lengths_per_key = torch.cumsum(torch.tensor(x.length_per_key()), dim=0) + values_per_key = torch.tensor_split(x.values(), acc_lengths_per_key)[:-1] + val_lens_per_key = torch.chunk(x.lengths(), num_keys) + + # Shift values, lengths, and weights *WITHIN each key* + shifted_vals = torch.cat( + [x[y[0] :] for x, y in zip(values_per_key, val_lens_per_key)] + ) + shifted_lengths = torch.cat( + [ + torch.cat([x[1:], torch.tensor([0], device=x.device)]) + for x in val_lens_per_key + ] + ) + if x.weights_or_none() is not None: + weights_per_key = torch.tensor_split(x.weights(), acc_lengths_per_key)[:-1] + shifted_weights = torch.cat( + [x[y[0] :] for x, y in zip(weights_per_key, val_lens_per_key)] + ) + else: + shifted_weights = None + + res = KeyedJaggedTensor( + keys=x.keys(), + lengths=shifted_lengths, + values=shifted_vals, + weights=shifted_weights, + ) + return res diff --git a/reagent/core/tracker.py b/reagent/core/tracker.py index 0f03090f0..f2e0d4c82 100644 --- a/reagent/core/tracker.py +++ b/reagent/core/tracker.py @@ -3,7 +3,7 @@ import functools import logging -from typing import List +from typing import Dict, List, Type import torch @@ -40,6 +40,60 @@ def __call__(self, key: str, values): def aggregate(self, values): pass + def flush(self): + pass + + +class ObservableMixin: + def __init__(self): + super().__init__() + self._observers = {v: [] for v in self._observable_value_types} + + @property + def _observable_value_types(self) -> Dict[str, Type]: + raise NotImplementedError + + def add_observer(self, observer: Observer): + observing_keys = observer.get_observing_keys() + unknown_keys = [ + k for k in observing_keys if k not in self._observable_value_types + ] + if unknown_keys: + logger.warning(f"{unknown_keys} cannot be observed in {type(self)}") + for k in observing_keys: + if k in self._observers and observer not in self._observers[k]: + self._observers[k].append(observer) + return self + + def add_observers(self, observers: List[Observer]): + for observer in observers: + self.add_observer(observer) + return self + + def notify_observers(self, **kwargs): + for key, value in kwargs.items(): + if value is None: + # Allow optional reporting + continue + + assert key in self._observers, f"Unknown key: {key}" + + # TODO: Create a generic framework for type conversion + if self._observable_value_types[key] == 
torch.Tensor: + try: + if not isinstance(value, torch.Tensor): + value = torch.tensor(value) + if len(value.shape) == 0: + value = value.reshape(1) + value = value.detach() + except Exception: + # Be lenient about conversion since ReporterBase + # has inaccurate type + pass + + for observer in self._observers[key]: + observer.update(key, value) + def observable(cls=None, **kwargs): # noqa: C901 """ @@ -67,47 +121,11 @@ def new_init(self, *args, **kwargs): cls.__init__ = new_init - def add_observer(self, observer: Observer) -> None: - observing_keys = observer.get_observing_keys() - unknown_keys = [ - k for k in observing_keys if k not in self._observable_value_types - ] - if unknown_keys: - logger.warning(f"{unknown_keys} cannot be observed in {type(self)}") - for k in observing_keys: - if k in self._observers and observer not in self._observers[k]: - self._observers[k].append(observer) - return self - - cls.add_observer = add_observer - - def add_observers(self, observers: List[Observer]) -> None: - for observer in observers: - self.add_observer(observer) - return self - - cls.add_observers = add_observers - - def notify_observers(self, **kwargs): - for key, value in kwargs.items(): - if value is None: - # Allow optional reporting - continue - - assert key in self._observers, f"Unknown key: {key}" - - # TODO: Create a generic framework for type conversion - if self._observable_value_types[key] == torch.Tensor: - if not isinstance(value, torch.Tensor): - value = torch.tensor(value) - if len(value.shape) == 0: - value = value.reshape(1) - value = value.detach() + cls.add_observer = ObservableMixin.add_observer - for observer in self._observers[key]: - observer.update(key, value) + cls.add_observers = ObservableMixin.add_observers - cls.notify_observers = notify_observers + cls.notify_observers = ObservableMixin.notify_observers return cls diff --git a/reagent/core/types.py b/reagent/core/types.py new file mode 100644 index 000000000..66c098109 --- /dev/null +++ b/reagent/core/types.py @@ -0,0 +1,1155 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
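A minimal usage sketch for the ObservableMixin introduced in reagent/core/tracker.py above; the reporter class, observed key, and loss_observer below are hypothetical and only illustrate the add_observer / notify_observers flow, they are not part of this patch:

    import torch
    from reagent.core.tracker import ObservableMixin

    class LossReporter(ObservableMixin):
        @property
        def _observable_value_types(self):
            return {"td_loss": torch.Tensor}

    reporter = LossReporter()
    reporter.add_observer(loss_observer)    # loss_observer: any tracker.Observer (assumed to exist)
    reporter.notify_observers(td_loss=0.5)  # scalar is converted to a detached 1-element tensor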
+ +import dataclasses +import logging + +# The dataclasses in this file should be vanilla dataclass to have minimal overhead +from dataclasses import dataclass, field +from typing import Dict, Final, List, NamedTuple, Optional, Tuple + +# Triggering registration to registries +import reagent.core.result_types # noqa +import torch +import torch.nn.functional as F +from reagent.core.base_dataclass import BaseDataClass +from reagent.core.dataclasses import dataclass as pydantic_dataclass +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.torch_utils import gather +from reagent.model_utils.seq2slate_utils import DECODER_START_SYMBOL, subsequent_mask +from reagent.preprocessing.types import InputColumn +from torchrec import PoolingType +from torchrec.sparse.jagged_tensor import KeyedJaggedTensor + + +if IS_FB_ENVIRONMENT: + import reagent.core.fb.fb_result_types # noqa + + +class NoDuplicatedWarningLogger: + def __init__(self, logger): + self.logger = logger + self.msg = set() + + def warning(self, msg): + if msg not in self.msg: + self.logger.warning(msg) + self.msg.add(msg) + + +logger = logging.getLogger(__name__) +no_dup_logger = NoDuplicatedWarningLogger(logger) + + +def isinstance_namedtuple(x): + return isinstance(x, tuple) and hasattr(x, "_fields") + + +@dataclass +class TensorDataClass(BaseDataClass): + def __getattr__(self, attr): + if attr.startswith("__") and attr.endswith("__"): + raise AttributeError + + tensor_attr = getattr(torch.Tensor, attr, None) + + if tensor_attr is None or not callable(tensor_attr): + # TODO: can we get this working well with jupyter? + logger.error( + f"Attempting to call {self.__class__.__name__}.{attr} on " + f"{type(self)} (instance of TensorDataClass)." + ) + if tensor_attr is None: + raise AttributeError( + f"{self.__class__.__name__}doesn't have {attr} attribute." + ) + else: + raise RuntimeError(f"{self.__class__.__name__}.{attr} is not callable.") + + def continuation(*args, **kwargs): + def f(v): + # if possible, returns v.attr(*args, **kwargs). 
+ # otws, return v + if ( + isinstance(v, (torch.Tensor, TensorDataClass, KeyedJaggedTensor)) + and getattr(v, attr, None) is not None + ): + return getattr(v, attr)(*args, **kwargs) + elif isinstance(v, dict): + return {kk: f(vv) for kk, vv in v.items()} + elif isinstance(v, tuple): + return tuple(f(vv) for vv in v) + return v + + return type(self)(**f(self.__dict__)) + + return continuation + + def cuda(self, *args, **kwargs): + cuda_tensor = {} + for k, v in self.__dict__.items(): # noqa F402 + if isinstance(v, torch.Tensor): + kwargs["non_blocking"] = kwargs.get("non_blocking", True) + cuda_tensor[k] = v.cuda(*args, **kwargs) + elif isinstance(v, TensorDataClass): + cuda_tensor[k] = v.cuda(*args, **kwargs) + else: + cuda_tensor[k] = v + return type(self)(**cuda_tensor) + + def cpu(self): + cpu_tensor = {} + for k, v in self.__dict__.items(): # noqa F402 + if isinstance(v, (torch.Tensor, TensorDataClass)): + cpu_tensor[k] = v.cpu() + else: + cpu_tensor[k] = v + return type(self)(**cpu_tensor) + + +# (offset, value) +IdListFeatureValue = Tuple[torch.Tensor, torch.Tensor] +# (offset, key, value) +IdScoreListFeatureValue = Tuple[torch.Tensor, torch.Tensor, torch.Tensor] +# name -> value +IdListFeature = Dict[str, IdListFeatureValue] +IdScoreListFeature = Dict[str, IdScoreListFeatureValue] +# id -> value +ServingIdListFeature = Dict[int, IdListFeatureValue] +ServingIdScoreListFeature = Dict[int, IdScoreListFeatureValue] + + +##### +# FIXME: These config types are misplaced but we need to write FBL config adapter +# if we moved them. +###### + + +@pydantic_dataclass +class IdListFeatureConfig(BaseDataClass): + # Feature name + name: str + # integer feature ID + feature_id: int + # Name of the embedding table to use. Multiple feature ids may share + # the same embedding table. + id_mapping_name: str + + +@pydantic_dataclass +class IdScoreListFeatureConfig(BaseDataClass): + # Feature name + name: str + # Integer feature ID + feature_id: int + # Name of the embedding table to use. Multiple feature ids may share + # the same embedding table. + id_mapping_name: str + + +@pydantic_dataclass +class FloatFeatureInfo(BaseDataClass): + name: str + feature_id: int + + +@pydantic_dataclass +class IdMappingConfig: + # Embedding table size. 
+ embedding_table_size: int + + # Output embedding dimensions + embedding_dim: int + + # Whether to perform hashing to make id fall in the range of embedding_table_size + # If False, the user is at their own risk of raw ids going beyond the range + hashing: bool = True + + pooling_type: PoolingType = PoolingType.MEAN + + def __eq__(self, other): + return ( + self.embedding_table_size == other.embedding_table_size + and self.embedding_dim == other.embedding_dim + and self.hashing == other.hashing + and self.pooling_type == other.pooling_type + ) + + +@pydantic_dataclass +class ModelFeatureConfig(BaseDataClass): + float_feature_infos: List[FloatFeatureInfo] = field(default_factory=list) + # id_mapping_name -> id mapping config + id_mapping_config: Dict[str, IdMappingConfig] = field(default_factory=dict) + # id_list_feature_configs is feature_id -> list of values + id_list_feature_configs: List[IdListFeatureConfig] = field(default_factory=list) + # id_score_list_feature_configs is feature_id -> (keys -> values) + id_score_list_feature_configs: List[IdScoreListFeatureConfig] = field( + default_factory=list + ) + + def __post_init_post_parse__(self): + both_lists = self.id_list_feature_configs + self.id_score_list_feature_configs + if not self.only_dense: + # sanity check for keys in mapping config + ids = [config.feature_id for config in both_lists] + names = [config.name for config in both_lists] + assert len(ids) == len(set(ids)), f"duplicates in ids: {ids}" + assert len(names) == len(set(names)), f"duplicates in names: {names}" + assert len(ids) == len(names), f"{len(ids)} != {len(names)}" + id_mapping_names = [config.id_mapping_name for config in both_lists] + assert set(id_mapping_names) == set(self.id_mapping_config.keys()), ( + f"id_mapping_names in id_list_feature_configs/id_score_list_feature_configs " + f"({set(id_mapping_names)}) not match with those in " + f"id_mapping_config ({set(self.id_mapping_config.keys())})" + ) + + self._id2name = {config.feature_id: config.name for config in both_lists} + self._name2id = {config.name: config.feature_id for config in both_lists} + self._id2config = {config.feature_id: config for config in both_lists} + self._name2config = {config.name: config for config in both_lists} + + @property + def only_dense(self): + return not (self.id_list_feature_configs or self.id_score_list_feature_configs) + + @property + def id2name(self): + return self._id2name + + @property + def name2id(self): + return self._name2id + + @property + def id2config(self): + return self._id2config + + @property + def name2config(self): + return self._name2config + + +###### +# dataclasses for internal API +###### + + +@dataclass +class ValuePresence(TensorDataClass): + value: torch.Tensor + presence: Optional[torch.Tensor] + + +@dataclass +class ActorOutput(TensorDataClass): + action: torch.Tensor + log_prob: Optional[torch.Tensor] = None + squashed_mean: Optional[torch.Tensor] = None + + +@dataclass +class DocList(TensorDataClass): + # the shape is (batch_size, num_candidates, num_document_features) + float_features: torch.Tensor + # the shapes below are (batch_size, num_candidates) + # mask indicates whether the candidate is present or not; its dtype is torch.bool + # pyre-fixme[8]: Attribute has type `Tensor`; used as `None`. + mask: torch.Tensor = None + # value is context dependent; it could be action probability or the score + # of the document from another model + # pyre-fixme[8]: Attribute has type `Tensor`; used as `None`. 
+ value: torch.Tensor = None + + def __post_init__(self): + assert ( + len(self.float_features.shape) == 3 + ), f"Unexpected shape: {self.float_features.shape}" + if self.mask is None: + self.mask = self.float_features.new_ones( + self.float_features.shape[:2], dtype=torch.bool + ) + if self.value is None: + self.value = self.float_features.new_ones(self.float_features.shape[:2]) + + @torch.no_grad() + def select_slate(self, action: torch.Tensor): + row_idx = torch.repeat_interleave( + torch.arange(action.shape[0]).unsqueeze(1), action.shape[1], dim=1 + ) + mask = self.mask[row_idx, action] + float_features = self.float_features[row_idx, action] + value = self.value[row_idx, action] + return DocList(float_features, mask, value) + + def as_feature_data(self): + _batch_size, _slate_size, feature_dim = self.float_features.shape + return FeatureData(self.float_features.view(-1, feature_dim)) + + +# This method contains dynamic control flow +# Use torch.fx.wrap to mark it as a leaf module for FX tracing +@torch.fx.wrap +def run_post_init_validation( + float_features: torch.Tensor, +) -> None: + usage: str = ( + "For sequence features, use `stacked_float_features`." + + "For document features, use `candidate_doc_float_features`." + ) + + if float_features.ndim == 3: + if not torch.jit.is_scripting(): + no_dup_logger.warning(f"`float_features` should be 2D.\n{usage}") + pass + elif float_features.ndim != 2: + raise ValueError( + f"float_features should be 2D; got {float_features.shape}.\n{usage}" + ) + + +@dataclass +class FeatureData(TensorDataClass): + # For dense features, shape is (batch_size, feature_dim) + float_features: torch.Tensor + # For sparse features saved in KeyedJaggedTensor format + id_list_features: Optional[KeyedJaggedTensor] = None + id_score_list_features: Optional[KeyedJaggedTensor] = None + + # For sparse features saved in dictionary format + id_list_features_raw: IdListFeature = dataclasses.field(default_factory=dict) + id_score_list_features_raw: IdScoreListFeature = dataclasses.field( + default_factory=dict + ) + + # For sequence, shape is (stack_size, batch_size, feature_dim) + stacked_float_features: Optional[torch.Tensor] = None + # For ranking algos, + candidate_docs: Optional[DocList] = None + # Experimental: sticking this here instead of putting it in float_features + # because a lot of places derive the shape of float_features from + # normalization parameters. + time_since_first: Optional[torch.Tensor] = None + + def __post_init__(self): + run_post_init_validation( + float_features=self.float_features, + ) + + @property + def has_float_features_only(self) -> bool: + return ( + not self.id_list_features + and not self.id_score_list_features + and self.time_since_first is None + and self.candidate_docs is None + ) + + def get_tiled_batch(self, num_tiles: int): + assert ( + self.has_float_features_only + ), f"only works for float features now: {self}" + """ + tiled_feature should be (batch_size * num_tiles, feature_dim) + forall i in [batch_size], + tiled_feature[i*num_tiles:(i+1)*num_tiles] should be feat[i] + """ + feat = self.float_features + assert ( + len(feat.shape) == 2 + ), f"Need feat shape to be (batch_size, feature_dim), got {feat.shape}." 
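+        # Illustrative example with hypothetical values (not part of this change):
+        # if feat = [[a], [b]] and num_tiles = 2, repeat_interleave along dim 0
+        # yields tiled_feat = [[a], [a], [b], [b]], so row i of feat occupies the
+        # contiguous block tiled_feat[i*num_tiles:(i+1)*num_tiles].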
+ batch_size, _ = feat.shape + tiled_feat = feat.repeat_interleave(repeats=num_tiles, dim=0) + return FeatureData(float_features=tiled_feat) + + def concat_user_doc(self): + assert not self.has_float_features_only, "only works when DocList present" + assert self.float_features.dim() == 2 # batch_size x state_dim + batch_size, state_dim = self.float_features.shape + # batch_size x num_docs x candidate_dim + assert self.candidate_docs.float_features.dim() == 3 + assert len(self.candidate_docs.float_features) == batch_size + _, num_docs, candidate_dim = self.candidate_docs.float_features.shape + state_tiled = ( + torch.repeat_interleave(self.float_features, num_docs, dim=0) + .reshape(batch_size, num_docs, state_dim) + .float() + ) + return torch.cat((state_tiled, self.candidate_docs.float_features), dim=2) + + def get_ranking_state(self, has_user_feat: bool): + if has_user_feat: + return self.concat_user_doc() + else: + # pyre-fixme[16]: `Optional` has no attribute `float_features`. + return self.candidate_docs.float_features.float() + + +def _embed_states(x: FeatureData) -> FeatureData: + """ + Get dense feature from float and doc features. + TODO: make this an embedder. + """ + assert x.candidate_docs is not None + + def _concat_state_candidates(state: torch.Tensor, candidates: torch.Tensor): + """ + Expect + state.shape = (n, state_dim), + candidate.shape = (n, num_candidates, candidate_dim), + + Result has shape (n, state_dim + candidate_dim) + [state, mean of candidates] + """ + n = state.shape[0] + assert len(state.shape) == 2, f"{state.shape} != (batch_size, user_dim)" + assert ( + len(candidates.shape) == 3 + ), f"{candidates.shape} != (batch_size, num_candidates, candidate_dim)" + assert candidates.shape[0] == n, f"{candidates.shape} 0th dim != {n}" + # TODO: have an embedder here + # NOTE: mean aggregation is not very effective here + candidates_embedding = candidates.view(n, -1) + return torch.cat([state, candidates_embedding], dim=1) + + return FeatureData( + float_features=_concat_state_candidates( + x.float_features, + x.candidate_docs.float_features, + ) + ) + + +class TensorFeatureData(torch.nn.Module): + """ + Primarily for using in nn.Sequential + """ + + def forward(self, input: torch.Tensor) -> FeatureData: + assert isinstance(input, torch.Tensor) + return FeatureData(input) + + +class ServingFeatureData(NamedTuple): + float_features_with_presence: Tuple[torch.Tensor, torch.Tensor] + id_list_features: ServingIdListFeature + id_score_list_features: ServingIdScoreListFeature + + +@dataclass +class ExtraData(TensorDataClass): + mdp_id: Optional[torch.Tensor] = None + sequence_number: Optional[torch.Tensor] = None + action_probability: Optional[torch.Tensor] = None + max_num_actions: Optional[int] = None + metrics: Optional[torch.Tensor] = None + + @classmethod + def from_dict(cls, d): + return cls(**{f.name: d.get(f.name, None) for f in dataclasses.fields(cls)}) + + +@dataclass +class PreprocessedRankingInput(TensorDataClass): + state: FeatureData + src_seq: FeatureData + src_src_mask: Optional[torch.Tensor] = None + tgt_in_seq: Optional[FeatureData] = None + tgt_out_seq: Optional[FeatureData] = None + tgt_tgt_mask: Optional[torch.Tensor] = None + slate_reward: Optional[torch.Tensor] = None + position_reward: Optional[torch.Tensor] = None + # all indices will be +2 to account for padding + # symbol (0) and decoder_start_symbol (1) + src_in_idx: Optional[torch.Tensor] = None + tgt_in_idx: Optional[torch.Tensor] = None + tgt_out_idx: Optional[torch.Tensor] = None + 
tgt_out_probs: Optional[torch.Tensor] = None + # store ground-truth target sequences + optim_tgt_in_idx: Optional[torch.Tensor] = None + optim_tgt_out_idx: Optional[torch.Tensor] = None + optim_tgt_in_seq: Optional[FeatureData] = None + optim_tgt_out_seq: Optional[FeatureData] = None + extras: Optional[ExtraData] = field(default_factory=ExtraData) + + def batch_size(self) -> int: + return self.state.float_features.size()[0] + + def __len__(self) -> int: + return self.batch_size() + + @classmethod + def from_input( + cls, + state: torch.Tensor, + candidates: torch.Tensor, + device: torch.device, + action: Optional[torch.Tensor] = None, + optimal_action: Optional[torch.Tensor] = None, + logged_propensities: Optional[torch.Tensor] = None, + slate_reward: Optional[torch.Tensor] = None, + position_reward: Optional[torch.Tensor] = None, + extras: Optional[ExtraData] = None, + ): + """ + Build derived fields (indices & masks) from raw input + """ + # Shape checking + assert len(state.shape) == 2 + assert len(candidates.shape) == 3 + state = state.to(device) + candidates = candidates.to(device) + + if action is not None: + assert len(action.shape) == 2 + action = action.to(device) + if logged_propensities is not None: + assert ( + len(logged_propensities.shape) == 2 + and logged_propensities.shape[1] == 1 + ) + logged_propensities = logged_propensities.to(device) + + batch_size, candidate_num, candidate_dim = candidates.shape + if slate_reward is not None: + assert len(slate_reward.shape) == 2 and slate_reward.shape[1] == 1 + slate_reward = slate_reward.to(device) + if position_reward is not None: + # pyre-fixme[16]: `Optional` has no attribute `shape`. + assert position_reward.shape == action.shape + position_reward = position_reward.to(device) + + src_in_idx = ( + torch.arange(candidate_num, device=device).repeat(batch_size, 1) + 2 + ) + src_src_mask = ( + (torch.ones(batch_size, candidate_num, candidate_num)) + .type(torch.int8) + .to(device) + ) + + def process_tgt_seq(action): + if action is not None: + _, output_size = action.shape + # Account for decoder starting symbol and padding symbol + candidates_augment = torch.cat( + ( + torch.zeros(batch_size, 2, candidate_dim, device=device), + candidates, + ), + dim=1, + ) + tgt_out_idx = action + 2 + tgt_in_idx = torch.full( + (batch_size, output_size), DECODER_START_SYMBOL, device=device + ) + tgt_in_idx[:, 1:] = tgt_out_idx[:, :-1] + tgt_out_seq = gather(candidates_augment, tgt_out_idx) + tgt_in_seq = torch.zeros( + batch_size, output_size, candidate_dim, device=device + ) + tgt_in_seq[:, 1:] = tgt_out_seq[:, :-1] + tgt_tgt_mask = subsequent_mask(output_size, device) + else: + tgt_in_idx = None + tgt_out_idx = None + tgt_in_seq = None + tgt_out_seq = None + tgt_tgt_mask = None + + return tgt_in_idx, tgt_out_idx, tgt_in_seq, tgt_out_seq, tgt_tgt_mask + + ( + tgt_in_idx, + tgt_out_idx, + tgt_in_seq, + tgt_out_seq, + tgt_tgt_mask, + ) = process_tgt_seq(action) + ( + optim_tgt_in_idx, + optim_tgt_out_idx, + optim_tgt_in_seq, + optim_tgt_out_seq, + _, + ) = process_tgt_seq(optimal_action) + + return cls.from_tensors( + state=state, + src_seq=candidates, + src_src_mask=src_src_mask, + tgt_in_seq=tgt_in_seq, + tgt_out_seq=tgt_out_seq, + tgt_tgt_mask=tgt_tgt_mask, + slate_reward=slate_reward, + position_reward=position_reward, + src_in_idx=src_in_idx, + tgt_in_idx=tgt_in_idx, + tgt_out_idx=tgt_out_idx, + tgt_out_probs=logged_propensities, + optim_tgt_in_idx=optim_tgt_in_idx, + optim_tgt_out_idx=optim_tgt_out_idx, + optim_tgt_in_seq=optim_tgt_in_seq, + 
optim_tgt_out_seq=optim_tgt_out_seq, + extras=extras, + ) + + @classmethod + def from_tensors( + cls, + state: torch.Tensor, + src_seq: torch.Tensor, + src_src_mask: Optional[torch.Tensor] = None, + tgt_in_seq: Optional[torch.Tensor] = None, + tgt_out_seq: Optional[torch.Tensor] = None, + tgt_tgt_mask: Optional[torch.Tensor] = None, + slate_reward: Optional[torch.Tensor] = None, + position_reward: Optional[torch.Tensor] = None, + src_in_idx: Optional[torch.Tensor] = None, + tgt_in_idx: Optional[torch.Tensor] = None, + tgt_out_idx: Optional[torch.Tensor] = None, + tgt_out_probs: Optional[torch.Tensor] = None, + optim_tgt_in_idx: Optional[torch.Tensor] = None, + optim_tgt_out_idx: Optional[torch.Tensor] = None, + optim_tgt_in_seq: Optional[torch.Tensor] = None, + optim_tgt_out_seq: Optional[torch.Tensor] = None, + extras: Optional[ExtraData] = None, + **kwargs, + ): + assert isinstance(state, torch.Tensor) + assert isinstance(src_seq, torch.Tensor) + assert src_src_mask is None or isinstance(src_src_mask, torch.Tensor) + assert tgt_in_seq is None or isinstance(tgt_in_seq, torch.Tensor) + assert tgt_out_seq is None or isinstance(tgt_out_seq, torch.Tensor) + assert tgt_tgt_mask is None or isinstance(tgt_tgt_mask, torch.Tensor) + assert slate_reward is None or isinstance(slate_reward, torch.Tensor) + assert position_reward is None or isinstance(position_reward, torch.Tensor) + assert src_in_idx is None or isinstance(src_in_idx, torch.Tensor) + assert tgt_in_idx is None or isinstance(tgt_in_idx, torch.Tensor) + assert tgt_out_idx is None or isinstance(tgt_out_idx, torch.Tensor) + assert tgt_out_probs is None or isinstance(tgt_out_probs, torch.Tensor) + assert optim_tgt_out_idx is None or isinstance(optim_tgt_out_idx, torch.Tensor) + assert optim_tgt_out_idx is None or isinstance(optim_tgt_out_idx, torch.Tensor) + assert optim_tgt_in_seq is None or isinstance(optim_tgt_in_seq, torch.Tensor) + assert optim_tgt_out_seq is None or isinstance(optim_tgt_out_seq, torch.Tensor) + assert extras is None or isinstance(extras, ExtraData) + + return cls( + state=FeatureData(float_features=state), + src_seq=FeatureData(float_features=src_seq), + src_src_mask=src_src_mask, + tgt_in_seq=FeatureData(float_features=tgt_in_seq) + if tgt_in_seq is not None + else None, + tgt_out_seq=FeatureData(float_features=tgt_out_seq) + if tgt_out_seq is not None + else None, + tgt_tgt_mask=tgt_tgt_mask, + slate_reward=slate_reward, + position_reward=position_reward, + src_in_idx=src_in_idx, + tgt_in_idx=tgt_in_idx, + tgt_out_idx=tgt_out_idx, + tgt_out_probs=tgt_out_probs, + optim_tgt_in_idx=optim_tgt_in_idx, + optim_tgt_out_idx=optim_tgt_out_idx, + optim_tgt_in_seq=FeatureData(float_features=optim_tgt_in_seq) + if optim_tgt_in_seq is not None + else None, + optim_tgt_out_seq=FeatureData(float_features=optim_tgt_out_seq) + if optim_tgt_out_seq is not None + else None, + extras=extras if extras is not None else None, + ) + + def __post_init__(self): + if ( + isinstance(self.state, torch.Tensor) + or isinstance(self.src_seq, torch.Tensor) + or isinstance(self.tgt_in_seq, torch.Tensor) + or isinstance(self.tgt_out_seq, torch.Tensor) + or isinstance(self.optim_tgt_in_seq, torch.Tensor) + or isinstance(self.optim_tgt_out_seq, torch.Tensor) + ): + raise ValueError( + f"Use from_tensors() {type(self.state)} {type(self.src_seq)} " + f"{type(self.tgt_in_seq)} {type(self.tgt_out_seq)} " + f"{type(self.optim_tgt_in_seq)} {type(self.optim_tgt_out_seq)} " + ) + + +@dataclass +class BaseInput(TensorDataClass): + """ + Base class for all 
inputs, both raw and preprocessed + """ + + state: FeatureData + next_state: FeatureData + reward: torch.Tensor + time_diff: torch.Tensor + step: Optional[torch.Tensor] + not_terminal: torch.Tensor + + def __len__(self): + assert self.state.float_features.ndim == 2 + return self.state.float_features.size()[0] + + def batch_size(self): + return len(self) + + def as_dict_shallow(self): + return { + "state": self.state, + "next_state": self.next_state, + "reward": self.reward, + "time_diff": self.time_diff, + "step": self.step, + "not_terminal": self.not_terminal, + } + + @staticmethod + def from_dict(batch): + id_list_features = batch.get(InputColumn.STATE_ID_LIST_FEATURES, None) + id_score_list_features = batch.get( + InputColumn.STATE_ID_SCORE_LIST_FEATURES, None + ) + next_id_list_features = batch.get(InputColumn.NEXT_STATE_ID_LIST_FEATURES, None) + next_id_score_list_features = batch.get( + InputColumn.NEXT_STATE_ID_SCORE_LIST_FEATURES, None + ) + # TODO: handle value/mask of DocList + filler_mask_val = None + doc_list = None + candidate_features = batch.get(InputColumn.CANDIDATE_FEATURES, None) + if candidate_features is not None: + filler_mask_val = torch.zeros( + (candidate_features.shape[0], candidate_features.shape[1]) + ) + doc_list = DocList( + float_features=candidate_features, + mask=filler_mask_val.clone().bool(), + value=filler_mask_val.clone().float(), + ) + + next_doc_list = None + next_candidate_features = batch.get(InputColumn.NEXT_CANDIDATE_FEATURES, None) + if next_candidate_features is not None: + assert filler_mask_val is not None + next_doc_list = DocList( + float_features=next_candidate_features, + mask=filler_mask_val.clone().bool(), + value=filler_mask_val.clone().float(), + ) + + return BaseInput( + state=FeatureData( + float_features=batch[InputColumn.STATE_FEATURES], + id_list_features=id_list_features, + id_score_list_features=id_score_list_features, + candidate_docs=doc_list, + ), + next_state=FeatureData( + float_features=batch[InputColumn.NEXT_STATE_FEATURES], + id_list_features=next_id_list_features, + id_score_list_features=next_id_score_list_features, + candidate_docs=next_doc_list, + ), + reward=batch[InputColumn.REWARD], + time_diff=batch[InputColumn.TIME_DIFF], + step=batch.get(InputColumn.STEP, None), + not_terminal=batch[InputColumn.NOT_TERMINAL], + ) + + +@dataclass +class DiscreteDqnInput(BaseInput): + """ + See input_prototype for DQN expected input shapes + """ + + action: torch.Tensor + next_action: torch.Tensor + possible_actions_mask: torch.Tensor + possible_next_actions_mask: torch.Tensor + extras: ExtraData + + @classmethod + def input_prototype(cls, action_dim=2, batch_size=10, state_dim=3): + return cls( + state=FeatureData(float_features=torch.randn(batch_size, state_dim)), + next_state=FeatureData(float_features=torch.randn(batch_size, state_dim)), + reward=torch.rand(batch_size, 1), + time_diff=torch.ones(batch_size, 1), + step=torch.ones(batch_size, 1), + not_terminal=torch.ones(batch_size, 1), + action=F.one_hot( + torch.randint(high=action_dim, size=(batch_size,)), + num_classes=action_dim, + ), + next_action=F.one_hot( + torch.randint(high=action_dim, size=(batch_size,)), + num_classes=action_dim, + ), + possible_actions_mask=torch.ones(batch_size, action_dim), + possible_next_actions_mask=torch.ones(batch_size, action_dim), + extras=ExtraData(action_probability=torch.ones(batch_size, 1)), + ) + + @classmethod + def from_dict(cls, batch): + base = super().from_dict(batch) + return cls( + action=batch[InputColumn.ACTION], + 
next_action=batch[InputColumn.NEXT_ACTION], + possible_actions_mask=batch[InputColumn.POSSIBLE_ACTIONS_MASK], + possible_next_actions_mask=batch[InputColumn.POSSIBLE_NEXT_ACTIONS_MASK], + extras=ExtraData.from_dict(batch), + **base.as_dict_shallow(), + ) + + +@dataclass +class SlateQInput(BaseInput): + """ + The shapes of `reward`, `reward_mask`, & `next_item_mask` are + `(batch_size, slate_size)`. + + `reward_mask` indicated whether the reward could be observed, e.g., + the item got into viewport or not. + """ + + action: torch.Tensor + next_action: torch.Tensor + reward_mask: torch.Tensor + extras: Optional[ExtraData] = None + + @classmethod + def from_dict(cls, d): + action = d["action"] + next_action = d["next_action"] + return cls( + state=FeatureData( + float_features=d["state_features"], + candidate_docs=DocList( + float_features=d["candidate_features"], + mask=d["item_mask"], + value=d["item_probability"], + ), + ), + next_state=FeatureData( + float_features=d["next_state_features"], + candidate_docs=DocList( + float_features=d["next_candidate_features"], + mask=d["next_item_mask"], + value=d["next_item_probability"], + ), + ), + action=action, + next_action=next_action, + reward=d["position_reward"], + reward_mask=d["reward_mask"], + time_diff=d["time_diff"], + not_terminal=d["not_terminal"], + step=None, + extras=ExtraData.from_dict(d), + ) + + +@dataclass +class ParametricDqnInput(BaseInput): + action: FeatureData + next_action: FeatureData + possible_actions: FeatureData + possible_actions_mask: torch.Tensor + possible_next_actions: FeatureData + possible_next_actions_mask: torch.Tensor + extras: Optional[ExtraData] = None + weight: Optional[torch.Tensor] = None + + @classmethod + def from_dict(cls, batch): + return cls( + state=FeatureData(float_features=batch["state_features"]), + action=FeatureData(float_features=batch["action"]), + next_state=FeatureData(float_features=batch["next_state_features"]), + next_action=FeatureData(float_features=batch["next_action"]), + possible_actions=FeatureData(float_features=batch["possible_actions"]), + possible_actions_mask=batch["possible_actions_mask"], + possible_next_actions=FeatureData( + float_features=batch["possible_next_actions"] + ), + possible_next_actions_mask=batch["possible_next_actions_mask"], + reward=batch["reward"], + not_terminal=batch["not_terminal"], + time_diff=batch["time_diff"], + step=batch["step"], + extras=batch["extras"], + weight=batch.get("weight", None), + ) + + +@dataclass +class PolicyNetworkInput(BaseInput): + action: FeatureData + next_action: FeatureData + extras: Optional[ExtraData] = None + + @classmethod + def from_dict(cls, batch): + base = super().from_dict(batch) + # TODO: Implement ExtraData.from_dict + extras = batch.get("extras", None) + return cls( + action=FeatureData(float_features=batch["action"]), + next_action=FeatureData(float_features=batch["next_action"]), + extras=extras, + **base.as_dict_shallow(), + ) + + +@dataclass +class PolicyGradientInput(TensorDataClass): + """ + See input_prototype for expected input dimensions + """ + + state: FeatureData + action: torch.Tensor + reward: torch.Tensor + log_prob: torch.Tensor + possible_actions_mask: Optional[torch.Tensor] = None + + @classmethod + def input_prototype(cls, action_dim=2, batch_size=10, state_dim=3): + return cls( + state=FeatureData(float_features=torch.randn(batch_size, state_dim)), + action=F.one_hot( + torch.randint(high=action_dim, size=(batch_size,)), + num_classes=action_dim, + ), + reward=torch.rand(batch_size), + 
log_prob=torch.log(torch.rand(batch_size)), + possible_actions_mask=torch.ones(batch_size, action_dim), + ) + + @classmethod + def from_dict(cls, d: Dict[str, torch.Tensor]): + # TODO: rename "observation" to "state" in Transition and return cls(**d) + return cls( + state=FeatureData(float_features=d["observation"]), + action=d["action"], + reward=d["reward"], + log_prob=d["log_prob"], + possible_actions_mask=d.get("possible_actions_mask", None), + ) + + def __len__(self): + assert self.action.ndim == 2 + return len(self.action) + + def batch_size(self): + return len(self) + + +@dataclass +class BanditRewardModelInput(TensorDataClass): + state: FeatureData + action: torch.Tensor + reward: torch.Tensor + action_prob: Optional[torch.Tensor] = None + + @classmethod + def from_dict(cls, batch: Dict[str, torch.Tensor]): + return cls( + state=FeatureData(float_features=batch["state_features"]), + action=batch["action"], + reward=batch["reward"], + action_prob=batch.get("action_probability", None), + ) + + def batch_size(self): + assert self.state.float_features.ndim == 2 + return self.state.float_features.size()[0] + + +@dataclass +class BehavioralCloningModelInput(TensorDataClass): + state: FeatureData + action: torch.Tensor + possible_actions_mask: Optional[torch.Tensor] = None + + @classmethod + def from_dict(cls, batch: Dict[str, torch.Tensor]): + return cls( + state=FeatureData(float_features=batch["state"]), + action=batch["action"], + possible_actions_mask=batch.get("possible_actions_mask", None), + ) + + def batch_size(self): + assert self.state.float_features.ndim == 2 + return self.state.float_features.size()[0] + + +@dataclass +class MemoryNetworkInput(BaseInput): + action: FeatureData + valid_step: Optional[torch.Tensor] = None + extras: ExtraData = field(default_factory=ExtraData) + + @classmethod + def from_dict(cls, d): + return cls( + state=FeatureData( + float_features=d["state"], + ), + next_state=FeatureData( + float_features=d["next_state"], + ), + action=FeatureData(float_features=d["action"]), + reward=d["reward"], + time_diff=d["time_diff"], + not_terminal=d["not_terminal"], + step=d["step"], + extras=ExtraData.from_dict(d), + ) + + def __len__(self): + if len(self.state.float_features.size()) == 2: + return self.state.float_features.size()[0] + elif len(self.state.float_features.size()) == 3: + return self.state.float_features.size()[1] + else: + raise NotImplementedError() + + +@dataclass +class SlateScoreBatch: + mdp_id: torch.Tensor + sequence_number: torch.Tensor + scores: torch.Tensor + training_input: PolicyGradientInput + + +@dataclass +class MemoryNetworkOutput(TensorDataClass): + mus: torch.Tensor + sigmas: torch.Tensor + logpi: torch.Tensor + reward: torch.Tensor + not_terminal: torch.Tensor + last_step_lstm_hidden: torch.Tensor + last_step_lstm_cell: torch.Tensor + all_steps_lstm_hidden: torch.Tensor + + +@dataclass +class Seq2RewardOutput(TensorDataClass): + acc_reward: torch.Tensor + + +@dataclass +class DqnPolicyActionSet(TensorDataClass): + greedy: int + softmax: Optional[int] = None + greedy_act_name: Optional[str] = None + softmax_act_name: Optional[str] = None + softmax_act_prob: Optional[float] = None + + +@dataclass +class PlanningPolicyOutput(TensorDataClass): + # best action to take next + next_best_continuous_action: Optional[torch.Tensor] = None + next_best_discrete_action_one_hot: Optional[torch.Tensor] = None + next_best_discrete_action_idx: Optional[int] = None + + +@dataclass +class RankingOutput(TensorDataClass): + # a tensor of integer 
indices w.r.t. to possible candidates + # the values are offset by 2 to account for padding and decoder-starter symbol + # shape: batch_size, tgt_seq_len + # e.g., there are candidates C0, C1, C2, C3, C4, and the ranked order is + # C4, C1, C2, C3, C0. Then the ranked_tgt_out_idx = [6, 3, 4, 5, 2] + ranked_tgt_out_idx: Optional[torch.Tensor] = None + + # generative probability of ranked tgt sequences at each decoding step + # shape: batch_size, tgt_seq_len, candidate_size + ranked_per_symbol_probs: Optional[torch.Tensor] = None + + # generative probability of ranked tgt sequences + # shape: batch_size, 1 + ranked_per_seq_probs: Optional[torch.Tensor] = None + + # log probabilities of given tgt sequences are used in REINFORCE + # shape: batch_size, 1 if Seq2SlateMode == PER_SEQ_LOG_PROB_MODE + # shape: batch_size, tgt_seq_len if Seq2SlateMode == PER_SYMBOL_LOG_PROB_DIST_MODE + log_probs: Optional[torch.Tensor] = None + # encoder scores in tgt_out_idx order + encoder_scores: Optional[torch.Tensor] = None + + +@dataclass +class RewardNetworkOutput(TensorDataClass): + predicted_reward: torch.Tensor + + +@dataclass +class SyntheticRewardNetworkOutput(TensorDataClass): + predicted_reward: torch.Tensor + mask: torch.Tensor + output: torch.Tensor + + +@dataclass +class FrechetSortConfig: + shape: float + equiv_len: int + topk: Optional[int] = None + log_scores: bool = True + + +@dataclass +class CBInput(TensorDataClass): + context_arm_features: torch.Tensor + arm_presence: Final[Optional[torch.Tensor]] = None + action: Final[Optional[torch.Tensor]] = None + reward: Final[Optional[torch.Tensor]] = None + log_prob: Final[Optional[torch.Tensor]] = None + weight: Final[Optional[torch.Tensor]] = None + arms: Final[Optional[torch.Tensor]] = None + mdp_id: Final[Optional[torch.Tensor]] = None + + @classmethod + def input_prototype( + cls, + context_dim: int = 2, + batch_size: int = 10, + arm_features_dim: int = 3, + num_arms: int = 4, + ) -> "CBInput": + return cls( + context_arm_features=torch.randn(batch_size, num_arms, arm_features_dim) + ) + + @classmethod + def from_dict(cls, d: Dict[str, torch.Tensor]) -> "CBInput": + return cls( + context_arm_features=d["context_arm_features"], + arm_presence=d.get("arm_presence", None), + action=d.get("action", None), + reward=d.get("reward", None), + log_prob=d.get("log_prob", None), + weight=d.get("weight", None), + arms=d.get("arms", None), + mdp_id=d.get("mdp_id", None), + ) + + def __len__(self) -> int: + return self.context_arm_features.shape[0] + + @property + def device(self) -> torch.device: + return self.context_arm_features.device diff --git a/reagent/core/utils.py b/reagent/core/utils.py new file mode 100644 index 000000000..dc175ac74 --- /dev/null +++ b/reagent/core/utils.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +import pdb +import sys +from collections import defaultdict +from typing import Dict, List + +import reagent.core.types as rlt +import torch +from torchrec import EmbeddingBagConfig + +logger = logging.getLogger(__name__) + + +def embedding_bag_configs_from_feature_configs( + configs: List[rlt.ModelFeatureConfig], +) -> List[EmbeddingBagConfig]: + """ + Obtain a list of EmbeddingBagConfigs from multiple ModelFeatureConfigs. 
+ The returned list will be used for defining sparse model architectures + """ + merged_id_mapping_config: Dict[str, rlt.IdMappingConfig] = {} + for config in configs: + for id_mapping_name, id_mapping_config in config.id_mapping_config.items(): + if id_mapping_name in merged_id_mapping_config: + assert ( + merged_id_mapping_config[id_mapping_name] == id_mapping_config + ), f"Conflicting IdMappingConfigs for id_mapping_name={id_mapping_name}" + else: + merged_id_mapping_config[id_mapping_name] = id_mapping_config + + id_mapping_to_feature_names = defaultdict(list) + for config in configs: + for id_list_feature_config in config.id_list_feature_configs: + id_mapping_to_feature_names[id_list_feature_config.id_mapping_name].append( + id_list_feature_config.name + ) + for id_score_list_feature_config in config.id_score_list_feature_configs: + id_mapping_to_feature_names[ + id_score_list_feature_config.id_mapping_name + ].append(id_score_list_feature_config.name) + + embedding_bag_configs: List[EmbeddingBagConfig] = [] + for id_mapping_name, config in merged_id_mapping_config.items(): + embedding_bag_configs.append( + EmbeddingBagConfig( + name=id_mapping_name, + feature_names=id_mapping_to_feature_names[id_mapping_name], + num_embeddings=config.embedding_table_size, + embedding_dim=config.embedding_dim, + pooling=config.pooling_type, + ) + ) + logger.info(f"Generate EmbeddingBagConfigs: {embedding_bag_configs}") + return embedding_bag_configs + + +def get_rank() -> int: + """ + Returns the torch.distributed rank of the process. 0 represents + the main process and is the default if torch.distributed isn't set up + """ + return ( + torch.distributed.get_rank() + if torch.distributed.is_available() and torch.distributed.is_initialized() + else 0 + ) + + +class lazy_property(object): + """ + More or less copy-pasta: http://stackoverflow.com/a/6849299 + Meant to be used for lazy evaluation of an object attribute. + property should represent non-mutable data, as it replaces itself. + """ + + def __init__(self, fget): + self._fget = fget + self.__doc__ = fget.__doc__ + self.__name__ = fget.__name__ + + def __get__(self, obj, obj_cls_type): + if obj is None: + return None + value = self._fget(obj) + setattr(obj, self.__name__, value) + return value + + +class ForkedPdb(pdb.Pdb): + """A Pdb subclass that may be used + from a forked multiprocessing child + + """ + + def interaction(self, *args, **kwargs): + _stdin = sys.stdin + try: + sys.stdin = open("/dev/stdin") # noqa + pdb.Pdb.interaction(self, *args, **kwargs) + finally: + sys.stdin = _stdin diff --git a/reagent/data/__init__.py b/reagent/data/__init__.py new file mode 100644 index 000000000..dd6afa60c --- /dev/null +++ b/reagent/data/__init__.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from .data_fetcher import DataFetcher +from .manual_data_module import ManualDataModule +from .reagent_data_module import ReAgentDataModule + +__all__ = [ + "DataFetcher", + "ManualDataModule", + "ReAgentDataModule", +] diff --git a/reagent/data/data_fetcher.py b/reagent/data/data_fetcher.py new file mode 100644 index 000000000..21d038189 --- /dev/null +++ b/reagent/data/data_fetcher.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
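A minimal sketch of the lazy_property helper added in reagent/core/utils.py above; the TableStats class and expensive_count() are hypothetical stand-ins used only for illustration:

    from reagent.core.utils import lazy_property

    class TableStats:
        @lazy_property
        def row_count(self):
            # expensive_count() stands in for any costly one-off computation
            return expensive_count()

    stats = TableStats()
    stats.row_count  # first access runs the getter and caches the result on the instance
    stats.row_count  # later accesses are plain attribute lookups; the getter is not called again

On first access, __get__ stores the computed value on the instance, shadowing the descriptor, which is why the property must represent non-mutable data.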
+ +import logging +from typing import List, Optional, Tuple + +from reagent.workflow.types import Dataset, TableSpec + + +logger = logging.getLogger(__name__) + + +class DataFetcher: + def query_data( + self, + input_table_spec: TableSpec, + discrete_action: bool, + actions: Optional[List[str]] = None, + include_possible_actions=True, + custom_reward_expression: Optional[str] = None, + sample_range: Optional[Tuple[float, float]] = None, + multi_steps: Optional[int] = None, + gamma: Optional[float] = None, + ) -> Dataset: + raise NotImplementedError() + + def query_data_synthetic_reward( + self, + input_table_spec: TableSpec, + discrete_action_names: Optional[List[str]] = None, + sample_range: Optional[Tuple[float, float]] = None, + max_seq_len: Optional[int] = None, + ) -> Dataset: + raise NotImplementedError() diff --git a/reagent/data/manual_data_module.py b/reagent/data/manual_data_module.py new file mode 100644 index 000000000..4a0c0c84e --- /dev/null +++ b/reagent/data/manual_data_module.py @@ -0,0 +1,323 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import abc +import logging +import pickle +from typing import Dict, List, NamedTuple, Optional, Tuple + +logger = logging.getLogger(__name__) + + +try: + # pyre-fixme[21]: Could not find `petastorm`. + from petastorm import make_batch_reader + + # pyre-fixme[21]: Could not find module `petastorm.pytorch`. + # pyre-fixme[21]: Could not find module `petastorm.pytorch`. + from petastorm.pytorch import DataLoader, decimal_friendly_collate +except ModuleNotFoundError: + logger.warn("petastorm is not installed; please install if you want to use this") + + +from reagent.core.parameters import NormalizationData +from reagent.data.data_fetcher import DataFetcher +from reagent.data.oss_data_fetcher import OssDataFetcher +from reagent.preprocessing.batch_preprocessor import BatchPreprocessor +from reagent.workflow.types import ( + Dataset, + ReaderOptions, + ResourceOptions, + RewardOptions, + TableSpec, +) + +from .reagent_data_module import ReAgentDataModule + + +class TrainEvalSampleRanges(NamedTuple): + train_sample_range: Tuple[float, float] + eval_sample_range: Tuple[float, float] + + +def get_sample_range( + input_table_spec: TableSpec, calc_cpe_in_training: bool +) -> TrainEvalSampleRanges: + table_sample = input_table_spec.table_sample + eval_table_sample = input_table_spec.eval_table_sample + + if not calc_cpe_in_training: + # use all data if table sample = None + if table_sample is None: + train_sample_range = (0.0, 100.0) + else: + train_sample_range = (0.0, table_sample) + return TrainEvalSampleRanges( + train_sample_range=train_sample_range, + # eval samples will not be used + eval_sample_range=(0.0, 0.0), + ) + + error_msg = ( + "calc_cpe_in_training is set to True. " + f"Please specify table_sample(current={table_sample}) and " + f"eval_table_sample(current={eval_table_sample}) such that " + "eval_table_sample + table_sample <= 100. " + "In order to reliably calculate CPE, eval_table_sample " + "should not be too small." + ) + assert table_sample is not None, error_msg + assert eval_table_sample is not None, error_msg + assert (eval_table_sample + table_sample) <= (100.0 + 1e-3), error_msg + + return TrainEvalSampleRanges( + train_sample_range=(0.0, table_sample), + eval_sample_range=(100.0 - eval_table_sample, 100.0), + ) + + +# pyre-fixme[13]: Attribute `_normalization_data_map` is never initialized. +# pyre-fixme[13]: Attribute `_train_dataset` is never initialized. 
+# pyre-fixme[13]: Attribute `_eval_dataset` is never initialized. +class ManualDataModule(ReAgentDataModule): + _normalization_data_map: Dict[str, NormalizationData] + _train_dataset: Dataset + _eval_dataset: Optional[Dataset] + + def __init__( + self, + *, + input_table_spec: Optional[TableSpec] = None, + reward_options: Optional[RewardOptions] = None, + setup_data: Optional[Dict[str, bytes]] = None, + saved_setup_data: Optional[Dict[str, bytes]] = None, + reader_options: Optional[ReaderOptions] = None, + resource_options: Optional[ResourceOptions] = None, + model_manager=None, + ): + super().__init__() + self.input_table_spec = input_table_spec + self.reward_options = reward_options or RewardOptions() + self.reader_options = reader_options or ReaderOptions() + self.resource_options = resource_options or ResourceOptions(gpu=0) + self._model_manager = model_manager + self.setup_data = setup_data + self.saved_setup_data = saved_setup_data or {} + + self._setup_done = False + self._num_train_data_loader_calls = 0 + self._num_val_data_loader_calls = 0 + self._num_test_data_loader_calls = 0 + + def prepare_data(self, *args, **kwargs): + if self.setup_data is not None: + return None + + key = "normalization_data_map" + + data_fetcher = OssDataFetcher() + + normalization_data_map = ( + self.run_feature_identification(self.input_table_spec) + if key not in self.saved_setup_data + else pickle.loads(self.saved_setup_data[key]) + ) + calc_cpe_in_training = self.should_generate_eval_dataset + sample_range_output = get_sample_range( + self.input_table_spec, calc_cpe_in_training + ) + train_dataset = self.query_data( + input_table_spec=self.input_table_spec, + sample_range=sample_range_output.train_sample_range, + reward_options=self.reward_options, + data_fetcher=data_fetcher, + ) + eval_dataset = None + if calc_cpe_in_training: + eval_dataset = self.query_data( + input_table_spec=self.input_table_spec, + sample_range=sample_range_output.eval_sample_range, + reward_options=self.reward_options, + data_fetcher=data_fetcher, + ) + + self.setup_data = self._pickle_setup_data( + normalization_data_map=normalization_data_map, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + ) + return self.setup_data + + def _pickle_setup_data( + self, + normalization_data_map: Dict[str, NormalizationData], + train_dataset: Dataset, + eval_dataset: Optional[Dataset], + ) -> Dict[str, bytes]: + setup_data = dict( + normalization_data_map=pickle.dumps(normalization_data_map), + train_dataset=pickle.dumps(train_dataset), + eval_dataset=pickle.dumps(eval_dataset), + ) + self.setup_data = setup_data + return setup_data + + def setup(self, stage=None): + if self._setup_done: + return + + setup_data = {k: pickle.loads(v) for k, v in self.setup_data.items()} + + self._normalization_data_map = setup_data["normalization_data_map"] + self._train_dataset = setup_data["train_dataset"] + self._eval_dataset = setup_data["eval_dataset"] + + self._setup_done = True + + @property + def model_manager(self): + model_manager = self._model_manager + assert model_manager + return model_manager + + @model_manager.setter + def model_manager(self, model_manager): + assert self._model_manager is None + self._model_manager = model_manager + + def get_normalization_data_map( + self, + keys: Optional[List[str]] = None, + ) -> Dict[str, NormalizationData]: + return self._normalization_data_map + + @abc.abstractmethod + def run_feature_identification( + self, input_table_spec: TableSpec + ) -> Dict[str, NormalizationData]: + """ + Derive 
preprocessing parameters from data. + """ + pass + + def __getattr__(self, attr): + """Get X_normalization_data by attribute""" + normalization_data_suffix = "_normalization_data" + if attr.endswith(normalization_data_suffix): + assert self._normalization_data_map is not None, ( + f"Trying to access {attr} but normalization_data_map " + "has not been set. Did you run `setup()`" + ) + normalization_key = attr[: -len(normalization_data_suffix)] + normalization_data = self._normalization_data_map.get( + normalization_key, None + ) + if normalization_data is None: + raise AttributeError( + f"normalization key `{normalization_key}` is unavailable. " + f"Available keys are: {self._normalization_data_map.keys()}." + ) + return normalization_data + + raise AttributeError(f"attr {attr} not available {type(self)}") + + @property + @abc.abstractmethod + def should_generate_eval_dataset(self) -> bool: + pass + + @abc.abstractmethod + def query_data( + self, + input_table_spec: TableSpec, + sample_range: Optional[Tuple[float, float]], + reward_options: RewardOptions, + data_fetcher: DataFetcher, + ) -> Dataset: + """ + Massage input table into the format expected by the trainer + """ + pass + + @abc.abstractmethod + def build_batch_preprocessor(self) -> BatchPreprocessor: + pass + + def get_dataloader(self, dataset: Dataset, identity: str = "Default"): + batch_preprocessor = self.build_batch_preprocessor() + reader_options = self.reader_options + assert reader_options + data_reader = make_batch_reader( + # pyre-fixme[16]: `HiveDataSetClass` has no attribute `parquet_url`. + dataset.parquet_url, + num_epochs=1, + reader_pool_type=reader_options.petastorm_reader_pool_type, + ) + # NOTE: must be wrapped by DataLoaderWrapper to call __exit__() on end of epoch + dataloader = DataLoader( + data_reader, + batch_size=reader_options.minibatch_size, + collate_fn=collate_and_preprocess( + batch_preprocessor=batch_preprocessor, use_gpu=False + ), + ) + return _closing_iter(dataloader) + + def train_dataloader(self): + self._num_train_data_loader_calls += 1 + return self.get_dataloader( + self.train_dataset, + identity=f"train_{self._num_train_data_loader_calls}", + ) + + def test_dataloader(self): + self._num_test_data_loader_calls += 1 + # TODO: we currently use the same data for test and validation. + # We should have three different splits of the total data + return self._get_eval_dataset( + identity=f"test_{self._num_test_data_loader_calls}" + ) + + def val_dataloader(self): + self._num_val_data_loader_calls += 1 + return self._get_eval_dataset(identity=f"val_{self._num_val_data_loader_calls}") + + def _get_eval_dataset(self, identity: str): + eval_dataset = self.eval_dataset + if not eval_dataset: + return None + return self.get_dataloader(eval_dataset, identity) + + @property + def train_dataset(self): + return getattr(self, "_train_dataset", None) + + @property + def eval_dataset(self): + return getattr(self, "_eval_dataset", None) + + @property + def test_dataset(self): + # TODO: we currently use the same data for test and validation. + return self.eval_dataset + + +def _closing_iter(dataloader): + yield from dataloader + dataloader.__exit__(None, None, None) + + +def collate_and_preprocess(batch_preprocessor: BatchPreprocessor, use_gpu: bool): + """Helper for Petastorm's DataLoader to preprocess. + TODO(kaiwenw): parallelize preprocessing by using transform of Petastorm reader + Should pin memory and preprocess in reader and convert to gpu in collate_fn. 
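+    Example (illustrative only; the preprocessor name and batch size are assumptions,
+    mirroring how get_dataloader above wires this into Petastorm's DataLoader):
+        collate_fn = collate_and_preprocess(batch_preprocessor=my_preprocessor, use_gpu=False)
+        dataloader = DataLoader(data_reader, batch_size=1024, collate_fn=collate_fn)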
+ """ + + def collate_fn(batch_list: List[Dict]): + batch = decimal_friendly_collate(batch_list) + preprocessed_batch = batch_preprocessor(batch) + if use_gpu: + preprocessed_batch = preprocessed_batch.cuda() + return preprocessed_batch + + return collate_fn diff --git a/reagent/workflow/data_fetcher.py b/reagent/data/oss_data_fetcher.py similarity index 71% rename from reagent/workflow/data_fetcher.py rename to reagent/data/oss_data_fetcher.py index 1c9ac1e98..45b1c889b 100644 --- a/reagent/workflow/data_fetcher.py +++ b/reagent/data/oss_data_fetcher.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging from typing import List, Optional, Tuple @@ -14,7 +15,8 @@ StructField, StructType, ) -from reagent.workflow.spark_utils import get_spark_session, get_table_url +from reagent.data.data_fetcher import DataFetcher +from reagent.data.spark_utils import get_spark_session, get_table_url from reagent.workflow.types import Dataset, TableSpec @@ -64,7 +66,7 @@ def set_reward_col_as_reward( def hash_mdp_id_and_subsample(df, sample_range: Optional[Tuple[float, float]] = None): - """ Since mdp_id is a string but Pytorch Tensors do not store strings, + """Since mdp_id is a string but Pytorch Tensors do not store strings, we hash them with crc32, which is treated as a cryptographic hash (with range [0, MAX_UINT32-1]). We also perform an optional subsampling based on this hash value. @@ -79,17 +81,20 @@ def hash_mdp_id_and_subsample(df, sample_range: Optional[Tuple[float, float]] = and sample_range[1] <= 100.0 ), f"{sample_range} is invalid." + # pyre-fixme[16]: Module `functions` has no attribute `col`. df = df.withColumn("mdp_id", crc32(col("mdp_id"))) if sample_range: lower_bound = sample_range[0] / 100.0 * MAX_UINT32 upper_bound = sample_range[1] / 100.0 * MAX_UINT32 + # pyre-fixme[16]: Module `functions` has no attribute `col`. + # pyre-fixme[16]: Module `functions` has no attribute `col`. df = df.filter((lower_bound <= col("mdp_id")) & (col("mdp_id") <= upper_bound)) return df def make_sparse2dense(df, col_name: str, possible_keys: List): - """ Given a list of possible keys, convert sparse map to dense array. - In our example, both value_type is assumed to be a float. + """Given a list of possible keys, convert sparse map to dense array. + In our example, both value_type is assumed to be a float. """ output_type = StructType( [ @@ -116,7 +121,9 @@ def sparse2dense(map_col): sparse2dense_udf = udf(sparse2dense, output_type) df = df.withColumn(col_name, sparse2dense_udf(col_name)) + # pyre-fixme[16]: Module `functions` has no attribute `col`. df = df.withColumn(f"{col_name}_presence", col(f"{col_name}.presence")) + # pyre-fixme[16]: Module `functions` has no attribute `col`. df = df.withColumn(col_name, col(f"{col_name}.dense")) return df @@ -127,7 +134,7 @@ def sparse2dense(map_col): def make_get_step_udf(multi_steps: Optional[int]): - """ Get step count by taking length of next_states_features array. """ + """Get step count by taking length of next_states_features array.""" def get_step(col: List): return 1 if multi_steps is None else min(len(col), multi_steps) @@ -136,7 +143,7 @@ def get_step(col: List): def make_next_udf(multi_steps: Optional[int], return_type): - """ Generic udf to get next (after multi_steps) item, provided item type. 
""" + """Generic udf to get next (after multi_steps) item, provided item type.""" def get_next(next_col): return ( @@ -149,7 +156,7 @@ def get_next(next_col): def make_where_udf(arr: List[str]): - """ Return index of item in arr, and len(arr) if not found. """ + """Return index of item in arr, and len(arr) if not found.""" def find(item: str): for i, arr_item in enumerate(arr): @@ -161,7 +168,7 @@ def find(item: str): def make_existence_bitvector_udf(arr: List[str]): - """ one-hot encode elements of target depending on their existence in arr. """ + """one-hot encode elements of target depending on their existence in arr.""" default = [0] * len(arr) @@ -176,7 +183,7 @@ def encode(target: List[str]): def misc_column_preprocessing(df, multi_steps: Optional[int]): - """ Miscellaneous columns are step, time_diff, sequence_number, not_terminal. """ + """Miscellaneous columns are step, time_diff, sequence_number, not_terminal.""" # step refers to n in n-step RL; special case when approaching terminal df = df.withColumn("step", make_get_step_udf(multi_steps)("next_state_features")) @@ -186,6 +193,7 @@ def misc_column_preprocessing(df, multi_steps: Optional[int]): df = df.withColumn("time_diff", next_long_udf("time_diff")) # assuming use_seq_num_diff_as_time_diff = False for now + # pyre-fixme[16]: Module `functions` has no attribute `col`. df = df.withColumn("sequence_number", col("sequence_number_ordinal")) return df @@ -194,7 +202,7 @@ def misc_column_preprocessing(df, multi_steps: Optional[int]): def state_and_metrics_sparse2dense( df, states: List[int], metrics: List[str], multi_steps: Optional[int] ): - """ Sparse-to-dense preprocessing of Map columns, which are states and metrics. + """Sparse-to-dense preprocessing of Map columns, which are states and metrics. For each column of type Map, w/ name X, output two columns. Map values are assumed to be scalar. This process is called sparse-to-dense. X = {"state_features", "next_state_features", "metrics"}. @@ -234,7 +242,7 @@ def discrete_action_preprocessing( df = df.withColumn("next_action", where_udf(next_long_udf("next_action"))) def make_not_terminal_udf(actions: List[str]): - """ Return true iff next_action is terminal (i.e. idx = len(actions)). """ + """Return true iff next_action is terminal (i.e. idx = len(actions)).""" def get_not_terminal(next_action): return next_action < len(actions) @@ -271,7 +279,7 @@ def parametric_action_preprocessing( df = df.withColumn("next_action", next_map_udf("next_action")) def make_not_terminal_udf(): - """ Return true iff next_action is an empty map """ + """Return true iff next_action is an empty map""" def get_not_terminal(next_action): return len(next_action) > 0 @@ -289,42 +297,63 @@ def get_not_terminal(next_action): def select_relevant_columns( df, discrete_action: bool = True, include_possible_actions: bool = True ): - """ Select all the relevant columns and perform type conversions. """ + """Select all the relevant columns and perform type conversions.""" if not discrete_action and include_possible_actions: raise NotImplementedError("currently we don't support include_possible_actions") select_col_list = [ + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("reward").cast(FloatType()), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("state_features").cast(ArrayType(FloatType())), + # pyre-fixme[16]: Module `functions` has no attribute `col`. 
col("state_features_presence").cast(ArrayType(BooleanType())), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("next_state_features").cast(ArrayType(FloatType())), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("next_state_features_presence").cast(ArrayType(BooleanType())), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("not_terminal").cast(BooleanType()), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("action_probability").cast(FloatType()), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("mdp_id").cast(LongType()), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("sequence_number").cast(LongType()), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("step").cast(LongType()), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("time_diff").cast(LongType()), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("metrics").cast(ArrayType(FloatType())), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("metrics_presence").cast(ArrayType(BooleanType())), ] if discrete_action: select_col_list += [ + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("action").cast(LongType()), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("next_action").cast(LongType()), ] else: select_col_list += [ + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("action").cast(ArrayType(FloatType())), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("next_action").cast(ArrayType(FloatType())), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("action_presence").cast(ArrayType(BooleanType())), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("next_action_presence").cast(ArrayType(BooleanType())), ] if include_possible_actions: select_col_list += [ + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("possible_actions_mask").cast(ArrayType(LongType())), + # pyre-fixme[16]: Module `functions` has no attribute `col`. col("possible_next_actions_mask").cast(ArrayType(LongType())), ] @@ -332,9 +361,9 @@ def select_relevant_columns( def get_distinct_keys(df, col_name, is_col_arr_map=False): - """ Return list of distinct keys. - Set is_col_arr_map to be true if column is an array of Maps. - Otherwise, assume column is a Map. + """Return list of distinct keys. + Set is_col_arr_map to be true if column is an array of Maps. + Otherwise, assume column is a Map. """ if is_col_arr_map: df = df.select(explode(col_name).alias(col_name)) @@ -343,7 +372,7 @@ def get_distinct_keys(df, col_name, is_col_arr_map=False): def infer_states_names(df, multi_steps: Optional[int]): - """ Infer possible state names from states and next state features. """ + """Infer possible state names from states and next state features.""" state_keys = get_distinct_keys(df, "state_features") next_states_is_col_arr_map = not (multi_steps is None) next_state_keys = get_distinct_keys( @@ -362,7 +391,7 @@ def infer_action_names(df, multi_steps: Optional[int]): def infer_metrics_names(df, multi_steps: Optional[int]): - """ Infer possible metrics names. + """Infer possible metrics names. Assume in multi-step case, metrics is an array of maps. 
""" is_col_arr_map = not (multi_steps is None) @@ -370,8 +399,8 @@ def infer_metrics_names(df, multi_steps: Optional[int]): def rand_string(length): - import string import random + import string """Generate a random string of fixed length """ letters = string.ascii_lowercase @@ -379,7 +408,7 @@ def rand_string(length): def upload_as_parquet(df) -> Dataset: - """ Generate a random parquet. Fails if cannot generate a non-existent name. """ + """Generate a random parquet. Fails if cannot generate a non-existent name.""" # get a random tmp name and check if it exists sqlCtx = get_spark_session() @@ -394,57 +423,64 @@ def upload_as_parquet(df) -> Dataset: raise Exception(f"Failed to find name after {MAX_UPLOAD_PARQUET_TRIES} tries.") # perform the write + # pyre-fixme[61]: `rand_name` may not be initialized here. df.write.mode("errorifexists").format("parquet").saveAsTable(rand_name) + # pyre-fixme[61]: `rand_name` may not be initialized here. parquet_url = get_table_url(rand_name) logger.info(f"Saved parquet to {parquet_url}") return Dataset(parquet_url=parquet_url) -def query_data( - input_table_spec: TableSpec, - discrete_action: bool, - actions: Optional[List[str]] = None, - include_possible_actions=True, - custom_reward_expression: Optional[str] = None, - sample_range: Optional[Tuple[float, float]] = None, - multi_steps: Optional[int] = None, - gamma: Optional[float] = None, -) -> Dataset: - """ Perform reward calculation, hashing mdp + subsampling and - other preprocessing such as sparse2dense. - """ - sqlCtx = get_spark_session() - df = sqlCtx.sql(f"SELECT * FROM {input_table_spec.table_name}") - df = set_reward_col_as_reward( - df, - custom_reward_expression=custom_reward_expression, - multi_steps=multi_steps, - gamma=gamma, - ) - df = hash_mdp_id_and_subsample(df, sample_range=sample_range) - df = misc_column_preprocessing(df, multi_steps=multi_steps) - df = state_and_metrics_sparse2dense( - df, - states=infer_states_names(df, multi_steps), - metrics=infer_metrics_names(df, multi_steps), - multi_steps=multi_steps, - ) - if discrete_action: - assert include_possible_actions - assert actions is not None, "in discrete case, actions must be given." - df = discrete_action_preprocessing(df, actions=actions, multi_steps=multi_steps) - else: - actions = infer_action_names(df, multi_steps) - df = parametric_action_preprocessing( +class OssDataFetcher(DataFetcher): + def query_data( + self, + input_table_spec: TableSpec, + discrete_action: bool, + actions: Optional[List[str]] = None, + include_possible_actions=True, + custom_reward_expression: Optional[str] = None, + sample_range: Optional[Tuple[float, float]] = None, + multi_steps: Optional[int] = None, + gamma: Optional[float] = None, + ) -> Dataset: + """Perform reward calculation, hashing mdp + subsampling and + other preprocessing such as sparse2dense. + """ + sqlCtx = get_spark_session() + # pyre-ignore + df = sqlCtx.sql(f"SELECT * FROM {input_table_spec.table_name}") + df = set_reward_col_as_reward( + df, + custom_reward_expression=custom_reward_expression, + multi_steps=multi_steps, + gamma=gamma, + ) + df = hash_mdp_id_and_subsample(df, sample_range=sample_range) + df = misc_column_preprocessing(df, multi_steps=multi_steps) + df = state_and_metrics_sparse2dense( df, - actions=actions, + states=infer_states_names(df, multi_steps), + metrics=infer_metrics_names(df, multi_steps), multi_steps=multi_steps, + ) + if discrete_action: + assert include_possible_actions + assert actions is not None, "in discrete case, actions must be given." 
+ df = discrete_action_preprocessing( + df, actions=actions, multi_steps=multi_steps + ) + else: + actions = infer_action_names(df, multi_steps) + df = parametric_action_preprocessing( + df, + actions=actions, + multi_steps=multi_steps, + include_possible_actions=include_possible_actions, + ) + + df = select_relevant_columns( + df, + discrete_action=discrete_action, include_possible_actions=include_possible_actions, ) - - df = select_relevant_columns( - df, - discrete_action=discrete_action, - include_possible_actions=include_possible_actions, - ) - return upload_as_parquet(df) + return upload_as_parquet(df) diff --git a/reagent/data/reagent_data_module.py b/reagent/data/reagent_data_module.py new file mode 100644 index 000000000..0ee045242 --- /dev/null +++ b/reagent/data/reagent_data_module.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import abc +from typing import Dict, List, Optional + +import pytorch_lightning as pl +from reagent.core.parameters import NormalizationData + + +class ReAgentDataModule(pl.LightningDataModule): + def __init__(self) -> None: + super().__init__() + + @abc.abstractmethod + def get_normalization_data_map( + self, + keys: Optional[List[str]] = None, + ) -> Dict[str, NormalizationData]: + pass + + @abc.abstractproperty + def train_dataset(self): + pass + + @abc.abstractproperty + def eval_dataset(self): + pass + + @abc.abstractproperty + def test_dataset(self): + pass diff --git a/reagent/workflow/spark_utils.py b/reagent/data/spark_utils.py similarity index 87% rename from reagent/workflow/spark_utils.py rename to reagent/data/spark_utils.py index b23f75a4a..3a287218d 100644 --- a/reagent/workflow/spark_utils.py +++ b/reagent/data/spark_utils.py @@ -1,16 +1,16 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging -import os import pprint from os.path import abspath, dirname, join from typing import Dict, Optional import reagent - -# pyre-fixme[21]: Could not find `pyspark`. -# pyre-fixme[21]: Could not find `pyspark`. from pyspark.sql import SparkSession + +# pyre-fixme[21]: Could not find module `pyspark.sql.functions`. +# pyre-fixme[21]: Could not find module `pyspark.sql.functions`. from pyspark.sql.functions import col @@ -27,8 +27,8 @@ preprocessing/... reagent/... """ -SPARK_JAR = join(dirname(reagent.__file__), os.pardir, SPARK_JAR_FROM_ROOT_DIR) +SPARK_JAR = join(dirname(dirname(reagent.__file__)), SPARK_JAR_FROM_ROOT_DIR) DEFAULT_SPARK_CONFIG = { "spark.app.name": "ReAgent", @@ -72,6 +72,7 @@ def get_table_url(table_name: str) -> str: spark = get_spark_session() url = ( spark.sql(f"DESCRIBE FORMATTED {table_name}") + # pyre-fixme[16]: Module `functions` has no attribute `col`. 
        .filter((col("col_name") == "Location"))
        .select("data_type")
        .toPandas()
diff --git a/reagent/evaluation/cb/__init__.py b/reagent/evaluation/cb/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/reagent/evaluation/cb/base_evaluator.py b/reagent/evaluation/cb/base_evaluator.py
new file mode 100644
index 000000000..403c35b77
--- /dev/null
+++ b/reagent/evaluation/cb/base_evaluator.py
@@ -0,0 +1,142 @@
+import copy
+import logging
+from abc import ABC, abstractmethod
+from typing import Optional
+
+import torch
+from reagent.core.types import CBInput
+from reagent.core.utils import get_rank
+from reagent.evaluation.cb.utils import zero_out_skipped_obs_weights
+from torch.utils.tensorboard import SummaryWriter
+
+
+logger = logging.getLogger(__name__)
+
+
+class BaseOfflineEval(torch.nn.Module, ABC):
+    """
+    Base class for Contextual Bandit Offline Evaluation algorithms. All algorithms support evaluation of non-stationary
+    policies, as required for exploration-exploitation.
+    """
+
+    sum_weight: torch.Tensor
+    all_data_sum_weight: torch.Tensor
+    sum_weight_local: torch.Tensor
+    all_data_sum_weight_local: torch.Tensor
+    sum_weight_since_update_local: torch.Tensor
+    num_eval_model_updates: torch.Tensor
+
+    def __init__(
+        self,
+        eval_model: torch.nn.Module,
+        summary_writer: Optional[SummaryWriter] = None,
+    ):
+        """
+        Initialize the evaluator. The evaluated model is passed in as an input and copied to freeze its state.
+        The state of the model remains frozen until method update_eval_model() is called.
+        """
+        super().__init__()
+        self.eval_model = copy.deepcopy(eval_model)
+        self.summary_writer = summary_writer
+        self.register_buffer("sum_weight", torch.zeros(1, dtype=torch.float))
+        self.register_buffer("all_data_sum_weight", torch.zeros(1, dtype=torch.float))
+        self.register_buffer("sum_weight_local", torch.zeros(1, dtype=torch.float))
+        self.register_buffer(
+            "all_data_sum_weight_local", torch.zeros(1, dtype=torch.float)
+        )
+        self.register_buffer(
+            "sum_weight_since_update_local", torch.zeros(1, dtype=torch.float)
+        )
+        self.register_buffer("num_eval_model_updates", torch.zeros(1, dtype=torch.int))
+
+    def ingest_batch(
+        self,
+        batch: CBInput,
+        model_actions: torch.Tensor,
+    ) -> CBInput:
+        """
+        Ingest the batch of data and:
+        1. Call self._process_all_data() and self._process_used_data() methods
+        2. Modify the batch, zeroing out the weights for observations in which the logged and model actions don't match.
+
+        TODO: support more general logic for zeroing out the weights (e.g. as required by Doubly Robust - Non-Stationary)
+        TODO: remove rows instead of zeroing out weights (to speed up processing)
+
+        Inputs:
+            batch: A batch of training data
+            model_actions: A tensor of actions chosen by the evaluated model
+        """
+        self._process_all_data(batch)
+        new_batch = zero_out_skipped_obs_weights(batch, model_actions)
+        self._process_used_data(new_batch)
+        return new_batch
+
+    @abstractmethod
+    def _process_all_data(
+        self,
+        batch: CBInput,
+    ) -> None:
+        """
+        Process all observations, including the ones where the logged action doesn't match the model action. For some algorithms
+        this will be a no-op.
+        """
+        pass
+
+    @abstractmethod
+    def _process_used_data(
+        self,
+        batch: CBInput,
+    ) -> None:
+        """
+        Process the observations for which the logged action matches the model action. All other observations
+        were previously removed (weights were zeroed out) by zero_out_skipped_obs_weights()
+        """
+        pass
+
+    @abstractmethod
+    def _aggregate_across_instances(self) -> None:
+        """
+        Aggregate local data across all instances of the evaluator.
+        Used for distributed training.
+        """
+        pass
+
+    @abstractmethod
+    def get_avg_reward(self) -> float:
+        """
+        Get the current estimate of average reward
+        """
+        pass
+
+    def update_eval_model(self, eval_model: torch.nn.Module) -> None:
+        """
+        Update the evaluated model. When exactly to call this is decided by the user and should mimic when
+        the model would get updated in a real deployment.
+        """
+        self.eval_model = copy.deepcopy(eval_model)
+
+    def attach_summary_writer(self, summary_writer: SummaryWriter) -> None:
+        """
+        Attach a SummaryWriter to the evaluator. This method is useful in cases where SummaryWriter is
+        not yet available at initialization.
+        """
+        self.summary_writer = summary_writer
+
+    def log_metrics(self, global_step: Optional[int] = None) -> None:
+        if get_rank() == 0:
+            # only log from the main process
+            logger.info(self.get_formatted_result_string())
+            summary_writer = self.summary_writer
+            if summary_writer is not None:
+                metric_dict = {
+                    "avg_reward": self.get_avg_reward(),
+                    "sum_weight": self.sum_weight.item(),
+                    "all_data_sum_weight": self.all_data_sum_weight.item(),
+                    "num_eval_model_updates": self.num_eval_model_updates.item(),
+                }
+                summary_writer.add_scalars(
+                    "Offline_Eval", metric_dict, global_step=global_step
+                )
+
+    def get_formatted_result_string(self) -> str:
+        return f"Avg reward {self.get_avg_reward():0.3f} based on {int(self.sum_weight.item())} processed observations (out of {int(self.all_data_sum_weight.item())} observations). The eval model has been updated {self.num_eval_model_updates.item()} times"
diff --git a/reagent/evaluation/cb/policy_evaluator.py b/reagent/evaluation/cb/policy_evaluator.py
new file mode 100644
index 000000000..ddaee634d
--- /dev/null
+++ b/reagent/evaluation/cb/policy_evaluator.py
@@ -0,0 +1,72 @@
+import logging
+
+import torch
+from pytorch_lightning.utilities.distributed import ReduceOp, sync_ddp_if_available
+from reagent.core.types import CBInput
+from reagent.evaluation.cb.base_evaluator import BaseOfflineEval
+
+
+logger = logging.getLogger(__name__)
+
+
+EPSILON = 1e-9
+
+
+class PolicyEvaluator(BaseOfflineEval):
+    """
+    An offline evaluator for Contextual Bandits, based on the paper https://arxiv.org/pdf/1003.0146.pdf (Algorithm 3)
+    """
+
+    sum_reward_weighted: torch.Tensor
+    sum_reward_weighted_local: torch.Tensor
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.register_buffer("sum_reward_weighted", torch.zeros(1, dtype=torch.float))
+        self.register_buffer(
+            "sum_reward_weighted_local", torch.zeros(1, dtype=torch.float)
+        )
+
+    @torch.no_grad()
+    def _process_all_data(self, batch: CBInput) -> None:
+        if batch.weight is not None:
+            self.all_data_sum_weight_local += batch.weight.sum()
+        else:
+            self.all_data_sum_weight_local += len(batch)
+
+    @torch.no_grad()
+    def _process_used_data(self, batch: CBInput) -> None:
+        """
+        Process the observations for which the logged action matches the model action:
+        - Update the average reward
+        - Update the total weight counter
+        """
+        assert batch.reward is not None
+        assert batch.weight is not None
+        assert batch.weight.shape == batch.reward.shape
+        self.sum_reward_weighted_local += (batch.weight * batch.reward).sum()
+        self.sum_weight_local += batch.weight.sum()
+
+    def _aggregate_across_instances(self) -> None:
+        # sum local values across all trainers, add to the global value
+        # clone the tensors to avoid modifying them inplace
+        self.sum_reward_weighted += sync_ddp_if_available(
+            self.sum_reward_weighted_local.clone(), reduce_op=ReduceOp.SUM
+        )
+        self.sum_weight += sync_ddp_if_available(
+            self.sum_weight_local.clone(), reduce_op=ReduceOp.SUM
+        )
+        self.all_data_sum_weight += sync_ddp_if_available(
+            self.all_data_sum_weight_local.clone(), reduce_op=ReduceOp.SUM
+        )
+        # reset local values to zero
+        self.sum_reward_weighted_local.zero_()
+        self.sum_weight_local.zero_()
+        self.all_data_sum_weight_local.zero_()
+
+    def get_avg_reward(self) -> float:
+        assert (
+            self.sum_weight_local.item() == 0.0
+        ), f"Non-zero local weight {self.sum_weight_local.item()} in the evaluator. _aggregate_across_instances() should have been called to aggregate across all instances and zero out the local values."
+        # return the average reward
+        return (self.sum_reward_weighted / (self.sum_weight + EPSILON)).item()
diff --git a/reagent/evaluation/cb/utils.py b/reagent/evaluation/cb/utils.py
new file mode 100644
index 000000000..a1167fb1b
--- /dev/null
+++ b/reagent/evaluation/cb/utils.py
@@ -0,0 +1,30 @@
+from dataclasses import replace
+
+import torch
+from reagent.core.types import CBInput
+
+
+def zero_out_skipped_obs_weights(
+    batch: CBInput, model_actions: torch.Tensor
+) -> CBInput:
+    """
+    Return a copy of the input batch, but with weights zeroed out where the logged action and the model action
+    don't match.
+    """
+    current_weight = batch.weight
+    if current_weight is None:
+        current_weight = torch.ones(len(batch), 1, device=batch.device)
+    logged_actions = batch.action
+    assert logged_actions is not None
+    assert current_weight.shape == logged_actions.shape, (
+        current_weight.shape,
+        logged_actions.shape,
+    )
+    assert logged_actions.shape == model_actions.shape, (
+        logged_actions.shape,
+        model_actions.shape,
+    )
+    new_batch = replace(
+        batch, weight=current_weight * (logged_actions == model_actions)
+    )
+    return new_batch
diff --git a/reagent/evaluation/cpe.py b/reagent/evaluation/cpe.py
index ec357f270..248809a27 100644
--- a/reagent/evaluation/cpe.py
+++ b/reagent/evaluation/cpe.py
@@ -7,11 +7,10 @@
 import numpy as np
 import torch
-from reagent.tensorboardX import SummaryWriterContext
+from reagent.core.tensorboardX import SummaryWriterContext
 
 logger = logging.getLogger(__name__)
-logger.setLevel(logging.INFO)
 
 
 class CpeEstimate(NamedTuple):
@@ -30,6 +29,9 @@ class CpeEstimateSet(NamedTuple):
     weighted_doubly_robust: Optional[CpeEstimate] = None
     magic: Optional[CpeEstimate] = None
+    switch: Optional[CpeEstimate] = None
+    switch_dr: Optional[CpeEstimate] = None
+
     def check_estimates_exist(self):
         assert self.direct_method is not None
         assert self.inverse_propensity is not None
@@ -125,7 +127,6 @@ def none_to_zero(x: Optional[float]) -> float:
             ),
             ("CPE/{}/MAGIC".format(metric_name), self.magic.normalized),
         ]:
-            # pyre-fixme[16]: `SummaryWriterContext` has no attribute `add_scalar`.
SummaryWriterContext.add_scalar(name, none_to_zero(value)) def fill_empty_with_zero(self): @@ -149,7 +150,6 @@ class CpeDetails: def __init__(self): self.reward_estimates: CpeEstimateSet = CpeEstimateSet() self.metric_estimates: Dict[str, CpeEstimateSet] = {} - self.mc_loss: float = None self.q_value_means: Optional[Dict[str, float]] = None self.q_value_stds: Optional[Dict[str, float]] = None self.action_distribution: Optional[Dict[str, float]] = None diff --git a/reagent/evaluation/doubly_robust_estimator.py b/reagent/evaluation/doubly_robust_estimator.py index eadac2d08..488725716 100644 --- a/reagent/evaluation/doubly_robust_estimator.py +++ b/reagent/evaluation/doubly_robust_estimator.py @@ -1,21 +1,18 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import itertools import logging from dataclasses import dataclass from typing import Dict, NamedTuple, Optional, Tuple, Union import numpy as np import torch -import xgboost as xgb -from reagent.evaluation.cpe import CpeEstimate, bootstrapped_std_error_of_mean +from reagent.evaluation.cpe import bootstrapped_std_error_of_mean, CpeEstimate from reagent.evaluation.evaluation_data_page import EvaluationDataPage from torch import Tensor logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) DEFAULT_FRAC_TRAIN = 0.4 @@ -133,6 +130,7 @@ def _split_data( raise ValueError("contexts not provided in input") contexts_dict = { "train": edp.contexts[idx_train], + # pyre-fixme[16]: `Optional` has no attribute `__getitem__`. "valid": edp.contexts[idx_valid], "eval": edp.contexts[idx_eval], } @@ -229,6 +227,7 @@ def _get_importance_sampling_inputs( importance_weights = ( target_propensity_for_action / ed.logged_propensities_eval ).float() + logger.info(f"Mean IPS weight on the eval dataset: {importance_weights.mean()}") return ImportanceSamplingData( importance_weight=importance_weights, logged_rewards=ed.logged_rewards_eval, @@ -267,6 +266,22 @@ def _get_importance_sampling_estimates( ) direct_method_score = float(torch.mean(direct_method_values)) + logger.info( + f"Normalized Direct method score = {direct_method_score * normalizer}" + ) + avg_model_reward_for_logged_actions = float( + torch.mean(isd.model_rewards_for_logged_action) + ) + # The reward model could have learned to assign larger rewards than the logged rewards + # to all observed actions, in which case direct_method_score would be inflated. + # Hence, it is probably more useful to look at the adjusted score, which is obtained by + # dividing the normalized score by (avg_model_reward_for_logged_actions)/(average logged reward). + # Since the "normalizer" variable gives the average logged reward, this adjustment is equivalent + # to dividing direct_method_score by avg_model_reward_for_logged_actions, because the + # normalizer variable cancels out. 
+ logger.info( + f"Adjusted Direct method score = {direct_method_score / avg_model_reward_for_logged_actions}" + ) direct_method_std_error = bootstrapped_std_error_of_mean( direct_method_values.squeeze(), sample_percent=hp.bootstrap_sample_percent, @@ -289,6 +304,8 @@ def _get_importance_sampling_estimates( # policy ips_score = float(torch.mean(ips)) + logger.info(f"Normalized IPS score = {ips_score * normalizer}") + ips_score_std_error = bootstrapped_std_error_of_mean( ips.squeeze(), sample_percent=hp.bootstrap_sample_percent, @@ -327,433 +344,3 @@ def estimate( ed = self._prepare_data(edp) isd = self._get_importance_sampling_inputs(ed) return self._get_importance_sampling_estimates(isd, hp=hp) - - -class DoublyRobustEstimatorBOPE(DoublyRobustEstimator): - """ - This class implements a doubly-robust Balanced Off-Policy Evaluation (BOP-E) - method. - For details about BOP-E see https://arxiv.org/abs/1906.03694 - For analysis of BOP-E performance see https://fburl.com/bope_eval_nb - - Note that when using BOP-E the data gets split into training, validation - and evaluation parts and only the evaluation part is used directly for policy - evaluation, while training and validation datasets are used for model training. - - supported modes (all doubly robust): - 1. bope_weights. Use BOP-E (ignoring logged propensities) to estimate the - importance weights. Propensities of the target policy are used as - observation weights when training BOP-E classifier. - 2. bope_weighted_targets. Use BOP-E (ignoring logged propensities) to - estimate the importance weights. Propensities of the target policy - are used as soft targets to train BOP-E regressor. With this method - BOP-E trains a regressor instead of a classifier. - 3. bope_sampling. Use BOP-E (ignoring logged propensities) - to estimate the importance weights. Propensities of the target policy - are used to sample the actions for the classifier training data. - """ - - def _prepare_data(self, edp: EvaluationDataPage) -> EstimationData: - """ - Prepare the datasets for BOP-E classifier estimation - """ - assert ( - edp.contexts is not None - ), "edp.contexts have to be specified when using the estimation-based methods" - num_actions = edp.model_propensities.shape[1] - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no attribute `frac_train`. - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no attribute `frac_valid`. - tved = self._split_data(edp, self.frac_train, self.frac_valid) - - actions_target_dict = {} - contexts_actions_target_dict = {} - weights_target_dict = {} - policy_indicators_target_dict = {} - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no attribute `mode`. - if self.mode == "bope_sampling": - for d in ["train", "valid"]: - # model_propensities is N*N_actions tensor of propensity scores - # for each possible action by the target algorithm at each context - actions_target_dict[d] = ( - torch.multinomial( - tved.model_propensities_dict[d], - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no - # attribute `num_samples`. 
- self.num_samples, - replacement=True, - ) - .float() - .transpose(0, 1) - .contiguous() - .view(-1, 1) - ) - # transpose and reshape so that the contexts (rows) are arranged - # like [C1,...,CN,C1,...,CN,.....,C1,...,CN] - - # TODO: add context-action interaction here - contexts_actions_target_dict[d] = torch.cat( - [ - torch.cat([tved.contexts_dict[d]] * self.num_samples, dim=0), - actions_target_dict[d], - ], - dim=1, - ) - weights_target_dict[d] = torch.ones( - tved.num_examples_dict[d] * self.num_samples, 1 - ) - policy_indicators_target_dict[d] = torch.ones( - tved.num_examples_dict[d] * self.num_samples, 1, dtype=torch.float32 - ) - elif self.mode == "bope_weights": - # rows are outer products of actions and contexts, ordered first by - # context and then by action - # [[C0,A0], [C0,A1], [C0,A2], [C1,A0], [C1,A1], [C1,A2],...] - for d in ["train", "valid"]: - actions_target_dict[d] = torch.tensor( - list( - itertools.chain.from_iterable( - [ - [x] * tved.num_examples_dict[d] - for x in range(num_actions) - ] - ) - ), - dtype=torch.float32, - ).view(-1, 1) - weights_target_dict[d] = ( - tved.model_propensities_dict[d] - .transpose(0, 1) - .contiguous() - .view(-1, 1) - ) - policy_indicators_target_dict[d] = torch.ones( - tved.num_examples_dict[d] * num_actions, 1, dtype=torch.float32 - ) - # TODO: add context-action interaction here - contexts_actions_target_dict[d] = torch.cat( - [ - torch.cat([tved.contexts_dict[d]] * num_actions, dim=0), # 1498 - actions_target_dict[d], # 1496 - ], - dim=1, - ) - elif self.mode == "bope_weighted_targets": - # rows are outer products of actions and contexts, ordered first by - # context and then by action - # [[C0,A0], [C0,A1], [C0,A2], [C1,A0], [C1,A1], [C1,A2],...] - for d in ["train", "valid"]: - actions_target_dict[d] = torch.tensor( - list( - itertools.chain.from_iterable( - [ - [x] * tved.num_examples_dict[d] - for x in range(num_actions) - ] - ) - ), - dtype=torch.float32, - ).view(-1, 1) - weights_target_dict[d] = torch.ones( - tved.num_examples_dict[d] * num_actions, 1 - ) - policy_indicators_target_dict[d] = ( - tved.model_propensities_dict[d] - .transpose(0, 1) - .contiguous() - .view(-1, 1) - ) - # TODO: add context-action interaction here - contexts_actions_target_dict[d] = torch.cat( - [ - torch.cat([tved.contexts_dict[d]] * num_actions, dim=0), - actions_target_dict[d], - ], - dim=1, - ) - else: - raise ValueError("BOP-E mode '{}'' not supported".format(self.mode)) - contexts_actions_logged_dict = {} - weights_logged_dict = {} - policy_indicators_logged_dict = {} - contexts_actions_all_dict = {} - policy_indicators_all_dict = {} - weights_all_dict = {} - for d in ["train", "valid"]: - contexts_actions_logged_dict[d] = torch.cat( - (tved.contexts_dict[d], tved.actions_logged_dict[d]), dim=1 - ) # N*(d+1) - weights_logged_dict[d] = torch.ones( - tved.num_examples_dict[d], 1, dtype=torch.float32 - ) - policy_indicators_logged_dict[d] = torch.zeros( - tved.num_examples_dict[d], 1, dtype=torch.float32 - ) - contexts_actions_all_dict[d] = torch.cat( - [contexts_actions_logged_dict[d], contexts_actions_target_dict[d]], - dim=0, - ).numpy() - policy_indicators_all_dict[d] = torch.cat( - [policy_indicators_logged_dict[d], policy_indicators_target_dict[d]], - dim=0, - ).numpy() - weights_all_dict[d] = ( - torch.cat([weights_logged_dict[d], weights_target_dict[d]], dim=0) - .flatten() - .numpy() - ) - if ( - contexts_actions_all_dict[d].shape[0] - != policy_indicators_all_dict[d].shape[0] - ): - raise ValueError( - "number of rows in {} 
contexts_actions({}) and policy_" - "indicators({}) has to be equal".format( - d, - contexts_actions_all_dict[d].shape[0], - policy_indicators_all_dict[d].shape[0], - ) - ) - if contexts_actions_all_dict[d].shape[0] != weights_all_dict[d].shape[0]: - raise ValueError( - "number of rows in {} contexts_actions({}) and weights({})" - " has to be equal".format( - d, - contexts_actions_all_dict[d].shape[0], - weights_all_dict[d].shape[0], - ) - ) - contexts_actions_logged_dict["eval"] = torch.cat( - (tved.contexts_dict["eval"], tved.actions_logged_dict["eval"]), dim=1 - ) # N*(d+1) - - return EstimationData( - contexts_actions_train=contexts_actions_all_dict["train"], - policy_indicators_train=policy_indicators_all_dict["train"], - weights_train=weights_all_dict["train"], - contexts_actions_valid=contexts_actions_all_dict["valid"], - policy_indicators_valid=policy_indicators_all_dict["valid"], - weights_valid=weights_all_dict["valid"], - contexts_actions_eval=contexts_actions_logged_dict["eval"], - contexts_train=None, - actions_logged_train=None, - contexts_valid=None, - actions_logged_valid=None, - contexts_eval=None, - actions_logged_eval=None, - model_propensities_eval=tved.model_propensities_dict["eval"], - model_rewards_eval=tved.model_rewards_dict["eval"], - action_mask_eval=tved.action_mask_dict["eval"], - logged_rewards_eval=tved.logged_rewards_dict["eval"], - model_rewards_for_logged_action_eval=tved.model_rewards_for_logged_action_dict[ - "eval" - ], - logged_propensities_eval=tved.logged_propensities_dict["eval"], - ) - - def _estimate_xgboost_model( - self, - ed: EstimationData, - # pyre-fixme[9]: xgb_params has type `Dict[str, Union[float, int, str]]`; - # used as `None`. - xgb_params: Dict[str, Union[str, float, int]] = None, - nthread: int = 8, - ) -> xgb.Booster: - if xgb_params is None: - xgb_params = {} - dmatrix_train = xgb.DMatrix( - ed.contexts_actions_train, - ed.policy_indicators_train, - nthread=nthread, - weight=ed.weights_train, - ) - dmatrix_valid = xgb.DMatrix( - ed.contexts_actions_valid, - ed.policy_indicators_valid, - nthread=nthread, - weight=ed.weights_valid, - ) - if xgb_params is not None: # check for None to satisfy a test - xgb_params.update({"objective": "binary:logistic"}) - classifier: xgb.Booster = xgb.train( - xgb_params, - dmatrix_train, - evals=[(dmatrix_valid, "validation_set")], - verbose_eval=False, - ) - return classifier - - def _get_importance_sampling_inputs( - self, - ed: EstimationData, - # pyre-fixme[9]: xgb_params has type `Dict[str, Union[float, int, str]]`; - # used as `None`. - xgb_params: Dict[str, Union[str, float, int]] = None, - ) -> ImportanceSamplingData: - classifier = self._estimate_xgboost_model(ed, xgb_params) - - # predictions are made only for the eval set to prevent classifier - # overfitting - predictions = classifier.predict(xgb.DMatrix(ed.contexts_actions_eval)) - - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no attribute `mode`. - if self.mode == "bope_sampling": - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no attribute - # `num_samples`. 
- bope_weight_normalization_factor = 1.0 / self.num_samples - else: - bope_weight_normalization_factor = 1.0 - - importance_weights = ( - torch.tensor(predictions / (1.0 - predictions), dtype=torch.float32).view( - -1, 1 - ) - * bope_weight_normalization_factor - ) - return ImportanceSamplingData( - importance_weight=importance_weights, - logged_rewards=ed.logged_rewards_eval, - model_rewards=ed.model_rewards_eval, - model_rewards_for_logged_action=ed.model_rewards_for_logged_action_eval, - model_propensities=ed.model_propensities_eval, - ) - - def estimate( - self, edp: EvaluationDataPage, hp: Optional[DoublyRobustHP] = None - ) -> Tuple[CpeEstimate, CpeEstimate, CpeEstimate]: - if hp is None: - raise ValueError("Hyperparameters have to be provided for BOP-E") - if hp.bope_mode is None: - raise ValueError("bope_mode has to be specified in hyperparameters") - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no attribute `mode`. - self.mode = hp.bope_mode - if (self.mode == "bope_sampling") and (hp.bope_num_samples is None): - raise ValueError( - "Number of samples has to be specified for mode 'bope_sampling'" - ) - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no attribute `num_samples`. - self.num_samples = 0 if hp.bope_num_samples is None else hp.bope_num_samples - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no attribute `frac_train`. - self.frac_train = hp.frac_train - # pyre-fixme[16]: `DoublyRobustEstimatorBOPE` has no attribute `frac_valid`. - self.frac_valid = hp.frac_train - xgb_params: Dict[str, Union[str, float, int]] = hp.xgb_params or {} - ed = self._prepare_data(edp) - isd = self._get_importance_sampling_inputs(ed, xgb_params) - return self._get_importance_sampling_estimates(isd, hp=hp) - - -class DoublyRobustEstimatorEstProp(DoublyRobustEstimator): - def _prepare_data(self, edp: EvaluationDataPage) -> EstimationData: - assert ( - edp.contexts is not None - ), "edp.contexts have to be specified when using the estimation-based methods" - # pyre-fixme[16]: `DoublyRobustEstimatorEstProp` has no attribute `num_actions`. - self.num_actions = edp.model_propensities.shape[1] - # pyre-fixme[16]: `DoublyRobustEstimatorEstProp` has no attribute `frac_train`. - # pyre-fixme[16]: `DoublyRobustEstimatorEstProp` has no attribute `frac_valid`. - tved = self._split_data(edp, self.frac_train, self.frac_valid) - - return EstimationData( - contexts_actions_train=None, - policy_indicators_train=None, - weights_train=None, - contexts_actions_valid=None, - policy_indicators_valid=None, - weights_valid=None, - contexts_actions_eval=None, - contexts_train=tved.contexts_dict["train"], - actions_logged_train=tved.actions_logged_dict["train"], - contexts_valid=tved.contexts_dict["valid"], - actions_logged_valid=tved.actions_logged_dict["valid"], - contexts_eval=tved.contexts_dict["eval"], - actions_logged_eval=tved.actions_logged_dict["eval"], - model_propensities_eval=tved.model_propensities_dict["eval"], - model_rewards_eval=tved.model_rewards_dict["eval"], - action_mask_eval=tved.action_mask_dict["eval"], - logged_rewards_eval=tved.logged_rewards_dict["eval"], - model_rewards_for_logged_action_eval=tved.model_rewards_for_logged_action_dict[ - "eval" - ], - logged_propensities_eval=tved.logged_propensities_dict["eval"], - ) - - def _estimate_xgboost_model( - self, - ed: EstimationData, - num_classes: int, - # pyre-fixme[9]: xgb_params has type `Dict[str, Union[float, int, str]]`; - # used as `None`. 
- xgb_params: Dict[str, Union[str, float, int]] = None, - nthread: int = 8, - ) -> xgb.Booster: - if xgb_params is None: - xgb_params = {} - dmatrix_train = xgb.DMatrix( - ed.contexts_train, ed.actions_logged_train, nthread=nthread - ) - dmatrix_valid = xgb.DMatrix( - ed.contexts_valid, ed.actions_logged_valid, nthread=nthread - ) - xgb_params = xgb_params.copy() - xgb_params.update( - {"objective": "multi:softprob", "num_class": num_classes, "n_gpus": 0} - ) - classifier: xgb.Booster = xgb.train( - xgb_params, - dmatrix_train, - evals=[(dmatrix_valid, "validation_set")], - verbose_eval=False, - ) - return classifier - - def _get_importance_sampling_inputs( - self, - ed: EstimationData, - # pyre-fixme[9]: xgb_params has type `Dict[str, Union[float, int, str]]`; - # used as `None`. - xgb_params: Dict[str, Union[str, float, int]] = None, - ): - # pyre-fixme[16]: `DoublyRobustEstimatorEstProp` has no attribute `num_actions`. - classifier = self._estimate_xgboost_model(ed, self.num_actions, xgb_params) - # predictions are made only for the eval set to prevent classifier - # overfitting - predicted_logged_propensities_all_actions = torch.tensor( - classifier.predict(xgb.DMatrix(ed.contexts_eval)), dtype=torch.float32 - ) - if ed.actions_logged_eval is None: - raise ValueError("ed.actions_logged_eval has to be non-None") - ret = predicted_logged_propensities_all_actions.gather( - 1, - # pyre-fixme[16]: `Optional` has no attribute `long`. - ed.actions_logged_eval.long(), - ) - predicted_logged_policy_propensities_logged_actions = ret - - target_propensity_for_action = torch.sum( - ed.model_propensities_eval * ed.action_mask_eval, dim=1, keepdim=True - ) - - importance_weights = ( - target_propensity_for_action - / predicted_logged_policy_propensities_logged_actions - ).float() - return ImportanceSamplingData( - importance_weight=importance_weights, - logged_rewards=ed.logged_rewards_eval, - model_rewards=ed.model_rewards_eval, - model_rewards_for_logged_action=ed.model_rewards_for_logged_action_eval, - model_propensities=ed.model_propensities_eval, - ) - - def estimate( - self, edp: EvaluationDataPage, hp: Optional[DoublyRobustHP] = None - ) -> Tuple[CpeEstimate, CpeEstimate, CpeEstimate]: - hp = hp or DoublyRobustHP() - # pyre-fixme[16]: `DoublyRobustEstimatorEstProp` has no attribute `frac_train`. - self.frac_train = hp.frac_train - # pyre-fixme[16]: `DoublyRobustEstimatorEstProp` has no attribute `frac_valid`. - self.frac_valid = hp.frac_valid - xgb_params = hp.xgb_params or {} - ed = self._prepare_data(edp) - isd = self._get_importance_sampling_inputs(ed, xgb_params) - return self._get_importance_sampling_estimates(isd, hp=hp) diff --git a/reagent/evaluation/evaluation_data_page.py b/reagent/evaluation/evaluation_data_page.py index 59a95a6a5..7fd63e00a 100644 --- a/reagent/evaluation/evaluation_data_page.py +++ b/reagent/evaluation/evaluation_data_page.py @@ -1,25 +1,31 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+from __future__ import annotations + import logging import math -from typing import NamedTuple, Optional, cast +from dataclasses import dataclass, fields +from typing import cast, Optional, TYPE_CHECKING import numpy as np import torch import torch.nn as nn -from reagent import types as rlt -from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet -from reagent.torch_utils import masked_softmax -from reagent.training import ParametricDQNTrainer -from reagent.training.dqn_trainer import DQNTrainer -from reagent.training.trainer import Trainer +from reagent.core import types as rlt +from reagent.core.torch_utils import masked_softmax +from reagent.model_utils.seq2slate_utils import Seq2SlateMode +from reagent.models.seq2slate import Seq2SlateTransformerNet + +if TYPE_CHECKING: + from reagent.training import ParametricDQNTrainer, ReAgentLightningModule + from reagent.training.dqn_trainer import DQNTrainer logger = logging.getLogger(__name__) -class EvaluationDataPage(NamedTuple): +@dataclass +class EvaluationDataPage(rlt.TensorDataClass): mdp_id: Optional[torch.Tensor] sequence_number: Optional[torch.Tensor] logged_propensities: torch.Tensor @@ -29,7 +35,6 @@ class EvaluationDataPage(NamedTuple): model_rewards: torch.Tensor model_rewards_for_logged_action: torch.Tensor model_values: Optional[torch.Tensor] = None - model_values_for_logged_action: Optional[torch.Tensor] = None possible_actions_mask: Optional[torch.Tensor] = None optimal_q_values: Optional[torch.Tensor] = None eval_action_idxs: Optional[torch.Tensor] = None @@ -46,16 +51,13 @@ class EvaluationDataPage(NamedTuple): @classmethod def create_from_training_batch( cls, - tdb: rlt.PreprocessedTrainingBatch, - trainer: Trainer, + tdb: rlt.PreprocessedRankingInput, + trainer: ReAgentLightningModule, reward_network: Optional[nn.Module] = None, ): if isinstance(tdb, rlt.DiscreteDqnInput): - # pyre-fixme[22]: The cast is redundant. discrete_training_input = cast(rlt.DiscreteDqnInput, tdb) - return EvaluationDataPage.create_from_tensors_dqn( - # pyre-fixme[6]: Expected `DQNTrainer` for 1st param but got `Trainer`. trainer, tdb.extras.mdp_id, tdb.extras.sequence_number, @@ -68,29 +70,20 @@ def create_from_training_batch( ) elif isinstance(tdb, rlt.ParametricDqnInput): return EvaluationDataPage.create_from_tensors_parametric_dqn( - # pyre-fixme[6]: Expected `ParametricDQNTrainer` for 1st param but - # got `Trainer`. trainer, - # pyre-fixme[16]: `Optional` has no attribute `mdp_id`. tdb.extras.mdp_id, - # pyre-fixme[16]: `Optional` has no attribute `sequence_number`. tdb.extras.sequence_number, tdb.state, tdb.action, - # pyre-fixme[16]: `Optional` has no attribute `action_probability`. tdb.extras.action_probability, tdb.reward, tdb.possible_actions_mask, tdb.possible_actions, - # pyre-fixme[16]: `Optional` has no attribute `max_num_actions`. tdb.extras.max_num_actions, - # pyre-fixme[16]: `Optional` has no attribute `metrics`. metrics=tdb.extras.metrics, ) else: - raise NotImplementedError( - f"training_input type: {type(tdb.training_input)}" - ) + raise NotImplementedError(f"training_input type: {type(tdb)}") @classmethod @torch.no_grad() @@ -118,7 +111,6 @@ def create_from_tensors_seq2slate( batch_size, tgt_seq_len, candidate_dim, - # pyre-fixme[16]: `Optional` has no attribute `float_features`. 
) = training_input.tgt_out_seq.float_features.shape device = training_input.state.float_features.device @@ -129,8 +121,9 @@ def create_from_tensors_seq2slate( if eval_greedy: model_propensities = torch.ones(batch_size, 1, device=device) action_mask = torch.all( - # pyre-fixme[6]: Expected `int` for 1st param but got - # `Optional[torch.Tensor]`. + # pyre-fixme[58]: `-` is not supported for operand types + # `Optional[torch.Tensor]` and `int`. + # pyre-fixme[6]: For 1st param expected `Tensor` but got `bool`. (training_input.tgt_out_idx - 2) == (rank_output.ranked_tgt_out_idx - 2), dim=1, @@ -152,13 +145,13 @@ def create_from_tensors_seq2slate( model_rewards_for_logged_action = reward_network( training_input.state.float_features, training_input.src_seq.float_features, + # pyre-fixme[16]: `Optional` has no attribute `float_features`. training_input.tgt_out_seq.float_features, training_input.src_src_mask, training_input.tgt_out_idx, ).reshape(-1, 1) ranked_tgt_out_seq = training_input.src_seq.float_features[ - # pyre-fixme[16]: `Tensor` has no attribute `repeat_interleave`. torch.arange(batch_size, device=device).repeat_interleave(tgt_seq_len), rank_output.ranked_tgt_out_idx.flatten() - 2, ].reshape(batch_size, tgt_seq_len, candidate_dim) @@ -202,10 +195,13 @@ def create_from_tensors_parametric_dqn( max_num_actions: int, metrics: Optional[torch.Tensor] = None, ): + reward_network = trainer.reward_network + assert reward_network is not None, "CFEval requires a trained reward network" + old_q_train_state = trainer.q_network.training - old_reward_train_state = trainer.reward_network.training + old_reward_train_state = reward_network.training trainer.q_network.train(False) - trainer.reward_network.train(False) + reward_network.train(False) tiled_state = states.float_features.repeat(1, max_num_actions).reshape( -1, states.float_features.shape[1] @@ -214,11 +210,11 @@ def create_from_tensors_parametric_dqn( # Get Q-value of action taken possible_actions_state_concat = (rlt.FeatureData(tiled_state), possible_actions) - # FIXME: model_values, model_values_for_logged_action, and model_metrics_values - # should be calculated using q_network_cpe (as in discrete dqn). + # FIXME: model_values and model_metrics_values should be + # calculated using q_network_cpe (as in discrete dqn). # q_network_cpe has not been added in parametric dqn yet. model_values = trainer.q_network(*possible_actions_state_concat) - optimal_q_values, _ = trainer.get_detached_q_values( + optimal_q_values, _ = trainer.get_detached_model_outputs( *possible_actions_state_concat ) eval_action_idxs = None @@ -236,12 +232,13 @@ def create_from_tensors_parametric_dqn( model_values = model_values.reshape(possible_actions_mask.shape) optimal_q_values = optimal_q_values.reshape(possible_actions_mask.shape) model_propensities = masked_softmax( - optimal_q_values, possible_actions_mask, trainer.rl_temperature + optimal_q_values, + # pyre-fixme[6]: For 2nd param expected `float` but got `Tensor`. 
+ possible_actions_mask, + trainer.rl_temperature, ) - rewards_and_metric_rewards = trainer.reward_network( - *possible_actions_state_concat - ) + rewards_and_metric_rewards = reward_network(*possible_actions_state_concat) model_rewards = rewards_and_metric_rewards[:, :1] assert ( model_rewards.shape[0] * model_rewards.shape[1] @@ -257,10 +254,7 @@ def create_from_tensors_parametric_dqn( model_metrics = rewards_and_metric_rewards[:, 1:] model_metrics = model_metrics.reshape(possible_actions_mask.shape[0], -1) - model_values_for_logged_action = trainer.q_network(states, actions) - model_rewards_and_metrics_for_logged_action = trainer.reward_network( - states, actions - ) + model_rewards_and_metrics_for_logged_action = reward_network(states, actions) model_rewards_for_logged_action = model_rewards_and_metrics_for_logged_action[ :, :1 ] @@ -283,7 +277,7 @@ def create_from_tensors_parametric_dqn( model_metrics_values = model_values.repeat(1, num_metrics) trainer.q_network.train(old_q_train_state) - trainer.reward_network.train(old_reward_train_state) + reward_network.train(old_reward_train_state) return cls( mdp_id=mdp_ids, @@ -294,7 +288,6 @@ def create_from_tensors_parametric_dqn( model_rewards=model_rewards, model_rewards_for_logged_action=model_rewards_for_logged_action, model_values=model_values, - model_values_for_logged_action=model_values_for_logged_action, model_metrics_values=model_metrics_values, model_metrics_values_for_logged_action=model_metrics_values_for_logged_action, model_propensities=model_propensities, @@ -326,24 +319,36 @@ def create_from_tensors_dqn( old_q_train_state = trainer.q_network.training # pyre-fixme[16]: `DQNTrainer` has no attribute `reward_network`. old_reward_train_state = trainer.reward_network.training - # pyre-fixme[16]: `DQNTrainer` has no attribute `q_network_cpe`. + # pyre-fixme[16]: Item `Tensor` of `Union[Tensor, Module]` has no attribute + # `training`. old_q_cpe_train_state = trainer.q_network_cpe.training trainer.q_network.train(False) + # pyre-fixme[16]: `Tensor` has no attribute `train`. trainer.reward_network.train(False) + # pyre-fixme[16]: Item `Tensor` of `Union[Tensor, Module]` has no attribute + # `train`. trainer.q_network_cpe.train(False) num_actions = trainer.num_actions action_mask = actions.float() - # pyre-fixme[6]: Expected `torch.Tensor` for 2nd positional only parameter + # pyre-fixme[6]: Expected `Tensor` for 2nd param but got `FeatureData`. rewards = trainer.boost_rewards(rewards, actions) + # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function. model_values = trainer.q_network_cpe(states)[:, 0:num_actions] - optimal_q_values, _ = trainer.get_detached_q_values(states) + # TODO: make generic get_action_idxs for each trainer class + # Note: model_outputs are obtained from the q_network for DQN algorithms + # and from the actor_network for CRR. + model_outputs, _ = trainer.get_detached_model_outputs(states) + # Note: eval_action_idxs is used in evaluate_post_training() function in evaluator.py eval_action_idxs = trainer.get_max_q_values( - optimal_q_values, possible_actions_mask + model_outputs, possible_actions_mask )[1] model_propensities = masked_softmax( - optimal_q_values, possible_actions_mask, trainer.rl_temperature + model_outputs, + # pyre-fixme[6]: For 2nd param expected `float` but got `Tensor`. 
+ possible_actions_mask, + trainer.rl_temperature, ) assert model_values.shape == actions.shape, ( "Invalid shape: " + str(model_values.shape) + " != " + str(actions.shape) @@ -354,10 +359,8 @@ def create_from_tensors_dqn( + " != " + str(possible_actions_mask.shape) ) - model_values_for_logged_action = torch.sum( - model_values * action_mask, dim=1, keepdim=True - ) + # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function. rewards_and_metric_rewards = trainer.reward_network(states) # In case we reuse the modular for Q-network @@ -387,6 +390,7 @@ def create_from_tensors_dqn( model_metrics_for_logged_action = None model_metrics_values_for_logged_action = None else: + # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function. model_metrics_values = trainer.q_network_cpe(states) # Backward compatility if hasattr(model_metrics_values, "q_values"): @@ -426,8 +430,12 @@ def create_from_tensors_dqn( model_metrics_values_for_logged_action_list, dim=1 ) + # pyre-fixme[16]: Item `Tensor` of `Union[Tensor, Module]` has no attribute + # `train`. trainer.q_network_cpe.train(old_q_cpe_train_state) trainer.q_network.train(old_q_train_state) + # pyre-fixme[16]: Item `Tensor` of `Union[Tensor, Module]` has no attribute + # `train`. trainer.reward_network.train(old_reward_train_state) return cls( @@ -439,7 +447,6 @@ def create_from_tensors_dqn( model_rewards=model_rewards, model_rewards_for_logged_action=model_rewards_for_logged_action, model_values=model_values, - model_values_for_logged_action=model_values_for_logged_action, model_metrics_values=model_metrics_values, model_metrics_values_for_logged_action=model_metrics_values_for_logged_action, model_propensities=model_propensities, @@ -450,28 +457,28 @@ def create_from_tensors_dqn( logged_values=None, logged_metrics_values=None, possible_actions_mask=possible_actions_mask, - optimal_q_values=optimal_q_values, + optimal_q_values=model_outputs, eval_action_idxs=eval_action_idxs, ) def append(self, edp): new_edp = {} - for x in EvaluationDataPage._fields: - t = getattr(self, x) - other_t = getattr(edp, x) + for x in fields(EvaluationDataPage): + t = getattr(self, x.name) + other_t = getattr(edp, x.name) assert int(t is not None) + int(other_t is not None) != 1, ( "Tried to append when a tensor existed in one training page but not the other: " - + x + + x.name ) if other_t is not None: if isinstance(t, torch.Tensor): - new_edp[x] = torch.cat((t, other_t), dim=0) + new_edp[x.name] = torch.cat((t, other_t), dim=0) elif isinstance(t, np.ndarray): - new_edp[x] = np.concatenate((t, other_t), axis=0) + new_edp[x.name] = np.concatenate((t, other_t), axis=0) else: raise Exception("Invalid type in training data page") else: - new_edp[x] = None + new_edp[x.name] = None return EvaluationDataPage(**new_edp) def sort(self): @@ -480,22 +487,32 @@ def sort(self): idxs.append((mdp_id, int(seq_num), i)) sorted_idxs = [i for _mdp_id, _seq_num, i in sorted(idxs)] new_edp = {} - for x in EvaluationDataPage._fields: - t = getattr(self, x) - new_edp[x] = t[sorted_idxs] if t is not None else None + for x in fields(EvaluationDataPage): + t = getattr(self, x.name) + new_edp[x.name] = t[sorted_idxs] if t is not None else None return EvaluationDataPage(**new_edp) def compute_values(self, gamma: float): assert self.mdp_id is not None and self.sequence_number is not None logged_values = EvaluationDataPage.compute_values_for_mdps( - self.logged_rewards, self.mdp_id, self.sequence_number, gamma + self.logged_rewards, + self.mdp_id, + self.sequence_number, + gamma, 
) if self.logged_metrics is not None: logged_metrics_values: Optional[ torch.Tensor ] = EvaluationDataPage.compute_values_for_mdps( - self.logged_metrics, self.mdp_id, self.sequence_number, gamma + self.logged_metrics, + # pyre-fixme[6]: Expected `Tensor` for 2nd param but got + # `Optional[torch.Tensor]`. + self.mdp_id, + # pyre-fixme[6]: Expected `Tensor` for 3rd param but got + # `Optional[torch.Tensor]`. + self.sequence_number, + gamma, ) else: logged_metrics_values = None @@ -561,7 +578,7 @@ def validate(self): assert self.model_metrics_values.shape[1] == num_metrics * num_actions minibatch_size = self.logged_propensities.shape[0] - logger.info("EvaluationDataPage minibatch size: {}".format(minibatch_size)) + logger.info("EvaluationDataPage data size: {}".format(minibatch_size)) assert minibatch_size == self.logged_rewards.shape[0] assert minibatch_size == self.logged_values.shape[0] assert minibatch_size == self.model_propensities.shape[0] @@ -573,6 +590,20 @@ def validate(self): assert minibatch_size == self.model_metrics.shape[0] assert minibatch_size == self.model_metrics_values.shape[0] + logger.info("Average logged reward = %s", self.logged_rewards.mean()) + logger.info( + "Average model propensity for action 0 = %s", + self.model_propensities[:, 0].mean(), + ) + logger.info( + "Average model propensity for action 1 = %s", + self.model_propensities[:, 1].mean(), + ) + logger.info( + "Average logged propensity = %s", + self.logged_propensities.mean(), + ) + flatten_mdp_id = self.mdp_id.reshape(-1) unique_mdp_ids = set(flatten_mdp_id.tolist()) prev_mdp_id, prev_seq_num = None, None @@ -602,6 +633,7 @@ def set_metric_as_reward(self, i: int, num_actions: int): return self._replace( logged_rewards=self.logged_metrics[:, i : i + 1], + # pyre-fixme[16]: `Optional` has no attribute `__getitem__`. logged_values=self.logged_metrics_values[:, i : i + 1], model_rewards=self.model_metrics[ :, i * num_actions : (i + 1) * num_actions diff --git a/reagent/evaluation/evaluator.py b/reagent/evaluation/evaluator.py index 7df5e08e7..8ae500861 100644 --- a/reagent/evaluation/evaluator.py +++ b/reagent/evaluation/evaluator.py @@ -20,7 +20,6 @@ logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) def get_tensor(x, dtype=None): @@ -66,8 +65,8 @@ def __init__(self, action_names, gamma, model, metrics_to_score=None) -> None: self.doubly_robust_estimator = DoublyRobustEstimator() self.sequential_doubly_robust_estimator = SequentialDoublyRobustEstimator(gamma) - self.weighted_sequential_doubly_robust_estimator = WeightedSequentialDoublyRobustEstimator( - gamma + self.weighted_sequential_doubly_robust_estimator = ( + WeightedSequentialDoublyRobustEstimator(gamma) ) def evaluate_post_training(self, edp: EvaluationDataPage) -> CpeDetails: @@ -98,6 +97,7 @@ def evaluate_post_training(self, edp: EvaluationDataPage) -> CpeDetails: action: float(value_means[i]) for i, action in enumerate(self.action_names) } + # pyre-ignore [16]: `Optional` has no attribute `std` value_stds = edp.optimal_q_values.std(dim=0) cpe_details.q_value_stds = { action: float(value_stds[i]) @@ -105,17 +105,12 @@ def evaluate_post_training(self, edp: EvaluationDataPage) -> CpeDetails: } if edp.eval_action_idxs is not None: cpe_details.action_distribution = { - # pyre-fixme[6]: Expected `Union[_SupportsIndex, bytearray, - # bytes, str, typing.SupportsFloat]` for 1st param but got - # `ByteTensor`. 
+ # pyre-ignore [16]: `bool` has no attribute `sum` action: float((edp.eval_action_idxs == i).sum()) + # pyre-ignore [16]: `Optional` has no attribute `shape` / edp.eval_action_idxs.shape[0] for i, action in enumerate(self.action_names) } - # Compute MC Loss on Aggregate Reward - cpe_details.mc_loss = float( - F.mse_loss(edp.logged_values, edp.model_values_for_logged_action) - ) # pyre-fixme[16]: `Evaluator` has no attribute `notify_observers`. self.notify_observers(cpe_details=cpe_details) return cpe_details @@ -127,8 +122,10 @@ def score_cpe(self, metric_name, edp: EvaluationDataPage): doubly_robust, ) = self.doubly_robust_estimator.estimate(edp) sequential_doubly_robust = self.sequential_doubly_robust_estimator.estimate(edp) - weighted_doubly_robust = self.weighted_sequential_doubly_robust_estimator.estimate( - edp, num_j_steps=1, whether_self_normalize_importance_weights=True + weighted_doubly_robust = ( + self.weighted_sequential_doubly_robust_estimator.estimate( + edp, num_j_steps=1, whether_self_normalize_importance_weights=True + ) ) magic = self.weighted_sequential_doubly_robust_estimator.estimate( edp, diff --git a/reagent/test/gym/__init__.py b/reagent/evaluation/feature_importance/__init__.py similarity index 100% rename from reagent/test/gym/__init__.py rename to reagent/evaluation/feature_importance/__init__.py diff --git a/reagent/evaluation/feature_importance/feature_importance_base.py b/reagent/evaluation/feature_importance/feature_importance_base.py new file mode 100644 index 000000000..e7a409bf6 --- /dev/null +++ b/reagent/evaluation/feature_importance/feature_importance_base.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from typing import List + +import pandas as pd +import torch.nn as nn +from reagent.core.dataclasses import dataclass + + +@dataclass +class FeatureImportanceBase: + model: nn.Module + sorted_feature_ids: List[int] + + def compute_feature_importance(self) -> pd.DataFrame: + raise NotImplementedError() diff --git a/reagent/evaluation/feature_importance/feature_importance_perturbation.py b/reagent/evaluation/feature_importance/feature_importance_perturbation.py new file mode 100644 index 000000000..3bd9074ab --- /dev/null +++ b/reagent/evaluation/feature_importance/feature_importance_perturbation.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
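The new `reagent/evaluation/feature_importance/` package starts from a deliberately small interface: `FeatureImportanceBase` only carries a model and the feature ids to score, and subclasses implement `compute_feature_importance()` returning a `pandas.DataFrame`. As a quick orientation (the perturbation-based subclass added next in this diff is the real implementation), a minimal hypothetical subclass could look like the sketch below; the class name and the constant scores are illustrative only, not part of this change.

```python
import pandas as pd

from reagent.core.dataclasses import dataclass
from reagent.evaluation.feature_importance.feature_importance_base import (
    FeatureImportanceBase,
)


@dataclass
class ConstantFeatureImportance(FeatureImportanceBase):
    """Hypothetical subclass: scores every feature identically."""

    def compute_feature_importance(self) -> pd.DataFrame:
        # One row per feature id, matching the DataFrame contract of the base class.
        return pd.DataFrame(
            {
                "feature_id": self.sorted_feature_ids,
                "feature_importance": [1.0] * len(self.sorted_feature_ids),
            }
        )
```

Instances are constructed with the base-class fields (`model`, `sorted_feature_ids`) plus whatever fields a subclass adds.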
+import copy +import logging +from collections import defaultdict +from typing import Any, Callable, Optional + +import pandas as pd +import torch +import torch.nn as nn +from reagent.core.dataclasses import dataclass +from reagent.evaluation.feature_importance.feature_importance_base import ( + FeatureImportanceBase, +) + +logger = logging.getLogger(__name__) + + +@dataclass +class FeatureImportancePerturbation(FeatureImportanceBase): + data_loader: Any + + # Consume model (first arg) and data (second arg) to make model predictions + # Expected to return a tensor of shape (batch_size, 1) + pred_fn: Callable[[nn.Module, Any], torch.Tensor] + + # Perturb data (first arg) on a specific feature id (second arg) + perturb_fn: Callable[[Any, int], Any] + + # How many rounds of perturbations for collecting feature importance for each batch + # The higher it is, the less variance the result will have + repeat: int = 1 + + def compute_feature_importance(self) -> pd.DataFrame: + feature_importance_vals = defaultdict(list) + for batch_idx, data in enumerate(self.data_loader): + for r in range(self.repeat): + pred_value = self.pred_fn(self.model, data) + for feature_idx, feature_id in enumerate(self.sorted_feature_ids): + copy_data = copy.deepcopy(data) + perturbed_data = self.perturb_fn(copy_data, feature_idx) + perturbed_pred_value = self.pred_fn(self.model, perturbed_data) + feature_importance_vals[feature_id].append( + torch.mean( + torch.abs(perturbed_pred_value - pred_value) + ).detach() + ) + logger.info(f"Processed {batch_idx} batches {r}-th time") + + feature_importance_mean = { + k: torch.mean(torch.stack(v)).item() + for k, v in feature_importance_vals.items() + } + result_df = pd.DataFrame.from_dict( + feature_importance_mean, orient="index", columns=["feature_importance"] + ).sort_values(by=["feature_importance"], ascending=False) + # Fblearner UI can't show row names (index). So manually add names as a column + result_df.insert(0, "feature_id", result_df.index) + return result_df + + +def create_default_perturb_fn(key: str): + def default_perturb_fn( + data, + feature_idx, + ): + val_data, presence_data = data[key] + batch_size = val_data.shape[0] + random_idx = torch.randperm(batch_size) + val_data[:, feature_idx] = val_data[:, feature_idx][random_idx] + presence_data[:, feature_idx] = presence_data[:, feature_idx][random_idx] + return data + + return default_perturb_fn diff --git a/reagent/evaluation/ope_adapter.py b/reagent/evaluation/ope_adapter.py new file mode 100644 index 000000000..41fc448c4 --- /dev/null +++ b/reagent/evaluation/ope_adapter.py @@ -0,0 +1,333 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
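Putting the pieces above together, a rough usage sketch follows. Everything named `my_*` is a placeholder, the `"state"` key passed to `create_default_perturb_fn` is an assumption about how each batch dict is laid out (it must map to a `(values, presence)` tensor pair), and the `pred_fn` here simply calls the model on the batch.

```python
from reagent.evaluation.feature_importance.feature_importance_perturbation import (
    FeatureImportancePerturbation,
    create_default_perturb_fn,
)


def pred_fn(model, batch):
    # Must return a (batch_size, 1) tensor, per the pred_fn contract above.
    return model(batch)


feature_importance = FeatureImportancePerturbation(
    model=my_model,                      # placeholder: a trained nn.Module
    sorted_feature_ids=[100, 101, 102],  # placeholder feature ids
    data_loader=my_data_loader,          # placeholder: yields batches accepted by pred_fn
    pred_fn=pred_fn,
    # Shuffle one feature column (values and presence) within batch["state"].
    perturb_fn=create_default_perturb_fn("state"),
    repeat=3,  # more repeats -> lower variance in the importance estimates
)
result_df = feature_importance.compute_feature_importance()
print(result_df.head())  # highest-importance features first
```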
+ +import logging + +import torch +from reagent.evaluation.cpe import ( + bootstrapped_std_error_of_mean, + CpeEstimate, + CpeEstimateSet, +) +from reagent.evaluation.evaluation_data_page import EvaluationDataPage +from reagent.evaluation.evaluator import Evaluator +from reagent.evaluation.weighted_sequential_doubly_robust_estimator import ( + WeightedSequentialDoublyRobustEstimator, +) +from reagent.ope.estimators.contextual_bandits_estimators import ( + BanditsEstimatorInput, + DMEstimator, + DoublyRobustEstimator, + IPSEstimator, + LogSample, + ModelOutputs, +) +from reagent.ope.estimators.estimator import ( + Estimator, + EstimatorResult, + EstimatorResults, +) +from reagent.ope.estimators.sequential_estimators import ( + Action, + ActionDistribution, + DoublyRobustEstimator as SeqDREstimator, + MAGICEstimator, + RLEstimator, + RLEstimatorInput, + RLPolicy, + State, + Transition, + ValueFunction, +) +from reagent.ope.estimators.types import ActionSpace + + +logger = logging.getLogger(__name__) + + +class OPEstimatorAdapter: + def __init__(self, ope_estimator: Estimator, device=None): + self._ope_estimator = ope_estimator + self._device = device + + @staticmethod + def edp_to_contextual_bandit_log( + edp: EvaluationDataPage, device=None + ) -> BanditsEstimatorInput: + log = [] + n = edp.model_rewards.shape[0] + for idx in range(n): + # Action is only 1 if tgt policy and log policy took same action? + action = torch.argmax(edp.action_mask[idx]).item() + # pyre-fixme[6]: For 1st param expected `Union[None, List[typing.Any], + # int, slice, Tensor, typing.Tuple[typing.Any, ...]]` but got `Union[bool, + # float, int]`. + if edp.action_mask[idx][action] == 0.0: + action = None + logged_propensities = torch.zeros( + edp.model_propensities[idx].shape, device=device + ) + if action is not None: + # pyre-fixme[6]: For 1st param expected `Union[None, + # List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, + # ...]]` but got `Union[bool, float, int]`. + logged_propensities[action] = edp.logged_propensities[idx] + log.append( + LogSample( + context=None if edp.contexts is None else edp.contexts[idx], + # pyre-fixme[6]: For 1st param expected `Union[Tuple[float], + # Tuple[int], float, int, ndarray, Tensor]` but got `Union[None, + # bool, float, int]`. + log_action=Action(action), + # pyre-fixme[6]: For 3rd param expected `float` but got `Tensor`. + log_reward=edp.logged_rewards[idx], + log_action_probabilities=ActionDistribution(logged_propensities), + tgt_action_probabilities=ActionDistribution( + edp.model_propensities[idx] + ), + # pyre-fixme[6]: For 1st param expected `Union[Tuple[float], + # Tuple[int], float, int, ndarray, Tensor]` but got `Union[None, + # bool, float, int]`. + tgt_action=Action(action), + model_outputs=ModelOutputs( + # pyre-fixme[6]: For 1st param expected `float` but got + # `Tensor`. + tgt_reward_from_log_action=edp.model_rewards_for_logged_action[ + idx + ], + # pyre-fixme[6]: For 2nd param expected `Sequence[float]` + # but got `Tensor`. 
+ tgt_rewards=edp.model_rewards[idx], + ) + # item features not specified as edp came from trained reward model + ) + ) + return BanditsEstimatorInput(ActionSpace(edp.action_mask.shape[1]), log, True) + + @staticmethod + def estimator_result_to_cpe_estimate(result: EstimatorResult) -> CpeEstimate: + assert result.estimated_reward_normalized is not None + assert result.estimated_reward_normalized is not None + assert result.estimated_reward_std_error is not None + assert result.estimated_reward_normalized_std_error is not None + return CpeEstimate( + raw=result.estimated_reward, + normalized=result.estimated_reward_normalized, + raw_std_error=result.estimated_reward_std_error, + normalized_std_error=result.estimated_reward_normalized_std_error, + ) + + def estimate(self, edp: EvaluationDataPage, **kwargs) -> CpeEstimate: + result = self._ope_estimator.evaluate( + OPEstimatorAdapter.edp_to_contextual_bandit_log(edp), **kwargs + ) + assert isinstance(result, EstimatorResult) + logging.info(f"Got estimator result {result}, turning into cpe estimate") + return OPEstimatorAdapter.estimator_result_to_cpe_estimate(result) + + +class SequentialOPEstimatorAdapter: + def __init__(self, seq_ope_estimator: RLEstimator, gamma: float, device=None): + self.seq_ope_estimator = seq_ope_estimator + self.gamma = gamma + self._device = device + + class EDPSeqPolicy(RLPolicy): + def __init__( + self, num_actions: int, model_propensities: torch.Tensor, device=None + ): + super().__init__(ActionSpace(num_actions), device) + self.model_propensities = model_propensities + + def action_dist(self, state: State) -> ActionDistribution: + # "state" is (trajectory, step) + # pyre-fixme[7]: Expected `ActionDistribution` but got `Tensor`. + # pyre-fixme[6]: For 1st param expected `Union[None, List[typing.Any], + # int, slice, Tensor, typing.Tuple[typing.Any, ...]]` but got + # `Union[Tuple[float], Tuple[int], float, ndarray, Tensor]`. + return self.model_propensities[state.value] + + class EDPValueFunc(ValueFunction): + def __init__( + self, model_values: torch.Tensor, target_propensities: torch.Tensor + ): + self.model_values = model_values + self.target_propensities = target_propensities + + def state_action_value(self, state: State, action: Action) -> float: + # pyre-fixme[6]: For 1st param expected `Union[None, List[typing.Any], + # int, slice, Tensor, typing.Tuple[typing.Any, ...]]` but got + # `Union[Tuple[float], Tuple[int], float, ndarray, Tensor]`. + # pyre-fixme[6]: For 1st param expected `Union[None, List[typing.Any], + # int, slice, Tensor, typing.Tuple[typing.Any, ...]]` but got + # `TypeWrapper[Union[Tuple[float], Tuple[int], float, int, ndarray, + # Tensor]]`. + return self.model_values[state.value][action].item() + + def state_value(self, state: State) -> float: + return torch.dot( + # pyre-fixme[6]: For 1st param expected `Union[None, + # List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, + # ...]]` but got `Union[Tuple[float], Tuple[int], float, ndarray, + # Tensor]`. + self.model_values[state.value], + # pyre-fixme[6]: For 1st param expected `Union[None, + # List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, + # ...]]` but got `Union[Tuple[float], Tuple[int], float, ndarray, + # Tensor]`. 
+ self.target_propensities[state.value], + ).item() + + def reset(self): + pass + + @staticmethod + def edp_to_rl_input( + edp: EvaluationDataPage, gamma, device=None + ) -> RLEstimatorInput: + assert edp.model_values is not None + eq_len = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories( + edp.mdp_id, + edp.action_mask.cpu().numpy(), + edp.logged_rewards.cpu().numpy().flatten(), + edp.logged_propensities.cpu().numpy().flatten(), + edp.model_propensities.cpu().numpy(), + # pyre-ignore [16]: Optional type has no attribute `cpu` + edp.model_values.cpu().numpy(), + ) + + ( + actions, + rewards, + logged_propensities, + target_propensities, + estimated_q_values, + ) = ( + torch.tensor(x, dtype=torch.double, device=device, requires_grad=True) + for x in eq_len + ) + + num_examples = logged_propensities.shape[0] + horizon = logged_propensities.shape[1] + + log = [] + for traj in range(num_examples): + log.append( + [ + Transition( + last_state=State((traj, i)), + # pyre-fixme[6]: For 2nd param expected + # `Optional[TypeWrapper[Union[Tuple[float], Tuple[int], float, + # int, ndarray, Tensor]]]` but got `Union[bool, float, int]`. + action=torch.argmax(actions[traj, i]).item(), + action_prob=logged_propensities[traj, i].item(), + state=State((traj, i + 1)), + reward=rewards[traj, i].item(), + ) + for i in range(horizon - 1) + # pyre-fixme[6]: For 1st param expected `Union[None, + # List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, + # ...]]` but got `Union[bool, float, int]`. + if actions[traj, i][torch.argmax(actions[traj, i]).item()] != 0.0 + ] + ) + + return RLEstimatorInput( + gamma=gamma, + log=log, + target_policy=SequentialOPEstimatorAdapter.EDPSeqPolicy( + actions.shape[2], target_propensities + ), + value_function=SequentialOPEstimatorAdapter.EDPValueFunc( + estimated_q_values, target_propensities + ), + ground_truth=None, + horizon=horizon, + ) + + @staticmethod + def estimator_results_to_cpe_estimate( + estimator_results: EstimatorResults, + ) -> CpeEstimate: + scores = torch.tensor( + [r.estimated_reward for r in estimator_results.results], dtype=torch.double + ) + log_scores = torch.tensor( + [r.log_reward for r in estimator_results.results], dtype=torch.double + ) + + dr_score = float(torch.mean(scores).item()) + dr_score_std_error = bootstrapped_std_error_of_mean(scores) + + log_score = float(torch.mean(log_scores).item()) + if log_score < 1e-6: + logger.warning( + "Can't normalize SDR-CPE because of small" + f" or negative logged_policy_score ({log_score})." + f"Episode values: {log_scores}." 
+ ) + return CpeEstimate( + raw=dr_score, + normalized=0.0, + raw_std_error=dr_score_std_error, + normalized_std_error=0.0, + ) + return CpeEstimate( + raw=dr_score, + normalized=dr_score / log_score, + raw_std_error=dr_score_std_error, + normalized_std_error=dr_score_std_error / log_score, + ) + + def estimate(self, edp: EvaluationDataPage) -> CpeEstimate: + estimator_results = self.seq_ope_estimator.evaluate( + SequentialOPEstimatorAdapter.edp_to_rl_input(edp, self.gamma, self._device) + ) + assert isinstance(estimator_results, EstimatorResults) + return SequentialOPEstimatorAdapter.estimator_results_to_cpe_estimate( + estimator_results + ) + + +class OPEvaluator(Evaluator): + def __init__( + self, action_names, gamma, model, metrics_to_score=None, device=None + ) -> None: + super().__init__(action_names, gamma, model, metrics_to_score) + + self._device = device + self.ope_dm_estimator = OPEstimatorAdapter(DMEstimator(device=self._device)) + self.ope_ips_estimator = OPEstimatorAdapter(IPSEstimator(device=self._device)) + self.ope_dr_estimator = OPEstimatorAdapter( + DoublyRobustEstimator(device=self._device) + ) + + self.ope_seq_dr_estimator = SequentialOPEstimatorAdapter( + SeqDREstimator(device=self._device), gamma, device=self._device + ) + self.ope_seq_weighted_dr_estimator = SequentialOPEstimatorAdapter( + SeqDREstimator(weighted=True, device=self._device), + gamma, + device=self._device, + ) + self.ope_seq_magic_estimator = SequentialOPEstimatorAdapter( + MAGICEstimator(device=self._device), gamma + ) + + def score_cpe(self, metric_name, edp: EvaluationDataPage): + logger.info("Using OPE adapter") + direct_method = self.ope_dm_estimator.estimate(edp) + inverse_propensity = self.ope_ips_estimator.estimate(edp) + doubly_robust = self.ope_dr_estimator.estimate(edp) + + sequential_doubly_robust = self.ope_seq_dr_estimator.estimate(edp) + weighted_doubly_robust = self.ope_seq_weighted_dr_estimator.estimate(edp) + magic = self.ope_seq_magic_estimator.estimate(edp) + return CpeEstimateSet( + direct_method=direct_method, + inverse_propensity=inverse_propensity, + doubly_robust=doubly_robust, + sequential_doubly_robust=sequential_doubly_robust, + weighted_doubly_robust=weighted_doubly_robust, + magic=magic, + ) diff --git a/reagent/evaluation/ranking_listwise_evaluator.py b/reagent/evaluation/ranking_listwise_evaluator.py deleted file mode 100644 index 93d032154..000000000 --- a/reagent/evaluation/ranking_listwise_evaluator.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
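For context on how this adapter layer is meant to be consumed: `OPEvaluator` keeps the existing `Evaluator` interface but routes each estimate through the `reagent.ope` estimators above. A sketch, assuming an already-built `EvaluationDataPage` named `edp` and a trained model `q_network` (both placeholders):

```python
import torch

from reagent.evaluation.ope_adapter import OPEvaluator

evaluator = OPEvaluator(
    action_names=["action_0", "action_1"],  # illustrative action names
    gamma=0.99,
    model=q_network,  # placeholder: the model used for counterfactual policy evaluation
    metrics_to_score=None,
    device=torch.device("cpu"),
)
estimates = evaluator.score_cpe("reward", edp)
# `estimates` is a CpeEstimateSet; each field is a CpeEstimate with raw/normalized values.
print(estimates.doubly_robust, estimates.sequential_doubly_robust, estimates.magic)
```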
-import logging -from dataclasses import dataclass -from typing import Optional - -import numpy as np -import torch -import torch.nn as nn -from reagent.core.tracker import observable -from reagent.models.seq2slate import Seq2SlateMode -from reagent.types import PreprocessedTrainingBatch -from sklearn.metrics import average_precision_score, dcg_score, ndcg_score - - -logger = logging.getLogger(__name__) - - -@dataclass -class ListwiseRankingMetrics: - ndcg: Optional[float] = 0.0 - dcg: Optional[float] = 0.0 - mean_ap: Optional[float] = 0.0 - cross_entropy_loss: Optional[float] = 0.0 - - -@observable( - cross_entropy_loss=torch.Tensor, dcg=np.float64, ndcg=np.float64, mean_ap=np.float64 -) -class RankingListwiseEvaluator: - """ Evaluate listwise ranking models on common ranking metrics """ - - def __init__(self, seq2slate_net, slate_size: int, calc_cpe: bool) -> None: - self.seq2slate_net = seq2slate_net - self.slate_size = slate_size - self.calc_cpe = calc_cpe - self.ndcg = [] - self.dcg = [] - self.mean_ap = [] - self.log_softmax = nn.LogSoftmax(dim=1) - self.kl_loss = nn.KLDivLoss(reduction="batchmean") - - @torch.no_grad() - def evaluate(self, eval_tdp: PreprocessedTrainingBatch) -> None: - seq2slate_net_prev_mode = self.seq2slate_net.training - self.seq2slate_net.eval() - - eval_input = eval_tdp.training_input - # pyre-fixme[16]: `Optional` has no attribute `shape`. - batch_size = eval_input.position_reward.shape[0] - - # shape: batch_size, tgt_seq_len - encoder_scores = self.seq2slate_net( - eval_input, mode=Seq2SlateMode.ENCODER_SCORE_MODE - ).encoder_scores - assert ( - encoder_scores.shape[1] - == eval_input.position_reward.shape[1] - == self.slate_size - ) - ce_loss = self.kl_loss( - self.log_softmax(encoder_scores), eval_input.position_reward - ).item() - - self.seq2slate_net.train(seq2slate_net_prev_mode) - - if not self.calc_cpe: - # pyre-fixme[16]: `RankingListwiseEvaluator` has no attribute - # `notify_observers`. - self.notify_observers(cross_entropy_loss=ce_loss) - return - - # shape: batch_size, tgt_seq_len - ranking_output = self.seq2slate_net(eval_input, mode=Seq2SlateMode.RANK_MODE) - # pyre-fixme[16]: `int` has no attribute `cpu`. - ranked_idx = (ranking_output.ranked_tgt_out_idx - 2).cpu().numpy() - # pyre-fixme[6]: Expected `int` for 1st param but got `Optional[torch.Tensor]`. - logged_idx = (eval_input.tgt_out_idx - 2).cpu().numpy() - score_bar = np.arange(self.slate_size, 0, -1) - - batch_dcg = [] - batch_ndcg = [] - batch_mean_ap = [] - for i in range(batch_size): - ranked_scores = np.zeros(self.slate_size) - ranked_scores[ranked_idx[i]] = score_bar - truth_scores = np.zeros(self.slate_size) - # pyre-fixme[16]: `Optional` has no attribute `__getitem__`. 
- truth_scores[logged_idx[i]] = eval_input.position_reward[i].cpu().numpy() - # average_precision_score accepts 1D arrays - # dcg & ndcg accepts 2D arrays - batch_mean_ap.append(average_precision_score(truth_scores, ranked_scores)) - ranked_scores = np.expand_dims(ranked_scores, axis=0) - truth_scores = np.expand_dims(truth_scores, axis=0) - batch_dcg.append(dcg_score(truth_scores, ranked_scores)) - batch_ndcg.append(ndcg_score(truth_scores, ranked_scores)) - - self.notify_observers( - cross_entropy_loss=ce_loss, - dcg=torch.mean(torch.tensor(batch_dcg)).reshape(1), - ndcg=torch.mean(torch.tensor(batch_ndcg)).reshape(1), - mean_ap=torch.mean(torch.tensor(batch_mean_ap)).reshape(1), - ) - - @torch.no_grad() - def evaluate_post_training(self): - pass diff --git a/reagent/evaluation/ranking_policy_gradient_evaluator.py b/reagent/evaluation/ranking_policy_gradient_evaluator.py deleted file mode 100644 index 92cd96c1a..000000000 --- a/reagent/evaluation/ranking_policy_gradient_evaluator.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import logging - -# @manual=third-party//scipy:scipy-py -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F -from reagent.core.tracker import observable -from reagent.evaluation.evaluation_data_page import EvaluationDataPage -from reagent.models.seq2slate import Seq2SlateMode -from reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer -from reagent.types import PreprocessedTrainingBatch - - -logger = logging.getLogger(__name__) - - -@observable( - eval_baseline_loss=torch.Tensor, - eval_advantages=torch.Tensor, - logged_slate_probs=torch.Tensor, - ranked_slate_probs=torch.Tensor, - eval_data_pages_g=EvaluationDataPage, - eval_data_pages_ng=EvaluationDataPage, -) -class RankingPolicyGradientEvaluator: - """ Evaluate ranking models that are learned through policy gradient """ - - def __init__( - self, - trainer: Seq2SlateTrainer, - calc_cpe: bool, - reward_network: Optional[nn.Module] = None, - ) -> None: - assert not calc_cpe or reward_network is not None - self.trainer = trainer - self.calc_cpe = calc_cpe - self.reward_network = reward_network - - # Evaluate greedy/non-greedy version of the ranking model - self.eval_data_pages_g: Optional[EvaluationDataPage] = None - self.eval_data_pages_ng: Optional[EvaluationDataPage] = None - - @torch.no_grad() - def evaluate(self, eval_tdp: PreprocessedTrainingBatch) -> None: - seq2slate_net = self.trainer.seq2slate_net - seq2slate_net_prev_mode = seq2slate_net.training - seq2slate_net.eval() - - logged_slate_log_prob = ( - seq2slate_net( - eval_tdp.training_input, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE - ) - .log_probs.detach() - .flatten() - .cpu() - .numpy() - ) - - eval_baseline_loss = 0.0 - if self.trainer.baseline_net: - baseline_net = self.trainer.baseline_net - # pyre-fixme[16]: `Optional` has no attribute `training`. - baseline_net_prev_mode = baseline_net.training - # pyre-fixme[16]: `Optional` has no attribute `eval`. - baseline_net.eval() - # pyre-fixme[29]: `Optional[reagent.models.seq2slate.BaselineNet]` is - # not a function. - b = baseline_net(eval_tdp.training_input).detach() - eval_baseline_loss = F.mse_loss( - b, eval_tdp.training_input.slate_reward - ).item() - # pyre-fixme[16]: `Optional` has no attribute `train`. 
- baseline_net.train(baseline_net_prev_mode) - else: - b = torch.zeros_like(eval_tdp.training_input.slate_reward) - - eval_advantage = ( - # pyre-fixme[16]: `Optional` has no attribute `__sub__`. - (eval_tdp.training_input.slate_reward - b) - .flatten() - .cpu() - .numpy() - ) - - ranked_slate_output = seq2slate_net( - eval_tdp.training_input, Seq2SlateMode.RANK_MODE, greedy=True - ) - ranked_slate_prob = ( - torch.prod( - torch.gather( - ranked_slate_output.ranked_tgt_out_probs, - 2, - ranked_slate_output.ranked_tgt_out_idx.unsqueeze(-1), - ).squeeze(), - -1, - ) - .cpu() - .numpy() - ) - - seq2slate_net.train(seq2slate_net_prev_mode) - - if not self.calc_cpe: - return - - edp_g = EvaluationDataPage.create_from_tensors_seq2slate( - seq2slate_net, - # pyre-fixme[6]: Expected `Module` for 2nd param but got - # `Optional[nn.Module]`. - self.reward_network, - eval_tdp.training_input, - eval_greedy=True, - ) - if self.eval_data_pages_g is None: - self.eval_data_pages_g = edp_g - else: - # pyre-fixme[16]: `Optional` has no attribute `append`. - self.eval_data_pages_g = self.eval_data_pages_g.append(edp_g) - - edp_ng = EvaluationDataPage.create_from_tensors_seq2slate( - seq2slate_net, - # pyre-fixme[6]: Expected `Module` for 2nd param but got - # `Optional[nn.Module]`. - self.reward_network, - eval_tdp.training_input, - eval_greedy=False, - ) - if self.eval_data_pages_ng is None: - self.eval_data_pages_ng = edp_ng - else: - self.eval_data_pages_ng = self.eval_data_pages_ng.append(edp_ng) - - # pyre-fixme[16]: `RankingPolicyGradientEvaluator` has no attribute - # `notify_observers`. - self.notify_observers( - eval_baseline_loss=torch.tensor(eval_baseline_loss).reshape(1), - eval_advantages=torch.FloatTensor(eval_advantage), - logged_slate_probs=torch.FloatTensor(logged_slate_log_prob), - ranked_slate_probs=torch.FloatTensor(ranked_slate_prob), - ) - - @torch.no_grad() - def evaluate_post_training(self): - self.notify_observers( - # Use ValueListObserver as aggregating_observers requires input to be Tensor - eval_data_pages_g=self.eval_data_pages_g, - eval_data_pages_ng=self.eval_data_pages_ng, - ) - self.eval_data_pages_g = None - self.eval_data_pages_ng = None diff --git a/reagent/evaluation/reward_net_evaluator.py b/reagent/evaluation/reward_net_evaluator.py deleted file mode 100644 index cf44cba96..000000000 --- a/reagent/evaluation/reward_net_evaluator.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
-import copy -import logging - -import numpy as np -import torch -import torch.nn.functional as F -from reagent import types as rlt -from reagent.training.reward_network_trainer import RewardNetTrainer -from reagent.types import PreprocessedTrainingBatch - - -logger = logging.getLogger(__name__) - - -class RewardNetEvaluator: - """ Evaluate reward networks """ - - def __init__(self, trainer: RewardNetTrainer) -> None: - self.trainer = trainer - self.mse_loss = [] - self.best_model = None - self.best_model_loss = 1e9 - - @torch.no_grad() - def evaluate(self, eval_tdp: PreprocessedTrainingBatch): - reward_net = self.trainer.reward_net - reward_net_prev_mode = reward_net.training - reward_net.eval() - - if isinstance(eval_tdp.training_input, rlt.PreprocessedRankingInput): - reward = eval_tdp.training_input.slate_reward - else: - reward = eval_tdp.training_input.reward - - mse_loss = F.mse_loss( - reward_net(eval_tdp.training_input).predicted_reward, reward - ) - self.mse_loss.append(mse_loss.detach().cpu()) - - reward_net.train(reward_net_prev_mode) - - @torch.no_grad() - def evaluate_post_training(self): - mean_mse_loss = np.mean(self.mse_loss) - logger.info(f"Evaluation MSE={mean_mse_loss}") - eval_res = {"mse": mean_mse_loss} - self.mse_loss = [] - - if mean_mse_loss < self.best_model_loss: - self.best_model_loss = mean_mse_loss - self.best_model = copy.deepcopy(self.trainer.reward_net) - - return eval_res diff --git a/reagent/evaluation/sequential_doubly_robust_estimator.py b/reagent/evaluation/sequential_doubly_robust_estimator.py index fbeb07ee6..f9c8edd1f 100644 --- a/reagent/evaluation/sequential_doubly_robust_estimator.py +++ b/reagent/evaluation/sequential_doubly_robust_estimator.py @@ -6,12 +6,11 @@ import numpy as np import torch -from reagent.evaluation.cpe import CpeEstimate, bootstrapped_std_error_of_mean +from reagent.evaluation.cpe import bootstrapped_std_error_of_mean, CpeEstimate from reagent.evaluation.evaluation_data_page import EvaluationDataPage logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) class SequentialDoublyRobustEstimator: @@ -68,6 +67,7 @@ def estimate(self, edp: EvaluationDataPage) -> CpeEstimate: last_episode_end = -1 while i < num_examples: # calculate the doubly-robust Q-value for one episode + # pyre-ignore [16]: Optional type has no attribute `__getitem__` if i == num_examples - 1 or edp.mdp_id[i] != edp.mdp_id[i + 1]: episode_end = i episode_value = 0.0 diff --git a/reagent/evaluation/weighted_sequential_doubly_robust_estimator.py b/reagent/evaluation/weighted_sequential_doubly_robust_estimator.py index 1af5d2abc..5bd10afd2 100644 --- a/reagent/evaluation/weighted_sequential_doubly_robust_estimator.py +++ b/reagent/evaluation/weighted_sequential_doubly_robust_estimator.py @@ -6,13 +6,11 @@ import numpy as np import scipy as sp -import torch from reagent.evaluation.cpe import CpeEstimate from reagent.evaluation.evaluation_data_page import EvaluationDataPage logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) class WeightedSequentialDoublyRobustEstimator: @@ -44,6 +42,7 @@ def estimate( edp.logged_rewards.cpu().numpy().flatten(), edp.logged_propensities.cpu().numpy().flatten(), edp.model_propensities.cpu().numpy(), + # pyre-ignore [16]: Optional type has no attribute `cpu` edp.model_values.cpu().numpy(), ) @@ -70,8 +69,10 @@ def estimate( importance_weights = target_propensity_for_logged_action / logged_propensities importance_weights = np.cumprod(importance_weights, axis=1) - importance_weights = 
WeightedSequentialDoublyRobustEstimator.normalize_importance_weights( - importance_weights, whether_self_normalize_importance_weights + importance_weights = ( + WeightedSequentialDoublyRobustEstimator.normalize_importance_weights( + importance_weights, whether_self_normalize_importance_weights + ) ) importance_weights_one_earlier = ( diff --git a/reagent/evaluation/world_model_evaluator.py b/reagent/evaluation/world_model_evaluator.py index 62c695e11..e7d21c0e8 100644 --- a/reagent/evaluation/world_model_evaluator.py +++ b/reagent/evaluation/world_model_evaluator.py @@ -4,15 +4,15 @@ from typing import Dict, List import torch +from reagent.core.types import FeatureData, MemoryNetworkInput from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer -from reagent.types import FeatureData, MemoryNetworkInput logger = logging.getLogger(__name__) class LossEvaluator(object): - """ Evaluate losses on data pages """ + """Evaluate losses on data pages""" def __init__(self, trainer: MDNRNNTrainer, state_dim: int) -> None: self.trainer = trainer @@ -33,7 +33,7 @@ def evaluate(self, tdp: MemoryNetworkInput) -> Dict[str, float]: class FeatureImportanceEvaluator(object): - """ Evaluate feature importance weights on data pages """ + """Evaluate feature importance weights on data pages""" def __init__( self, @@ -59,12 +59,12 @@ def __init__( self.sorted_state_feature_start_indices = sorted_state_feature_start_indices def evaluate(self, batch: MemoryNetworkInput): - """ Calculate feature importance: setting each state/action feature to - the mean value and observe loss increase. """ + """Calculate feature importance: setting each state/action feature to + the mean value and observe loss increase.""" self.trainer.memory_network.mdnrnn.eval() state_features = batch.state.float_features - action_features = batch.action + action_features = batch.action.float_features seq_len, batch_size, state_dim = state_features.size() action_dim = action_features.size()[2] action_feature_num = self.action_feature_num @@ -81,7 +81,7 @@ def evaluate(self, batch: MemoryNetworkInput): state_feature_boundaries = self.sorted_state_feature_start_indices + [state_dim] for i in range(action_feature_num): - action_features = batch.action.reshape( + action_features = batch.action.float_features.reshape( (batch_size * seq_len, action_dim) ).data.clone() @@ -108,7 +108,7 @@ def evaluate(self, batch: MemoryNetworkInput): action_features = action_features.reshape((seq_len, batch_size, action_dim)) new_batch = MemoryNetworkInput( state=batch.state, - action=action_features, + action=FeatureData(action_features), next_state=batch.next_state, reward=batch.reward, time_diff=torch.ones_like(batch.reward).float(), @@ -172,7 +172,7 @@ def compute_median_feature_value(self, features): class FeatureSensitivityEvaluator(object): - """ Evaluate state feature sensitivity caused by varying actions """ + """Evaluate state feature sensitivity caused by varying actions""" def __init__( self, @@ -185,9 +185,9 @@ def __init__( self.sorted_state_feature_start_indices = sorted_state_feature_start_indices def evaluate(self, batch: MemoryNetworkInput): - """ Calculate state feature sensitivity due to actions: + """Calculate state feature sensitivity due to actions: randomly permutating actions and see how much the prediction of next - state feature deviates. 
""" + state feature deviates.""" assert isinstance(batch, MemoryNetworkInput) self.trainer.memory_network.mdnrnn.eval() @@ -197,15 +197,13 @@ def evaluate(self, batch: MemoryNetworkInput): feature_sensitivity = torch.zeros(state_feature_num) # the input of world_model has seq-len as the first dimension - mdnrnn_output = self.trainer.memory_network( - batch.state, FeatureData(batch.action) - ) + mdnrnn_output = self.trainer.memory_network(batch.state, batch.action) predicted_next_state_means = mdnrnn_output.mus shuffled_mdnrnn_output = self.trainer.memory_network( batch.state, # shuffle the actions - FeatureData(batch.action[:, torch.randperm(batch_size), :]), + FeatureData(batch.action.float_features[:, torch.randperm(batch_size), :]), ) shuffled_predicted_next_state_means = shuffled_mdnrnn_output.mus diff --git a/reagent/gym/__init__.py b/reagent/gym/__init__.py index 5be5087fd..6573d13dd 100644 --- a/reagent/gym/__init__.py +++ b/reagent/gym/__init__.py @@ -1,2 +1,8 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from .agents.agent import Agent +from .envs.gym import Gym + + +__all__ = ["Agent", "Gym"] diff --git a/reagent/gym/agents/agent.py b/reagent/gym/agents/agent.py index 9a466cb07..a8beb27ec 100644 --- a/reagent/gym/agents/agent.py +++ b/reagent/gym/agents/agent.py @@ -1,18 +1,14 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -from typing import Any, Optional, Union +from typing import Any, Dict, Optional, Tuple, Union +import numpy as np import torch -from gym import Env +from reagent.gym.envs.env_wrapper import EnvWrapper from reagent.gym.policies.policy import Policy -from reagent.gym.preprocessors import ( - make_default_action_extractor, - make_default_obs_preprocessor, - make_default_serving_action_extractor, - make_default_serving_obs_preprocessor, -) -from reagent.gym.types import PostStep, Transition +from reagent.gym.policies.random_policies import make_random_policy_for_env +from reagent.gym.types import PostEpisode, PostStep, Trajectory, Transition def _id(x): @@ -24,10 +20,11 @@ def __init__( self, policy: Policy, post_transition_callback: Optional[PostStep] = None, - device: Union[str, torch.device] = "cpu", + post_episode_callback: Optional[PostEpisode] = None, obs_preprocessor=_id, action_extractor=_id, - ): + device: Optional[torch.device] = None, + ) -> None: """ The Agent orchestrates the interactions on our RL components, given the interactions with the environment. @@ -38,39 +35,39 @@ def __init__( post_step: called after env.step(action). Default post_step is to do nothing. 
""" + device = device or torch.device("cpu") self.policy = policy self.obs_preprocessor = obs_preprocessor self.action_extractor = action_extractor self.post_transition_callback = post_transition_callback - self._reset_internal_states() - - if isinstance(device, str): - device = torch.device(device) - self.device: torch.device = device - - def _reset_internal_states(self): - # intermediate state between act and post_step - self._log_prob: float = 0.0 + self.post_episode_callback = post_episode_callback + self.device = device @classmethod def create_for_env( cls, - env: Env, - policy: Policy, + env: EnvWrapper, + policy: Optional[Policy], *, device: Union[str, torch.device] = "cpu", obs_preprocessor=None, action_extractor=None, **kwargs, - ): + ) -> "Agent": + """ + If `policy` is not given, we will try to create a random policy + """ if isinstance(device, str): device = torch.device(device) if obs_preprocessor is None: - obs_preprocessor = make_default_obs_preprocessor(env, device=device) + obs_preprocessor = env.get_obs_preprocessor(device=device) if action_extractor is None: - action_extractor = make_default_action_extractor(env) + action_extractor = env.get_action_extractor() + + if policy is None: + policy = make_random_policy_for_env(env) return cls( policy, @@ -82,10 +79,21 @@ def create_for_env( @classmethod def create_for_env_with_serving_policy( - cls, env: Env, serving_policy: Policy, **kwargs - ): - obs_preprocessor = make_default_serving_obs_preprocessor(env) - action_extractor = make_default_serving_action_extractor(env) + cls, + env: EnvWrapper, + serving_policy: Policy, + *, + obs_preprocessor=None, + action_extractor=None, + **kwargs, + ) -> "Agent": + # device shouldn't be provided as serving is CPU only + if obs_preprocessor is None: + obs_preprocessor = env.get_serving_obs_preprocessor() + + if action_extractor is None: + action_extractor = env.get_serving_action_extractor() + return cls( serving_policy, obs_preprocessor=obs_preprocessor, @@ -93,26 +101,34 @@ def create_for_env_with_serving_policy( **kwargs, ) - def act(self, obs: Any) -> Any: - """ Act on a single observation """ + def act( + self, obs: Any, possible_actions_mask: Optional[np.ndarray] = None + ) -> Tuple[Any, Optional[float]]: + """Act on a single observation""" # preprocess and convert to batch data preprocessed_obs = self.obs_preprocessor(obs) + if possible_actions_mask is not None: + # pyre-fixme[9]: possible_actions_mask has type `Optional[ndarray]`; + # used as `Tensor`. + possible_actions_mask = torch.tensor( + possible_actions_mask, device=self.device + ) # store intermediate actor output for post_step - actor_output = self.policy.act(preprocessed_obs) - self._log_prob = ( - 0.0 - if actor_output.log_prob is None - # pyre-fixme[16]: `Optional` has no attribute `cpu`. - else actor_output.log_prob.cpu().squeeze(0).item() - ) - return self.action_extractor(actor_output) - - def post_step(self, transition: Transition): - """ to be called after step(action) """ + # pyre-fixme[6]: For 2nd param expected `Optional[Tensor]` but got + # `Optional[ndarray]`. 
+ actor_output = self.policy.act(preprocessed_obs, possible_actions_mask) + log_prob = actor_output.log_prob + if log_prob is not None: + log_prob = log_prob.cpu().squeeze(0).item() + return self.action_extractor(actor_output), log_prob + + def post_step(self, transition: Transition) -> None: + """to be called after step(action)""" if self.post_transition_callback is not None: - transition.log_prob = self._log_prob - # pyre-fixme[29]: `Optional[typing.Callable[[Transition], None]]` is not - # a function. self.post_transition_callback(transition) - self._reset_internal_states() + + def post_episode(self, trajectory: Trajectory, info: Dict) -> None: + """to be called after the episode ends""" + if self.post_episode_callback is not None: + self.post_episode_callback(trajectory, info) diff --git a/reagent/gym/agents/post_step.py b/reagent/gym/agents/post_step.py index 45fee851e..77656359f 100644 --- a/reagent/gym/agents/post_step.py +++ b/reagent/gym/agents/post_step.py @@ -3,79 +3,29 @@ import logging -from typing import Union import gym -import torch -from reagent.gym.preprocessors import ( - make_replay_buffer_inserter, - make_replay_buffer_trainer_preprocessor, -) -from reagent.gym.types import PostStep, Transition +from reagent.gym.preprocessors import make_replay_buffer_inserter +from reagent.gym.types import Transition from reagent.replay_memory.circular_replay_buffer import ReplayBuffer -from reagent.training.trainer import Trainer logger = logging.getLogger(__name__) def add_replay_buffer_post_step( - replay_buffer: ReplayBuffer, env: gym.Env, replay_buffer_inserter=None -): - """ - Simply add transitions to replay_buffer. - """ - - if replay_buffer_inserter is None: - replay_buffer_inserter = make_replay_buffer_inserter(env) - - def post_step(transition: Transition) -> None: - replay_buffer_inserter(replay_buffer, transition) - - return post_step - - -def train_with_replay_buffer_post_step( replay_buffer: ReplayBuffer, env: gym.Env, - trainer: Trainer, - training_freq: int, - batch_size: int, - trainer_preprocessor=None, - device: Union[str, torch.device] = "cpu", replay_buffer_inserter=None, -) -> PostStep: - """ Called in post_step of agent to train based on replay buffer (RB). - Args: - trainer: responsible for having a .train method to train the model - trainer_preprocessor: format RB output for trainer.train - training_freq: how many steps in between trains - batch_size: how big of a batch to sample +): + """ + Simply add transitions to replay_buffer. """ - if isinstance(device, str): - device = torch.device(device) - - if trainer_preprocessor is None: - trainer_preprocessor = make_replay_buffer_trainer_preprocessor( - trainer, device, env ) if replay_buffer_inserter is None: replay_buffer_inserter = make_replay_buffer_inserter(env) - _num_steps = 0 - def post_step(transition: Transition) -> None: - nonlocal _num_steps - replay_buffer_inserter(replay_buffer, transition) - if _num_steps % training_freq == 0: - assert replay_buffer.size >= batch_size - train_batch = replay_buffer.sample_transition_batch(batch_size=batch_size) - preprocessed_batch = trainer_preprocessor(train_batch) - trainer.train(preprocessed_batch) - _num_steps += 1 - return - return post_step diff --git a/reagent/gym/datasets/__init__.py b/reagent/gym/datasets/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/gym/datasets/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
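Two consequences of the `Agent` changes above are worth calling out: `act()` now takes an optional `possible_actions_mask` and returns an `(action, log_prob)` pair, and the training-time post-step was removed in favor of the dataset classes added below. A hand-rolled interaction loop against the new API might look like the following sketch; `env` (an `EnvWrapper`), `policy`, and `replay_buffer` are placeholders, and the loop mirrors what `ReplayBufferDataset.__iter__` does internally.

```python
from reagent.gym.agents.agent import Agent
from reagent.gym.agents.post_step import add_replay_buffer_post_step
from reagent.gym.types import Transition

agent = Agent.create_for_env(
    env,     # placeholder EnvWrapper instance
    policy,  # placeholder Policy; passing None would create a random policy
    post_transition_callback=add_replay_buffer_post_step(replay_buffer, env),
)

obs = env.reset()
terminal = False
step = 0
while not terminal:
    mask = env.possible_actions_mask  # None for envs without a mask
    # act() now returns the extracted action plus the (possibly None) log-prob
    action, log_prob = agent.act(obs, possible_actions_mask=mask)
    next_obs, reward, terminal, _ = env.step(action)
    agent.post_step(
        Transition(
            mdp_id=0,
            sequence_number=step,
            observation=obs,
            action=action,
            reward=float(reward),
            terminal=bool(terminal),
            log_prob=log_prob,
            possible_actions_mask=mask,
        )
    )
    obs = next_obs
    step += 1
```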
diff --git a/reagent/gym/datasets/episodic_dataset.py b/reagent/gym/datasets/episodic_dataset.py new file mode 100644 index 000000000..20b139f73 --- /dev/null +++ b/reagent/gym/datasets/episodic_dataset.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +from typing import Optional + +import torch +from reagent.gym.agents.agent import Agent +from reagent.gym.envs.gym import Gym +from reagent.gym.runners.gymrunner import run_episode + + +logger = logging.getLogger(__name__) + + +class EpisodicDataset(torch.utils.data.IterableDataset): + def __init__( + self, + env: Gym, + agent: Agent, + num_episodes: int, + seed: int = 0, + max_steps: Optional[int] = None, + ): + self.env = env + self.agent = agent + self.num_episodes = num_episodes + self.seed = seed + self.max_steps = max_steps + + def __iter__(self): + self.env.reset() + for i in range(self.num_episodes): + trajectory = run_episode( + self.env, self.agent, max_steps=self.max_steps, mdp_id=i + ) + yield trajectory.to_dict() + + def __len__(self): + return self.num_episodes diff --git a/reagent/gym/datasets/replay_buffer_dataset.py b/reagent/gym/datasets/replay_buffer_dataset.py new file mode 100644 index 000000000..24488db42 --- /dev/null +++ b/reagent/gym/datasets/replay_buffer_dataset.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +from typing import Callable, Optional + +import torch +from reagent.gym.agents.agent import Agent +from reagent.gym.envs import EnvWrapper +from reagent.gym.preprocessors import ( + make_replay_buffer_inserter, + make_replay_buffer_trainer_preprocessor, +) +from reagent.gym.types import Trajectory, Transition +from reagent.replay_memory.circular_replay_buffer import ReplayBuffer + +logger = logging.getLogger(__name__) + + +class ReplayBufferDataset(torch.utils.data.IterableDataset): + def __init__( + self, + env: EnvWrapper, + agent: Agent, + replay_buffer: ReplayBuffer, + batch_size: int, + training_frequency: int = 1, + num_episodes: Optional[int] = None, + max_steps: Optional[int] = None, + post_episode_callback: Optional[Callable] = None, + trainer_preprocessor=None, + replay_buffer_inserter=None, + ): + super().__init__() + self._env = env + self._agent = agent + self._replay_buffer = replay_buffer + self._batch_size = batch_size + self._training_frequency = training_frequency + self._num_episodes = num_episodes + self._max_steps = max_steps + self._post_episode_callback = post_episode_callback + self._trainer_preprocessor = trainer_preprocessor + assert replay_buffer_inserter is not None + self._replay_buffer_inserter = replay_buffer_inserter + + # TODO: Just use kwargs here? 
+ @classmethod + def create_for_trainer( + cls, + trainer, + env: EnvWrapper, + agent: Agent, + replay_buffer: ReplayBuffer, + batch_size: int, + training_frequency: int = 1, + num_episodes: Optional[int] = None, + max_steps: Optional[int] = None, + post_episode_callback: Optional[Callable] = None, + trainer_preprocessor=None, + replay_buffer_inserter=None, + device=None, + ): + device = device or torch.device("cpu") + if trainer_preprocessor is None: + trainer_preprocessor = make_replay_buffer_trainer_preprocessor( + trainer, device, env + ) + + if replay_buffer_inserter is None: + replay_buffer_inserter = make_replay_buffer_inserter(env) + + return cls( + env=env, + agent=agent, + replay_buffer=replay_buffer, + batch_size=batch_size, + training_frequency=training_frequency, + num_episodes=num_episodes, + max_steps=max_steps, + post_episode_callback=post_episode_callback, + trainer_preprocessor=trainer_preprocessor, + replay_buffer_inserter=replay_buffer_inserter, + ) + + def __iter__(self): + mdp_id = 0 + global_num_steps = 0 + rewards = [] + + # TODO: We probably should put member vars into local vars to + # reduce indirection, improving perf + + while self._num_episodes is None or mdp_id < self._num_episodes: + obs = self._env.reset() + possible_actions_mask = self._env.possible_actions_mask + terminal = False + num_steps = 0 + episode_reward_sum = 0 + trajectory = Trajectory() + while not terminal: + action, log_prob = self._agent.act(obs, possible_actions_mask) + next_obs, reward, terminal, info = self._env.step(action) + next_possible_actions_mask = self._env.possible_actions_mask + if self._max_steps is not None and num_steps >= self._max_steps: + terminal = True + + # Only partially filled. Agent can fill in more fields. + transition = Transition( + mdp_id=mdp_id, + sequence_number=num_steps, + observation=obs, + action=action, + reward=float(reward), + terminal=bool(terminal), + log_prob=log_prob, + possible_actions_mask=possible_actions_mask, + ) + trajectory.add_transition(transition) + self._replay_buffer_inserter(self._replay_buffer, transition) + episode_reward_sum += reward + if ( + global_num_steps % self._training_frequency == 0 + and self._replay_buffer.size >= self._batch_size + ): + train_batch = self._replay_buffer.sample_transition_batch( + batch_size=self._batch_size + ) + if self._trainer_preprocessor: + train_batch = self._trainer_preprocessor(train_batch) + yield train_batch + + obs = next_obs + possible_actions_mask = next_possible_actions_mask + num_steps += 1 + global_num_steps += 1 + if self._agent.post_step: + self._agent.post_step(transition) + if self._post_episode_callback: + self._post_episode_callback(trajectory, info) + + rewards.append(episode_reward_sum) + mdp_id += 1 + logger.info( + f"Training episode: {mdp_id}, total episode reward = {episode_reward_sum}" + ) + + logger.info(f"Episode rewards during training: {rewards}") + + +class OfflineReplayBufferDataset(torch.utils.data.IterableDataset): + """ + Simply sampling from the replay buffer + """ + + def __init__( + self, + env: EnvWrapper, + replay_buffer: ReplayBuffer, + batch_size: int, + num_batches: int, + trainer_preprocessor=None, + ): + super().__init__() + self._env = env + self._replay_buffer = replay_buffer + self._batch_size = batch_size + self._num_batches = num_batches + self._trainer_preprocessor = trainer_preprocessor + + # TODO: Just use kwargs here? 
+ @classmethod + def create_for_trainer( + cls, + trainer, + env: EnvWrapper, + replay_buffer: ReplayBuffer, + batch_size: int, + num_batches: int, + trainer_preprocessor=None, + device=None, + ): + device = device or torch.device("cpu") + if trainer_preprocessor is None: + trainer_preprocessor = make_replay_buffer_trainer_preprocessor( + trainer, device, env + ) + + return cls( + env=env, + replay_buffer=replay_buffer, + batch_size=batch_size, + num_batches=num_batches, + trainer_preprocessor=trainer_preprocessor, + ) + + def __iter__(self): + for _ in range(self._num_batches): + train_batch = self._replay_buffer.sample_transition_batch( + batch_size=self._batch_size + ) + if self._trainer_preprocessor: + train_batch = self._trainer_preprocessor(train_batch) + yield train_batch diff --git a/reagent/gym/envs/__init__.py b/reagent/gym/envs/__init__.py index 44e438285..7fca76547 100644 --- a/reagent/gym/envs/__init__.py +++ b/reagent/gym/envs/__init__.py @@ -1,16 +1,18 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from reagent.core.tagged_union import TaggedUnion + +from .changing_arms import ChangingArms # noqa from .dynamics.linear_dynamics import LinDynaEnv # noqa -from .env_factory import EnvFactory +from .env_wrapper import EnvWrapper +from .gym import Gym # noqa from .pomdp.pocman import PocManEnv # noqa from .pomdp.string_game import StringGameEnv # noqa +from .pomdp.string_game_v1 import StringGameEnvV1 # noqa from .utils import register_if_not_exists -__all__ = ["EnvFactory"] - - ######### Register classes below ########## CUR_MODULE = "reagent.gym.envs" @@ -18,8 +20,45 @@ ("Pocman-v0", ".pomdp.pocman:PocManEnv"), ("StringGame-v0", ".pomdp.string_game:StringGameEnv"), ("LinearDynamics-v0", ".dynamics.linear_dynamics:LinDynaEnv"), + ( + "PossibleActionsMaskTester-v0", + ".functionality.possible_actions_mask_tester:PossibleActionsMaskTester", + ), + ("StringGame-v1", ".pomdp.string_game_v1:StringGameEnvV1"), ] for env_name, rel_module_path in ENV_CLASSES: full_module_path = CUR_MODULE + rel_module_path register_if_not_exists(id=env_name, entry_point=full_module_path) + + +######## Register EnvWrappers ########## + + +try: + from .recsim import RecSim # usort:skip # noqa + from .oracle_pvm import OraclePVM # noqa + from .toy_vm import ToyVM # noqa + + HAS_RECSIM = True +except ImportError: + HAS_RECSIM = False + +__all__ = list( + filter( + None, + [ + "Env__Union", + "Gym", + "ChangingArms", + "RecSim" if HAS_RECSIM else None, + "OraclePVM" if HAS_RECSIM else None, + "ToyVM" if HAS_RECSIM else None, + ], + ) +) + + +@EnvWrapper.fill_union() +class Env__Union(TaggedUnion): + pass diff --git a/reagent/gym/envs/changing_arms.py b/reagent/gym/envs/changing_arms.py new file mode 100644 index 000000000..1f89d2b45 --- /dev/null +++ b/reagent/gym/envs/changing_arms.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +""" +Traditional MAB setup has sequence length = 1 always. In this setup, the +distributions of the arms' rewards change every round, and the agent is presented +with some information and control about how the arms will change. +In particular, the observation includes "mu_changes", which is the possible changes +to mu; only the arm picked by the agent will have its mu_changes reflected. +This way, the next state depends on (only) the previous state and action; +hence this is an MDP. + +The reward for picking an action is the change in mu corresponding to that arm.
+With following set-up (where ARM_INIT_VALUE = 100 and NUM_ARMS = 5), the +optimal policy can accumulate a reward of 500 per run. +Note that if the policy picks an illegal action at any time, the game ends. +""" +import random + +import gym +import numpy as np +import reagent.core.types as rlt +import torch +from reagent.core.dataclasses import dataclass +from reagent.core.parameters import NormalizationData, NormalizationKey +from reagent.gym.envs.env_wrapper import EnvWrapper +from reagent.gym.normalizers import only_continuous_normalizer + + +ABS_LOW: float = -1000.0 +ABS_HIGH = 1000.0 + +MU_LOW = 0.0 +MU_HIGH = 1000.0 + + +# illegal move causes game to end with a big BOOM!!! +INVALID_MOVE_PENALTY: float = -1000.0 +IDLE_PENALTY: float = -500.0 + +NUM_ARMS = 5 +# keep these constant for now +ARM_INIT_VALUE = 100.0 +ARM_MU_DECREASE = 10.0 +MAX_STEPS = 49 + + +# in the real world, IDs are not indices into embedding table +# thus, we offset vals to test hashing mechanism +ID_LIST_OFFSET = 1000000 +ID_SCORE_LIST_OFFSET = 1500000 + +ID_LIST_FEATURE_ID = 100 +ID_SCORE_LIST_FEATURE_ID = 1000 + + +def get_initial_mus(num_arms): + return torch.tensor([ARM_INIT_VALUE] * num_arms) + + +def get_mu_changes(num_arms): + return torch.tensor([-ARM_MU_DECREASE] * num_arms) + + +def get_legal_indices_mask(num_arms): + # FIXME: hardcoded for now + assert num_arms == 5, f"unsupported num_arms = {num_arms}, should be 5" + LEGAL_PROBS = torch.tensor([0.95, 1.0, 0.95, 0.8, 0.8]) + return torch.bernoulli(LEGAL_PROBS).to(torch.uint8) + + +@dataclass +class ChangingArms(EnvWrapper): + num_arms: int = NUM_ARMS + + def make(self) -> gym.Env: + # pyre-fixme[45]: Cannot instantiate abstract class `ChangingArmsEnv`. + return ChangingArmsEnv(self.num_arms) + + def _split_state(self, obs: np.ndarray): + assert obs.shape == (3, self.num_arms), f"{obs.shape}." 
+ dense_val = torch.tensor(obs[0, :]).view(1, self.num_arms) + id_list_val = torch.tensor(obs[1, :]).nonzero(as_tuple=True)[0].to(torch.long) + id_score_list_val = torch.tensor(obs[2, :]) + return dense_val, id_list_val, id_score_list_val + + def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData: + dense_val, id_list_val, id_score_list_val = self._split_state(obs) + return rlt.FeatureData( + # dense value + float_features=dense_val, + # (offset, value) + id_list_features_raw={ + "legal": (torch.tensor([0], dtype=torch.long), id_list_val) + }, + # (offset, key, value) + id_score_list_features_raw={ + "mu_changes": ( + torch.tensor([0], dtype=torch.long), + torch.arange(self.num_arms, dtype=torch.long), + id_score_list_val, + ) + }, + ) + + def serving_obs_preprocessor(self, obs: np.ndarray) -> rlt.ServingFeatureData: + dense_val, id_list_val, id_score_list_val = self._split_state(obs) + return rlt.ServingFeatureData( + float_features_with_presence=( + dense_val, + torch.ones_like(dense_val, dtype=torch.uint8), + ), + id_list_features={ + ID_LIST_FEATURE_ID: ( + torch.tensor([0], dtype=torch.long), + id_list_val + ID_LIST_OFFSET, + ) + }, + id_score_list_features={ + ID_SCORE_LIST_FEATURE_ID: ( + torch.tensor([0], dtype=torch.long), + torch.arange(self.num_arms, dtype=torch.long) + + ID_SCORE_LIST_OFFSET, + id_score_list_val, + ) + }, + ) + + def split_state_transform(self, elem: torch.Tensor): + """For generate data""" + dense_val, id_list_val, id_score_list_val = self._split_state(elem.numpy()) + return ( + {i: s.item() for i, s in enumerate(dense_val.view(-1))}, + {ID_LIST_FEATURE_ID: (id_list_val + ID_LIST_OFFSET).tolist()}, + { + ID_SCORE_LIST_FEATURE_ID: { + i + ID_SCORE_LIST_OFFSET: s.item() + for i, s in enumerate(id_score_list_val) + } + }, + ) + + @property + def normalization_data(self): + return { + NormalizationKey.STATE: NormalizationData( + dense_normalization_parameters=only_continuous_normalizer( + list(range(self.num_arms)), MU_LOW, MU_HIGH + ) + ) + } + + def trainer_preprocessor(self, obs: torch.Tensor): + batch_size = obs.shape[0] + assert obs.shape == (batch_size, 3, self.num_arms), f"{obs.shape}" + dense_val = obs[:, 0, :].view(batch_size, self.num_arms) + # extract one-hot encoded values from id_list + batch_indices, id_list_val = obs[:, 1, :].nonzero(as_tuple=True) + offsets = [] + prev_batch_idx = -1 + for i, batch_idx in enumerate(batch_indices.tolist()): + if batch_idx > prev_batch_idx: + offsets.extend([i] * (batch_idx - prev_batch_idx)) + prev_batch_idx = batch_idx + else: + assert batch_idx == prev_batch_idx + # handle the case of trailing empty batches + # pyre-fixme[61]: `batch_idx` may not be initialized here. + if batch_idx < batch_size - 1: + # pyre-fixme[61]: `batch_idx` may not be initialized here. + offsets.extend([i] * (batch_size - 1 - batch_idx)) + assert len(offsets) == batch_size, f"{len(offsets)} != {batch_size}." 
+ id_list_offsets = torch.tensor(offsets) + + # id_score_list is easier because not one-hot encoded + id_score_list_offsets = torch.tensor( + list(range(0, batch_size * self.num_arms, self.num_arms)) + ) + id_score_list_keys = torch.arange(self.num_arms).repeat(batch_size) + id_score_list_vals = obs[:, 2, :].reshape(-1) + return rlt.FeatureData( + # dense value + float_features=dense_val, + # (offset, value) + id_list_features_raw={"legal": (id_list_offsets, id_list_val)}, + # (offset, key, value) + id_score_list_features_raw={ + "mu_changes": ( + id_score_list_offsets, + id_score_list_keys, + id_score_list_vals, + ) + }, + ) + + +class ChangingArmsEnv(gym.Env): + """This is just the gym environment, without extra functionality""" + + def __init__(self, num_arms) -> None: + self.seed(0) + self.num_arms = num_arms + self.max_steps = MAX_STEPS + + def step(self, action): + if isinstance(action, np.ndarray): + action = action.item() + assert ( + 0 <= action and action <= self.num_arms + ), f"out-of-bounds action {action}." + reached_max_steps = self.num_steps >= self.max_steps + self.num_steps += 1 + + # idle action + if action == self.num_arms: + # simply return new state, without updating distributions + # this is ideal when there aren't any legal actions, this + # would generate a new batch of legal actions + return self.state, IDLE_PENALTY, reached_max_steps, None + + # illegal action + if action not in self.legal_indices: + return self.state, INVALID_MOVE_PENALTY, True, None + + # update states for only the action selected + prev = self.mus[action].item() + self.mus[action] = prev + self.mu_changes[action] + if self.mus[action] <= MU_LOW: + self.legal_indices_mask[action] = 0 + + reward = prev - self.mus[action].item() + return self.state, reward, reached_max_steps, None + + def seed(self, seed: int) -> None: + random.seed(seed) + torch.manual_seed(seed) + + def reset(self): + # initialize the distributions + self.num_steps = 0 + self.mus = get_initial_mus(self.num_arms) + # these are turned off when an arm has been "exhausted" + self.legal_indices_mask = torch.tensor([1] * self.num_arms).to(torch.uint8) + return self.state + + @property + def state(self): + """ + State comprises of: + - initial mus + - legal_indices mask + - randomly-generated mu changes + """ + self.mu_changes = get_mu_changes(self.num_arms) + legal_indices_mask = ( + get_legal_indices_mask(self.num_arms) & self.legal_indices_mask + ) + self.legal_indices = legal_indices_mask.nonzero(as_tuple=True)[0] + result = torch.stack([self.mus, legal_indices_mask, self.mu_changes]) + return result.numpy() + + @property + def observation_space(self): + """ + It should really be a Dict, but we return them all stacked since it's + more convenient for RB. + """ + return gym.spaces.Box(ABS_LOW, ABS_HIGH, shape=(3, self.num_arms)) + + @property + def action_space(self): + # Selecting 0,1,2...,num_arms-1 is selecting an arm. + # If action is invalid, agent incurs a penalty. + # If action is valid, action is an idx i, and reward + # is a sample from ith distribution. At the same time + # the ith distribution is updated with the changes. + # Alternatively, can choose NULL (i.e. do-nothing) action + # if action = num_arms + return gym.spaces.Discrete(self.num_arms + 1) diff --git a/reagent/gym/envs/dynamics/__init__.py b/reagent/gym/envs/dynamics/__init__.py new file mode 100644 index 000000000..40539064a --- /dev/null +++ b/reagent/gym/envs/dynamics/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved. diff --git a/reagent/gym/envs/env_factory.py b/reagent/gym/envs/env_factory.py deleted file mode 100644 index 1144b01e1..000000000 --- a/reagent/gym/envs/env_factory.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -import logging - -import gym -from gym_minigrid.wrappers import ReseedWrapper -from reagent.gym.envs.simple_minigrid import SimpleObsWrapper - - -logger = logging.getLogger(__name__) - - -class EnvFactory: - @staticmethod - def make(name: str) -> gym.Env: - env: gym.Env = gym.make(name) - if name.startswith("MiniGrid-"): - # Wrap in minigrid simplifier - env = SimpleObsWrapper(ReseedWrapper(env)) - - logger.info( - f"Env: {name}; observation_space: {env.observation_space}; " - f"action_space: {env.action_space}" - ) - - return env diff --git a/reagent/gym/envs/env_wrapper.py b/reagent/gym/envs/env_wrapper.py new file mode 100644 index 000000000..bc31fa92b --- /dev/null +++ b/reagent/gym/envs/env_wrapper.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import abc +import logging +from typing import Callable, Optional + +import gym +import numpy as np +import reagent.core.types as rlt +import torch +from gym import spaces +from reagent.core.dataclasses import dataclass +from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE +from reagent.core.registry_meta import RegistryMeta +from reagent.training.utils import rescale_actions + + +# types for reference +ObsPreprocessor = Callable[[np.ndarray], rlt.FeatureData] +ServingObsPreprocessor = Callable[[np.ndarray], rlt.ServingFeatureData] +ActionExtractor = Callable[[rlt.ActorOutput], np.ndarray] +ServingActionExtractor = ActionExtractor + +CONTINUOUS_MODEL_LOW = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[0]) +CONTINUOUS_MODEL_HIGH = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[1]) + +logger = logging.getLogger(__name__) + + +@dataclass +class EnvWrapper(gym.core.Wrapper, metaclass=RegistryMeta): + """Wrapper around it's environment, to simplify configuration.""" + + def __post_init_post_parse__(self): + super().__init__(self.make()) + logger.info( + f"Env: {self.env};\n" + f"observation_space: {self.env.observation_space};\n" + f"action_space: {self.env.action_space};" + ) + + @abc.abstractmethod + def make(self) -> gym.Env: + pass + + @abc.abstractmethod + def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData: + pass + + @abc.abstractmethod + def serving_obs_preprocessor(self, obs: np.ndarray) -> rlt.ServingFeatureData: + pass + + def get_obs_preprocessor(self, *ctor_args, **ctor_kwargs): + # ctor_args go to .to call + ctor_kwargs["non_blocking"] = True + return lambda *args, **kwargs: self.obs_preprocessor(*args, **kwargs).to( + *ctor_args, **ctor_kwargs + ) + + def get_serving_obs_preprocessor(self): + return lambda *args, **kwargs: self.serving_obs_preprocessor(*args, **kwargs) + + def action_extractor(self, actor_output: rlt.ActorOutput) -> torch.Tensor: + action = actor_output.action + action_space = self.action_space + # Canonical rule to return one-hot encoded actions for discrete + assert ( + len(action.shape) == 2 and action.shape[0] == 1 + ), f"{action} (shape: {action.shape}) is not a single action!" 
+ if isinstance(action_space, spaces.Discrete): + return action.squeeze(0).argmax() + elif isinstance(action_space, spaces.MultiDiscrete): + return action.squeeze(0) + # Canonical rule to scale actions to CONTINUOUS_TRAINING_ACTION_RANGE + elif isinstance(action_space, spaces.Box): + assert len(action_space.shape) == 1, f"{action_space} not supported." + return rescale_actions( + action.squeeze(0), + new_min=torch.tensor(action_space.low), + new_max=torch.tensor(action_space.high), + prev_min=CONTINUOUS_MODEL_LOW, + prev_max=CONTINUOUS_MODEL_HIGH, + ) + else: + raise NotImplementedError(f"Unsupported action space: {action_space}") + + def serving_action_extractor(self, actor_output: rlt.ActorOutput) -> torch.Tensor: + action = actor_output.action + action_space = self.action_space + assert ( + len(action.shape) == 2 and action.shape[0] == 1 + ), f"{action.shape} isn't (1, action_dim)" + if isinstance(action_space, spaces.Discrete): + return action.squeeze(0).argmax().view([]) + elif isinstance(action_space, spaces.MultiDiscrete): + return action.squeeze(0) + elif isinstance(action_space, spaces.Box): + assert ( + len(action_space.shape) == 1 + ), f"Unsupported Box with shape {action_space.shape}" + return action.squeeze(0) + else: + raise NotImplementedError(f"Unsupported action space: {action_space}") + + def get_action_extractor(self): + return ( + lambda *args, **kwargs: self.action_extractor(*args, **kwargs).cpu().numpy() + ) + + def get_serving_action_extractor(self): + return ( + lambda *args, **kwargs: self.serving_action_extractor(*args, **kwargs) + .cpu() + .numpy() + ) + + # TODO: add more methods to simplify gym code + # e.g. normalization, specific preprocessor, etc. + # This can move a lot of the if statements from create_from_env methods. + + @property + def max_steps(self) -> Optional[int]: + possible_keys = [ + # gym should have _max_episode_steps + "_max_episode_steps", + # Minigrid should have max_steps + "max_steps", + ] + for key in possible_keys: + res = getattr(self.env, key, None) + if res is not None: + return res + return None + + @property + def possible_actions_mask(self) -> Optional[np.ndarray]: + ret = getattr(self.env, "possible_actions_mask", None) + if ret is not None: + ret = ret.copy() + return ret diff --git a/reagent/gym/envs/functionality/__init__.py b/reagent/gym/envs/functionality/__init__.py new file mode 100644 index 000000000..40539064a --- /dev/null +++ b/reagent/gym/envs/functionality/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/gym/envs/functionality/possible_actions_mask_tester.py b/reagent/gym/envs/functionality/possible_actions_mask_tester.py new file mode 100644 index 000000000..7975bb904 --- /dev/null +++ b/reagent/gym/envs/functionality/possible_actions_mask_tester.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +""" +Simple environment to test possible_actions_mask. +State simply tells you which iteration it is, but doesn't tell anything about +which action to take, so only source of info is possible_actions_mask. +The Q-value of each action to converge to the (discounted) value of the MDP. 
+ +The value of the MDP should be 10 * max_steps = 200 +""" + +import gym +import numpy as np +from gym.spaces import Box, Discrete + + +def _get_state(step_idx, max_steps): + """One-hot encoding of which state we're on""" + zeros = np.zeros(max_steps, dtype=np.float32) + if step_idx == max_steps: + return zeros + assert 0 <= step_idx and step_idx < max_steps + zeros[step_idx] = 1.0 + return zeros + + +class PossibleActionsMaskTester(gym.Env): + def __init__(self) -> None: + self.max_steps = 20 + self.action_num = 4 + self.cur_step = -1 + self.observation_space = Box(0.0, 1.0, shape=(self.max_steps,)) + self.action_space = Discrete(n=self.action_num) + + def _update_possible_actions_mask(self) -> None: + # pyre-fixme[16]: `PossibleActionsMaskTester` has no attribute `legal_action`. + self.legal_action = np.random.randint(self.action_num) + # pyre-fixme[16]: `PossibleActionsMaskTester` has no attribute + # `possible_actions_mask`. + self.possible_actions_mask = np.zeros(self.action_num, dtype=bool) + self.possible_actions_mask[self.legal_action] = True + + def _get_state(self): + return _get_state(self.cur_step, self.max_steps) + + def reset(self): + self.cur_step = 0 + self._update_possible_actions_mask() + return self._get_state() + + def step(self, action): + reward = 10.0 if action == self.legal_action else 0.0 + terminal = self.cur_step == (self.max_steps - 1) + self.cur_step += 1 + self._update_possible_actions_mask() + return self._get_state(), reward, terminal, None diff --git a/reagent/gym/envs/gym.py b/reagent/gym/envs/gym.py new file mode 100644 index 000000000..016bf1944 --- /dev/null +++ b/reagent/gym/envs/gym.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +from typing import Optional, Tuple + +import gym +import numpy as np +import reagent.core.types as rlt +import torch +from gym import spaces +from gym_minigrid.wrappers import ReseedWrapper +from reagent.core.dataclasses import dataclass +from reagent.gym.envs.env_wrapper import EnvWrapper +from reagent.gym.envs.wrappers.simple_minigrid import SimpleObsWrapper + + +logger = logging.getLogger(__name__) + + +@dataclass +class Gym(EnvWrapper): + env_name: str + set_max_steps: Optional[int] = None + + def make(self) -> gym.Env: + kwargs = {} + if self.set_max_steps is not None: + kwargs["max_steps"] = self.set_max_steps + env: gym.Env = gym.make(self.env_name, **kwargs) + if self.env_name.startswith("MiniGrid-"): + # Wrap in minigrid simplifier + env = SimpleObsWrapper(ReseedWrapper(env)) + return env + + def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData: + obs_space = self.observation_space + if isinstance(obs_space, spaces.Box): + return rlt.FeatureData(torch.tensor(obs).float().unsqueeze(0)) + else: + raise NotImplementedError(f"{obs_space} obs space not supported for Gym.") + + # TODO: make return serving feature data + # pyre-fixme[15]: `serving_obs_preprocessor` overrides method defined in + # `EnvWrapper` inconsistently. 
+ def serving_obs_preprocessor( + self, obs: np.ndarray + ) -> Tuple[torch.Tensor, torch.Tensor]: + obs_space = self.observation_space + if not isinstance(obs_space, spaces.Box): + raise NotImplementedError(f"{obs_space} not supported!") + + if len(obs_space.shape) != 1: + raise NotImplementedError(f"Box shape {obs_space.shape} not supported!") + state_dim = obs_space.shape[0] + obs_tensor = torch.tensor(obs).float().view(1, state_dim) + presence_tensor = torch.ones_like(obs_tensor) + return (obs_tensor, presence_tensor) diff --git a/reagent/gym/envs/oracle_pvm.py b/reagent/gym/envs/oracle_pvm.py new file mode 100644 index 000000000..1fd81c30b --- /dev/null +++ b/reagent/gym/envs/oracle_pvm.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +from collections import OrderedDict +from typing import Callable, Dict, List + +import gym +import numpy as np +import reagent.core.types as rlt +import torch +from reagent.core.dataclasses import dataclass +from reagent.gym.envs import RecSim +from reagent.gym.preprocessors.default_preprocessors import RecsimObsPreprocessor +from scipy import stats + + +logger = logging.getLogger(__name__) + +# score function takes user and doc features, and outputs a score +SCORE_FUNCTION_T = Callable[[np.ndarray, np.ndarray], float] + + +def make_default_score_fn(fn_i: int) -> SCORE_FUNCTION_T: + """ + Make ith score_fn (constructor of ith score) + """ + + def fn(user: np.ndarray, doc: np.ndarray) -> float: + return doc[fn_i] + # user = user ** (fn_i + 1) + # doc = doc ** (fn_i + 1) + # return np.inner(user, doc) + # return user[fn_i] * doc[fn_i] + + return fn + + +VM_WEIGHT_LOW = -1.0 +VM_WEIGHT_HIGH = 1.0 +MATCH_REWARD_BOOST = 3.0 + + +def get_default_score_fns(num_weights): + return [make_default_score_fn(i) for i in range(num_weights)] + + +def get_ground_truth_weights(num_weights): + return np.array([1] * num_weights) + + +@dataclass +class OraclePVM(RecSim): + """ + Wrapper over RecSim for simulating (Personalized) VM Tuning. + The state is the same as for RecSim (user feature + candidate features). + There are num_weights VM weights to tune, and so action space is a vector + of length num_weights. + OraclePVM hides num_weights number of + (1) score_fns (akin to VM models), that take in + user + candidate_i feature and produces a score for candidate_i. + (2) ground_truth_weights, that are used to produce "ground truth", a.k.a. + "Oracle", rankings. + Reward is the Kendall-Tau between ground truth and the ranking created from the + weights given by action. If the rankings match exactly, the reward is boosted to 3. + NOTE: This environment only tests if the Agent can learn the hidden ground + truth weights, which may be far from optimal (in terms of RecSim's rewards, + which we're ignoring). This is easier for unit tests, but in the real world + we will be trying to learn the optimal weights, and the reward signal would + reflect that. + + TODO: made environment easier to learn from by not using RecSim. 
+ """ + + user_feat_dim: int = 1 + candidate_feat_dim: int = 3 + num_weights: int = 3 + + def __post_init_post_parse__(self): + assert ( + self.slate_size == self.num_candidates + ), f"Must be equal (slate_size) {self.slate_size} != (num_candidates) {self.num_candidates}" + super().__post_init_post_parse__() + self.score_fns: List[SCORE_FUNCTION_T] = get_default_score_fns(self.num_weights) + self.ground_truth_weights: List[float] = get_ground_truth_weights( + self.num_weights + ) + assert len(self.score_fns) == len( + self.ground_truth_weights + ), f"{len(self.score_fns)} != {len(self.ground_truth_weights)}" + assert ( + len(self.ground_truth_weights) == self.num_weights + ), f"{self.ground_truth_weights.shape} != {self.num_weights}" + + def reset(self): + self.prev_obs = super().reset() + self.prev_obs.update( + { + "user": np.random.rand(self.user_feat_dim), + "doc": OrderedDict( + [ + (str(i), np.random.rand(self.candidate_feat_dim)) + for i in range(self.num_candidates) + ] + ), + } + ) + return self.prev_obs + + def step(self, action): + user_feat = self.prev_obs["user"] + doc_feats = self.prev_obs["doc"] + scores = self._get_scores(user_feat, doc_feats) + ground_truth_ranking = self._get_ranking(scores, self.ground_truth_weights) + policy_ranking = self._get_ranking(scores, action) + t = True + # comment out to avoid non-stationary + # self.prev_obs, _, t, i = super().step(policy_ranking) + + num_matches = (ground_truth_ranking == policy_ranking).sum() + if num_matches == self.slate_size: + reward = MATCH_REWARD_BOOST + else: + reward, _p_value = stats.kendalltau(ground_truth_ranking, policy_ranking) + return self.prev_obs, reward, t, None + + def is_match(self, reward): + # for evaluation, return true iff the reward represents a match + return reward > (MATCH_REWARD_BOOST - 1e-6) + + @property + def action_space(self): + return gym.spaces.Box( + low=VM_WEIGHT_LOW, high=VM_WEIGHT_HIGH, shape=(self.num_weights,) + ) + + @action_space.setter + def action_space(self, val): + pass + + def _get_scores( + self, user_feat: np.ndarray, doc_feats: Dict[str, np.ndarray] + ) -> np.ndarray: + # num_docs x num_scores where i,j coordinate is jth score for ith doc + scores = np.array( + [ + [score_fn(user_feat, doc_feat) for score_fn in self.score_fns] + for _k, doc_feat in doc_feats.items() + ] + ) + return scores + + def _get_ranking(self, scores: np.ndarray, weights: np.ndarray): + assert weights.shape == (scores.shape[1],), f"{weights.shape}, {scores.shape}" + weighted_scores = scores * weights + values = weighted_scores.sum(axis=1) + indices = np.argsort(-values) + return indices[: self.slate_size] + + def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData: + preprocessor = RecsimObsPreprocessor.create_from_env(self) + preprocessed_obs = preprocessor(obs) + return rlt._embed_states(preprocessed_obs) + + def serving_obs_preprocessor(self, obs: np.ndarray): + preprocessor = RecsimObsPreprocessor.create_from_env(self) + x = preprocessor(obs) + # user was batch_size x state_size, stack + user = x.float_features.unsqueeze(1).repeat_interleave( + self.num_candidates, dim=1 + ) + candidates = x.candidate_docs.float_features + combined = torch.cat([user, candidates], dim=2).squeeze(0) + return (combined, torch.ones_like(combined, dtype=torch.uint8)) diff --git a/reagent/gym/envs/pomdp/__init__.py b/reagent/gym/envs/pomdp/__init__.py new file mode 100644 index 000000000..40539064a --- /dev/null +++ b/reagent/gym/envs/pomdp/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. 
and its affiliates. All rights reserved. diff --git a/reagent/gym/envs/pomdp/pocman.py b/reagent/gym/envs/pomdp/pocman.py index 2d6156f67..aa94a51b3 100644 --- a/reagent/gym/envs/pomdp/pocman.py +++ b/reagent/gym/envs/pomdp/pocman.py @@ -219,7 +219,7 @@ def __init__(self): self.observation_space = Box(low=0, high=1, shape=(STATE_DIM,)) self._reward_range = 100 self.step_cnt = 0 - self._max_episode_steps = self.board["_max_step"] + self.max_steps = self.board["_max_step"] def seed(self, seed=None): np.random.seed(seed) diff --git a/reagent/gym/envs/pomdp/state_embed_env.py b/reagent/gym/envs/pomdp/state_embed_env.py index 90ea41400..a710ff305 100644 --- a/reagent/gym/envs/pomdp/state_embed_env.py +++ b/reagent/gym/envs/pomdp/state_embed_env.py @@ -14,21 +14,20 @@ import gym import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import torch -from gym import Env from gym.spaces import Box +from reagent.gym.envs import EnvWrapper from reagent.models.world_model import MemoryNetwork logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -class StateEmbedEnvironment(Env): +class StateEmbedEnvironment(gym.Env): def __init__( self, - gym_env: Env, + gym_env: EnvWrapper, mdnrnn: MemoryNetwork, max_embed_seq_len: int, state_min_value: Optional[float] = None, @@ -71,7 +70,7 @@ def __getattr__(self, name): @torch.no_grad() def embed_state(self, state): - """ Embed state after either reset() or step() """ + """Embed state after either reset() or step()""" assert len(self.recent_states) == len(self.recent_actions) old_mdnrnn_mode = self.mdnrnn.mdnrnn.training self.mdnrnn.mdnrnn.eval() diff --git a/reagent/gym/envs/pomdp/string_game.py b/reagent/gym/envs/pomdp/string_game.py index c913bbbb2..097d26139 100644 --- a/reagent/gym/envs/pomdp/string_game.py +++ b/reagent/gym/envs/pomdp/string_game.py @@ -32,10 +32,10 @@ class StringGameEnv(Env): - def __init__(self): + def __init__(self, max_steps=MAX_STEP): np.random.seed(123) torch.manual_seed(123) - self._max_episode_steps = MAX_STEP + self.max_steps = max_steps self.reward_map = {} self._init_reward() logger.debug(self.reward_map) @@ -80,7 +80,7 @@ def step(self, action): self.recent_states.append(self.cur_state) self.recent_actions.append(action) reward, info = self.get_reward() - if self.step_cnt >= MAX_STEP: + if self.step_cnt >= self.max_steps: self.done = True ob = self.get_observation() self.cur_state = ob @@ -91,12 +91,10 @@ def get_observation(self): """ The function you can write to customize transitions. In this specific environment, the next state is exactly the latest action taken. - The initial observation is character "A". + The initial observation is all zeros. 
""" ob = np.zeros(STATE_DIM) - if len(self.recent_actions) == 0: - ob[0] = 1 - else: + if len(self.recent_actions) > 0: ob[self.recent_actions[-1]] = 1 return ob @@ -111,9 +109,15 @@ def reset(self): def print_internal_state(self): print("Step", self.step_cnt) - state_str = "".join( - [CHARACTERS[np.nonzero(c)[0].item()] for c in self.recent_states] - ) + + def state_to_chr(s): + state_index = np.nonzero(s)[0] + if len(state_index) != 1: + # initial state + return "I" + return CHARACTERS[state_index.item()] + + state_str = "".join([state_to_chr(s) for s in self.recent_states]) action_str = "".join([CHARACTERS[c] for c in self.recent_actions]) print( "Internal state: recent states {}, recent actions {}".format( diff --git a/reagent/gym/envs/pomdp/string_game_v1.py b/reagent/gym/envs/pomdp/string_game_v1.py new file mode 100644 index 000000000..a568bc34f --- /dev/null +++ b/reagent/gym/envs/pomdp/string_game_v1.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +""" +A game with a stochastic length of the MDP but no longer than 3. + +An agent can choose one character to reveal (either "A" or "B") as the action, +and the next state is exactly the action just taken (i.e., the transition +function only depends on the action). Each episode is limited to 3 steps. + +There is some probability to terminate at any step (but the agent must terminate if +making 3 steps) +If the current state is "A", the agent has 0.5 probability to make to the next step. +If the current state is "B", the agent has 0.9 probability to make to the next step. +The reward is given at the terminal state, based on the accumulated observation (a string). + +If the agent observes "AAA" (survive the first 2 steps and terminate at the last step + no matter what action taken), it receives +5 reward. +If the agent observes "BA" (survive the first step and terminate at the second step), +it receives +4 reward. +For all other scenarios, the agent receives 0 reward. + +If we plan for 3 steps ahead from the beginning, "A" is the better action to take first. +If we plan with consideration of termination probabilities, "B" is better. Because: +The expected Q-value of "A" = 0.5 * 0 + 0.5 * max(0.5 * 0 + 0.5 * max(5, 0), 0) = 1.25 +The expected Q-value of "B" = 0.1 * 0 + 0.9 * max(0.5 * 4 + 0.5 * max(0, 0), 0) = 1.8 +""" +import logging +from collections import defaultdict, deque + +import numpy as np +import torch +from gym import Env +from gym.spaces import Box, Discrete + + +logger = logging.getLogger(__name__) + + +MAX_STEP = 3 +CHARACTERS = ["A", "B"] +STATE_DIM = ACTION_DIM = len(CHARACTERS) + + +class StringGameEnvV1(Env): + def __init__(self, max_steps=MAX_STEP): + np.random.seed(123) + torch.manual_seed(123) + self.max_steps = max_steps + self.reward_map = defaultdict(float) + self.terminal_probs = defaultdict(float) + self._init_reward_and_terminal_probs() + self.recent_actions = deque([], maxlen=MAX_STEP) + self.action_space = Discrete(ACTION_DIM) + self.observation_space = Box(low=0, high=1, shape=(STATE_DIM,)) + self.step_cnt = 0 + self.reset() + + def _init_reward_and_terminal_probs(self): + self.reward_map["AAA"] = 5.0 + self.reward_map["BA"] = 4.0 + self.terminal_probs["A"] = 0.5 + self.terminal_probs["B"] = 0.1 + + def seed(self, seed=None): + np.random.seed(seed) + torch.manual_seed(seed) + + @staticmethod + def random_action(): + return np.random.randint(0, ACTION_DIM) + + def get_reward(self): + """ + The function you can write to customize rewards. 
In this + specific environment, the reward only depends on action history + """ + recent_characters = [CHARACTERS[c] for c in list(self.recent_actions)] + string = "".join(recent_characters) + if not self.done: + reward = 0 + else: + reward = self.reward_map[string] + return reward, string + + def step(self, action): + assert self.action_space.contains(action) + assert self.done is False + + self.step_cnt += 1 + self.recent_actions.append(action) + if self.step_cnt >= self.max_steps: + self.done = True + else: + self.done = self.sample_terminal(action) + reward, info = self.get_reward() + ob = self.get_observation() + + return ob, reward, self.done, {"reward_str": info} + + def sample_terminal(self, action): + terminal_probability = self.terminal_probs[CHARACTERS[action]] + if np.random.rand() < terminal_probability: + return True + return False + + def get_observation(self): + """ + The function you can write to customize transitions. In this + specific environment, the next state is exactly the latest action taken. + The initial observation is all zeros. + """ + ob = np.zeros(STATE_DIM) + if len(self.recent_actions) > 0: + ob[self.recent_actions[-1]] = 1 + return ob + + def reset(self): + self.done = False + self.recent_actions = deque([], maxlen=MAX_STEP) + self.step_cnt = 0 + ob = self.get_observation() + return ob + + def print_internal_state(self): + action_str = "".join([CHARACTERS[c] for c in self.recent_actions]) + logger.debug( + f"Step {self.step_cnt}, recent actions {action_str}, terminal={self.done}" + ) + + @staticmethod + def print_ob(ob): + return str(ob) + + @staticmethod + def print_action(action): + return CHARACTERS[action] diff --git a/reagent/gym/envs/recsim.py b/reagent/gym/envs/recsim.py index c896ee764..4c8c13131 100644 --- a/reagent/gym/envs/recsim.py +++ b/reagent/gym/envs/recsim.py @@ -1,76 +1,147 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
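# Worked check (illustrative, not part of the patch): the expected Q-values quoted
# in the StringGameEnvV1 docstring above follow directly from its reward map
# ("AAA" -> 5, "BA" -> 4) and termination probabilities (A: 0.5, B: 0.1).
TERMINAL_PROB = {"A": 0.5, "B": 0.1}

# Q of playing "A" first: survive step 1 (prob 0.5), play "A" again, survive step 2
# (prob 0.5), then the forced terminal step yields "AAA" and reward 5.
q_first_a = (1 - TERMINAL_PROB["A"]) * ((1 - TERMINAL_PROB["A"]) * 5.0)

# Q of playing "B" first: survive step 1 (prob 0.9), play "A", terminate at step 2
# (prob 0.5) to observe "BA" and collect reward 4.
q_first_b = (1 - TERMINAL_PROB["B"]) * (TERMINAL_PROB["A"] * 4.0)

assert abs(q_first_a - 1.25) < 1e-9 and abs(q_first_b - 1.8) < 1e-9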
-import copy import logging -from enum import Enum import gym -import gym.spaces.dict import numpy as np +import reagent.core.types as rlt +from reagent.core.dataclasses import dataclass +from reagent.gym.envs.env_wrapper import EnvWrapper +from reagent.gym.envs.wrappers.recsim import ValueWrapper +from reagent.gym.preprocessors.default_preprocessors import RecsimObsPreprocessor +from recsim import choice_model, utils +from recsim.environments import interest_evolution, interest_exploration +from recsim.simulator import environment, recsim_gym logger = logging.getLogger(__name__) -class ValueMode(Enum): - CONST = 0 - INNER_PROD = 1 - - -class ValueWrapper(gym.core.ObservationWrapper): - KEY = "value" - - def __init__(self, env, value_mode: ValueMode): - super().__init__(env) - self.value_mode = value_mode - - @property - def observation_space(self): - obs_spaces = copy.copy(self.env.observation_space.spaces) - try: - augmentation = obs_spaces["augmentation"] - except KeyError: - augmentation = gym.spaces.Dict() - obs_spaces["augmentation"] = augmentation - - for k in obs_spaces["doc"].spaces: - try: - aug_k = augmentation[k] - except KeyError: - aug_k = gym.spaces.Dict() - augmentation.spaces[k] = aug_k - - assert not aug_k.contains(self.KEY) - - aug_k.spaces[self.KEY] = gym.spaces.Box(low=-1.0, high=1.0, shape=()) - - return gym.spaces.Dict(obs_spaces) - - @observation_space.setter - def observation_space(self, x): - # We just have this method here so that Wrapper.__init__() can run - pass - - def observation(self, obs): - try: - augmentation = obs["augmentation"] - except KeyError: - augmentation = {} - obs["augmentation"] = augmentation - - for k in obs["doc"]: - try: - aug_k = augmentation[k] - except KeyError: - aug_k = {} - augmentation[k] = aug_k - - if self.value_mode == ValueMode.CONST: - aug_k[self.KEY] = 0.0 - elif self.value_mode == ValueMode.INNER_PROD: - aug_k[self.KEY] = np.inner(obs["user"], obs["doc"][k]) - else: - raise NotImplementedError(f"{self.value_mode} is not implemented") - - return obs +def dot_value_fn(user, doc): + return np.inner(user, doc) + + +def multi_selection_value_fn(user, doc): + return (np.inner(user, doc) + 1.0) / 2.0 + + +@dataclass +class RecSim(EnvWrapper): + num_candidates: int + slate_size: int + resample_documents: bool = True + single_selection: bool = True + is_interest_exploration: bool = False + initial_seed: int = 1 + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + if self.is_interest_exploration and not self.single_selection: + raise NotImplementedError( + "Multiselect interest exploration not implemented" + ) + + def make(self) -> gym.Env: + env_config = { + "slate_size": self.slate_size, + "seed": self.initial_seed, + "num_candidates": self.num_candidates, + "resample_documents": self.resample_documents, + } + if self.is_interest_exploration: + env = interest_exploration.create_environment(env_config) + return ValueWrapper(env, lambda user, doc: 0.0) + + if self.single_selection: + env = interest_evolution.create_environment(env_config) + return ValueWrapper(env, dot_value_fn) + else: + env = create_multiclick_environment(env_config) + return ValueWrapper(env, multi_selection_value_fn) + + def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData: + # TODO: remove RecsimObsPreprocessor and move it here + preprocessor = RecsimObsPreprocessor.create_from_env(self) + return preprocessor(obs) + + def serving_obs_preprocessor(self, obs: np.ndarray): + preprocessor = 
RecsimObsPreprocessor.create_from_env(self) + return preprocessor(obs) + + """ + state["user"] is shared across all output dicts + this is confusing, and should be deepcopied instead + """ + + def reset(self, **kwargs): + state = self.env.reset(**kwargs) + state["user"] = np.copy(state["user"]) + return state + + def step(self, action): + state, r, t, i = self.env.step(action) + state["user"] = np.copy(state["user"]) + return state, r, t, i + + +class MulticlickIEvUserModel(interest_evolution.IEvUserModel): + def simulate_response(self, documents): + responses = [self._response_model_ctor() for _ in documents] + self.choice_model.score_documents( + self._user_state, [doc.create_observation() for doc in documents] + ) + selected_indices = self.choice_model.choose_items() + for i, response in enumerate(responses): + response.quality = documents[i].quality + response.cluster_id = documents[i].cluster_id + for selected_index in selected_indices: + self._generate_click_response( + documents[selected_index], responses[selected_index] + ) + return responses + + +class UserState(interest_evolution.IEvUserState): + def score_document(self, doc_obs): + scores = super().score_document(doc_obs) + # return choice_model.softmax(scores) + return (scores + 1) / 2 + + +def create_multiclick_environment(env_config): + """Creates an interest evolution environment.""" + + def choice_model_ctor(*args, **kwargs): + return choice_model.DependentClickModel( + next_probs=[0.8 ** (i + 1) for i in range(env_config["slate_size"])], + slate_size=env_config["slate_size"], + score_scaling=1.0, + ) + + user_model = MulticlickIEvUserModel( + env_config["slate_size"], + choice_model_ctor=choice_model_ctor, + response_model_ctor=interest_evolution.IEvResponse, + user_state_ctor=UserState, + seed=env_config["seed"], + ) + + document_sampler = interest_evolution.UtilityModelVideoSampler( + doc_ctor=interest_evolution.IEvVideo, seed=env_config["seed"] + ) + + ievenv = environment.Environment( + user_model, + document_sampler, + env_config["num_candidates"], + env_config["slate_size"], + resample_documents=env_config["resample_documents"], + ) + + return recsim_gym.RecSimGymEnv( + ievenv, + interest_evolution.clicked_watchtime_reward, + utils.aggregate_video_cluster_metrics, + utils.write_video_cluster_metrics, + ) diff --git a/reagent/gym/envs/toy_vm.py b/reagent/gym/envs/toy_vm.py new file mode 100644 index 000000000..6716c5192 --- /dev/null +++ b/reagent/gym/envs/toy_vm.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
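# Usage sketch (assumed; the field values are arbitrary examples): the RecSim wrapper
# above is a config-style dataclass, so building an interest-evolution environment and
# its trainer/serving observations looks roughly like this.
from reagent.gym.envs.recsim import RecSim

env = RecSim(num_candidates=10, slate_size=3, resample_documents=True)
obs = env.reset()                              # dict with "user" and "doc" (plus "augmentation" from ValueWrapper)
training_obs = env.obs_preprocessor(obs)       # rlt.FeatureData via RecsimObsPreprocessor
serving_obs = env.serving_obs_preprocessor(obs)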
+ +from collections import namedtuple +from typing import List, Optional + +import gym +import numpy as np +from gym.utils import seeding +from gym.wrappers.time_limit import TimeLimit +from reagent.core.dataclasses import dataclass +from reagent.gym.envs.env_wrapper import EnvWrapper +from reagent.gym.envs.recsim import RecsimObsPreprocessor +from reagent.gym.envs.wrappers.recsim import ValueWrapper +from scipy.special import expit, logit # @manual=third-party//scipy:scipy-py + + +Document = namedtuple("Document", ["tap", "quality", "abandon"]) + + +def simulate_reward(slate: List[Document], prng: np.random.RandomState): + reward = 0 + position = 0 + n = len(slate) + if not n: + return 0 # Bail if slate is empty + comparison = slate[position].tap + roll = prng.rand() + done = comparison < roll + while not done: + reward += slate[position].quality + comparison = 1 - slate[position].abandon + roll = prng.rand() + position += 1 + done = (comparison < roll) or (position >= n) + return reward + + +def random_document(prng) -> Document: + p, q, r = prng.rand(), prng.rand(), prng.rand() + return Document(expit(logit(p) + 1), q, expit(logit(r) - 2)) + + +class ToyVMEnv(gym.Env): + def __init__(self, slate_size: int) -> None: + self.slate_size = slate_size + self.action_space = gym.spaces.MultiDiscrete( + [self.slate_size] * self.slate_size + ) + self.observation_space = gym.spaces.Dict( + { + "user": gym.spaces.Box(low=0, high=1, shape=(1,)), + "doc": gym.spaces.Dict( + { + str(k): gym.spaces.Box( + low=0, high=1, shape=(self.slate_size, 3) + ) + for k in range(self.slate_size) + } + ), + } + ) + self.response_space = gym.spaces.Dict({}) + self._doc_sampler = np.random.RandomState() + self._reward_prng = np.random.RandomState() + + def seed(self, seed: Optional[int] = None): + self._doc_sampler, seed1 = seeding.np_random(seed) + _seed2 = seeding.hash_seed(seed1 + 1) % 2**31 + self._reward_prng, seed2 = seeding.np_random(_seed2) + return [seed1, seed2] + + def _sample_candidates(self): + self.candidates = [ + random_document(self._doc_sampler) for _ in range(self.slate_size) + ] + n = len(self.candidates) + return { + "user": np.zeros((1,)), + "doc": { + str(k): np.array(self.candidates[k], dtype=np.float32) for k in range(n) + }, + } + + def step(self, action): + slate = [self.candidates[i] for i in action] + reward = simulate_reward(slate, self._reward_prng) + obs = self._sample_candidates() + done = False + info = {"documents": self.candidates} + return obs, reward, done, info + + def reset(self): + return self._sample_candidates() + + +def zero_augment(user, doc) -> float: + return 0.0 + + +@dataclass +class ToyVM(EnvWrapper): + slate_size: int = 5 + max_episode_steps: int = 100 + initial_seed: Optional[int] = None + + def make(self): + env = ValueWrapper( + TimeLimit( + ToyVMEnv(self.slate_size), + max_episode_steps=self.max_episode_steps, + ), + zero_augment, + ) + if self.initial_seed: + env.seed(self.initial_seed) + return env + + def action_extractor(self, actor_output): + # Extract action from actor output + return actor_output.action.squeeze() + + def obs_preprocessor(self, obs): + preprocessor = RecsimObsPreprocessor.create_from_env(self) + return preprocessor(obs) + + def serving_obs_preprocessor(self, obs): + preprocessor = RecsimObsPreprocessor.create_from_env(self) + return preprocessor(obs) diff --git a/reagent/gym/envs/utils.py b/reagent/gym/envs/utils.py index e80e75365..8c226730f 100644 --- a/reagent/gym/envs/utils.py +++ b/reagent/gym/envs/utils.py @@ -7,7 +7,6 @@ logger = 
logging.getLogger(__name__) -logger.setLevel(logging.INFO) def register_if_not_exists(id, entry_point): @@ -15,5 +14,5 @@ def register_if_not_exists(id, entry_point): Preventing tests from failing trying to re-register environments """ if id not in registry.env_specs: - logging.info(f"Registering id={id}, entry_point={entry_point}.") + logger.debug(f"Registering id={id}, entry_point={entry_point}.") register(id=id, entry_point=entry_point) diff --git a/reagent/gym/envs/wrappers/__init__.py b/reagent/gym/envs/wrappers/__init__.py new file mode 100644 index 000000000..40539064a --- /dev/null +++ b/reagent/gym/envs/wrappers/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/gym/envs/wrappers/recsim.py b/reagent/gym/envs/wrappers/recsim.py new file mode 100644 index 000000000..58a5592b0 --- /dev/null +++ b/reagent/gym/envs/wrappers/recsim.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import copy +import logging + +import gym +import gym.spaces.dict + + +logger = logging.getLogger(__name__) + + +class ValueWrapper(gym.core.ObservationWrapper): + KEY = "value" + + def __init__(self, env, value_fn): + """ + Args: + env: a RecSim gym environment + value_fn: a function taking user & document feature, + returning the value of the document for the user + """ + super().__init__(env) + self.value_fn = value_fn + + @property + def observation_space(self): + obs_spaces = copy.copy(self.env.observation_space.spaces) + try: + augmentation = obs_spaces["augmentation"] + except KeyError: + augmentation = gym.spaces.Dict() + obs_spaces["augmentation"] = augmentation + + for k in obs_spaces["doc"].spaces: + try: + aug_k = augmentation[k] + except KeyError: + aug_k = gym.spaces.Dict() + augmentation.spaces[k] = aug_k + + assert not aug_k.contains(self.KEY) + + aug_k.spaces[self.KEY] = gym.spaces.Box(low=-1.0, high=1.0, shape=()) + + return gym.spaces.Dict(obs_spaces) + + @observation_space.setter + def observation_space(self, x): + # We just have this method here so that Wrapper.__init__() can run + pass + + def observation(self, obs): + try: + augmentation = obs["augmentation"] + except KeyError: + augmentation = {} + obs["augmentation"] = augmentation + + for k in obs["doc"]: + try: + aug_k = augmentation[k] + except KeyError: + aug_k = {} + augmentation[k] = aug_k + + aug_k[self.KEY] = self.value_fn(obs["user"], obs["doc"][k]) + + return obs diff --git a/reagent/gym/envs/simple_minigrid.py b/reagent/gym/envs/wrappers/simple_minigrid.py similarity index 83% rename from reagent/gym/envs/simple_minigrid.py rename to reagent/gym/envs/wrappers/simple_minigrid.py index bcb79a836..1c1bd1462 100644 --- a/reagent/gym/envs/simple_minigrid.py +++ b/reagent/gym/envs/wrappers/simple_minigrid.py @@ -1,3 +1,6 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
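# Sketch (illustrative shapes, not from the patch): what ValueWrapper.observation adds
# to a RecSim observation -- for each candidate doc k, an obs["augmentation"][k]["value"]
# entry computed by the supplied value_fn.
import numpy as np

def value_fn(user, doc):
    return np.inner(user, doc)    # same role as dot_value_fn earlier in this diff

obs = {
    "user": np.array([0.2, 0.8]),
    "doc": {"0": np.array([1.0, 0.0]), "1": np.array([0.5, 0.5])},
}
obs["augmentation"] = {
    k: {"value": value_fn(obs["user"], obs["doc"][k])} for k in obs["doc"]
}
assert obs["augmentation"]["0"]["value"] == 0.2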
+ import gym import gym_minigrid # noqa import numpy as np @@ -5,7 +8,7 @@ from gym_minigrid.minigrid import DIR_TO_VEC -NUM_DIRECTIONS = len(DIR_TO_VEC) +NUM_DIRECTIONS: int = len(DIR_TO_VEC) class SimpleObsWrapper(gym.core.ObservationWrapper): @@ -13,7 +16,7 @@ class SimpleObsWrapper(gym.core.ObservationWrapper): Encode the agent's position & direction in a one-hot vector """ - def __init__(self, env): + def __init__(self, env) -> None: super().__init__(env) self.observation_space = spaces.Box( diff --git a/reagent/gym/normalizers.py b/reagent/gym/normalizers.py new file mode 100644 index 000000000..99c4908ac --- /dev/null +++ b/reagent/gym/normalizers.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import collections +import logging + +import numpy as np +from reagent.core.parameters import NormalizationParameters + + +logger = logging.getLogger(__name__) + + +def normalizer_helper(feats, feature_type, min_value=None, max_value=None): + assert feature_type in ( + "DISCRETE_ACTION", + "CONTINUOUS", + "CONTINUOUS_ACTION", + ), f"invalid feature type: {feature_type}." + assert type(min_value) == type(max_value) and type(min_value) in ( + int, + float, + list, + np.ndarray, + type(None), + ), f"invalid {type(min_value)}, {type(max_value)}" + if type(min_value) in [int, float, type(None)]: + min_value = [min_value] * len(feats) + max_value = [max_value] * len(feats) + normalization = collections.OrderedDict( + [ + ( + feats[i], + NormalizationParameters( + feature_type=feature_type, + boxcox_lambda=None, + boxcox_shift=None, + mean=0, + stddev=1, + possible_values=None, + quantiles=None, + min_value=float(min_value[i]) if min_value[i] is not None else None, + max_value=float(max_value[i]) if max_value[i] is not None else None, + ), + ) + for i in range(len(feats)) + ] + ) + return normalization + + +def discrete_action_normalizer(feats): + return normalizer_helper(feats, "DISCRETE_ACTION") + + +def only_continuous_normalizer(feats, min_value=None, max_value=None): + return normalizer_helper(feats, "CONTINUOUS", min_value, max_value) + + +def only_continuous_action_normalizer(feats, min_value=None, max_value=None): + return normalizer_helper(feats, "CONTINUOUS_ACTION", min_value, max_value) diff --git a/reagent/gym/policies/policy.py b/reagent/gym/policies/policy.py index 551a5e574..00d086fea 100644 --- a/reagent/gym/policies/policy.py +++ b/reagent/gym/policies/policy.py @@ -1,13 +1,15 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from typing import Any, Optional -import reagent.types as rlt +import reagent.core.types as rlt +import torch from reagent.gym.types import Sampler, Scorer class Policy: - def __init__(self, scorer: Scorer, sampler: Sampler): + def __init__(self, scorer: Scorer, sampler: Sampler) -> None: """ The Policy composes the scorer and sampler to create actions. @@ -19,13 +21,17 @@ def __init__(self, scorer: Scorer, sampler: Sampler): self.scorer = scorer self.sampler = sampler - def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput: + def act( + self, obs: Any, possible_actions_mask: Optional[torch.Tensor] = None + ) -> rlt.ActorOutput: """ Performs the composition described above. These are the actions being put into the replay buffer, not necessary the actions taken by the environment! 
""" - scores = self.scorer(obs) + scorer_inputs = (obs,) + if possible_actions_mask is not None: + scorer_inputs += (possible_actions_mask,) + scores = self.scorer(*scorer_inputs) actor_output = self.sampler.sample_action(scores) - return actor_output.cpu().detach() diff --git a/reagent/gym/policies/predictor_policies.py b/reagent/gym/policies/predictor_policies.py index b11c6478a..a7bc13d9c 100644 --- a/reagent/gym/policies/predictor_policies.py +++ b/reagent/gym/policies/predictor_policies.py @@ -1,23 +1,33 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import reagent.types as rlt +from typing import Optional, Tuple, Union + +import reagent.core.types as rlt import torch +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import RLParameters from reagent.gym.policies import Policy -from reagent.gym.policies.samplers.discrete_sampler import GreedyActionSampler +from reagent.gym.policies.samplers.discrete_sampler import ( + GreedyActionSampler, + SoftmaxActionSampler, +) +from reagent.gym.policies.samplers.top_k_sampler import TopKSampler from reagent.gym.policies.scorers.discrete_scorer import ( discrete_dqn_serving_scorer, parametric_dqn_serving_scorer, ) +from reagent.gym.policies.scorers.slate_q_scorer import slate_q_serving_scorer +from reagent.models.actor import LOG_PROB_MAX, LOG_PROB_MIN -try: +if IS_FB_ENVIRONMENT: from reagent.fb.prediction.fb_predictor_wrapper import ( FbActorPredictorUnwrapper as ActorPredictorUnwrapper, FbDiscreteDqnPredictorUnwrapper as DiscreteDqnPredictorUnwrapper, FbParametricPredictorUnwrapper as ParametricDqnPredictorUnwrapper, ) -except ImportError: +else: from reagent.prediction.predictor_wrapper import ( ActorPredictorUnwrapper, DiscreteDqnPredictorUnwrapper, @@ -32,11 +42,8 @@ def create_predictor_policy_from_model(serving_module, **kwargs) -> Policy: """ module_name = serving_module.original_name if module_name.endswith("DiscreteDqnPredictorWrapper"): - sampler = GreedyActionSampler() - scorer = discrete_dqn_serving_scorer( - q_network=DiscreteDqnPredictorUnwrapper(serving_module) - ) - return Policy(scorer=scorer, sampler=sampler) + rl_parameters = kwargs.get("rl_parameters", None) + return DiscreteDQNPredictorPolicy(serving_module, rl_parameters) elif module_name.endswith("ActorPredictorWrapper"): return ActorPredictorPolicy(predictor=ActorPredictorUnwrapper(serving_module)) elif module_name.endswith("ParametricDqnPredictorWrapper"): @@ -45,11 +52,20 @@ def create_predictor_policy_from_model(serving_module, **kwargs) -> Policy: assert ( max_num_actions is not None ), f"max_num_actions not given for Parametric DQN." 
- sampler = GreedyActionSampler() - scorer = parametric_dqn_serving_scorer( - max_num_actions=max_num_actions, - q_network=ParametricDqnPredictorUnwrapper(serving_module), - ) + q_network = ParametricDqnPredictorUnwrapper(serving_module) + + # TODO: write SlateQ Wrapper + slate_size = kwargs.get("slate_size", None) + if slate_size is not None: + scorer = slate_q_serving_scorer( + num_candidates=max_num_actions, q_network=q_network + ) + sampler = TopKSampler(k=slate_size) + else: + sampler = GreedyActionSampler() + scorer = parametric_dqn_serving_scorer( + max_num_actions=max_num_actions, q_network=q_network + ) return Policy(scorer=scorer, sampler=sampler) else: raise NotImplementedError( @@ -57,12 +73,64 @@ def create_predictor_policy_from_model(serving_module, **kwargs) -> Policy: ) +class DiscreteDQNPredictorPolicy(Policy): + def __init__( + self, wrapped_dqn_predictor, rl_parameters: Optional[RLParameters] + ) -> None: + if rl_parameters and rl_parameters.softmax_policy: + self.sampler = SoftmaxActionSampler(temperature=rl_parameters.temperature) + else: + self.sampler = GreedyActionSampler() + self.scorer = discrete_dqn_serving_scorer( + q_network=DiscreteDqnPredictorUnwrapper(wrapped_dqn_predictor) + ) + + @torch.no_grad() + def act( + self, + obs: Union[rlt.ServingFeatureData, Tuple[torch.Tensor, torch.Tensor]], + possible_actions_mask: Optional[torch.Tensor], + ) -> rlt.ActorOutput: + """Input is either state_with_presence, or + ServingFeatureData (in the case of sparse features)""" + assert isinstance(obs, tuple) + if isinstance(obs, rlt.ServingFeatureData): + state: rlt.ServingFeatureData = obs + else: + state = rlt.ServingFeatureData( + float_features_with_presence=obs, + id_list_features={}, + id_score_list_features={}, + ) + scores = self.scorer(state, possible_actions_mask) + return self.sampler.sample_action(scores).cpu().detach() + + class ActorPredictorPolicy(Policy): - def __init__(self, predictor): + def __init__(self, predictor) -> None: self.predictor = predictor @torch.no_grad() - def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput: - action = self.predictor(obs).cpu() - # TODO: return log_probs as well - return rlt.ActorOutput(action=action) + def act( + self, + obs: Union[rlt.ServingFeatureData, Tuple[torch.Tensor, torch.Tensor]], + possible_actions_mask: Optional[torch.Tensor] = None, + ) -> rlt.ActorOutput: + """Input is either state_with_presence, or + ServingFeatureData (in the case of sparse features)""" + assert isinstance(obs, tuple) + if isinstance(obs, rlt.ServingFeatureData): + state: rlt.ServingFeatureData = obs + else: + state = rlt.ServingFeatureData( + float_features_with_presence=obs, + id_list_features={}, + id_score_list_features={}, + ) + output = self.predictor(*state) + if isinstance(output, tuple): + action, log_prob = output + log_prob = log_prob.clamp(LOG_PROB_MIN, LOG_PROB_MAX) + return rlt.ActorOutput(action=action.cpu(), log_prob=log_prob.cpu()) + else: + return rlt.ActorOutput(action=output.cpu()) diff --git a/reagent/gym/policies/random_policies.py b/reagent/gym/policies/random_policies.py index 0c250ac0e..213a68f25 100644 --- a/reagent/gym/policies/random_policies.py +++ b/reagent/gym/policies/random_policies.py @@ -1,18 +1,19 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
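# Usage sketch (assumed): turning an exported serving module into an evaluation policy
# with the factory above. `serving_module` stands in for a torchscript predictor wrapper
# produced elsewhere; the RLParameters fields mirror the attributes referenced in
# DiscreteDQNPredictorPolicy (softmax_policy, temperature).
from reagent.core.parameters import RLParameters
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model

def make_eval_policy(serving_module, softmax: bool = False, temperature: float = 1.0):
    rl_parameters = RLParameters(softmax_policy=softmax, temperature=temperature)
    # DiscreteDqnPredictorWrapper modules become DiscreteDQNPredictorPolicy;
    # ParametricDqnPredictorWrapper modules also need max_num_actions, and
    # optionally slate_size to take the slate-Q / top-k path.
    return create_predictor_policy_from_model(serving_module, rl_parameters=rl_parameters)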
-from typing import List +from typing import List, Optional import gym import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import torch import torch.nn.functional as F +from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE from reagent.gym.policies.policy import Policy -from reagent.parameters import CONTINUOUS_TRAINING_ACTION_RANGE +from reagent.gym.policies.scorers.discrete_scorer import apply_possible_actions_mask -def make_random_policy_for_env(env: gym.Env): +def make_random_policy_for_env(env: gym.Env) -> Policy: if isinstance(env.action_space, gym.spaces.Discrete): # discrete action space return DiscreteRandomPolicy.create_for_env(env) @@ -26,12 +27,12 @@ def make_random_policy_for_env(env: gym.Env): class DiscreteRandomPolicy(Policy): - def __init__(self, num_actions: int): - """ Random actor for accumulating random offline data. """ + def __init__(self, num_actions: int) -> None: + """Random actor for accumulating random offline data.""" self.num_actions = num_actions @classmethod - def create_for_env(cls, env: gym.Env): + def create_for_env(cls, env: gym.Env) -> "DiscreteRandomPolicy": action_space = env.action_space if isinstance(action_space, gym.spaces.Discrete): return cls(num_actions=action_space.n) @@ -40,15 +41,22 @@ def create_for_env(cls, env: gym.Env): else: raise NotImplementedError(f"action_space is {type(action_space)}") - def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput: - """ Act randomly regardless of the observation. """ + def act( + self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None + ) -> rlt.ActorOutput: + """Act randomly regardless of the observation.""" + # pyre-fixme[35]: Target cannot be annotated. obs: torch.Tensor = obs.float_features assert obs.dim() >= 2, f"obs has shape {obs.shape} (dim < 2)" + assert obs.shape[0] == 1, f"obs has shape {obs.shape} (0th dim != 1)" batch_size = obs.shape[0] - weights = torch.ones((batch_size, self.num_actions)) + scores = torch.ones((batch_size, self.num_actions)) + scores = apply_possible_actions_mask( + scores, possible_actions_mask, invalid_score=0.0 + ) # sample a random action - m = torch.distributions.Categorical(weights) + m = torch.distributions.Categorical(scores) raw_action = m.sample() action = F.one_hot(raw_action, self.num_actions) log_prob = m.log_prob(raw_action).float() @@ -56,7 +64,7 @@ def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput: class MultiDiscreteRandomPolicy(Policy): - def __init__(self, num_action_vec: List[int]): + def __init__(self, num_action_vec: List[int]) -> None: self.num_action_vec = num_action_vec self.dists = [ torch.distributions.Categorical(torch.ones(n) / n) @@ -64,14 +72,18 @@ def __init__(self, num_action_vec: List[int]): ] @classmethod - def create_for_env(cls, env: gym.Env): + def create_for_env(cls, env: gym.Env) -> "MultiDiscreteRandomPolicy": action_space = env.action_space if not isinstance(action_space, gym.spaces.MultiDiscrete): raise ValueError(f"Invalid action space: {action_space}") return cls(action_space.nvec.tolist()) - def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput: + # TODO: consider possible_actions_mask + def act( + self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None + ) -> rlt.ActorOutput: + # pyre-fixme[35]: Target cannot be annotated. 
obs: torch.Tensor = obs.float_features batch_size, _ = obs.shape @@ -88,7 +100,7 @@ def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput: class ContinuousRandomPolicy(Policy): - def __init__(self, low: torch.Tensor, high: torch.Tensor): + def __init__(self, low: torch.Tensor, high: torch.Tensor) -> None: self.low = low self.high = high assert ( @@ -97,7 +109,7 @@ def __init__(self, low: torch.Tensor, high: torch.Tensor): self.dist = torch.distributions.uniform.Uniform(self.low, self.high) @classmethod - def create_for_env(cls, env: gym.Env): + def create_for_env(cls, env: gym.Env) -> "ContinuousRandomPolicy": action_space = env.action_space if isinstance(action_space, gym.spaces.Discrete): raise NotImplementedError( @@ -116,13 +128,15 @@ def create_for_env(cls, env: gym.Env): else: raise NotImplementedError(f"action_space is {type(action_space)}") - def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput: - """ Act randomly regardless of the observation. """ + def act( + self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None + ) -> rlt.ActorOutput: + """Act randomly regardless of the observation.""" + # pyre-fixme[35]: Target cannot be annotated. obs: torch.Tensor = obs.float_features assert obs.dim() >= 2, f"obs has shape {obs.shape} (dim < 2)" batch_size = obs.size(0) - # pyre-fixme[6]: Expected `Union[torch.Size, torch.Tensor]` for 1st param - # but got `Tuple[int]`. + # pyre-fixme[6]: For 1st param expected `Size` but got `Tuple[int]`. action = self.dist.sample((batch_size,)) # sum over action_dim (since assuming i.i.d. per coordinate) log_prob = self.dist.log_prob(action).sum(1) diff --git a/reagent/gym/policies/samplers/continuous_sampler.py b/reagent/gym/policies/samplers/continuous_sampler.py index f7c7789b7..b9b23f433 100644 --- a/reagent/gym/policies/samplers/continuous_sampler.py +++ b/reagent/gym/policies/samplers/continuous_sampler.py @@ -1,18 +1,17 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import reagent.types as rlt +import reagent.core.types as rlt import torch from reagent.gym.types import GaussianSamplerScore, Sampler class GaussianSampler(Sampler): - def __init__(self, actor_network): + def __init__(self, actor_network) -> None: self.actor_network = actor_network def _sample_action(self, loc: torch.Tensor, scale_log: torch.Tensor): r = torch.randn_like(scale_log, device=scale_log.device) - # pyre-fixme[16]: `Tensor` has no attribute `exp`. action = torch.tanh(loc + r * scale_log.exp()) # Since each dim are independent, log-prob is simply sum log_prob = self.actor_network._log_prob(r, scale_log) @@ -32,7 +31,6 @@ def _log_prob( self, loc: torch.Tensor, scale_log: torch.Tensor, squashed_action: torch.Tensor ): # This is not getting exported; we can use it - # pyre-fixme[16]: `Tensor` has no attribute `exp`. n = torch.distributions.Normal(loc, scale_log.exp()) raw_action = self.actor_network._atanh(squashed_action) log_prob = n.log_prob(raw_action) diff --git a/reagent/gym/policies/samplers/discrete_sampler.py b/reagent/gym/policies/samplers/discrete_sampler.py index f7974ff58..54248464f 100644 --- a/reagent/gym/policies/samplers/discrete_sampler.py +++ b/reagent/gym/policies/samplers/discrete_sampler.py @@ -2,10 +2,11 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
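# Rollout sketch (assumed usage): collecting a handful of random transitions with the
# random policies above, e.g. to seed a replay buffer. Gym("CartPole-v0") is the
# EnvWrapper subclass introduced earlier in this diff.
from reagent.gym.envs.gym import Gym
from reagent.gym.policies.random_policies import make_random_policy_for_env

env = Gym(env_name="CartPole-v0")
policy = make_random_policy_for_env(env)
extract_action = env.get_action_extractor()

obs = env.reset()
for _ in range(10):
    actor_output = policy.act(env.obs_preprocessor(obs))
    obs, reward, terminal, _ = env.step(extract_action(actor_output))
    if terminal:
        obs = env.reset()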
-import reagent.types as rlt +import reagent.core.types as rlt import torch import torch.nn.functional as F from reagent.gym.types import Sampler +from reagent.models.dqn import INVALID_ACTION_CONSTANT class SoftmaxActionSampler(Sampler): @@ -13,15 +14,31 @@ class SoftmaxActionSampler(Sampler): Softmax sampler. Equation: http://incompleteideas.net/book/first/ebook/node17.html The action scores are logits. + Supports decaying the temperature over time. Args: temperature: A measure of how uniformly random the distribution looks. The higher the temperature, the more uniform the sampling. + temperature_decay: A multiplier by which temperature is reduced at each .update() call + minimum_temperature: Minimum temperature, below which the temperature is not decayed further """ - def __init__(self, temperature: float = 1.0): + def __init__( + self, + temperature: float = 1.0, + temperature_decay: float = 1.0, + minimum_temperature: float = 0.1, + ) -> None: assert temperature > 0, f"Invalid non-positive temperature {temperature}." self.temperature = temperature + self.temperature_decay = temperature_decay + self.minimum_temperature = minimum_temperature + assert ( + temperature_decay <= 1.0 + ), f"Invalid temperature_decay>1: {temperature_decay}." + assert ( + minimum_temperature <= temperature + ), f"minimum_temperature ({minimum_temperature}) exceeds initial temperature ({temperature})" def _get_distribution( self, scores: torch.Tensor @@ -45,14 +62,24 @@ def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput: assert log_prob.ndim == 1 return rlt.ActorOutput(action=action, log_prob=log_prob) - @torch.no_grad() def log_prob(self, scores: torch.Tensor, action: torch.Tensor) -> torch.Tensor: assert len(scores.shape) == 2, f"{scores.shape}" assert scores.shape == action.shape, f"{scores.shape} != {action.shape}" m = self._get_distribution(scores) - # pyre-fixme[16]: `Tensor` has no attribute `argmax`. return m.log_prob(action.argmax(dim=1)) + def entropy(self, scores: torch.Tensor) -> torch.Tensor: + """ + Returns average policy entropy. Simple unweighted average across the batch. + """ + assert len(scores.shape) == 2, f"{scores.shape}" + m = self._get_distribution(scores) + return m.entropy().mean() + + def update(self) -> None: + self.temperature *= self.temperature_decay + self.temperature = max(self.temperature, self.minimum_temperature) + class GreedyActionSampler(Sampler): """ @@ -63,7 +90,6 @@ def _get_greedy_indices(self, scores: torch.Tensor) -> torch.Tensor: assert ( len(scores.shape) == 2 ), f"scores shape is {scores.shape}, not (batchsize, num_actions)" - # pyre-fixme[16]: `Tensor` has no attribute `argmax`. return scores.argmax(dim=1) @torch.no_grad() @@ -73,14 +99,15 @@ def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput: raw_action = self._get_greedy_indices(scores) action = F.one_hot(raw_action, num_actions) assert action.shape == (batch_size, num_actions) - return rlt.ActorOutput(action=action, log_prob=torch.ones_like(raw_action)) + return rlt.ActorOutput( + action=action, log_prob=torch.zeros_like(raw_action, dtype=torch.float) + ) @torch.no_grad() def log_prob(self, scores: torch.Tensor, action: torch.Tensor) -> torch.Tensor: greedy_indices = self._get_greedy_indices(scores) - # pyre-fixme[16]: `Tensor` has no attribute `argmax`. 
match = greedy_indices == action.argmax(-1) - lp = torch.zeros(scores.shape[0]).float() + lp = torch.zeros(scores.shape[0], device=scores.device).float() lp[match] = -float("inf") return lp @@ -99,7 +126,7 @@ class EpsilonGreedyActionSampler(Sampler): def __init__( self, epsilon: float, epsilon_decay: float = 1.0, minimum_epsilon: float = 0.0 - ): + ) -> None: self.epsilon = float(epsilon) assert epsilon_decay <= 1 self.epsilon_decay = epsilon_decay @@ -112,14 +139,22 @@ def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput: ) # batch_size x num_actions batch_size, num_actions = scores.shape - # pyre-fixme[16]: `Tensor` has no attribute `argmax`. argmax = F.one_hot(scores.argmax(dim=1), num_actions).bool() - rand_prob = self.epsilon / num_actions - p = torch.full_like(rand_prob, scores) + valid_actions_ind = (scores > INVALID_ACTION_CONSTANT).bool() + num_valid_actions = valid_actions_ind.float().sum(1, keepdim=True) + + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + rand_prob = self.epsilon / num_valid_actions + p = torch.zeros_like(scores) + rand_prob greedy_prob = 1 - self.epsilon + rand_prob - p[argmax] = greedy_prob + # pyre-fixme[16]: `float` has no attribute `squeeze`. + p[argmax] = greedy_prob.squeeze() + + p[~valid_actions_ind] = 0.0 + p_sum = p.sum(1) + assert torch.allclose(p_sum, torch.ones_like(p_sum)) m = torch.distributions.Categorical(probs=p) raw_action = m.sample() @@ -131,10 +166,9 @@ def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput: def log_prob(self, scores: torch.Tensor, action: torch.Tensor) -> torch.Tensor: max_index = self.sample_action(scores).argmax(-1) - # pyre-fixme[16]: `Tensor` has no attribute `argmax`. opt = max_index == action.argmax(-1) n = len(scores) - lp = torch.ones(n) * self.epsilon / n + lp = torch.ones(n, device=scores.device) * self.epsilon / n lp[opt] = 1 - self.epsilon + self.epsilon / n return lp diff --git a/reagent/gym/policies/samplers/top_k_sampler.py b/reagent/gym/policies/samplers/top_k_sampler.py index 3d814486f..6960ac5ae 100644 --- a/reagent/gym/policies/samplers/top_k_sampler.py +++ b/reagent/gym/policies/samplers/top_k_sampler.py @@ -2,13 +2,13 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import reagent.types as rlt +import reagent.core.types as rlt import torch from reagent.gym.types import Sampler class TopKSampler(Sampler): - def __init__(self, k: int): + def __init__(self, k: int) -> None: self.k = k def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput: diff --git a/reagent/gym/policies/scorers/continuous_scorer.py b/reagent/gym/policies/scorers/continuous_scorer.py index 6a5892fbd..7ce89a3e4 100644 --- a/reagent/gym/policies/scorers/continuous_scorer.py +++ b/reagent/gym/policies/scorers/continuous_scorer.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import reagent.types as rlt +import reagent.core.types as rlt import torch from reagent.gym.types import GaussianSamplerScore, Scorer from reagent.models.base import ModelBase @@ -11,7 +11,7 @@ def sac_scorer(actor_network: ModelBase) -> Scorer: @torch.no_grad() def score(preprocessed_obs: rlt.FeatureData) -> GaussianSamplerScore: actor_network.eval() - # pyre-fixme[16]: `ModelBase` has no attribute `_get_loc_and_scale_log`. + # pyre-fixme[29]: `Union[torch.Tensor, torch.nn.Module]` is not a function. 
loc, scale_log = actor_network._get_loc_and_scale_log(preprocessed_obs) actor_network.train() return GaussianSamplerScore(loc=loc, scale_log=scale_log) diff --git a/reagent/gym/policies/scorers/discrete_scorer.py b/reagent/gym/policies/scorers/discrete_scorer.py index ac1ad7d9d..d5b33e51d 100644 --- a/reagent/gym/policies/scorers/discrete_scorer.py +++ b/reagent/gym/policies/scorers/discrete_scorer.py @@ -1,37 +1,47 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -from typing import Tuple +from typing import Optional, Tuple -import reagent.types as rlt +import reagent.core.types as rlt import torch from reagent.gym.preprocessors.trainer_preprocessor import get_possible_actions_for_gym from reagent.gym.types import Scorer from reagent.models.base import ModelBase -def discrete_dqn_scorer(q_network: ModelBase) -> Scorer: - @torch.no_grad() - def score(preprocessed_obs: rlt.FeatureData) -> torch.Tensor: - q_network.eval() - scores = q_network(preprocessed_obs) - assert scores.dim() == 2, f"{scores.shape} isn't (batchsize, num_actions)." - q_network.train() - return scores +NEG_INF = float("-inf") - return score + +def apply_possible_actions_mask( + scores: torch.Tensor, + possible_actions_mask: Optional[torch.Tensor] = None, + invalid_score: float = NEG_INF, +) -> torch.Tensor: + if possible_actions_mask is None: + return scores + possible_actions_mask = possible_actions_mask.unsqueeze(0) + assert ( + scores.shape == possible_actions_mask.shape + ), f"{scores.shape} != {possible_actions_mask.shape}" + scores[~possible_actions_mask] = invalid_score + return scores -def discrete_qrdqn_scorer(q_network: ModelBase) -> Scorer: +def discrete_dqn_scorer(q_network: ModelBase) -> Scorer: @torch.no_grad() - def score(preprocessed_obs: rlt.FeatureData) -> torch.Tensor: + def score( + preprocessed_obs: rlt.FeatureData, + possible_actions_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: q_network.eval() scores = q_network(preprocessed_obs) - assert ( - scores.dim() == 3 - ), f"{scores.shape} isn't (batchsize, num_actions, num_atoms)." - scores = scores.mean(dim=2) + # qrdqn returns (batchsize, num_actions, num_atoms) + if scores.dim() == 3: + scores = scores.mean(dim=2) + assert scores.dim() == 2, f"{scores.shape} isn't (batchsize, num_actions)." q_network.train() + scores = apply_possible_actions_mask(scores, possible_actions_mask) return scores return score @@ -39,8 +49,12 @@ def score(preprocessed_obs: rlt.FeatureData) -> torch.Tensor: def discrete_dqn_serving_scorer(q_network: torch.nn.Module) -> Scorer: @torch.no_grad() - def score(value_presence: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor: - action_names, q_values = q_network(value_presence) + def score( + state: rlt.ServingFeatureData, + possible_actions_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + action_names, q_values = q_network(*state) + q_values = apply_possible_actions_mask(q_values, possible_actions_mask) return q_values return score diff --git a/reagent/gym/policies/scorers/slate_q_scorer.py b/reagent/gym/policies/scorers/slate_q_scorer.py index fd2956a11..ff491859f 100644 --- a/reagent/gym/policies/scorers/slate_q_scorer.py +++ b/reagent/gym/policies/scorers/slate_q_scorer.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
-import reagent.types as rlt +import reagent.core.types as rlt import torch import torch.nn.functional as F from reagent.gym.types import Scorer @@ -26,3 +26,30 @@ def score(state: rlt.FeatureData) -> torch.Tensor: return select_prob * scores return score + + +def slate_q_serving_scorer(num_candidates: int, q_network: torch.nn.Module) -> Scorer: + @torch.no_grad() + def score(state: rlt.FeatureData) -> torch.Tensor: + # pyre-fixme[28]: Unexpected keyword argument `axis`. + tiled_state = state.float_features.repeat_interleave( + repeats=num_candidates, axis=0 + ) + candidate_docs = state.candidate_docs + assert candidate_docs is not None + actions = candidate_docs.as_feature_data().float_features + + q_network.eval() + action_names, q_values = q_network( + (tiled_state, torch.ones_like(tiled_state)), + (actions, torch.ones_like(actions)), + ) + scores = q_values.view(-1, num_candidates) + q_network.train() + + select_prob = F.softmax(candidate_docs.value, dim=1) + assert select_prob.shape == scores.shape + + return select_prob * scores + + return score diff --git a/reagent/gym/preprocessors/__init__.py b/reagent/gym/preprocessors/__init__.py index ce8a4ae4a..1b4a9a584 100644 --- a/reagent/gym/preprocessors/__init__.py +++ b/reagent/gym/preprocessors/__init__.py @@ -1,23 +1,15 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -from .default_preprocessors import ( - make_default_action_extractor, - make_default_obs_preprocessor, -) -from .default_serving_preprocessors import ( - make_default_serving_action_extractor, - make_default_serving_obs_preprocessor, -) from .replay_buffer_inserters import make_replay_buffer_inserter -from .trainer_preprocessor import make_replay_buffer_trainer_preprocessor +from .trainer_preprocessor import ( + make_replay_buffer_trainer_preprocessor, + make_trainer_preprocessor_online, +) __all__ = [ - "make_default_action_extractor", - "make_default_obs_preprocessor", - "make_default_serving_obs_preprocessor", - "make_default_serving_action_extractor", "make_replay_buffer_trainer_preprocessor", "make_replay_buffer_inserter", + "make_trainer_preprocessor_online", ] diff --git a/reagent/gym/preprocessors/default_preprocessors.py b/reagent/gym/preprocessors/default_preprocessors.py index 5ba93229b..5e0f52da7 100644 --- a/reagent/gym/preprocessors/default_preprocessors.py +++ b/reagent/gym/preprocessors/default_preprocessors.py @@ -4,68 +4,21 @@ """ Get default preprocessors for training time. 
""" import logging -from typing import List, Optional, Tuple +from typing import List, Tuple import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import torch import torch.nn.functional as F from gym import Env, spaces -from reagent.parameters import CONTINUOUS_TRAINING_ACTION_RANGE -from reagent.training.utils import rescale_actions logger = logging.getLogger(__name__) -try: - from recsim.simulator.recsim_gym import RecSimGymEnv - - HAS_RECSIM = True -except ImportError: - HAS_RECSIM = False - logger.warning(f"ReplayBuffer.create_from_env() will not recognize RecSim env") - - -def make_default_obs_preprocessor(env: Env, *, device: Optional[torch.device] = None): - """ Returns the default obs preprocessor for the environment """ - if device is None: - device = torch.device("cpu") - observation_space = env.observation_space - if HAS_RECSIM and isinstance(env.unwrapped, RecSimGymEnv): - return RecsimObsPreprocessor.create_from_env(env, device=device) - elif isinstance(observation_space, spaces.Box): - return BoxObsPreprocessor(device) - else: - raise NotImplementedError(f"Unsupport observation space: {observation_space}") - - -def make_default_action_extractor(env: Env): - """ Returns the default action extractor for the environment """ - action_space = env.action_space - if isinstance(action_space, spaces.Discrete): - # Canonical rule to return one-hot encoded actions for discrete - return discrete_action_extractor - elif isinstance(action_space, spaces.MultiDiscrete): - return multi_discrete_action_extractor - elif isinstance(action_space, spaces.Box): - # Canonical rule to scale actions to CONTINUOUS_TRAINING_ACTION_RANGE - return make_box_action_extractor(action_space) - else: - raise NotImplementedError(f"Unsupport action space: {action_space}") - - ####################################### ### Default obs preprocessors. ### These should operate on single obs. ####################################### -class BoxObsPreprocessor: - def __init__(self, device: torch.device): - self.device = device - - def __call__(self, obs: np.ndarray) -> rlt.FeatureData: - return rlt.FeatureData(torch.tensor(obs).float().unsqueeze(0)).to( - self.device, non_blocking=True - ) class RecsimObsPreprocessor: @@ -75,12 +28,10 @@ def __init__( num_docs: int, discrete_keys: List[Tuple[str, int]], box_keys: List[Tuple[str, int]], - device: torch.device, ): self.num_docs = num_docs self.discrete_keys = discrete_keys self.box_keys = box_keys - self.device = device @classmethod def create_from_env(cls, env: Env, **kwargs): @@ -103,10 +54,11 @@ def create_from_env(cls, env: Env, **kwargs): discrete_keys: List[Tuple[str, int]] = [] box_keys: List[Tuple[str, int]] = [] - doc_0_space = doc_obs_space["0"] + key_0 = next(iter(doc_obs_space.spaces)) + doc_0_space = doc_obs_space[key_0] if isinstance(doc_0_space, spaces.Dict): - for k, v in doc_obs_space["0"].spaces.items(): + for k, v in doc_obs_space[key_0].spaces.items(): if isinstance(v, spaces.Discrete): if v.n > 0: discrete_keys.append((k, v.n)) @@ -169,51 +121,5 @@ def __call__(self, obs): .unsqueeze(0) ) - candidate_docs = rlt.DocList( - float_features=doc_features, - mask=torch.ones(doc_features.shape[:-1], dtype=torch.bool), - value=value, - ) - return rlt.FeatureData(float_features=user, candidate_docs=candidate_docs).to( - self.device, non_blocking=True - ) - - -############################################ -### Default action extractors. -### These currently operate on single action. 
-############################################ -def discrete_action_extractor(actor_output: rlt.ActorOutput): - action = actor_output.action - assert ( - # pyre-fixme[16]: `Tensor` has no attribute `ndim`. - action.ndim == 2 - and action.shape[0] == 1 - ), f"{action} is not a single batch of results!" - # pyre-fixme[16]: `Tensor` has no attribute `argmax`. - return action.squeeze(0).argmax().cpu().numpy() - - -def multi_discrete_action_extractor(actor_output: rlt.ActorOutput): - return actor_output.action.squeeze(0).cpu().numpy() - - -def make_box_action_extractor(action_space: spaces.Box): - assert len(action_space.shape) == 1, f"{action_space} not supported." - - model_low, model_high = CONTINUOUS_TRAINING_ACTION_RANGE - - def box_action_extractor(actor_output: rlt.ActorOutput) -> np.ndarray: - action = actor_output.action - assert ( - len(action.shape) == 2 and action.shape[0] == 1 - ), f"{action} (shape: {action.shape}) is not a single action!" - return rescale_actions( - action.squeeze(0).cpu().numpy(), - new_min=action_space.low, - new_max=action_space.high, - prev_min=model_low, - prev_max=model_high, - ) - - return box_action_extractor + candidate_docs = rlt.DocList(float_features=doc_features, value=value) + return rlt.FeatureData(float_features=user, candidate_docs=candidate_docs) diff --git a/reagent/gym/preprocessors/default_serving_preprocessors.py b/reagent/gym/preprocessors/default_serving_preprocessors.py deleted file mode 100644 index 04dd50f8f..000000000 --- a/reagent/gym/preprocessors/default_serving_preprocessors.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -""" Returns preprocessors for serving module inference. """ - -from typing import Tuple - -import numpy as np -import reagent.types as rlt -import torch -from gym import Env, spaces - - -def make_default_serving_obs_preprocessor(env: Env): - if not isinstance(env.observation_space, spaces.Box): - raise NotImplementedError(f"{env.observation_space} not supported!") - - observation_space = env.observation_space - if len(observation_space.shape) != 1: - raise NotImplementedError(f"Box shape {observation_space.shape} not supported!") - - state_dim = observation_space.shape[0] - - def gym_to_reagent_serving(obs: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]: - obs_tensor = torch.tensor(obs).float().view(1, state_dim) - presence_tensor = torch.ones_like(obs_tensor) - return (obs_tensor, presence_tensor) - - return gym_to_reagent_serving - - -def make_default_serving_action_extractor(env: Env): - if isinstance(env.action_space, spaces.Discrete): - return discrete_predictor_action_extractor - elif isinstance(env.action_space, spaces.Box): - assert ( - len(env.action_space.shape) == 1 - ), f"Unsupported Box with shape {env.action_space.shape}" - return continuous_predictor_action_extractor - else: - raise NotImplementedError - - -def discrete_predictor_action_extractor(output: rlt.ActorOutput): - assert ( - len(output.action.shape) == 2 and output.action.shape[0] == 1 - ), f"{output.action.shape} isn't (1, action_dim)" - # pyre-fixme[16]: `Tensor` has no attribute `argmax`. 
- return output.action.cpu().squeeze(0).argmax().item() - - -def continuous_predictor_action_extractor(output: rlt.ActorOutput): - assert ( - len(output.action.shape) == 2 and output.action.shape[0] == 1 - ), f"{output.action.shape} isn't (1, action_dim)" - return output.action.squeeze(0).cpu().numpy() diff --git a/reagent/gym/preprocessors/replay_buffer_inserters.py b/reagent/gym/preprocessors/replay_buffer_inserters.py index 7d95ab888..7ef4e3bbf 100644 --- a/reagent/gym/preprocessors/replay_buffer_inserters.py +++ b/reagent/gym/preprocessors/replay_buffer_inserters.py @@ -2,7 +2,7 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging -from typing import Any, Callable, List, Tuple +from typing import Callable, List, Tuple import gym import numpy as np @@ -80,7 +80,8 @@ def create_for_env(cls, env: gym.Env): discrete_keys: List[str] = [] box_keys: List[str] = [] - doc_0_space = doc_obs_space["0"] + key_0 = next(iter(doc_obs_space.spaces)) + doc_0_space = doc_obs_space[key_0] if isinstance(doc_0_space, gym.spaces.Dict): for k, v in doc_0_space.spaces.items(): @@ -180,12 +181,16 @@ def __call__(self, replay_buffer: ReplayBuffer, transition: Transition): if response is not None: kwargs[f"response_{k}"] = np.stack([v[k] for v in response]) else: - kwargs[f"response_{k}"] = np.zeros((self.num_responses, *d)) + kwargs[f"response_{k}"] = np.zeros( + (self.num_responses, *d), dtype=np.float32 + ) for k, _n in self.response_discrete_keys: if response is not None: kwargs[f"response_{k}"] = np.array([v[k] for v in response]) else: - kwargs[f"response_{k}"] = np.zeros((self.num_responses,)) + kwargs[f"response_{k}"] = np.zeros( + (self.num_responses,), dtype=np.int64 + ) transition_dict.update(kwargs) replay_buffer.add(observation=user, **transition_dict) diff --git a/reagent/gym/preprocessors/trainer_preprocessor.py b/reagent/gym/preprocessors/trainer_preprocessor.py index 535d5957d..eedb2a51e 100644 --- a/reagent/gym/preprocessors/trainer_preprocessor.py +++ b/reagent/gym/preprocessors/trainer_preprocessor.py @@ -5,30 +5,36 @@ import inspect import logging -from typing import Optional +from typing import Dict, Optional import gym import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import torch import torch.nn.functional as F -from reagent.parameters import CONTINUOUS_TRAINING_ACTION_RANGE -from reagent.training.trainer import Trainer +from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE +from reagent.gym.types import Trajectory +from reagent.preprocessing.types import InputColumn +from reagent.training.reagent_lightning_module import ReAgentLightningModule from reagent.training.utils import rescale_actions logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) # This is here to make typechecker happpy, sigh -MAKER_MAP = {} +ONLINE_MAKER_MAP = {} +REPLAY_BUFFER_MAKER_MAP = {} -def make_replay_buffer_trainer_preprocessor( - trainer: Trainer, device: torch.device, env: gym.Env +def make_trainer_preprocessor( + trainer: ReAgentLightningModule, + device: torch.device, + env: gym.Env, + maker_map: Dict, ): - sig = inspect.signature(trainer.train) + assert isinstance(trainer, ReAgentLightningModule), f"{type(trainer)}" + sig = inspect.signature(trainer.train_step_gen) logger.info(f"Deriving trainer_preprocessor from {sig.parameters}") # Assuming training_batch is in the first position (excluding self) assert ( @@ -37,7 +43,7 @@ def make_replay_buffer_trainer_preprocessor( training_batch_type = 
sig.parameters["training_batch"].annotation assert training_batch_type != inspect.Parameter.empty try: - maker = MAKER_MAP[training_batch_type].create_for_env(env) + maker = maker_map[training_batch_type].create_for_env(env) except KeyError: logger.error(f"Unknown type: {training_batch_type}") raise @@ -49,6 +55,18 @@ def trainer_preprocessor(batch): return trainer_preprocessor +def make_trainer_preprocessor_online( + trainer: ReAgentLightningModule, device: torch.device, env: gym.Env +): + return make_trainer_preprocessor(trainer, device, env, ONLINE_MAKER_MAP) + + +def make_replay_buffer_trainer_preprocessor( + trainer: ReAgentLightningModule, device: torch.device, env: gym.Env +): + return make_trainer_preprocessor(trainer, device, env, REPLAY_BUFFER_MAKER_MAP) + + def one_hot_actions( num_actions: int, action: torch.Tensor, @@ -78,27 +96,52 @@ def one_hot_actions( class DiscreteDqnInputMaker: - def __init__(self, num_actions: int): + def __init__(self, num_actions: int, trainer_preprocessor=None): self.num_actions = num_actions + self.trainer_preprocessor = trainer_preprocessor @classmethod def create_for_env(cls, env: gym.Env): action_space = env.action_space assert isinstance(action_space, gym.spaces.Discrete) - return cls(num_actions=action_space.n) + try: + return cls( + num_actions=action_space.n, + # pyre-fixme[16]: `Env` has no attribute `trainer_preprocessor`. + trainer_preprocessor=env.trainer_preprocessor, + ) + except AttributeError: + return cls(num_actions=action_space.n) def __call__(self, batch): not_terminal = 1.0 - batch.terminal.float() action, next_action = one_hot_actions( self.num_actions, batch.action, batch.next_action, batch.terminal ) + if self.trainer_preprocessor is not None: + state = self.trainer_preprocessor(batch.state) + next_state = self.trainer_preprocessor(batch.next_state) + else: + state = rlt.FeatureData(float_features=batch.state) + next_state = rlt.FeatureData(float_features=batch.next_state) + + try: + possible_actions_mask = batch.possible_actions_mask.float() + except AttributeError: + possible_actions_mask = torch.ones_like(action).float() + + try: + possible_next_actions_mask = batch.next_possible_actions_mask.float() + except AttributeError: + possible_next_actions_mask = torch.ones_like(next_action).float() + return rlt.DiscreteDqnInput( - state=rlt.FeatureData(float_features=batch.state), + state=state, action=action, - next_state=rlt.FeatureData(float_features=batch.next_state), + next_state=next_state, next_action=next_action, - possible_actions_mask=torch.ones_like(action).float(), - possible_next_actions_mask=torch.ones_like(next_action).float(), + possible_actions_mask=possible_actions_mask, + possible_next_actions_mask=possible_next_actions_mask, reward=batch.reward, not_terminal=not_terminal, step=None, @@ -115,8 +158,11 @@ def __call__(self, batch): class PolicyNetworkInputMaker: def __init__(self, action_low: np.ndarray, action_high: np.ndarray): - self.action_low = action_low - self.action_high = action_high + self.action_low = torch.tensor(action_low) + self.action_high = torch.tensor(action_high) + (train_low, train_high) = CONTINUOUS_TRAINING_ACTION_RANGE + self.train_low = torch.tensor(train_low) + self.train_high = torch.tensor(train_high) @classmethod def create_for_env(cls, env: gym.Env): @@ -127,45 +173,107 @@ def create_for_env(cls, env: gym.Env): def __call__(self, batch): not_terminal = 1.0 - batch.terminal.float() # normalize actions - (train_low, train_high) = CONTINUOUS_TRAINING_ACTION_RANGE - action = 
torch.tensor( - rescale_actions( - batch.action.numpy(), - new_min=train_low, - new_max=train_high, - prev_min=self.action_low, - prev_max=self.action_high, - ) + action = rescale_actions( + batch.action, + new_min=self.train_low, + new_max=self.train_high, + prev_min=self.action_low, + prev_max=self.action_high, ) # only normalize non-terminal non_terminal_indices = (batch.terminal == 0).squeeze(1) next_action = torch.zeros_like(action) - next_action[non_terminal_indices] = torch.tensor( - rescale_actions( - batch.next_action[non_terminal_indices].numpy(), - new_min=train_low, - new_max=train_high, - prev_min=self.action_low, - prev_max=self.action_high, - ) + next_action[non_terminal_indices] = rescale_actions( + batch.next_action[non_terminal_indices], + new_min=self.train_low, + new_max=self.train_high, + prev_min=self.action_low, + prev_max=self.action_high, ) - return rlt.PolicyNetworkInput( - state=rlt.FeatureData(float_features=batch.state), - action=rlt.FeatureData(float_features=action), - next_state=rlt.FeatureData(float_features=batch.next_state), - next_action=rlt.FeatureData(float_features=next_action), - reward=batch.reward, - not_terminal=not_terminal, - step=None, - time_diff=None, - extras=rlt.ExtraData( + dict_batch = { + InputColumn.STATE_FEATURES: batch.state, + InputColumn.NEXT_STATE_FEATURES: batch.next_state, + InputColumn.ACTION: action, + InputColumn.NEXT_ACTION: next_action, + InputColumn.REWARD: batch.reward, + InputColumn.NOT_TERMINAL: not_terminal, + InputColumn.STEP: None, + InputColumn.TIME_DIFF: None, + InputColumn.EXTRAS: rlt.ExtraData( mdp_id=None, sequence_number=None, action_probability=batch.log_prob.exp(), max_num_actions=None, metrics=None, ), + } + has_candidate_features = False + try: + dict_batch.update( + { + InputColumn.CANDIDATE_FEATURES: batch.doc, + InputColumn.NEXT_CANDIDATE_FEATURES: batch.next_doc, + } + ) + has_candidate_features = True + except AttributeError: + pass + output = rlt.PolicyNetworkInput.from_dict(dict_batch) + if has_candidate_features: + output.state = rlt._embed_states(output.state) + output.next_state = rlt._embed_states(output.next_state) + return output + + +class SlateQInputMaker: + def __init__(self): + self.metric = "watch_time" + + @classmethod + def create_for_env(cls, env: gym.Env): + return cls() + + def __call__(self, batch): + n = batch.state.shape[0] + item_mask = torch.ones(batch.doc.shape[:2]) + next_item_mask = torch.ones(batch.doc.shape[:2]) + # TODO: abs value to make probability? 
+ item_probability = batch.augmentation_value # .unsqueeze(2) + next_item_probability = batch.next_augmentation_value # .unsqueeze(2) + + # concat null action + null_action = torch.tensor([batch.action.shape[1]] * n, dtype=torch.int64).view( + n, 1 ) + action = torch.cat([batch.action, null_action], dim=1) + next_action = torch.cat([batch.next_action, null_action], dim=1) + + # concat null reward to position wise reward + position_reward = getattr(batch, f"response_{self.metric}") + null_reward = torch.zeros((n, 1)) + position_reward = torch.cat([position_reward, null_reward], dim=1) + + # concat null mask when nothing clicked + reward_mask = batch.response_click + null_mask = (reward_mask.sum(dim=1) == 0).view(n, 1) + reward_mask = torch.cat([reward_mask.to(torch.bool), null_mask], dim=1) + dict_batch = { + "state_features": batch.state, + "next_state_features": batch.next_state, + "candidate_features": batch.doc, + "next_candidate_features": batch.next_doc, + "item_mask": item_mask, + "next_item_mask": next_item_mask, + "item_probability": item_probability, + "next_item_probability": next_item_probability, + "action": action, + "next_action": next_action, + "position_reward": position_reward, + "reward_mask": reward_mask, + "time_diff": None, + "not_terminal": ~batch.terminal, + } + return rlt.SlateQInput.from_dict(dict_batch) class MemoryNetworkInputMaker: @@ -220,15 +328,28 @@ def __call__(self, batch): assert len(tensor.shape) == 2, f"{name} has shape {tensor.shape}" scalar_fields[name] = tensor.transpose(0, 1) - return rlt.MemoryNetworkInput( - state=rlt.FeatureData(float_features=vector_fields["state"]), - next_state=rlt.FeatureData(float_features=vector_fields["next_state"]), - action=vector_fields["action"], - reward=scalar_fields["reward"], - not_terminal=scalar_fields["not_terminal"], - step=None, - time_diff=None, - ) + # stack_size > 1, so let's pad not_terminal with 1's, since + # previous states couldn't have been terminal.. 
+ if scalar_fields["reward"].shape[0] > 1: + batch_size = scalar_fields["reward"].shape[1] + assert scalar_fields["not_terminal"].shape == ( + 1, + batch_size, + ), f"{scalar_fields['not_terminal'].shape}" + stacked_not_terminal = torch.ones_like(scalar_fields["reward"]) + stacked_not_terminal[-1] = scalar_fields["not_terminal"] + scalar_fields["not_terminal"] = stacked_not_terminal + + dict_batch = { + "state": vector_fields["state"], + "next_state": vector_fields["next_state"], + "action": vector_fields["action"], + "reward": scalar_fields["reward"], + "not_terminal": scalar_fields["not_terminal"], + "step": None, + "time_diff": None, + } + return rlt.MemoryNetworkInput.from_dict(dict_batch) def get_possible_actions_for_gym(batch_size: int, num_actions: int) -> rlt.FeatureData: @@ -290,9 +411,72 @@ def __call__(self, batch): ) -MAKER_MAP = { +class PolicyGradientInputMaker: + def __init__(self, num_actions: Optional[int] = None, recsim_obs: bool = False): + self.num_actions = num_actions + self.recsim_obs = recsim_obs + + @classmethod + def create_for_env(cls, env: gym.Env): + action_space = env.action_space + if isinstance(action_space, gym.spaces.Discrete): + return cls(action_space.n) + elif isinstance(action_space, gym.spaces.Box): + return cls() + elif isinstance(action_space, gym.spaces.MultiDiscrete): + return cls(recsim_obs=True) + else: + raise NotImplementedError() + + def _get_recsim_state(self, observation): + def _stack(slates): + obs = rlt.FeatureData( + float_features=torch.from_numpy( + np.stack(np.array([slate["user"] for slate in slates])) + ), + candidate_docs=rlt.DocList( + float_features=torch.from_numpy( + np.stack(np.array([slate["doc"] for slate in slates])) + ) + ), + ) + return obs + + def _stack_slate(slate): + return { + "user": slate["user"], + "doc": np.stack(np.array(list(slate["doc"].values()))), + } + + return _stack([_stack_slate(slate) for slate in observation]) + + def __call__(self, trajectory: Trajectory): + action = torch.from_numpy(np.stack(trajectory.action).squeeze()) + if self.num_actions is not None: + action = F.one_hot(action, self.num_actions).float() + assert len(action.shape) == 2, f"{action.shape}" + # one hot makes shape (batch_size, num_actions) + state = ( + self._get_recsim_state(trajectory.observation) + if self.recsim_obs + else rlt.FeatureData( + torch.from_numpy(np.stack(trajectory.observation)).float() + ) + ) + return rlt.PolicyGradientInput( + state=state, + action=action, + reward=torch.tensor(trajectory.reward), + log_prob=torch.tensor(trajectory.log_prob), + ) + + +ONLINE_MAKER_MAP = {rlt.PolicyGradientInput: PolicyGradientInputMaker} + +REPLAY_BUFFER_MAKER_MAP = { rlt.DiscreteDqnInput: DiscreteDqnInputMaker, rlt.PolicyNetworkInput: PolicyNetworkInputMaker, rlt.MemoryNetworkInput: MemoryNetworkInputMaker, rlt.ParametricDqnInput: ParametricDqnInputMaker, + rlt.SlateQInput: SlateQInputMaker, } diff --git a/reagent/gym/runners/gymrunner.py b/reagent/gym/runners/gymrunner.py index bb0a8ef25..c766da4c2 100644 --- a/reagent/gym/runners/gymrunner.py +++ b/reagent/gym/runners/gymrunner.py @@ -7,21 +7,21 @@ import numpy as np import torch.multiprocessing as mp -from gym import Env from reagent.core.multiprocess_utils import ( unwrap_function_outputs, wrap_function_arguments, ) +from reagent.core.tensorboardX import SummaryWriterContext from reagent.gym.agents.agent import Agent +from reagent.gym.envs import EnvWrapper from reagent.gym.types import Trajectory, Transition -from reagent.tensorboardX import SummaryWriterContext logger = 
logging.getLogger(__name__) def run_episode( - env: Env, agent: Agent, mdp_id: int = 0, max_steps: Optional[int] = None + env: EnvWrapper, agent: Agent, mdp_id: int = 0, max_steps: Optional[int] = None ) -> Trajectory: """ Return sum of rewards from episode. @@ -30,12 +30,15 @@ def run_episode( """ trajectory = Trajectory() obs = env.reset() + possible_actions_mask = env.possible_actions_mask terminal = False num_steps = 0 + info = {} while not terminal: - action = agent.act(obs) - next_obs, reward, terminal, _ = env.step(action) - if max_steps is not None and num_steps >= max_steps: + action, log_prob = agent.act(obs, possible_actions_mask) + next_obs, reward, terminal, info = env.step(action) + next_possible_actions_mask = env.possible_actions_mask + if max_steps is not None and num_steps >= (max_steps - 1): terminal = True # Only partially filled. Agent can fill in more fields. @@ -44,34 +47,38 @@ def run_episode( sequence_number=num_steps, observation=obs, action=action, - reward=reward, - terminal=terminal, + reward=float(reward), + terminal=bool(terminal), + log_prob=log_prob, + possible_actions_mask=possible_actions_mask, ) agent.post_step(transition) trajectory.add_transition(transition) SummaryWriterContext.increase_global_step() obs = next_obs + possible_actions_mask = next_possible_actions_mask num_steps += 1 + agent.post_episode(trajectory, info) return trajectory def evaluate_for_n_episodes( n: int, - env: Env, + env: EnvWrapper, agent: Agent, max_steps: Optional[int] = None, gammas: Sequence[float] = (1.0,), - num_processes: int = 4, + num_processes: int = 0, ) -> np.ndarray: - """ Return an np array A of shape n x len(gammas) - where A[i, j] = ith episode evaluated with gamma=gammas[j]. - Runs environments on num_processes, via multiprocessing.Pool. + """Return an np array A of shape n x len(gammas) + where A[i, j] = ith episode evaluated with gamma=gammas[j]. + Runs environments on num_processes, via multiprocessing.Pool. """ num_processes = min(num_processes, n) def evaluate_one_episode( mdp_id: int, - env: Env, + env: EnvWrapper, agent: Agent, max_steps: Optional[int], gammas: Sequence[float], diff --git a/reagent/gym/tests/__init__.py b/reagent/gym/tests/__init__.py index 5be5087fd..40539064a 100644 --- a/reagent/gym/tests/__init__.py +++ b/reagent/gym/tests/__init__.py @@ -1,2 +1 @@ -#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
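As an illustrative sketch of the discrete-sampler changes earlier in this patch (assumes the patched reagent modules and torch are importable; not part of the diff itself):

    import torch
    from reagent.gym.policies.samplers.discrete_sampler import (
        GreedyActionSampler,
        SoftmaxActionSampler,
    )

    # Temperature now decays multiplicatively on each update() and is clamped
    # at minimum_temperature: 1.0 -> 0.5 -> 0.25 -> 0.125 -> 0.1 -> 0.1.
    sampler = SoftmaxActionSampler(
        temperature=1.0, temperature_decay=0.5, minimum_temperature=0.1
    )
    for _ in range(5):
        sampler.update()
    assert abs(sampler.temperature - 0.1) < 1e-9

    # GreedyActionSampler now reports log_prob == 0 (probability 1) for the argmax action.
    greedy = GreedyActionSampler()
    out = greedy.sample_action(torch.tensor([[1.0, 3.0, 2.0]]))
    assert out.log_prob.item() == 0.0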
diff --git a/reagent/gym/tests/configs/cartpole/discrete_c51_cartpole_online.yaml b/reagent/gym/tests/configs/cartpole/discrete_c51_cartpole_online.yaml index 79750b17c..bb11d53e6 100644 --- a/reagent/gym/tests/configs/cartpole/discrete_c51_cartpole_online.yaml +++ b/reagent/gym/tests/configs/cartpole/discrete_c51_cartpole_online.yaml @@ -1,26 +1,26 @@ -env_name: CartPole-v0 +env: + Gym: + env_name: CartPole-v1 model: DiscreteC51DQN: trainer_param: actions: - - 4 - - 5 + - 0 + - 1 rl: - gamma: 0.99 - target_update_rate: 0.1 + gamma: 0.9 + target_update_rate: 0.05 maxq_learning: true - temperature: 0.1 - softmax_policy: true - q_network_loss: mse + temperature: 1.0 double_q_learning: true - minibatch_size: 512 minibatches_per_step: 1 - num_atoms: 11 + num_atoms: 21 qmin: 0 - qmax: 25 + qmax: 40 optimizer: - Adam: + AdamW: lr: 0.001 + amsgrad: true net_builder: Categorical: sizes: @@ -31,11 +31,11 @@ model: - leaky_relu eval_parameters: calc_cpe_in_training: false -replay_memory_size: 50000 -train_every_ts: 3 +replay_memory_size: 100000 +train_every_ts: 1 train_after_ts: 20000 -num_train_episodes: 60 +num_train_episodes: 40 num_eval_episodes: 20 -max_steps: 200 passing_score_bar: 100.0 use_gpu: false +minibatch_size: 512 diff --git a/reagent/gym/tests/configs/cartpole/discrete_crr_cartpole_online.yaml b/reagent/gym/tests/configs/cartpole/discrete_crr_cartpole_online.yaml new file mode 100644 index 000000000..966f00bab --- /dev/null +++ b/reagent/gym/tests/configs/cartpole/discrete_crr_cartpole_online.yaml @@ -0,0 +1,49 @@ +env: + Gym: + env_name: CartPole-v0 +model: + DiscreteCRR: + trainer_param: + actions: + - 0 + - 1 + rl: + gamma: 0.99 + target_update_rate: 0.2 + temperature: 0.1 + q_network_optimizer: + Adam: + lr: 0.001 + actor_network_optimizer: + Adam: + lr: 0.001 + use_target_actor: false + double_q_learning: true + delayed_policy_update: 1 + actor_net_builder: + FullyConnected: + exploration_variance: 0.0000001 + sizes: + - 1024 + - 1024 + activations: + - relu + - relu + critic_net_builder: + FullyConnected: + sizes: + - 1024 + - 1024 + activations: + - relu + - relu + eval_parameters: + calc_cpe_in_training: false +replay_memory_size: 20000 +train_every_ts: 1 +train_after_ts: 5000 +num_train_episodes: 25 +num_eval_episodes: 20 +passing_score_bar: 100 +use_gpu: false +minibatch_size: 256 diff --git a/reagent/gym/tests/configs/cartpole/discrete_dqn_cartpole_online.yaml b/reagent/gym/tests/configs/cartpole/discrete_dqn_cartpole_online.yaml index 690542b14..912eac018 100644 --- a/reagent/gym/tests/configs/cartpole/discrete_dqn_cartpole_online.yaml +++ b/reagent/gym/tests/configs/cartpole/discrete_dqn_cartpole_online.yaml @@ -1,4 +1,6 @@ -env_name: CartPole-v0 +env: + Gym: + env_name: CartPole-v0 model: DiscreteDQN: trainer_param: @@ -7,18 +9,14 @@ model: - 1 rl: gamma: 0.99 - epsilon: 0.05 target_update_rate: 0.2 maxq_learning: true temperature: 1.0 - softmax_policy: false - q_network_loss: mse double_q_learning: true - minibatch_size: 512 minibatches_per_step: 1 optimizer: Adam: - lr: 0.05 + lr: 0.01 net_builder: FullyConnected: sizes: @@ -29,11 +27,11 @@ model: - leaky_relu eval_parameters: calc_cpe_in_training: false -replay_memory_size: 20000 +replay_memory_size: 100000 train_every_ts: 1 -train_after_ts: 5000 -num_train_episodes: 50 +train_after_ts: 30000 +num_train_episodes: 120 num_eval_episodes: 20 -max_steps: 200 passing_score_bar: 100.0 use_gpu: false +minibatch_size: 512 diff --git a/reagent/gym/tests/configs/cartpole/discrete_ppo_cartpole_online.yaml 
b/reagent/gym/tests/configs/cartpole/discrete_ppo_cartpole_online.yaml new file mode 100644 index 000000000..73029f19e --- /dev/null +++ b/reagent/gym/tests/configs/cartpole/discrete_ppo_cartpole_online.yaml @@ -0,0 +1,31 @@ +env: + Gym: + env_name: CartPole-v0 +model: + PPO: + trainer_param: + actions: + - 0 + - 1 + gamma: 0.99 + ppo_epsilon: 0.2 + optimizer: + Adam: + lr: 0.001 + weight_decay: 0.001 + update_freq: 2 + update_epochs: 1 + ppo_batch_size: 2 + policy_net_builder: + FullyConnected: + sizes: + - 32 + - 32 + activations: + - leaky_relu + - leaky_relu + sampler_temperature: 1.0 +num_train_episodes: 1000 +num_eval_episodes: 100 +passing_score_bar: 180.0 +use_gpu: false diff --git a/reagent/gym/tests/configs/cartpole/discrete_qr_cartpole_online.yaml b/reagent/gym/tests/configs/cartpole/discrete_qr_cartpole_online.yaml index fbe2e173d..06dcca526 100644 --- a/reagent/gym/tests/configs/cartpole/discrete_qr_cartpole_online.yaml +++ b/reagent/gym/tests/configs/cartpole/discrete_qr_cartpole_online.yaml @@ -1,40 +1,39 @@ -env_name: CartPole-v0 +env: + Gym: + env_name: CartPole-v1 model: DiscreteQRDQN: trainer_param: actions: - - 4 - - 5 + - 0 + - 1 rl: - gamma: 0.99 - target_update_rate: 0.1 + gamma: 0.9 + target_update_rate: 0.05 maxq_learning: true - softmax_policy: true - temperature: 0.1 - q_network_loss: mse + temperature: 1.0 double_q_learning: true - minibatch_size: 512 minibatches_per_step: 1 num_atoms: 11 optimizer: - Adam: - lr: 0.05 - weight_decay: 0 + AdamW: + lr: 0.001 + amsgrad: true net_builder: DuelingQuantile: sizes: - - 128 + - 64 - 64 activations: - leaky_relu - leaky_relu eval_parameters: calc_cpe_in_training: false -replay_memory_size: 50000 -train_every_ts: 3 -train_after_ts: 50000 -num_train_episodes: 50 +replay_memory_size: 100000 +train_every_ts: 1 +train_after_ts: 20000 +num_train_episodes: 40 num_eval_episodes: 20 -max_steps: 200 passing_score_bar: 100.0 use_gpu: false +minibatch_size: 512 diff --git a/reagent/gym/tests/configs/cartpole/discrete_reinforce_cartpole_online.yaml b/reagent/gym/tests/configs/cartpole/discrete_reinforce_cartpole_online.yaml new file mode 100644 index 000000000..1f3a3cb59 --- /dev/null +++ b/reagent/gym/tests/configs/cartpole/discrete_reinforce_cartpole_online.yaml @@ -0,0 +1,27 @@ +env: + Gym: + env_name: CartPole-v0 +model: + Reinforce: + trainer_param: + actions: + - 0 + - 1 + gamma: 0.99 + off_policy: False + optimizer: + Adam: + lr: 0.001 + normalize: False + subtract_mean: True + policy_net_builder: + FullyConnected: + sizes: + - 64 + activations: + - leaky_relu + sampler_temperature: 1.0 +num_train_episodes: 1000 +num_eval_episodes: 100 +passing_score_bar: 180.0 +use_gpu: false diff --git a/reagent/gym/tests/configs/cartpole/parametric_dqn_cartpole_online.yaml b/reagent/gym/tests/configs/cartpole/parametric_dqn_cartpole_online.yaml index 49de288c5..898d8f2f2 100644 --- a/reagent/gym/tests/configs/cartpole/parametric_dqn_cartpole_online.yaml +++ b/reagent/gym/tests/configs/cartpole/parametric_dqn_cartpole_online.yaml @@ -1,20 +1,20 @@ -env_name: CartPole-v0 +env: + Gym: + env_name: CartPole-v1 model: ParametricDQN: trainer_param: rl: gamma: 0.99 - target_update_rate: 0.2 + target_update_rate: 0.1 maxq_learning: true - temperature: 0.35 - softmax_policy: true - q_network_loss: mse + temperature: 1.0 double_q_learning: true - minibatch_size: 1024 minibatches_per_step: 1 optimizer: - Adam: - lr: 0.03 + AdamW: + lr: 0.001 + amsgrad: true net_builder: FullyConnected: sizes: @@ -27,9 +27,9 @@ model: calc_cpe_in_training: false 
replay_memory_size: 100000 train_every_ts: 1 -train_after_ts: 50000 -num_train_episodes: 40 +train_after_ts: 20000 +num_train_episodes: 90 num_eval_episodes: 20 -max_steps: 200 passing_score_bar: 100.0 use_gpu: false +minibatch_size: 1024 diff --git a/reagent/gym/tests/configs/cartpole/parametric_sarsa_cartpole_online.yaml b/reagent/gym/tests/configs/cartpole/parametric_sarsa_cartpole_online.yaml index 4cb38596c..7dcdd809c 100644 --- a/reagent/gym/tests/configs/cartpole/parametric_sarsa_cartpole_online.yaml +++ b/reagent/gym/tests/configs/cartpole/parametric_sarsa_cartpole_online.yaml @@ -1,4 +1,6 @@ -env_name: CartPole-v0 +env: + Gym: + env_name: CartPole-v0 model: ParametricDQN: trainer_param: @@ -11,10 +13,7 @@ model: # vanilla, on-policy sarsa maxq_learning: false temperature: 0.35 - softmax_policy: true - q_network_loss: mse double_q_learning: true - minibatch_size: 1024 minibatches_per_step: 1 optimizer: Adam: @@ -34,6 +33,6 @@ train_every_ts: 1 train_after_ts: 25000 num_train_episodes: 30 num_eval_episodes: 20 -max_steps: 200 passing_score_bar: 100.0 use_gpu: false +minibatch_size: 1024 diff --git a/reagent/gym/tests/configs/functionality/dqn_possible_actions_mask.yaml b/reagent/gym/tests/configs/functionality/dqn_possible_actions_mask.yaml new file mode 100644 index 000000000..becfac81a --- /dev/null +++ b/reagent/gym/tests/configs/functionality/dqn_possible_actions_mask.yaml @@ -0,0 +1,39 @@ +env: + Gym: + env_name: PossibleActionsMaskTester-v0 +model: + DiscreteDQN: + trainer_param: + actions: + - 0 + - 1 + - 2 + - 3 + rl: + gamma: 1.0 + target_update_rate: 0.2 + maxq_learning: true + temperature: 1.0 + double_q_learning: true + minibatches_per_step: 1 + optimizer: + Adam: + lr: 0.05 + net_builder: + FullyConnected: + sizes: + - 128 + - 64 + activations: + - leaky_relu + - leaky_relu + eval_parameters: + calc_cpe_in_training: false +replay_memory_size: 5000 +train_every_ts: 1 +train_after_ts: 500 +num_train_episodes: 5 +num_eval_episodes: 3 +passing_score_bar: 200.0 +use_gpu: false +minibatch_size: 512 diff --git a/reagent/gym/tests/configs/open_gridworld/discrete_dqn_open_gridworld.yaml b/reagent/gym/tests/configs/open_gridworld/discrete_dqn_open_gridworld.yaml index 29aa655f9..65b4ef076 100644 --- a/reagent/gym/tests/configs/open_gridworld/discrete_dqn_open_gridworld.yaml +++ b/reagent/gym/tests/configs/open_gridworld/discrete_dqn_open_gridworld.yaml @@ -1,4 +1,6 @@ -env_name: MiniGrid-Empty-5x5-v0 +env: + Gym: + env_name: MiniGrid-Empty-5x5-v0 model: DiscreteDQN: trainer_param: @@ -16,10 +18,8 @@ model: target_update_rate: 0.1 maxq_learning: true temperature: 0.01 - softmax_policy: false q_network_loss: mse double_q_learning: true - minibatch_size: 512 minibatches_per_step: 1 optimizer: Adam: @@ -36,6 +36,6 @@ train_every_ts: 3 train_after_ts: 1 num_train_episodes: 125 num_eval_episodes: 20 -max_steps: 2000 passing_score_bar: 0.9 use_gpu: false +minibatch_size: 512 diff --git a/reagent/gym/tests/configs/pendulum/continuous_crr_pendulum_online.yaml b/reagent/gym/tests/configs/pendulum/continuous_crr_pendulum_online.yaml new file mode 100644 index 000000000..ec5ffd72d --- /dev/null +++ b/reagent/gym/tests/configs/pendulum/continuous_crr_pendulum_online.yaml @@ -0,0 +1,58 @@ +env: + Gym: + env_name: Pendulum-v0 +model: + SAC: + trainer_param: + rl: + gamma: 0.99 + target_update_rate: 0.005 + softmax_policy: true + entropy_temperature: 0.3 + crr_config: + exponent_beta: 1.0 + exponent_clamp: 20.0 + q_network_optimizer: + Adam: + lr: 0.001 + value_network_optimizer: + Adam: + lr: 
0.001 + actor_network_optimizer: + Adam: + lr: 0.001 + actor_net_builder: + GaussianFullyConnected: + sizes: + - 64 + - 64 + activations: + - leaky_relu + - leaky_relu + critic_net_builder: + FullyConnected: + sizes: + - 64 + - 64 + activations: + - leaky_relu + - leaky_relu + value_net_builder: + FullyConnected: + sizes: + - 64 + - 64 + activations: + - leaky_relu + - leaky_relu + eval_parameters: + calc_cpe_in_training: false +replay_memory_size: 100000 +train_every_ts: 1 +train_after_ts: 10000 +num_train_episodes: 40 +num_eval_episodes: 20 +# Though maximal score is 0, we set lower bar to let tests finish in time +passing_score_bar: -500 +use_gpu: false +minibatch_size: 256 diff --git a/reagent/gym/tests/configs/pendulum/sac_pendulum_online.yaml b/reagent/gym/tests/configs/pendulum/sac_pendulum_online.yaml index 503b8a8c7..8d4be5c19 100644 --- a/reagent/gym/tests/configs/pendulum/sac_pendulum_online.yaml +++ b/reagent/gym/tests/configs/pendulum/sac_pendulum_online.yaml @@ -1,4 +1,6 @@ -env_name: Pendulum-v0 +env: + Gym: + env_name: Pendulum-v0 model: SAC: trainer_param: @@ -6,8 +8,7 @@ model: gamma: 0.99 target_update_rate: 0.005 softmax_policy: true - entropy_temperature: 0.1 - minibatch_size: 256 + entropy_temperature: 0.3 q_network_optimizer: Adam: lr: 0.001 @@ -46,12 +47,12 @@ model: - leaky_relu eval_parameters: calc_cpe_in_training: false -replay_memory_size: 10000 +replay_memory_size: 100000 train_every_ts: 1 -train_after_ts: 5000 +train_after_ts: 20000 num_train_episodes: 40 num_eval_episodes: 20 -max_steps: 200 # Though maximal score is 0, we set lower bar to let tests finish in time -passing_score_bar: -750 +passing_score_bar: -500 use_gpu: false +minibatch_size: 256 diff --git a/reagent/gym/tests/configs/pendulum/td3_pendulum_online.yaml b/reagent/gym/tests/configs/pendulum/td3_pendulum_online.yaml index 60a481468..56f6e31e3 100644 --- a/reagent/gym/tests/configs/pendulum/td3_pendulum_online.yaml +++ b/reagent/gym/tests/configs/pendulum/td3_pendulum_online.yaml @@ -1,11 +1,12 @@ -env_name: Pendulum-v0 +env: + Gym: + env_name: Pendulum-v0 model: TD3: trainer_param: rl: gamma: 0.99 target_update_rate: 0.005 - minibatch_size: 256 q_network_optimizer: Adam: lr: 0.01 @@ -38,8 +39,8 @@ replay_memory_size: 100000 train_every_ts: 1 train_after_ts: 5000 num_train_episodes: 40 -num_eval_episodes: 20 -max_steps: 200 +num_eval_episodes: 1 # Though maximal score is 0, we set lower bar to let tests finish in time passing_score_bar: -750 use_gpu: false +minibatch_size: 256 diff --git a/reagent/gym/tests/configs/recsim/slate_q_recsim_online.yaml b/reagent/gym/tests/configs/recsim/slate_q_recsim_online.yaml new file mode 100644 index 000000000..75f98b35d --- /dev/null +++ b/reagent/gym/tests/configs/recsim/slate_q_recsim_online.yaml @@ -0,0 +1,30 @@ +env: + RecSim: + slate_size: 3 + num_candidates: 10 +model: + SlateQ: + slate_size: 3 + num_candidates: 10 + slate_feature_id: 1 # filler + slate_score_id: [42, 42] # filler + trainer_param: + optimizer: + Adam: + lr: 0.001 + net_builder: + FullyConnected: + sizes: + - 64 + - 64 + activations: + - leaky_relu + - leaky_relu +replay_memory_size: 100000 +train_every_ts: 1 +train_after_ts: 5000 +num_train_episodes: 300 +num_eval_episodes: 20 +passing_score_bar: 154.0 +use_gpu: false +minibatch_size: 1024 diff --git a/reagent/gym/tests/configs/recsim/slate_q_recsim_online_maxq_topk.yaml b/reagent/gym/tests/configs/recsim/slate_q_recsim_online_maxq_topk.yaml new file mode 100644 index 000000000..99365988e --- /dev/null +++ 
b/reagent/gym/tests/configs/recsim/slate_q_recsim_online_maxq_topk.yaml @@ -0,0 +1,32 @@ +env: + RecSim: + slate_size: 3 + num_candidates: 10 +model: + SlateQ: + slate_size: 3 + num_candidates: 10 + slate_feature_id: 1 # filler + slate_score_id: [42, 42] # filler + trainer_param: + rl: + maxq_learning: True + optimizer: + Adam: + lr: 0.001 + net_builder: + FullyConnected: + sizes: + - 64 + - 64 + activations: + - leaky_relu + - leaky_relu +replay_memory_size: 100000 +train_every_ts: 1 +train_after_ts: 5000 +num_train_episodes: 300 +num_eval_episodes: 20 +passing_score_bar: 154.0 +use_gpu: false +minibatch_size: 1024 diff --git a/reagent/gym/tests/configs/recsim/slate_q_recsim_online_multi_selection.yaml b/reagent/gym/tests/configs/recsim/slate_q_recsim_online_multi_selection.yaml new file mode 100644 index 000000000..13ba1e8de --- /dev/null +++ b/reagent/gym/tests/configs/recsim/slate_q_recsim_online_multi_selection.yaml @@ -0,0 +1,32 @@ +env: + RecSim: + slate_size: 3 + num_candidates: 10 +model: + SlateQ: + slate_size: 3 + num_candidates: 10 + slate_feature_id: 1 # filler + slate_score_id: [42, 42] # filler + trainer_param: + single_selection: False + next_slate_value_norm_method: "norm_by_next_slate_size" + optimizer: + Adam: + lr: 0.001 + net_builder: + FullyConnected: + sizes: + - 64 + - 64 + activations: + - leaky_relu + - leaky_relu +replay_memory_size: 100000 +train_every_ts: 1 +train_after_ts: 5000 +num_train_episodes: 300 +num_eval_episodes: 20 +passing_score_bar: 154.0 +use_gpu: false +minibatch_size: 1024 diff --git a/reagent/gym/tests/configs/recsim/slate_q_recsim_online_multi_selection_avg_curr.yaml b/reagent/gym/tests/configs/recsim/slate_q_recsim_online_multi_selection_avg_curr.yaml new file mode 100644 index 000000000..8679fe9b6 --- /dev/null +++ b/reagent/gym/tests/configs/recsim/slate_q_recsim_online_multi_selection_avg_curr.yaml @@ -0,0 +1,32 @@ +env: + RecSim: + slate_size: 3 + num_candidates: 10 +model: + SlateQ: + slate_size: 3 + num_candidates: 10 + slate_feature_id: 1 # filler + slate_score_id: [42, 42] # filler + trainer_param: + single_selection: False + next_slate_value_norm_method: "norm_by_current_slate_size" + optimizer: + Adam: + lr: 0.001 + net_builder: + FullyConnected: + sizes: + - 64 + - 64 + activations: + - leaky_relu + - leaky_relu +replay_memory_size: 100000 +train_every_ts: 1 +train_after_ts: 5000 +num_train_episodes: 300 +num_eval_episodes: 20 +passing_score_bar: 154.0 +use_gpu: false +minibatch_size: 1024 diff --git a/reagent/gym/tests/configs/recsim/slate_q_recsim_online_with_time_scale.yaml b/reagent/gym/tests/configs/recsim/slate_q_recsim_online_with_time_scale.yaml new file mode 100644 index 000000000..ca63a6d43 --- /dev/null +++ b/reagent/gym/tests/configs/recsim/slate_q_recsim_online_with_time_scale.yaml @@ -0,0 +1,31 @@ +env: + RecSim: + slate_size: 3 + num_candidates: 10 +model: + SlateQ: + slate_size: 3 + num_candidates: 10 + slate_feature_id: 1 # filler + slate_score_id: [42, 42] # filler + trainer_param: + discount_time_scale: 2 + optimizer: + Adam: + lr: 0.001 + net_builder: + FullyConnected: + sizes: + - 64 + - 64 + activations: + - leaky_relu + - leaky_relu +replay_memory_size: 100000 +train_every_ts: 1 +train_after_ts: 5000 +num_train_episodes: 300 +num_eval_episodes: 20 +passing_score_bar: 154.0 +use_gpu: false +minibatch_size: 1024 diff --git a/reagent/gym/tests/configs/sparse/discrete_dqn_changing_arms_online.yaml b/reagent/gym/tests/configs/sparse/discrete_dqn_changing_arms_online.yaml new file mode 100644 index 
000000000..92ab37809 --- /dev/null +++ b/reagent/gym/tests/configs/sparse/discrete_dqn_changing_arms_online.yaml @@ -0,0 +1,79 @@ +env: + ChangingArms: + num_arms: 5 +model: + DiscreteDQN: + trainer_param: + actions: + - 0 + - 1 + - 2 + - 3 + - 4 + - 5 + rl: + gamma: 1.0 + target_update_rate: 0.2 + maxq_learning: true + temperature: 10.0 + double_q_learning: true + minibatches_per_step: 1 + optimizer: + AdamW: + lr: 0.005 + net_builder: + FullyConnectedWithEmbedding: + sizes: + - 256 + - 128 + activations: + - leaky_relu + - leaky_relu + embedding_dim: 128 + eval_parameters: + calc_cpe_in_training: false + state_feature_config_provider: + raw: + float_feature_infos: + - name: "arm0_sample" + feature_id: 0 + - name: "arm1_sample" + feature_id: 1 + - name: "arm2_sample" + feature_id: 2 + - name: "arm3_sample" + feature_id: 3 + - name: "arm4_sample" + feature_id: 4 + id_list_feature_configs: + - name: "legal" + feature_id: 100 + id_mapping_name: "legal_actions" + id_score_list_feature_configs: + - name: "mu_changes" + feature_id: 1000 + id_mapping_name: "arms_list" + id_mapping_config: + legal_actions: + ids: + - 1000000 + - 1000001 + - 1000002 + - 1000003 + - 1000004 + - 1000005 + arms_list: + ids: + - 1500000 + - 1500001 + - 1500002 + - 1500003 + - 1500004 +replay_memory_size: 100000 +train_every_ts: 1 +train_after_ts: 20000 +num_train_episodes: 150 +num_eval_episodes: 10 +passing_score_bar: 400 +use_gpu: false +minibatch_size: 1024 diff --git a/reagent/gym/tests/configs/world_model/cartpole_features.yaml b/reagent/gym/tests/configs/world_model/cartpole_features.yaml index bfcbdffe1..496dd7d33 100644 --- a/reagent/gym/tests/configs/world_model/cartpole_features.yaml +++ b/reagent/gym/tests/configs/world_model/cartpole_features.yaml @@ -4,7 +4,7 @@ model: trainer_param: hidden_size: 50 num_hidden_layers: 2 - learning_rate: 0.005 + learning_rate: 0.001 not_terminal_loss_weight: 1 next_state_loss_weight: 1 reward_loss_weight: 1 @@ -13,6 +13,6 @@ num_train_transitions: 100000 # approx. 500 episodes num_test_transitions: 6000 # approx. 
30 episodes seq_len: 1 batch_size: 1024 -num_train_epochs: 20 +num_train_epochs: 30 use_gpu: false saved_mdnrnn_path: null diff --git a/reagent/gym/tests/configs/world_model/cem_cartpole_offline.yaml b/reagent/gym/tests/configs/world_model/cem_cartpole_offline.yaml index a6c75dd16..586fe534b 100644 --- a/reagent/gym/tests/configs/world_model/cem_cartpole_offline.yaml +++ b/reagent/gym/tests/configs/world_model/cem_cartpole_offline.yaml @@ -1,5 +1,4 @@ env_name: CartPole-v0 -max_steps: 200 model: CrossEntropyMethod: trainer_param: @@ -12,7 +11,6 @@ model: mdnrnn: hidden_size: 100 num_hidden_layers: 2 - minibatch_size: 512 learning_rate: 0.001 not_terminal_loss_weight: 200.0 next_state_loss_weight: 1.0 @@ -26,4 +24,5 @@ num_batches_per_epoch: 1000 num_train_epochs: 1 num_eval_episodes: 1 passing_score_bar: 100.0 +minibatch_size: 1024 use_gpu: false diff --git a/reagent/gym/tests/configs/world_model/cem_many_world_models_linear_dynamics_offline.yaml b/reagent/gym/tests/configs/world_model/cem_many_world_models_linear_dynamics_offline.yaml index 1ee6a1d08..a53240404 100644 --- a/reagent/gym/tests/configs/world_model/cem_many_world_models_linear_dynamics_offline.yaml +++ b/reagent/gym/tests/configs/world_model/cem_many_world_models_linear_dynamics_offline.yaml @@ -1,5 +1,4 @@ env_name: LinearDynamics-v0 -max_steps: 200 model: CrossEntropyMethod: trainer_param: @@ -12,7 +11,6 @@ model: mdnrnn: hidden_size: 100 num_hidden_layers: 2 - minibatch_size: 1024 learning_rate: 0.001 not_terminal_loss_weight: 0.0 next_state_loss_weight: 1.0 @@ -26,4 +24,5 @@ num_batches_per_epoch: 5000 num_train_epochs: 1 num_eval_episodes: 1 passing_score_bar: -2.5 +minibatch_size: 1024 use_gpu: false diff --git a/reagent/gym/tests/configs/world_model/cem_single_world_model_linear_dynamics_offline.yaml b/reagent/gym/tests/configs/world_model/cem_single_world_model_linear_dynamics_offline.yaml index 66b6b3ac5..c71ce53b1 100644 --- a/reagent/gym/tests/configs/world_model/cem_single_world_model_linear_dynamics_offline.yaml +++ b/reagent/gym/tests/configs/world_model/cem_single_world_model_linear_dynamics_offline.yaml @@ -1,5 +1,4 @@ env_name: LinearDynamics-v0 -max_steps: 200 model: CrossEntropyMethod: trainer_param: @@ -12,7 +11,6 @@ model: mdnrnn: hidden_size: 100 num_hidden_layers: 2 - minibatch_size: 1024 learning_rate: 0.001 not_terminal_loss_weight: 0.0 next_state_loss_weight: 1.0 @@ -21,6 +19,7 @@ model: rl: gamma: 1.0 softmax_policy: 0 +minibatch_size: 1024 replay_memory_size: 50000 num_batches_per_epoch: 5000 num_train_epochs: 1 diff --git a/reagent/gym/tests/configs/world_model/discrete_dqn_string.yaml b/reagent/gym/tests/configs/world_model/discrete_dqn_string.yaml index d2d8c3ee6..f23791bb5 100644 --- a/reagent/gym/tests/configs/world_model/discrete_dqn_string.yaml +++ b/reagent/gym/tests/configs/world_model/discrete_dqn_string.yaml @@ -44,8 +44,8 @@ train_model: activations: - leaky_relu - leaky_relu - eval_parameters: - calc_cpe_in_training: false + eval_parameters: + calc_cpe_in_training: false num_agent_train_epochs: 100 num_agent_eval_epochs: 10 use_gpu: false diff --git a/reagent/gym/tests/configs/world_model/seq2reward_test.yaml b/reagent/gym/tests/configs/world_model/seq2reward_test.yaml new file mode 100644 index 000000000..98ea2de69 --- /dev/null +++ b/reagent/gym/tests/configs/world_model/seq2reward_test.yaml @@ -0,0 +1,14 @@ +env_name: StringGame-v0 +model: + Seq2RewardModel: + trainer_param: + learning_rate: 0.005 + multi_steps: 6 + action_names: ["0","1"] +num_train_transitions: 100000 # approx. 
500 episodes +num_test_transitions: 6000 # approx. 30 episodes +seq_len: 6 +batch_size: 1024 +num_train_epochs: 20 +use_gpu: false +saved_seq2reward_path: null diff --git a/reagent/gym/tests/preprocessors/test_default_preprocessors.py b/reagent/gym/tests/preprocessors/test_default_preprocessors.py index 5f93755bc..6705ee334 100644 --- a/reagent/gym/tests/preprocessors/test_default_preprocessors.py +++ b/reagent/gym/tests/preprocessors/test_default_preprocessors.py @@ -3,18 +3,14 @@ import unittest -import gym import numpy.testing as npt import torch import torch.nn.functional as F -from reagent.gym.envs.recsim import ValueMode, ValueWrapper -from reagent.gym.preprocessors.default_preprocessors import ( - make_default_obs_preprocessor, -) +from reagent.gym.envs import Gym try: - from recsim.environments import interest_evolution, interest_exploration + from reagent.gym.envs import RecSim HAS_RECSIM = True except ModuleNotFoundError: @@ -22,9 +18,9 @@ class TestMakeDefaultObsPreprocessor(unittest.TestCase): - def test_box(self): - env = gym.make("CartPole-v0") - obs_preprocessor = make_default_obs_preprocessor(env) + def test_box(self) -> None: + env = Gym(env_name="CartPole-v0") + obs_preprocessor = env.get_obs_preprocessor() obs = env.reset() state = obs_preprocessor(obs) self.assertTrue(state.has_float_features_only) @@ -34,10 +30,10 @@ def test_box(self): npt.assert_array_almost_equal(obs, state.float_features.squeeze(0)) @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_box_cuda(self): - env = gym.make("CartPole-v0") + def test_box_cuda(self) -> None: + env = Gym(env_name="CartPole-v0") device = torch.device("cuda") - obs_preprocessor = make_default_obs_preprocessor(env, device=device) + obs_preprocessor = env.get_obs_preprocessor(device=device) obs = env.reset() state = obs_preprocessor(obs) self.assertTrue(state.has_float_features_only) @@ -49,17 +45,13 @@ def test_box_cuda(self): npt.assert_array_almost_equal(obs, state.float_features.cpu().squeeze(0)) @unittest.skipIf(not HAS_RECSIM, "Recsim is not installed") - def test_recsim_interest_evolution(self): + def test_recsim_interest_evolution(self) -> None: num_candidate = 10 - env_config = { - "num_candidates": num_candidate, - "slate_size": 3, - "resample_documents": False, - "seed": 1, - } - env = interest_evolution.create_environment(env_config) - env = ValueWrapper(env, ValueMode.INNER_PROD) - obs_preprocessor = make_default_obs_preprocessor(env) + # pyre-fixme[16]: Module `envs` has no attribute `RecSim`. + env = RecSim( + num_candidates=num_candidate, slate_size=3, resample_documents=False + ) + obs_preprocessor = env.get_obs_preprocessor() obs = env.reset() state = obs_preprocessor(obs) self.assertFalse(state.has_float_features_only) @@ -78,17 +70,16 @@ def test_recsim_interest_evolution(self): npt.assert_array_almost_equal(v, doc_float_features[0, i]) @unittest.skipIf(not HAS_RECSIM, "Recsim is not installed") - def test_recsim_interest_exploration(self): + def test_recsim_interest_exploration(self) -> None: num_candidate = 10 - env_config = { - "num_candidates": num_candidate, - "slate_size": 3, - "resample_documents": False, - "seed": 1, - } - env = interest_exploration.create_environment(env_config) - env = ValueWrapper(env, ValueMode.CONST) - obs_preprocessor = make_default_obs_preprocessor(env) + # pyre-fixme[16]: Module `envs` has no attribute `RecSim`. 
+ env = RecSim( + num_candidates=num_candidate, + slate_size=3, + resample_documents=False, + is_interest_exploration=True, + ) + obs_preprocessor = env.get_obs_preprocessor() obs = env.reset() state = obs_preprocessor(obs) self.assertFalse(state.has_float_features_only) diff --git a/reagent/gym/tests/preprocessors/test_replay_buffer_inserters.py b/reagent/gym/tests/preprocessors/test_replay_buffer_inserters.py index e2d19ea0b..24496e770 100644 --- a/reagent/gym/tests/preprocessors/test_replay_buffer_inserters.py +++ b/reagent/gym/tests/preprocessors/test_replay_buffer_inserters.py @@ -8,6 +8,7 @@ import numpy as np import numpy.testing as npt import torch +from reagent.gym.envs import EnvWrapper from reagent.gym.preprocessors import make_replay_buffer_inserter from reagent.gym.types import Transition from reagent.replay_memory import ReplayBuffer @@ -17,18 +18,16 @@ logger = logging.getLogger(__name__) try: - from recsim.environments import interest_evolution, interest_exploration + from reagent.gym.envs import RecSim HAS_RECSIM = True except ModuleNotFoundError: HAS_RECSIM = False -def _create_replay_buffer_and_insert(env: gym.Env): +def _create_replay_buffer_and_insert(env: EnvWrapper): env.seed(1) - replay_buffer = ReplayBuffer.create_from_env( - env, replay_memory_size=6, batch_size=1 - ) + replay_buffer = ReplayBuffer(replay_capacity=6, batch_size=1) replay_buffer_inserter = make_replay_buffer_inserter(env) obs = env.reset() inserted = [] @@ -81,13 +80,12 @@ class TestRecSimReplayBufferInserter(HorizonTestBase): @unittest.skipIf(not HAS_RECSIM, "RecSim not installed") def test_recsim_interest_evolution(self): num_candidate = 10 - env_config = { - "num_candidates": num_candidate, - "slate_size": 3, - "resample_documents": False, - "seed": 1, - } - env = interest_evolution.create_environment(env_config) + slate_size = 3 + env = RecSim( + num_candidates=num_candidate, + slate_size=slate_size, + resample_documents=False, + ) replay_buffer, inserted = _create_replay_buffer_and_insert(env) batch = replay_buffer.sample_transition_batch(indices=torch.tensor([0])) npt.assert_array_almost_equal( @@ -109,7 +107,7 @@ def test_recsim_interest_evolution(self): npt.assert_array_equal([0.0, 0.0, 0.0], batch.response_quality.squeeze(0)) npt.assert_array_equal([0.0, 0.0, 0.0], batch.response_watch_time.squeeze(0)) resp = inserted[1]["observation"]["response"] - for i in range(env_config["slate_size"]): + for i in range(slate_size): npt.assert_array_equal( resp[i]["click"], batch.next_response_click.squeeze(0)[i] ) @@ -129,13 +127,13 @@ def test_recsim_interest_evolution(self): @unittest.skipIf(not HAS_RECSIM, "RecSim not installed") def test_recsim_interest_exploration(self): num_candidate = 10 - env_config = { - "num_candidates": num_candidate, - "slate_size": 3, - "resample_documents": False, - "seed": 1, - } - env = interest_exploration.create_environment(env_config) + slate_size = 3 + env = RecSim( + num_candidates=num_candidate, + slate_size=slate_size, + resample_documents=False, + is_interest_exploration=True, + ) replay_buffer, inserted = _create_replay_buffer_and_insert(env) batch = replay_buffer.sample_transition_batch(indices=torch.tensor([0])) npt.assert_array_almost_equal( @@ -160,7 +158,7 @@ def test_recsim_interest_exploration(self): npt.assert_array_equal([0, 0, 0], batch.response_cluster_id.squeeze(0)) npt.assert_array_equal([0.0, 0.0, 0.0], batch.response_quality.squeeze(0)) resp = inserted[1]["observation"]["response"] - for i in range(env_config["slate_size"]): + for i in 
range(slate_size): npt.assert_array_equal( resp[i]["click"], batch.next_response_click.squeeze(0)[i] ) diff --git a/reagent/gym/tests/test_epsilon_greedy_action_sampler.py b/reagent/gym/tests/test_epsilon_greedy_action_sampler.py new file mode 100644 index 000000000..4acff749e --- /dev/null +++ b/reagent/gym/tests/test_epsilon_greedy_action_sampler.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import torch +from reagent.gym.policies.samplers.discrete_sampler import EpsilonGreedyActionSampler +from reagent.test.base.horizon_test_base import HorizonTestBase + + +class EpsilonGreedyActionSamplerTest(HorizonTestBase): + def test_greedy_selection(self): + scores = torch.tensor( + [ + [1.0, 2.0, 3.0, 4.0, 5.0], + [5.0, 1.0, 2.0, 3.0, 4.0], + ] + ) + sampler = EpsilonGreedyActionSampler(epsilon=0.0) + + test_action = torch.tensor( + [ + [0, 0, 0, 0, 1], + [1, 0, 0, 0, 0], + ], + dtype=torch.long, + ) + action = sampler.sample_action(scores) + + torch.testing.assert_allclose(action.action, test_action) + + test_log_prob = torch.tensor( + [0.0, 0.0], + dtype=torch.float, + ) + + torch.testing.assert_allclose(action.log_prob, test_log_prob) + + def test_uniform_random_selection(self): + scores = torch.tensor( + [ + [1.0, 2.0, 3.0, 4.0, 5.0], + [5.0, 1.0, 2.0, 3.0, 4.0], + ] + ) + sampler = EpsilonGreedyActionSampler(epsilon=1.0) + + action = sampler.sample_action(scores) + + test_log_prob = torch.tensor( + [-1.60944, -1.60944], + dtype=torch.float, + ) + + torch.testing.assert_allclose(action.log_prob, test_log_prob) diff --git a/reagent/gym/tests/test_gym.py b/reagent/gym/tests/test_gym.py index 83521c010..dfaa2f805 100644 --- a/reagent/gym/tests/test_gym.py +++ b/reagent/gym/tests/test_gym.py @@ -4,26 +4,32 @@ import os import pprint import unittest +import uuid from typing import Optional import numpy as np +import pytest +import pytorch_lightning as pl import torch from parameterized import parameterized from reagent.gym.agents.agent import Agent -from reagent.gym.agents.post_step import train_with_replay_buffer_post_step -from reagent.gym.envs.env_factory import EnvFactory -from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode +from reagent.gym.datasets.episodic_dataset import EpisodicDataset +from reagent.gym.datasets.replay_buffer_dataset import ReplayBufferDataset +from reagent.gym.envs import Env__Union +from reagent.gym.envs.env_wrapper import EnvWrapper +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.random_policies import make_random_policy_for_env +from reagent.gym.runners.gymrunner import evaluate_for_n_episodes from reagent.gym.utils import build_normalizer, fill_replay_buffer +from reagent.model_managers.union import ModelManager__Union from reagent.replay_memory.circular_replay_buffer import ReplayBuffer -from reagent.tensorboardX import summary_writer_context from reagent.test.base.horizon_test_base import HorizonTestBase -from reagent.workflow.model_managers.union import ModelManager__Union -from reagent.workflow.types import RewardOptions -from torch.utils.tensorboard import SummaryWriter # for seeding the environment SEED = 0 +# exponential moving average parameter for tracking reward progress +REWARD_DECAY = 0.8 logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) @@ -34,7 +40,8 @@ NOTE: These tests should ideally finish quickly (within 10 minutes) since they are unit tests which are run many times. 
""" -GYM_TESTS = [ +REPLAY_BUFFER_GYM_TESTS_1 = [ + ("Discrete CRR Cartpole", "configs/cartpole/discrete_crr_cartpole_online.yaml"), ("Discrete DQN Cartpole", "configs/cartpole/discrete_dqn_cartpole_online.yaml"), ("Discrete C51 Cartpole", "configs/cartpole/discrete_c51_cartpole_online.yaml"), ("Discrete QR Cartpole", "configs/cartpole/discrete_qr_cartpole_online.yaml"), @@ -43,12 +50,45 @@ "configs/open_gridworld/discrete_dqn_open_gridworld.yaml", ), ("SAC Pendulum", "configs/pendulum/sac_pendulum_online.yaml"), + ("Continuous CRR Pendulum", "configs/pendulum/continuous_crr_pendulum_online.yaml"), ("TD3 Pendulum", "configs/pendulum/td3_pendulum_online.yaml"), +] +REPLAY_BUFFER_GYM_TESTS_2 = [ ("Parametric DQN Cartpole", "configs/cartpole/parametric_dqn_cartpole_online.yaml"), ( "Parametric SARSA Cartpole", "configs/cartpole/parametric_sarsa_cartpole_online.yaml", ), + # Disabled for now because flaky. + # ( + # "Sparse DQN Changing Arms", + # "configs/sparse/discrete_dqn_changing_arms_online.yaml", + # ), + ("SlateQ RecSim", "configs/recsim/slate_q_recsim_online.yaml"), + ( + "SlateQ RecSim with Discount Scaled by Time Diff", + "configs/recsim/slate_q_recsim_online_with_time_scale.yaml", + ), + ( + "SlateQ RecSim multi selection", + "configs/recsim/slate_q_recsim_online_multi_selection.yaml", + ), + ( + "SlateQ RecSim multi selection average by current slate size", + "configs/recsim/slate_q_recsim_online_multi_selection_avg_curr.yaml", + ), + ("PossibleActionsMask DQN", "configs/functionality/dqn_possible_actions_mask.yaml"), +] + +ONLINE_EPISODE_GYM_TESTS = [ + ( + "REINFORCE Cartpole online", + "configs/cartpole/discrete_reinforce_cartpole_online.yaml", + ), + ( + "PPO Cartpole online", + "configs/cartpole/discrete_ppo_cartpole_online.yaml", + ), ] @@ -57,118 +97,234 @@ class TestGym(HorizonTestBase): # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. - @parameterized.expand(GYM_TESTS) - def test_gym_cpu(self, name: str, config_path: str): + @parameterized.expand(REPLAY_BUFFER_GYM_TESTS_1) + def test_replay_buffer_gym_cpu_1(self, name: str, config_path: str): + self._test_replay_buffer_gym_cpu(name, config_path) + + # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. + @parameterized.expand(REPLAY_BUFFER_GYM_TESTS_2) + def test_replay_buffer_gym_cpu_2(self, name: str, config_path: str): + self._test_replay_buffer_gym_cpu(name, config_path) + + def _test_replay_buffer_gym_cpu(self, name: str, config_path: str): + logger.info(f"Starting {name} on CPU") self.run_from_config( - run_test=run_test, + run_test=run_test_replay_buffer, config_path=os.path.join(curr_dir, config_path), use_gpu=False, ) logger.info(f"{name} passes!") # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. - @parameterized.expand(GYM_TESTS) + @parameterized.expand(REPLAY_BUFFER_GYM_TESTS_1) + @pytest.mark.serial @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_gym_gpu(self, name: str, config_path: str): + def test_replay_buffer_gym_gpu_1(self, name: str, config_path: str): + self._test_replay_buffer_gym_gpu(name, config_path) + + # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. 
+ @parameterized.expand(REPLAY_BUFFER_GYM_TESTS_2) + @pytest.mark.serial + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_replay_buffer_gym_gpu_2(self, name: str, config_path: str): + self._test_replay_buffer_gym_gpu(name, config_path) + + def _test_replay_buffer_gym_gpu(self, name: str, config_path: str): + logger.info(f"Starting {name} on GPU") self.run_from_config( - run_test=run_test, + run_test=run_test_replay_buffer, config_path=os.path.join(curr_dir, config_path), use_gpu=True, ) logger.info(f"{name} passes!") + # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. + @parameterized.expand(ONLINE_EPISODE_GYM_TESTS) + def test_online_episode_gym_cpu(self, name: str, config_path: str): + logger.info(f"Starting {name} on CPU") + self.run_from_config( + run_test=run_test_online_episode, + config_path=os.path.join(curr_dir, config_path), + use_gpu=False, + ) + logger.info(f"{name} passes!") + + +def eval_policy( + env: EnvWrapper, + serving_policy: Policy, + num_eval_episodes: int, + serving: bool = True, +) -> np.ndarray: + agent = ( + Agent.create_for_env_with_serving_policy(env, serving_policy) + if serving + else Agent.create_for_env(env, serving_policy) + ) + + eval_rewards = evaluate_for_n_episodes( + n=num_eval_episodes, + env=env, + agent=agent, + max_steps=env.max_steps, + num_processes=1, + ).squeeze(1) + + logger.info("============Eval rewards==============") + logger.info(eval_rewards) + mean_eval = np.mean(eval_rewards) + logger.info(f"average: {mean_eval};\tmax: {np.max(eval_rewards)}") + return np.array(eval_rewards) + + +def identity_collate(batch): + assert isinstance(batch, list) and len(batch) == 1, f"Got {batch}" + return batch[0] -def run_test( - env_name: str, + +def run_test_replay_buffer( + env: Env__Union, model: ModelManager__Union, replay_memory_size: int, train_every_ts: int, train_after_ts: int, num_train_episodes: int, - max_steps: Optional[int], passing_score_bar: float, num_eval_episodes: int, use_gpu: bool, + minibatch_size: Optional[int] = None, ): - env = EnvFactory.make(env_name) + """ + Run an online learning test with a replay buffer. The replay buffer is pre-filled, then the training starts. + Each transition is added to the replay buffer immediately after it takes place. 
+ """ + env = env.value + pl.seed_everything(SEED) env.seed(SEED) env.action_space.seed(SEED) + normalization = build_normalizer(env) logger.info(f"Normalization is: \n{pprint.pformat(normalization)}") manager = model.value - trainer = manager.initialize_trainer( + trainer = manager.build_trainer( use_gpu=use_gpu, - reward_options=RewardOptions(), normalization_data_map=normalization, ) - training_policy = manager.create_policy(serving=False) + training_policy = manager.create_policy(trainer, serving=False) - replay_buffer = ReplayBuffer.create_from_env( - env=env, - replay_memory_size=replay_memory_size, - batch_size=trainer.minibatch_size, - ) + if not isinstance(trainer, pl.LightningModule): + if minibatch_size is None: + minibatch_size = trainer.minibatch_size + assert minibatch_size == trainer.minibatch_size - device = torch.device("cuda") if use_gpu else None - # first fill the replay buffer to burn_in - train_after_ts = max(train_after_ts, trainer.minibatch_size) - fill_replay_buffer( - env=env, replay_buffer=replay_buffer, desired_size=train_after_ts + assert minibatch_size is not None + + replay_buffer = ReplayBuffer( + replay_capacity=replay_memory_size, batch_size=minibatch_size ) - post_step = train_with_replay_buffer_post_step( - replay_buffer=replay_buffer, + device = torch.device("cuda") if use_gpu else torch.device("cpu") + # first fill the replay buffer using random policy + train_after_ts = max(train_after_ts, minibatch_size) + random_policy = make_random_policy_for_env(env) + agent = Agent.create_for_env(env, policy=random_policy) + fill_replay_buffer( env=env, - trainer=trainer, - training_freq=train_every_ts, - batch_size=trainer.minibatch_size, - device=device, + replay_buffer=replay_buffer, + desired_size=train_after_ts, + agent=agent, ) - agent = Agent.create_for_env( + agent = Agent.create_for_env(env, policy=training_policy, device=device) + # TODO: Simplify this setup by creating LightningDataModule + dataset = ReplayBufferDataset.create_for_trainer( + trainer, env, - policy=training_policy, - post_transition_callback=post_step, - # pyre-fixme[6]: Expected `Union[str, torch.device]` for 4th param but got - # `Optional[torch.device]`. + agent, + replay_buffer, + batch_size=minibatch_size, + training_frequency=train_every_ts, + num_episodes=num_train_episodes, + max_steps=200, device=device, ) + data_loader = torch.utils.data.DataLoader(dataset, collate_fn=identity_collate) + torch.use_deterministic_algorithms(True, warn_only=True) + pl_trainer = pl.Trainer( + max_epochs=1, + gpus=int(use_gpu), + default_root_dir=f"lightning_log_{str(uuid.uuid4())}", + ) + # Note: the fit() function below also evaluates the agent along the way + # and adds the new transitions to the replay buffer, so it is training + # on incrementally larger and larger buffers. 
+ pl_trainer.fit(trainer, data_loader) + + # TODO: Also check train_reward - writer = SummaryWriter() - with summary_writer_context(writer): - train_rewards = [] - for i in range(num_train_episodes): - trajectory = run_episode( - env=env, agent=agent, mdp_id=i, max_steps=max_steps - ) - ep_reward = trajectory.calculate_cumulative_reward() - train_rewards.append(ep_reward) - logger.info(f"Finished training episode {i} with reward {ep_reward}.") - - assert train_rewards[-1] >= passing_score_bar, ( - f"reward after {len(train_rewards)} episodes is {train_rewards[-1]}," - f"less than < {passing_score_bar}...\n" - f"Full reward history: {train_rewards}" + serving_policy = manager.create_policy( + trainer, serving=True, normalization_data_map=normalization ) - logger.info("============Train rewards=============") - logger.info(train_rewards) + eval_rewards = eval_policy(env, serving_policy, num_eval_episodes, serving=True) + assert ( + eval_rewards.mean() >= passing_score_bar + ), f"Eval reward is {eval_rewards.mean()}, less than < {passing_score_bar}.\n" - serving_policy = manager.create_policy(serving=True) - agent = Agent.create_for_env_with_serving_policy(env, serving_policy) - eval_rewards = evaluate_for_n_episodes( - n=num_eval_episodes, env=env, agent=agent, max_steps=max_steps - ).squeeze(1) - assert np.mean(eval_rewards) >= passing_score_bar, ( - f"Predictor reward is {np.mean(eval_rewards)}," - f"less than < {passing_score_bar}...\n" - f"Full eval rewards: {eval_rewards}." +def run_test_online_episode( + env: Env__Union, + model: ModelManager__Union, + num_train_episodes: int, + passing_score_bar: float, + num_eval_episodes: int, + use_gpu: bool, +): + """ + Run an online learning test. At the end of each episode training is run on the trajectory. + """ + env = env.value + pl.seed_everything(SEED) + env.seed(SEED) + env.action_space.seed(SEED) + + normalization = build_normalizer(env) + logger.info(f"Normalization is: \n{pprint.pformat(normalization)}") + + manager = model.value + trainer = manager.build_trainer( + use_gpu=use_gpu, + normalization_data_map=normalization, ) + policy = manager.create_policy(trainer, serving=False) - logger.info("============Eval rewards==============") - logger.info(eval_rewards) + device = torch.device("cuda") if use_gpu else torch.device("cpu") + + agent = Agent.create_for_env(env, policy, device=device) + + torch.use_deterministic_algorithms(True, warn_only=True) + pl_trainer = pl.Trainer( + max_epochs=1, + gpus=int(use_gpu), + default_root_dir=f"lightning_log_{str(uuid.uuid4())}", + ) + dataset = EpisodicDataset( + env=env, agent=agent, num_episodes=num_train_episodes, seed=SEED + ) + data_loader = torch.utils.data.DataLoader(dataset, collate_fn=identity_collate) + pl_trainer.fit(trainer, data_loader) + + eval_rewards = evaluate_for_n_episodes( + n=num_eval_episodes, + env=env, + agent=agent, + max_steps=env.max_steps, + num_processes=1, + ).squeeze(1) + assert ( + eval_rewards.mean() >= passing_score_bar + ), f"Eval reward is {eval_rewards.mean()}, less than < {passing_score_bar}.\n" if __name__ == "__main__": diff --git a/reagent/gym/tests/test_gym_datasets.py b/reagent/gym/tests/test_gym_datasets.py new file mode 100644 index 000000000..ba7ce2ed1 --- /dev/null +++ b/reagent/gym/tests/test_gym_datasets.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +import logging +import unittest + +from reagent.gym.agents.agent import Agent +from reagent.gym.datasets.episodic_dataset import EpisodicDataset +from reagent.gym.envs import Gym +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler +from reagent.gym.utils import build_normalizer +from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected + + +logger = logging.getLogger(__name__) + + +class TestEpisodicDataset(unittest.TestCase): + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + env = Gym("CartPole-v0") + norm = build_normalizer(env) + net_builder = FullyConnected(sizes=[8], activations=["linear"]) + cartpole_scorer = net_builder.build_q_network( + state_feature_config=None, + state_normalization_data=norm["state"], + output_dim=len(norm["action"].dense_normalization_parameters), + ) + policy = Policy(scorer=cartpole_scorer, sampler=SoftmaxActionSampler()) + agent = Agent.create_for_env(env, policy) + self.max_steps = 3 + self.num_episodes = 6 + self.dataset = EpisodicDataset( + env=env, + agent=agent, + num_episodes=self.num_episodes, + seed=0, + max_steps=self.max_steps, + ) + + def test_episodic_dataset(self): + pass + num_batches = 0 + for batch in self.dataset: + num_batches += 1 + self.assertLessEqual(len(batch["reward"]), self.max_steps) + self.assertIsInstance(batch, dict) + self.assertEqual(num_batches, self.num_episodes) diff --git a/reagent/gym/tests/test_gym_offline.py b/reagent/gym/tests/test_gym_offline.py index e91ab73ec..b71388961 100644 --- a/reagent/gym/tests/test_gym_offline.py +++ b/reagent/gym/tests/test_gym_offline.py @@ -4,24 +4,23 @@ import os import pprint import unittest -from typing import Optional +import uuid -import gym import numpy as np +import pytest +import pytorch_lightning as pl import torch from parameterized import parameterized from reagent.gym.agents.agent import Agent -from reagent.gym.envs.env_factory import EnvFactory -from reagent.gym.preprocessors import make_replay_buffer_trainer_preprocessor +from reagent.gym.datasets.replay_buffer_dataset import OfflineReplayBufferDataset +from reagent.gym.envs import Gym +from reagent.gym.policies.random_policies import make_random_policy_for_env from reagent.gym.runners.gymrunner import evaluate_for_n_episodes from reagent.gym.utils import build_normalizer, fill_replay_buffer +from reagent.model_managers.union import ModelManager__Union from reagent.replay_memory.circular_replay_buffer import ReplayBuffer -from reagent.tensorboardX import summary_writer_context from reagent.test.base.horizon_test_base import HorizonTestBase -from reagent.workflow.model_managers.union import ModelManager__Union from reagent.workflow.types import RewardOptions -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm # for seeding the environment @@ -62,6 +61,7 @@ def test_gym_offline_cpu(self, name: str, config_path: str): # pyre-fixme[16]: Module `parameterized` has no attribute `expand`. 
@parameterized.expand(GYM_TESTS) + @pytest.mark.serial @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") def test_gym_offline_gpu(self, name: str, config_path: str): self.run_from_config( @@ -72,68 +72,78 @@ def test_gym_offline_gpu(self, name: str, config_path: str): logger.info(f"{name} passes!") -def evaluate_cem( - env: gym.Env, manager, max_steps: Optional[int], num_eval_episodes: int -): +def evaluate_cem(env, manager, trainer_module, num_eval_episodes: int): # NOTE: for CEM, serving isn't implemented - policy = manager.create_policy(serving=False) + policy = manager.create_policy(trainer_module, serving=False) agent = Agent.create_for_env(env, policy) return evaluate_for_n_episodes( - n=num_eval_episodes, env=env, agent=agent, max_steps=max_steps + n=num_eval_episodes, env=env, agent=agent, max_steps=env.max_steps ) +def identity_collate(batch): + assert isinstance(batch, list) and len(batch) == 1, f"Got {batch}" + return batch[0] + + def run_test_offline( env_name: str, - max_steps: Optional[int], model: ModelManager__Union, replay_memory_size: int, num_batches_per_epoch: int, num_train_epochs: int, passing_score_bar: float, num_eval_episodes: int, + minibatch_size: int, use_gpu: bool, ): - env = EnvFactory.make(env_name) + env = Gym(env_name=env_name) env.seed(SEED) env.action_space.seed(SEED) normalization = build_normalizer(env) logger.info(f"Normalization is: \n{pprint.pformat(normalization)}") manager = model.value - trainer = manager.initialize_trainer( + trainer = manager.build_trainer( use_gpu=use_gpu, reward_options=RewardOptions(), normalization_data_map=normalization, ) # first fill the replay buffer to burn_in - replay_buffer = ReplayBuffer.create_from_env( - env=env, - replay_memory_size=replay_memory_size, - batch_size=trainer.minibatch_size, + replay_buffer = ReplayBuffer( + replay_capacity=replay_memory_size, batch_size=minibatch_size ) # always fill full RB + random_policy = make_random_policy_for_env(env) + agent = Agent.create_for_env(env, policy=random_policy) fill_replay_buffer( - env=env, replay_buffer=replay_buffer, desired_size=replay_memory_size + env=env, + replay_buffer=replay_buffer, + desired_size=replay_memory_size, + agent=agent, ) device = torch.device("cuda") if use_gpu else None - # pyre-fixme[6]: Expected `device` for 2nd param but got `Optional[torch.device]`. 
- trainer_preprocessor = make_replay_buffer_trainer_preprocessor(trainer, device, env) - - writer = SummaryWriter() - with summary_writer_context(writer): - for epoch in range(num_train_epochs): - logger.info(f"Evaluating before epoch {epoch}: ") - eval_rewards = evaluate_cem(env, manager, max_steps, 1) - for _ in tqdm(range(num_batches_per_epoch)): - train_batch = replay_buffer.sample_transition_batch() - preprocessed_batch = trainer_preprocessor(train_batch) - trainer.train(preprocessed_batch) + dataset = OfflineReplayBufferDataset.create_for_trainer( + trainer, + env, + replay_buffer, + batch_size=minibatch_size, + num_batches=num_batches_per_epoch, + device=device, + ) + data_loader = torch.utils.data.DataLoader(dataset, collate_fn=identity_collate) + pl_trainer = pl.Trainer( + max_epochs=num_train_epochs, + gpus=int(use_gpu), + deterministic=True, + default_root_dir=f"lightning_log_{str(uuid.uuid4())}", + ) + pl_trainer.fit(trainer, data_loader) logger.info(f"Evaluating after training for {num_train_epochs} epochs: ") - eval_rewards = evaluate_cem(env, manager, max_steps, num_eval_episodes) + eval_rewards = evaluate_cem(env, manager, trainer, num_eval_episodes) mean_rewards = np.mean(eval_rewards) assert ( mean_rewards >= passing_score_bar diff --git a/reagent/gym/tests/test_gym_replay_buffer.py b/reagent/gym/tests/test_gym_replay_buffer.py new file mode 100644 index 000000000..26978b089 --- /dev/null +++ b/reagent/gym/tests/test_gym_replay_buffer.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging + +import numpy.testing as npt +from reagent.core.parameters import ProblemDomain +from reagent.gym.envs import Gym +from reagent.gym.envs.wrappers.simple_minigrid import SimpleObsWrapper +from reagent.gym.utils import create_df_from_replay_buffer +from reagent.preprocessing.sparse_to_dense import PythonSparseToDenseProcessor +from reagent.test.base.horizon_test_base import HorizonTestBase + +logger = logging.getLogger(__name__) + + +class TestEnv(SimpleObsWrapper): + """ + Wrap Gym environment in TestEnv to save the MiniGrid's + observation, action, reward and terminal in a list so that + we can check if replay buffer is working correctly + """ + + def __init__(self, env): + self.env = env + self.action_space = self.env.action_space + # mdp_id, sequence_number, state, action, reward, terminal + self.sart = [] + self.mdp_id = -1 + self.sequence_number = 0 + + def seed(self, *args, **kwargs): + return self.env.seed(*args, **kwargs) + + def reset(self, **kwargs): + self.mdp_id += 1 + self.sequence_number = 0 + res = self.env.reset(**kwargs) + self.sart.append([self.mdp_id, self.sequence_number, res, None, None, None]) + return res + + def step(self, action): + res = self.env.step(action) + ( + _, + _, + last_state, + last_action, + last_reward, + last_terminal, + ) = self.sart[-1] + assert ( + last_state is not None + and last_action is None + and last_reward is None + and last_terminal is None + ) + next_state, reward, terminal, _ = res + self.sart[-1][3] = action + self.sart[-1][4] = reward + self.sart[-1][5] = terminal + self.sequence_number += 1 + self.sart.append( + [self.mdp_id, self.sequence_number, next_state, None, None, None] + ) + return res + + +class TestGymReplayBuffer(HorizonTestBase): + def test_create_df_from_replay_buffer(self): + env_name = "MiniGrid-Empty-5x5-v0" + env = Gym(env_name=env_name) + state_dim = env.observation_space.shape[0] + # Wrap env in TestEnv + env = TestEnv(env) + 
problem_domain = ProblemDomain.DISCRETE_ACTION + DATASET_SIZE = 1000 + multi_steps = None + DS = "2021-09-16" + + # Generate data + df = create_df_from_replay_buffer( + env=env, + problem_domain=problem_domain, + desired_size=DATASET_SIZE, + multi_steps=multi_steps, + ds=DS, + shuffle_df=False, + ) + self.assertEqual(len(df), DATASET_SIZE) + + # Check data + preprocessor = PythonSparseToDenseProcessor(list(range(state_dim))) + for idx, row in df.iterrows(): + df_mdp_id = row["mdp_id"] + env_mdp_id = str(env.sart[idx][0]) + self.assertEqual(df_mdp_id, env_mdp_id) + + df_seq_num = row["sequence_number"] + env_seq_num = env.sart[idx][1] + self.assertEqual(df_seq_num, env_seq_num) + + df_state = preprocessor.process([row["state_features"]])[0][0].numpy() + env_state = env.sart[idx][2] + npt.assert_array_equal(df_state, env_state) + + df_action = row["action"] + env_action = str(env.sart[idx][3]) + self.assertEqual(df_action, env_action) + + df_terminal = row["next_action"] == "" + env_terminal = env.sart[idx][5] + self.assertEqual(df_terminal, env_terminal) + if not df_terminal: + df_reward = float(row["reward"]) + env_reward = float(env.sart[idx][4]) + npt.assert_allclose(df_reward, env_reward) + + df_next_state = preprocessor.process([row["next_state_features"]])[0][ + 0 + ].numpy() + env_next_state = env.sart[idx + 1][2] + npt.assert_array_equal(df_next_state, env_next_state) + + df_next_action = row["next_action"] + env_next_action = str(env.sart[idx + 1][3]) + self.assertEqual(df_next_action, env_next_action) + else: + del env.sart[idx + 1] diff --git a/reagent/gym/tests/test_linear_dynamics.py b/reagent/gym/tests/test_linear_dynamics.py index de3b7cd8f..3ea34ff33 100644 --- a/reagent/gym/tests/test_linear_dynamics.py +++ b/reagent/gym/tests/test_linear_dynamics.py @@ -7,7 +7,7 @@ import numpy as np import scipy.linalg as linalg -from reagent.gym.envs.env_factory import EnvFactory +from reagent.gym.envs import Gym logger = logging.getLogger(__name__) @@ -22,7 +22,7 @@ def test_random_vs_lqr(self): Test random actions vs. a LQR controller. LQR controller should perform much better than random actions in the linear dynamics environment. 
""" - env = EnvFactory.make("LinearDynamics-v0") + env = Gym(env_name="LinearDynamics-v0") num_test_episodes = 500 def random_policy(env, state): diff --git a/reagent/gym/tests/test_pomdp.py b/reagent/gym/tests/test_pomdp.py index f238befee..8dcfb5d90 100644 --- a/reagent/gym/tests/test_pomdp.py +++ b/reagent/gym/tests/test_pomdp.py @@ -6,7 +6,7 @@ import unittest import numpy as np -from reagent.gym.envs.env_factory import EnvFactory +from reagent.gym.envs import Gym logger = logging.getLogger(__name__) @@ -16,14 +16,20 @@ class TestPOMDPEnvironment(unittest.TestCase): def setUp(self): logging.getLogger().setLevel(logging.DEBUG) + def test_string_game_v1(self): + env = Gym(env_name="StringGame-v1") + env.seed(313) + mean_acc_reward = self._test_env(env) + assert 1.0 >= mean_acc_reward + def test_string_game(self): - env = EnvFactory.make("StringGame-v0") + env = Gym(env_name="StringGame-v0") env.seed(313) mean_acc_reward = self._test_env(env) assert 0.1 >= mean_acc_reward def test_pocman(self): - env = EnvFactory.make("Pocman-v0") + env = Gym(env_name="Pocman-v0") env.seed(313) mean_acc_reward = self._test_env(env) assert -80 <= mean_acc_reward <= -70 @@ -36,11 +42,11 @@ def _test_env(self, env): start_time = time.time() env.reset() acc_rw = 0 - for i in range(env._max_episode_steps): + for i in range(1, env.max_steps + 1): env.print_internal_state() action = env.random_action() ob, rw, done, info = env.step(action) - print( + logger.debug( "After action {}: reward {}, observation {} ({})".format( env.print_action(action), rw, ob, env.print_ob(ob) ) @@ -54,7 +60,6 @@ def _test_env(self, env): ) ) break - print("") acc_rws.append(acc_rw) mean_acc_rw = np.mean(acc_rws) diff --git a/reagent/gym/tests/test_world_model.py b/reagent/gym/tests/test_world_model.py index e73e294b6..e0b3832cd 100644 --- a/reagent/gym/tests/test_world_model.py +++ b/reagent/gym/tests/test_world_model.py @@ -7,23 +7,24 @@ import gym import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import torch from reagent.evaluation.world_model_evaluator import ( FeatureImportanceEvaluator, FeatureSensitivityEvaluator, ) from reagent.gym.agents.agent import Agent -from reagent.gym.envs.env_factory import EnvFactory +from reagent.gym.envs import EnvWrapper, Gym from reagent.gym.envs.pomdp.state_embed_env import StateEmbedEnvironment +from reagent.gym.policies.random_policies import make_random_policy_for_env from reagent.gym.preprocessors import make_replay_buffer_trainer_preprocessor from reagent.gym.runners.gymrunner import evaluate_for_n_episodes from reagent.gym.utils import build_normalizer, fill_replay_buffer +from reagent.model_managers.union import ModelManager__Union from reagent.models.world_model import MemoryNetwork from reagent.replay_memory.circular_replay_buffer import ReplayBuffer from reagent.test.base.horizon_test_base import HorizonTestBase from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer -from reagent.workflow.model_managers.union import ModelManager__Union from reagent.workflow.types import RewardOptions from tqdm import tqdm @@ -37,14 +38,6 @@ SEED = 0 -def print_mdnrnn_losses(epoch, batch_num, losses): - logger.info( - f"Printing loss for Epoch {epoch}, Batch {batch_num};\n" - f"loss={losses['loss']}, bce={losses['bce']}," - f"gmm={losses['gmm']}, mse={losses['mse']} \n" - ) - - def calculate_feature_importance( env: gym.Env, trainer: MDNRNNTrainer, @@ -85,7 +78,7 @@ def calculate_feature_importance( def calculate_feature_sensitivity( - env: gym.Env, + env: 
EnvWrapper, trainer: MDNRNNTrainer, use_gpu: bool, test_batch: rlt.MemoryNetworkInput, @@ -114,7 +107,7 @@ def calculate_feature_sensitivity( def train_mdnrnn( - env: gym.Env, + env: EnvWrapper, trainer: MDNRNNTrainer, trainer_preprocessor, num_train_transitions: int, @@ -124,22 +117,27 @@ def train_mdnrnn( # for optional validation test_replay_buffer=None, ): - train_replay_buffer = ReplayBuffer.create_from_env( - env=env, - replay_memory_size=num_train_transitions, + train_replay_buffer = ReplayBuffer( + replay_capacity=num_train_transitions, batch_size=batch_size, stack_size=seq_len, return_everything_as_stack=True, ) - fill_replay_buffer(env, train_replay_buffer, num_train_transitions) + random_policy = make_random_policy_for_env(env) + agent = Agent.create_for_env(env, policy=random_policy) + fill_replay_buffer(env, train_replay_buffer, num_train_transitions, agent) num_batch_per_epoch = train_replay_buffer.size // batch_size + logger.info("Made RBs, starting to train now!") - for epoch in range(num_train_epochs): + optimizer = trainer.configure_optimizers()[0] + for _ in range(num_train_epochs): for i in range(num_batch_per_epoch): batch = train_replay_buffer.sample_transition_batch(batch_size=batch_size) preprocessed_batch = trainer_preprocessor(batch) - losses = trainer.train(preprocessed_batch) - print_mdnrnn_losses(epoch, i, losses) + loss = next(trainer.train_step_gen(preprocessed_batch, i)) + optimizer.zero_grad() + loss.backward() + optimizer.step() # validation if test_replay_buffer is not None: @@ -150,7 +148,6 @@ def train_mdnrnn( ) preprocessed_test_batch = trainer_preprocessor(test_batch) valid_losses = trainer.get_loss(preprocessed_test_batch) - print_mdnrnn_losses(epoch, "validation", valid_losses) trainer.memory_network.mdnrnn.train() return trainer @@ -166,12 +163,12 @@ def train_mdnrnn_and_compute_feature_stats( use_gpu: bool, saved_mdnrnn_path: Optional[str] = None, ): - """ Train MDNRNN Memory Network and compute feature importance/sensitivity. """ - env: gym.Env = EnvFactory.make(env_name) + """Train MDNRNN Memory Network and compute feature importance/sensitivity.""" + env: gym.Env = Gym(env_name=env_name) env.seed(SEED) manager = model.value - trainer = manager.initialize_trainer( + trainer = manager.build_trainer( use_gpu=use_gpu, reward_options=RewardOptions(), normalization_data_map=build_normalizer(env), @@ -180,14 +177,15 @@ def train_mdnrnn_and_compute_feature_stats( device = "cuda" if use_gpu else "cpu" # pyre-fixme[6]: Expected `device` for 2nd param but got `str`. trainer_preprocessor = make_replay_buffer_trainer_preprocessor(trainer, device, env) - test_replay_buffer = ReplayBuffer.create_from_env( - env=env, - replay_memory_size=num_test_transitions, + test_replay_buffer = ReplayBuffer( + replay_capacity=num_test_transitions, batch_size=batch_size, stack_size=seq_len, return_everything_as_stack=True, ) - fill_replay_buffer(env, test_replay_buffer, num_test_transitions) + random_policy = make_random_policy_for_env(env) + agent = Agent.create_for_env(env, policy=random_policy) + fill_replay_buffer(env, test_replay_buffer, num_test_transitions, agent) if saved_mdnrnn_path is None: # train from scratch @@ -230,7 +228,7 @@ def train_mdnrnn_and_compute_feature_stats( def create_embed_rl_dataset( - env: gym.Env, + env: EnvWrapper, memory_network: MemoryNetwork, num_state_embed_transitions: int, batch_size: int, @@ -247,19 +245,22 @@ def create_embed_rl_dataset( # the embedded state will be concatenated with the last step # Ie.. 
(o1,o2,...,on) -> RNN -> h1,h2,...,hn # and we set s_{n+1} = [o_{n+1}, h_n] + # pyre-fixme[45]: Cannot instantiate abstract class `StateEmbedEnvironment`. embed_env = StateEmbedEnvironment( gym_env=env, mdnrnn=memory_network, max_embed_seq_len=seq_len + 1 ) # now create a filled replay buffer of embeddings # new obs shape dim = state_dim + hidden_dim - embed_rb = ReplayBuffer.create_from_env( - env=embed_env, - replay_memory_size=num_state_embed_transitions, - batch_size=batch_size, - stack_size=1, + embed_rb = ReplayBuffer( + replay_capacity=num_state_embed_transitions, batch_size=batch_size, stack_size=1 ) + random_policy = make_random_policy_for_env(env) + agent = Agent.create_for_env(env, policy=random_policy) fill_replay_buffer( - env=embed_env, replay_buffer=embed_rb, desired_size=num_state_embed_transitions + env=embed_env, + replay_buffer=embed_rb, + desired_size=num_state_embed_transitions, + agent=agent, ) batch = embed_rb.sample_transition_batch(batch_size=num_state_embed_transitions) state_min = min(batch.state.min(), batch.next_state.min()).item() @@ -287,12 +288,12 @@ def train_mdnrnn_and_train_on_embedded_env( # pyre-fixme[9]: saved_mdnrnn_path has type `str`; used as `None`. saved_mdnrnn_path: str = None, ): - """ Train an agent on embedded states by the MDNRNN. """ - env = EnvFactory.make(env_name) + """Train an agent on embedded states by the MDNRNN.""" + env = Gym(env_name=env_name) env.seed(SEED) embedding_manager = embedding_model.value - embedding_trainer = embedding_manager.initialize_trainer( + embedding_trainer = embedding_manager.build_trainer( use_gpu=use_gpu, reward_options=RewardOptions(), normalization_data_map=build_normalizer(env), @@ -340,9 +341,11 @@ def train_mdnrnn_and_train_on_embedded_env( state_max_value=state_max, ) agent_manager = train_model.value - agent_trainer = agent_manager.initialize_trainer( + agent_trainer = agent_manager.build_trainer( use_gpu=use_gpu, reward_options=RewardOptions(), + # pyre-fixme[6]: Expected `EnvWrapper` for 1st param but got + # `StateEmbedEnvironment`. normalization_data_map=build_normalizer(embed_env), ) device = "cuda" if use_gpu else "cpu" @@ -353,19 +356,28 @@ def train_mdnrnn_and_train_on_embedded_env( env, ) num_batch_per_epoch = embed_rb.size // batch_size + # FIXME: This has to be wrapped in dataloader for epoch in range(num_agent_train_epochs): for _ in tqdm(range(num_batch_per_epoch), desc=f"epoch {epoch}"): batch = embed_rb.sample_transition_batch(batch_size=batch_size) preprocessed_batch = agent_trainer_preprocessor(batch) + # FIXME: This should be fitted with Lightning's trainer agent_trainer.train(preprocessed_batch) # evaluate model rewards = [] - policy = agent_manager.create_policy(serving=False) + policy = agent_manager.create_policy(agent_trainer, serving=False) + # pyre-fixme[6]: Expected `EnvWrapper` for 1st param but got + # `StateEmbedEnvironment`. agent = Agent.create_for_env(embed_env, policy=policy, device=device) # num_processes=1 needed to avoid workers from dying on CircleCI tests rewards = evaluate_for_n_episodes( - n=num_agent_eval_epochs, env=embed_env, agent=agent, num_processes=1 + n=num_agent_eval_epochs, + # pyre-fixme[6]: Expected `EnvWrapper` for 2nd param but got + # `StateEmbedEnvironment`. 
+ env=embed_env, + agent=agent, + num_processes=1, ) assert ( np.mean(rewards) >= passing_score_bar @@ -382,19 +394,20 @@ def verify_result(result_dict: Dict[str, float], expected_top_features: List[str ), f"top_feature: {top_feature}, expected_top_features: {expected_top_features}" def test_mdnrnn(self): - """ Test MDNRNN feature importance and feature sensitivity. """ + """Test MDNRNN feature importance and feature sensitivity.""" config_path = "configs/world_model/cartpole_features.yaml" feature_importance, feature_sensitivity = self.run_from_config( run_test=train_mdnrnn_and_compute_feature_stats, config_path=os.path.join(curr_dir, config_path), use_gpu=False, ) - TestWorldModel.verify_result(feature_importance, ["state3"]) + TestWorldModel.verify_result(feature_importance, ["state1", "state3"]) TestWorldModel.verify_result(feature_sensitivity, ["state3"]) logger.info("MDNRNN feature test passes!") + @unittest.skip("This test has to be migrated to Lightning") def test_world_model(self): - """ Train DQN on POMDP given features from world model. """ + """Train DQN on POMDP given features from world model.""" config_path = "configs/world_model/discrete_dqn_string.yaml" HorizonTestBase.run_from_config( run_test=train_mdnrnn_and_train_on_embedded_env, diff --git a/reagent/gym/types.py b/reagent/gym/types.py index 86bc2e485..22a134308 100644 --- a/reagent/gym/types.py +++ b/reagent/gym/types.py @@ -6,10 +6,12 @@ from abc import ABC, abstractmethod from dataclasses import asdict, dataclass, field, fields -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Union -import reagent.types as rlt +import numpy as np +import reagent.core.types as rlt import torch +import torch.nn.functional as F @dataclass @@ -21,8 +23,8 @@ class Transition(rlt.BaseDataClass): reward: float terminal: bool log_prob: Optional[float] = None - possible_actions: Optional[List[int]] = None - possible_actions_mask: Optional[List[int]] = None + possible_actions_mask: Optional[np.ndarray] = None + info: Optional[Dict] = None # Same as asdict but filters out none values. def asdict(self): @@ -30,7 +32,7 @@ def asdict(self): def get_optional_fields(cls) -> List[str]: - """ return list of optional annotated fields """ + """return list of optional annotated fields""" ret: List[str] = [] for f in fields(cls): # Check if exactly two arguments exists and one of them are None type @@ -43,15 +45,15 @@ def get_optional_fields(cls) -> List[str]: class Trajectory(rlt.BaseDataClass): transitions: List[Transition] = field(default_factory=list) - def __post_init__(self): + def __post_init__(self) -> None: self.optional_field_exist: Dict[str, bool] = { f: False for f in get_optional_fields(Transition) } - def __len__(self): + def __len__(self) -> int: return len(self.transitions) - def add_transition(self, transition: Transition): + def add_transition(self, transition: Transition) -> None: if len(self) == 0: # remember which optional fields should be filled for f in self.optional_field_exist: @@ -76,13 +78,32 @@ def __getattr__(self, attr: str): return ret def calculate_cumulative_reward(self, gamma: float = 1.0): - """ Return (discounted) sum of rewards. 
""" + """Return (discounted) sum of rewards.""" num_transitions = len(self) assert num_transitions > 0, "called on empty trajectory" rewards = self.reward - discounts = [gamma ** i for i in range(num_transitions)] + discounts = [gamma**i for i in range(num_transitions)] return sum(reward * discount for reward, discount in zip(rewards, discounts)) + def to_dict(self): + d = {"action": F.one_hot(torch.from_numpy(np.stack(self.action)), 2)} + for f in [ + "observation", + "reward", + "terminal", + "log_prob", + "possible_actions_mask", + ]: + if self.optional_field_exist.get(f, True): + f_value = getattr(self, f) + if np.isscalar(f_value[0]): + # scalar values + d[f] = torch.tensor(f_value) + else: + # vector values, need to stack + d[f] = torch.from_numpy(np.stack(f_value)).float() + return d + class Sampler(ABC): """Given scores, select the action.""" @@ -96,15 +117,17 @@ def log_prob(self, scores: Any, action: torch.Tensor) -> torch.Tensor: raise NotImplementedError() def update(self) -> None: - """ Call to update internal parameters (e.g. decay epsilon) """ + """Call to update internal parameters (e.g. decay epsilon)""" pass # From preprocessed observation, produce scores for sampler to select action -Scorer = Callable[[Any], Any] +DiscreteScorer = Callable[[Any, Optional[torch.Tensor]], Any] +ContinuousScorer = Callable[[Any], Any] +Scorer = Union[DiscreteScorer, ContinuousScorer] # Transform ReplayBuffer's transition batch to trainer.train -TrainerPreprocessor = Callable[[Any], rlt.PreprocessedTrainingBatch] +TrainerPreprocessor = Callable[[Any], Any] """ Called after env.step(action) @@ -112,6 +135,10 @@ def update(self) -> None: """ PostStep = Callable[[Transition], None] +""" Called after end of episode +""" +PostEpisode = Callable[[Trajectory, Dict], None] + @dataclass class GaussianSamplerScore(rlt.BaseDataClass): diff --git a/reagent/gym/utils.py b/reagent/gym/utils.py index 51407cdc3..8c8c47372 100644 --- a/reagent/gym/utils.py +++ b/reagent/gym/utils.py @@ -2,42 +2,46 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import logging -from typing import Dict, Optional +import random +from typing import Dict, List, Optional -from gym import Env, spaces +import gym +import numpy as np +import pandas as pd +import torch # @manual +import torch.nn.functional as F +from gym import spaces +from reagent.core.parameters import NormalizationData, NormalizationKey, ProblemDomain from reagent.gym.agents.agent import Agent from reagent.gym.agents.post_step import add_replay_buffer_post_step -from reagent.gym.policies.random_policies import make_random_policy_for_env -from reagent.gym.runners.gymrunner import run_episode -from reagent.parameters import NormalizationData, NormalizationKey -from reagent.replay_memory.circular_replay_buffer import ReplayBuffer -from reagent.test.base.utils import ( +from reagent.gym.envs import EnvWrapper +from reagent.gym.normalizers import ( + discrete_action_normalizer, only_continuous_action_normalizer, only_continuous_normalizer, ) +from reagent.gym.policies.random_policies import make_random_policy_for_env +from reagent.gym.runners.gymrunner import run_episode +from reagent.replay_memory import ReplayBuffer from tqdm import tqdm logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) +SEED = 0 -def get_max_steps(env: Env) -> Optional[int]: - possible_keys = [ - # gym should have _max_episode_steps - "_max_episode_steps", - # Minigrid should have max_steps - "max_steps", - ] - for key in possible_keys: - res = getattr(env, key, None) - if res is not None: - return res - return None +try: + from reagent.gym.envs import RecSim # noqa + HAS_RECSIM = True +except ImportError: + HAS_RECSIM = False -def fill_replay_buffer(env: Env, replay_buffer: ReplayBuffer, desired_size: int): - """ Fill replay buffer with random transitions until size reaches desired_size. """ + +def fill_replay_buffer( + env, replay_buffer: ReplayBuffer, desired_size: int, agent: Agent +): + """Fill replay buffer with transitions until size reaches desired_size.""" assert ( 0 < desired_size and desired_size <= replay_buffer._replay_capacity ), f"It's not true that 0 < {desired_size} <= {replay_buffer._replay_capacity}." @@ -45,13 +49,13 @@ def fill_replay_buffer(env: Env, replay_buffer: ReplayBuffer, desired_size: int) f"Replay buffer already has {replay_buffer.size} elements. " f"(more than desired_size = {desired_size})" ) - logger.info(f"Starting to fill replay buffer to size: {desired_size}.") - random_policy = make_random_policy_for_env(env) - post_step = add_replay_buffer_post_step(replay_buffer, env=env) - agent = Agent.create_for_env( - env, policy=random_policy, post_transition_callback=post_step + logger.info( + f" Starting to fill replay buffer using policy to size: {desired_size}." 
) - max_episode_steps = get_max_steps(env) + post_step = add_replay_buffer_post_step(replay_buffer, env=env) + agent.post_transition_callback = post_step + + max_episode_steps = env.max_steps with tqdm( total=desired_size - replay_buffer.size, desc=f"Filling replay buffer from {replay_buffer.size} to size {desired_size}", @@ -59,17 +63,20 @@ def fill_replay_buffer(env: Env, replay_buffer: ReplayBuffer, desired_size: int) mdp_id = 0 while replay_buffer.size < desired_size: last_size = replay_buffer.size - max_steps = desired_size - replay_buffer.size - 1 + max_steps = desired_size - replay_buffer.size if max_episode_steps is not None: max_steps = min(max_episode_steps, max_steps) run_episode(env=env, agent=agent, mdp_id=mdp_id, max_steps=max_steps) size_delta = replay_buffer.size - last_size - assert ( - size_delta >= 0 - ), f"size delta is {size_delta} which should be non-negative." + # The assertion below is commented out because it can't + # support input samples which has seq_len>1. This should be + # treated as a bug, and need to be fixed in the future. + # assert ( + # size_delta >= 0 + # ), f"size delta is {size_delta} which should be non-negative." pbar.update(n=size_delta) mdp_id += 1 - if size_delta == 0: + if size_delta <= 0: # replay buffer size isn't increasing... so stop early break @@ -81,7 +88,7 @@ def fill_replay_buffer(env: Env, replay_buffer: ReplayBuffer, desired_size: int) ) -def build_state_normalizer(env): +def build_state_normalizer(env: EnvWrapper): if isinstance(env.observation_space, spaces.Box): assert ( len(env.observation_space.shape) == 1 @@ -98,12 +105,10 @@ def build_state_normalizer(env): raise NotImplementedError(f"{env.observation_space} not supported") -def build_action_normalizer(env): +def build_action_normalizer(env: EnvWrapper): action_space = env.action_space if isinstance(action_space, spaces.Discrete): - return only_continuous_normalizer( - list(range(action_space.n)), min_value=0, max_value=1 - ) + return discrete_action_normalizer(list(range(action_space.n))) elif isinstance(action_space, spaces.Box): assert ( len(action_space.shape) == 1 @@ -119,12 +124,293 @@ def build_action_normalizer(env): raise NotImplementedError(f"{action_space} not supported.") -def build_normalizer(env) -> Dict[str, NormalizationData]: - return { - NormalizationKey.STATE: NormalizationData( - dense_normalization_parameters=build_state_normalizer(env) - ), - NormalizationKey.ACTION: NormalizationData( - dense_normalization_parameters=build_action_normalizer(env) - ), +def build_normalizer(env: EnvWrapper) -> Dict[str, NormalizationData]: + try: + return env.normalization_data + except AttributeError: + # TODO: make this a property of EnvWrapper? + # pyre-fixme[16]: Module `envs` has no attribute `RecSim`. 
+ if HAS_RECSIM and isinstance(env, RecSim): + return { + NormalizationKey.STATE: NormalizationData( + dense_normalization_parameters=only_continuous_normalizer( + list(range(env.observation_space["user"].shape[0])) + ) + ), + NormalizationKey.ITEM: NormalizationData( + dense_normalization_parameters=only_continuous_normalizer( + list(range(env.observation_space["doc"]["0"].shape[0])) + ) + ), + } + return { + NormalizationKey.STATE: NormalizationData( + dense_normalization_parameters=build_state_normalizer(env) + ), + NormalizationKey.ACTION: NormalizationData( + dense_normalization_parameters=build_action_normalizer(env) + ), + } + + +def create_df_from_replay_buffer( + env, + problem_domain: ProblemDomain, + desired_size: int, + multi_steps: Optional[int], + ds: str, + shuffle_df: bool = True, +) -> pd.DataFrame: + # fill the replay buffer + set_seed(env, SEED) + if multi_steps is None: + update_horizon = 1 + return_as_timeline_format = False + else: + update_horizon = multi_steps + return_as_timeline_format = True + is_multi_steps = multi_steps is not None + + # The last element of replay buffer always lacks + # next_action and next_possible_actions. + # To get full data for every returned sample, we create + # replay buffer of desired_size + 1 and discard the last element. + replay_buffer = ReplayBuffer( + replay_capacity=desired_size + 1, + batch_size=1, + update_horizon=update_horizon, + return_as_timeline_format=return_as_timeline_format, + ) + random_policy = make_random_policy_for_env(env) + agent = Agent.create_for_env(env, policy=random_policy) + fill_replay_buffer(env, replay_buffer, desired_size + 1, agent) + + batch = replay_buffer.sample_transition_batch( + batch_size=desired_size, indices=torch.arange(desired_size) + ) + n = batch.state.shape[0] + logger.info(f"Creating df of size {n}.") + + def discrete_feat_transform(elem) -> str: + """query data expects str format""" + return str(elem.item()) + + def continuous_feat_transform(elem: List[float]) -> Dict[int, float]: + """query data expects sparse format""" + assert isinstance(elem, torch.Tensor), f"{type(elem)} isn't tensor" + assert len(elem.shape) == 1, f"{elem.shape} isn't 1-dimensional" + return {i: s.item() for i, s in enumerate(elem)} + + def make_parametric_feat_transform(one_hot_dim: int): + """one-hot and then continuous_feat_transform""" + + def transform(elem) -> Dict[int, float]: + elem_tensor = torch.tensor(elem.item()) + one_hot_feat = F.one_hot(elem_tensor, one_hot_dim).float() + return continuous_feat_transform(one_hot_feat) + + return transform + + state_features = feature_transform(batch.state, continuous_feat_transform) + next_state_features = feature_transform( + batch.next_state, + continuous_feat_transform, + is_next_with_multi_steps=is_multi_steps, + ) + + if problem_domain == ProblemDomain.DISCRETE_ACTION: + # discrete action is str + action = feature_transform(batch.action, discrete_feat_transform) + next_action = feature_transform( + batch.next_action, + discrete_feat_transform, + is_next_with_multi_steps=is_multi_steps, + replace_when_terminal="", + terminal=batch.terminal, + ) + elif problem_domain == ProblemDomain.PARAMETRIC_ACTION: + # continuous action is Dict[int, double] + assert isinstance(env.action_space, gym.spaces.Discrete) + parametric_feat_transform = make_parametric_feat_transform(env.action_space.n) + action = feature_transform(batch.action, parametric_feat_transform) + next_action = feature_transform( + batch.next_action, + parametric_feat_transform, + 
is_next_with_multi_steps=is_multi_steps, + replace_when_terminal={}, + terminal=batch.terminal, + ) + elif problem_domain == ProblemDomain.CONTINUOUS_ACTION: + action = feature_transform(batch.action, continuous_feat_transform) + next_action = feature_transform( + batch.next_action, + continuous_feat_transform, + is_next_with_multi_steps=is_multi_steps, + replace_when_terminal={}, + terminal=batch.terminal, + ) + elif problem_domain == ProblemDomain.MDN_RNN: + action = feature_transform(batch.action, discrete_feat_transform) + assert multi_steps is not None + next_action = feature_transform( + batch.next_action, + discrete_feat_transform, + is_next_with_multi_steps=True, + replace_when_terminal="", + terminal=batch.terminal, + ) + else: + raise NotImplementedError(f"model type: {problem_domain}.") + + if multi_steps is None: + time_diff = [1] * n + reward = batch.reward.squeeze(1).tolist() + metrics = [{"reward": r} for r in reward] + else: + time_diff = [[1] * len(ns) for ns in next_state_features] + reward = [reward_list.tolist() for reward_list in batch.reward] + metrics = [ + [{"reward": r.item()} for r in reward_list] for reward_list in batch.reward + ] + + # TODO(T67265031): change this to int + mdp_id = [str(i.item()) for i in batch.mdp_id] + sequence_number = batch.sequence_number.squeeze(1).tolist() + # in the product data, all sequence_number_ordinal start from 1. + # So to be consistent with the product data. + + sequence_number_ordinal = (batch.sequence_number.squeeze(1) + 1).tolist() + action_probability = batch.log_prob.exp().squeeze(1).tolist() + df_dict = { + "state_features": state_features, + "next_state_features": next_state_features, + "action": action, + "next_action": next_action, + "reward": reward, + "action_probability": action_probability, + "metrics": metrics, + "time_diff": time_diff, + "mdp_id": mdp_id, + "sequence_number": sequence_number, + "sequence_number_ordinal": sequence_number_ordinal, + "ds": [ds] * n, } + + if problem_domain == ProblemDomain.PARAMETRIC_ACTION: + # Possible actions are List[Dict[int, float]] + assert isinstance(env.action_space, gym.spaces.Discrete) + possible_actions = [{i: 1.0} for i in range(env.action_space.n)] + + elif problem_domain == ProblemDomain.DISCRETE_ACTION: + # Possible actions are List[str] + assert isinstance(env.action_space, gym.spaces.Discrete) + possible_actions = [str(i) for i in range(env.action_space.n)] + + elif problem_domain == ProblemDomain.MDN_RNN: + # Possible actions are List[str] + assert isinstance(env.action_space, gym.spaces.Discrete) + possible_actions = [str(i) for i in range(env.action_space.n)] + + # these are fillers, which should have correct shape + pa_features = range(n) + pna_features = time_diff + if problem_domain in ( + ProblemDomain.DISCRETE_ACTION, + ProblemDomain.PARAMETRIC_ACTION, + ProblemDomain.MDN_RNN, + ): + + def pa_transform(x): + return possible_actions + + df_dict["possible_actions"] = feature_transform(pa_features, pa_transform) + df_dict["possible_next_actions"] = feature_transform( + pna_features, + pa_transform, + is_next_with_multi_steps=is_multi_steps, + replace_when_terminal=[], + terminal=batch.terminal, + ) + + df = pd.DataFrame(df_dict) + # validate df + validate_mdp_ids_seq_nums(df) + if shuffle_df: + # shuffling (sample the whole batch) + df = df.reindex(np.random.permutation(df.index)) + return df + + +def set_seed(env: gym.Env, seed: int): + np.random.seed(seed) + random.seed(seed) + torch.manual_seed(seed) + env.seed(seed) + env.action_space.seed(seed) + + +def 
feature_transform( + features, + single_elem_transform, + is_next_with_multi_steps=False, + replace_when_terminal=None, + terminal=None, +): + """feature_transform is a method on a single row. + We assume features is List[features] (batch of features). + This can also be called for next_features with multi_steps which we assume + to be List[List[features]]. First List is denoting that it's a batch, + second List is denoting that a single row consists of a list of features. + """ + if is_next_with_multi_steps: + if terminal is None: + return [ + [single_elem_transform(feat) for feat in multi_steps_features] + for multi_steps_features in features + ] + else: + # for next features where we replace them when terminal + assert replace_when_terminal is not None + return [ + [single_elem_transform(feat) for feat in multi_steps_features] + if not terminal[idx] + else [single_elem_transform(feat) for feat in multi_steps_features[:-1]] + + [replace_when_terminal] + for idx, multi_steps_features in enumerate(features) + ] + else: + if terminal is None: + return [single_elem_transform(feat) for feat in features] + else: + assert replace_when_terminal is not None + return [ + single_elem_transform(feat) + if not terminal[idx] + else replace_when_terminal + for idx, feat in enumerate(features) + ] + + +def validate_mdp_ids_seq_nums(df): + mdp_ids = list(df["mdp_id"]) + sequence_numbers = list(df["sequence_number"]) + unique_mdp_ids = set(mdp_ids) + prev_mdp_id, prev_seq_num = None, None + mdp_count = 0 + for mdp_id, seq_num in zip(mdp_ids, sequence_numbers): + if prev_mdp_id is None or mdp_id != prev_mdp_id: + mdp_count += 1 + prev_mdp_id = mdp_id + else: + assert seq_num == prev_seq_num + 1, ( + f"For mdp_id {mdp_id}, got {seq_num} <= {prev_seq_num}." + f"Sequence number must be in increasing order.\n" + f"Zip(mdp_id, seq_num): " + f"{list(zip(mdp_ids, sequence_numbers))}" + ) + prev_seq_num = seq_num + + assert len(unique_mdp_ids) == mdp_count, "MDPs are broken up. {} vs {}".format( + len(unique_mdp_ids), mdp_count + ) + return diff --git a/reagent/json_serialize.py b/reagent/json_serialize.py deleted file mode 100644 index 7169308e6..000000000 --- a/reagent/json_serialize.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
- -import collections -import json -import logging -from dataclasses import asdict, dataclass, fields, is_dataclass -from typing import Any, NamedTuple, Type, Union - - -logger = logging.getLogger(__name__) - - -def object_to_json(o: Any) -> str: - assert is_dataclass(o), "Only dataclasses can be serialized" - return json.dumps(prepare_for_json(o)) - - -def prepare_for_json(o: Any) -> Any: - if isinstance(o, NamedTuple): - d = {} - for field_name in o._fields: - d[field_name] = prepare_for_json(getattr(o, field_name)) - return d - elif is_dataclass(o): - return asdict(o) - else: - return o - - -def json_to_object(j: str, to_type: Type) -> Any: - assert is_dataclass(to_type), "Only dataclasses can be deserialized" - j_obj = json.loads(j) - return from_json(j_obj, to_type) - - -def from_json(j_obj: Any, to_type: Type) -> Any: - if j_obj is None: - return None - logger.debug("TYPE: ") - logger.debug(j_obj) - logger.debug(to_type) - if getattr(to_type, "_field_types", None) is not None: - # Type is a NamedTuple, dive in - field_data = {} - for field_name in j_obj.keys(): - assert ( - field_name in to_type._fields - ), "Item in dict missing from {}: {}".format(str(to_type), field_name) - field_value = j_obj[field_name] - object_type = to_type._field_types[field_name] - if getattr(object_type, "__origin__", None) is Union: - assert len(object_type.__args__) == 2 and object_type.__args__[ - 1 - ] == type( - None - ), "Only Unions of [X, None] (a.k.a. Optional[X]) are supported" - object_type = object_type.__args__[0] - field_data[field_name] = from_json(field_value, object_type) - return to_type(**field_data) # Create the NamedTuple - elif is_dataclass(to_type): - # Type is a dataclass, dive in - field_types = {} - for field in fields(to_type): - field_types[field.name] = field.type - field_data = {} - for field_name in j_obj.keys(): - assert field_name in field_types, "Item in dict missing from {}: {}".format( - str(to_type), field_name - ) - field_value = j_obj[field_name] - object_type = field_types[field_name] - if getattr(object_type, "__origin__", None) is Union: - assert len(object_type.__args__) == 2 and object_type.__args__[ - 1 - ] == type( - None - ), "Only Unions of [X, None] (a.k.a. Optional[X]) are supported" - object_type = object_type.__args__[0] - field_data[field_name] = from_json(field_value, object_type) - return to_type(**field_data) # Create the NamedTuple - elif getattr(to_type, "_name", None) is not None and to_type._name == "List": - assert isinstance( - j_obj, list - ), "Tried to set the wrong type to a list: {}".format(j_obj) - list_inner_type = to_type.__args__[0] - retval_list = [] - for i in j_obj: - retval_list.append(from_json(i, list_inner_type)) - return retval_list - elif getattr(to_type, "_name", None) is not None and to_type._name == "Dict": - assert isinstance( - j_obj, dict - ), "Tried to set the wrong type to a dict: {}".format(j_obj) - dict_inner_key_type = to_type.__args__[0] - dict_inner_value_type = to_type.__args__[1] - retval_dict = {} - for k, v in j_obj.items(): - retval_dict[from_json(k, dict_inner_key_type)] = from_json( - v, dict_inner_value_type - ) - return retval_dict - else: - return j_obj diff --git a/reagent/lite/__init__.py b/reagent/lite/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/lite/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
diff --git a/reagent/lite/optimizer.py b/reagent/lite/optimizer.py new file mode 100644 index 000000000..b166db1bc --- /dev/null +++ b/reagent/lite/optimizer.py @@ -0,0 +1,1643 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import abc +import heapq +import logging +from collections import defaultdict, deque +from math import floor +from typing import Any, Callable, Dict, List, Optional, Tuple + +import nevergrad as ng +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from nevergrad.parametrization.choice import Choice +from torch.distributions import Normal + +logger = logging.getLogger(__name__) + +ANNEAL_RATE = 0.9997 +LEARNING_RATE = 0.001 +BATCH_SIZE = 512 +# People rarely need more than that +MAX_NUM_BEST_SOLUTIONS = 1000 +GREEDY_TEMP = 0.0001 + + +def sample_from_logits( + keyed_logits: Dict[str, nn.Parameter], batch_size: int, temp: float +) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]: + """Return sampled solutions and sampled log probabilities""" + sampled_log_probs = torch.zeros(batch_size, 1) + sampled_solutions = {} + for k, logits in keyed_logits.items(): + softmax_val = F.softmax(logits / temp, dim=-1).squeeze(0) + samples = torch.multinomial(softmax_val, batch_size, replacement=True) + sampled_prob = softmax_val[samples].reshape(-1, 1) + sampled_log_probs += torch.log(sampled_prob) + sampled_solutions[k] = samples + return sampled_solutions, sampled_log_probs + + +def obj_func_scaler( + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]], + exp_offset_and_scale: Optional[Tuple[float, float]], +) -> Optional[Callable]: + """ + Scale objective functions to make optimizers get out of local minima more easily. + + The scaling formula is: exp((reward - offset) / scale) + + if obj_exp_offset_scale is None, do not scale the obj_function (i.e., reward == scaled_reward) + """ + if obj_func is None: + return None + + if exp_offset_and_scale is not None: + offset, scale = exp_offset_and_scale + + def obj_func_scaled(*args, **kwargs): + x = obj_func(*args, **kwargs) + if exp_offset_and_scale is not None: + return x, torch.exp((x - offset) / scale) + else: + return x, x + + return obj_func_scaled + + +def _num_of_params(model: nn.Module) -> int: + return len(torch.cat([p.flatten() for p in model.parameters()])) + + +def sol_to_tensors( + sampled_sol: Dict[str, torch.Tensor], input_param: ng.p.Dict +) -> torch.Tensor: + one_hot = [ + # pyre-fixme[16]: `Parameter` has no attribute `choices`. + F.one_hot(sampled_sol[k], num_classes=len(input_param[k].choices)).type( + torch.FloatTensor + ) + for k in sorted(sampled_sol.keys()) + ] + batch_tensors = torch.cat(one_hot, dim=-1) + return batch_tensors + + +class BestResultsQueue: + """Maintain the `max_len` lowest numbers""" + + def __init__(self, max_len: int) -> None: + self.max_len = max_len + self.reward_sol_dict = defaultdict(set) + self.heap = [] + + def insert(self, reward: torch.Tensor, sol: Dict[str, torch.Tensor]) -> None: + # Negate the reward because maximal N elements will be kept + # in heap, while all optimizers are a minimizer. 
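+        # e.g., with max_len=2 and incoming rewards [3, 1, 2], the heap ends up holding
+        # (-1, -2), i.e., the two lowest original rewards, 1 and 2.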
+ reward = -reward + sol_str = str(sol) + # skip duplicated solution + if reward in self.reward_sol_dict and sol_str in self.reward_sol_dict[reward]: + return + self.reward_sol_dict[reward].add(sol_str) + if len(self.heap) < self.max_len: + heapq.heappush(self.heap, (reward, sol_str, sol)) + else: + old_r, old_sol_str, old_sol = heapq.heappushpop( + self.heap, (reward, sol_str, sol) + ) + self.reward_sol_dict[old_r].remove(old_sol_str) + + def topk(self, k: int) -> List[Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: + k = min(k, len(self.heap)) + res = heapq.nlargest(k, self.heap) + # a list of (reward, sol) tuples + return [(-r[0], r[2]) for r in res] + + +class ComboOptimizerBase: + """ + The class contains a series of APIs shared between various combinatorial optimization + optimizers. + + Basic usage: + 1. Create a parameter space and obj function to be minimized + 2. Create optimizer = SomeComboOptimizer(param, obj_func, ...) + 3. Call optimizer.optimize_step() until the budget is exhausted + + optimize_step() encapsulates two main steps: + a. sample_internal(), which samples promising solutions to query during training. + b. update_params(), which updates the optimizer's parameters using the rewards obtained + on the sampled solutions from sample_internal() + + The user is free to manually call sample_internal() and update_params() separately + instead of calling optimize_step(). While calling optimize_step() is more succinct in + code, calling sample_internal() and update_params() separately allows more flexibility + (e.g., the user may perform any additional customized logic between the two functions). + + Once the training is done (i.e., the user no longer has the budget to call optimize_step()), + the user can use optimizer.sample() to sample solutions based on the learned optimizer. + The user can also use optimizer.best_solutions() to return the top best solutions discovered + during the training. + + Each optimizer has its own docstring test for further reference. + """ + + def __init__( + self, + param: ng.p.Dict, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + batch_size: int = BATCH_SIZE, + obj_exp_offset_scale: Optional[Tuple[float, float]] = None, + ) -> None: + for k in param: + assert isinstance( + param[k], Choice + ), "Only support discrete parameterization now" + self.param = param + self.obj_func = obj_func_scaler(obj_func, obj_exp_offset_scale) + self.batch_size = batch_size + self.obj_exp_scale = obj_exp_offset_scale + self.last_sample_internal_res = None + self.best_sols = BestResultsQueue(MAX_NUM_BEST_SOLUTIONS) + self._init() + + def _init(self) -> None: + pass + + def optimize_step(self) -> Tuple: + assert self.obj_func is not None, ( + "obj_func not provided. Can't call optimize_step() for optimization. " + "You have to perform manual optimization, i.e., call sample_internal() then update_params()" + ) + + all_results = self._optimize_step() + sampled_solutions, sampled_reward = all_results[0], all_results[1] + self._maintain_best_solutions(sampled_solutions, sampled_reward) + return all_results + + def _maintain_best_solutions( + self, sampled_sols: Dict[str, torch.Tensor], sampled_reward: torch.Tensor + ) -> None: + for idx in range(len(sampled_reward)): + r = sampled_reward[idx].item() + sol = {k: sampled_sols[k][idx] for k in sampled_sols} + # pyre-fixme[6]: For 1st param expected `Tensor` but got `Union[bool, + # float, int]`.
+ self.best_sols.insert(r, sol) + + def best_solutions( + self, k: int = 1 + ) -> List[Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: + """ + k solutions with the smallest rewards + Return is a list of tuples (reward, solution) + """ + return self.best_sols.topk(k) + + @abc.abstractmethod + def _optimize_step(self) -> Tuple: + """ + The main component of ComboOptimizer.optimize_step(). The user only + needs to loop over optimizer_step() until the budget runs out. + + _optimize_step() will call sample_internal() and update_params() + to perform sampling and parameter updating + """ + raise NotImplementedError() + + @abc.abstractmethod + def sample_internal( + self, + batch_size: Optional[int] = None, + ) -> Tuple: + """ + Record and return sampled solutions and any other important + information during learning / training. The return type is a tuple, + whose first element is always the sampled solutions (Dict[str, torch.Tensor]). + + It samples self.batch_size number of solutions (i.e., the batch size used during + training), unless batch_size is provided. + """ + raise NotImplementedError() + + @abc.abstractmethod + def update_params( + self, + reward: torch.Tensor, + ) -> None: + """ + Update model parameters by reward. Reward is objective function + values evaluated on the solutions sampled by sample_internal() + """ + raise NotImplementedError() + + def sample( + self, batch_size: int, temp: Optional[float] = None + ) -> Dict[str, torch.Tensor]: + """ + Return sampled solutions, keyed by parameter names. + For discrete parameters, the values are choice indices; + For continuous parameters, the values are sampled float vectors. + + This function is usually called after learning is done. + """ + raise NotImplementedError() + + def indices_to_raw_choices( + self, sampled_sol: Dict[str, torch.Tensor] + ) -> List[Dict[str, str]]: + if not sampled_sol: + # empty sampled_sol + return [{} for _ in range(self.batch_size)] + + batch_size = list(sampled_sol.values())[0].shape[0] + sampled_sol_i_vals = [] + for i in range(batch_size): + sampled_sol_i = {k: sampled_sol[k][i] for k in sampled_sol} + sampled_sol_i_val = { + # pyre-fixme[16]: `Parameter` has no attribute `choices`. + k: self.param[k].choices.value[v] + for k, v in sampled_sol_i.items() + } + sampled_sol_i_vals.append(sampled_sol_i_val) + return sampled_sol_i_vals + + +class RandomSearchOptimizer(ComboOptimizerBase): + """ + Find the best solution to minimize a black-box function by random search + + Args: + param (ng.p.Dict): a nevergrad dictionary for specifying input choices + + obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]): + a function which consumes sampled solutions and returns + rewards as tensors of shape (batch_size, 1). + + The input dictionary has choice names as the key and sampled choice + indices as the value (of shape (batch_size, )) + + sampling_weights (Optional[Dict[str, np.ndarray]]): + Instead of uniform sampling, we sample solutions with preferred + weights. Key: choice name, value: sampling weights + + Example: + >>> _ = torch.manual_seed(0) + >>> np.random.seed(0) + >>> BATCH_SIZE = 4 + >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"])) + >>> + >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]): + ... reward = torch.ones(BATCH_SIZE, 1) + ... for i in range(BATCH_SIZE): + ... # the best action is "red" + ... if sampled_sol['choice1'][i] == 2: + ... reward[i, 0] = 0.0 + ... return reward + ... 
+ >>> optimizer = RandomSearchOptimizer(ng_param, obj_func, batch_size=BATCH_SIZE) + >>> for i in range(10): + ... res = optimizer.optimize_step() + ... + >>> best_reward, best_choice = optimizer.best_solutions(k=1)[0] + >>> assert best_reward == 0 + >>> assert best_choice['choice1'] == 2 + """ + + def __init__( + self, + param: ng.p.Dict, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + batch_size: int = BATCH_SIZE, + sampling_weights: Optional[Dict[str, np.ndarray]] = None, + ) -> None: + self.sampling_weights = sampling_weights + super().__init__( + param, + obj_func, + batch_size=batch_size, + ) + + def sample( + self, batch_size: int, temp: Optional[float] = None + ) -> Dict[str, torch.Tensor]: + assert temp is None, "temp is not used in Random Search" + sampled_sol = {} + for k, param in self.param.items(): + # pyre-fixme[16]: `Parameter` has no attribute `choices`. + num_choices = len(param.choices) + if self.sampling_weights is None: + sampled_sol[k] = torch.randint(num_choices, (batch_size,)) + else: + weight = self.sampling_weights[k] + sampled_sol[k] = torch.tensor( + np.random.choice(num_choices, batch_size, replace=True, p=weight) + ) + return sampled_sol + + def sample_internal( + self, batch_size: Optional[int] = None + ) -> Tuple[Dict[str, torch.Tensor]]: + batch_size = batch_size or self.batch_size + sampled_sol = self.sample(batch_size, temp=None) + self.last_sample_internal_res = sampled_sol + return (sampled_sol,) + + def update_params(self, reward: torch.Tensor): + self.last_sample_internal_res = None + + def _optimize_step(self) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]: + sampled_solutions = self.sample_internal(self.batch_size)[0] + sampled_reward, _ = self.obj_func(sampled_solutions) + sampled_reward = sampled_reward.detach() + self.update_params(sampled_reward) + return sampled_solutions, sampled_reward + + +class NeverGradOptimizer(ComboOptimizerBase): + """ + Minimize a black-box function using NeverGrad, Rapin & Teytaud, 2018. + https://facebookresearch.github.io/nevergrad/. + + Args: + param (ng.p.Dict): a nevergrad dictionary for specifying input choices + + estimated_budgets (int): estimated number of budgets (objective evaluation + times) for nevergrad to perform auto tuning. + + obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]): + a function which consumes sampled solutions and returns + rewards as tensors of shape (batch_size, 1). + + The input dictionary has choice names as the key and sampled choice + indices as the value (of shape (batch_size, )) + + optimizer_name (Optional[str]): ng optimizer to be used specifically + All possible nevergrad optimizers are available at: + https://facebookresearch.github.io/nevergrad/optimization.html#choosing-an-optimizer. + If not specified, we use the meta optimizer NGOpt + + Example: + + >>> _ = torch.manual_seed(0) + >>> np.random.seed(0) + >>> BATCH_SIZE = 4 + >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"])) + >>> + >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]): + ... reward = torch.ones(BATCH_SIZE, 1) + ... for i in range(BATCH_SIZE): + ... # the best action is "red" + ... if sampled_sol['choice1'][i] == 2: + ... reward[i, 0] = 0.0 + ... return reward + ... + >>> estimated_budgets = 40 + >>> optimizer = NeverGradOptimizer( + ... ng_param, estimated_budgets, obj_func, batch_size=BATCH_SIZE, + ... ) + >>> + >>> for i in range(10): + ... res = optimizer.optimize_step() + ... 
+ >>> best_reward, best_choice = optimizer.best_solutions(k=1)[0] + >>> assert best_reward == 0 + >>> assert best_choice['choice1'] == 2 + """ + + def __init__( + self, + param: ng.p.Dict, + estimated_budgets: int, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + batch_size: int = BATCH_SIZE, + optimizer_name: Optional[str] = None, + ) -> None: + self.estimated_budgets = estimated_budgets + self.optimizer_name = optimizer_name + self.optimizer = None + self.choice_to_index = {} + super().__init__( + param, + obj_func, + batch_size=batch_size, + ) + + def _init(self) -> None: + optimizer_name = self.optimizer_name or "NGOpt" + logger.info(f"Nevergrad uses {optimizer_name} optimizer") + self.optimizer = ng.optimizers.registry[optimizer_name]( + parametrization=self.param, + budget=self.estimated_budgets, + num_workers=self.batch_size, + ) + for k, param in self.param.items(): + # pyre-fixme[16]: `Parameter` has no attribute `choices`. + self.choice_to_index[k] = {v: i for i, v in enumerate(param.choices.value)} + + def sample( + self, batch_size: int, temp: Optional[float] = None + ) -> Dict[str, torch.Tensor]: + assert temp is None, "temp is not used in Random Search" + ng_sols_idx = {k: torch.zeros(batch_size) for k in self.param} + for i in range(batch_size): + ng_sol = self.optimizer.ask().value + for k in ng_sol: + ng_sols_idx[k][i] = self.choice_to_index[k][ng_sol[k]] + return ng_sols_idx + + def sample_internal(self, batch_size: Optional[int] = None) -> Tuple: + """ + Return sampled solutions in two formats. + (1) our own format, which is a dictionary and consistent with other optimizers. + The dictionary has choice names as the key and sampled choice indices as the + value (of shape (batch_size, )) + (2) nevergrad format returned by optimizer.ask() + """ + batch_size = batch_size or self.batch_size + ng_sols_idx = {k: torch.zeros(batch_size, dtype=torch.long) for k in self.param} + ng_sols_raw = [] + for i in range(batch_size): + ng_sol = self.optimizer.ask() + ng_sols_raw.append(ng_sol) + ng_sol_val = ng_sol.value + for k in ng_sol_val: + ng_sols_idx[k][i] = self.choice_to_index[k][ng_sol_val[k]] + self.last_sample_internal_res = (ng_sols_idx, ng_sols_raw) + return ng_sols_idx, ng_sols_raw + + def update_params(self, reward: torch.Tensor) -> None: + _, sampled_sols = self.last_sample_internal_res + for ng_sol, r in zip(sampled_sols, reward): + self.optimizer.tell(ng_sol, r.item()) + self.last_sample_internal_res = None + + def _optimize_step(self) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]: + sampled_sol_idxs, sampled_sols = self.sample_internal(self.batch_size) + sampled_reward, _ = self.obj_func(sampled_sol_idxs) + sampled_reward = sampled_reward.detach() + self.update_params(sampled_reward) + return sampled_sol_idxs, sampled_reward + + +class LogitBasedComboOptimizerBase(ComboOptimizerBase): + def __init__( + self, + param: ng.p.Dict, + start_temp: float, + min_temp: float, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + learning_rate: float = LEARNING_RATE, + anneal_rate: float = ANNEAL_RATE, + batch_size: int = BATCH_SIZE, + obj_exp_offset_scale: Optional[Tuple[float, float]] = None, + ) -> None: + self.temp = start_temp + self.min_temp = min_temp + self.anneal_rate = anneal_rate + self.learning_rate = learning_rate + self.logits: Dict[str, nn.Parameter] = {} + self.optimizer = None + super().__init__( + param, + obj_func, + batch_size, + obj_exp_offset_scale, + ) + + def _init(self) -> None: + parameters = 
[] + for k in self.param.keys(): + v = self.param[k] + if isinstance(v, ng.p.Choice): + logits_shape = len(v.choices) + self.logits[k] = nn.Parameter(torch.randn(1, logits_shape)) + parameters.append(self.logits[k]) + else: + raise NotImplementedError() + self.optimizer = torch.optim.Adam(parameters, lr=self.learning_rate) + + def sample( + self, batch_size: int, temp: Optional[float] = GREEDY_TEMP + ) -> Dict[str, torch.Tensor]: + assert temp is not None, "temp is needed for sampling logits" + sampled_solutions, _ = sample_from_logits(self.logits, batch_size, temp) + return sampled_solutions + + +def sample_gumbel(shape: Tuple[int, ...], eps: float = 1e-20) -> torch.Tensor: + U = torch.rand(shape) + return -torch.log(-torch.log(U + eps) + eps) + + +def gumbel_softmax(logits: torch.Tensor, temperature: float) -> torch.Tensor: + # pyre-fixme[6]: For 1st param expected `Tuple[int, ...]` but got `Size`. + y = logits + sample_gumbel(logits.size()) + return F.softmax(y / temperature, dim=-1) + + +class GumbelSoftmaxOptimizer(LogitBasedComboOptimizerBase): + """ + Minimize a differentiable objective function which takes in categorical inputs. + The method is based on Categorical Reparameterization with Gumbel-Softmax, + Jang, Gu, & Poole, 2016. https://arxiv.org/abs/1611.01144. + + Args: + param (ng.p.Dict): a nevergrad dictionary for specifying input choices + + obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]): + an analytical function which consumes sampled solutions and returns + rewards as tensors of shape (batch_size, 1). + + The input dictionary has choice names as the key and sampled gumbel-softmax + distributions of shape (batch_size, num_choices) as the value + + start_temp: starting temperature + + min_temp: minimal temperature (towards the end of learning) for sampling gumbel-softmax + + update_params_within_optimizer (bool): If False, skip updating parameters within this + Optimizer. The Gumbel-softmax parameters will be updated in external systems. + + + Example: + + >>> _ = torch.manual_seed(0) + >>> np.random.seed(0) + >>> BATCH_SIZE = 4 + >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"])) + >>> + >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]): + ... # best action is "red" + ... reward = torch.mm(sampled_sol['choice1'], torch.tensor([[1.], [1.], [0.]])) + ... return reward + ... + >>> optimizer = GumbelSoftmaxOptimizer( + ... ng_param, obj_func, anneal_rate=0.9, batch_size=BATCH_SIZE, learning_rate=0.1 + ... ) + ... + >>> for i in range(30): + ... res = optimizer.optimize_step() + ... 
+ >>> assert optimizer.sample(1)['choice1'] == 2 + """ + + def __init__( + self, + param: ng.p.Dict, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + start_temp: float = 1.0, + min_temp: float = 0.1, + learning_rate: float = LEARNING_RATE, + anneal_rate: float = ANNEAL_RATE, + batch_size: int = BATCH_SIZE, + update_params_within_optimizer: bool = True, + ) -> None: + self.update_params_within_optimizer = update_params_within_optimizer + super().__init__( + param, + start_temp, + min_temp, + obj_func, + learning_rate=learning_rate, + anneal_rate=anneal_rate, + batch_size=batch_size, + # no reward scaling in gumbel softmax + obj_exp_offset_scale=None, + ) + + def sample_internal( + self, batch_size: Optional[int] = None + ) -> Tuple[Dict[str, torch.Tensor]]: + batch_size = batch_size or self.batch_size + sampled_softmax_vals = {} + for k, logits in self.logits.items(): + sampled_softmax_vals[k] = gumbel_softmax( + logits.repeat(batch_size, 1), self.temp + ) + self.last_sample_internal_res = sampled_softmax_vals + return (sampled_softmax_vals,) + + def update_params(self, reward: torch.Tensor) -> None: + if self.update_params_within_optimizer: + reward_mean = reward.mean() + assert reward_mean.requires_grad + self.optimizer.zero_grad() + reward_mean.backward() + self.optimizer.step() + + self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp) + self.last_sample_internal_res = None + + def _optimize_step(self) -> Tuple: + sampled_softmax_vals = self.sample_internal(self.batch_size)[0] + sampled_reward, _ = self.obj_func(sampled_softmax_vals) + self.update_params(sampled_reward) + + sampled_softmax_vals = { + k: v.detach().clone() for k, v in sampled_softmax_vals.items() + } + logits = {k: v.detach().clone() for k, v in self.logits.items()} + return sampled_softmax_vals, sampled_reward, logits + + +class PolicyGradientOptimizer(LogitBasedComboOptimizerBase): + """ + Minimize a black-box objective function which takes in categorical inputs. + The method is based on REINFORCE, Williams, 1992. + https://link.springer.com/article/10.1007/BF00992696 + + In this method, the action distribution is a joint distribution of multiple + *independent* softmax distributions, each corresponding to one discrete + choice type. + + Args: + param (ng.p.Dict): a nevergrad dictionary for specifying input choices + + obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]): + a function which consumes sampled solutions and returns + rewards as tensors of shape (batch_size, 1). + + The input dictionary has choice names as the key and sampled choice + indices as the value (of shape (batch_size, )) + + Example: + >>> _ = torch.manual_seed(0) + >>> np.random.seed(0) + >>> BATCH_SIZE = 16 + >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"])) + >>> + >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]): + ... reward = torch.ones(BATCH_SIZE, 1) + ... for i in range(BATCH_SIZE): + ... # the best action is "red" + ... if sampled_sol['choice1'][i] == 2: + ... reward[i, 0] = 0.0 + ... return reward + ... + >>> optimizer = PolicyGradientOptimizer( + ... ng_param, obj_func, batch_size=BATCH_SIZE, learning_rate=0.1 + ... ) + >>> for i in range(30): + ... res = optimizer.optimize_step() + ... 
+ >>> best_reward, best_choice = optimizer.best_solutions(k=1)[0] + >>> assert best_reward == 0 + >>> assert best_choice['choice1'] == 2 + >>> assert optimizer.sample(1)['choice1'] == 2 + """ + + def __init__( + self, + param: ng.p.Dict, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + # default (start_temp=min_temp=1.0): no temperature change for policy gradient + start_temp: float = 1.0, + min_temp: float = 1.0, + learning_rate: float = LEARNING_RATE, + anneal_rate: float = ANNEAL_RATE, + batch_size: int = BATCH_SIZE, + obj_exp_offset_scale: Optional[Tuple[float, float]] = None, + ) -> None: + super().__init__( + param, + start_temp, + min_temp, + obj_func, + learning_rate=learning_rate, + anneal_rate=anneal_rate, + batch_size=batch_size, + obj_exp_offset_scale=obj_exp_offset_scale, + ) + + def sample( + self, batch_size: int, temp: Optional[float] = GREEDY_TEMP + ) -> Dict[str, torch.Tensor]: + assert temp is not None, "temp is needed for sampling logits" + sampled_solutions, _ = sample_from_logits(self.logits, batch_size, temp) + return sampled_solutions + + def sample_internal( + self, + batch_size: Optional[int] = None, + ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]: + batch_size = batch_size or self.batch_size + sampled_solutions, sampled_log_probs = sample_from_logits( + self.logits, + batch_size, + self.temp, + ) + self.last_sample_internal_res = sampled_solutions, sampled_log_probs + return sampled_solutions, sampled_log_probs + + def update_params(self, reward: torch.Tensor): + _, sampled_log_probs = self.last_sample_internal_res + if self.batch_size == 1: + adv = reward + else: + adv = reward - torch.mean(reward) + + assert not adv.requires_grad + assert sampled_log_probs.requires_grad + assert sampled_log_probs.shape == adv.shape == reward.shape + assert adv.ndim == 2 + assert adv.shape[-1] == 1 + + loss = (adv * sampled_log_probs).mean() + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp) + self.last_sample_internal_res = None + + def _optimize_step(self) -> Tuple: + sampled_solutions, sampled_log_probs = self.sample_internal(self.batch_size) + + sampled_reward, sampled_scaled_reward = self.obj_func(sampled_solutions) + sampled_reward, sampled_scaled_reward = ( + sampled_reward.detach(), + sampled_scaled_reward.detach(), + ) + self.update_params(sampled_scaled_reward) + return sampled_solutions, sampled_reward, sampled_log_probs + + +def shuffle_exp_replay(exp_replay: List[Any]) -> Any: + shuffle_idx = np.random.permutation(len(exp_replay)) + for idx in shuffle_idx: + yield exp_replay[idx] + + +class QLearningOptimizer(ComboOptimizerBase): + """ + Treat the problem of minimizing a black-box function as a sequential decision problem, + and solve it by Deep Q-Learning. See "Human-Level Control through Deep Reinforcement + Learning", Mnih et al., 2015. https://www.nature.com/articles/nature14236. + + In each episode step, Q-learning makes a decision for one categorical input. The reward + is given only at the end of the episode, which is the value of the black-box function + at the input determined by the choices made at all steps. 
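+
+    Concretely, the input to the Q-network is the concatenation of one-hot encodings
+    of the choices made so far (blocks for not-yet-decided choices are filled with -1).
+    For example, two choice types with 3 options each give a 6-dimensional input; the
+    episode then has two steps, and the negated objective value serves as the terminal reward.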
+ + Args: + param (ng.p.Dict): a nevergrad dictionary for specifying input choices + + start_temp (float): the starting exploration rate in epsilon-greedy sampling + + min_temp (float): the minimal exploration rate in epsilon-greedy + + obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]): + a function which consumes sampled solutions and returns + rewards as tensors of shape (batch_size, 1). + + The input dictionary has choice names as the key and sampled choice + indices as the value (of shape (batch_size, )) + + model_dim (int): hidden layer size for the q-network: input -> model_dim -> model_dim -> output + + num_batches_per_learning (int): the number of batches sampled from replay buffer + for q-learning. + + replay_size (int): the maximum batches held in the replay buffer. Note, a problem instance of n + choices will generate n batches in the replay buffer. + + Example: + >>> _ = torch.manual_seed(0) + >>> np.random.seed(0) + >>> BATCH_SIZE = 4 + >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"])) + >>> + >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]): + ... reward = torch.ones(BATCH_SIZE, 1) + ... for i in range(BATCH_SIZE): + ... # the best action is "red" + ... if sampled_sol['choice1'][i] == 2: + ... reward[i, 0] = 0.0 + ... return reward + ... + >>> optimizer = QLearningOptimizer(ng_param, obj_func, batch_size=BATCH_SIZE) + >>> for i in range(10): + ... res = optimizer.optimize_step() + ... + >>> best_reward, best_choice = optimizer.best_solutions(k=1)[0] + >>> assert best_reward == 0 + >>> assert best_choice['choice1'] == 2 + >>> assert optimizer.sample(1)['choice1'] == 2 + """ + + def __init__( + self, + param: ng.p.Dict, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + start_temp: float = 1.0, + min_temp: float = 0.1, + learning_rate: float = LEARNING_RATE, + anneal_rate: float = ANNEAL_RATE, + batch_size: int = BATCH_SIZE, + model_dim: int = 128, + obj_exp_offset_scale: Optional[Tuple[float, float]] = None, + num_batches_per_learning: int = 10, + replay_size: int = 100, + ) -> None: + self.model_dim = model_dim + self.sorted_keys = sorted(param.keys()) + assert ( + start_temp <= 1.0 and start_temp > 0 + ), "Starting temperature for epsilon-greedy should be between (0, 1]" + assert ( + min_temp <= start_temp and min_temp >= 0 + ), "Minimum temperature for epsilon-greedy should be between [0, start_temp]" + self.temp = start_temp + self.min_temp = min_temp + self.learning_rate = learning_rate + self.anneal_rate = anneal_rate + self.num_batches_per_learning = num_batches_per_learning + self.replay_size = replay_size + self.exp_replay = deque([], maxlen=replay_size) + self.input_dim = 0 + self.q_net = None + self.optimizer = None + super().__init__( + param, + obj_func, + batch_size=batch_size, + obj_exp_offset_scale=obj_exp_offset_scale, + ) + + def _init(self) -> None: + for k in self.sorted_keys: + v = self.param[k] + if isinstance(v, ng.p.Choice): + num_choices = len(v.choices) + self.input_dim += num_choices + else: + raise NotImplementedError() + + self.q_net = nn.Sequential( + *[ + nn.Linear(self.input_dim, self.model_dim), + nn.ReLU(), + nn.Linear(self.model_dim, self.model_dim), + nn.ReLU(), + nn.Linear(self.model_dim, 1), + ] + ) + for p in self.q_net.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + self.optimizer = torch.optim.Adam( + self.q_net.parameters(), lr=self.learning_rate + ) + + logger.info(f"Number of total params: {_num_of_params(self.q_net)}") + + def sample_internal( + 
self, + batch_size: Optional[int] = None, + ) -> Tuple[Dict[str, torch.Tensor], List[Any]]: + batch_size = batch_size or self.batch_size + return self._sample_internal(batch_size, self.temp) + + def _sample_internal( + self, + batch_size: int, + temp: float, + ) -> Tuple[Dict[str, torch.Tensor], List[Any]]: + logger.info(f"Explore with temp={temp}") + sampled_solutions: Dict[str, torch.Tensor] = {} + exp_replay = [] + acc_input_dim = 0 + # The first cur_state_action is a dummy vector of all -1 + cur_state_action = torch.full((batch_size, self.input_dim), -1).float() + for k in self.sorted_keys: + v = self.param[k] + # pyre-fixme[16]: `Parameter` has no attribute `choices`. + num_choices = len(v.choices) + next_state_action_all_pairs = cur_state_action.repeat_interleave( + num_choices, dim=0 + ).reshape(batch_size, num_choices, self.input_dim) + next_state_action_all_pairs[ + :, :, acc_input_dim : acc_input_dim + num_choices + ] = torch.eye(num_choices) + q_values = ( + self.q_net(next_state_action_all_pairs) + .detach() + .reshape(batch_size, num_choices) + ) + q_actions = q_values.argmax(dim=1) + random_actions = torch.randint(num_choices, (batch_size,)) + explore_prob = torch.rand(batch_size) + selected_action = ( + (explore_prob <= temp) * random_actions + + (explore_prob > temp) * q_actions + ).long() + + sampled_solutions[k] = selected_action + # the last element is terminal indicator + exp_replay.append((cur_state_action, next_state_action_all_pairs, False)) + + cur_state_action = next_state_action_all_pairs[ + torch.arange(batch_size), selected_action + ] + acc_input_dim += num_choices + + # add dummy next_state_action_all_pairs and terminal indicator + exp_replay.append((cur_state_action, cur_state_action.squeeze(1), True)) + # the first element is not useful + exp_replay.pop(0) + + self.last_sample_internal_res = (sampled_solutions, exp_replay) + return sampled_solutions, exp_replay + + def sample( + self, batch_size: int, temp: Optional[float] = GREEDY_TEMP + ) -> Dict[str, torch.Tensor]: + assert temp is not None, "temp is needed for epsilon greedy" + sampled_solutions, _ = self._sample_internal(batch_size, temp) + return sampled_solutions + + def update_params(self, reward: torch.Tensor) -> None: + _, exp_replay = self.last_sample_internal_res + + # insert reward placeholder to exp replay + # exp replay now has the format: + # (cur_state_action, next_state_action_all_pairs, terminal, reward) + self.exp_replay.extend([[*exp, None] for exp in exp_replay]) + self.exp_replay[-1][-1] = reward + + assert len(exp_replay) == len(self.sorted_keys) + avg_td_loss = [] + + for i, ( + cur_state_action, + next_state_action_all_pairs, + terminal, + r, + ) in enumerate(shuffle_exp_replay(self.exp_replay)): + q = self.q_net(cur_state_action) + if terminal: + # negate reward to be consistent with other optimizers. 
+ # reward returned by obj_func is to be minimized + # but q-learning tries to maximize accumulated rewards + loss = F.mse_loss(q, -r) + else: + q_next = self.q_net(next_state_action_all_pairs).detach() + # assume gamma=1 (no discounting) + loss = F.mse_loss(q, q_next.max(dim=1).values) + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + avg_td_loss.append(loss.detach()) + + if i == self.num_batches_per_learning - 1: + break + + avg_td_loss = np.mean(avg_td_loss) + logger.info(f"Avg td loss: {avg_td_loss}") + + self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp) + self.last_sample_internal_res = None + + def _optimize_step( + self, + ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]: + sampled_solutions, exp_replay = self.sample_internal(self.batch_size) + sampled_reward, sampled_scaled_reward = self.obj_func(sampled_solutions) + sampled_reward, sampled_scaled_reward = ( + sampled_reward.detach(), + sampled_scaled_reward.detach(), + ) + self.update_params(sampled_scaled_reward) + return sampled_solutions, sampled_reward + + +class BayesianOptimizerBase(ComboOptimizerBase): + """ + Bayesian Optimization with mutation optimization and acquisition function. + The method is motivated by BANANAS, White, 2019. + https://arxiv.org/abs/1910.11858 + + In this method, the search is based on mutation over the current best solutions. + An acquisition function, e.g., ITS, estimates the expected improvement. + + Args: + param (ng.p.Dict): a nevergrad dictionary for specifying input choices + + obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]): + a function which consumes sampled solutions and returns + rewards as tensors of shape (batch_size, 1). + + The input dictionary has choice names as the key and sampled choice + indices as the value (of shape (batch_size, )) + + acq_type (str): type of acquisition function. + + mutation_type (str): type of mutation, e.g., random. + + num_mutations (int): number of best solutions recorded so far that will be mutated, should be more than 1. + + temp (float): percentage of mutation - how many variables will be mutated. + """ + + def __init__( + self, + param: ng.p.Dict, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + start_temp: float = 1.0, + min_temp: float = 0.1, + acq_type: str = "its", + mutation_type: str = "random", + num_mutations: int = 50, + anneal_rate: float = ANNEAL_RATE, + batch_size: int = BATCH_SIZE, + obj_exp_offset_scale: Optional[Tuple[float, float]] = None, + ) -> None: + self.start_temp = start_temp + self.min_temp = min_temp + self.temp = start_temp + self.acq_type = acq_type + self.mutation_type = mutation_type + self.anneal_rate = anneal_rate + self.last_predictor_loss_mean = None + if num_mutations < 2: + raise ValueError("number of mutations should be more than 1") + self.num_mutations = num_mutations + super().__init__( + param, + obj_func, + batch_size=batch_size, + obj_exp_offset_scale=obj_exp_offset_scale, + ) + + def sample( + self, batch_size: int, temp: Optional[float] = None + ) -> Dict[str, torch.Tensor]: + """ + Applies a type of mutation, e.g., random mutation, on the best solutions recorded so far. + For example, with random mutation, variables are randomly selected, + and their values are randomly set with respect to their domains.
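+
+        With random mutation, floor(temp * len(self.param)) variables are re-sampled
+        per solution: e.g., temp=0.5 over 4 choice variables re-samples 2 randomly
+        selected variables, each drawn uniformly from its choice set.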
+ """ + assert temp is not None, "temperature is needed for Bayesian Optimizer" + best_solutions = self.best_solutions(batch_size) + # best_solutions come in as (reward, solution) tuples + # we only need solutions so we strip reward + best_solutions = [sol for _, sol in best_solutions] + if len(best_solutions) < batch_size: + logger.warning( + "Less than batch_size solutions are sampled to be mutated. Will duplicate thse solutions." + ) + dup_times = batch_size // len(best_solutions) + 1 + best_solutions = (best_solutions * dup_times)[:batch_size] + assert batch_size == len(best_solutions) + + # Convert best_solutions to Dict[str, tensor] format + sampled_solutions = {} + for k in sorted(self.param.keys()): + sampled_solutions[k] = torch.cat( + [sol[k].reshape(1) for sol in best_solutions] + ) + + if self.mutation_type == "random": + # keys to mutate for each solution + mutated_keys = [ + np.random.choice( + sorted(self.param.keys()), + floor(temp * len(self.param)), + replace=False, + ) + for _ in range(batch_size) + ] + mutated_solutions = {} + for key in sorted(self.param.keys()): + mutated_solutions[key] = sampled_solutions[key].clone() + sol_indices = torch.tensor( + [ + sol_idx + for sol_idx, mutated_keys_for_one_sol in enumerate(mutated_keys) + if key in mutated_keys_for_one_sol + ] + ) + if len(sol_indices): + mutated_solutions[key][sol_indices] = torch.randint( + # pyre-fixme[16]: `Parameter` has no attribute `choices`. + len(self.param[key].choices), + (len(sol_indices),), + ) + else: + raise NotImplementedError() + return mutated_solutions + + def acquisition( + self, + sampled_sol: Dict[str, torch.Tensor], + ) -> torch.Tensor: + raise NotImplementedError() + + def update_predictor( + self, sampled_solutions: Dict[str, torch.Tensor], sampled_reward: torch.Tensor + ): + raise NotImplementedError() + + def sample_internal( + self, + batch_size: Optional[int] = None, + ) -> Tuple[Dict[str, torch.Tensor]]: + batch_size = batch_size or self.batch_size + mutated_solutions = self.sample(self.num_mutations, self.temp) + _, indices = torch.sort(self.acquisition(mutated_solutions), dim=0) + sampled_solutions = {} + for key in sorted(self.param.keys()): + sampled_solutions[key] = mutated_solutions[key][indices[:batch_size]] + self.last_sample_internal_res = sampled_solutions + return (sampled_solutions,) + + def update_params(self, reward: torch.Tensor): + sampled_solutions = self.last_sample_internal_res + self.update_predictor(sampled_solutions, reward) + self.temp = np.maximum(self.temp * self.anneal_rate, self.min_temp) + self.last_sample_internal_res = None + + def _optimize_step(self) -> Tuple: + sampled_solutions = self.sample_internal(self.batch_size)[0] + sampled_reward, _ = self.obj_func(sampled_solutions) + sampled_reward = sampled_reward.detach() + self.update_params(sampled_reward) + + last_predictor_loss_mean = self.last_predictor_loss_mean + self.last_predictor_loss_mean = None + + return sampled_solutions, sampled_reward, last_predictor_loss_mean + + +def random_sample_n_solutions(params, num_mutations): + """ + random_sample_n_solutions: + Helper function to initialize random sample solutions + """ + sampled_solutions = {} + for n, param in params.items(): + if isinstance(param, ng.p.Choice): + num_choices = len(param.choices) + sampled_solutions[n] = torch.randint(num_choices, (num_mutations,)) + else: + raise NotImplementedError() + return sampled_solutions + + +def input_dim_for_random_sample_n_solutions(params, num_mutations): + """ + 
input_dim_for_random_sample_n_solutions: + Helper function to calculate random sample solutions dimensions + """ + input_dim = 0 + for _n, param in params.items(): + if isinstance(param, ng.p.Choice): + num_choices = len(param.choices) + input_dim += num_choices + else: + raise NotImplementedError() + return input_dim + + +class BayesianMLPEnsemblerOptimizer(BayesianOptimizerBase): + """ + Bayesian Optimizer with an ensemble of MLP networks, random mutation, and ITS. + The method is motivated by the BANANAS optimization method, White, 2019. + https://arxiv.org/abs/1910.11858. + + The mutation rate (temp) starts from start_temp and decreases over time + with anneal_rate. Its lowest possible value is min_temp. + Thus, initially the algorithm explores mutations with a higher mutation rate (more variables are randomly mutated). + As time passes, the algorithm exploits the best solutions recorded so far (fewer variables are mutated). + + Args: + param (ng.p.Dict): a nevergrad dictionary for specifying input choices + + obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]): + a function which consumes sampled solutions and returns + rewards as tensors of shape (batch_size, 1). + + The input dictionary has choice names as the key and sampled choice + indices as the value (of shape (batch_size, )) + + acq_type (str): type of acquisition function. + + mutation_type (str): type of mutation, e.g., random. + + num_mutations (int): number of best solutions recorded so far that will be mutated, should be more than 1. + + num_ensemble (int): number of predictors, should be more than 1. + + epochs (int): number of epochs, should be a positive integer. + + start_temp (float): initial temperature (ratio) for mutation, e.g., with 1.0 all variables will be initially mutated. + + min_temp (float): lowest temperature (ratio) for mutation, e.g., with 0.0 no mutation will occur. + + + Example: + >>> _ = torch.manual_seed(0) + >>> np.random.seed(0) + >>> BATCH_SIZE = 4 + >>> ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"])) + >>> + >>> def obj_func(sampled_sol: Dict[str, torch.Tensor]): + ... batch_size = sampled_sol['choice1'].shape[0] + ... reward = torch.ones(batch_size, 1) + ... for i in range(batch_size): + ... # the best action is "red" + ... if sampled_sol['choice1'][i] == 2: + ... reward[i, 0] = 0.0 + ... return reward + ... + >>> optimizer = BayesianMLPEnsemblerOptimizer( + ... ng_param, obj_func, batch_size=BATCH_SIZE, + ... acq_type="its", mutation_type="random", + ... num_mutations=8, + ... ) + >>> for i in range(30): + ... res = optimizer.optimize_step() + ...
+ >>> assert optimizer.sample(1, temp=0)['choice1'] == 2 + """ + + def __init__( + self, + param: ng.p.Dict, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + start_temp: float = 1.0, + min_temp: float = 0.1, + acq_type: str = "its", + mutation_type: str = "random", + anneal_rate: float = ANNEAL_RATE, + num_mutations: int = 50, + epochs: int = 1, + learning_rate: float = LEARNING_RATE, + batch_size: int = BATCH_SIZE, + obj_exp_offset_scale: Optional[Tuple[float, float]] = None, + model_dim: int = 128, + num_ensemble: int = 5, + ) -> None: + self.temp = start_temp + self.num_mutations = num_mutations + self.epochs = epochs + self.learning_rate = learning_rate + self.model_dim = model_dim + if num_ensemble < 2: + raise ValueError("Number of ensembles should be moe than one") + self.num_ensemble = num_ensemble + self.input_dim = 0 + self.predictor = None + assert ( + num_mutations >= batch_size + ), f"num_mutations ({num_mutations}) >= batch_size ({batch_size}) is not true" + super().__init__( + param, + obj_func, + start_temp=start_temp, + min_temp=min_temp, + acq_type=acq_type, + mutation_type=mutation_type, + num_mutations=num_mutations, + anneal_rate=anneal_rate, + batch_size=batch_size, + obj_exp_offset_scale=obj_exp_offset_scale, + ) + + def _init(self) -> None: + # initial population + sampled_solutions = random_sample_n_solutions(self.param, self.num_mutations) + + self.input_dim = input_dim_for_random_sample_n_solutions( + self.param, self.num_mutations + ) + + # predictor + self.predictor = [] + for _ in range(self.num_ensemble): + model = nn.Sequential( + *[ + nn.Linear(self.input_dim, self.model_dim), + nn.LeakyReLU(), + nn.Linear(self.model_dim, self.model_dim), + nn.LeakyReLU(), + nn.Linear(self.model_dim, 1), + ] + ) + for p in model.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + self.predictor.append(model) + + sampled_reward, _ = self.obj_func(sampled_solutions) + sampled_reward = sampled_reward.detach() + self._maintain_best_solutions(sampled_solutions, sampled_reward) + self.update_predictor(sampled_solutions, sampled_reward) + + def acquisition( + self, + sampled_sol: Dict[str, torch.Tensor], + ) -> torch.Tensor: + predictor = self.predictor + acq_type = self.acq_type + assert predictor is not None + batch_tensors = sol_to_tensors(sampled_sol, self.param) + + if acq_type == "its": + with torch.no_grad(): + predictions = torch.stack([net(batch_tensors) for net in predictor]) + + acquisition_reward = torch.normal( + torch.mean(predictions, dim=0), torch.std(predictions, dim=0) + ) + else: + raise NotImplementedError() + return acquisition_reward.view(-1) + + def update_predictor( + self, sampled_solutions: Dict[str, torch.Tensor], sampled_reward: torch.Tensor + ): + x = sol_to_tensors(sampled_solutions, self.param) + y = sampled_reward + losses = [] + + for model in self.predictor: + model.train() + optimizer = torch.optim.Adam(model.parameters(), lr=self.learning_rate) + for _ in range(self.epochs): + pred = model(x) + loss = F.mse_loss(pred, y) + optimizer.zero_grad() + loss.backward() + optimizer.step() + losses.append(loss.detach()) + model.eval() + self.last_predictor_loss_mean = np.mean(losses) + + +class LinearBayesianByBackprop(nn.Module): + """ + Support Layer Class for Bayes By Backpropogation Optimizer + This implementation is based on + https://github.com/cpark321/uncertainty-deep-learning/blob/master/01.%20Bayes-by-Backprop.ipynb + """ + + def __init__(self, input_features, output_features, prior_var=1.0): + + """ 
+ Initialization of our layer, prior is a normal distribution + centered in 0 and of variance prior_var. + """ + # initialize layers + super().__init__() + # set input and output dimensions + self.input_features = input_features + self.output_features = output_features + + # initialize mu and rho parameters for the weights of the layer + self.w_mu = nn.Parameter(torch.zeros(output_features, input_features)) + self.w_rho = nn.Parameter(torch.zeros(output_features, input_features)) + + # initialize mu and rho parameters for the layer's bias + self.b_mu = nn.Parameter(torch.zeros(output_features)) + self.b_rho = nn.Parameter(torch.zeros(output_features)) + + # initialize weight samples (these will be calculated whenever the layer makes a prediction) + self.w = None + self.b = None + + # initialize prior distribution for all of the weights and biases + self.prior = torch.distributions.Normal(0, prior_var) + + def forward(self, x): + """ + Optimization forward step for one layer + """ + # sample weights + w_epsilon = Normal(0, 1).sample(self.w_mu.shape) + self.w = self.w_mu + torch.log(1 + torch.exp(self.w_rho)) * w_epsilon + + # sample bias + b_epsilon = Normal(0, 1).sample(self.b_mu.shape) + self.b = self.b_mu + torch.log(1 + torch.exp(self.b_rho)) * b_epsilon + + # record log prior by evaluating log pdf of prior at sampled weight and bias + w_log_prior = self.prior.log_prob(self.w) + b_log_prior = self.prior.log_prob(self.b) + self.log_prior = torch.sum(w_log_prior) + torch.sum(b_log_prior) + + # record log variational posterior by evaluating log pdf of normal distribution + # defined by parameters with respect at the sampled values + self.w_post = Normal(self.w_mu.data, torch.log(1 + torch.exp(self.w_rho))) + self.b_post = Normal(self.b_mu.data, torch.log(1 + torch.exp(self.b_rho))) + self.log_post = ( + self.w_post.log_prob(self.w).sum() + self.b_post.log_prob(self.b).sum() + ) + + return F.linear(x, self.w, self.b) + + +class MLPBayesianByBackprop(nn.Module): + """ + Support custom MLP Class based on Linear BayesianByBackprop + for Bayes By Backpropogation Optimizer + This implementation is based on + https://github.com/cpark321/uncertainty-deep-learning/blob/master/01.%20Bayes-by-Backprop.ipynb + """ + + def __init__(self, input_dim, hidden_dim, noise_tol=0.1, prior_var=1.0): + + # network initialization with "upgraded" Linear BayesianByBackprop layers + super().__init__() + + self.hidden1 = LinearBayesianByBackprop( + input_dim, hidden_dim, prior_var=prior_var + ) + + self.hidden2 = LinearBayesianByBackprop( + hidden_dim, hidden_dim, prior_var=prior_var + ) + + self.out = LinearBayesianByBackprop(hidden_dim, 1, prior_var=prior_var) + + self.noise_tol = noise_tol # noise tolerance is for likelihood caluclation + + def forward(self, x): + # sigmoid gives more stable convergance compared to relu + x = torch.sigmoid(self.hidden1(x)) + x = torch.sigmoid(self.hidden2(x)) + x = self.out(x) + return x + + def log_prior(self): + # calculate the log prior over all the layers + return self.hidden1.log_prior + self.hidden2.log_prior + self.out.log_prior + + def log_post(self): + # calculate the log posterior over all the layers + return self.hidden1.log_post + self.hidden2.log_post + self.out.log_post + + def sample_elbo(self, input, target, samples): + # negative elbo will be the loss function + + # initialize tensors + outputs = torch.zeros(samples, target.shape[0]) + + log_priors = torch.zeros(samples) + log_posts = torch.zeros(samples) + log_likes = torch.zeros(samples) + + # make predictions 
and calculate prior, posterior, and likelihood for a given number of samples + for i in range(samples): + outputs[i] = self(input).reshape(-1) # make predictions + log_priors[i] = self.log_prior() # get log prior + log_posts[i] = self.log_post() # get log variational posterior + log_likes[i] = ( + Normal(outputs[i], self.noise_tol).log_prob(target.reshape(-1)).sum() + ) # calculate the log likelihood + + # calculate Monte Carlo estimate of prior, posterior, and likelihood + log_prior = log_priors.mean() + log_post = log_posts.mean() + log_like = log_likes.mean() + + # calculate the negative ELBO (which is our loss function) + loss = log_post - log_prior - log_like + return loss + + +class BayesianByBackpropOptimizer(BayesianOptimizerBase): + """ + Bayesian Optimizer motivated by the BANANAS optimization method, White, 2019. + https://arxiv.org/abs/1910.11858. Implementation of this optimizer is similar to + BayesianMLPEnsemblerOptimizer, based on the BayesianOptimizerBase class, and includes a Bayesian optimizer, random mutation, + and ITS. The only difference between BayesianByBackpropOptimizer + and BayesianMLPEnsemblerOptimizer is that BayesianByBackpropOptimizer uses a single MLP network for prediction, which is trained + by the Bayes-by-backpropagation method. The Bayes-by-backprop training method is inspired by David J. C. MacKay's paper, A Practical + Bayesian Framework for Backpropagation Networks: https://authors.library.caltech.edu/13793/1/MACnc92b.pdf + + The mutation rate (temp) starts from start_temp and decreases over time + with anneal_rate. Its lowest possible value is min_temp. + Thus, initially the algorithm explores mutations with a higher mutation rate (more variables are randomly mutated). + As time passes, the algorithm exploits the best solutions recorded so far (fewer variables are mutated). + + Args: + param (ng.p.Dict): a nevergrad dictionary for specifying input choices + + obj_func (Callable[[Dict[str, torch.Tensor]], torch.Tensor]): + a function which consumes sampled solutions and returns + rewards as tensors of shape (batch_size, 1). + + The input dictionary has choice names as the key and sampled choice + indices as the value (of shape (batch_size, )) + + mutation_type (str): type of mutation, e.g., random. + + num_mutations (int): number of best solutions recorded so far that will be mutated, should be > 1. + + epochs (int): number of epochs (i.e., number of times each data point is used in the loss to update the predictor), should be a positive integer. + + start_temp (float): initial temperature (ratio) for mutation, e.g., with 1.0 all variables will be initially mutated. + + min_temp (float): lowest temperature (ratio) for mutation, e.g., with 0.0 no mutation will occur. + + sample_size (int): number of Monte Carlo samples for priors and likelihood, usually 1 is ok + + noise_tol (float): noise tolerance for log likelihood estimation + + prior_var (float): prior initialization for layer weights and biases, which is a normal distribution centered at 0 with variance prior_var.
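+
+    Example usage (illustrative sketch, not a doctest; argument values here are
+    arbitrary and convergence is not asserted)::
+
+        ng_param = ng.p.Dict(choice1=ng.p.Choice(["blue", "green", "red"]))
+
+        def obj_func(sampled_sol):
+            # reward of shape (batch_size, 1); lower is better, "red" (index 2) is optimal
+            batch_size = sampled_sol["choice1"].shape[0]
+            return (sampled_sol["choice1"] != 2).float().reshape(batch_size, 1)
+
+        optimizer = BayesianByBackpropOptimizer(
+            ng_param, obj_func, batch_size=4, num_mutations=8
+        )
+        for _ in range(30):
+            optimizer.optimize_step()
+        best_reward, best_choice = optimizer.best_solutions(k=1)[0]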
+ + + """ + + def __init__( + self, + param: ng.p.Dict, + obj_func: Optional[Callable[[Dict[str, torch.Tensor]], torch.Tensor]] = None, + start_temp: float = 1.0, + min_temp: float = 0.1, + mutation_type: str = "random", + anneal_rate: float = ANNEAL_RATE, + num_mutations: int = 50, + epochs: int = 1, + learning_rate: float = LEARNING_RATE, + batch_size: int = BATCH_SIZE, + obj_exp_offset_scale: Optional[Tuple[float, float]] = None, + model_dim: int = 128, + sample_size: int = 1, + noise_tol: float = 1.0, + prior_var: float = 1.0, + ) -> None: + self.temp = start_temp + self.num_mutations = num_mutations + self.epochs = epochs + self.learning_rate = learning_rate + self.model_dim = model_dim + self.input_dim = 0 + self.predictor = None + self.sample_size = sample_size + self.noise_tol = noise_tol + self.prior_var = prior_var + super().__init__( + param, + obj_func, + start_temp=start_temp, + acq_type="", # acq_type will not be used + min_temp=min_temp, + mutation_type=mutation_type, + num_mutations=num_mutations, + anneal_rate=anneal_rate, + batch_size=batch_size, + obj_exp_offset_scale=obj_exp_offset_scale, + ) + + def _init(self) -> None: + # initial population + sampled_solutions = random_sample_n_solutions(self.param, self.num_mutations) + + self.input_dim = input_dim_for_random_sample_n_solutions( + self.param, self.num_mutations + ) + + self.predictor = MLPBayesianByBackprop(self.input_dim, self.model_dim) + for p in self.predictor.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + sampled_reward, _ = self.obj_func(sampled_solutions) + sampled_reward = sampled_reward.detach() + self._maintain_best_solutions(sampled_solutions, sampled_reward) + self.update_predictor(sampled_solutions, sampled_reward) + + def acquisition( + self, + sampled_sol: Dict[str, torch.Tensor], + ) -> torch.Tensor: + batch_tensors = sol_to_tensors(sampled_sol, self.param) + acquisition_reward = self.predictor(batch_tensors) + return acquisition_reward.view(-1) + + def update_predictor( + self, sampled_solutions: Dict[str, torch.Tensor], sampled_reward: torch.Tensor + ): + x = sol_to_tensors(sampled_solutions, self.param) + y = sampled_reward + losses = [] + + self.predictor.train() + optimizer = torch.optim.Adam(self.predictor.parameters(), lr=self.learning_rate) + for _ in range(self.epochs): + optimizer.zero_grad() + loss = self.predictor.sample_elbo(x, y, self.sample_size) + loss.backward() + optimizer.step() + + losses.append(loss.detach()) + self.predictor.eval() + + self.last_predictor_loss_mean = np.mean(losses) diff --git a/reagent/mab/__init__.py b/reagent/mab/__init__.py new file mode 100644 index 000000000..40539064a --- /dev/null +++ b/reagent/mab/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/mab/mab_algorithm.py b/reagent/mab/mab_algorithm.py new file mode 100644 index 000000000..07775d121 --- /dev/null +++ b/reagent/mab/mab_algorithm.py @@ -0,0 +1,251 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from abc import ABC, abstractmethod +from typing import List, Optional, Tuple + +import torch +from torch import Tensor + + +def get_arm_indices( + ids_of_all_arms: List[str], ids_of_arms_in_batch: List[str] +) -> List[int]: + arm_idxs = [] + for i in ids_of_arms_in_batch: + try: + arm_idxs.append(ids_of_all_arms.index(i)) + except ValueError: + raise ValueError(f"Unknown arm_id {i}. 
Known arm ids: {ids_of_all_arms}") + return arm_idxs + + +def place_values_at_indices(values: Tensor, idxs: List[int], total_len: int) -> Tensor: + """ + We place the values provided in `values` at indices provided in idxs. The values at indices + not included in `idxs` are filled with zeros. + TODO: maybe replace with sparse-to-dense tensor function? + Example: + place_values_at_indices(Tensor([4,5]), [2,0], 4) == Tensor([5, 0, 4, 0]) + + Args: + values (Tensor): The values + idxs (List[int]): The indices at which the values have to be placed + total_len (int): Length of the output tensor + Return: + The output tensor + """ + assert len(values) == len(idxs) + ret = torch.zeros(total_len) + ret[idxs] = values + return ret + + +def reindex_multiple_tensors( + all_ids: List[str], + batch_ids: Optional[List[str]], + value_tensors: Tuple[Tensor, ...], +) -> Tuple[Tensor, ...]: + """ + Each tensor from value_tensors is ordered by ids from batch_ids. In the output we + return these tensors reindexed by all_ids, filling in zeros for missing entries. + + Args: + all_ids (List[str]): The IDs that specify how to order the elements in the output + batch_ids (Optional[List[str]]): The IDs that specify how the elements are ordered in the input + value_tensors (Tuple[Tensor]): A tuple of tensors with elements ordered by `batch_ids` + Return: + A Tuple of reindexed tensors + """ + if batch_ids is None or batch_ids == all_ids: + # the observations are for all arms are already in correct order + return value_tensors + else: + assert len(batch_ids) == len( + set(batch_ids) + ) # make sure no duplicates in arm IDs + + # get the indices of the arms + arm_idxs = get_arm_indices(all_ids, batch_ids) + + # put elements from the batch in the positions specified by `arm_ids` (missing arms will be zero) + ret = [] + for v in value_tensors: + ret.append(place_values_at_indices(v, arm_idxs, len(all_ids))) + return tuple(ret) + + +def randomized_argmax(x: torch.Tensor) -> int: + """ + Like argmax, but return a random (uniformly) index of the max element + This function makes sense only if there are ties for the max element + """ + if torch.isinf(x).any(): + # if some scores are inf, return the index for one of the infs + best_indices = torch.nonzero(torch.isinf(x)).squeeze() + else: + max_value = torch.max(x) + best_indices = torch.nonzero(x == max_value).squeeze() + if best_indices.ndim == 0: + # if there is a single argmax + chosen_idx = int(best_indices) + else: + chosen_idx = int( + best_indices[ + torch.multinomial( + 1.0 / len(best_indices) * torch.ones(len(best_indices)), 1 + )[0] + ] + ) + return chosen_idx + + +class MABAlgo(torch.nn.Module, ABC): + def __init__( + self, + randomize_ties: bool = True, + min_num_obs_per_arm: int = 1, + *, + n_arms: Optional[int] = None, + arm_ids: Optional[List[str]] = None, + ) -> None: + super().__init__() + if n_arms is not None: + self.arm_ids = list(map(str, range(n_arms))) + self.n_arms = n_arms + if arm_ids is not None: + self.arm_ids = arm_ids + self.n_arms = len(arm_ids) + self.min_num_obs_per_arm = min_num_obs_per_arm + self.total_n_obs_all_arms = 0 + self.total_n_obs_per_arm = torch.zeros(self.n_arms) + self.total_sum_reward_per_arm = torch.zeros(self.n_arms) + self.total_sum_reward_squared_per_arm = torch.zeros(self.n_arms) + self.randomize_ties = randomize_ties + + def add_batch_observations( + self, + n_obs_per_arm: Tensor, + sum_reward_per_arm: Tensor, + sum_reward_squared_per_arm: Tensor, + arm_ids: Optional[List[str]] = None, + ) -> None: + ( + n_obs_per_arm, + 
sum_reward_per_arm,
+            sum_reward_squared_per_arm,
+        ) = reindex_multiple_tensors(
+            all_ids=self.arm_ids,
+            batch_ids=arm_ids,
+            value_tensors=(
+                n_obs_per_arm,
+                sum_reward_per_arm,
+                sum_reward_squared_per_arm,
+            ),
+        )
+
+        self.total_n_obs_per_arm += n_obs_per_arm
+        self.total_sum_reward_per_arm += sum_reward_per_arm
+        self.total_sum_reward_squared_per_arm += sum_reward_squared_per_arm
+        self.total_n_obs_all_arms += int(n_obs_per_arm.sum().item())
+
+    def add_single_observation(self, arm_id: str, reward: float) -> None:
+        """
+        Add a single observation (arm played, reward) to the bandit
+
+        Args:
+            arm_id (str): ID of the arm that was played
+            reward (float): Reward generated by the arm
+        """
+        assert arm_id in self.arm_ids
+        arm_idx = self.arm_ids.index(arm_id)
+        self.total_n_obs_per_arm[arm_idx] += 1
+        self.total_sum_reward_per_arm[arm_idx] += reward
+        self.total_sum_reward_squared_per_arm[arm_idx] += reward**2
+        self.total_n_obs_all_arms += 1
+
+    def get_action(self) -> str:
+        """
+        Get the ID of the action chosen by the MAB algorithm
+
+        Returns:
+            str: The ID of the chosen action
+        """
+        scores = self()  # calling forward() under the hood
+        if self.randomize_ties:
+            best_idx = randomized_argmax(scores)
+        else:
+            best_idx = torch.argmax(scores)
+        return self.arm_ids[best_idx]
+
+    def reset(self) -> None:
+        """
+        Reset the MAB to the initial (empty) state.
+        """
+        self.__init__(randomize_ties=self.randomize_ties, arm_ids=self.arm_ids)
+
+    @abstractmethod
+    def get_scores(self) -> Tensor:
+        pass
+
+    def forward(self):
+        # set `inf` scores for arms which don't have the minimum number of observations
+        return torch.where(
+            self.total_n_obs_per_arm >= self.min_num_obs_per_arm,
+            self.get_scores().float(),
+            torch.tensor(torch.inf, dtype=torch.float),
+        )
+
+    def get_avg_reward_values(self) -> Tensor:
+        return self.total_sum_reward_per_arm / self.total_n_obs_per_arm
+
+    @classmethod
+    def get_scores_from_batch(
+        cls,
+        n_obs_per_arm: Tensor,
+        sum_reward_per_arm: Tensor,
+        sum_reward_squared_per_arm: Tensor,
+    ) -> Tensor:
+        """
+        A utility method that creates the bandit, feeds in a batch of observations and returns the scores in one function call
+
+        Args:
+            n_obs_per_arm (Tensor): A tensor of counts of per-arm numbers of observations
+            sum_reward_per_arm (Tensor): A tensor of sums of rewards for each arm
+            sum_reward_squared_per_arm (Tensor): A tensor of sums of squared rewards for each arm
+
+        Returns:
+            Tensor: Array of per-arm scores
+        """
+        n_arms = len(n_obs_per_arm)
+        b = cls(n_arms=n_arms)  # pyre-ignore[45]
+        b.add_batch_observations(
+            n_obs_per_arm, sum_reward_per_arm, sum_reward_squared_per_arm
+        )
+        return b()
+
+    def __repr__(self) -> str:
+        t = ", ".join(
+            f"{v:.3f} ({int(n)})"
+            for v, n in zip(self.get_avg_reward_values(), self.total_n_obs_per_arm)
+        )
+        return f"{type(self).__name__}({self.n_arms} arms; {t})"
+
+
+class RandomActionsAlgo(MABAlgo):
+    """
+    A MAB algorithm which samples actions uniformly at random
+    """
+
+    def get_scores(self) -> Tensor:
+        return torch.rand(self.n_arms)
+
+
+class GreedyAlgo(MABAlgo):
+    """
+    Greedy algorithm, which always chooses the best arm played so far.
+    Arms that haven't been played yet are given priority by assigning them an inf score.
+    Ties are resolved randomly when randomize_ties is True (the default), otherwise in favor of the arm with the smallest index.
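+
+    Example (illustrative sketch with made-up rewards):
+
+        algo = GreedyAlgo(arm_ids=["arm_a", "arm_b"])
+        algo.add_single_observation("arm_a", 1.0)
+        algo.add_single_observation("arm_b", 0.0)
+        algo.get_action()  # returns "arm_a", the arm with the highest average reward so far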
+ """ + + def get_scores(self) -> Tensor: + return self.get_avg_reward_values() diff --git a/reagent/mab/simulation.py b/reagent/mab/simulation.py new file mode 100644 index 000000000..53114114d --- /dev/null +++ b/reagent/mab/simulation.py @@ -0,0 +1,251 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from abc import ABC, abstractmethod +from functools import partial +from multiprocessing import Pool +from typing import Dict, List, Optional, Tuple, Type, Union + +import numpy as np +import torch +from reagent.mab.mab_algorithm import MABAlgo +from torch import Tensor + +# see https://fburl.com/anp/f7y0gzl8 for an example of how evaluate MAB algorithms using a simulation + + +class MAB(ABC): + @abstractmethod + def __init__( + self, + max_steps: int, + expected_rewards: Tensor, + arm_ids: Optional[List[str]] = None, + ): + self.max_steps = max_steps + self.expected_rewards = expected_rewards + self.best_action_value = expected_rewards.max().item() + self.best_action_id = torch.argmax(expected_rewards).item() + if arm_ids is None: + self.arm_ids = list(map(str, range(len(expected_rewards)))) + else: + self.arm_ids = arm_ids + self.t = 0 + + @abstractmethod + def act(self, arm_id: str) -> float: + pass + + @property + def n_arms(self) -> int: + return len(self.expected_rewards) + + +class BernoilliMAB(MAB): + """ + A class that simulates a bandit + + Args: + probs: A tensor of per-arm success probabilities + max_steps: Max number os steps to simulate. This has to be specified because we pre-generate + all the rewards at initialization (for speedup - generating random matrix once should be + faster than generating random scalars in a loop) + """ + + def __init__( + self, + max_steps: int, + probs: torch.Tensor, + arm_ids: Optional[List[str]] = None, + ) -> None: + """ """ + assert probs.max() <= 1.0 + assert probs.min() >= 0.0 + super().__init__(max_steps=max_steps, expected_rewards=probs, arm_ids=arm_ids) + self.rewards = torch.bernoulli( + probs.repeat(max_steps, 1) + ) # pre-generate all rewards ahead of time + assert self.rewards.shape == (max_steps, len(probs)) + + self.best_action_value = probs.max().item() + + def act(self, arm_id: str) -> float: + """ + Sample a reward from a specific arm + + Args: + arm_idx: Index of arm from which reward is sampled + Returns: + Sampled reward + """ + arm_idx = self.arm_ids.index(arm_id) + assert arm_idx <= (len(self.expected_rewards) - 1) + assert self.t < self.max_steps + val = self.rewards[self.t, arm_idx].item() + self.t += 1 + return val + + +def single_evaluation_bandit_algo( + bandit: MAB, + algo: MABAlgo, + *, + update_every: int = 1, + freeze_scores_btw_updates: bool = True, +) -> np.ndarray: + """ + Evaluate a bandit algorithm on a single bandit instance. + Pseudo-regret (difference between expected values of best and chosen actions) is used to minimize variance of evaluation + + Args: + bandit: Bandit instance on which we evaluate + algo: Bandit algorithm to be evaluated + update_every: How many steps between the model is updated. 1 is online learning, >1 is iterative batch learning. + freeze_scores_btw_updates: If True, the scores are frozen between model updates, otherwise at each step we generate + new scores even if the model wasn't updated. `False` doesn't make sense for UCB models since the scores are deterministic + and wouldn't change until the model is updated. Use `False` only for models with non-deterministic scores, like Thompson sampling. 
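+
+    Example (illustrative sketch; assumes `BernoilliMAB` from this module and
+    `UCB1` from reagent.mab.ucb, with arbitrary success probabilities):
+
+        bandit = BernoilliMAB(max_steps=1000, probs=torch.tensor([0.3, 0.7]))
+        algo = UCB1(n_arms=2)
+        cum_regret = single_evaluation_bandit_algo(bandit, algo, update_every=10)
+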
+ Returns: + An array of cumulative pseudo regret + """ + rewards = [] + expected_rewards = [] + # iterate through model updates + remaining_steps = bandit.max_steps + for _ in range(0, bandit.max_steps, update_every): + batch_n_obs_per_arm = torch.zeros(bandit.n_arms) + batch_sum_reward_per_arm = torch.zeros(bandit.n_arms) + batch_sum_squared_reward_per_arm = torch.zeros(bandit.n_arms) + steps_before_update = min( + remaining_steps, update_every + ) # take this many steps until next model update + arm_id = ( + algo.get_action() + ) # this action will be reused until next model update if freeze_scores_btw_updates + for i in range(steps_before_update): + # iterate through steps without updating the model + if (not freeze_scores_btw_updates) and (i > 0): + # if scores are not frozen, we choose new action at each step + # (except first, because we've already chosen the first action above) + arm_id = algo.get_action() + arm_idx = algo.arm_ids.index(arm_id) + reward = bandit.act(arm_id) + rewards.append(reward) + expected_rewards.append(bandit.expected_rewards[arm_idx].item()) + batch_n_obs_per_arm[arm_idx] += 1 + batch_sum_reward_per_arm[arm_idx] += reward + batch_sum_squared_reward_per_arm[arm_idx] += reward**2 + assert sum(batch_n_obs_per_arm) == steps_before_update + # perform batch update + algo.add_batch_observations( + batch_n_obs_per_arm, + batch_sum_reward_per_arm, + batch_sum_squared_reward_per_arm, + ) + remaining_steps -= steps_before_update + assert remaining_steps == 0 + assert len(rewards) == bandit.max_steps + per_step_pseudo_regret = bandit.best_action_value - np.array(expected_rewards) + return np.cumsum(per_step_pseudo_regret) + + +def multiple_evaluations_bandit_algo( + algo_cls: Type[MABAlgo], + bandit_cls: Type[MAB], + n_bandits: int, + max_steps: int, + update_every: int = 1, + freeze_scores_btw_updates: bool = True, + num_processes: Optional[int] = None, + algo_kwargs: Optional[Dict] = None, + bandit_kwargs: Optional[Dict] = None, +) -> np.ndarray: + """ + Perform evaluations on multiple bandit instances and aggregate (average) the result + + Args: + algo_cls: MAB algorithm class to be evaluated + bandit_cls: Bandit class on which we perform evaluations + n_bandits: Number of bandit instances among which the results are averaged + max_steps: Number of time steps to simulate + update_every: How many steps between the model is updated. 1 is online learning, >1 is iterative batch learning. + freeze_scores_btw_updates: If True, the scores are frozen between model updates, otherwise at each step we generate + new scores even if the model wasn't updated. `False` doesn't make sense for UCB models since the scores are deterministic + and wouldn't change until the model is updated. Use `False` only for models with non-deterministic scores, like Thompson sampling. 
+ algo_kwargs: A dict of kwargs to pass to algo_cls at initialization + bandit_kwargs: A dict of kwargs to pass to bandit_cls at initialization + Returns: + An array of cumulative pseudo regret (average across multiple bandit instances) + """ + if algo_kwargs is None: + algo_kwargs = {} + if bandit_kwargs is None: + bandit_kwargs = {} + pseudo_regrets = [] + arguments = ( + ( + bandit_cls(max_steps=max_steps, **bandit_kwargs), # pyre-ignore[45] + algo_cls(**algo_kwargs), # pyre-ignore[45] + ) + for _ in range(n_bandits) + ) + with Pool(num_processes) as pool: + pseudo_regrets = pool.starmap( + partial( + single_evaluation_bandit_algo, + update_every=update_every, + freeze_scores_btw_updates=freeze_scores_btw_updates, + ), + arguments, + ) + return np.stack(pseudo_regrets).mean(0) + + +def compare_bandit_algos( + algo_clss: List[Type[MABAlgo]], + bandit_cls: Type[MAB], + n_bandits: int, + max_steps: int, + update_every: int = 1, + freeze_scores_btw_updates: bool = True, + algo_kwargs: Optional[Union[Dict, List[Dict]]] = None, + bandit_kwargs: Optional[Dict] = None, +) -> Tuple[List[str], List[np.ndarray]]: + """ + Args: + algo_clss: A list of MAB algorithm classes to be evaluated + bandit_cls: Bandit class on which we perform evaluations + n_bandits: Number of bandit instances among which the results are averaged + max_steps: Number of time steps to simulate + update_every: How many steps between the model is updated. 1 is online learning, >1 is iterative batch learning. + freeze_scores_btw_updates: If True, the scores are frozen between model updates, otherwise at each step we generate + new scores even if the model wasn't updated. `False` doesn't make sense for UCB models since the scores are deterministic + and wouldn't change until the model is updated. Use `False` only for models with non-deterministic scores, like Thompson sampling. + algo_kwargs: A dict (or list of dicts, one per algorightm class) of kwargs to pass to algo_cls at initialization + bandit_kwargs: A dict of kwargs to pass to bandit_cls at initialization + Returns: + A list of algorithm names that were evaluated (based on class names) + A list of cumulative regret trajectories (one per evaluated algorithm) + """ + if algo_kwargs is None: + algo_kwargs = {} + if bandit_kwargs is None: + bandit_kwargs = {} + if isinstance(algo_kwargs, Dict): + algo_kwargs = [algo_kwargs] * len(algo_clss) + names = [] + pseudo_regrets = [] + for algo_cls, algo_kwargs_this_algo in zip(algo_clss, algo_kwargs): + names.append(algo_cls.__name__) + pseudo_regrets.append( + multiple_evaluations_bandit_algo( + algo_cls=algo_cls, + bandit_cls=bandit_cls, + n_bandits=n_bandits, + max_steps=max_steps, + update_every=update_every, + freeze_scores_btw_updates=freeze_scores_btw_updates, + algo_kwargs=algo_kwargs_this_algo, + bandit_kwargs=bandit_kwargs, + ) + ) + return names, pseudo_regrets diff --git a/reagent/mab/thompson_sampling.py b/reagent/mab/thompson_sampling.py new file mode 100644 index 000000000..37f00fa46 --- /dev/null +++ b/reagent/mab/thompson_sampling.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
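+#
+# Illustrative usage sketch (hypothetical numbers; BernoulliBetaThompson is defined below
+# and the single-observation / get_action API comes from reagent.mab.mab_algorithm):
+#
+#     algo = BernoulliBetaThompson(n_arms=3)  # arm ids default to "0", "1", "2"
+#     algo.add_single_observation("0", 1.0)
+#     chosen_arm = algo.get_action()  # Thompson sampling from the Beta posteriors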
+ +from abc import abstractmethod +from typing import List, Optional + +import torch +from reagent.mab.mab_algorithm import MABAlgo, reindex_multiple_tensors +from torch import Tensor + + +class BaseThompsonSampling(MABAlgo): + @abstractmethod + def _get_posterior_samples(self) -> Tensor: + pass + + def get_scores(self): + return self._get_posterior_samples() + + +class BernoulliBetaThompson(BaseThompsonSampling): + """ + The Thompson Sampling MAB with Bernoulli-Beta distribution for rewards. + Appropriate for MAB with Bernoulli rewards (e.g CTR) + """ + + def _get_posterior_samples(self) -> Tensor: + """ + Get samples from the posterior distributions of arm rewards + """ + return torch.distributions.beta.Beta( + 1 + self.total_sum_reward_per_arm, + 1 + self.total_n_obs_per_arm - self.total_sum_reward_per_arm, + ).sample() + + +class NormalGammaThompson(BaseThompsonSampling): + """ + The Thompson Sampling MAB with Normal-Gamma distribution for rewards. + Appropriate for MAB with normally distributed rewards. + We use posterior update equations from + https://en.wikipedia.org/wiki/Normal-gamma_distribution#Posterior_distribution_of_the_parameters + """ + + def __init__( + self, + randomize_ties: bool = True, + min_num_obs_per_arm: int = 1, + *, + n_arms: Optional[int] = None, + arm_ids: Optional[List[str]] = None, + ) -> None: + super().__init__( + randomize_ties=randomize_ties, + n_arms=n_arms, + arm_ids=arm_ids, + min_num_obs_per_arm=min_num_obs_per_arm, + ) + self.mus = torch.zeros(self.n_arms) + self.alpha_0 = 1.5 # initial value of the alpha parameter + self.lambda_0 = 1.0 # initial value of the lambda parameter + self.gamma_rates = torch.ones(self.n_arms) + + def add_single_observation(self, arm_id: str, reward: float) -> None: + super().add_single_observation(arm_id=arm_id, reward=reward) + arm_idx = self.arm_ids.index(arm_id) + lambda_ = ( + self.lambda_0 + self.total_n_obs_per_arm[arm_idx] - 1 + ) # -1 bcs counter is already incremented by super() call + self.gamma_rates[arm_idx] += ( + 0.5 * (reward - self.mus[arm_idx]) ** 2 * lambda_ / (lambda_ + 1) + ) + self.mus[arm_idx] += (reward - self.mus[arm_idx]) / (lambda_ + 1) + + def add_batch_observations( + self, + n_obs_per_arm: Tensor, + sum_reward_per_arm: Tensor, + sum_reward_squared_per_arm: Tensor, + arm_ids: Optional[List[str]] = None, + ) -> None: + ( + n_obs_per_arm, + sum_reward_per_arm, + sum_reward_squared_per_arm, + ) = reindex_multiple_tensors( + all_ids=self.arm_ids, + batch_ids=arm_ids, + value_tensors=( + n_obs_per_arm, + sum_reward_per_arm, + sum_reward_squared_per_arm, + ), + ) + + mean_rewards_batch = torch.nan_to_num( + sum_reward_per_arm / n_obs_per_arm, nan=0.0 + ) + lambdas = self.lambda_0 + self.total_n_obs_per_arm + self.gamma_rates += 0.5 * n_obs_per_arm * lambdas / ( + n_obs_per_arm + + lambdas + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + ) * (mean_rewards_batch - self.mus) ** 2 + 0.5 * ( + sum_reward_squared_per_arm + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and + # `int`. 
+ - n_obs_per_arm * mean_rewards_batch**2 + ) + self.mus += (sum_reward_per_arm - n_obs_per_arm * self.mus) / ( + n_obs_per_arm + lambdas + ) + super().add_batch_observations( + n_obs_per_arm=n_obs_per_arm, + sum_reward_per_arm=sum_reward_per_arm, + sum_reward_squared_per_arm=sum_reward_squared_per_arm, + arm_ids=self.arm_ids, # pass self.arm_ids instead of arm_ids because we've already reindexed all tensors + ) + + def _get_posterior_samples(self) -> Tensor: + """ + Get samples from the posterior distributions of arm rewards + """ + precisions = ( + self.lambda_0 + self.total_n_obs_per_arm + ) * torch.distributions.gamma.Gamma( + 0.5 * (self.total_n_obs_per_arm + self.alpha_0), self.gamma_rates + ).sample() + return torch.distributions.normal.Normal( + self.mus, + # pyre-fixme[58]: `/` is not supported for operand types `float` and + # `Tensor`. + 1.0 / torch.sqrt(precisions), + ).sample() diff --git a/reagent/mab/ucb.py b/reagent/mab/ucb.py new file mode 100644 index 000000000..dfe6dcce3 --- /dev/null +++ b/reagent/mab/ucb.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import math +from abc import ABC, abstractmethod +from typing import List, Optional + +import torch +from reagent.mab.mab_algorithm import MABAlgo +from torch import Tensor + + +class BaseUCB(MABAlgo, ABC): + """ + Base class for UCB-like Multi-Armed Bandits (MAB) + + Args: + estimate_variance: If True, per-arm reward variance is estimated and we multiply thconfidence interval width + by its square root + min_variance: The lower bound applied to the estimated variance. If variance is not estimated, this value is used instead of an estimate. + alpha: Scalar multiplier for confidence interval width. Values above 1.0 make exploration more aggressive, below 1.0 less aggressive + """ + + def __init__( + self, + randomize_ties: bool = True, + estimate_variance: bool = True, + min_variance: float = 0.0, + alpha: float = 1.0, + min_num_obs_per_arm: int = 1, + *, + n_arms: Optional[int] = None, + arm_ids: Optional[List[str]] = None, + ) -> None: + super().__init__( + n_arms=n_arms, + arm_ids=arm_ids, + randomize_ties=randomize_ties, + min_num_obs_per_arm=min_num_obs_per_arm, + ) + self.estimate_variance = estimate_variance + self.min_variance = torch.tensor(min_variance) + self.alpha = alpha + + @property + def var(self): + # return empirical variance of rewards for each arm + if self.estimate_variance: + return torch.fmax( + self.min_variance, + self.total_sum_reward_squared_per_arm / self.total_n_obs_per_arm + - ((self.total_sum_reward_per_arm / self.total_n_obs_per_arm) ** 2), + ) + else: + return self.min_variance + + +class UCB1(BaseUCB): + """ + Canonical implementation of UCB1 + Reference: https://www.cs.bham.ac.uk/internal/courses/robotics/lectures/ucb1.pdf + """ + + def get_scores(self) -> Tensor: + """ + Get per-arm UCB scores. The formula is + UCB_i = AVG([rewards_i]) + SQRT(2*LN(T)/N_i*VAR) + VAR=1 if estimate_variance==False, otherwise VAR=AVG([rewards_i**2]) - AVG([rewards_i])**2 + + Returns: + Tensor: An array of UCB scores (one per arm) + """ + avg_rewards = self.get_avg_reward_values() + log_t_over_ni = ( + math.log(self.total_n_obs_all_arms + 1) / self.total_n_obs_per_arm + ) + # pyre-fixme[6]: For 1st param expected `Tensor` but got `float`. 
+ return avg_rewards + self.alpha * torch.sqrt(2 * log_t_over_ni * self.var) + + +class MetricUCB(BaseUCB): + """ + This is an improvement over UCB1 which uses a more precise confidence radius, especially for small expected rewards. + This algorithm has been constructed for Bernoulli reward distributions. + Reference: https://arxiv.org/pdf/0809.4882.pdf + """ + + def get_scores(self) -> Tensor: + """ + Get per-arm UCB scores. The formula is + UCB_i = AVG([rewards_i]) + SQRT(AVG([rewards_i]) * LN(T+1)/N_i) + LN(T+1)/N_i + + Returns: + Tensor: An array of UCB scores (one per arm) + """ + avg_rewards = self.get_avg_reward_values() + log_t_over_ni = ( + math.log(self.total_n_obs_all_arms + 1) / self.total_n_obs_per_arm + ) + return avg_rewards + self.alpha * ( + torch.sqrt(avg_rewards * log_t_over_ni) + log_t_over_ni + ) + + +class UCBTuned(BaseUCB): + """ + Implementation of the UCB-Tuned algorithm from Section 4 of https://link.springer.com/content/pdf/10.1023/A:1013689704352.pdf + Biggest difference from basic UCB is that per-arm reward variance is estimated. + IMPORTANT: This algorithm should only be used if the rewards of each arm have Bernoulli distribution. + """ + + def get_scores(self) -> Tensor: + """ + Get per-arm UCB scores. The formula is + UCB_i = AVG([rewards_i]) + SQRT(LN(T)/N_i * min(V_i, 0.25)) + where V_i is a conservative variance estimate of arm i: + V_i = AVG([rewards_i**2]) - AVG([rewards_i])**2 + sqrt(2ln(t) / n_i) + + Returns: + Tensor: An array of UCB scores (one per arm) + """ + avg_rewards = self.get_avg_reward_values() + log_t_over_ni = ( + math.log(self.total_n_obs_all_arms + 1) / self.total_n_obs_per_arm + ) + per_arm_var_est = ( + self.total_sum_reward_squared_per_arm / self.total_n_obs_per_arm + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and + # `int`. + - avg_rewards**2 + + torch.sqrt( + # pyre-fixme[6]: For 1st param expected `Tensor` but got `float`. + 2 + * log_t_over_ni + ) # additional term to make the estimate conservative (unlikely to underestimate) + ) + return avg_rewards + self.alpha * torch.sqrt( + log_t_over_ni * torch.fmin(per_arm_var_est, torch.tensor(0.25)) + ) + + +def get_bernoulli_ucb_tuned_scores( + n_obs_per_arm: Tensor, num_success_per_arm: Tensor +) -> Tensor: + """ + a minimalistic function that implements UCB-Tuned for Bernoulli bandit + it's here only to benchmark execution time penalty incurred by the class-based implementation + """ + avg_rewards = num_success_per_arm / n_obs_per_arm + log_t_over_ni = torch.log(torch.sum(n_obs_per_arm)) / n_obs_per_arm + per_arm_var_est = ( + avg_rewards + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + - avg_rewards**2 + + torch.sqrt( + 2 * log_t_over_ni + ) # additional term to make the estimate conservative (unlikely to underestimate) + ) + # pyre-fixme[6]: For 2nd param expected `Tensor` but got `float`. + return avg_rewards + torch.sqrt(log_t_over_ni * torch.fmin(per_arm_var_est, 0.25)) diff --git a/reagent/model_managers/__init__.py b/reagent/model_managers/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/model_managers/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
diff --git a/reagent/workflow/model_managers/actor_critic/__init__.py b/reagent/model_managers/actor_critic/__init__.py similarity index 100% rename from reagent/workflow/model_managers/actor_critic/__init__.py rename to reagent/model_managers/actor_critic/__init__.py diff --git a/reagent/workflow/model_managers/actor_critic/sac.py b/reagent/model_managers/actor_critic/sac.py similarity index 54% rename from reagent/workflow/model_managers/actor_critic/sac.py rename to reagent/model_managers/actor_critic/sac.py index cfd538c16..ec1a91f2e 100644 --- a/reagent/workflow/model_managers/actor_critic/sac.py +++ b/reagent/model_managers/actor_critic/sac.py @@ -3,11 +3,12 @@ import logging -from typing import Optional +from typing import Dict, Optional import torch from reagent.core.dataclasses import dataclass, field -from reagent.models.base import ModelBase +from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash +from reagent.model_managers.actor_critic_base import ActorCriticBase from reagent.net_builder.continuous_actor.gaussian_fully_connected import ( GaussianFullyConnected, ) @@ -20,9 +21,8 @@ from reagent.net_builder.value.fully_connected import ( FullyConnected as ValueFullyConnected, ) -from reagent.parameters import param_hash -from reagent.training import SACTrainer, SACTrainerParameters -from reagent.workflow.model_managers.actor_critic_base import ActorCriticBase +from reagent.training import ReAgentLightningModule, SACTrainer, SACTrainerParameters +from reagent.workflow.types import RewardOptions logger = logging.getLogger(__name__) @@ -55,72 +55,80 @@ class SAC(ActorCriticBase): ) ) use_2_q_functions: bool = True + serve_mean_policy: bool = True def __post_init_post_parse__(self): super().__post_init_post_parse__() - self._actor_network: Optional[ModelBase] = None self.rl_parameters = self.trainer_param.rl - def build_trainer(self) -> SACTrainer: + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> SACTrainer: actor_net_builder = self.actor_net_builder.value - # pyre-fixme[16]: `SAC` has no attribute `_actor_network`. - # pyre-fixme[16]: `SAC` has no attribute `_actor_network`. - self._actor_network = actor_net_builder.build_actor( - self.state_normalization_data, self.action_normalization_data + actor_network = actor_net_builder.build_actor( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], ) critic_net_builder = self.critic_net_builder.value - # pyre-fixme[16]: `SAC` has no attribute `_q1_network`. - # pyre-fixme[16]: `SAC` has no attribute `_q1_network`. - self._q1_network = critic_net_builder.build_q_network( - self.state_normalization_data, self.action_normalization_data + q1_network = critic_net_builder.build_q_network( + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], ) q2_network = ( critic_net_builder.build_q_network( - self.state_normalization_data, self.action_normalization_data + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], ) if self.use_2_q_functions else None ) value_network = None - if self.value_net_builder: - # pyre-fixme[16]: `Optional` has no attribute `value`. - # pyre-fixme[16]: `Optional` has no attribute `value`. 
- value_net_builder = self.value_net_builder.value + value_net_builder = self.value_net_builder + if value_net_builder: + value_net_builder = value_net_builder.value value_network = value_net_builder.build_value_network( - self.state_normalization_data + normalization_data_map[NormalizationKey.STATE] ) - if self.use_gpu: - self._q1_network.cuda() - if q2_network: - q2_network.cuda() - if value_network: - value_network.cuda() - self._actor_network.cuda() - - # pyre-fixme[29]: `Type[reagent.training.sac_trainer.SACTrainer]` is not a - # function. - # pyre-fixme[29]: `Type[reagent.training.sac_trainer.SACTrainer]` is not a - # function. trainer = SACTrainer( - actor_network=self._actor_network, - q1_network=self._q1_network, + actor_network=actor_network, + q1_network=q1_network, value_network=value_network, q2_network=q2_network, - use_gpu=self.use_gpu, # pyre-fixme[16]: `SACTrainerParameters` has no attribute `asdict`. # pyre-fixme[16]: `SACTrainerParameters` has no attribute `asdict`. **self.trainer_param.asdict(), ) return trainer - def build_serving_module(self) -> torch.nn.Module: - net_builder = self.actor_net_builder.value - assert self._actor_network is not None - return net_builder.build_serving_module( - self._actor_network, - self.state_normalization_data, - self.action_normalization_data, + def get_reporter(self): + return None + + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + assert isinstance(trainer_module, SACTrainer) + actor_serving_module = self.actor_net_builder.value.build_serving_module( + trainer_module.actor_network, + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], + serve_mean_policy=self.serve_mean_policy, ) + return actor_serving_module + + # TODO: add in critic + # assert self._q1_network is not None + # _critic_serving_module = self.critic_net_builder.value.build_serving_module( + # self._q1_network, + # self.state_normalization_data, + # self.action_normalization_data, + # ) diff --git a/reagent/workflow/model_managers/actor_critic/td3.py b/reagent/model_managers/actor_critic/td3.py similarity index 57% rename from reagent/workflow/model_managers/actor_critic/td3.py rename to reagent/model_managers/actor_critic/td3.py index 88cbf93ac..1f6e61a47 100644 --- a/reagent/workflow/model_managers/actor_critic/td3.py +++ b/reagent/model_managers/actor_critic/td3.py @@ -3,11 +3,17 @@ import logging -from typing import Optional +from typing import Dict, Optional import torch from reagent.core.dataclasses import dataclass, field -from reagent.models.base import ModelBase +from reagent.core.parameters import ( + EvaluationParameters, + NormalizationData, + NormalizationKey, + param_hash, +) +from reagent.model_managers.actor_critic_base import ActorCriticBase from reagent.net_builder.continuous_actor.fully_connected import ( FullyConnected as ContinuousFullyConnected, ) @@ -18,9 +24,9 @@ ContinuousActorNetBuilder__Union, ParametricDQNNetBuilder__Union, ) -from reagent.parameters import EvaluationParameters, param_hash -from reagent.training import TD3Trainer, TD3TrainerParameters -from reagent.workflow.model_managers.actor_critic_base import ActorCriticBase +from reagent.reporting.td3_reporter import TD3Reporter +from reagent.training import ReAgentLightningModule, TD3Trainer, TD3TrainerParameters +from reagent.workflow.types import RewardOptions logger = logging.getLogger(__name__) @@ 
-45,62 +51,64 @@ class TD3(ActorCriticBase): FullyConnected=ParametricFullyConnected() ) ) + # Why isn't this a parameter in the .yaml config file? use_2_q_functions: bool = True eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters) def __post_init_post_parse__(self): super().__post_init_post_parse__() - self._actor_network: Optional[ModelBase] = None self.rl_parameters = self.trainer_param.rl - def build_trainer(self) -> TD3Trainer: + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> TD3Trainer: actor_net_builder = self.actor_net_builder.value - # pyre-fixme[16]: `TD3` has no attribute `_actor_network`. - # pyre-fixme[16]: `TD3` has no attribute `_actor_network`. - self._actor_network = actor_net_builder.build_actor( - self.state_normalization_data, self.action_normalization_data + actor_network = actor_net_builder.build_actor( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], ) critic_net_builder = self.critic_net_builder.value - # pyre-fixme[16]: `TD3` has no attribute `_q1_network`. - # pyre-fixme[16]: `TD3` has no attribute `_q1_network`. - self._q1_network = critic_net_builder.build_q_network( - self.state_normalization_data, self.action_normalization_data + q1_network = critic_net_builder.build_q_network( + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], ) q2_network = ( critic_net_builder.build_q_network( - self.state_normalization_data, self.action_normalization_data + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], ) if self.use_2_q_functions else None ) - if self.use_gpu: - self._q1_network.cuda() - if q2_network: - q2_network.cuda() - self._actor_network.cuda() - - # pyre-fixme[29]: `Type[reagent.training.td3_trainer.TD3Trainer]` is not a - # function. - # pyre-fixme[29]: `Type[reagent.training.td3_trainer.TD3Trainer]` is not a - # function. trainer = TD3Trainer( - actor_network=self._actor_network, - q1_network=self._q1_network, + actor_network=actor_network, + q1_network=q1_network, q2_network=q2_network, - use_gpu=self.use_gpu, # pyre-fixme[16]: `TD3TrainerParameters` has no attribute `asdict`. # pyre-fixme[16]: `TD3TrainerParameters` has no attribute `asdict`. **self.trainer_param.asdict(), ) return trainer - def build_serving_module(self) -> torch.nn.Module: + def get_reporter(self): + return TD3Reporter() + + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + assert isinstance(trainer_module, TD3Trainer) net_builder = self.actor_net_builder.value - assert self._actor_network is not None return net_builder.build_serving_module( - self._actor_network, - self.state_normalization_data, - self.action_normalization_data, + trainer_module.actor_network, + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], ) diff --git a/reagent/model_managers/actor_critic_base.py b/reagent/model_managers/actor_critic_base.py new file mode 100644 index 000000000..79fde10c1 --- /dev/null +++ b/reagent/model_managers/actor_critic_base.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
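+#
+# Illustrative sketch of how the ActorPolicyWrapper defined below is used; actor_network
+# and state_dim are placeholders rather than objects constructed in this module:
+#
+#     policy = ActorPolicyWrapper(actor_network)
+#     action = policy.act(rlt.FeatureData(float_features=torch.randn(1, state_dim)))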
+ +import logging +from dataclasses import replace +from typing import Dict, List, Optional, Tuple + +import reagent.core.types as rlt +import torch +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import ( + EvaluationParameters, + NormalizationData, + NormalizationKey, +) +from reagent.data import DataFetcher, ManualDataModule, ReAgentDataModule +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model +from reagent.model_managers.model_manager import ModelManager +from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider +from reagent.preprocessing.batch_preprocessor import ( + BatchPreprocessor, + PolicyNetworkBatchPreprocessor, + Preprocessor, +) +from reagent.preprocessing.normalization import get_feature_config +from reagent.preprocessing.types import InputColumn +from reagent.reporting.actor_critic_reporter import ActorCriticReporter +from reagent.training import ReAgentLightningModule +from reagent.workflow.identify_types_flow import identify_normalization_parameters +from reagent.workflow.types import ( + Dataset, + ModelFeatureConfigProvider__Union, + PreprocessingOptions, + ReaderOptions, + ResourceOptions, + RewardOptions, + RLTrainingOutput, + TableSpec, +) + + +logger = logging.getLogger(__name__) + + +class ActorPolicyWrapper(Policy): + """Actor's forward function is our act""" + + def __init__(self, actor_network): + self.actor_network = actor_network + + @torch.no_grad() + def act( + self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None + ) -> rlt.ActorOutput: + self.actor_network.eval() + output = self.actor_network(obs) + self.actor_network.train() + return output.detach().cpu() + + +@dataclass +class ActorCriticBase(ModelManager): + state_preprocessing_options: Optional[PreprocessingOptions] = None + action_preprocessing_options: Optional[PreprocessingOptions] = None + action_feature_override: Optional[str] = None + state_feature_config_provider: ModelFeatureConfigProvider__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `raw`. 
+ default_factory=lambda: ModelFeatureConfigProvider__Union( + raw=RawModelFeatureConfigProvider(float_feature_infos=[]) + ) + ) + action_float_features: List[Tuple[int, str]] = field(default_factory=list) + reader_options: Optional[ReaderOptions] = None + eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters) + save_critic_bool: bool = True + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + assert ( + self.state_preprocessing_options is None + or self.state_preprocessing_options.allowedlist_features is None + ), ( + "Please set state allowlist features in state_float_features field of " + "config instead" + ) + assert ( + self.action_preprocessing_options is None + or self.action_preprocessing_options.allowedlist_features is None + ), ( + "Please set action allowlist features in action_float_features field of " + "config instead" + ) + + def create_policy( + self, + trainer_module: ReAgentLightningModule, + serving: bool = False, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + ) -> Policy: + """Create online actor critic policy.""" + + if serving: + assert normalization_data_map + return create_predictor_policy_from_model( + self.build_serving_module(trainer_module, normalization_data_map) + ) + else: + return ActorPolicyWrapper(trainer_module.actor_network) + + @property + def state_feature_config(self) -> rlt.ModelFeatureConfig: + return self.state_feature_config_provider.value.get_model_feature_config() + + @property + def action_feature_config(self) -> rlt.ModelFeatureConfig: + assert len(self.action_float_features) > 0, "You must set action_float_features" + return get_feature_config(self.action_float_features) + + def get_state_preprocessing_options(self) -> PreprocessingOptions: + state_preprocessing_options = ( + self.state_preprocessing_options or PreprocessingOptions() + ) + state_features = [ + ffi.feature_id for ffi in self.state_feature_config.float_feature_infos + ] + logger.info(f"state allowedlist_features: {state_features}") + state_preprocessing_options = replace( + state_preprocessing_options, allowedlist_features=state_features + ) + return state_preprocessing_options + + def get_action_preprocessing_options(self) -> PreprocessingOptions: + action_preprocessing_options = ( + self.action_preprocessing_options or PreprocessingOptions() + ) + action_features = [ + ffi.feature_id for ffi in self.action_feature_config.float_feature_infos + ] + logger.info(f"action allowedlist_features: {action_features}") + + # pyre-fixme + actor_net_builder = self.actor_net_builder.value + action_feature_override = actor_net_builder.default_action_preprocessing + logger.info(f"Default action_feature_override is {action_feature_override}") + if self.action_feature_override is not None: + action_feature_override = self.action_feature_override + + assert action_preprocessing_options.feature_overrides is None + action_preprocessing_options = replace( + action_preprocessing_options, + allowedlist_features=action_features, + feature_overrides={fid: action_feature_override for fid in action_features}, + ) + return action_preprocessing_options + + def get_data_module( + self, + *, + input_table_spec: Optional[TableSpec] = None, + reward_options: Optional[RewardOptions] = None, + reader_options: Optional[ReaderOptions] = None, + setup_data: Optional[Dict[str, bytes]] = None, + saved_setup_data: Optional[Dict[str, bytes]] = None, + resource_options: Optional[ResourceOptions] = None, + ) -> 
Optional[ReAgentDataModule]: + return ActorCriticDataModule( + input_table_spec=input_table_spec, + reward_options=reward_options, + setup_data=setup_data, + saved_setup_data=saved_setup_data, + reader_options=reader_options, + resource_options=resource_options, + model_manager=self, + ) + + def get_reporter(self): + return ActorCriticReporter() + + +class ActorCriticDataModule(ManualDataModule): + def run_feature_identification( + self, input_table_spec: TableSpec + ) -> Dict[str, NormalizationData]: + """ + Derive preprocessing parameters from data. + """ + # Run state feature identification + state_normalization_parameters = identify_normalization_parameters( + input_table_spec, + InputColumn.STATE_FEATURES, + self.model_manager.get_state_preprocessing_options(), + ) + + # Run action feature identification + action_normalization_parameters = identify_normalization_parameters( + input_table_spec, + InputColumn.ACTION, + self.model_manager.get_action_preprocessing_options(), + ) + + return { + NormalizationKey.STATE: NormalizationData( + dense_normalization_parameters=state_normalization_parameters + ), + NormalizationKey.ACTION: NormalizationData( + dense_normalization_parameters=action_normalization_parameters + ), + } + + @property + def should_generate_eval_dataset(self) -> bool: + return self.model_manager.eval_parameters.calc_cpe_in_training + + def query_data( + self, + input_table_spec: TableSpec, + sample_range: Optional[Tuple[float, float]], + reward_options: RewardOptions, + data_fetcher: DataFetcher, + ) -> Dataset: + return data_fetcher.query_data( + input_table_spec=input_table_spec, + discrete_action=False, + include_possible_actions=False, + custom_reward_expression=reward_options.custom_reward_expression, + sample_range=sample_range, + ) + + def build_batch_preprocessor(self) -> BatchPreprocessor: + state_preprocessor = Preprocessor( + self.state_normalization_data.dense_normalization_parameters, + ) + action_preprocessor = Preprocessor( + self.action_normalization_data.dense_normalization_parameters, + ) + return PolicyNetworkBatchPreprocessor( + state_preprocessor=state_preprocessor, + action_preprocessor=action_preprocessor, + ) diff --git a/reagent/workflow/model_managers/discrete/__init__.py b/reagent/model_managers/discrete/__init__.py similarity index 65% rename from reagent/workflow/model_managers/discrete/__init__.py rename to reagent/model_managers/discrete/__init__.py index b4008a02b..5bc06f3a3 100644 --- a/reagent/workflow/model_managers/discrete/__init__.py +++ b/reagent/model_managers/discrete/__init__.py @@ -2,8 +2,8 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from .discrete_c51dqn import DiscreteC51DQN +from .discrete_crr import DiscreteCRR from .discrete_dqn import DiscreteDQN from .discrete_qrdqn import DiscreteQRDQN - -__all__ = ["DiscreteC51DQN", "DiscreteDQN", "DiscreteQRDQN"] +__all__ = ["DiscreteC51DQN", "DiscreteDQN", "DiscreteQRDQN", "DiscreteCRR"] diff --git a/reagent/workflow/model_managers/discrete/discrete_c51dqn.py b/reagent/model_managers/discrete/discrete_c51dqn.py similarity index 69% rename from reagent/workflow/model_managers/discrete/discrete_c51dqn.py rename to reagent/model_managers/discrete/discrete_c51dqn.py index b024a399b..487defc64 100644 --- a/reagent/workflow/model_managers/discrete/discrete_c51dqn.py +++ b/reagent/model_managers/discrete/discrete_c51dqn.py @@ -1,15 +1,16 @@ #!/usr/bin/env python3 - +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import logging +from typing import Dict, Optional import torch from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash +from reagent.model_managers.discrete_dqn_base import DiscreteDQNBase from reagent.net_builder.categorical_dqn.categorical import Categorical from reagent.net_builder.unions import CategoricalDQNNetBuilder__Union -from reagent.parameters import param_hash -from reagent.training import C51Trainer, C51TrainerParameters -from reagent.training.loss_reporter import NoOpLossReporter -from reagent.workflow.model_managers.discrete_dqn_base import DiscreteDQNBase +from reagent.training import C51Trainer, C51TrainerParameters, ReAgentLightningModule +from reagent.workflow.types import RewardOptions logger = logging.getLogger(__name__) @@ -37,17 +38,28 @@ class DiscreteC51DQN(DiscreteDQNBase): def __post_init_post_parse__(self): super().__post_init_post_parse__() - self.rl_parameters = self.trainer_param.rl - self.action_names = self.trainer_param.actions assert len(self.action_names) > 1, "DiscreteC51DQN needs at least 2 actions" assert ( self.trainer_param.minibatch_size % 8 == 0 ), "The minibatch size must be divisible by 8 for performance reasons." - def build_trainer(self) -> C51Trainer: + @property + def action_names(self): + return self.trainer_param.actions + + @property + def rl_parameters(self): + return self.trainer_param.rl + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> C51Trainer: net_builder = self.net_builder.value q_network = net_builder.build_q_network( - state_normalization_data=self.state_normalization_data, + state_normalization_data=normalization_data_map[NormalizationKey.STATE], output_dim=len(self.action_names), # pyre-fixme[16]: `C51TrainerParameters` has no attribute `num_atoms`. # pyre-fixme[16]: `C51TrainerParameters` has no attribute `num_atoms`. @@ -60,39 +72,29 @@ def build_trainer(self) -> C51Trainer: qmax=self.trainer_param.qmax, ) - if self.use_gpu: - q_network = q_network.cuda() - q_network_target = q_network.get_target_network() - # pyre-fixme[16]: `DiscreteC51DQN` has no attribute `_q_network`. - # pyre-fixme[16]: `DiscreteC51DQN` has no attribute `_q_network`. - self._q_network = q_network - - # pyre-fixme[29]: `Type[reagent.training.c51_trainer.C51Trainer]` is not a - # function. - # pyre-fixme[29]: `Type[reagent.training.c51_trainer.C51Trainer]` is not a - # function. return C51Trainer( q_network=q_network, q_network_target=q_network_target, - metrics_to_score=self.metrics_to_score, - loss_reporter=NoOpLossReporter(), - use_gpu=self.use_gpu, # pyre-fixme[16]: `C51TrainerParameters` has no attribute `asdict`. # pyre-fixme[16]: `C51TrainerParameters` has no attribute `asdict`. 
**self.trainer_param.asdict(), ) - def build_serving_module(self) -> torch.nn.Module: + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: """ Returns a TorchScript predictor module """ - assert self._q_network is not None, "_q_network was not initialized" + assert isinstance(trainer_module, C51Trainer) net_builder = self.net_builder.value return net_builder.build_serving_module( - self._q_network, - self.state_normalization_data, + trainer_module.q_network, + normalization_data_map[NormalizationKey.STATE], action_names=self.action_names, state_feature_config=self.state_feature_config, ) diff --git a/reagent/model_managers/discrete/discrete_crr.py b/reagent/model_managers/discrete/discrete_crr.py new file mode 100644 index 000000000..1ce5c1a96 --- /dev/null +++ b/reagent/model_managers/discrete/discrete_crr.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +# Note: this file is modeled after td3.py + +import logging +from typing import Dict, Optional + +import reagent.core.types as rlt +import torch +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import ( + EvaluationParameters, + NormalizationData, + NormalizationKey, + param_hash, +) +from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model +from reagent.model_managers.discrete_dqn_base import DiscreteDQNBase +from reagent.models.base import ModelBase +from reagent.net_builder.discrete_actor.fully_connected import ( + FullyConnected as DiscreteFullyConnected, +) +from reagent.net_builder.discrete_dqn.dueling import Dueling +from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected +from reagent.net_builder.unions import ( + DiscreteActorNetBuilder__Union, + DiscreteDQNNetBuilder__Union, +) +from reagent.prediction.cfeval.predictor_wrapper import BanditRewardNetPredictorWrapper +from reagent.reporting.discrete_crr_reporter import DiscreteCRRReporter +from reagent.training import ( + CRRTrainerParameters, + DiscreteCRRTrainer, + ReAgentLightningModule, +) +from reagent.workflow.types import RewardOptions + +logger = logging.getLogger(__name__) + + +class ActorPolicyWrapper(Policy): + """Actor's forward function is our act""" + + def __init__(self, actor_network): + self.actor_network = actor_network + + @torch.no_grad() + def act( + self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None + ) -> rlt.ActorOutput: + self.actor_network.eval() + output = self.actor_network(obs) + self.actor_network.train() + return output.detach().cpu() + + +@dataclass +class DiscreteCRR(DiscreteDQNBase): + __hash__ = param_hash + + trainer_param: CRRTrainerParameters = field(default_factory=CRRTrainerParameters) + + actor_net_builder: DiscreteActorNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. + default_factory=lambda: DiscreteActorNetBuilder__Union( + FullyConnected=DiscreteFullyConnected() + ) + ) + + critic_net_builder: DiscreteDQNNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. + default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling()) + ) + + cpe_net_builder: DiscreteDQNNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. 
+ default_factory=lambda: DiscreteDQNNetBuilder__Union( + FullyConnected=FullyConnected() + ) + ) + + eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters) + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + assert ( + len(self.action_names) > 1 + ), f"DiscreteCRRModel needs at least 2 actions. Got {self.action_names}." + + @property + def action_names(self): + return self.trainer_param.actions + + @property + def rl_parameters(self): + return self.trainer_param.rl + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> DiscreteCRRTrainer: + actor_net_builder = self.actor_net_builder.value + actor_network = actor_net_builder.build_actor( + normalization_data_map[NormalizationKey.STATE], len(self.action_names) + ) + actor_network_target = actor_network.get_target_network() + + # The arguments to q_network1 and q_network2 below are modeled after those in discrete_dqn.py + critic_net_builder = self.critic_net_builder.value + + q1_network = critic_net_builder.build_q_network( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + len(self.action_names), + ) + q1_network_target = q1_network.get_target_network() + + q2_network = q2_network_target = None + # pyre-fixme[16]: `CRRTrainerParameters` has no attribute + # `double_q_learning`. + if self.trainer_param.double_q_learning: + q2_network = critic_net_builder.build_q_network( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + len(self.action_names), + ) + q2_network_target = q2_network.get_target_network() + + reward_options = reward_options or RewardOptions() + metrics_to_score = get_metrics_to_score(reward_options.metric_reward_values) + + reward_network, q_network_cpe, q_network_cpe_target = None, None, None + if self.eval_parameters.calc_cpe_in_training: + # Metrics + reward + num_output_nodes = (len(metrics_to_score) + 1) * len( + # pyre-fixme[16]: `CRRTrainerParameters` has no attribute `actions`. + self.trainer_param.actions + ) + + cpe_net_builder = self.cpe_net_builder.value + reward_network = cpe_net_builder.build_q_network( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + num_output_nodes, + ) + q_network_cpe = cpe_net_builder.build_q_network( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + num_output_nodes, + ) + + q_network_cpe_target = q_network_cpe.get_target_network() + + trainer = DiscreteCRRTrainer( + actor_network=actor_network, + actor_network_target=actor_network_target, + q1_network=q1_network, + q1_network_target=q1_network_target, + reward_network=reward_network, + q2_network=q2_network, + q2_network_target=q2_network_target, + q_network_cpe=q_network_cpe, + q_network_cpe_target=q_network_cpe_target, + metrics_to_score=metrics_to_score, + evaluation=self.eval_parameters, + # pyre-fixme[16]: `CRRTrainerParameters` has no attribute `asdict`. 
+ **self.trainer_param.asdict(), + ) + return trainer + + def create_policy( + self, + trainer_module: ReAgentLightningModule, + serving: bool = False, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + ) -> Policy: + """Create online actor critic policy.""" + assert isinstance(trainer_module, DiscreteCRRTrainer) + if serving: + assert normalization_data_map + return create_predictor_policy_from_model( + self.build_actor_module(trainer_module, normalization_data_map) + ) + else: + return ActorPolicyWrapper(trainer_module.actor_network) + + def get_reporter(self): + return DiscreteCRRReporter( + self.trainer_param.actions, + target_action_distribution=self.target_action_distribution, + ) + + # Note: when using test_gym.py as the entry point, the normalization data + # is set when the line normalization = build_normalizer(env) is executed. + # The code then calls build_state_normalizer() and build_action_normalizer() + # in utils.py + + def serving_module_names(self): + module_names = ["default_model", "dqn", "actor_dqn"] + if len(self.action_names) == 2: + module_names.append("binary_difference_scorer") + if self.eval_parameters.calc_cpe_in_training: + module_names.append("reward_model") + return module_names + + def build_serving_modules( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ): + """ + `actor_dqn` is the actor module wrapped in the DQN predictor wrapper. + This helps putting the actor in places where DQN predictor wrapper is expected. + If the policy is greedy, then this wrapper would work. + """ + assert isinstance(trainer_module, DiscreteCRRTrainer) + serving_modules = { + "default_model": self.build_actor_module( + trainer_module, normalization_data_map + ), + "dqn": self._build_dqn_module( + trainer_module.q1_network, normalization_data_map + ), + "actor_dqn": self._build_dqn_module( + ActorDQN(trainer_module.actor_network), normalization_data_map + ), + } + if len(self.action_names) == 2: + serving_modules.update( + { + "binary_difference_scorer": self._build_binary_difference_scorer( + ActorDQN(trainer_module.actor_network), normalization_data_map + ), + } + ) + if self.eval_parameters.calc_cpe_in_training: + serving_modules.update( + { + "reward_model": self.build_reward_module( + trainer_module, normalization_data_map + ) + } + ) + return serving_modules + + def _build_dqn_module( + self, + network, + normalization_data_map: Dict[str, NormalizationData], + ): + critic_net_builder = self.critic_net_builder.value + assert network is not None + return critic_net_builder.build_serving_module( + network, + normalization_data_map[NormalizationKey.STATE], + action_names=self.action_names, + state_feature_config=self.state_feature_config, + ) + + def _build_binary_difference_scorer( + self, + network, + normalization_data_map: Dict[str, NormalizationData], + ): + critic_net_builder = self.critic_net_builder.value + assert network is not None + return critic_net_builder.build_binary_difference_scorer( + network, + normalization_data_map[NormalizationKey.STATE], + action_names=self.action_names, + state_feature_config=self.state_feature_config, + ) + + # Also, even though the build_serving_module below is directed to + # discrete_actor_net_builder.py, which returns ActorPredictorWrapper, + # just like in the continuous_actor_net_builder.py, the outputs of the + # discrete actor will still be computed differently from those of the + # continuous actor because during serving, the act() function for 
the + # Agent class in gym/agents/agents.py returns + # self.action_extractor(actor_output), which is created in + # create_for_env_with_serving_policy, when + # env.get_serving_action_extractor() is called. During serving, + # action_extractor calls serving_action_extractor() in env_wrapper.py, + # which checks the type of action_space during serving time and treats + # spaces.Discrete differently from spaces.Box (continuous). + def build_actor_module( + self, + trainer_module: DiscreteCRRTrainer, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + net_builder = self.actor_net_builder.value + return net_builder.build_serving_module( + trainer_module.actor_network, + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + action_feature_ids=list(range(len(self.action_names))), + ) + + def build_reward_module( + self, + trainer_module: DiscreteCRRTrainer, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + assert trainer_module.reward_network is not None + net_builder = self.cpe_net_builder.value + return net_builder.build_serving_module( + trainer_module.reward_network, + normalization_data_map[NormalizationKey.STATE], + action_names=self.action_names, + state_feature_config=self.state_feature_config, + predictor_wrapper_type=BanditRewardNetPredictorWrapper, + ) + + +class ActorDQN(ModelBase): + def __init__(self, actor): + super().__init__() + self.actor = actor + + def input_prototype(self): + return self.actor.input_prototype() + + def forward(self, state): + return self.actor(state).action diff --git a/reagent/model_managers/discrete/discrete_dqn.py b/reagent/model_managers/discrete/discrete_dqn.py new file mode 100644 index 000000000..ecc551b91 --- /dev/null +++ b/reagent/model_managers/discrete/discrete_dqn.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging +from typing import Dict, Optional + +import torch +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash +from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.model_managers.discrete_dqn_base import DiscreteDQNBase +from reagent.net_builder.discrete_dqn.dueling import Dueling +from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected +from reagent.net_builder.unions import DiscreteDQNNetBuilder__Union +from reagent.prediction.cfeval.predictor_wrapper import BanditRewardNetPredictorWrapper +from reagent.reporting.discrete_dqn_reporter import DiscreteDQNReporter +from reagent.training import DQNTrainer, DQNTrainerParameters, ReAgentLightningModule +from reagent.training.dqn_trainer_base import DQNTrainerBaseLightning +from reagent.workflow.types import RewardOptions + + +logger = logging.getLogger(__name__) + + +@dataclass +class DiscreteDQN(DiscreteDQNBase): + __hash__ = param_hash + + trainer_param: DQNTrainerParameters = field(default_factory=DQNTrainerParameters) + net_builder: DiscreteDQNNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `Dueling`. + default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling()) + ) + cpe_net_builder: DiscreteDQNNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. 
+ default_factory=lambda: DiscreteDQNNetBuilder__Union( + FullyConnected=FullyConnected() + ) + ) + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + assert ( + len(self.action_names) > 1 + ), f"DiscreteDQNModel needs at least 2 actions. Got {self.action_names}." + if self.trainer_param.minibatch_size % 8 != 0: + logger.warn( + f"minibatch size ({self.trainer_param.minibatch_size}) " + "should be divisible by 8 for performance reasons!" + ) + + @property + def action_names(self): + return self.trainer_param.actions + + @property + def rl_parameters(self): + return self.trainer_param.rl + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> DQNTrainer: + net_builder = self.net_builder.value + q_network = net_builder.build_q_network( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + len(self.action_names), + ) + + q_network_target = q_network.get_target_network() + + reward_options = reward_options or RewardOptions() + metrics_to_score = get_metrics_to_score(reward_options.metric_reward_values) + + reward_network, q_network_cpe, q_network_cpe_target = None, None, None + if self.eval_parameters.calc_cpe_in_training: + # Metrics + reward + num_output_nodes = (len(metrics_to_score) + 1) * len( + # pyre-fixme[16]: `DQNTrainerParameters` has no attribute `actions`. + self.trainer_param.actions + ) + + cpe_net_builder = self.cpe_net_builder.value + reward_network = cpe_net_builder.build_q_network( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + num_output_nodes, + ) + q_network_cpe = cpe_net_builder.build_q_network( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + num_output_nodes, + ) + + q_network_cpe_target = q_network_cpe.get_target_network() + + trainer = DQNTrainer( + q_network=q_network, + q_network_target=q_network_target, + reward_network=reward_network, + q_network_cpe=q_network_cpe, + q_network_cpe_target=q_network_cpe_target, + metrics_to_score=metrics_to_score, + evaluation=self.eval_parameters, + # pyre-fixme[16]: `DQNTrainerParameters` has no attribute `asdict`. 
+ **self.trainer_param.asdict(), + ) + return trainer + + def get_reporter(self): + return DiscreteDQNReporter( + self.trainer_param.actions, + target_action_distribution=self.target_action_distribution, + ) + + def serving_module_names(self): + module_names = ["default_model"] + if len(self.action_names) == 2: + module_names.append("binary_difference_scorer") + if self.eval_parameters.calc_cpe_in_training: + module_names.append("reward_model") + return module_names + + def build_serving_modules( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ): + assert isinstance(trainer_module, DQNTrainer) + serving_modules = { + "default_model": self.build_serving_module( + trainer_module, normalization_data_map + ) + } + if len(self.action_names) == 2: + serving_modules.update( + { + "binary_difference_scorer": self._build_binary_difference_scorer( + trainer_module.q_network, normalization_data_map + ) + } + ) + if self.eval_parameters.calc_cpe_in_training: + serving_modules.update( + { + "reward_model": self.build_reward_module( + trainer_module, normalization_data_map + ) + } + ) + return serving_modules + + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + """ + Returns a TorchScript predictor module + """ + assert isinstance(trainer_module, DQNTrainer) + + net_builder = self.net_builder.value + return net_builder.build_serving_module( + trainer_module.q_network, + normalization_data_map[NormalizationKey.STATE], + action_names=self.action_names, + state_feature_config=self.state_feature_config, + ) + + def _build_binary_difference_scorer( + self, + network, + normalization_data_map: Dict[str, NormalizationData], + ): + assert network is not None + net_builder = self.net_builder.value + return net_builder.build_binary_difference_scorer( + network, + normalization_data_map[NormalizationKey.STATE], + action_names=self.action_names, + state_feature_config=self.state_feature_config, + ) + + def build_reward_module( + self, + trainer_module: DQNTrainerBaseLightning, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + assert trainer_module.reward_network is not None + net_builder = self.cpe_net_builder.value + return net_builder.build_serving_module( + trainer_module.reward_network, + normalization_data_map[NormalizationKey.STATE], + action_names=self.action_names, + state_feature_config=self.state_feature_config, + predictor_wrapper_type=BanditRewardNetPredictorWrapper, + ) diff --git a/reagent/workflow/model_managers/discrete/discrete_qrdqn.py b/reagent/model_managers/discrete/discrete_qrdqn.py similarity index 51% rename from reagent/workflow/model_managers/discrete/discrete_qrdqn.py rename to reagent/model_managers/discrete/discrete_qrdqn.py index cb784d561..c4073f205 100644 --- a/reagent/workflow/model_managers/discrete/discrete_qrdqn.py +++ b/reagent/model_managers/discrete/discrete_qrdqn.py @@ -1,36 +1,25 @@ #!/usr/bin/env python3 - +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import logging +from typing import Dict, Optional import torch from reagent.core.dataclasses import dataclass, field -from reagent.gym.policies.policy import Policy -from reagent.gym.policies.samplers.discrete_sampler import ( - GreedyActionSampler, - SoftmaxActionSampler, -) -from reagent.gym.policies.scorers.discrete_scorer import ( - discrete_dqn_serving_scorer, - discrete_qrdqn_scorer, -) +from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash +from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.model_managers.discrete_dqn_base import DiscreteDQNBase from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected from reagent.net_builder.quantile_dqn.dueling_quantile import DuelingQuantile from reagent.net_builder.unions import ( DiscreteDQNNetBuilder__Union, QRDQNNetBuilder__Union, ) -from reagent.parameters import param_hash -from reagent.training import QRDQNTrainer, QRDQNTrainerParameters -from reagent.training.loss_reporter import NoOpLossReporter -from reagent.workflow.model_managers.discrete_dqn_base import DiscreteDQNBase - - -try: - from reagent.fb.prediction.fb_predictor_wrapper import ( - FbDiscreteDqnPredictorUnwrapper as DiscreteDqnPredictorUnwrapper, - ) -except ImportError: - from reagent.prediction.predictor_wrapper import DiscreteDqnPredictorUnwrapper +from reagent.training import ( + QRDQNTrainer, + QRDQNTrainerParameters, + ReAgentLightningModule, +) +from reagent.workflow.types import RewardOptions logger = logging.getLogger(__name__) @@ -58,47 +47,42 @@ class DiscreteQRDQN(DiscreteDQNBase): def __post_init_post_parse__(self): super().__post_init_post_parse__() - self.rl_parameters = self.trainer_param.rl - self.action_names = self.trainer_param.actions assert len(self.action_names) > 1, "DiscreteQRDQNModel needs at least 2 actions" assert ( self.trainer_param.minibatch_size % 8 == 0 ), "The minibatch size must be divisible by 8 for performance reasons." - def create_policy(self, serving: bool) -> Policy: - if serving: - sampler = GreedyActionSampler() - scorer = discrete_dqn_serving_scorer( - DiscreteDqnPredictorUnwrapper(self.build_serving_module()) - ) - else: - sampler = SoftmaxActionSampler(temperature=self.rl_parameters.temperature) - # pyre-fixme[16]: `RLTrainer` has no attribute `q_network`. - scorer = discrete_qrdqn_scorer(self.trainer.q_network) - return Policy(scorer=scorer, sampler=sampler) + @property + def action_names(self): + return self.trainer_param.actions - def build_trainer(self) -> QRDQNTrainer: + @property + def rl_parameters(self): + return self.trainer_param.rl + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> QRDQNTrainer: net_builder = self.net_builder.value q_network = net_builder.build_q_network( - self.state_normalization_data, + normalization_data_map[NormalizationKey.STATE], len(self.action_names), # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `num_atoms`. - # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `num_atoms`. num_atoms=self.trainer_param.num_atoms, ) - if self.use_gpu: - q_network = q_network.cuda() - q_network_target = q_network.get_target_network() + reward_options = reward_options or RewardOptions() + metrics_to_score = get_metrics_to_score(reward_options.metric_reward_values) + reward_network, q_network_cpe, q_network_cpe_target = None, None, None - # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `evaluation`. 
- # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `evaluation`. - if self.trainer_param.evaluation.calc_cpe_in_training: + if self.eval_parameters.calc_cpe_in_training: # Metrics + reward - num_output_nodes = (len(self.metrics_to_score) + 1) * len( - # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `actions`. + num_output_nodes = (len(metrics_to_score) + 1) * len( # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `actions`. self.trainer_param.actions ) @@ -106,51 +90,43 @@ def build_trainer(self) -> QRDQNTrainer: cpe_net_builder = self.cpe_net_builder.value reward_network = cpe_net_builder.build_q_network( self.state_feature_config, - self.state_normalization_data, + normalization_data_map[NormalizationKey.STATE], num_output_nodes, ) q_network_cpe = cpe_net_builder.build_q_network( self.state_feature_config, - self.state_normalization_data, + normalization_data_map[NormalizationKey.STATE], num_output_nodes, ) - if self.use_gpu: - reward_network.cuda() - q_network_cpe.cuda() - q_network_cpe_target = q_network_cpe.get_target_network() - # pyre-fixme[16]: `DiscreteQRDQN` has no attribute `_q_network`. - self._q_network = q_network - # pyre-fixme[29]: `Type[reagent.training.qrdqn_trainer.QRDQNTrainer]` is not - # a function. - # pyre-fixme[29]: `Type[reagent.training.qrdqn_trainer.QRDQNTrainer]` is not - # a function. trainer = QRDQNTrainer( q_network=q_network, q_network_target=q_network_target, reward_network=reward_network, q_network_cpe=q_network_cpe, q_network_cpe_target=q_network_cpe_target, - metrics_to_score=self.metrics_to_score, - loss_reporter=NoOpLossReporter(), - use_gpu=self.use_gpu, - # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `asdict`. + metrics_to_score=metrics_to_score, + evaluation=self.eval_parameters, # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `asdict`. **self.trainer_param.asdict(), ) return trainer - def build_serving_module(self) -> torch.nn.Module: + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: """ Returns a TorchScript predictor module """ - assert self._q_network is not None, "_q_network was not initialized" + assert isinstance(trainer_module, QRDQNTrainer) net_builder = self.net_builder.value return net_builder.build_serving_module( - self._q_network, - self.state_normalization_data, + trainer_module.q_network, + normalization_data_map[NormalizationKey.STATE], action_names=self.action_names, state_feature_config=self.state_feature_config, ) diff --git a/reagent/model_managers/discrete_dqn_base.py b/reagent/model_managers/discrete_dqn_base.py new file mode 100644 index 000000000..9d171e6a6 --- /dev/null +++ b/reagent/model_managers/discrete_dqn_base.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+import abc +import logging +from dataclasses import replace +from typing import Dict, List, Optional, Tuple + +from reagent.core import types as rlt +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import ( + EvaluationParameters, + NormalizationData, + NormalizationKey, + RLParameters, +) +from reagent.data.data_fetcher import DataFetcher +from reagent.data.manual_data_module import ManualDataModule +from reagent.data.reagent_data_module import ReAgentDataModule +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model +from reagent.gym.policies.samplers.discrete_sampler import GreedyActionSampler +from reagent.gym.policies.scorers.discrete_scorer import discrete_dqn_scorer +from reagent.model_managers.model_manager import ModelManager +from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider +from reagent.preprocessing.batch_preprocessor import ( + BatchPreprocessor, + DiscreteDqnBatchPreprocessor, +) +from reagent.preprocessing.preprocessor import Preprocessor +from reagent.preprocessing.types import InputColumn +from reagent.reporting.discrete_dqn_reporter import DiscreteDQNReporter +from reagent.training import ReAgentLightningModule +from reagent.workflow.identify_types_flow import identify_normalization_parameters +from reagent.workflow.types import ( + Dataset, + ModelFeatureConfigProvider__Union, + PreprocessingOptions, + ReaderOptions, + ResourceOptions, + RewardOptions, + TableSpec, +) + +logger = logging.getLogger(__name__) + + +@dataclass +class DiscreteDQNBase(ModelManager): + target_action_distribution: Optional[List[float]] = None + state_feature_config_provider: ModelFeatureConfigProvider__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `raw`. + default_factory=lambda: ModelFeatureConfigProvider__Union( + raw=RawModelFeatureConfigProvider(float_feature_infos=[]) + ) + ) + preprocessing_options: Optional[PreprocessingOptions] = None + reader_options: Optional[ReaderOptions] = None + eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters) + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + + @property + @abc.abstractmethod + def rl_parameters(self) -> RLParameters: + pass + + @property + @abc.abstractmethod + def action_names(self) -> List[str]: + # Returns the list of possible actions for this instance of problem + pass + + def create_policy( + self, + trainer_module: ReAgentLightningModule, + serving: bool = False, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + ) -> Policy: + """Create an online DiscreteDQN Policy from env.""" + if serving: + assert normalization_data_map + return create_predictor_policy_from_model( + self.build_serving_module(trainer_module, normalization_data_map), + rl_parameters=self.rl_parameters, + ) + else: + sampler = GreedyActionSampler() + # pyre-fixme[6]: Expected `ModelBase` for 1st param but got + # `Union[torch.Tensor, torch.nn.Module]`. 
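+            # Non-serving path: score actions with the raw q_network and act greedily.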
+ scorer = discrete_dqn_scorer(trainer_module.q_network) + return Policy(scorer=scorer, sampler=sampler) + + @property + def state_feature_config(self) -> rlt.ModelFeatureConfig: + return self.state_feature_config_provider.value.get_model_feature_config() + + def get_state_preprocessing_options(self) -> PreprocessingOptions: + state_preprocessing_options = ( + self.preprocessing_options or PreprocessingOptions() + ) + state_features = [ + ffi.feature_id for ffi in self.state_feature_config.float_feature_infos + ] + logger.info(f"state allowedlist_features: {state_features}") + state_preprocessing_options = replace( + state_preprocessing_options, allowedlist_features=state_features + ) + return state_preprocessing_options + + @property + def multi_steps(self) -> Optional[int]: + return self.rl_parameters.multi_steps + + def get_data_module( + self, + *, + input_table_spec: Optional[TableSpec] = None, + reward_options: Optional[RewardOptions] = None, + reader_options: Optional[ReaderOptions] = None, + setup_data: Optional[Dict[str, bytes]] = None, + saved_setup_data: Optional[Dict[str, bytes]] = None, + resource_options: Optional[ResourceOptions] = None, + ) -> Optional[ReAgentDataModule]: + return DiscreteDqnDataModule( + input_table_spec=input_table_spec, + reward_options=reward_options, + setup_data=setup_data, + saved_setup_data=saved_setup_data, + reader_options=reader_options, + resource_options=resource_options, + model_manager=self, + ) + + def get_reporter(self): + return DiscreteDQNReporter( + self.trainer_param.actions, + target_action_distribution=self.target_action_distribution, + ) + + +class DiscreteDqnDataModule(ManualDataModule): + @property + def should_generate_eval_dataset(self) -> bool: + return self.model_manager.eval_parameters.calc_cpe_in_training + + def run_feature_identification( + self, input_table_spec: TableSpec + ) -> Dict[str, NormalizationData]: + preprocessing_options = ( + self.model_manager.preprocessing_options or PreprocessingOptions() + ) + state_features = [ + ffi.feature_id + for ffi in self.model_manager.state_feature_config.float_feature_infos + ] + logger.info(f"Overriding allowedlist_features: {state_features}") + preprocessing_options = replace( + preprocessing_options, allowedlist_features=state_features + ) + return { + NormalizationKey.STATE: NormalizationData( + dense_normalization_parameters=identify_normalization_parameters( + input_table_spec, InputColumn.STATE_FEATURES, preprocessing_options + ) + ) + } + + def query_data( + self, + input_table_spec: TableSpec, + sample_range: Optional[Tuple[float, float]], + reward_options: RewardOptions, + data_fetcher: DataFetcher, + ) -> Dataset: + return data_fetcher.query_data( + input_table_spec=input_table_spec, + discrete_action=True, + actions=self.model_manager.action_names, + include_possible_actions=True, + sample_range=sample_range, + custom_reward_expression=reward_options.custom_reward_expression, + multi_steps=self.model_manager.multi_steps, + gamma=self.model_manager.rl_parameters.gamma, + ) + + def build_batch_preprocessor(self) -> BatchPreprocessor: + state_preprocessor = Preprocessor( + self.state_normalization_data.dense_normalization_parameters, + ) + return DiscreteDqnBatchPreprocessor( + num_actions=len(self.model_manager.action_names), + state_preprocessor=state_preprocessor, + ) diff --git a/reagent/workflow/model_managers/model_based/__init__.py b/reagent/model_managers/model_based/__init__.py similarity index 51% rename from 
reagent/workflow/model_managers/model_based/__init__.py rename to reagent/model_managers/model_based/__init__.py index c2a55955f..5d08ea972 100644 --- a/reagent/workflow/model_managers/model_based/__init__.py +++ b/reagent/model_managers/model_based/__init__.py @@ -2,7 +2,9 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from .cross_entropy_method import CrossEntropyMethod +from .seq2reward_model import Seq2RewardModel +from .synthetic_reward import SyntheticReward from .world_model import WorldModel -__all__ = ["WorldModel", "CrossEntropyMethod"] +__all__ = ["WorldModel", "CrossEntropyMethod", "Seq2RewardModel", "SyntheticReward"] diff --git a/reagent/workflow/model_managers/model_based/cross_entropy_method.py b/reagent/model_managers/model_based/cross_entropy_method.py similarity index 65% rename from reagent/workflow/model_managers/model_based/cross_entropy_method.py rename to reagent/model_managers/model_based/cross_entropy_method.py index 6a64c6d17..41bdac0bb 100644 --- a/reagent/workflow/model_managers/model_based/cross_entropy_method.py +++ b/reagent/model_managers/model_based/cross_entropy_method.py @@ -1,19 +1,28 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging +from typing import Dict, Optional import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import torch from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import ( + CEMTrainerParameters, + NormalizationData, + NormalizationKey, + param_hash, +) from reagent.gym.policies.policy import Policy +from reagent.model_managers.model_based.world_model import WorldModel +from reagent.model_managers.world_model_base import WorldModelBase from reagent.models.cem_planner import CEMPlannerNetwork -from reagent.parameters import CEMTrainerParameters, param_hash from reagent.preprocessing.identify_types import CONTINUOUS_ACTION from reagent.preprocessing.normalization import get_num_output_features +from reagent.training import ReAgentLightningModule from reagent.training.cem_trainer import CEMTrainer -from reagent.workflow.model_managers.model_based.world_model import WorldModel -from reagent.workflow.model_managers.world_model_base import WorldModelBase +from reagent.workflow.types import RewardOptions logger = logging.getLogger(__name__) @@ -24,7 +33,10 @@ def __init__(self, cem_planner_network: CEMPlannerNetwork, discrete_action: bool self.cem_planner_network = cem_planner_network self.discrete_action = discrete_action - def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput: + # TODO: consider possible_actions_mask + def act( + self, obs: rlt.FeatureData, possible_actions_mask: Optional[torch.Tensor] = None + ) -> rlt.ActorOutput: greedy = self.cem_planner_network(obs) if self.discrete_action: _, onehot = greedy @@ -47,34 +59,43 @@ def __post_init_post_parse__(self): super().__post_init_post_parse__() # TODO: should this be in base class? - def create_policy(self, serving: bool = False) -> Policy: - return CEMPolicy(self.cem_planner_network, self.discrete_action) - - def build_trainer(self) -> CEMTrainer: + def create_policy( + self, + trainer_module: ReAgentLightningModule, + serving: bool = False, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + ) -> Policy: + assert isinstance(trainer_module, CEMTrainer) + # pyre-fixme[16]: `CrossEntropyMethod` has no attribute `discrete_action`. 
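+        # Note: self.discrete_action is only set inside build_trainer(), so
+        # build_trainer() must have been called before create_policy().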
+ return CEMPolicy(trainer_module.cem_planner_network, self.discrete_action) + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> CEMTrainer: + # pyre-fixme[45]: Cannot instantiate abstract class `WorldModel`. world_model_manager: WorldModel = WorldModel( trainer_param=self.trainer_param.mdnrnn ) - world_model_manager.initialize_trainer( - self.use_gpu, - self.reward_options, - # pyre-fixme[6]: Expected `Dict[str, - # reagent.parameters.NormalizationData]` for 3rd param but got - # `Optional[typing.Dict[str, reagent.parameters.NormalizationData]]`. - # pyre-fixme[6]: Expected `Dict[str, - # reagent.parameters.NormalizationData]` for 3rd param but got - # `Optional[typing.Dict[str, reagent.parameters.NormalizationData]]`. - self._normalization_data_map, + world_model_manager.build_trainer( + use_gpu=use_gpu, + reward_options=reward_options, + normalization_data_map=normalization_data_map, ) world_model_trainers = [ - world_model_manager.build_trainer() + world_model_manager.build_trainer( + normalization_data_map, reward_options=reward_options, use_gpu=use_gpu + ) for _ in range(self.trainer_param.num_world_models) ] world_model_nets = [trainer.memory_network for trainer in world_model_trainers] terminal_effective = self.trainer_param.mdnrnn.not_terminal_loss_weight > 0 - action_normalization_parameters = ( - self.action_normalization_data.dense_normalization_parameters - ) + action_normalization_parameters = normalization_data_map[ + NormalizationKey.ACTION + ].dense_normalization_parameters sorted_action_norm_vals = list(action_normalization_parameters.values()) discrete_action = sorted_action_norm_vals[0].feature_type != CONTINUOUS_ACTION action_upper_bounds, action_lower_bounds = None, None @@ -94,10 +115,14 @@ def build_trainer(self) -> CEMTrainer: num_elites=self.trainer_param.num_elites, plan_horizon_length=self.trainer_param.plan_horizon_length, state_dim=get_num_output_features( - self.state_normalization_data.dense_normalization_parameters + normalization_data_map[ + NormalizationKey.STATE + ].dense_normalization_parameters ), action_dim=get_num_output_features( - self.action_normalization_data.dense_normalization_parameters + normalization_data_map[ + NormalizationKey.ACTION + ].dense_normalization_parameters ), discrete_action=discrete_action, terminal_effective=terminal_effective, @@ -110,8 +135,6 @@ def build_trainer(self) -> CEMTrainer: # store for building policy # pyre-fixme[16]: `CrossEntropyMethod` has no attribute `discrete_action`. self.discrete_action = discrete_action - # pyre-fixme[16]: `CrossEntropyMethod` has no attribute `cem_planner_network`. 
- self.cem_planner_network = cem_planner_network logger.info( f"Built CEM network with discrete action = {discrete_action}, " f"action_upper_bound={action_upper_bounds}, " @@ -121,11 +144,4 @@ def build_trainer(self) -> CEMTrainer: cem_planner_network=cem_planner_network, world_model_trainers=world_model_trainers, parameters=self.trainer_param, - use_gpu=self.use_gpu, ) - - def build_serving_module(self) -> torch.nn.Module: - """ - Returns a TorchScript predictor module - """ - raise NotImplementedError() diff --git a/reagent/model_managers/model_based/seq2reward_model.py b/reagent/model_managers/model_based/seq2reward_model.py new file mode 100644 index 000000000..ab507c880 --- /dev/null +++ b/reagent/model_managers/model_based/seq2reward_model.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging +from typing import Dict, Optional + +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import ( + NormalizationData, + NormalizationKey, + param_hash, + Seq2RewardTrainerParameters, +) +from reagent.model_managers.world_model_base import WorldModelBase +from reagent.net_builder.unions import ValueNetBuilder__Union +from reagent.net_builder.value.fully_connected import FullyConnected +from reagent.net_builder.value.seq2reward_rnn import Seq2RewardNetBuilder +from reagent.reporting.seq2reward_reporter import Seq2RewardReporter +from reagent.training.world_model.seq2reward_trainer import Seq2RewardTrainer +from reagent.workflow.types import PreprocessingOptions, RewardOptions + + +logger = logging.getLogger(__name__) + + +@dataclass +class Seq2RewardModel(WorldModelBase): + __hash__ = param_hash + net_builder: ValueNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `Seq2RewardNetBuilder`. + # pyre-fixme[28]: Unexpected keyword argument `Seq2RewardNetBuilder`. + default_factory=lambda: ValueNetBuilder__Union( + Seq2RewardNetBuilder=Seq2RewardNetBuilder() + ) + ) + + compress_net_builder: ValueNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. + default_factory=lambda: ValueNetBuilder__Union(FullyConnected=FullyConnected()) + ) + + trainer_param: Seq2RewardTrainerParameters = field( + default_factory=Seq2RewardTrainerParameters + ) + + preprocessing_options: Optional[PreprocessingOptions] = None + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> Seq2RewardTrainer: + seq2reward_network = self.net_builder.value.build_value_network( + normalization_data_map[NormalizationKey.STATE] + ) + trainer = Seq2RewardTrainer( + seq2reward_network=seq2reward_network, params=self.trainer_param + ) + return trainer + + def get_reporter(self) -> Seq2RewardReporter: + return Seq2RewardReporter(self.trainer_param.action_names) diff --git a/reagent/model_managers/model_based/synthetic_reward.py b/reagent/model_managers/model_based/synthetic_reward.py new file mode 100644 index 000000000..c47dc9cea --- /dev/null +++ b/reagent/model_managers/model_based/synthetic_reward.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+import logging +from dataclasses import replace +from typing import Dict, List, Optional, Tuple + +import reagent.core.types as rlt +import torch +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import ( + EvaluationParameters, + NormalizationData, + NormalizationKey, + param_hash, +) +from reagent.data.data_fetcher import DataFetcher +from reagent.data.manual_data_module import ManualDataModule +from reagent.data.reagent_data_module import ReAgentDataModule +from reagent.model_managers.model_manager import ModelManager +from reagent.net_builder.synthetic_reward.single_step_synthetic_reward import ( + SingleStepSyntheticReward, +) +from reagent.net_builder.unions import SyntheticRewardNetBuilder__Union +from reagent.preprocessing.types import InputColumn +from reagent.reporting.reward_network_reporter import RewardNetworkReporter +from reagent.training import ( + ReAgentLightningModule, + RewardNetTrainer, + RewardNetworkTrainerParameters, +) +from reagent.workflow.identify_types_flow import identify_normalization_parameters +from reagent.workflow.types import ( + Dataset, + PreprocessingOptions, + ReaderOptions, + ResourceOptions, + RewardOptions, + TableSpec, +) + +logger = logging.getLogger(__name__) + + +@dataclass +class SyntheticReward(ModelManager): + """ + Train models to attribute single step rewards from sparse/delayed/aggregated rewards. + Ideas from: + 1. Synthetic Returns for Long-Term Credit Assignment: https://arxiv.org/pdf/2102.12425.pdf + 2. RUDDER: Return Decomposition for Delayed Rewards: https://arxiv.org/pdf/1806.07857.pdf + 3. Optimizing Agent Behavior over Long Time Scales by Transporting Value: https://arxiv.org/pdf/1810.06721.pdf + 4. Sequence Modeling of Temporal Credit Assignment for Episodic Reinforcement Learning: https://arxiv.org/pdf/1905.13420.pdf + """ + + __hash__ = param_hash + + trainer_param: RewardNetworkTrainerParameters = field( + default_factory=RewardNetworkTrainerParameters + ) + net_builder: SyntheticRewardNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `SlateRewardTransformer`. + default_factory=lambda: SyntheticRewardNetBuilder__Union( + SingleStepSyntheticReward=SingleStepSyntheticReward() + ) + ) + eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters) + state_preprocessing_options: Optional[PreprocessingOptions] = None + action_preprocessing_options: Optional[PreprocessingOptions] = None + state_feature_config: rlt.ModelFeatureConfig = field( + default_factory=rlt.ModelFeatureConfig + ) + parametric_action_feature_config: rlt.ModelFeatureConfig = field( + default_factory=rlt.ModelFeatureConfig + ) + discrete_action_names: Optional[List[str]] = None + # max sequence length to look back to distribute rewards + max_seq_len: int = 5 + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + assert self.max_seq_len is not None and self.max_seq_len > 0 + assert ( + self.state_preprocessing_options is None + or self.state_preprocessing_options.allowedlist_features is None + ), ( + "Please set state allowlist features in state_float_features field of " + "config instead" + ) + + if self.discrete_action_names: + assert ( + type(self.discrete_action_names) is list + and len(self.discrete_action_names) > 1 + ), f"Assume this is a discrete action problem, you need to specify at least 2 actions. Got {self.discrete_action_names}." 
+ else: + assert ( + self.action_preprocessing_options is None + or self.action_preprocessing_options.allowedlist_features is None + ), ( + "Please set action allowlist features in parametric_action_float_features field of " + "config instead" + ) + + def get_data_module( + self, + *, + input_table_spec: Optional[TableSpec] = None, + reward_options: Optional[RewardOptions] = None, + reader_options: Optional[ReaderOptions] = None, + setup_data: Optional[Dict[str, bytes]] = None, + saved_setup_data: Optional[Dict[str, bytes]] = None, + resource_options: Optional[ResourceOptions] = None, + ) -> Optional[ReAgentDataModule]: + return SyntheticRewardDataModule( + input_table_spec=input_table_spec, + reward_options=reward_options, + setup_data=setup_data, + saved_setup_data=saved_setup_data, + reader_options=reader_options, + resource_options=resource_options, + model_manager=self, + ) + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> RewardNetTrainer: + net_builder = self.net_builder.value + action_normalization_data = None + if not self.discrete_action_names: + action_normalization_data = normalization_data_map[NormalizationKey.ACTION] + synthetic_reward_network = net_builder.build_synthetic_reward_network( + normalization_data_map[NormalizationKey.STATE], + action_normalization_data=action_normalization_data, + discrete_action_names=self.discrete_action_names, + state_feature_config=self.state_feature_config, + action_feature_config=self.parametric_action_feature_config, + ) + + trainer = RewardNetTrainer( + synthetic_reward_network, + # pyre-fixme[16]: `RewardNetworkTrainerParameters` has no attribute + # `asdict`. + **self.trainer_param.asdict(), + ) + return trainer + + def get_reporter(self): + return RewardNetworkReporter( + self.trainer_param.loss_type, + str(self.net_builder.value), + ) + + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + """ + Returns a TorchScript predictor module + """ + assert isinstance(trainer_module, RewardNetTrainer) + + net_builder = self.net_builder.value + action_normalization_data = None + if not self.discrete_action_names: + action_normalization_data = normalization_data_map[NormalizationKey.ACTION] + return net_builder.build_serving_module( + self.max_seq_len, + trainer_module.reward_net, + normalization_data_map[NormalizationKey.STATE], + action_normalization_data=action_normalization_data, + discrete_action_names=self.discrete_action_names, + state_feature_config=self.state_feature_config, + action_feature_config=self.parametric_action_feature_config, + ) + + +class SyntheticRewardDataModule(ManualDataModule): + @property + def should_generate_eval_dataset(self) -> bool: + return self.model_manager.eval_parameters.calc_cpe_in_training + + def run_feature_identification( + self, input_table_spec: TableSpec + ) -> Dict[str, NormalizationData]: + """Identify dense feature normalization parameters""" + state_preprocessing_options = ( + self.model_manager.state_preprocessing_options or PreprocessingOptions() + ) + state_features = [ + ffi.feature_id + for ffi in self.model_manager.state_feature_config.float_feature_infos + ] + logger.info(f"state allowedlist_features: {state_features}") + state_preprocessing_options = replace( + state_preprocessing_options, allowedlist_features=state_features + ) + + state_normalization_parameters = 
identify_normalization_parameters( + input_table_spec, InputColumn.STATE_FEATURES, state_preprocessing_options + ) + if self.model_manager.discrete_action_names: + return { + NormalizationKey.STATE: NormalizationData( + dense_normalization_parameters=state_normalization_parameters + ) + } + # Run action feature identification + action_preprocessing_options = ( + self.model_manager.action_preprocessing_options or PreprocessingOptions() + ) + action_features = [ + ffi.feature_id + for ffi in self.model_manager.parametric_action_feature_config.float_feature_infos + ] + logger.info(f"action allowedlist_features: {action_features}") + action_preprocessing_options = replace( + action_preprocessing_options, allowedlist_features=action_features + ) + action_normalization_parameters = identify_normalization_parameters( + input_table_spec, InputColumn.ACTION, action_preprocessing_options + ) + return { + NormalizationKey.STATE: NormalizationData( + dense_normalization_parameters=state_normalization_parameters + ), + NormalizationKey.ACTION: NormalizationData( + dense_normalization_parameters=action_normalization_parameters + ), + } + + def query_data( + self, + input_table_spec: TableSpec, + sample_range: Optional[Tuple[float, float]], + reward_options: RewardOptions, + data_fetcher: DataFetcher, + ) -> Dataset: + return data_fetcher.query_data_synthetic_reward( + input_table_spec=input_table_spec, + discrete_action_names=self.model_manager.discrete_action_names, + sample_range=sample_range, + max_seq_len=self.model_manager.max_seq_len, + ) + + def build_batch_preprocessor(self): + raise NotImplementedError diff --git a/reagent/workflow/model_managers/model_based/world_model.py b/reagent/model_managers/model_based/world_model.py similarity index 57% rename from reagent/workflow/model_managers/model_based/world_model.py rename to reagent/model_managers/model_based/world_model.py index 3397368b9..2bbf2a759 100644 --- a/reagent/workflow/model_managers/model_based/world_model.py +++ b/reagent/model_managers/model_based/world_model.py @@ -1,14 +1,20 @@ #!/usr/bin/env python3 - +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import logging +from typing import Dict, Optional -import torch from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import ( + MDNRNNTrainerParameters, + NormalizationData, + NormalizationKey, + param_hash, +) +from reagent.model_managers.world_model_base import WorldModelBase from reagent.models.world_model import MemoryNetwork -from reagent.parameters import MDNRNNTrainerParameters, param_hash from reagent.preprocessing.normalization import get_num_output_features from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer -from reagent.workflow.model_managers.world_model_base import WorldModelBase +from reagent.workflow.types import RewardOptions logger = logging.getLogger(__name__) @@ -25,25 +31,24 @@ class WorldModel(WorldModelBase): def __post_init_post_parse__(self): super().__post_init_post_parse__() - def build_trainer(self) -> MDNRNNTrainer: + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> MDNRNNTrainer: memory_network = MemoryNetwork( state_dim=get_num_output_features( - self.state_normalization_data.dense_normalization_parameters - ), - action_dim=get_num_output_features( - self.action_normalization_data.dense_normalization_parameters + normalization_data_map[ + NormalizationKey.STATE + ].dense_normalization_parameters ), + action_dim=self.trainer_param.action_dim, num_hiddens=self.trainer_param.hidden_size, num_hidden_layers=self.trainer_param.num_hidden_layers, num_gaussians=self.trainer_param.num_gaussians, ) - if self.use_gpu: + if use_gpu: memory_network = memory_network.cuda() return MDNRNNTrainer(memory_network=memory_network, params=self.trainer_param) - - def build_serving_module(self) -> torch.nn.Module: - """ - Returns a TorchScript predictor module - """ - raise NotImplementedError() diff --git a/reagent/model_managers/model_manager.py b/reagent/model_managers/model_manager.py new file mode 100644 index 000000000..46a983417 --- /dev/null +++ b/reagent/model_managers/model_manager.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import abc +import logging +from typing import Dict, List, Optional, Tuple + +import pytorch_lightning as pl +import torch +from reagent.core.dataclasses import dataclass +from reagent.core.parameters import NormalizationData +from reagent.data.reagent_data_module import ReAgentDataModule +from reagent.reporting.reporter_base import ReporterBase +from reagent.training import MultiStageTrainer, ReAgentLightningModule +from reagent.workflow.types import ( + Dataset, + ReaderOptions, + ResourceOptions, + RewardOptions, + RLTrainingOutput, + RLTrainingReport, + TableSpec, +) +from reagent.workflow.utils import get_rank, train_eval_lightning + + +logger = logging.getLogger(__name__) + + +@dataclass +class ModelManager: + """ + ModelManager manages how to train models. + + Each type of models can have their own config type, implemented as + `config_type()` class method. `__init__()` of the concrete class must take + this type. + + To integrate training algorithms into the standard training workflow, you need: + 1. `build_trainer()`: Builds the ReAgentLightningModule + 2. `get_data_module()`: Defines how to create data module for this algorithm + 3. `build_serving_modules()`: Creates the TorchScript modules for serving + 4. `get_reporter()`: Returns the reporter to collect training/evaluation metrics + 5. 
`create_policy()`: (Optional) Creates Policy object for to interact with Gym + """ + + def __post_init_post_parse__(self): + """ + We use pydantic to parse raw config into typed (dataclass) config. + This method is called after everything is parsed, so you could + validate constraints that may not be captured with the type alone. + + See https://pydantic-docs.helpmanual.io/usage/dataclasses/#initialize-hooks + """ + pass + + def get_data_module( + self, + *, + input_table_spec: Optional[TableSpec] = None, + reward_options: Optional[RewardOptions] = None, + setup_data: Optional[Dict[str, bytes]] = None, + saved_setup_data: Optional[Dict[str, bytes]] = None, + reader_options: Optional[ReaderOptions] = None, + resource_options: Optional[ResourceOptions] = None, + ) -> Optional[ReAgentDataModule]: + """ + Return the data module. If this is not None, then `run_feature_identification` & + `query_data` will not be run. + """ + return None + + @abc.abstractmethod + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> ReAgentLightningModule: + """ + Implement this to build the trainer, given the config + + TODO: This function should return ReAgentLightningModule & + the dictionary of modules created + """ + pass + + @abc.abstractmethod + def get_reporter(self) -> ReporterBase: + pass + + def train( + self, + trainer_module: ReAgentLightningModule, + train_dataset: Optional[Dataset], + eval_dataset: Optional[Dataset], + test_dataset: Optional[Dataset], + data_module: Optional[ReAgentDataModule], + num_epochs: int, + reader_options: ReaderOptions, + resource_options: ResourceOptions, + checkpoint_path: Optional[str] = None, + ) -> Tuple[RLTrainingOutput, pl.Trainer]: + """ + Train the model + + Returns partially filled RLTrainingOutput. + The field that should not be filled are: + - output_path + + Arguments: + train/eval/test_dataset: what you'd expect + data_module: [pytorch lightning only] a lightning data module that replaces the use of train/eval datasets + num_epochs: number of training epochs + reader_options: options for the data reader + resource_options: options for training resources (currently only used for setting num_nodes in pytorch lightning trainer) + """ + if isinstance(trainer_module, MultiStageTrainer): + assert trainer_module.multi_stage_total_epochs == num_epochs, ( + f"The sum of each stage's epoch ({trainer_module.trainer_epoch_mapping})" + f" should be equal to num_epochs ({num_epochs})." + ) + + reporter = self.get_reporter() + trainer_module.set_reporter(reporter) + assert data_module is not None + + lightning_trainer = train_eval_lightning( + train_dataset=train_dataset, + eval_dataset=eval_dataset, + test_dataset=test_dataset, + trainer_module=trainer_module, + data_module=data_module, + num_epochs=num_epochs, + logger_name=str(type(self)), + reader_options=reader_options, + checkpoint_path=checkpoint_path, + resource_options=resource_options, + ) + + rank = get_rank() + if rank == 0: + trainer_logger = lightning_trainer.logger + # pyre-fixme[16]: `Optional` has no attribute `line_plot_aggregated`. + logger_data = trainer_logger.line_plot_aggregated + # pyre-fixme[16]: `Optional` has no attribute `clear_local_data`. 
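+            # The aggregated line plots have been captured above, so the logger's
+            # local data can be cleared before the training report is assembled.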
+ trainer_logger.clear_local_data() + if reporter is None: + training_report = None + else: + # pyre-ignore + training_report = RLTrainingReport.make_union_instance( + reporter.generate_training_report() + ) + return ( + RLTrainingOutput( + training_report=training_report, logger_data=logger_data + ), + lightning_trainer, + ) + # Output from processes with non-0 rank is not used + return RLTrainingOutput(), lightning_trainer + + # TODO: make abstract + def build_serving_modules( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> Dict[str, torch.nn.Module]: + """ + Returns TorchScript for serving in production + """ + return { + "default_model": self.build_serving_module( + trainer_module, normalization_data_map + ) + } + + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + """ + Optionaly, implement this method if you only have one model for serving + """ + raise NotImplementedError + + # TODO: make abstract + def serving_module_names(self) -> List[str]: + """ + Returns the keys that would be returned in `build_serving_modules()`. + This method is required because we need to reserve entity IDs for + these serving modules before we start the training. + """ + return ["default_model"] + + def create_policy( + self, + trainer_module: ReAgentLightningModule, + serving: bool = False, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + ): + raise NotImplementedError diff --git a/reagent/workflow/model_managers/parametric/__init__.py b/reagent/model_managers/parametric/__init__.py similarity index 100% rename from reagent/workflow/model_managers/parametric/__init__.py rename to reagent/model_managers/parametric/__init__.py diff --git a/reagent/model_managers/parametric/parametric_dqn.py b/reagent/model_managers/parametric/parametric_dqn.py new file mode 100644 index 000000000..7635b2367 --- /dev/null +++ b/reagent/model_managers/parametric/parametric_dqn.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging +from typing import Dict, Optional + +import torch +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash +from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.model_managers.parametric_dqn_base import ParametricDQNBase +from reagent.net_builder.parametric_dqn.fully_connected import FullyConnected +from reagent.net_builder.unions import ParametricDQNNetBuilder__Union +from reagent.training import ( + ParametricDQNTrainer, + ParametricDQNTrainerParameters, + ReAgentLightningModule, +) +from reagent.workflow.types import RewardOptions + + +logger = logging.getLogger(__name__) + + +@dataclass +class ParametricDQN(ParametricDQNBase): + __hash__ = param_hash + + trainer_param: ParametricDQNTrainerParameters = field( + default_factory=ParametricDQNTrainerParameters + ) + net_builder: ParametricDQNNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. 
+ default_factory=lambda: ParametricDQNNetBuilder__Union( + FullyConnected=FullyConnected() + ) + ) + + @property + def rl_parameters(self): + return self.trainer_param.rl + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> ParametricDQNTrainer: + net_builder = self.net_builder.value + # pyre-fixme[16]: `ParametricDQN` has no attribute `_q_network`. + self._q_network = net_builder.build_q_network( + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], + ) + # Metrics + reward + reward_options = reward_options or RewardOptions() + metrics_to_score = get_metrics_to_score(reward_options.metric_reward_values) + reward_output_dim = len(metrics_to_score) + 1 + reward_network = net_builder.build_q_network( + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], + output_dim=reward_output_dim, + ) + + q_network_target = self._q_network.get_target_network() + return ParametricDQNTrainer( + q_network=self._q_network, + q_network_target=q_network_target, + reward_network=reward_network, + # pyre-fixme[16]: `ParametricDQNTrainerParameters` has no attribute + # `asdict`. + **self.trainer_param.asdict(), + ) + + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + assert isinstance(trainer_module, ParametricDQNTrainer) + net_builder = self.net_builder.value + return net_builder.build_serving_module( + trainer_module.q_network, + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ACTION], + ) diff --git a/reagent/workflow/model_managers/parametric_dqn_base.py b/reagent/model_managers/parametric_dqn_base.py similarity index 50% rename from reagent/workflow/model_managers/parametric_dqn_base.py rename to reagent/model_managers/parametric_dqn_base.py index 59f30b57c..9101789d1 100644 --- a/reagent/workflow/model_managers/parametric_dqn_base.py +++ b/reagent/model_managers/parametric_dqn_base.py @@ -1,34 +1,38 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import logging +from dataclasses import replace from typing import Dict, List, Optional, Tuple -import reagent.types as rlt +import reagent.core.types as rlt from reagent.core.dataclasses import dataclass, field -from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.core.parameters import ( + EvaluationParameters, + NormalizationData, + NormalizationKey, +) +from reagent.data.data_fetcher import DataFetcher +from reagent.data.manual_data_module import ManualDataModule from reagent.gym.policies.policy import Policy from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler from reagent.gym.policies.scorers.discrete_scorer import parametric_dqn_scorer +from reagent.model_managers.model_manager import ModelManager from reagent.models.base import ModelBase -from reagent.parameters import EvaluationParameters, NormalizationData, NormalizationKey -from reagent.preprocessing.batch_preprocessor import BatchPreprocessor, InputColumn -from reagent.preprocessing.normalization import ( - get_feature_config, - get_num_output_features, -) +from reagent.preprocessing.batch_preprocessor import BatchPreprocessor +from reagent.preprocessing.normalization import get_feature_config +from reagent.preprocessing.types import InputColumn +from reagent.training import ReAgentLightningModule from reagent.workflow.identify_types_flow import identify_normalization_parameters -from reagent.workflow.model_managers.model_manager import ModelManager from reagent.workflow.types import ( Dataset, PreprocessingOptions, ReaderOptions, RewardOptions, - RLTrainingOutput, TableSpec, ) - logger = logging.getLogger(__name__) @@ -42,48 +46,53 @@ class ParametricDQNBase(ModelManager): eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters) def __post_init_post_parse__(self): - super().__init__() + super().__post_init_post_parse__() assert ( self.state_preprocessing_options is None - or self.state_preprocessing_options.whitelist_features is None + or self.state_preprocessing_options.allowedlist_features is None ), ( - "Please set state whitelist features in state_float_features field of " + "Please set state allowlist features in state_float_features field of " "config instead" ) assert ( self.action_preprocessing_options is None - or self.action_preprocessing_options.whitelist_features is None + or self.action_preprocessing_options.allowedlist_features is None ), ( - "Please set action whitelist features in action_float_features field of " + "Please set action allowlist features in action_float_features field of " "config instead" ) - self._state_preprocessing_options = self.state_preprocessing_options - self._action_preprocessing_options = self.action_preprocessing_options self._q_network: Optional[ModelBase] = None - self._metrics_to_score: Optional[List[str]] = None - def create_policy(self, serving: bool) -> Policy: - """ Create an online DiscreteDQN Policy from env. """ + def create_policy( + self, + trainer_module: ReAgentLightningModule, + serving: bool = False, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + ): + """Create an online DiscreteDQN Policy from env.""" # FIXME: this only works for one-hot encoded actions - action_dim = get_num_output_features( - self.action_normalization_data.dense_normalization_parameters - ) + # pyre-fixme[16]: `Tensor` has no attribute `input_prototype`. 
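+        # Infer action_dim from the q_network input prototype; the second element is
+        # presumably the one-hot action input, so its feature dim is the action count.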
+ action_dim = trainer_module.q_network.input_prototype()[1].float_features.shape[ + 1 + ] if serving: + assert normalization_data_map return create_predictor_policy_from_model( - self.build_serving_module(), max_num_actions=action_dim + self.build_serving_module(trainer_module, normalization_data_map), + max_num_actions=action_dim, ) else: + # pyre-fixme[16]: `ParametricDQNBase` has no attribute `rl_parameters`. sampler = SoftmaxActionSampler(temperature=self.rl_parameters.temperature) scorer = parametric_dqn_scorer( - max_num_actions=action_dim, q_network=self._q_network + max_num_actions=action_dim, + # pyre-fixme[6]: Expected `ModelBase` for 2nd param but got + # `Union[torch.Tensor, torch.nn.Module]`. + q_network=trainer_module.q_network, ) return Policy(scorer=scorer, sampler=sampler) - @property - def should_generate_eval_dataset(self) -> bool: - return self.eval_parameters.calc_cpe_in_training - @property def state_feature_config(self) -> rlt.ModelFeatureConfig: return get_feature_config(self.state_float_features) @@ -92,19 +101,48 @@ def state_feature_config(self) -> rlt.ModelFeatureConfig: def action_feature_config(self) -> rlt.ModelFeatureConfig: return get_feature_config(self.action_float_features) + # TODO: Add below get_data_module() method once methods in + # `ParametricDqnDataModule` class are fully implemented + # def get_data_module( + # self, + # *, + # input_table_spec: Optional[TableSpec] = None, + # reward_options: Optional[RewardOptions] = None, + # setup_data: Optional[Dict[str, bytes]] = None, + # saved_setup_data: Optional[Dict[str, bytes]] = None, + # reader_options: Optional[ReaderOptions] = None, + # resource_options: Optional[ResourceOptions] = None, + # ) -> Optional[ReAgentDataModule]: + # return ParametricDqnDataModule( + # input_table_spec=input_table_spec, + # reward_options=reward_options, + # setup_data=setup_data, + # saved_setup_data=saved_setup_data, + # reader_options=reader_options, + # resource_options=resource_options, + # model_manager=self, + # ) + + +class ParametricDqnDataModule(ManualDataModule): + @property + def should_generate_eval_dataset(self) -> bool: + return self.model_manager.eval_parameters.calc_cpe_in_training + def run_feature_identification( self, input_table_spec: TableSpec ) -> Dict[str, NormalizationData]: # Run state feature identification state_preprocessing_options = ( - self._state_preprocessing_options or PreprocessingOptions() + self.model_manager.state_preprocessing_options or PreprocessingOptions() ) state_features = [ - ffi.feature_id for ffi in self.state_feature_config.float_feature_infos + ffi.feature_id + for ffi in self.model_manager.state_feature_config.float_feature_infos ] - logger.info(f"state whitelist_features: {state_features}") - state_preprocessing_options = state_preprocessing_options._replace( - whitelist_features=state_features + logger.info(f"state allowedlist_features: {state_features}") + state_preprocessing_options = replace( + state_preprocessing_options, allowedlist_features=state_features ) state_normalization_parameters = identify_normalization_parameters( @@ -113,14 +151,15 @@ def run_feature_identification( # Run action feature identification action_preprocessing_options = ( - self._action_preprocessing_options or PreprocessingOptions() + self.model_manager.action_preprocessing_options or PreprocessingOptions() ) action_features = [ - ffi.feature_id for ffi in self.action_feature_config.float_feature_infos + ffi.feature_id + for ffi in 
self.model_manager.action_feature_config.float_feature_infos ] - logger.info(f"action whitelist_features: {action_features}") - action_preprocessing_options = action_preprocessing_options._replace( - whitelist_features=action_features + logger.info(f"action allowedlist_features: {action_features}") + action_preprocessing_options = replace( + action_preprocessing_options, allowedlist_features=action_features ) action_normalization_parameters = identify_normalization_parameters( input_table_spec, InputColumn.ACTION, action_preprocessing_options @@ -134,35 +173,14 @@ def run_feature_identification( ), } - @property - def required_normalization_keys(self) -> List[str]: - return [NormalizationKey.STATE, NormalizationKey.ACTION] - def query_data( self, input_table_spec: TableSpec, sample_range: Optional[Tuple[float, float]], reward_options: RewardOptions, + data_fetcher: DataFetcher, ) -> Dataset: - raise NotImplementedError() - - @property - def metrics_to_score(self) -> List[str]: - assert self.reward_options is not None - if self._metrics_to_score is None: - # pyre-fixme[16]: `ParametricDQNBase` has no attribute `_metrics_to_score`. - # pyre-fixme[16]: `ParametricDQNBase` has no attribute `_metrics_to_score`. - self._metrics_to_score = get_metrics_to_score( - # pyre-fixme[16]: `Optional` has no attribute `metric_reward_values`. - # pyre-fixme[16]: `Optional` has no attribute `metric_reward_values`. - self._reward_options.metric_reward_values - ) - return self._metrics_to_score + raise NotImplementedError def build_batch_preprocessor(self) -> BatchPreprocessor: raise NotImplementedError() - - def train( - self, train_dataset: Dataset, eval_dataset: Optional[Dataset], num_epochs: int - ) -> RLTrainingOutput: - raise NotImplementedError() diff --git a/reagent/model_managers/policy_gradient/__init__.py b/reagent/model_managers/policy_gradient/__init__.py new file mode 100644 index 000000000..e047cc2ba --- /dev/null +++ b/reagent/model_managers/policy_gradient/__init__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from .ppo import PPO +from .reinforce import Reinforce + +__all__ = ["Reinforce", "PPO"] diff --git a/reagent/model_managers/policy_gradient/ppo.py b/reagent/model_managers/policy_gradient/ppo.py new file mode 100644 index 000000000..9ecea41ee --- /dev/null +++ b/reagent/model_managers/policy_gradient/ppo.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
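For reference, the run_feature_identification changes above move from the old namedtuple-style `PreprocessingOptions._replace(...)` to `dataclasses.replace` (imported at the top of the file), which returns an updated copy of a dataclass instance and leaves the original untouched. A minimal sketch of that pattern, using a hypothetical options dataclass as a stand-in for the real PreprocessingOptions:

from dataclasses import dataclass, replace
from typing import List, Optional

@dataclass(frozen=True)
class Options:  # hypothetical stand-in for PreprocessingOptions
    allowedlist_features: Optional[List[int]] = None
    num_samples: int = 100000

base = Options()
updated = replace(base, allowedlist_features=[1001, 1002])
assert base.allowedlist_features is None            # original is unchanged
assert updated.allowedlist_features == [1001, 1002]
assert updated.num_samples == 100000                # untouched fields carry over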
+ +import logging +from typing import Dict, Optional + +import torch +from reagent.core import types as rlt +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model +from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler +from reagent.model_managers.model_manager import ModelManager +from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider +from reagent.net_builder.discrete_dqn.dueling import Dueling +from reagent.net_builder.unions import ( + DiscreteDQNNetBuilder__Union, + ValueNetBuilder__Union, +) +from reagent.training import PPOTrainer, PPOTrainerParameters, ReAgentLightningModule +from reagent.workflow.types import ModelFeatureConfigProvider__Union, RewardOptions + + +logger = logging.getLogger(__name__) + + +@dataclass +class PPO(ModelManager): + __hash__ = param_hash + + trainer_param: PPOTrainerParameters = field(default_factory=PPOTrainerParameters) + # using DQN net here because it supports `possible_actions_mask` + policy_net_builder: DiscreteDQNNetBuilder__Union = field( + # pyre-ignore + default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling()) + ) + value_net_builder: Optional[ValueNetBuilder__Union] = None + state_feature_config_provider: ModelFeatureConfigProvider__Union = field( + # pyre-ignore + default_factory=lambda: ModelFeatureConfigProvider__Union( + raw=RawModelFeatureConfigProvider(float_feature_infos=[]) + ) + ) + sampler_temperature: float = 1.0 + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + self._policy: Optional[Policy] = None + assert ( + len(self.action_names) > 1 + ), f"PPO needs at least 2 actions. Got {self.action_names}." 
+ + @property + def action_names(self): + return self.trainer_param.actions + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> PPOTrainer: + policy_net_builder = self.policy_net_builder.value + policy_network = policy_net_builder.build_q_network( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + len(self.action_names), + ) + value_net = None + value_net_builder = self.value_net_builder + if value_net_builder: + value_net_builder = value_net_builder.value + value_net = value_net_builder.build_value_network( + normalization_data_map[NormalizationKey.STATE] + ) + trainer = PPOTrainer( + policy=self._create_policy(policy_network), + value_net=value_net, + **self.trainer_param.asdict(), # pyre-ignore + ) + return trainer + + def create_policy( + self, + trainer_module: ReAgentLightningModule, + serving: bool = False, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + ): + assert isinstance(trainer_module, PPOTrainer) + if serving: + assert normalization_data_map is not None + return create_predictor_policy_from_model( + self.build_serving_module(trainer_module, normalization_data_map) + ) + else: + return self._create_policy(trainer_module.scorer) + + def _create_policy(self, policy_network): + if self._policy is None: + sampler = SoftmaxActionSampler(temperature=self.sampler_temperature) + self._policy = Policy(scorer=policy_network, sampler=sampler) + return self._policy + + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + assert isinstance(trainer_module, PPOTrainer) + policy_serving_module = self.policy_net_builder.value.build_serving_module( + q_network=trainer_module.scorer, + state_normalization_data=normalization_data_map[NormalizationKey.STATE], + action_names=self.action_names, + state_feature_config=self.state_feature_config, + ) + return policy_serving_module + + @property + def state_feature_config(self) -> rlt.ModelFeatureConfig: + return self.state_feature_config_provider.value.get_model_feature_config() diff --git a/reagent/model_managers/policy_gradient/reinforce.py b/reagent/model_managers/policy_gradient/reinforce.py new file mode 100644 index 000000000..a24e481c7 --- /dev/null +++ b/reagent/model_managers/policy_gradient/reinforce.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
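PPO's `_create_policy` above composes a Policy from a scorer (the policy network, one score per action) and a SoftmaxActionSampler. A minimal sketch of what that sampling step amounts to, using plain torch stand-ins rather than the ReAgent classes:

import torch

# Sketch only: the scorer maps state features to per-action scores; the sampler
# draws an action from softmax(scores / temperature).
def sample_action(scores: torch.Tensor, temperature: float = 1.0) -> torch.Tensor:
    probs = torch.softmax(scores / temperature, dim=-1)
    return torch.distributions.Categorical(probs=probs).sample()

scores = torch.tensor([[2.0, 0.5, -1.0]])   # batch of 1, three actions
action = sample_action(scores, temperature=1.0)
assert action.shape == (1,)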
+ +import logging +from typing import Dict, Optional + +import torch +from reagent.core import types as rlt +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model +from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler +from reagent.model_managers.model_manager import ModelManager +from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider +from reagent.net_builder.discrete_dqn.dueling import Dueling +from reagent.net_builder.unions import ( + DiscreteDQNNetBuilder__Union, + ValueNetBuilder__Union, +) +from reagent.training import ( + ReAgentLightningModule, + ReinforceTrainer, + ReinforceTrainerParameters, +) +from reagent.workflow.types import ModelFeatureConfigProvider__Union, RewardOptions + + +logger = logging.getLogger(__name__) + + +@dataclass +class Reinforce(ModelManager): + __hash__ = param_hash + + trainer_param: ReinforceTrainerParameters = field( + default_factory=ReinforceTrainerParameters + ) + # using DQN net here because it supports `possible_actions_mask` + policy_net_builder: DiscreteDQNNetBuilder__Union = field( + # pyre-ignore + default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling()) + ) + value_net_builder: Optional[ValueNetBuilder__Union] = None + state_feature_config_provider: ModelFeatureConfigProvider__Union = field( + # pyre-ignore + default_factory=lambda: ModelFeatureConfigProvider__Union( + raw=RawModelFeatureConfigProvider(float_feature_infos=[]) + ) + ) + sampler_temperature: float = 1.0 + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + self._policy: Optional[Policy] = None + assert ( + len(self.action_names) > 1 + ), f"REINFORCE needs at least 2 actions. Got {self.action_names}." 
+ + @property + def action_names(self): + return self.trainer_param.actions + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> ReinforceTrainer: + policy_net_builder = self.policy_net_builder.value + policy_network = policy_net_builder.build_q_network( + self.state_feature_config, + normalization_data_map[NormalizationKey.STATE], + len(self.action_names), + ) + value_net = None + value_net_builder = self.value_net_builder + if value_net_builder: + value_net_builder = value_net_builder.value + value_net = value_net_builder.build_value_network( + normalization_data_map[NormalizationKey.STATE] + ) + trainer = ReinforceTrainer( + policy=self._create_policy(policy_network), + value_net=value_net, + **self.trainer_param.asdict(), # pyre-ignore + ) + return trainer + + def create_policy( + self, + trainer_module: ReAgentLightningModule, + serving: bool = False, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + ): + assert isinstance(trainer_module, ReinforceTrainer) + if serving: + assert normalization_data_map is not None + return create_predictor_policy_from_model( + self.build_serving_module(trainer_module, normalization_data_map) + ) + else: + return self._create_policy(trainer_module.scorer) + + def _create_policy(self, policy_network): + if self._policy is None: + sampler = SoftmaxActionSampler(temperature=self.sampler_temperature) + self._policy = Policy(scorer=policy_network, sampler=sampler) + return self._policy + + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + assert isinstance(trainer_module, ReinforceTrainer) + policy_serving_module = self.policy_net_builder.value.build_serving_module( + q_network=trainer_module.scorer, + state_normalization_data=normalization_data_map[NormalizationKey.STATE], + action_names=self.action_names, + state_feature_config=self.state_feature_config, + ) + return policy_serving_module + + @property + def state_feature_config(self) -> rlt.ModelFeatureConfig: + return self.state_feature_config_provider.value.get_model_feature_config() diff --git a/reagent/test/environment/__init__.py b/reagent/model_managers/ranking/__init__.py similarity index 65% rename from reagent/test/environment/__init__.py rename to reagent/model_managers/ranking/__init__.py index ec3ac3aaf..2090f0ba8 100644 --- a/reagent/test/environment/__init__.py +++ b/reagent/model_managers/ranking/__init__.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import logging +from .slate_q import SlateQ -logger = logging.getLogger(__name__) + +__all__ = ["SlateQ"] diff --git a/reagent/model_managers/ranking/slate_q.py b/reagent/model_managers/ranking/slate_q.py new file mode 100644 index 000000000..cce64b9e0 --- /dev/null +++ b/reagent/model_managers/ranking/slate_q.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
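The Reinforce manager above optionally builds a value network and hands it to ReinforceTrainer. A sketch of the textbook REINFORCE-with-baseline objective that such a value net typically serves; this shows only the underlying idea, not the trainer's exact loss:

import torch

log_probs = torch.tensor([-0.1, -1.2, -0.7])   # log pi(a_t | s_t) along one episode
returns = torch.tensor([5.0, 3.0, 1.0])        # discounted returns G_t
baseline = torch.tensor([4.0, 2.5, 1.5])       # V(s_t) from the value net
advantage = returns - baseline
policy_loss = -(log_probs * advantage).mean()  # minimizing this ascends the policy gradient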
+ +import logging +from typing import Dict, Optional + +import torch +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, NormalizationKey, param_hash +from reagent.model_managers.slate_q_base import SlateQBase +from reagent.net_builder.parametric_dqn.fully_connected import FullyConnected +from reagent.net_builder.unions import ParametricDQNNetBuilder__Union +from reagent.training import ( + ReAgentLightningModule, + SlateQTrainer, + SlateQTrainerParameters, +) +from reagent.workflow.types import RewardOptions + + +logger = logging.getLogger(__name__) + + +@dataclass +class SlateQ(SlateQBase): + __hash__ = param_hash + + slate_size: int = -1 + num_candidates: int = -1 + trainer_param: SlateQTrainerParameters = field( + default_factory=SlateQTrainerParameters + ) + net_builder: ParametricDQNNetBuilder__Union = field( + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. + default_factory=lambda: ParametricDQNNetBuilder__Union( + FullyConnected=FullyConnected() + ) + ) + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + assert ( + self.slate_size > 0 + ), f"Please set valid slate_size (currently {self.slate_size})" + assert ( + self.num_candidates > 0 + ), f"Please set valid num_candidates (currently {self.num_candidates})" + self.eval_parameters = self.trainer_param.evaluation + + def build_trainer( + self, + normalization_data_map: Dict[str, NormalizationData], + use_gpu: bool, + reward_options: Optional[RewardOptions] = None, + ) -> SlateQTrainer: + net_builder = self.net_builder.value + q_network = net_builder.build_q_network( + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ITEM], + ) + + q_network_target = q_network.get_target_network() + return SlateQTrainer( + q_network=q_network, + q_network_target=q_network_target, + slate_size=self.slate_size, + # pyre-fixme[16]: `SlateQTrainerParameters` has no attribute `asdict`. + **self.trainer_param.asdict(), + ) + + def build_serving_module( + self, + trainer_module: ReAgentLightningModule, + normalization_data_map: Dict[str, NormalizationData], + ) -> torch.nn.Module: + assert isinstance(trainer_module, SlateQTrainer) + net_builder = self.net_builder.value + return net_builder.build_serving_module( + trainer_module.q_network, + normalization_data_map[NormalizationKey.STATE], + normalization_data_map[NormalizationKey.ITEM], + ) diff --git a/reagent/model_managers/slate_q_base.py b/reagent/model_managers/slate_q_base.py new file mode 100644 index 000000000..a538a9558 --- /dev/null +++ b/reagent/model_managers/slate_q_base.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
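SlateQ's build_trainer above pairs the online q_network with `q_network.get_target_network()`, i.e. a copy used for stable TD targets. A generic sketch of the target-network idea; the Polyak update shown here is illustrative and not necessarily how SlateQTrainer refreshes its target:

import copy
import torch

def polyak_update(online: torch.nn.Module, target: torch.nn.Module, tau: float = 0.005) -> None:
    # Move target weights a small step toward the online weights.
    with torch.no_grad():
        for p, p_t in zip(online.parameters(), target.parameters()):
            p_t.mul_(1.0 - tau).add_(tau * p)

online_net = torch.nn.Linear(4, 2)
target_net = copy.deepcopy(online_net)   # what get_target_network() conceptually does
polyak_update(online_net, target_net)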
+ +import logging +from typing import Dict, List, Optional, Tuple + +import reagent.core.types as rlt +from reagent.core.dataclasses import dataclass +from reagent.core.parameters import NormalizationData +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model +from reagent.gym.policies.samplers.top_k_sampler import TopKSampler +from reagent.gym.policies.scorers.slate_q_scorer import slate_q_scorer +from reagent.model_managers.model_manager import ModelManager +from reagent.preprocessing.normalization import get_feature_config +from reagent.reporting.slate_q_reporter import SlateQReporter +from reagent.training import ReAgentLightningModule +from reagent.workflow.types import PreprocessingOptions + + +logger = logging.getLogger(__name__) + + +@dataclass +class SlateQBase(ModelManager): + slate_feature_id: int = 0 + slate_score_id: Tuple[int, int] = (0, 0) + item_preprocessing_options: Optional[PreprocessingOptions] = None + state_preprocessing_options: Optional[PreprocessingOptions] = None + state_float_features: Optional[List[Tuple[int, str]]] = None + item_float_features: Optional[List[Tuple[int, str]]] = None + + def __post_init_post_parse__(self): + super().__post_init_post_parse__() + assert ( + self.state_preprocessing_options is None + or self.state_preprocessing_options.allowedlist_features is None + ), ( + "Please set state allowlist features in state_float_features field of " + "config instead" + ) + assert ( + self.item_preprocessing_options is None + or self.item_preprocessing_options.allowedlist_features is None + ), ( + "Please set item allowlist features in item_float_features field of " + "config instead" + ) + assert ( + self.item_preprocessing_options is None + or self.item_preprocessing_options.sequence_feature_id is None + ), "Please set slate_feature_id field of config instead" + self._state_preprocessing_options = self.state_preprocessing_options + self._item_preprocessing_options = self.item_preprocessing_options + self.eval_parameters = self.trainer_param.evaluation + + def create_policy( + self, + trainer_module: ReAgentLightningModule, + serving: bool = False, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + ): + if serving: + assert normalization_data_map + return create_predictor_policy_from_model( + self.build_serving_module(trainer_module, normalization_data_map), + # pyre-fixme[16]: `SlateQBase` has no attribute `num_candidates`. + max_num_actions=self.num_candidates, + # pyre-fixme[16]: `SlateQBase` has no attribute `slate_size`. + slate_size=self.slate_size, + ) + else: + scorer = slate_q_scorer( + num_candidates=self.num_candidates, + # pyre-fixme[6]: Expected `ModelBase` for 2nd param but got + # `Union[torch.Tensor, torch.nn.Module]`. + q_network=trainer_module.q_network, + ) + sampler = TopKSampler(k=self.slate_size) + return Policy(scorer=scorer, sampler=sampler) + + @property + def state_feature_config(self) -> rlt.ModelFeatureConfig: + return get_feature_config(self.state_float_features) + + @property + def item_feature_config(self) -> rlt.ModelFeatureConfig: + return get_feature_config(self.item_float_features) + + def get_reporter(self): + return SlateQReporter() diff --git a/reagent/model_managers/union.py b/reagent/model_managers/union.py new file mode 100644 index 000000000..d7aaa9543 --- /dev/null +++ b/reagent/model_managers/union.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved. + +""" Register all ModelManagers. Must import them before filling union. """ + +from typing import Optional + +from reagent.core.dataclasses import dataclass +from reagent.core.tagged_union import TaggedUnion + +from .actor_critic import SAC as SACType, TD3 as TD3Type +from .discrete import ( + DiscreteC51DQN as DiscreteC51DQNType, + DiscreteCRR as DiscreteCRRType, + DiscreteDQN as DiscreteDQNType, + DiscreteQRDQN as DiscreteQRDQNType, +) +from .model_based import ( + CrossEntropyMethod as CrossEntropyMethodType, + Seq2RewardModel as Seq2RewardModelType, + SyntheticReward as SyntheticRewardType, + WorldModel as WorldModelType, +) +from .parametric import ParametricDQN as ParametricDQNType +from .policy_gradient import PPO as PPOType, Reinforce as ReinforceType +from .ranking import SlateQ as SlateQType + + +@dataclass(frozen=True) +class ModelManager__Union(TaggedUnion): + SAC: Optional[SACType] = None + TD3: Optional[TD3Type] = None + + DiscreteC51DQN: Optional[DiscreteC51DQNType] = None + DiscreteCRR: Optional[DiscreteCRRType] = None + DiscreteDQN: Optional[DiscreteDQNType] = None + DiscreteQRDQN: Optional[DiscreteQRDQNType] = None + + CrossEntropyMethod: Optional[CrossEntropyMethodType] = None + Seq2RewardModel: Optional[Seq2RewardModelType] = None + WorldModel: Optional[WorldModelType] = None + SyntheticReward: Optional[SyntheticRewardType] = None + + ParametricDQN: Optional[ParametricDQNType] = None + + PPO: Optional[PPOType] = None + Reinforce: Optional[ReinforceType] = None + + SlateQ: Optional[SlateQType] = None diff --git a/reagent/model_managers/world_model_base.py b/reagent/model_managers/world_model_base.py new file mode 100644 index 000000000..ad460ccae --- /dev/null +++ b/reagent/model_managers/world_model_base.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
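ModelManager__Union follows the repo's TaggedUnion convention: a config sets exactly one member (e.g. `ModelManager__Union(PPO=PPO(...))`) and callers read it back through `.value`, just as the managers above do with their net-builder unions. A toy, simplified illustration of that convention (not the real reagent.core.tagged_union implementation):

from dataclasses import dataclass, fields
from typing import Optional

@dataclass(frozen=True)
class ToyUnion:
    ParametricDQN: Optional[str] = None
    PPO: Optional[str] = None

    @property
    def value(self):
        selected = [getattr(self, f.name) for f in fields(self) if getattr(self, f.name) is not None]
        assert len(selected) == 1, "exactly one union member must be set"
        return selected[0]

assert ToyUnion(PPO="ppo-config").value == "ppo-config"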
+import logging +from dataclasses import replace +from typing import Dict, Optional, Tuple + +from reagent.core.dataclasses import dataclass +from reagent.core.parameters import NormalizationData, NormalizationKey +from reagent.data.data_fetcher import DataFetcher +from reagent.data.manual_data_module import ManualDataModule +from reagent.preprocessing.batch_preprocessor import BatchPreprocessor +from reagent.preprocessing.types import InputColumn +from reagent.workflow.identify_types_flow import identify_normalization_parameters +from reagent.workflow.types import ( + Dataset, + PreprocessingOptions, + RewardOptions, + TableSpec, +) + +try: + from reagent.model_managers.fb.model_manager import ModelManager +except ImportError: + from reagent.model_managers.model_manager import ModelManager + + +logger = logging.getLogger(__name__) + + +@dataclass +class WorldModelBase(ModelManager): + reward_boost: Optional[Dict[str, float]] = None + + +class WorldModelDataModule(ManualDataModule): + @property + def should_generate_eval_dataset(self) -> bool: + return False + + def run_feature_identification( + self, input_table_spec: TableSpec + ) -> Dict[str, NormalizationData]: + # Run state feature identification + state_preprocessing_options = PreprocessingOptions() + state_features = [ + ffi.feature_id + for ffi in self.model_manager.state_feature_config.float_feature_infos + ] + logger.info(f"Overriding state allowedlist_features: {state_features}") + assert len(state_features) > 0, "No state feature is specified" + state_preprocessing_options = replace( + state_preprocessing_options, allowedlist_features=state_features + ) + + state_normalization_parameters = identify_normalization_parameters( + input_table_spec, InputColumn.STATE_FEATURES, state_preprocessing_options + ) + + return { + NormalizationKey.STATE: NormalizationData( + dense_normalization_parameters=state_normalization_parameters + ) + } + + def query_data( + self, + input_table_spec: TableSpec, + sample_range: Optional[Tuple[float, float]], + reward_options: RewardOptions, + data_fetcher: DataFetcher, + ) -> Dataset: + raise NotImplementedError() + + def build_batch_preprocessor(self) -> BatchPreprocessor: + raise NotImplementedError() diff --git a/reagent/model_utils/__init__.py b/reagent/model_utils/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/model_utils/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/model_utils/seq2slate_utils.py b/reagent/model_utils/seq2slate_utils.py new file mode 100644 index 000000000..3c687c0df --- /dev/null +++ b/reagent/model_utils/seq2slate_utils.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
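seq2slate_utils, whose header appears just above, defines subsequent_mask, which hides future positions during decoding: entry (i, j) is kept only when j <= i, so an item never attends to later items. A standalone equivalent, assuming the same ~triu construction used in the file:

import torch

def subsequent_mask(size: int) -> torch.Tensor:
    # True = attend, False = ignore; lower-triangular including the diagonal.
    return ~torch.triu(torch.ones(1, size, size, dtype=torch.bool), diagonal=1)

print(subsequent_mask(3).int())
# tensor([[[1, 0, 0],
#          [1, 1, 0],
#          [1, 1, 1]]], dtype=torch.int32)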
+import copy +import logging +import math +from enum import Enum + +import torch +import torch.nn as nn +import torch.nn.functional as F + +logger = logging.getLogger(__name__) + +PADDING_SYMBOL = 0 +DECODER_START_SYMBOL = 1 + + +class Seq2SlateMode(Enum): + RANK_MODE = "rank" + PER_SEQ_LOG_PROB_MODE = "per_sequence_log_prob" + PER_SYMBOL_LOG_PROB_DIST_MODE = "per_symbol_log_prob_dist" + DECODE_ONE_STEP_MODE = "decode_one_step" + ENCODER_SCORE_MODE = "encoder_score_mode" + + +class Seq2SlateOutputArch(Enum): + # Only output encoder scores + ENCODER_SCORE = "encoder_score" + + # A decoder outputs a sequence in an autoregressive way + AUTOREGRESSIVE = "autoregressive" + + # Using encoder scores, a decoder outputs a sequence using + # frechet sort (equivalent to iterative softmax) + FRECHET_SORT = "frechet_sort" + + +def print_model_info(seq2slate): + def _num_of_params(model): + return len(torch.cat([p.flatten() for p in model.parameters()])) + + logger.info(f"Num of total params: {_num_of_params(seq2slate)}") + logger.info(f"Num of Encoder params: {_num_of_params(seq2slate.encoder)}") + logger.info( + f"Num of Candidate Embedder params: {_num_of_params(seq2slate.candidate_embedder)}" + ) + logger.info( + f"Num of State Embedder params: {_num_of_params(seq2slate.state_embedder)}" + ) + if seq2slate.output_arch == Seq2SlateOutputArch.FRECHET_SORT: + logger.info( + f"Num of Encoder_Scorer params: {_num_of_params(seq2slate.encoder_scorer)}" + ) + elif seq2slate.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE: + logger.info( + f"Num of Positional Encoding params: {_num_of_params(seq2slate.positional_encoding_decoder)}" + ) + logger.info(f"Num of Decoder params: {_num_of_params(seq2slate.decoder)}") + elif seq2slate.output_arch == Seq2SlateOutputArch.ENCODER_SCORE: + logger.info( + f"Num of Encoder_Scorer params: {_num_of_params(seq2slate.encoder_scorer)}" + ) + + +def mask_logits_by_idx(logits, tgt_in_idx): + # logits shape: batch_size, seq_len, candidate_size + # tgt_in_idx shape: batch_size, seq_len + + # the first two symbols are reserved for padding and decoder-starting symbols + # so they should never be a possible output label + logits[:, :, :2] = float("-inf") + + batch_size, seq_len = tgt_in_idx.shape + mask_indices = torch.tril( + tgt_in_idx.repeat(1, seq_len).reshape(batch_size, seq_len, seq_len), diagonal=0 + ) + logits = logits.scatter(2, mask_indices, float("-inf")) + return logits + + +def subsequent_mask(size: int, device: torch.device): + """ + Mask out subsequent positions. Mainly used in the decoding process, + in which an item should not attend subsequent items. + + mask_ijk = 0 if the item should be ignored; 1 if the item should be paid attention + """ + subsequent_mask = ~torch.triu( + torch.ones(1, size, size, device=device, dtype=torch.bool), diagonal=1 + ) + return subsequent_mask + + +# TODO (@czxttkl): use when we introduce padding +def subsequent_and_padding_mask(tgt_in_idx): + """Create a mask to hide padding and future items""" + # tgt_in_idx shape: batch_size, seq_len + + # tgt_tgt_mask shape: batch_size, 1, seq_len + tgt_tgt_mask = (tgt_in_idx != PADDING_SYMBOL).unsqueeze(-2).type(torch.int8) + # subseq_mask shape: 1, seq_len, seq_len + subseq_mask = subsequent_mask(tgt_in_idx.size(-1), tgt_in_idx.device) + # tgt_tgt_mask shape: batch_size, seq_len, seq_len + tgt_tgt_mask = tgt_tgt_mask & subseq_mask + return tgt_tgt_mask + + +def clones(module, N): + """ + Produce N identical layers. 
+ + :param module: nn.Module class + :param N: number of copies + """ + return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) + + +def attention(query, key, value, mask, d_k): + """Scaled Dot Product Attention""" + # mask shape: batch_size x 1 x seq_len x seq_len + + # scores shape: batch_size x num_heads x seq_len x seq_len + scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) + scores = scores.masked_fill(mask == 0, float("-inf")) + # p_attn shape: batch_size x num_heads x seq_len x seq_len + p_attn = F.softmax(scores, dim=3) + # attn shape: batch_size x num_heads x seq_len x d_k + attn = torch.matmul(p_attn, value) + return attn, p_attn + + +def per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx): + """Gather per-symbol log probabilities into per-seq log probabilities""" + # per_symbol_log_probs shape: batch_size, seq_len, candidate_size + # tgt_out_idx shape: batch_size, seq_len + # per_symbol_log_probs is log probability of each symbol in the tgt_out_idx + # shape: batch_size, seq_len + log_probs = torch.gather(per_symbol_log_probs, 2, tgt_out_idx.unsqueeze(2)).squeeze( + 2 + ) + # shape: batch_size, 1 + return log_probs.sum(dim=1, keepdim=True) + + +def per_symbol_to_per_seq_probs(per_symbol_probs, tgt_out_idx): + """Gather per-symbol probabilities into per-seq probabilities""" + # per_symbol_probs shape: batch_size, seq_len, candidate_size + # tgt_out_idx shape: batch_size, seq_len + # output shape: batch_size, 1 + return torch.clamp( + torch.prod( + torch.gather(per_symbol_probs, 2, tgt_out_idx.unsqueeze(2)).squeeze(2), + dim=1, + keepdim=True, + ), + # prevent zero probabilities, which cause torch.log return -inf + min=1e-40, + ) + + +def pytorch_decoder_mask( + memory: torch.Tensor, tgt_in_idx: torch.Tensor, num_heads: int +): + """ + Compute the masks used in the PyTorch Transformer-based decoder for + self-attention and attention over encoder outputs + + mask_ijk = 1 if the item should be ignored; 0 if the item should be paid attention + + Input: + memory shape: batch_size, src_seq_len, dim_model + tgt_in_idx (+2 offseted) shape: batch_size, tgt_seq_len + + Return: + tgt_tgt_mask shape: batch_size * num_heads, tgt_seq_len, tgt_seq_len + tgt_src_mask shape: batch_size * num_heads, tgt_seq_len, src_seq_len + """ + batch_size, src_seq_len, _ = memory.shape + tgt_seq_len = tgt_in_idx.shape[1] + device = memory.device + mask_indices = torch.tril( + tgt_in_idx.repeat(1, tgt_seq_len).reshape(batch_size, tgt_seq_len, tgt_seq_len), + diagonal=0, + ).to(device) + tgt_src_mask_augmented = torch.zeros( + batch_size, tgt_seq_len, src_seq_len + 2, dtype=torch.bool, device=device + ).scatter(2, mask_indices, 1) + tgt_src_mask = tgt_src_mask_augmented[:, :, 2:].repeat_interleave(num_heads, dim=0) + tgt_tgt_mask = (subsequent_mask(tgt_seq_len, device) == 0).repeat( + batch_size * num_heads, 1, 1 + ) + return tgt_tgt_mask, tgt_src_mask diff --git a/reagent/models/__init__.py b/reagent/models/__init__.py index 076974c8b..66a831e3c 100644 --- a/reagent/models/__init__.py +++ b/reagent/models/__init__.py @@ -15,6 +15,8 @@ from .dueling_q_network import DuelingQNetwork, ParametricDuelingQNetwork from .embedding_bag_concat import EmbeddingBagConcat from .fully_connected_network import FullyConnectedNetwork +from .mlp_scorer import MLPScorer +from .seq2reward_model import Seq2RewardNetwork __all__ = [ @@ -31,4 +33,6 @@ "GaussianFullyConnectedActor", "DirichletFullyConnectedActor", "FullyConnectedActor", + "MLPScorer", + "Seq2RewardNetwork", ] diff --git 
a/reagent/models/actor.py b/reagent/models/actor.py index 92225cf6b..cf1900dcc 100644 --- a/reagent/models/actor.py +++ b/reagent/models/actor.py @@ -5,16 +5,48 @@ from typing import List, Optional import torch -from reagent import types as rlt +from reagent.core import types as rlt +from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE +from reagent.core.tensorboardX import SummaryWriterContext from reagent.models.base import ModelBase from reagent.models.fully_connected_network import FullyConnectedNetwork -from reagent.parameters import CONTINUOUS_TRAINING_ACTION_RANGE -from reagent.tensorboardX import SummaryWriterContext from torch.distributions import Dirichlet from torch.distributions.normal import Normal +LOG_PROB_MIN: float = -2.0 +LOG_PROB_MAX = 2.0 + + +class StochasticActor(ModelBase): + """ + An actor defined by a scorer and sampler. + The scorer gives each action an score. And the sampler samples actions + based on the action scores.""" + + def __init__(self, scorer, sampler) -> None: + super().__init__() + self.scorer = scorer + self.sampler = sampler + + def input_prototype(self): + return self.scorer.input_prototype() + + def get_distributed_data_parallel_model(self): + raise NotImplementedError() + + def forward(self, state): + action_scores = self.scorer(state) + return self.sampler.sample_action(action_scores, possible_actions_mask=None) + class FullyConnectedActor(ModelBase): + """ + A general model arch for mapping from state to ActorOutput, + which contains an action tensor and log probabilities (optional). + + The model arch is often used to implement the actor in actor-critic algorithms. + """ + def __init__( self, state_dim: int, @@ -24,7 +56,7 @@ def __init__( use_batch_norm: bool = False, action_activation: str = "tanh", exploration_variance: Optional[float] = None, - ): + ) -> None: super().__init__() assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim) assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim) @@ -69,7 +101,7 @@ def forward(self, state: rlt.FeatureData) -> rlt.ActorOutput: # TODO: log prob is affected by clamping, how to handle that? log_prob = ( self.noise_dist.log_prob(noise).to(action.device).sum(dim=1).view(-1, 1) - ) + ).clamp(LOG_PROB_MIN, LOG_PROB_MAX) action = (action + noise.to(action.device)).clamp( *CONTINUOUS_TRAINING_ACTION_RANGE ) @@ -77,6 +109,12 @@ def forward(self, state: rlt.FeatureData) -> rlt.ActorOutput: class GaussianFullyConnectedActor(ModelBase): + """ + A model arch similar to FullyConnectedActor, except that the last layer + represents the parameters of a multi-dimensional Gaussian distribution. + The action to return is sampled from the Gaussian distribution. + """ + def __init__( self, state_dim: int, @@ -86,7 +124,12 @@ def __init__( scale: float = 0.05, use_batch_norm: bool = False, use_layer_norm: bool = False, - ): + use_l2_normalization: bool = False, + ) -> None: + """ + Args: + use_l2_normalization: if True, divides action by l2 norm. 
+ """ super().__init__() assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim) assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim) @@ -109,15 +152,16 @@ def __init__( self.loc_layer_norm = torch.nn.LayerNorm(action_dim) self.scale_layer_norm = torch.nn.LayerNorm(action_dim) + self.use_l2_normalization = use_l2_normalization + # used to calculate log-prob self.const = math.log(math.sqrt(2 * math.pi)) self.eps = 1e-6 - self._log_min_max = (-20.0, 2.0) def input_prototype(self): return rlt.FeatureData(torch.randn(1, self.state_dim)) - def _log_prob(self, r, scale_log): + def _normal_log_prob(self, r, scale_log): """ Compute log probability from normal distribution the same way as torch.distributions.normal.Normal, which is: @@ -132,16 +176,16 @@ def _log_prob(self, r, scale_log): The primary reason we don't use Normal class is that it currently cannot be exported through ONNX. """ - return -(r ** 2) / 2 - scale_log - self.const + return -(r**2) / 2 - scale_log - self.const def _squash_correction(self, squashed_action): """ Same as https://github.com/haarnoja/sac/blob/108a4229be6f040360fcca983113df9c4ac23a6a/sac/policies/gaussian_policy.py#L133 """ - return (1 - squashed_action ** 2 + self.eps).log() + return (1 - squashed_action**2 + self.eps).log() - def _get_loc_and_scale_log(self, state): + def _get_loc_and_scale_log(self, state: rlt.FeatureData): loc_scale = self.fc(state.float_features) loc = loc_scale[::, : self.action_dim] scale_log = loc_scale[::, self.action_dim :] @@ -150,51 +194,54 @@ def _get_loc_and_scale_log(self, state): loc = self.loc_layer_norm(loc) scale_log = self.scale_layer_norm(scale_log) - scale_log = scale_log.clamp(*self._log_min_max) + scale_log = scale_log.clamp(LOG_PROB_MIN, LOG_PROB_MAX) return loc, scale_log + def _squash_raw_action(self, raw_action: torch.Tensor) -> torch.Tensor: + # NOTE: without clamping to (-(1-eps), 1-eps), torch.tanh would output + # 1, and torch.atanh would map it to +inf, causing log_prob to be -inf. + squashed_action = torch.clamp( + torch.tanh(raw_action), -1.0 + self.eps, 1.0 - self.eps + ) + if self.use_l2_normalization: + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and + # `int`. 
+ l2_norm = (squashed_action**2).sum(dim=1, keepdim=True).sqrt() + squashed_action = squashed_action / l2_norm + return squashed_action + def forward(self, state: rlt.FeatureData): loc, scale_log = self._get_loc_and_scale_log(state) r = torch.randn_like(scale_log, device=scale_log.device) - action = torch.tanh(loc + r * scale_log.exp()) - - # Since each dim are independent, log-prob is simply sum - log_prob = self._log_prob(r, scale_log) - squash_correction = self._squash_correction(action) + raw_action = loc + r * scale_log.exp() + squashed_action = self._squash_raw_action(raw_action) + squashed_loc = self._squash_raw_action(loc) if SummaryWriterContext._global_step % 1000 == 0: SummaryWriterContext.add_histogram("actor/forward/loc", loc.detach().cpu()) SummaryWriterContext.add_histogram( "actor/forward/scale_log", scale_log.detach().cpu() ) - SummaryWriterContext.add_histogram( - "actor/forward/log_prob", log_prob.detach().cpu() - ) - SummaryWriterContext.add_histogram( - "actor/forward/squash_correction", squash_correction.detach().cpu() - ) - log_prob = torch.sum(log_prob - squash_correction, dim=1) return rlt.ActorOutput( - action=action, log_prob=log_prob.reshape(-1, 1), action_mean=loc + action=squashed_action, + log_prob=self.get_log_prob(state, squashed_action), + squashed_mean=squashed_loc, ) - def _atanh(self, x): - """ - Can't find this on pytorch doc :( - """ - return ((1 + x).log() - (1 - x).log()) / 2 - - @torch.no_grad() - def get_log_prob(self, state, squashed_action): + def get_log_prob(self, state: rlt.FeatureData, squashed_action: torch.Tensor): """ Action is expected to be squashed with tanh """ - loc, scale_log = self._get_loc_and_scale_log(state) - # This is not getting exported; we can use it - n = Normal(loc, scale_log.exp()) - raw_action = self._atanh(squashed_action) + if self.use_l2_normalization: + # TODO: calculate log_prob for l2 normalization + # https://math.stackexchange.com/questions/3120506/on-the-distribution-of-a-normalized-gaussian-vector + # http://proceedings.mlr.press/v100/mazoure20a/mazoure20a.pdf + pass - log_prob = n.log_prob(raw_action) + loc, scale_log = self._get_loc_and_scale_log(state) + raw_action = torch.atanh(squashed_action) + r = (raw_action - loc) / scale_log.exp() + log_prob = self._normal_log_prob(r, scale_log) squash_correction = self._squash_correction(squashed_action) if SummaryWriterContext._global_step % 1000 == 0: SummaryWriterContext.add_histogram( @@ -209,16 +256,21 @@ def get_log_prob(self, state, squashed_action): SummaryWriterContext.add_histogram( "actor/get_log_prob/squash_correction", squash_correction.detach().cpu() ) - log_prob = torch.sum(log_prob - squash_correction, dim=1).reshape(-1, 1) - - return log_prob + return torch.sum(log_prob - squash_correction, dim=1).reshape(-1, 1) class DirichletFullyConnectedActor(ModelBase): + """ + A model arch similar to FullyConnectedActor, except that the last layer + is sampled from a Dirichlet distribution + """ + # Used to prevent concentration from being 0 EPSILON = 1e-6 - def __init__(self, state_dim, action_dim, sizes, activations, use_batch_norm=False): + def __init__( + self, state_dim, action_dim, sizes, activations, use_batch_norm: bool = False + ) -> None: """ AKA the multivariate beta distribution. Used in cases where actor's action must sum to 1. @@ -266,9 +318,5 @@ def forward(self, state): # ONNX can't export Dirichlet() action = torch._sample_dirichlet(concentration) - if not self.training: - # ONNX doesn't like reshape either.. 
- return rlt.ActorOutput(action=action) - log_prob = Dirichlet(concentration).log_prob(action) return rlt.ActorOutput(action=action, log_prob=log_prob.unsqueeze(dim=1)) diff --git a/reagent/models/base.py b/reagent/models/base.py index a7ce445dd..294a2dd96 100644 --- a/reagent/models/base.py +++ b/reagent/models/base.py @@ -5,7 +5,7 @@ from typing import Any, Optional import torch.nn as nn -from reagent import types as rlt +from reagent.core import types as rlt # add ABCMeta once https://github.com/sphinx-doc/sphinx/issues/5995 is fixed @@ -29,7 +29,7 @@ def feature_config(self) -> Optional[rlt.ModelFeatureConfig]: """ return None - def get_target_network(self): + def get_target_network(self) -> "ModelBase": """ Return a copy of this network to be used as target network @@ -48,9 +48,13 @@ def get_distributed_data_parallel_model(self): """ raise NotImplementedError - def cpu_model(self): + def cpu_model(self) -> "ModelBase": """ Override this in DistributedDataParallel models """ # This is not ideal but makes exporting simple return deepcopy(self).cpu() + + def requires_model_parallel(self) -> bool: + """Return True if this model has large embedding tables which need to be sharded""" + return False diff --git a/reagent/models/bcq.py b/reagent/models/bcq.py index 65bad1ea4..b6f6b3e99 100644 --- a/reagent/models/bcq.py +++ b/reagent/models/bcq.py @@ -2,11 +2,14 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import torch +from reagent.core import types as rlt from reagent.models.base import ModelBase class BatchConstrainedDQN(ModelBase): - def __init__(self, state_dim, q_network, imitator_network, bcq_drop_threshold): + def __init__( + self, state_dim, q_network, imitator_network, bcq_drop_threshold + ) -> None: super().__init__() assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim) self.state_dim = state_dim @@ -18,7 +21,7 @@ def __init__(self, state_dim, q_network, imitator_network, bcq_drop_threshold): def input_prototype(self): return self.q_network.input_prototype() - def forward(self, state): + def forward(self, state: rlt.FeatureData): q_values = self.q_network(state) imitator_outputs = self.imitator_network(state.float_features) imitator_probs = torch.nn.functional.softmax(imitator_outputs, dim=1) diff --git a/reagent/models/categorical_dqn.py b/reagent/models/categorical_dqn.py index f0dce217d..15063b546 100644 --- a/reagent/models/categorical_dqn.py +++ b/reagent/models/categorical_dqn.py @@ -3,7 +3,7 @@ import torch import torch.nn.functional as F -from reagent import types as rlt +from reagent.core import types as rlt from reagent.models.base import ModelBase @@ -14,8 +14,8 @@ def __init__( *, qmin: float, qmax: float, - num_atoms: int - ): + num_atoms: int, + ) -> None: super().__init__() self.distributional_network = distributional_network self.support = torch.linspace(qmin, qmax, num_atoms) @@ -24,7 +24,6 @@ def input_prototype(self): return self.distributional_network.input_prototype() def forward(self, state: rlt.FeatureData): - # pyre-fixme[16]: `Tensor` has no attribute `exp`. 
dist = self.log_dist(state).exp() q_values = (dist * self.support.to(dist.device)).sum(2) return q_values diff --git a/reagent/models/cem_planner.py b/reagent/models/cem_planner.py index dbc94ff98..1dbd1b426 100644 --- a/reagent/models/cem_planner.py +++ b/reagent/models/cem_planner.py @@ -17,10 +17,10 @@ import scipy.stats as stats import torch import torch.nn as nn -from reagent import types as rlt +from reagent.core import types as rlt +from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE from reagent.models.base import ModelBase from reagent.models.world_model import MemoryNetwork -from reagent.parameters import CONTINUOUS_TRAINING_ACTION_RANGE from reagent.training.utils import rescale_actions from torch.distributions.bernoulli import Bernoulli from torch.distributions.categorical import Categorical @@ -105,8 +105,8 @@ def __init__( self.action_lower_bounds = np.tile( action_lower_bounds, self.plan_horizon_length ) - self.orig_action_upper = action_upper_bounds - self.orig_action_lower = action_lower_bounds + self.orig_action_upper = torch.tensor(action_upper_bounds) + self.orig_action_lower = torch.tensor(action_lower_bounds) @torch.no_grad() def forward(self, state: rlt.FeatureData): @@ -150,7 +150,7 @@ def acc_rewards_of_one_solution( ), mem_net=self.mem_net_list[mem_net_idx], ) - reward_matrix[i, j] = reward * (self.gamma ** j) + reward_matrix[i, j] = reward * (self.gamma**j) if not not_terminal: logger.debug( @@ -190,7 +190,7 @@ def acc_rewards_of_all_solutions( def sample_reward_next_state_terminal( self, state: rlt.FeatureData, action: rlt.FeatureData, mem_net: MemoryNetwork ): - """ Sample one-step dynamics based on the provided world model """ + """Sample one-step dynamics based on the provided world model""" wm_output = mem_net(state, action) num_mixtures = wm_output.logpi.shape[2] mixture_idx = ( @@ -219,11 +219,10 @@ def constrained_variance(self, mean, var): return np.minimum(np.minimum((lb_dist / 2) ** 2, (ub_dist / 2) ** 2), var) @torch.no_grad() - def continuous_planning(self, state: rlt.FeatureData) -> np.ndarray: + def continuous_planning(self, state: rlt.FeatureData) -> torch.Tensor: # TODO: Warmstarts means and vars using previous solutions (T48841404) mean = (self.action_upper_bounds + self.action_lower_bounds) / 2 var = (self.action_upper_bounds - self.action_lower_bounds) ** 2 / 16 - # pyre-fixme[29]: `truncnorm_gen` is not a function. 
normal_sampler = stats.truncnorm( -2, 2, loc=np.zeros_like(mean), scale=np.ones_like(mean) ) @@ -256,16 +255,16 @@ def continuous_planning(self, state: rlt.FeatureData) -> np.ndarray: # Pick the first action of the optimal solution solution = mean[: self.action_dim] raw_action = solution.reshape(-1) - low, high = CONTINUOUS_TRAINING_ACTION_RANGE + low = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[0]) + high = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[1]) # rescale to range (-1, 1) as per canonical output range of continuous agents - raw_action = rescale_actions( - raw_action, + return rescale_actions( + torch.tensor(raw_action), new_min=low, new_max=high, prev_min=self.orig_action_lower, prev_max=self.orig_action_upper, ) - return torch.tensor(raw_action) @torch.no_grad() def discrete_planning(self, state: rlt.FeatureData) -> Tuple[int, np.ndarray]: @@ -300,4 +299,6 @@ def discrete_planning(self, state: rlt.FeatureData) -> Tuple[int, np.ndarray]: f"Stats: {reward_tally} / {first_action_tally}" f" = {reward_tally/first_action_tally} " ) + # pyre-fixme[7]: Expected `Tuple[int, ndarray]` but got `Tuple[typing.Any, + # Tensor]`. return best_next_action_idx, best_next_action_one_hot diff --git a/reagent/models/convolutional_network.py b/reagent/models/convolutional_network.py index 913837820..0f3a683db 100644 --- a/reagent/models/convolutional_network.py +++ b/reagent/models/convolutional_network.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import collections import logging import math @@ -15,14 +16,32 @@ logger = logging.getLogger(__name__) +CnnParameters = collections.namedtuple( + "CnnParameters", + [ + "conv_dims", + "conv_height_kernels", + "conv_width_kernels", + "pool_types", + "pool_kernels_strides", + "num_input_channels", + "input_height", + "input_width", + ], +) + + class ConvolutionalNetwork(nn.Module): - def __init__(self, cnn_parameters, layers, activations) -> None: + def __init__(self, cnn_parameters, layers, activations, use_layer_norm) -> None: super().__init__() self.conv_dims = cnn_parameters.conv_dims self.conv_height_kernels = cnn_parameters.conv_height_kernels self.conv_width_kernels = cnn_parameters.conv_width_kernels + self.use_layer_norm = use_layer_norm + self.conv_layers: nn.ModuleList = nn.ModuleList() self.pool_layers: nn.ModuleList = nn.ModuleList() + self.layer_norm_layers: nn.ModuleList = nn.ModuleList() for i, _ in enumerate(self.conv_dims[1:]): self.conv_layers.append( @@ -42,26 +61,35 @@ def __init__(self, cnn_parameters, layers, activations) -> None: ) else: assert False, "Unknown pooling type".format(layers) + if self.use_layer_norm: + self.layer_norm_layers.append(nn.GroupNorm(1, self.conv_dims[i + 1])) input_size = ( cnn_parameters.num_input_channels, cnn_parameters.input_height, cnn_parameters.input_width, ) + # pyre-fixme[6]: For 1st param expected `Sequence[Union[int, SymInt]]` but + # got `int`. 
conv_out = self.conv_forward(torch.ones(1, *input_size)) self.fc_input_dim = int(np.prod(conv_out.size()[1:])) layers[0] = self.fc_input_dim - self.feed_forward = FullyConnectedNetwork(layers, activations) + self.feed_forward = FullyConnectedNetwork( + layers, activations, use_layer_norm=use_layer_norm + ) def conv_forward(self, input): x = input for i, _ in enumerate(self.conv_layers): - x = F.relu(self.conv_layers[i](x)) + x = self.conv_layers[i](x) + if self.use_layer_norm: + x = self.layer_norm_layers[i](x) + x = F.relu(x) x = self.pool_layers[i](x) return x def forward(self, input) -> torch.FloatTensor: - """ Forward pass for generic convnet DNNs. Assumes activation names + """Forward pass for generic convnet DNNs. Assumes activation names are valid pytorch activation names. :param input image tensor """ diff --git a/reagent/models/critic.py b/reagent/models/critic.py index 5d570c552..310353e52 100644 --- a/reagent/models/critic.py +++ b/reagent/models/critic.py @@ -4,12 +4,42 @@ from typing import List import torch -from reagent import types as rlt +import torch.fx +from reagent.core import types as rlt from reagent.models.base import ModelBase from reagent.models.fully_connected_network import FullyConnectedNetwork +# This method contains dynamic control flow +# Use torch.fx.wrap to mark it as a leaf module for FX tracing +@torch.fx.wrap +def run_feature_validation( + state_float_features_dim: int, + action_float_features_dim: int, + state_float_features_batch_size: int, + action_float_features_batch_size: int, +) -> None: + + assert ( + state_float_features_dim == 2 + ), f"Expected dimension of state is 2. Got {state_float_features_dim}" + + assert ( + action_float_features_dim == state_float_features_dim + ), "Dimensions of state and action mismatch" + + assert ( + state_float_features_batch_size == action_float_features_batch_size + ), "Batch sizes of state and action mismatch" + + class FullyConnectedCritic(ModelBase): + """ + A general model arch for mapping from state and action to scalar values. + + The model arch is often used to implement the critic in actor-critic algorithms. + """ + def __init__( self, state_dim: int, @@ -19,7 +49,8 @@ def __init__( use_batch_norm: bool = False, use_layer_norm: bool = False, output_dim: int = 1, - ): + final_activation: str = "linear", # most of the time "linear" is the right final activation to use! 
+ ) -> None: super().__init__() assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim) assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim) @@ -32,7 +63,7 @@ def __init__( ) self.fc = FullyConnectedNetwork( [state_dim + action_dim] + sizes + [output_dim], - activations + ["linear"], + activations + [final_activation], use_batch_norm=use_batch_norm, use_layer_norm=use_layer_norm, ) @@ -45,13 +76,16 @@ def input_prototype(self): ) def forward(self, state: rlt.FeatureData, action: rlt.FeatureData): - assert ( - len(state.float_features.shape) == len(action.float_features.shape) - and len(action.float_features.shape) == 2 - and (state.float_features.shape[0] == action.float_features.shape[0]) - ), ( - f"state shape: {state.float_features.shape}; action shape: " - f"{action.float_features.shape} not equal to (batch_size, feature_dim)" + state_float_features_dim = state.float_features.dim() + action_float_features_dim = action.float_features.dim() + state_float_features_batch_size = state.float_features.size(dim=0) + action_float_features_batch_size = action.float_features.size(dim=0) + + run_feature_validation( + state_float_features_dim, + action_float_features_dim, + state_float_features_batch_size, + action_float_features_batch_size, ) cat_input = torch.cat((state.float_features, action.float_features), dim=-1) return self.fc(cat_input) diff --git a/reagent/models/deep_represent_linucb.py b/reagent/models/deep_represent_linucb.py new file mode 100644 index 000000000..2a5a21cbc --- /dev/null +++ b/reagent/models/deep_represent_linucb.py @@ -0,0 +1,115 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +from typing import List, Optional + +import torch +from reagent.models.fully_connected_network import FullyConnectedNetwork +from reagent.models.linear_regression import batch_quadratic_form, LinearRegressionUCB +from torch import nn + +logger = logging.getLogger(__name__) + + +class DeepRepresentLinearRegressionUCB(LinearRegressionUCB): + """ + It is a multiple layer regression model that output UCB score. + The first N layers are trainable by torch optimizer(). + The last layer is the traditional LinUCB, and it is not updated by optimizer, + but still will be updated by matrix computations. + + Example : + Features(dim=9) --> deep_represent_layers --> Features(dim=3) --> LinUCB --> ucb score + + DeepRepresentLinUCBTrainer( + (scorer): DeepRepresentLinearRegressionUCB( + (deep_represent_layers): FullyConnectedNetwork( + (dnn): Sequential( + (0): Linear(in_features=9, out_features=6, bias=True) + (1): ReLU() + (2): Linear(in_features=6, out_features=3, bias=True) + (3): Identity() + ) + ) + ) + (loss_fn): MSELoss() + ) + """ + + def __init__( + self, + raw_input_dim: int, # raw feature + sizes: List[int], # MLP hidden layers of the deep_represent module + linucb_inp_dim: int, # output from deep_represent module, i.e., input to LinUCB module + activations: List[str], + *, + output_activation: str = "linear", + use_batch_norm: bool = False, + dropout_ratio: float = 0.0, + normalized_output: bool = False, + use_layer_norm: bool = False, + ucb: Optional[torch.Tensor] = None, + mlp_out: torch.Tensor = None, # pyre-fixme; Attribute has type `Tensor`; used as `None`. + pred_u: torch.Tensor = None, # pyre-fixme; Attribute has type `Tensor`; used as `None`. + pred_sigma: torch.Tensor = None, # pyre-fixme; Attribute has type `Tensor`; used as `None`. 
+ mlp_layers: nn.Module = None, # pyre-fixme; Attribute has type `nn.Module`; used as `None`. + ): + super().__init__(input_dim=linucb_inp_dim) + + assert raw_input_dim > 0, "raw_input_dim must be > 0, got {}".format( + raw_input_dim + ) + assert linucb_inp_dim > 0, "linucb_inp_dim must be > 0, got {}".format( + linucb_inp_dim + ) + assert len(sizes) == len( + activations + ), "The numbers of sizes and activations must match; got {} vs {}".format( + len(sizes), len(activations) + ) + + self.raw_input_dim = raw_input_dim # input to DeepRepresent + self.mlp_out = mlp_out + self.pred_u = pred_u + self.pred_sigma = pred_sigma + if mlp_layers is None: + self.deep_represent_layers = FullyConnectedNetwork( + [raw_input_dim] + sizes + [linucb_inp_dim], + activations + [output_activation], + use_batch_norm=use_batch_norm, + dropout_ratio=dropout_ratio, + normalize_output=normalized_output, + use_layer_norm=use_layer_norm, + ) + else: + self.deep_represent_layers = mlp_layers # use customized layers + + def input_prototype(self) -> torch.Tensor: + return torch.randn(1, self.raw_input_dim) + + def forward( + self, inp: torch.Tensor, ucb_alpha: Optional[float] = None + ) -> torch.Tensor: + """ + Pass raw input to mlp. + This mlp is trainable to optimizer, i.e., will be updated by torch optimizer(), + then output of mlp is passed to a LinUCB layer. + """ + + self.mlp_out = self.deep_represent_layers( + inp + ) # preprocess by DeepRepresent module before fed to LinUCB layer + + if ucb_alpha is None: + ucb_alpha = self.ucb_alpha + self.pred_u = torch.matmul(self.mlp_out, self.coefs) + if ucb_alpha != 0: + self.pred_sigma = torch.sqrt( + batch_quadratic_form(self.mlp_out, self.inv_avg_A) + / torch.clamp(self.sum_weight, min=0.00001) + ) + pred_ucb = self.pred_u + ucb_alpha * self.pred_sigma + else: + pred_ucb = self.pred_u + # trainer needs pred_u and mlp_out to update parameters + return pred_ucb diff --git a/reagent/models/disjoint_linucb_predictor.py b/reagent/models/disjoint_linucb_predictor.py new file mode 100644 index 000000000..f7b4d26ec --- /dev/null +++ b/reagent/models/disjoint_linucb_predictor.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +from typing import Optional + +import torch + +from pytorch_lightning.utilities.distributed import ReduceOp, sync_ddp_if_available +from reagent.models.base import ModelBase + + +logger = logging.getLogger(__name__) + + +def batch_quadratic_form_multi_arms(x: torch.Tensor, A: torch.Tensor) -> torch.Tensor: + """ + Compute the quadratic form x^T * A * x for a batched input x. (exploration term in UCB) + x shape: (B, N) + A shape: (num_arms, N, N) + output shape: (B, num_arms) + B is the batch size above + N is the featur dimension here + """ + # xta = x^T A + # the input x is already transposed, so no transpose is applied below. + # xta is dimension (num_arms, B, N) + xta = torch.matmul(x, A) + # einsum i: arm_idx; j: data index in batch; k: feature index + return torch.einsum("ijk, jk -> ji", xta, x) + + +class DisjointLinearRegressionUCB(ModelBase): + """ + A linear regression model for Disjoint LinUCB. + Note that instead of being trained by a PyTorch optimizer, we explicitly + update attributes A and b. + + Args: + input_dim: Dimension of input data + l2_reg_lambda: The weight on L2 regularization + ucb_alpha: The coefficient on the standard deviation in UCB formula. 
+ gamma: The discount factor to avoid exploding numbers when doing incremental training + using gamma as part of simplified D-LinUCB: https://arxiv.org/pdf/1909.09146.pdf + In this simplified version of D-LinUCB, we calculate A = \sum \gamma^t xx^T + to discount the old data. See N2441818 for why we do this. + set gamma=1.0 to use traditional model + """ + + def __init__( + self, + num_arms: int, + input_dim: int, + l2_reg_lambda: float = 1.0, + ucb_alpha: float = 1.0, + gamma: float = 1.0, + ): + + """ + self.A: num_arms * dim * dim + self.A_inv: num_arms * dim * dim + self.b: num_arms * dim + self.coefs: num_arms * dim + dim is feature dimension + """ + super().__init__() + + self.num_arms = num_arms + self.input_dim = input_dim + self.ucb_alpha = ucb_alpha + self.cur_num_obs = torch.zeros(self.num_arms, dtype=torch.int64) + self.gamma = gamma + assert self.gamma <= 1.0 and self.gamma > 0.0 + self.l2_reg_lambda = l2_reg_lambda + # A is accumulated from all training epochs + self.register_buffer( + "A", + torch.zeros((self.input_dim, self.input_dim)).repeat(self.num_arms, 1, 1), + ) + # cur_A is x x^T accumulated within this training epoch + self.register_buffer( + "cur_A", + torch.zeros((self.input_dim, self.input_dim)).repeat(self.num_arms, 1, 1), + ) + # b is accumulated from all training epochs + self.register_buffer("b", torch.zeros(self.num_arms, self.input_dim)) + # cur_b is x * y accumulated within this training epoch + self.register_buffer("cur_b", torch.zeros(self.num_arms, self.input_dim)) + self.register_buffer( + "coefs", torch.zeros(self.input_dim).repeat(self.num_arms, 1) + ) + self.register_buffer( + "inv_A", + torch.eye(self.input_dim).repeat(self.num_arms, 1, 1), + ) + self.register_buffer( + "coefs_valid_for_A", + -torch.ones((self.input_dim, self.input_dim)).repeat(self.num_arms, 1, 1), + ) # value of A matrix for which self.coefs were estimated + + # add a dummy parameter so that DDP doesn't compain about lack of parameters with gradient + self.dummy_param = torch.nn.parameter.Parameter(torch.zeros(1)) + + def input_prototype(self) -> torch.Tensor: + return torch.randn(1, self.input_dim) + + def _estimate_coefs(self): + """ + Compute current estimate of regression coefficients and A_inv=A**-1 + We save both coefficients and A_inv in case they are needed again before we add observations + + self.coefs: num_arms * dim + """ + self.A += sync_ddp_if_available(self.cur_A, reduce_op=ReduceOp.SUM) + self.b += sync_ddp_if_available(self.cur_b, reduce_op=ReduceOp.SUM) + # Set cur_A and cur_b back to 0 for next training epoch + self.cur_A.zero_() + self.cur_b.zero_() + device = self.b.device + # add regularization here so that it's not double-counted under distributed training + # send them to the same device to avoid errors when doing dpp, example failed job without this: aienv-0d5c64a3b3 + m = self.A.to(device) + self.l2_reg_lambda * torch.eye(self.input_dim).to( + device + ) + self.inv_A = torch.linalg.pinv(m).contiguous() + assert self.inv_A.size()[0] == self.b.size()[0] + + # inv_A: (num_arms, d, d) + # self.b: (num_arms, d) + # einsum j: arm_idx, d: feature dimension index + # output self.coefs: (num_arms, d) + self.coefs = torch.einsum("jkl,jl->jk", self.inv_A, self.b) + # copy A to make coefs_valid_for_A the same as A + # need coefs_valid_for_A to check if we have done _estimate_coefs + # this is needed to avoid redundant re-compute of coefs in forward function. 
+ self.coefs_valid_for_A = self.gamma * self.A.clone() + + cur_round_obs = sync_ddp_if_available( + self.cur_num_obs.to(device), reduce_op=ReduceOp.SUM + ) + logging.info( + f"current round num of observations for {self.num_arms} arms are {cur_round_obs}" + ) + logging.info( + f"current round num of observations for all arms are {cur_round_obs.sum()}" + ) + self.cur_num_obs.zero_() + + def forward( + self, inp: torch.Tensor, ucb_alpha: Optional[float] = None + ) -> torch.Tensor: + """ + Forward can return the mean or a UCB. If returning UCB, the CI width is stddev*ucb_alpha + If ucb_alpha is not passed in, a fixed alpha from init is used + + inp: num_batch * dim + self.coefs: num_arms * dim + output: num_batch * num_arms + output is in the format: + [ + [score_1, score_2, ..., score_{num_arms}], + ...., + ] + """ + if ucb_alpha is None: + ucb_alpha = self.ucb_alpha + + results = torch.matmul(inp, self.coefs.t()) + if ucb_alpha == 0: + return results + results += ucb_alpha * torch.sqrt( + batch_quadratic_form_multi_arms(inp, self.inv_A) + ) + return results diff --git a/reagent/models/dqn.py b/reagent/models/dqn.py index 61d7c2b3b..32fae2daa 100644 --- a/reagent/models/dqn.py +++ b/reagent/models/dqn.py @@ -4,12 +4,21 @@ from typing import Optional import torch -from reagent import types as rlt -from reagent.models.base import ModelBase -from reagent.models.fully_connected_network import FullyConnectedNetwork +from reagent.core import types as rlt +from reagent.models.fully_connected_network import FloatFeatureFullyConnected -class FullyConnectedDQN(ModelBase): +INVALID_ACTION_CONSTANT: float = -1e10 + + +class FullyConnectedDQN(FloatFeatureFullyConnected): + """ + A general model arch for mapping from states to scalar values. + + The model arch is often used to implement Deep Q-network, where + the outputs are q-values of all defined actions. 
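# A minimal usage sketch for the FullyConnectedDQN described above. The layer sizes and
# dimensions are arbitrary illustrative values (not taken from this change); the import
# paths follow the file locations shown in this diff.
import torch
from reagent.core import types as rlt
from reagent.models.dqn import FullyConnectedDQN

q_network = FullyConnectedDQN(
    state_dim=10,
    action_dim=4,
    sizes=[64, 32],
    activations=["relu", "relu"],
)
state = rlt.FeatureData(float_features=torch.randn(3, 10))
q_values = q_network(state)  # shape (3, 4): one q-value per action

# With possible_actions_mask, q-values of invalid actions are pushed down by
# INVALID_ACTION_CONSTANT so that a downstream softmax gives them ~0 probability.
mask = torch.tensor([[1, 1, 0, 1]] * 3)
masked_q_values = q_network(state, possible_actions_mask=mask)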
+ """ + def __init__( self, state_dim, @@ -17,36 +26,36 @@ def __init__( sizes, activations, *, + output_activation: str = "linear", num_atoms: Optional[int] = None, - use_batch_norm=False, - dropout_ratio=0.0, - normalized_output=False, - ): - super().__init__() - assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim) - assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim) - self.state_dim = state_dim - self.action_dim = action_dim - assert len(sizes) == len( - activations - ), "The numbers of sizes and activations must match; got {} vs {}".format( - len(sizes), len(activations) - ) - self.num_atoms = num_atoms - self.fc = FullyConnectedNetwork( - [state_dim] + sizes + [action_dim * (num_atoms or 1)], - activations + ["linear"], + use_batch_norm: bool = False, + dropout_ratio: float = 0.0, + normalized_output: bool = False, + use_layer_norm: bool = False, + ) -> None: + super().__init__( + state_dim=state_dim, + output_dim=action_dim, + sizes=sizes, + activations=activations, + num_atoms=num_atoms, use_batch_norm=use_batch_norm, dropout_ratio=dropout_ratio, - normalize_output=normalized_output, + normalized_output=normalized_output, + use_layer_norm=use_layer_norm, + output_activation=output_activation, ) + self.action_dim = self.output_dim - def input_prototype(self): - return rlt.FeatureData(self.fc.input_prototype()) + def forward( + self, + state: rlt.FeatureData, + possible_actions_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + x = super().forward(state=state) - def forward(self, state: rlt.FeatureData) -> torch.Tensor: - float_features = state.float_features - x = self.fc(float_features) - if self.num_atoms is not None: - x = x.view(float_features.shape[0], self.action_dim, self.num_atoms) + # Only used when FullyConnectedDQN is used as policy.scorer in ReinforceTrainer + if possible_actions_mask is not None: + # subtract huge value from impossible actions to force their probabilities to 0 + x = x + (1 - possible_actions_mask.float()) * INVALID_ACTION_CONSTANT return x diff --git a/reagent/models/dueling_q_network.py b/reagent/models/dueling_q_network.py index 3681a9f66..4f62399e0 100644 --- a/reagent/models/dueling_q_network.py +++ b/reagent/models/dueling_q_network.py @@ -5,17 +5,25 @@ from typing import List, Optional, Tuple import torch -from reagent import types as rlt +from reagent.core import types as rlt +from reagent.core.tensorboardX import SummaryWriterContext from reagent.models.base import ModelBase from reagent.models.critic import FullyConnectedCritic from reagent.models.dqn import FullyConnectedDQN -from reagent.tensorboardX import SummaryWriterContext logger = logging.getLogger(__name__) +INVALID_ACTION_CONSTANT = -1e10 class DuelingQNetwork(ModelBase): + """ + Dueling Q-Network Architecture: https://arxiv.org/abs/1511.06581 + + The model arch maps from state features to scalar outputs which represents + the q-values of defined actions. 
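# A minimal sketch of the invalid-action masking used by FullyConnectedDQN.forward and
# DuelingQNetwork.forward in this change: adding (1 - mask) * INVALID_ACTION_CONSTANT to
# the q-values makes a softmax over them assign (numerically) zero probability to the
# masked-out actions. The numbers below are made up for illustration.
import torch

INVALID_ACTION_CONSTANT = -1e10
q_value = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
possible_actions_mask = torch.tensor([[1, 1, 0, 1]])
masked_q = q_value + (1 - possible_actions_mask.float()) * INVALID_ACTION_CONSTANT
probs = torch.softmax(masked_q, dim=1)  # ~[0.04, 0.11, 0.00, 0.84]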
+ """ + def __init__( self, *, @@ -23,15 +31,12 @@ def __init__( advantage_network: ModelBase, value_network: ModelBase, ) -> None: - """ - Dueling Q-Network Architecture: https://arxiv.org/abs/1511.06581 - """ super().__init__() self.shared_network = shared_network input_prototype = shared_network.input_prototype() assert isinstance( input_prototype, rlt.FeatureData - ), f"shared_network should expect FeatureData as input" + ), "shared_network should expect FeatureData as input" self.advantage_network = advantage_network self.value_network = value_network @@ -95,7 +100,11 @@ def _get_values( q_value = value + advantage return value, raw_advantage, advantage, q_value - def forward(self, state: rlt.FeatureData) -> torch.Tensor: + def forward( + self, + state: rlt.FeatureData, + possible_actions_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: value, raw_advantage, advantage, q_value = self._get_values(state) # TODO: export these as observable values @@ -107,7 +116,11 @@ def forward(self, state: rlt.FeatureData) -> torch.Tensor: for i in range(advantage.shape[1]): a = advantage[:, i] _log_histogram_and_mean(f"{self._name}/{i}", "advantage", a) - + if possible_actions_mask is not None: + # subtract huge value from impossible actions to force their probabilities to 0 + q_value = ( + q_value + (1 - possible_actions_mask.float()) * INVALID_ACTION_CONSTANT + ) return q_value @@ -153,18 +166,21 @@ def make_fully_connected( sizes=layers[:-1], activations=activations[:-1], normalized_output=True, + use_batch_norm=use_batch_norm, ) advantage_network = FullyConnectedCritic( state_embedding_dim, action_dim, sizes=[state_embedding_dim // 2], activations=activations[-1:], + use_batch_norm=use_batch_norm, ) value_network = FullyConnectedDQN( state_embedding_dim, 1, sizes=[state_embedding_dim // 2], activations=activations[-1:], + use_batch_norm=use_batch_norm, ) return ParametricDuelingQNetwork( shared_network=shared_network, diff --git a/reagent/models/embedding_bag_concat.py b/reagent/models/embedding_bag_concat.py index 7580af454..9c235ddf0 100644 --- a/reagent/models/embedding_bag_concat.py +++ b/reagent/models/embedding_bag_concat.py @@ -1,9 +1,13 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from typing import Dict, List + import torch -from reagent import types as rlt +from reagent.core import types as rlt +from reagent.core.utils import embedding_bag_configs_from_feature_configs from reagent.models.base import ModelBase +from torchrec import EmbeddingBagConfig class EmbeddingBagConcat(ModelBase): @@ -14,31 +18,55 @@ class EmbeddingBagConcat(ModelBase): def __init__( self, - state_dim: int, + state_dense_dim: int, model_feature_config: rlt.ModelFeatureConfig, - embedding_dim: int, - ): + ) -> None: super().__init__() - assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim) - self.state_dim = state_dim + assert state_dense_dim > 0, "state_dense_dim must be > 0, got {}".format( + state_dense_dim + ) + self.state_dense_dim = state_dense_dim + # for input prototype + self._id_list_feature_names: List[str] = [ + config.name for config in model_feature_config.id_list_feature_configs + ] + self._id_score_list_feature_names: List[str] = [ + config.name for config in model_feature_config.id_score_list_feature_configs + ] + + embedding_bag_configs: List[ + EmbeddingBagConfig + ] = embedding_bag_configs_from_feature_configs( + [model_feature_config], + ) + assert ( + embedding_bag_configs + ), "No embedding bag config generated. 
Please double check model_feature_config." + + # Assume all id features will be mapped to the same number of dimensions + assert ( + len({config.embedding_dim for config in embedding_bag_configs}) == 1 + ), "Please ensure all embedding_dims in id_mapping_config are the same" + embedding_dim = embedding_bag_configs[0].embedding_dim self.embedding_bags = torch.nn.ModuleDict( { - id_list_feature.name: torch.nn.EmbeddingBag( - len( - model_feature_config.id_mapping_config[ - id_list_feature.id_mapping_name - ].ids - ), - embedding_dim, + table_name: torch.nn.EmbeddingBag( + num_embeddings=id_mapping.embedding_table_size, + embedding_dim=id_mapping.embedding_dim, + mode=str(id_mapping.pooling_type.name).lower(), ) - for id_list_feature in model_feature_config.id_list_feature_configs + for table_name, id_mapping in model_feature_config.id_mapping_config.items() } ) - + self.feat2table: Dict[str, str] = { + feature_name: config.id_mapping_name + for feature_name, config in model_feature_config.name2config.items() + } self._output_dim = ( - state_dim - + len(model_feature_config.id_list_feature_configs) * embedding_dim + state_dense_dim + + len(self._id_list_feature_names) * embedding_dim + + len(self._id_score_list_feature_names) * embedding_dim ) @property @@ -46,17 +74,39 @@ def output_dim(self) -> int: return self._output_dim def input_prototype(self): + id_list_features = { + k: (torch.tensor([0], dtype=torch.long), torch.tensor([], dtype=torch.long)) + for k in self._id_list_feature_names + } + id_score_list_features = { + k: ( + torch.tensor([0], dtype=torch.long), + torch.tensor([], dtype=torch.long), + torch.tensor([], dtype=torch.float), + ) + for k in self._id_score_list_feature_names + } return rlt.FeatureData( - float_features=torch.randn(1, self.state_dim), - id_list_features={ - k: (torch.zeros(1, dtype=torch.long), torch.ones(1, dtype=torch.long)) - for k in self.embedding_bags - }, + float_features=torch.randn(1, self.state_dense_dim), + id_list_features_raw=id_list_features, + id_score_list_features_raw=id_score_list_features, ) def forward(self, state: rlt.FeatureData): - embeddings = [ - m(state.id_list_features[name][1], state.id_list_features[name][0]) - for name, m in self.embedding_bags.items() + # id_list is (offset, value); sum pooling + id_list_embeddings = [ + self.embedding_bags[self.feat2table[feature_name]](input=v[1], offsets=v[0]) + for feature_name, v in state.id_list_features_raw.items() ] - return torch.cat(embeddings + [state.float_features], dim=1) + + # id_score_list is (offset, key, value); weighted sum pooling + id_score_list_embeddings = [ + self.embedding_bags[self.feat2table[feature_name]]( + input=v[1], offsets=v[0], per_sample_weights=v[2] + ) + for feature_name, v in state.id_score_list_features_raw.items() + ] + return torch.cat( + id_list_embeddings + id_score_list_embeddings + [state.float_features], + dim=1, + ) diff --git a/reagent/models/fully_connected_network.py b/reagent/models/fully_connected_network.py index 3c3cca01d..2b5878c99 100644 --- a/reagent/models/fully_connected_network.py +++ b/reagent/models/fully_connected_network.py @@ -3,42 +3,76 @@ import logging import math -from typing import List +from typing import Any, List, Optional import torch import torch.nn as nn import torch.nn.init as init +from reagent.core import types as rlt from reagent.models.base import ModelBase logger = logging.getLogger(__name__) -def gaussian_fill_w_gain(tensor, activation, dim_in, min_std=0.0) -> None: - """ Gaussian initialization with gain.""" - gain 
= math.sqrt(2) if (activation == "relu" or activation == "leaky_relu") else 1 +def gaussian_fill_w_gain(tensor, gain, dim_in, min_std=0.0) -> None: + """Gaussian initialization with gain.""" init.normal_(tensor, mean=0, std=max(gain * math.sqrt(1 / dim_in), min_std)) +# troch.fx.trace does not support dynamic control flow, wrap the if-else and assert logic in this function to work around this limitation +@torch.fx.wrap +def transpose_tensor(shape_tensor: torch.Tensor, input: torch.Tensor) -> torch.Tensor: + shape = len(shape_tensor.shape) + assert shape in [2, 3], f"Invalid input shape {shape}" + if shape == 2: + return input + else: + return input.transpose(1, 2) + + ACTIVATION_MAP = { "tanh": nn.Tanh, "relu": nn.ReLU, "leaky_relu": nn.LeakyReLU, "linear": nn.Identity, + "sigmoid": nn.Sigmoid, + "softplus": nn.Softplus, } +class SlateBatchNorm1d(nn.Module): + """ + Same as nn.BatchNorm1d is input has shape (batch_size, feat_dim). + But if input has shape (batch_size, num_candidates, item_feats), like in LearnedVM, + we transpose it, since that's what nn.BatchNorm1d computes Batch Normalization over + 1st dimension, while we want to compute it over item_feats. + + NOTE: this is different from nn.BatchNorm2d which is for CNNs, and expects 4D inputs + """ + + def __init__(self, *args, **kwargs): + super().__init__() + self.vanilla = nn.BatchNorm1d(*args, **kwargs) + + def forward(self, x: torch.Tensor): + input = transpose_tensor(x, x) + output = self.vanilla(input) + return transpose_tensor(x, output) + + class FullyConnectedNetwork(ModelBase): def __init__( self, layers, activations, *, - use_batch_norm=False, - min_std=0.0, - dropout_ratio=0.0, - use_layer_norm=False, - normalize_output=False, + use_batch_norm: bool = False, + min_std: float = 0.0, + dropout_ratio: float = 0.0, + use_layer_norm: bool = False, + normalize_output: bool = False, + orthogonal_init: bool = False, ) -> None: super().__init__() @@ -53,17 +87,28 @@ def __init__( ): # Add BatchNorm1d if use_batch_norm: - modules.append(nn.BatchNorm1d(in_dim)) + modules.append(SlateBatchNorm1d(in_dim)) # Add Linear linear = nn.Linear(in_dim, out_dim) - gaussian_fill_w_gain(linear.weight, activation, in_dim, min_std=min_std) + # assuming activation is valid + try: + gain = torch.nn.init.calculate_gain(activation) + except ValueError: + gain = 1.0 # default value for other activation functions + if orthogonal_init: + # provably better https://openreview.net/forum?id=rkgqN1SYvr + nn.init.orthogonal_(linear.weight.data, gain=gain) + else: + # gaussian init + gaussian_fill_w_gain( + linear.weight, gain=gain, dim_in=in_dim, min_std=min_std + ) + init.constant_(linear.bias, 0) # type: ignore modules.append(linear) # Add LayerNorm if use_layer_norm and (normalize_output or i < len(activations) - 1): - modules.append( - nn.LayerNorm(out_dim) # type: ignore - ) + modules.append(nn.LayerNorm(out_dim)) # type: ignore # Add activation if activation in ACTIVATION_MAP: modules.append(ACTIVATION_MAP[activation]()) @@ -80,8 +125,62 @@ def input_prototype(self): return torch.randn(1, self.input_dim) def forward(self, input: torch.Tensor) -> torch.Tensor: - """ Forward pass for generic feed-forward DNNs. Assumes activation names + """Forward pass for generic feed-forward DNNs. Assumes activation names are valid pytorch activation names. :param input tensor """ return self.dnn(input) + + +class FloatFeatureFullyConnected(ModelBase): + """ + A fully connected network that takes FloatFeatures input + and supports distributional prediction. 
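# A minimal sketch of why SlateBatchNorm1d (defined earlier in this file) transposes 3D
# inputs: nn.BatchNorm1d treats the second dimension as channels, so a tensor shaped
# (batch_size, num_candidates, item_feats) has to be moved to
# (batch_size, item_feats, num_candidates) for normalization to happen per item feature.
# The shapes below are arbitrary.
import torch
import torch.nn as nn

batch_size, num_candidates, item_feats = 4, 3, 8
x = torch.randn(batch_size, num_candidates, item_feats)
bn = nn.BatchNorm1d(item_feats)
out = bn(x.transpose(1, 2)).transpose(1, 2)  # same shape as x, normalized over item_feats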
+ """ + + def __init__( + self, + state_dim, + output_dim, + sizes, + activations, + *, + output_activation: str = "linear", + num_atoms: Optional[int] = None, + use_batch_norm: bool = False, + dropout_ratio: float = 0.0, + normalized_output: bool = False, + use_layer_norm: bool = False, + ): + super().__init__() + assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim) + assert output_dim > 0, "output_dim must be > 0, got {}".format(output_dim) + self.state_dim = state_dim + self.output_dim = output_dim + assert len(sizes) == len( + activations + ), "The numbers of sizes and activations must match; got {} vs {}".format( + len(sizes), len(activations) + ) + self.num_atoms = num_atoms + self.fc = FullyConnectedNetwork( + [state_dim] + sizes + [output_dim * (num_atoms or 1)], + activations + [output_activation], + use_batch_norm=use_batch_norm, + dropout_ratio=dropout_ratio, + normalize_output=normalized_output, + use_layer_norm=use_layer_norm, + ) + + def input_prototype(self): + return rlt.FeatureData(self.fc.input_prototype()) + + def forward( + self, + state: rlt.FeatureData, + ) -> torch.Tensor: + float_features = state.float_features + x = self.fc(float_features) + if self.num_atoms is not None: + x = x.view(float_features.shape[0], self.action_dim, self.num_atoms) + return x diff --git a/reagent/models/linear_regression.py b/reagent/models/linear_regression.py new file mode 100644 index 000000000..b845dccd5 --- /dev/null +++ b/reagent/models/linear_regression.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +from typing import Optional + +import torch +from pytorch_lightning.utilities.distributed import ReduceOp, sync_ddp_if_available +from reagent.models.base import ModelBase + + +logger = logging.getLogger(__name__) + + +def batch_quadratic_form(x: torch.Tensor, A: torch.Tensor) -> torch.Tensor: + """ + Compute the quadratic form x^T * A * x for a batched input x. + Inspired by https://stackoverflow.com/questions/18541851/calculate-vt-a-v-for-a-matrix-of-vectors-v + This is a vectorized implementation of out[i] = x[i].t() @ A @ x[i] + x shape: (Batch, Feature_dim) or (Batch, Arm, Feature_dim) + A shape: (Feature_dim, Feature_dim) + output shape: (Batch) or (Batch, Arm) + """ + return (torch.matmul(x, A) * x).sum(-1) + + +def reduce_avg( + avg_val: torch.Tensor, + sum_weight: torch.Tensor, + cur_distributed_avg_val: torch.Tensor, + cur_distributed_sum_weight: torch.Tensor, +) -> torch.Tensor: + """ + Get the new weighted average value of a tensor. Steps: + 1. Sum across all trainers the weighted sum of values. This is implemented as a sum of product of + current-epoch weighted average value and total weight. + 2. Get the new weighted average value by dividing the total weighted sum by the total weight. + - The total are a sum of the current-epoch values across all trainers and the values from previous + epochs stored in `avg_val` and `sum_weight`. + + Args: + avg_val: Current weighted average value (from previous epochs). + sum_weight: Total weight (from previous epochs). + cur_distributed_avg_val: Current weighted average value in each trainers in current epoch. + cur_distributed_sum_weight: Total weight in each trainer in current epoch. + Returns: + A new weighted average value. 
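# A small numeric sketch of the weighted-average update implemented by the reduce_avg
# helper here, for the single-process case (sync_ddp_if_available then returns its input
# unchanged): a previous average of 1.0 with total weight 10 combined with a current-epoch
# average of 3.0 with weight 5 gives (1.0 * 10 + 3.0 * 5) / (10 + 5) = 5 / 3.
# The numbers are made up for illustration.
import torch

avg_val, sum_weight = torch.tensor(1.0), torch.tensor(10.0)
cur_avg_val, cur_sum_weight = torch.tensor(3.0), torch.tensor(5.0)
new_avg = (avg_val * sum_weight + cur_avg_val * cur_sum_weight) / (sum_weight + cur_sum_weight)
# new_avg -> tensor(1.6667)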
+ """
+ total_weight = (
+ sync_ddp_if_available(
+ cur_distributed_sum_weight.clone(), reduce_op=ReduceOp.SUM
+ )
+ + sum_weight
+ ) # clone the tensor, so that it's not modified in-place
+ return (
+ avg_val * sum_weight
+ + sync_ddp_if_available(
+ cur_distributed_avg_val * cur_distributed_sum_weight, reduce_op=ReduceOp.SUM
+ )
+ ) / total_weight
+
+
+class LinearRegressionUCB(ModelBase):
+ """
+ A linear regression model for LinUCB.
+ Note that instead of being trained by a PyTorch optimizer, we explicitly
+ update attributes A and b (according to the LinUCB formulas implemented in
+ reagent.training.cb.linucb_trainer.LinUCBTrainer).
+ Since computing the regression coefficients involves matrix inversion (an expensive op), we
+ save time by only computing the coefficients when necessary (when doing inference).
+
+ Args:
+ input_dim: Dimension of input data
+ l2_reg_lambda: The weight on L2 regularization
+ ucb_alpha: The coefficient on the standard deviation in UCB formula.
+ Set it to 0 to predict the expected value instead of UCB.
+ gamma: per-epoch discount factor (A and b get multiplied by gamma every epoch)
+ """
+
+ def __init__(
+ self,
+ input_dim: int,
+ *,
+ l2_reg_lambda: float = 1.0,
+ ucb_alpha: float = 1.0,
+ gamma: float = 1.0,
+ ) -> None:
+ super().__init__()
+
+ self.input_dim = input_dim
+ self.ucb_alpha = ucb_alpha
+ self.l2_reg_lambda = l2_reg_lambda
+ self.gamma = gamma
+ assert self.gamma <= 1.0 and self.gamma > 0.0
+
+ """
+ the buffers below are split between "all data" and "current epoch" values. This is done
+ to enable distributed training. "current epoch" values get reduced across all trainers at
+ the end of an epoch (technically, whenever we estimate the coefficients, which could sometimes
+ happen multiple times per epoch) and the sum gets reduced with the "all data" values, and the new "all data"
+ value is set to the reduction of "all data" and "current epoch" values. 
+ """ + # A is weighted average of X^T*X across all data + self.register_buffer("avg_A", torch.zeros(self.input_dim, self.input_dim)) + # b is weighted average of reward*X across all data + self.register_buffer("avg_b", torch.zeros(self.input_dim)) + # A is weighted average of X^T*X across current epoch + self.register_buffer("cur_avg_A", torch.zeros(self.input_dim, self.input_dim)) + # b is weighted average of reward*X across current epoch + self.register_buffer("cur_avg_b", torch.zeros(self.input_dim)) + + self.register_buffer("_coefs", torch.zeros(self.input_dim)) + self.register_buffer("inv_avg_A", torch.zeros(self.input_dim, self.input_dim)) + self.register_buffer( + "coefs_valid_for_avg_A", -torch.ones((self.input_dim, self.input_dim)) + ) # value of avg_A matrix for which self.coefs were estimated + self.register_buffer("num_obs", torch.zeros(1, dtype=torch.int64)) + self.register_buffer("cur_num_obs", torch.zeros(1, dtype=torch.int64)) + # initialize sum of weights below at small values to avoid dividing by 0 + self.register_buffer("sum_weight", 1e-5 * torch.ones(1, dtype=torch.float)) + self.register_buffer("cur_sum_weight", 1e-5 * torch.ones(1, dtype=torch.float)) + + # add a dummy parameter so that DDP doesn't compain about lack of parameters with gradient + self.dummy_param = torch.nn.parameter.Parameter(torch.zeros(1)) + + def input_prototype(self) -> torch.Tensor: + return torch.randn(1, self.input_dim) + + def _calculate_coefs(self) -> None: + """ + Compute current estimate of regression coefficients and A_inv=A**-1 + We save both coefficients and A_inv in case they are needed again to avoid recomputing the inverse. + The coefficients are computed only when needed because their computation can be expensive + (involves matrix inversion) + """ + # reduce the values of `avg_A` and `avg_b` across all trainer processes and reduce them with previous-epoch values. 
+ # The coefficients can't be averaged across trainers because they are a non-linear + # function of `A` and `b` + self.avg_A = reduce_avg( + self.avg_A, self.sum_weight, self.cur_avg_A, self.cur_sum_weight + ) + self.avg_b = reduce_avg( + self.avg_b, self.sum_weight, self.cur_avg_b, self.cur_sum_weight + ) + self.num_obs += sync_ddp_if_available(self.cur_num_obs, reduce_op=ReduceOp.SUM) + self.sum_weight += sync_ddp_if_available( + self.cur_sum_weight, reduce_op=ReduceOp.SUM + ) + + self.inv_avg_A = torch.linalg.pinv( + self.avg_A + + self.l2_reg_lambda + * torch.eye(self.input_dim, device=self.avg_A.device) + / self.sum_weight # add regularization here so that it's not double-counted under distributed training + ).contiguous() + self._coefs = torch.matmul(self.inv_avg_A, self.avg_b) + self.coefs_valid_for_avg_A = self.avg_A.clone() + + # reset buffers to zero + self.cur_avg_A.zero_() + self.cur_avg_b.zero_() + self.cur_num_obs.zero_() + self.cur_sum_weight.zero_() + + def calculate_coefs_if_necessary(self) -> torch.Tensor: + if not (self.coefs_valid_for_avg_A == self.avg_A).all() or ( + torch.abs(self.cur_avg_A).max().item() > 0 + ): + # re-calculate the coefs only if the previous value is invalid + self._calculate_coefs() + return self._coefs + + @property + def coefs(self) -> torch.Tensor: + return self.calculate_coefs_if_necessary() + + def _forward_no_coefs_check( + self, inp: torch.Tensor, ucb_alpha: Optional[float] = None + ) -> torch.Tensor: + # perform forward pass without checking if the current coefficient estimate is still valid + if ucb_alpha is None: + ucb_alpha = self.ucb_alpha + + mu = torch.matmul(inp, self._coefs) + + if ucb_alpha != 0: + return mu + ucb_alpha * torch.sqrt( + batch_quadratic_form(inp, self.inv_avg_A) / self.sum_weight + ) + else: + return mu + + def forward( + self, inp: torch.Tensor, ucb_alpha: Optional[float] = None + ) -> torch.Tensor: + """ + Forward can return the mean or a UCB. 
If returning UCB, the CI width is stddev*ucb_alpha + If ucb_alpha is not passed in, a fixed alpha from init is used + """ + self.calculate_coefs_if_necessary() + return self._forward_no_coefs_check(inp, ucb_alpha) diff --git a/reagent/models/mdn_rnn.py b/reagent/models/mdn_rnn.py index 5aed52cbd..6e9bc9ed8 100644 --- a/reagent/models/mdn_rnn.py +++ b/reagent/models/mdn_rnn.py @@ -8,8 +8,8 @@ import torch import torch.nn as nn import torch.nn.functional as f -from reagent import types as rlt -from reagent.torch_utils import stack +from reagent.core import types as rlt +from reagent.core.torch_utils import stack from torch.distributions.normal import Normal @@ -17,7 +17,7 @@ class MDNRNN(nn.Module): - """ Mixture Density Network - Recurrent Neural Network """ + """Mixture Density Network - Recurrent Neural Network""" def __init__( self, state_dim, action_dim, num_hiddens, num_hidden_layers, num_gaussians @@ -43,7 +43,7 @@ def __init__( ) def forward(self, actions: torch.Tensor, states: torch.Tensor, hidden=None): - """ Forward pass of MDN-RNN + """Forward pass of MDN-RNN :param actions: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor :param states: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor @@ -153,7 +153,7 @@ def sample_memories(self, batch_size, use_gpu=False) -> rlt.MemoryNetworkInput: state=rlt.FeatureData(float_features=state), reward=reward, time_diff=torch.ones_like(reward).float(), - action=action, + action=rlt.FeatureData(float_features=action), next_state=rlt.FeatureData(float_features=next_state), not_terminal=not_terminal, step=None, @@ -184,7 +184,7 @@ def transpose(*args): def gmm_loss(batch, mus, sigmas, logpi, reduce=True): - """ Computes the gmm loss. + """Computes the gmm loss. Compute minus the log probability of batch under the GMM model described by mus, sigmas, pi. Precisely, with bs1, bs2, ... the sizes of the batch diff --git a/reagent/models/mlp_scorer.py b/reagent/models/mlp_scorer.py new file mode 100644 index 000000000..90e750974 --- /dev/null +++ b/reagent/models/mlp_scorer.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + + +import reagent.core.types as rlt +import torch +from reagent.models.base import ModelBase + + +class MLPScorer(ModelBase): + """ + Log-space in and out + """ + + def __init__( + self, + mlp: torch.nn.Module, + has_user_feat: bool = False, + ) -> None: + super().__init__() + self.mlp = mlp + self.has_user_feat = has_user_feat + + def forward(self, obs: rlt.FeatureData): + mlp_input = obs.get_ranking_state(self.has_user_feat) + scores = self.mlp(mlp_input) + return scores.squeeze(-1) + + def input_prototype(self): + # Sample config for input + batch_size = 2 + state_dim = 5 + num_docs = 3 + candidate_dim = 4 + return rlt.FeatureData( + float_features=torch.randn((batch_size, state_dim)), + candidate_docs=rlt.DocList( + float_features=torch.randn(batch_size, num_docs, candidate_dim) + ), + ) diff --git a/reagent/models/model_feature_config_provider.py b/reagent/models/model_feature_config_provider.py new file mode 100644 index 000000000..3032d3b98 --- /dev/null +++ b/reagent/models/model_feature_config_provider.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+import abc + +import reagent.core.types as rlt +from reagent.core.dataclasses import dataclass +from reagent.core.registry_meta import RegistryMeta + + +class ModelFeatureConfigProvider(metaclass=RegistryMeta): + @abc.abstractmethod + def get_model_feature_config(self) -> rlt.ModelFeatureConfig: + pass + + +@dataclass +class RawModelFeatureConfigProvider(ModelFeatureConfigProvider, rlt.ModelFeatureConfig): + __registry_name__ = "raw" + + def get_model_feature_config(self) -> rlt.ModelFeatureConfig: + return self diff --git a/reagent/models/no_soft_update_embedding.py b/reagent/models/no_soft_update_embedding.py index 8558a2511..efcddc002 100644 --- a/reagent/models/no_soft_update_embedding.py +++ b/reagent/models/no_soft_update_embedding.py @@ -12,5 +12,5 @@ class NoSoftUpdateEmbedding(nn.Embedding): table in the target network. """ - def __deepcopy__(self, memo): + def __deepcopy__(self, memo) -> "NoSoftUpdateEmbedding": return copy.copy(self) diff --git a/reagent/models/probabilistic_fully_connected_network.py b/reagent/models/probabilistic_fully_connected_network.py new file mode 100644 index 000000000..181adfcc0 --- /dev/null +++ b/reagent/models/probabilistic_fully_connected_network.py @@ -0,0 +1,210 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F +from reagent.models.base import ModelBase +from reagent.models.fully_connected_network import ( + ACTIVATION_MAP, + gaussian_fill_w_gain, + SlateBatchNorm1d, +) +from torch.distributions import Normal + + +logger = logging.getLogger(__name__) + +# code based off of online tutorial at: https://github.com/cpark321/uncertainty-deep-learning/blob/master/01.%20Bayes-by-Backprop.ipynb + + +class LinearBBB(ModelBase): + """ + Layer of our BNN. + """ + + def __init__( + self, + input_dim, + output_dim, + activation, + orthogonal_init: bool = False, + min_std: float = 0.0, + prior_var: float = 1.0, + ) -> None: + """ + Initialization of our layer : our prior is a normal distribution + centered in 0 and of variance 20. 
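# A minimal sketch of the Bayes-by-Backprop reparameterization used by LinearBBB.forward
# below: a weight is sampled as w = mu + log(1 + exp(rho)) * eps with eps ~ N(0, 1), so the
# softplus of rho acts as an always-positive standard deviation and gradients can flow back
# into both mu and rho. The shapes below are arbitrary.
import torch
from torch.distributions import Normal

w_mu = torch.zeros(3, 5, requires_grad=True)
w_rho = torch.zeros(3, 5, requires_grad=True)
eps = Normal(0, 1).sample(w_mu.shape)
w = w_mu + torch.log(1 + torch.exp(w_rho)) * eps  # sampled weight, differentiable in mu and rho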
+ """ + # initialize layers + super().__init__() + # set input and output dimensions + self.input_dim = input_dim + self.output_dim = output_dim + + # initialize mu and rho parameters for the weights of the layer + self.w_mu = nn.Parameter(torch.zeros(output_dim, input_dim)) + self.w_rho = nn.Parameter(torch.zeros(output_dim, input_dim)) + + gain = torch.nn.init.calculate_gain(activation) + if orthogonal_init: + # provably better https://openreview.net/forum?id=rkgqN1SYvr + nn.init.orthogonal_(self.w_mu.data, gain=gain) + nn.init.orthogonal_(self.w_rho.data, gain=gain) + else: + # gaussian init + gaussian_fill_w_gain( + self.w_mu, gain=gain, dim_in=input_dim, min_std=min_std + ) + gaussian_fill_w_gain( + self.w_rho, gain=gain, dim_in=input_dim, min_std=min_std + ) + + # initialize mu and rho parameters for the layer's bias + self.b_mu = nn.Parameter(torch.zeros(output_dim)) + self.b_rho = nn.Parameter(torch.zeros(output_dim)) + + # initialize weight samples (these will be calculated whenever the layer makes a prediction) + self.w = None + self.b = None + self.log_prior = None + self.w_post = None + self.b_post = None + self.log_post = None + + # initialize prior distribution for all of the weights and biases + self.prior = torch.distributions.Normal(0, prior_var) + + def forward(self, x: torch.Tensor): + """ + Optimization process + """ + # sample weights + w_epsilon = Normal(0, 1).sample(self.w_mu.shape) + self.w = self.w_mu + torch.log(1 + torch.exp(self.w_rho)) * w_epsilon + + # sample bias + b_epsilon = Normal(0, 1).sample(self.b_mu.shape) + self.b = self.b_mu + torch.log(1 + torch.exp(self.b_rho)) * b_epsilon + + # record log prior by evaluating log pdf of prior at sampled weight and bias + w_log_prior = self.prior.log_prob(self.w) + b_log_prior = self.prior.log_prob(self.b) + self.log_prior = torch.sum(w_log_prior) + torch.sum(b_log_prior) + + # record log variational posterior by evaluating log pdf of normal distribution defined by parameters with respect at the sampled values + self.w_post = Normal(self.w_mu.data, torch.log(1 + torch.exp(self.w_rho))) + self.b_post = Normal(self.b_mu.data, torch.log(1 + torch.exp(self.b_rho))) + self.log_post = ( + self.w_post.log_prob(self.w).sum() + self.b_post.log_prob(self.b).sum() + ) + + return F.linear(x, self.w, self.b) + + +class FullyConnectedProbabilisticNetwork(ModelBase): + def __init__( + self, + layers, + activations, + prior_var, + *, + noise_tol: float = 0.1, + use_batch_norm: bool = False, + min_std: float = 0.0, + dropout_ratio: float = 0.0, + use_layer_norm: bool = False, + normalize_output: bool = False, + orthogonal_init: bool = False, + ) -> None: + super().__init__() + + self.input_dim = layers[0] + self.use_batch_norm = use_batch_norm + + modules: List[nn.Module] = [] + linear_bbbs: List[nn.Module] = [] + self.noise_tol = noise_tol + self.layers = layers + + assert len(layers) == len(activations) + 1 + + for i, ((in_dim, out_dim), activation) in enumerate( + zip(zip(layers, layers[1:]), activations) + ): + # Add BatchNorm1d + if use_batch_norm: + modules.append(SlateBatchNorm1d(in_dim)) + # Add Linear + linear_bbb = LinearBBB(in_dim, out_dim, activation, prior_var=prior_var) + linear_bbbs.append(linear_bbb) + # assuming activation is valid + + modules.append(linear_bbb) + # Add LayerNorm + if use_layer_norm and (normalize_output or i < len(activations) - 1): + modules.append(nn.LayerNorm(out_dim)) # type: ignore + # Add activation + if activation in ACTIVATION_MAP: + modules.append(ACTIVATION_MAP[activation]()) + else: + # 
See if it matches any of the nn modules + modules.append(getattr(nn, activation)()) + # Add Dropout + if dropout_ratio > 0.0 and (normalize_output or i < len(activations) - 1): + modules.append(nn.Dropout(p=dropout_ratio)) + + self.dnn = nn.Sequential(*modules) # type: ignore + self.linear_bbbs = nn.ModuleList(linear_bbbs) + + def input_prototype(self): + return torch.randn(1, self.input_dim) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + """Forward pass for generic feed-forward DNNs. Assumes activation names + are valid pytorch activation names. + :param input tensor + """ + return self.dnn(input) + + def log_prior(self): + # calculate the log prior over all the layers + ret = 0 + for x in self.linear_bbbs: + ret += x.log_prior + return ret + + def log_post(self) -> torch.Tensor: + # calculate the log posterior over all the layers + ret = 0 + for x in self.linear_bbbs: + ret += x.log_post + return ret + + def sample_elbo(self, input: torch.Tensor, target: torch.Tensor, num_samples: int): + # rlt.BanditRewardModelInput + + # we calculate the negative elbo, which will be our loss function + # initialize tensors + outputs = torch.zeros(num_samples, target.shape[0]) + log_priors = torch.zeros(num_samples) + log_posts = torch.zeros(num_samples) + log_likes = torch.zeros(num_samples) + # make predictions and calculate prior, posterior, and likelihood for a given number of samples + for i in range(num_samples): + outputs[i] = self(input).reshape(-1) # make predictions + log_priors[i] = self.log_prior() # get log prior + log_posts[i] = self.log_post() # get log variational posterior + log_likes[i] = ( + Normal(outputs[i], self.noise_tol).log_prob(target.reshape(-1)).sum() + ) # calculate the log likelihood + # calculate monte carlo estimate of prior posterior and likelihood + log_prior = log_priors.mean() + log_post = log_posts.mean() + log_like = log_likes.mean() + # calculate the negative elbo (which is our loss function) + loss = log_post - log_prior - log_like + return loss diff --git a/reagent/models/seq2reward_model.py b/reagent/models/seq2reward_model.py new file mode 100644 index 000000000..60f101627 --- /dev/null +++ b/reagent/models/seq2reward_model.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +from typing import Optional + +import torch +import torch.nn as nn +from reagent.core import types as rlt +from reagent.models.base import ModelBase + + +class Seq2RewardNetwork(ModelBase): + def __init__(self, state_dim, action_dim, num_hiddens, num_hidden_layers) -> None: + super().__init__() + + self.state_dim = state_dim + self.action_dim = action_dim + self.num_hiddens = num_hiddens + self.num_hidden_layers = num_hidden_layers + self.rnn = nn.LSTM( + input_size=action_dim, hidden_size=num_hiddens, num_layers=num_hidden_layers + ) + + self.lstm_linear = nn.Linear(num_hiddens, 1) + self.map_linear = nn.Linear(state_dim, self.num_hiddens) + + def input_prototype(self): + return ( + rlt.FeatureData(torch.randn(1, 1, self.state_dim)), + rlt.FeatureData(torch.randn(1, 1, self.action_dim)), + ) + + def forward( + self, + state: rlt.FeatureData, + action: rlt.FeatureData, + valid_reward_len: Optional[torch.Tensor] = None, + ): + """Forward pass of Seq2Reward + + Takes in the current state and use it as init hidden + The input sequence are pure actions only + Output the predicted reward after each time step + + :param actions: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor + :param states: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor + :param valid_reward_len: (BATCH_SIZE,) torch tensor + + :returns: predicated accumulated rewards at last step for the given sequence + - acc_reward: (BATCH_SIZE, 1) torch tensor + """ + states = state.float_features + actions = action.float_features + batch_size = states.shape[1] + hidden = self.get_initial_hidden_state( + states[0][None, :, :], batch_size=batch_size + ) + # all_steps_hidden shape: seq_len, batch_size, hidden_size + all_steps_hidden, _ = self.rnn(actions, hidden) + if valid_reward_len is None: + acc_reward = self.lstm_linear(all_steps_hidden[-1]) + else: + valid_step_hidden = all_steps_hidden[ + valid_reward_len - 1, torch.arange(batch_size) + ] + acc_reward = self.lstm_linear(valid_step_hidden) + + return rlt.Seq2RewardOutput(acc_reward=acc_reward) + + def get_initial_hidden_state(self, state, batch_size: int = 1): + # state embedding with linear mapping + # repeat state to fill num_hidden_layers at first dimension + state = state.repeat(self.num_hidden_layers, 1, 1) + state_embed = self.map_linear(state) + + # hidden = (hidden,cell) where hidden is init with liner map + # of input state and cell is 0. + # hidden : + # TUPLE( + # (NUM_LAYERS, BATCH_SIZE, HIDDEN_SIZE), + # (NUM_LAYERS, BATCH_SIZE, HIDDEN_SIZE) + # ) torch tensor + hidden = ( + state_embed, + torch.zeros(self.num_hidden_layers, batch_size, self.num_hiddens).to( + state.device + ), + ) + + return hidden diff --git a/reagent/models/seq2slate.py b/reagent/models/seq2slate.py index c21a7ccf4..21b0d648d 100644 --- a/reagent/models/seq2slate.py +++ b/reagent/models/seq2slate.py @@ -1,16 +1,28 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
-import copy import logging import math -from enum import Enum -from typing import Optional +from typing import NamedTuple, Optional -import numpy as np import torch import torch.nn as nn -import torch.nn.functional as F -from reagent import types as rlt +import torch.nn.modules.transformer as transformer +from reagent.core import types as rlt +from reagent.core.configuration import param_hash +from reagent.core.dataclasses import dataclass +from reagent.core.torch_utils import gather +from reagent.model_utils.seq2slate_utils import ( + attention, + clones, + DECODER_START_SYMBOL, + mask_logits_by_idx, + PADDING_SYMBOL, + per_symbol_to_per_seq_probs, + print_model_info, + pytorch_decoder_mask, + Seq2SlateMode, + Seq2SlateOutputArch, +) from reagent.models.base import ModelBase from torch.nn.parallel.distributed import DistributedDataParallel @@ -18,139 +30,20 @@ logger = logging.getLogger(__name__) -class Seq2SlateMode(Enum): - RANK_MODE = "rank" - PER_SEQ_LOG_PROB_MODE = "per_sequence_log_prob" - PER_SYMBOL_LOG_PROB_DIST_MODE = "per_symbol_log_prob_dist" - DECODE_ONE_STEP_MODE = "decode_one_step" - ENCODER_SCORE_MODE = "encoder_score_mode" - - -PADDING_SYMBOL = 0 -DECODER_START_SYMBOL = 1 - - -def subsequent_mask(size, device): - """ - Mask out subsequent positions. Mainly used in the decoding process, - in which an item should not attend subsequent items. - """ - attn_shape = (1, size, size) - subsequent_mask = ( - 1 - torch.triu(torch.ones(*attn_shape, device=device), diagonal=1) - ).type(torch.int8) - return subsequent_mask - - -def subsequent_and_padding_mask(tgt_in_idx): - """ Create a mask to hide padding and future items """ - # tgt_in_idx shape: batch_size, seq_len - - # tgt_tgt_mask shape: batch_size, 1, seq_len - tgt_tgt_mask = (tgt_in_idx != PADDING_SYMBOL).unsqueeze(-2).type(torch.int8) - # subseq_mask shape: 1, seq_len, seq_len - subseq_mask = subsequent_mask(tgt_in_idx.size(-1), tgt_in_idx.device) - # tgt_tgt_mask shape: batch_size, seq_len, seq_len - tgt_tgt_mask = tgt_tgt_mask & subseq_mask - return tgt_tgt_mask - - -def clones(module, N): - """ - Produce N identical layers. - - :param module: nn.Module class - :param N: number of copies - """ - return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) - - -def attention(query, key, value, mask, d_k): - """ Scaled Dot Product Attention """ - # mask shape: batch_size x 1 x seq_len x seq_len - - # scores shape: batch_size x num_heads x seq_len x seq_len - scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k) - scores = scores.masked_fill(mask == 0, -1e9) - # p_attn shape: batch_size x num_heads x seq_len x seq_len - p_attn = F.softmax(scores, dim=3) - # attn shape: batch_size x num_heads x seq_len x d_k - attn = torch.matmul(p_attn, value) - return attn, p_attn - - class Generator(nn.Module): - """ Define standard linear + softmax generation step. 
""" - - def __init__(self, dim_model, candidate_size): - super(Generator, self).__init__() - self.dim_model = dim_model - self.proj = nn.Linear(dim_model, candidate_size) - - def forward(self, mode, decoder_output=None, tgt_in_idx=None, greedy=None): - if mode in ( - Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE, - Seq2SlateMode.PER_SEQ_LOG_PROB_MODE, - ): - return self._log_probs(decoder_output, tgt_in_idx, mode) - elif mode == Seq2SlateMode.DECODE_ONE_STEP_MODE: - assert greedy is not None - return self._decode_one_step(decoder_output, tgt_in_idx, greedy) - else: - raise NotImplementedError() - - def _log_probs(self, x, tgt_in_idx, mode): - """ - Return the log probability distribution at each decoding step - - :param x: the output of decoder. Shape: batch_size, seq_len, dim_model - :param tgt_idx: the indices of candidates in decoder input sequences. - The first symbol is always DECODER_START_SYMBOL. - Shape: batch_size, seq_len - """ - assert mode in ( - Seq2SlateMode.PER_SEQ_LOG_PROB_MODE, - Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE, - ) - # logits: the probability distribution of each symbol - # batch_size, seq_len, candidate_size - logits = self.proj(x) - # the first two symbols are reserved for padding and decoder-starting symbols - # so they should never be a possible output label - logits[:, :, :2] = float("-inf") - - if mode == Seq2SlateMode.PER_SEQ_LOG_PROB_MODE: - batch_size, seq_len = tgt_in_idx.shape - mask_indices = torch.tril( - tgt_in_idx.repeat(1, seq_len).reshape(batch_size, seq_len, seq_len), - diagonal=0, - ) - logits.scatter_(2, mask_indices, float("-inf")) - - # log_probs shape: batch_size, seq_len, candidate_size - log_probs = F.log_softmax(logits, dim=2) - return log_probs + """Candidate generation""" - def _decode_one_step(self, x, tgt_in_idx, greedy): + def forward(self, probs: torch.Tensor, greedy: bool): """ Decode one-step - :param x: the output of the decoder. Shape: batch_size, seq_len, dim_model - :param tgt_in_idx: input to the decoder, the first symbol is always the - starting symbol. Shape: batch_size, seq_len + :param probs: probability distributions of decoder. 
+ Shape: batch_size, tgt_seq_len, candidate_size :param greedy: whether to greedily pick or sample the next symbol """ - # get the last step of decoder output - last_step_x = x[:, -1, :] - - batch_size = x.shape[0] - # logits shape: batch_size, candidate_size - logits = self.proj(last_step_x) - # invalidate the padding symbol and decoder-starting symbol - logits[:, :2] = float("-inf") - # invalidate symbols already appeared in decoded sequences - logits.scatter_(1, tgt_in_idx, float("-inf")) - prob = F.softmax(logits, dim=1) + batch_size = probs.shape[0] + # get the last step probs shape: batch_size, candidate_size + prob = probs[:, -1, :] if greedy: _, next_candidate = torch.max(prob, dim=1) else: @@ -170,7 +63,7 @@ class SublayerConnection(nn.Module): """ def __init__(self, dim_model): - super(SublayerConnection, self).__init__() + super().__init__() self.norm = nn.LayerNorm(dim_model) def forward(self, x, sublayer): @@ -181,7 +74,7 @@ class Encoder(nn.Module): "Core encoder is a stack of num_layers layers" def __init__(self, layer, num_layers): - super(Encoder, self).__init__() + super().__init__() self.layers = clones(layer, num_layers) self.norm = nn.LayerNorm(layer.dim_model) @@ -193,10 +86,10 @@ def forward(self, x, mask): class EncoderLayer(nn.Module): - """ Encoder is made up of self-attn and feed forward """ + """Encoder is made up of self-attn and feed forward""" def __init__(self, dim_model, self_attn, feed_forward): - super(EncoderLayer, self).__init__() + super().__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.sublayer = clones(SublayerConnection(dim_model), 2) @@ -216,10 +109,10 @@ def self_attn_layer(x): class Decoder(nn.Module): - """ Generic num_layers layer decoder with masking.""" + """Generic num_layers layer decoder with masking.""" def __init__(self, layer, num_layers): - super(Decoder, self).__init__() + super().__init__() self.layers = clones(layer, num_layers) self.norm = nn.LayerNorm(layer.size) @@ -231,10 +124,10 @@ def forward(self, x, memory, tgt_src_mask, tgt_tgt_mask): class DecoderLayer(nn.Module): - """ Decoder is made of self-attn, src-attn, and feed forward """ + """Decoder is made of self-attn, src-attn, and feed forward""" def __init__(self, size, self_attn, src_attn, feed_forward): - super(DecoderLayer, self).__init__() + super().__init__() self.size = size self.self_attn = self_attn self.src_attn = src_attn @@ -260,10 +153,115 @@ def self_attn_layer_src(x): return self.sublayer[2](x, self.feed_forward) +class EncoderPyTorch(nn.Module): + """Transformer-based encoder based on PyTorch official implementation""" + + def __init__(self, dim_model, num_heads, dim_feedforward, num_layers): + super().__init__() + encoder_layer = nn.TransformerEncoderLayer( + d_model=dim_model, + dim_feedforward=dim_feedforward, + nhead=num_heads, + dropout=0.0, + ) + self.transformer_encoder = nn.TransformerEncoder( + encoder_layer, num_layers=num_layers + ) + + def forward(self, src): + # Adapt to PyTorch format (batch_size as second dim) + src = src.transpose(0, 1) + # not using mask currently since we do not deal with paddings + out = self.transformer_encoder(src) + return out.transpose(0, 1) + + +class DecoderLastLayerPytorch(transformer.TransformerDecoderLayer): + """ + The last layer of Decoder. 
+ Modified from PyTorch official code: instead of attention embedding, + return attention weights which can be directly used to sample items + """ + + def forward( + self, + tgt, + memory, + tgt_mask, + memory_mask, + ): + tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + _, attn_weights = self.multihead_attn( + tgt, + memory, + memory, + attn_mask=memory_mask, + ) + assert attn_weights is not None + return attn_weights + + +class DecoderPyTorch(nn.Module): + """Transformer-based decoder based on PyTorch official implementation""" + + def __init__(self, dim_model, num_heads, dim_feedforward, num_layers): + super().__init__() + assert num_layers >= 1 + self.layers = nn.ModuleList( + [ + transformer.TransformerDecoderLayer( + d_model=dim_model, + nhead=num_heads, + dim_feedforward=dim_feedforward, + dropout=0.0, + ) + for _ in range(num_layers - 1) + ] + + [ + DecoderLastLayerPytorch( + d_model=dim_model, + nhead=num_heads, + dim_feedforward=dim_feedforward, + dropout=0.0, + ) + ] + ) + self.num_layers = num_layers + + def forward(self, tgt_embed, memory, tgt_src_mask, tgt_tgt_mask): + # tgt_embed shape: batch_size, tgt_seq_len, dim_model + # memory shape: batch_size, src_seq_len, dim_model + # tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len + # tgt_tgt_mask shape: batch_size, tgt_seq_len, tgt_seq_len + batch_size, tgt_seq_len, _ = tgt_embed.shape + + # Adapt to PyTorch format + tgt_embed = tgt_embed.transpose(0, 1) + memory = memory.transpose(0, 1) + + output = tgt_embed + + for mod in self.layers: + output = mod( + output, + memory, + tgt_mask=tgt_tgt_mask, + memory_mask=tgt_src_mask, + ) + + probs_for_placeholders = torch.zeros( + batch_size, tgt_seq_len, 2, device=tgt_embed.device + ) + probs = torch.cat((probs_for_placeholders, output), dim=2) + return probs + + class MultiHeadedAttention(nn.Module): def __init__(self, num_heads, dim_model): - """ Take in model size and number of heads """ - super(MultiHeadedAttention, self).__init__() + """Take in model size and number of heads""" + super().__init__() assert dim_model % num_heads == 0 # We assume d_v always equals d_k self.d_k = dim_model // num_heads @@ -303,7 +301,7 @@ def forward(self, query, key, value, mask=None): class PositionwiseFeedForward(nn.Module): def __init__(self, dim_model, dim_feedforward): - super(PositionwiseFeedForward, self).__init__() + super().__init__() self.net = torch.nn.Sequential( torch.nn.Linear(dim_model, dim_feedforward), torch.nn.ReLU(), @@ -316,7 +314,7 @@ def forward(self, x): class Embedder(nn.Module): def __init__(self, dim_in, dim_out): - super(Embedder, self).__init__() + super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.linear = nn.Linear(self.dim_in, self.dim_out) @@ -329,29 +327,35 @@ def forward(self, x): class PositionalEncoding(nn.Module): - def __init__(self, dim_model, max_len=5000): - super(PositionalEncoding, self).__init__() - - # Compute the positional encodings once in log space. - pe = torch.zeros(max_len, dim_model) - position = torch.arange(0.0, max_len).unsqueeze(1) - div_term = torch.exp( - torch.arange(0.0, dim_model, 2) * -(math.log(10000.0) / dim_model) - ) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - pe = pe.unsqueeze(0) - # pe shape: 1, max_len, dim_model - self.register_buffer("pe", pe) + """ + A special, non-learnable positional encoding for handling variable (possibly longer) + lengths of inputs. 
We simply add an ordinal number as an additional dimension for + the input embeddings, and then project them back to the original number of dimensions + """ - def forward(self, x, seq_len): - x = x + self.pe[:, :seq_len] - return x + def __init__(self, dim_model): + super().__init__() + self.pos_embed = nn.Linear(dim_model + 1, dim_model) + self.activation = nn.ReLU() + + def forward(self, x): + device = x.device + batch_size, seq_len, _ = x.shape + position_idx = ( + torch.arange(0, seq_len, device=device) + .unsqueeze(0) + .repeat(batch_size, 1) + .reshape(batch_size, seq_len, 1) + ) + # shape: batch_size, seq_len, dim_model + 1 + x_pos = torch.cat((x, position_idx), dim=2) + # project back to shape: batch_size, seq_len, dim_model + return self.activation(self.pos_embed(x_pos)) class BaselineNet(nn.Module): def __init__(self, state_dim, dim_feedforward, num_stacked_layers): - super(BaselineNet, self).__init__() + super().__init__() nn_blocks = [nn.Linear(state_dim, dim_feedforward), nn.ReLU()] assert num_stacked_layers >= 1 for _ in range(num_stacked_layers - 1): @@ -364,20 +368,33 @@ def forward(self, input: rlt.PreprocessedRankingInput): return self.mlp(x) +class Seq2SlateTransformerOutput(NamedTuple): + ranked_per_symbol_probs: Optional[torch.Tensor] + ranked_per_seq_probs: Optional[torch.Tensor] + ranked_tgt_out_idx: Optional[torch.Tensor] + per_symbol_log_probs: Optional[torch.Tensor] + per_seq_log_probs: Optional[torch.Tensor] + encoder_scores: Optional[torch.Tensor] + + class Seq2SlateTransformerModel(nn.Module): """ A Seq2Slate network with Transformer. The network is essentially an encoder-decoder structure. The encoder inputs a sequence of candidate feature vectors and a state feature vector, and the decoder outputs an ordered list of candidate indices. The output order is learned through REINFORCE - algorithm to optimize some sequence-wise reward which is also specific to - the provided state feature. + algorithm to optimize sequence-wise reward. One application example is to rank candidate feeds to a specific user such that the final list of feeds as a whole optimizes the user's engagement. Seq2Slate paper: https://arxiv.org/abs/1810.02019 Transformer paper: https://arxiv.org/abs/1706.03762 + + The model archtecture can also adapt to some variations. + (1) The decoder can be autoregressive + (2) The decoder can take encoder scores and perform iterative softmax (aka frechet sort) + (3) No decoder and the output order is solely based on encoder scores """ def __init__( @@ -390,7 +407,9 @@ def __init__( dim_feedforward: int, max_src_seq_len: int, max_tgt_seq_len: int, - encoder_only: bool, + output_arch: Seq2SlateOutputArch, + temperature: float = 1.0, + state_embed_dim: Optional[int] = None, ): """ :param state_dim: state feature dimension @@ -402,7 +421,10 @@ def __init__( in Transformer :param max_src_seq_len: the maximum length of input sequences :param max_tgt_seq_len: the maximum length of output sequences - :param encoder_only: if True, the model only has an Encoder but no Decoder. + :param output_arch: determines seq2slate output architecture + :param temperature: temperature used in decoder sampling + :param state_embed_dim: embedding dimension of state features. 
+ by default (if not specified), state_embed_dim = dim_model / 2 """ super().__init__() self.state_dim = state_dim @@ -413,45 +435,41 @@ def __init__( self.dim_feedforward = dim_feedforward self.max_src_seq_len = max_src_seq_len self.max_tgt_seq_len = max_tgt_seq_len - self.encoder_only = encoder_only + self.output_arch = output_arch self._DECODER_START_SYMBOL = DECODER_START_SYMBOL self._PADDING_SYMBOL = PADDING_SYMBOL - self._RANK_MODE = Seq2SlateMode.RANK_MODE + self._RANK_MODE = Seq2SlateMode.RANK_MODE.value self._PER_SYMBOL_LOG_PROB_DIST_MODE = ( - Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE + Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE.value ) - self._PER_SEQ_LOG_PROB_MODE = Seq2SlateMode.PER_SEQ_LOG_PROB_MODE - self._DECODE_ONE_STEP_MODE = Seq2SlateMode.DECODE_ONE_STEP_MODE - self._ENCODER_SCORE_MODE = Seq2SlateMode.ENCODER_SCORE_MODE - - c = copy.deepcopy - attn = MultiHeadedAttention(num_heads, dim_model) - ff = PositionwiseFeedForward(dim_model, dim_feedforward) - self.encoder = Encoder( - EncoderLayer(dim_model, c(attn), c(ff)), num_stacked_layers + self._PER_SEQ_LOG_PROB_MODE = Seq2SlateMode.PER_SEQ_LOG_PROB_MODE.value + self._DECODE_ONE_STEP_MODE = Seq2SlateMode.DECODE_ONE_STEP_MODE.value + self._ENCODER_SCORE_MODE = Seq2SlateMode.ENCODER_SCORE_MODE.value + self._OUTPUT_PLACEHOLDER = torch.zeros(1) + + self.encoder = EncoderPyTorch( + dim_model, num_heads, dim_feedforward, num_stacked_layers ) - if self.encoder_only: - # score encoder output - self.encoder_scorer = nn.Linear(dim_model, 1) - else: - self.decoder = Decoder( - DecoderLayer(dim_model, c(attn), c(attn), c(ff)), num_stacked_layers - ) - # Generator needs to know the output symbol size, - # Possible output symbols include candidate indices, decoder-start symbol - # and padding symbol - self.generator = Generator(dim_model, max_src_seq_len + 2) - self.candidate_embedder = Embedder(candidate_dim, dim_model // 2) - self.state_embedder = Embedder(state_dim, dim_model // 2) - self.positional_encoding = PositionalEncoding( - dim_model, max_len=2 * (max_src_seq_len + max_tgt_seq_len) + # Compute score at each encoder step + self.encoder_scorer = nn.Linear(dim_model, 1) + self.generator = Generator() + self.decoder = DecoderPyTorch( + dim_model, num_heads, dim_feedforward, num_stacked_layers ) + self.positional_encoding_decoder = PositionalEncoding(dim_model) + + if state_embed_dim is None: + state_embed_dim = dim_model // 2 + candidate_embed_dim = dim_model - state_embed_dim + self.state_embedder = Embedder(state_dim, state_embed_dim) + self.candidate_embedder = Embedder(candidate_dim, candidate_embed_dim) + # Initialize parameters with Glorot / fan_avg. 
for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) - self._print_model_info() + print_model_info(self) __constants__ = [ "state_dim", @@ -462,7 +480,9 @@ def __init__( "dim_feedforward", "max_src_seq_len", "max_tgt_seq_len", - "encoder_only", + "output_path", + "temperature", + "state_embed_dim", "_DECODER_START_SYMBOL", "_PADDING_SYMBOL", "_RANK_MODE", @@ -472,33 +492,17 @@ def __init__( "_ENCODER_SCORE_MODE", ] - def _print_model_info(self): - def _num_of_params(model): - return len(torch.cat([p.flatten() for p in model.parameters()])) - - logger.info(f"Num of total params: {_num_of_params(self)}") - logger.info(f"Num of Encoder params: {_num_of_params(self.encoder)}") - logger.info( - f"Num of Candidate Embedder params: {_num_of_params(self.candidate_embedder)}" - ) - logger.info( - f"Num of State Embedder params: {_num_of_params(self.state_embedder)}" - ) - if self.encoder_only: - logger.info( - f"Num of Encoder_Scorer params: {_num_of_params(self.encoder_scorer)}" - ) - else: - logger.info(f"Num of Decoder params: {_num_of_params(self.decoder)}") - logger.info(f"Num of Generator params: {_num_of_params(self.generator)}") - def forward( self, - input: rlt.PreprocessedRankingInput, mode: str, + state: torch.Tensor, + src_seq: torch.Tensor, + tgt_in_idx: Optional[torch.Tensor] = None, + tgt_out_idx: Optional[torch.Tensor] = None, + tgt_in_seq: Optional[torch.Tensor] = None, tgt_seq_len: Optional[int] = None, greedy: Optional[bool] = None, - ): + ) -> Seq2SlateTransformerOutput: """ :param input: model input :param mode: a string indicating which mode to perform. @@ -506,8 +510,7 @@ def forward( "per_seq_log_probs": return generative log probabilities of given tgt sequences (used for REINFORCE training) "per_symbol_log_probs": return generative log probabilties of each - symbol in given tgt sequences (used in TEACHER FORCING and - DIFFERENTIABLE_REWARD training) + symbol in given tgt sequences (used in TEACHER FORCING training) :param tgt_seq_len: the length of output sequence to be decoded. Only used in rank mode :param greedy: whether to sample based on softmax distribution or greedily @@ -516,37 +519,40 @@ def forward( if mode == self._RANK_MODE: if tgt_seq_len is None: tgt_seq_len = self.max_tgt_seq_len + assert greedy is not None return self._rank( - state=input.state.float_features, - src_seq=input.src_seq.float_features, - src_src_mask=input.src_src_mask, + state=state, + src_seq=src_seq, tgt_seq_len=tgt_seq_len, greedy=greedy, ) elif mode in (self._PER_SEQ_LOG_PROB_MODE, self._PER_SYMBOL_LOG_PROB_DIST_MODE): - assert input.tgt_in_seq is not None + assert tgt_in_seq is not None + assert tgt_in_idx is not None + assert tgt_out_idx is not None return self._log_probs( - state=input.state.float_features, - src_seq=input.src_seq.float_features, - # pyre-fixme[16]: `Optional` has no attribute `float_features`. 
- tgt_in_seq=input.tgt_in_seq.float_features, - src_src_mask=input.src_src_mask, - tgt_tgt_mask=input.tgt_tgt_mask, - tgt_in_idx=input.tgt_in_idx, - tgt_out_idx=input.tgt_out_idx, + state=state, + src_seq=src_seq, + tgt_in_seq=tgt_in_seq, + tgt_in_idx=tgt_in_idx, + tgt_out_idx=tgt_out_idx, mode=mode, ) elif mode == self._ENCODER_SCORE_MODE: - assert self.encoder_only + assert self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE + assert tgt_out_idx is not None return self.encoder_output_to_scores( - state=input.state.float_features, - src_seq=input.src_seq.float_features, - src_src_mask=input.src_src_mask, - tgt_out_idx=input.tgt_out_idx, + state=state, + src_seq=src_seq, + tgt_out_idx=tgt_out_idx, ) + else: + raise NotImplementedError() - def _rank(self, state, src_seq, src_src_mask, tgt_seq_len, greedy): - """ Decode sequences based on given inputs """ + def _rank( + self, state: torch.Tensor, src_seq: torch.Tensor, tgt_seq_len: int, greedy: bool + ) -> Seq2SlateTransformerOutput: + """Decode sequences based on given inputs""" device = src_seq.device batch_size, src_seq_len, candidate_dim = src_seq.shape candidate_size = src_seq_len + 2 @@ -562,261 +568,299 @@ def _rank(self, state, src_seq, src_src_mask, tgt_seq_len, greedy): candidate_features[:, 2:, :] = src_seq # memory shape: batch_size, src_seq_len, dim_model - memory = self.encode(state, src_seq, src_src_mask) + memory = self.encode(state, src_seq) - if self.encoder_only: - # encoder_scores shape: batch_size, tgt_seq_len - encoder_scores = self.encoder_scorer(memory).squeeze(dim=2) - tgt_out_idx = torch.argsort(encoder_scores, dim=1, descending=True)[ - :, :tgt_seq_len - ] - # +2 to account for start symbol and padding symbol - tgt_out_idx += 2 - # every position has propensity of 1 because we are just using argsort - tgt_out_probs = torch.ones( - batch_size, tgt_seq_len, candidate_size, device=device + if self.output_arch == Seq2SlateOutputArch.ENCODER_SCORE: + tgt_out_idx, ranked_per_symbol_probs = self._encoder_rank( + memory, tgt_seq_len + ) + elif self.output_arch == Seq2SlateOutputArch.FRECHET_SORT and greedy: + # greedy decoding for non-autoregressive decoder + tgt_out_idx, ranked_per_symbol_probs = self._greedy_rank( + state, memory, candidate_features, tgt_seq_len + ) + else: + assert greedy is not None + # autoregressive decoding + tgt_out_idx, ranked_per_symbol_probs = self._autoregressive_rank( + state, memory, candidate_features, tgt_seq_len, greedy ) + # ranked_per_symbol_probs shape: batch_size, tgt_seq_len, candidate_size + # ranked_per_seq_probs shape: batch_size, 1 + ranked_per_seq_probs = per_symbol_to_per_seq_probs( + ranked_per_symbol_probs, tgt_out_idx + ) + + # tgt_out_idx shape: batch_size, tgt_seq_len + return Seq2SlateTransformerOutput( + ranked_per_symbol_probs=ranked_per_symbol_probs, + ranked_per_seq_probs=ranked_per_seq_probs, + ranked_tgt_out_idx=tgt_out_idx, + per_symbol_log_probs=self._OUTPUT_PLACEHOLDER, + per_seq_log_probs=self._OUTPUT_PLACEHOLDER, + encoder_scores=self._OUTPUT_PLACEHOLDER, + ) - # TODO: T62503033 return encoder_scores so that we can apply - # frechet policy gradient + def _greedy_rank( + self, + state: torch.Tensor, + memory: torch.Tensor, + candidate_features: torch.Tensor, + tgt_seq_len: int, + ): + """Using the first step decoder scores to greedily sort items""" + # candidate_features shape: batch_size, src_seq_len + 2, candidate_dim - return tgt_out_probs, tgt_out_idx + batch_size, candidate_size, _ = candidate_features.shape + device = candidate_features.device - 
tgt_in_idx = ( - torch.ones(batch_size, 1, device=device) - .fill_(self._DECODER_START_SYMBOL) - .type(torch.long) + # Only one step input to the decoder + tgt_in_idx = torch.full( + (batch_size, 1), self._DECODER_START_SYMBOL, dtype=torch.long, device=device ) - tgt_out_probs = torch.zeros( + tgt_in_seq = gather(candidate_features, tgt_in_idx) + # shape: batch_size, candidate_size + probs = self.decode( + memory=memory, + state=state, + tgt_in_idx=tgt_in_idx, + tgt_in_seq=tgt_in_seq, + )[:, -1, :] + # tgt_out_idx shape: batch_size, tgt_seq_len + tgt_out_idx = torch.argsort(probs, dim=1, descending=True)[:, :tgt_seq_len] + + # since it is greedy ranking, we set selected items' probs to 1 + ranked_per_symbol_probs = torch.zeros( batch_size, tgt_seq_len, candidate_size, device=device + ).scatter(2, tgt_out_idx.unsqueeze(2), 1.0) + return tgt_out_idx, ranked_per_symbol_probs + + def _autoregressive_rank( + self, + state: torch.Tensor, + memory: torch.Tensor, + candidate_features: torch.Tensor, + tgt_seq_len: int, + greedy: bool, + ): + batch_size, candidate_size, _ = candidate_features.shape + device = candidate_features.device + tgt_in_idx = torch.full( + (batch_size, 1), self._DECODER_START_SYMBOL, dtype=torch.long, device=device ) - assert greedy is not None - for l in range(tgt_seq_len): - tgt_in_seq = ( - candidate_features[ - torch.arange(batch_size, device=device).repeat_interleave(l + 1), - tgt_in_idx.flatten(), - ] - .view(batch_size, l + 1, -1) - .to(device) - ) - tgt_src_mask = src_src_mask[:, : l + 1, :] - out = self.decode( + ranked_per_symbol_probs = torch.zeros( + batch_size, tgt_seq_len, candidate_size, device=device + ) + for step in torch.arange(tgt_seq_len, device=device): + tgt_in_seq = gather(candidate_features, tgt_in_idx) + + # shape batch_size, step + 1, candidate_size + probs = self.decode( memory=memory, state=state, - tgt_src_mask=tgt_src_mask, + tgt_in_idx=tgt_in_idx, tgt_in_seq=tgt_in_seq, - tgt_tgt_mask=subsequent_mask(l + 1, device), - tgt_seq_len=l + 1, ) # next candidate shape: batch_size, 1 # prob shape: batch_size, candidate_size - next_candidate, prob = self.generator( - mode=self._DECODE_ONE_STEP_MODE, - decoder_output=out, - tgt_in_idx=tgt_in_idx, - greedy=greedy, - ) - tgt_out_probs[:, l, :] = prob + next_candidate, next_candidate_sample_prob = self.generator(probs, greedy) + ranked_per_symbol_probs[:, step, :] = next_candidate_sample_prob tgt_in_idx = torch.cat([tgt_in_idx, next_candidate], dim=1) # remove the decoder start symbol # tgt_out_idx shape: batch_size, tgt_seq_len tgt_out_idx = tgt_in_idx[:, 1:] - # tgt_out_probs shape: batch_size, tgt_seq_len, candidate_size - return tgt_out_probs, tgt_out_idx + + return tgt_out_idx, ranked_per_symbol_probs + + def _encoder_rank(self, memory: torch.Tensor, tgt_seq_len: int): + batch_size, src_seq_len, _ = memory.shape + candidate_size = src_seq_len + 2 + device = memory.device + + ranked_per_symbol_probs = torch.zeros( + batch_size, tgt_seq_len, candidate_size, device=device + ) + # encoder_scores shape: batch_size, src_seq_len + encoder_scores = self.encoder_scorer(memory).squeeze(dim=2) + tgt_out_idx = torch.argsort(encoder_scores, dim=1, descending=True)[ + :, :tgt_seq_len + ] + # +2 to account for start symbol and padding symbol + tgt_out_idx += 2 + # every position has propensity of 1 because we are just using argsort + ranked_per_symbol_probs = ranked_per_symbol_probs.scatter( + 2, tgt_out_idx.unsqueeze(2), 1.0 + ) + return tgt_out_idx, ranked_per_symbol_probs def _log_probs( self, - state, - src_seq, 
- tgt_in_seq, - src_src_mask, - tgt_tgt_mask, - tgt_in_idx, - tgt_out_idx, - mode, - ): + state: torch.Tensor, + src_seq: torch.Tensor, + tgt_in_seq: torch.Tensor, + tgt_in_idx: torch.Tensor, + tgt_out_idx: torch.Tensor, + mode: str, + ) -> Seq2SlateTransformerOutput: """ Compute log of generative probabilities of given tgt sequences (used for REINFORCE training) """ # encoder_output shape: batch_size, src_seq_len, dim_model - encoder_output = self.encode(state, src_seq, src_src_mask) + encoder_output = self.encode(state, src_seq) tgt_seq_len = tgt_in_seq.shape[1] src_seq_len = src_seq.shape[1] assert tgt_seq_len <= src_seq_len - # tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len - tgt_src_mask = src_src_mask[:, :tgt_seq_len, :] - - # decoder_output shape: batch_size, tgt_seq_len, dim_model - decoder_output = self.decode( + # decoder_probs shape: batch_size, tgt_seq_len, candidate_size + decoder_probs = self.decode( memory=encoder_output, state=state, - tgt_src_mask=tgt_src_mask, + tgt_in_idx=tgt_in_idx, tgt_in_seq=tgt_in_seq, - tgt_tgt_mask=tgt_tgt_mask, - tgt_seq_len=tgt_seq_len, ) # log_probs shape: - # if mode == PER_SEQ_LOG_PROB_MODE: batch_size + # if mode == PER_SEQ_LOG_PROB_MODE: batch_size, 1 # if mode == PER_SYMBOL_LOG_PROB_DIST_MODE: batch_size, tgt_seq_len, candidate_size - log_probs = self._decoder_output_to_log_probs( - decoder_output, tgt_in_idx, tgt_out_idx, mode - ) - - return log_probs + if mode == self._PER_SYMBOL_LOG_PROB_DIST_MODE: + per_symbol_log_probs = torch.log(torch.clamp(decoder_probs, min=1e-40)) + return Seq2SlateTransformerOutput( + ranked_per_symbol_probs=None, + ranked_per_seq_probs=None, + ranked_tgt_out_idx=None, + per_symbol_log_probs=per_symbol_log_probs, + per_seq_log_probs=None, + encoder_scores=None, + ) - def _decoder_output_to_log_probs( - self, decoder_output, tgt_in_idx, tgt_out_idx, mode - ): - """ - :param decoder_output: the output from the decoder, with shape: - (batch_size, seq_len, dim_model) - :param tgt_in_idx: input idx to the decoder, the first symbol is - always the DECODER_START_SYMBOL. Shape: batch_size x seq_len - :param tgt_out_idx: output idx of the decoder. 
Shape: batch_size x seq_len - :param mode: return log prob distribution per symbol or reduce them per sequence - """ - assert mode in ( - self._PER_SEQ_LOG_PROB_MODE, - self._PER_SYMBOL_LOG_PROB_DIST_MODE, + per_seq_log_probs = torch.log( + per_symbol_to_per_seq_probs(decoder_probs, tgt_out_idx) ) - # per_symbol_log_probs: log probability distribution of each symbol - # shape: batch_size, seq_len, candidate_size - per_symbol_log_probs = self.generator( - mode=mode, decoder_output=decoder_output, tgt_in_idx=tgt_in_idx + return Seq2SlateTransformerOutput( + ranked_per_symbol_probs=None, + ranked_per_seq_probs=None, + ranked_tgt_out_idx=None, + per_symbol_log_probs=None, + per_seq_log_probs=per_seq_log_probs, + encoder_scores=None, ) - if mode == self._PER_SYMBOL_LOG_PROB_DIST_MODE: - return per_symbol_log_probs - - # shape: batch_size, 1 - return self.per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx) - - @staticmethod - def per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx): - device = per_symbol_log_probs.device - batch_size, seq_len, candidate_size = per_symbol_log_probs.shape - # log_probs: log probability of each symbol in the tgt_out_idx - # shape: batch_size, seq_len - log_probs = per_symbol_log_probs.view(-1, candidate_size)[ - torch.arange(batch_size * seq_len, device=device), tgt_out_idx.flatten() - ].view(batch_size, seq_len) - # shape: batch_size, 1 - return log_probs.sum(dim=1, keepdim=True) - - def encoder_output_to_scores(self, state, src_seq, src_src_mask, tgt_out_idx): + + def encoder_output_to_scores( + self, state: torch.Tensor, src_seq: torch.Tensor, tgt_out_idx: torch.Tensor + ) -> Seq2SlateTransformerOutput: # encoder_output shape: batch_size, src_seq_len, dim_model - encoder_output = self.encode(state, src_seq, src_src_mask) + encoder_output = self.encode(state, src_seq) # encoder_output shape: batch_size, src_seq_len, dim_model # tgt_out_idx shape: batch_size, tgt_seq_len - device = encoder_output.device batch_size, tgt_seq_len = tgt_out_idx.shape # order encoder_output by tgt_out_idx # slate_encoder_output shape: batch_size, tgt_seq_len, dim_model - slate_encoder_output = encoder_output[ - torch.arange(batch_size, device=device).repeat_interleave(tgt_seq_len), - (tgt_out_idx - 2).flatten(), - ].reshape(batch_size, tgt_seq_len, -1) + slate_encoder_output = gather(encoder_output, tgt_out_idx - 2) # encoder_scores shape: batch_size, tgt_seq_len - return self.encoder_scorer(slate_encoder_output).squeeze() + encoder_scores = self.encoder_scorer(slate_encoder_output).squeeze() + return Seq2SlateTransformerOutput( + ranked_per_symbol_probs=None, + ranked_per_seq_probs=None, + ranked_tgt_out_idx=None, + per_symbol_log_probs=None, + per_seq_log_probs=None, + encoder_scores=encoder_scores, + ) - def encode(self, state, src_seq, src_mask): + def encode(self, state, src_seq): # state: batch_size, state_dim # src_seq: batch_size, src_seq_len, dim_candidate - # src_src_mask shape: batch_size, src_seq_len, src_seq_len - batch_size = src_seq.shape[0] + batch_size, max_src_seq_len, _ = src_seq.shape # candidate_embed: batch_size, src_seq_len, dim_model/2 candidate_embed = self.candidate_embedder(src_seq) # state_embed: batch_size, dim_model/2 state_embed = self.state_embedder(state) # transform state_embed into shape: batch_size, src_seq_len, dim_model/2 - state_embed = state_embed.repeat(1, self.max_src_seq_len).reshape( - batch_size, self.max_src_seq_len, -1 + state_embed = state_embed.repeat(1, max_src_seq_len).reshape( + batch_size, 
max_src_seq_len, -1 ) # Input at each encoder step is actually concatenation of state_embed # and candidate embed. state_embed is replicated at each encoding step. # src_embed shape: batch_size, src_seq_len, dim_model - src_embed = self.positional_encoding( - torch.cat((state_embed, candidate_embed), dim=2), self.max_src_seq_len - ) + src_embed = torch.cat((state_embed, candidate_embed), dim=2) # encoder_output shape: batch_size, src_seq_len, dim_model - return self.encoder(src_embed, src_mask) + return self.encoder(src_embed) - def decode( - self, memory, state, tgt_src_mask, tgt_in_seq, tgt_tgt_mask, tgt_seq_len - ): + def decode(self, memory, state, tgt_in_idx, tgt_in_seq): # memory is the output of the encoder, the attention of each input symbol # memory shape: batch_size, src_seq_len, dim_model - # tgt_src_mask shape: batch_size, tgt_seq_len, src_seq_len + # tgt_in_idx shape: batch_size, tgt_seq_len # tgt_seq shape: batch_size, tgt_seq_len, dim_candidate - # tgt_tgt_mask shape: batch_size, tgt_seq_len, tgt_seq_len - batch_size = tgt_in_seq.shape[0] + batch_size, src_seq_len, _ = memory.shape + _, tgt_seq_len = tgt_in_idx.shape + candidate_size = src_seq_len + 2 - # candidate_embed shape: batch_size, tgt_seq_len, dim_model/2 - candidate_embed = self.candidate_embedder(tgt_in_seq) - # state_embed: batch_size, dim_model/2 - state_embed = self.state_embedder(state) - # state_embed: batch_size, tgt_seq_len, dim_model/2 - state_embed = state_embed.repeat(1, tgt_seq_len).reshape( - batch_size, tgt_seq_len, -1 - ) + if self.output_arch == Seq2SlateOutputArch.FRECHET_SORT: + # encoder_scores shape: batch_size, src_seq_len + encoder_scores = self.encoder_scorer(memory).squeeze(dim=2) + logits = torch.zeros(batch_size, tgt_seq_len, candidate_size).to( + encoder_scores.device + ) + logits[:, :, :2] = float("-inf") + logits[:, :, 2:] = encoder_scores.repeat(1, tgt_seq_len).reshape( + batch_size, tgt_seq_len, src_seq_len + ) + logits = mask_logits_by_idx(logits, tgt_in_idx) + probs = torch.softmax(logits, dim=2) + elif self.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE: + # candidate_embed shape: batch_size, tgt_seq_len, dim_model/2 + candidate_embed = self.candidate_embedder(tgt_in_seq) + # state_embed: batch_size, dim_model/2 + state_embed = self.state_embedder(state) + # state_embed: batch_size, tgt_seq_len, dim_model/2 + state_embed = state_embed.repeat(1, tgt_seq_len).reshape( + batch_size, tgt_seq_len, -1 + ) + # tgt_embed: batch_size, tgt_seq_len, dim_model + tgt_embed = self.positional_encoding_decoder( + torch.cat((state_embed, candidate_embed), dim=2) + ) + # tgt_tgt_mask shape: batch_size * num_heads, tgt_seq_len, tgt_seq_len + # tgt_src_mask shape: batch_size * num_heads, tgt_seq_len, src_seq_len + tgt_tgt_mask, tgt_src_mask = pytorch_decoder_mask( + memory, tgt_in_idx, self.num_heads + ) + # output of decoder is probabilities over symbols. + # shape: batch_size, tgt_seq_len, candidate_size + probs = self.decoder(tgt_embed, memory, tgt_src_mask, tgt_tgt_mask) + else: + raise NotImplementedError() - # tgt_embed: batch_size, tgt_seq_len, dim_model - tgt_embed = self.positional_encoding( - torch.cat((state_embed, candidate_embed), dim=2), tgt_seq_len - ) + return probs - # output of decoder will be later transformed into probabilities over symbols. 
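# A minimal sketch (hypothetical shapes and indices) of the idea behind the FRECHET_SORT
# decoding branch above: every decoding step reuses the same per-candidate encoder scores
# as logits, but candidates emitted at earlier steps are masked to -inf so they cannot be
# selected twice. The in-place masking loop below is only a simplified stand-in for
# mask_logits_by_idx, written for illustration.
import torch

batch_size, src_seq_len, tgt_seq_len = 2, 4, 3
candidate_size = src_seq_len + 2                     # + start and padding symbols
encoder_scores = torch.randn(batch_size, src_seq_len)

logits = torch.full((batch_size, tgt_seq_len, candidate_size), float("-inf"))
logits[:, :, 2:] = encoder_scores.unsqueeze(1).expand(-1, tgt_seq_len, -1)

# tgt_in_idx holds the start symbol followed by previously chosen candidate indices;
# here we pretend candidates 2 and 3 were already picked in every example.
tgt_in_idx = torch.tensor([[0, 2, 3], [0, 2, 3]])
for step in range(1, tgt_seq_len):
    # at step t, everything chosen at steps < t is unavailable
    rows = torch.arange(batch_size).unsqueeze(1)
    logits[rows, step, tgt_in_idx[:, 1 : step + 1]] = float("-inf")

probs = torch.softmax(logits, dim=2)                 # per-step distribution over candidates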
- # shape: batch_size, tgt_seq_len, dim_model - return self.decoder(tgt_embed, memory, tgt_src_mask, tgt_tgt_mask) +@dataclass +class Seq2SlateNet(ModelBase): + __hash__ = param_hash -class Seq2SlateTransformerNet(ModelBase): - def __init__( - self, - state_dim: int, - candidate_dim: int, - num_stacked_layers: int, - num_heads: int, - dim_model: int, - dim_feedforward: int, - max_src_seq_len: int, - max_tgt_seq_len: int, - encoder_only: bool, - ): + state_dim: int + candidate_dim: int + num_stacked_layers: int + dim_model: int + max_src_seq_len: int + max_tgt_seq_len: int + output_arch: Seq2SlateOutputArch + temperature: float + + def __post_init_post_parse__(self) -> None: super().__init__() - self.state_dim = state_dim - self.candidate_dim = candidate_dim - self.num_stacked_layers = num_stacked_layers - self.num_heads = num_heads - self.dim_model = dim_model - self.dim_feedforward = dim_feedforward - self.max_src_seq_len = max_src_seq_len - self.max_tgt_seq_len = max_tgt_seq_len - self.encoder_only = encoder_only - - self.seq2slate_transformer = Seq2SlateTransformerModel( - state_dim=state_dim, - candidate_dim=candidate_dim, - num_stacked_layers=num_stacked_layers, - num_heads=num_heads, - dim_model=dim_model, - dim_feedforward=dim_feedforward, - max_src_seq_len=max_src_seq_len, - max_tgt_seq_len=max_tgt_seq_len, - encoder_only=encoder_only, - ) + # pyre-fixme[16]: `Seq2SlateNet` has no attribute `seq2slate`. + self.seq2slate = self._build_model() - def get_distributed_data_parallel_model(self): - return _DistributedSeq2SlateTransformerNet(self) + def _build_model(self): + return None def input_prototype(self): return rlt.PreprocessedRankingInput.from_tensors( @@ -824,83 +868,115 @@ def input_prototype(self): src_seq=torch.randn(1, self.max_src_seq_len, self.candidate_dim), tgt_in_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim), tgt_out_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim), - src_src_mask=torch.ones(1, self.max_src_seq_len, self.max_src_seq_len), - tgt_tgt_mask=torch.ones(1, self.max_tgt_seq_len, self.max_tgt_seq_len), slate_reward=torch.randn(1), ) def forward( self, input: rlt.PreprocessedRankingInput, - mode: str, + mode: Seq2SlateMode, tgt_seq_len: Optional[int] = None, greedy: Optional[bool] = None, ): - res = self.seq2slate_transformer( - input, mode=mode, tgt_seq_len=tgt_seq_len, greedy=greedy - ) if mode == Seq2SlateMode.RANK_MODE: + # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function. + res = self.seq2slate( + mode=mode.value, + state=input.state.float_features, + src_seq=input.src_seq.float_features, + tgt_seq_len=tgt_seq_len, + greedy=greedy, + ) return rlt.RankingOutput( - ranked_tgt_out_idx=res[1], ranked_tgt_out_probs=res[0] + ranked_per_symbol_probs=res.ranked_per_symbol_probs, + ranked_per_seq_probs=res.ranked_per_seq_probs, + ranked_tgt_out_idx=res.ranked_tgt_out_idx, ) elif mode in ( Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE, Seq2SlateMode.PER_SEQ_LOG_PROB_MODE, ): - return rlt.RankingOutput(log_probs=res) + assert input.tgt_in_seq is not None + assert input.tgt_in_idx is not None + assert input.tgt_out_idx is not None + # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function. 
+ res = self.seq2slate( + mode=mode.value, + state=input.state.float_features, + src_seq=input.src_seq.float_features, + tgt_in_seq=input.tgt_in_seq.float_features, + tgt_in_idx=input.tgt_in_idx, + tgt_out_idx=input.tgt_out_idx, + ) + if res.per_symbol_log_probs is not None: + log_probs = res.per_symbol_log_probs + else: + log_probs = res.per_seq_log_probs + return rlt.RankingOutput(log_probs=log_probs) elif mode == Seq2SlateMode.ENCODER_SCORE_MODE: - return rlt.RankingOutput(encoder_scores=res) + assert input.tgt_out_idx is not None + # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function. + res = self.seq2slate( + mode=mode.value, + state=input.state.float_features, + src_seq=input.src_seq.float_features, + tgt_out_idx=input.tgt_out_idx, + ) + return rlt.RankingOutput(encoder_scores=res.encoder_scores) else: raise NotImplementedError() + def get_distributed_data_parallel_model(self): + return _DistributedSeq2SlateNet(self) + + +@dataclass +class Seq2SlateTransformerNet(Seq2SlateNet): + __hash__ = param_hash + + num_heads: int + dim_feedforward: int + state_embed_dim: Optional[int] = None + + def _build_model(self): + return Seq2SlateTransformerModel( + state_dim=self.state_dim, + candidate_dim=self.candidate_dim, + num_stacked_layers=self.num_stacked_layers, + num_heads=self.num_heads, + dim_model=self.dim_model, + dim_feedforward=self.dim_feedforward, + max_src_seq_len=self.max_src_seq_len, + max_tgt_seq_len=self.max_tgt_seq_len, + output_arch=self.output_arch, + temperature=self.temperature, + state_embed_dim=self.state_embed_dim, + ) -class _DistributedSeq2SlateTransformerNet(ModelBase): - def __init__(self, seq2slate_transformer_net: Seq2SlateTransformerNet): + +class _DistributedSeq2SlateNet(ModelBase): + def __init__(self, seq2slate_net: Seq2SlateNet): super().__init__() - self.state_dim = seq2slate_transformer_net.state_dim - self.candidate_dim = seq2slate_transformer_net.candidate_dim - self.num_stacked_layers = seq2slate_transformer_net.num_stacked_layers - self.num_heads = seq2slate_transformer_net.num_heads - self.dim_model = seq2slate_transformer_net.dim_model - self.dim_feedforward = seq2slate_transformer_net.dim_feedforward - self.max_src_seq_len = seq2slate_transformer_net.max_src_seq_len - self.max_tgt_seq_len = seq2slate_transformer_net.max_tgt_seq_len - self.encoder_only = seq2slate_transformer_net.encoder_only current_device = torch.cuda.current_device() self.data_parallel = DistributedDataParallel( - seq2slate_transformer_net.seq2slate_transformer, + seq2slate_net.seq2slate, device_ids=[current_device], output_device=current_device, ) - self.seq2slate_transformer_net = seq2slate_transformer_net + self.seq2slate_net = seq2slate_net def input_prototype(self): - return self.seq2slate_transformer_net.input_prototype() + return self.seq2slate_net.input_prototype() def cpu_model(self): - return self.seq2slate_transformer_net.cpu_model() + return self.seq2slate_net.cpu_model() def forward( self, input: rlt.PreprocessedRankingInput, - mode: str, + mode: Seq2SlateMode, tgt_seq_len: Optional[int] = None, greedy: Optional[bool] = None, ): - res = self.data_parallel( - input, mode=mode, tgt_seq_len=tgt_seq_len, greedy=greedy - ) - if mode == Seq2SlateMode.RANK_MODE: - return rlt.RankingOutput( - ranked_tgt_out_idx=res[1], ranked_tgt_out_probs=res[0] - ) - elif mode in ( - Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE, - Seq2SlateMode.PER_SEQ_LOG_PROB_MODE, - ): - return rlt.RankingOutput(log_probs=res) - elif mode == Seq2SlateMode.ENCODER_SCORE_MODE: - return 
rlt.RankingOutput(encoder_scores=res) - else: - raise NotImplementedError() + return self.seq2slate_net(input, mode, tgt_seq_len, greedy) diff --git a/reagent/models/seq2slate_reward.py b/reagent/models/seq2slate_reward.py index 38bc2fede..334a79491 100644 --- a/reagent/models/seq2slate_reward.py +++ b/reagent/models/seq2slate_reward.py @@ -1,13 +1,17 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import copy +import logging +from typing import List import torch import torch.nn as nn -from reagent import types as rlt +import torch.nn.functional as F +from reagent.core import types as rlt +from reagent.core.torch_utils import gather +from reagent.model_utils.seq2slate_utils import subsequent_mask from reagent.models.base import ModelBase from reagent.models.seq2slate import ( - DECODER_START_SYMBOL, Decoder, DecoderLayer, Embedder, @@ -16,11 +20,183 @@ MultiHeadedAttention, PositionalEncoding, PositionwiseFeedForward, - subsequent_and_padding_mask, ) -class Seq2SlateRewardNet(ModelBase): +logger = logging.getLogger(__name__) + + +class Seq2SlateRewardNetBase(ModelBase): + def __init__( + self, + state_dim: int, + candidate_dim: int, + dim_model: int, + num_stacked_layers: int, + max_src_seq_len: int, + max_tgt_seq_len: int, + ): + super().__init__() + self.state_dim = state_dim + self.candidate_dim = candidate_dim + self.dim_model = dim_model + self.num_stacked_layers = num_stacked_layers + + self.candidate_embedder = Embedder(candidate_dim, dim_model // 2) + self.state_embedder = Embedder(state_dim, dim_model // 2) + self.max_src_seq_len = max_src_seq_len + self.max_tgt_seq_len = max_tgt_seq_len + + def input_prototype(self): + return rlt.PreprocessedRankingInput.from_tensors( + state=torch.randn(1, self.state_dim), + src_seq=torch.randn(1, self.max_src_seq_len, self.candidate_dim), + tgt_in_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim), + tgt_out_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim), + src_src_mask=torch.ones(1, self.max_src_seq_len, self.max_src_seq_len), + tgt_tgt_mask=torch.ones(1, self.max_tgt_seq_len, self.max_tgt_seq_len), + tgt_out_idx=torch.arange(self.max_tgt_seq_len).reshape(1, -1) + 2, + ) + + def _init_params(self): + # Initialize parameters with Glorot / fan_avg. + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def _num_of_params(model): + return len(torch.cat([p.flatten() for p in model.parameters()])) + + logger.info(f"Num of total params: {_num_of_params(self)}, {self._get_name()}") + + +class Seq2SlateGRURewardNet(Seq2SlateRewardNetBase): + def __init__( + self, + state_dim: int, + candidate_dim: int, + num_stacked_layers: int, + dim_model: int, + max_src_seq_len: int, + max_tgt_seq_len: int, + ): + super().__init__( + state_dim, + candidate_dim, + dim_model, + num_stacked_layers, + max_src_seq_len, + max_tgt_seq_len, + ) + self.gru = nn.GRU( + input_size=dim_model, + hidden_size=dim_model, + num_layers=num_stacked_layers, + batch_first=True, + ) + self.end_of_seq_vec = nn.Parameter( + torch.zeros(candidate_dim), requires_grad=True + ) + self.proj = nn.Linear(2 * dim_model, 1) + self._init_params() + + def _convert_seq2slate_to_reward_model_format( + self, input: rlt.PreprocessedRankingInput + ): + device = next(self.parameters()).device + # pyre-fixme[16]: Optional type has no attribute `float_features`. 
+ batch_size, tgt_seq_len, candidate_dim = input.tgt_out_seq.float_features.shape + src_seq_len = input.src_seq.float_features.shape[1] + assert self.max_tgt_seq_len == tgt_seq_len + assert self.max_src_seq_len == src_seq_len + + # unselected_idx stores indices of items that are not included in the slate + unselected_idx = torch.ones(batch_size, src_seq_len, device=device) + unselected_idx[ + torch.arange(batch_size, device=device).repeat_interleave( + torch.tensor(tgt_seq_len, device=device) + ), + # pyre-fixme[16]: Optional type has no attribute `flatten`. + input.tgt_out_idx.flatten() - 2, + ] = 0 + # shape: batch_size, (src_seq_len - tgt_seq_len) + unselected_idx = torch.nonzero(unselected_idx, as_tuple=True)[1].reshape( + batch_size, src_seq_len - tgt_seq_len + ) + # shape: batch_size, (src_seq_len - tgt_seq_len), candidate_dim + unselected_candidate_features = gather( + input.src_seq.float_features, unselected_idx + ) + # shape: batch_size, src_seq_len + 1, candidate_dim + tgt_in_seq = torch.cat( + ( + input.tgt_out_seq.float_features, + unselected_candidate_features, + self.end_of_seq_vec.repeat(batch_size, 1, 1), + ), + dim=1, + ) + + return rlt.PreprocessedRankingInput.from_tensors( + state=input.state.float_features, + src_seq=input.src_seq.float_features, + src_src_mask=input.src_src_mask, + tgt_in_seq=tgt_in_seq, + ) + + def embed(self, state, tgt_in_seq): + batch_size = state.shape[0] + + # candidate_embed: batch_size, src_seq_len + 1, dim_model/2 + candidate_embed = self.candidate_embedder(tgt_in_seq) + # state_embed: batch_size, dim_model/2 + state_embed = self.state_embedder(state) + # transform state_embed into shape: batch_size, src_seq_len, dim_model/2 + state_embed = state_embed.repeat(1, self.max_src_seq_len + 1).reshape( + batch_size, self.max_src_seq_len + 1, -1 + ) + + # Input at each encoder step is actually concatenation of state_embed + # and candidate embed. + # shape: batch_size, src_seq_len + 1, dim_model + tgt_in_embed = torch.cat((state_embed, candidate_embed), dim=2) + return tgt_in_embed + + def forward(self, input: rlt.PreprocessedRankingInput): + input = self._convert_seq2slate_to_reward_model_format(input) + state = input.state.float_features + tgt_in_seq = input.tgt_in_seq.float_features + + # shape: batch_size, src_seq_len + 1, dim_modle + tgt_in_embed = self.embed(state, tgt_in_seq) + + # output shape: batch_size, src_seq_len + 1, dim_model + output, hn = self.gru(tgt_in_embed) + # hn shape: batch_size, dim_model + hn = hn[-1] # top layer's hidden state + + # attention, using hidden as query, outputs as keys and values + # shape: batch_size, src_seq_len + 1 + attn_weights = F.softmax( + torch.bmm( + output, + hn.unsqueeze(2) / torch.sqrt(torch.tensor(self.candidate_dim).float()), + ).squeeze(2), + dim=1, + ) + # shape: batch_size, dim_model + context_vector = torch.bmm(attn_weights.unsqueeze(1), output).squeeze(1) + + # reward prediction depends on hidden state of the last step + context vector + # shape: batch_size, 2 * dim_model + seq_embed = torch.cat((hn, context_vector), dim=1) + + # shape: batch_size, 1 + pred_reward = self.proj(seq_embed) + return rlt.RewardNetworkOutput(predicted_reward=pred_reward) + + +class Seq2SlateTransformerRewardNet(Seq2SlateRewardNetBase): def __init__( self, state_dim: int, @@ -37,25 +213,18 @@ def __init__( It uses a transformer-based encoder to encode the items shown in the slate. The slate reward is predicted by attending all encoder steps' outputs. 
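# A minimal sketch (hypothetical sizes) of the attention pooling used by the GRU reward
# head above: the top layer's final hidden state is the query, the per-step GRU outputs
# are both keys and values, and the reward is predicted from [hidden state, context].
import torch
import torch.nn.functional as F

batch_size, seq_len, dim_model, candidate_dim = 4, 6, 32, 16
output = torch.randn(batch_size, seq_len, dim_model)   # GRU outputs (batch_first=True)
hn = torch.randn(batch_size, dim_model)                # top layer's final hidden state

scale = torch.sqrt(torch.tensor(candidate_dim).float())
attn_weights = F.softmax(torch.bmm(output, hn.unsqueeze(2) / scale).squeeze(2), dim=1)
context_vector = torch.bmm(attn_weights.unsqueeze(1), output).squeeze(1)

seq_embed = torch.cat((hn, context_vector), dim=1)     # batch_size, 2 * dim_model
assert seq_embed.shape == (batch_size, 2 * dim_model)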
- - For convenience, Seq2SlateRewardModel and Seq2SlateTransformerModel share - the same parameter notations. Therefore, the reward model's encoder is - actually applied on target sequences (i.e., slates) referred in - Seq2SlateTransformerModel. - - Note that max_src_seq_len is the """ - super().__init__() - self.state_dim = state_dim - self.candidate_dim = candidate_dim - self.num_stacked_layers = num_stacked_layers + super().__init__( + state_dim, + candidate_dim, + dim_model, + num_stacked_layers, + max_src_seq_len, + max_tgt_seq_len, + ) self.num_heads = num_heads - self.dim_model = dim_model self.dim_feedforward = dim_feedforward - self.max_src_seq_len = max_src_seq_len - self.max_tgt_seq_len = max_tgt_seq_len - c = copy.deepcopy attn = MultiHeadedAttention(num_heads, dim_model) ff = PositionwiseFeedForward(dim_model, dim_feedforward) @@ -65,20 +234,14 @@ def __init__( self.decoder = Decoder( DecoderLayer(dim_model, c(attn), c(attn), c(ff)), num_stacked_layers ) - self.candidate_embedder = Embedder(candidate_dim, dim_model // 2) - self.state_embedder = Embedder(state_dim, dim_model // 2) - self.positional_encoding = PositionalEncoding( - dim_model, max_len=2 * (max_src_seq_len + max_tgt_seq_len) - ) + self.positional_encoding_encoder = PositionalEncoding(dim_model) + self.positional_encoding_decoder = PositionalEncoding(dim_model) self.proj = nn.Linear(dim_model, 1) self.decoder_start_vec = nn.Parameter( torch.zeros(candidate_dim), requires_grad=True ) - # Initialize parameters with Glorot / fan_avg. - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) + self._init_params() def encode(self, state, src_seq, src_mask): # state: batch_size, state_dim @@ -98,8 +261,8 @@ def encode(self, state, src_seq, src_mask): # Input at each encoder step is actually concatenation of state_embed # and candidate embed. state_embed is replicated at each encoding step. # src_embed shape: batch_size, src_seq_len, dim_model - src_embed = self.positional_encoding( - torch.cat((state_embed, candidate_embed), dim=2), self.max_src_seq_len + src_embed = self.positional_encoding_encoder( + torch.cat((state_embed, candidate_embed), dim=2) ) # encoder_output shape: batch_size, src_seq_len, dim_model @@ -125,25 +288,14 @@ def decode( ) # tgt_embed: batch_size, seq_len, dim_model - tgt_embed = self.positional_encoding( - torch.cat((state_embed, candidate_embed), dim=2), tgt_seq_len + tgt_embed = self.positional_encoding_decoder( + torch.cat((state_embed, candidate_embed), dim=2) ) # output of decoder will be later transformed into probabilities over symbols. 
# shape: batch_size, seq_len, dim_model return self.decoder(tgt_embed, memory, tgt_src_mask, tgt_tgt_mask) - def input_prototype(self): - return rlt.PreprocessedRankingInput.from_tensors( - state=torch.randn(1, self.state_dim), - src_seq=torch.randn(1, self.max_src_seq_len, self.candidate_dim), - tgt_in_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim), - tgt_out_seq=torch.randn(1, self.max_tgt_seq_len, self.candidate_dim), - src_src_mask=torch.ones(1, self.max_src_seq_len, self.max_src_seq_len), - tgt_tgt_mask=torch.ones(1, self.max_tgt_seq_len, self.max_tgt_seq_len), - tgt_out_idx=torch.arange(self.max_tgt_seq_len).reshape(1, -1) + 2, - ) - def _convert_seq2slate_to_reward_model_format( self, input: rlt.PreprocessedRankingInput ): @@ -157,15 +309,7 @@ def _convert_seq2slate_to_reward_model_format( batch_size, tgt_seq_len, candidate_dim = input.tgt_out_seq.float_features.shape assert self.max_tgt_seq_len == tgt_seq_len - # shape: batch_szie, tgt_seq_len + 1 - tgt_in_idx = torch.cat( - ( - torch.full((batch_size, 1), DECODER_START_SYMBOL, device=device).long(), - input.tgt_out_idx, - ), - dim=1, - ) - tgt_tgt_mask = subsequent_and_padding_mask(tgt_in_idx) + tgt_tgt_mask = subsequent_mask(tgt_seq_len + 1, device) # shape: batch_size, tgt_seq_len + 1, candidate_dim tgt_in_seq = torch.cat( ( @@ -217,7 +361,7 @@ def forward(self, input: rlt.PreprocessedRankingInput): class Seq2SlateRewardNetJITWrapper(ModelBase): - def __init__(self, model: Seq2SlateRewardNet): + def __init__(self, model: Seq2SlateRewardNetBase): super().__init__() self.model = model @@ -250,3 +394,32 @@ def forward( tgt_out_idx=tgt_out_idx, ) ).predicted_reward + + +class Seq2SlateRewardNetEnsemble(ModelBase): + def __init__(self, models: List[ModelBase]): + super().__init__() + self.models = models + + def forward( + self, + state: torch.Tensor, + src_seq: torch.Tensor, + tgt_out_seq: torch.Tensor, + src_src_mask: torch.Tensor, + tgt_out_idx: torch.Tensor, + ) -> torch.Tensor: + agg_pred = torch.cat( + [ + model( + state, + src_seq, + tgt_out_seq, + src_src_mask, + tgt_out_idx, + ) + for model in self.models + ], + dim=1, + ) + return torch.median(agg_pred, dim=1, keepdim=True).values diff --git a/reagent/models/sparse_dqn.py b/reagent/models/sparse_dqn.py new file mode 100644 index 000000000..249fa874c --- /dev/null +++ b/reagent/models/sparse_dqn.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
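# A minimal sketch (hypothetical values) of the aggregation used by
# Seq2SlateRewardNetEnsemble above: member predictions are concatenated along dim 1 and
# reduced with an element-wise median, which is less sensitive to a single outlying
# member than a mean. The three tensors stand in for three models' reward predictions.
import torch

preds = [
    torch.tensor([[1.0], [2.0]]),   # model 1: batch_size x 1
    torch.tensor([[5.0], [2.5]]),   # model 2
    torch.tensor([[1.5], [9.0]]),   # model 3 (an outlier for the second example)
]
agg_pred = torch.cat(preds, dim=1)                       # batch_size x num_models
median_pred = torch.median(agg_pred, dim=1, keepdim=True).values
# median_pred == tensor([[1.5], [2.5]])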
+ +from typing import List + +import torch +from reagent.core import types as rlt +from reagent.models import FullyConnectedNetwork +from reagent.models.base import ModelBase +from torchrec.models.dlrm import SparseArch +from torchrec.modules.embedding_modules import EmbeddingBagCollection +from torchrec.sparse.jagged_tensor import KeyedJaggedTensor + + +# troch.fx.trace does not support dynamic control flow, wrap the if-else and assert logic in this function to work around this limitation +@torch.fx.wrap +def fetch_id_list_features( + state: rlt.FeatureData, action: rlt.FeatureData +) -> KeyedJaggedTensor: + assert state.id_list_features is not None or action.id_list_features is not None + if state.id_list_features is not None and action.id_list_features is None: + sparse_features = state.id_list_features + elif state.id_list_features is None and action.id_list_features is not None: + sparse_features = action.id_list_features + elif state.id_list_features is not None and action.id_list_features is not None: + sparse_features = KeyedJaggedTensor.concat( + [state.id_list_features, action.id_list_features] + ) + else: + raise ValueError + # TODO: add id_list_score_features + return sparse_features + + +class SparseDQN(ModelBase): + """ + Concatenating embeddings from bag collection with float features before passing the input + to DQN + """ + + def __init__( + self, + state_dense_dim: int, + embedding_bag_collection: EmbeddingBagCollection, + action_dense_dim: int, + overarch_dims: List[int], + activation: str = "relu", + final_activation: str = "relu", + use_batch_norm: bool = True, + use_layer_norm: bool = False, + output_dim: int = 1, + ) -> None: + super().__init__() + self.sparse_arch: SparseArch = SparseArch(embedding_bag_collection) + + self.sparse_embedding_dim: int = sum( + [ + len(embc.feature_names) * embc.embedding_dim + for embc in embedding_bag_collection.embedding_bag_configs() + ] + ) + + self.input_dim: int = ( + state_dense_dim + self.sparse_embedding_dim + action_dense_dim + ) + layers = [self.input_dim] + overarch_dims + [output_dim] + activations = [activation] * len(overarch_dims) + [final_activation] + self.q_network = FullyConnectedNetwork( + layers=layers, + activations=activations, + use_batch_norm=use_batch_norm, + ) + + def forward(self, state: rlt.FeatureData, action: rlt.FeatureData) -> torch.Tensor: + dense_features = torch.cat( + (state.float_features, action.float_features), dim=-1 + ) + batch_size = dense_features.shape[0] + sparse_features = fetch_id_list_features(state, action) + # shape: batch_size, num_sparse_features, embedding_dim + embedded_sparse = self.sparse_arch(sparse_features) + # shape: batch_size, num_sparse_features * embedding_dim + embedded_sparse = embedded_sparse.reshape(batch_size, -1) + concatenated_dense = torch.cat((dense_features, embedded_sparse), dim=-1) + + return self.q_network(concatenated_dense) diff --git a/reagent/models/synthetic_reward.py b/reagent/models/synthetic_reward.py new file mode 100644 index 000000000..95eafc053 --- /dev/null +++ b/reagent/models/synthetic_reward.py @@ -0,0 +1,529 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
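# A minimal sketch (hypothetical sizes) of the shape bookkeeping inside SparseDQN.forward
# above: dense state/action features are concatenated, the pooled sparse embeddings (one
# embedding_dim-sized vector per sparse feature) are flattened, and the result feeds the
# fully connected Q-network. The random tensor stands in for SparseArch's pooled output.
import torch

batch_size = 4
state_dense_dim, action_dense_dim = 10, 3
num_sparse_features, embedding_dim = 5, 8

dense_features = torch.cat(
    (torch.randn(batch_size, state_dense_dim), torch.randn(batch_size, action_dense_dim)),
    dim=-1,
)
embedded_sparse = torch.randn(batch_size, num_sparse_features, embedding_dim)
embedded_sparse = embedded_sparse.reshape(batch_size, -1)

concatenated_dense = torch.cat((dense_features, embedded_sparse), dim=-1)
assert concatenated_dense.shape == (
    batch_size,
    state_dense_dim + action_dense_dim + num_sparse_features * embedding_dim,
)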
+import logging +import math +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F +from reagent.core import parameters as rlp, types as rlt +from reagent.models import convolutional_network, fully_connected_network +from reagent.models.base import ModelBase +from reagent.models.fully_connected_network import ACTIVATION_MAP + +logger = logging.getLogger(__name__) + + +def _get_activation_fn(activation): + if activation == "relu": + return F.relu + elif activation == "gelu": + return F.gelu + + raise RuntimeError("activation should be relu/gelu, not {}".format(activation)) + + +class Concat(nn.Module): + def forward(self, state: torch.Tensor, action: torch.Tensor): + return torch.cat((state, action), dim=-1) + + +class SequentialMultiArguments(nn.Sequential): + """Sequential which can take more than 1 argument in forward function""" + + def forward(self, *inputs): + for module in self._modules.values(): + if type(inputs) == tuple: + inputs = module(*inputs) + else: + inputs = module(inputs) + return inputs + + +class ResidualBlock(nn.Module): + def __init__(self, d_model=64, dim_feedforward=128): + super(ResidualBlock, self).__init__() + self.relu = nn.ReLU() + self.fc_residual = nn.Sequential( + nn.Linear(d_model, dim_feedforward), + nn.ReLU(), + nn.Linear(dim_feedforward, d_model), + ) + self.relu = nn.ReLU() + + def forward(self, x): + return self.relu(x + self.fc_residual(x)) + + +class PositionalEncoding(nn.Module): + def __init__(self, feature_dim=128, dropout=0.0, max_len=100): + """ + This module injects some information about the relative or absolute position of the tokens in the sequence. + The generated positional encoding are concatenated together with the features. + Args: input dim + """ + super(PositionalEncoding, self).__init__() + self.dropout = nn.Dropout(p=dropout) + pe = torch.zeros(max_len, feature_dim, requires_grad=False) + position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, feature_dim, 2).float() * (-math.log(10000.0) / feature_dim) + ) + pe[:, 0::2] = torch.sin(position * div_term) # max_len * feature_dim // 2 + pe[:, 1::2] = torch.cos(position * div_term) + # pe dimension: (max_len, 1, feature_dim) + pe = pe.unsqueeze(0).transpose(0, 1) + self.register_buffer("pe", pe) + + def forward(self, x): + # x dimension: (L, B, E) + # batch_size, seq_len, d_model + seq_len = x.shape[0] + pos_encoding = self.pe[:seq_len, :] + x = x + pos_encoding + return self.dropout(x) + + +class PETransformerEncoderLayer(nn.Module): + """PETransformerEncoderLayer is made up of Positional Encoding (PE), residual connections, self-attn and feedforward network. + Major differences between this implementation and the pytorch official torch.nn.TransformerEncoderLayer are: + 1. Augment input data with positional encoding. hat{x} = x + PE{x} + 2. Two paralle residual blocks are applied to the raw input data (x) and encoded input data (hat{x}), respectively, i.e. z = Residual(x), hat{z} = Residual(hat{x}) + 3. Treat z as the Value input, and hat{z} as the Query and Key input to feed a self-attention block. + + Main Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). + dim_feedforward: the dimension of the feedforward network model (default=2048). + activation: the activation function of intermediate layer, relu or gelu (default=relu). 
+ layer_norm_eps: the eps value in layer normalization components (default=1e-5). + batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False``. + max_len: argument passed to the Positional Encoding module, see more details in the PositionalEncoding class. + """ + + __constants__ = ["batch_first"] + + def __init__( + self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.0, + activation="relu", + layer_norm_eps=1e-5, + max_len=100, + use_ff=True, + pos_weight=0.5, + batch_first=False, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super(PETransformerEncoderLayer, self).__init__() + self.use_ff = use_ff + self.pos_weight = pos_weight + self.self_attn = nn.MultiheadAttention( + d_model, nhead, dropout=dropout, batch_first=batch_first, **factory_kwargs + ) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs) + + self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs) + self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + # Customized implementation: to map Query & Key, Value with different embeddings. + self.qk_residual = ResidualBlock(d_model, dim_feedforward) + self.v_residual = ResidualBlock(d_model, dim_feedforward) + self.pos_encoder = PositionalEncoding(d_model, dropout=dropout, max_len=max_len) + + self.activation = _get_activation_fn(activation) + + def __setstate__(self, state): + if "activation" not in state: + state["activation"] = F.relu + super(PETransformerEncoderLayer, self).__setstate__(state) + + def forward(self, src, src_mask=None, src_key_padding_mask=None, is_causal=False): + encoded_src = self.pos_encoder(src) + query = self.qk_residual(encoded_src) + # do not involve pos_encoding info into the value + src = self.v_residual(src) + + src2 = self.self_attn( + query, # query + query, # key = query as the input + src, # value + attn_mask=src_mask, + key_padding_mask=src_key_padding_mask, + )[0] + # add transformer related residual + src = src + self.dropout1(src2) + src = self.norm1(src) + # add another ff layer + src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src + + +def ngram(input: torch.Tensor, context_size: int, ngram_padding: torch.Tensor): + # input shape: seq_len, batch_size, state_dim + action_dim + seq_len, batch_size, feature_dim = input.shape + + shifted_list = [] + for i in range(context_size): + offset = i - context_size // 2 + if offset < 0: + shifted = torch.cat( + ( + ngram_padding.tile((-offset, batch_size, 1)), + input.narrow(0, 0, seq_len + offset), + ), + dim=0, + ) + elif offset > 0: + shifted = torch.cat( + ( + input.narrow(0, offset, seq_len - offset), + ngram_padding.tile(offset, batch_size, 1), + ), + dim=0, + ) + else: + shifted = input + shifted_list.append(shifted) + + # shape: seq_len, batch_size, feature_dim * context_size + return torch.cat(shifted_list, dim=-1) + + +def _gen_mask(valid_step: torch.Tensor, batch_size: int, seq_len: int): + """ + Mask for dealing with different lengths of MDPs + + Example: + valid_step = [[1], [2], [3]], batch_size=3, seq_len = 4 + mask = [ + [0, 0, 0, 1], + [0, 0, 1, 1], + [0, 1, 1, 1], + ] + """ + assert valid_step.shape == 
(batch_size, 1) + assert (1 <= valid_step).all() + assert (valid_step <= seq_len).all() + device = valid_step.device + mask = torch.arange(seq_len, device=device).repeat(batch_size, 1) + mask = (mask >= (seq_len - valid_step)).float() + return mask + + +class SyntheticRewardNet(ModelBase): + """ + This base class provides basic operations to consume inputs and call a synthetic reward net + + A synthetic reward net (self.net) assumes the input contains only torch.Tensors. + Expected input shape: + state: seq_len, batch_size, state_dim + action: seq_len, batch_size, action_dim + Expected output shape: + reward: batch_size, seq_len + """ + + def __init__(self, net: nn.Module): + super().__init__() + self.net = net + + def forward(self, training_batch: rlt.MemoryNetworkInput): + # state shape: seq_len, batch_size, state_dim + state = training_batch.state.float_features + # action shape: seq_len, batch_size, action_dim + action = training_batch.action.float_features + + # shape: batch_size, 1 + valid_step = training_batch.valid_step + seq_len, batch_size, _ = training_batch.action.float_features.shape + + # output shape: batch_size, seq_len + output = self.net(state, action) + assert valid_step is not None + mask = _gen_mask(valid_step, batch_size, seq_len) + output_masked = output * mask + + pred_reward = output_masked.sum(dim=1, keepdim=True) + return rlt.SyntheticRewardNetworkOutput( + predicted_reward=pred_reward, + mask=mask, + output=output, + ) + + def export_mlp(self): + """ + Export an pytorch nn to feed to predictor wrapper. + """ + return self.net + + +class SingleStepSyntheticRewardNet(nn.Module): + def __init__( + self, + state_dim: int, + action_dim: int, + sizes: List[int], + activations: List[str], + last_layer_activation: str, + use_batch_norm: bool = False, + use_layer_norm: bool = False, + ): + """ + Decompose rewards at the last step to individual steps. 
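# A minimal sketch (hypothetical values, reusing the _gen_mask docstring example) of how
# SyntheticRewardNet above turns per-step predictions into one episode reward: steps
# outside the valid suffix are zeroed by the mask before summing over the sequence.
import torch

seq_len, batch_size = 4, 3
per_step_output = torch.ones(batch_size, seq_len)       # pretend each step predicts 1.0
valid_step = torch.tensor([[1], [2], [3]])

mask = torch.arange(seq_len).repeat(batch_size, 1)
mask = (mask >= (seq_len - valid_step)).float()         # same construction as _gen_mask
pred_reward = (per_step_output * mask).sum(dim=1, keepdim=True)
# pred_reward == tensor([[1.], [2.], [3.]])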
+ """ + super().__init__() + modules: List[nn.Module] = [Concat()] + prev_layer_size = state_dim + action_dim + for size, activation in zip(sizes, activations): + if use_batch_norm: + modules.append(nn.BatchNorm1d(prev_layer_size)) + modules.append(nn.Linear(prev_layer_size, size)) + if use_layer_norm: + modules.append(nn.LayerNorm(size)) + modules.append(ACTIVATION_MAP[activation]()) + prev_layer_size = size + # last layer + modules.append(nn.Linear(prev_layer_size, 1)) + modules.append(ACTIVATION_MAP[last_layer_activation]()) + self.dnn = SequentialMultiArguments(*modules) + + def forward(self, state: torch.Tensor, action: torch.Tensor): + # state shape: seq_len, batch_size, state_dim + # action shape: seq_len, batch_size, action_dim + return self.dnn(state, action).squeeze(2).transpose(0, 1) + + +class NGramConvolutionalNetwork(nn.Module): + def __init__( + self, + state_dim: int, + action_dim: int, + sizes: List[int], + activations: List[str], + last_layer_activation: str, + context_size: int, + conv_net_params: rlp.ConvNetParameters, + use_layer_norm: bool = False, + ) -> None: + assert context_size % 2 == 1, f"Context size is not odd: {context_size}" + super().__init__() + + self.context_size = context_size + self.input_width = state_dim + action_dim + self.input_height = context_size + self.num_input_channels = 1 + + num_conv_layers = len(conv_net_params.conv_height_kernels) + conv_width_kernels = [self.input_width] + [1] * (num_conv_layers - 1) + + cnn_parameters = convolutional_network.CnnParameters( + conv_dims=[self.num_input_channels] + conv_net_params.conv_dims, + conv_height_kernels=conv_net_params.conv_height_kernels, + conv_width_kernels=conv_width_kernels, + pool_types=conv_net_params.pool_types, + pool_kernels_strides=conv_net_params.pool_kernel_sizes, + num_input_channels=self.num_input_channels, + input_height=self.input_height, + input_width=self.input_width, + ) + self.conv_net = convolutional_network.ConvolutionalNetwork( + cnn_parameters, + [-1] + sizes + [1], + activations + [last_layer_activation], + use_layer_norm=use_layer_norm, + ) + + self.ngram_padding = torch.zeros(1, 1, state_dim + action_dim) + + def forward(self, state: torch.Tensor, action: torch.Tensor) -> torch.Tensor: + """Forward pass NGram conv net. 
+ + :param input shape: seq_len, batch_size, feature_dim + """ + # shape: seq_len, batch_size, state_dim + action_dim + input = torch.cat((state, action), dim=-1) + # shape: seq_len, batch_size, (state_dim + action_dim) * context_size + ngram_input = ngram( + input, self.context_size, self.ngram_padding.to(input.device) + ) + + seq_len, batch_size, _ = ngram_input.shape + # shape: seq_len * batch_size, 1, context_size, state_dim + action_dim + reshaped = ngram_input.reshape(-1, 1, self.input_height, self.input_width) + # shape: batch_size, seq_len + output = self.conv_net(reshaped).reshape(seq_len, batch_size).transpose(0, 1) + return output + + +class NGramFullyConnectedNetwork(nn.Module): + def __init__( + self, + state_dim: int, + action_dim: int, + sizes: List[int], + activations: List[str], + last_layer_activation: str, + context_size: int, + use_layer_norm: bool = False, + ) -> None: + assert context_size % 2 == 1, f"Context size is not odd: {context_size}" + super().__init__() + self.context_size = context_size + self.ngram_padding = torch.zeros(1, 1, state_dim + action_dim) + self.fc = fully_connected_network.FullyConnectedNetwork( + [(state_dim + action_dim) * context_size] + sizes + [1], + activations + [last_layer_activation], + use_layer_norm=use_layer_norm, + ) + + def forward(self, state: torch.Tensor, action: torch.Tensor) -> torch.Tensor: + """Forward pass NGram conv net. + + :param input shape: seq_len, batch_size, feature_dim + """ + input = torch.cat((state, action), dim=-1) + # shape: seq_len, batch_size, (state_dim + action_dim) * context_size + ngram_input = ngram( + input, self.context_size, self.ngram_padding.to(input.device) + ) + # shape: batch_size, seq_len + return self.fc(ngram_input).transpose(0, 1).squeeze(2) + + +class SequenceSyntheticRewardNet(nn.Module): + def __init__( + self, + state_dim: int, + action_dim: int, + lstm_hidden_size: int, + lstm_num_layers: int, + lstm_bidirectional: bool, + last_layer_activation: str, + ): + """ + Decompose rewards at the last step to individual steps. 
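# A minimal sketch (hypothetical values) of what the ngram() helper above produces: with
# context_size = 3, each step's feature vector is concatenated with its zero-padded left
# and right neighbours, so the feature dimension grows by a factor of context_size.
import torch

seq = torch.tensor([[[1.0]], [[2.0]], [[3.0]]])    # seq_len=3, batch_size=1, feature_dim=1
pad = torch.zeros(1, 1, 1)

left = torch.cat((pad, seq[:-1]), dim=0)           # neighbour at offset -1: [0, 1, 2]
right = torch.cat((seq[1:], pad), dim=0)           # neighbour at offset +1: [2, 3, 0]
ngram_input = torch.cat((left, seq, right), dim=-1)
# ngram_input[:, 0, :] == [[0., 1., 2.],
#                          [1., 2., 3.],
#                          [2., 3., 0.]]
assert ngram_input.shape == (3, 1, 3)              # seq_len, batch_size, feature_dim * context_size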
+ """ + super().__init__() + + self.state_dim = state_dim + self.action_dim = action_dim + + self.lstm_hidden_size = lstm_hidden_size + self.lstm_num_layers = lstm_num_layers + self.lstm_bidirectional = lstm_bidirectional + + self.lstm = nn.LSTM( + input_size=self.state_dim + self.action_dim, + hidden_size=self.lstm_hidden_size, + num_layers=self.lstm_num_layers, + bidirectional=self.lstm_bidirectional, + ) + + if self.lstm_bidirectional: + self.fc_out = nn.Linear(self.lstm_hidden_size * 2, 1) + else: + self.fc_out = nn.Linear(self.lstm_hidden_size, 1) + + self.output_activation = ACTIVATION_MAP[last_layer_activation]() + + def forward(self, state: torch.Tensor, action: torch.Tensor): + # shape: seq_len, batch_size, state_dim + action_dim + cat_input = torch.cat((state, action), dim=-1) + # output shape: seq_len, batch_size, self.hidden_size + output, _ = self.lstm(cat_input) + # output shape: seq_len, batch_size, 1 + output = self.fc_out(output) + # output shape: batch_size, seq_len + output = self.output_activation(output).squeeze(2).transpose(0, 1) + return output + + +class TransformerSyntheticRewardNet(nn.Module): + def __init__( + self, + state_dim: int, + action_dim: int, + d_model: int, + nhead: int = 2, + num_encoder_layers: int = 2, + dim_feedforward: int = 128, + dropout: float = 0.0, + activation: str = "relu", + last_layer_activation: str = "leaky_relu", + layer_norm_eps: float = 1e-5, + max_len: int = 10, + ): + """ + Decompose rewards at the last step to individual steps using transformer modules. + + Args: + nhead: the number of heads in the multiheadattention models (default=8). + num_encoder_layers: the number of sub-encoder-layers in the encoder (default=6). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + activation: the activation function of encoder/decoder intermediate layer, relu or gelu (default=relu). + layer_norm_eps: the eps value in layer normalization components (default=1e-5). 
+ """ + super().__init__() + + self.state_dim = state_dim + self.action_dim = action_dim + # d_model: dimension of transformer input + self.d_model = d_model + self.nhead = nhead + self.num_encoder_layers = num_encoder_layers + self.dim_feedforward = dim_feedforward + self.dropout = dropout + self.activation = activation + self.layer_norm_eps = layer_norm_eps + self.max_len = max_len + + # map input features to higher latent space before sending to transformer + self.fc_in = nn.Sequential( + nn.Linear(self.state_dim + self.action_dim, self.d_model), + nn.ReLU(), + ) + + # use transformer encoder to get reward logits for each step + encoder_layer = PETransformerEncoderLayer( + self.d_model, + nhead, + dim_feedforward, + dropout, + activation, + layer_norm_eps, + max_len=self.max_len, + batch_first=False, + ) + self.transformer = nn.TransformerEncoder( + encoder_layer, + num_encoder_layers, + ) + self.fc_out = nn.Linear(self.d_model, 1) + self.output_activation = ACTIVATION_MAP[last_layer_activation]() + + def forward(self, state: torch.Tensor, action: torch.Tensor): + # shape: seq_len (L), batch_size (B), state_dim + action_dim + cat_input = torch.cat((state, action), dim=-1) + # latent_input shape: (L,B,E) + latent_input = self.fc_in(cat_input) + # output shape: (L, B, E) + output = self.transformer(latent_input) + output = self.fc_out(output) + # output shape: seq_len, batch_size, 1 + output = self.output_activation(output).squeeze(2).transpose(0, 1) + # output shape: batch_size, seq_len + return output diff --git a/reagent/models/synthetic_reward_sparse_arch.py b/reagent/models/synthetic_reward_sparse_arch.py new file mode 100644 index 000000000..e17627a4f --- /dev/null +++ b/reagent/models/synthetic_reward_sparse_arch.py @@ -0,0 +1,345 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging +from typing import List + +import torch +import torch.nn as nn +from reagent.core import types as rlt +from reagent.core.torch_utils import split_sequence_keyed_jagged_tensor +from reagent.models.base import ModelBase +from reagent.models.fully_connected_network import ACTIVATION_MAP +from reagent.models.synthetic_reward import _gen_mask +from torchrec import EmbeddingBagCollection +from torchrec.models.dlrm import InteractionArch, SparseArch +from torchrec.sparse.jagged_tensor import KeyedJaggedTensor + + +logger = logging.getLogger(__name__) + + +def create_dense_arch( + input_dim: int, + dense_sizes: List[int], + dense_activations: List[str], + use_batch_norm: bool, + use_layer_norm: bool, +): + modules: List[nn.Module] = [] + prev_layer_size = input_dim + for size, activation in zip(dense_sizes, dense_activations): + if use_batch_norm: + modules.append(nn.BatchNorm1d(prev_layer_size)) + modules.append(nn.Linear(prev_layer_size, size)) + if use_layer_norm: + modules.append(nn.LayerNorm(size)) + modules.append(ACTIVATION_MAP[activation]()) + prev_layer_size = size + return nn.Sequential(*modules) + + +class SyntheticRewardSparseArchNet(ModelBase): + """ + This base class provides basic operations to consume inputs and call a synthetic reward net + + A synthetic reward net (self.net) assumes the input contains only torch.Tensors. 
+ Expected input shape: + state: seq_len, batch_size, state_dim + action: seq_len, batch_size, action_dim + Expected output shape: + reward: batch_size, seq_len + """ + + def __init__(self, net: nn.Module): + super().__init__() + self.net = net + + def forward(self, training_batch: rlt.MemoryNetworkInput): + # state shape: seq_len, batch_size, state_dim + state = training_batch.state.float_features + # action shape: seq_len, batch_size, action_dim + action = training_batch.action.float_features + + # shape: batch_size, 1 + valid_step = training_batch.valid_step + seq_len, batch_size, _ = training_batch.action.float_features.shape + + # output shape: batch_size, seq_len + output = self.net( + state, + action, + training_batch.state.id_list_features, + training_batch.state.id_score_list_features, + training_batch.action.id_list_features, + training_batch.action.id_score_list_features, + ) + assert valid_step is not None + mask = _gen_mask(valid_step, batch_size, seq_len) + output_masked = output * mask + + pred_reward = output_masked.sum(dim=1, keepdim=True) + return rlt.SyntheticRewardNetworkOutput( + predicted_reward=pred_reward, + mask=mask, + output=output, + ) + + def export_mlp(self): + """ + Export an pytorch nn to feed to predictor wrapper. + """ + return self.net + + def requires_model_parallel(self): + return True + + +class SingleStepSyntheticSparseArchRewardNet(nn.Module): + def __init__( + self, + state_dense_dim: int, + action_dense_dim: int, + dense_sizes: List[int], + dense_activations: List[str], + overall_sizes: List[int], + overall_activations: List[str], + embedding_bag_collection: EmbeddingBagCollection, + use_batch_norm: bool = False, + use_layer_norm: bool = False, + ): + """ + Decompose rewards of the last step to all individual steps. 
+ + This model arch accepts sparse features and is similar to / inspired by + the model in "Deep Learning Recommendation Model for Personalization and + Recommendation Systems" (https://arxiv.org/abs/1906.00091) + + The model arch can be described as below: + + + last_layer_activation + ^ + overall arch + ^ + -----------interaction arch (2D + 2F + F choose 2) ------- + ^ ^ ^ + state_dense_out(D) action_dense_out(D) sparse_out(F*D) + ^ ^ ^ + state_dense_arch action_dense_arch sparse arch + ^ ^ ^ + state_dense action_dense state_sparse / action_sparse + + + , where: + D: last layer of dense_sizes (equal to sparse features' embedding_dim) + F: number of total sparse features (from both state and action and from both + id-list and id-score-list features) + Interaction arch returns a concatenation of + (1) and the dense layers itself, + (2) the dot product of each sparse embedding with the output of the dense arch, + (3) the pairwise dot product of each sparse embedding pair, + + """ + super().__init__() + self.validate_parameters( + dense_sizes, + dense_activations, + overall_sizes, + overall_activations, + embedding_bag_collection, + ) + + self.state_dense_arch = create_dense_arch( + state_dense_dim, + dense_sizes, + dense_activations, + use_batch_norm, + use_layer_norm, + ) + self.action_dense_arch = create_dense_arch( + action_dense_dim, + dense_sizes, + dense_activations, + use_batch_norm, + use_layer_norm, + ) + # sparse arch will be shared for state sparse features and action sparse features + self.sparse_arch = SparseArch(embedding_bag_collection) + + # Overall arch + F = sum( + [ + len(conf.feature_names) + for conf in embedding_bag_collection.embedding_bag_configs() + ] + ) + D = dense_sizes[-1] + self.F = F + self.D = D + sparse_feature_names = [] + for conf in embedding_bag_collection.embedding_bag_configs(): + sparse_feature_names.extend(conf.feature_names) + + try: + self.inter_arch_sparse_and_state_dense = InteractionArch( + F, + ) + self.inter_arch_sparse_and_action_dense = InteractionArch( + F, + ) + except TypeError: + # HACK: in torchrec OSS version (0.1.0), InteractionArch + # only accepts a list of sparse feature names as the input + # pyre-ignore + self.inter_arch_sparse_and_state_dense = InteractionArch( + sparse_feature_names=sparse_feature_names + ) + # pyre-ignore + self.inter_arch_sparse_and_action_dense = InteractionArch( + sparse_feature_names=sparse_feature_names + ) + + interaction_output_dim = 2 * D + 2 * F + F * (F - 1) // 2 + self.overall_arch = create_dense_arch( + interaction_output_dim, + overall_sizes, + overall_activations, + use_batch_norm, + use_layer_norm, + ) + + def validate_parameters( + self, + dense_sizes: List[int], + dense_activations: List[str], + overall_sizes: List[int], + overall_activations: List[str], + embedding_bag_collection: EmbeddingBagCollection, + ): + for i in range(1, len(embedding_bag_collection.embedding_bag_configs())): + conf_prev = embedding_bag_collection.embedding_bag_configs()[i - 1] + conf = embedding_bag_collection.embedding_bag_configs()[i] + assert ( + conf_prev.embedding_dim == conf.embedding_dim + ), "All EmbeddingBagConfigs must have the same embedding_dim" + + conf = embedding_bag_collection.embedding_bag_configs()[0] + dense_output_size = dense_sizes[-1] + assert ( + dense_output_size == conf.embedding_dim + ), "The last layer of dense_sizes should be equal to embedding_dim of sparse features" + assert overall_sizes[-1] == 1, "The last layer of overall_sizes should be 1" + + def forward( + self, + state: 
torch.Tensor, + action: torch.Tensor, + state_id_list: KeyedJaggedTensor, + state_id_score_list: KeyedJaggedTensor, + action_id_list: KeyedJaggedTensor, + action_id_score_list: KeyedJaggedTensor, + ): + # state shape: seq_len, batch_size, state_dim + # action shape: seq_len, batch_size, action_dim + # state_sparse: sparse state features from seq_len steps + seq_len, batch_size, _ = state.shape + + # state_dense_out shape: seq_len, batch_size, embed_dim + state_dense_out = self.state_dense_arch(state) + # action_dense_out shape: seq_len, batch_size, embed_dim + action_dense_out = self.action_dense_arch(action) + + sparse_data_per_step: List[ + KeyedJaggedTensor + ] = self.create_sparse_data_per_step( + state_id_list, + state_id_score_list, + action_id_list, + action_id_score_list, + seq_len, + ) + sparse_embed_per_step = [ + self.sparse_arch(sparse_data_per_step[i]) for i in range(seq_len) + ] + + interaction_per_step = [] + for i in range(seq_len): + # shape: batch_size, D + F + F choose 2 + inter_sparse_state = self.inter_arch_sparse_and_state_dense( + dense_features=state_dense_out[i], + sparse_features=sparse_embed_per_step[i], + ) + # shape: batch_size, D + F + F choose 2 + inter_sparse_action = self.inter_arch_sparse_and_action_dense( + dense_features=action_dense_out[i], + sparse_features=sparse_embed_per_step[i], + ) + # We need to concat interactions of sparse-state and sparse-action + # However, sparse feature embeddings' self dot-products are included + # in both interactions so we need to dedup + # interaction shape: batch_size, 2D + 2F + F choose 2 + interaction = torch.cat( + ( + inter_sparse_state, + inter_sparse_action[:, : self.D + self.F], + ), + dim=1, + ) + interaction_per_step.append(interaction) + + # interaction_per_step shape: seq_len, batch_size, 2D + 2F + F choose 2 + interaction_per_step = torch.stack(interaction_per_step, dim=0) + # overall_arch_out shape: seq_len, batch_size, 1 + overall_arch_out = self.overall_arch(interaction_per_step) + # return shape: batch_size, seq_len + return overall_arch_out.squeeze(2).transpose(0, 1) + + def create_sparse_data_per_step( + self, + state_id_list: KeyedJaggedTensor, + state_id_score_list: KeyedJaggedTensor, + action_id_list: KeyedJaggedTensor, + action_id_score_list: KeyedJaggedTensor, + seq_len: int, + ): + """ + Return a list of KeyedJaggedTensor, where each KeyedJaggedTensor + represents one step's sparse data. + + Under the hood, we perform the following steps: + 1. Split state_id_list, state_id_score_list, action_id_list, and + action_id_score_list by steps + 2. Treat state_id_list and action_id_list features as id_score_list + features with weight=1 + 3. 
Concatenate state_id_list, state_id_score_list, action_id_list, and + action_id_score_list at each step + """ + # Convert id_list data as id score list data with weight = 1 + state_id_list._weights = torch.ones_like(state_id_list.values()) + action_id_list._weights = torch.ones_like(action_id_list.values()) + + # For each step, we merge all sparse data into one KeyedJaggedTensor + state_id_list_per_step = split_sequence_keyed_jagged_tensor( + state_id_list, seq_len + ) + state_id_score_list_per_step = split_sequence_keyed_jagged_tensor( + state_id_score_list, seq_len + ) + action_id_list_per_step = split_sequence_keyed_jagged_tensor( + action_id_list, seq_len + ) + action_id_score_list_per_step = split_sequence_keyed_jagged_tensor( + action_id_score_list, seq_len + ) + sparse_data_per_step = [ + KeyedJaggedTensor.concat( + [ + state_id_list_per_step[i], + action_id_list_per_step[i], + state_id_score_list_per_step[i], + action_id_score_list_per_step[i], + ] + ) + for i in range(seq_len) + ] + return sparse_data_per_step diff --git a/reagent/models/world_model.py b/reagent/models/world_model.py index e6beabd87..d0152e94b 100644 --- a/reagent/models/world_model.py +++ b/reagent/models/world_model.py @@ -2,7 +2,7 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import torch -from reagent import types as rlt +from reagent.core import types as rlt from reagent.models.base import ModelBase from reagent.models.mdn_rnn import MDNRNN @@ -10,7 +10,7 @@ class MemoryNetwork(ModelBase): def __init__( self, state_dim, action_dim, num_hiddens, num_hidden_layers, num_gaussians - ): + ) -> None: super().__init__() self.mdnrnn = MDNRNN( state_dim=state_dim, diff --git a/reagent/net_builder/__init__.py b/reagent/net_builder/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/net_builder/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/net_builder/categorical_dqn/__init__.py b/reagent/net_builder/categorical_dqn/__init__.py index 67beca41d..8257356b4 100644 --- a/reagent/net_builder/categorical_dqn/__init__.py +++ b/reagent/net_builder/categorical_dqn/__init__.py @@ -1,2 +1,3 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from . import categorical # noqa diff --git a/reagent/net_builder/categorical_dqn/categorical.py b/reagent/net_builder/categorical_dqn/categorical.py index 796d21fce..cd2797aac 100644 --- a/reagent/net_builder/categorical_dqn/categorical.py +++ b/reagent/net_builder/categorical_dqn/categorical.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 - +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
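# A minimal sketch (not from the commit): the interaction-dimension bookkeeping used by
# SingleStepSyntheticSparseArchRewardNet above. D is the last dense layer size (equal to the
# sparse features' embedding_dim) and F is the total number of sparse features; the numbers
# below are hypothetical.
D, F = 8, 3
per_interaction = D + F + F * (F - 1) // 2      # each InteractionArch output: dense + dense/sparse dots + sparse pairs
# The action-side interaction is truncated to its first D + F columns before concatenation,
# because the F-choose-2 sparse/sparse dot products already appear in the state-side output.
concat_dim = per_interaction + (D + F)
assert concat_dim == 2 * D + 2 * F + F * (F - 1) // 2 == 25  # matches interaction_output_dim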
from typing import List from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash from reagent.models.base import ModelBase from reagent.models.categorical_dqn import CategoricalDQN from reagent.models.dqn import FullyConnectedDQN from reagent.net_builder.categorical_dqn_net_builder import CategoricalDQNNetBuilder -from reagent.parameters import NormalizationData, param_hash @dataclass @@ -17,7 +17,7 @@ class Categorical(CategoricalDQNNetBuilder): sizes: List[int] = field(default_factory=lambda: [256, 128]) activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: super().__init__() assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " diff --git a/reagent/net_builder/categorical_dqn_net_builder.py b/reagent/net_builder/categorical_dqn_net_builder.py index 9bd6e8be1..f88ecd706 100644 --- a/reagent/net_builder/categorical_dqn_net_builder.py +++ b/reagent/net_builder/categorical_dqn_net_builder.py @@ -1,27 +1,28 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import abc from typing import List -import reagent.types as rlt +import reagent.core.types as rlt import torch -from reagent.core.registry_meta import RegistryMeta +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData from reagent.models.base import ModelBase -from reagent.parameters import NormalizationData from reagent.prediction.predictor_wrapper import DiscreteDqnWithPreprocessor from reagent.preprocessing.normalization import get_num_output_features from reagent.preprocessing.preprocessor import Preprocessor -try: +if IS_FB_ENVIRONMENT: from reagent.fb.prediction.fb_predictor_wrapper import ( FbDiscreteDqnPredictorWrapper as DiscreteDqnPredictorWrapper, ) -except ImportError: +else: from reagent.prediction.predictor_wrapper import DiscreteDqnPredictorWrapper -class CategoricalDQNNetBuilder(metaclass=RegistryMeta): +class CategoricalDQNNetBuilder: """ Base class for categorical DQN net builder. """ @@ -56,7 +57,7 @@ def build_serving_module( state_normalization_data.dense_normalization_parameters, False ) dqn_with_preprocessor = DiscreteDqnWithPreprocessor( - q_network.cpu_model().eval(), state_preprocessor + q_network.cpu_model().eval(), state_preprocessor, state_feature_config ) return DiscreteDqnPredictorWrapper( dqn_with_preprocessor, action_names, state_feature_config diff --git a/reagent/net_builder/continuous_actor/__init__.py b/reagent/net_builder/continuous_actor/__init__.py index 3093bbb9c..2c8aa5492 100644 --- a/reagent/net_builder/continuous_actor/__init__.py +++ b/reagent/net_builder/continuous_actor/__init__.py @@ -1,5 +1,8 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -from . import dirichlet_fully_connected # noqa -from . import fully_connected # noqa -from . import gaussian_fully_connected # noqa +from . 
import ( # noqa # noqa # noqa + dirichlet_fully_connected, + fully_connected, + gaussian_fully_connected, +) diff --git a/reagent/net_builder/continuous_actor/dirichlet_fully_connected.py b/reagent/net_builder/continuous_actor/dirichlet_fully_connected.py index f0085d148..eab09db2e 100644 --- a/reagent/net_builder/continuous_actor/dirichlet_fully_connected.py +++ b/reagent/net_builder/continuous_actor/dirichlet_fully_connected.py @@ -1,12 +1,14 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import List +import reagent.core.types as rlt from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash from reagent.models.actor import DirichletFullyConnectedActor from reagent.models.base import ModelBase from reagent.net_builder.continuous_actor_net_builder import ContinuousActorNetBuilder -from reagent.parameters import NormalizationData, param_hash from reagent.preprocessing.identify_types import DO_NOT_PREPROCESS from reagent.preprocessing.normalization import get_num_output_features @@ -19,7 +21,7 @@ class DirichletFullyConnected(ContinuousActorNetBuilder): activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) use_batch_norm: bool = False - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: super().__init__() assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " @@ -32,6 +34,7 @@ def default_action_preprocessing(self) -> str: def build_actor( self, + state_feature_config: rlt.ModelFeatureConfig, state_normalization_data: NormalizationData, action_normalization_data: NormalizationData, ) -> ModelBase: diff --git a/reagent/net_builder/continuous_actor/fully_connected.py b/reagent/net_builder/continuous_actor/fully_connected.py index 4b1135521..570fc5a78 100644 --- a/reagent/net_builder/continuous_actor/fully_connected.py +++ b/reagent/net_builder/continuous_actor/fully_connected.py @@ -1,12 +1,14 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
from typing import List, Optional +import reagent.core.types as rlt from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash from reagent.models.actor import FullyConnectedActor from reagent.models.base import ModelBase from reagent.net_builder.continuous_actor_net_builder import ContinuousActorNetBuilder -from reagent.parameters import NormalizationData, param_hash from reagent.preprocessing.identify_types import CONTINUOUS_ACTION from reagent.preprocessing.normalization import get_num_output_features @@ -22,7 +24,7 @@ class FullyConnected(ContinuousActorNetBuilder): action_activation: str = "tanh" exploration_variance: Optional[float] = None - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: super().__init__() assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " @@ -35,6 +37,7 @@ def default_action_preprocessing(self) -> str: def build_actor( self, + state_feature_config: rlt.ModelFeatureConfig, state_normalization_data: NormalizationData, action_normalization_data: NormalizationData, ) -> ModelBase: diff --git a/reagent/net_builder/continuous_actor/gaussian_fully_connected.py b/reagent/net_builder/continuous_actor/gaussian_fully_connected.py index 7c495ea92..bd93653ec 100644 --- a/reagent/net_builder/continuous_actor/gaussian_fully_connected.py +++ b/reagent/net_builder/continuous_actor/gaussian_fully_connected.py @@ -1,12 +1,15 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -from typing import List +from typing import List, Optional +import reagent.models as models +from reagent.core import types as rlt from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash from reagent.models.actor import GaussianFullyConnectedActor from reagent.models.base import ModelBase from reagent.net_builder.continuous_actor_net_builder import ContinuousActorNetBuilder -from reagent.parameters import NormalizationData, param_hash from reagent.preprocessing.identify_types import CONTINUOUS_ACTION from reagent.preprocessing.normalization import get_num_output_features @@ -19,8 +22,10 @@ class GaussianFullyConnected(ContinuousActorNetBuilder): activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) use_batch_norm: bool = False use_layer_norm: bool = False + use_l2_normalization: bool = False + embedding_dim: Optional[int] = None - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: super().__init__() assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " @@ -33,6 +38,7 @@ def default_action_preprocessing(self) -> str: def build_actor( self, + state_feature_config: rlt.ModelFeatureConfig, state_normalization_data: NormalizationData, action_normalization_data: NormalizationData, ) -> ModelBase: @@ -42,11 +48,33 @@ def build_actor( action_dim = get_num_output_features( action_normalization_data.dense_normalization_parameters ) - return GaussianFullyConnectedActor( - state_dim=state_dim, + input_dim = state_dim + embedding_dim = self.embedding_dim + + embedding_concat = None + if embedding_dim is not None: + embedding_concat = models.EmbeddingBagConcat( + state_dense_dim=state_dim, + model_feature_config=state_feature_config, + ) + input_dim = embedding_concat.output_dim + + gaussian_fc_actor = GaussianFullyConnectedActor( + state_dim=input_dim, 
action_dim=action_dim, sizes=self.sizes, activations=self.activations, use_batch_norm=self.use_batch_norm, use_layer_norm=self.use_layer_norm, + use_l2_normalization=self.use_l2_normalization, + ) + + if not embedding_dim: + return gaussian_fc_actor + + assert embedding_concat is not None + return models.Sequential( # type: ignore + embedding_concat, + rlt.TensorFeatureData(), + gaussian_fc_actor, ) diff --git a/reagent/net_builder/continuous_actor_net_builder.py b/reagent/net_builder/continuous_actor_net_builder.py index 025dc1e80..4845ed85e 100644 --- a/reagent/net_builder/continuous_actor_net_builder.py +++ b/reagent/net_builder/continuous_actor_net_builder.py @@ -1,25 +1,31 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import abc +import reagent.core.types as rlt import torch -from reagent.core.registry_meta import RegistryMeta +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData from reagent.models.base import ModelBase -from reagent.parameters import NormalizationData -from reagent.prediction.predictor_wrapper import ActorWithPreprocessor +from reagent.prediction.predictor_wrapper import ( + ActorWithPreprocessor, + RankingActorWithPreprocessor, +) from reagent.preprocessing.postprocessor import Postprocessor from reagent.preprocessing.preprocessor import Preprocessor -try: +if IS_FB_ENVIRONMENT: from reagent.fb.prediction.fb_predictor_wrapper import ( FbActorPredictorWrapper as ActorPredictorWrapper, + FbRankingActorPredictorWrapper as RankingActorPredictorWrapper, ) -except ImportError: +else: from reagent.prediction.predictor_wrapper import ActorPredictorWrapper -class ContinuousActorNetBuilder(metaclass=RegistryMeta): +class ContinuousActorNetBuilder: """ Base class for continuous actor net builder. 
""" @@ -32,6 +38,7 @@ def default_action_preprocessing(self) -> str: @abc.abstractmethod def build_actor( self, + state_feature_config: rlt.ModelFeatureConfig, state_normalization_data: NormalizationData, action_normalization_data: NormalizationData, ) -> ModelBase: @@ -40,8 +47,10 @@ def build_actor( def build_serving_module( self, actor: ModelBase, + state_feature_config: rlt.ModelFeatureConfig, state_normalization_data: NormalizationData, action_normalization_data: NormalizationData, + serve_mean_policy: bool = False, ) -> torch.nn.Module: """ Returns a TorchScript predictor module @@ -54,9 +63,44 @@ def build_serving_module( action_normalization_data.dense_normalization_parameters, use_gpu=False ) actor_with_preprocessor = ActorWithPreprocessor( - actor.cpu_model().eval(), state_preprocessor, postprocessor + actor.cpu_model().eval(), + state_preprocessor, + state_feature_config, + postprocessor, + serve_mean_policy=serve_mean_policy, ) action_features = Preprocessor( action_normalization_data.dense_normalization_parameters, use_gpu=False ).sorted_features - return ActorPredictorWrapper(actor_with_preprocessor, action_features) + return ActorPredictorWrapper( + actor_with_preprocessor, state_feature_config, action_features + ) + + def build_ranking_serving_module( + self, + actor: ModelBase, + state_normalization_data: NormalizationData, + candidate_normalization_data: NormalizationData, + num_candidates: int, + action_normalization_data: NormalizationData, + ) -> torch.nn.Module: + state_preprocessor = Preprocessor( + state_normalization_data.dense_normalization_parameters, use_gpu=False + ) + candidate_preprocessor = Preprocessor( + candidate_normalization_data.dense_normalization_parameters, use_gpu=False + ) + postprocessor = Postprocessor( + action_normalization_data.dense_normalization_parameters, use_gpu=False + ) + actor_with_preprocessor = RankingActorWithPreprocessor( + model=actor.cpu_model().eval(), + state_preprocessor=state_preprocessor, + candidate_preprocessor=candidate_preprocessor, + num_candidates=num_candidates, + action_postprocessor=postprocessor, + ) + action_features = Preprocessor( + action_normalization_data.dense_normalization_parameters, use_gpu=False + ).sorted_features + return RankingActorPredictorWrapper(actor_with_preprocessor, action_features) diff --git a/reagent/workflow/tagged_union.py b/reagent/net_builder/discrete_actor/__init__.py similarity index 60% rename from reagent/workflow/tagged_union.py rename to reagent/net_builder/discrete_actor/__init__.py index 28fc3ab1f..eb61076d3 100644 --- a/reagent/workflow/tagged_union.py +++ b/reagent/net_builder/discrete_actor/__init__.py @@ -1,4 +1,4 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -from reagent.core.tagged_union import TaggedUnion # noqa F401 +from . import fully_connected # noqa diff --git a/reagent/net_builder/discrete_actor/fully_connected.py b/reagent/net_builder/discrete_actor/fully_connected.py new file mode 100644 index 000000000..2e585a4f9 --- /dev/null +++ b/reagent/net_builder/discrete_actor/fully_connected.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +from typing import List, Optional + +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash +from reagent.models.actor import FullyConnectedActor +from reagent.models.base import ModelBase +from reagent.net_builder.discrete_actor_net_builder import DiscreteActorNetBuilder +from reagent.preprocessing.normalization import get_num_output_features + + +@dataclass +class FullyConnected(DiscreteActorNetBuilder): + __hash__ = param_hash + + sizes: List[int] = field(default_factory=lambda: [128, 64]) + activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) + use_batch_norm: bool = False + use_layer_norm: bool = False + action_activation: str = "tanh" + exploration_variance: Optional[float] = None + + def __post_init_post_parse__(self) -> None: + super().__init__() + assert len(self.sizes) == len(self.activations), ( + f"Must have the same numbers of sizes and activations; got: " + f"{self.sizes}, {self.activations}" + ) + + def build_actor( + self, + state_normalization_data: NormalizationData, + num_actions: int, + ) -> ModelBase: + state_dim = get_num_output_features( + state_normalization_data.dense_normalization_parameters + ) + return FullyConnectedActor( + state_dim=state_dim, + action_dim=num_actions, + sizes=self.sizes, + activations=self.activations, + use_batch_norm=self.use_batch_norm, + action_activation=self.action_activation, + exploration_variance=self.exploration_variance, + ) diff --git a/reagent/net_builder/discrete_actor_net_builder.py b/reagent/net_builder/discrete_actor_net_builder.py new file mode 100644 index 000000000..bc319f470 --- /dev/null +++ b/reagent/net_builder/discrete_actor_net_builder.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import abc +from typing import List + +import reagent.core.types as rlt +import torch +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData +from reagent.models.base import ModelBase +from reagent.prediction.predictor_wrapper import ActorWithPreprocessor +from reagent.preprocessing.preprocessor import Preprocessor + + +if IS_FB_ENVIRONMENT: + from reagent.fb.prediction.fb_predictor_wrapper import ( + FbActorPredictorWrapper as ActorPredictorWrapper, + ) +else: + from reagent.prediction.predictor_wrapper import ActorPredictorWrapper + + +class DiscreteActorNetBuilder: + """ + Base class for discrete actor net builder. 
+ """ + + @abc.abstractmethod + def build_actor( + self, + state_normalization_data: NormalizationData, + num_actions: int, + ) -> ModelBase: + pass + + def build_serving_module( + self, + actor: ModelBase, + state_feature_config: rlt.ModelFeatureConfig, + state_normalization_data: NormalizationData, + action_feature_ids: List[int], + ) -> torch.nn.Module: + """ + Returns a TorchScript predictor module + """ + + state_preprocessor = Preprocessor( + state_normalization_data.dense_normalization_parameters, use_gpu=False + ) + actor_with_preprocessor = ActorWithPreprocessor( + actor.cpu_model().eval(), state_preprocessor, state_feature_config + ) + return ActorPredictorWrapper( + actor_with_preprocessor, state_feature_config, action_feature_ids + ) diff --git a/reagent/net_builder/discrete_dqn/__init__.py b/reagent/net_builder/discrete_dqn/__init__.py index 5e350a987..c7e68d196 100644 --- a/reagent/net_builder/discrete_dqn/__init__.py +++ b/reagent/net_builder/discrete_dqn/__init__.py @@ -1,4 +1,8 @@ #!/usr/bin/env python3 -from . import dueling # noqa -from . import fully_connected # noqa -from . import fully_connected_with_embedding # noqa +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from . import ( # noqa # noqa # noqa + dueling, + fully_connected, + fully_connected_with_embedding, +) diff --git a/reagent/net_builder/discrete_dqn/dueling.py b/reagent/net_builder/discrete_dqn/dueling.py index fc2fe4b2e..23a66853e 100644 --- a/reagent/net_builder/discrete_dqn/dueling.py +++ b/reagent/net_builder/discrete_dqn/dueling.py @@ -1,13 +1,14 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import List -from reagent import types as rlt +from reagent.core import types as rlt from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash from reagent.models.base import ModelBase from reagent.models.dueling_q_network import DuelingQNetwork from reagent.net_builder.discrete_dqn_net_builder import DiscreteDQNNetBuilder -from reagent.parameters import NormalizationData, param_hash @dataclass @@ -17,7 +18,7 @@ class Dueling(DiscreteDQNNetBuilder): sizes: List[int] = field(default_factory=lambda: [256, 128]) activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " f"{self.sizes}, {self.activations}" diff --git a/reagent/net_builder/discrete_dqn/fully_connected.py b/reagent/net_builder/discrete_dqn/fully_connected.py index fa2d033a6..cd3a6916d 100644 --- a/reagent/net_builder/discrete_dqn/fully_connected.py +++ b/reagent/net_builder/discrete_dqn/fully_connected.py @@ -1,13 +1,14 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
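# Illustrative sketch (not part of the patch): configuring the new discrete-actor net builder above.
# The builder itself is a config dataclass; build_actor and build_serving_module still need a
# NormalizationData / ModelFeatureConfig produced elsewhere by ReAgent's preprocessing pipeline.
from reagent.net_builder.discrete_actor.fully_connected import FullyConnected

builder = FullyConnected(sizes=[128, 64], activations=["relu", "relu"])
# builder.build_actor(state_normalization_data, num_actions=4) -> FullyConnectedActor over 4 actions
# builder.build_serving_module(actor, state_feature_config, state_normalization_data,
#                              action_feature_ids) -> ActorPredictorWrapper ready for TorchScript export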
from typing import List -from reagent import types as rlt +from reagent.core import types as rlt from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash from reagent.models.base import ModelBase from reagent.models.dqn import FullyConnectedDQN from reagent.net_builder.discrete_dqn_net_builder import DiscreteDQNNetBuilder -from reagent.parameters import NormalizationData, param_hash @dataclass @@ -17,8 +18,9 @@ class FullyConnected(DiscreteDQNNetBuilder): sizes: List[int] = field(default_factory=lambda: [256, 128]) activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) dropout_ratio: float = 0.0 + use_batch_norm: bool = False - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: super().__init__() assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " @@ -38,4 +40,5 @@ def build_q_network( sizes=self.sizes, activations=self.activations, dropout_ratio=self.dropout_ratio, + use_batch_norm=self.use_batch_norm, ) diff --git a/reagent/net_builder/discrete_dqn/fully_connected_with_embedding.py b/reagent/net_builder/discrete_dqn/fully_connected_with_embedding.py index b61dbae0c..3bb51fc28 100644 --- a/reagent/net_builder/discrete_dqn/fully_connected_with_embedding.py +++ b/reagent/net_builder/discrete_dqn/fully_connected_with_embedding.py @@ -1,24 +1,24 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import List import reagent.models as models -from reagent import types as rlt +from reagent.core import types as rlt from reagent.core.dataclasses import dataclass, field -from reagent.net_builder.discrete_dqn_net_builder import DiscreteDQNWithIdListNetBuilder -from reagent.parameters import NormalizationData, param_hash +from reagent.core.parameters import NormalizationData, param_hash +from reagent.net_builder.discrete_dqn_net_builder import DiscreteDQNNetBuilder @dataclass -class FullyConnectedWithEmbedding(DiscreteDQNWithIdListNetBuilder): +class FullyConnectedWithEmbedding(DiscreteDQNNetBuilder): __hash__ = param_hash sizes: List[int] = field(default_factory=lambda: [256, 128]) activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) - embedding_dim: int = 64 dropout_ratio: float = 0.0 - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: super().__init__() assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " @@ -31,11 +31,10 @@ def build_q_network( state_normalization_data: NormalizationData, output_dim: int, ) -> models.ModelBase: - state_dim = self._get_input_dim(state_normalization_data) + state_dense_dim = self._get_input_dim(state_normalization_data) embedding_concat = models.EmbeddingBagConcat( - state_dim=state_dim, + state_dense_dim=state_dense_dim, model_feature_config=state_feature_config, - embedding_dim=self.embedding_dim, ) return models.Sequential( # type: ignore embedding_concat, diff --git a/reagent/net_builder/discrete_dqn_net_builder.py b/reagent/net_builder/discrete_dqn_net_builder.py index 598d41566..4235453fb 100644 --- a/reagent/net_builder/discrete_dqn_net_builder.py +++ b/reagent/net_builder/discrete_dqn_net_builder.py @@ -1,34 +1,34 @@ #!/usr/bin/env python3 - +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import abc from typing import List -import reagent.types as rlt +import reagent.core.types as rlt import torch -from reagent.core.registry_meta import RegistryMeta +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData from reagent.models.base import ModelBase -from reagent.parameters import NormalizationData from reagent.prediction.predictor_wrapper import ( + BinaryDifferenceScorerWithPreprocessor, DiscreteDqnWithPreprocessor, - DiscreteDqnWithPreprocessorWithIdList, ) from reagent.preprocessing.normalization import get_num_output_features from reagent.preprocessing.preprocessor import Preprocessor -try: +if IS_FB_ENVIRONMENT: from reagent.fb.prediction.fb_predictor_wrapper import ( + FbBinaryDifferenceScorerPredictorWrapper as BinaryDifferenceScorerPredictorWrapper, FbDiscreteDqnPredictorWrapper as DiscreteDqnPredictorWrapper, - FbDiscreteDqnPredictorWrapperWithIdList as DiscreteDqnPredictorWrapperWithIdList, ) -except ImportError: +else: from reagent.prediction.predictor_wrapper import ( + BinaryDifferenceScorerPredictorWrapper, DiscreteDqnPredictorWrapper, - DiscreteDqnPredictorWrapperWithIdList, ) -class DiscreteDQNNetBuilder(metaclass=RegistryMeta): +class DiscreteDQNNetBuilder: """ Base class for discrete DQN net builder. """ @@ -53,6 +53,7 @@ def build_serving_module( state_normalization_data: NormalizationData, action_names: List[str], state_feature_config: rlt.ModelFeatureConfig, + predictor_wrapper_type=None, ) -> torch.nn.Module: """ Returns a TorchScript predictor module @@ -61,19 +62,14 @@ def build_serving_module( state_normalization_data.dense_normalization_parameters, False ) dqn_with_preprocessor = DiscreteDqnWithPreprocessor( - q_network.cpu_model().eval(), state_preprocessor + q_network.cpu_model().eval(), state_preprocessor, state_feature_config ) - return DiscreteDqnPredictorWrapper( + predictor_wrapper_type = predictor_wrapper_type or DiscreteDqnPredictorWrapper + return predictor_wrapper_type( dqn_with_preprocessor, action_names, state_feature_config ) - -class DiscreteDQNWithIdListNetBuilder(DiscreteDQNNetBuilder): - """ - Use this in case the model expects ID-list features - """ - - def build_serving_module( + def build_binary_difference_scorer( self, q_network: ModelBase, state_normalization_data: NormalizationData, @@ -81,14 +77,17 @@ def build_serving_module( state_feature_config: rlt.ModelFeatureConfig, ) -> torch.nn.Module: """ - Returns a TorchScript predictor module + Returns softmax(1) - softmax(0) """ + assert len(action_names) == 2 state_preprocessor = Preprocessor( state_normalization_data.dense_normalization_parameters, False ) - dqn_with_preprocessor = DiscreteDqnWithPreprocessorWithIdList( - q_network.cpu_model().eval(), state_preprocessor, state_feature_config + binary_difference_scorer_with_preprocessor = ( + BinaryDifferenceScorerWithPreprocessor( + q_network.cpu_model().eval(), state_preprocessor, state_feature_config + ) ) - return DiscreteDqnPredictorWrapperWithIdList( - dqn_with_preprocessor, action_names, state_feature_config + return BinaryDifferenceScorerPredictorWrapper( + binary_difference_scorer_with_preprocessor, state_feature_config ) diff --git a/reagent/net_builder/parametric_dqn/__init__.py b/reagent/net_builder/parametric_dqn/__init__.py index dad31c172..eb61076d3 100644 --- a/reagent/net_builder/parametric_dqn/__init__.py +++ b/reagent/net_builder/parametric_dqn/__init__.py @@ -1,2 +1,4 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved. + from . import fully_connected # noqa diff --git a/reagent/net_builder/parametric_dqn/fully_connected.py b/reagent/net_builder/parametric_dqn/fully_connected.py index 2621c2a8e..3d81dbcfb 100644 --- a/reagent/net_builder/parametric_dqn/fully_connected.py +++ b/reagent/net_builder/parametric_dqn/fully_connected.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import List from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash from reagent.models.base import ModelBase from reagent.models.critic import FullyConnectedCritic from reagent.net_builder.parametric_dqn_net_builder import ParametricDQNNetBuilder -from reagent.parameters import NormalizationData, param_hash from reagent.preprocessing.normalization import get_num_output_features @@ -18,8 +19,9 @@ class FullyConnected(ParametricDQNNetBuilder): activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) use_batch_norm: bool = False use_layer_norm: bool = False + final_activation: str = "linear" - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: super().__init__() assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " @@ -46,4 +48,5 @@ def build_q_network( use_batch_norm=self.use_batch_norm, use_layer_norm=self.use_layer_norm, output_dim=output_dim, + final_activation=self.final_activation, ) diff --git a/reagent/net_builder/parametric_dqn_net_builder.py b/reagent/net_builder/parametric_dqn_net_builder.py index 5541585ff..d37091cc7 100644 --- a/reagent/net_builder/parametric_dqn_net_builder.py +++ b/reagent/net_builder/parametric_dqn_net_builder.py @@ -1,24 +1,25 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import abc import torch -from reagent.core.registry_meta import RegistryMeta +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData from reagent.models.base import ModelBase -from reagent.parameters import NormalizationData from reagent.prediction.predictor_wrapper import ParametricDqnWithPreprocessor from reagent.preprocessing.preprocessor import Preprocessor -try: +if IS_FB_ENVIRONMENT: from reagent.fb.prediction.fb_predictor_wrapper import ( FbParametricDqnPredictorWrapper as ParametricDqnPredictorWrapper, ) -except ImportError: +else: from reagent.prediction.predictor_wrapper import ParametricDqnPredictorWrapper -class ParametricDQNNetBuilder(metaclass=RegistryMeta): +class ParametricDQNNetBuilder: """ Base class for parametric DQN net builder. """ diff --git a/reagent/net_builder/quantile_dqn/__init__.py b/reagent/net_builder/quantile_dqn/__init__.py index 554d11092..b548ea269 100644 --- a/reagent/net_builder/quantile_dqn/__init__.py +++ b/reagent/net_builder/quantile_dqn/__init__.py @@ -1,3 +1,3 @@ #!/usr/bin/env python3 -from . import dueling_quantile # noqa -from . import quantile # noqa +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from . import dueling_quantile, quantile # noqa # noqa diff --git a/reagent/net_builder/quantile_dqn/dueling_quantile.py b/reagent/net_builder/quantile_dqn/dueling_quantile.py index 6da8cc975..8076c7cb1 100644 --- a/reagent/net_builder/quantile_dqn/dueling_quantile.py +++ b/reagent/net_builder/quantile_dqn/dueling_quantile.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. 
and its affiliates. All rights reserved. from typing import List from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash from reagent.models.base import ModelBase from reagent.models.dueling_q_network import DuelingQNetwork from reagent.net_builder.quantile_dqn_net_builder import QRDQNNetBuilder -from reagent.parameters import NormalizationData, param_hash @dataclass @@ -16,7 +17,7 @@ class DuelingQuantile(QRDQNNetBuilder): sizes: List[int] = field(default_factory=lambda: [256, 128]) activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " f"{self.sizes}, {self.activations}" diff --git a/reagent/net_builder/quantile_dqn/quantile.py b/reagent/net_builder/quantile_dqn/quantile.py index b1c9154d9..8ed83ef49 100644 --- a/reagent/net_builder/quantile_dqn/quantile.py +++ b/reagent/net_builder/quantile_dqn/quantile.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import List from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash from reagent.models.base import ModelBase from reagent.models.dqn import FullyConnectedDQN from reagent.net_builder.quantile_dqn_net_builder import QRDQNNetBuilder -from reagent.parameters import NormalizationData, param_hash @dataclass @@ -17,7 +18,7 @@ class Quantile(QRDQNNetBuilder): activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) dropout_ratio: float = 0.0 - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: super().__init__() assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " diff --git a/reagent/net_builder/quantile_dqn_net_builder.py b/reagent/net_builder/quantile_dqn_net_builder.py index 867a9be48..576dfbc6d 100644 --- a/reagent/net_builder/quantile_dqn_net_builder.py +++ b/reagent/net_builder/quantile_dqn_net_builder.py @@ -1,23 +1,24 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import abc from typing import List -import reagent.types as rlt +import reagent.core.types as rlt import torch -from reagent.core.registry_meta import RegistryMeta -from reagent.models.base import ModelBase -from reagent.parameters import NormalizationData +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData +from reagent.models import ModelBase, Sequential from reagent.prediction.predictor_wrapper import DiscreteDqnWithPreprocessor from reagent.preprocessing.normalization import get_num_output_features from reagent.preprocessing.preprocessor import Preprocessor -try: +if IS_FB_ENVIRONMENT: from reagent.fb.prediction.fb_predictor_wrapper import ( FbDiscreteDqnPredictorWrapper as DiscreteDqnPredictorWrapper, ) -except ImportError: +else: from reagent.prediction.predictor_wrapper import DiscreteDqnPredictorWrapper @@ -27,7 +28,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: return torch.mean(input, dim=2) -class QRDQNNetBuilder(metaclass=RegistryMeta): +class QRDQNNetBuilder: """ Base class for QRDQN net builder. 
""" @@ -60,10 +61,9 @@ def build_serving_module( state_normalization_data.dense_normalization_parameters, False ) dqn_with_preprocessor = DiscreteDqnWithPreprocessor( - torch.nn.Sequential( # type: ignore - q_network.cpu_model().eval(), _Mean() - ), + Sequential(q_network.cpu_model().eval(), _Mean()), # type: ignore state_preprocessor, + state_feature_config, ) return DiscreteDqnPredictorWrapper( dqn_with_preprocessor, action_names, state_feature_config diff --git a/reagent/net_builder/slate_ranking/__init__.py b/reagent/net_builder/slate_ranking/__init__.py new file mode 100644 index 000000000..4a830764e --- /dev/null +++ b/reagent/net_builder/slate_ranking/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from typing import Optional + +from reagent.core.registry_meta import wrap_oss_with_dataclass +from reagent.core.tagged_union import TaggedUnion + +from .slate_ranking_scorer import SlateRankingScorer as SlateRankingScorerT +from .slate_ranking_transformer import ( + SlateRankingTransformer as SlateRankingTransformerType, +) + + +@wrap_oss_with_dataclass +class SlateRankingNetBuilder__Union(TaggedUnion): + SlateRankingTransformer: Optional[SlateRankingTransformerType] = None + SlateRankingScorer: Optional[SlateRankingScorerT] = None diff --git a/reagent/net_builder/slate_ranking/slate_ranking_scorer.py b/reagent/net_builder/slate_ranking/slate_ranking_scorer.py new file mode 100644 index 000000000..64275e06c --- /dev/null +++ b/reagent/net_builder/slate_ranking/slate_ranking_scorer.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from dataclasses import asdict +from typing import List, Optional + +import torch +import torch.nn as nn +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import param_hash +from reagent.models.base import ModelBase +from reagent.models.fully_connected_network import FullyConnectedNetwork +from reagent.models.mlp_scorer import MLPScorer +from reagent.net_builder.slate_ranking_net_builder import SlateRankingNetBuilder + + +class ScoreCap(nn.Module): + def __init__(self, cap: float) -> None: + super().__init__() + self.cap = cap + + def forward(self, input): + return torch.clip(input, max=self.cap) + + +@dataclass +class FinalLayer: + score_cap: Optional[float] = None + sigmoid: bool = False + tanh: bool = False + + def __post_init_post_parse__(self) -> None: + assert ( + sum(map(lambda x: int(bool(x)), asdict(self).values())) <= 1 + ), f"More than one option set {self}" + + def get(self): + if self.score_cap: + return ScoreCap(self.score_cap) + + if self.sigmoid: + return nn.Sigmoid() + + if self.tanh: + return nn.Tanh() + + return nn.Identity() + + +@dataclass +class SlateRankingScorer(SlateRankingNetBuilder): + __hash__ = param_hash + + # For MLP + hidden_layers: List[int] = field(default_factory=lambda: [64, 32]) + activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) + use_batch_norm: bool = False + min_std: float = 0.0 + dropout_ratio: float = 0.0 + use_layer_norm: bool = False + normalize_output: bool = False + orthogonal_init: bool = False + + # For MLP Scorer + # if disabled, ignores the state features + has_user_feat: bool = False # TODO: deprecate + final_layer: FinalLayer = field( + default_factory=FinalLayer + ) # TODO: if score cap not needed, deprecate + + # pyre-fixme[14]: `build_slate_ranking_network` overrides method defined in + # 
`SlateRankingNetBuilder` inconsistently. + def build_slate_ranking_network( + self, state_dim, candidate_dim, _candidate_size=None, _slate_size=None + ) -> ModelBase: + # pointwise MLP + input_dim = state_dim + candidate_dim + output_dim = 1 + layers = [input_dim, *self.hidden_layers, output_dim] + activations = [ + *self.activations, + # identity, but we'll add our own final layer + "linear", + ] + mlp = FullyConnectedNetwork( + layers=layers, + activations=activations, + use_batch_norm=self.use_batch_norm, + min_std=self.min_std, + dropout_ratio=self.dropout_ratio, + use_layer_norm=self.use_layer_norm, + normalize_output=self.normalize_output, + orthogonal_init=self.orthogonal_init, + ) + mlp = nn.Sequential( + *[ + mlp, + self.final_layer.get(), + ] + ) + return MLPScorer(mlp=mlp, has_user_feat=self.has_user_feat) diff --git a/reagent/net_builder/slate_ranking/slate_ranking_transformer.py b/reagent/net_builder/slate_ranking/slate_ranking_transformer.py new file mode 100644 index 000000000..6a36a6770 --- /dev/null +++ b/reagent/net_builder/slate_ranking/slate_ranking_transformer.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import param_hash, TransformerParameters +from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch +from reagent.models.base import ModelBase +from reagent.models.seq2slate import Seq2SlateTransformerNet +from reagent.net_builder.slate_ranking_net_builder import SlateRankingNetBuilder + + +@dataclass +class SlateRankingTransformer(SlateRankingNetBuilder): + __hash__ = param_hash + + output_arch: Seq2SlateOutputArch = Seq2SlateOutputArch.AUTOREGRESSIVE + temperature: float = 1.0 + transformer: TransformerParameters = field( + default_factory=lambda: TransformerParameters( + num_heads=2, dim_model=16, dim_feedforward=16, num_stacked_layers=2 + ) + ) + + def build_slate_ranking_network( + self, state_dim, candidate_dim, candidate_size, slate_size + ) -> ModelBase: + return Seq2SlateTransformerNet( + state_dim=state_dim, + candidate_dim=candidate_dim, + num_stacked_layers=self.transformer.num_stacked_layers, + num_heads=self.transformer.num_heads, + dim_model=self.transformer.dim_model, + dim_feedforward=self.transformer.dim_feedforward, + max_src_seq_len=candidate_size, + max_tgt_seq_len=slate_size, + output_arch=self.output_arch, + temperature=self.temperature, + state_embed_dim=self.transformer.state_embed_dim, + ) diff --git a/reagent/net_builder/slate_ranking_net_builder.py b/reagent/net_builder/slate_ranking_net_builder.py new file mode 100644 index 000000000..33ff3adf9 --- /dev/null +++ b/reagent/net_builder/slate_ranking_net_builder.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import abc + +import torch + + +class SlateRankingNetBuilder: + """ + Base class for slate ranking network builder. + """ + + @abc.abstractmethod + def build_slate_ranking_network( + self, state_dim, candidate_dim, candidate_size, slate_size + ) -> torch.nn.Module: + pass diff --git a/reagent/net_builder/slate_reward/__init__.py b/reagent/net_builder/slate_reward/__init__.py new file mode 100644 index 000000000..0cfd572fd --- /dev/null +++ b/reagent/net_builder/slate_reward/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
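# A small sketch (not from the commit) of two details in slate_ranking_scorer.py above, with
# hypothetical numbers: the pointwise-MLP layer bookkeeping in build_slate_ranking_network, and
# the clipping that ScoreCap applies.
import torch

state_dim, candidate_dim, hidden_layers = 10, 5, [64, 32]
layers = [state_dim + candidate_dim, *hidden_layers, 1]   # [15, 64, 32, 1]
activations = ["relu", "relu", "linear"]                  # user activations plus the appended "linear"
assert layers == [15, 64, 32, 1] and len(activations) == len(layers) - 1

capped = torch.clip(torch.tensor([0.25, 5.0]), max=1.0)   # what ScoreCap(cap=1.0) computes
assert capped.tolist() == [0.25, 1.0]
# FinalLayer allows at most one of score_cap / sigmoid / tanh; FinalLayer().get() is nn.Identity(),
# and setting two options trips the "More than one option set" assertion.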
+ +from typing import Optional + +from reagent.core.registry_meta import wrap_oss_with_dataclass +from reagent.core.tagged_union import TaggedUnion + +from .slate_reward_gru import SlateRewardGRU as SlateRewardGRUType +from .slate_reward_transformer import ( + SlateRewardTransformer as SlateRewardTransformerType, +) + + +@wrap_oss_with_dataclass +class SlateRewardNetBuilder__Union(TaggedUnion): + SlateRewardGRU: Optional[SlateRewardGRUType] = None + SlateRewardTransformer: Optional[SlateRewardTransformerType] = None diff --git a/reagent/net_builder/slate_reward/slate_reward_gru.py b/reagent/net_builder/slate_reward/slate_reward_gru.py new file mode 100644 index 000000000..2780b3e6d --- /dev/null +++ b/reagent/net_builder/slate_reward/slate_reward_gru.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import GRUParameters, param_hash +from reagent.models.base import ModelBase +from reagent.models.seq2slate_reward import Seq2SlateGRURewardNet +from reagent.net_builder.slate_reward_net_builder import SlateRewardNetBuilder + + +@dataclass +class SlateRewardGRU(SlateRewardNetBuilder): + __hash__ = param_hash + + gru: GRUParameters = field( + default_factory=lambda: GRUParameters(dim_model=16, num_stacked_layers=2) + ) + fit_slate_wise_reward: bool = True + + def build_slate_reward_network( + self, state_dim, candidate_dim, candidate_size, slate_size + ) -> ModelBase: + seq2slate_reward_net = Seq2SlateGRURewardNet( + state_dim=state_dim, + candidate_dim=candidate_dim, + num_stacked_layers=self.gru.num_stacked_layers, + dim_model=self.gru.dim_model, + max_src_seq_len=candidate_size, + max_tgt_seq_len=slate_size, + ) + return seq2slate_reward_net + + @property + def expect_slate_wise_reward(self) -> bool: + return self.fit_slate_wise_reward diff --git a/reagent/net_builder/slate_reward/slate_reward_transformer.py b/reagent/net_builder/slate_reward/slate_reward_transformer.py new file mode 100644 index 000000000..a1ba0592f --- /dev/null +++ b/reagent/net_builder/slate_reward/slate_reward_transformer.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
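# Illustrative sketch (not from the commit): selecting a builder through the tagged union defined
# in slate_reward/__init__.py above. Exactly one member is set; downstream code is assumed to read
# the selected builder back via the union's `.value` accessor, which lives in ReAgent's TaggedUnion
# outside this diff.
from reagent.net_builder.slate_reward import SlateRewardNetBuilder__Union
from reagent.net_builder.slate_reward.slate_reward_gru import SlateRewardGRU

union = SlateRewardNetBuilder__Union(SlateRewardGRU=SlateRewardGRU())
builder = union.value  # assumed accessor; returns the SlateRewardGRU instance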
+ +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import param_hash, TransformerParameters +from reagent.models.base import ModelBase +from reagent.models.seq2slate_reward import Seq2SlateTransformerRewardNet +from reagent.net_builder.slate_reward_net_builder import SlateRewardNetBuilder + + +@dataclass +class SlateRewardTransformer(SlateRewardNetBuilder): + __hash__ = param_hash + + transformer: TransformerParameters = field( + default_factory=lambda: TransformerParameters( + num_heads=2, dim_model=16, dim_feedforward=16, num_stacked_layers=2 + ) + ) + fit_slate_wise_reward: bool = True + + def build_slate_reward_network( + self, state_dim, candidate_dim, candidate_size, slate_size + ) -> ModelBase: + seq2slate_reward_net = Seq2SlateTransformerRewardNet( + state_dim=state_dim, + candidate_dim=candidate_dim, + num_stacked_layers=self.transformer.num_stacked_layers, + num_heads=self.transformer.num_heads, + dim_model=self.transformer.dim_model, + dim_feedforward=self.transformer.dim_feedforward, + max_src_seq_len=candidate_size, + max_tgt_seq_len=slate_size, + ) + return seq2slate_reward_net + + @property + def expect_slate_wise_reward(self) -> bool: + return self.fit_slate_wise_reward diff --git a/reagent/net_builder/slate_reward_net_builder.py b/reagent/net_builder/slate_reward_net_builder.py new file mode 100644 index 000000000..d7370e96d --- /dev/null +++ b/reagent/net_builder/slate_reward_net_builder.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import abc + +import torch + + +class SlateRewardNetBuilder: + """ + Base class for slate reward network builder. + """ + + @abc.abstractmethod + def build_slate_reward_network( + self, state_dim, candidate_dim, candidate_size, slate_size + ) -> torch.nn.Module: + pass + + @abc.abstractproperty + def expect_slate_wise_reward(self) -> bool: + pass diff --git a/reagent/net_builder/synthetic_reward/__init__.py b/reagent/net_builder/synthetic_reward/__init__.py new file mode 100644 index 000000000..afbdbcf46 --- /dev/null +++ b/reagent/net_builder/synthetic_reward/__init__.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from . import ( # noqa # noqa # noqa # noqa + ngram_synthetic_reward, + sequence_synthetic_reward, + single_step_synthetic_reward, + single_step_synthetic_reward_sparse_arch, +) diff --git a/reagent/net_builder/synthetic_reward/ngram_synthetic_reward.py b/reagent/net_builder/synthetic_reward/ngram_synthetic_reward.py new file mode 100644 index 000000000..39ecabe2b --- /dev/null +++ b/reagent/net_builder/synthetic_reward/ngram_synthetic_reward.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
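# A minimal sketch (not part of the patch): configuring the transformer-based slate reward builder
# above. TransformerParameters carries the encoder hyper-parameters used by the default_factory,
# and expect_slate_wise_reward simply mirrors fit_slate_wise_reward.
from reagent.core.parameters import TransformerParameters
from reagent.net_builder.slate_reward.slate_reward_transformer import SlateRewardTransformer

builder = SlateRewardTransformer(
    transformer=TransformerParameters(
        num_heads=2, dim_model=16, dim_feedforward=16, num_stacked_layers=2
    ),
    fit_slate_wise_reward=False,
)
assert not builder.expect_slate_wise_reward
# builder.build_slate_reward_network(state_dim, candidate_dim, candidate_size, slate_size)
# returns a Seq2SlateTransformerRewardNet configured with these parameters.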
+ +from typing import List, Optional + +import reagent.core.types as rlt +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import ConvNetParameters, NormalizationData, param_hash +from reagent.models.base import ModelBase +from reagent.models.synthetic_reward import ( + NGramConvolutionalNetwork, + NGramFullyConnectedNetwork, + SyntheticRewardNet, +) +from reagent.net_builder.synthetic_reward_net_builder import SyntheticRewardNetBuilder +from reagent.preprocessing.normalization import get_num_output_features + + +@dataclass +class NGramSyntheticReward(SyntheticRewardNetBuilder): + __hash__ = param_hash + + sizes: List[int] = field(default_factory=lambda: [256, 128]) + activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) + last_layer_activation: str = "sigmoid" + context_size: int = 3 + use_layer_norm: bool = False + + def build_synthetic_reward_network( + self, + state_normalization_data: NormalizationData, + action_normalization_data: Optional[NormalizationData] = None, + discrete_action_names: Optional[List[str]] = None, + state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + action_feature_config: Optional[rlt.ModelFeatureConfig] = None, + ) -> ModelBase: + state_dim = get_num_output_features( + state_normalization_data.dense_normalization_parameters + ) + if not discrete_action_names: + assert action_normalization_data is not None + action_dim = get_num_output_features( + action_normalization_data.dense_normalization_parameters + ) + else: + action_dim = len(discrete_action_names) + + net = NGramFullyConnectedNetwork( + state_dim=state_dim, + action_dim=action_dim, + sizes=self.sizes, + activations=self.activations, + last_layer_activation=self.last_layer_activation, + context_size=self.context_size, + use_layer_norm=self.use_layer_norm, + ) + return SyntheticRewardNet(net) + + +@dataclass +class NGramConvNetSyntheticReward(SyntheticRewardNetBuilder): + __hash__ = param_hash + + sizes: List[int] = field(default_factory=lambda: [256, 128]) + activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) + last_layer_activation: str = "sigmoid" + context_size: int = 3 + conv_net_params: ConvNetParameters = field( + default_factory=lambda: ConvNetParameters( + conv_dims=[256, 128], + conv_height_kernels=[1, 1], + pool_types=["max", "max"], + pool_kernel_sizes=[1, 1], + ) + ) + use_layer_norm: bool = False + + def build_synthetic_reward_network( + self, + state_normalization_data: NormalizationData, + action_normalization_data: Optional[NormalizationData] = None, + discrete_action_names: Optional[List[str]] = None, + state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + action_feature_config: Optional[rlt.ModelFeatureConfig] = None, + ) -> ModelBase: + state_dim = get_num_output_features( + state_normalization_data.dense_normalization_parameters + ) + + if not discrete_action_names: + assert action_normalization_data is not None + action_dim = get_num_output_features( + action_normalization_data.dense_normalization_parameters + ) + else: + action_dim = len(discrete_action_names) + + net = NGramConvolutionalNetwork( + state_dim=state_dim, + action_dim=action_dim, + sizes=self.sizes, + activations=self.activations, + last_layer_activation=self.last_layer_activation, + context_size=self.context_size, + conv_net_params=self.conv_net_params, + use_layer_norm=self.use_layer_norm, + ) + return SyntheticRewardNet(net) diff --git a/reagent/net_builder/synthetic_reward/sequence_synthetic_reward.py 
b/reagent/net_builder/synthetic_reward/sequence_synthetic_reward.py new file mode 100644 index 000000000..5471e6191 --- /dev/null +++ b/reagent/net_builder/synthetic_reward/sequence_synthetic_reward.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from typing import List, Optional + +import reagent.core.types as rlt +from reagent.core.dataclasses import dataclass +from reagent.core.parameters import NormalizationData, param_hash +from reagent.models.base import ModelBase +from reagent.models.synthetic_reward import ( + SequenceSyntheticRewardNet, + SyntheticRewardNet, +) +from reagent.net_builder.synthetic_reward_net_builder import SyntheticRewardNetBuilder +from reagent.preprocessing.normalization import get_num_output_features + + +@dataclass +class SequenceSyntheticReward(SyntheticRewardNetBuilder): + __hash__ = param_hash + + lstm_hidden_size: int = 128 + lstm_num_layers: int = 2 + lstm_bidirectional: bool = False + last_layer_activation: str = "sigmoid" + + def build_synthetic_reward_network( + self, + state_normalization_data: NormalizationData, + action_normalization_data: Optional[NormalizationData] = None, + discrete_action_names: Optional[List[str]] = None, + state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + action_feature_config: Optional[rlt.ModelFeatureConfig] = None, + ) -> ModelBase: + state_dim = get_num_output_features( + state_normalization_data.dense_normalization_parameters + ) + if not discrete_action_names: + assert action_normalization_data is not None + action_dim = get_num_output_features( + action_normalization_data.dense_normalization_parameters + ) + else: + action_dim = len(discrete_action_names) + net = SequenceSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + lstm_hidden_size=self.lstm_hidden_size, + lstm_num_layers=self.lstm_num_layers, + lstm_bidirectional=self.lstm_bidirectional, + last_layer_activation=self.last_layer_activation, + ) + return SyntheticRewardNet(net=net) diff --git a/reagent/net_builder/synthetic_reward/single_step_synthetic_reward.py b/reagent/net_builder/synthetic_reward/single_step_synthetic_reward.py new file mode 100644 index 000000000..4cbdee540 --- /dev/null +++ b/reagent/net_builder/synthetic_reward/single_step_synthetic_reward.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
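# Editor's note: illustrative sketch, not part of this diff, of how the synthetic reward
# builders above resolve the action width. With discrete_action_names the action dimension
# is simply the number of actions; otherwise it is read from action_normalization_data.
# The NormalizationParameters construction below is schematic and its field names are
# assumed from reagent.core.parameters.
from reagent.core.parameters import NormalizationData, NormalizationParameters
from reagent.net_builder.synthetic_reward.sequence_synthetic_reward import (
    SequenceSyntheticReward,
)

state_norm = NormalizationData(
    dense_normalization_parameters={
        i: NormalizationParameters(feature_type="CONTINUOUS", mean=0.0, stddev=1.0)
        for i in range(4)  # four illustrative dense state features
    }
)

builder = SequenceSyntheticReward(lstm_hidden_size=64, lstm_num_layers=1)

# Discrete-action case: action_dim == len(discrete_action_names).
discrete_net = builder.build_synthetic_reward_network(
    state_normalization_data=state_norm,
    discrete_action_names=["no_op", "click"],
)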
+ +from typing import List, Optional + +import reagent.core.types as rlt +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash +from reagent.models.base import ModelBase +from reagent.models.synthetic_reward import ( + SingleStepSyntheticRewardNet, + SyntheticRewardNet, +) +from reagent.net_builder.synthetic_reward_net_builder import SyntheticRewardNetBuilder +from reagent.preprocessing.normalization import get_num_output_features + + +@dataclass +class SingleStepSyntheticReward(SyntheticRewardNetBuilder): + __hash__ = param_hash + + sizes: List[int] = field(default_factory=lambda: [256, 128]) + activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) + last_layer_activation: str = "sigmoid" + use_batch_norm: bool = False + use_layer_norm: bool = False + + def build_synthetic_reward_network( + self, + state_normalization_data: NormalizationData, + action_normalization_data: Optional[NormalizationData] = None, + discrete_action_names: Optional[List[str]] = None, + state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + action_feature_config: Optional[rlt.ModelFeatureConfig] = None, + ) -> ModelBase: + state_dim = get_num_output_features( + state_normalization_data.dense_normalization_parameters + ) + if not discrete_action_names: + assert action_normalization_data is not None + action_dim = get_num_output_features( + action_normalization_data.dense_normalization_parameters + ) + else: + action_dim = len(discrete_action_names) + net = SingleStepSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + sizes=self.sizes, + activations=self.activations, + last_layer_activation=self.last_layer_activation, + use_batch_norm=self.use_batch_norm, + use_layer_norm=self.use_layer_norm, + ) + return SyntheticRewardNet(net) diff --git a/reagent/net_builder/synthetic_reward/single_step_synthetic_reward_sparse_arch.py b/reagent/net_builder/synthetic_reward/single_step_synthetic_reward_sparse_arch.py new file mode 100644 index 000000000..f6825ceca --- /dev/null +++ b/reagent/net_builder/synthetic_reward/single_step_synthetic_reward_sparse_arch.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
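# Editor's note: schematic sketch, not part of this diff, of the intended end-to-end flow
# for SingleStepSyntheticReward above: build the trainable network, then export a
# TorchScript serving module through the base-class helper build_serving_module (defined
# in synthetic_reward_net_builder.py further down in this diff). state_norm / action_norm
# are assumed NormalizationData objects prepared elsewhere; seq_len is an arbitrary example.
from reagent.net_builder.synthetic_reward.single_step_synthetic_reward import (
    SingleStepSyntheticReward,
)

builder = SingleStepSyntheticReward(sizes=[128, 64], activations=["relu", "relu"])
reward_net = builder.build_synthetic_reward_network(
    state_normalization_data=state_norm,
    action_normalization_data=action_norm,
)
serving_module = builder.build_serving_module(
    seq_len=5,
    synthetic_reward_network=reward_net,
    state_normalization_data=state_norm,
    action_normalization_data=action_norm,
)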
+ +from typing import List, Optional + +import reagent.core.types as rlt +import torch +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import NormalizationData, param_hash +from reagent.core.utils import embedding_bag_configs_from_feature_configs +from reagent.models.base import ModelBase +from reagent.models.synthetic_reward_sparse_arch import ( + SingleStepSyntheticSparseArchRewardNet, + SyntheticRewardSparseArchNet, +) +from reagent.net_builder.synthetic_reward_net_builder import SyntheticRewardNetBuilder +from reagent.preprocessing.normalization import get_num_output_features +from torchrec import EmbeddingBagCollection, EmbeddingBagConfig + + +@dataclass +class SingleStepSparseArchSyntheticReward(SyntheticRewardNetBuilder): + __hash__ = param_hash + + dense_sizes: List[int] = field(default_factory=lambda: [256, 128]) + dense_activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) + overall_sizes: List[int] = field(default_factory=lambda: [128, 1]) + overall_activations: List[str] = field(default_factory=lambda: ["relu", "sigmoid"]) + use_batch_norm: bool = False + use_layer_norm: bool = False + + def build_synthetic_reward_network( + self, + state_normalization_data: NormalizationData, + action_normalization_data: Optional[NormalizationData] = None, + discrete_action_names: Optional[List[str]] = None, + state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + action_feature_config: Optional[rlt.ModelFeatureConfig] = None, + ) -> ModelBase: + # Sparse features will be read from state_feature_config/action_feature_config + feature_config_list: List[rlt.ModelFeatureConfig] = [] + assert state_feature_config is not None + feature_config_list.append(state_feature_config) + if discrete_action_names is None: + assert action_feature_config is not None + feature_config_list.append(action_feature_config) + + state_dense_dim = get_num_output_features( + state_normalization_data.dense_normalization_parameters + ) + if not discrete_action_names: + assert action_normalization_data is not None + action_dense_dim = get_num_output_features( + action_normalization_data.dense_normalization_parameters + ) + else: + action_dense_dim = len(discrete_action_names) + + embedding_bag_configs: List[ + EmbeddingBagConfig + ] = embedding_bag_configs_from_feature_configs( + feature_config_list, + ) + embedding_bag_col = EmbeddingBagCollection( + device=torch.device("meta"), tables=embedding_bag_configs + ) + net = SingleStepSyntheticSparseArchRewardNet( + state_dense_dim=state_dense_dim, + action_dense_dim=action_dense_dim, + dense_sizes=self.dense_sizes, + dense_activations=self.dense_activations, + overall_sizes=self.overall_sizes, + overall_activations=self.overall_activations, + embedding_bag_collection=embedding_bag_col, + use_batch_norm=self.use_batch_norm, + use_layer_norm=self.use_layer_norm, + ) + return SyntheticRewardSparseArchNet(net) diff --git a/reagent/net_builder/synthetic_reward/transformer_synthetic_reward.py b/reagent/net_builder/synthetic_reward/transformer_synthetic_reward.py new file mode 100644 index 000000000..b3232bdc6 --- /dev/null +++ b/reagent/net_builder/synthetic_reward/transformer_synthetic_reward.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
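# Editor's note: illustrative sketch, not part of this diff, of the sparse-feature path
# used by SingleStepSparseArchSyntheticReward above. embedding_bag_configs_from_feature_configs
# turns the id-list features declared in ModelFeatureConfig into torchrec EmbeddingBagConfigs;
# building the EmbeddingBagCollection on the "meta" device defers parameter allocation until
# the model is later materialized/sharded. The table below is a hand-written stand-in for
# what that helper would emit, with hypothetical names and sizes.
import torch
from torchrec import EmbeddingBagCollection, EmbeddingBagConfig

tables = [
    EmbeddingBagConfig(
        name="page_id_table",       # hypothetical embedding table
        embedding_dim=16,
        num_embeddings=1000,
        feature_names=["page_id"],  # hypothetical sparse id-list feature
    )
]
embedding_bag_col = EmbeddingBagCollection(tables=tables, device=torch.device("meta"))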
+ +from typing import List, Optional + +import reagent.core.types as rlt +from reagent.core.dataclasses import dataclass +from reagent.core.parameters import NormalizationData, param_hash +from reagent.models.base import ModelBase +from reagent.models.synthetic_reward import ( + SyntheticRewardNet, + TransformerSyntheticRewardNet, +) +from reagent.net_builder.synthetic_reward_net_builder import SyntheticRewardNetBuilder +from reagent.preprocessing.normalization import get_num_output_features + + +@dataclass +class TransformerSyntheticReward(SyntheticRewardNetBuilder): + __hash__ = param_hash + + nhead: int = 1 + d_model: int = 128 + num_encoder_layers: int = 2 + dim_feedforward: int = 128 + dropout: float = 0.0 + activation: str = "relu" + last_layer_activation: str = "leaky_relu" + layer_norm_eps: float = 1e-5 + max_len: int = 10 + + def build_synthetic_reward_network( + self, + state_normalization_data: NormalizationData, + action_normalization_data: Optional[NormalizationData] = None, + discrete_action_names: Optional[List[str]] = None, + state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + action_feature_config: Optional[rlt.ModelFeatureConfig] = None, + ) -> ModelBase: + state_dim = get_num_output_features( + state_normalization_data.dense_normalization_parameters + ) + if not discrete_action_names: + assert action_normalization_data is not None + action_dim = get_num_output_features( + action_normalization_data.dense_normalization_parameters + ) + else: + action_dim = len(discrete_action_names) + + net = TransformerSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + d_model=self.d_model, + nhead=self.nhead, + num_encoder_layers=self.num_encoder_layers, + dim_feedforward=self.dim_feedforward, + dropout=self.dropout, + activation=self.activation, + last_layer_activation=self.last_layer_activation, + layer_norm_eps=self.layer_norm_eps, + max_len=self.max_len, + ) + return SyntheticRewardNet(net=net) diff --git a/reagent/net_builder/synthetic_reward_net_builder.py b/reagent/net_builder/synthetic_reward_net_builder.py new file mode 100644 index 000000000..0d9b01326 --- /dev/null +++ b/reagent/net_builder/synthetic_reward_net_builder.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import abc +from typing import List, Optional + +import reagent.core.types as rlt +import torch +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData +from reagent.models.base import ModelBase +from reagent.preprocessing.preprocessor import Preprocessor + +if IS_FB_ENVIRONMENT: + from reagent.fb.prediction.synthetic_reward.synthetic_reward_predictor_wrapper import ( + FbSyntheticRewardPredictorWrapper as SyntheticRewardPredictorWrapper, + ) +else: + from reagent.prediction.synthetic_reward.synthetic_reward_predictor_wrapper import ( + SyntheticRewardPredictorWrapper, + ) + + +class SyntheticRewardNetBuilder: + """ + Base class for Synthetic Reward net builder. 
+ """ + + @abc.abstractmethod + def build_synthetic_reward_network( + self, + # dense state features + state_normalization_data: NormalizationData, + # dense action features + action_normalization_data: Optional[NormalizationData] = None, + discrete_action_names: Optional[List[str]] = None, + # sparse state features will be read from state_feature_config + state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + # sparse action features will be read from action_feature_config + action_feature_config: Optional[rlt.ModelFeatureConfig] = None, + ) -> ModelBase: + pass + + def build_serving_module( + self, + seq_len: int, + synthetic_reward_network: ModelBase, + state_normalization_data: NormalizationData, + action_normalization_data: Optional[NormalizationData] = None, + discrete_action_names: Optional[List[str]] = None, + # sparse state features will be read from state_feature_config + state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + # sparse action features will be read from action_feature_config + action_feature_config: Optional[rlt.ModelFeatureConfig] = None, + ) -> torch.nn.Module: + """ + Returns a TorchScript predictor module + """ + state_preprocessor = Preprocessor( + state_normalization_data.dense_normalization_parameters + ) + if not discrete_action_names: + assert action_normalization_data is not None + action_preprocessor = Preprocessor( + action_normalization_data.dense_normalization_parameters + ) + return SyntheticRewardPredictorWrapper( + seq_len, + state_preprocessor, + action_preprocessor, + # pyre-fixme[29]: `Union[torch.Tensor, torch.nn.Module]` is not a + # function. + synthetic_reward_network.export_mlp().cpu().eval(), + ) + else: + # TODO add Discrete Single Step Synthetic Reward Predictor + return torch.jit.script(torch.nn.Linear(1, 1)) diff --git a/reagent/net_builder/unions.py b/reagent/net_builder/unions.py index 551793152..d616c0e47 100644 --- a/reagent/net_builder/unions.py +++ b/reagent/net_builder/unions.py @@ -1,46 +1,102 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from typing import Optional + +from reagent.core.registry_meta import wrap_oss_with_dataclass from reagent.core.tagged_union import TaggedUnion -from . import categorical_dqn # noqa -from . import continuous_actor # noqa -from . import discrete_dqn # noqa -from . import parametric_dqn # noqa -from . import quantile_dqn # noqa -from . 
import value # noqa -from .categorical_dqn_net_builder import CategoricalDQNNetBuilder -from .continuous_actor_net_builder import ContinuousActorNetBuilder -from .discrete_dqn_net_builder import DiscreteDQNNetBuilder -from .parametric_dqn_net_builder import ParametricDQNNetBuilder -from .quantile_dqn_net_builder import QRDQNNetBuilder -from .value_net_builder import ValueNetBuilder - - -@ContinuousActorNetBuilder.fill_union() +from .categorical_dqn.categorical import Categorical as CategoricalType +from .continuous_actor.dirichlet_fully_connected import ( + DirichletFullyConnected as DirichletFullyConnectedType, +) +from .continuous_actor.fully_connected import ( + FullyConnected as FullyConnectedContinuousActorType, +) +from .continuous_actor.gaussian_fully_connected import ( + GaussianFullyConnected as GaussianFullyConnectedType, +) +from .discrete_actor.fully_connected import ( + FullyConnected as FullyConnectedDiscreteActorType, +) +from .discrete_dqn.dueling import Dueling as DuelingType +from .discrete_dqn.fully_connected import FullyConnected as FullyConnectedType +from .discrete_dqn.fully_connected_with_embedding import ( + FullyConnectedWithEmbedding as FullyConnectedWithEmbeddingType, +) +from .parametric_dqn.fully_connected import ( + FullyConnected as FullyConnectedParametricType, +) +from .quantile_dqn.dueling_quantile import DuelingQuantile as DuelingQuantileType +from .quantile_dqn.quantile import Quantile as QuantileType +from .synthetic_reward.ngram_synthetic_reward import ( + NGramConvNetSyntheticReward as NGramConvNetSyntheticRewardType, + NGramSyntheticReward as NGramSyntheticRewardType, +) +from .synthetic_reward.sequence_synthetic_reward import ( + SequenceSyntheticReward as SequenceSyntheticRewardType, +) +from .synthetic_reward.single_step_synthetic_reward import ( + SingleStepSyntheticReward as SingleStepSyntheticRewardType, +) +from .synthetic_reward.single_step_synthetic_reward_sparse_arch import ( + SingleStepSparseArchSyntheticReward as SingleStepSparseArchSyntheticRewardType, +) +from .synthetic_reward.transformer_synthetic_reward import ( + TransformerSyntheticReward as TransformerSyntheticRewardType, +) +from .value.fully_connected import FullyConnected as FullyConnectedValueType +from .value.seq2reward_rnn import Seq2RewardNetBuilder as Seq2RewardNetBuilderType + + +@wrap_oss_with_dataclass +class DiscreteActorNetBuilder__Union(TaggedUnion): + FullyConnected: Optional[FullyConnectedDiscreteActorType] = None + + +@wrap_oss_with_dataclass class ContinuousActorNetBuilder__Union(TaggedUnion): - pass + FullyConnected: Optional[FullyConnectedContinuousActorType] = None + DirichletFullyConnected: Optional[DirichletFullyConnectedType] = None + GaussianFullyConnected: Optional[GaussianFullyConnectedType] = None -@DiscreteDQNNetBuilder.fill_union() +@wrap_oss_with_dataclass class DiscreteDQNNetBuilder__Union(TaggedUnion): - pass + Dueling: Optional[DuelingType] = None + FullyConnected: Optional[FullyConnectedType] = None + FullyConnectedWithEmbedding: Optional[FullyConnectedWithEmbeddingType] = None -@CategoricalDQNNetBuilder.fill_union() +@wrap_oss_with_dataclass class CategoricalDQNNetBuilder__Union(TaggedUnion): - pass + Categorical: Optional[CategoricalType] = None -@QRDQNNetBuilder.fill_union() +@wrap_oss_with_dataclass class QRDQNNetBuilder__Union(TaggedUnion): - pass + Quantile: Optional[QuantileType] = None + DuelingQuantile: Optional[DuelingQuantileType] = None -@ParametricDQNNetBuilder.fill_union() +@wrap_oss_with_dataclass class 
ParametricDQNNetBuilder__Union(TaggedUnion): - pass + FullyConnected: Optional[FullyConnectedParametricType] = None -@ValueNetBuilder.fill_union() +@wrap_oss_with_dataclass class ValueNetBuilder__Union(TaggedUnion): - pass + FullyConnected: Optional[FullyConnectedValueType] = None + Seq2RewardNetBuilder: Optional[Seq2RewardNetBuilderType] = None + + +@wrap_oss_with_dataclass +class SyntheticRewardNetBuilder__Union(TaggedUnion): + SingleStepSyntheticReward: Optional[SingleStepSyntheticRewardType] = None + SingleStepSparseArchSyntheticReward: Optional[ + SingleStepSparseArchSyntheticRewardType + ] = None + NGramSyntheticReward: Optional[NGramSyntheticRewardType] = None + NGramConvNetSyntheticReward: Optional[NGramConvNetSyntheticRewardType] = None + SequenceSyntheticReward: Optional[SequenceSyntheticRewardType] = None + TransformerSyntheticReward: Optional[TransformerSyntheticRewardType] = None diff --git a/reagent/net_builder/value/__init__.py b/reagent/net_builder/value/__init__.py index 05d9251a3..6b1cf462a 100644 --- a/reagent/net_builder/value/__init__.py +++ b/reagent/net_builder/value/__init__.py @@ -1,3 +1,4 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -from . import fully_connected # noqa +from . import fully_connected, seq2reward_rnn # noqa # noqa diff --git a/reagent/net_builder/value/fully_connected.py b/reagent/net_builder/value/fully_connected.py index a8c491e1a..042d80745 100644 --- a/reagent/net_builder/value/fully_connected.py +++ b/reagent/net_builder/value/fully_connected.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import List import torch from reagent.core.dataclasses import dataclass, field -from reagent.models.fully_connected_network import FullyConnectedNetwork +from reagent.core.parameters import NormalizationData, param_hash +from reagent.models.fully_connected_network import FloatFeatureFullyConnected from reagent.net_builder.value_net_builder import ValueNetBuilder -from reagent.parameters import NormalizationData, param_hash from reagent.preprocessing.normalization import get_num_output_features @@ -18,7 +19,7 @@ class FullyConnected(ValueNetBuilder): activations: List[str] = field(default_factory=lambda: ["relu", "relu"]) use_layer_norm: bool = False - def __post_init_post_parse__(self): + def __post_init_post_parse__(self) -> None: super().__init__() assert len(self.sizes) == len(self.activations), ( f"Must have the same numbers of sizes and activations; got: " @@ -26,13 +27,15 @@ def __post_init_post_parse__(self): ) def build_value_network( - self, state_normalization_data: NormalizationData + self, state_normalization_data: NormalizationData, output_dim: int = 1 ) -> torch.nn.Module: state_dim = get_num_output_features( state_normalization_data.dense_normalization_parameters ) - return FullyConnectedNetwork( - [state_dim] + self.sizes + [1], - self.activations + ["linear"], + return FloatFeatureFullyConnected( + state_dim=state_dim, + output_dim=output_dim, + sizes=self.sizes, + activations=self.activations, use_layer_norm=self.use_layer_norm, ) diff --git a/reagent/net_builder/value/seq2reward_rnn.py b/reagent/net_builder/value/seq2reward_rnn.py new file mode 100644 index 000000000..e9d0a7a14 --- /dev/null +++ b/reagent/net_builder/value/seq2reward_rnn.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
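# Editor's note: minimal sketch, not part of this diff, of how the explicit union classes
# above are consumed. A TaggedUnion is constructed with exactly one member set, and the
# selected net builder is then read back through the union's .value accessor; the keyword
# form works because wrap_oss_with_dataclass turns the union into a dataclass of optional
# members. Field defaults are shown only for illustration.
from reagent.net_builder.synthetic_reward.single_step_synthetic_reward import (
    SingleStepSyntheticReward,
)
from reagent.net_builder.unions import SyntheticRewardNetBuilder__Union

net_builder_union = SyntheticRewardNetBuilder__Union(
    SingleStepSyntheticReward=SingleStepSyntheticReward(sizes=[256, 128])
)
builder = net_builder_union.value  # the concrete SyntheticRewardNetBuilder instance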
+ +import torch +from reagent.core.dataclasses import dataclass +from reagent.core.parameters import NormalizationData, param_hash +from reagent.models.seq2reward_model import Seq2RewardNetwork +from reagent.net_builder.value_net_builder import ValueNetBuilder +from reagent.preprocessing.normalization import get_num_output_features + + +@dataclass +class Seq2RewardNetBuilder(ValueNetBuilder): + __hash__ = param_hash + action_dim: int = 2 + num_hiddens: int = 64 + num_hidden_layers: int = 2 + + def build_value_network( + self, state_normalization_data: NormalizationData + ) -> torch.nn.Module: + state_dim = get_num_output_features( + state_normalization_data.dense_normalization_parameters + ) + + return Seq2RewardNetwork( + state_dim=state_dim, + action_dim=self.action_dim, + num_hiddens=self.num_hiddens, + num_hidden_layers=self.num_hidden_layers, + ) diff --git a/reagent/net_builder/value_net_builder.py b/reagent/net_builder/value_net_builder.py index 3d6328b26..58a406536 100644 --- a/reagent/net_builder/value_net_builder.py +++ b/reagent/net_builder/value_net_builder.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import abc import torch -from reagent.core.registry_meta import RegistryMeta -from reagent.parameters import NormalizationData +from reagent.core.parameters import NormalizationData -class ValueNetBuilder(metaclass=RegistryMeta): +class ValueNetBuilder: """ Base class for value-network builder. """ diff --git a/reagent/notebooks/PPO_for_CartPole_Control.ipynb b/reagent/notebooks/PPO_for_CartPole_Control.ipynb new file mode 100644 index 000000000..86af7883e --- /dev/null +++ b/reagent/notebooks/PPO_for_CartPole_Control.ipynb @@ -0,0 +1,591 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will use the [CartPole-v1](https://gym.openai.com/envs/CartPole-v0/) OpenAI Gym environment. For reproducibility, let is fix a random seed." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:00:43.355142Z", + "start_time": "2021-02-25T00:00:40.650953Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0224 160042.161 dataclasses.py:48] USE_VANILLA_DATACLASS: True\n", + "I0224 160042.162 dataclasses.py:49] ARBITRARY_TYPES_ALLOWED: True\n", + "W0224 160042.172 file_io.py:72] ** fvcore version of PathManager will be deprecated soon. **\n", + "** Please migrate to the version in iopath repo. **\n", + "https://github.com/facebookresearch/iopath \n", + "\n", + "W0224 160042.177 manifold.py:86] ** fvcore version of PathManager will be deprecated soon. **\n", + "** Please migrate to iopath. **\n", + "\n", + "I0224 160042.178 io.py:19] Registered Manifold PathManager\n", + "W0224 160042.180 manifold.py:86] ** fvcore version of PathManager will be deprecated soon. **\n", + "** Please migrate to iopath. **\n", + "\n", + "I0224 160042.180 patch.py:95] Patched torch.load, torch.save, torch.jit.load and save to handle Manifold uri\n", + "I0224 160042.333 registry_meta.py:19] Adding REGISTRY to type TrainingReport\n", + "I0224 160042.334 registry_meta.py:40] Not Registering TrainingReport to TrainingReport. Abstract method [] are not implemented.\n", + "I0224 160042.334 registry_meta.py:19] Adding REGISTRY to type PublishingResult\n", + "I0224 160042.335 registry_meta.py:40] Not Registering PublishingResult to PublishingResult. 
Abstract method [] are not implemented.\n", + "I0224 160042.336 registry_meta.py:19] Adding REGISTRY to type ValidationResult\n", + "I0224 160042.337 registry_meta.py:40] Not Registering ValidationResult to ValidationResult. Abstract method [] are not implemented.\n", + "I0224 160042.338 registry_meta.py:31] Registering NoPublishingResults to PublishingResult\n", + "I0224 160042.339 registry_meta.py:34] Using no_publishing_results instead of NoPublishingResults\n", + "I0224 160042.341 registry_meta.py:31] Registering NoValidationResults to ValidationResult\n", + "I0224 160042.341 registry_meta.py:34] Using no_validation_results instead of NoValidationResults\n", + "I0224 160042.347 registry_meta.py:31] Registering SchedulingFrequencyValidationResults to ValidationResult\n", + "I0224 160042.348 registry_meta.py:34] Using scheduling_frequency_validation_results instead of SchedulingFrequencyValidationResults\n", + "I0224 160042.349 registry_meta.py:31] Registering PDIVFilterValidationResults to ValidationResult\n", + "I0224 160042.350 registry_meta.py:34] Using pdiv_filter_validation_results instead of PDIVFilterValidationResults\n", + "I0224 160042.352 registry_meta.py:31] Registering Seq2SlateValidationResults to ValidationResult\n", + "I0224 160042.353 registry_meta.py:34] Using seq2slate_validation_results instead of Seq2SlateValidationResults\n", + "I0224 160042.354 registry_meta.py:31] Registering SchedulingFrequencyPublishingResults to PublishingResult\n", + "I0224 160042.355 registry_meta.py:34] Using scheduling_frequency_publishing_results instead of SchedulingFrequencyPublishingResults\n", + "I0224 160042.356 registry_meta.py:31] Registering PDIVFilterPublishingResults to PublishingResult\n", + "I0224 160042.356 registry_meta.py:34] Using pdiv_filter_publishing_results instead of PDIVFilterPublishingResults\n", + "I0224 160042.358 registry_meta.py:31] Registering FeedPublishingResults to PublishingResult\n", + "I0224 160042.359 registry_meta.py:34] Using feed_publishing_results instead of FeedPublishingResults\n", + "I0224 160042.361 registry_meta.py:31] Registering ScoreFblearnerPredictorPublishingResult to PublishingResult\n", + "I0224 160042.361 registry_meta.py:34] Using score_offline_results instead of ScoreFblearnerPredictorPublishingResult\n", + "I0224 160042.363 registry_meta.py:31] Registering ScoreSeq2SlateOutput to PublishingResult\n", + "I0224 160042.363 registry_meta.py:34] Using score_seq2slate_offline instead of ScoreSeq2SlateOutput\n", + "I0224 160042.365 registry_meta.py:31] Registering IPSResult to PublishingResult\n", + "I0224 160042.365 registry_meta.py:34] Using learnvm_ips_result instead of IPSResult\n", + "I0224 160042.367 registry_meta.py:31] Registering SlateRewardFeatureImportanceOutput to PublishingResult\n", + "I0224 160042.368 registry_meta.py:34] Using slate_reward_feature_importance instead of SlateRewardFeatureImportanceOutput\n", + "I0224 160042.372 dataclasses.py:73] Setting IdMapping.__post_init__ to its __post_init_post_parse__\n", + "I0224 160042.373 dataclasses.py:73] Setting ModelFeatureConfig.__post_init__ to its __post_init_post_parse__\n", + "I0224 160042.407 registry_meta.py:19] Adding REGISTRY to type LearningRateSchedulerConfig\n", + "I0224 160042.408 registry_meta.py:40] Not Registering LearningRateSchedulerConfig to LearningRateSchedulerConfig. 
Abstract method [] are not implemented.\n", + "I0224 160042.410 registry_meta.py:19] Adding REGISTRY to type OptimizerConfig\n", + "I0224 160042.410 registry_meta.py:40] Not Registering OptimizerConfig to OptimizerConfig. Abstract method [] are not implemented.\n", + "I0224 160042.411 registry_meta.py:31] Registering Adam to OptimizerConfig\n", + "I0224 160042.413 registry_meta.py:31] Registering SGD to OptimizerConfig\n", + "I0224 160042.414 registry_meta.py:31] Registering AdamW to OptimizerConfig\n", + "I0224 160042.416 registry_meta.py:31] Registering SparseAdam to OptimizerConfig\n", + "I0224 160042.418 registry_meta.py:31] Registering Adamax to OptimizerConfig\n", + "I0224 160042.419 registry_meta.py:31] Registering LBFGS to OptimizerConfig\n", + "I0224 160042.421 registry_meta.py:31] Registering Rprop to OptimizerConfig\n", + "I0224 160042.423 registry_meta.py:31] Registering ASGD to OptimizerConfig\n", + "I0224 160042.424 registry_meta.py:31] Registering Adadelta to OptimizerConfig\n", + "I0224 160042.426 registry_meta.py:31] Registering Adagrad to OptimizerConfig\n", + "I0224 160042.427 registry_meta.py:31] Registering RMSprop to OptimizerConfig\n", + "I0224 160042.449 dataclasses.py:73] Setting Seq2SlateNet.__post_init__ to its __post_init_post_parse__\n", + "I0224 160042.468 dataclasses.py:73] Setting CRRWeightFn.__post_init__ to its __post_init_post_parse__\n", + "I0224 160042.489 registry_meta.py:19] Adding REGISTRY to type EnvWrapper\n", + "I0224 160042.490 registry_meta.py:40] Not Registering EnvWrapper to EnvWrapper. Abstract method ['serving_obs_preprocessor', 'make', 'obs_preprocessor'] are not implemented.\n", + "I0224 160042.490 dataclasses.py:73] Setting EnvWrapper.__post_init__ to its __post_init_post_parse__\n", + "I0224 160042.496 registry_meta.py:31] Registering ChangingArms to EnvWrapper\n", + "I0224 160042.513 registry_meta.py:31] Registering Gym to EnvWrapper\n", + "I0224 160042.517 utils.py:18] Registering id=Pocman-v0, entry_point=reagent.gym.envs.pomdp.pocman:PocManEnv.\n", + "I0224 160042.518 utils.py:18] Registering id=StringGame-v0, entry_point=reagent.gym.envs.pomdp.string_game:StringGameEnv.\n", + "I0224 160042.519 utils.py:18] Registering id=LinearDynamics-v0, entry_point=reagent.gym.envs.dynamics.linear_dynamics:LinDynaEnv.\n", + "I0224 160042.519 utils.py:18] Registering id=PossibleActionsMaskTester-v0, entry_point=reagent.gym.envs.functionality.possible_actions_mask_tester:PossibleActionsMaskTester.\n", + "I0224 160042.520 utils.py:18] Registering id=StringGame-v1, entry_point=reagent.gym.envs.pomdp.string_game_v1:StringGameEnvV1.\n", + "I0224 160042.551 registry_meta.py:31] Registering RecSim to EnvWrapper\n", + "I0224 160042.552 dataclasses.py:73] Setting RecSim.__post_init__ to its __post_init_post_parse__\n", + "I0224 160042.555 registry_meta.py:31] Registering OraclePVM to EnvWrapper\n", + "I0224 160042.556 dataclasses.py:73] Setting OraclePVM.__post_init__ to its __post_init_post_parse__\n", + "I0224 160042.565 registry_meta.py:31] Registering ToyVM to EnvWrapper\n", + "\n", + "Bad key \"axes.color_cycle\" on line 214 in\n", + "/home/alexnik/.matplotlib/matplotlibrc.\n", + "You probably need to get an updated matplotlibrc file from\n", + "https://github.com/matplotlib/matplotlib/blob/v3.1.2/matplotlibrc.template\n", + "or from the matplotlib source distribution\n" + ] + } + ], + "source": [ + "import pytorch_lightning as pl\n", + "from reagent.gym.envs.gym import Gym\n", + "import pandas as pd\n", + "from matplotlib import pyplot as plt\n", + 
"import seaborn as sns\n", + "import numpy as np\n", + "import torch\n", + "import torch.nn.functional as F\n", + "import tqdm.autonotebook as tqdm" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:00:43.533034Z", + "start_time": "2021-02-25T00:00:43.357339Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0224 160043.363 env_wrapper.py:38] Env: >>;\n", + "observation_space: Box(4,);\n", + "action_space: Discrete(2);\n", + "I0224 160043.365 seed.py:57] Global seed set to 0\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 2, + "metadata": { + "bento_obj_id": "139979157612704" + }, + "output_type": "execute_result" + } + ], + "source": [ + "env = Gym('CartPole-v0')\n", + "env.seed(0)\n", + "env.action_space.seed(0)\n", + "pl.seed_everything(0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `policy` is composed of a simple scorer (a MLP) and a softmax sampler. Our `agent` simply executes this policy in the CartPole Environment." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:00:43.817285Z", + "start_time": "2021-02-25T00:00:43.535633Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0224 160043.644 registry_meta.py:19] Adding REGISTRY to type DiscreteDQNNetBuilder\n", + "I0224 160043.645 registry_meta.py:40] Not Registering DiscreteDQNNetBuilder to DiscreteDQNNetBuilder. Abstract method ['build_q_network'] are not implemented.\n", + "I0224 160043.645 registry_meta.py:31] Registering Dueling to DiscreteDQNNetBuilder\n", + "I0224 160043.646 dataclasses.py:73] Setting Dueling.__post_init__ to its __post_init_post_parse__\n", + "I0224 160043.648 registry_meta.py:31] Registering FullyConnected to DiscreteDQNNetBuilder\n", + "I0224 160043.649 dataclasses.py:73] Setting FullyConnected.__post_init__ to its __post_init_post_parse__\n", + "I0224 160043.651 registry_meta.py:31] Registering FullyConnectedWithEmbedding to DiscreteDQNNetBuilder\n", + "I0224 160043.651 dataclasses.py:73] Setting FullyConnectedWithEmbedding.__post_init__ to its __post_init_post_parse__\n" + ] + } + ], + "source": [ + "from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected\n", + "from reagent.gym.utils import build_normalizer\n", + "\n", + "norm = build_normalizer(env)\n", + "net_builder = FullyConnected(sizes=[8], activations=[\"linear\"])\n", + "cartpole_scorer = net_builder.build_q_network(\n", + " state_feature_config=None, \n", + " state_normalization_data=norm['state'],\n", + " output_dim=len(norm['action'].dense_normalization_parameters))" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:00:43.994904Z", + "start_time": "2021-02-25T00:00:43.820165Z" + } + }, + "outputs": [], + "source": [ + "from reagent.gym.policies.policy import Policy\n", + "from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler\n", + "from reagent.gym.agents.agent import Agent\n", + "\n", + "\n", + "policy = Policy(scorer=cartpole_scorer, sampler=SoftmaxActionSampler())\n", + "agent = Agent.create_for_env(env, policy)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create a trainer that uses the PPO Algorithm to train." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:00:44.180279Z", + "start_time": "2021-02-25T00:00:43.997244Z" + } + }, + "outputs": [], + "source": [ + "from reagent.training.ppo_trainer import PPOTrainer\n", + "from reagent.optimizer.union import classes\n", + "\n", + "\n", + "ppo_trainer = PPOTrainer(\n", + " policy=policy,\n", + " gamma=0.99,\n", + " optimizer=classes['Adam'](lr=8e-3, weight_decay=1e-3),\n", + " ppo_epsilon=0.2,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "RL Interaction Loop" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:00:48.623567Z", + "start_time": "2021-02-25T00:00:44.182376Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0224 160046.344 gymrunner.py:132] For gamma=1.0, average reward is 18.6\n", + "Rewards list: [15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18.\n", + " 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18.\n", + " 15. 18. 15. 18. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12.\n", + " 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12.\n", + " 29. 12. 29. 12. 29. 12. 29. 12. 17. 21. 17. 21. 17. 21. 17. 21. 17. 21.\n", + " 17. 21. 17. 21. 17. 21. 17. 21. 17. 21.]\n" + ] + } + ], + "source": [ + "from reagent.gym.runners.gymrunner import evaluate_for_n_episodes\n", + "eval_rewards = evaluate_for_n_episodes(100, env, agent, 500, num_processes=20)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Run training loop (managed by Pytorch Lightning)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:00:48.807351Z", + "start_time": "2021-02-25T00:00:48.626018Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0224 160048.628 seed.py:57] Global seed set to 0\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 7, + "metadata": { + "bento_obj_id": "139979157612704" + }, + "output_type": "execute_result" + } + ], + "source": [ + "pl.seed_everything(0)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:00:48.982293Z", + "start_time": "2021-02-25T00:00:48.809528Z" + } + }, + "outputs": [], + "source": [ + "num_episodes = 75\n", + "max_steps = 200\n", + "reward_decay = 0.8" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:00:49.170773Z", + "start_time": "2021-02-25T00:00:48.985979Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0224 160049.000 distributed.py:54] GPU available: False, used: False\n", + "I0224 160049.001 distributed.py:54] TPU available: None, using: 0 TPU cores\n" + ] + } + ], + "source": [ + "from reagent.gym.datasets.episodic_dataset import EpisodicDataset, EpisodicDatasetDataloader\n", + "\n", + "pl_trainer = pl.Trainer(max_epochs=1, deterministic=True)\n", + "dataset = EpisodicDataset(env=env, agent=agent, num_episodes=num_episodes, seed=0, max_steps=max_steps)\n", + "\n", + "train_rewards = []\n", + "class TrainRewardsExtractor(EpisodicDataset):\n", + " # a wrapper around a dataset to enable logging of rewards during training\n", + " def __init__(self, dataset):\n", + " self.dataset 
= dataset\n", + " \n", + " def __iter__(self):\n", + " for traj in iter(self.dataset):\n", + " ep_reward = traj[\"reward\"].sum().item()\n", + " train_rewards.append(ep_reward)\n", + " yield traj\n", + " \n", + " def __getattr__(self, name):\n", + " return getattr(self.dataset, name)\n", + " \n", + "dataset = TrainRewardsExtractor(dataset)\n", + "\n", + "dataloader = EpisodicDatasetDataloader(dataset, num_episodes_between_updates=1, batch_size=1, num_epochs=2)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:01:00.467129Z", + "start_time": "2021-02-25T00:00:49.173362Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0224 160049.195 lightning.py:1381] \n", + " | Name | Type | Params\n", + "---------------------------------------------\n", + "0 | scorer | FullyConnectedDQN | 58 \n", + "---------------------------------------------\n", + "58 Trainable params\n", + "0 Non-trainable params\n", + "58 Total params\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 0: 100%|██████████| 150/150 [00:11<00:00, 13.52it/s, loss=-0.047, v_num=50] \n" + ] + }, + { + "data": { + "text/plain": [ + "1" + ] + }, + "execution_count": 10, + "metadata": { + "bento_obj_id": "139979157612736" + }, + "output_type": "execute_result" + } + ], + "source": [ + "pl_trainer.fit(ppo_trainer, dataloader)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Plot the rewards over training episodes." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:01:01.214706Z", + "start_time": "2021-02-25T00:01:00.469074Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "(
,\n", + " )" + ] + }, + "execution_count": 11, + "metadata": { + "bento_obj_id": "139972921706768" + }, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAt0AAAJlCAYAAAAGrk7qAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nOzdeXxU9bk/8M+ZmSRkAUIglIBUrwiIomwBwV3UAm5o1SsqqKhVqVqvVq2tCBZwAbG4FMW64nUrVpEiQnD9aRdFENyutlJb2TWEQEgmycyZ8/39MXNOJrOemTkzZ/u87+sWyEzOOuN5znOe7/OVhBACRERERESUNx6zN4CIiIiIyOkYdBMRERER5RmDbiIiIiKiPGPQTURERESUZwy6iYiIiIjyjEE3EREREVGeMegmIiLbue2227Bo0SKzN4OISDef2RtARORk48ePx+7du+H1elFaWooTTjgBM2fORHl5OaZNm4ZNmzbB5/OhuLgYo0ePxqxZs9C7d28AwCeffIIHHngAn3/+OTweD0aPHo2bb74ZhxxySEG2/bPPPsPDDz+MjRs3wuPx4Mc//jEuvPBCnHvuuVktb9q0aTjrrLNw/vnnaz8bPHgwSktLIUkSKioqcNppp+HWW2+F1+s1cE+IiMzHTDcRUZ4tWbIEGzduxPLly/H555/j0Ucf1V6bNWsWNm7ciLq6OjQ1NeGee+4BAGzcuBFXXHEFTj75ZHzwwQd4++23MXjwYFx44YXYunVr3rd548aNuPTSSzF69GisXbsWH330Ee688068//77GS9LCAFFUZK+vmLFCmzcuBHPPPMMXn/9dSxbtizHrScish4G3UREBfKjH/0Ixx13HL755pu41yorKzFhwgTttfvuuw+TJ0/GpZdeioqKClRWVuLGG2/EsGHD8PDDDydcvqIoeOSRR3DSSSdh3LhxuPXWW7F//34AwLZt2zB48GAsX74cJ554Io466qhOwX+sBQsW4Oyzz8ZVV12FqqoqSJKEoUOH4sEHHwQA7Nu3D1dffTXGjh2L0aNH4+qrr8auXbu03582bRoWLVqEKVOmYNiwYbjllluwfv16zJkzByNGjMCcOXPi1jlgwACMGjVKOwb/+te/MG3aNNTW1uL000/H22+/nXR73333XUyePBm1tbWYMmUKvv766xRngoio8Bh0ExEVyM6dO/H+++9jyJAhca/t2bMHdXV1GDJkCFpbW7Fx40ZMnDgx7n2TJk3C3/72t4TLf/XVV7F8+XI8++yzeOutt+D3++OC2w0bNmDNmjVYunQpFi9ejH/9619xy2ltbcWmTZswYcKEpPuiKAp++tOf4t1338W7776LkpKSuHWtWLECc+fOxSeffIJ7770XtbW1WmZ/1qxZccvcvHkzNmzYgCFDhiAYDOKaa67BMcccg7/97W+YOXMmbr75Znz77bdxv/fll1/iN7/5DebMmYOPPvoIF1xwAX7+858jEAgk3X4iokJj0E1ElGfXXnstamtrcdFFF2H06NG45pprtNfmzZuH2tpaTJ48GdXV1fj1r3+Nffv2QVEUVFdXxy2ruroajY2NCdezcuVKXHbZZejfvz/Ky8tx00034Y033oAsy9p7rrvuOnTp0gWHHnooDj300IQZ4aampqTrV/Xo0QMTJkxAaWkpKioqMGPGDHz88ced3nPOOedg4MCB8Pl8KCoqSrqsc845Rzsu5513Hs4991x8+umn8Pv9uOqqq1BcXIxx48bhpJNOwqpVq+J+f9myZbjgggswbNgweL1enHPOOSgqKsKmTZuSrpOIqNA4kJKIKM8WL16Mo48+OuFrM2fO7DSwEAD8fj88Hg/q6+sxYMCATq/V19ejR48eCZf1ww8/oF+/ftq/+/XrB1mW0dDQoP2sV69e2t9LS0vh9/vjltOtW7ek61e1trbinnvuwQcffIB9+/YBAFpaWhAKhbRBkDU1NQl/N9by5ctx4IEHxu1Lnz594PF05Ib69u2L77//Pu73d+zYgddeew3PPfec9rNgMIgffvhB1/qJiAqBQTcRkcWUlZVh+PDhWLNmDcaOHdvptdWrV8f9TNW7d29s375d+/eOHTvg8/nQs2fPTvXW6ZSWlmL48OFYu3Zt0nU99dRT+Pe//41ly5ahuroaX331Fc4++2wIIbT3SJKke52J9mXXrl1QFEULvHfu3ImDDjoo7r01NTW45pprMGPGjKzXR0SUbywvISKyoF/+8pd47bXX8Oyzz6K5uRn79u3DokWLsGnTJlx33XUJf+eMM87A0qVLsXXrVrS0tGDRokWYNGkSfL7M8yu33HILli9fjieeeEIrZ/n6669x4403ApGsdklJCbp164a9e/fi97//fdpl9urVS3fnlSOPPBKlpaV44oknEAwG8dFHH+Gdd97BaaedFvfe888/Hy+99BI+/fRTCCHg9/vx3nvvobm5OeP9JiLKFwbdREQWVFtbiyeeeAJvvvkmjjvuOJx00kn46quv8MILLyTM9gLAueeei7POOgtTp07FySefjOLiYtxxxx1ZrX/kyJFYunQpPvzwQ5xyyikYM2YM7rjjDpxwwgkAgEsvvRTt7e0YO3YsLrjgAhx33HFpl3nJJZegrq4Oo0ePxrx581K+t7i4GI8++ijef/99jB07Fr/97W+xYMGChOUuRxxxBObOnYs5c+Zg9OjR+MlPfoJXX301q/0mIsoXSUQ/CyQiIiIiIsMx001ERERElGcMuomIiIiI8oxBNxERERFRnjHoJiIiIiLKMwbdRERERER5xqCbiIiIiCjPXDMjZWNjCxSl8N0Re/asQEMDJ2jIFY+jMXgcjcHjaBweS2PwOBqDx9EYbj6OHo+EHj3KE77mmqBbUYQpQbe6bsodj6MxeByNweNoHB5LY/A4GoPH0Rg8jvFYXkJERERElGcMuomIiIiI8oxBNxERERFRnrmmpjuRUEhGY2M9ZDmQt3X88IMHiqLkbfluweNoDKOPo89XjB49quH1uvo/JURERGm5+krZ2FiPLl3KUF7eB5Ik5WUdPp8HssxgMVc8jsYw8jgKIdDS0oTGxnr06lVjyDKJiIicytXlJbIcQHl5t7wF3EROJkkSysu75fVJERERkVO4OuhGJHAgouzw+0NERKSP64NuKznvvDMxefIEhEIh7WerVv0Zxx5bi1de+WPWy/366//Db38706Ct7OyOO27DGWecClmW87L8fDvvvDNx0UXn4tJLL8TFF5+HlStfM3uTAAA7d+7A6aefbPZmEBERkUEYdFtMz569sG7d37V/r179OgYPHpLTMg899DDMnj3PgK3rrKlpH9avX4d+/Q7AX//6vqHLLmQQP2/efCxd+iLmzr0X
999/L3bvri/YulHgfSUiIiJzuHogpRVNmnQm3njjdYwbdyx27NiO9vY2HHzwAO11v9+PBx64D1999SUAYMKE0zB16mX49NONeOCB+/D00y9o77388qm4/vobIYTA4sUP4skn/xc7d+7AlVdOw1ln/RQffvhXtLW14bbbZmHYsOEAgFde+SNefvklVFR0xbhxx+DVV5dh1aq3E25rXd1qHH30MRgzZhxWrfozTjhhPADgnnvmYMCAgfjv/74QAPDtt5vxq1/9EsuWvQa/vwUPP7wI//rXNwgEAhgxohbXX38jvF4vrrvuKgwcOBhffvk5unXrhnvv/R1uvfV/sG/fPgQC7Rgy5HDccstvUFRUhGAwiN/9bgE2btyAHj16YODAQdizpwHz5i0AADz//FK8997bCIVC6NWrN371q9vRs2evlMf+4IMPQdeu3VBf/wN69apOuZyzz56Ep59+Hj16VOHmm38BSZJw330PorFxD6ZPvxivvbYa69evw+OPP4pAoB2hUAiXXHI5TjllAgDE7evChQ/hlVeWYdmyF1BeXo5x447N6XNERERE1sKgO+Kvn+/EXz7bafhyJQk45ogaHHOEvu4OI0fWYvnyl9HU1ITVq1/HxImn4+uvv9Jef+aZJ6AoCp599o/w+1tw9dWXY8CAgRg37hi0trZi8+ZvcMghA/Htt5vR3Lwfw4ePxMaNGzqtY9++fRg69EhcffW1WLt2NZYseQiPPvoUNm/+Bv/7v8/g6adfQI8ePfDgg/en3NY33vgzrrvuRgwdegQefPB+7N5dj169qnHaaWfiwQcXakH3qlUrcdppZ0CSJDz88CIMHz4St912BxRFwW9/OxOrVv0ZZ511DgBgx45teOSRJ+Dz+SCEwOzZ89C9eyW8Xgl33nkHVq1agbPPPg8rVryC77/fheeeW4ZQKITrr78avXv3BgDU1b2Bbdu24bHHnoHH48Hy5X/C73//QNps/2efbUL37pU45JBBaZczcmQtNmz4GCeeeDJ27doJIQRkWcb69eswalQtAGDQoEPxyCNPwOv1Ys+eBlxxxTSMGTMO3bp1i9vXzZu/wbPPPoWnn34eVVU9sXDhvbo+L0RERGQPDLotRpKA8eNPxdtvr8Xbb6/Fo48+2SnoXr9+HW644eZI54gKnHLKT7B+/TqMG3cMJk48HatXr8T1198UCXTPTDjQrbS0DMcccxwA4PDDj8Dvf/8AAGDjxg0YN+4Y9OjRAwBw2mlnYu3aNxJu5z//+TX279+PkSNrIUkSTjjhJKxevQrTpl2GYcNGwO/3Y/Pmb3DQQf+Ft96qw2OPPQ0A+Mtf3sdXX32Jl156HgDQ1taG3r1/pC331FMnwucLfywVRcGLLz6HDz/8G4RQ0NTUhC5dugAAPvlkAyZOPA0+nw8+nw+nnDIBn322UVvH119/hcsvnwpE+rFXVFQkPeYzZ/4KQghs374Nc+fei6KiorTLGTmyFuvXr0N1dW8cdthQCCHw5ZdfRILuMQCAvXsbcc89c7Bt2xZ4vT40Ne3Dli3fYejQI+L2dePGDTj66GNRVdUTADB58jl49903035eiIiIyB4YdEdkko3ORDZ9kSdNOgNXX30Zhg8fie7dK2NeFYiNo9XAeuLEM3D11Zfiqquu7RToxiouLtL+7vF4EAqFa4qFEAD0daN4/fUVaG7ej/PPPwsAEAwGUFZWjmnTLotsy+lYvfp1jBgxCgcd9F/o00c9tgJ3370Q/fodkHC5paVl2t/ffHMNPvtsEx555HF069YVTz31BLZu3ZJ2W4UQuPTSy3HGGZN17cu8efNx8MGH4J133sLdd/8WRxwxDFVVPVMup7Z2DJYufRLV1b0xatRoCCGwYcM6bNjwMaZPvwoAcP/99+KYY47H3XffB0mSMGXKTxEItCfc1/D+EBERkVNxIKUF9et3AH72s5/j0kuvjHuttvYovP76Cggh4Pe34O2316K2NpxZ7dOnDw466GA88MBCHHTQwVGBrj4jRozChx/+FXv37gUArFnzesL3BQIBvPXWWjz++LP4059W4k9/WokVK+ogSRI+/XQTELkBeOutOrz++ms47bQztd895pjj8dxzS7UOLXv37sWOHdsTrqe5eT+6d69EWVk5mpv3480312ivjRxZi7Vr34Asy2hvb8c773RkhY899ngsX/4nNDU1adv7zTf/TLv/48efgtGjx+K5555Ju5w+fWrg8XiwZs0qjBo1BrW1R2H16tfh8/nQp08fAMD+/ftRU1MDSZLw8ccfYvv2rUnXPXJkLf7+97+isXEPELmpISIiIudgptuiJk/+acKfX3bZlVi0aAEuueQCIDKQcuzYo7XXTzvtTMydOwt33DEn43UOHDgIF110Ca65ZjrKyspRWzsa5eXxZRkffPAe+vU7AP37/7jTz089dSJWrVqBYcOGazcAGzduwJ133q2954YbfolHHnkIl112ISRJQlFRMX7xi1+ib99+ceuZOPEMfPDB+7joonNRVVWFYcNGoL09nCk+++xzsXnzPzF16n+jsrISBx54UNTvnY59+/bi+uvDGWdFUXDOOedj4MBBaY/BNddchyuumIqLL7407XJGjRqNzz77FL16hQdolpSU4Mgjh2vLmjHjOtx//3w8+eQfMGTIYRgwYGDS9R5yyEBMmzYdM2ZcgbKycowbd0zabSUiIiL7kIRLnms3NDRDUTrv6q5d36FPnwPzul67TV/u97egrKwcAPDkk49h+/ZtmDVrrtmblfA4qtsaCARw22034aSTTsGZZ55t2jbaQT4+j4X4HllNdXVX1NfvN3szHIHH0hg8jsbgcTSGm4+jxyOhZ8/E48iY6aZOHn309/j8808hy0H07dsPt956u9mblNQNN/wcwWAQgUA7amvHYNKkM8zeJCIiIqKEGHRTJ7/85a/M3gTdHn98qdmbQERERKRLQQZSNjY24mc/+xkmTJiAM888E9dddx327AkPGNu0aRPOOussTJgwAZdffjkaGhq030v1GhERERGRXRQk6JYkCVdeeSXq6uqwcuVK9O/fHwsXLoQQArfccgtmzZqFuro61NbWYuHChUCkhVqy14iIiIiI7KQgQXdlZSWOOuoo7d/Dhw/Hjh078Pnnn6OkpAS1teEZ/KZMmYI1a8Jt4VK9RkRERERkJwXv0x2eZfBFjB8/Hjt37kTfvn2116qqqqAoCvbu3ZvyNSIiIicICYEf2oMIuaORGFFBBBQFP7QHLTfxXMEHUs6dOxdlZWWYOnUq3nyzcNNcJ2rf8sMPHvh8+b/vKMQ63IDH0RhGH0ePx4Pq6q6GLtMO3LjP+eLmY9kckLGnqRVdu5eivCi3S7Kbj6OReByNYeZx3L6/Fb5ACNVV5dqs3VZQ0KB7/vz5+O6777BkyRJ4PB7U1NRgx44d2ut79uyBJEmorKxM+Vo2EvXpVhQl7z20M+mLfN55Z2LBgkU4+OBDslrXk08+hksuuRxFRUU63p3csmUv4NRTJ6JHj6qclmOkTI9jcXExiotLtJ/dc89C1NT0Tfl7l112ER577CmUlHT
JeXvfeGMl/va3DzBv3oKMfk/d9qKiYshyEFOmTDW093i2fbp37tyBK6+chlWr3o57TVEU1/VjdXMPWqO5/Vj6Qwr87UE0yAr83uxviN1+HI3C42gMM4+jIgR2tQVR7vVg9+7mgq/fEn26Fy1ahC+++AJ/+MMfUFxcDAAYOnQo2trasH79etTW1uKll17CpEmT0r5GiT399OO48MJpBgTdL6K2doylgu5MzZs3P+Obl2eeeSFv25MJddu//XYzLr98KsaNOwa9elUXbP2yLMPnYzdRokJQH39b6yE4kX21hhQIIVCew01svhTkyvrNN99gyZIlOOiggzBlyhQAwAEHHIDFixdjwYIFmD17Ntrb29GvXz/cd999QOSRdbLX3OC6667CkCGH44svPsPu3bsxfvwpmDHjegDAU0/9AW+9VYfi4hJIEvDQQ4/hD394BAAwY8blkCQPHn74Mfz973/Fyy+/CFkOAgCuvfZ/UFs7BohkVCdOPB0ff/wRGhp248ILp+Lccy/A0qVPYvfuesyc+SsUF5dg9ux5aGjYjccffxSBQDtCoRAuueRynHLKhLTbuXv3bjzwwAJ8//0utLe345RTJuCSSy4HAHz11Zd44IGFaGtrRZcupfif/7kZQ4Ycjk8+WY/Fix/Ek0/+LwBo/1669Hls2fIf3HXXb9HW1gZFCWHSpDNx0UXTMjquxx5bi+nTf4YPPvh/aG9vw9VXX4sTTzxZe23t2vfRpUsX/O53C/DJJx+jqKgYZWWlePTRpwAAq1e/jhdf/F9IkoS+fQ/Arbf+Bj16VCEYDGLRogX45JP16N69EgMHDu603uefX4r33nsboVAIvXr1xq9+dTt69uyVclsPPvgQdO3aDfX1P2hBd7LlnH32JDz99PPo0aMKN9/8C0iShPvuexCNjXswffrFeO211Vi/fh2eeOJRtLcnPo8DBw7Gl19+jm7dumHhwofwyivLsGzZCygvL8e4ccdmdJyJSB812LZa7SmRXbWEFPg8Eko81ikrURUk6B44cCD+8Y9/JHxt5MiRWLlyZcavGa1ZDqElZHypiScooVSSUOHzZvy733+/C4sXPw6/348LLpiMM86YjO7dK7Fs2QtYsWINSkq6wO9vQXFxCX75y19h+fKX8eijT6GsrAwAcNRRY3HqqRMgSRK2bPkPbrjh51i+/A1t+W1tbXjssaexc+cOXHLJBZg06UxceukVWLnytU6Z4p49e+GRR56A1+vFnj0NuOKKaRgzZhy6deuWdDv79/8x5s2bhcsuuxLDh49EMBjEDTfMwJAhh2H48FG4/fZb8etfz8Lo0Udh/fp1uP32W/HHP76W8ni8+uqfcOyxx2PatOkAgKampqTvVW8aAMDr9WpBPCI3dM888wK2bPkPrrnmCgwbNqJTVn/z5n9i48b1eO65l+HxeLT1fPvtZixZ8ns8+eRz6NWrFx5//FEsWnQf5sy5BytWvIKdO3fguedehizLuPban6GmpgYAUFf3BrZt24bHHnsGHo8Hy5f/Cb///QOYPXteyv397LNN6N69EoccMijtckaOrMWGDR/jxBNPxq5dOyGEgCzLWL9+HUaNCncAGjToUDz22FMQQkp4Hnfs2IZHHnkCPp8Pmzd/g2effQpPP/08qqp6YuHCe1NuKxFlR8T8SUTZCwmBVkWgm89jqVpuFZ8hW9hJJ50Mj8eDiooKHHjgf2H79m3o27cf+vXrj7lzZ2PMmLE4+ujjUFZWnvD3t2/fhjvvvB319fXw+XzYs6cBDQ27tQzrKaf8BABQU9NXy6geeOBBccvZu7cR99wzB9u2bYHX60NT0z5s2fIdhg49Iul29upVjY0bN3TqNuP3t+A///kPqqp6oaioCKNHh9tI1taOQVFREbZs+S7l8Rg+fAQeeeQhtLW1YeTIWowcWZv0vanKS844YzIA4Mc/PgiDBoWzu8cee4L2et++B0CWZdx771yMHFmLo48+Dohk3cOlHuHjN3nyT3HZZRdFXtuASZPOgM/ng8/nw4QJk/DZZ5sAAH/5y/v4+uuvcPnlUwEAoZCMiorE9V6I3DAIIbB9+zbMnXuvVi6UajkjR9Zi/fp1qK7ujcMOGwohBL788otI0D1GO4/z58/Fli3fJTyPp546USsr2bhxA44++lhUVfWM7Os5ePfdwg18JnILBt1ExvGHFMCipSVg0N2hwufNKhudTrYD1wB0Ggjo8XgQCoXg9Xrx2GNP4/PPP8Unn6zHFVdMxf33P4xDDhkY9/t33nk7rrvuRhx//IlQFAWnnHIsAoFA1PKLY5YvJ9yO+++/F8ccczzuvvs+SJKEKVN+ikCgPeV2CqFAkiQ88cSzcfXBmzd/k/AOVJIAr9cHITqOV/T2nnjiyRg69EisW/chnnvuGaxa9WfMmjU37XFMJfxEt/O2VFRU4Nln/4iNGzdgw4aP8eijD+Opp56DEIjbbvWfqR4NCyFw6aWXa8F+OuoNwzvvvIW77/4tjjhiGKqqeqZcTm3tGCxd+iSqq3tj1KjREEJgw4Z12LDhY0yffhUQOY/HH38C5s1bkPA8lpaWddpmIso/9avGbxxR7lpkBUUeD4o91gy6rblVlJTf34K9e/dixIhRuOKKq3HwwQPw7bf/AgCUlZWjpaVjpG5zc7PWseP111d0CmBTKS8vR3Nzx3L279+PmpoaSJKEjz/+ENu3b027jLKycgwbNgLPPfeM9rPvv9+FhobdOPDAgxAIBPDJJ+uBSAZZlmX0738g+vbtix07tqOpqQlCCLz1Vp32+9u2bUVVVU+cdtqZmD79Z/i///tS1/7EWrXqzwCArVu3YPPmf+Dww4d2er2xsRHt7e0YO/ZoXHPNdaioqMCOHdsxatRo/P3vf0VDw24AwMqVr2k18rW1o7FmzRuQZRnt7W14882OiZyOPfZ4LF/+J61MJRAI4Jtv/pl2O8ePPwWjR4/VjmGq5fTpUwOPx4M1a1Zh1KgxqK09CqtXvw6fz4c+ffoA2nnsq+s8jhxZi7///a9obNwDRD4/RGQ8EfcXIspGUBFoVxTLZrnBTLf9NDc34/bbb0Ug0A5FUTBo0KE44YSTAABTplyMX/ziGpSUdMHDDz+GX/ziJvzmNzeja9euOOqoo9G9e3dd6zjvvCm4++456NKlC2bPnocZM67D/ffPx5NP/gFDhhyGAQPis+qJzJo1Fw899DtccskFQCQQ//WvZ6Fnz164664FnQZSzps3H0VFRaiu7o0pU6biiiumoaqqCsOHj8S///0tAOCdd97E2rVrUFTkgyRJuOGGXyZdd3RNNwDcdttMHHroYQCAUCiE6dMvQltbG2655TdxXVp++OF7zJ8/D6FQCKFQCGPHHo3DDz8CHo8HV199LW688drIQMp+uOWW3wAAzjrrp9i8eTOmTj0f3btX4tBDD0djYwMAYOLE07Fv315cf30446woCs4553wMHDgo7TG85prrcMUVU3HxxZemXc6oUaPx2WefauUvJSUlOPLI4dqy1PP4hz8sSXseDzlkIKZNm44ZM65AWVk5xo07Ju22ElHmBN
i9hMgI6rg8KwfdknDJc+REfbp37foOffocmNf15lJeQh2MOo5qhxJ1sKnb5OPzWIjvkdWwl69x3H4sGwMymuQQuhd5UZnD5DhuP45G4XE0RqGPoxACO9uD8EgS+pTk1jY5V6n6dFv3doCIiMjhOJCSKHcBIRBUrDuAUsXyEnKVv/xlvdmbQESk6ejTbfKGENmYXw43byizeNBt7a0jIiJyMGa6iXIjhEBLSEEXjwSvBXtzR3N90O2SknaivOD3hyg3nAaeKDdtikDIwr25o1l/C/PI5ytGS0sTAweiLAgh0NLSBJ+vWMe7iSgRZrqJcuMPKfBIEkptEHS7uqa7R49qNDbWo7l5r453Z8fj8UBR2L0kVzyOxjD6OPp8xejRo9qw5RG5TUdNN8NuokwJIeAPKSj1euCxeGkJ3B50e70+9OpVk9d1sP2QMXgcjcHjSGQtDLWJsudXBBSblJbA7eUlREREZuI08ETZa5FD8EoSunisn+UGg24iIiLzsKabKDshIdCmCJR5PZBsUFoCBt1ERERminQvYdSdVlDhQaIO/pACIQTKffYJZe2zpURERA7DTLc+AUXBjrYA2jmgniL8IQU+j4Rim2S5waCbiIjIPB013Qy7U675e6cAACAASURBVAmJzn+Su8mKQFtIQbnXa5vSEjDoJiIiMg+ngdeHh4eitYTCTzzs0rVEZa+tJSIichAGk/oo6sydvDuhSGlJsceDIpt0LVEx6CYiIjKBEII13TrxOJFKVgQCioIym2W5waCbiIjIRGoG1+ztsDj2MyeVPzKYlkE3ERER6aIGkJIkhbPeLJ1ISmG4TRGtIQVFNiwtAYNuIiIic6hhJC/E6XHAKaHThDj2C7jB7zoREZE5tKBb6vxvisfyEkIkyw0hbFlaAgbdRERE5lADSU+kzzADyuTUKXF4jNxNnRCnyEa9uaMx6CYiIjKB2rtEvRCzdCI5oQ045UFyK0UrLfHYakKcaAy6iYiITBA9kBLM4qbEY0OtIQVCCJR67Bu62nfLiYiIbEwrL1H/zdAyKa28hIfItfyKgFeSUGLDriUqBt1EREQm4kDK9DiQ0t0UIdAaUlBq49ISMOgmIiIyR0fLwEh5CSPKpPgUwN3alHAfe7t2LVHZe+uJiIhsKrZlICWnMNPtav6QAo8koYvNvywMuomIiEygduRQwwgGlMl1TI7Do+Q2wiGlJWDQTUREZI6OTDe7l6QjYv4k92hTBBQHlJaAQTcREZE5YqeBZxY3MSEEj42LtYYUSA4oLQGDbiIiInNwGnh9RJK/k/MJIeAPKSj1SNoTITtj0E1ERGQCNXkrgeUlqShRf+cxcpeAIhBySGkJGHQTERGZQ5sGnpnulKIrS1hl4i5+JVxaUsqgm4iIiLIlIlPAa91LGFAmpPXoliTemLiIWlrSxSGlJWDQTUREZBIRbhfIloGpqT26wwGLvY/S9+1B7A3KZm+GLQSFgKwIx2S5waCbiIjIHGr4KElSJItr74AyX6IHnNr9CAUVAdnuO1Eg/pACSJJj6rnBoJuIiMgcIirL7YyH5/mhDqT0QrJ9CY5ga0jd/CGBEo8Er0NKS8Cgm4iIyBzhmu7w3yXWdCelBqlOyHTD9gUyhRFUBIKK4qgsNxh0ExERmUMIobULlBiMJRU7c6edCZ5nXfyh8PONMo+zwlRn7Q0REZFNdCovcUgWNx+iB1La/Rgx6NanVVFQ7PHA54BZKKMx6CYiIjJBXHmJ2RtkUdH9zO1cEy2EAIRgGZEOISFQ7LCAGwy6iYiIzNF5IKVk22Ay3zr6mUuOKHy3/x7knyKcGaA6cZ+IiIgsTwh2L9EjNgCza9AqEvyN4gkhoAgRbqXpMAy6iYiITBCd6QZrupNSy3DUGMyux0nE/EmJqS0iHVhdwqCbiIjIDAId2bx8tgz0hxQ02ngWRLXLi91jMC3oZtSdUucZSJ3FiftERERkK/kcSNkSUtAsKzreaU0iEqyoQbdtg1bR6Q9KQtEGztr9Niseg24iIiITxNZ052sa+JBi7wnmFbW8JPJvu+4Ly0v0YaabiIiIDNW5T7eUt2BMtm1qOCz65gR5vDnJN3W77bn1hcOabiIiIjJUIaaBV4RASAiIyP/bkRKpfXdMNwsbn4tCUI+Nx/ZV/PEYdBMRERWYGgR3Li8xXsgBsZ0QMTXdJm9PtkSSv1NnWnmJ82JuBt1ERERmUXty5Cvoji4tsWugp5bh2H0gZfR223QXCkKBACT7d6tJxFeoFc2fPx91dXXYvn07Vq5ciUGDBmHbtm249tprtffs378fzc3NWLduHQBg/PjxKC4uRklJCQDg5ptvxnHHHVeoTSYiIsoLNejKd59uuwfdQghtIKX2MzM3KAfMdOujTobkmHKiKAULuk8++WRccskluPjii7WfHXDAAVixYoX277vuuguhUKjT7z300EMYNGhQoTaTiMgVdrQF0M3nRYXPa/amuJIWdGs13ZJW62tksGH3oBsIp4g9kGw/OU4ngtOQJqM4tLQEhQy6a2trU74eCASwcuVKPPnkk4XaJCIiVxJCIKgIBO36nN4B1EMfOw280bFYKDa9arNgRu1kEd0y0K6Y6dZHidxkOVHBgu503nnnHfzoRz/C4Ycf3unnN998M4QQGDVqFG666SZ069bNtG0kInISxtzmiS0vyVcW1+6ZbnXzPVH173bt/CE6nQsb3gEVCDPdBfDKK6/g3HPP7fSz559/HjU1NQgEArjrrrswZ84cLFy4MKvl9+xZYdCWZq66uqtp63YSHkdj8Dgaw87HMaQI7N7TjG5dilBd0cXszbH1scxWmxzC3r0Sqrp2QbeSIvjaAmhvbkevqnL4PNn1OEh0HPfuaUaRCGcPqyrL0MVm5UTtcgiNkeNU6vNib2MLKiu6oLJLUd7Wma/PY3FbEP7mNgBAVfcylBbZ61xkKtvj2NTYghKvB9XdSg3fJrNZIuj+/vvv8fHHH2PBggWdfl5TUwMAKC4uxkUXXYQZM2ZkvY6GhmYoSuHvjquru6K+fn/B1+s0PI7G4HE0ht2PY0gI+FsDkNpl+FqDpm6L3Y9lttoVBf62IBplBe1eD5rlEPwBGfUhAV8Wab5Ex1EIgX1tQRRLEgKKgoaQguIsA3qzqMdpr6zA75Hgbw1gTzCEYJ5uHvL5eVTPMQDslhV08drrXGQil+PY1BpAqdeD+nbZ8O0qBI9HSprotcQZX758OU444QT06NFD+5nf78f+/eETJoTAG2+8gSFDhpi4lUREzsDpqM2XqqbbKLIIr0gN4u1YldG5vMTeWNOtj2KV4DQPCpbpnjdvHtauXYvdu3dj+vTpqKysxKpVq4BI0H377bd3en9DQwOuv/56hEIhKIqCAQMGYPbs2YXaXCIi54pc8e1aG+sE8TXdkcDYwFrfUOT8Ftm460d0lxfb9+lO8nfqIPLQwcdKChZ0z5w5EzNnzkz4Wl1dXdzP+vfvj9dee60AW0ZE5C684JsvtmVg7M+NoA6i9GkBvf0okX2QHDAjpX03vHDUbjVOHUjp1Aw+ERElISJXf8YA5onLd
wzne+Ex//+Mexb9++UPYNEVEUcSElEVELGRwchNY6F3gPDAzMyEZ7Fi9ejNtuuw0A8Nxzz+Gyyy7DW97yFixZsgT9/f0zfndgYABbt27F4sWLMTo6isnJyVzg3d/fn7uv5cuX4wMf+EDudgtt3boVW7duRSqVwle+8hV8/vOfx7e//e2a7wMioihippuIqIUMDw/j3//935HNZvHDH/4Q+/btwznnnDPr9374wx/i8OHDAIAFCxZACAEpJc455xz88Y9/xEMPPQTLsvDII4/g97//Pc4991ysXLkSZ555Ju68805kMhk899xzeOKJJ3K3ecEFF+CJJ57Az3/+c9i2jXQ6jV27duHw4cMYGhrCT37yE0xOTiIej6O9vX1Gy0MiolbHTDcRURP6xCc+MSNoffvb347zzz8fZ511Fvbv34+3ve1tWLRoEf7lX/4Fvb29s/5+z549uP322zE+Po6FCxfihhtuwEknnQS47Qhvv/123HzzzVi9ejXuuece9PX1AQD++Z//Gddddx22bNmCDRs24MILL8To6CjgZrq/9rWv4ctf/jI++9nPQkqJs846CzfffDOUUvjWt76Fa6+9FkIIrF+/HjfddFPd9hcRUaMJrbVu9EYQEVH17r//fnznO9/Bvffe2+hNISKiAiwvISIiIiIKGYNuIiIiIqKQsbyEiIiIiChkzHQTEREREYWMQTcRERERUcgYdBMRERERhYxBNxERERFRyBh0ExERERGFjEE3EREREVHI/j/fuvk/v+lhJAAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "bento_obj_id": "139974490566800" + }, + "output_type": "display_data" + } + ], + "source": [ + "def plot_rewards(rewards):\n", + " fig, ax = plt.subplots(1, 1, figsize=(12, 10));\n", + " pd.Series(rewards).rolling(20).mean().plot(ax=ax);\n", + " pd.Series(rewards).plot(ax=ax,alpha=0.5,color='lightblue');\n", + " ax.set_xlabel('Episodes');\n", + " ax.set_ylabel('Reward');\n", + " plt.title('PPO on CartPole');\n", + " plt.legend(['Moving Average Reward', 'Instantaneous Episode Reward'])\n", + " return fig, ax\n", + "\n", + "sns.set_style('darkgrid')\n", + "sns.set()\n", + "\n", + "\n", + "plot_rewards(train_rewards)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Print eval rewards" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T00:02:12.264614Z", + "start_time": "2021-02-25T00:01:01.218034Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0224 160212.086 gymrunner.py:132] For gamma=1.0, average reward is 187.87\n", + "Rewards list: [200. 190. 190. 200. 200. 200. 187. 188. 198. 200. 200. 200. 200. 165.\n", + " 169. 200. 200. 153. 200. 176. 200. 200. 200. 161. 200. 200. 200. 200.\n", + " 200. 200. 200. 170. 189. 138. 200. 200. 200. 183. 200. 154. 200. 134.\n", + " 194. 178. 180. 170. 200. 162. 168. 200. 176. 155. 200. 182. 200. 200.\n", + " 200. 186. 169. 178. 150. 200. 178. 172. 154. 200. 200. 200. 154. 200.\n", + " 200. 192. 195. 155. 200. 200. 200. 200. 200. 157. 136. 200. 200. 200.\n", + " 200. 172. 200. 200. 200. 171. 200. 200. 157. 193. 145. 200. 200. 200.\n", + " 200. 200. 200. 200. 172. 200. 155. 200. 131. 200. 200. 200. 178. 162.\n", + " 184. 200. 200. 200. 175. 200. 200. 200. 200. 200. 200. 134. 200. 200.\n", + " 146. 200. 200. 191. 200. 200. 200. 200. 150. 194. 200. 200. 200. 200.\n", + " 158. 131. 161. 200. 200. 200. 165. 200. 114. 200. 200. 200. 175. 200.\n", + " 200. 200. 200. 123. 200. 195. 197. 200. 193. 200. 200. 200. 200. 200.\n", + " 200. 200. 181. 200. 190. 191. 125. 165. 200. 200. 200. 200. 200. 181.\n", + " 200. 200. 195. 200. 200. 200. 181. 144. 200. 200. 200. 187. 184. 200.\n", + " 200. 200. 142. 
200.]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mean reward: 187.87\n" + ] + } + ], + "source": [ + "eval_episodes = 200\n", + "eval_rewards = evaluate_for_n_episodes(eval_episodes, env, agent, 500, num_processes=1).T[0]\n", + "mean_reward = pd.Series(eval_rewards).mean()\n", + "print(f'Mean reward: {mean_reward:.2f}')" + ] + } + ], + "metadata": { + "anp_cloned_from": { + "revision_id": "351369499371280" + }, + "bento_stylesheets": { + "bento/extensions/flow/main.css": true, + "bento/extensions/kernel_selector/main.css": true, + "bento/extensions/kernel_ui/main.css": true, + "bento/extensions/new_kernel/main.css": true, + "bento/extensions/system_usage/main.css": true, + "bento/extensions/theme/main.css": true + }, + "kernelspec": { + "display_name": "alexnik (local)", + "language": "python", + "name": "alexnik_local" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5+" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/reagent/notebooks/REINFORCE_for_CartPole_Control.ipynb b/reagent/notebooks/REINFORCE_for_CartPole_Control.ipynb new file mode 100644 index 000000000..4d34f8cc8 --- /dev/null +++ b/reagent/notebooks/REINFORCE_for_CartPole_Control.ipynb @@ -0,0 +1,591 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will use the [CartPole-v1](https://gym.openai.com/envs/CartPole-v0/) OpenAI Gym environment. For reproducibility, let is fix a random seed." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:41:39.238680Z", + "start_time": "2021-02-25T18:41:36.874709Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0225 104138.043 dataclasses.py:48] USE_VANILLA_DATACLASS: True\n", + "I0225 104138.045 dataclasses.py:49] ARBITRARY_TYPES_ALLOWED: True\n", + "W0225 104138.056 file_io.py:72] ** fvcore version of PathManager will be deprecated soon. **\n", + "** Please migrate to the version in iopath repo. **\n", + "https://github.com/facebookresearch/iopath \n", + "\n", + "W0225 104138.062 manifold.py:86] ** fvcore version of PathManager will be deprecated soon. **\n", + "** Please migrate to iopath. **\n", + "\n", + "I0225 104138.064 io.py:19] Registered Manifold PathManager\n", + "W0225 104138.068 manifold.py:86] ** fvcore version of PathManager will be deprecated soon. **\n", + "** Please migrate to iopath. **\n", + "\n", + "I0225 104138.069 patch.py:95] Patched torch.load, torch.save, torch.jit.load and save to handle Manifold uri\n", + "I0225 104138.232 registry_meta.py:19] Adding REGISTRY to type TrainingReport\n", + "I0225 104138.233 registry_meta.py:40] Not Registering TrainingReport to TrainingReport. Abstract method [] are not implemented.\n", + "I0225 104138.234 registry_meta.py:19] Adding REGISTRY to type PublishingResult\n", + "I0225 104138.234 registry_meta.py:40] Not Registering PublishingResult to PublishingResult. Abstract method [] are not implemented.\n", + "I0225 104138.235 registry_meta.py:19] Adding REGISTRY to type ValidationResult\n", + "I0225 104138.236 registry_meta.py:40] Not Registering ValidationResult to ValidationResult. 
Abstract method [] are not implemented.\n", + "I0225 104138.237 registry_meta.py:31] Registering NoPublishingResults to PublishingResult\n", + "I0225 104138.238 registry_meta.py:34] Using no_publishing_results instead of NoPublishingResults\n", + "I0225 104138.239 registry_meta.py:31] Registering NoValidationResults to ValidationResult\n", + "I0225 104138.239 registry_meta.py:34] Using no_validation_results instead of NoValidationResults\n", + "I0225 104138.244 registry_meta.py:31] Registering SchedulingFrequencyValidationResults to ValidationResult\n", + "I0225 104138.245 registry_meta.py:34] Using scheduling_frequency_validation_results instead of SchedulingFrequencyValidationResults\n", + "I0225 104138.247 registry_meta.py:31] Registering PDIVFilterValidationResults to ValidationResult\n", + "I0225 104138.247 registry_meta.py:34] Using pdiv_filter_validation_results instead of PDIVFilterValidationResults\n", + "I0225 104138.249 registry_meta.py:31] Registering Seq2SlateValidationResults to ValidationResult\n", + "I0225 104138.249 registry_meta.py:34] Using seq2slate_validation_results instead of Seq2SlateValidationResults\n", + "I0225 104138.250 registry_meta.py:31] Registering SchedulingFrequencyPublishingResults to PublishingResult\n", + "I0225 104138.251 registry_meta.py:34] Using scheduling_frequency_publishing_results instead of SchedulingFrequencyPublishingResults\n", + "I0225 104138.252 registry_meta.py:31] Registering PDIVFilterPublishingResults to PublishingResult\n", + "I0225 104138.253 registry_meta.py:34] Using pdiv_filter_publishing_results instead of PDIVFilterPublishingResults\n", + "I0225 104138.254 registry_meta.py:31] Registering FeedPublishingResults to PublishingResult\n", + "I0225 104138.255 registry_meta.py:34] Using feed_publishing_results instead of FeedPublishingResults\n", + "I0225 104138.256 registry_meta.py:31] Registering ScoreFblearnerPredictorPublishingResult to PublishingResult\n", + "I0225 104138.257 registry_meta.py:34] Using score_offline_results instead of ScoreFblearnerPredictorPublishingResult\n", + "I0225 104138.258 registry_meta.py:31] Registering ScoreSeq2SlateOutput to PublishingResult\n", + "I0225 104138.259 registry_meta.py:34] Using score_seq2slate_offline instead of ScoreSeq2SlateOutput\n", + "I0225 104138.260 registry_meta.py:31] Registering IPSResult to PublishingResult\n", + "I0225 104138.261 registry_meta.py:34] Using learnvm_ips_result instead of IPSResult\n", + "I0225 104138.263 registry_meta.py:31] Registering SlateRewardFeatureImportanceOutput to PublishingResult\n", + "I0225 104138.264 registry_meta.py:34] Using slate_reward_feature_importance instead of SlateRewardFeatureImportanceOutput\n", + "I0225 104138.268 dataclasses.py:73] Setting IdMapping.__post_init__ to its __post_init_post_parse__\n", + "I0225 104138.269 dataclasses.py:73] Setting ModelFeatureConfig.__post_init__ to its __post_init_post_parse__\n", + "I0225 104138.303 registry_meta.py:19] Adding REGISTRY to type LearningRateSchedulerConfig\n", + "I0225 104138.304 registry_meta.py:40] Not Registering LearningRateSchedulerConfig to LearningRateSchedulerConfig. Abstract method [] are not implemented.\n", + "I0225 104138.306 registry_meta.py:19] Adding REGISTRY to type OptimizerConfig\n", + "I0225 104138.306 registry_meta.py:40] Not Registering OptimizerConfig to OptimizerConfig. 
Abstract method [] are not implemented.\n", + "I0225 104138.308 registry_meta.py:31] Registering Adam to OptimizerConfig\n", + "I0225 104138.309 registry_meta.py:31] Registering SGD to OptimizerConfig\n", + "I0225 104138.311 registry_meta.py:31] Registering AdamW to OptimizerConfig\n", + "I0225 104138.312 registry_meta.py:31] Registering SparseAdam to OptimizerConfig\n", + "I0225 104138.314 registry_meta.py:31] Registering Adamax to OptimizerConfig\n", + "I0225 104138.315 registry_meta.py:31] Registering LBFGS to OptimizerConfig\n", + "I0225 104138.317 registry_meta.py:31] Registering Rprop to OptimizerConfig\n", + "I0225 104138.322 registry_meta.py:31] Registering ASGD to OptimizerConfig\n", + "I0225 104138.324 registry_meta.py:31] Registering Adadelta to OptimizerConfig\n", + "I0225 104138.325 registry_meta.py:31] Registering Adagrad to OptimizerConfig\n", + "I0225 104138.327 registry_meta.py:31] Registering RMSprop to OptimizerConfig\n", + "I0225 104138.343 dataclasses.py:73] Setting Seq2SlateNet.__post_init__ to its __post_init_post_parse__\n", + "I0225 104138.359 dataclasses.py:73] Setting CRRWeightFn.__post_init__ to its __post_init_post_parse__\n", + "I0225 104138.380 registry_meta.py:19] Adding REGISTRY to type EnvWrapper\n", + "I0225 104138.381 registry_meta.py:40] Not Registering EnvWrapper to EnvWrapper. Abstract method ['obs_preprocessor', 'serving_obs_preprocessor', 'make'] are not implemented.\n", + "I0225 104138.382 dataclasses.py:73] Setting EnvWrapper.__post_init__ to its __post_init_post_parse__\n", + "I0225 104138.387 registry_meta.py:31] Registering ChangingArms to EnvWrapper\n", + "I0225 104138.402 registry_meta.py:31] Registering Gym to EnvWrapper\n", + "I0225 104138.406 utils.py:18] Registering id=Pocman-v0, entry_point=reagent.gym.envs.pomdp.pocman:PocManEnv.\n", + "I0225 104138.407 utils.py:18] Registering id=StringGame-v0, entry_point=reagent.gym.envs.pomdp.string_game:StringGameEnv.\n", + "I0225 104138.407 utils.py:18] Registering id=LinearDynamics-v0, entry_point=reagent.gym.envs.dynamics.linear_dynamics:LinDynaEnv.\n", + "I0225 104138.408 utils.py:18] Registering id=PossibleActionsMaskTester-v0, entry_point=reagent.gym.envs.functionality.possible_actions_mask_tester:PossibleActionsMaskTester.\n", + "I0225 104138.409 utils.py:18] Registering id=StringGame-v1, entry_point=reagent.gym.envs.pomdp.string_game_v1:StringGameEnvV1.\n", + "I0225 104138.433 registry_meta.py:31] Registering RecSim to EnvWrapper\n", + "I0225 104138.435 dataclasses.py:73] Setting RecSim.__post_init__ to its __post_init_post_parse__\n", + "I0225 104138.437 registry_meta.py:31] Registering OraclePVM to EnvWrapper\n", + "I0225 104138.437 dataclasses.py:73] Setting OraclePVM.__post_init__ to its __post_init_post_parse__\n", + "I0225 104138.446 registry_meta.py:31] Registering ToyVM to EnvWrapper\n", + "\n", + "Bad key \"axes.color_cycle\" on line 214 in\n", + "/home/alexnik/.matplotlib/matplotlibrc.\n", + "You probably need to get an updated matplotlibrc file from\n", + "https://github.com/matplotlib/matplotlib/blob/v3.1.2/matplotlibrc.template\n", + "or from the matplotlib source distribution\n" + ] + } + ], + "source": [ + "import pytorch_lightning as pl\n", + "from reagent.gym.envs.gym import Gym\n", + "import pandas as pd\n", + "from matplotlib import pyplot as plt\n", + "import seaborn as sns\n", + "import numpy as np\n", + "import torch\n", + "import torch.nn.functional as F\n", + "import tqdm.autonotebook as tqdm" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": 
{ + "ExecuteTime": { + "end_time": "2021-02-25T18:41:39.429693Z", + "start_time": "2021-02-25T18:41:39.240892Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0225 104139.247 env_wrapper.py:38] Env: >>;\n", + "observation_space: Box(4,);\n", + "action_space: Discrete(2);\n", + "I0225 104139.250 seed.py:57] Global seed set to 0\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 2, + "metadata": { + "bento_obj_id": "139934208915616" + }, + "output_type": "execute_result" + } + ], + "source": [ + "env = Gym('CartPole-v0')\n", + "env.seed(0)\n", + "env.action_space.seed(0)\n", + "pl.seed_everything(0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `policy` is composed of a simple scorer (a MLP) and a softmax sampler. Our `agent` simply executes this policy in the CartPole Environment." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:41:39.723885Z", + "start_time": "2021-02-25T18:41:39.432154Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0225 104139.542 registry_meta.py:19] Adding REGISTRY to type DiscreteDQNNetBuilder\n", + "I0225 104139.543 registry_meta.py:40] Not Registering DiscreteDQNNetBuilder to DiscreteDQNNetBuilder. Abstract method ['build_q_network'] are not implemented.\n", + "I0225 104139.543 registry_meta.py:31] Registering Dueling to DiscreteDQNNetBuilder\n", + "I0225 104139.544 dataclasses.py:73] Setting Dueling.__post_init__ to its __post_init_post_parse__\n", + "I0225 104139.546 registry_meta.py:31] Registering FullyConnected to DiscreteDQNNetBuilder\n", + "I0225 104139.547 dataclasses.py:73] Setting FullyConnected.__post_init__ to its __post_init_post_parse__\n", + "I0225 104139.548 registry_meta.py:31] Registering FullyConnectedWithEmbedding to DiscreteDQNNetBuilder\n", + "I0225 104139.549 dataclasses.py:73] Setting FullyConnectedWithEmbedding.__post_init__ to its __post_init_post_parse__\n" + ] + } + ], + "source": [ + "from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected\n", + "from reagent.gym.utils import build_normalizer\n", + "\n", + "norm = build_normalizer(env)\n", + "net_builder = FullyConnected(sizes=[8], activations=[\"linear\"])\n", + "cartpole_scorer = net_builder.build_q_network(\n", + " state_feature_config=None, \n", + " state_normalization_data=norm['state'],\n", + " output_dim=len(norm['action'].dense_normalization_parameters))" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:41:39.905841Z", + "start_time": "2021-02-25T18:41:39.726095Z" + } + }, + "outputs": [], + "source": [ + "from reagent.gym.policies.policy import Policy\n", + "from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler\n", + "from reagent.gym.agents.agent import Agent\n", + "\n", + "\n", + "policy = Policy(scorer=cartpole_scorer, sampler=SoftmaxActionSampler())\n", + "agent = Agent.create_for_env(env, policy)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create a trainer that uses the REINFORCE Algorithm to train." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:41:40.079237Z", + "start_time": "2021-02-25T18:41:39.907857Z" + } + }, + "outputs": [], + "source": [ + "from reagent.training.reinforce_trainer import ReinforceTrainer\n", + "from reagent.optimizer.union import classes\n", + "\n", + "\n", + "reinforce_trainer = ReinforceTrainer(\n", + " policy=policy,\n", + " gamma=0.99,\n", + " optimizer=classes['Adam'](lr=5e-3, weight_decay=1e-3),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "RL Interaction Loop" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:41:44.651922Z", + "start_time": "2021-02-25T18:41:40.081054Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0225 104142.407 gymrunner.py:132] For gamma=1.0, average reward is 18.6\n", + "Rewards list: [15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18.\n", + " 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18. 15. 18.\n", + " 15. 18. 15. 18. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12.\n", + " 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12. 29. 12.\n", + " 29. 12. 17. 21. 29. 12. 29. 12. 17. 21. 17. 21. 29. 12. 17. 21. 17. 21.\n", + " 17. 21. 17. 21. 17. 21. 17. 21. 17. 21.]\n" + ] + } + ], + "source": [ + "from reagent.gym.runners.gymrunner import evaluate_for_n_episodes\n", + "eval_rewards = evaluate_for_n_episodes(100, env, agent, 500, num_processes=20)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Run training loop (managed by Pytorch Lightning)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:41:44.832445Z", + "start_time": "2021-02-25T18:41:44.654204Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0225 104144.656 seed.py:57] Global seed set to 0\n" + ] + }, + { + "data": { + "text/plain": [ + "0" + ] + }, + "execution_count": 7, + "metadata": { + "bento_obj_id": "139934208915616" + }, + "output_type": "execute_result" + } + ], + "source": [ + "pl.seed_everything(0)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:41:45.015184Z", + "start_time": "2021-02-25T18:41:44.834628Z" + } + }, + "outputs": [], + "source": [ + "num_episodes = 175\n", + "reward_min = 20\n", + "max_steps = 200\n", + "reward_decay = 0.8" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:41:45.206743Z", + "start_time": "2021-02-25T18:41:45.018149Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0225 104145.027 distributed.py:54] GPU available: False, used: False\n", + "I0225 104145.029 distributed.py:54] TPU available: None, using: 0 TPU cores\n" + ] + } + ], + "source": [ + "from reagent.gym.datasets.episodic_dataset import EpisodicDataset, EpisodicDatasetDataloader\n", + "\n", + "pl_trainer = pl.Trainer(max_epochs=1, deterministic=True)\n", + "dataset = EpisodicDataset(env=env, agent=agent, num_episodes=num_episodes, seed=0, max_steps=max_steps)\n", + "\n", + "train_rewards = []\n", + "class TrainRewardsExtractor(EpisodicDataset):\n", + " # a wrapper around a dataset to enable logging of rewards during training\n", + " def __init__(self, 
dataset):\n", + " self.dataset = dataset\n", + " \n", + " def __iter__(self):\n", + " for traj in iter(self.dataset):\n", + " ep_reward = traj[\"reward\"].sum().item()\n", + " train_rewards.append(ep_reward)\n", + " yield traj\n", + " \n", + " def __getattr__(self, name):\n", + " return getattr(self.dataset, name)\n", + " \n", + "dataset = TrainRewardsExtractor(dataset)\n", + "\n", + "dataloader = EpisodicDatasetDataloader(dataset, num_episodes_between_updates=1, batch_size=1, num_epochs=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:42:15.446538Z", + "start_time": "2021-02-25T18:41:45.209061Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0225 104145.227 lightning.py:1381] \n", + " | Name | Type | Params\n", + "---------------------------------------------\n", + "0 | scorer | FullyConnectedDQN | 58 \n", + "---------------------------------------------\n", + "58 Trainable params\n", + "0 Non-trainable params\n", + "58 Total params\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 0: 100%|██████████| 175/175 [00:30<00:00, 5.83it/s, loss=-0.075, v_num=3] \n" + ] + }, + { + "data": { + "text/plain": [ + "1" + ] + }, + "execution_count": 10, + "metadata": { + "bento_obj_id": "139934208915648" + }, + "output_type": "execute_result" + } + ], + "source": [ + "pl_trainer.fit(reinforce_trainer, dataloader)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Plot the rewards over training episodes." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:42:16.328382Z", + "start_time": "2021-02-25T18:42:15.448696Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "(
,\n", + " )" + ] + }, + "execution_count": 11, + "metadata": { + "bento_obj_id": "139927970772224" + }, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAt4AAAJlCAYAAADtmfXpAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nOzdd3wUdf4/8NdsS0hCSAJBQhEFAQsqgYg0yyEKKIrtFLuUs3wF/Xl2QbBgAfHsWBH1xHZ6yiEoit55eIeHIIh6eooNgSAhJKRutszn90d2N1tmdmdmZza7m9fzHh6wO/OZz8xs4D2ffX/eH0kIIUBERERERJaytXcHiIiIiIg6AgbeREREREQpwMCbiIiIiCgFGHgTEREREaUAA28iIiIiohRg4E1ERERElAIMvImIiALGjh2Lf//73+3dDSLKUgy8iajDGzt2LI444giUl5dj9OjRuPnmm9HY2Bh6/+abb8bgwYNRXl4e+u+0004DAGzfvh2DBg2Cz+cLbTto0CBs2bIltP8vv/yCQYMGhf580UUX4fDDD49ob9OmTQAAIQSeffZZnHTSSTjiiCNw/PHHY9GiRfB4PIr9GT58OKZOnYoffvgh4px2796NW2+9FWPGjEF5eTkmTJiARx55BE1NTQCAQYMGYciQIRF9eOaZZyy7xuFWrFiBM888E+Xl5RgzZgxmzJiBDRs2GG5v0KBB+OWXX0J//s9//oODDz44dF7jx4/Hm2++aVLviYiMc7R3B4iI0sGTTz6JUaNGoaqqCtOnT8fTTz+Na6+9NvT+9OnTI/4cT1FRER566CE899xzqtvMnTsXv//972Nenz9/PtauXYsFCxbg8MMPx08//YRbbrkFP/zwA5544omY/rjdbtx+++2YPXs2Xn31VQBAbW0tpkyZgvLycrz66qvo3bs3KisrsWTJEmzbtg0HH3wwAGD58uXo27evruuUrKVLl+Lpp5/GHXfcgTFjxsDpdGLt2rX48MMPUVFRoastn88Hh0P5n7Hu3bvjn//8J4QQ+PDDD3H11VfjyCOPxEEHHWTSmRAR6ccRbyKiMKWlpRgzZgy++eYbw22cfvrp+N///of169fr2u/nn3/Gyy+/jEWLFqG8vBwOhwMDBgzAo48+irVr12LdunUx++Tm5mLixIn49ttvQ68tXboU+fn5uP/++9G7d28AQFlZGebMmRMKuvWor6/HjTfeiBEjRuB3v/sdFi9eDFmWAQB//etfcd5552HBggU46qijMHbsWHz88ceq7TzyyCOYO3cuTjrpJOTl5cHpdGLs2LG46aabAABbtmzBueeei4qKCowZMwZ33nlnxGj/oEGDsGzZMpx00kk46aSTcMEFFwAAJk+ejPLycqxatSrimJIkYdy4cSgsLMTWrVsBAB9++CFOOeUUVFRU4KKLLor5tiBIlmU8/fTTGDduHI4++mhcc801qK2t1X39iIiCGHgTEYXZtWsX1q5di/33399wG7m5ubj88svx4IMP6tpv3bp16NGjB4444oiI18vKyjBkyBDF3OOmpia88847Ef1dt24dTjzxRNhs5vwVf9ddd6G+vh5r1qzBn//8ZyxfvjwidWPLli048MAD8emnn2LGjBmYPXs2hBAx7WzatAktLS048cQTVY9ls9lwyy234NNPP8Wrr76KdevW4eWXX47YZs2aNXj99dexatUqLFu2DAiM3m/atAknn3xyxLayLOODDz5AfX09Bg4ciJ9++gnXXXcdbr31Vqxbtw7HHnssrrjiiojgPujFF1/EmjVr8NJLL2Ht2rXo0qUL7rzzTkPXkIgIDLyJiFpdddVVKC8vx3HHHYeSkhJcffXVEe8/99xzqKioCP0XHKFVM2XKFFRWVqqO/s6fPz/U1hlnnAEAqKmpQWlpqeL2paWlqKmpienP0KFDsXHjRixcuDD0Xm1trWo74c4444yIc1q7dm3MNn6/H6tWrcJ1112HgoIC9O7dG1OnTsXf/va30DY9e/bEOeecA7vdjjPOOANVVVXYs2dPTFu1tbUoLi5WTQ8BgMGDB2PIkCFwOBzo3bs3zj33XHz22WcR21x22WUoKipCbm6uaju7d+9GRUUFRowYgcceewwLFy5Ev379sGrVKhx33HEYPXo0nE4npk+fDrfbHcqxD/faa6/h2muvRY8ePeByuTBz5kysXr06lM9PRKQXc7yJiAA8/vjjGDVqFNavX4/rrrsONTU1KCwsDL0/bdo0zTneAOByufB///d/ePjhh/GnP/0p5v05c+bE5HgXFxejqqpKsb2qqqpQ2kh4f3bu3IkZM2bgp59+CqWRFBUVqbYT7q233kqY411TUwOv14uePXuGXuvZsyd+++230J+7desW+n2nTp2AwEh8tKKiItTU1MTNzf7pp59w33334auvvkJzczP8fj8OO+ywiG3KysoSnlswxzva7t27I87FZrOhrKws4nyCdu7ciauuuirimwObzYbq6mrst99+CftARBSNI95ERGGGDx+OM888EwsWLEi6rTPPPBMNDQ344IMPNG0/YsQIVFZWRlREAYDKykps3rwZI0eOjNmnZ8+emD17Nu6++2643W4AwMiRI/HBBx+E8rCTUVxcDKfTiZ07d0b0x0jgWV5ejpycHKxZs0Z1m9tvvx39+vXD6tWr8fnnn+Paa6+NSVuRJEn3sYO6d+8ecS5CCNXz6dGjB5555hls2LAh9N+XX37JoJuIDGPgTUQU5ZJLLsG///3vpCZYAoDD4cDMmTPx7LPPatr+wAMPxJQpU3D99ddj8+bN8Pv9+P777zFr1iyMGjUKo0aNUtxv9OjR6N69O1577TUAwNSpU9HY2IibbroJO3bsAAD89ttvuPfeeyMmYWpht9sxYcIEPPjgg2hoaMCOHTuwdOnSUDlFPTp37oyrr74ad955J9asWYPm5mZ4vV58/PHHoVSZxsZG5OfnIz8/Hz/88ANeeeWVhO1269YNv/76q6Y+TJw4ER9//DHWrVsHr9eL5557Di6XC+Xl5THbnnfeeXjooYdC13Dv3r1xHxqIiBJh4E1EFKWkpASTJ0/G4sWLQ68tWbIkoub10UcframtSZMmacq3Dpo7dy7OPvts3HDDDSgvL8eMGTMwfPhwPProo3H3mzFjBp599ll4PB4UFRXhlVdegcPhwDnnnIPy8nJccskl6Ny5c0RqSbASSPC/u+++W7Ht2267DZ06dcK4ceNw/vnnY9KkSTjrrLM0n1O4qVOn4uabb8bixYsxcuRIHH/88Vi2bBnGjRsHALjpppvwzjvvYOjQobjttttiJksqmTlzJm6++WZUVFTEVDWJ1q9fP9x///246667MGLECPz973/Hk08+
[... remainder of base64-encoded PNG data omitted: figure for 'Plot the rewards over training episodes' in the REINFORCE_for_CartPole_Control notebook ...]
74cYbZ7d3l1TNmnUlPB4PPJ4WVFQMx8SJk9q7S0RERESqGHhThOuuu6m9u6DZc8+9mFHfJhAREVHHlpLJlTU1NfjDH/6A8ePH49RTT8XMmTOxd2/rJLLNmzfjtNNOw/jx4zFt2jRUV1eH9ov3HhERERFRJklJ4C1JEmbMmIHVq1djxYoV6NOnDxYtWgQhBG644QbMnTsXq1evRkVFBRYtWgQEyqupvUdERERElGlSEngXFRXh6KOPDv15yJAh2LlzJ7788kvk5OSgoqJ1pb8pU6bgvfdaS8bFe4+IiIiIKNOkPMe7dTXCVzB27FhUVlaiZ8+eofdKSkogyzJqa2vjvldUFL2oDBERUfra5/XDJgGdHfaY92q9PjT5tc1XkSChe44D9qgFxLyywB6PDwJt1bsK7HYUOmOPV+P1oTnseDZIKFVoMx6l43V22BXPL115ZYEarw9dXYnPPfqaRVM791qvDw5JQkGC6+KVBX6ta4ZLCNh03Id4PLKMao8/4h6Fs0kSurscuo6XqE2j1PrikWXs8fgBA8ezB9pUW2yvvaQ88L7rrruQl5eHCy+8EB98kLolsZVKu+zebYPDYf2gfyqO0VHx2lpLy/W12WwoLe2ckv5kE14z66Tjta2vbYQECaVFeTHv7atpRL4Acp3xf96EAOo9PuQU5KI41xnxXlVTC5xNEjrntP6z3uz1Q7JJKC3Kj2mnproBBTYJOQ4bfLJAk9ePLkV56KQxaC4t7Yy6Fi9q690ocNphs0lo9PjhdNpRWthJUxvpYF/gHFz5OSjp5Iq7bV1NI/KEQCeFB5lGjx8ulx2lnWPPva6mEZCU73u4WrcXOxvc6F+SjxyTHl5q3V7sa3Cjs8sRs+K11y/Q7POjqDgfOXbt/47Ga9OoeH3Z2+yBs7HF0PGcNhtK81wRgXc6/N2Q0sB7wYIF+OWXX/Dkk0/CZrOhrKwMO3fuDL2/d+9eSJKEoqKiuO8ZoVTHW5Zly6ti6KnjffbZp2LhwgfRr99Bho61ZMlTuPjiaXA6nRq2Vvf66y/jxBMnoLi4JKl2rKZ2bc8++1S4XC64XDmh1+69dxHKynrGbBvu0kvPx1NPPYecnNyk+7Zq1Qr8+99rMX/+Ql37BfvudLrg83kxZcqF7VabPPz6VlbuxIwZF2Hlyg9jtpNlmbVndWK9Xuuk67Wtd3shIFDl9Ue8LoTAPrcXnR025Pjj/5MshMAetxeVLT74XJHb7mrxQgggJ/DPXJPXhxqfjN0ef0Tg4RcCdc0eFLscyHHYIftlNLV4Ue2XkWNLHIAFr2+jz48mjw9dcp1w2WzY6/bCJwFVLT59F6YdBc9hp9sLf078fzfr3R7k2GzI8ceOvFa7PfA3S6hyx557vdsDvwDyPb64I6/1Pj/gtGNPdQNcGu6DFg2B8yv2Czhskcf2Bd7b45d1HS9em0b5A59Bpb7Uef1o8vrQVTb2TcCeJk/o9x2ujveDDz6Ir776Ck8//TRcrtYny8GDB8PtdmPDhg2oqKjAq6++iokTJyZ8j5QtXfoMzjvvIhMC71dQUTE87QPveObPX6D7Aeb551+2rD96BPv+449bMW3ahRg5cjS6dStN2fF9Ph8cDlYaJTKXgE+0Bs/hAZgcCKjtSBxUSJKEXLsNzX45oh1ZCLTIAoVh31A5JQlCtB7TGda0NzAA5YwKYowvpSeF/j/TVuML9tctC/iEgMPwEG78/YQQ8AoBV4pTHkL3Q+GwSadfmHgqwaZkhQ+QHDiL9EoWSU5K/nX9/vvv8eSTT+KAAw7AlClTAAC9e/fG448/joULF2LevHloaWlBr169cP/99wOBr6/V3usIZs68DIccchi++moL9uzZg7Fjx+HKK2cBAJ577mmsWbMaLlcOJAl45JGn8PTTiwEAV145DZJkw6OPPoV16/6Fv/zlFfh8XgDAVVf9P1RUDAcCI6sTJpyCzz77D6qr9+C88y7EWWedixdeWII9e6owZ85NcLlyMG/efFRX78EzzzwBj6cFfr8fF188DePGjU/Yzz179uChhxbit992oaWlBePGjcfFF08DAHzzzdd46KFFcLubkZvbCf/v/12PQw45DJ9/vgGPP/4wliz5MwBE/Hnbtp9x9913wO12Q5b9mDTpNJx77oW6ruuYMRWYOvUPWLv2Y7S0uHH55Vfh+ONPCL33/vv/RG5uLv70p4X4/PPP4HS6kJfXCU888RwA4N1338Err/wZkiShZ8/euPHGW1FcXAKv14sHH1yIzz/fgC5dijBgwKCI4y5b9gL+8Y8P4ff70a1bd9x002x07dotbl/79TsInTsXoqpqdyjwVmvn9NMnYunSZSguLsH1118NSZJw//0Po6ZmL6ZOvQBvv/0uNmxYH/c+DhgwCF9//SUKCwuxaNEjeOON1/DKK8uQn5+PkSPH6LrORBRJoDW6lQGEJxL4AxGvXWNk0ckmodEn4BECOYHgyS0LQAh0ChstdAZGI71CwBkWtngCx3PZ2gLmZEgxv8lAQqDJL6MwToqHEOqnqOWhwyMLuOIMLAcffKx4eIl3a/Qez4r+BS+LUtsi8JCQbnnayUhJ4D1gwAD873//U3xv6NChWLFihe73zNbg86NR4+QWPWxeCZ00TKxQ8ttvu/D448+gqakJ5547GZMmTUaXLkV4/fWXsXz5e8jJyUVTUyNcrhxcd91NeOutv+CJJ55DXl5rLtnRR4/AiSeOhyRJ2LbtZ1xzzf/hrbdWhdp3u9146qmlqKzciYsvPhcTJ56KSy6ZjhUr3o4YMe7atRsWL34Wdrsde/dWY/r0izB8+EgUFhaq9rNPn/0xf/5cXHrpDAwZMhRerxfXXHMlDjnkUAwZMgyzZ9+IW26Zi6OOOhobNqzH7Nk34rXX3o57Pf761zcwZsyxuOiiqQCApqYG1W2DDw4AYLfbQ4E8Ag91zz//MrZt+xlXXDEdRx5ZHjG6v3Xrd9i0aQNeeukvsNlsqKurAwD8+ONWPPnkY1iy5CV069YNzzzzBB588H7ceee9WL78TVRW7sRLL/0FPp8PV131B5SVlQEAVq9ehe3bt+Opp56HzWbDW2+9gcceewjz5s2Pe75btmxGly5FOOiggQnbGTq0Ahs3fobjjz8Bu3ZVto50+XzYsGE9hg1rrQw0cODBce/jzp3bsXjxs3A4HNi69Xs8//wSPPfcMpSUdMWiRffF7SsRxRcMrPxCREzkC2YuaJ3YmBvIgXX7BXICEYvbL0OSJOSEffUfHNH2yAJ5Yf/8eANf2UfHgMkGVJkYFgXP2S5JCQNvw8cIHMSjNJyr0BczI9tgU0r3RoreSCcz73fwo68UgcV74MlU/D45jf3udyfAZrOhoKAAffseiB07tqNnz17o1asP7rprHoYPH4FRo45BXl7s5BkA2LFjO26/fTaqqqrgcDiwd281qqv3hEZax407CQBQVtYzNLLat+8BMe3U1tbg3nvvxPbt22C3O1BXtw/btv2CwYMPV+1nt26l2LRpI2pra0PtNDU14ueff0ZJSTc4nU4cdVRricmKiuFwOp3Ytu2XuNdjyJ
ByLF78CNxuN4YOrcDw4cPhV8i3Q4JUk0mTJgMA9t//AAwc2DrKO2bMcaH3e/bsDZ/Ph/vuuwtDh1Zg1KhjgMDoe2vaR+v1mzz5TFx66fmB9zZi4sRJcDgccDgcGD9+IrZs2QwA+OSTf+Lbb7/BtGmto/N+vw8FBer5X3Pm3AQhBHbs2I677rovlDoUr52hQyuwYcN6lJZ2x6GHDoYQAl9//VUg8B6u6T6eeOKEUIrJpk0bMWrUMSgp6Ro41zPw97+nbjI0UbbyCSB8Gl/biLe28MIuSXDZbHDLMroExs6bZRm5tshRQZskwWGT4I3KIfEKAWfYtkYHEtVGJzNRnt2Gep8fPlk9b1kkGWx6EuTyBKuEmHoN4zQmJd4kZaRAb4TCNZIBmJRKnjYYeAcUOOyGRqUT0TO5Mlr45ECbzQa/3w+73Y6nnlqKL7/8Ap9/vgHTp1+IBx54FAcdNCBm/9tvn42ZM6/FscceD1mWMW7cGHg8nrD2XVHtK0+KeeCB+zB69LG45577IUkSpkw5Ex5PS9x+CtE6AvPssy/G5Atv3fq94tdGkgTY7Q4I0Xa9wvt7/PEnYPDgI7B+/ad46aXnsWrV33DbbXclvI7xtP6cR/aloKAAL58gygUAACAASURBVL74GjZt2oiNGz/DE088iueee6n1yTuq38E/Kv2F0XYMgUsumRYK+BMJPjR89NEa3HPPHTj88CNRUtI1bjsVFcPxwgtLUFraHcOGHQUhBDZuXI+NGz/D1KmXARruY6dObbPu450PEekX/InyR/1sBccOHDqCi1y7hHqfDFkIyALwyQKdXbH/fjklKZTTjWCusSyQr1DFwuhPfES3M+yvjWB38x2tgXeTX0ahzZpyiF5ZxOT3K/XFzEsYb8Q7epv2FAysFR/mhAgF5tmCtdgyTFNTI2pra1FePgzTp1+Ofv3648cffwAA5OXlo7GxLf2ioaEhVMnjnXeWRwSx8eTn56Ohoa2d+vp6lJWVQZIkfPbZp9ix49eEbeTl5ePII8vx0kvPh1777bddqK7eg759D4DH48Hnn28AAiPJPp8Pffr0Rc+ePbFz5w7U1dVBCIE1a1aH9t++/VeUlHTFySefiqlT/4D//vdrTecTbeXKvwEAfv11G7Zu/R8OO2xwxPs1NTVoaWnBiBGjcMUVM1FQUICdO3dg2LCjsG7dv1BdvQcAsGLF26Gc+YqKo/Dee6vg8/nQ0uLGBx+0LfY0ZsyxeOutN0IpKx6PB99//13Cfo4dOw5HHTUidA3jtdOjRxlsNhvee28lhg0bjoqKo/Huu+/A4XCgR48egM77OHRoBdat+wQ1NXuBwOeHiJIXHXj7AnWb9eSw5tpsEELALQs0y3LotWjOwIh38EHaH5iI6QwbQjQrpMnEyZXBDjsD3yLESzeNN+Kd6NwlSYIcmOiaqC9mit8n89s0KjT6rtB4st80pCOOeGeYhoYGzJ59IzyeFsiyjIEDD8Zxx/0OADBlygW4+uorkJOTi0cffQpXX/1H3Hrr9ejcuTOOPnoUunTpoukYZ589Bffccydyc3Mxb958XHnlTDzwwAIsWfI0DjnkUPTvHzu6rmTu3LvwyCN/wsUXnwsEgvFbbpmLrl274e67F0ZMrpw/fwGcTidKS7tjypQLMX36RSgpKcGQIUPx008/AgA++ugDvP/+e3A6WwviX3vtDarHDs/xBoCbb56Dgw8+FADg9/sxder5cLvduOGGW2Oqt+ze/RsWLJgPv98Pv9+PESNG4bDDDofNZsPll1+Fa6+9KjC5shduuOFWAMBpp52JrVu34sILf48uXYpw8MGHoaamGgAwYcIp2LevFrNmtY48y7KMM874PQYMGJjwGl5xxUxMn34hLrjgkoTtDBt2FLZs+SKUCpOTk4MjjhgSakvPfTzooAG45JJpuPLK6cjLy8fIkaMT9pWI1LWNeEe+Hp3zrUUwrcTtl+EPpJ84FZpwSRIQVlFDraIJDHzLFa9iRqYIP+M8uw21Xh+8cuSDSYR4kXecY7hsElr8Ah5ZhlNlRN2KEe94rSWbamJqjnfgV1mhN9mYaiKJDvKdslId7127fkGPHn0tPW4yqSYUn5FrG6xcEpyASuq0Xt9U/Bxlm3StNZ0N0vXa/tLsaa08Yrehe1jN6Eq3F3YJEa9psbvFC28g1aST3YZurthxNI8so9LtRTeXA/kOO+q8ftR4fejdyRUK9r2ywE63J7RNIsHrW+/zY6+nra3dLV74BNAzN7lytqm0z+tHrdeH/Tu54BfADrcHRU4HuigskrOt2YPODhuKnbHXeVeLFxKA/RTu4a/NHuTZW0fT1fYHgGqPD3KOA/k+GXk6FrSJp8brQ51PRl+FxYFCn40cp2LqkZrgZ6hPJ5dpK2wizvU1+vOhJF3qeDPVhIiIyEJCiND36NHpBkZGvBFILfHJArIQ6KQyJOiUJEBqm2DpDRxL6XjmVDXJrHG88N46bK2TUT2y8mBDMikPUuBexKtsYs2Id7xB+tAEJZOPaIyk0hUZ6nnxmYqpJtShfPLJhvbuAhF1YOE53kKIQKqI/nZy7RLgDf5eeQxNCqSgBFNMPAppFEZDmpgYKZDWklkCi7MEK7xAUp3gF+/ctOS3u2ytJQvVJli2lRO0oJ5gcpsktb1WNkm9nGC2jRBn2/kQERGllWCwYgtMspODkx1Fa2RhZMTbGRi5dtlscfd3ShI8gQmWXiFi87vjVJTQQgr7NSPD7rDrkeguGKmuIQKHcNla771KBdxQwG12VRMDaemamD0GLUFSnGeQjZMrO3zg3UFS3IkswZ8fosSCPyXBJcmDwZcf+mp4h5MkCd1cDpQolBEM57RJ8AmElqs3a8Q746JsFVrO34xTDS4Xr1bP26rLqXp+ST5wmU3twU1OogJLuurQgbfD4UJjYx2DByIDhBBobKyDwxE7cYeIYgVTSvxR+d56aniHy7XbkKNQRjCcM5AC0hQoledKUEdaq+jt1XJ0M4kkJSjBF+f1ROfutLXm28fLIYcFI95qjFY1SWWqSTDFx5ZlY94dOse7uLgUNTVVaGio1bC1MTabDbLKDxolh9fWWlqur8PhQnFxacr6RJSJYke8RcSvRka8tQqOcAdrVKuOeGd40GyE7uXIDdym8DQjp6S+dLwVl1/EGS02ft+t+aBIAKIvTfBfn2wb8e7Qgbfd7kC3bmWWHiNdS1tlA15ba/H6EpkjOBoaXI48lGoSmGhn5VfPzsDiPF5Zhl2SVEvAmZHjnenU0h20rACZqF0AcNlscKss0hP8jKT6+cfwfTc5Gm5dZCjy2gSvSbalZmTb+RAREaUleyDA8IdNrrRL5gcx4SRJCo20uxTKDppW1SQDB81TMnEvLAfFFbj30auXxnTKrEPHeS+p87bg82pTSNdJ9oEnXTHwJiIispAIC4HsEuAL/N5oDW+9guklqisyGor3Mi3MTkwtV1vLmaqWIQwTneMfub/5VU0gRMKgVeg8opWTQKPbDq5kmW11vBl4ExERpUBw9LltcmWKAu/AMZSWipcCi+wYDalCNbATTExMR/Fyo
GM2TKo0X/AaqS8zZFWOt1rvgvfdyHGt+MRKipMrrTtee2LgTUREZKHw4MYenWqSguPn2NRTTZBMDe4sG4lMlOMdX/wFdsJ/VRxVD+V4mxuCx7tD6XT3bGitvhP+LUHbxNR265YlGHgTERFZKHzkzi61BtxyIMhIxYh3rk1Cz1wXXAlKD+oRnR+diQvoxEgw8q9eE1vbPYy3VcrreKdZCcjgJQwf9Q4uNJVlcTcDbyIiolSQAiPeQohQWTmjNbx1HVeS4uZ3SzAp8osasUx3SpMrjY54a9kvGFzGa8/0Ot5mf74sur3BYDT849M2uTK7Qm8G3kRERBaKTjUB2uo5p2LEW4vk46n0OA9dRGTonWjqqdokv8RLzUf+msoc74Qj3ia3aZTStWGqCREREekWGrmT2hbPaEnB4jlaGeqBiE01yTQpKScYJjhyG/2tgAj7piCVXxik04TY4EONHNaj4II6mfjZioeBNxERUQpIESPerdms9jSJKsxaOjxdAjkjkiknqCS0nxTxS8quke6VOduRcqqJAKRsSzRh4E1ERGSp8BHOYKDtkwVscVaSTCUzutD+Z6GfmeUEtYxUq+V4K6VXpILRCbFWlRNE9LUIPDiwjjcRERHpJqF1+fbgqHc6pJkEJR3waZg4mO6SKycYv93wX9VWaDRb4nYl3bktVldfkcP6I2dpkJqN50RERJQ2ooOVtsC7XbqjKNlJdml0KprpzfFO9hzVUk2sG/GOv3JlOpWADOW/h72m+RuJDMPAm4iIyEJtZdFaBQPudBnxbi0nmC4hWPtRm2yYdI53zOupGfIWGZSmYVP4xkQWIutKCYKBNxERkTqvLLDT7Q2tNpkMKSrgTkUNb22MLR2uJKPCd52TD9ViWK0jx5LUmmqUNjneBquaWBHLt6WatL0msjRIzcZzIiIiMoVPCHhlGT7ZeEiknmqSHpG3kV5EpwFI4W9kiJgFbszO8VaYlKk0ETNiBNzEbx4SVTUxWsfbChJaP1Dh14KpJkRERB2MiPo1mUaCMYQjzVJNzJRBcbeyOKtvmnG3lILd9hrxbp8DKpMCZQNF1DNI9v2EMPAmIiKyVHSOt9PWmnLgTJPA24yFVNLjTPSJnSCqfBZm5ngrXWur0uutWLnSSrZAJZMgGSItym2azaFhGyIiIjIoejJdjs2GPrnOjJn4piQ7FtARgKRh/FFEPzpFShTARu8Vb9tUX790ul/RDyUc8SYiIupgTEk1QWtUER5op1PQbcpCKqHzSadQLj61kohqZ6B6x3TcSglS7JLx+pvRJOGIt6T/zustwaiHBCmijreVx2pPDLyJiIjUmJAHkBEBhO5ZdpFnlfbnp4XKSZiaahJnO5tCxZOkxbkx6ZhqEnrQFaJ1AZ2s+GBFYuBNRERkpXSKbhSYWd0izU81RrxAWMu28feJfVU5x7v1FZvJkbAlD3zCusfImGvDOt5EREQdUzLxUEaMeBuQ8StXRt1UtXMQCTZIeO5S5DcDap8lM0e8hRAJv61RKm3YnqSwOt7BSZZplJFlGgbeREREKsyIS9I98DZzoDWdAjktzBrxjnfiiet4B94zobpM7LHVe230mw6rPsu2sDrewWuUjUFqNp4TERERaWVgkl1ME6Z1JnWUFtBRekPrlVGr/x19jOgqN+E53maF3hn2/ANEPZTIYa9lGwbeRERECWgJqlT3TfOvzNNt5DOVNI14K6xAmbgVtTre6kvG2ywImONXNUmvAD38cxh8OEmn6j9mYeBNRERkoXQKbkylEBNl0rkKRJ6D0RBPz36KqSbBtAoTg8xEeenpyCa1jnQLIZhqQkRE1BGZEkimeXUGMybZZdPApNqItBmBebwl460Y3U24cqUQur/NsbKON0TrWDcnVxIREXVgrGoSS8/ExHQUs4BOkosAqaWohFMsJwgBSBJsknmTU7U1o/9TGf0tgZmCl1+EpXal8wOrUQy8iYiIOjAz6nhnWnikpdxeaNsEVydhGcKobbUurJOUhHnpiVfqTLVgQCpEZN57tsnGcyIiIjKFaeUEMy0yNShdgjitwkdU4waikmQ4FSQy1SSQThG+NLpo3cbUso46+5UOgv2RmWpCRETUsWVzqomh6hYxOwTzBDIt9FagUE4w7v0LS5Ewcohg+0oVT5JlRb8ty/EORNkCnFxJRETUMZk15J1lYvKj27EvRihNmDQ79UK5nGDse1Z8I5IoPQZpeM8iU01Edg53M/AmIiKyVtqPeKc4xSGtaLkxQluutJ7D6cn/Tka89JhQX9LkpgW7KgeWjrexjjcREVHHIqJ+TaaNtGagrFy4TAuP4k1wVCv3Z5TiqHpYoyJQblKSkr8PoTYt2tbKD3Mw314ESgpm2mdKKwbeRERElhJpHUQYKdmmmuJtSo9SR1L9Q5ztVKieu8IiPYo53hqOoZdZI/VBVgbEoVSTLJ+QzMCbiIgokSQiSpEhX5nrPcVsyPHW8p7RW6+2ZHz0u23BrIkrV+rotJZ88FSITDURsGXcp0obBt5EREQqzEg1SXfGwpsMn/ymUOc66ZUpNXxIFFNNAkGn0sTLZMUd8U6z+xddxzvNumcaBt5ERESqTMi3TTA5r90lWQ5Py+vpJu6Id9RwcbJBYMLKKSLOewbpqeOdVvdMkkLlBNP6ZyYJDLyJiIgslO4TxZIe6dU56ptOYha3SYKmYFetnGBSR058vHj0paVYR5Ik2AIVTeQ0/5lJBgNvIiKiBDIsnjSkI5Us15vjbWSSolJ1EuXJlSKwgE7ivmll5cqVVgbEwZKKAgK2LM01YeBNRESkQij8zkgb6RxDmDFancanpyKY3xG7ZHzspsmOSYcfo61kXnhPJElqG3E38eklqWosJmytV3AVVaaaEBERkSHZMAocs61KYJQp5xqkpZyg1nNS207v6pimjHhryB8xPMJu4VOkDRJkISBncYCaredFRERkGsMl5YQARLrX8TahDYUyeelMzwI60HyNjAW7wYcYK+LZeHnr6VbHG+Ej3mn+LVEyHKk60IIFC7B69Wrs2LEDK1aswMCBA7F9+3ZcddVVoW3q6+vR0NCA9evXAwDGjh0Ll8uFnJwcAMD111+PY445JlVdJiKijs6kODLZiXupkPSpSlKGhN1tTLkrKhGi1uA+egEdM9Zt1PftRfrcNQmAP5Brkgk/M0akLPA+4YQTcPHFF+OCCy4Ivda7d28sX7489Oe7774bfr8/Yr9HHnkEAwcOTFU3iYiIQpINSdInpFEXOVqtfWw3estMCpOUYs14S8bHG33Vc97x6nhbIf6k0PS7YzYA3sDFsaVf90yRssC7oqIi7vsejwcrVqzAkiVLUtUlIiIibZKMoLM0hsh4ivfF5Kcl5Vz42JUrgw9AplY10VJO0ITjmUWSJPgDPcrWn5mUBd6JfPTRR9hvv/1w2GGHRbx+/fXXQwiBYcOG4Y9//CMKCwvbrY9ERNQxJbtseDrnq5qVnS3prAndnvTkeGv9HkBbHW8JUlhKTnAOgFXjz0bKICbTZrIktH2I0vhHJilpE3i/+eabOOussyJeW7ZsGcrKyuDx
eHD33XfjzjvvxKJFiwy137VrgUk91a+0tHO7HTvb8dpai9fXOry21jHz2voa3PC7veiS60RpQa7u/b2yjL17G1FckIPiXJdp/TJTg8eHhrpmlHTJQ57TnnD70tLO2FvdgKJcJ0rzc0Kv761uQJcch6HrlGrBc+4ads5CCOypbkBRngvd8trOq7G2CTYJKO2Sp9hWvceHxkBbncKun8PtRXODG91K8uG0tdWy2FvdgMJcB0rzcyEHjlmclwMJQF5+jub7EI+92QN3YwtKSwpgV8nZ8MkyavY2okt+Dko6afts1u5tQL7LunssN7ZAbvYAALoVdkKBy9wwNR3+3k2LwPu3337DZ599hoULF0a8XlZWBgBwuVw4//zzceWVVxo+RnV1A2Q59Y/ipaWdUVVVn/LjdgS8ttbi9bUOr611zL62+zw+NPn8sLf4YG/26t7fJws0uT2o9frhc7SY1i8zuf0ymlq8qPbJaLTHL3YWvL4NzR5ILV7Ymjyh95qaPYDba+g6pVpT4Jz3+mU0BoJiIQSamj2o8fggGtvOq87thV0Cqjx+xbaag9fPLyMnLMCu9/nR5PGhWhawh33l0dTsgXB7YW/ywh845j6vH526FaCpsUXTfUikzutHk9eHPbL6QjTBY9d4/fA3aPtsNjR74Ld74bToHtd5fWjytl7nGp+M5iSvQ7hU/b1rs0lxB3vTopzgW2+9heOOOw7FxcWh15qamlBf33qBhBBYtWoVDjnkkHbsJRERdVTJppp0FJl2vhE1tiUpuZwgjSevdAgpfHEd4z3Q1RUzFk4yW3jCTTqnZyUjZSPe8+fPx/vvv489e/Zg6tSpKCoqwsqVK4FA4D179uyI7aurqzFr1iz4/X7Isoz+/ftj3rx5qeouERFR0kI53u3cj3gyqwK3OeItdqOU422kTS2L5ARz4q36fJi/cqW1n+XwrBhbWv/UGJeywHvOnDmYM2eO4nurV6+Oea1Pnz54++23U9AzIiIiZckHo4GJYuk8fGega4rlBKUMCt6Fnkei+DWl9V4+CVKodnZ4LwyvJKlAaGjFyAOX1fdXaZXPbJMWqSZERETpLJtTTQyNeGdK+ZIElGqRK56akQcnldHs8FH18ADZ9EAzUEElsfS5l5GpP+3YEQsx8CYiIrKKxakE7UFtpUOlNI10pXeU18zwNfybgbZyk21HMGMlSS19ji5tmA7CJ4Jma4CaredFRETU7tIpqFEXmNSnO+BTCO0ybCQ83mh0sjTlkYc9mKV1OlIYy+t4Q89ofeZh4E1ERKQiemTS6P7pHEKYt5BKeo2exqNrZUdh7kI04eksETne0X1LQqI+K/VFU7uw9sMcjLWzOTjN5nMjIiIyR5IjuZkweGe0ekdQBpxiDFMmiCY4ceVRdYXJlXqPaxEhBPzt9M1FsJJJulwLKzDwJiIiUpNkAJIJI8DZHOSoad8cbyn2m5SwyNusOt6aR7yjXnPLAjvc3nYJvoN9zoQHVaMYeBMREanoCKkmRgM+K/OjLZeCSa9xc7zjpJqkktIIv08ICCHQDot9h6WapPVPTFIYeBMREVlFV73o9mG4Z0qRd4ZTe3gwluOtofpL4PMhBf6nvpc+IolR40QL/1i6gE7gV454ExERkW7pH3a36Ug53ur3RYpJL0pqQqFCdQ5J4ZuUiC1SONKsuFJn1Gh8KkmSBEhSVgen2XxuREREpkg2CEnnoDTUN50nqXROGZNqoiKZdBnNS8Yr1PGGZPbKlVo/c1JMGUkR27vWPwmRknKRtjT/eUkWA28iIiKL6Clb195MCakyJPLWsqR627aJbl8wYo5tU2k/KTCqLoSIGPE29SMitIXexh6erP0w26TIhXSyjaO9O0BERJSuko0jMyHVRG/f4qWayCb0J2WU0kCiJhtqGeU1UscbgesYbDq8DT0PBWqSqWqSaFDb6s9yN5cD9rT+iUkOA28iIqIEDIdCGbRkvOaAL4POSY3eSYKSiWerlFISWrky1Uu4Ky0+GvVrquXYsjsZI7vPjoiIqB1lQuZFMOBLup0MOd94os9Bb73v+C+0HSP4toCIuPZmhfdaq5oojnjHaZOSx8CbiIhIRUdINYHOoDn+dpkRnmldUj3IyP1TG1UPBd6ibZuIlJdUVzWJOp5oz7ImHQADbyIiogSMF3PI3uglJqhMdZqEBZQC0dAbcfbRewyE5XhLUe+ltqqJ8r6I0490f4hMdwy8iYiILCICo5nRk/jSjWrAqbONTGLmMvC6jiu1hd7RAbJp11Bjx/WkmpA5GHgTERGpYRQSIxsuSaLVGaPFDYjj1N9OlGqit29aaR7xlmJD74Q53pn2hJVmGHgTERGpSLbCg95c4vaiL8UhzlLoGRSVK30JEb1oezKnkyhVQyhMgowuZ2i1dFu5siNg4E1ERGSRZHJt0126p8/Eo76qZFSeuobSibpzvMNGyIUQlnw+BIy3m6isZObe9fTAwJuIiCih7B7/M2OkNRsCsuhzSKqcYIJjhEa8o45qVqqJlnqCilVNTDg+qWPgTUREpCLpVBON9ZQzid786HSkfwEdg8dQTGcJJLQIlcmVJuXrmL1yZabc23THwJuIiKiDM6OMXUY9Xwjl0Dt6BFjLNQm1oqOSSMTmUuS77R3gtvfKldmOgTcREZGq5MIPq3J4zSfpHmmNreOdOcGa9hHvYJK3iXcxPMfbonKCWif1KqUYqd5DDfnulBgDbyIiIoso5fCmI10rV2ZKdG2A2nUws+Z3ogV0Ui2830KItpUryRIMvImIKCs1+2W4/XJSbYS+dmcskpBZqy6mitUL6CQsJygURt5N+tZA64h+MKc8GGxHBOEKbVLyHO3dASIiIivs8/ohSUCuvf3GmDJlcqWRoFnxtAJBXLqXGmyt+hH7ekzqRZLlBOMuoBP4HyRbxHtmBbjaAu84FVVUnjbT+86mP454ExFRVuIInQ6mRFPZF5Il9RlSm8ApSZDC6oVbU8dbX8P8Zid1GHgTEVHWMiuOSKqcoEl9sJKuHO84bWSKePfF6hzvUHtCOcfbjOBXb7653v3IOAbeRESUlUzJlU2ykUxZMt4IPYFr2lHpZLycZ72Nxgvugw86VjyYCdEa0WtqN6zCCqJWrWSOtzUYeBMRUZYS/OpcI0MjrVLcP6Y19YBXz6uB9yTJUCJ/KPAO29XcCaraVq5EROAdjwWlFTsgBt5ERJSVzIy5jaeaZEodb+1Ur4WU4P0MoJp6oeEm6ko1kdoC75g3TLqCmquaoO2QWh6+su3znGoMvImIiCzSOsktM0KVZKuaZMZZtkqU4hF9LRKdm95zlyBBVkgJMSPsNrK/0oh3Jj9ApTMG3kRElLXaI4iJlgkBqWTiSGtmEIoPRGZONtSS4w0LF1jSM+LNZeJTh4E3ERFlpdavzc0JJQynmmTI5Eozq5pkQvCmGhRHp8sIEfmGXiq7SRIgK9QIN6OqSajHWteMD9sr3qqVmXBfMwEDbyIiylqmBQucpdkmUTCawZcq3gh1QjrOWwIgB3Zoz0ykeCPe2VA2Mh0x8CYioqxk1tLbod8bCL4zqY6
3WftkcNwdojf1Qm+iTvjItuk53hpW2ww/XsS+SR6bEmPgTUREZJFMCbxhwqB+ppwn4qQARVf5iHldzzESjKDLBtrUelzd+yhVNWEhb0sw8CYioqwk0iVDJAMiUj1dTHRJ0+GSJytmxNvkeyhJUugblIgRb8m862dkxDv8YYBxtzUYeBMREcUTSMLVG3iIwAqIGRB3AyaUE4yeqJfO1Eajk8rx1iEy2Da3dd1L16usXJloHzKGgTcREWUlxQVKDLSRfKCR/qGKGT1M/7PUzshDSMwIcZyKNtF53fHa0U/HCpMxVVzMfxCgSAy8iYgo64jA4iQmNGR818CvGRHG6EhxSJR+kf7j3bFLtQdJ0eX1gq+bfPy47QW+KbH0GCrbyJnyec1gDLyJiChrJbUASlQOrtG2Okogk5XnadVThKT4W1MW0zE2ubLtQaPt8x7ZUiY8UGUCBt5ERERxGA2FrJqYZwUjKQ6q5QRTHKHJQmCf169rlFjrBFHN5QR1TooMD7CVRt6TuoS6yglGbqX2TUDkPpQMBt5ERJR10mEJ7IxKNYGkOcUh3RZWafbLqPX64NUYeAfTkFJRTlBNvBzvZBn53IV+XoSADVL86DszPtBpi4E3ERFlLzNyZaMnoGk+dmD/pHtgvUzooxrjD1nqZ62nyoeRI0gRqSaxeSdJpUjp2Db6sx2ZamK8XVLHwJuIiLJOsitORreh/ILO/TOApj6rPFC0VzFBM3PvFQNlSTJY6UO9Z6koW2h05UpJyuwHsXTHwJuIiCiOZIOQTAhiTOljO5+o1ucrLZspjQAbalclYI9eNEdTW3r7oeN+tKWaJN4tEz7P6YyBNxERZZ14K1/rFUwF0L2Ajp56yu1NR4qDmStXemQZXjm5O2Q0m0hxxNvgrVLaumx17wAAIABJREFULdGS8XF/b8LXBrpGvEXbL2b2gWIx8CYiIlKQyqW725uRPqqlmuhR7fGj1uc3sGcbvTneWkaEIxaU0dwR7Z+Y8NQVtaDdqGRXrpQkSXlBoCT6RG0YeBMRUfYxMUowOgqaWVVNWplx2fS0ISP5xWKC3yyYUQ5RKefZeGvatja7qknbA4D21sJTTRqbvdhR1Rg7sTTVdSKzlKO9O0BERGQ2M1NNjLaTSXFKRHpBgngt4ZRBPfW09SRR6+6P8e3NWDI+0fbKbwTTmpK/MJpSTaRg6UCBFo8fX/+8F//ZUomCwhzkSGXo2qfYYMvty+P1Y2d1Iw7oUdjeXYnBEW8iIqI42sIMY5F0+ocpbTRWwlZ8NemFhowK5ifrnF2pPOId+fCQTPgbb6Ji8HUpqmJKe9TxFrLAlh+qccvT6/Dttlr03a8ALocdX/5Yrbh9un+e3R4f/vTaZtz70ufw+eX27k4MjngTEVHWCf+a3Ghgl2yqSCalmujuY5wSe3qud6pTW8w6ZrKCl0519c8k2tZa1cTr8+OTLZX4vq4Z27bXobhzLo45sif6dc3DJ9/uxsavd2FfQwu6FOQk0ZvUcnt8eOj1L7B1Rx0uO+1QOOzpN76cfj0iIiIyU7KRd7K7Z0TkHZ7iEJ/aKLDZqzvqbcPMHO+IcoKW3L8EZQYtrmryzS81uOnJdfjz+98hx2nH6ccciNkXDUXXwhxIkNC/VxfIAvjky8rQPunwwBJPdNA9/JD92rtLijjiTUREWS35coLJtZMRcbeJjWiuLhJYol558XbtjK9cqSCJcoJGcrwtHfFW8cXWPXj8ra9QWpSLGZMORef98pFnt0MEnjAkCSjMc6J3aT4+3rwTE0f0hS3NS2L6/DIeeWNL2gfdSOWI94IFCzB27FgMGjQI3333Xej1sWPHYsKECZg8eTImT56MtWvXht7bvHkzTjvtNIwfPx7Tpk1DdbVyvhEREVE4U1MYDAYdwkB1ifaWzHVrr7M0s7664oi3lkZVFsFJtGR8dBfMqWqi3taGb3fjsb9+iV6l+bjlwmE49ICSmDr1wf0G9+uKPfvc+O9Pe83vo8le+fB7fLutFtNOOTitg26kMvA+4YQTsGzZMvTq1SvmvUceeQTLly/H8uXLccwxxwCBv7BuuOEGzJ07F6tXr0ZFRQUWLVqUqu4SEVEmE4q/NSTZr//TMVCJpquPJuUchJewS64h88oJqrVtNrUR72S/XYm376df78ITy7/CgWWFuGFKOQo6OVuPKbXuI6IC9n5lhSjo5MQ/Nu9U7mSaWPvFTvz98x2YMHx/jBpc1t7dSShlqSYVFRW6tv/yyy+Rk5MT2m/KlCk44YQTcO+991rUQyIiyhYi5k/GowWjwVDmjXdrizMT5XjrXsgmSXoDeD3lBFt/Nf8OqtYxMfFQ4U3VN3nw/HvfYkDvIlz7+yOR47JHbCvCvgkI7me32zDmiDK8v/5X1NS3wGcHqva58dP3eyAMFAvx+2U0un1oaPai0e2Fz992Jzq57DjmiJ44qHcXXW3+uLMOf37/fzj0gGKcdXw//Z1qB2mR43399ddDCIFhw4bhj3/8IwoLC1FZWYmePXuGtikpKYEsy6itrUVRUVG79peIiCiRTJpcaWoXTQyArWwn7gRREX87pf2U+pGwnKDK62aPeH+w4Vd4vTIuHj8oJuiW0PrUEnpQDFu58rghPfHef7bhtmf/g06dc9CnbxG++qISPp/xMn2dchzIz3XA6WhLuqht8GDtlkoc1KsLJhy9P3p2y0d1nRt797mxr9Gjej3+sWkHigpycMXkwbDbMqNeSLsH3suWLUNZWRk8Hg/uvvtu3HnnnZaklHTtWmB6m1qVlnZut2NnO15ba/H6WofX1jqlpZ3R6PGhvq4ZAFBSlIdODnvC/aK1+PyorZVQlOuE7PaipLAT8l3a/9m0N3vgbmxBaUk+HGkeFDR5/ajf16TpHAu7dILU4kNp1L+rshCorm5Al7wcdMtzJTymxy+jpqYROXYbSovzDfe9cV8TbF4/uuS5UJqXuPRdk9ePusC5FkSda/Q5NO9rgl8ApUV58dvc1wQBoLRL23bV1Q3okutEab5yn6r31KOT0x6xT9euBdhX24TizrkozHFqOPtYorEFvmYPundr/TumodmLjz7fgVFH9MSRh/SI2b6+thFOmw0lnVyo29eEroWd4GtsQY7dht69ivH7Ewagck8j+h1QjJLuBbhqwqHIcej/PNtsEvI7ORVL/LlbfPhg/Ta8/c8f8Nhfv9TcZmG+C3dcNhIH9tQ2Up4Of++2e+BdVtaaj+NyuXD++efjyiuvDL2+c2dbXtHevXshSZLh0e7q6gbIcuqL4ZSWdkZVVX3Kj9sR8Npai9fXOry21gle22a/jKYWLwCg2i8jx0Dg65FlNLm9sLf40OTzo9ono0lHXeA6rx9NXh+qZZH2VSFaAuea6BxLSztj375mNPtlVEX9myqEQFOzB7UeP0RjS8JjemSBJrcHXpuEqiRGUOvcXnhkGTUeH9DoSbi9O/DZqPHJaI461+hzqGvxQgCo8vrjtlkf3M7Ttl1jswe2Fi9sTcp9amr2wG+TQvuUlnZGdXUjmtwe7PX60WLgYREAar0+NPnk0N8xK/79M5rcPo
wb2kvx750Gtxd2CRBNntB1afD60SIBVS0+TDyqT+s5+vzY6/EBPh98fmOfZ0+z+v0ZcXApjhrYFV9srYbb40PXwlx0LcxFlwKXas14m02CTZI0/X2aqr93bTYp7mBvuwbeTU1N8Pv96Ny5M4QQWLVqFQ455BAAwODBg+F2u7FhwwZUVFTg1VdfxcSJE9uzu0RElCEiloxPcszFaMyc7nWPlaR2QRtjkyJTQUs983CSgc+ZFCfVxCwtHj8++OxXHNG/K/r2UB7tDU2uDPszFO5LKu6T3WbD0IGlKThS+0lZ4D1//ny8//772LNnD6ZOnYqioiI8+eSTmDVrFvx+P2RZRv/+/TFv3jwAgM1mw8KFCzFv3jy0tLSgV69euP/++1PVXSIiIlOk91h3KzP6KElSIGpLZuF5/YJBst7JlYo53qFzaNtW89LrCf4ccywdbekRvlT9PzbvQEOzF5NGHaBhv7bJlfH6lgmf53SWssB7zpw5mDNnTszrb7/9tuo+Q4cOxYoVKyzuGRERZRszR26NVzVJx7Hc+LRW/Ij3LYD2BXS09ipxfwxROQe9i+EYOAQAwCYhJgXJzIyk5hYf3lu/DYf0LcZBvdRzoIPn2/Z5V+lE5n2c01K753gTERGZLiyqS76Ot8EFdNAaSanlp6aT0DkmGQ3rOVOz6ngH97eijKEQrQGyIQlOrMTpiA28g7saPOS6r3bhu+oGtMgC//nPrxAALpt0aNx9JACyiE01IWsw8CYiItLASDDEGEZdew+gxi31Z6BzkfMKEjeQG2cSq5Fr8+l/d+GZd/6Lww7pjj49O2PymAMxoE8RDulbrGn/YJdtKqP+7X2/sgUDbyIiyjpC5fdGGmmr7ayvpfBc23RnRv3oYDt6c62TXrhSZztagmLzVsHU9wkwukrqT5V1WLrqWwzo3QUXnjQQPgnolZu4pCMUU03idztTPtPpKr0LixIREbWTDvXVu45zTG4d0PCGzBlDNRrAq6UQhb+q71xje2A4S0XHtjX1LXjkzS0ozHPhqjMPh11HyUug9QMuAv/rGB/29sXAm4iIso4pI94m9CFTwhhdI95xAmY9ExNNmQAbtuKiabM1I/qmMRg1KWDV20qLx49H39wCt8ePa84+AoV5Lt2fu/B7L0WtXEnmY6oJERFlN5MmDOqvapJ5A4jazzH5EzMr1SR4f/UG/Ko53lLktkbKCZqVPhPPr7sb8NTfvkblnkbMPOtw9O4eWLRF6Au9g+lBiVKjGIibg4E3ERFlndQuBJP59OQWxwvrlB40/IHA2B71phkD1Ek1YUJJxERN6X080VILXQiBNRu34y9//wH5uQ78ccoQHHZASdv7Bh+LYvaL6UJH+omwDgNvIiLKamZMGDR63Awb8LbkgaXK44NDktDN5VDcLqnFYuIcN9E+Wu5Ne0yQjRd2+/wynlr+NTZ+V4Uj+3fF1FMOQWGetkmUiY4X/g2NBEBW3DgzymOmMwbeRESUfcwcnAsEGrpTTXR+5d+erOylXwjFCWWh7GwhIIQwFNBZkctvRn5zaH8TL6wsCyxZ+Q02fleFc353EMYP76N4zQyPeAthuGY9acfAm4iIso6ZAVkyoUimhTFar1W81AqlNpRGT00JlA3c6MSbSRF5MFruoZ4yiqr9CluyPbotIQSWffAd/vPf3/D74/tjwtH7J+6QRpIUNuIdr38Z+HlOR6xqQkREpEBPSoLa/pkSqGjJLQ5qXZFTtaWYqDE4cU+5oeQYesCKrs8eJfwqpOqhbfP3e3DenFVYue5nyAoX6621P+Lvm3Zg4oj9MXFE37htGfncRaeakHU44k1ERFknrMicaSkIhtrJoEDGrBJySiseJoq7jU8I1H+fzVscxxyyLPCXf2yFTxZ48+Mf8ZvPj1EDu8NVmIvPvvkN//5qF37YWYfjhvTE2cf1N/34rSt1CshCgi0sx5tTKa3BwJuIiCiOjjDibRal8xVRAXL468lqW+RIf6ioecTbwE3U05NP/7sLldVNuPmSo1Bf14xPt9XgtY+2YvuvtfDLAr1K8zHlhAEYN6y3pjx4IRAKoLUI5nULROd4R+e7aG+T1DHwJiKirCNU/2CwDQPt6A2A2lvryGfi7XTVtg5MnBQKe5iRhx/MyjB14R4D98zoCLHPL2P5Jz9h//0KMHJwGaqrG1DSszO2fFeFQd0LMGpwD/TpXpCSSiKyAKTQkLdCylAHfJC0AgNvIiLKaoYH6oKT3aQk28kgSec0S8oBtUUp3iH6HnBE3LJ40SPehoLNBHnkQf/6shJVtW5cc/YRsAVOwuW0Y/ThPWPKL+o5tK6VK8M+38H9mGpiHU6uJCKi7CNg2ihhR0k10dxXHRFZ3MDbhBV02ibASpqrimi9L0J1Vqg5vD4//vavn9G/ZyGO6N819HryQa/GZe7DjgcAslD6XoLMxsCbiIiyTnhw1V4jd0JnAJQOtFyruCtXqqSQiASBtuFUk8CvNpPreIc3ZqS2tZa+fLx5J2rqW3D6sf2iHhJj0zz0Htvop45BofV4jYmIKMsZC2LCR1ONtpJJYbcVzwih+FEhkDQlDz8qHUjbTqZsEiE6xSb0usr2e/Y1Y/knP2FQnyIc2rc4Zp9UPixKqn+IlGnf4KQrBt5ERJR1QkGCJJkYxOhrqT2WG0+WKXWrwxqJN4HS7EWO9Eyu1JRqEta2mbw+GU+8/RVkIXDpxINNnzip93MXOdYuhV6zMMumQ2PgTUREWcuMkCYYwFOrRPFYbC0M5f3MCOxCqSYmPmCZumS8glc//B4/VdZj2smHYr+SvNjjq4yepwKDQuvxGhMRUdaJWIXPpChGbzOZ9tW8noBTezlB5d/D7DregQNombCZ8L4EaoKHWtJ4smoPHOHWfbULf9+0AxOO3h/DBpVqa1gn/VVNpLDfW9IlCsPAm4iIslYyo5fhgZexVRUzjdZJfeqTRtUmVwKAHNOKCAV9SgvsaBE+udIs0eeg5d6rbhN2nSqrG/HC6m8xsE8Rzjqun+bjp1K8coKCgbkpGHgTEVHWMTNwSSbWyKRAxZSAL2oFyUSjwMlentACOjpqrWsNIHVfC4WHlvDDLP/kJ0iShCsmHwa7LUH4lcSN0BsgSyq/J2sw8CYiouwTqEls6uih3pUrszSQMVJOEEqpJiZMPo2uPGOG0MRCjYvgRPRH5RuD3/Y24bNvd2NseS8UFeQkPr6eDpsoFStkdnQMvImIKOsEisyZ0AZCLekJhqxefMUKVkzqCw9EY1NNzFgVtDXtxaZzxFtjyzpEftai9333P7/AbrPhpKP66GrVCN053gq/58qV1mHgTUREWUlKg1QPM0diM1GicoKh62O0jndU4KipmQQrNJqZ4y0B2Fvnxr++3IVjjyxDlwSj3UrH18PIA59iqonCCTEQNwcDbyIiymqGAwYDqQZJH7MdmVHVJLr+c2SqiYj4vRBC10i1kmC6Sqg/mhfHSXxXzbqH763fBgCYcPT+2naQkh9v1vXAF7ZpogfVjv0YaQ4G3kRElHXMzq/WnWoS3C/TIhUtgauOC5FokRwzcrx1j3gnO
<...remaining base64 PNG data elided: rendered 'REINFORCE on CartPole' training-reward plot...>\n",
      "text/plain": [
" + ] + }, + "metadata": { + "bento_obj_id": "139932048217936" + }, + "output_type": "display_data" + } + ], + "source": [ + "def plot_rewards(rewards):\n", + " fig, ax = plt.subplots(1, 1, figsize=(12, 10));\n", + " pd.Series(rewards).rolling(50).mean().plot(ax=ax);\n", + " pd.Series(rewards).plot(ax=ax,alpha=0.5,color='lightblue');\n", + " ax.set_xlabel('Episodes');\n", + " ax.set_ylabel('Reward');\n", + " plt.title('REINFORCE on CartPole');\n", + " plt.legend(['Moving Average Reward', 'Instantaneous Episode Reward'])\n", + " return fig, ax\n", + "\n", + "sns.set_style('darkgrid')\n", + "sns.set()\n", + "\n", + "\n", + "plot_rewards(train_rewards)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Print eval rewards" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "ExecuteTime": { + "end_time": "2021-02-25T18:43:32.330306Z", + "start_time": "2021-02-25T18:42:16.331040Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0225 104332.151 gymrunner.py:132] For gamma=1.0, average reward is 198.59\n", + "Rewards list: [200. 200. 200. 200. 200. 200. 200. 167. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 200. 200. 100. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 191. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 170. 200.\n", + " 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 151. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 190.\n", + " 200. 200. 149. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200. 200.\n", + " 200. 200. 200. 
200.]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mean reward: 198.59\n" + ] + } + ], + "source": [ + "eval_episodes = 200\n", + "eval_rewards = evaluate_for_n_episodes(eval_episodes, env, agent, 500, num_processes=1).T[0]\n", + "mean_reward = pd.Series(eval_rewards).mean()\n", + "print(f'Mean reward: {mean_reward:.2f}')" + ] + } + ], + "metadata": { + "anp_cloned_from": { + "revision_id": "351369499371280" + }, + "bento_stylesheets": { + "bento/extensions/flow/main.css": true, + "bento/extensions/kernel_selector/main.css": true, + "bento/extensions/kernel_ui/main.css": true, + "bento/extensions/new_kernel/main.css": true, + "bento/extensions/system_usage/main.css": true, + "bento/extensions/theme/main.css": true + }, + "kernelspec": { + "display_name": "alexnik (local)", + "language": "python", + "name": "alexnik_local" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5+" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/reagent/ope/datasets/__init__.py b/reagent/ope/datasets/__init__.py index e69de29bb..5be5087fd 100644 --- a/reagent/ope/datasets/__init__.py +++ b/reagent/ope/datasets/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/ope/datasets/logged_dataset.py b/reagent/ope/datasets/logged_dataset.py index c7c139908..86c45ee28 100644 --- a/reagent/ope/datasets/logged_dataset.py +++ b/reagent/ope/datasets/logged_dataset.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from abc import ABC, abstractmethod from dataclasses import dataclass diff --git a/reagent/ope/estimators/contextual_bandits_estimators.py b/reagent/ope/estimators/contextual_bandits_estimators.py index 32bb1a29c..456d229f4 100644 --- a/reagent/ope/estimators/contextual_bandits_estimators.py +++ b/reagent/ope/estimators/contextual_bandits_estimators.py @@ -1,27 +1,35 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
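For orientation before the estimator code that follows, here is a minimal, self-contained sketch of the per-sample quantities these classes compute; the helper function, its inputs, and the example numbers are hypothetical and are not taken from this diff.

# Editorial sketch: per-sample Direct Method (DM), Inverse Propensity Scoring (IPS),
# and Doubly Robust (DR) estimates for a discrete action space.
def toy_dm_ips_dr(pi_log, pi_tgt, logged_action, logged_reward, reward_model):
    # pi_log / pi_tgt: per-action propensities; reward_model: predicted reward per action
    w = pi_tgt[logged_action] / pi_log[logged_action]            # importance weight
    dm = sum(p * r for p, r in zip(pi_tgt, reward_model))        # model-based value
    ips = w * logged_reward                                      # reweighted logged reward
    dr = w * (logged_reward - reward_model[logged_action]) + dm  # DR combines both
    return dm, ips, dr

# Example: 2 actions, target policy prefers action 1, logged action was 0.
# toy_dm_ips_dr([0.5, 0.5], [0.2, 0.8], 0, 1.0, [0.6, 0.9]) -> (0.84, 0.4, 1.0)
# The estimators below additionally clamp the weight and zero it when the logged
# propensity falls under PROPENSITY_THRESHOLD.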
+import logging +import time from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import Iterable, Optional, Sequence, Union +from typing import Optional, Sequence, Tuple, Union import numpy as np -from reagent.ope.estimators.estimator import Estimator, EstimatorResults +import torch +from reagent.ope.estimators.estimator import Estimator, EstimatorResult from reagent.ope.estimators.types import ( Action, ActionDistribution, ActionSpace, Reward, + Trainer, + TrainingData, Values, ) from reagent.ope.utils import Clamper, RunningAverage from torch import Tensor +logger = logging.getLogger(__name__) Actions = Union[Sequence[Action], Tensor, np.ndarray] +PROPENSITY_THRESHOLD = 1e-6 class ActionRewards(Values[Action]): - def _new_key(self, k: int) -> Action: + def _to_key(self, k: int) -> Action: return Action(k) @@ -42,59 +50,253 @@ def __call__(self, context) -> ActionRewards: return self._action_rewards(context) +@dataclass(frozen=True) +class ModelOutputs: + tgt_reward_from_log_action: Reward + tgt_rewards: Sequence[Reward] + + @dataclass(frozen=True) class LogSample: # task specific context context: object # log - logged_action: Action - logged_propensities: ActionDistribution - logged_reward: Reward + log_action: Action + log_reward: Reward + log_action_probabilities: ActionDistribution # result from target policy - target_action: Action - target_propensities: ActionDistribution - - -@dataclass(frozen=True) -class Log: - """ - Input for contextual bandits estimators - Tensor is used if action can be indexed in [0, action_space) - Otherwise, Sequence and Mapping are used - """ - - samples: Iterable[LogSample] + tgt_action_probabilities: ActionDistribution + tgt_action: Action + model_outputs: Optional[ModelOutputs] = None + ground_truth_reward: Reward = float("nan") + item_feature: Optional[Tensor] = None @dataclass(frozen=True) class BanditsEstimatorInput: action_space: ActionSpace - logs: Iterable[Log] - target_model: Optional[BanditsModel] = None - ground_truth_model: Optional[BanditsModel] = None + samples: Sequence[LogSample] + has_model_outputs: bool class DMEstimator(Estimator): + TRAINING_VALIDATION_SPLIT = 0.8 """ Estimating using Direct Method (DM), assuming a reward model is trained """ - def evaluate(self, input: BanditsEstimatorInput, **kwargs) -> EstimatorResults: - self.reset() - for log in input.logs: - log_reward = RunningAverage() - tgt_reward = RunningAverage() - gt_reward = RunningAverage() - for sample in log.samples: - log_reward.add(sample.logged_reward) - rewards = input.target_model(sample.context) - tgt_reward.add(rewards[sample.target_action]) - rewards = input.ground_truth_model(sample.context) - gt_reward.add(rewards[sample.target_action]) - self._append_estimate( - log_reward.average, tgt_reward.average, gt_reward.average + def __init__(self, trainer: Optional[Trainer] = None, device=None): + super().__init__(device) + self._trainer = trainer + + def _train_model( + self, samples: Sequence[LogSample], force_train: bool = False + ) -> bool: + if self._trainer is None: + logger.error("Target model trainer not set") + return False + trainer = self._trainer + assert trainer is not None + if trainer.is_trained and not force_train: + return True + logger.info(" training direct model...") + st = time.perf_counter() + sample_size = len(samples) + training_size = int(sample_size * DMEstimator.TRAINING_VALIDATION_SPLIT) + train_x = [] + train_y = [] + for i in range(training_size): + sample = samples[i] + if sample.item_feature is None: + 
continue + train_x.append( + torch.cat( + ( + torch.tensor( + sample.log_action.value, dtype=torch.float + ).flatten(), + sample.item_feature.flatten(), + ) + ) + ) + train_y.append(sample.log_reward) + if len(train_x) == 0: + logger.error("Item features not provided, DM is not available") + return False + train_x = torch.stack(train_x) + train_y = torch.tensor(train_y, dtype=torch.float, device=train_x.device) + vali_x = [] + vali_y = [] + for i in range(training_size, sample_size): + sample = samples[i] + if sample.item_feature is None: + continue + vali_x.append( + torch.cat( + ( + torch.tensor( + sample.log_action.value, dtype=torch.float + ).flatten(), + sample.item_feature.flatten(), + ) + ) + ) + vali_y.append(sample.log_reward) + if len(vali_x) == 0: + vali_x = train_x.detach().clone() + vali_y = train_y.detach().clone() + else: + vali_x = torch.stack(vali_x) + vali_y = torch.tensor(vali_y, dtype=torch.float, device=vali_x.device) + training_data = TrainingData(train_x, train_y, None, vali_x, vali_y, None) + trainer.train(training_data) + logger.info(f" training direct model done: {time.perf_counter() - st}s") + return True + + def _calc_dm_reward( + self, action_space: ActionSpace, sample: LogSample + ) -> Tuple[Optional[Reward], torch.Tensor, torch.Tensor]: + if sample.model_outputs is not None: + return ( + sample.model_outputs.tgt_reward_from_log_action, + torch.tensor( + sample.model_outputs.tgt_rewards, + dtype=torch.float, + device=self._device, + ), + torch.tensor( + # pyre-fixme[16]: `ActionDistribution` has no attribute `_values`. + sample.tgt_action_probabilities._values, + dtype=torch.float, + device=self._device, + ), + ) + trainer = self._trainer + if trainer is None or not trainer.is_trained: + return 0.0, torch.zeros(), torch.zeros() + assert sample.item_feature is not None + item_feature = sample.item_feature.flatten() + features = [] + probs = [] + idx = -1 + for action in action_space: + if idx < 0 and action == sample.log_action: + idx = len(features) + features.append( + torch.cat( + ( + torch.tensor(action.value, dtype=torch.float).flatten(), + item_feature, + ) + ) ) - return self.results + probs.append(sample.tgt_action_probabilities[action]) + preds = trainer.predict(torch.stack(features).float(), device=self._device) + return ( + preds.scores[idx].item(), + preds.scores, + torch.tensor(probs, dtype=torch.float, device=self._device), + ) + + def _evaluate( + self, + input: BanditsEstimatorInput, + train_samples: Sequence[LogSample], + eval_samples: Sequence[LogSample], + force_train: bool = False, + **kwargs, + ) -> Optional[EstimatorResult]: + logger.info("OPE DM Evaluating") + if ( + not self._train_model(train_samples, force_train) + and not input.has_model_outputs + ): + return None + log_avg = RunningAverage() + tgt_avg = RunningAverage() + tgt_vals = [] + gt_avg = RunningAverage() + for sample in eval_samples: + log_avg.add(sample.log_reward) + _, tgt_scores, tgt_probs = self._calc_dm_reward(input.action_space, sample) + tgt_reward = torch.dot(tgt_scores.reshape(-1), tgt_probs.reshape(-1)).item() + tgt_avg.add(tgt_reward) + tgt_vals.append(tgt_reward) + gt_avg.add(sample.ground_truth_reward) + ( + tgt_score_normalized, + tgt_std_err, + tgt_std_err_normalized, + ) = self._compute_metric_data(torch.tensor(tgt_vals), log_avg.average) + return EstimatorResult( + log_reward=log_avg.average, + estimated_reward=tgt_avg.average, + ground_truth_reward=gt_avg.average, + estimated_weight=tgt_avg.count, + estimated_reward_normalized=tgt_score_normalized, + 
estimated_reward_std_error=tgt_std_err, + estimated_reward_normalized_std_error=tgt_std_err_normalized, + ) + + @staticmethod + def _calc_optional_avg(a: Optional[float], b: Optional[float]) -> Optional[float]: + # Annoying but Pyre would only take it like this + return None if a is None else (None if b is None else (a + b) / 2) + + def evaluate( + self, input: BanditsEstimatorInput, **kwargs + ) -> Optional[EstimatorResult]: + if input.has_model_outputs: + return self._evaluate( + input, input.samples, input.samples, force_train=True, **kwargs + ) + log_avg = RunningAverage() + gt_avg = RunningAverage() + for sample in input.samples: + log_avg.add(sample.log_reward) + gt_avg.add(sample.ground_truth_reward) + + # 2-fold cross "validation" as used by https://arxiv.org/pdf/1612.01205.pdf + shuffled = list(input.samples) + np.random.shuffle(shuffled) + lower_half = shuffled[: len(shuffled) // 2] + upper_half = shuffled[len(shuffled) // 2 :] + er_lower = self._evaluate( + input, lower_half, upper_half, force_train=True, **kwargs + ) + er_upper = self._evaluate( + input, upper_half, lower_half, force_train=True, **kwargs + ) + if er_lower is None or er_upper is None: + return None + return EstimatorResult( + log_reward=log_avg.average, + estimated_reward=( + (er_lower.estimated_reward + er_upper.estimated_reward) / 2 + ), + estimated_reward_normalized=( + DMEstimator._calc_optional_avg( + er_lower.estimated_reward_normalized, + er_upper.estimated_reward_normalized, + ) + ), + estimated_reward_normalized_std_error=( + DMEstimator._calc_optional_avg( + er_lower.estimated_reward_normalized_std_error, + er_upper.estimated_reward_normalized_std_error, + ) + ), + estimated_reward_std_error=( + DMEstimator._calc_optional_avg( + er_lower.estimated_reward_std_error, + er_upper.estimated_reward_std_error, + ) + ), + ground_truth_reward=gt_avg.average, + ) + + def __repr__(self): + return f"DMEstimator(trainer({None if self._trainer is None else self._trainer.name},device({self._device}))" class IPSEstimator(Estimator): @@ -102,59 +304,370 @@ class IPSEstimator(Estimator): Inverse Propensity Scoring (IPS) estimator """ - def __init__(self, weight_clamper: Clamper = None, device=None): + def __init__( + self, + weight_clamper: Optional[Clamper] = None, + weighted: bool = False, + device=None, + ): super().__init__(device) self._weight_clamper = Clamper() if weight_clamper is None else weight_clamper + self._weighted = weighted - def evaluate(self, input: BanditsEstimatorInput, **kwargs) -> EstimatorResults: - self.reset() - for log in input.logs: - log_reward = RunningAverage() - tgt_reward = RunningAverage() - gt_reward = RunningAverage() - for sample in log.samples: - log_reward.add(sample.logged_reward) + def evaluate( + self, input: BanditsEstimatorInput, **kwargs + ) -> Optional[EstimatorResult]: + logger.info("OPE IPS Evaluating") + log_avg = RunningAverage() + logged_vals = [] + tgt_avg = RunningAverage() + tgt_vals = [] + acc_weight = RunningAverage() + gt_avg = RunningAverage() + for sample in input.samples: + log_avg.add(sample.log_reward) + logged_vals.append(sample.log_reward) + weight = 0.0 + tgt_result = 0.0 + if sample.log_action.value is not None: weight = ( - sample.target_propensities[sample.logged_action] - / sample.logged_propensities[sample.logged_action] + 0.0 + if sample.log_action_probabilities[sample.log_action] + < PROPENSITY_THRESHOLD + else sample.tgt_action_probabilities[sample.log_action] + / sample.log_action_probabilities[sample.log_action] ) weight = 
self._weight_clamper(weight) - tgt_reward.add(sample.logged_reward * weight) - rewards = input.ground_truth_model(sample.context) - gt_reward.add(rewards[sample.target_action]) - self._append_estimate( - log_reward.average, tgt_reward.average, gt_reward.average - ) - return self.results + tgt_result = sample.log_reward * weight + tgt_avg.add(tgt_result) + tgt_vals.append(tgt_result) + acc_weight.add(weight) + gt_avg.add(sample.ground_truth_reward) + ( + tgt_score_normalized, + tgt_std_err, + tgt_std_err_normalized, + ) = self._compute_metric_data(torch.tensor(tgt_vals), log_avg.average) + return EstimatorResult( + log_reward=log_avg.average, + estimated_reward=tgt_avg.average + if not self._weighted + else tgt_avg.average / acc_weight.total, + ground_truth_reward=gt_avg.average, + estimated_weight=tgt_avg.count, + estimated_reward_normalized=tgt_score_normalized, + estimated_reward_std_error=tgt_std_err, + estimated_reward_normalized_std_error=tgt_std_err_normalized, + ) + + def __repr__(self): + return ( + f"IPSEstimator(weight_clamper({self._weight_clamper})" + f",weighted({self._weighted}),device({self._device}))" + ) -class DoublyRobustEstimator(IPSEstimator): +class DoublyRobustEstimator(DMEstimator): """ Doubly Robust (DR) estimator: reference: https://arxiv.org/abs/1103.4601 (deterministic reward model) https://arxiv.org/abs/1612.01205 (distributed reward model) """ - def evaluate(self, input: BanditsEstimatorInput, **kwargs) -> EstimatorResults: - self.reset() - for log in input.logs: - log_reward = RunningAverage() - tgt_reward = RunningAverage() - gt_reward = RunningAverage() - for sample in log.samples: - log_reward.add(sample.logged_reward) + def __init__( + self, + trainer: Optional[Trainer] = None, + weight_clamper: Optional[Clamper] = None, + device=None, + ): + super().__init__(trainer, device) + self._weight_clamper = Clamper() if weight_clamper is None else weight_clamper + + def _evaluate( + self, + input: BanditsEstimatorInput, + train_samples: Sequence[LogSample], + eval_samples: Sequence[LogSample], + force_train: bool = False, + **kwargs, + ) -> Optional[EstimatorResult]: + logger.info("OPE DR Evaluating") + self._train_model(train_samples, force_train) + log_avg = RunningAverage() + tgt_avg = RunningAverage() + tgt_vals = [] + gt_avg = RunningAverage() + for sample in eval_samples: + log_avg.add(sample.log_reward) + dm_action_reward, dm_scores, dm_probs = self._calc_dm_reward( + input.action_space, sample + ) + dm_reward = torch.dot(dm_scores.reshape(-1), dm_probs.reshape(-1)).item() + tgt_result = 0.0 + weight = 0.0 + if sample.log_action.value is not None: weight = ( - sample.target_propensities[sample.logged_action] - / sample.logged_propensities[sample.logged_action] + 0.0 + if sample.log_action_probabilities[sample.log_action] + < PROPENSITY_THRESHOLD + else sample.tgt_action_probabilities[sample.log_action] + / sample.log_action_probabilities[sample.log_action] ) weight = self._weight_clamper(weight) - rewards = input.target_model(sample.context) - r1 = rewards[sample.logged_action] - r2 = rewards[sample.target_action] - tgt_reward.add((sample.logged_reward - r1) * weight + r2) - rewards = input.ground_truth_model(sample.context) - gt_reward.add(rewards[sample.target_action]) - self._append_estimate( - log_reward.average, tgt_reward.average, gt_reward.average + assert dm_action_reward is not None + assert dm_reward is not None + tgt_result += ( + sample.log_reward - dm_action_reward + ) * weight + dm_reward + else: + tgt_result = dm_reward + 
tgt_avg.add(tgt_result) + tgt_vals.append(tgt_result) + gt_avg.add(sample.ground_truth_reward) + ( + tgt_score_normalized, + tgt_std_err, + tgt_std_err_normalized, + ) = self._compute_metric_data(torch.tensor(tgt_vals), log_avg.average) + return EstimatorResult( + log_reward=log_avg.average, + estimated_reward=tgt_avg.average, + ground_truth_reward=gt_avg.average, + estimated_weight=tgt_avg.count, + estimated_reward_normalized=tgt_score_normalized, + estimated_reward_std_error=tgt_std_err, + estimated_reward_normalized_std_error=tgt_std_err_normalized, + ) + + def __repr__(self): + return ( + f"DoublyRobustEstimator(trainer({None if self._trainer is None else self._trainer.name})" + f",weight_clamper({self._weight_clamper}),device({self._device}))" + ) + + +class SwitchEstimator(DMEstimator): + # For details, visit https://arxiv.org/abs/1612.01205 sections 4, 5 + CANDIDATES = 21 + EXP_BASE = 1.5 + EPSILON = 1e-6 + + def __init__( + self, + trainer: Optional[Trainer] = None, + weight_clamper: Optional[Clamper] = None, + rmax: Optional[Reward] = None, + device=None, + ): + """ + rmax is an a priori upper bound on any possible reward. + The tighter the bound, the better the estimator can estimate + its bias. If not provided, the estimator will use the max + reward seen in the sample data. + """ + super().__init__(trainer, device) + self._rmax = rmax + self._weight_clamper = Clamper() if weight_clamper is None else weight_clamper + + def _estimate_rmax(self, input: BanditsEstimatorInput) -> Reward: + rmax = float("-inf") + for sample in input.samples: + _, dm_scores, dm_probs = self._calc_dm_reward(input.action_space, sample) + max_sample_r = max(sample.log_reward, torch.max(dm_scores).item()) + rmax = max(rmax, max_sample_r) + return rmax + + def _calc_weight_reward_tensors( + self, input: BanditsEstimatorInput, eval_samples: Sequence[LogSample] + ) -> Tuple[ + torch.Tensor, + torch.Tensor, + torch.Tensor, + torch.Tensor, + torch.Tensor, + torch.Tensor, + torch.Tensor, + RunningAverage, + RunningAverage, + ]: + n = len(eval_samples) + ws = torch.ones((n, len(input.action_space))) + rs = torch.zeros((n, 1)) + r_est = torch.zeros((n, len(input.action_space))) + r_est_for_logged_action = torch.zeros((n, 1)) + actions = torch.zeros((n, len(input.action_space))) + expected_rmax = torch.zeros((n, len(input.action_space))) + propensities = torch.zeros((n, len(input.action_space))) + + log_avg = RunningAverage() + gt_avg = RunningAverage() + + priori_rmax = self._estimate_rmax(input) if self._rmax is None else self._rmax + assert priori_rmax is not None + + for i, sample in enumerate(eval_samples): + dm_score_for_logged_action, dm_scores, dm_probs = self._calc_dm_reward( + input.action_space, sample ) - return self.results + for a in input.action_space: + weight = ( + 0.0 + if sample.log_action_probabilities[a] < PROPENSITY_THRESHOLD + else sample.tgt_action_probabilities[a] + / sample.log_action_probabilities[a] + ) + ws[i, a] = self._weight_clamper(weight) + propensities[i, a] = sample.tgt_action_probabilities[a] + expected_rmax[i, a] = sample.tgt_action_probabilities[a] * priori_rmax + actions[i, a] = float(a == sample.log_action) + + rs[i, 0] = sample.log_reward + r_est[i] = dm_scores.reshape(-1) + # pyre-fixme[6]: For 2nd param expected `Union[bool, float, int, + # Tensor]` but got `Optional[float]`. 
+ r_est_for_logged_action[i] = dm_score_for_logged_action + log_avg.add(sample.log_reward) + gt_avg.add(sample.ground_truth_reward) + + return ( + actions, + ws, + rs, + r_est, + r_est_for_logged_action, + propensities, + expected_rmax, + log_avg, + gt_avg, + ) + + def _calc_estimated_values( + self, + logged_rewards: torch.Tensor, + weights: torch.Tensor, + actions: torch.Tensor, + threshold: float, + est_rewards: torch.Tensor, + est_rewards_for_logged_action: torch.Tensor, + tgt_props: torch.Tensor, + ) -> torch.Tensor: + ips_scores = (weights * actions).sum(dim=1, keepdim=True) + return logged_rewards * ips_scores * (ips_scores <= threshold).float() + ( + est_rewards * tgt_props * (weights > threshold).float() + ).sum(dim=1, keepdim=True) + + def _evaluate( + self, + input: BanditsEstimatorInput, + train_samples: Sequence[LogSample], + eval_samples: Sequence[LogSample], + force_train: bool = False, + **kwargs, + ) -> Optional[EstimatorResult]: + logger.info("OPE Switch Evaluating") + self._train_model(train_samples, force_train) + + if "exp_base" in kwargs: + exp_base = kwargs["exp_base"] + else: + exp_base = SwitchEstimator.EXP_BASE + if "candidates" in kwargs: + num_candidates = kwargs["candidates"] + else: + num_candidates = SwitchEstimator.CANDIDATES + + ( + actions, + ws, + rs, + r_est, + r_est_for_logged_action, + propensities, + expected_rmax, + log_avg, + gt_avg, + ) = self._calc_weight_reward_tensors(input, eval_samples) + min_w, max_w = float(torch.min(ws).item()), float(torch.max(ws).item()) + diff = max_w - min_w + # The threshold lies in the range [min ips, max ips] + # Picking a small threshold -> using mainly the model-based estimator + # Picking a large threshold -> using mainly the ips-based estimator + candidates = [ + min_w + ((exp_base**x) / (exp_base ** (num_candidates - 1))) * diff + for x in range(num_candidates) + ] + # This prevents the edge case where nearly all scores being min_w prevents + # switch from trying a purely DM estimate + tau = min_w - SwitchEstimator.EPSILON + loss = float("inf") + for candidate in candidates: + estimated_values = self._calc_estimated_values( + rs, ws, actions, candidate, r_est, r_est_for_logged_action, propensities + ) + var = (1.0 / (estimated_values.shape[0] ** 2)) * torch.sum( + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` + # and `int`. + (estimated_values - torch.mean(estimated_values)) + ** 2 + ).item() + bias = torch.mean( + torch.sum(expected_rmax * (ws > candidate).float(), dim=1, keepdim=True) + ).item() + # pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, + # float, int]`. 
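            # Editorial note on the step below: each candidate threshold is scored by an
            # MSE surrogate -- the empirical variance of the blended estimate plus the
            # square of a bias bound accumulated from expected_rmax over samples whose
            # importance weights exceed the candidate -- and the lowest-scoring candidate
            # becomes tau.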
+ cand_loss = var + bias * bias + if cand_loss < loss: + tau = candidate + loss = cand_loss + + estimated_values = self._calc_estimated_values( + rs, ws, actions, tau, r_est, r_est_for_logged_action, propensities + ) + ( + tgt_score_normalized, + tgt_std_err, + tgt_std_err_normalized, + ) = self._compute_metric_data(estimated_values.detach(), log_avg.average) + return EstimatorResult( + log_reward=log_avg.average, + estimated_reward=torch.mean(estimated_values).item(), + ground_truth_reward=gt_avg.average, + estimated_weight=float(estimated_values.shape[0]), + estimated_reward_normalized=tgt_score_normalized, + estimated_reward_std_error=tgt_std_err, + estimated_reward_normalized_std_error=tgt_std_err_normalized, + ) + + def __repr__(self): + return ( + f"SwitchEstimator(trainer({None if self._trainer is None else self._trainer.name})" + f",weight_clamper({self._weight_clamper}),device({self._device}))" + ) + + +class SwitchDREstimator(SwitchEstimator): + # For details, visit https://arxiv.org/abs/1612.01205 sections 4, 5 + + def _calc_estimated_values( + self, + logged_rewards: torch.Tensor, + weights: torch.Tensor, + actions: torch.Tensor, + threshold: float, + est_rewards: torch.Tensor, + est_rewards_for_logged_action: torch.Tensor, + tgt_props: torch.Tensor, + ) -> torch.Tensor: + ips_scores = (weights * actions).sum(dim=1, keepdim=True) + dr = ips_scores * (logged_rewards - est_rewards_for_logged_action) + ( + tgt_props * est_rewards + ).sum(dim=1, keepdim=True) + return dr * (ips_scores <= threshold).float() + ( + est_rewards * tgt_props * (weights > threshold).float() + ).sum(dim=1, keepdim=True) + + def __repr__(self): + return ( + f"SwitchDREstimator(trainer({None if self._trainer is None else self._trainer.name})" + f",weight_clamper({self._weight_clamper}),device({self._device}))" + ) diff --git a/reagent/ope/estimators/estimator.py b/reagent/ope/estimators/estimator.py index f7b180b22..1b8cc8cc7 100644 --- a/reagent/ope/estimators/estimator.py +++ b/reagent/ope/estimators/estimator.py @@ -1,16 +1,29 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging import math +import pickle +import tempfile from abc import ABC, abstractmethod -from dataclasses import dataclass -from typing import Optional, Tuple, Union +from dataclasses import dataclass, field +from multiprocessing import Pool +from typing import Iterable, List, Mapping, Optional, Tuple, Union import torch +from reagent.evaluation.cpe import bootstrapped_std_error_of_mean from torch import Tensor +logger = logging.getLogger(__name__) +SCORE_THRESHOLD = 1e-6 + + class ResultDiffs: + """ + Statistics for differences, e.g., estimates vs ground truth + """ + def __init__(self, diffs: Tensor): self._diffs = diffs self._rmse = None @@ -20,7 +33,10 @@ def __init__(self, diffs: Tensor): @property def rmse(self) -> Tensor: if self._rmse is None: - self._rmse = (self._diffs ** 2.0).mean().sqrt() + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and + # `float`. + # pyre-fixme[16]: `float` has no attribute `mean`. 
+ self._rmse = (self._diffs**2.0).mean().sqrt() return self._rmse @property @@ -43,26 +59,100 @@ def __repr__(self): @dataclass(frozen=True) +class EstimatorResult: + log_reward: float + estimated_reward: float + ground_truth_reward: Optional[float] = 0.0 + estimated_weight: float = 1.0 + estimated_reward_normalized: Optional[float] = None + estimated_reward_std_error: Optional[float] = None + estimated_reward_normalized_std_error: Optional[float] = None + + +@dataclass class EstimatorResults: """ Estimator results """ - logs: Tensor - estimates: Tensor - ground_truths: Optional[Tensor] = None - estimate_log_diffs: Optional[ResultDiffs] = None - estimate_gt_diffs: Optional[ResultDiffs] = None + results: List[EstimatorResult] = field(default_factory=list) + device = None + + def append(self, result: EstimatorResult): + """Append a data point + + Args: + result: result from an experimental run + """ + er = result.estimated_reward + if math.isnan(er) or math.isinf(er): + logging.warning(f" Invalid estimate: {er}") + return + lr = result.log_reward + gr = ( + result.ground_truth_reward + if result.ground_truth_reward is not None + else 0.0 + ) + logging.info( + f" Append estimate [{len(self.results) + 1}]: " + f"log={lr}, estimated={er}, ground_truth={gr}" + ) + self.results.append( + EstimatorResult( + log_reward=result.log_reward, + estimated_reward=result.estimated_reward, + ground_truth_reward=gr, + estimated_weight=result.estimated_weight, + ) + ) + + def report(self): + ert = torch.tensor( + [res.estimated_reward for res in self.results], + dtype=torch.double, + device=self.device, + ) + lrt = torch.tensor( + [res.log_reward for res in self.results], + dtype=torch.double, + device=self.device, + ) + grt = torch.tensor( + [ + res.ground_truth_reward if res.ground_truth_reward is not None else 0.0 + for res in self.results + ], + dtype=torch.double, + device=self.device, + ) + self._estimated_log_diff = ResultDiffs(ert - lrt) + self._estimated_ground_truth_diff = ResultDiffs(ert - grt) + return ( + lrt.mean().item(), + ert.mean().item(), + grt.mean().item(), + ResultDiffs(ert - grt), + ResultDiffs(ert - lrt), + torch.tensor([float(res.estimated_weight) for res in self.results]) + .mean() + .item(), + ) + + +@dataclass(frozen=True) +class EstimatorSampleResult: + log_reward: float + target_reward: float + ground_truth_reward: float + weight: float def __repr__(self): - repr = "" - if self.estimate_gt_diffs is not None: - repr += f"Target vs GT: {self.estimate_gt_diffs}" - if self.estimate_log_diffs is not None: - if len(repr) > 0: - repr += ", " - repr += f"Target vs Log: {self.estimate_log_diffs}" - return repr + return ( + f"EstimatorSampleResult(log={self.log_reward}" + f",tgt={self.target_reward},gt={self.ground_truth_reward}" + f",wgt={self.weight}" + ) class Estimator(ABC): @@ -72,76 +162,129 @@ class Estimator(ABC): def __init__(self, device=None): self._device = device - self._logs = [] # logged values - self._estimates = [] # estimated values - self._ground_truths = [] # ground truth values - self._results = None - def reset(self): - self._logs.clear() - self._estimates.clear() - self._ground_truths.clear() - self._results = None + def _compute_metric_data( + self, tgt_rewards: Tensor, logged_score: float + ) -> Tuple[float, float, float]: + """ + Given a sequence of scores, normalizes the target score by the average logged score + and computes the standard error of the target score. Normalizing by the logged score + can provide a better metric to compare models against. 
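        Concretely, as implemented below: the normalized score is the mean of tgt_rewards
        divided by logged_score (or 0.0 when logged_score is under SCORE_THRESHOLD), the
        standard error is bootstrapped_std_error_of_mean(tgt_rewards), and the normalized
        standard error is that value scaled by the same normalizer.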
+ """ + if len(tgt_rewards.shape) > 1: + assert tgt_rewards.shape[1] == 1 + tgt_rewards = tgt_rewards.reshape((tgt_rewards.shape[0],)) + if logged_score < SCORE_THRESHOLD: + normalizer = 0.0 + else: + normalizer = 1.0 / logged_score + std_err = bootstrapped_std_error_of_mean(tgt_rewards) + return ( + torch.mean(tgt_rewards).item() * normalizer, + std_err, + std_err * normalizer, + ) - @property - def logged_values(self): - return self._logs + @abstractmethod + def evaluate( + self, input, **kwargs + ) -> Optional[Union[EstimatorResult, EstimatorResults]]: + pass - @property - def estimated_values(self): - return self._estimates + def __repr__(self): + return f"{self.__class__.__name__}(device({self._device}))" - @property - def ground_truth_values(self): - return self._ground_truths - def _append_estimate( +def run_evaluation( + file_name: str, +) -> Optional[Mapping[str, Iterable[EstimatorResults]]]: + logger.info(f"received filename {file_name}") + try: + with open(file_name, "rb") as fp: + estimators, inputs = pickle.load(fp) + except Exception as err: + return None + results = {} + for estimator in estimators: + estimator_name = repr(estimator) + estimator_results = [] + for input in inputs: + try: + estimator_results.append(estimator.evaluate(input)) + except Exception as err: + logger.error(f"{estimator_name} error {err}") + results[repr(estimator)] = estimator_results + return results + + +class Evaluator: + """ + Multiprocessing evaluator + """ + + def __init__( self, - log: Union[float, Tensor], - estimate: Union[float, Tensor], - ground_truth: Optional[Union[float, Tensor]] = None, + experiments: Iterable[Tuple[Iterable[Estimator], object]], + max_num_workers: int, ): - if math.isnan(estimate) or math.isinf(estimate): - return - logging.info( - f" Append estimate [{len(self._estimates) + 1}]: " - f"{log}, {estimate}, {ground_truth}" - ) - self._logs.append(log) - self._estimates.append(estimate) - if ground_truth is not None: - self._ground_truths.append(ground_truth) - self._results = None - - @property - def results(self) -> EstimatorResults: - if self._results is None: - logs_tensor = torch.tensor( - self._logs, dtype=torch.float, device=self._device - ) - estimates_tensor = torch.tensor( - self._estimates, dtype=torch.float, device=self._device - ) - if len(self._ground_truths) == len(self._estimates): - ground_truths_tensor = torch.tensor( - self._ground_truths, dtype=torch.float, device=self._device - ) - log_gt_diff = logs_tensor - ground_truths_tensor - else: - ground_truths_tensor = None - log_gt_diff = None - self._results = EstimatorResults( - logs_tensor, - estimates_tensor, - ground_truths_tensor, - ResultDiffs(log_gt_diff) if log_gt_diff is not None else None, - ResultDiffs(estimates_tensor - logs_tensor), - ) - return self._results + """ + Args: + estimators: estimators to be evaluated + experiments: + max_num_workers: <= 0 no multiprocessing + otherwise create max_num_workers processes + """ + self._experiments = experiments + self._tasks = None + if max_num_workers > 0: + self._tasks = [[] for _ in range(max_num_workers)] + for i, experiment in enumerate(experiments): + self._tasks[i % max_num_workers].append(experiment) - @abstractmethod - def evaluate(self, input, **kwargs) -> EstimatorResults: - pass + def evaluate(self) -> Mapping[str, EstimatorResults]: + results = {} + if self._tasks is None: + for estimators, input in self._experiments: + for estimator in estimators: + estimator_name = repr(estimator) + if estimator_name in results: + result = 
results[estimator_name] + else: + result = EstimatorResults() + results[estimator_name] = result + result.append(estimator.evaluate(input)) + else: + tmp_files = [] + tmp_file_names = [] + for task in self._tasks: + fp = tempfile.NamedTemporaryFile() + pickle.dump(task, fp, protocol=pickle.HIGHEST_PROTOCOL) + fp.flush() + tmp_files.append(fp) + tmp_file_names.append(fp.name) + with Pool(len(tmp_file_names)) as pool: + evaluation_results = pool.map(run_evaluation, tmp_file_names) + for tmp_file in tmp_files: + tmp_file.close() + for evaluation_result in evaluation_results: + if evaluation_result is None: + continue + for estimator_name, estimator_results in evaluation_result.items(): + if estimator_name in results: + result = results[estimator_name] + else: + result = EstimatorResults() + results[estimator_name] = result + for estimator_result in estimator_results: + result.append(estimator_result) + return results - def __repr__(self): - return f"{self.__class__.__name__}{{device[{self._device}]}}" + @staticmethod + def report_results(results: Mapping[str, EstimatorResults]): + for name, result in results.items(): + log_r, tgt_r, gt_r, tgt_gt, tgt_log, weight = result.report() + print( + f"{name} rewards: log_reward{log_r} tgt_reward[{tgt_r}] gt_reward[{gt_r}]" + f", diffs: tgt-gt[{tgt_gt}] tgt-log[{tgt_log}]", + flush=True, + ) diff --git a/reagent/ope/estimators/sequential_estimators.py b/reagent/ope/estimators/sequential_estimators.py index ace0ac319..1fde8ba56 100644 --- a/reagent/ope/estimators/sequential_estimators.py +++ b/reagent/ope/estimators/sequential_estimators.py @@ -1,18 +1,26 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import copy import logging import random import time +import typing from abc import ABC, abstractmethod +from copy import deepcopy from dataclasses import dataclass from enum import Enum from functools import reduce from itertools import count, zip_longest -from typing import Iterable, Mapping, Optional, Sequence, Tuple, Union +from typing import Callable, Dict, Iterable, Mapping, Optional, Sequence, Tuple, Union import numpy as np import torch -from reagent.ope.estimators.estimator import Estimator, EstimatorResults +from reagent.ope.estimators.estimator import ( + Estimator, + EstimatorResult, + EstimatorResults, +) from reagent.ope.estimators.types import ( Action, ActionDistribution, @@ -21,6 +29,7 @@ Reward, TypeWrapper, ) +from reagent.ope.trainers.linear_trainers import LinearNet from reagent.ope.utils import Clamper, RunningAverage from torch import Tensor @@ -32,13 +41,13 @@ class State(TypeWrapper[StateType]): is_terminal: bool = False - def __repr__(self): + def __repr__(self) -> str: return super().__repr__()[:-1] + f",is_terminal[{self.is_terminal}]}}" @dataclass(frozen=True) class StateReward: - state: State = None + state: Optional[State] = None reward: Reward = 0.0 @@ -76,7 +85,7 @@ class RLPolicy(ABC): Policy interface """ - def __init__(self, action_space: ActionSpace, device=None): + def __init__(self, action_space: ActionSpace, device=None) -> None: self._action_space = action_space self._device = device @@ -97,11 +106,14 @@ class RandomRLPolicy(RLPolicy): A random policy which return an action according to uniform distribution """ - def __init__(self, action_space: ActionSpace, device=None): + def __init__(self, action_space: ActionSpace, device=None) -> None: super().__init__(action_space, device) self._prob = 1.0 / len(action_space) def action_dist(self, state: State) -> 
ActionDistribution: + # pyre-fixme[6]: Expected `Union[Mapping[TypeWrapper[Union[Tuple[float], + # Tuple[int], Tensor, float, int, np.ndarray]], float], Sequence[float], + # Tensor, np.ndarray]` for 1st param but got `int`. return self._action_space.distribution([self._prob] * len(self._action_space)) @@ -113,15 +125,15 @@ class EpsilonGreedyRLPolicy(RLPolicy): calculate probabilities for all actions """ - def __init__(self, policy: RLPolicy, epsilon: float = 0.0): - assert policy is not None and 0 <= epsilon < 1 + def __init__(self, policy: RLPolicy, epsilon: float = 0.0) -> None: + assert policy is not None and 0.0 <= epsilon < 1.0 super().__init__(policy._device) self._policy = policy self._exploitation_prob = 1.0 - epsilon self._exploration_prob = epsilon / len(policy.action_space) - def action_dist(self, state) -> ActionDistribution: - new_dist = self._policy(state).copy() + def action_dist(self, state: State) -> ActionDistribution: + new_dist = deepcopy(self._policy(state)) for a, p in new_dist: new_dist[a] = p * self._exploitation_prob + self._exploration_prob return new_dist @@ -168,11 +180,12 @@ def __call__(self, state: State, action: Optional[Action] = None) -> float: @dataclass(frozen=True) class RLEstimatorInput: gamma: float - log: Mapping[State, Sequence[Mdp]] + log: Sequence[Mdp] target_policy: RLPolicy value_function: Optional[ValueFunction] = None ground_truth: Optional[ValueFunction] = None horizon: int = -1 + discrete_states: bool = True class RLEstimator(Estimator): @@ -187,6 +200,21 @@ def _log_reward(self, gamma: float, mdps: Sequence[Mdp]) -> float: avg.add(r) return avg.average + def _estimate_value( + self, gamma: float, mdps: Sequence[Mdp], value_function: ValueFunction + ) -> float: + avg = RunningAverage() + for mdp in mdps: + discount = 1.0 + r = 0.0 + for t in mdp: + if t.last_state is None: + break + r += discount * value_function(t.last_state) + discount *= gamma + avg.add(r) + return avg.average + class DMEstimator(RLEstimator): """ @@ -194,24 +222,28 @@ class DMEstimator(RLEstimator): """ def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults: + # kwargs is part of the function signature, so to satisfy pyre it must be included assert input.value_function is not None logging.info(f"{self}: start evaluating") stime = time.process_time() - self.reset() - for state, mdps in input.log.items(): - estimate = input.value_function(state) - if input.ground_truth is not None: - ground_truth = input.ground_truth(state) - else: - ground_truth = None - self._append_estimate( - self._log_reward(input.gamma, mdps), estimate, ground_truth + results = EstimatorResults() + + estimate = self._estimate_value(input.gamma, input.log, input.value_function) + if input.ground_truth is not None: + gt = self._estimate_value(input.gamma, input.log, input.ground_truth) + results.append( + EstimatorResult( + self._log_reward(input.gamma, input.log), + estimate, + # pyre-fixme[61]: `gt` may not be initialized here. 
+ None if input.ground_truth is None else gt, ) + ) logging.info( f"{self}: finishing evaluating[" f"process_time={time.process_time() - stime}]" ) - return self.results + return results class IPSEstimator(RLEstimator): @@ -220,8 +252,11 @@ class IPSEstimator(RLEstimator): """ def __init__( - self, weight_clamper: Clamper = None, weighted: bool = True, device=None - ): + self, + weight_clamper: Optional[Clamper] = None, + weighted: bool = True, + device=None, + ) -> None: super().__init__(device) self._weight_clamper = ( weight_clamper if weight_clamper is not None else Clamper() @@ -243,6 +278,7 @@ def _calc_weights( i = 0 for t in ts: if t is not None and t.action is not None and t.action_prob > 0.0: + assert t.last_state is not None pi_e[i, j] = policy(t.last_state)[t.action] pi_b[i, j] = t.action_prob else: @@ -262,90 +298,100 @@ def _calc_weights( return self._weight_clamper(ws) def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults: + # kwargs is part of the function signature, so to satisfy pyre it must be included logging.info(f"{self}: start evaluating") stime = time.process_time() - self.reset() - for state, mdps in input.log.items(): - n = len(mdps) - horizon = len(reduce(lambda a, b: a if len(a) > len(b) else b, mdps)) - weights = self._calc_weights( - n, horizon, zip_longest(*mdps), input.target_policy - ) - discount = torch.full((horizon,), input.gamma, device=self._device) - discount[0] = 1.0 - discount = discount.cumprod(0) - rewards = torch.zeros((n, horizon)) - j = 0 - for ts in zip_longest(*mdps): - i = 0 - for t in ts: - if t is not None: - rewards[i, j] = t.reward - i += 1 - j += 1 - rewards = rewards.to(device=self._device) - estimate = weights.mul(rewards).sum(0).mul(discount).sum().item() - if input.ground_truth is not None: - ground_truth = input.ground_truth(state) - else: - ground_truth = None - self._append_estimate( - self._log_reward(input.gamma, mdps), estimate, ground_truth + results = EstimatorResults() + + n = len(input.log) + horizon = len(reduce(lambda a, b: a if len(a) > len(b) else b, input.log)) + weights = self._calc_weights( + n, horizon, zip_longest(*input.log), input.target_policy + ) + discount = torch.full((horizon,), input.gamma, device=self._device) + discount[0] = 1.0 + discount = discount.cumprod(0) + rewards = torch.zeros((n, horizon)) + j = 0 + for ts in zip_longest(*input.log): + i = 0 + for t in ts: + if t is not None: + rewards[i, j] = t.reward + i += 1 + j += 1 + rewards = rewards.to(device=self._device) + estimate = weights.mul(rewards).sum(0).mul(discount).sum().item() + + results.append( + EstimatorResult( + self._log_reward(input.gamma, input.log), + estimate, + None + if input.ground_truth is None + else self._estimate_value(input.gamma, input.log, input.ground_truth), ) + ) logging.info( f"{self}: finishing evaluating[" f"process_time={time.process_time() - stime}]" ) - return self.results + return results def __repr__(self): return super().__repr__()[0:-1] + f",weighted[{self._weighted}]}}" -class DREstimator(IPSEstimator): +class DoublyRobustEstimator(IPSEstimator): """ Doubly Robust estimator """ def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults: + # kwargs is part of the function signature, so to satisfy pyre it must be included logging.info(f"{self}: start evaluating") stime = time.process_time() - self.reset() - for state, mdps in input.log.items(): - n = len(mdps) - horizon = len(reduce(lambda a, b: a if len(a) > len(b) else b, mdps)) - ws = self._calc_weights(n, horizon, 
zip_longest(*mdps), input.target_policy) - last_ws = torch.zeros((n, horizon), device=self._device) - last_ws[:, 0] = 1.0 / n - last_ws[:, 1:] = ws[:, :-1] - discount = torch.full((horizon,), input.gamma, device=self._device) - discount[0] = 1.0 - discount = discount.cumprod(0) - rs = torch.zeros((n, horizon)) - vs = torch.zeros((n, horizon)) - qs = torch.zeros((n, horizon)) - for ts, j in zip(zip_longest(*mdps), count()): - for t, i in zip(ts, count()): - if t is not None and t.action is not None: - qs[i, j] = input.value_function(t.last_state, t.action) - vs[i, j] = input.value_function(t.last_state) - rs[i, j] = t.reward - vs = vs.to(device=self._device) - qs = qs.to(device=self._device) - rs = rs.to(device=self._device) - estimate = ((ws * (rs - qs) + last_ws * vs).sum(0) * discount).sum().item() - if input.ground_truth is not None: - ground_truth = input.ground_truth(state) - else: - ground_truth = None - self._append_estimate( - self._log_reward(input.gamma, mdps), estimate, ground_truth + results = EstimatorResults() + + n = len(input.log) + horizon = len(reduce(lambda a, b: a if len(a) > len(b) else b, input.log)) + ws = self._calc_weights( + n, horizon, zip_longest(*input.log), input.target_policy + ) + last_ws = torch.zeros((n, horizon), device=self._device) + last_ws[:, 0] = 1.0 / n + last_ws[:, 1:] = ws[:, :-1] + discount = torch.full((horizon,), input.gamma, device=self._device) + discount[0] = 1.0 + discount = discount.cumprod(0) + rs = torch.zeros((n, horizon)) + vs = torch.zeros((n, horizon)) + qs = torch.zeros((n, horizon)) + for ts, j in zip(zip_longest(*input.log), count()): + for t, i in zip(ts, count()): + if t is not None and t.action is not None: + assert input.value_function is not None + qs[i, j] = input.value_function(t.last_state, t.action) + vs[i, j] = input.value_function(t.last_state) + rs[i, j] = t.reward + vs = vs.to(device=self._device) + qs = qs.to(device=self._device) + rs = rs.to(device=self._device) + estimate = ((ws * (rs - qs) + last_ws * vs).sum(0) * discount).sum().item() + results.append( + EstimatorResult( + self._log_reward(input.gamma, input.log), + estimate, + None + if input.ground_truth is None + else self._estimate_value(input.gamma, input.log, input.ground_truth), ) + ) logging.info( f"{self}: finishing evaluating[" f"process_time={time.process_time() - stime}]" ) - return self.results + return results class MAGICEstimator(IPSEstimator): @@ -353,107 +399,387 @@ class MAGICEstimator(IPSEstimator): Algorithm from https://arxiv.org/abs/1604.00923, appendix G.3 """ - def __init__(self, weight_clamper: Clamper = None, device=None): + def __init__(self, weight_clamper: Optional[Clamper] = None, device=None) -> None: super().__init__(weight_clamper, True, device) def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults: assert input.value_function is not None logging.info(f"{self}: start evaluating") stime = time.process_time() - self.reset() + results = EstimatorResults() num_resamples = kwargs["num_resamples"] if "num_resamples" in kwargs else 200 - loss_threhold = ( - kwargs["loss_threhold"] if "loss_threhold" in kwargs else 0.00001 + loss_threshold = ( + kwargs["loss_threshold"] if "loss_threshold" in kwargs else 0.00001 ) lr = kwargs["lr"] if "lr" in kwargs else 0.0001 logging.info( f" params: num_resamples[{num_resamples}], " - f"loss_threshold[{loss_threhold}], " + f"loss_threshold[{loss_threshold}], " f"lr[{lr}]" ) - for state, mdps in input.log.items(): - n = len(mdps) - horizon = len(reduce(lambda a, b: a if len(a) > 
len(b) else b, mdps)) - ws = self._calc_weights(n, horizon, zip_longest(*mdps), input.target_policy) - last_ws = torch.zeros((n, horizon), device=self._device) - last_ws[:, 0] = 1.0 / n - last_ws[:, 1:] = ws[:, :-1] - discount = torch.full((horizon,), input.gamma, device=self._device) - discount[0] = 1.0 - discount = discount.cumprod(0) - rs = torch.zeros((n, horizon)) - vs = torch.zeros((n, horizon)) - qs = torch.zeros((n, horizon)) - for ts, j in zip(zip_longest(*mdps), count()): - for t, i in zip(ts, count()): - if t is not None and t.action is not None: - qs[i, j] = input.value_function(t.last_state, t.action) - vs[i, j] = input.value_function(t.last_state) - rs[i, j] = t.reward - vs = vs.to(device=self._device) - qs = qs.to(device=self._device) - rs = rs.to(device=self._device) - wdrs = ((ws * (rs - qs) + last_ws * vs) * discount).cumsum(1) - wdr = wdrs[:, -1].sum(0) - next_vs = torch.zeros((n, horizon), device=self._device) - next_vs[:, :-1] = vs[:, 1:] - gs = wdrs + ws * next_vs * discount - gs_normal = gs.sub(torch.mean(gs, 0)) - omiga = n * torch.einsum("ij,ik->jk", gs_normal, gs_normal) / (n - 1.0) - resample_wdrs = torch.zeros((num_resamples,)) - for i in range(num_resamples): - samples = random.choices(range(n), k=n) - sws = ws[samples, :] - last_sws = last_ws[samples, :] - srs = rs[samples, :] - svs = vs[samples, :] - sqs = qs[samples, :] - resample_wdrs[i] = ( - ((sws * (srs - sqs) + last_sws * svs).sum(0) * discount) - .sum() - .item() - ) - resample_wdrs, _ = resample_wdrs.to(device=self._device).sort(0) - lb = torch.min(wdr, resample_wdrs[int(round(0.05 * num_resamples))]) - ub = torch.max(wdr, resample_wdrs[int(round(0.95 * num_resamples)) - 1]) - b = torch.tensor( - list( - map( - lambda a: a - ub if a > ub else (a - lb if a < lb else 0.0), - gs.sum(0), - ) - ), - device=self._device, + # Compute MAGIC estimate + n = len(input.log) + horizon = len(reduce(lambda a, b: a if len(a) > len(b) else b, input.log)) + ws = self._calc_weights( + n, horizon, zip_longest(*input.log), input.target_policy + ) + last_ws = torch.zeros((n, horizon), device=self._device) + last_ws[:, 0] = 1.0 / n + last_ws[:, 1:] = ws[:, :-1] + discount = torch.full((horizon,), input.gamma, device=self._device) + discount[0] = 1.0 + discount = discount.cumprod(0) + rs = torch.zeros((n, horizon)) + vs = torch.zeros((n, horizon)) + qs = torch.zeros((n, horizon)) + for ts, j in zip(zip_longest(*input.log), count()): + for t, i in zip(ts, count()): + if t is not None and t.action is not None: + qs[i, j] = input.value_function(t.last_state, t.action) + vs[i, j] = input.value_function(t.last_state) + rs[i, j] = t.reward + vs = vs.to(device=self._device) + qs = qs.to(device=self._device) + rs = rs.to(device=self._device) + wdrs = ((ws * (rs - qs) + last_ws * vs) * discount).cumsum(1) + wdr = wdrs[:, -1].sum(0) + next_vs = torch.zeros((n, horizon), device=self._device) + next_vs[:, :-1] = vs[:, 1:] + gs = wdrs + ws * next_vs * discount + gs_normal = gs.sub(torch.mean(gs, 0)) + assert n > 1 + omiga = (n / (n - 1.0)) * torch.einsum("ij,ik->jk", gs_normal, gs_normal) + resample_wdrs = torch.zeros((num_resamples,)) + for i in range(num_resamples): + samples = random.choices(range(n), k=n) + sws = ws[samples, :] + last_sws = last_ws[samples, :] + srs = rs[samples, :] + svs = vs[samples, :] + sqs = qs[samples, :] + resample_wdrs[i] = ( + ((sws * (srs - sqs) + last_sws * svs).sum(0) * discount).sum().item() ) - b.unsqueeze_(0) - bb = b * b.t() - cov = omiga + bb - # x = torch.rand((1, horizon), device=self.device, 
requires_grad=True) - x = torch.zeros((1, horizon), device=self._device, requires_grad=True) - # using SGD to find min x - optimizer = torch.optim.SGD([x], lr=lr) - last_y = 0.0 - for i in range(100): - x = torch.nn.functional.softmax(x, dim=1) - y = torch.mm(torch.mm(x, cov), x.t()) - if abs(y.item() - last_y) < loss_threhold: - print(f"{i}: {last_y} -> {y.item()}") - break - last_y = y.item() - optimizer.zero_grad() - y.backward(retain_graph=True) - optimizer.step() + resample_wdrs, _ = resample_wdrs.to(device=self._device).sort(0) + lb = torch.min(wdr, resample_wdrs[int(round(0.05 * num_resamples))]) + ub = torch.max(wdr, resample_wdrs[int(round(0.95 * num_resamples)) - 1]) + b = torch.tensor( + list( + map( + lambda a: a - ub if a > ub else (a - lb if a < lb else 0.0), + gs.sum(0), + ) + ), + device=self._device, + ) + b.unsqueeze_(0) + bb = b * b.t() + cov = omiga + bb + # x = torch.rand((1, horizon), device=self.device, requires_grad=True) + x = torch.zeros((1, horizon), device=self._device, requires_grad=True) + # using SGD to find min x + optimizer = torch.optim.SGD([x], lr=lr) + last_y = 0.0 + for i in range(100): x = torch.nn.functional.softmax(x, dim=1) - estimate = torch.mm(x, gs.sum(0, keepdim=True).t()) - if input.ground_truth is not None: - ground_truth = input.ground_truth(state) - else: - ground_truth = None - self._append_estimate( - self._log_reward(input.gamma, mdps), estimate, ground_truth + y = torch.mm(torch.mm(x, cov), x.t()) + # pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, + # float, int]`. + if abs(y.item() - last_y) < loss_threshold: + print(f"{i}: {last_y} -> {y.item()}") + break + last_y = y.item() + optimizer.zero_grad() + y.backward(retain_graph=True) + optimizer.step() + x = torch.nn.functional.softmax(x, dim=1) + estimate = torch.mm(x, gs.sum(0, keepdim=True).t()).cpu().item() + + results.append( + EstimatorResult( + self._log_reward(input.gamma, input.log), + estimate, + None + if input.ground_truth is None + else self._estimate_value(input.gamma, input.log, input.ground_truth), ) + ) logging.info( f"{self}: finishing evaluating[" f"process_time={time.process_time() - stime}]" ) - return self.results + return results + + +@dataclass +class NeuralDualDICE(RLEstimator): + # See https://arxiv.org/pdf/1906.04733.pdf sections 4, 5, A + # Google's implementation: https://github.com/google-research/google-research/tree/master/dual_dice + """ + Args: + state_dim: The dimensionality of the state vectors + action_dim: The number of discrete actions + deterministic_env: Whether or not the environment is determinstic. + Can help with stability of training. + average_next_v: Whether or not to average the next nu value over all + possible actions. Can help with stability of training. 
+ polynomial_degree: The degree of the convex function f(x) = 1/p * |x|^p + value_lr: The learning rate for nu + zeta_lr: The learning rate for zeta + hidden_dim: The dimensionality of the hidden layers for zeta and v + hidden_layers: The number of hidden layers for zeta and v + activation: The activation function for zeta and v + training_samples: The number of batches to train zeta and v for + batch_size: The number of samples in each batch + loss_callback_fn: A function that will be called every reporting_frequency batches, + giving the average zeta loss, average nu loss, and self + reporting_frequency: The number of batches between outputting the state of the training + """ + state_dim: int + action_dim: int + deterministic_env: bool + average_next_v: bool = False + polynomial_degree: float = 1.5 + value_lr: float = 0.01 + zeta_lr: float = 0.01 + hidden_dim: int = 64 + hidden_layers: int = 2 + activation = torch.nn.Tanh + training_samples: int = 100000 + batch_size: int = 2048 + device: typing.Any = None + loss_callback_fn: Optional[Callable[[float, float, RLEstimator], None]] = None + reporting_frequency: int = 1000 + # These are initialized in __post_init__() and calms Pyre + v: typing.Any = None + zeta: typing.Any = None + f: typing.Any = None + fconjugate: typing.Any = None + zeta_net: typing.Any = None + v_net: typing.Any = None + + def __post_init__(self) -> None: + conjugate_exponent = self.polynomial_degree / (self.polynomial_degree - 1) + self.f = self._get_convex_f(self.polynomial_degree) + self.fconjugate = self._get_convex_f(conjugate_exponent) + self.reset() + + def _get_convex_f(self, degree): + return lambda x: (torch.abs(x) ** degree) / degree + + @torch.no_grad() + def _mdps_value(self, mdps: Sequence[Mdp], gamma: float) -> float: + self.zeta_net.eval() + avg = RunningAverage() + + for mdp in mdps: + discount = 1.0 + r = 0.0 + for t in mdp: + assert t.last_state is not None, "Expected last_state, got None" + assert t.action is not None, "Expected action, got None" + zeta = self.zeta( + torch.tensor(t.last_state.value, dtype=torch.float) + .reshape(-1, self.state_dim) + .to(self.device), + torch.nn.functional.one_hot( + torch.tensor(t.action.value, dtype=torch.long), self.action_dim + ) + .reshape(-1, self.action_dim) + .float() + .to(self.device), + ) + r += discount * t.reward * zeta.cpu().item() + discount *= gamma + avg.add(r) + self.zeta_net.train() + return avg.average + + @torch.no_grad() + def _compute_estimates(self, input: RLEstimatorInput) -> EstimatorResults: + results = EstimatorResults() + estimate = self._mdps_value(input.log, input.gamma) + results.append( + EstimatorResult( + self._log_reward(input.gamma, input.log), + estimate, + None + if input.ground_truth is None + else self._estimate_value(input.gamma, input.log, input.ground_truth), + ) + ) + return results + + def _compute_average_v(self, transition): + next_vs = [ + transition["tgt_action_props"][:, a].reshape(-1, 1) + * self.v( + transition["state"], + torch.nn.functional.one_hot( + torch.tensor(a, dtype=torch.long), self.action_dim + ) + .reshape(1, -1) + .float() + .to(self.device) + .repeat(transition["state"].shape[0], 1), + ) + for a in range(self.action_dim) + ] + return sum(next_vs) + + def _compute_loss( + self, gamma: float, transition: Dict, compute_determ_v_loss: bool + ): + if self.average_next_v: + next_v = self._compute_average_v(transition) + else: + next_v = self.v(transition["state"], transition["next_action"]) + delta_v = ( + self.v(transition["last_state"], 
transition["log_action"]) - gamma * next_v + ) + init_v = self.v(transition["init_state"], transition["init_action"]) + if compute_determ_v_loss: + unweighted_loss = self.f(delta_v) - (1 - gamma) * init_v + else: + zeta = self.zeta(transition["last_state"], transition["log_action"]) + unweighted_loss = ( + delta_v * zeta - self.fconjugate(zeta) - (1 - gamma) * init_v + ) + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `Any`. + weights = torch.full( + (unweighted_loss.shape[0], 1), gamma, dtype=torch.float + ).to(device=self.device) ** transition["timestep"].reshape((-1, 1)) + return torch.sum(weights * unweighted_loss) / torch.sum(weights) + + def reset(self) -> None: + self.v_net = LinearNet( + self.state_dim + self.action_dim, + self.hidden_dim, + 1, + self.hidden_layers, + self.activation, + ) + self.zeta_net = copy.deepcopy(self.v_net) + self.v_net.to(self.device) + self.zeta_net.to(self.device) + + self.v = self._build_function(self.v_net) + self.zeta = self._build_function(self.zeta_net) + + def _build_function(self, net: torch.nn.Module): + return lambda s, a: net(torch.cat((s, a), dim=1)) + + def _collect_data(self, input: RLEstimatorInput): + samples = { + "init_state": [], + "init_action": [], + "last_state": [], + "state": [], + "log_action": [], + "next_action": [], + "tgt_action_props": [], + "timestep": [], + "reward": [], + } + for mdp in input.log: + state = mdp[0].last_state + assert state is not None, "Expected initial state, got None" + tgt_init_action = input.target_policy.action_dist(state).sample()[0] + for i, t in enumerate(mdp): + assert ( + t.state is not None + and t.last_state is not None + and t.action is not None + ), "Expected all fields to be present" + tgt_dist = input.target_policy.action_dist(t.state) + tgt_action = tgt_dist.sample()[0] + samples["init_state"].append(state.value) + samples["init_action"].append( + torch.nn.functional.one_hot( + torch.tensor(tgt_init_action.value, dtype=torch.long), + self.action_dim, + ).float() + ) + samples["last_state"].append(t.last_state.value) + samples["state"].append(t.state.value) + samples["log_action"].append( + torch.nn.functional.one_hot( + torch.tensor(t.action.value, dtype=torch.long), self.action_dim + ).float() + ) + samples["next_action"].append( + torch.nn.functional.one_hot( + torch.tensor(tgt_action.value, dtype=torch.long), + self.action_dim, + ).float() + ) + samples["tgt_action_props"].append(tgt_dist.values) + samples["timestep"].append(i) + samples["reward"].append(t.reward) + + return { + k: torch.stack(v).to(self.device) + if "action" in k + else torch.tensor(v, dtype=torch.float).to(self.device) + for k, v in samples.items() + } + + def _sample_batch(self, dataset): + idxs = np.random.choice(dataset["init_state"].shape[0], self.batch_size) + return {k: v[idxs] for k, v in dataset.items()} + + def evaluate(self, input: RLEstimatorInput, **kwargs) -> EstimatorResults: + stime = time.process_time() + dataset = self._collect_data(input) + logging.info(f"Data loading time: {time.process_time() - stime}") + + zeta_optim = torch.optim.Adam(self.zeta_net.parameters(), lr=self.zeta_lr) + v_optim = torch.optim.Adam(self.v_net.parameters(), lr=self.value_lr) + avg_zeta_loss = RunningAverage() + avg_v_loss = RunningAverage() + sample_time = time.process_time() + for sampled in range(self.training_samples): + sample = self._sample_batch(dataset) + + zeta_loss = -(self._compute_loss(input.gamma, sample, False)) + # Populate zeta gradients and optimize + zeta_optim.zero_grad() + 
zeta_loss.backward() + zeta_optim.step() + + if self.deterministic_env: + v_loss = self._compute_loss(input.gamma, sample, True) + else: + v_loss = self._compute_loss(*sample) + # Populate value gradients and optimize + v_optim.zero_grad() + v_loss.backward() + v_optim.step() + + avg_zeta_loss.add(zeta_loss.cpu().item()) + avg_v_loss.add(v_loss.cpu().item()) + if sampled % self.reporting_frequency == 0: + report_time = time.process_time() - sample_time + callback_time = None + if self.loss_callback_fn is not None: + # Pyre gets angry if we don't make callback local + callback = self.loss_callback_fn + assert callback is not None + stime = time.process_time() + callback(avg_zeta_loss.average, avg_v_loss.average, self) + callback_time = abs(time.process_time() - stime) + logging.info( + f"Samples {sampled}, " + f"Avg Zeta Loss {avg_zeta_loss.average}, " + f"Avg Value Loss {avg_v_loss.average},\n" + f"Time per {self.reporting_frequency} samples: {report_time}" + + ( + "" + if callback_time is None + else f", Time for callback: {callback_time}" + ) + ) + avg_zeta_loss = RunningAverage() + avg_v_loss = RunningAverage() + sample_time = time.process_time() + return self._compute_estimates(input) diff --git a/reagent/ope/estimators/slate_estimators.py b/reagent/ope/estimators/slate_estimators.py index cf6aca1fd..b283cd161 100644 --- a/reagent/ope/estimators/slate_estimators.py +++ b/reagent/ope/estimators/slate_estimators.py @@ -1,31 +1,42 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging import math +import random +import time from abc import ABC, abstractmethod from dataclasses import dataclass from typing import ( - Generic, Iterable, Mapping, MutableMapping, MutableSequence, Optional, Sequence, + Set, Tuple, Union, ) import numpy as np import torch -from reagent.ope.estimators.estimator import Estimator, EstimatorResults +from reagent.ope.estimators.estimator import ( + Estimator, + EstimatorResult, + EstimatorSampleResult, +) from reagent.ope.estimators.types import ( Action, Items, + Objects, Probability, - Type, + Reward, + Trainer, + TrainingData, TypeWrapper, Values, + ValueType, ) from reagent.ope.utils import Clamper, RunningAverage from torch import Tensor @@ -40,6 +51,7 @@ # Types for slates SlateSlotType = Union[int, Tuple[int], float, Tuple[float], np.ndarray, Tensor] SlateSlot = TypeWrapper[SlateSlotType] +logger = logging.getLogger(__name__) class SlateSlotValues(Values[SlateSlot]): @@ -47,7 +59,7 @@ class SlateSlotValues(Values[SlateSlot]): Map from a slot to a value """ - def _new_key(self, k: int) -> SlateSlot: + def _to_key(self, k: int) -> SlateSlot: return SlateSlot(k) @@ -59,6 +71,7 @@ class SlateSlots(Items[SlateSlot]): def _new_item(self, i: int) -> SlateSlot: return SlateSlot(i) + # pyre-fixme[15]: `fill` overrides method defined in `Items` inconsistently. 
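# Illustrative aside (not part of the patch), referring back to NeuralDualDICE above: with
# f(x) = |x|^p / p, the conjugate built by _get_convex_f(conjugate_exponent) is
# f*(y) = |y|^q / q where q = p / (p - 1). Below is a small numeric sanity check of that
# pairing via Young's inequality x*y <= f(x) + f*(y); an assumption-level sketch, not library code.
import torch

p = 1.5                        # matches the default polynomial_degree
q = p / (p - 1.0)              # conjugate exponent, as in __post_init__
f = lambda x: torch.abs(x) ** p / p
f_conjugate = lambda y: torch.abs(y) ** q / q

xs = torch.linspace(-3.0, 3.0, 101)
ys = torch.linspace(-3.0, 3.0, 101).unsqueeze(1)
assert torch.all(xs * ys <= f(xs) + f_conjugate(ys) + 1e-6)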
def fill( self, values: Union[Mapping[SlateSlot, float], Sequence[float], np.ndarray, Tensor], @@ -71,65 +84,38 @@ def fill( Returns: Map from slots to given values """ - return SlateSlotValues(super()._fill(values)) + return SlateSlotValues(super().fill(values)) -class SlateSlotObjects(Generic[Type]): +class SlateSlotObjects(Objects[SlateSlot, ValueType]): def __init__( - self, values: Union[MutableMapping[SlateSlot, Type], MutableSequence[Type]] + self, + values: Union[MutableMapping[SlateSlot, ValueType], MutableSequence[ValueType]], ): assert (len(values)) > 0 - self._slot_to_index = None - if isinstance(values, Mapping): - self._slot_to_index = {s: i for i, s in enumerate(values.keys())} - self._values = list(values.values()) - else: - self._values = values - - def __getitem__(self, slot: SlateSlot) -> Optional[Type]: - try: - if self._slot_to_index is None: - return self._values[slot] - else: - return self._values[self._slot_to_index[slot]] - except Exception: - return None - - def __setitem__(self, slot: SlateSlot, value: Type): - if self._slot_to_index is None: - self._values[slot] = value - else: - self._values[self._slot_to_index[slot]] = value - - def __len__(self): - return len(self._values) - - def __iter__(self): - if self._slot_to_index is None: - return ((SlateSlot(a), p) for a, p in enumerate(self._values)) - else: - return ((s, self._values[i]) for s, i in self._slot_to_index.items()) + super().__init__(values) - @property - def is_sequence(self): - return self._slot_to_index is None + def _to_key(self, k: int) -> SlateSlot: + return SlateSlot(k) @property def slots(self) -> SlateSlots: - if self._slot_to_index is None: + if self.is_sequence: + # pyre-fixme[16]: `SlateSlotObjects` has no attribute `_values`. return SlateSlots(len(self._values)) else: - return SlateSlots(list(self._slot_to_index.keys())) + return SlateSlots(list(self._key_to_index.keys())) @property - def items(self) -> Sequence[Type]: - return list(self._values) + def objects(self) -> Sequence[ValueType]: + return super().values def fill( - self, values: Sequence[object] - ) -> Union[Mapping[SlateSlot, object], Sequence[object]]: + self, values: Sequence[ValueType] + ) -> Union[Mapping[SlateSlot, ValueType], Sequence[ValueType]]: + # pyre-fixme[16]: `SlateSlotObjects` has no attribute `_values`. assert len(values) >= len(self._values) - if self._slot_to_index is None: + if self._key_to_index is None: return values[: len(self._values)] else: return {s: v for s, v in zip(self.slots, values[: len(self._values)])} @@ -145,7 +131,50 @@ def _new_item(self, i: int) -> SlateItem: class SlateItemValues(Values[SlateItem]): - def _new_key(self, k: int) -> SlateItem: + def _to_key(self, k: int) -> SlateItem: + return SlateItem(k) + + @property + def items(self) -> SlateItems: + if self.is_sequence: + return SlateItems(len(self)) + else: + return SlateItems(super().keys) + + +class SlateItemFeatures(Objects[SlateItem, Tensor]): + def __init__( + self, + values: Union[Mapping[SlateItem, Tensor], Sequence[Tensor], Tensor, np.ndarray], + ): + # pyre-fixme[6]: Expected + # `Union[Mapping[Variable[reagent.ope.estimators.types.KeyType], + # Variable[ValueType]], Sequence[Variable[ValueType]]]` for 1st param but got + # `Union[Mapping[TypeWrapper[Union[Tuple[float], Tuple[int], Tensor, float, + # int, np.ndarray]], Tensor], Sequence[Tensor], Tensor, np.ndarray]`. 
+ super().__init__(values) + + def _init_values( + self, + values: Union[Mapping[SlateItem, Tensor], Sequence[Tensor], Tensor, np.ndarray], + ): + if isinstance(values, Tensor): + # pyre-fixme[16]: `SlateItemFeatures` has no attribute `_values`. + self._values = values.to(dtype=torch.double) + elif isinstance(values, np.ndarray): + self._values = torch.as_tensor(values, dtype=torch.double) + elif isinstance(values, Sequence): + # pyre-fixme[6]: Expected `Union[typing.List[Tensor], + # typing.Tuple[Tensor, ...]]` for 1st param but got `Sequence[Tensor]`. + self._values = torch.stack(values).to(dtype=torch.double) + elif isinstance(values, Mapping): + self._key_to_index = dict(zip(values.keys(), range(len(values)))) + self._index_to_key = list(values.keys()) + self._values = torch.stack(list(values.values())).to(dtype=torch.double) + else: + raise TypeError(f"Unsupported values type {type(values)}") + + def _to_key(self, k: int) -> SlateItem: return SlateItem(k) @property @@ -153,7 +182,15 @@ def items(self) -> SlateItems: if self.is_sequence: return SlateItems(len(self)) else: - return SlateItems(super().items) + return SlateItems(super().keys) + + +# SlateSlotFeatures = SlateSlotObjects[Tensor] +class SlateSlotFeatures(SlateSlotObjects[Tensor]): + @property + def features(self) -> Tensor: + # pyre-fixme[16]: `SlateSlotFeatures` has no attribute `_values`. + return torch.stack(self._values) class Slate(SlateSlotObjects[SlateItem]): @@ -163,10 +200,15 @@ class Slate(SlateSlotObjects[SlateItem]): def one_hots(self, items: SlateItems, device=None) -> Tensor: oh = torch.zeros((len(self), len(items)), dtype=torch.double, device=device) + # pyre-fixme[16]: `Slate` has no attribute `_values`. for t, i in zip(oh, self._values): t[items.index_of(i)] = 1.0 return oh + @property + def items(self) -> Sequence[SlateItem]: + return super().values + def slot_values(self, item_values: SlateItemValues) -> SlateSlotValues: """ Map items in the slate to given values @@ -176,16 +218,39 @@ def slot_values(self, item_values: SlateItemValues) -> SlateSlotValues: Returns: List of values in the slate """ - if self._slot_to_index is None: + if self._key_to_index is None: + # pyre-fixme[16]: `Slate` has no attribute `_values`. return SlateSlotValues([item_values[i] for i in self._values]) else: - return SlateSlotValues({s: self._values[i] for s, i in self._slot_to_index}) + return SlateSlotValues({k: item_values[i] for k, i in self._key_to_index}) + + def slot_features(self, item_features: SlateItemFeatures) -> SlateSlotFeatures: + """ + Map items in the slate to given values + Args: + item_values: Map from all items to some values + + Returns: + List of values in the slate + """ + if self._key_to_index is None: + return SlateSlotFeatures( + # pyre-fixme[16]: `Slate` has no attribute `_values`. + [item_features[i].detach().clone() for i in self._values] + ) + else: + return SlateSlotFeatures( + {k: item_features[i].detach().clone() for k, i in self._key_to_index} + ) def __repr__(self): return f"{self.__class__.__name__}{{value[{self._values}]}}" def make_slate(slots: SlateSlots, items: Sequence[SlateItem]) -> Slate: + """ + Assign items to slots to make a slate + """ assert len(items) >= len(slots) if slots.is_sequence: return Slate(list(items[: len(slots)])) @@ -201,11 +266,13 @@ def __init__( ], ): super().__init__(values) + # pyre-fixme[16]: `SlateSlotItemValues` has no attribute `_values`. 
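# Illustrative aside (not part of the patch): a slate's one-hot encoding, as produced by
# Slate.one_hots above, is a (slate_size x item_size) matrix with a single 1 per slot row.
# Minimal sketch with plain tensors rather than the Slate/SlateItems classes.
import torch

num_items = 5
slate = [3, 0, 4]                     # item index placed at each of the 3 slots
oh = torch.zeros((len(slate), num_items), dtype=torch.double)
for row, item in zip(oh, slate):
    row[item] = 1.0
# Flattened to length slate_size * item_size, this indicator is what the pseudo-inverse
# estimator later in this diff consumes via log_slate.one_hots(...).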
self._item_size = len(self._values[0]) for v in self._values[1:]: assert self._item_size == len(v) def values_tensor(self, device=None) -> Tensor: + # pyre-fixme[16]: `SlateSlotItemValues` has no attribute `_values`. dist = [v.values for v in self._values] return torch.stack(dist).to(device=device) @@ -232,6 +299,10 @@ def expected_rewards( else: return SlateSlotValues(dict(zip(self.slots, rewards.tolist()))) + @property + def expectations(self) -> Sequence[SlateItemValues]: + return super().values + def make_slot_item_distributions( slots: SlateSlots, dists: Sequence[SlateItemValues] @@ -243,6 +314,60 @@ def make_slot_item_distributions( return SlateSlotItemExpectations(dict(zip(slots, dists[: len(slots)]))) +def is_to_calculate_expectation(slate_size: int, item_size: int) -> bool: + """ + Switch between calculating and sampling expectations, balanced by execution + time and accuracy + Return: + True to calculate + False to sample + """ + return ( + slate_size < 4 + or (slate_size == 4 and item_size < 182) + or (slate_size == 5 and item_size < 47) + or (slate_size == 6 and item_size < 22) + or (slate_size == 7 and item_size < 15) + ) + + +def _calculate_slot_expectation( + d_out: Tensor, + probs: Sequence[float], + buffer: Iterable[Tuple[Set[int], float, float, float]], +) -> Iterable[Tuple[Set[int], float, float, float]]: + """ + A helper function to calculate items' expectations for a slot + """ + assert d_out.shape[0] == len(probs) + next_buffer = [] + for b0, b1, b2, _ in buffer: + # memory buffer for all ordered combinations so far, list of tuples of + # b0: all the items in this ordered combination + # b1: cumulative probability of b0 + # b2: sum of the probabilities of b0 + # b3: = b1 / (1.0 - b2) cached value for faster computation + for i, i_prob in enumerate(probs): + # only add i if it's not already in + if i in b0: + continue + # nb* are next buffer values + nb2 = b2 + i_prob + # due to precision errors, sometimes nb2 becomes 1, in this + # case, discard the combination + if nb2 < 1.0: + nb1 = b1 * i_prob / (1.0 - b2) + next_buffer.append(({*b0, i}, nb1, nb2, nb1 / (1.0 - nb2))) + for i, i_prob in enumerate(probs): + p = 0.0 + for b0, _, _, b3 in next_buffer: + if i in b0: + continue + p += b3 + d_out[i] = p * i_prob + return next_buffer + + class SlateItemProbabilities(SlateItemValues): """ Probabilities of each item being selected into the slate @@ -257,7 +382,7 @@ def __init__( self._greedy = greedy self._slot_item_expectations = None - def _new_key(self, k: int) -> SlateItem: + def _to_key(self, k: int) -> SlateItem: return SlateItem(k) def _reset(self): @@ -280,13 +405,14 @@ def slate_probability(self, slate: Slate) -> Probability: return 0.0 return 1.0 else: - p = 1.0 - d = 1.0 - for _, i in slate: - ip = self.probability(i) - p *= ip / d - d -= ip - return Probability(p) + # pyre-fixme[16]: `SlateItemProbabilities` has no attribute `_values`. 
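# Illustrative aside (not part of the patch): a brute-force cross-check of the recursion in
# _calculate_slot_expectation above. Under sequential sampling without replacement, each
# slot's item distribution can also be obtained by enumerating all ordered slates; this is
# only feasible for tiny item counts and is exactly the blow-up the memory buffer avoids.
import itertools
import torch

probs = torch.tensor([0.5, 0.3, 0.2], dtype=torch.double)  # normalized item probabilities
slate_size = 2
dm = torch.zeros((slate_size, len(probs)), dtype=torch.double)
for perm in itertools.permutations(range(len(probs)), slate_size):
    p, remaining = 1.0, 1.0
    for item in perm:
        p *= probs[item].item() / remaining       # chain rule for sampling w/o replacement
        remaining -= probs[item].item()
    for slot, item in enumerate(perm):
        dm[slot, item] += p
assert torch.allclose(dm.sum(dim=1), torch.ones(slate_size, dtype=torch.double))
assert torch.allclose(dm[0], probs)               # slot-0 marginal equals the raw probabilities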
+ clamped = torch.clamp(self._values, 0.0) + indices = [self.index_of(item) for _, item in slate] + probs = clamped[indices] + sums = clamped[indices] + clamped[indices] = 0.0 + sums = sums.flip(0).cumsum(0).flip(0) + clamped.sum() + return Probability((probs / sums).prod().item()) def slot_item_expectations(self, slots: SlateSlots) -> SlateSlotItemExpectations: slate_size = len(slots) @@ -300,19 +426,21 @@ def slot_item_expectations(self, slots: SlateSlots) -> SlateSlotItemExpectations if self._greedy: self._slot_item_expectations = make_slot_item_distributions( slots, + # pyre-fixme[6]: Expected `Sequence[SlateItemValues]` for 2nd param + # but got `List[Values[typing.Any]]`. [ self.replace(torch.zeros(item_size, dtype=torch.double)) for _ in range(len(self)) ], ) sorted_items, _ = self.sort() - for item, ds in zip(sorted_items, self._slot_item_expectations.items): + for item, ds in zip( + sorted_items, self._slot_item_expectations.expectations + ): ds[item] = 1.0 else: self._normalize() - if (len(slots) < 5 and len(self) < 47) or ( - len(slots) < 6 and len(self) < 19 - ): + if is_to_calculate_expectation(len(slots), len(self)): self._calculate_expectations(slots) else: self._sample_expectations(slots, 20000) @@ -323,6 +451,7 @@ def _sample_expectations(self, slots: SlateSlots, num_samples: int): item_size = len(self) dm = torch.zeros((slate_size, item_size), dtype=torch.double) ri = torch.arange(slate_size) + # pyre-fixme[16]: `SlateItemProbabilities` has no attribute `_probabilities`. ws = self._probabilities.repeat((num_samples, 1)) for _ in range(item_size): samples = torch.multinomial(ws, slate_size) @@ -330,35 +459,31 @@ def _sample_expectations(self, slots: SlateSlots, num_samples: int): dm[ri, sample] += 1 dm /= num_samples * item_size self._slot_item_expectations = make_slot_item_distributions( - slots, [self.replace(vs) for vs in dm] + slots, + # pyre-fixme[6]: Expected `Sequence[SlateItemValues]` for 2nd param but + # got `List[Values[typing.Any]]`. + [self.replace(vs) for vs in dm], ) def _calculate_expectations(self, slots: SlateSlots): + """ + A brute-force way to calculate each item's expectations at each slot by + going through all l-choose-m (l!/(l-m)!) possible slates. + """ slate_size = len(slots) item_size = len(self) dm = torch.zeros((slate_size, item_size), dtype=torch.double) + # pyre-fixme[16]: `SlateItemProbabilities` has no attribute `_probabilities`. dm[0] = self._probabilities - buffer = [({}, 1.0, 0.0, 1.0)] + buffer = [(set(), 1.0, 0.0, 1.0)] + probs = self._probabilities.tolist() for d in dm[1:]: - next_buffer = [] - for b in buffer: - for i, i_prob in enumerate(self._probabilities): - if i in b[0]: - continue - b1 = b[1] * i_prob / (1.0 - b[2]) - b2 = b[2] + i_prob - b3 = b1 / (1.0 - b2) - next_buffer.append(({*b[0], i}, b1, b2, b3)) - for i, i_prob in enumerate(self._probabilities): - p = 0.0 - for b in next_buffer: - if i in b[0]: - continue - p += b[3] * i_prob - d[i] = p - buffer = next_buffer + buffer = _calculate_slot_expectation(d, probs, buffer) self._slot_item_expectations = make_slot_item_distributions( - slots, [self.replace(vs) for vs in dm] + slots, + # pyre-fixme[6]: Expected `Sequence[SlateItemValues]` for 2nd param but + # got `List[Values[typing.Any]]`. 
+ [self.replace(vs) for vs in dm], ) def sample_slate(self, slots: SlateSlots) -> Slate: @@ -367,10 +492,55 @@ def sample_slate(self, slots: SlateSlots) -> Slate: items = super().greedy(slate_size) else: items = super().sample(slate_size) - if slate_size == 1: - items = [items] return make_slate(slots, items) + @property + def is_deterministic(self) -> bool: + return self._greedy + + def slate_space( + self, slots: SlateSlots, max_size: int = -1 + ) -> Iterable[Tuple[Sequence[SlateItem], float]]: + """Return all possible slates and their probabilities + + The algorithm is similar to :func:`~_calculate_expectations`, but has + less value to cache thus save both space and computation + Args: + slots: slots to be filled + max_size: max number of samples to be returned + <= 0 return all samples + """ + slate_size = len(slots) + item_size = len(self) + assert item_size >= slate_size + if self._greedy: + items = super().greedy(slate_size) + return [(items, 1.0)] + else: + buffer = [([], 1.0, 0.0)] + # pyre-fixme[16]: `SlateItemProbabilities` has no attribute + # `_probabilities`. + probs = self._probabilities.tolist() + for _ in range(slate_size): + next_buffer = [] + for b0, b1, b2 in buffer: + # memory buffer for all ordered combinations so far, list of tuples of + # b0: all the items in this ordered combination + # b1: cumulative probability of b0 + # b2: sum of the probabilities of b0 + for i, i_prob in enumerate(probs): + if i in b0: + continue + nb2 = b2 + i_prob + if nb2 < 1.0: + nb1 = b1 * i_prob / (1.0 - b2) + next_buffer.append(([*b0, i], nb1, nb2)) + if max_size <= 0 or max_size > len(next_buffer): + buffer = next_buffer + else: + buffer = random.sample(next_buffer, max_size) + return [([SlateItem(i) for i in b[0]], b[1]) for b in buffer] + class SlateSlotItemProbabilities(SlateSlotItemValues): def __init__( @@ -423,6 +593,7 @@ def slot_item_expectations(self, samples: int = 20000) -> SlateSlotItemExpectati and len(self._slot_item_expectations) >= slate_size ): return self._slot_item_expectations + # pyre-fixme[16]: `SlateSlotItemProbabilities` has no attribute `_values`. item_size = len(self._values[0]) assert item_size >= slate_size ps = self.values_tensor() @@ -431,6 +602,9 @@ def slot_item_expectations(self, samples: int = 20000) -> SlateSlotItemExpectati for i, value in zip(range(slate_size), self._values): item = ps[i].argmax().item() dist = torch.zeros(item_size, dtype=torch.double) + # pyre-fixme[6]: For 1st param expected `Union[None, + # List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, + # ...]]` but got `Union[bool, float, int]`. dist[item] = 1.0 dists.append(value.replace(dist)) ps[torch.arange(i + 1, slate_size), item] = 0.0 @@ -438,16 +612,15 @@ def slot_item_expectations(self, samples: int = 20000) -> SlateSlotItemExpectati self.slots, dists ) else: - if (slate_size < 5 and item_size < 47) or ( - slate_size < 6 and item_size < 19 - ): + if is_to_calculate_expectation(slate_size, item_size): self._calculate_expectations() else: - self._sample_expectations(20000 * item_size) + self._sample_expectations(samples * item_size) return self._slot_item_expectations def _sample_expectations(self, num_samples: int): slate_size = len(self.slots) + # pyre-fixme[16]: `SlateSlotItemProbabilities` has no attribute `_values`. 
item_size = len(self._values[0]) dm = torch.zeros((slate_size, item_size), dtype=torch.double) ri = torch.arange(slate_size) @@ -468,27 +641,14 @@ def _calculate_expectations(self): slate_size = len(self.slots) item_size = len(self._values[0]) dm = torch.zeros((slate_size, item_size), dtype=torch.double) - self._values[0]._normalize() - dm[0] = self._values[0]._probabilities + prob_list = [] + for v in self._values: + v._normalize() + prob_list.append(v._probabilities.detach().clone()) + dm[0] = prob_list[0] buffer = [({}, 1.0, 0.0, 1.0)] - for d, probs in zip(dm[1:], self._values[1:]): - next_buffer = [] - for b in buffer: - for i, i_prob in enumerate(probs): - if i in b[0]: - continue - b1 = b[1] * i_prob / (1.0 - b[2]) - b2 = b[2] + i_prob - b3 = b1 / (1.0 - b2) - next_buffer.append(({*b[0], i}, b1, b2, b3)) - for i, i_prob in enumerate(probs): - p = 0.0 - for b in next_buffer: - if i in b[0]: - continue - p += b[3] * i_prob - d[i] = p - buffer = next_buffer + for d, probs in zip(dm[1:], prob_list[1:]): + buffer = _calculate_slot_expectation(d, probs.tolist(), buffer) self._slot_item_expectations = make_slot_item_distributions( self.slots, [its.replace(vs) for its, vs in zip(self._values, dm)] ) @@ -498,6 +658,7 @@ def sample_slate(self, slots: SlateSlots) -> Slate: ps = self.values_tensor() items = [] if self._greedy: + # pyre-fixme[16]: `SlateSlotItemProbabilities` has no attribute `_values`. for i, value in zip(range(slate_size), self._values): item = ps[i].argmax().item() items.append(value.items[item]) @@ -510,7 +671,99 @@ def sample_slate(self, slots: SlateSlots) -> Slate: return make_slate(slots, items) -SlateQueryType = Union[int, Tuple[int], float, Tuple[float], np.ndarray, Tensor] +class RewardDistribution(ABC): + """ + Return customized probability distribution according to rewards + """ + + def __init__(self, deterministic: bool = False): + self._deterministic = deterministic + + @abstractmethod + def distribution(self, rewards: Tensor) -> Tensor: + pass + + def __call__(self, rewards: SlateItemValues) -> SlateItemProbabilities: + dist = self.distribution(rewards.values) + return SlateItemProbabilities(rewards.items.fill(dist), self._deterministic) + + @property + @abstractmethod + def name(self) -> str: + pass + + +class PassThruDistribution(RewardDistribution): + """ + No-op distribution, probability determined by reward + """ + + def distribution(self, rewards: Tensor) -> Tensor: + return rewards.detach().clone() + + @property + def name(self) -> str: + return f"{self._deterministic}" + + def __repr__(self): + return f"PassThruDistribution[deterministic={self._deterministic}]" + + +class RankingDistribution(RewardDistribution): + """ + Ranking distribution according to https://arxiv.org/abs/1605.04812 + """ + + def __init__(self, alpha: float = -1.0, deterministic: bool = False): + super().__init__(deterministic) + self._alpha = alpha + + def distribution(self, rewards: Tensor) -> Tensor: + dist = rewards.detach().clone() + if self._alpha >= 0: + _, ids = torch.sort(rewards, descending=True) + rank = torch.arange(1, ids.shape[0] + 1, dtype=torch.double) + dist[ids] = torch.pow( + 2.0, + (-1.0 * (self._alpha * torch.log2(rank)).floor_()), + ) + return dist + + @property + def name(self) -> str: + return f"ranking_{self._alpha}_{self._deterministic}" + + def __repr__(self): + return ( + f"RankingDistribution[alpha={self._alpha}" + f",deterministic={self._deterministic}]" + ) + + +class FrechetDistribution(RewardDistribution): + """ + Frechet distribution + """ + + def 
__init__(self, shape: float, deterministic: bool = False): + super().__init__(deterministic) + self._shape = shape + + def distribution(self, rewards: Tensor) -> Tensor: + return torch.pow(rewards, self._shape) + + @property + def name(self) -> str: + return f"frechet_{self._shape}_{self._deterministic}" + + def __repr__(self): + return ( + f"FrechetDistribution[shape={self._shape}]" + f",deterministic={self._deterministic}]" + ) + + +SlateQueryType = Union[Tuple[int], Tuple[float], np.ndarray, Tensor, Tuple[int, int]] SlateQuery = TypeWrapper[SlateQueryType] @@ -537,9 +790,11 @@ def __call__(self, context: SlateContext) -> SlateItemProbabilities: return self._query(context) -class SlateMetric(ABC): +class SlateMetric: """ Metric calculator for a slate: weights (dot) rewards + + Base class is just sum of the all item rewards """ def __init__(self, device=None): @@ -548,11 +803,12 @@ def __init__(self, device=None): def calculate_reward( self, slots: SlateSlots, - rewards: SlateSlotValues = None, - slot_values: SlateSlotValues = None, - slot_weights: SlateSlotValues = None, + rewards: Optional[SlateSlotValues] = None, + slot_values: Optional[SlateSlotValues] = None, + slot_weights: Optional[SlateSlotValues] = None, ) -> float: if slot_values is None: + assert rewards is not None slot_values = self.slot_values(rewards) values = slot_values.values.to(device=self._device) if slot_weights is None: @@ -571,13 +827,14 @@ def slot_values(self, rewards: SlateSlotValues) -> SlateSlotValues: class DCGSlateMetric(SlateMetric): - _weights: Tensor = None + _weights: Optional[Tensor] = None def _get_discount(self, slate_size: int) -> Tensor: + weights = DCGSlateMetric._weights if ( - DCGSlateMetric._weights is None - or DCGSlateMetric._weights.shape[0] < slate_size - or DCGSlateMetric._weights.device != self._device + weights is None + or weights.shape[0] < slate_size + or weights.device != self._device ): DCGSlateMetric._weights = torch.reciprocal( torch.log2( @@ -586,12 +843,15 @@ def _get_discount(self, slate_size: int) -> Tensor: ) ) ) - return DCGSlateMetric._weights[:slate_size] + weights = DCGSlateMetric._weights + assert weights is not None + return weights[:slate_size] def slot_weights(self, slots: SlateSlots) -> SlateSlotValues: return slots.fill(self._get_discount(len(slots))) def slot_values(self, rewards: SlateSlotValues) -> SlateSlotValues: + # pyre-fixme[7]: Expected `SlateSlotValues` but got `Values[typing.Any]`. 
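# Illustrative aside (not part of the patch): DCGSlateMetric above weights slot s (0-based)
# by 1 / log2(s + 2) and maps a slot reward r to the gain 2^r - 1; the slate score is the
# dot product of the two. Minimal sketch of that computation, not the SlateMetric API.
import torch

slot_rewards = torch.tensor([3.0, 1.0, 2.0], dtype=torch.double)
positions = torch.arange(2, 2 + len(slot_rewards), dtype=torch.double)
weights = torch.reciprocal(torch.log2(positions))        # as in _get_discount
gains = torch.pow(2.0, slot_rewards) - 1.0               # as in DCGSlateMetric.slot_values
dcg = torch.dot(weights, gains)
# The NDCG variant in the following hunk divides these weights by the ideal DCG (idcg) of
# the sorted rewards, returning zeros when idcg == 0.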
return rewards.replace(torch.pow(2.0, rewards.values) - 1.0) @@ -616,7 +876,32 @@ def slot_weights(self, slots: SlateSlots) -> SlateSlotValues: self._idcg[slate_size] = idcg else: idcg = self._idcg[slate_size] - return slots.fill(self._get_discount(slate_size) / idcg) + return slots.fill( + torch.zeros(slate_size, dtype=torch.double) + if idcg == 0 + else self._get_discount(slate_size) / idcg + ) + + +class ERRSlateMetric(SlateMetric): + def __init__(self, max_reward: float, device=None): + super().__init__(device) + self._max_reward = max_reward + + def slot_weights(self, slots: SlateSlots) -> SlateSlotValues: + return slots.fill([1.0 / (r + 1) for r in range(len(slots))]) + + def slot_values(self, rewards: SlateSlotValues) -> SlateSlotValues: + d = torch.tensor(self._max_reward, device=self._device).pow(2.0) + r = (torch.pow(2.0, rewards.values.clamp(0.0, self._max_reward)) - 1.0) / d + p = 1.0 + err = torch.zeros(len(rewards), dtype=torch.double, device=self._device) + for i in range(len(rewards)): + ri = r[i] + err[i] = p * ri + p = p * (1.0 - ri.item()) + # pyre-fixme[7]: Expected `SlateSlotValues` but got `Values[typing.Any]`. + return rewards.replace(err) class SlateModel(ABC): @@ -651,36 +936,41 @@ def slot_probabilities(self, context: SlateContext) -> SlateSlotValues: @dataclass(frozen=True) class LogSample: - log_slate: Slate - log_rewards: SlateSlotValues - slot_probabilities: Optional[SlateSlotValues] = None - log_slate_probability: float = 0.0 - tgt_slate_probability: float = 0.0 - - def validate(self, slate_size: int, item_size: int): - assert len(self.log_slate) == slate_size - assert len(self.log_rewards) == slate_size - assert self.log_slate_probability <= 1.0 - assert self.tgt_slate_probability <= 1.0 - - -@dataclass(frozen=True) -class LogEpisode: context: SlateContext metric: SlateMetric - samples: Iterable[LogSample] + log_slate: Slate + log_reward: Reward + _log_slate_probability: Probability = float("nan") # probability for each item being places at each slot _log_slot_item_probabilities: Optional[SlateSlotItemProbabilities] = None # item probability distribution from behavior policy _log_item_probabilities: Optional[SlateItemProbabilities] = None + _tgt_slate_probability: Probability = float("nan") _tgt_slot_item_probabilities: Optional[SlateSlotItemProbabilities] = None # item probability distribution from target policy _tgt_item_probabilities: Optional[SlateItemProbabilities] = None - gt_item_rewards: Optional[SlateItemValues] = None + # gt_item_rewards: Optional[SlateItemValues] = None + # pre-calculated ground truth for target policy + ground_truth_reward: Reward = float("nan") + # context dependent slot weights (e.g. 
DCG or ERR weights), used by PBM + slot_weights: Optional[SlateSlotValues] = None + # item/action independent examination probabilities of each slot, used by PBM + slot_probabilities: Optional[SlateSlotValues] = None + # features associated with the slate, to train direct model + item_features: Optional[SlateItemFeatures] = None def validate(self): slate_size = len(self.context.slots) item_size = len(self.items) + assert len(self.log_slate) == slate_size + assert ( + math.isnan(self._log_slate_probability) + or self._log_slate_probability <= 1.0 + ) + assert ( + math.isnan(self._tgt_slate_probability) + or self._tgt_slate_probability <= 1.0 + ) assert ( self._log_slot_item_probabilities is None or len(self._log_slot_item_probabilities) == slate_size @@ -697,8 +987,11 @@ def validate(self): self._tgt_item_probabilities is None or len(self._tgt_item_probabilities) == item_size ) - for s in self.samples: - s.validate(slate_size, item_size) + assert self.slot_weights is None or len(self.slot_weights) == slate_size + assert ( + self.slot_probabilities is None + or len(self.slot_probabilities) == slate_size + ) def log_slot_item_expectations( self, slots: SlateSlots @@ -709,13 +1002,16 @@ def log_slot_item_expectations( return self._log_item_probabilities.slot_item_expectations(slots) return None - def log_slate_probability(self, slate: Slate) -> float: + def log_slate_probability(self, slate: Optional[Slate] = None) -> float: + if not math.isnan(self._log_slate_probability): + return self._log_slate_probability + if slate is None: + slate = self.log_slate if self._log_slot_item_probabilities is not None: return self._log_slot_item_probabilities.slate_probability(slate) if self._log_item_probabilities is not None: return self._log_item_probabilities.slate_probability(slate) - else: - return 0.0 + return 0.0 def tgt_slot_expectations( self, slots: SlateSlots @@ -726,17 +1022,26 @@ def tgt_slot_expectations( return self._tgt_item_probabilities.slot_item_expectations(slots) return None - def tgt_slate_probability(self, slate: Slate) -> float: + def tgt_slate_probability(self) -> float: + if not math.isnan(self._tgt_slate_probability): + return self._tgt_slate_probability if self._tgt_slot_item_probabilities is not None: - return self._tgt_slot_item_probabilities.slate_probability(slate) + return self._tgt_slot_item_probabilities.slate_probability(self.log_slate) if self._tgt_item_probabilities is not None: - return self._tgt_item_probabilities.slate_probability(slate) - else: - return 0.0 + return self._tgt_item_probabilities.slate_probability(self.log_slate) + return 0.0 + + def tgt_slate_space( + self, slots: SlateSlots + ) -> Iterable[Tuple[Sequence[SlateItem], float]]: + if self._tgt_item_probabilities is not None: + return self._tgt_item_probabilities.slate_space(slots) + return [] @property def items(self) -> SlateItems: if self._log_slot_item_probabilities is not None: + # pyre-fixme[16]: `SlateSlotItemProbabilities` has no attribute `_values`. 
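# Illustrative aside (not part of the patch), referring back to ERRSlateMetric earlier in
# this diff: ERR is a cascade model in which the user stops at slot i with probability r_i
# and continues with probability 1 - r_i, and slot i carries weight 1 / (i + 1). The sketch
# below maps graded rewards to stop probabilities with the conventional 2^max_reward
# normalizer, which is an assumption on my part and differs from the
# `self._max_reward ... .pow(2.0)` denominator used in the patch.
import torch

def err_score(slot_rewards: torch.Tensor, max_reward: float) -> float:
    r = (torch.pow(2.0, slot_rewards.clamp(0.0, max_reward)) - 1.0) / (2.0 ** max_reward)
    keep_going, total = 1.0, 0.0
    for i, ri in enumerate(r):
        total += (1.0 / (i + 1)) * keep_going * ri.item()  # weight 1/(i+1), as in slot_weights
        keep_going *= 1.0 - ri.item()                      # cascade continuation probability
    return total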
return self._log_slot_item_probabilities._values[0].items if self._log_item_probabilities is not None: return self._log_item_probabilities.items @@ -745,51 +1050,140 @@ def items(self) -> SlateItems: @dataclass(frozen=True) class SlateEstimatorInput: - episodes: Iterable[LogEpisode] - tgt_model: SlateModel = None # target model, used by DM + samples: Sequence[LogSample] def validate(self): - for e in self.episodes: - e.validate() + for s in self.samples: + s.validate() -class DMEstimator(Estimator): +class SlateEstimator(Estimator): + @abstractmethod + def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]: + pass + + +class DMEstimator(SlateEstimator): """ Direct Method estimator """ - def evaluate(self, input: SlateEstimatorInput, *kwargs) -> EstimatorResults: + def __init__(self, trainer: Trainer, training_sample_ratio: float, device=None): + super().__init__(device) + self._trainer = trainer + self._training_sample_ratio = training_sample_ratio + + def _train_model( + self, samples: Sequence[LogSample] + ) -> Optional[Iterable[LogSample]]: + if self._trainer is None: + logger.error("Target model trainer is none, DM is not available") + return None + self._trainer.reset() + logger.info(" training direct model...") + st = time.perf_counter() + sample_size = len(samples) + if self._training_sample_ratio > 0.0 and self._training_sample_ratio < 1.0: + training_samples = range(int(sample_size * self._training_sample_ratio)) + else: + training_samples = range(sample_size) + train_x = [] + train_y = [] + vali_mask = [True] * len(samples) + for i in training_samples: + sample = samples[i] + if sample.item_features is None: + continue + slate_features = sample.log_slate.slot_features(sample.item_features) + train_x.append(slate_features.features.flatten()) + train_y.append(sample.log_reward) + vali_mask[i] = False + if len(train_x) == 0: + logger.error("Slate features not provided, DM is not available") + return None + train_x = torch.stack(train_x) + train_y = torch.tensor(train_y, dtype=torch.double, device=train_x.device) + vali_x = [] + vali_y = [] + evaluate_samples = [] + for mask, sample in zip(vali_mask, samples): + if not mask or sample.item_features is None: + continue + slate_features = sample.log_slate.slot_features(sample.item_features) + vali_x.append(slate_features.features.flatten()) + vali_y.append(sample.log_reward) + evaluate_samples.append(sample) + if len(vali_x) == 0: + vali_x = train_x.detach().clone() + vali_y = train_y.detach().clone() + evaluate_samples = samples + else: + vali_x = torch.stack(vali_x) + vali_y = torch.tensor(vali_y, dtype=torch.double, device=vali_x.device) + training_data = TrainingData(train_x, train_y, None, vali_x, vali_y, None) + self._trainer.train(training_data) + logger.info(f" training direct model done: {time.perf_counter() - st}s") + + return evaluate_samples + + def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]: + slots = sample.context.slots + tgt_slate_space = sample.tgt_slate_space(slots) + features = [] + probs = [] + for items, prob in tgt_slate_space: + slate = make_slate(slots, items) + assert sample.item_features is not None + slate_features = slate.slot_features(sample.item_features) + features.append(slate_features.features.flatten()) + probs.append(prob) + preds = self._trainer.predict(torch.stack(features), device=self._device) + tgt_reward = torch.dot( + preds.scores, torch.tensor(probs, dtype=torch.double, device=self._device) + ) + return EstimatorSampleResult( + 
sample.log_reward, + tgt_reward.item(), + sample.ground_truth_reward, + float("nan"), + ) + + # pyre-fixme[14]: `evaluate` overrides method defined in `Estimator` inconsistently. + def evaluate( + self, input: SlateEstimatorInput, *kwargs + ) -> Optional[EstimatorResult]: input.validate() - if input.tgt_model is None: - logging.error("Target model is none, DM is not available") - return self.results - for episode in input.episodes: - log_avg = RunningAverage() - tgt_avg = RunningAverage() - gt_avg = RunningAverage() - tgt_slot_expects = episode.tgt_slot_expectations(episode.context.slots) - if tgt_slot_expects is None: - logging.warning(f"Target slot expectations not available") + samples = self._train_model(input.samples) + if samples is None: + return None + + log_avg = RunningAverage() + tgt_avg = RunningAverage() + gt_avg = RunningAverage() + for sample in samples: + result = self._evaluate_sample(sample) + if result is None: continue - gt_slot_rewards = None - if episode.gt_item_rewards is not None: - gt_slot_rewards = tgt_slot_expects.expected_rewards( - episode.gt_item_rewards - ) - for sample in episode.samples: - log_avg.add(episode.metric(episode.context.slots, sample.log_rewards)) - tgt_item_rewards = input.tgt_model.item_rewards(episode.context) - tgt_slot_rewards = tgt_slot_expects.expected_rewards(tgt_item_rewards) - tgt_avg.add(episode.metric(episode.context.slots, tgt_slot_rewards)) - if gt_slot_rewards is not None: - gt_avg.add(episode.metric(episode.context.slots, gt_slot_rewards)) - self._append_estimate(log_avg.average, tgt_avg.average, gt_avg.average) - return self.results - - -class IPSEstimator(Estimator): + log_avg.add(result.log_reward) + tgt_avg.add(result.target_reward) + gt_avg.add(result.ground_truth_reward) + return EstimatorResult( + log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count + ) + + def __repr__(self): + return ( + f"DMEstimator(trainer({self._trainer.name})" + f",ratio({self._training_sample_ratio}),device({self._device}))" + ) + + +class IPSEstimator(SlateEstimator): def __init__( - self, weight_clamper: Clamper = None, weighted: bool = True, device=None + self, + weight_clamper: Optional[Clamper] = None, + weighted: bool = True, + device=None, ): super().__init__(device) self._weight_clamper = ( @@ -797,58 +1191,173 @@ def __init__( ) self._weighted = weighted - def evaluate(self, input: SlateEstimatorInput, *kwargs) -> EstimatorResults: + def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]: + tgt_prob = sample.tgt_slate_probability() + log_prob = sample.log_slate_probability(sample.log_slate) + if tgt_prob == log_prob: + weight = 1.0 + elif tgt_prob <= 0.0: + weight = 0.0 + elif log_prob <= 0.0: + return None + else: + weight = self._weight_clamper(tgt_prob / log_prob) + return EstimatorSampleResult( + sample.log_reward, + sample.log_reward * weight, + sample.ground_truth_reward, + weight, + ) + + # pyre-fixme[14]: `evaluate` overrides method defined in `Estimator` inconsistently. 
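# Illustrative aside (not part of the patch): the IPS estimate above re-weights each logged
# slate reward by the clamped ratio of target to logging slate probabilities; with
# weighted=True, evaluate() below self-normalizes by the total weight instead of the sample
# count. Compact sketch with plain floats (clamping modeled here as a simple upper bound).
def ips_estimate(log_rewards, tgt_probs, log_probs, clamp=10.0, weighted=True):
    triples = [(r, t, l) for r, t, l in zip(log_rewards, tgt_probs, log_probs) if l > 0.0]
    weights = [min(max(t / l, 0.0), clamp) for _, t, l in triples]
    weighted_rewards = [w * r for w, (r, _, _) in zip(weights, triples)]
    if weighted:                                   # self-normalized IPS
        return sum(weighted_rewards) / sum(weights)
    return sum(weighted_rewards) / len(weighted_rewards)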
+ def evaluate( + self, input: SlateEstimatorInput, *kwargs + ) -> Optional[EstimatorResult]: input.validate() - for episode in input.episodes: - log_avg = RunningAverage() - tgt_avg = RunningAverage() - acc_weight = 0.0 - gt_avg = RunningAverage() - gt_slot_rewards = None - if episode.gt_item_rewards is not None: - tgt_slot_expects = episode.tgt_slot_expectations(episode.context.slots) - if tgt_slot_expects is not None: - gt_slot_rewards = tgt_slot_expects.expected_rewards( - episode.gt_item_rewards - ) - for sample in episode.samples: - log_prob = sample.log_slate_probability - if log_prob <= 0.0: - log_prob = episode.log_slate_probability(sample.log_slate) - if log_prob <= 0.0: - logging.warning(f"Invalid log slate probability: {log_prob}") - continue - tgt_prob = sample.tgt_slate_probability - if tgt_prob <= 0.0: - tgt_prob = episode.tgt_slate_probability(sample.log_slate) - if tgt_prob <= 0.0: - logging.warning(f"Invalid target probability: {tgt_prob}") - continue - weight = self._weight_clamper(tgt_prob / log_prob) - log_reward = episode.metric(episode.context.slots, sample.log_rewards) - log_avg.add(log_reward) - tgt_avg.add(log_reward * weight) - acc_weight += weight - if gt_slot_rewards is not None: - gt_avg.add(episode.metric(episode.context.slots, gt_slot_rewards)) - if tgt_avg.count == 0: + log_avg = RunningAverage() + tgt_avg = RunningAverage() + acc_weight = RunningAverage() + gt_avg = RunningAverage() + zw = 0 + for sample in input.samples: + result = self._evaluate_sample(sample) + if result is None: + zw += 1 continue - if self._weighted: - self._append_estimate( - log_avg.average, tgt_avg.total / acc_weight, gt_avg.average - ) - else: - self._append_estimate(log_avg.average, tgt_avg.average, gt_avg.average) - return self.results + log_avg.add(result.log_reward) + tgt_avg.add(result.target_reward) + gt_avg.add(result.ground_truth_reward) + acc_weight.add(result.weight) + if result.weight == 0.0: + zw += 1 + logging.info( + f"IPSEstimator invalid sample pct: {zw * 100 / len(input.samples)}%" + ) + if tgt_avg.count == 0: + return None + if self._weighted: + estimated = tgt_avg.total / acc_weight.total + return EstimatorResult( + log_avg.average, estimated, gt_avg.average, acc_weight.average + ) + else: + return EstimatorResult( + log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count + ) + + def __repr__(self): + return ( + f"IPSEstimator(weight_clamper({self._weight_clamper})" + f",weighted({self._weighted}),device({self._device}))" + ) -class PseudoInverseEstimator(Estimator): +class DoublyRobustEstimator(DMEstimator): + def __init__( + self, + trainer: Trainer, + training_sample_ratio: float, + weight_clamper: Optional[Clamper] = None, + weighted: bool = False, + device=None, + ): + super().__init__(trainer, training_sample_ratio, device) + self._weight_clamper = ( + weight_clamper if weight_clamper is not None else Clamper() + ) + self._weighted = weighted + + def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]: + slots = sample.context.slots + if self._trainer.is_trained: + tgt_slate_space = sample.tgt_slate_space(slots) + features = [] + probs = [] + for items, prob in tgt_slate_space: + slate = make_slate(slots, items) + assert sample.item_features is not None + slate_features = slate.slot_features(sample.item_features) + features.append(slate_features.features.flatten()) + probs.append(prob) + preds = self._trainer.predict(torch.stack(features), device=self._device) + dm_reward = torch.dot( + preds.scores, + torch.tensor(probs, 
dtype=torch.double, device=self._device), + ).item() + assert sample.item_features is not None + log_slate_feature = sample.log_slate.slot_features(sample.item_features) + pred = self._trainer.predict( + torch.unsqueeze(log_slate_feature.features.flatten(), dim=0), + device=self._device, + ) + log_dm_reward = pred.scores[0].item() + else: + dm_reward = 0.0 + log_dm_reward = 0.0 + tgt_prob = sample.tgt_slate_probability() + log_prob = sample.log_slate_probability(sample.log_slate) + if tgt_prob == log_prob: + weight = 1.0 + elif tgt_prob <= 0.0: + weight = 0.0 + elif log_prob <= 0.0: + return None + else: + weight = self._weight_clamper(tgt_prob / log_prob) + target_reward = (sample.log_reward - log_dm_reward) * weight + dm_reward + return EstimatorSampleResult( + sample.log_reward, target_reward, sample.ground_truth_reward, weight + ) + + def evaluate( + self, input: SlateEstimatorInput, *kwargs + ) -> Optional[EstimatorResult]: + input.validate() + samples = self._train_model(input.samples) + if samples is None: + samples = input.samples + + log_avg = RunningAverage() + tgt_avg = RunningAverage() + acc_weight = RunningAverage() + gt_avg = RunningAverage() + for sample in samples: + result = self._evaluate_sample(sample) + if result is None: + continue + log_avg.add(result.log_reward) + tgt_avg.add(result.target_reward) + acc_weight.add(result.weight) + gt_avg.add(result.ground_truth_reward) + if self._weighted: + estimated = tgt_avg.total / acc_weight.total + return EstimatorResult( + log_avg.average, estimated, gt_avg.average, acc_weight.average + ) + else: + return EstimatorResult( + log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count + ) + + def __repr__(self): + return ( + f"DoublyRobustEstimator(trainer({self._trainer.name})" + f",ratio({self._training_sample_ratio})" + f",weight_clamper({self._weight_clamper})" + f",weighted({self._weighted}),device({self._device}))" + ) + + +class PseudoInverseEstimator(SlateEstimator): """ Estimator from reference 2 """ def __init__( - self, weight_clamper: Clamper = None, weighted: bool = True, device=None + self, + weight_clamper: Optional[Clamper] = None, + weighted: bool = True, + device=None, ): super().__init__(device) self._weight_clamper = ( @@ -856,63 +1365,95 @@ def __init__( ) self._weighted = weighted - def evaluate(self, input: SlateEstimatorInput, *kwargs) -> EstimatorResults: + def _evaluate_sample(self, sample: LogSample) -> Optional[EstimatorSampleResult]: + log_slot_expects = sample.log_slot_item_expectations(sample.context.slots) + if log_slot_expects is None: + logger.warning("Log slot distribution not available") + return None + tgt_slot_expects = sample.tgt_slot_expectations(sample.context.slots) + if tgt_slot_expects is None: + logger.warning("Target slot distribution not available") + return None + log_indicator = log_slot_expects.values_tensor(self._device) + tgt_indicator = tgt_slot_expects.values_tensor(self._device) + lm = len(sample.context.slots) * len(sample.items) + gamma = torch.as_tensor( + np.linalg.pinv( + torch.mm( + log_indicator.view((lm, 1)), log_indicator.view((1, lm)) + ).numpy() + ) + ) + # torch.pinverse is not very stable + # gamma = torch.pinverse( + # torch.mm(log_indicator.view((lm, 1)), log_indicator.view((1, lm))) + # ) + ones = sample.log_slate.one_hots(sample.items, self._device) + weight = self._weight_clamper( + torch.mm(tgt_indicator.view((1, lm)), torch.mm(gamma, ones.view((lm, 1)))) + ).item() + return EstimatorSampleResult( + sample.log_reward, + sample.log_reward * weight, + 
sample.ground_truth_reward, + weight, + ) + + # pyre-fixme[14]: `evaluate` overrides method defined in `Estimator` inconsistently. + def evaluate( + self, input: SlateEstimatorInput, *kwargs + ) -> Optional[EstimatorResult]: input.validate() - for episode in input.episodes: - log_avg = RunningAverage() - tgt_avg = RunningAverage() - acc_weight = 0.0 - gt_avg = RunningAverage() - log_slot_expects = episode.log_slot_item_expectations(episode.context.slots) - if log_slot_expects is None: - logging.warning(f"Log slot distribution not available") + log_avg = RunningAverage() + tgt_avg = RunningAverage() + acc_weight = RunningAverage() + gt_avg = RunningAverage() + zw = 0 + for sample in input.samples: + result = self._evaluate_sample(sample) + if result is None: + zw += 1 continue - tgt_slot_expects = episode.tgt_slot_expectations(episode.context.slots) - if tgt_slot_expects is None: - logging.warning(f"Target slot distribution not available") - continue - log_indicator = log_slot_expects.values_tensor(self._device) - tgt_indicator = tgt_slot_expects.values_tensor(self._device) - lm = len(episode.context.slots) * len(episode.items) - gamma = torch.pinverse( - torch.mm(log_indicator.view((lm, 1)), log_indicator.view((1, lm))) + log_avg.add(result.log_reward) + tgt_avg.add(result.target_reward) + gt_avg.add(result.ground_truth_reward) + acc_weight.add(result.weight) + if result.weight == 0.0: + zw += 1 + if tgt_avg.count % 1000 == 0: + logger.info(f" PseudoInverseEstimator: processed {tgt_avg.count}") + logging.info( + f"PseudoInverseEstimator invalid sample pct: {zw * 100 / len(input.samples)}%" + ) + if tgt_avg.count == 0: + return None + if self._weighted: + estimated = tgt_avg.total / acc_weight.total + return EstimatorResult( + log_avg.average, estimated, gt_avg.average, acc_weight.average + ) + else: + return EstimatorResult( + log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count ) - gt_slot_rewards = None - if episode.gt_item_rewards is not None: - gt_slot_rewards = tgt_slot_expects.expected_rewards( - episode.gt_item_rewards - ) - for sample in episode.samples: - log_reward = episode.metric(episode.context.slots, sample.log_rewards) - log_avg.add(log_reward) - ones = sample.log_slate.one_hots(episode.items, self._device) - weight = self._weight_clamper( - torch.mm( - tgt_indicator.view((1, lm)), torch.mm(gamma, ones.view(lm, 1)) - ) - ) - tgt_avg.add(log_reward * weight) - acc_weight += weight - if gt_slot_rewards is not None: - gt_avg.add(episode.metric(episode.context.slots, gt_slot_rewards)) - if tgt_avg.count == 0: - continue - if self._weighted: - self._append_estimate( - log_avg.average, tgt_avg.total / acc_weight, gt_avg.average - ) - else: - self._append_estimate(log_avg.average, tgt_avg.average, gt_avg.average) - return self.results + def __repr__(self): + return ( + f"PseudoInverseEstimator(weight_clamper({self._weight_clamper})" + f",weighted({self._weighted}),device({self._device}))" + ) -class PBMEstimator(Estimator): + +class PBMEstimator(SlateEstimator): """ Estimator from reference 1: Position-Based Click Model """ def __init__( - self, weight_clamper: Clamper = None, weighted: bool = True, device=None + self, + weight_clamper: Optional[Clamper] = None, + weighted: bool = True, + device=None, ): super().__init__(device) self._weight_clamper = ( @@ -920,63 +1461,86 @@ def __init__( ) self._weighted = weighted - def evaluate(self, input: SlateEstimatorInput, *kwargs) -> EstimatorResults: + def _evaluate_sample(self, sample: LogSample) -> 
Optional[EstimatorSampleResult]: + log_slot_expects = sample.log_slot_item_expectations(sample.context.slots) + if log_slot_expects is None: + logger.warning(" Log slot distribution not available") + return None + tgt_slot_expects = sample.tgt_slot_expectations(sample.context.slots) + if tgt_slot_expects is None: + logger.warning(" Target slot distribution not available") + return None + slate_size = len(sample.context.slots) + slot_weights = sample.slot_weights + if slot_weights is None: + slot_weights = SlateSlotValues(torch.ones(slate_size, dtype=torch.double)) + weights = slot_weights.values.to(device=self._device) + if sample.slot_probabilities is not None: + weights *= sample.slot_probabilities.values + h = torch.zeros(slate_size, dtype=torch.double, device=self._device) + p = torch.zeros(slate_size, dtype=torch.double, device=self._device) + i = 0 + for slot, item in sample.log_slate: + h[i] = tgt_slot_expects[slot][item] + p[i] = log_slot_expects[slot][item] + i += 1 + nu = torch.tensordot(h, weights, dims=([0], [0])) + de = torch.tensordot(p, weights, dims=([0], [0])) + if nu == de: + weight = 1.0 + elif nu == 0: + weight = 0.0 + elif de == 0: + return None + else: + weight = self._weight_clamper(nu / de) + return EstimatorSampleResult( + sample.log_reward, + sample.log_reward * weight, + sample.ground_truth_reward, + weight, + ) + + # pyre-fixme[14]: `evaluate` overrides method defined in `Estimator` inconsistently. + def evaluate( + self, input: SlateEstimatorInput, *kwargs + ) -> Optional[EstimatorResult]: input.validate() - for episode in input.episodes: - log_avg = RunningAverage() - tgt_avg = RunningAverage() - acc_weight = 0.0 - gt_avg = RunningAverage() - log_slot_expects = episode.log_slot_item_expectations(episode.context.slots) - if log_slot_expects is None: - logging.warning(f"Log slot distribution not available") + log_avg = RunningAverage() + tgt_avg = RunningAverage() + acc_weight = RunningAverage() + gt_avg = RunningAverage() + zw = 0 + for sample in input.samples: + result = self._evaluate_sample(sample) + if result is None: + zw += 1 continue - tgt_slot_expects = episode.tgt_slot_expectations(episode.context.slots) - if tgt_slot_expects is None: - logging.warning(f"Target slot distribution not available") - continue - slate_size = len(episode.context.slots) - gt_slot_rewards = None - if episode.gt_item_rewards is not None: - gt_slot_rewards = tgt_slot_expects.expected_rewards( - episode.gt_item_rewards - ) - for sample in episode.samples: - slot_weights = episode.metric.slot_weights(episode.context.slots) - log_reward = episode.metric.calculate_reward( - episode.context.slots, sample.log_rewards, None, slot_weights - ) - log_avg.add(log_reward) - weights = slot_weights.values.to(device=self._device) - if sample.slot_probabilities is not None: - weights *= sample.slot_probabilities.values - h = torch.zeros(slate_size, dtype=torch.double, device=self._device) - p = torch.zeros(slate_size, dtype=torch.double, device=self._device) - i = 0 - for slot, item in sample.log_slate: - h[i] = log_slot_expects[slot][item] - p[i] = tgt_slot_expects[slot][item] - i += 1 - ips = torch.tensordot(h, weights, dims=([0], [0])) / torch.tensordot( - p, weights, dims=([0], [0]) - ) - ips = self._weight_clamper(ips) - if ips <= 0.0 or math.isinf(ips) or math.isnan(ips): - continue - tgt_avg.add(log_reward * ips) - acc_weight += ips - if gt_slot_rewards is not None: - gt_avg.add( - episode.metric.calculate_reward( - episode.context.slots, gt_slot_rewards - ) - ) - if tgt_avg.count == 0: 
- continue - if self._weighted: - self._append_estimate( - log_avg.average, tgt_avg.total / acc_weight, gt_avg.average - ) - else: - self._append_estimate(log_avg.average, tgt_avg.average, gt_avg.average) - return self.results + log_avg.add(result.log_reward) + tgt_avg.add(result.target_reward) + gt_avg.add(result.ground_truth_reward) + acc_weight.add(result.weight) + if result.weight == 0.0: + zw += 1 + if tgt_avg.count % 1000 == 0: + logger.info(f" PBMEstimator: processed {tgt_avg.count}") + logging.info( + f"PBMEstimator invalid sample pct: {zw * 100 / len(input.samples)}%" + ) + if tgt_avg.count == 0: + return None + if self._weighted: + estimated = tgt_avg.total / acc_weight.total + return EstimatorResult( + log_avg.average, estimated, gt_avg.average, acc_weight.average + ) + else: + return EstimatorResult( + log_avg.average, tgt_avg.average, gt_avg.average, tgt_avg.count + ) + + def __repr__(self): + return ( + f"PBMEstimator(weight_clamper({self._weight_clamper})" + f",weighted({self._weighted}),device({self._device}))" + ) diff --git a/reagent/ope/estimators/types.py b/reagent/ope/estimators/types.py index deed408e0..2c9ffde0f 100644 --- a/reagent/ope/estimators/types.py +++ b/reagent/ope/estimators/types.py @@ -1,49 +1,48 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -import random +import logging +import pickle from abc import ABC, abstractmethod +from copy import deepcopy from dataclasses import dataclass -from functools import reduce -from typing import ( - Generic, - Mapping, - MutableMapping, - MutableSequence, - Sequence, - Tuple, - TypeVar, - Union, -) +from typing import Generic, Mapping, Optional, Sequence, Tuple, TypeVar, Union import numpy as np import torch from torch import Tensor -def is_array(obj): +def is_array(obj) -> bool: return isinstance(obj, Tensor) or isinstance(obj, np.ndarray) Type = TypeVar("Type") +KeyType = TypeVar("KeyType") +ValueType = TypeVar("ValueType") @dataclass(frozen=True) -class TypeWrapper(Generic[Type]): - value: Type +class TypeWrapper(Generic[ValueType]): + value: ValueType - def __index__(self): + def __index__(self) -> int: try: + # pyre-fixme[6]: For 1st param expected `Union[_SupportsTrunc, bytes, + # str, SupportsInt, SupportsIndex]` but got `ValueType`. return int(self.value) except Exception: raise ValueError(f"{self} cannot be used as index") - def __int__(self): + def __int__(self) -> int: try: + # pyre-fixme[6]: For 1st param expected `Union[_SupportsTrunc, bytes, + # str, SupportsInt, SupportsIndex]` but got `ValueType`. return int(self.value) except Exception: raise ValueError(f"{self} cannot be converted to int") - def __hash__(self): + def __hash__(self) -> int: if ( isinstance(self.value, int) or isinstance(self.value, float) @@ -59,7 +58,7 @@ def __hash__(self): else: raise TypeError - def __eq__(self, other): + def __eq__(self, other) -> bool: if not isinstance(other, TypeWrapper): return False if isinstance(self.value, Tensor): @@ -72,7 +71,7 @@ def __eq__(self, other): else: return self.value == other.value - def __ne__(self, other): + def __ne__(self, other) -> bool: return not self.__eq__(other) def __lt__(self, other): @@ -85,11 +84,11 @@ def __lt__(self, other): else: return self.value < other.value - def __repr__(self): + def __repr__(self) -> str: return f"{self.__class__.__name__}{{value[{self.value}]}}" -class Values(Generic[Type], ABC): +class Objects(Generic[KeyType, ValueType], ABC): """ Generic class for a map from item to its value. 
It supports [] indexing, and iterator protocol @@ -100,61 +99,172 @@ class Values(Generic[Type], ABC): """ def __init__( - self, values: Union[Mapping[Type, float], Sequence[float], np.ndarray, Tensor] - ): + self, values: Union[Mapping[KeyType, ValueType], Sequence[ValueType]] + ) -> None: self._key_to_index = None self._index_to_key = None - if isinstance(values, Tensor): - self._values = values.to(dtype=torch.double) - elif isinstance(values, np.ndarray): - self._values = torch.as_tensor(values, dtype=torch.double) - elif isinstance(values, Sequence): - self._values = torch.tensor(values, dtype=torch.double) + self._init_values(values) + self._reset() + + def _init_values( + self, values: Union[Mapping[KeyType, ValueType], Sequence[ValueType]] + ) -> None: + if isinstance(values, Sequence): + # pyre-fixme[16]: `Objects` has no attribute `_values`. + self._values = list(values) elif isinstance(values, Mapping): self._key_to_index = dict(zip(values.keys(), range(len(values)))) self._index_to_key = list(values.keys()) - self._values = torch.tensor(list(values.values()), dtype=torch.double) + self._values = list(values.values()) else: raise TypeError(f"Unsupported values type {type(values)}") - self._reset() - def _reset(self): - self._probabilities = None - self._is_normalized = False + def _reset(self) -> None: self._unzipped = None - self._sorted = None + self._keys = None - def __getitem__(self, key: Type) -> float: + def __getitem__(self, key: KeyType) -> ValueType: if self._key_to_index is not None: - return self._values[self._key_to_index[key]].item() + # pyre-fixme[16]: `Objects` has no attribute `_values`. + return self._values[self._key_to_index[key]] else: - return self._values[key].item() + return self._values[key] - def __setitem__(self, key: Type, value: float): + def __setitem__(self, key: KeyType, value: ValueType) -> None: if self._key_to_index is not None: + # pyre-fixme[16]: `Objects` has no attribute `_values`. self._values[self._key_to_index[key]] = value else: self._values[key] = value self._reset() @abstractmethod - def _new_key(self, k: int) -> Type: + def _to_key(self, k: int) -> KeyType: pass + def _to_value(self, v) -> ValueType: + return v + def __iter__(self): if self._key_to_index is not None: - return ((k, self._values[i]) for k, i in self._key_to_index.items()) + return ( + (k, self._to_value(self._values[i])) + for k, i in self._key_to_index.items() + ) else: - return ((self._new_key(a), p.item()) for a, p in enumerate(self._values)) + return ( + (self._to_key(i), self._to_value(v)) for i, v in enumerate(self._values) + ) def __len__(self) -> int: - return self._values.shape[0] + # pyre-fixme[16]: `Objects` has no attribute `_values`. + return len(self._values) @property - def is_sequence(self): + def is_sequence(self) -> bool: return self._key_to_index is None - def sort(self, descending: bool = True) -> Tuple[Sequence[Type], Tensor]: + @property + def _values_copy(self) -> Sequence[ValueType]: + # pyre-fixme[16]: `Objects` has no attribute `_values`. + return list(self._values) + + def index_of(self, key: KeyType) -> int: + if self._key_to_index is None: + try: + # pyre-fixme[6]: Expected `Union[_SupportsIndex, bytes, str, + # typing.SupportsInt]` for 1st param but got `KeyType`. 
+ index = int(key) + if 0 <= index < len(self): + return index + else: + raise ValueError(f"{key} is not valid") + except Exception: + raise ValueError(f"{key} is not valid") + elif self._key_to_index is not None: + try: + return self._key_to_index[key] + except Exception: + raise ValueError(f"{key} is not valid") + else: + raise ValueError(f"{key} is not valid") + + @property + def keys(self) -> Sequence[KeyType]: + if self._keys is None: + if self._key_to_index is not None: + self._keys = list(self._key_to_index.keys()) + else: + self._keys = [self._to_key(i) for i in range(len(self))] + return self._keys + + @property + def values(self): + return self._values_copy + + def __repr__(self) -> str: + # pyre-fixme[16]: `Objects` has no attribute `_values`. + return f"{self.__class__.__name__}{{values[{self._values}]}}" + + +class Values(Objects[KeyType, float]): + """ + Generic class for a map from item to its value. + It supports [] indexing, and iterator protocol + + Attributes: + items: list of items + values: list of their values + """ + + def __init__( + self, + values: Union[Mapping[KeyType, float], Sequence[float], np.ndarray, Tensor], + ) -> None: + # pyre-fixme[6]: Expected `Union[Mapping[Variable[KeyType], + # Variable[ValueType]], Sequence[Variable[ValueType]]]` for 1st param but got + # `Union[Mapping[Variable[KeyType], float], Sequence[float], Tensor, + # np.ndarray]`. + super().__init__(values) + + def _init_values( + self, + values: Union[Mapping[KeyType, float], Sequence[float], np.ndarray, Tensor], + ) -> None: + if isinstance(values, Tensor): + # pyre-fixme[16]: `Values` has no attribute `_values`. + self._values = values.to(dtype=torch.double) + elif isinstance(values, np.ndarray): + self._values = torch.as_tensor(values, dtype=torch.double) + elif isinstance(values, Sequence): + self._values = torch.tensor(values, dtype=torch.double) + elif isinstance(values, Mapping): + self._key_to_index = dict(zip(values.keys(), range(len(values)))) + self._index_to_key = list(values.keys()) + self._values = torch.tensor(list(values.values()), dtype=torch.double) + else: + raise TypeError(f"Unsupported values type {type(values)}") + + def _reset(self) -> None: + super()._reset() + # pyre-fixme[16]: `Values` has no attribute `_probabilities`. + self._probabilities = None + # pyre-fixme[16]: `Values` has no attribute `_is_normalized`. + self._is_normalized = False + # pyre-fixme[16]: `Values` has no attribute `_sorted`. + self._sorted = None + + def __getitem__(self, key: KeyType) -> float: + return super().__getitem__(key).item() + + def _to_value(self, v: Tensor) -> float: + return v.item() + + def __len__(self) -> int: + # pyre-fixme[16]: `Values` has no attribute `_values`. + return self._values.shape[0] + + def sort(self, descending: bool = True) -> Tuple[Sequence[KeyType], Tensor]: """ Sort based on values @@ -164,7 +274,9 @@ def sort(self, descending: bool = True) -> Tuple[Sequence[Type], Tensor]: Returns: Tuple of sorted indices and values """ + # pyre-fixme[16]: `Values` has no attribute `_sorted`. if self._sorted is None: + # pyre-fixme[16]: `Values` has no attribute `_values`. 
rs, ids = torch.sort(self._values, descending=descending) if self._index_to_key is not None: self._sorted = ( @@ -172,59 +284,17 @@ def sort(self, descending: bool = True) -> Tuple[Sequence[Type], Tensor]: rs.detach(), ) else: - self._sorted = ([self._new_key(i.item()) for i in ids], rs.detach()) + self._sorted = ([self._to_key(i.item()) for i in ids], rs.detach()) return self._sorted - def _unzip(self): - if self._unzipped is None: - if self._key_to_index is not None: - self._unzipped = ( - list(self._key_to_index.keys()), - self._values.clone().detach(), - ) - else: - self._unzipped = ( - [self._new_key(i) for i in range(self._values.shape[0])], - self._values.clone().detach(), - ) - - def index_of(self, item: Type) -> int: - if self._key_to_index is None and isinstance(item.value, int): - if 0 <= item.value < self._values.shape[0]: - return item.value - else: - raise ValueError(f"{item} is not valid") - elif self._key_to_index is not None: - try: - return self._key_to_index[item] - except Exception: - raise ValueError(f"{item} is not valid") - else: - raise ValueError(f"{item} is not valid") - - @property - def items(self) -> Sequence[Type]: - self._unzip() - return self._unzipped[0] - @property - def values(self) -> Tensor: - self._unzip() - return self._unzipped[1] - - def __repr__(self): - return f"{self.__class__.__name__}{{values[{self._values}]}}" - - def copy(self) -> "Values": - cp = self.__class__(self._values.clone().detach()) - if self._key_to_index is not None: - cp._key_to_index = dict(self._key_to_index) - if self._index_to_key is not None: - cp._index_to_key = list(self._index_to_key) - return cp + def _values_copy(self) -> Tensor: + # pyre-fixme[16]: `Values` has no attribute `_values`. + return self._values.clone().detach() def replace( - self, values: Union[Mapping[Type, float], Sequence[float], Tensor, np.ndarray] + self, + values: Union[Mapping[ValueType, float], Sequence[float], Tensor, np.ndarray], ) -> "Values": """ Replace current values with new values, and returns the new copy. @@ -236,8 +306,9 @@ def replace( Returns: Values object with new values """ - copy = self.copy() + copy = deepcopy(self) if isinstance(values, Tensor): + # pyre-fixme[16]: `Values` has no attribute `_values`. assert values.shape[0] == copy._values.shape[0] copy._values = values.to(dtype=torch.double) elif isinstance(values, np.ndarray): @@ -257,22 +328,26 @@ def replace( raise TypeError(f"Unsupported values type {type(values)}") return copy - def _normalize(self): + def _normalize(self) -> None: + # pyre-fixme[16]: `Values` has no attribute `_is_normalized`. if self._is_normalized: + # pyre-fixme[16]: `Values` has no attribute `_probabilities`. if self._probabilities is None: raise ValueError(f"Invalid distribution {type(self._values)}") return self._is_normalized = True self._probabilities = None try: + # pyre-fixme[16]: `Values` has no attribute `_values`. dist = self._values.detach().clamp(min=0.0) dist /= dist.sum() self._probabilities = dist except ZeroDivisionError: pass - def probability(self, key: Type) -> float: + def probability(self, key: ValueType) -> float: self._normalize() + # pyre-fixme[16]: `Values` has no attribute `_probabilities`. 
if self._probabilities is not None: if self._key_to_index is not None: return self._probabilities[self._key_to_index[key]].item() @@ -281,37 +356,32 @@ def probability(self, key: Type) -> float: else: return 0.0 - def sample(self, size=1) -> Union[Sequence[Type], Type]: + def sample(self, size: int = 1) -> Sequence[KeyType]: self._normalize() if self._index_to_key is not None: l = [ self._index_to_key[k.item()] + # pyre-fixme[16]: `Values` has no attribute `_probabilities`. for k in torch.multinomial(self._probabilities, size) ] else: l = [ - self._new_key(k.item()) + self._to_key(k.item()) for k in torch.multinomial(self._probabilities, size) ] - if size == 1: - return l[0] - else: - return l + return l - def greedy(self, size=1) -> Union[Sequence[Type], Type]: + def greedy(self, size: int = 1) -> Sequence[KeyType]: sorted_keys, _ = self.sort() - if size == 1: - return sorted_keys[0] - else: - return sorted_keys[:size] + return sorted_keys[:size] -class Items(Generic[Type], ABC): +class Items(Generic[ValueType], ABC): """ List of items """ - def __init__(self, items: Union[Sequence[Type], int]): + def __init__(self, items: Union[Sequence[ValueType], int]) -> None: if isinstance(items, int): assert items > 0 self._items = [self._new_item(i) for i in range(items)] @@ -320,33 +390,35 @@ def __init__(self, items: Union[Sequence[Type], int]): self._items = items self._reverse_lookup = {v: i for i, v in enumerate(items)} - def __getitem__(self, i) -> Type: + def __getitem__(self, i) -> ValueType: return self._items[i] - def __len__(self): + def __len__(self) -> int: return len(self._items) def __iter__(self): return iter(self._items) - def __int__(self): + def __int__(self) -> int: if self._reverse_lookup is None: return len(self._items) else: return 0 @abstractmethod - def _new_item(self, i: int) -> Type: + def _new_item(self, i: int) -> ValueType: pass @property - def is_sequence(self): + def is_sequence(self) -> bool: return self._reverse_lookup is None - def index_of(self, item: Type) -> int: - if self._reverse_lookup is None and isinstance(item.value, int): - if 0 <= item.value < len(self._items): - return item.value + def index_of(self, item: ValueType) -> int: + if self._reverse_lookup is None: + # pyre-fixme[16]: `ValueType` has no attribute `value`. 
+ int_val = int(item.value) + if 0 <= int_val < len(self._items): + return int_val else: raise ValueError(f"{item} is not valid") elif self._reverse_lookup is not None: @@ -357,9 +429,10 @@ def index_of(self, item: Type) -> int: else: raise ValueError(f"{item} is not valid") - def _fill( - self, values: Union[Mapping[Type, float], Sequence[float], np.ndarray, Tensor] - ) -> Union[Sequence[float], Mapping[Type, float]]: + def fill( + self, + values: Union[Mapping[ValueType, float], Sequence[float], np.ndarray, Tensor], + ) -> Union[Sequence[float], Mapping[ValueType, float]]: if self._reverse_lookup is None: if isinstance(values, Mapping): ds = [] @@ -383,7 +456,13 @@ def _fill( ds[a] = 0.0 return ds else: - raise Type(f"{values} not valid type") + ds = {} + for a in self._items: + try: + ds[a] = values[self._reverse_lookup[a]] + except Exception: + ds[a] = 0.0 + return ds # action type @@ -398,7 +477,7 @@ def _fill( # if action can be indexed, the type is either sequence of float or 1-D tensor, # with the indices being the action class ActionDistribution(Values[Action]): - def _new_key(self, k: int) -> Action: + def _to_key(self, k: int) -> Action: return Action(k) @@ -413,7 +492,7 @@ def space(self) -> Sequence[Action]: def distribution( self, dist: Union[Mapping[Action, float], Sequence[float], np.ndarray, Tensor] ) -> ActionDistribution: - return ActionDistribution(super()._fill(dist)) + return ActionDistribution(super().fill(dist)) class Policy(ABC): @@ -421,7 +500,7 @@ class Policy(ABC): Policy interface """ - def __init__(self, action_space: ActionSpace, device=None): + def __init__(self, action_space: ActionSpace, device=None) -> None: self._action_space = action_space self._device = device @@ -433,5 +512,90 @@ def __call__(self, context) -> Tuple[Action, ActionDistribution]: return self._query(context) @property - def action_space(self): + def action_space(self) -> ActionSpace: return self._action_space + + +@dataclass(frozen=True) +class TrainingData: + train_x: Tensor + train_y: Tensor + train_weight: Optional[Tensor] + validation_x: Tensor + validation_y: Tensor + validation_weight: Optional[Tensor] + + +@dataclass(frozen=True) +class PredictResults: + predictions: Optional[Tensor] # shape = [num_samples] + scores: Tensor # shape = [num_samples] + probabilities: Optional[Tensor] = None + + +class Trainer(ABC): + def __init__(self) -> None: + self._model = None + + @staticmethod + def _sample( + x: Tensor, + y: Tensor, + weight: Optional[Tensor] = None, + num_samples: int = 0, + fortran_order: bool = False, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + assert x.shape[0] == y.shape[0] + x_na = x.numpy() + if fortran_order: + x_na = x_na.reshape(x.shape, order="F") + y_na = y.numpy() + w_na = weight.numpy() if weight is not None else None + if num_samples > 0 and num_samples < x.shape[0]: + cs = np.random.choice(x.shape[0], num_samples, replace=False) + x_na = x_na[cs, :] + y_na = y_na[cs] + w_na = w_na[cs] if w_na is not None else None + return x_na, y_na, w_na + + def reset(self) -> None: + self._model = None + + @property + @abstractmethod + def name(self) -> str: + pass + + @property + def is_trained(self) -> bool: + return self._model is not None + + @abstractmethod + def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): + pass + + @abstractmethod + def predict(self, x: Tensor, device=None) -> PredictResults: + pass + + @abstractmethod + def score(self, x: Tensor, y: Tensor, weight: Optional[Tensor] = None) -> float: + pass + + def 
save_model(self, file: str) -> None:
+        if self._model is None:
+            logging.error(f"{self.__class__.__name__}.save_model: _model is None ")
+            return
+        try:
+            with open(file, "wb") as f:
+                pickle.dump(self._model, f, protocol=pickle.HIGHEST_PROTOCOL)
+        except Exception:
+            logging.error(f"{file} cannot be accessed.")
+
+    def load_model(self, file: str) -> None:
+        try:
+            logging.info(f"{self.__class__.__name__}.load_model: {file}")
+            with open(file, "rb") as f:
+                self._model = pickle.load(f)
+        except Exception:
+            logging.error(f"{file} cannot be read.")
diff --git a/reagent/ope/test/cartpole.py b/reagent/ope/test/cartpole.py
new file mode 100644
index 000000000..0affbb16a
--- /dev/null
+++ b/reagent/ope/test/cartpole.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python3
+# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
+
+import logging
+
+import gym
+import torch
+from reagent.ope.estimators.sequential_estimators import (
+    Action,
+    ActionDistribution,
+    ActionSpace,
+    IPSEstimator,
+    Model,
+    NeuralDualDICE,
+    RandomRLPolicy,
+    RewardProbability,
+    RLEstimatorInput,
+    RLPolicy,
+    State,
+    StateDistribution,
+    Transition,
+)
+from reagent.ope.utils import RunningAverage
+
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+NUM_EPISODES = 200
+MAX_HORIZON = 250
+GAMMA = 0.99
+
+
+class ComboPolicy(RLPolicy):
+    # Weighted combination between two given policies
+    def __init__(self, action_space: ActionSpace, weights, policies):
+        assert len(weights) == len(policies)
+        self._weights = weights
+        self._policies = policies
+        self._action_space = action_space
+
+    def action_dist(self, state: State) -> ActionDistribution:
+        weighted_policies = [
+            w * p(state).values for w, p in zip(self._weights, self._policies)
+        ]
+        weighted = torch.stack(weighted_policies).sum(0)
+        return self._action_space.distribution(weighted)
+
+
+class PyTorchPolicy(RLPolicy):
+    def __init__(self, action_space: ActionSpace, model):
+        self._action_space = action_space
+        self._model = model
+        self._softmax = torch.nn.Softmax(dim=0)
+
+    def action_dist(self, state: State) -> ActionDistribution:
+        self._model.eval()
+        dist = self._model(torch.tensor(state.value, dtype=torch.float).reshape(1, -1))[
+            0
+        ]
+        return self._action_space.distribution(self._softmax(dist))
+
+
+class EnvironmentModel(torch.nn.Module):
+    def __init__(self, state_dim, action_dim, hidden_dim, hidden_layers, activation):
+        super(EnvironmentModel, self).__init__()
+        self._state_dim = state_dim
+        self._action_dim = action_dim
+        self._hidden_dim = hidden_dim
+        self._hidden_layers = hidden_layers
+        self._activation = activation
+
+        self.layers = []
+        dim = self._state_dim + self._action_dim
+        for _ in range(self._hidden_layers):
+            self.layers.append(torch.nn.Linear(dim, self._hidden_dim))
+            self.layers.append(self._activation())
+            dim = self._hidden_dim
+        # Output is the next state and its reward
+        self.layers.append(torch.nn.Linear(dim, self._state_dim + 1))
+        self.model = torch.nn.Sequential(*self.layers)
+
+    def forward(self, state: torch.Tensor, action: torch.Tensor):
+        x = torch.cat((state, action), dim=1)
+        return self.model(x)
+
+
+class ModelWrapper(Model):
+    def __init__(self, model: EnvironmentModel, device=None):
+        self._model = model
+        self._device = device
+        self._model.to(self._device)
+
+    def next_state_reward_dist(self, state: State, action: Action) -> StateDistribution:
+        self._model.eval()
+        state_reward_tensor = (
+            self._model(
+                torch.tensor(state.value, dtype=torch.float)
+                .reshape(-1, 
self._model._state_dim) + .to(self._device), + torch.nn.functional.one_hot( + torch.tensor(action.value, dtype=torch.long), + self._model._action_dim, + ) + .reshape(-1, self._model._action_dim) + .float() + .to(self._device), + ) + .reshape(-1) + .cpu() + ) + return { + State(state_reward_tensor[: self._model._state_dim]): RewardProbability( + state_reward_tensor[-1].item() + ) + } + + def to(self, device): + self._model.to(device) + + +def generate_logs(episodes: int, max_horizon: int, policy: RLPolicy): + """ + Args: + episodes: number of episodes to generate + max_horizon: max horizon of each episode + policy: RLPolicy which uses real-valued states + """ + log = [] + env = gym.make("CartPole-v0") + for _ in range(episodes): + init_state = env.reset() + cur_state = init_state + mdp = [] + for _ in range(max_horizon): + action_dist = policy(State(cur_state)) + action = action_dist.sample()[0].value + action_prob = action_dist.probability(Action(action)) + next_state, _, done, _ = env.step(action) + mdp.append( + Transition( + last_state=State(cur_state), + action=Action(action), + action_prob=action_prob, + state=State(next_state), + reward=1.0, + status=Transition.Status.NORMAL, + ) + ) + cur_state = next_state + if done: + log.append(mdp) + break + log.append(mdp) + return log + + +def zeta_nu_loss_callback(losses, estimated_values, input: RLEstimatorInput): + def callback_fn(zeta_loss, nu_loss, estimator): + losses.append((zeta_loss, nu_loss)) + estimated_values.append(estimator._compute_estimates(input)) + + return callback_fn + + +def estimate_value(episodes: int, max_horizon: int, policy: RLPolicy, gamma: float): + avg = RunningAverage() + env = gym.make("CartPole-v0") + for _ in range(episodes): + init_state = env.reset() + cur_state = init_state + r = 0.0 + discount = 1.0 + for _ in range(max_horizon): + action_dist = policy(State(cur_state)) + action = action_dist.sample()[0].value + next_state, _, done, _ = env.step(action) + reward = 1.0 + r += reward * discount + discount *= gamma + if done: + break + cur_state = next_state + avg.add(r) + return avg.average + + +def run_dualdice_test(model_path: str, alpha: float): + device = torch.device("cuda") if torch.cuda.is_available() else None + logger.info(f"Device - {device}") + model = torch.jit.load(model_path) + model = model.dqn_with_preprocessor.model + + random_policy = RandomRLPolicy(ActionSpace(2)) + model_policy = PyTorchPolicy(ActionSpace(2), model) + target_policy = ComboPolicy( + ActionSpace(2), [0.7, 0.3], [model_policy, random_policy] + ) + behavior_policy = ComboPolicy( + ActionSpace(2), + [0.55 + 0.15 * alpha, 0.45 - 0.15 * alpha], + [model_policy, random_policy], + ) + + ground_truth = estimate_value(NUM_EPISODES, MAX_HORIZON, target_policy, GAMMA) + log_policy_value = estimate_value(NUM_EPISODES, MAX_HORIZON, behavior_policy, GAMMA) + trained_policy_value = estimate_value( + NUM_EPISODES, MAX_HORIZON, model_policy, GAMMA + ) + + logger.info(f"Target Policy Ground Truth value: {ground_truth}") + logger.info(f"Behavior Policy Ground Truth value: {log_policy_value}") + logger.info(f"Model Policy Ground Truth value: {trained_policy_value}") + + log = generate_logs(NUM_EPISODES, MAX_HORIZON, behavior_policy) + + inp = RLEstimatorInput( + gamma=GAMMA, log=log, target_policy=target_policy, discrete_states=False + ) + ips = IPSEstimator() + dualdice_losses = [] + dualdice_values = [] + dualdice = NeuralDualDICE( + state_dim=4, + action_dim=2, + deterministic_env=True, + average_next_v=False, + value_lr=0.003, + 
zeta_lr=0.003, + batch_size=2048, + reporting_frequency=1000, + training_samples=100000, + loss_callback_fn=zeta_nu_loss_callback(dualdice_losses, dualdice_values, inp), + device=device, + ) + + ips_result = ips.evaluate(inp) + dd_result = dualdice.evaluate(inp) + + return { + "ips_estimate": ips_result, + "dualdice_estimate": dd_result, + "ground_truth": ground_truth, + "dualdice_losses": dualdice_losses, + "dualdice_estimates_per_epoch": dualdice_values, + } + + +if __name__ == "__main__": + run_dualdice_test( + "/mnt/vol/gfsfblearner-nebraska/flow/data/2020-07-27/a56cd422-794b-4866-9b73-5de95fb65700/207851498_207851498_0.pt", + 0.0, + ) diff --git a/reagent/ope/test/configs/ecoli_config.json b/reagent/ope/test/configs/ecoli_config.json index 4f7e2eff6..fca60d482 100644 --- a/reagent/ope/test/configs/ecoli_config.json +++ b/reagent/ope/test/configs/ecoli_config.json @@ -4,7 +4,5 @@ "sep": "\\s+", "index_col": 0, "label_col": 8 - }, - "iterations": 500, - "estimators": ["dm","ips", "dr"] + } } \ No newline at end of file diff --git a/reagent/ope/test/configs/letter_recog_config.json b/reagent/ope/test/configs/letter_recog_config.json index a631f9cae..f84a61aa0 100644 --- a/reagent/ope/test/configs/letter_recog_config.json +++ b/reagent/ope/test/configs/letter_recog_config.json @@ -3,7 +3,5 @@ "file": "data/letter-recognition.data", "sep": ",", "label_col": 0 - }, - "iterations": 500, - "estimators": ["dm","ips", "dr"] + } } \ No newline at end of file diff --git a/reagent/ope/test/configs/mslr_web30k_config.json b/reagent/ope/test/configs/mslr_web30k_config.json index 9bba836f3..a91e9f6a6 100644 --- a/reagent/ope/test/configs/mslr_web30k_config.json +++ b/reagent/ope/test/configs/mslr_web30k_config.json @@ -14,6 +14,21 @@ "source_file": "test.txt", "cache_file": "test.pickle" }, + "all_set": { + "folder": "data/MSLR-WEB30K", + "source_file": "", + "cache_file": "all.pickle" + }, + "first_set": { + "folder": "data/MSLR-WEB30K", + "source_file": "", + "cache_file": "first_half.pickle" + }, + "second_set": { + "folder": "data/MSLR-WEB30K", + "source_file": "", + "cache_file": "second_half.pickle" + }, "num_columns": 138, "anchor_url_features": [ 2, diff --git a/reagent/ope/test/configs/optdigits_config.json b/reagent/ope/test/configs/optdigits_config.json new file mode 100644 index 000000000..4557f4bf7 --- /dev/null +++ b/reagent/ope/test/configs/optdigits_config.json @@ -0,0 +1,7 @@ +{ + "dataset": { + "file": "data/optdigits.data", + "sep": ",", + "label_col": 64 + } + } diff --git a/reagent/ope/test/configs/pendigits_config.json b/reagent/ope/test/configs/pendigits_config.json index 5c4737040..821f72fd8 100644 --- a/reagent/ope/test/configs/pendigits_config.json +++ b/reagent/ope/test/configs/pendigits_config.json @@ -3,7 +3,5 @@ "file": "data/pendigits.data", "sep": ",", "label_col": 16 - }, - "iterations": 500, - "estimators": ["dm", "ips", "dr"] + } } \ No newline at end of file diff --git a/reagent/ope/test/configs/satimage_config.json b/reagent/ope/test/configs/satimage_config.json new file mode 100644 index 000000000..ba80238c0 --- /dev/null +++ b/reagent/ope/test/configs/satimage_config.json @@ -0,0 +1,7 @@ +{ + "dataset": { + "file": "data/satimage.data", + "sep": " ", + "label_col": 36 + } + } diff --git a/reagent/ope/test/configs/yandex_web_search_config.json b/reagent/ope/test/configs/yandex_web_search_config.json index 41f6ae510..85013f814 100644 --- a/reagent/ope/test/configs/yandex_web_search_config.json +++ b/reagent/ope/test/configs/yandex_web_search_config.json @@ -5,10 
+5,9 @@ "source_file": "train", "total_days": 27 }, - "log_training_data": { + "log_data": { "folder": "data/Yandex_Web_Search", "base_file_name": "train", - "min_query_count": 10, "days": [ 1, 3, @@ -23,12 +22,12 @@ 23, 25 ], - "cache_file": "log_train.pickle" + "cache_file": "log_dataset.pickle", + "min_query_count": 10 }, - "target_training_data": { + "target_data": { "folder": "data/Yandex_Web_Search", "base_file_name": "train", - "min_query_count": 10, "days": [ 2, 4, @@ -43,49 +42,15 @@ 24, 26 ], - "cache_file": "target_train.pickle" - }, - "ground_truth_training_data": { - "folder": "data/Yandex_Web_Search", - "base_file_name": "train", - "min_query_count": 10, - "days": [ - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27 - ], - "cache_file": "ground_truth_train.pickle" + "cache_file": "target_dataset.pickle", + "min_query_count": 10 }, "test_data": { "folder": "data/Yandex_Web_Search", - "cache_folder": "", "base_file_name": "train", "days": [ 9 - ] + ], + "cache_file_name": "test_log" } } \ No newline at end of file diff --git a/reagent/ope/test/data/optdigits.data b/reagent/ope/test/data/optdigits.data new file mode 100644 index 000000000..01f530702 --- /dev/null +++ b/reagent/ope/test/data/optdigits.data @@ -0,0 +1,5620 @@ +0,1,6,15,12,1,0,0,0,7,16,6,6,10,0,0,0,8,16,2,0,11,2,0,0,5,16,3,0,5,7,0,0,7,13,3,0,8,7,0,0,4,12,0,1,13,5,0,0,0,14,9,15,9,0,0,0,0,6,14,7,1,0,0,0 +0,0,10,16,6,0,0,0,0,7,16,8,16,5,0,0,0,11,16,0,6,14,3,0,0,12,12,0,0,11,11,0,0,12,12,0,0,8,12,0,0,7,15,1,0,13,11,0,0,0,16,8,10,15,3,0,0,0,10,16,15,3,0,0,0 +0,0,8,15,16,13,0,0,0,1,11,9,11,16,1,0,0,0,0,0,7,14,0,0,0,0,3,4,14,12,2,0,0,1,16,16,16,16,10,0,0,2,12,16,10,0,0,0,0,0,2,16,4,0,0,0,0,0,9,14,0,0,0,0,7 +0,0,0,3,11,16,0,0,0,0,5,16,11,13,7,0,0,3,15,8,1,15,6,0,0,11,16,16,16,16,10,0,0,1,4,4,13,10,2,0,0,0,0,0,15,4,0,0,0,0,0,3,16,0,0,0,0,0,0,1,15,2,0,0,4 +0,0,5,14,4,0,0,0,0,0,13,8,0,0,0,0,0,3,14,4,0,0,0,0,0,6,16,14,9,2,0,0,0,4,16,3,4,11,2,0,0,0,14,3,0,4,11,0,0,0,10,8,4,11,12,0,0,0,4,12,14,7,0,0,6 +0,0,11,16,10,1,0,0,0,4,16,10,15,8,0,0,0,4,16,3,11,13,0,0,0,1,14,6,9,14,0,0,0,0,0,0,12,10,0,0,0,0,0,6,16,6,0,0,0,0,5,15,15,8,8,3,0,0,10,16,16,16,16,6,2 +0,0,1,11,13,11,7,0,0,0,9,14,6,4,3,0,0,0,16,12,16,15,2,0,0,5,16,10,4,12,6,0,0,1,1,0,0,10,4,0,0,0,0,0,5,10,0,0,0,0,0,8,15,3,0,0,0,0,1,13,5,0,0,0,5 +0,0,8,10,8,7,2,0,0,1,15,14,12,12,4,0,0,7,15,12,5,0,0,0,0,5,14,12,15,7,0,0,0,0,0,0,2,13,0,0,0,0,0,0,4,12,0,0,0,0,6,7,14,5,0,0,0,0,4,13,8,0,0,0,5 +0,0,15,2,14,13,2,0,0,0,16,15,12,13,8,0,0,2,16,12,1,6,10,0,0,7,15,3,0,5,8,0,0,5,12,0,0,8,8,0,0,5,12,0,7,15,5,0,0,5,16,13,16,6,0,0,0,0,10,12,5,0,0,0,0 +0,0,3,13,13,2,0,0,0,6,16,12,10,8,0,0,0,9,15,12,16,6,0,0,0,10,16,16,13,0,0,0,0,1,12,16,12,14,4,0,0,0,11,8,0,3,12,0,0,0,13,11,8,13,12,0,0,0,3,15,11,6,0,0,8 +0,0,6,14,14,16,16,8,0,0,7,11,8,10,15,3,0,0,0,0,4,15,10,0,0,1,15,16,16,16,14,0,0,3,11,13,13,0,0,0,0,0,0,15,5,0,0,0,0,0,7,13,0,0,0,0,0,0,10,12,0,0,0,0,7 +0,0,0,3,16,11,1,0,0,0,0,8,16,16,1,0,0,0,0,9,16,14,0,0,0,1,7,16,16,11,0,0,0,9,16,16,16,8,0,0,0,1,8,6,16,7,0,0,0,0,0,5,16,9,0,0,0,0,0,2,14,14,1,0,1 +0,0,0,4,13,16,16,3,0,0,8,16,9,12,16,4,0,7,16,3,3,15,13,0,0,9,15,14,16,16,6,0,0,1,8,7,12,15,0,0,0,0,0,0,13,10,0,0,0,0,0,3,15,6,0,0,0,0,0,5,15,4,0,0,9 +0,0,7,12,6,2,0,0,0,0,16,16,13,14,1,0,0,9,16,11,3,0,0,0,0,8,16,16,16,4,0,0,0,1,2,0,6,12,0,0,0,0,0,0,7,12,0,0,0,0,6,9,16,6,0,0,0,0,5,16,9,0,0,0,5 
+0,0,7,11,11,6,0,0,0,9,16,12,10,14,0,0,0,5,2,0,4,14,0,0,0,0,1,5,14,6,0,0,0,1,15,16,16,10,0,0,0,0,7,4,4,15,6,0,0,0,5,4,8,13,12,0,0,0,14,16,12,10,1,0,3 +0,1,10,15,8,0,0,0,0,6,16,7,11,8,0,0,0,7,16,3,1,13,1,0,0,7,13,0,0,10,6,0,0,8,12,0,0,14,4,0,0,3,16,0,6,15,2,0,0,0,15,9,16,4,0,0,0,0,9,15,8,0,0,0,0 +0,0,0,1,11,7,0,0,0,0,2,13,10,16,4,0,0,0,13,4,1,16,0,0,0,6,14,8,12,16,7,0,0,0,8,8,15,10,2,0,0,0,0,1,12,1,0,0,0,0,0,4,16,0,0,0,0,0,0,3,15,0,0,0,4 +0,0,5,12,16,16,3,0,0,0,11,11,4,16,9,0,0,0,0,4,8,16,5,0,0,0,4,16,16,16,14,0,0,0,0,11,14,1,0,0,0,0,0,13,10,0,0,0,0,0,3,16,1,0,0,0,0,0,8,12,0,0,0,0,7 +0,0,1,8,13,13,2,0,0,4,16,8,1,12,4,0,0,7,13,3,10,13,0,0,0,3,15,15,14,15,0,0,0,0,13,10,0,10,6,0,0,0,11,5,0,9,8,0,0,0,7,11,4,14,2,0,0,0,1,13,12,4,0,0,8 +0,0,0,2,13,12,4,0,0,0,3,15,15,13,12,0,0,2,15,14,1,12,8,0,0,8,16,14,16,16,11,0,0,3,16,16,16,16,10,0,0,0,0,0,8,13,0,0,0,0,0,0,13,7,0,0,0,0,0,0,15,3,0,0,4 +0,0,4,11,15,16,15,0,0,0,13,13,8,13,14,0,0,0,0,0,0,15,5,0,0,0,2,4,10,15,1,0,0,0,10,16,16,16,8,0,0,0,1,13,13,1,0,0,0,0,1,16,6,0,0,0,0,0,6,14,2,0,0,0,7 +0,0,4,10,13,11,1,0,0,2,13,10,4,8,8,0,0,6,13,4,9,15,4,0,0,4,16,16,16,13,0,0,0,0,12,11,1,8,8,0,0,0,12,4,0,7,8,0,0,0,12,8,8,14,0,0,0,0,6,13,11,1,0,0,8 +0,0,3,11,13,14,6,0,0,0,12,9,3,0,0,0,0,2,14,0,0,0,0,0,0,8,10,6,12,12,2,0,0,7,16,13,6,14,8,0,0,0,0,0,1,14,1,0,0,0,1,4,11,6,0,0,0,0,3,13,10,0,0,0,5 +0,0,1,4,11,13,7,0,0,2,14,12,10,16,5,0,0,7,14,6,14,12,0,0,0,2,12,11,10,15,1,0,0,0,0,0,0,16,4,0,0,0,0,0,3,16,3,0,0,0,0,0,10,11,0,0,0,0,0,1,14,3,0,0,9 +0,0,9,13,1,0,0,0,0,0,8,16,6,0,0,0,0,0,7,16,10,0,0,0,0,0,13,16,10,0,0,0,0,0,9,16,14,0,0,0,0,0,0,7,16,5,0,0,0,0,3,9,16,13,8,5,0,0,4,15,16,16,16,16,1 +0,0,9,16,11,0,0,0,0,4,16,13,16,4,0,0,0,0,9,3,13,9,0,0,0,0,0,0,16,8,0,0,0,0,0,3,16,4,0,0,0,0,0,12,15,1,0,0,0,0,6,16,16,15,11,1,0,0,10,16,9,9,13,6,2 +0,0,2,13,9,0,0,0,0,0,14,11,12,7,0,0,0,6,16,1,0,16,0,0,0,5,12,0,0,11,5,0,0,8,13,0,0,8,7,0,0,1,16,0,0,9,8,0,0,0,13,3,6,16,1,0,0,0,3,16,14,4,0,0,0 +0,0,0,10,12,0,0,0,0,0,9,14,4,0,0,0,0,0,15,3,0,0,0,0,0,2,15,2,6,1,0,0,0,2,16,15,12,15,4,0,0,0,16,5,0,3,14,0,0,0,12,10,4,11,14,0,0,0,1,11,14,12,1,0,6 +0,0,0,0,10,13,0,0,0,0,0,0,15,16,0,0,0,0,0,7,16,14,0,0,0,3,12,16,16,13,0,0,0,3,11,9,16,9,0,0,0,0,0,0,16,9,0,0,0,0,0,0,15,12,0,0,0,0,0,0,8,15,2,0,1 +0,0,7,9,13,11,2,0,0,6,16,9,1,13,8,0,0,8,14,5,11,14,2,0,0,3,16,16,16,6,0,0,0,2,16,5,1,12,5,0,0,4,15,0,0,8,8,0,0,3,16,4,7,13,2,0,0,0,13,12,8,1,0,0,8 +0,0,9,14,16,7,0,0,0,1,14,6,14,13,0,0,0,0,0,0,14,8,0,0,0,0,10,13,16,13,2,0,0,0,16,16,16,13,8,0,0,0,2,15,4,0,0,0,0,0,8,13,0,0,0,0,0,0,12,10,0,0,0,0,7 +0,0,8,16,8,0,0,0,0,1,16,9,10,9,0,0,0,4,15,0,0,10,1,0,0,6,12,0,0,6,6,0,0,5,12,0,0,4,8,0,0,4,14,0,0,7,8,0,0,1,16,9,8,14,4,0,0,0,6,15,14,5,0,0,0 +0,0,9,16,16,15,2,0,0,0,9,6,8,16,8,0,0,0,0,2,1,16,7,0,0,0,12,16,16,16,13,0,0,0,3,4,15,10,1,0,0,0,0,7,15,2,0,0,0,0,3,15,7,0,0,0,0,0,8,13,1,0,0,0,7 +0,0,2,14,10,0,0,0,0,1,14,12,0,0,0,0,0,5,14,1,0,0,0,0,0,6,11,0,0,0,0,0,0,7,15,13,15,7,1,0,0,3,15,8,0,11,10,0,0,0,11,9,4,8,15,0,0,0,1,12,14,12,4,0,6 +0,0,0,3,11,16,11,0,0,0,5,16,14,13,16,3,0,3,16,8,0,13,14,0,0,10,15,6,11,16,6,0,0,8,16,15,14,15,1,0,0,0,3,0,10,12,0,0,0,0,0,0,15,9,0,0,0,0,0,1,16,5,0,0,9 +0,0,0,0,13,16,3,0,0,0,0,1,15,16,0,0,0,2,5,13,16,14,0,0,0,10,16,15,16,12,0,0,0,1,4,5,16,12,0,0,0,0,0,1,16,14,0,0,0,0,0,0,16,15,0,0,0,0,0,0,11,16,8,0,1 +0,0,4,14,16,16,12,0,0,0,12,9,0,5,16,3,0,1,15,14,10,13,12,0,0,8,16,16,16,9,3,0,0,1,4,8,15,0,0,0,0,0,0,8,13,0,0,0,0,0,1,14,9,0,0,0,0,0,6,15,4,0,0,0,9 
+0,0,6,12,11,3,0,0,0,0,16,9,16,7,0,0,0,0,0,10,13,0,0,0,0,0,14,16,16,10,1,0,0,0,8,2,3,15,4,0,0,0,1,0,0,12,6,0,0,8,15,6,9,16,3,0,0,0,7,12,13,6,0,0,3 +0,0,0,3,10,15,9,4,0,0,2,15,7,9,16,6,0,0,11,6,0,10,16,1,0,1,15,14,16,16,7,0,0,0,5,7,2,14,2,0,0,0,0,0,10,6,0,0,0,0,0,1,15,0,0,0,0,0,0,7,9,0,0,0,9 +0,0,0,2,10,16,12,2,0,0,2,14,12,7,16,3,0,3,15,15,4,10,15,0,0,6,16,16,16,16,14,0,0,0,3,3,4,16,6,0,0,0,0,0,6,16,2,0,0,0,0,0,10,14,0,0,0,0,0,0,14,9,0,0,4 +0,0,8,14,9,2,0,0,0,2,14,5,11,13,0,0,0,0,15,0,6,16,4,0,0,0,13,9,15,15,6,0,0,0,0,5,1,7,9,0,0,0,0,0,0,5,12,0,0,0,12,3,0,9,8,0,0,0,7,14,15,13,0,0,9 +0,0,6,16,12,2,0,0,0,6,16,9,11,11,0,0,0,7,14,0,5,14,0,0,0,3,6,0,7,11,0,0,0,0,0,0,14,7,0,0,0,0,0,8,15,0,0,0,0,0,4,16,10,4,3,0,0,0,7,16,13,14,16,3,2 +0,0,8,14,2,0,0,0,0,0,5,16,6,0,0,0,0,0,0,16,11,0,0,0,0,0,2,16,15,0,0,0,0,0,0,14,16,2,0,0,0,0,0,1,16,9,0,0,0,0,5,15,16,15,12,8,0,0,4,15,16,13,12,12,1 +0,0,0,4,11,16,11,0,0,0,6,13,3,3,15,0,0,0,14,2,4,14,11,0,0,0,12,16,15,15,8,0,0,0,0,2,0,14,2,0,0,0,0,0,4,11,0,0,0,0,0,0,14,4,0,0,0,0,0,3,10,0,0,0,9 +0,0,0,0,9,15,12,0,0,0,2,14,9,6,16,1,0,0,12,4,1,12,11,0,0,4,15,7,13,16,3,0,0,2,12,11,1,15,0,0,0,0,0,0,5,11,0,0,0,0,0,0,8,8,0,0,0,0,0,0,9,7,0,0,9 +0,0,2,13,5,0,0,0,0,0,11,15,0,0,0,0,0,1,16,2,0,0,0,0,0,2,16,8,6,2,0,0,0,5,16,9,10,15,2,0,0,0,16,4,0,9,8,0,0,0,11,10,3,16,6,0,0,0,4,15,12,9,1,0,6 +0,0,0,1,10,16,4,0,0,0,1,13,16,16,3,0,0,0,11,15,9,16,3,0,0,11,16,9,14,13,0,0,0,10,16,16,16,16,9,0,0,1,4,4,16,9,2,0,0,0,0,0,14,5,0,0,0,0,0,0,13,9,0,0,4 +0,0,4,13,16,8,0,0,0,6,16,9,11,12,0,0,0,9,11,4,16,6,0,0,0,0,8,15,16,12,1,0,0,0,2,6,1,13,8,0,0,0,9,1,0,9,8,0,0,0,13,8,9,15,4,0,0,0,6,14,12,5,0,0,3 +0,1,14,14,4,0,0,0,0,10,15,15,15,0,0,0,0,14,10,4,16,3,0,0,0,2,1,9,16,0,0,0,0,0,0,13,13,0,0,0,0,0,3,16,5,0,0,0,0,0,13,16,13,5,4,0,0,2,15,14,16,16,16,5,2 +0,2,7,14,11,1,0,0,0,8,15,3,7,8,0,0,0,10,10,1,12,8,0,0,0,4,16,16,15,6,0,0,0,2,16,6,0,11,7,0,0,1,16,0,0,4,13,0,0,0,16,2,5,14,4,0,0,0,7,16,14,6,0,0,8 +0,1,14,15,5,0,0,0,0,3,16,15,15,3,0,0,0,3,16,9,16,5,0,0,0,0,5,6,16,4,0,0,0,0,0,8,15,1,0,0,0,0,0,14,11,0,0,0,0,0,9,16,14,8,7,0,0,0,15,16,16,16,16,6,2 +0,0,1,8,14,14,6,0,0,0,3,11,7,12,14,0,0,0,0,0,0,9,11,0,0,1,12,12,12,15,8,0,0,0,7,8,14,13,1,0,0,0,0,4,15,2,0,0,0,0,0,13,6,0,0,0,0,0,1,14,2,0,0,0,7 +0,0,0,1,15,9,1,0,0,0,0,5,16,16,0,0,0,0,0,13,16,11,0,0,0,2,8,16,16,11,0,0,0,5,12,13,16,8,0,0,0,0,0,5,16,7,0,0,0,0,0,7,16,4,0,0,0,0,0,3,16,15,1,0,1 +0,0,0,4,15,6,0,0,0,0,3,15,9,4,14,0,0,2,15,9,0,11,10,0,0,7,16,10,12,16,14,0,0,3,14,13,15,14,2,0,0,0,0,0,12,10,0,0,0,0,0,2,16,6,0,0,0,0,0,6,13,1,0,0,4 +0,0,2,13,6,0,0,0,0,0,10,14,6,0,0,0,0,0,15,6,0,0,0,0,0,0,16,3,0,0,0,0,0,0,14,16,16,12,1,0,0,2,16,13,3,9,10,0,0,0,8,14,4,13,13,0,0,0,2,12,14,12,4,0,6 +0,0,11,16,6,0,0,0,0,2,16,14,14,1,0,0,0,6,16,2,15,6,0,0,0,2,11,0,15,5,0,0,0,0,0,0,16,5,0,0,0,0,0,4,16,2,0,0,0,0,6,15,15,8,8,3,0,0,12,16,15,16,16,6,2 +0,0,7,14,6,0,0,0,0,5,16,9,13,3,0,0,0,8,12,0,5,12,0,0,0,4,12,0,0,11,6,0,0,6,12,0,0,9,7,0,0,1,15,0,1,15,5,0,0,0,13,7,13,11,0,0,0,0,4,15,11,0,0,0,0 +0,0,0,0,8,12,5,0,0,0,0,10,11,11,9,0,0,0,11,10,0,16,1,0,0,6,16,13,12,15,4,0,0,2,8,8,12,14,4,0,0,0,0,0,12,4,0,0,0,0,0,0,13,1,0,0,0,0,0,0,11,0,0,0,4 +0,0,1,12,11,0,0,0,0,0,8,14,3,0,0,0,0,1,14,5,0,0,0,0,0,1,16,9,4,0,0,0,0,3,16,13,12,12,3,0,0,1,16,3,0,1,15,0,0,0,9,9,2,9,15,2,0,0,2,14,15,12,3,0,6 +0,2,11,15,16,8,0,0,0,4,9,1,5,16,4,0,0,0,0,5,13,13,0,0,0,0,4,16,16,5,0,0,0,0,0,2,7,14,2,0,0,0,2,0,0,11,7,0,0,6,10,1,8,14,2,0,0,3,14,15,11,3,0,0,3 +0,0,5,12,14,16,5,0,0,6,16,15,10,16,6,0,0,0,3,0,3,16,2,0,0,0,1,4,14,13,2,0,0,7,16,16,16,16,11,0,0,4,9,14,12,1,1,0,0,0,0,16,8,0,0,0,0,0,4,16,5,0,0,0,7 
+0,0,0,0,13,5,0,0,0,0,0,3,16,13,0,0,0,0,0,10,16,12,0,0,0,6,16,16,16,8,0,0,0,4,8,6,16,5,0,0,0,0,0,4,16,4,0,0,0,0,0,4,16,8,0,0,0,0,0,0,12,13,5,0,1 +0,0,10,8,11,2,0,0,0,0,16,15,8,13,2,0,0,4,16,4,0,9,7,0,0,7,14,0,0,3,8,0,0,8,12,0,0,7,8,0,0,6,6,0,3,15,4,0,0,4,14,10,16,7,0,0,0,0,10,14,5,0,0,0,0 +0,1,9,13,9,0,0,0,0,3,16,10,11,11,0,0,0,1,9,0,2,15,0,0,0,0,0,0,4,15,0,0,0,0,0,1,11,8,0,0,0,0,0,10,15,1,0,0,0,0,6,16,16,13,9,0,0,0,11,12,8,8,8,1,2 +0,3,12,16,10,0,0,0,0,8,16,10,16,6,0,0,0,2,9,0,12,8,0,0,0,0,0,0,15,9,0,0,0,0,0,9,14,1,0,0,0,0,4,16,9,0,0,0,0,1,14,16,13,12,9,1,0,4,16,16,13,15,16,3,2 +0,0,10,16,16,16,4,0,0,2,14,8,1,1,0,0,0,7,15,12,12,7,0,0,0,3,9,7,5,16,6,0,0,0,0,0,0,5,8,0,0,0,0,0,0,13,7,0,0,0,11,4,13,12,0,0,0,0,9,13,9,0,0,0,5 +0,0,8,15,5,0,0,0,0,4,15,12,16,0,0,0,0,10,9,0,12,4,0,0,0,3,1,0,13,3,0,0,0,0,0,1,16,0,0,0,0,0,0,6,11,0,0,0,0,0,5,16,16,16,15,0,0,0,9,10,4,6,9,0,2 +0,0,0,0,11,2,0,0,0,0,0,8,12,0,0,0,0,0,2,15,4,4,8,0,0,1,13,8,0,10,10,0,0,9,16,8,9,16,7,0,0,6,12,12,13,14,1,0,0,0,0,0,14,8,0,0,0,0,0,3,16,4,0,0,4 +0,0,5,12,16,12,0,0,0,2,16,10,8,16,4,0,0,7,16,8,2,16,4,0,0,0,5,15,16,10,0,0,0,0,0,12,16,8,0,0,0,0,6,12,5,16,4,0,0,0,8,12,6,16,6,0,0,0,5,15,15,9,0,0,8 +0,0,0,6,14,11,1,0,0,0,2,14,16,16,2,0,0,0,11,16,16,16,0,0,0,5,15,16,16,16,4,0,0,1,5,7,16,16,2,0,0,0,0,3,16,16,3,0,0,0,0,4,16,16,4,0,0,0,0,4,15,15,4,0,1 +0,0,5,10,16,15,6,0,0,0,12,9,8,15,12,0,0,0,0,0,1,16,5,0,0,2,15,12,13,15,2,0,0,2,11,11,15,10,5,0,0,0,0,11,7,0,0,0,0,0,4,16,1,0,0,0,0,0,9,12,0,0,0,0,7 +0,0,10,13,15,12,5,0,0,0,9,5,5,13,7,0,0,0,0,1,11,13,0,0,0,0,5,16,16,3,0,0,0,0,2,10,12,15,7,0,0,0,0,0,1,16,4,0,0,0,2,8,13,12,1,0,0,0,10,15,7,0,0,0,3 +0,0,2,12,11,2,0,0,0,1,15,12,9,13,0,0,0,6,13,1,0,14,2,0,0,6,12,0,0,8,8,0,0,8,12,0,0,7,8,0,0,5,15,0,0,7,9,0,0,0,13,9,8,15,4,0,0,0,3,13,16,8,0,0,0 +0,0,5,13,14,5,0,0,0,5,16,12,12,16,0,0,0,1,3,0,11,14,0,0,0,0,0,9,16,4,0,0,0,0,2,16,16,9,1,0,0,0,0,3,5,16,7,0,0,0,2,4,9,16,5,0,0,0,5,13,14,6,0,0,3 +0,0,6,9,13,11,0,0,0,10,14,8,9,16,4,0,0,0,0,0,1,15,6,0,0,0,0,3,14,14,1,0,0,0,7,16,16,16,4,0,0,0,1,1,0,12,9,0,0,0,1,2,5,14,10,0,0,0,7,16,13,10,1,0,3 +0,0,7,11,16,7,0,0,0,1,12,12,13,16,0,0,0,0,0,0,6,16,0,0,0,4,13,10,15,13,2,0,0,1,8,14,16,12,4,0,0,0,2,15,7,0,0,0,0,0,9,15,0,0,0,0,0,0,12,7,0,0,0,0,7 +0,1,9,15,16,8,0,0,0,7,16,12,16,8,0,0,0,0,2,0,16,8,0,0,0,1,8,12,16,10,1,0,0,4,16,16,16,16,11,0,0,0,12,16,3,4,2,0,0,0,9,16,0,0,0,0,0,0,14,11,0,0,0,0,7 +0,0,1,9,13,11,3,0,0,0,13,12,8,10,12,0,0,5,13,1,8,15,10,0,0,6,16,16,15,16,6,0,0,0,5,5,5,14,0,0,0,0,0,1,12,7,0,0,0,0,0,8,15,0,0,0,0,0,0,15,7,0,0,0,9 +0,0,0,4,13,12,2,0,0,0,1,15,16,16,3,0,0,0,7,16,16,16,2,0,0,1,14,16,16,16,3,0,0,7,16,16,16,16,0,0,0,0,0,11,16,13,0,0,0,0,0,8,16,15,3,0,0,0,0,6,16,12,2,0,1 +0,0,6,13,15,4,0,0,0,5,16,9,8,12,0,0,0,4,4,0,10,10,0,0,0,0,1,10,16,10,1,0,0,0,3,11,8,15,8,0,0,0,0,0,0,11,7,0,0,0,4,4,8,15,1,0,0,0,4,15,15,1,0,0,3 +0,0,7,13,16,9,0,0,0,0,11,8,9,16,5,0,0,0,0,0,7,15,0,0,0,0,0,13,16,5,0,0,0,0,0,10,12,16,3,0,0,0,0,0,3,16,2,0,0,0,2,7,14,7,0,0,0,0,10,10,2,0,0,0,3 +0,0,0,2,14,2,0,0,0,0,0,11,13,0,0,0,0,0,8,15,3,2,10,0,0,2,16,9,0,12,15,0,0,13,16,9,12,16,9,0,0,12,16,16,16,16,2,0,0,0,4,3,14,14,0,0,0,0,0,0,15,13,0,0,4 +0,3,9,14,16,13,1,0,0,7,13,9,10,16,4,0,0,0,1,1,12,12,1,0,0,0,0,14,16,4,0,0,0,0,0,8,14,16,2,0,0,0,0,0,3,16,8,0,0,0,3,9,15,12,1,0,0,2,16,13,7,0,0,0,3 +0,0,0,4,16,4,0,0,0,0,0,14,14,2,0,0,0,0,7,16,5,1,3,0,0,4,16,11,1,13,11,0,0,13,16,13,13,16,8,0,0,9,16,16,16,16,4,0,0,0,0,4,16,7,0,0,0,0,0,5,16,9,0,0,4 +0,0,12,16,7,0,0,0,0,1,16,9,15,2,0,0,0,0,3,0,12,4,0,0,0,0,0,0,13,4,0,0,0,0,0,7,15,1,0,0,0,1,11,16,7,0,0,0,0,7,16,16,11,6,2,0,0,1,8,11,12,13,7,0,2 
+0,0,6,13,16,10,1,0,0,0,15,7,5,13,7,0,0,7,15,1,1,14,6,0,0,1,12,13,14,9,0,0,0,0,0,15,16,3,0,0,0,0,7,13,8,15,0,0,0,0,12,8,7,16,1,0,0,0,5,16,16,5,0,0,8 +0,0,4,12,16,15,5,0,0,0,13,16,16,16,7,0,0,4,16,13,14,11,0,0,0,6,16,16,14,1,0,0,0,0,9,16,12,1,0,0,0,0,12,12,15,7,0,0,0,0,13,10,13,12,0,0,0,0,3,15,13,4,0,0,8 +0,0,3,12,13,10,1,0,0,2,13,9,7,14,8,0,0,8,12,0,0,13,8,0,0,4,15,13,14,16,1,0,0,0,1,4,14,9,0,0,0,0,0,6,14,1,0,0,0,0,2,16,5,0,0,0,0,0,2,16,1,0,0,0,9 +0,0,1,11,12,4,0,0,0,1,11,12,9,15,5,0,0,7,16,4,3,15,7,0,0,3,15,13,15,11,0,0,0,0,3,16,13,14,0,0,0,0,8,9,0,14,7,0,0,0,11,9,9,15,6,0,0,0,2,12,14,8,0,0,8 +0,0,0,4,13,0,0,0,0,0,0,15,8,2,5,0,0,0,11,9,0,12,8,0,0,4,14,1,0,14,5,0,0,8,14,6,7,16,6,0,0,2,11,13,15,13,2,0,0,0,0,1,15,3,0,0,0,0,0,4,14,0,0,0,4 +0,0,12,15,15,12,1,0,0,0,3,4,5,15,7,0,0,0,0,0,5,14,0,0,0,1,11,12,14,11,0,0,0,0,4,11,16,16,7,0,0,0,0,13,7,0,0,0,0,0,4,14,1,0,0,0,0,0,12,5,0,0,0,0,7 +0,0,2,10,12,4,0,0,0,0,10,16,16,16,0,0,0,0,13,16,16,12,0,0,0,0,12,16,16,12,0,0,0,0,12,16,16,12,0,0,0,0,12,16,16,13,0,0,0,0,8,16,16,15,0,0,0,0,4,10,8,3,0,0,1 +0,0,0,10,8,0,0,0,0,0,3,16,7,0,0,0,0,0,10,9,0,0,0,0,0,0,14,9,7,3,0,0,0,1,16,16,14,16,5,0,0,1,15,10,0,3,14,1,0,0,8,12,5,5,15,4,0,0,1,9,15,16,11,0,6 +0,0,1,10,16,7,0,0,0,1,14,14,12,16,4,0,0,6,16,2,1,16,4,0,0,6,16,11,13,16,2,0,0,0,11,12,16,11,0,0,0,0,0,3,16,5,0,0,0,0,0,11,14,0,0,0,0,0,0,13,11,0,0,0,9 +0,0,0,0,10,0,0,0,0,0,0,6,13,0,0,0,0,0,1,13,5,2,2,0,0,0,11,9,0,14,7,0,0,5,16,1,8,16,2,0,0,3,16,14,16,15,5,0,0,0,0,0,16,8,0,0,0,0,0,0,15,8,0,0,4 +0,0,0,8,12,2,0,0,0,0,9,15,8,12,0,0,0,5,16,3,0,12,2,0,0,5,13,0,0,5,7,0,0,8,10,0,0,4,8,0,0,0,16,0,0,3,12,0,0,0,11,9,4,14,6,0,0,0,0,10,13,9,1,0,0 +0,0,0,3,15,11,4,0,0,0,0,13,16,15,0,0,0,0,7,16,16,12,0,0,0,6,16,16,16,12,0,0,0,5,10,5,16,13,0,0,0,0,0,4,16,16,2,0,0,0,0,6,16,16,2,0,0,0,0,3,13,12,1,0,1 +0,1,8,13,16,13,0,0,0,5,14,7,8,15,0,0,0,0,0,2,13,8,0,0,0,0,0,13,16,13,1,0,0,0,0,2,5,14,7,0,0,0,0,0,0,13,7,0,0,0,2,6,12,13,1,0,0,0,7,13,7,1,0,0,3 +0,0,0,7,13,4,0,0,0,0,8,15,7,2,0,0,0,2,15,5,0,0,0,0,0,5,15,0,0,0,0,0,0,5,16,16,15,12,4,0,0,2,15,11,4,10,12,0,0,0,7,13,5,12,13,0,0,0,1,8,13,12,3,0,6 +0,0,2,14,3,0,0,0,0,0,13,16,12,0,0,0,0,8,12,2,12,0,0,0,0,6,8,0,12,0,0,0,0,0,0,0,15,0,0,0,0,0,1,5,15,0,0,0,0,0,7,16,16,14,9,0,0,0,2,12,12,12,11,0,2 +0,0,0,2,13,1,0,0,0,0,1,15,11,0,0,0,0,0,8,15,2,2,0,0,0,1,16,7,3,16,3,0,0,7,16,10,10,16,4,0,0,5,12,13,16,15,2,0,0,0,0,1,14,10,0,0,0,0,0,0,15,11,0,0,4 +0,0,11,16,16,16,3,0,0,1,12,12,13,16,5,0,0,0,0,0,7,16,2,0,0,0,5,13,16,15,1,0,0,0,12,16,16,16,10,0,0,0,1,13,10,4,1,0,0,0,8,16,2,0,0,0,0,0,15,11,0,0,0,0,7 +0,0,0,7,15,1,0,0,0,0,2,14,11,0,0,0,0,0,8,16,4,1,7,0,0,5,16,9,0,12,15,0,0,11,16,14,12,16,9,0,0,6,12,12,16,16,1,0,0,0,0,5,16,8,0,0,0,0,0,10,12,0,0,0,4 +0,0,2,13,9,0,0,0,0,2,16,15,14,6,0,0,0,8,16,5,3,15,0,0,0,7,16,1,0,11,7,0,0,6,12,0,0,8,8,0,0,0,16,2,0,7,10,0,0,0,12,12,7,15,5,0,0,0,0,13,16,9,0,0,0 +0,2,14,16,14,2,0,0,0,9,15,9,16,8,0,0,0,1,3,1,16,6,0,0,0,0,0,5,16,3,0,0,0,0,0,13,13,0,0,0,0,0,7,16,3,0,0,0,0,2,15,16,10,12,6,0,0,4,16,16,16,16,11,0,2 +0,0,8,15,16,14,5,0,0,5,16,4,6,16,8,0,0,7,16,4,9,15,3,0,0,0,12,16,16,3,0,0,0,0,3,15,16,9,0,0,0,0,13,6,6,16,4,0,0,0,16,8,8,16,6,0,0,0,11,16,13,8,0,0,8 +0,4,14,14,16,16,4,0,0,5,16,11,5,4,0,0,0,4,16,4,0,0,0,0,0,6,16,16,16,11,0,0,0,4,11,6,7,16,4,0,0,0,0,0,4,16,4,0,0,0,1,7,15,12,0,0,0,4,16,12,5,0,0,0,5 +0,0,0,9,15,10,1,0,0,0,4,15,16,16,4,0,0,4,15,16,16,16,4,0,0,8,15,10,16,16,8,0,0,0,0,0,16,16,7,0,0,0,0,1,16,16,6,0,0,0,0,7,16,16,4,0,0,0,0,6,12,12,1,0,1 
+0,0,5,16,10,1,0,0,0,11,14,13,14,12,0,0,0,12,16,4,3,15,5,0,0,11,12,1,0,7,9,0,0,9,10,0,0,3,14,0,0,6,14,0,0,9,16,0,0,0,14,9,9,16,11,0,0,0,5,15,16,15,1,0,0 +0,0,0,0,11,15,2,0,0,0,0,7,16,16,7,0,0,0,3,15,16,16,4,0,0,6,16,16,16,16,3,0,0,3,8,6,16,16,0,0,0,0,0,4,16,16,0,0,0,0,0,4,16,16,4,0,0,0,0,1,13,16,3,0,1 +0,0,3,12,15,4,0,0,0,0,14,12,13,14,2,0,0,1,16,0,8,16,10,0,0,5,15,13,16,16,8,0,0,1,8,9,11,16,5,0,0,0,0,2,15,10,0,0,0,0,0,10,14,2,0,0,0,0,3,14,5,0,0,0,9 +0,0,0,10,13,1,0,0,0,0,8,16,12,2,0,0,0,1,15,10,0,0,0,0,0,6,16,7,5,0,0,0,0,2,16,16,16,15,3,0,0,2,16,12,0,7,13,0,0,0,11,10,4,10,16,2,0,0,2,11,15,14,9,0,6 +0,0,1,8,16,14,1,0,0,0,11,13,7,15,7,0,0,2,15,2,0,9,12,0,0,6,15,9,13,16,8,0,0,0,6,12,13,16,2,0,0,0,0,0,12,10,0,0,0,0,0,8,15,1,0,0,0,0,0,13,7,0,0,0,9 +0,0,2,12,15,2,0,0,0,0,8,14,4,1,0,0,0,0,15,7,0,0,0,0,0,2,16,6,4,2,0,0,0,3,16,16,15,15,4,0,0,2,15,11,0,4,14,0,0,0,11,9,0,3,16,0,0,0,2,13,16,16,12,0,6 +0,0,4,12,14,9,1,0,0,1,16,13,9,16,7,0,0,7,16,1,4,16,4,0,0,8,13,4,12,16,4,0,0,2,14,14,16,9,0,0,0,0,0,7,16,2,0,0,0,0,1,14,11,0,0,0,0,0,4,16,5,0,0,0,9 +0,1,11,13,13,12,7,0,0,2,16,6,4,10,9,0,0,2,16,2,0,0,0,0,0,4,16,16,16,4,0,0,0,1,10,5,7,12,0,0,0,0,0,0,5,11,0,0,0,0,1,5,14,3,0,0,0,0,15,14,7,0,0,0,5 +0,0,0,11,11,0,0,0,0,0,3,16,7,8,11,0,0,0,14,14,0,16,12,0,0,7,16,7,9,16,9,0,0,8,16,16,16,16,11,0,0,1,4,10,16,8,0,0,0,0,0,9,15,0,0,0,0,0,0,9,11,0,0,0,4 +0,0,11,10,8,14,10,0,0,1,16,14,12,12,7,0,0,0,16,8,0,0,0,0,0,5,16,16,10,2,0,0,0,2,8,8,12,15,1,0,0,0,0,0,5,16,3,0,0,0,5,9,16,10,0,0,0,2,12,12,4,0,0,0,5 +0,0,0,7,14,15,3,0,0,0,11,16,9,15,10,0,0,5,14,3,4,14,10,0,0,7,15,10,16,16,4,0,0,2,11,11,10,16,1,0,0,0,0,0,11,11,0,0,0,0,0,5,16,7,0,0,0,0,0,10,15,0,0,0,9 +0,1,13,11,11,13,5,0,0,8,16,9,8,8,1,0,0,4,16,0,0,0,0,0,0,4,16,16,14,6,0,0,0,2,11,8,9,16,4,0,0,0,0,0,0,12,8,0,0,0,1,5,8,15,6,0,0,2,14,12,12,6,0,0,5 +0,1,13,16,16,16,4,0,0,0,11,12,13,16,4,0,0,0,0,0,13,11,0,0,0,0,11,16,16,16,7,0,0,0,11,15,14,13,8,0,0,0,0,16,7,0,0,0,0,0,9,16,1,0,0,0,0,0,14,10,0,0,0,0,7 +0,0,0,0,13,12,1,0,0,0,0,5,16,16,1,0,0,0,0,14,16,14,0,0,0,0,7,16,16,10,0,0,0,5,16,16,16,14,0,0,0,2,6,9,16,16,1,0,0,0,0,2,16,16,7,0,0,0,0,0,8,12,6,0,1 +0,0,4,10,16,9,0,0,0,4,15,9,8,12,0,0,0,4,4,0,8,10,0,0,0,0,0,6,16,13,1,0,0,0,0,9,10,12,7,0,0,0,0,0,0,10,7,0,0,0,1,6,10,16,3,0,0,0,4,14,10,2,0,0,3 +0,0,5,9,12,12,2,0,0,0,7,10,10,16,5,0,0,0,0,0,6,13,0,0,0,0,7,9,14,14,6,0,0,0,8,14,15,8,2,0,0,0,0,11,7,0,0,0,0,0,6,15,0,0,0,0,0,0,11,7,0,0,0,0,7 +0,0,4,11,15,4,0,0,0,3,15,15,10,16,0,0,0,8,15,2,0,14,4,0,0,7,13,0,0,10,8,0,0,7,12,0,0,13,4,0,0,1,15,4,1,15,2,0,0,0,10,12,11,12,0,0,0,0,2,11,13,2,0,0,0 +0,0,8,13,7,0,0,0,0,5,16,9,15,6,0,0,0,5,8,0,7,13,0,0,0,2,4,0,1,15,0,0,0,0,0,0,4,13,0,0,0,0,0,0,10,10,0,0,0,0,5,12,16,9,8,1,0,0,8,13,14,15,16,4,2 +0,0,0,11,13,1,0,0,0,2,7,16,11,11,0,0,0,8,16,8,0,9,1,0,0,7,12,7,0,4,5,0,0,3,9,0,0,4,9,0,0,1,12,1,0,10,7,0,0,0,7,11,10,15,0,0,0,0,1,11,13,5,0,0,0 +0,0,0,2,14,8,0,0,0,0,0,8,14,2,6,0,0,0,3,15,7,6,16,2,0,1,13,12,1,12,11,0,0,11,16,16,16,16,10,0,0,5,10,8,13,15,3,0,0,0,0,0,14,12,0,0,0,0,0,4,16,7,0,0,4 +0,0,1,12,16,12,2,0,0,0,8,12,11,16,4,0,0,5,16,3,12,13,1,0,0,2,15,15,15,3,0,0,0,0,5,16,15,6,0,0,0,0,6,13,8,16,6,0,0,0,6,13,4,13,12,0,0,0,1,14,16,11,2,0,8 +0,0,9,13,6,0,0,0,0,5,16,9,14,1,0,0,0,9,9,0,11,5,0,0,0,1,1,0,10,6,0,0,0,0,0,0,15,4,0,0,0,0,0,5,15,0,0,0,0,0,5,15,11,8,8,0,0,0,8,16,12,12,15,4,2 +0,0,6,15,16,11,3,0,0,0,12,8,8,14,9,0,0,0,0,0,3,16,4,0,0,0,0,5,16,13,1,0,0,0,0,8,12,14,7,0,0,0,0,0,3,15,2,0,0,0,2,9,15,8,0,0,0,0,10,9,2,0,0,0,3 
+0,0,6,7,8,9,9,0,0,5,16,16,14,13,3,0,0,5,10,7,3,0,0,0,0,5,16,16,13,0,0,0,0,0,0,0,12,1,0,0,0,0,0,0,13,1,0,0,0,0,8,11,12,0,0,0,0,0,10,11,2,0,0,0,5 +0,1,8,14,16,6,0,0,0,9,15,10,13,10,0,0,0,1,2,7,16,5,0,0,0,0,4,16,9,0,0,0,0,0,4,16,11,2,0,0,0,0,0,4,13,16,9,0,0,1,8,4,4,13,13,0,0,0,9,14,16,13,7,0,3 +0,2,12,13,12,2,0,0,0,2,7,5,15,7,0,0,0,0,0,9,16,2,0,0,0,4,16,14,3,0,0,0,0,2,12,16,14,5,0,0,0,0,0,1,6,15,4,0,0,0,1,0,2,13,6,0,0,4,13,15,16,12,1,0,3 +0,0,0,6,14,0,0,0,0,0,3,15,9,0,0,0,0,0,14,14,0,7,10,0,0,8,16,8,0,14,15,1,0,12,16,12,10,16,10,0,0,5,14,16,16,15,2,0,0,0,0,4,16,6,0,0,0,0,0,8,14,0,0,0,4 +0,0,7,15,11,0,0,0,0,9,10,14,15,12,0,0,0,9,12,0,0,4,1,0,0,7,16,14,9,0,0,0,0,0,7,13,16,6,0,0,0,0,0,0,12,9,0,0,0,0,11,10,16,8,0,0,0,0,5,16,14,3,0,0,5 +0,1,12,14,4,0,0,0,0,7,15,7,9,0,0,0,0,6,15,1,3,0,0,0,0,1,14,9,5,9,6,0,0,0,3,16,15,5,2,0,0,2,15,13,15,0,0,0,0,7,14,4,14,3,0,0,0,0,11,14,11,0,0,0,8 +0,0,1,7,16,2,0,0,0,0,14,13,12,10,1,0,0,3,14,0,3,16,4,0,0,1,15,16,15,16,0,0,0,0,2,7,2,10,3,0,0,0,0,0,0,11,2,0,0,0,0,9,2,10,4,0,0,0,0,9,16,12,1,0,9 +0,0,0,11,6,0,0,0,0,0,0,15,10,0,0,0,0,0,5,16,2,0,0,0,0,0,8,13,0,0,0,0,0,0,10,12,5,7,1,0,0,0,9,16,12,12,15,2,0,0,7,15,6,9,15,8,0,0,1,10,15,15,10,0,6 +0,0,5,11,8,4,0,0,0,0,15,16,16,15,1,0,0,0,12,16,16,15,2,0,0,3,16,16,16,11,0,0,0,4,16,16,16,4,0,0,0,3,16,16,16,8,0,0,0,0,12,16,16,9,0,0,0,0,3,8,12,8,0,0,1 +0,2,15,15,5,0,0,0,0,4,13,10,16,0,0,0,0,0,4,4,16,0,0,0,0,0,0,2,15,0,0,0,0,0,0,9,10,0,0,0,0,0,3,15,3,0,0,0,0,1,13,14,8,8,6,0,0,2,13,12,10,9,6,0,2 +0,0,9,15,3,0,0,0,0,1,15,16,15,5,0,0,0,3,16,8,9,14,0,0,0,4,13,0,0,10,4,0,0,3,13,0,0,7,6,0,0,2,12,0,0,10,10,0,0,1,12,9,13,16,6,0,0,0,7,16,16,8,0,0,0 +0,0,7,11,11,2,0,0,0,0,16,16,15,13,0,0,0,3,15,7,0,10,3,0,0,3,12,0,0,5,7,0,0,7,9,0,0,10,6,0,0,4,13,0,3,15,6,0,0,2,15,12,15,14,0,0,0,0,7,16,12,2,0,0,0 +0,0,1,14,4,0,0,0,0,0,6,15,2,0,0,0,0,0,12,8,0,0,0,0,0,0,12,4,0,0,0,0,0,2,16,13,12,9,0,0,0,5,16,11,8,15,10,0,0,0,13,9,4,9,11,0,0,0,3,11,14,13,4,0,6 +0,0,9,16,15,2,0,0,0,4,16,13,15,11,0,0,0,6,10,0,14,10,0,0,0,0,0,4,16,4,0,0,0,0,0,10,14,0,0,0,0,0,5,15,4,0,0,0,0,0,14,14,11,12,5,0,0,0,12,16,16,11,3,0,2 +0,0,11,16,16,16,7,0,0,0,9,8,8,16,13,0,0,0,0,0,2,16,10,0,0,0,2,4,12,16,4,0,0,0,15,16,16,16,10,0,0,0,5,14,10,4,0,0,0,0,6,16,0,0,0,0,0,0,12,11,0,0,0,0,7 +0,0,0,12,6,0,0,0,0,0,3,16,1,0,0,0,0,0,6,13,0,0,0,0,0,0,10,9,0,0,0,0,0,0,13,8,8,3,0,0,0,1,16,12,8,12,8,0,0,0,8,12,1,0,14,3,0,0,0,8,14,16,12,3,6 +0,0,0,5,15,1,0,0,0,0,1,14,12,0,0,0,0,1,12,15,0,3,13,1,0,7,16,4,0,11,15,0,0,14,13,0,4,16,5,0,0,14,16,16,16,11,0,0,0,2,8,13,16,3,0,0,0,0,0,7,16,2,0,0,4 +0,0,0,9,7,0,0,0,0,0,4,16,5,0,0,0,0,0,8,13,0,0,0,0,0,0,13,8,0,0,0,0,0,2,15,7,8,7,1,0,0,1,15,11,8,10,13,0,0,0,11,10,4,5,15,2,0,0,0,9,12,13,9,0,6 +0,0,3,16,11,1,0,0,0,0,12,16,16,2,0,0,0,3,16,16,11,0,0,0,0,5,16,16,7,0,0,0,0,6,16,16,5,0,0,0,0,1,16,16,3,0,0,0,0,0,11,16,9,0,0,0,0,0,4,12,14,12,5,0,1 +0,0,5,11,12,5,0,0,0,0,9,16,16,11,0,0,0,0,11,16,16,4,0,0,0,1,16,16,15,2,0,0,0,0,14,16,11,0,0,0,0,2,16,16,8,0,0,0,0,0,14,16,8,0,0,0,0,0,8,16,13,1,0,0,1 +0,0,0,1,14,2,0,0,0,0,0,10,13,0,1,0,0,0,6,13,2,6,9,0,0,1,15,4,0,13,6,0,0,6,14,6,7,16,2,0,0,1,9,13,16,14,5,0,0,0,0,0,16,2,0,0,0,0,0,0,14,0,0,0,4 +0,2,12,16,15,8,0,0,0,13,16,13,8,12,2,0,0,16,13,1,0,0,0,0,0,9,16,13,5,0,0,0,0,1,11,13,14,2,0,0,0,0,0,0,16,8,0,0,0,0,3,11,16,5,0,0,0,0,14,16,10,1,0,0,5 +0,0,6,14,14,4,0,0,0,0,13,15,11,13,0,0,0,0,2,2,2,16,1,0,0,0,0,0,3,14,1,0,0,0,0,0,9,11,0,0,0,0,0,4,16,3,0,0,0,0,4,15,16,16,14,2,0,0,7,13,12,9,9,3,2 
+0,0,7,14,1,0,0,0,0,0,14,13,6,0,0,0,0,0,12,8,3,0,0,0,0,0,3,14,12,16,6,0,0,0,2,14,16,4,0,0,0,0,11,14,8,14,2,0,0,3,16,3,1,16,4,0,0,0,8,14,16,11,1,0,8 +0,0,0,10,7,0,0,0,0,0,1,16,5,0,0,0,0,0,0,15,2,7,4,0,0,0,0,11,16,14,4,0,0,1,12,16,15,1,0,0,0,7,16,4,16,1,0,0,0,1,12,9,16,4,0,0,0,0,1,11,15,2,0,0,8 +0,0,1,8,16,4,0,0,0,0,13,10,5,8,0,0,0,1,16,5,10,15,2,0,0,1,11,13,11,14,4,0,0,0,0,0,0,8,8,0,0,0,0,0,0,8,5,0,0,0,0,5,0,13,4,0,0,0,0,12,16,10,0,0,9 +0,2,10,15,14,9,2,0,0,4,11,4,5,14,8,0,0,0,0,0,8,16,3,0,0,0,0,6,14,3,0,0,0,0,0,8,16,7,0,0,0,0,0,0,7,16,1,0,0,2,4,4,7,15,1,0,0,3,12,14,12,6,0,0,3 +0,0,5,13,1,0,0,0,0,0,13,16,7,1,0,0,0,1,16,16,16,13,1,0,0,2,16,10,0,12,5,0,0,2,16,0,0,3,10,0,0,2,15,0,0,6,14,0,0,0,14,5,8,16,9,0,0,0,4,13,15,7,0,0,0 +0,1,10,16,15,0,0,0,0,7,15,5,8,0,0,0,0,10,12,0,0,0,0,0,0,4,16,4,2,9,5,0,0,0,10,15,15,15,8,0,0,0,12,16,13,1,0,0,0,3,16,13,16,1,0,0,0,0,12,16,14,1,0,0,8 +0,0,0,5,16,1,0,0,0,0,1,16,10,0,0,0,0,0,13,13,1,3,8,0,0,6,16,3,0,13,14,0,0,13,16,3,5,16,5,0,0,15,16,16,16,15,0,0,0,1,7,11,16,6,0,0,0,0,0,7,15,3,0,0,4 +0,1,11,16,16,7,0,0,0,0,8,7,12,12,0,0,0,0,0,0,12,8,0,0,0,0,0,2,14,4,0,0,0,1,13,16,16,11,4,0,0,1,12,16,10,8,3,0,0,0,11,11,0,0,0,0,0,3,16,5,0,0,0,0,7 +0,0,0,11,11,0,0,0,0,0,3,16,7,0,0,0,0,0,6,15,1,0,0,0,0,0,7,12,0,0,0,0,0,0,12,13,8,7,0,0,0,1,16,12,8,11,12,0,0,0,8,13,4,7,16,1,0,0,2,11,13,12,7,0,6 +0,0,7,15,14,4,0,0,0,0,13,13,10,11,0,0,0,0,0,1,0,15,1,0,0,0,0,0,1,16,0,0,0,0,0,0,6,14,0,0,0,0,0,4,15,8,0,0,0,0,5,14,16,16,9,0,0,0,5,11,7,4,9,2,2 +0,0,9,16,12,1,0,0,0,10,16,10,15,4,0,0,0,8,6,0,8,7,0,0,0,0,0,0,8,10,0,0,0,0,0,1,14,5,0,0,0,0,0,10,15,1,1,0,0,0,10,16,14,14,11,0,0,0,8,15,14,10,3,0,2 +0,0,0,9,14,0,0,0,0,0,6,16,8,0,0,0,0,0,14,14,0,6,15,0,0,9,16,3,3,16,10,0,0,13,16,4,11,15,1,0,0,13,16,16,16,12,2,0,0,2,4,10,16,2,0,0,0,0,0,9,15,2,0,0,4 +0,0,0,11,3,0,0,0,0,0,5,16,2,0,0,0,0,0,12,11,0,0,0,0,0,0,14,5,0,0,0,0,0,0,16,3,1,1,0,0,0,0,16,16,16,16,11,0,0,0,12,8,4,6,16,2,0,0,2,11,15,16,11,1,6 +0,0,0,7,15,1,0,0,0,0,2,14,10,0,2,0,0,0,10,12,1,2,15,3,0,5,16,3,0,9,14,0,0,14,12,2,2,16,5,0,0,14,16,16,16,16,1,0,0,5,8,12,16,4,0,0,0,0,0,9,16,6,0,0,4 +0,0,0,3,16,3,0,0,0,0,0,9,16,2,0,0,0,0,2,14,9,5,6,0,0,0,8,15,2,15,12,0,0,4,16,6,3,16,5,0,0,11,16,13,16,16,8,0,0,5,11,13,16,10,1,0,0,0,0,4,16,7,0,0,4 +0,0,4,14,14,4,0,0,0,5,14,3,1,4,0,0,0,8,12,0,2,2,0,0,0,4,15,12,16,16,8,0,0,0,14,16,9,0,0,0,0,4,16,11,15,2,0,0,0,2,15,8,12,12,0,0,0,0,4,12,16,10,0,0,8 +0,1,10,16,15,1,0,0,0,5,16,13,16,4,0,0,0,5,9,3,16,2,0,0,0,0,0,10,10,0,0,0,0,0,2,15,6,0,0,0,0,0,9,12,1,0,0,0,0,0,12,14,10,4,0,0,0,0,11,16,16,16,2,0,2 +0,0,2,13,10,4,0,0,0,0,0,16,16,11,0,0,0,0,0,13,16,15,0,0,0,0,1,14,16,11,0,0,0,0,4,16,16,6,0,0,0,0,3,16,14,2,0,0,0,0,7,16,16,3,0,0,0,0,4,13,16,4,0,0,1 +0,0,11,16,16,9,0,0,0,2,16,12,16,11,0,0,0,1,3,1,16,8,0,0,0,0,0,12,14,2,0,0,0,0,3,16,8,0,0,0,0,0,11,15,0,0,0,0,0,0,13,15,12,12,4,0,0,0,13,16,16,15,2,0,2 +0,1,10,16,16,14,1,0,0,1,11,5,4,15,0,0,0,0,0,0,6,10,0,0,0,0,1,13,12,1,0,0,0,0,2,15,16,12,1,0,0,0,0,0,3,14,4,0,0,1,6,0,2,14,3,0,0,2,12,16,15,8,0,0,3 +0,0,9,11,1,0,0,0,0,4,16,14,9,0,0,0,0,1,6,3,13,0,0,0,0,0,0,3,14,0,0,0,0,0,0,5,13,0,0,0,0,0,0,14,10,3,0,0,0,0,11,16,16,16,14,2,0,0,6,9,7,4,8,6,2 +0,0,13,13,12,12,2,0,0,0,14,16,16,14,3,0,0,0,10,16,5,0,0,0,0,0,1,10,14,1,0,0,0,0,0,2,16,4,0,0,0,0,0,0,12,8,0,0,0,0,4,6,15,5,0,0,0,1,15,16,15,1,0,0,5 +0,0,5,11,13,10,0,0,0,0,8,9,9,16,5,0,0,0,0,0,0,14,7,0,0,0,0,0,1,16,1,0,0,0,3,4,12,9,1,0,0,10,16,16,14,12,2,0,0,3,2,15,2,0,0,0,0,0,8,6,0,0,0,0,7 +0,0,7,16,11,0,0,0,0,5,15,12,16,3,0,0,0,6,16,14,16,7,0,0,0,1,8,12,16,8,0,0,0,0,0,0,6,15,2,0,0,0,0,0,0,16,7,0,0,0,0,0,2,13,8,0,0,0,8,16,16,14,4,0,9 
+0,0,4,15,14,1,0,0,0,2,16,14,16,9,0,0,0,5,16,1,10,16,0,0,0,4,16,16,16,16,1,0,0,0,9,12,10,16,6,0,0,0,0,0,0,13,7,0,0,0,3,6,5,14,8,0,0,0,6,16,16,15,2,0,9 +0,0,9,15,13,5,1,0,0,2,16,13,9,12,3,0,0,8,12,0,0,0,0,0,0,2,16,15,6,0,0,0,0,0,1,8,15,3,0,0,0,0,0,0,6,11,0,0,0,0,5,6,12,10,0,0,0,1,10,13,10,1,0,0,5 +0,0,7,16,12,5,0,0,0,0,7,16,16,7,0,0,0,0,3,16,16,8,0,0,0,0,5,16,16,7,0,0,0,0,11,16,15,2,0,0,0,0,14,16,14,0,0,0,0,0,14,16,16,3,0,0,0,0,6,13,14,5,0,0,1 +0,0,6,12,14,11,0,0,0,1,11,8,10,14,0,0,0,0,0,0,12,9,0,0,0,0,0,11,10,0,0,0,0,0,0,10,16,13,1,0,0,0,0,0,7,15,5,0,0,0,4,4,5,14,3,0,0,0,10,15,12,8,0,0,3 +0,0,4,15,11,0,0,0,0,1,15,6,14,3,0,0,0,3,14,8,14,10,0,0,0,1,9,12,7,13,0,0,0,0,0,0,0,14,2,0,0,0,0,0,0,13,4,0,0,0,7,4,3,13,4,0,0,0,7,13,15,9,0,0,9 +0,1,11,16,9,0,0,0,0,10,14,9,13,2,0,0,0,4,3,0,12,4,0,0,0,0,0,0,12,3,0,0,0,0,0,5,15,0,0,0,0,0,2,13,10,0,0,0,0,1,16,16,9,10,7,0,0,0,10,13,12,10,9,0,2 +0,0,13,16,14,6,0,0,0,3,16,14,12,15,2,0,0,7,16,2,0,0,0,0,0,12,16,10,6,0,0,0,0,6,16,16,16,6,0,0,0,0,0,1,10,13,0,0,0,0,1,4,13,14,0,0,0,0,12,16,16,10,0,0,5 +0,0,3,15,7,0,0,0,0,0,13,16,10,0,0,0,0,5,16,7,5,3,0,0,0,5,16,8,16,15,2,0,0,2,12,1,6,8,10,0,0,1,14,2,0,0,13,0,0,0,9,15,12,15,16,0,0,0,2,10,15,16,7,0,0 +0,0,0,4,16,4,0,0,0,0,0,3,16,6,0,0,0,0,0,10,14,1,9,2,0,0,3,15,4,9,15,0,0,3,15,9,3,14,10,0,2,15,16,15,16,16,3,0,1,10,9,9,16,10,0,0,0,0,0,5,16,2,0,0,4 +0,0,6,16,16,12,0,0,0,0,8,9,13,16,2,0,0,0,0,0,4,16,1,0,0,0,1,5,12,12,0,0,0,0,13,16,16,16,7,0,0,0,8,14,12,10,3,0,0,0,3,16,3,0,0,0,0,0,9,11,0,0,0,0,7 +0,1,14,13,16,12,1,0,0,9,16,13,7,8,1,0,0,10,16,11,7,0,0,0,0,1,13,16,16,6,0,0,0,0,0,0,5,12,0,0,0,0,0,0,5,11,0,0,0,0,1,6,15,9,0,0,0,0,13,15,8,1,0,0,5 +0,0,3,15,13,1,0,0,0,0,13,14,11,4,0,0,0,0,14,11,3,1,0,0,0,0,4,16,12,16,6,0,0,0,5,16,16,6,0,0,0,3,16,9,11,11,0,0,0,3,16,6,2,16,4,0,0,0,6,13,14,9,0,0,8 +0,0,4,10,16,14,4,0,0,0,10,16,16,16,4,0,0,0,16,16,16,8,0,0,0,0,16,16,16,0,0,0,0,0,16,16,16,0,0,0,0,4,16,16,16,0,0,0,0,0,16,16,16,0,0,0,0,0,6,14,14,0,0,0,1 +0,0,5,8,0,0,0,0,0,0,12,16,13,7,0,0,0,0,15,15,13,16,3,0,0,3,15,0,0,10,6,0,0,3,13,0,0,7,9,0,0,4,16,0,1,12,12,0,0,1,15,12,14,16,9,0,0,0,6,14,14,9,1,0,0 +0,2,14,16,9,0,0,0,0,8,16,13,16,3,0,0,0,10,9,5,16,2,0,0,0,1,0,8,14,0,0,0,0,0,0,14,10,0,0,0,0,0,5,16,5,0,0,0,0,3,16,16,11,6,1,0,0,2,13,16,16,16,12,0,2 +0,0,0,7,16,2,0,0,0,0,3,15,10,0,0,0,0,1,13,14,2,3,9,0,0,9,16,7,0,14,14,0,0,13,16,0,5,16,8,0,0,14,16,16,16,15,2,0,0,0,4,7,16,6,0,0,0,0,0,8,15,0,0,0,4 +0,1,9,15,13,0,0,0,0,6,16,16,16,2,0,0,0,3,8,8,16,4,0,0,0,0,0,12,13,0,0,0,0,0,1,16,9,0,0,0,0,0,9,16,2,0,0,0,0,0,12,14,10,8,1,0,0,0,13,16,16,16,5,0,2 +0,0,6,11,0,0,0,0,0,1,15,12,2,4,0,0,0,5,12,3,16,16,4,0,0,5,9,0,8,7,8,0,0,8,8,0,0,4,8,0,0,7,9,0,0,4,9,0,0,0,15,8,9,15,7,0,0,0,6,16,16,9,0,0,0 +0,0,6,13,16,12,1,0,0,3,16,11,8,9,1,0,0,3,16,5,2,8,5,0,0,1,13,14,16,16,6,0,0,0,5,16,16,3,0,0,0,0,14,12,13,10,0,0,0,0,15,11,10,14,0,0,0,0,4,15,16,7,0,0,8 +0,0,0,5,15,0,0,0,0,0,0,11,11,0,0,0,0,0,1,16,4,4,6,0,0,0,13,12,1,15,7,0,0,2,16,2,8,14,2,0,0,12,16,15,16,15,0,0,0,5,8,10,16,2,0,0,0,0,0,3,11,2,0,0,4 +0,0,2,14,16,11,0,0,0,0,11,15,12,10,2,0,0,1,15,3,0,0,0,0,0,1,16,5,0,0,0,0,0,0,12,16,7,0,0,0,0,0,0,9,16,7,0,0,0,0,3,10,15,12,0,0,0,0,3,16,15,7,0,0,5 +0,0,0,12,15,9,11,6,0,0,0,5,10,13,16,2,0,0,0,0,0,9,9,0,0,0,0,4,10,16,2,0,0,0,8,16,16,16,5,0,0,0,3,8,14,2,0,0,0,0,0,11,9,0,0,0,0,0,0,12,1,0,0,0,7 +0,0,6,9,0,0,0,0,0,0,7,16,10,13,7,0,0,0,1,8,16,14,1,0,0,0,0,1,13,7,0,0,0,0,12,16,16,10,1,0,0,0,3,15,10,8,2,0,0,0,2,16,2,0,0,0,0,0,4,13,0,0,0,0,7 
+0,0,0,0,6,15,12,0,0,0,1,11,14,13,15,0,0,1,12,10,0,11,7,0,0,5,15,1,3,16,3,0,0,5,16,13,15,14,0,0,0,0,4,4,11,13,0,0,0,0,0,0,8,13,0,0,0,0,0,0,8,8,0,0,9 +0,0,0,13,9,10,14,2,0,0,0,11,12,15,12,0,0,0,0,0,0,14,3,0,0,1,4,4,7,15,1,0,0,7,16,16,16,12,6,0,0,2,4,8,11,0,0,0,0,0,0,11,6,0,0,0,0,0,0,13,7,0,0,0,7 +0,1,13,13,1,0,0,0,0,9,16,16,7,0,0,0,0,5,5,13,8,0,0,0,0,0,2,16,6,0,0,0,0,0,11,15,1,0,0,0,0,4,16,8,1,0,0,0,0,4,16,16,16,16,6,0,0,1,11,14,16,16,11,0,2 +0,0,0,8,14,10,0,0,0,0,2,14,16,14,0,0,0,0,12,16,16,14,2,0,0,5,16,16,16,12,1,0,0,1,4,10,16,14,0,0,0,0,0,9,16,12,0,0,0,0,0,9,16,12,0,0,0,0,0,8,15,12,0,0,1 +0,0,0,8,16,10,8,12,0,0,0,12,16,16,16,11,0,0,0,0,0,14,14,0,0,0,0,1,8,16,11,0,0,0,1,15,16,16,7,0,0,0,2,9,16,8,0,0,0,0,0,7,16,3,0,0,0,0,0,10,12,0,0,0,7 +0,0,3,11,2,7,13,0,0,0,3,13,16,16,7,0,0,0,0,0,5,13,0,0,0,0,0,0,9,7,0,0,0,8,16,16,16,16,2,0,0,2,4,12,11,1,0,0,0,0,0,15,5,0,0,0,0,0,3,16,3,0,0,0,7 +0,0,0,0,15,7,0,0,0,0,0,7,16,4,0,0,0,0,1,15,11,1,5,0,0,2,14,12,1,12,13,0,0,7,16,11,8,16,11,0,0,12,16,16,16,16,3,0,0,1,8,5,14,15,0,0,0,0,0,0,13,12,0,0,4 +0,0,0,1,13,8,0,0,0,0,0,9,16,15,0,0,0,0,5,16,16,12,0,0,0,2,15,16,16,10,0,0,0,8,14,3,16,10,0,0,0,0,0,4,16,8,0,0,0,0,0,4,16,11,0,0,0,0,0,0,15,12,0,0,1 +0,0,0,3,11,16,11,0,0,0,6,16,11,16,7,0,0,0,14,8,0,15,1,0,0,8,16,4,12,11,0,0,0,1,11,15,16,11,0,0,0,0,0,0,15,5,0,0,0,0,0,0,16,6,0,0,0,0,0,2,16,5,0,0,9 +0,1,11,16,7,0,0,0,0,8,16,15,10,0,0,0,0,12,8,12,11,0,0,0,0,2,0,15,7,0,0,0,0,0,8,14,0,0,0,0,0,0,13,12,0,3,3,0,0,5,16,13,14,16,16,1,0,0,16,16,15,10,5,0,2 +0,0,2,9,0,0,0,0,0,0,11,8,0,0,0,0,0,1,16,0,0,0,0,0,0,4,11,0,0,0,0,0,0,7,8,3,8,3,0,0,0,6,11,13,16,16,4,0,0,0,14,16,7,12,12,0,0,0,3,14,16,14,6,0,6 +0,0,0,2,9,16,3,0,0,0,2,14,15,16,2,0,0,0,11,10,7,9,0,0,0,1,16,4,11,5,0,0,0,0,14,16,16,10,0,0,0,0,0,1,14,11,0,0,0,0,0,0,14,7,0,0,0,0,0,0,14,3,0,0,9 +0,0,0,9,13,1,0,0,0,0,3,15,7,1,0,0,0,0,8,10,0,0,0,0,0,0,13,5,0,0,0,0,0,0,14,16,12,3,0,0,0,0,13,10,7,15,1,0,0,0,5,12,2,11,7,0,0,0,0,8,16,16,4,0,6 +0,0,0,1,8,15,12,0,0,0,2,14,13,16,5,0,0,0,9,14,9,9,0,0,0,0,8,16,16,15,2,0,0,0,0,2,10,14,1,0,0,0,0,0,10,12,0,0,0,0,0,0,12,7,0,0,0,0,0,0,11,1,0,0,9 +0,0,2,15,7,0,0,0,0,0,7,16,6,0,0,0,0,0,14,15,1,3,1,0,0,5,16,7,3,16,8,0,0,13,15,0,10,16,4,0,0,13,16,16,16,15,0,0,0,2,6,15,15,5,0,0,0,0,4,15,4,0,0,0,4 +0,0,0,0,5,14,8,0,0,0,1,10,13,13,11,0,0,0,11,9,2,14,6,0,0,4,16,9,12,16,3,0,0,0,6,4,4,15,4,0,0,0,0,0,0,16,4,0,0,0,0,0,0,15,3,0,0,0,0,0,2,15,0,0,9 +0,0,6,16,12,1,0,0,0,0,15,14,14,13,0,0,0,2,16,6,5,16,2,0,0,0,15,4,2,16,6,0,0,1,16,5,1,16,6,0,0,2,16,2,11,14,1,0,0,0,16,5,15,5,0,0,0,0,7,16,7,0,0,0,0 +0,1,6,8,11,16,6,0,0,5,16,16,12,9,4,0,0,8,13,1,0,0,0,0,0,6,16,11,4,0,0,0,0,0,7,12,16,10,0,0,0,0,0,0,7,12,0,0,0,0,1,5,14,11,0,0,0,0,9,15,9,1,0,0,5 +0,0,8,12,8,8,10,0,0,0,5,10,13,16,8,0,0,0,0,0,5,11,0,0,0,2,8,12,15,12,2,0,0,8,12,15,12,8,4,0,0,0,2,13,2,0,0,0,0,0,8,11,0,0,0,0,0,0,10,5,0,0,0,0,7 +0,0,6,16,13,4,0,0,0,0,10,8,8,13,0,0,0,0,0,0,6,11,0,0,0,0,3,7,14,2,0,0,0,2,16,13,15,6,0,0,0,0,3,0,6,14,0,0,0,0,3,4,5,14,0,0,0,0,9,16,12,4,0,0,3 +0,0,4,12,15,8,0,0,0,4,15,10,10,12,0,0,0,13,10,2,16,4,0,0,0,6,12,15,9,0,0,0,0,0,2,16,13,3,0,0,0,0,10,9,6,13,1,0,0,0,10,9,0,10,8,0,0,0,2,13,16,14,5,0,8 +0,0,0,0,9,15,0,0,0,0,0,4,16,6,0,0,0,0,0,13,12,0,0,0,0,0,10,14,1,12,6,0,0,4,16,6,0,16,12,0,0,13,15,12,16,16,9,0,0,13,16,15,13,16,5,0,0,1,4,0,9,12,0,0,4 +0,0,1,13,15,7,0,0,0,0,9,14,9,15,1,0,0,2,16,5,0,10,4,0,0,5,13,5,0,4,8,0,0,8,8,0,0,7,8,0,0,3,14,1,0,9,7,0,0,0,13,13,9,16,4,0,0,0,3,13,16,10,0,0,0 +0,2,12,14,12,1,0,0,0,7,13,6,14,5,0,0,0,0,0,3,15,3,0,0,0,2,15,16,14,1,0,0,0,1,6,5,12,13,0,0,0,0,0,0,1,14,6,0,0,4,8,4,5,14,8,0,0,2,13,16,15,9,1,0,3 
+0,0,0,0,5,15,1,0,0,0,0,0,14,16,2,0,0,0,1,12,16,16,2,0,0,1,13,16,12,16,0,0,0,4,13,2,8,13,0,0,0,0,0,0,9,13,0,0,0,0,0,0,11,16,0,0,0,0,0,0,4,16,4,0,1 +0,0,7,12,12,10,2,0,0,1,16,15,12,15,9,0,0,0,16,7,0,1,2,0,0,0,11,16,10,1,0,0,0,0,0,3,14,13,1,0,0,0,0,0,3,16,1,0,0,0,8,7,15,12,0,0,0,0,11,12,6,0,0,0,5 +0,0,5,15,15,5,0,0,0,2,15,5,3,13,0,0,0,3,9,0,6,10,0,0,0,4,8,3,13,1,0,0,0,3,14,14,6,0,0,0,0,0,7,12,14,11,1,0,0,0,12,5,1,13,9,0,0,0,5,12,14,10,1,0,8 +0,0,6,12,14,10,0,0,0,4,16,13,10,13,0,0,0,8,9,0,0,0,0,0,0,6,14,10,7,0,0,0,0,0,8,9,15,11,0,0,0,0,0,0,1,14,7,0,0,0,4,8,8,15,5,0,0,0,5,16,12,6,0,0,5 +0,0,0,1,11,14,10,0,0,0,6,14,7,10,13,0,0,0,15,3,1,14,5,0,0,0,15,11,9,16,5,0,0,0,3,8,7,16,4,0,0,0,0,0,2,11,0,0,0,0,0,0,10,10,0,0,0,0,0,0,14,7,0,0,9 +0,4,15,14,11,4,0,0,0,2,11,8,14,16,1,0,0,0,0,5,15,9,0,0,0,0,9,16,12,0,0,0,0,0,2,8,15,12,1,0,0,0,0,0,2,16,8,0,0,1,4,4,10,16,5,0,0,4,16,15,11,3,0,0,3 +0,0,0,5,0,4,13,5,0,0,6,16,16,16,14,1,0,0,2,6,8,16,2,0,0,0,2,4,12,14,0,0,0,5,15,16,16,16,4,0,0,6,9,11,15,4,0,0,0,0,0,15,9,0,0,0,0,0,0,13,6,0,0,0,7 +0,0,7,16,13,6,0,0,0,2,14,5,7,15,2,0,0,3,7,1,10,14,0,0,0,5,14,14,13,1,0,0,0,0,6,16,9,0,0,0,0,0,10,7,13,2,0,0,0,0,14,4,13,4,0,0,0,0,9,14,11,1,0,0,8 +0,0,5,10,14,5,0,0,0,4,16,13,13,12,0,0,0,7,10,0,10,7,0,0,0,0,0,6,16,6,0,0,0,0,0,6,14,15,1,0,0,0,0,0,0,11,7,0,0,0,4,8,8,15,7,0,0,0,5,14,12,8,0,0,3 +0,0,2,15,13,5,0,0,0,0,5,13,12,15,0,0,0,4,14,13,15,14,0,0,0,8,16,16,15,3,0,0,0,0,13,11,9,12,0,0,0,0,15,2,0,15,1,0,0,0,13,8,4,14,3,0,0,0,2,12,13,12,2,0,8 +0,0,4,15,11,2,0,0,0,0,15,15,14,11,0,0,0,2,16,6,0,15,1,0,0,2,16,5,0,11,5,0,0,2,16,4,0,7,9,0,0,0,15,2,0,10,9,0,0,0,11,13,9,16,4,0,0,0,2,10,16,10,0,0,0 +0,0,1,11,15,8,0,0,0,0,11,11,4,16,0,0,0,6,11,1,3,13,0,0,0,3,14,6,14,2,0,0,0,0,2,16,10,0,0,0,0,0,4,11,10,7,0,0,0,0,5,8,5,13,0,0,0,0,1,12,16,5,0,0,8 +0,0,1,8,13,10,2,0,0,0,11,16,13,13,3,0,0,4,15,0,0,0,0,0,0,3,15,7,1,0,0,0,0,0,8,15,15,7,0,0,0,0,0,2,10,16,2,0,0,0,2,7,8,16,4,0,0,0,2,15,12,7,0,0,5 +0,0,7,14,16,8,0,0,0,0,5,10,13,7,0,0,0,0,0,0,13,3,0,0,0,0,4,8,16,8,4,0,0,2,16,16,13,12,4,0,0,1,3,14,2,0,0,0,0,0,2,14,0,0,0,0,0,0,8,13,0,0,0,0,7 +0,0,0,6,13,3,0,0,0,0,4,16,13,1,0,0,0,0,13,11,1,0,0,0,0,5,15,1,0,0,0,0,0,5,13,0,0,0,0,0,0,2,16,10,16,13,3,0,0,0,11,16,14,16,15,0,0,0,1,8,13,13,9,0,6 +0,0,0,2,9,15,15,0,0,0,4,14,12,9,12,0,0,2,14,6,0,10,10,0,0,6,16,6,7,16,8,0,0,1,13,16,11,15,7,0,0,0,0,0,1,16,3,0,0,0,0,0,7,15,0,0,0,0,0,0,11,9,0,0,9 +0,0,0,0,10,12,0,0,0,0,0,0,15,12,0,0,0,0,0,7,16,12,0,0,0,0,9,16,16,11,0,0,0,8,16,11,16,12,0,0,0,3,4,3,16,9,0,0,0,0,0,0,15,10,0,0,0,0,0,0,9,13,0,0,1 +0,0,0,0,9,13,0,0,0,0,0,3,15,12,0,0,0,0,2,13,16,14,0,0,0,4,14,14,14,14,0,0,0,5,11,1,9,13,0,0,0,0,0,0,11,11,0,0,0,0,0,0,10,10,0,0,0,0,0,0,9,9,0,0,1 +0,0,0,14,8,8,14,0,0,0,1,11,15,16,13,0,0,0,0,0,6,16,3,0,0,3,13,16,16,16,3,0,0,2,8,11,15,9,4,0,0,0,0,10,7,0,0,0,0,0,0,16,5,0,0,0,0,0,0,14,1,0,0,0,7 +0,0,6,16,14,3,0,0,0,0,9,5,5,11,0,0,0,0,3,2,3,12,0,0,0,2,15,14,12,3,0,0,0,0,3,15,12,8,0,0,0,0,10,6,0,11,2,0,0,0,13,3,0,9,7,0,0,0,5,14,14,12,4,0,8 +0,0,0,0,10,5,0,0,0,0,0,0,16,4,0,0,0,0,0,9,12,4,3,0,0,0,2,15,1,11,8,0,0,0,14,7,0,14,4,0,0,6,16,9,15,15,2,0,0,9,13,8,13,4,0,0,0,0,0,0,9,4,0,0,4 +0,2,13,9,1,0,0,0,0,8,15,13,10,0,0,0,0,2,6,1,12,0,0,0,0,0,0,4,12,0,0,0,0,0,0,12,6,0,0,0,0,0,8,12,0,0,0,0,0,2,16,11,14,16,9,0,0,2,15,12,11,5,1,0,2 +0,0,6,14,16,5,0,0,0,3,16,10,14,14,0,0,0,3,6,1,13,9,0,0,0,0,2,15,16,11,0,0,0,0,2,12,14,16,2,0,0,0,0,0,1,16,7,0,0,0,6,13,10,15,3,0,0,0,10,16,13,5,0,0,3 +0,0,5,13,10,0,0,0,0,1,15,16,16,4,0,0,0,3,11,0,10,7,0,0,0,0,1,0,13,0,0,0,0,0,0,5,11,0,0,0,0,0,3,15,3,0,0,0,0,0,8,16,16,15,2,0,0,0,6,13,12,10,2,0,2 
+0,0,1,13,16,16,9,0,0,0,13,14,12,12,11,0,0,9,16,5,0,0,0,0,0,9,16,6,2,0,0,0,0,3,15,16,15,3,0,0,0,0,3,11,16,10,0,0,0,0,4,16,16,7,0,0,0,0,0,15,15,2,0,0,5 +0,0,0,4,13,6,0,0,0,0,0,11,16,12,0,0,0,0,8,16,16,10,0,0,0,6,16,16,16,6,0,0,0,5,12,12,16,3,0,0,0,0,0,10,15,0,0,0,0,0,0,10,14,0,0,0,0,0,0,5,16,2,0,0,1 +0,0,3,0,6,15,0,0,0,9,16,14,16,15,0,0,0,5,7,8,14,11,0,0,0,0,1,4,13,11,2,0,0,0,8,16,16,16,12,0,0,0,1,8,15,11,2,0,0,0,0,0,12,12,0,0,0,0,0,0,8,13,0,0,7 +0,0,0,11,4,0,0,0,0,0,6,15,2,0,0,0,0,0,14,7,0,0,0,0,0,3,16,9,12,6,0,0,0,4,16,15,10,12,9,0,0,2,16,9,0,0,12,0,0,0,8,13,2,2,14,2,0,0,0,8,15,15,11,0,6 +0,0,3,12,12,1,0,0,0,0,15,15,13,11,0,0,0,3,16,4,0,14,2,0,0,5,15,5,0,5,7,0,0,8,8,0,0,4,8,0,0,4,14,0,0,7,11,0,0,0,14,11,12,16,3,0,0,0,3,14,12,4,0,0,0 +0,0,2,10,12,11,1,0,0,0,11,14,9,16,3,0,0,2,16,1,11,11,0,0,0,6,16,15,14,3,0,0,0,0,7,16,12,0,0,0,0,0,5,14,11,13,0,0,0,0,7,14,9,14,0,0,0,0,2,13,16,7,0,0,8 +0,0,0,0,3,13,4,0,0,0,0,0,11,16,8,0,0,0,0,8,16,16,6,0,0,1,9,16,9,16,2,0,0,7,16,5,5,16,0,0,0,0,2,0,8,13,0,0,0,0,0,0,9,12,0,0,0,0,0,0,5,15,3,0,1 +0,0,0,0,9,8,0,0,0,0,0,4,16,2,0,0,0,0,0,15,10,0,0,0,0,0,6,11,1,6,6,0,0,1,14,2,0,9,7,0,0,9,7,0,3,16,2,0,0,7,16,16,16,15,3,0,0,1,4,2,11,3,0,0,4 +0,0,0,0,12,14,0,0,0,0,0,1,16,16,1,0,0,0,5,14,16,12,0,0,0,5,16,16,16,8,0,0,0,9,15,9,16,8,0,0,0,0,1,2,16,7,0,0,0,0,0,4,16,10,0,0,0,0,0,0,10,16,6,0,1 +0,0,0,2,11,16,12,0,0,0,4,15,14,16,8,0,0,1,16,13,14,16,6,0,0,2,16,16,11,15,5,0,0,0,6,12,16,10,0,0,0,0,0,0,15,6,0,0,0,0,0,2,16,7,0,0,0,0,0,1,16,5,0,0,9 +0,0,7,14,12,1,0,0,0,2,16,12,16,8,0,0,0,2,10,0,12,8,0,0,0,0,5,12,16,3,0,0,0,0,5,12,15,13,1,0,0,0,0,0,5,16,3,0,0,0,12,8,4,13,3,0,0,0,9,16,14,9,0,0,3 +0,0,3,14,16,16,12,0,0,2,15,16,11,8,6,0,0,9,16,4,0,0,0,0,0,9,16,16,14,3,0,0,0,0,9,9,14,14,0,0,0,0,0,0,5,16,0,0,0,0,0,4,14,12,0,0,0,0,2,16,13,3,0,0,5 +0,0,0,1,15,4,0,0,0,0,0,6,16,1,0,0,0,0,0,14,12,0,0,0,0,0,10,16,2,3,0,0,0,4,16,8,3,16,4,0,0,12,16,13,15,16,4,0,0,8,14,12,16,14,5,0,0,0,0,1,16,6,0,0,4 +0,0,8,16,11,0,0,0,0,1,14,10,16,5,0,0,0,1,4,7,15,1,0,0,0,0,6,16,15,4,0,0,0,0,0,5,10,16,5,0,0,0,4,0,1,16,4,0,0,0,13,8,15,10,0,0,0,0,11,14,5,0,0,0,3 +0,0,10,16,16,16,2,0,0,0,7,8,10,16,0,0,0,0,0,0,10,14,5,0,0,0,8,15,16,16,8,0,0,0,14,16,11,0,0,0,0,0,3,16,2,0,0,0,0,0,10,10,0,0,0,0,0,0,12,3,0,0,0,0,7 +0,0,2,11,0,0,0,0,0,0,8,10,0,0,0,0,0,0,14,6,0,0,0,0,0,0,15,3,3,0,0,0,0,3,16,16,16,16,5,0,0,2,15,5,0,5,13,0,0,0,12,13,8,10,15,0,0,0,1,10,16,13,6,0,6 +0,0,1,11,14,12,1,0,0,0,11,12,9,16,3,0,0,7,6,6,12,6,0,0,0,6,13,10,14,1,0,0,0,0,12,16,4,0,0,0,0,0,14,13,14,4,0,0,0,0,11,12,11,15,0,0,0,0,1,9,14,14,1,0,8 +0,0,11,15,9,0,0,0,0,0,13,9,13,10,0,0,0,0,13,6,1,11,3,0,0,4,12,0,0,4,8,0,0,8,7,0,0,5,8,0,0,7,8,0,5,15,3,0,0,2,14,10,16,10,0,0,0,0,12,12,3,0,0,0,0 +0,0,9,16,10,0,0,0,0,0,4,10,15,2,0,0,0,0,13,0,4,13,0,0,0,4,16,0,0,13,2,0,0,7,12,0,0,10,6,0,0,8,13,1,2,13,8,0,0,5,16,16,16,16,2,0,0,0,9,16,15,8,0,0,0 +0,3,14,9,5,0,0,0,0,0,7,10,16,0,0,0,0,0,0,2,14,0,0,0,0,0,0,10,6,0,0,0,0,0,3,15,1,0,0,0,0,1,13,8,0,0,0,0,0,8,16,10,12,15,7,0,0,3,12,16,13,8,3,0,2 +0,0,7,15,6,0,0,0,0,0,5,8,14,7,0,0,0,0,7,2,4,13,2,0,0,0,14,3,0,12,4,0,0,2,16,0,1,14,5,0,0,5,13,0,7,16,3,0,0,1,16,9,15,13,0,0,0,0,9,15,10,1,0,0,0 +0,0,0,5,16,2,0,0,0,0,1,15,8,0,0,0,0,0,10,13,0,2,2,0,0,6,15,6,10,16,12,0,0,13,16,16,14,16,10,0,0,8,12,5,7,16,4,0,0,0,0,2,14,8,0,0,0,0,0,9,13,1,0,0,4 +0,0,2,14,11,1,0,0,0,0,8,13,2,0,0,0,0,0,12,8,0,0,0,0,0,0,16,4,0,0,0,0,0,0,16,12,15,11,4,0,0,2,16,12,4,5,14,0,0,0,13,11,4,12,15,1,0,0,3,11,16,12,3,0,6 +0,0,1,11,2,0,0,11,0,0,5,16,3,0,7,15,0,0,8,16,0,1,15,7,0,0,8,16,8,13,12,0,0,0,7,16,16,16,6,0,0,0,0,4,14,12,0,0,0,0,0,6,14,0,0,0,0,0,2,15,6,0,0,0,4 
+0,0,1,13,1,0,0,0,0,0,7,14,0,0,0,0,0,0,12,7,0,0,0,0,0,0,14,5,0,0,0,0,0,1,16,4,4,4,1,0,0,0,14,15,11,10,12,0,0,0,8,11,0,4,15,4,0,0,1,10,16,14,9,0,6 +0,0,0,4,12,15,5,0,0,1,10,15,14,16,7,0,0,3,16,16,16,16,7,0,0,0,3,4,3,13,8,0,0,0,0,0,2,16,4,0,0,0,0,0,11,11,0,0,0,0,0,1,14,5,0,0,0,0,0,3,16,2,0,0,9 +0,0,15,16,16,16,3,0,0,0,7,8,10,16,4,0,0,0,0,0,8,15,0,0,0,0,4,7,16,15,6,0,0,9,16,16,14,13,3,0,0,2,11,15,2,0,0,0,0,0,13,9,0,0,0,0,0,2,16,4,0,0,0,0,7 +0,0,6,13,16,11,0,0,0,11,16,11,4,16,4,0,0,3,13,13,9,16,1,0,0,0,0,10,16,8,0,0,0,0,0,13,16,9,0,0,0,0,7,14,6,13,0,0,0,0,8,13,10,15,0,0,0,0,5,14,14,6,0,0,8 +0,0,0,2,8,13,14,2,0,0,5,14,9,4,16,5,0,1,14,0,5,14,13,0,0,0,16,16,13,16,5,0,0,0,1,0,7,9,0,0,0,0,0,0,13,2,0,0,0,0,0,2,13,0,0,0,0,0,0,3,10,0,0,0,9 +0,0,0,2,13,14,1,0,0,2,9,16,16,16,2,0,0,12,16,16,16,16,2,0,0,10,12,3,16,12,0,0,0,0,0,5,16,8,0,0,0,0,0,5,16,5,0,0,0,0,0,5,16,7,0,0,0,0,0,2,15,9,0,0,1 +0,0,3,10,16,11,0,0,0,3,12,10,6,16,2,0,0,9,16,6,12,11,1,0,0,0,5,16,15,0,0,0,0,0,7,12,15,2,0,0,0,0,10,5,6,11,0,0,0,0,11,4,10,13,0,0,0,0,3,16,14,5,0,0,8 +0,0,0,12,14,6,0,0,0,1,5,1,8,16,3,0,0,4,15,12,16,16,3,0,0,0,12,16,12,2,0,0,0,4,16,14,11,0,0,0,0,4,15,5,15,2,0,0,0,0,9,16,15,8,0,0,0,0,0,10,16,9,0,0,8 +0,0,3,14,14,3,0,0,0,0,7,6,5,14,0,0,0,0,7,7,3,13,3,0,0,0,2,15,15,5,0,0,0,0,8,16,11,0,0,0,0,1,14,3,10,4,0,0,0,1,13,3,3,11,0,0,0,0,4,10,16,11,0,0,8 +0,0,4,12,16,4,0,0,0,0,15,15,6,15,0,0,0,3,16,11,0,8,4,0,0,4,11,3,0,4,8,0,0,5,8,0,0,9,7,0,0,1,12,0,0,13,5,0,0,0,13,10,14,11,0,0,0,0,4,12,13,1,0,0,0 +0,0,0,6,12,0,0,0,0,0,5,14,4,0,0,0,0,1,15,3,0,1,12,0,0,2,16,2,0,8,11,0,0,3,16,15,10,15,6,0,0,0,2,12,14,13,1,0,0,0,0,0,12,5,0,0,0,0,0,7,13,0,0,0,4 +0,0,0,0,6,15,1,0,0,0,0,3,14,16,4,0,0,0,4,15,16,16,2,0,0,4,16,13,6,16,0,0,0,5,7,0,7,16,0,0,0,0,0,0,8,16,0,0,0,0,0,0,10,16,0,0,0,0,0,0,4,12,0,0,1 +0,1,9,15,16,13,4,0,0,6,9,4,4,13,8,0,0,0,0,0,7,15,4,0,0,0,0,10,16,8,0,0,0,0,0,4,12,13,1,0,0,0,0,0,0,14,8,0,0,0,6,4,7,15,6,0,0,0,8,16,12,5,0,0,3 +0,0,0,12,3,0,0,0,0,0,6,13,0,0,0,0,0,0,11,7,0,0,0,0,0,0,13,2,2,2,0,0,0,1,16,11,16,16,6,0,0,0,15,12,4,14,13,0,0,0,10,11,7,16,12,0,0,0,0,11,15,11,2,0,6 +0,0,4,11,7,1,0,0,0,0,9,16,16,12,0,0,0,0,10,16,16,8,0,0,0,0,10,16,16,10,0,0,0,2,15,16,16,8,0,0,0,0,16,16,16,4,0,0,0,0,5,16,16,13,2,0,0,0,5,10,11,8,3,0,1 +0,0,1,8,14,8,0,0,0,7,16,15,10,5,0,0,0,14,10,2,0,0,0,0,0,4,14,8,0,0,0,0,0,0,2,13,11,0,0,0,0,0,0,1,12,7,0,0,0,0,2,13,10,16,0,0,0,0,0,13,12,11,1,0,5 +0,2,16,9,0,0,0,0,0,2,16,16,10,1,0,0,0,0,1,7,16,13,0,0,0,1,5,0,8,16,3,0,0,4,8,0,0,12,7,0,0,5,10,0,5,15,4,0,0,8,15,14,16,10,0,0,0,3,15,15,8,1,0,0,0 +0,2,8,10,13,15,8,0,0,1,16,15,10,7,2,0,0,0,10,14,2,0,0,0,0,0,0,12,12,0,0,0,0,0,0,0,11,10,0,0,0,0,0,0,1,14,3,0,0,0,0,2,7,15,3,0,0,0,12,16,10,4,0,0,5 +0,1,7,16,16,16,8,0,0,10,16,14,12,10,3,0,0,12,14,1,0,0,0,0,0,2,15,13,1,0,0,0,0,0,2,16,11,0,0,0,0,0,1,5,15,7,0,0,0,0,11,10,13,16,1,0,0,0,8,16,16,10,0,0,5 +0,1,13,16,14,6,1,0,0,0,13,7,8,16,4,0,0,0,2,1,12,15,2,0,0,0,2,15,13,3,0,0,0,0,3,14,15,6,0,0,0,0,0,1,8,16,3,0,0,4,15,10,5,16,5,0,0,3,12,15,16,13,1,0,3 +0,0,0,12,12,0,0,0,0,0,9,16,9,0,0,0,0,3,16,14,5,5,2,0,0,6,16,16,16,16,9,0,0,0,5,8,13,16,6,0,0,0,0,1,15,16,2,0,0,0,0,5,16,11,0,0,0,0,0,11,16,9,0,0,4 +0,0,11,14,16,8,0,0,0,1,12,12,14,16,3,0,0,0,0,0,4,16,4,0,0,0,0,0,7,16,3,0,0,0,7,8,15,16,11,0,0,5,16,16,16,8,0,0,0,0,7,13,14,0,0,0,0,0,15,15,3,0,0,0,7 +0,0,6,14,15,4,0,0,0,3,16,10,11,14,0,0,0,2,8,0,6,16,2,0,0,0,0,0,8,10,0,0,0,0,0,0,14,7,0,0,0,0,0,7,13,0,0,0,0,0,4,16,11,14,4,0,0,0,5,15,13,8,1,0,2 +0,0,0,5,12,0,0,0,0,0,0,13,13,0,0,0,0,0,5,16,3,0,0,0,0,0,8,15,0,0,0,0,0,0,10,12,8,6,0,0,0,2,15,15,12,14,9,0,0,0,8,15,6,12,12,0,0,0,0,8,13,15,5,0,6 
+0,0,7,10,12,16,14,1,0,0,10,15,12,15,15,1,0,0,0,0,1,13,11,0,0,0,0,0,5,16,10,0,0,0,7,12,15,16,9,0,0,0,9,15,16,3,0,0,0,0,3,15,8,0,0,0,0,0,9,15,2,0,0,0,7 +0,0,3,8,12,14,15,3,0,0,4,8,4,8,16,3,0,0,0,0,0,6,14,0,0,0,2,10,12,15,8,0,0,0,9,8,15,12,4,0,0,0,0,5,15,3,0,0,0,0,0,10,10,0,0,0,0,0,2,16,4,0,0,0,7 +0,0,1,15,0,0,0,0,0,0,4,15,1,0,0,0,0,0,12,9,0,0,0,0,0,1,16,9,2,0,0,0,0,5,16,13,14,10,2,0,0,2,16,5,0,10,9,0,0,1,13,12,8,12,11,0,0,0,1,12,13,10,3,0,6 +0,0,5,12,13,9,5,0,0,1,14,8,6,14,14,0,0,0,6,0,3,15,9,0,0,0,3,12,13,4,0,0,0,0,12,16,14,6,0,0,0,0,0,1,10,16,3,0,0,0,3,8,11,15,3,0,0,0,6,14,10,2,0,0,3 +0,0,5,10,15,9,0,0,0,6,14,8,9,16,4,0,0,4,2,2,13,13,0,0,0,0,0,8,16,1,0,0,0,0,0,0,9,13,1,0,0,0,1,2,0,12,7,0,0,0,9,9,4,11,10,0,0,0,4,13,13,10,1,0,3 +0,2,16,16,11,2,0,0,0,0,8,11,16,8,0,0,0,0,4,14,15,1,0,0,0,0,13,16,12,1,0,0,0,0,2,8,15,14,1,0,0,0,4,0,3,16,6,0,0,5,15,8,14,15,2,0,0,2,12,12,9,5,0,0,3 +0,1,9,12,15,16,7,0,0,10,16,15,12,11,3,0,0,13,16,2,0,0,0,0,0,5,16,13,2,0,0,0,0,0,4,15,15,1,0,0,0,0,2,4,15,9,0,0,0,0,14,14,16,11,0,0,0,0,11,16,12,1,0,0,5 +0,0,4,15,13,0,0,0,0,1,15,9,9,9,1,0,0,4,16,6,13,16,4,0,0,0,8,9,6,16,4,0,0,0,0,0,0,16,4,0,0,0,0,0,0,16,5,0,0,0,2,8,5,16,4,0,0,0,3,15,14,7,1,0,9 +0,0,0,6,10,14,6,0,0,0,14,16,12,16,8,0,0,0,8,16,16,16,8,0,0,5,12,8,1,13,6,0,0,0,0,0,4,16,3,0,0,0,0,0,12,11,0,0,0,0,0,1,16,2,0,0,0,0,0,9,11,0,0,0,9 +0,0,0,13,3,0,0,0,0,0,10,13,1,0,0,0,0,3,16,7,0,1,3,0,0,8,16,8,5,13,15,0,0,4,16,16,16,15,4,0,0,0,3,11,16,5,0,0,0,0,0,13,13,0,0,0,0,0,1,15,3,0,0,0,4 +0,0,0,0,13,13,0,0,0,0,2,12,16,16,0,0,0,3,15,16,16,13,0,0,0,11,15,4,14,12,0,0,0,2,2,0,16,11,0,0,0,0,0,0,15,11,0,0,0,0,0,3,16,10,0,0,0,0,0,0,15,15,0,0,1 +0,0,3,15,3,0,0,0,0,0,12,14,0,0,5,3,0,2,16,10,0,5,16,5,0,3,16,14,12,15,14,0,0,0,13,16,16,14,3,0,0,0,0,8,16,5,0,0,0,0,0,13,13,0,0,0,0,0,5,16,4,0,0,0,4 +0,0,0,4,10,16,14,0,0,0,6,16,12,11,16,2,0,3,15,15,10,15,16,2,0,1,15,16,10,15,14,0,0,0,0,0,1,16,8,0,0,0,0,0,7,15,3,0,0,0,0,0,12,12,0,0,0,0,0,1,16,8,0,0,9 +0,7,16,16,16,5,0,0,0,1,8,8,15,7,0,0,0,0,0,3,15,2,0,0,0,0,0,11,13,4,1,0,0,0,8,16,16,16,11,0,0,0,13,14,8,4,2,0,0,5,16,4,0,0,0,0,0,7,12,0,0,0,0,0,7 +0,0,2,8,15,14,0,0,0,1,14,12,8,16,0,0,0,6,12,1,5,13,0,0,0,1,2,1,14,5,0,0,0,0,0,9,13,0,0,0,0,0,0,15,4,0,0,0,0,0,0,16,8,11,7,0,0,0,0,9,13,7,0,0,2 +0,0,1,14,8,0,0,0,0,0,9,14,2,0,3,3,0,4,16,3,0,2,16,3,0,5,16,9,8,13,12,0,0,2,16,16,16,14,2,0,0,0,0,0,13,9,0,0,0,0,0,8,14,1,0,0,0,0,0,15,7,0,0,0,4 +0,0,5,10,14,6,0,0,0,13,16,15,10,3,0,0,0,15,8,0,0,0,0,0,0,8,11,1,0,0,0,0,0,0,7,13,3,0,0,0,0,0,0,5,13,5,0,0,0,0,6,8,13,14,1,0,0,0,4,12,13,11,0,0,5 +0,0,0,1,10,15,2,0,0,0,6,14,12,16,6,0,0,4,16,15,13,16,7,0,0,6,16,14,9,16,2,0,0,0,1,0,2,14,0,0,0,0,0,0,8,10,0,0,0,0,0,0,14,3,0,0,0,0,0,0,14,0,0,0,9 +0,1,6,13,13,4,0,0,0,9,16,14,15,15,0,0,0,5,5,0,6,13,0,0,0,0,0,1,12,11,0,0,0,0,0,7,14,1,0,0,0,0,1,16,8,0,0,0,0,0,8,16,9,13,10,0,0,0,4,14,16,9,2,0,2 +0,0,11,16,7,0,0,0,0,4,16,16,16,0,0,0,0,12,9,4,16,0,0,0,0,8,5,1,16,2,0,0,0,0,0,11,13,0,0,0,0,0,3,16,14,7,9,0,0,0,13,16,16,16,13,0,0,0,13,13,10,2,0,0,2 +0,0,1,13,7,0,0,0,0,0,12,15,1,0,0,0,0,6,16,5,0,3,5,0,0,10,16,9,11,16,14,0,0,6,14,16,16,16,5,0,0,0,0,1,16,11,0,0,0,0,0,9,16,2,0,0,0,0,0,14,13,0,0,0,4 +0,0,8,13,16,12,1,0,0,6,16,13,14,16,5,0,0,5,14,14,16,14,0,0,0,0,0,4,16,6,0,0,0,0,0,11,12,0,0,0,0,0,3,16,11,0,0,0,0,0,7,16,4,0,0,0,0,0,10,15,1,0,0,0,9 +0,0,0,9,14,1,0,0,0,0,5,16,4,0,1,1,0,0,14,13,0,3,13,6,0,1,16,16,16,16,14,1,0,0,7,12,14,16,2,0,0,0,0,0,14,10,0,0,0,0,0,6,16,2,0,0,0,0,0,13,5,0,0,0,4 
+0,0,5,12,14,4,0,0,0,10,16,15,14,15,0,0,0,8,5,1,10,13,0,0,0,0,0,0,14,10,0,0,0,0,0,11,16,3,0,0,0,0,4,15,13,0,0,0,0,0,12,16,12,9,11,0,0,0,2,14,16,13,7,0,2 +0,0,0,12,16,5,0,0,0,0,2,13,16,8,0,0,0,6,15,16,16,10,0,0,0,4,8,9,16,14,0,0,0,0,0,0,14,16,4,0,0,0,0,0,7,16,12,0,0,0,0,7,11,16,12,0,0,0,0,13,16,13,3,0,1 +0,0,5,10,13,9,0,0,0,7,16,16,16,14,0,0,0,4,6,5,16,7,0,0,0,0,0,1,16,8,0,0,0,0,0,0,16,16,0,0,0,0,0,0,12,16,3,0,0,0,1,11,16,8,0,0,0,0,6,13,7,0,0,0,3 +0,0,0,1,16,1,0,0,0,0,0,7,12,0,0,0,0,0,3,15,4,0,0,0,0,1,14,8,0,10,9,0,0,8,13,0,3,16,2,0,0,8,16,13,16,13,0,0,0,0,8,5,16,6,0,0,0,0,0,1,14,1,0,0,4 +0,0,13,10,5,10,0,0,0,4,16,10,8,14,0,0,0,0,14,6,15,10,0,0,0,0,4,16,14,0,0,0,0,0,2,16,8,0,0,0,0,0,8,16,12,0,0,0,0,0,12,16,10,0,0,0,0,0,10,13,2,0,0,0,8 +0,0,5,16,16,8,0,0,0,0,10,16,16,11,0,0,0,0,12,13,1,0,0,0,0,0,5,16,1,0,0,0,0,0,0,14,6,0,0,0,0,0,6,10,11,0,0,0,0,0,15,16,16,0,0,0,0,0,5,15,14,0,0,0,5 +0,0,0,10,12,0,0,0,0,0,8,16,9,4,0,0,0,0,15,10,0,0,0,0,0,2,16,6,0,0,0,0,0,3,16,10,10,0,0,0,0,1,15,16,16,10,0,0,0,0,9,13,11,16,3,0,0,0,1,9,15,13,0,0,6 +0,3,10,12,15,10,1,0,0,6,11,8,14,16,4,0,0,0,0,4,16,12,0,0,0,0,1,15,12,1,0,0,0,0,0,14,12,0,0,0,0,0,0,8,16,1,0,0,0,1,7,14,13,0,0,0,0,3,15,9,1,0,0,0,3 +0,1,9,14,16,16,3,0,0,2,16,16,15,12,3,0,0,0,10,16,1,0,0,0,0,0,1,15,7,0,0,0,0,0,0,12,12,0,0,0,0,0,0,9,16,0,0,0,0,2,14,15,14,0,0,0,0,1,16,16,4,0,0,0,5 +0,0,0,8,15,5,0,0,0,0,0,15,16,11,0,0,0,0,8,16,16,13,0,0,0,7,16,16,16,16,4,0,0,2,4,0,10,16,10,0,0,0,0,0,8,16,11,0,0,0,0,7,14,16,10,0,0,0,0,11,16,9,1,0,1 +0,0,3,10,8,0,0,0,0,4,16,13,12,11,0,0,0,6,16,11,15,16,1,0,0,0,7,13,16,12,0,0,0,0,0,0,13,10,0,0,0,0,0,0,15,9,0,0,0,0,5,12,16,5,0,0,0,0,7,13,8,0,0,0,9 +0,0,4,10,14,12,10,0,0,0,7,8,8,12,13,0,0,0,0,0,0,10,12,0,0,0,1,5,8,16,6,0,0,0,5,16,16,13,1,0,0,0,0,7,15,4,0,0,0,0,0,14,12,0,0,0,0,0,2,16,2,0,0,0,7 +0,0,6,14,15,6,0,0,0,14,16,16,16,16,0,0,0,8,8,10,16,13,0,0,0,0,0,14,16,7,0,0,0,0,0,6,16,12,0,0,0,0,0,0,12,16,6,0,0,0,0,7,16,16,5,0,0,0,6,16,12,1,0,0,3 +0,0,0,9,11,0,0,0,0,0,0,13,16,6,0,0,0,0,7,15,16,5,0,0,0,7,16,16,16,5,0,0,0,1,7,4,15,10,0,0,0,0,0,0,12,14,0,0,0,0,0,6,15,16,0,0,0,0,0,10,16,8,0,0,1 +0,0,0,8,14,4,0,0,0,0,8,16,13,1,0,0,0,1,15,13,0,0,0,0,0,2,16,4,0,0,0,0,0,7,16,3,8,7,1,0,0,0,16,16,16,16,11,0,0,0,9,16,16,16,12,0,0,0,1,10,16,12,1,0,6 +0,0,14,16,9,12,1,0,0,0,16,10,10,16,6,0,0,0,12,12,13,13,1,0,0,0,4,16,14,1,0,0,0,0,6,16,7,0,0,0,0,1,15,15,12,0,0,0,0,5,16,12,12,0,0,0,0,1,10,14,3,0,0,0,8 +0,0,9,14,7,0,0,0,0,2,16,16,16,15,0,0,0,3,16,16,16,10,0,0,0,0,9,16,16,10,0,0,0,0,0,3,16,10,0,0,0,0,0,11,14,9,0,0,0,0,5,16,15,3,0,0,0,0,10,12,4,0,0,0,9 +0,0,0,7,14,6,0,0,0,0,11,16,9,2,0,0,0,4,15,10,0,0,0,0,0,9,12,0,3,1,0,0,0,7,13,2,16,12,2,0,0,3,16,2,2,13,9,0,0,0,11,13,6,15,10,0,0,0,0,8,16,13,1,0,6 +0,2,9,14,8,7,0,0,0,11,16,16,16,16,0,0,0,6,16,16,16,16,2,0,0,0,0,0,8,16,7,0,0,0,0,0,8,16,4,0,0,0,0,1,15,16,0,0,0,0,4,14,16,7,0,0,0,1,13,14,2,0,0,0,9 +0,0,0,8,11,0,0,0,0,0,3,16,5,0,0,0,0,0,11,13,0,0,0,0,0,1,16,5,0,1,0,0,0,7,15,0,3,16,3,0,0,8,13,1,10,16,4,0,0,7,16,16,16,16,2,0,0,0,4,6,5,15,3,0,4 +0,1,12,16,8,2,0,0,0,2,16,8,16,15,0,0,0,0,16,9,16,6,0,0,0,0,8,16,8,0,0,0,0,0,9,16,1,0,0,0,0,0,15,16,8,0,0,0,0,3,16,16,9,0,0,0,0,0,7,14,2,0,0,0,8 +0,0,0,2,14,1,0,0,0,0,0,10,12,0,0,0,0,0,8,15,1,2,1,0,0,3,15,5,0,12,7,0,0,10,14,0,6,16,2,0,0,8,16,16,16,12,0,0,0,0,2,4,16,5,0,0,0,0,0,2,13,0,0,0,4 +0,0,5,11,14,9,1,0,0,6,15,12,13,16,2,0,0,8,5,4,16,9,0,0,0,0,0,4,16,8,0,0,0,0,0,2,15,11,0,0,0,0,0,0,10,16,3,0,0,0,1,11,16,10,1,0,0,0,5,13,6,0,0,0,3 
+0,0,0,0,10,10,2,0,0,0,0,6,16,7,0,0,0,0,3,16,7,0,0,0,0,0,14,13,0,3,0,0,0,4,16,6,4,16,4,0,0,5,16,9,13,16,6,0,0,0,7,12,16,14,1,0,0,0,0,0,12,14,0,0,4 +0,0,9,12,16,16,3,0,0,0,14,16,16,11,0,0,0,0,7,16,2,0,0,0,0,0,1,16,7,0,0,0,0,0,0,12,10,0,0,0,0,0,0,8,15,0,0,0,0,3,14,16,12,0,0,0,0,1,15,16,6,0,0,0,5 +0,0,5,11,12,2,0,0,0,5,16,14,14,12,0,0,0,5,7,0,10,10,0,0,0,0,0,1,14,6,0,0,0,0,0,14,12,1,0,0,0,0,11,16,1,0,0,0,0,0,15,13,8,5,2,0,0,0,3,12,16,15,9,0,2 +0,0,2,10,15,16,16,5,0,0,10,13,12,14,16,6,0,0,0,0,0,8,16,1,0,0,0,4,4,14,12,0,0,0,6,16,16,16,8,0,0,0,2,11,16,11,1,0,0,0,0,12,15,0,0,0,0,0,1,16,8,0,0,0,7 +0,0,4,14,15,3,0,0,0,0,14,16,16,9,0,0,0,0,11,16,16,15,0,0,0,0,3,13,16,16,1,0,0,0,0,0,3,16,6,0,0,0,0,0,13,16,1,0,0,0,2,11,16,12,0,0,0,0,6,15,10,1,0,0,9 +0,0,0,7,16,6,0,0,0,2,9,16,16,11,0,0,0,10,16,12,13,16,1,0,0,2,4,1,7,16,5,0,0,0,0,0,2,16,10,0,0,0,0,0,2,16,13,0,0,0,0,4,14,16,12,0,0,0,0,4,14,9,2,0,1 +0,0,6,16,16,11,0,0,0,0,15,16,14,8,0,0,0,0,9,13,0,0,0,0,0,0,3,15,1,0,0,0,0,0,0,11,8,0,0,0,0,0,2,13,14,0,0,0,0,0,16,16,13,0,0,0,0,0,7,16,6,0,0,0,5 +0,0,0,13,14,2,0,0,0,0,7,16,12,2,0,0,0,0,14,14,1,0,0,0,0,0,15,11,0,0,0,0,0,4,16,16,14,14,4,0,0,2,16,16,13,10,14,0,0,0,9,13,8,12,16,1,0,0,1,11,16,16,10,0,6 +0,0,0,8,14,16,6,0,0,3,13,16,13,15,14,0,0,3,12,6,0,11,16,0,0,0,0,0,4,15,13,0,0,0,0,12,16,16,11,0,0,0,1,11,16,11,0,0,0,0,0,7,16,5,0,0,0,0,0,11,12,0,0,0,7 +0,0,2,7,13,3,0,0,0,0,10,16,12,13,0,0,0,0,15,9,1,12,4,0,0,1,15,0,0,8,7,0,0,5,8,0,0,12,5,0,0,1,11,0,1,15,4,0,0,0,14,13,15,10,0,0,0,0,3,13,14,3,0,0,0 +0,0,0,14,8,1,0,0,0,0,9,16,16,4,0,0,0,11,16,16,14,0,0,0,0,5,8,14,16,2,0,0,0,0,0,7,16,6,0,0,0,0,0,4,16,12,0,0,0,0,1,6,16,14,0,0,0,0,2,14,16,9,0,0,1 +0,0,3,12,13,1,0,0,0,0,14,16,15,11,0,0,0,0,15,15,15,14,0,0,0,0,11,16,15,16,2,0,0,0,1,5,3,16,6,0,0,0,0,0,1,16,6,0,0,0,0,5,15,16,4,0,0,0,6,16,16,6,0,0,9 +0,0,9,16,7,5,0,0,0,0,9,16,16,16,0,0,0,0,7,16,15,6,0,0,0,0,7,16,7,0,0,0,0,0,14,16,5,0,0,0,0,3,16,16,4,0,0,0,0,1,16,16,5,0,0,0,0,0,7,12,1,0,0,0,8 +0,0,0,1,11,5,0,0,0,0,0,13,12,1,0,0,0,0,8,14,3,0,0,0,0,2,16,6,2,12,4,0,0,7,16,7,8,15,5,0,0,2,16,16,16,11,0,0,0,0,4,10,16,7,0,0,0,0,0,3,14,4,0,0,4 +0,0,1,14,15,3,0,0,0,0,9,16,16,8,0,0,0,9,16,16,14,10,0,0,0,0,3,8,16,14,0,0,0,0,0,2,16,16,2,0,0,0,0,0,13,16,9,0,0,0,1,10,16,16,9,0,0,0,2,10,15,10,0,0,1 +0,0,4,12,16,9,0,0,0,0,13,10,4,14,3,0,0,5,13,1,0,12,4,0,0,4,12,0,0,8,8,0,0,6,12,0,0,10,7,0,0,3,15,0,2,16,3,0,0,0,16,13,15,11,0,0,0,0,6,15,10,0,0,0,0 +0,0,1,10,13,0,0,0,0,0,11,16,9,0,0,0,0,0,15,12,0,0,0,0,0,3,16,5,0,0,0,0,0,5,16,8,12,10,1,0,0,2,16,8,10,15,9,0,0,0,9,14,8,12,15,0,0,0,0,8,14,15,8,0,6 +0,0,1,9,13,16,8,0,0,0,12,13,9,11,14,0,0,0,0,0,0,9,12,0,0,0,0,5,9,15,10,0,0,0,5,16,16,16,5,0,0,0,0,0,13,12,0,0,0,0,0,2,16,7,0,0,0,0,0,9,16,0,0,0,7 +0,0,6,16,16,10,0,0,0,5,16,16,13,6,0,0,0,1,15,15,1,0,0,0,0,0,4,16,5,0,0,0,0,0,0,14,11,0,0,0,0,0,0,13,14,0,0,0,0,0,9,16,11,0,0,0,0,0,8,15,5,0,0,0,5 +0,0,0,12,9,0,0,0,0,0,4,16,16,2,0,0,0,5,15,16,16,3,0,0,0,6,14,13,15,12,0,0,0,0,0,0,8,16,2,0,0,0,0,0,0,16,10,0,0,0,0,7,9,15,15,0,0,0,0,10,16,14,5,0,1 +0,2,15,12,3,6,0,0,0,5,16,13,6,16,6,0,0,6,16,1,3,16,2,0,0,0,15,11,15,14,0,0,0,0,9,16,16,3,0,0,0,0,13,16,6,0,0,0,0,8,16,16,2,0,0,0,0,3,15,13,0,0,0,0,8 +0,0,5,13,14,1,0,0,0,0,14,16,16,9,0,0,0,0,10,16,16,14,0,0,0,0,2,12,14,16,2,0,0,0,0,0,8,16,2,0,0,0,0,4,15,16,2,0,0,0,5,16,16,14,0,0,0,0,4,14,15,1,0,0,9 +0,0,1,13,10,0,0,0,0,0,10,16,7,0,0,0,0,3,16,7,0,0,0,0,0,3,16,3,0,0,0,0,0,8,16,6,8,7,0,0,0,3,15,16,16,16,8,0,0,0,9,16,16,16,5,0,0,0,0,8,15,9,0,0,6 
+0,1,7,14,16,11,0,0,0,11,16,12,15,16,1,0,0,8,4,3,16,10,0,0,0,0,0,1,16,7,0,0,0,0,0,0,16,11,0,0,0,0,0,0,12,16,0,0,0,0,2,7,15,13,0,0,0,0,10,15,9,1,0,0,3 +0,1,9,14,7,2,0,0,0,8,16,11,16,14,2,0,0,5,16,14,16,16,4,0,0,0,4,7,10,16,7,0,0,0,0,0,4,16,8,0,0,0,0,0,6,16,9,0,0,0,4,11,16,11,0,0,0,0,12,14,8,0,0,0,9 +0,0,0,0,13,6,0,0,0,0,1,10,13,3,0,0,0,0,5,16,5,0,0,0,0,2,15,9,0,1,0,0,0,7,16,1,5,16,6,0,0,8,16,12,16,14,0,0,0,2,11,13,16,12,0,0,0,0,0,1,14,5,0,0,4 +0,0,5,12,13,1,0,0,0,3,15,14,7,10,0,0,0,0,15,7,14,16,2,0,0,0,8,16,16,9,0,0,0,0,3,16,16,1,0,0,0,0,12,16,16,6,0,0,0,1,16,16,16,7,0,0,0,0,6,14,12,1,0,0,8 +0,0,9,16,16,8,0,0,0,2,16,16,13,4,0,0,0,3,16,6,1,0,0,0,0,0,11,11,0,0,0,0,0,0,2,14,5,0,0,0,0,0,0,12,10,0,0,0,0,0,10,16,15,0,0,0,0,0,10,15,6,0,0,0,5 +0,0,13,10,2,8,0,0,0,2,16,13,13,14,0,0,0,0,14,4,12,11,0,0,0,0,12,13,16,5,0,0,0,0,3,16,13,0,0,0,0,0,9,16,9,0,0,0,0,0,16,16,10,0,0,0,0,0,11,13,2,0,0,0,8 +0,1,12,16,10,1,0,0,0,11,15,15,16,8,0,0,0,9,16,16,16,14,0,0,0,0,11,7,6,16,8,0,0,0,0,0,8,13,2,0,0,0,0,8,15,12,0,0,0,0,7,16,15,3,0,0,0,0,9,8,1,0,0,0,9 +0,0,0,0,11,9,0,0,0,0,0,8,15,3,0,0,0,0,4,15,5,0,0,0,0,1,14,9,0,5,3,0,0,8,15,0,1,16,7,0,0,12,15,12,15,15,3,0,0,6,15,12,15,12,0,0,0,0,0,0,10,4,0,0,4 +0,0,4,15,16,6,0,0,0,0,14,15,8,14,2,0,0,7,12,2,0,8,4,0,0,7,8,0,0,5,8,0,0,8,8,0,0,9,8,0,0,3,11,0,0,10,7,0,0,0,15,7,8,14,2,0,0,0,5,12,14,6,0,0,0 +0,0,4,13,12,1,0,0,0,2,15,12,11,7,0,0,0,1,12,13,15,14,0,0,0,0,3,16,16,5,0,0,0,0,4,16,11,0,0,0,0,0,10,16,16,0,0,0,0,0,14,16,13,0,0,0,0,0,8,13,3,0,0,0,8 +0,0,5,15,15,2,0,0,0,4,15,11,16,4,0,0,0,2,4,6,16,2,0,0,0,0,0,14,10,0,0,0,0,0,6,16,2,0,0,0,0,0,9,13,0,0,0,0,0,0,10,15,8,4,3,0,0,0,3,14,16,14,4,0,2 +0,0,4,9,7,13,1,0,0,1,16,6,6,14,4,0,0,7,13,0,0,10,8,0,0,8,6,0,0,12,7,0,0,8,5,0,0,13,4,0,0,8,10,0,5,16,4,0,0,1,15,12,15,10,0,0,0,0,5,14,9,1,0,0,0 +0,1,6,14,10,1,0,0,0,9,16,15,16,13,0,0,0,6,6,9,16,7,0,0,0,0,0,14,10,0,0,0,0,0,0,8,15,5,0,0,0,0,0,0,13,13,0,0,0,0,2,9,15,8,0,0,0,0,10,16,7,0,0,0,3 +0,0,8,16,6,0,0,0,0,0,13,10,8,8,0,0,0,0,8,12,13,15,3,0,0,0,6,16,16,6,0,0,0,0,11,16,6,0,0,0,0,2,14,14,11,0,0,0,0,1,16,11,15,1,0,0,0,0,9,16,10,0,0,0,8 +0,0,6,13,11,4,0,0,0,5,16,10,14,12,0,0,0,7,8,1,14,9,0,0,0,0,0,5,15,3,0,0,0,0,2,15,10,0,0,0,0,0,12,14,0,0,0,0,0,4,16,12,4,6,5,0,0,1,9,14,13,12,5,0,2 +0,0,2,13,8,0,0,0,0,0,12,15,16,11,0,0,0,2,16,3,3,13,4,0,0,5,13,0,0,9,7,0,0,7,8,0,0,13,3,0,0,3,14,0,1,15,2,0,0,0,14,10,12,12,0,0,0,0,2,13,12,3,0,0,0 +0,0,6,16,16,7,0,0,0,1,15,16,14,10,0,0,0,0,15,11,0,0,0,0,0,0,6,16,2,0,0,0,0,0,0,10,10,0,0,0,0,0,0,7,14,0,0,0,0,0,5,12,16,2,0,0,0,0,9,16,10,0,0,0,5 +0,0,3,9,13,14,1,0,0,4,16,15,11,14,8,0,0,0,2,0,2,14,6,0,0,0,0,0,10,16,5,0,0,0,5,16,16,16,7,0,0,0,1,10,16,5,0,0,0,0,0,8,16,0,0,0,0,0,0,10,10,0,0,0,7 +0,0,0,11,12,0,0,0,0,0,8,16,12,0,0,0,0,0,15,14,1,0,0,0,0,1,15,8,0,0,0,0,0,3,16,6,1,0,0,0,0,1,15,16,16,16,10,0,0,0,8,16,16,16,16,3,0,0,0,7,15,16,9,0,6 +0,0,1,15,9,0,0,0,0,0,4,16,16,2,0,0,0,5,15,16,16,5,0,0,0,2,8,11,16,12,0,0,0,0,0,2,16,16,2,0,0,0,0,0,13,16,8,0,0,0,2,8,13,16,8,0,0,0,1,11,14,12,2,0,1 +0,0,0,10,11,0,0,0,0,0,3,16,5,8,5,0,0,0,10,14,2,16,2,0,0,4,15,5,8,12,0,0,0,12,16,12,15,16,6,0,0,14,16,16,16,14,2,0,0,0,0,11,13,0,0,0,0,0,0,14,5,0,0,0,4 +0,0,6,16,12,1,0,0,0,0,16,10,13,7,0,0,0,0,14,6,10,12,0,0,0,0,5,14,16,16,6,0,0,0,0,0,4,11,9,0,0,0,0,0,0,7,13,0,0,0,10,10,4,11,12,0,0,0,6,14,12,12,5,0,9 +0,0,6,14,10,0,0,0,0,0,8,16,16,0,0,0,0,0,8,16,16,1,0,0,0,1,13,16,14,0,0,0,0,3,13,16,13,0,0,0,0,0,7,16,16,0,0,0,0,0,6,16,16,9,0,0,0,0,5,14,16,13,4,0,1 
+0,0,2,15,5,0,0,0,0,0,5,15,1,7,0,0,0,0,10,10,6,16,0,0,0,5,15,2,13,11,0,0,0,14,15,12,16,16,6,0,0,14,16,16,16,14,3,0,0,1,4,15,11,1,0,0,0,0,2,15,7,0,0,0,4 +0,0,13,16,6,0,0,0,0,6,16,13,15,0,0,0,0,4,15,7,16,0,0,0,0,0,3,10,13,0,0,0,0,0,0,14,10,0,0,0,0,1,13,16,1,2,0,0,0,8,16,16,15,16,2,0,0,1,11,15,16,16,3,0,2 +0,0,9,16,9,1,0,0,0,5,16,9,16,11,0,0,0,8,12,0,15,12,0,0,0,1,15,16,16,16,2,0,0,0,0,5,4,16,4,0,0,0,0,0,0,13,7,0,0,1,12,12,12,15,9,0,0,1,10,14,8,8,1,0,9 +0,1,13,16,9,0,0,0,0,6,16,14,13,0,0,0,0,5,11,8,15,0,0,0,0,0,0,12,9,0,0,0,0,0,3,16,6,0,0,0,0,1,13,13,1,0,0,0,0,11,16,16,16,16,5,0,0,2,12,14,15,16,5,0,2 +0,0,10,12,2,0,0,0,0,0,16,14,8,0,0,0,0,0,14,7,12,0,0,0,0,0,1,5,12,0,0,0,0,0,0,9,9,0,0,0,0,0,3,16,2,0,0,0,0,4,16,16,10,6,2,0,0,1,8,8,11,13,10,0,2 +0,3,15,11,1,0,0,0,0,8,13,12,7,0,0,0,0,5,5,9,9,0,0,0,0,0,2,14,10,0,0,0,0,0,3,12,15,13,1,0,0,0,0,0,1,15,7,0,0,4,11,5,10,16,4,0,0,4,12,13,12,3,0,0,3 +0,0,4,15,14,2,0,0,0,0,14,8,16,2,0,0,0,0,6,3,16,0,0,0,0,0,0,12,16,5,0,0,0,0,0,7,15,16,4,0,0,0,0,0,0,16,4,0,0,0,8,12,10,15,3,0,0,0,6,13,13,4,0,0,3 +0,0,3,15,16,16,3,0,0,0,3,9,13,16,2,0,0,0,0,0,10,16,0,0,0,0,6,12,16,16,9,0,0,0,15,16,16,14,7,0,0,0,0,10,15,1,0,0,0,0,2,16,10,0,0,0,0,0,7,15,4,0,0,0,7 +0,0,8,16,16,16,7,0,0,0,16,12,8,8,4,0,0,2,16,6,0,0,0,0,0,7,16,16,16,8,0,0,0,2,12,9,9,16,3,0,0,0,0,0,1,16,3,0,0,0,8,11,12,16,2,0,0,0,10,16,14,6,0,0,5 +0,2,14,10,1,0,0,0,0,6,13,13,6,0,0,0,0,8,5,6,8,0,0,0,0,3,2,8,6,0,0,0,0,0,0,11,4,0,0,0,0,0,6,14,0,1,0,0,0,3,16,15,12,15,7,0,0,2,13,9,8,9,7,0,2 +0,0,6,12,12,12,2,0,0,1,16,12,12,12,5,0,0,5,12,5,1,0,0,0,0,8,16,16,14,0,0,0,0,2,6,0,16,1,0,0,0,0,0,0,15,1,0,0,0,0,8,11,13,0,0,0,0,0,9,9,3,0,0,0,5 +0,0,9,6,0,0,0,0,0,0,16,4,0,0,0,0,0,2,15,0,0,0,0,0,0,6,12,1,2,0,0,0,0,7,15,14,16,11,1,0,0,4,16,10,4,16,5,0,0,0,16,11,8,16,6,0,0,0,6,14,14,9,0,0,6 +0,1,9,12,12,15,6,0,0,1,16,11,8,8,4,0,0,6,16,5,4,2,0,0,0,7,16,16,16,15,2,0,0,0,2,2,3,14,6,0,0,0,0,0,2,14,6,0,0,2,11,8,12,13,2,0,0,1,11,16,10,2,0,0,5 +0,0,0,11,16,10,0,0,0,0,0,15,16,8,0,0,0,0,7,16,16,6,0,0,0,4,16,16,16,4,0,0,0,1,4,12,16,4,0,0,0,0,0,9,16,7,0,0,0,0,0,11,16,15,0,0,0,0,0,8,14,11,2,0,1 +0,0,2,15,16,10,0,0,0,0,1,12,14,16,0,0,0,0,0,0,5,16,2,0,0,0,0,0,8,15,1,0,0,0,7,8,14,15,4,0,0,5,16,16,16,15,2,0,0,1,4,14,12,0,0,0,0,0,3,15,5,0,0,0,7 +0,0,7,15,12,5,0,0,0,0,15,7,6,16,2,0,0,3,16,3,10,16,6,0,0,0,14,16,16,11,2,0,0,0,1,14,16,3,0,0,0,0,2,14,15,12,0,0,0,0,12,8,8,16,0,0,0,0,9,16,14,10,0,0,8 +0,0,10,11,10,2,0,0,0,0,16,14,14,10,0,0,0,3,16,13,12,9,0,0,0,4,16,16,14,15,2,0,0,0,0,0,0,12,4,0,0,0,0,0,0,13,7,0,0,0,7,8,14,14,1,0,0,0,10,13,9,0,0,0,5 +0,0,1,12,16,3,0,0,0,0,7,11,8,12,0,0,0,2,14,1,0,14,1,0,0,4,14,0,0,11,7,0,0,3,13,0,0,5,8,0,0,0,13,4,0,9,7,0,0,0,10,13,8,14,1,0,0,0,1,9,14,6,0,0,0 +0,0,4,15,16,15,2,0,0,0,16,12,10,15,9,0,0,2,16,5,0,9,11,0,0,0,15,6,0,8,10,0,0,1,16,7,0,13,6,0,0,2,16,10,4,16,0,0,0,0,10,16,16,8,0,0,0,0,2,14,12,2,0,0,0 +0,0,14,10,0,0,0,0,0,2,16,14,4,0,0,0,0,0,9,8,8,0,0,0,0,0,0,10,4,0,0,0,0,0,2,15,2,0,0,0,0,2,13,9,0,2,1,0,0,8,16,13,13,16,5,0,0,1,9,15,16,16,7,0,2 +0,0,0,10,16,13,7,0,0,0,2,9,10,15,11,0,0,0,0,0,0,13,8,0,0,0,1,7,9,16,5,0,0,0,7,16,16,15,5,0,0,0,0,0,14,3,0,0,0,0,0,6,16,1,0,0,0,0,0,16,8,0,0,0,7 +0,0,4,14,13,3,0,0,0,1,15,11,16,9,0,0,0,2,15,0,10,14,0,0,0,0,13,11,13,16,4,0,0,0,1,11,12,14,7,0,0,0,0,0,0,10,9,0,0,3,16,10,6,9,12,0,0,0,6,9,12,13,6,0,9 +0,2,11,16,5,0,0,0,0,10,14,12,12,0,0,0,0,3,2,8,11,0,0,0,0,0,0,11,12,0,0,0,0,0,1,13,16,10,0,0,0,0,0,0,4,16,5,0,0,0,8,6,6,16,10,0,0,2,12,13,12,10,1,0,3 
+0,0,0,10,13,3,0,0,0,0,0,13,16,7,0,0,0,0,4,16,16,5,0,0,0,2,16,16,16,6,0,0,0,7,16,16,16,11,0,0,0,0,0,8,16,12,0,0,0,0,0,15,16,14,0,0,0,0,0,8,13,9,0,0,1 +0,0,5,14,16,15,2,0,0,0,12,14,11,16,5,0,0,0,3,0,8,15,0,0,0,0,0,3,15,11,0,0,0,1,11,16,16,15,6,0,0,1,13,16,16,13,3,0,0,0,3,16,9,0,0,0,0,0,6,16,2,0,0,0,7 +0,0,6,13,2,0,0,0,0,3,15,14,8,0,0,0,0,8,6,5,12,0,0,0,0,3,2,4,10,0,0,0,0,0,0,8,8,0,0,0,0,0,0,13,4,0,0,0,0,0,10,16,16,16,7,0,0,0,7,12,10,13,7,0,2 +0,0,11,7,0,0,0,0,0,3,16,4,0,0,0,0,0,2,15,2,0,0,0,0,0,3,16,4,0,0,0,0,0,8,16,16,16,10,1,0,0,5,16,7,3,16,8,0,0,0,13,14,11,16,6,0,0,0,9,16,11,2,0,0,6 +0,0,3,14,16,16,4,0,0,0,4,9,10,16,5,0,0,0,0,0,7,16,2,0,0,0,3,8,13,16,7,0,0,1,16,16,16,16,9,0,0,0,7,11,15,3,0,0,0,0,0,15,12,0,0,0,0,0,5,16,5,0,0,0,7 +0,0,0,6,15,10,1,0,0,0,1,11,16,16,5,0,0,0,8,16,16,16,6,0,0,8,16,16,16,16,6,0,0,0,3,7,16,16,4,0,0,0,0,4,16,16,4,0,0,0,0,6,16,16,7,0,0,0,0,4,14,14,6,0,1 +0,0,4,14,11,2,0,0,0,0,8,11,5,16,0,0,0,0,8,13,9,15,6,0,0,0,3,16,16,12,1,0,0,0,11,14,12,0,0,0,0,4,11,1,15,3,0,0,0,6,11,4,7,12,0,0,0,1,8,14,16,9,0,0,8 +0,0,7,15,13,2,0,0,0,4,16,6,13,8,0,0,0,0,7,1,13,7,0,0,0,0,0,2,16,11,0,0,0,0,0,1,12,15,6,0,0,0,0,0,0,5,11,0,0,3,13,9,7,13,7,0,0,0,6,13,15,9,1,0,3 +0,0,5,12,12,5,0,0,0,0,13,4,5,16,0,0,0,0,15,1,12,14,0,0,0,0,9,16,16,6,0,0,0,0,7,16,16,1,0,0,0,1,13,4,7,11,0,0,0,2,13,1,5,16,0,0,0,0,6,9,9,6,0,0,8 +0,1,11,14,0,0,0,0,0,4,15,14,6,0,0,0,0,6,9,7,8,0,0,0,0,3,5,6,11,0,0,0,0,0,0,9,8,0,0,0,0,0,1,14,7,0,0,0,0,1,15,16,16,16,6,0,0,0,8,8,12,13,4,0,2 +0,0,5,16,16,16,6,0,0,0,5,10,11,16,3,0,0,0,0,0,10,14,0,0,0,0,1,4,16,10,0,0,0,0,10,16,16,16,6,0,0,0,6,14,13,12,3,0,0,0,2,16,6,0,0,0,0,0,7,14,1,0,0,0,7 +0,0,1,9,13,1,0,0,0,0,11,11,13,9,0,0,0,2,15,0,4,16,4,0,0,8,9,0,0,13,6,0,0,5,12,0,0,9,8,0,0,0,15,3,0,8,8,0,0,0,6,14,4,11,7,0,0,0,0,11,16,13,2,0,0 +0,0,5,13,15,12,1,0,0,0,16,12,4,4,1,0,0,6,16,5,4,3,0,0,0,6,16,16,16,15,2,0,0,0,3,0,0,12,6,0,0,0,0,0,0,14,5,0,0,0,0,0,8,16,1,0,0,0,4,14,16,7,0,0,5 +0,0,10,12,12,5,0,0,0,0,8,16,16,14,0,0,0,0,8,16,16,11,0,0,0,0,13,16,16,7,0,0,0,0,9,16,16,1,0,0,0,0,9,16,16,7,0,0,0,1,15,16,16,7,0,0,0,0,7,12,12,9,0,0,1 +0,0,6,13,12,2,0,0,0,6,16,12,16,13,3,0,0,7,13,5,16,16,8,0,0,1,14,16,15,7,0,0,0,0,3,16,14,1,0,0,0,0,4,16,16,11,0,0,0,0,6,16,13,16,4,0,0,0,7,15,12,10,0,0,8 +0,0,4,11,10,2,0,0,0,2,16,5,14,9,0,0,0,1,15,4,13,16,3,0,0,0,4,16,15,4,0,0,0,0,2,16,11,0,0,0,0,0,9,11,14,7,0,0,0,0,14,3,5,15,0,0,0,0,5,8,12,10,0,0,8 +0,0,8,15,12,3,0,0,0,5,16,6,10,14,0,0,0,7,14,7,14,16,2,0,0,0,11,16,13,6,1,0,0,0,11,16,12,0,0,0,0,0,16,7,10,12,0,0,0,4,16,0,2,16,6,0,0,1,10,16,16,15,2,0,8 +0,0,2,9,16,5,0,0,0,0,14,9,10,15,4,0,0,4,16,2,10,16,11,0,0,2,15,16,16,9,0,0,0,0,2,16,16,11,0,0,0,0,10,9,7,16,0,0,0,0,10,11,8,16,0,0,0,0,2,15,14,8,0,0,8 +0,0,0,5,14,0,0,0,0,0,0,14,11,0,0,0,0,0,1,16,5,0,0,0,0,0,5,16,1,0,0,0,0,0,12,16,12,4,0,0,0,0,16,16,12,15,7,0,0,0,7,16,11,12,14,0,0,0,0,5,15,16,11,0,6 +0,0,0,2,12,3,0,0,0,0,0,9,11,0,0,0,0,0,2,15,6,6,7,0,0,0,13,7,0,16,3,0,0,7,15,3,2,16,2,0,0,13,16,16,16,13,1,0,0,1,4,5,16,6,0,0,0,0,0,4,16,3,0,0,4 +0,1,10,16,15,3,0,0,0,8,14,4,16,12,0,0,0,8,13,1,16,12,0,0,0,2,13,16,16,13,0,0,0,0,0,3,9,16,0,0,0,0,6,4,4,16,4,0,0,0,16,13,10,16,5,0,0,0,7,12,13,10,1,0,9 +0,0,0,9,16,9,0,0,0,0,0,14,16,11,0,0,0,0,5,16,16,8,0,0,0,3,15,16,16,6,0,0,0,5,15,16,16,7,0,0,0,0,4,16,16,5,0,0,0,0,1,16,16,12,0,0,0,0,1,12,13,12,0,0,1 +0,0,4,14,15,9,1,0,0,0,0,14,16,16,4,0,0,0,0,15,16,16,0,0,0,0,7,16,16,13,0,0,0,2,16,16,16,12,0,0,0,0,4,15,16,8,0,0,0,0,5,16,16,12,0,0,0,0,4,14,15,5,0,0,1 
+0,0,2,15,11,1,0,0,0,0,9,12,14,13,2,0,0,2,15,2,1,13,6,0,0,7,16,0,0,9,8,0,0,4,16,0,0,10,7,0,0,3,16,3,2,14,2,0,0,0,14,14,14,9,0,0,0,0,3,11,13,1,0,0,0 +0,0,11,15,8,0,0,0,0,6,16,10,16,0,0,0,0,2,7,8,16,0,0,0,0,0,0,13,16,2,0,0,0,0,0,8,14,13,1,0,0,0,3,0,3,16,5,0,0,0,16,10,12,15,2,0,0,0,12,15,9,2,0,0,3 +0,0,0,14,8,0,0,0,0,0,3,16,5,0,0,0,0,0,12,12,10,14,0,0,0,4,16,6,13,11,0,0,0,12,16,7,16,14,3,0,0,15,16,16,16,16,6,0,0,2,5,13,16,4,0,0,0,0,0,15,11,0,0,0,4 +0,0,0,5,15,0,0,0,0,0,1,14,12,7,3,0,0,0,10,15,5,16,6,0,0,5,16,7,5,16,3,0,0,12,16,13,15,16,9,0,0,4,12,13,16,13,3,0,0,0,0,7,16,5,0,0,0,0,0,7,15,0,0,0,4 +0,1,11,16,8,0,0,0,0,8,16,5,16,3,0,0,0,8,11,0,13,10,0,0,0,3,15,9,11,15,2,0,0,0,1,7,7,15,7,0,0,0,0,0,0,8,12,0,0,1,10,5,4,11,12,0,0,0,8,15,16,15,6,0,9 +0,0,5,13,1,0,0,0,0,0,13,11,0,0,0,0,0,0,16,2,0,0,0,0,0,5,16,0,0,0,0,0,0,8,15,9,14,6,0,0,0,7,16,16,10,16,2,0,0,1,16,16,4,16,5,0,0,0,4,14,16,13,0,0,6 +0,0,1,11,16,16,13,0,0,0,3,7,4,9,13,0,0,0,0,0,0,10,6,0,0,0,2,4,8,15,6,0,0,0,9,13,15,14,5,0,0,0,1,2,15,0,0,0,0,0,0,12,9,0,0,0,0,0,2,15,3,0,0,0,7 +0,0,5,12,14,16,8,0,0,0,16,16,13,12,4,0,0,0,16,14,8,2,0,0,0,1,16,16,16,15,2,0,0,5,16,9,6,16,4,0,0,0,0,0,4,16,4,0,0,0,3,12,15,15,2,0,0,0,5,13,13,2,0,0,5 +0,0,2,12,15,5,0,0,0,0,11,9,9,15,3,0,0,0,15,1,0,15,4,0,0,4,10,0,0,13,3,0,0,3,14,0,0,12,8,0,0,0,15,3,0,13,5,0,0,0,10,13,5,16,2,0,0,0,1,13,16,8,0,0,0 +0,1,13,9,0,0,0,0,0,10,14,15,2,0,0,0,0,8,6,10,6,0,0,0,0,2,3,10,5,0,0,0,0,0,3,16,2,0,0,0,0,0,6,11,0,0,0,0,0,4,16,15,12,12,6,0,0,0,10,12,12,13,11,0,2 +0,2,12,8,0,0,0,0,0,11,16,16,0,0,0,0,0,7,8,14,6,0,0,0,0,1,2,12,8,0,0,0,0,0,0,14,4,0,0,0,0,0,2,16,1,0,0,0,0,2,13,16,9,15,11,0,0,2,14,16,13,15,9,0,2 +0,0,13,10,8,8,3,0,0,3,16,16,13,11,3,0,0,5,16,8,0,0,0,0,0,6,16,16,15,6,0,0,0,0,3,2,10,15,3,0,0,0,0,0,5,16,2,0,0,0,4,8,16,8,0,0,0,0,14,14,4,0,0,0,5 +0,0,2,11,15,16,15,0,0,0,8,13,12,16,13,0,0,0,0,0,6,16,5,0,0,0,0,6,13,16,7,0,0,0,9,16,16,16,9,0,0,0,5,9,16,5,0,0,0,0,0,11,16,1,0,0,0,0,1,16,9,0,0,0,7 +0,0,10,16,15,12,0,0,0,0,14,9,4,4,0,0,0,0,16,0,0,0,0,0,0,5,16,12,12,8,0,0,0,7,15,10,8,15,6,0,0,2,1,0,2,16,4,0,0,0,3,10,14,12,1,0,0,0,10,14,6,0,0,0,5 +0,0,6,13,15,8,0,0,0,4,16,12,12,16,5,0,0,6,16,2,5,16,5,0,0,1,15,13,13,16,1,0,0,0,0,7,13,16,1,0,0,0,0,0,4,16,0,0,0,4,16,11,7,16,0,0,0,1,10,13,16,13,0,0,9 +0,0,0,9,16,11,0,0,0,0,6,15,12,16,6,0,0,0,16,11,0,9,12,0,0,5,16,3,0,8,8,0,0,4,16,0,0,8,8,0,0,2,16,7,0,12,5,0,0,0,11,16,14,16,1,0,0,0,1,9,13,6,0,0,0 +0,0,5,15,2,0,0,0,0,1,15,10,0,0,0,0,0,8,16,2,0,0,0,0,0,10,13,1,4,6,0,0,0,8,16,13,16,15,9,0,0,6,16,13,1,4,12,0,0,1,16,15,3,8,11,0,0,0,5,12,16,16,4,0,6 +0,0,1,12,16,2,0,0,0,0,5,16,15,0,0,0,0,0,13,16,11,0,0,0,0,8,16,16,12,0,0,0,0,7,10,16,15,0,0,0,0,0,0,14,16,2,0,0,0,0,0,13,16,7,0,0,0,0,0,12,16,9,0,0,1 +0,0,5,8,11,13,10,0,0,0,12,12,7,4,4,0,0,3,12,0,4,0,0,0,0,5,16,16,16,14,0,0,0,1,7,1,0,14,6,0,0,0,0,0,0,12,4,0,0,7,11,1,6,12,1,0,0,0,8,13,10,2,0,0,5 +0,0,2,12,13,7,0,0,0,2,14,5,0,13,2,0,0,10,6,0,2,16,4,0,0,6,13,7,9,16,4,0,0,0,8,12,8,13,4,0,0,0,0,0,0,7,7,0,0,0,0,0,0,4,11,0,0,0,1,14,11,13,12,0,9 +0,0,8,16,15,9,0,0,0,2,15,5,3,16,6,0,0,8,12,0,3,16,8,0,0,8,15,10,14,16,5,0,0,1,9,10,3,15,7,0,0,0,0,0,0,15,5,0,0,0,1,4,0,13,7,0,0,0,5,16,16,16,2,0,9 +0,0,3,15,15,3,0,0,0,2,14,10,13,9,0,0,0,6,16,1,6,12,0,0,0,9,14,7,14,13,0,0,0,2,12,12,14,14,0,0,0,0,0,0,6,16,0,0,0,0,0,4,5,16,5,0,0,0,2,12,16,16,4,0,9 +0,0,0,7,16,2,0,1,0,0,0,11,13,1,13,8,0,0,6,16,4,8,15,1,0,0,15,16,16,16,12,0,0,0,8,8,13,16,3,0,0,0,0,0,13,11,0,0,0,0,0,4,16,4,0,0,0,0,0,9,12,0,0,0,4 
+0,0,0,8,16,16,16,9,0,0,0,8,8,9,16,8,0,0,0,0,0,7,16,1,0,0,0,0,1,12,14,0,0,0,3,14,16,16,6,0,0,0,4,6,11,10,0,0,0,0,0,3,15,3,0,0,0,0,0,10,11,0,0,0,7 +0,0,0,8,15,1,0,0,0,0,0,8,16,5,0,0,0,0,0,14,16,4,0,0,0,0,8,16,16,4,0,0,0,2,15,12,16,6,0,0,0,3,8,1,16,10,0,0,0,0,0,5,15,16,1,0,0,0,0,9,16,16,11,0,1 +0,0,1,9,16,16,16,8,0,0,7,15,9,12,13,1,0,0,0,0,0,14,6,0,0,0,0,0,5,15,1,0,0,0,13,16,16,16,8,0,0,1,9,8,14,5,0,0,0,0,0,7,13,0,0,0,0,0,0,14,6,0,0,0,7 +0,2,14,16,9,0,0,0,0,10,15,10,16,0,0,0,0,8,12,0,16,5,0,0,0,3,3,3,16,5,0,0,0,0,0,5,16,1,0,0,0,0,0,11,13,0,0,0,0,0,7,16,13,11,11,1,0,1,15,16,16,16,16,5,2 +0,1,12,16,16,8,0,0,0,8,14,8,16,8,0,0,0,1,2,9,15,3,0,0,0,0,12,15,3,0,0,0,0,0,10,15,9,1,0,0,0,0,1,9,16,11,0,0,0,0,1,0,3,16,4,0,0,2,15,16,16,15,4,0,3 +0,0,1,16,13,0,0,0,0,0,3,16,15,0,0,0,0,0,12,16,16,0,0,0,0,3,16,16,16,0,0,0,0,7,7,12,16,0,0,0,0,0,0,7,16,3,0,0,0,0,0,10,16,14,1,0,0,0,2,13,16,16,12,0,1 +0,0,0,7,15,1,0,0,0,0,1,15,5,2,4,0,0,0,10,10,2,16,6,0,0,4,16,3,9,14,0,0,1,15,14,12,15,10,0,0,5,15,12,11,16,4,0,0,0,0,0,6,15,1,0,0,0,0,0,9,12,0,0,0,4 +0,0,9,15,15,3,0,0,0,8,15,11,16,6,0,0,0,2,1,12,15,1,0,0,0,0,6,16,7,0,0,0,0,0,2,15,15,3,0,0,0,0,0,1,7,15,5,0,0,0,14,1,0,12,12,0,0,0,8,16,16,16,10,0,3 +0,0,11,16,10,0,0,0,0,2,16,14,15,0,0,0,0,0,13,7,16,1,0,0,0,0,0,6,16,1,0,0,0,0,0,10,12,0,0,0,0,0,1,15,8,0,0,0,0,0,12,16,15,10,4,0,0,0,11,11,8,13,16,6,2 +0,0,0,3,15,5,0,0,0,0,1,12,15,1,6,5,0,0,10,16,2,5,16,4,0,2,16,10,0,13,10,0,0,12,16,11,11,16,1,0,0,4,14,16,16,9,0,0,0,0,0,7,16,1,0,0,0,0,0,6,14,0,0,0,4 +0,0,8,16,16,8,0,0,0,6,14,8,16,9,0,0,0,3,3,9,13,3,0,0,0,0,0,11,15,2,0,0,0,0,0,1,13,13,1,0,0,0,0,0,1,16,5,0,0,0,7,8,4,12,7,0,0,0,10,16,16,16,4,0,3 +0,0,0,2,15,5,0,0,0,0,0,10,14,0,3,0,0,0,3,16,3,6,16,2,0,1,14,8,0,14,10,0,1,12,14,8,11,16,5,0,5,16,16,15,16,11,1,0,1,3,0,1,16,6,0,0,0,0,0,5,15,1,0,0,4 +0,0,9,16,15,1,0,0,0,6,14,9,16,4,0,0,0,3,4,11,15,0,0,0,0,0,8,16,1,0,0,0,0,0,3,14,10,0,0,0,0,0,0,2,15,8,0,0,0,0,5,6,5,16,5,0,0,0,13,16,16,16,5,0,3 +0,1,14,15,2,0,0,0,0,9,15,15,12,0,0,0,0,9,12,8,15,0,0,0,0,3,5,9,10,0,0,0,0,0,0,11,11,0,0,0,0,0,1,15,7,0,0,0,0,0,7,16,11,7,4,0,0,1,15,16,16,16,16,5,2 +0,0,12,16,10,0,0,0,0,3,16,6,14,9,0,0,0,7,14,0,3,15,3,0,0,9,12,0,0,8,10,0,0,8,12,0,0,4,12,0,0,7,13,0,0,5,12,0,0,2,16,6,2,13,9,0,0,0,9,16,16,13,0,0,0 +0,0,15,16,13,2,0,0,0,3,15,2,10,15,2,0,0,8,12,0,0,10,8,0,0,8,12,0,0,4,12,0,0,9,12,0,0,4,12,0,0,8,12,0,0,9,8,0,0,6,15,4,11,14,2,0,0,0,12,16,15,3,0,0,0 +0,0,11,16,16,16,6,0,0,4,16,13,10,4,2,0,0,4,16,0,0,0,0,0,0,1,15,10,0,0,0,0,0,0,6,15,9,0,0,0,0,2,1,3,16,1,0,0,0,9,11,5,16,2,0,0,0,1,11,16,14,0,0,0,5 +0,0,2,14,11,0,0,0,0,0,13,7,9,6,0,0,0,0,12,0,1,12,2,0,0,3,12,0,2,15,4,0,0,1,13,14,12,14,7,0,0,0,0,0,0,12,4,0,0,0,0,0,0,10,3,0,0,0,2,14,12,14,0,0,9 +0,0,0,14,7,0,0,0,0,0,0,16,12,0,0,0,0,0,5,16,13,0,0,0,0,1,14,14,16,1,0,0,0,3,3,1,16,5,0,0,0,0,0,0,11,8,0,0,0,0,0,3,13,13,0,0,0,0,0,12,16,16,8,0,1 +0,0,6,16,15,2,0,0,0,1,16,10,7,2,0,0,0,6,15,0,0,0,0,0,0,8,15,0,0,0,0,0,0,9,12,5,11,4,0,0,0,5,15,16,14,14,10,0,0,0,11,15,1,6,16,0,0,0,5,12,16,16,8,0,6 +0,0,0,7,15,6,0,0,0,0,7,16,4,1,3,0,0,2,16,10,0,14,13,0,0,6,16,7,4,16,8,0,0,6,16,16,14,16,6,0,0,1,7,9,16,13,0,0,0,0,0,4,16,7,0,0,0,0,0,10,12,0,0,0,4 +0,4,16,16,6,0,0,0,0,4,14,8,16,0,0,0,0,3,8,5,16,0,0,0,0,0,0,5,16,0,0,0,0,0,0,14,9,0,0,0,0,0,5,15,2,0,0,0,0,1,13,15,11,9,7,0,0,6,16,14,12,12,12,0,2 +0,1,13,16,15,1,0,0,0,5,13,10,16,5,0,0,0,0,0,10,14,1,0,0,0,0,7,16,3,0,0,0,0,0,4,16,13,1,0,0,0,0,0,5,16,12,0,0,0,0,6,6,8,16,6,0,0,0,15,16,16,15,1,0,3 
+0,2,13,16,13,9,2,0,0,1,13,5,4,4,2,0,0,0,12,5,0,0,0,0,0,0,14,16,13,3,0,0,0,0,1,4,8,12,0,0,0,0,0,0,3,16,0,0,0,7,8,4,8,13,0,0,0,3,10,14,12,4,0,0,5 +0,0,11,16,16,10,1,0,0,8,16,10,8,10,6,0,0,14,7,0,0,0,0,0,0,9,14,0,0,0,0,0,0,2,15,10,0,0,0,0,0,0,2,15,12,0,0,0,0,3,11,10,16,1,0,0,0,1,11,16,16,2,0,0,5 +0,0,0,13,15,2,0,0,0,0,4,16,14,1,0,0,0,0,15,16,11,0,0,0,0,6,9,16,14,0,0,0,0,0,0,10,15,2,0,0,0,0,0,6,16,4,0,0,0,0,0,1,16,11,0,0,0,0,0,15,16,15,0,0,1 +0,0,11,14,4,0,0,0,0,3,13,2,12,0,0,0,0,4,12,2,8,0,0,0,0,0,12,12,6,0,0,0,0,0,8,16,10,1,0,0,0,2,15,1,5,10,8,0,0,0,10,0,0,2,12,0,0,0,9,11,12,14,4,0,8 +0,0,1,10,16,16,4,0,0,0,7,8,6,16,7,0,0,0,0,0,3,16,5,0,0,1,4,4,10,14,3,0,0,12,16,16,16,10,5,0,0,2,0,9,13,1,0,0,0,0,1,16,7,0,0,0,0,0,0,15,4,0,0,0,7 +0,0,0,10,16,16,16,9,0,0,0,2,5,10,16,12,0,0,0,0,0,6,16,2,0,0,0,0,0,14,10,0,0,1,8,14,16,16,5,0,0,5,10,8,16,10,1,0,0,0,0,5,14,1,0,0,0,0,0,13,9,0,0,0,7 +0,0,0,4,15,6,0,3,0,0,0,13,14,1,11,11,0,0,8,16,4,4,16,4,0,2,16,6,3,11,13,0,0,12,16,16,16,16,7,0,0,11,9,7,13,14,1,0,0,0,0,1,14,5,0,0,0,0,0,7,12,0,0,0,4 +0,1,10,16,16,16,16,5,0,1,16,13,6,1,1,0,0,4,16,15,10,3,0,0,0,1,8,11,16,16,2,0,0,0,1,0,1,14,7,0,0,7,9,0,0,12,8,0,0,5,16,3,0,15,5,0,0,0,9,16,16,13,2,0,5 +0,0,8,16,12,1,0,0,0,3,15,5,13,13,0,0,0,10,12,0,1,15,6,0,0,12,9,0,0,7,12,0,0,12,10,0,0,5,12,0,0,8,14,0,0,7,12,0,0,3,16,9,5,15,5,0,0,0,9,16,16,11,0,0,0 +0,0,6,16,16,10,0,0,0,1,15,11,6,15,3,0,0,7,16,2,0,11,9,0,0,12,14,0,0,9,11,0,0,8,16,0,0,8,12,0,0,4,16,5,0,9,11,0,0,1,14,13,6,16,3,0,0,0,5,15,16,6,0,0,0 +0,0,4,16,14,0,0,0,0,0,8,16,6,0,0,0,0,1,15,16,4,0,0,0,0,10,16,16,4,0,0,0,0,0,5,16,10,0,0,0,0,0,1,16,12,0,0,0,0,0,1,15,16,6,1,0,0,0,6,16,16,16,4,0,1 +0,0,1,11,16,16,15,1,0,0,6,9,8,14,14,0,0,0,0,0,0,12,10,0,0,0,1,4,6,16,4,0,0,0,10,16,16,15,7,0,0,0,3,5,16,6,0,0,0,0,0,9,15,0,0,0,0,0,1,15,4,0,0,0,7 +0,0,12,16,9,0,0,0,0,3,16,7,5,0,0,0,0,11,13,0,0,0,0,0,0,9,12,0,0,0,0,0,0,9,13,10,15,8,0,0,0,5,16,15,8,11,10,0,0,4,16,12,1,5,16,0,0,1,10,15,16,16,10,0,6 +0,0,4,13,15,2,0,0,0,2,16,11,10,8,1,0,0,8,15,1,0,13,10,0,0,8,15,9,13,16,9,0,0,0,10,16,12,16,4,0,0,0,0,0,4,16,4,0,0,0,0,2,5,16,7,0,0,0,4,16,16,16,5,0,9 +0,0,0,3,14,7,0,0,0,0,0,12,12,0,3,8,0,0,9,14,0,2,16,6,0,4,16,2,0,10,14,0,1,15,8,3,8,16,4,0,7,16,16,16,16,12,0,0,2,9,5,2,15,3,0,0,0,0,0,5,14,0,0,0,4 +0,0,13,15,4,0,0,0,0,8,15,8,10,0,0,0,0,11,12,0,0,0,0,0,0,12,7,0,4,1,0,0,0,11,10,11,16,14,3,0,0,4,16,16,6,7,14,0,0,1,16,14,1,1,14,3,0,0,10,13,16,15,15,0,6 +0,0,7,15,13,9,2,0,0,4,15,0,0,10,9,0,0,1,15,5,3,13,5,0,0,0,7,16,13,1,0,0,0,0,10,12,15,4,0,0,0,0,8,1,8,13,0,0,0,11,11,4,1,16,0,0,0,1,9,12,16,12,0,0,8 +0,0,7,13,14,2,0,0,0,5,12,0,4,7,0,0,0,8,8,0,0,16,3,0,0,5,11,0,1,16,1,0,0,0,10,13,14,15,5,0,0,0,0,0,0,10,8,0,0,0,0,0,0,11,6,0,0,0,6,13,12,9,0,0,9 +0,0,7,15,3,0,0,0,0,0,15,6,0,0,0,0,0,6,13,0,0,0,0,0,0,7,13,0,0,0,0,0,0,8,11,7,13,11,2,0,0,3,16,15,5,4,11,0,0,0,16,6,0,7,11,0,0,0,8,12,15,10,1,0,6 +0,2,12,16,16,3,0,0,0,11,10,5,16,4,0,0,0,0,0,11,13,0,0,0,0,0,1,15,9,0,0,0,0,0,0,4,16,5,0,0,0,0,0,0,6,15,3,0,0,0,3,1,0,11,11,0,0,1,16,16,16,16,9,0,3 +0,0,1,15,16,13,1,0,0,3,11,11,3,13,9,0,0,12,13,0,0,8,12,0,0,9,15,0,0,8,12,0,0,4,16,2,0,8,12,0,0,2,15,9,0,8,12,0,0,0,10,16,5,14,8,0,0,0,1,12,16,15,1,0,0 +0,0,6,12,12,2,0,0,0,4,14,0,9,6,0,0,0,5,13,0,12,3,0,0,0,0,7,14,12,0,0,0,0,0,1,12,13,10,0,0,0,0,10,3,1,12,6,0,0,7,10,0,0,11,8,0,0,1,10,13,12,9,1,0,8 +0,0,9,13,16,6,0,0,0,2,16,14,16,2,0,0,0,0,2,14,8,0,0,0,0,0,4,16,13,6,0,0,0,0,0,4,12,16,6,0,0,0,0,0,0,13,7,0,0,0,5,7,12,15,1,0,0,0,12,16,11,1,0,0,3 
+0,1,12,16,3,0,0,0,0,5,14,14,4,0,0,0,0,9,7,7,8,0,0,0,0,6,2,8,8,0,0,0,0,0,0,10,5,0,0,0,0,0,0,14,2,0,0,0,0,0,9,16,11,8,5,0,0,2,14,11,12,16,9,0,2 +0,0,8,16,14,4,0,0,0,1,15,7,9,15,2,0,0,8,11,0,0,12,9,0,0,9,5,0,0,6,12,0,0,9,8,0,0,4,12,0,0,8,12,0,0,9,12,0,0,3,16,6,7,15,5,0,0,0,8,16,16,12,0,0,0 +0,0,5,14,9,0,0,0,0,1,14,4,9,0,0,0,0,0,16,0,6,4,0,0,0,0,13,3,13,2,0,0,0,0,1,13,13,0,0,0,0,0,4,14,9,13,3,0,0,0,13,3,0,3,15,0,0,0,5,12,12,15,4,0,8 +0,0,1,15,16,16,15,0,0,0,0,8,8,14,10,0,0,0,0,0,4,15,1,0,0,0,0,3,12,12,2,0,0,2,12,16,16,14,7,0,0,3,6,9,10,0,0,0,0,0,0,13,6,0,0,0,0,0,2,16,2,0,0,0,7 +0,2,15,9,0,0,0,0,0,8,13,15,0,0,0,0,0,10,4,14,3,0,0,0,0,11,2,12,4,0,0,0,0,1,0,12,4,0,0,0,0,0,2,16,0,0,0,0,0,0,11,14,6,6,5,0,0,2,16,16,16,16,9,0,2 +0,0,6,13,12,2,0,0,0,0,14,2,2,10,0,0,0,4,10,0,6,13,0,0,0,4,10,0,8,12,0,0,0,0,11,12,13,13,0,0,0,0,0,3,0,11,0,0,0,0,0,0,0,9,4,0,0,0,5,16,16,13,2,0,9 +0,0,8,12,12,0,0,0,0,5,14,2,9,8,0,0,0,4,14,1,11,4,0,0,0,0,7,14,12,1,0,0,0,0,11,11,12,4,0,0,0,3,14,0,2,12,5,0,0,4,12,0,0,1,12,0,0,0,9,11,12,12,4,0,8 +0,2,15,13,11,8,1,0,0,7,16,14,13,12,3,0,0,8,16,4,0,0,0,0,0,4,16,16,14,3,0,0,0,0,3,4,13,14,0,0,0,0,0,0,1,15,7,0,0,0,7,3,2,13,10,0,0,2,15,16,16,16,9,0,5 +0,0,8,16,9,0,0,0,0,2,15,9,15,3,0,0,0,6,14,0,11,11,0,0,0,7,15,10,16,14,0,0,0,2,13,11,5,16,3,0,0,0,0,0,0,13,6,0,0,0,0,1,4,14,4,0,0,0,10,16,13,5,0,0,9 +0,0,1,11,16,16,4,0,0,0,8,10,8,16,3,0,0,0,0,0,3,14,0,0,0,0,0,0,8,8,0,0,0,0,7,11,16,16,8,0,0,11,13,12,11,0,0,0,0,0,0,8,9,0,0,0,0,0,0,14,2,0,0,0,7 +0,0,8,12,16,15,8,0,0,3,15,3,0,0,0,0,0,4,12,0,0,0,0,0,0,4,14,12,12,3,0,0,0,0,7,5,5,14,2,0,0,3,2,0,0,11,4,0,0,2,11,0,0,13,2,0,0,0,11,13,12,5,0,0,5 +0,0,9,15,2,0,0,0,0,3,16,10,1,0,0,0,0,7,14,0,0,0,0,0,0,9,11,3,8,8,1,0,0,10,11,13,14,15,8,0,0,7,16,14,0,7,12,0,0,3,16,10,4,13,10,0,0,0,10,16,16,13,1,0,6 +0,0,0,8,16,6,0,0,0,0,5,15,16,1,0,0,0,5,16,16,13,0,0,0,0,4,9,16,14,0,0,0,0,0,0,16,14,0,0,0,0,0,0,12,15,0,0,0,0,0,0,10,16,5,0,0,0,0,0,6,16,15,1,0,1 +0,0,12,16,16,15,2,0,0,0,5,8,10,16,7,0,0,0,0,0,8,15,1,0,0,0,10,13,15,15,8,0,0,0,12,16,15,12,5,0,0,0,1,14,6,0,0,0,0,0,6,16,2,0,0,0,0,0,12,11,0,0,0,0,7 +0,0,9,14,8,0,0,0,0,7,12,6,15,3,0,0,0,3,1,0,12,8,0,0,0,0,0,7,15,11,0,0,0,0,0,10,11,15,5,0,0,0,0,0,0,9,8,0,0,0,4,7,4,14,5,0,0,0,8,14,16,14,1,0,3 +0,0,11,12,12,14,4,0,0,0,16,8,4,4,5,0,0,2,16,8,4,0,0,0,0,5,16,15,15,9,0,0,0,1,4,1,4,16,3,0,0,0,0,0,0,14,7,0,0,3,7,6,9,16,2,0,0,2,11,12,13,7,0,0,5 +0,3,13,12,12,13,2,0,0,3,16,8,5,7,3,0,0,3,14,2,3,0,0,0,0,4,16,15,16,9,0,0,0,0,4,0,2,15,2,0,0,0,0,0,0,12,5,0,0,1,6,4,6,15,5,0,0,3,13,16,16,10,0,0,5 +0,0,5,14,11,3,0,0,0,2,16,14,16,14,0,0,0,4,13,4,2,13,4,0,0,8,9,0,0,8,8,0,0,8,10,0,0,7,8,0,0,4,13,0,0,9,8,0,0,0,14,9,4,14,6,0,0,0,4,16,16,8,0,0,0 +0,0,0,3,15,1,0,0,0,0,0,7,14,0,0,0,0,0,0,12,10,0,0,0,0,0,3,15,3,0,0,0,0,0,11,14,8,6,0,0,0,5,16,11,16,11,0,0,1,13,16,16,16,16,4,0,0,4,4,7,16,5,0,0,4 +0,2,12,12,13,12,1,0,0,8,14,8,4,6,0,0,0,8,14,8,7,1,0,0,0,8,15,12,14,12,0,0,0,0,0,0,1,14,4,0,0,0,0,0,0,12,8,0,0,2,8,4,6,15,7,0,0,0,11,16,15,7,0,0,5 +0,1,12,12,1,0,0,0,0,4,14,12,10,0,0,0,0,7,11,4,12,0,0,0,0,2,12,7,12,0,0,0,0,0,0,8,12,0,0,0,0,0,1,14,9,0,0,0,0,0,13,16,16,15,6,0,0,0,9,12,12,12,7,0,2 +0,0,0,5,15,1,0,0,0,0,0,11,13,0,0,0,0,0,0,14,10,0,0,0,0,0,7,15,4,0,0,0,0,1,13,9,13,6,0,0,1,12,16,14,16,14,3,0,1,11,12,14,16,12,1,0,0,0,0,8,15,0,0,0,4 +0,0,0,13,5,0,0,0,0,0,8,15,3,0,0,0,0,0,12,8,0,0,0,0,0,3,16,13,12,5,0,0,0,5,16,12,12,15,5,0,0,4,12,0,0,8,12,0,0,1,13,10,5,12,14,0,0,0,2,11,14,12,2,0,6 +0,0,0,0,11,9,1,0,0,0,0,0,14,15,0,0,0,1,7,11,16,12,0,0,0,6,15,12,16,13,0,0,0,0,0,0,16,12,0,0,0,0,0,0,13,15,1,0,0,0,0,0,12,16,2,0,0,0,0,0,9,16,4,0,1 
+0,0,0,9,6,0,0,0,0,0,3,15,5,0,0,0,0,0,14,9,0,0,0,0,0,2,16,6,8,2,0,0,0,5,16,16,12,15,4,0,0,3,16,5,0,5,12,0,0,0,8,14,3,3,16,0,0,0,0,9,16,16,10,0,6 +0,0,1,13,4,0,0,0,0,0,10,11,0,0,0,0,0,1,16,3,0,0,0,0,0,3,16,16,10,3,0,0,0,6,16,11,10,14,1,0,0,4,16,1,0,7,11,0,0,1,13,7,0,9,12,0,0,0,1,13,16,15,5,0,6 +0,1,8,15,13,1,0,0,0,6,14,9,16,4,0,0,0,3,6,1,14,4,0,0,0,0,4,14,16,3,0,0,0,0,9,12,14,16,1,0,0,0,0,0,0,16,8,0,0,3,12,11,10,16,7,0,0,1,10,14,15,10,0,0,3 +0,0,4,13,16,7,0,0,0,1,15,13,16,15,1,0,0,5,13,0,7,14,4,0,0,6,9,0,0,8,7,0,0,5,11,0,0,8,8,0,0,4,13,0,0,8,8,0,0,0,14,12,6,16,4,0,0,0,3,16,15,5,0,0,0 +0,0,7,15,11,0,0,0,0,5,14,4,12,14,3,0,0,8,11,0,0,16,4,0,0,2,15,9,11,16,4,0,0,0,5,10,8,12,8,0,0,0,0,0,0,9,8,0,0,0,12,7,4,15,7,0,0,0,7,16,15,8,0,0,9 +0,0,7,15,16,16,9,0,0,2,15,6,15,15,11,0,0,1,13,0,7,15,3,0,0,1,14,12,15,3,0,0,0,0,10,16,16,4,0,0,0,2,13,2,10,12,0,0,0,1,16,5,8,16,0,0,0,0,8,16,16,7,0,0,8 +0,0,7,15,12,1,0,0,0,3,15,5,10,11,0,0,0,0,8,0,4,16,0,0,0,0,0,5,13,9,0,0,0,0,0,8,13,13,1,0,0,0,0,0,1,14,6,0,0,5,15,3,0,15,8,0,0,0,9,16,16,14,3,0,3 +0,0,0,15,4,0,0,0,0,0,8,14,0,0,0,0,0,0,14,7,0,0,0,0,0,1,16,9,8,2,0,0,0,2,16,16,15,15,2,0,0,3,16,7,0,6,12,0,0,0,11,12,4,7,15,0,0,0,1,9,16,16,6,0,6 +0,0,11,16,16,12,5,0,0,1,16,10,8,8,5,0,0,2,16,10,7,1,0,0,0,3,16,13,14,12,1,0,0,0,5,0,2,15,5,0,0,0,0,0,0,13,7,0,0,0,6,5,10,16,2,0,0,0,12,14,12,6,0,0,5 +0,0,10,16,15,13,10,0,0,0,4,4,6,16,6,0,0,0,0,0,8,13,0,0,0,0,9,16,16,16,6,0,0,0,5,13,14,13,5,0,0,0,1,14,3,0,0,0,0,0,6,16,0,0,0,0,0,0,9,10,0,0,0,0,7 +0,3,16,15,4,0,0,0,0,8,13,12,12,0,0,0,0,9,8,8,12,0,0,0,0,1,5,5,16,0,0,0,0,0,0,11,12,0,0,0,0,0,1,16,8,0,0,0,0,3,14,16,13,15,12,0,0,3,16,16,16,16,15,0,2 +0,0,4,13,7,4,4,0,0,0,12,12,13,16,6,0,0,0,11,0,6,13,0,0,0,0,4,4,13,5,0,0,0,0,16,16,16,16,4,0,0,0,0,12,10,10,1,0,0,0,2,14,2,0,0,0,0,0,6,10,0,0,0,0,7 +0,0,6,15,11,3,0,0,0,1,14,16,16,14,0,0,0,4,16,1,9,16,2,0,0,6,16,0,0,13,7,0,0,5,13,0,0,12,8,0,0,4,16,1,0,14,8,0,0,0,14,10,10,15,1,0,0,0,4,13,16,8,0,0,0 +0,0,2,13,6,0,0,0,0,0,11,11,3,0,0,0,0,1,16,3,0,0,0,0,0,3,16,11,5,0,0,0,0,3,16,15,4,6,3,0,0,2,16,4,0,7,10,0,0,0,13,8,0,8,13,0,0,0,3,12,16,16,6,0,6 +0,0,6,12,13,4,0,0,0,1,16,5,8,15,3,0,0,4,13,0,0,16,5,0,0,2,15,9,11,16,7,0,0,0,1,8,8,14,8,0,0,3,5,0,0,11,8,0,0,2,16,9,6,15,7,0,0,0,3,14,15,6,0,0,9 +0,0,0,1,16,8,0,0,0,0,0,5,16,8,0,0,0,0,5,12,16,4,0,0,0,8,16,16,16,3,0,0,0,0,0,9,16,5,0,0,0,0,0,11,16,6,0,0,0,0,0,6,16,12,0,0,0,0,0,2,16,14,1,0,1 +0,0,12,13,14,5,0,0,0,0,16,9,8,6,0,0,0,3,16,2,3,0,0,0,0,5,16,16,16,8,0,0,0,0,2,0,4,16,3,0,0,0,0,0,0,15,8,0,0,0,3,0,4,16,7,0,0,0,11,16,16,13,1,0,5 +0,0,6,13,13,12,2,0,0,4,15,5,6,11,8,0,0,8,11,0,0,10,6,0,0,3,14,13,13,13,1,0,0,0,8,16,14,9,0,0,0,0,16,4,1,15,0,0,0,0,15,5,6,16,0,0,0,0,5,15,16,9,0,0,8 +0,0,5,15,13,3,0,0,0,0,15,7,10,15,1,0,0,0,16,4,2,15,8,0,0,1,10,16,16,16,4,0,0,0,0,0,0,12,7,0,0,5,2,0,3,16,5,0,0,7,14,8,14,14,1,0,0,1,8,16,11,1,0,0,9 +0,0,8,13,4,0,0,0,0,2,16,11,15,1,0,0,0,0,12,0,11,5,0,0,0,0,4,2,9,9,0,0,0,0,0,0,12,11,0,0,0,0,0,6,15,4,0,0,0,0,6,16,13,5,8,0,0,0,7,16,16,16,16,5,2 +0,0,3,14,12,11,11,0,0,0,7,10,7,13,11,0,0,0,15,3,4,14,0,0,0,0,4,0,10,7,0,0,0,0,11,15,16,16,3,0,0,0,3,10,11,4,0,0,0,0,0,14,5,0,0,0,0,0,3,16,0,0,0,0,7 +0,0,0,8,10,0,0,0,0,0,0,15,7,0,0,0,0,0,6,15,1,0,0,0,0,1,14,7,4,2,0,0,0,6,14,4,15,2,0,0,0,14,15,13,16,12,3,0,0,15,12,15,15,11,1,0,0,0,0,13,8,0,0,0,4 +0,0,0,0,8,13,0,0,0,0,0,0,16,16,0,0,0,0,0,1,16,14,0,0,0,4,12,15,16,12,0,0,0,0,3,4,16,14,0,0,0,0,0,0,15,16,0,0,0,0,0,0,12,16,2,0,0,0,0,0,11,16,4,0,1 +0,1,9,13,14,13,8,0,0,4,14,4,4,4,2,0,0,4,12,0,0,0,0,0,0,5,15,16,16,6,0,0,0,0,4,0,4,16,3,0,0,0,0,0,0,13,7,0,0,0,3,2,5,16,6,0,0,0,10,16,15,8,1,0,5 
+0,0,0,1,11,10,0,0,0,0,0,6,16,15,0,0,0,4,13,16,16,11,0,0,0,3,8,10,16,10,0,0,0,0,0,4,16,12,0,0,0,0,0,0,16,14,0,0,0,0,0,0,16,16,5,0,0,0,0,0,12,13,6,0,1 +0,0,7,14,9,3,0,0,0,0,15,7,15,15,1,0,0,4,12,0,3,13,6,0,0,4,12,0,0,8,8,0,0,5,8,0,0,5,8,0,0,4,12,0,0,8,7,0,0,2,15,6,6,15,2,0,0,0,6,15,16,8,0,0,0 +0,0,7,12,14,16,8,0,0,0,16,11,8,8,4,0,0,0,16,1,5,3,0,0,0,5,16,16,16,13,1,0,0,2,9,4,2,14,6,0,0,0,0,0,0,15,5,0,0,0,2,8,10,16,4,0,0,0,6,15,12,6,0,0,5 +0,0,4,13,0,0,0,0,0,0,11,9,0,0,0,0,0,3,16,0,0,0,0,0,0,4,16,13,14,6,0,0,0,8,16,10,7,16,3,0,0,4,15,0,0,8,10,0,0,0,15,8,1,9,12,0,0,0,4,13,16,14,3,0,6 +0,0,12,14,16,12,0,0,0,1,15,9,5,9,0,0,0,4,16,12,12,5,0,0,0,0,5,5,7,16,3,0,0,0,0,0,0,12,8,0,0,0,0,0,0,9,11,0,0,0,11,6,5,14,12,0,0,0,9,16,16,14,3,0,5 +0,0,4,13,13,13,3,0,0,3,16,9,12,16,8,0,0,7,14,0,4,16,5,0,0,2,15,13,15,11,0,0,0,0,10,16,16,10,0,0,0,1,16,5,3,16,4,0,0,0,15,10,5,16,7,0,0,0,5,16,16,9,0,0,8 +0,0,2,12,1,0,0,0,0,0,11,12,0,0,0,0,0,0,16,5,0,0,0,0,0,2,16,5,4,1,0,0,0,2,16,16,16,13,2,0,0,1,15,8,0,7,12,0,0,0,8,9,2,5,15,0,0,0,0,11,16,14,8,0,6 +0,0,5,12,12,3,0,0,0,1,16,8,13,14,3,0,0,2,16,1,3,16,9,0,0,2,14,11,14,16,6,0,0,0,2,7,5,16,8,0,0,2,3,0,1,15,5,0,0,5,16,5,8,15,2,0,0,0,10,16,13,4,0,0,9 +0,0,6,14,15,12,11,0,0,2,15,5,4,4,3,0,0,2,16,11,12,7,0,0,0,5,11,6,6,16,3,0,0,0,0,0,0,13,7,0,0,0,0,0,2,16,3,0,0,4,15,5,12,15,0,0,0,0,9,15,11,1,0,0,5 +0,0,0,14,3,0,0,0,0,0,7,15,3,0,0,0,0,0,15,7,0,0,0,0,0,2,16,9,7,2,0,0,0,2,16,13,12,14,5,0,0,0,15,3,0,3,13,0,0,0,11,10,3,4,15,2,0,0,0,9,16,16,13,1,6 +0,0,9,13,14,1,0,0,0,2,16,4,16,13,0,0,0,3,16,1,11,14,0,0,0,0,10,16,16,4,0,0,0,1,13,13,15,13,1,0,0,5,13,1,4,16,5,0,0,8,12,2,5,16,4,0,0,1,8,16,16,11,1,0,8 +0,0,0,0,12,11,0,0,0,0,0,1,16,9,0,0,0,5,12,13,16,7,0,0,0,2,8,10,16,8,0,0,0,0,0,0,16,9,0,0,0,0,0,0,12,11,0,0,0,0,0,0,11,16,3,0,0,0,0,0,13,16,4,0,1 +0,0,14,16,16,16,9,0,0,0,6,8,10,16,6,0,0,0,0,0,9,14,2,0,0,0,13,14,16,14,4,0,0,0,8,16,16,16,11,0,0,0,3,16,4,3,1,0,0,0,11,12,0,0,0,0,0,0,16,8,0,0,0,0,7 +0,0,6,14,9,1,0,0,0,0,14,9,13,12,0,0,0,2,13,0,2,15,2,0,0,5,8,0,0,8,4,0,0,8,8,0,0,5,8,0,0,6,11,0,0,6,5,0,0,0,15,7,4,13,3,0,0,0,5,12,16,11,0,0,0 +0,0,3,11,16,13,2,0,0,0,14,8,8,15,8,0,0,0,15,11,5,13,12,0,0,0,6,16,16,13,0,0,0,0,13,12,12,15,1,0,0,3,12,0,0,16,5,0,0,1,15,6,7,16,4,0,0,0,6,12,14,7,0,0,8 +0,0,0,0,14,12,0,0,0,0,0,0,16,16,0,0,0,1,6,12,16,16,0,0,0,6,12,10,16,14,0,0,0,0,0,0,16,16,0,0,0,0,0,0,16,15,0,0,0,0,0,2,16,16,7,0,0,0,0,1,13,16,4,0,1 +0,0,0,12,3,0,0,0,0,0,10,14,1,0,0,0,0,0,14,9,1,0,0,0,0,1,16,16,15,6,0,0,0,3,16,10,5,14,6,0,0,2,16,1,0,5,12,0,0,0,12,9,4,7,15,0,0,0,1,10,16,16,7,0,6 +0,1,7,12,12,0,0,0,0,3,12,4,15,3,0,0,0,0,0,1,15,3,0,0,0,0,5,15,13,2,0,0,0,0,1,4,8,15,2,0,0,0,0,0,0,16,5,0,0,3,11,4,5,16,3,0,0,0,10,16,13,7,0,0,3 +0,1,10,16,13,2,0,0,0,6,14,9,16,6,0,0,0,0,1,0,13,11,0,0,0,0,1,13,16,11,1,0,0,0,2,12,11,16,8,0,0,0,5,0,0,15,9,0,0,3,16,12,8,16,7,0,0,0,10,16,16,11,1,0,3 +0,0,13,16,7,0,0,0,0,0,16,13,16,2,0,0,0,0,11,8,13,8,0,0,0,0,1,1,16,8,0,0,0,0,0,3,16,6,0,0,0,0,0,10,15,2,0,0,0,0,12,16,15,9,9,0,0,0,15,16,16,16,16,5,2 +0,0,10,16,16,16,16,4,0,0,4,8,8,14,14,1,0,0,0,0,6,15,4,0,0,0,4,12,15,14,4,0,0,0,11,16,16,16,9,0,0,0,0,14,10,3,0,0,0,0,6,16,5,0,0,0,0,0,12,12,0,0,0,0,7 +0,0,0,1,12,16,2,0,0,0,0,0,16,16,1,0,0,6,12,13,16,16,0,0,0,8,16,15,16,16,0,0,0,0,0,0,16,16,0,0,0,0,0,0,16,16,3,0,0,0,0,0,12,16,7,0,0,0,0,0,14,16,6,0,1 +0,0,5,11,14,7,0,0,0,4,16,6,9,15,1,0,0,7,12,0,1,16,4,0,0,2,16,9,11,16,6,0,0,0,2,5,4,12,8,0,0,0,0,0,0,10,8,0,0,3,15,7,3,14,8,0,0,0,5,14,16,11,2,0,9 
+0,1,14,15,4,0,0,0,0,6,15,12,14,0,0,0,0,4,16,4,16,4,0,0,0,0,6,4,16,3,0,0,0,0,0,9,15,0,0,0,0,0,1,14,11,0,0,0,0,0,13,16,10,8,9,1,0,2,14,16,16,16,16,6,2 +0,0,8,12,7,0,0,0,0,0,14,8,15,14,0,0,0,3,14,0,6,15,2,0,0,5,11,0,0,9,8,0,0,8,8,0,0,5,8,0,0,5,11,0,0,8,8,0,0,1,15,2,2,14,5,0,0,0,6,15,16,12,1,0,0 +0,0,0,3,10,0,0,0,0,0,0,9,8,0,0,0,0,0,4,15,0,0,0,0,0,0,9,9,1,0,0,0,0,3,16,2,10,6,0,0,0,13,16,6,14,12,0,0,0,15,16,16,16,13,0,0,0,0,0,4,13,3,0,0,4 +0,0,1,11,4,0,0,0,0,0,5,14,1,0,0,0,0,0,12,7,0,0,0,0,0,1,15,0,4,1,0,0,0,4,15,14,16,14,1,0,0,2,14,5,1,6,11,0,0,1,13,7,0,1,14,0,0,0,3,10,15,16,10,0,6 +0,2,12,16,14,1,0,0,0,12,12,8,16,7,0,0,0,2,1,1,16,8,0,0,0,0,2,14,16,10,0,0,0,0,1,11,12,16,7,0,0,0,0,0,0,15,12,0,0,3,13,6,4,16,11,0,0,1,14,16,16,12,3,0,3 +0,0,8,12,15,12,0,0,0,8,15,7,4,4,0,0,0,6,14,4,3,0,0,0,0,7,16,13,16,8,0,0,0,1,4,0,5,16,5,0,0,0,0,0,0,15,9,0,0,1,7,4,6,15,8,0,0,0,10,15,16,9,0,0,5 +0,0,11,15,7,0,0,0,0,2,16,10,16,14,4,0,0,5,13,0,6,16,6,0,0,4,16,9,10,16,8,0,0,0,7,12,11,14,8,0,0,0,0,0,0,12,8,0,0,1,12,10,10,15,6,0,0,1,10,12,14,10,1,0,9 +0,0,9,16,16,3,0,0,0,5,16,10,14,7,0,0,0,1,10,0,14,10,0,0,0,0,2,15,16,9,0,0,0,0,1,8,11,16,6,0,0,0,0,0,3,16,8,0,0,1,16,11,11,16,4,0,0,0,11,16,16,11,0,0,3 +0,0,3,16,7,0,0,0,0,0,0,13,13,0,0,0,0,0,2,15,11,0,0,0,0,8,16,16,16,12,3,0,0,6,12,15,16,16,12,0,0,0,0,8,16,7,1,0,0,0,0,10,15,2,0,0,0,0,2,16,13,0,0,0,4 +0,1,10,16,16,9,0,0,0,11,15,8,15,12,0,0,0,3,2,2,16,9,0,0,0,0,0,7,16,4,0,0,0,0,0,2,15,11,0,0,0,0,0,0,5,16,6,0,0,0,3,7,6,15,12,0,0,0,12,16,16,14,5,0,3 +0,0,0,7,14,6,0,0,0,0,1,15,11,2,0,0,0,0,8,13,0,0,0,0,0,1,14,13,8,1,0,0,0,3,16,14,12,10,0,0,0,0,15,2,0,7,9,0,0,0,11,13,7,4,15,0,0,0,0,7,12,12,13,2,6 +0,1,10,14,7,0,0,0,0,7,12,5,15,0,0,0,0,4,7,5,15,1,0,0,0,0,2,15,9,0,0,0,0,0,0,9,16,4,0,0,0,0,0,0,4,14,4,0,0,0,10,3,3,10,12,0,0,0,11,16,16,14,4,0,3 +0,0,7,15,10,1,0,0,0,1,16,10,14,7,0,0,0,0,11,2,0,15,0,0,0,0,0,0,0,15,3,0,0,0,0,0,5,16,0,0,0,0,0,3,15,9,0,0,0,0,9,16,16,10,6,0,0,0,9,12,12,12,13,1,2 +0,0,6,14,15,3,0,0,0,0,8,15,12,2,0,0,0,0,8,14,12,11,1,0,0,0,0,0,0,9,6,0,0,0,0,0,0,3,9,0,0,0,3,1,0,1,12,0,0,0,13,8,4,9,11,0,0,0,5,13,16,13,7,0,5 +0,0,9,13,6,0,0,0,0,0,15,16,15,5,0,0,0,4,13,1,3,14,0,0,0,5,11,0,0,10,7,0,0,8,8,0,0,8,6,0,0,4,12,0,0,7,7,0,0,2,13,4,7,16,0,0,0,0,8,16,13,8,0,0,0 +0,0,0,6,16,3,0,0,0,0,5,16,8,1,0,0,0,0,10,14,0,0,0,0,0,0,14,15,8,3,0,0,0,0,14,15,12,15,4,0,0,0,16,6,0,6,11,0,0,0,9,13,5,6,15,0,0,0,1,8,12,15,13,1,6 +0,1,15,10,0,0,0,0,0,9,13,12,9,0,0,0,0,10,8,8,16,10,0,0,0,3,14,16,16,12,0,0,0,0,0,0,6,14,0,0,0,0,0,0,2,14,3,0,0,3,11,5,2,8,12,0,0,1,8,13,16,16,11,0,9 +0,0,7,10,2,0,0,0,0,3,16,16,16,10,0,0,0,5,16,12,11,16,2,0,0,6,12,0,0,9,8,0,0,4,12,0,0,8,8,0,0,4,16,0,0,8,8,0,0,1,16,10,11,15,6,0,0,0,7,16,13,5,0,0,0 +0,0,4,15,10,0,0,0,0,0,14,8,16,7,0,0,0,0,15,0,13,10,0,0,0,1,15,16,16,9,0,0,0,0,0,0,5,12,0,0,0,2,1,0,3,15,0,0,0,8,14,6,4,14,1,0,0,0,6,13,16,15,0,0,9 +0,0,7,14,5,0,0,0,0,3,16,16,16,5,0,0,0,6,16,0,5,14,0,0,0,4,12,0,0,13,3,0,0,8,8,0,0,12,8,0,0,7,9,0,0,12,7,0,0,1,15,8,14,16,3,0,0,0,8,14,12,4,0,0,0 +0,0,14,16,8,3,0,0,0,5,15,8,16,16,3,0,0,9,11,1,12,15,2,0,0,1,13,16,15,3,0,0,0,0,11,14,15,3,0,0,0,1,16,4,4,14,5,0,0,4,14,5,4,11,12,0,0,0,9,12,13,12,8,0,8 +0,0,1,11,13,2,0,0,0,0,9,14,13,3,0,0,0,1,15,12,0,0,0,0,0,3,15,9,2,0,0,0,0,2,16,16,16,11,0,0,0,0,13,7,4,14,7,0,0,0,9,11,9,16,6,0,0,0,1,12,16,7,0,0,6 +0,0,0,10,14,1,0,0,0,0,8,16,10,1,0,0,0,2,16,9,0,0,0,0,0,7,16,7,4,1,0,0,0,5,16,15,12,13,0,0,0,0,15,6,0,4,11,0,0,0,5,13,2,9,13,0,0,0,0,6,15,16,6,0,6 +0,1,9,16,10,0,0,0,0,6,12,4,15,2,0,0,0,0,0,0,13,3,0,0,0,0,0,10,16,1,0,0,0,0,0,6,13,11,0,0,0,0,0,0,0,14,4,0,0,3,10,1,0,10,8,0,0,0,10,16,16,15,3,0,3 
+0,0,6,15,2,0,0,0,0,0,3,16,8,0,0,0,0,0,4,16,10,0,0,0,0,0,7,16,13,0,0,0,0,0,0,9,16,2,0,0,0,0,0,1,16,7,0,0,0,0,2,10,15,15,15,6,0,0,4,16,16,16,16,13,1 +0,0,3,12,11,1,0,0,0,0,9,13,10,10,0,0,0,0,5,13,11,13,0,0,0,0,0,7,12,14,5,0,0,0,0,0,0,6,10,0,0,0,0,0,0,1,15,0,0,0,8,8,4,4,13,5,0,0,3,10,16,16,16,5,9 +0,0,14,12,0,0,0,0,0,3,13,16,0,0,0,0,0,12,16,16,4,0,0,0,0,5,10,16,6,0,0,0,0,0,1,15,9,0,0,0,0,0,0,12,15,0,0,0,0,0,7,13,16,9,8,0,0,0,13,16,16,16,16,3,1 +0,0,0,0,11,15,0,0,0,0,0,2,15,11,0,0,0,0,0,9,16,4,0,0,0,0,8,16,8,7,3,0,0,9,16,16,12,16,11,0,0,4,11,12,14,16,5,0,0,0,0,0,9,16,4,0,0,0,0,0,12,15,2,0,4 +0,0,12,16,12,0,0,0,0,4,16,12,16,6,0,0,0,1,8,0,12,8,0,0,0,0,0,0,15,5,0,0,0,0,0,3,16,3,0,0,0,0,8,16,8,0,0,0,0,3,16,16,16,13,2,0,0,1,8,9,14,16,4,0,2 +0,0,10,16,7,0,0,0,0,1,16,16,15,3,0,0,0,4,16,16,16,13,0,0,0,4,16,10,11,14,2,0,0,6,16,0,0,12,4,0,0,5,15,0,0,10,7,0,0,4,16,8,11,16,0,0,0,1,10,15,13,4,0,0,0 +0,0,10,16,11,8,3,0,0,0,6,12,13,16,7,0,0,0,0,0,5,15,1,0,0,0,4,6,13,8,0,0,0,0,15,16,16,16,4,0,0,0,1,14,11,13,4,0,0,0,4,16,1,0,0,0,0,0,12,10,0,0,0,0,7 +0,0,0,3,12,5,0,0,0,0,8,16,12,4,0,0,0,2,16,8,0,0,0,0,0,6,16,14,9,2,0,0,0,4,16,11,10,15,3,0,0,0,14,3,0,6,10,0,0,0,5,15,5,12,9,0,0,0,0,5,15,11,2,0,6 +0,0,11,15,4,0,0,0,0,0,7,16,16,16,11,0,0,0,0,1,7,16,11,0,0,2,4,2,15,12,1,0,0,13,16,16,16,12,3,0,0,2,6,16,11,14,4,0,0,0,7,16,4,0,0,0,0,0,14,14,1,0,0,0,7 +0,1,14,12,1,0,0,0,0,7,16,16,13,0,0,0,0,6,14,3,16,2,0,0,0,2,12,0,14,6,0,0,0,0,0,0,15,8,0,0,0,0,0,8,16,4,0,0,0,2,12,16,16,12,10,0,0,0,15,16,16,16,16,6,2 +0,0,7,14,12,0,0,0,0,5,16,16,16,6,0,0,0,8,16,8,5,15,3,0,0,8,12,0,0,10,7,0,0,8,11,0,0,12,5,0,0,4,13,0,0,12,5,0,0,0,16,12,9,16,3,0,0,0,8,14,12,7,0,0,0 +0,0,9,9,0,0,0,0,0,0,7,16,2,0,0,0,0,0,8,16,3,0,0,0,0,0,6,16,9,0,0,0,0,0,0,6,14,0,0,0,0,0,0,1,15,4,0,0,0,0,6,12,15,14,9,5,0,0,7,16,16,16,16,14,1 +0,4,16,13,0,0,0,0,0,12,11,15,4,0,0,0,0,12,6,10,10,0,0,0,0,1,1,8,10,0,0,0,0,0,0,15,7,0,0,0,0,0,4,16,1,0,0,0,0,0,13,16,12,12,8,0,0,2,16,16,16,16,16,0,2 +0,0,11,8,0,0,0,0,0,0,9,15,0,0,0,0,0,12,15,16,6,0,0,0,0,8,15,16,9,0,0,0,0,0,1,13,13,0,0,0,0,0,0,6,16,2,0,0,0,0,8,13,16,13,12,5,0,0,11,16,16,16,16,11,1 +0,0,5,13,5,0,0,0,0,4,16,11,12,11,2,0,0,7,8,0,12,16,6,0,0,3,15,14,16,12,1,0,0,0,9,14,14,10,0,0,0,0,11,1,2,13,5,0,0,0,9,7,4,8,12,0,0,0,5,14,15,13,7,0,8 +0,0,0,1,11,14,0,0,0,0,0,2,16,10,0,0,0,0,0,11,16,4,0,0,0,0,12,15,3,5,1,0,0,7,16,13,9,16,10,0,0,5,16,16,16,16,5,0,0,0,0,0,12,13,0,0,0,0,0,0,13,13,1,0,4 +0,0,0,0,11,13,0,0,0,0,0,2,16,10,0,0,0,0,0,13,15,2,0,0,0,0,11,15,8,11,4,0,0,9,16,15,14,16,11,0,0,2,7,11,13,16,4,0,0,0,0,0,8,16,3,0,0,0,0,0,15,16,2,0,4 +0,0,0,1,15,8,0,0,0,0,0,2,16,9,0,0,0,0,0,7,16,4,0,0,0,1,7,16,15,15,2,0,0,11,16,16,13,16,5,0,0,7,12,12,14,16,5,0,0,0,0,0,13,15,0,0,0,0,0,4,16,10,0,0,4 +0,0,6,12,12,1,0,0,0,4,16,8,13,7,0,0,0,5,10,0,11,8,0,0,0,0,0,0,14,3,0,0,0,0,0,0,13,8,0,0,0,0,0,0,3,14,2,0,0,0,7,9,4,11,7,0,0,0,7,12,12,13,5,0,3 +0,0,5,14,15,2,0,0,0,4,15,7,11,8,0,0,0,4,6,1,15,5,0,0,0,0,0,4,15,4,0,0,0,0,0,0,7,15,1,0,0,0,0,0,0,9,7,0,0,3,12,5,4,12,7,0,0,0,6,13,16,9,1,0,3 +0,0,10,13,16,5,0,0,0,0,15,15,10,12,0,0,0,3,14,1,0,14,2,0,0,6,12,0,0,9,7,0,0,4,12,0,0,4,8,0,0,0,12,0,0,11,6,0,0,0,16,10,14,16,2,0,0,0,8,13,10,1,0,0,0 +0,0,4,15,8,0,0,0,0,0,12,16,15,3,0,0,0,1,16,11,7,14,2,0,0,4,14,0,0,12,8,0,0,4,12,0,0,14,5,0,0,4,14,1,0,15,5,0,0,0,13,9,9,16,6,0,0,0,7,16,12,5,0,0,0 +0,0,10,9,0,0,0,0,0,0,7,16,2,0,0,0,0,11,14,16,3,0,0,0,0,5,15,16,5,0,0,0,0,0,1,10,10,0,0,0,0,0,0,5,15,0,0,0,0,0,5,12,16,14,12,4,0,0,8,16,16,16,16,13,1 
+0,0,4,15,11,1,0,0,0,0,9,13,10,11,0,0,0,0,7,11,3,16,1,0,0,0,1,14,16,16,5,0,0,0,0,0,2,11,6,0,0,0,0,0,0,6,10,0,0,0,8,5,4,8,13,0,0,0,4,12,15,16,8,0,9 +0,0,4,16,16,16,12,0,0,0,1,7,8,13,15,0,0,0,0,0,0,14,10,0,0,0,2,8,11,15,1,0,0,0,4,14,16,16,4,0,0,0,0,8,15,13,4,0,0,0,0,14,10,0,0,0,0,0,6,16,4,0,0,0,7 +0,0,7,13,12,3,0,0,0,2,14,8,14,8,0,0,0,2,15,4,14,8,0,0,0,0,8,12,12,8,0,0,0,0,0,0,4,12,0,0,0,0,0,0,1,15,0,0,0,0,10,10,1,16,3,0,0,0,5,13,16,12,0,0,9 +0,1,6,13,16,5,0,0,0,6,16,9,15,4,0,0,0,4,9,5,16,2,0,0,0,0,0,8,13,0,0,0,0,0,0,3,15,7,0,0,0,0,0,0,6,15,0,0,0,0,4,6,4,14,6,0,0,0,10,16,16,16,5,0,3 +0,0,10,16,16,15,2,0,0,0,5,8,8,16,6,0,0,0,0,0,4,16,3,0,0,0,6,8,12,11,0,0,0,1,16,16,16,15,1,0,0,0,1,13,10,11,3,0,0,0,8,14,0,0,0,0,0,0,13,6,0,0,0,0,7 +0,3,14,16,4,0,0,0,0,8,10,11,11,0,0,0,0,5,9,0,13,2,0,0,0,0,0,0,12,4,0,0,0,0,0,2,15,1,0,0,0,0,0,14,11,0,0,0,0,1,13,16,14,12,6,0,0,2,12,12,12,12,11,0,2 +0,0,7,14,1,0,0,0,0,0,6,16,8,0,0,0,0,8,16,16,10,0,0,0,0,0,11,13,16,1,0,0,0,0,0,3,16,6,0,0,0,0,0,1,13,12,0,0,0,0,10,16,16,16,12,7,0,0,5,15,16,16,16,15,1 +0,0,5,15,10,10,4,0,0,0,12,12,15,14,8,0,0,0,11,7,1,15,5,0,0,0,6,15,8,14,0,0,0,0,4,14,16,2,0,0,0,3,16,11,12,5,0,0,0,7,14,7,6,14,0,0,0,0,7,13,16,14,0,0,8 +0,0,5,16,11,3,0,0,0,0,14,15,10,15,2,0,0,5,13,6,0,11,8,0,0,8,8,0,0,5,8,0,0,8,7,0,0,8,5,0,0,4,10,0,1,13,1,0,0,0,14,9,13,10,0,0,0,0,5,13,9,1,0,0,0 +0,1,14,14,2,0,0,0,0,7,16,16,8,0,0,0,0,11,11,10,14,0,0,0,0,0,0,8,16,0,0,0,0,0,0,10,13,0,0,0,0,0,5,16,12,0,0,0,0,4,16,16,16,16,6,0,0,1,8,6,8,15,11,0,2 +0,0,0,8,11,0,0,0,0,0,3,16,12,0,0,0,0,0,11,15,1,0,0,0,0,1,15,12,5,0,0,0,0,2,16,16,13,12,1,0,0,0,14,9,1,4,13,0,0,0,5,14,3,0,11,6,0,0,0,5,15,16,16,7,6 +0,0,11,11,0,0,0,0,0,0,12,14,0,0,0,0,0,13,16,15,0,0,0,0,0,6,11,16,3,0,0,0,0,0,1,16,4,0,0,0,0,0,0,11,9,0,0,0,0,0,10,15,15,12,7,0,0,0,11,16,16,16,16,4,1 +0,0,0,11,11,0,0,0,0,0,2,16,13,0,0,0,0,0,6,16,5,0,0,0,0,0,11,16,5,0,0,0,0,0,13,16,14,12,1,0,0,0,11,14,2,8,14,0,0,0,6,16,5,12,15,1,0,0,0,8,15,13,6,0,6 +0,0,8,16,7,0,0,0,0,0,11,14,16,4,0,0,0,0,0,1,14,3,0,0,0,0,8,14,16,6,0,0,0,0,6,16,16,16,7,0,0,0,0,12,9,13,5,0,0,0,4,14,2,0,0,0,0,0,9,10,0,0,0,0,7 +0,0,4,14,3,0,0,0,0,0,9,16,16,4,0,0,0,0,14,16,16,14,0,0,0,2,16,6,1,16,4,0,0,5,15,0,0,12,4,0,0,5,13,0,0,12,8,0,0,0,16,10,9,16,7,0,0,0,6,16,15,8,0,0,0 +0,0,11,14,5,0,0,0,0,5,13,4,16,6,0,0,0,3,14,11,15,8,0,0,0,0,2,6,6,13,0,0,0,0,0,0,0,14,2,0,0,0,0,0,0,5,10,0,0,1,4,0,0,7,12,0,0,1,9,15,16,15,4,0,9 +0,0,2,13,10,1,0,0,0,0,8,16,16,14,1,0,0,0,11,8,3,15,1,0,0,0,6,12,9,11,0,0,0,0,9,16,16,2,0,0,0,1,16,7,9,10,0,0,0,2,14,5,4,16,3,0,0,0,4,11,14,13,5,0,8 +0,2,15,10,1,0,0,0,0,8,15,16,9,0,0,0,0,8,10,11,12,0,0,0,0,1,5,7,16,0,0,0,0,0,0,10,13,0,0,0,0,0,2,15,10,0,0,0,0,5,16,16,16,12,8,0,0,3,12,12,11,14,16,1,2 +0,0,3,14,11,1,0,0,0,1,15,10,16,5,0,0,0,2,10,2,16,3,0,0,0,0,0,1,16,6,0,0,0,0,0,0,9,15,3,0,0,0,6,0,0,10,9,0,0,2,16,7,4,13,10,0,0,0,5,11,14,11,1,0,3 +0,0,7,13,9,0,0,0,0,0,13,11,13,11,0,0,0,0,14,8,12,16,2,0,0,0,5,13,13,15,3,0,0,0,0,0,0,11,7,0,0,0,0,0,0,3,13,0,0,0,12,9,4,4,16,0,0,0,7,16,16,16,12,0,9 +0,0,14,14,11,2,0,0,0,1,16,16,16,15,0,0,0,4,16,9,7,14,6,0,0,8,15,0,0,8,8,0,0,8,11,0,0,8,8,0,0,8,12,0,0,12,5,0,0,4,16,10,14,13,0,0,0,0,9,16,15,3,0,0,0 +0,0,2,15,3,0,0,0,0,0,2,14,10,0,0,0,0,0,15,16,14,0,0,0,0,0,6,12,16,0,0,0,0,0,0,1,16,2,0,0,0,0,0,0,13,6,0,0,0,0,8,14,14,14,8,3,0,0,2,11,12,12,13,9,1 +0,1,12,10,2,0,0,0,0,5,14,13,12,0,0,0,0,4,8,3,16,0,0,0,0,0,1,0,15,0,0,0,0,0,0,5,12,0,0,0,0,0,4,15,6,0,0,0,0,4,16,16,15,10,3,0,0,2,11,7,8,11,9,0,2 
+0,0,5,14,15,2,0,0,0,0,11,16,12,1,0,0,0,0,7,16,13,2,0,0,0,0,1,11,14,15,2,0,0,0,0,0,0,10,10,0,0,1,8,1,0,2,15,1,0,2,13,10,5,7,16,2,0,0,4,13,16,16,14,0,5 +0,0,10,15,8,2,0,0,0,0,5,13,16,15,1,0,0,0,0,0,4,16,4,0,0,0,1,4,7,16,3,0,0,0,9,16,16,16,1,0,0,0,1,12,15,12,4,0,0,0,3,16,5,0,0,0,0,0,14,14,0,0,0,0,7 +0,0,0,8,13,0,0,0,0,0,6,15,8,0,0,0,0,2,16,10,0,0,0,0,0,2,16,13,10,1,0,0,0,2,16,11,9,13,1,0,0,0,10,6,0,5,13,0,0,0,4,15,1,9,16,2,0,0,0,7,15,16,7,0,6 +0,0,11,8,0,0,0,0,0,0,12,15,1,0,0,0,0,0,13,16,5,0,0,0,0,0,12,16,6,0,0,0,0,0,2,15,7,0,0,0,0,0,0,10,14,1,0,0,0,0,16,16,16,16,11,3,0,0,11,16,16,16,16,11,1 +0,0,1,14,9,0,0,0,0,0,5,15,13,6,0,0,0,0,9,12,4,12,0,0,0,0,6,15,10,16,4,0,0,0,0,10,16,16,10,0,0,0,0,0,0,2,16,0,0,0,1,4,4,6,15,4,0,0,1,13,15,14,11,0,9 +0,0,6,15,16,7,0,0,0,10,16,9,14,11,0,0,0,5,3,1,14,10,0,0,0,0,0,9,16,5,0,0,0,0,0,8,16,16,4,0,0,0,1,0,1,12,8,0,0,3,13,5,4,13,11,0,0,0,8,16,16,12,2,0,3 +0,0,1,13,15,3,0,0,0,6,16,16,16,9,0,0,0,9,15,16,16,6,0,0,0,3,16,16,10,0,0,0,0,0,12,16,11,0,0,0,0,0,14,6,10,15,2,0,0,0,11,12,6,16,13,0,0,0,1,10,15,13,5,0,8 +0,0,3,9,8,0,0,0,0,2,15,14,14,4,0,0,0,9,16,5,13,16,1,0,0,5,16,16,16,16,7,0,0,0,6,8,8,16,6,0,0,0,0,0,0,15,9,0,0,0,7,4,9,16,8,0,0,0,9,16,13,9,1,0,9 +0,0,6,11,11,1,0,0,0,0,9,16,16,14,0,0,0,0,11,16,16,10,0,0,0,0,7,16,16,12,0,0,0,0,12,16,16,11,0,0,0,0,9,16,16,12,0,0,0,0,8,16,16,16,0,0,0,0,5,12,12,12,3,0,1 +0,0,0,1,12,2,0,0,0,0,1,16,16,2,0,0,0,0,10,15,3,0,0,0,0,0,14,11,0,0,0,0,0,1,16,16,15,7,0,0,0,0,14,16,11,15,5,0,0,0,6,16,6,16,9,0,0,0,0,5,12,12,3,0,6 +0,0,7,15,16,15,3,0,0,0,15,8,6,4,0,0,0,0,16,5,4,2,0,0,0,3,16,14,16,10,0,0,0,7,16,13,10,14,0,0,0,0,4,0,6,16,1,0,0,0,8,8,12,14,0,0,0,0,7,13,11,4,0,0,5 +0,0,0,11,13,0,0,0,0,2,12,16,16,7,0,0,0,12,16,8,3,14,2,0,0,6,16,10,0,14,6,0,0,0,15,9,0,11,5,0,0,0,11,13,0,9,9,0,0,0,8,16,13,16,5,0,0,0,0,13,16,10,1,0,0 +0,0,0,2,15,5,0,0,0,0,0,9,15,2,2,0,0,0,3,16,7,5,16,2,0,1,12,16,8,14,13,0,0,11,16,16,16,16,10,0,0,10,10,4,14,14,1,0,0,0,0,0,14,8,0,0,0,0,0,1,15,3,0,0,4 +0,0,0,4,13,1,0,0,0,0,3,16,13,0,0,0,0,0,8,15,4,0,0,0,0,0,12,14,0,0,0,0,0,0,15,11,6,6,0,0,0,0,13,16,16,16,10,0,0,0,8,16,5,11,16,2,0,0,0,6,12,15,10,0,6 +0,0,0,6,14,3,0,0,0,0,2,15,14,4,0,0,0,0,9,16,2,0,0,0,0,0,13,13,0,0,0,0,0,0,14,14,16,16,5,0,0,0,14,16,15,13,11,0,0,0,10,16,6,13,13,0,0,0,1,9,15,13,4,0,6 +0,0,3,14,10,0,0,0,0,2,14,15,13,3,0,0,0,2,16,8,4,10,0,0,0,0,15,14,13,16,4,0,0,0,2,10,14,15,10,0,0,0,3,0,0,11,9,0,0,0,10,8,4,15,8,0,0,0,2,12,13,14,4,0,9 +0,0,0,2,13,0,0,0,0,0,0,11,12,0,0,0,0,0,3,16,6,1,4,0,0,0,11,10,0,12,8,0,0,6,16,7,7,16,1,0,0,9,16,16,16,16,3,0,0,0,3,5,15,8,1,0,0,0,0,2,11,0,0,0,4 +0,0,8,12,13,16,8,0,0,0,9,9,9,16,5,0,0,0,0,0,7,15,0,0,0,0,6,11,15,11,1,0,0,0,9,14,15,15,6,0,0,0,0,16,8,0,0,0,0,0,3,16,3,0,0,0,0,0,8,14,0,0,0,0,7 +0,0,2,10,12,1,0,0,0,0,10,15,14,8,0,0,0,6,16,7,8,8,0,0,0,5,16,16,16,10,0,0,0,0,8,16,16,16,2,0,0,0,11,14,1,13,6,0,0,0,12,13,7,14,4,0,0,0,4,14,12,10,2,0,8 +0,0,0,10,15,2,0,0,0,0,0,13,16,11,0,0,0,0,0,15,16,9,0,0,0,0,6,16,16,7,0,0,0,1,15,16,16,3,0,0,0,1,14,16,16,2,0,0,0,0,0,14,16,8,0,0,0,0,0,6,14,9,0,0,1 +0,0,7,12,14,12,8,0,0,0,10,8,9,16,14,0,0,0,0,0,2,16,6,0,0,0,6,16,13,14,1,0,0,0,3,10,16,10,0,0,0,0,0,14,11,0,0,0,0,0,3,16,3,0,0,0,0,0,8,11,0,0,0,0,7 +0,1,11,14,14,4,0,0,0,3,14,8,10,16,0,0,0,0,0,0,5,16,1,0,0,0,0,3,16,14,0,0,0,0,0,4,12,16,7,0,0,0,0,0,0,9,12,0,0,0,9,8,8,14,13,0,0,0,7,12,12,10,3,0,3 +0,0,9,15,12,10,7,0,0,0,5,8,12,16,12,0,0,0,0,0,4,16,6,0,0,0,1,5,14,10,0,0,0,0,13,16,16,12,0,0,0,1,8,16,10,2,0,0,0,0,9,16,3,0,0,0,0,0,9,13,0,0,0,0,7 
+0,0,1,10,9,0,0,0,0,1,10,13,8,7,0,0,0,5,16,2,0,9,0,0,0,0,16,12,14,16,1,0,0,0,11,16,13,13,3,0,0,0,2,15,5,6,3,0,0,0,4,15,4,13,2,0,0,0,1,11,12,4,0,0,8 +0,0,0,1,10,0,0,0,0,0,0,11,8,2,1,0,0,0,3,13,0,8,8,0,0,0,10,8,2,15,3,0,0,6,16,15,16,16,5,0,0,12,15,12,15,14,3,0,0,0,0,0,15,1,0,0,0,0,0,0,12,0,0,0,4 +0,0,0,4,14,4,0,0,0,0,3,16,14,4,0,0,0,0,8,16,5,0,0,0,0,0,15,12,11,8,1,0,0,0,16,16,16,16,8,0,0,2,16,16,4,3,14,0,0,0,10,16,5,10,16,1,0,0,0,7,13,14,10,0,6 +0,0,13,16,16,11,0,0,0,0,10,7,4,16,0,0,0,0,0,0,10,12,0,0,0,0,0,3,13,14,2,0,0,0,0,10,13,14,6,0,0,0,0,0,0,6,11,0,0,1,14,6,4,13,7,0,0,0,9,13,16,12,0,0,3 +0,0,10,15,16,15,1,0,0,7,16,10,4,3,1,0,0,8,13,8,0,0,0,0,0,7,16,16,7,0,0,0,0,1,8,10,8,0,0,0,0,0,0,5,15,0,0,0,0,0,1,8,14,0,0,0,0,0,6,16,10,0,0,0,5 +0,0,3,14,9,1,0,0,0,0,10,15,13,8,0,0,0,0,13,10,4,14,0,0,0,0,12,16,16,16,4,0,0,0,0,6,8,12,10,0,0,0,0,0,0,4,13,0,0,0,3,8,8,13,13,0,0,0,2,10,12,10,5,0,9 +0,0,4,10,16,4,0,0,0,4,16,13,10,10,0,0,0,8,16,8,7,12,0,0,0,3,15,16,16,9,0,0,0,5,16,16,16,16,2,0,0,1,13,12,1,9,9,0,0,0,14,10,2,14,5,0,0,0,2,12,15,11,1,0,8 +0,0,9,13,7,0,0,0,0,2,15,10,14,6,0,0,0,12,13,0,9,16,2,0,0,8,16,12,14,16,6,0,0,1,8,8,10,16,2,0,0,0,0,0,1,16,5,0,0,0,8,4,7,16,2,0,0,0,10,15,16,13,0,0,9 +0,0,0,5,11,0,0,0,0,0,7,16,14,1,0,0,0,3,16,3,9,11,0,0,0,4,16,13,16,14,3,0,0,0,7,16,13,4,4,0,0,0,1,16,6,3,5,0,0,0,5,14,5,10,3,0,0,0,0,9,9,5,0,0,8 +0,0,3,9,9,0,0,0,0,0,9,16,16,5,0,0,0,0,9,16,16,8,0,0,0,0,7,16,16,7,0,0,0,0,12,16,16,7,0,0,0,0,16,16,16,8,0,0,0,0,12,16,16,12,0,0,0,0,2,7,10,4,0,0,1 +0,0,9,15,16,16,14,0,0,1,15,10,8,14,13,0,0,0,0,0,2,15,9,0,0,0,0,10,14,16,4,0,0,0,1,16,16,16,11,0,0,0,0,13,15,4,1,0,0,0,5,16,7,0,0,0,0,0,9,15,0,0,0,0,7 +0,0,4,16,12,0,0,0,0,9,16,16,16,8,0,0,0,9,16,9,6,14,0,0,0,6,16,2,0,12,5,0,0,6,16,1,0,8,9,0,0,3,16,1,0,12,10,0,0,0,12,13,15,16,8,0,0,0,3,12,12,10,1,0,0 +0,0,0,9,7,0,0,0,0,0,3,16,8,0,0,0,0,0,8,14,2,0,0,0,0,0,13,10,0,0,0,0,0,0,15,16,16,15,5,0,0,0,15,16,11,11,12,0,0,0,8,15,6,9,15,0,0,0,0,8,14,16,8,0,6 +0,0,4,8,10,13,8,0,0,0,10,12,12,14,12,0,0,0,0,0,0,13,8,0,0,0,5,12,13,16,6,0,0,0,7,13,16,12,3,0,0,0,0,8,14,1,0,0,0,0,3,16,9,0,0,0,0,0,5,14,2,0,0,0,7 +0,0,6,16,12,12,14,6,0,0,5,8,8,11,15,2,0,0,0,0,1,14,5,0,0,0,0,1,12,12,0,0,0,0,0,11,16,15,1,0,0,0,0,10,11,1,0,0,0,0,1,14,4,0,0,0,0,0,5,12,2,0,0,0,7 +0,0,6,12,12,15,7,0,0,0,14,15,12,16,9,0,0,0,0,0,0,16,8,0,0,0,1,1,9,14,0,0,0,3,15,16,16,16,2,0,0,3,8,14,16,5,0,0,0,0,3,16,8,0,0,0,0,0,7,15,5,0,0,0,7 +0,0,0,9,13,0,0,0,0,0,5,16,16,4,0,0,0,0,15,15,7,0,0,0,0,1,16,13,0,0,0,0,0,3,16,15,16,16,6,0,0,1,14,16,16,16,15,1,0,0,8,16,13,14,16,1,0,0,0,7,12,12,6,0,6 +0,0,1,15,1,0,0,0,0,0,6,16,0,2,0,0,0,0,11,13,2,16,2,0,0,6,16,8,9,14,0,0,0,8,16,16,16,14,3,0,0,0,8,15,16,16,8,0,0,0,0,12,14,3,0,0,0,0,1,16,9,0,0,0,4 +0,0,1,12,11,2,0,0,0,0,2,16,16,3,0,0,0,0,5,16,16,3,0,0,0,1,15,16,14,2,0,0,0,5,16,16,13,0,0,0,0,1,11,16,14,0,0,0,0,0,5,16,16,7,0,0,0,0,1,15,16,9,0,0,1 +0,3,11,15,14,2,0,0,0,10,13,8,14,10,0,0,0,5,1,0,13,8,0,0,0,0,0,9,16,8,0,0,0,0,0,5,9,16,3,0,0,0,0,0,0,16,9,0,0,4,12,8,11,15,3,0,0,1,8,12,11,4,0,0,3 +0,0,7,13,12,12,5,0,0,3,15,8,7,8,3,0,0,4,15,5,6,0,0,0,0,5,16,16,16,2,0,0,0,2,8,1,12,4,0,0,0,0,0,0,12,4,0,0,0,0,3,5,16,2,0,0,0,0,9,15,8,0,0,0,5 +0,0,11,16,15,8,0,0,0,0,9,13,5,3,0,0,0,0,12,13,0,0,0,0,0,0,12,13,8,2,0,0,0,0,15,16,16,7,0,0,0,0,0,0,9,11,0,0,0,0,5,6,15,8,0,0,0,0,11,16,13,2,0,0,5 +0,0,3,11,14,11,8,4,0,0,3,8,8,10,16,6,0,0,0,0,0,13,9,0,0,0,0,3,11,16,9,0,0,0,0,11,16,8,3,0,0,0,0,10,13,0,0,0,0,0,1,16,3,0,0,0,0,0,5,11,0,0,0,0,7 
+0,0,10,16,16,14,1,0,0,0,15,9,8,8,2,0,0,4,16,0,0,0,0,0,0,6,16,15,14,0,0,0,0,5,13,12,16,3,0,0,0,0,0,0,13,7,0,0,0,0,9,8,16,7,0,0,0,0,11,16,15,0,0,0,5 +0,0,0,0,13,9,0,0,0,0,0,10,16,16,1,0,0,1,12,16,16,13,0,0,0,7,16,16,16,15,0,0,0,0,7,12,16,13,0,0,0,0,0,4,16,16,0,0,0,0,0,3,16,16,3,0,0,0,0,0,13,16,6,0,1 +0,0,7,15,16,9,0,0,0,1,16,12,8,7,0,0,0,0,14,3,0,0,0,0,0,0,14,16,13,1,0,0,0,0,12,16,15,7,0,0,0,0,0,0,9,9,0,0,0,0,4,5,14,9,0,0,0,0,6,16,15,3,0,0,5 +0,0,8,12,12,14,8,0,0,2,11,12,12,16,12,0,0,0,0,0,3,16,5,0,0,0,3,7,15,13,0,0,0,0,15,16,16,16,4,0,0,0,1,15,9,0,0,0,0,0,10,14,0,0,0,0,0,0,10,11,0,0,0,0,7 +0,0,6,15,10,0,0,0,0,4,16,15,15,5,0,0,0,10,11,1,12,4,0,0,0,11,9,0,15,3,0,0,0,4,8,1,14,4,0,0,0,0,0,8,16,2,1,0,0,0,5,16,16,15,15,2,0,0,8,16,14,11,13,1,2 +0,0,9,15,16,8,0,0,0,0,7,7,9,15,1,0,0,0,0,0,9,12,0,0,0,0,5,13,16,13,3,0,0,0,1,11,14,4,2,0,0,0,0,15,4,0,0,0,0,0,6,16,3,0,0,0,0,0,10,12,0,0,0,0,7 +0,0,5,14,16,13,1,0,0,0,15,14,9,10,2,0,0,1,16,5,0,0,0,0,0,4,16,16,5,0,0,0,0,2,8,12,12,0,0,0,0,0,0,6,15,0,0,0,0,2,13,14,11,0,0,0,0,1,10,12,3,0,0,0,5 +0,0,1,8,12,3,0,0,0,0,9,16,16,12,0,0,0,0,11,16,7,11,0,0,0,0,14,11,0,7,2,0,0,0,14,8,0,9,4,0,0,0,16,7,2,13,1,0,0,0,11,15,11,15,0,0,0,0,1,7,9,1,0,0,0 +0,0,6,16,13,0,0,0,0,7,16,14,15,10,0,0,0,12,16,3,7,16,1,0,0,11,13,0,7,16,1,0,0,0,0,0,14,14,0,0,0,0,2,9,16,10,5,0,0,0,11,16,16,16,16,3,0,0,6,16,11,8,8,2,2 +0,0,0,8,14,4,0,0,0,0,5,16,10,0,0,0,0,0,12,13,1,0,0,0,0,1,16,7,1,0,0,0,0,4,16,16,16,12,1,0,0,2,16,14,9,14,5,0,0,0,12,14,6,16,5,0,0,0,1,8,13,9,2,0,6 +0,0,6,13,16,7,0,0,0,5,16,16,12,14,1,0,0,9,16,11,0,16,4,0,0,6,16,4,0,13,7,0,0,8,13,0,0,12,7,0,0,4,16,0,0,11,8,0,0,2,15,6,9,16,5,0,0,0,6,16,16,11,0,0,0 +0,0,1,9,10,1,0,0,0,0,5,16,16,8,0,0,0,0,6,16,16,10,0,0,0,0,7,16,16,10,0,0,0,0,2,16,16,7,0,0,0,0,1,16,16,10,0,0,0,0,8,16,16,15,1,0,0,0,1,6,9,9,2,0,1 +0,0,13,16,16,15,0,0,0,4,16,13,7,3,0,0,0,5,16,11,1,0,0,0,0,7,16,16,12,0,0,0,0,3,9,6,16,1,0,0,0,0,0,3,16,4,0,0,0,0,7,10,16,3,0,0,0,1,13,16,14,0,0,0,5 +0,0,3,14,10,0,0,0,0,4,15,12,15,2,0,0,0,11,15,1,11,5,0,0,0,9,12,0,10,7,0,0,0,1,1,0,15,7,0,0,0,0,0,7,16,6,2,0,0,0,1,16,16,16,16,3,0,0,2,12,11,8,8,3,2 +0,0,10,16,8,0,0,0,0,3,16,14,14,2,0,0,0,6,13,6,16,2,0,0,0,2,7,8,16,1,0,0,0,0,2,16,10,0,0,0,0,1,12,16,5,2,2,0,0,4,16,16,16,15,14,1,0,0,9,12,11,12,13,1,2 +0,1,13,16,16,10,0,0,0,2,16,13,9,11,2,0,0,5,16,4,0,0,0,0,0,7,16,16,12,0,0,0,0,1,7,7,16,0,0,0,0,0,0,3,16,3,0,0,0,0,14,14,15,3,0,0,0,0,9,12,7,0,0,0,5 +0,0,6,15,6,0,0,0,0,4,16,14,15,2,0,0,0,4,16,7,13,12,0,0,0,3,16,16,16,16,3,0,0,0,5,11,10,14,10,0,0,0,0,0,0,12,10,0,0,0,0,2,12,16,8,0,0,0,5,15,16,11,1,0,9 +0,0,11,16,14,5,0,0,0,0,15,10,8,8,0,0,0,0,12,7,0,0,0,0,0,0,16,16,9,0,0,0,0,0,15,11,15,1,0,0,0,0,0,0,8,5,0,0,0,0,2,3,11,7,0,0,0,0,12,16,15,3,0,0,5 +0,0,8,15,10,0,0,0,0,3,16,16,15,1,0,0,0,9,13,1,16,1,0,0,0,7,11,2,16,1,0,0,0,0,0,9,14,0,0,0,0,0,1,16,9,2,4,0,0,0,13,16,16,16,16,0,0,0,11,16,9,8,9,0,2 +0,0,0,7,14,2,0,0,0,0,5,16,16,4,0,0,0,0,13,16,7,0,0,0,0,0,14,13,0,0,0,0,0,0,16,16,16,5,2,0,0,0,14,16,15,13,15,1,0,0,8,16,11,14,16,1,0,0,0,4,12,14,7,0,6 +0,0,0,5,12,5,0,0,0,0,2,16,14,7,0,0,0,0,8,16,2,0,0,0,0,0,14,7,0,0,0,0,0,0,15,14,16,16,5,0,0,1,16,16,12,6,14,0,0,0,10,16,7,7,16,1,0,0,1,8,14,16,10,1,6 +0,0,6,15,16,13,1,0,0,3,16,16,16,16,6,0,0,7,16,8,12,14,1,0,0,0,12,16,14,1,0,0,0,0,11,14,16,6,0,0,0,0,14,0,2,15,3,0,0,0,16,2,3,13,8,0,0,0,6,14,16,14,2,0,8 +0,0,1,14,15,2,0,0,0,2,12,16,16,5,0,0,5,16,16,16,16,2,0,0,1,8,5,11,14,0,0,0,0,0,0,10,16,1,0,0,0,0,0,8,15,0,0,0,0,0,0,12,14,0,0,0,0,0,0,14,16,1,0,0,1 
+0,0,7,16,12,1,0,0,0,0,9,12,9,11,0,0,0,0,5,13,10,16,3,0,0,0,0,9,16,16,9,0,0,0,0,0,0,6,13,0,0,0,0,0,0,3,15,0,0,0,0,0,0,5,16,0,0,0,6,16,16,16,12,0,9 +0,0,6,15,13,2,0,0,0,0,11,16,13,12,0,0,0,0,8,13,6,16,2,0,0,0,0,2,2,16,5,0,0,0,0,0,4,16,4,0,0,0,0,0,10,16,3,0,0,0,2,11,16,15,8,1,0,0,6,14,16,16,16,14,2 +0,1,10,14,15,6,0,0,0,6,13,1,4,9,0,0,0,3,14,2,1,9,3,0,0,0,7,15,16,16,3,0,0,0,0,0,6,10,0,0,0,0,0,0,4,11,0,0,0,0,2,0,4,9,0,0,0,0,8,14,15,9,0,0,9 +0,0,0,12,14,0,0,0,0,0,4,16,14,1,0,0,0,0,10,15,2,0,0,0,0,0,14,14,1,0,0,0,0,0,14,16,15,5,0,0,0,0,10,11,4,14,1,0,0,0,6,12,4,14,7,0,0,0,0,9,16,16,6,0,6 +0,0,0,9,10,2,0,0,0,0,5,16,16,2,0,0,0,4,14,16,16,4,0,0,0,8,11,10,16,6,0,0,0,0,0,7,16,3,0,0,0,0,0,10,16,2,0,0,0,0,0,12,11,0,0,0,0,0,0,9,13,0,0,0,1 +0,0,14,4,4,0,0,0,0,9,16,16,16,9,0,0,0,3,11,5,14,15,0,0,0,0,0,14,16,10,0,0,0,0,0,9,16,16,6,0,0,0,1,0,0,7,16,0,0,2,15,4,5,12,15,0,0,0,10,16,16,15,3,0,3 +0,0,15,16,16,15,2,0,0,0,5,8,11,16,3,0,0,0,0,0,15,8,0,0,0,3,8,8,16,7,2,0,0,6,15,16,16,16,8,0,0,0,2,15,7,3,0,0,0,0,11,13,0,0,0,0,0,0,16,7,0,0,0,0,7 +0,0,5,14,15,4,0,0,0,1,16,2,3,13,2,0,0,7,12,4,0,5,4,0,0,0,13,3,0,1,7,0,0,2,12,0,0,1,8,0,0,0,13,0,0,2,12,0,0,0,14,1,0,13,4,0,0,0,6,16,15,9,1,0,0 +0,0,8,12,11,1,0,0,0,0,13,12,6,12,0,0,0,1,14,4,0,12,0,0,0,2,16,1,0,5,4,0,0,5,11,0,0,8,4,0,0,4,12,0,0,9,5,0,0,2,15,6,8,14,0,0,0,0,5,15,12,3,0,0,0 +0,0,5,13,14,12,1,0,0,0,9,16,7,7,7,0,0,0,16,4,0,1,8,0,0,4,12,0,0,4,8,0,0,4,8,0,0,6,5,0,0,5,8,0,0,11,2,0,0,1,14,5,11,8,0,0,0,0,4,14,11,0,0,0,0 +0,0,9,15,16,4,0,0,0,0,8,7,6,16,0,0,0,0,0,0,10,12,0,0,0,0,3,15,15,1,0,0,0,0,1,9,12,11,2,0,0,0,0,0,0,5,10,0,0,0,12,4,4,6,15,0,0,0,5,11,12,12,6,0,3 +0,0,12,16,15,6,0,0,0,0,15,13,11,16,3,0,0,0,1,7,5,16,5,0,0,0,1,13,16,11,0,0,0,0,1,10,15,15,1,0,0,0,0,0,0,13,11,0,0,2,11,4,4,14,8,0,0,0,11,16,16,14,1,0,3 +0,3,16,16,16,16,8,0,0,0,5,8,12,16,6,0,0,0,0,0,13,12,0,0,0,8,12,12,16,10,1,0,0,8,16,16,16,15,1,0,0,0,8,15,3,0,0,0,0,2,15,8,0,0,0,0,0,2,15,2,0,0,0,0,7 +0,0,0,4,16,5,0,0,0,0,1,12,16,6,0,0,0,5,14,16,16,1,0,0,0,13,16,14,16,3,0,0,0,1,2,9,16,4,0,0,0,0,0,5,16,4,0,0,0,0,0,4,16,6,0,0,0,0,0,5,16,9,0,0,1 +0,0,0,0,15,8,0,0,0,0,0,2,16,8,0,0,0,0,0,11,16,3,0,0,0,0,5,16,16,3,0,0,0,9,16,11,16,1,0,0,0,1,7,1,16,2,0,0,0,0,0,1,16,5,0,0,0,0,0,0,13,8,0,0,1 +0,0,13,16,16,12,0,0,0,0,9,12,8,7,0,0,0,0,11,16,7,0,0,0,0,0,11,15,14,3,0,0,0,0,1,2,6,11,0,0,0,0,0,0,3,14,0,0,0,0,0,0,9,12,0,0,0,0,12,16,16,6,0,0,5 +0,0,8,16,16,7,0,0,0,2,16,14,7,16,0,0,0,3,15,1,5,16,1,0,0,1,14,13,13,16,3,0,0,0,1,8,8,14,6,0,0,0,0,0,0,10,10,0,0,0,0,1,2,15,9,0,0,0,10,16,16,16,4,0,9 +0,0,0,8,11,0,0,0,0,0,3,16,6,0,0,0,0,0,9,10,0,0,0,0,0,0,11,4,1,0,0,0,0,0,13,15,16,14,4,0,0,0,13,10,4,4,14,1,0,0,6,10,2,6,16,2,0,0,1,8,13,14,5,0,6 +0,0,0,9,14,1,0,0,0,0,2,16,11,0,0,0,0,0,7,16,5,0,0,0,0,0,11,14,0,0,0,0,0,0,14,10,4,0,0,0,0,0,16,16,16,13,2,0,0,0,9,15,6,12,10,0,0,0,0,8,16,16,12,0,6 +0,0,0,3,16,9,0,0,0,0,0,8,16,10,0,0,0,0,5,15,16,11,0,0,0,5,16,16,16,12,0,0,0,7,12,3,16,11,0,0,0,0,0,0,15,9,0,0,0,0,0,3,16,9,0,0,0,0,0,4,16,4,0,0,1 +0,0,4,10,14,3,0,0,0,0,16,11,10,12,0,0,0,4,12,0,0,10,3,0,0,5,11,0,0,7,4,0,0,6,8,0,0,10,4,0,0,3,9,0,0,10,3,0,0,0,13,9,11,13,0,0,0,0,2,14,12,3,0,0,0 +0,2,16,14,12,9,2,0,0,0,6,12,16,16,12,0,0,1,1,1,15,11,2,0,0,10,13,12,16,7,0,0,0,3,14,16,16,16,5,0,0,0,10,14,6,6,1,0,0,2,16,8,0,0,0,0,0,2,16,4,0,0,0,0,7 +0,0,3,12,13,3,0,0,0,0,12,14,7,12,0,0,0,1,16,1,0,14,0,0,0,0,14,0,0,15,0,0,0,0,1,0,4,12,0,0,0,0,0,0,14,8,0,0,0,0,2,14,16,12,14,2,0,0,7,16,12,12,12,3,2 +0,0,8,16,15,5,0,0,0,7,16,14,5,15,2,0,0,10,16,8,0,9,9,0,0,10,16,3,0,4,12,0,0,9,14,0,0,6,12,0,0,7,13,0,0,14,7,0,0,2,16,8,12,14,1,0,0,0,10,16,13,3,0,0,0 
+0,0,0,11,13,0,0,0,0,0,3,15,9,0,0,0,0,0,10,13,1,5,11,0,0,5,16,4,3,14,11,0,0,10,16,9,15,15,3,0,0,9,16,13,16,9,0,0,0,0,0,6,16,2,0,0,0,0,0,14,9,0,0,0,4 +0,2,11,11,16,16,4,0,0,3,16,12,10,8,0,0,0,7,16,11,1,0,0,0,0,11,16,16,13,0,0,0,0,1,4,0,16,4,0,0,0,0,0,0,12,8,0,0,0,0,4,4,16,5,0,0,0,1,15,16,13,1,0,0,5 +0,2,6,16,11,0,0,0,0,9,16,8,10,10,0,0,0,7,11,0,0,11,1,0,0,8,13,0,0,7,5,0,0,6,13,0,0,8,7,0,0,2,14,0,0,14,4,0,0,0,12,7,12,12,0,0,0,0,2,13,14,4,0,0,0 +0,0,6,16,16,13,1,0,0,0,11,13,7,16,6,0,0,0,1,3,0,16,7,0,0,0,0,2,8,15,2,0,0,0,8,16,16,8,0,0,0,0,0,5,11,15,6,0,0,0,0,0,1,14,12,0,0,0,9,13,16,15,3,0,3 +0,0,0,12,15,2,0,0,0,0,6,16,11,1,0,0,0,0,14,14,0,0,0,0,0,0,16,11,3,0,0,0,0,2,16,16,16,7,0,0,0,0,16,13,4,14,5,0,0,0,10,14,6,15,10,0,0,0,0,9,15,16,5,0,6 +0,0,3,8,14,9,0,0,0,0,11,14,6,13,3,0,0,3,16,3,0,6,6,0,0,6,15,0,0,8,5,0,0,4,12,0,0,11,1,0,0,1,12,0,4,10,0,0,0,0,9,11,14,5,0,0,0,0,2,13,7,0,0,0,0 +0,0,8,16,16,10,0,0,0,1,15,15,9,15,1,0,0,0,14,3,0,15,5,0,0,0,0,0,0,15,6,0,0,0,0,0,5,16,4,0,0,0,0,0,12,14,0,0,0,0,8,15,16,6,4,1,0,0,10,16,16,16,16,9,2 +0,0,0,4,16,5,0,0,0,0,0,11,16,8,0,0,0,2,9,16,16,4,0,0,0,11,16,16,16,4,0,0,0,3,4,6,16,4,0,0,0,0,0,4,16,3,0,0,0,0,0,6,16,6,0,0,0,0,0,6,16,2,0,0,1 +0,0,0,4,12,1,0,0,0,0,2,16,11,3,0,0,0,0,7,15,1,0,0,0,0,0,11,11,4,2,0,0,0,0,13,16,13,15,4,0,0,0,13,7,0,0,15,1,0,0,5,13,2,3,15,1,0,0,0,6,14,15,5,0,6 +0,0,0,10,16,2,0,0,0,0,4,16,13,1,0,0,0,0,10,16,2,0,0,0,0,0,13,10,0,0,0,0,0,0,16,15,10,0,0,0,0,0,14,16,13,10,0,0,0,0,11,16,8,16,1,0,0,0,1,10,16,13,0,0,6 +0,0,0,10,13,0,0,0,0,0,3,16,10,0,0,0,0,0,6,15,1,0,0,0,0,0,10,13,0,0,0,0,0,0,8,16,15,7,0,0,0,0,11,15,12,13,7,0,0,0,5,15,5,7,16,1,0,0,0,6,15,16,15,1,6 +0,0,5,16,16,8,0,0,0,0,6,14,10,16,1,0,0,0,0,4,13,16,1,0,0,0,6,16,16,5,0,0,0,0,1,9,15,10,0,0,0,0,6,0,3,15,5,0,0,0,14,8,11,16,7,0,0,0,5,15,16,13,1,0,3 +0,0,0,8,16,5,0,0,0,0,0,13,16,7,0,0,0,0,5,16,8,0,0,0,0,0,7,16,4,0,0,0,0,0,9,15,8,8,5,0,0,0,15,16,16,16,16,4,0,0,5,15,10,8,16,5,0,0,0,6,15,16,14,0,6 +0,1,7,15,13,6,0,0,0,7,13,2,4,15,0,0,0,4,12,6,13,4,0,0,0,0,10,16,8,0,0,0,0,0,14,12,6,0,0,0,0,2,12,0,12,3,0,0,0,0,14,1,6,8,0,0,0,0,4,13,13,5,0,0,8 +0,0,5,12,14,3,0,0,0,0,9,10,7,12,0,0,0,0,0,0,6,15,0,0,0,0,1,5,15,7,0,0,0,0,2,14,16,1,0,0,0,0,0,0,8,13,1,0,0,0,4,1,1,10,8,0,0,0,7,13,16,13,7,0,3 +0,3,15,16,16,16,4,0,0,1,10,8,12,16,0,0,0,0,0,0,14,10,0,0,0,5,16,13,16,14,9,0,0,3,12,16,13,8,2,0,0,0,9,14,1,0,0,0,0,2,16,7,0,0,0,0,0,5,13,1,0,0,0,0,7 +0,0,0,13,8,0,0,0,0,0,11,13,3,0,0,0,0,2,16,5,0,4,2,0,0,7,14,1,5,16,5,0,0,6,15,12,16,11,0,0,0,0,6,11,16,1,0,0,0,0,0,8,13,0,0,0,0,0,0,15,7,0,0,0,4 +0,0,6,16,14,5,0,0,0,0,9,15,12,15,0,0,0,0,0,6,2,16,5,0,0,0,0,0,6,16,3,0,0,0,0,0,13,15,0,0,0,0,1,9,16,8,0,0,0,0,11,16,16,12,5,0,0,0,6,16,16,16,14,0,2 +0,7,16,16,16,16,3,0,0,3,12,12,13,15,1,0,0,0,0,2,14,6,0,0,0,3,8,11,15,8,5,0,0,5,16,16,14,12,6,0,0,0,7,13,0,0,0,0,0,2,16,3,0,0,0,0,0,7,11,0,0,0,0,0,7 +0,0,5,16,16,7,0,0,0,0,6,15,8,15,3,0,0,0,0,0,0,15,6,0,0,0,0,1,10,16,5,0,0,0,0,10,16,8,0,0,0,0,4,1,13,13,0,0,0,3,15,5,7,16,0,0,0,0,2,15,16,13,1,0,3 +0,0,1,15,10,0,0,0,0,0,11,15,3,0,0,0,0,5,16,7,0,9,5,0,0,11,13,0,8,16,11,0,0,11,15,12,16,12,0,0,0,3,11,16,16,1,0,0,0,0,0,13,12,0,0,0,0,0,0,15,8,0,0,0,4 +0,0,5,16,0,0,0,0,0,0,12,9,2,13,1,0,0,4,16,1,8,15,0,0,0,8,13,1,14,15,11,0,0,7,16,16,16,9,2,0,0,1,8,14,13,0,0,0,0,0,1,15,7,0,0,0,0,0,4,16,0,0,0,0,4 +0,0,0,4,15,12,0,0,0,0,0,10,16,14,0,0,0,2,9,16,16,13,0,0,0,14,16,13,14,14,0,0,0,5,12,0,11,16,0,0,0,0,0,0,13,14,0,0,0,0,0,1,16,11,0,0,0,0,0,4,16,7,0,0,1 +0,0,6,15,12,2,0,0,0,3,16,5,8,12,0,0,0,3,14,4,5,16,0,0,0,0,6,12,12,14,1,0,0,0,0,0,0,6,6,0,0,0,0,0,0,3,9,0,0,0,2,1,1,8,10,0,0,0,5,11,15,14,7,0,9 
+0,0,0,12,13,1,0,0,0,0,1,14,16,3,0,0,0,2,13,16,16,2,0,0,0,15,16,16,16,3,0,0,0,5,3,10,16,2,0,0,0,0,0,6,16,5,0,0,0,0,0,9,16,5,0,0,0,0,0,10,16,3,0,0,1 +0,4,16,16,16,16,6,0,0,3,11,8,14,15,2,0,0,0,0,0,15,7,0,0,0,0,2,7,16,8,3,0,0,7,16,16,16,16,11,0,0,1,9,16,3,0,0,0,0,0,13,13,0,0,0,0,0,6,15,4,0,0,0,0,7 +0,2,16,16,16,16,5,0,0,0,4,8,10,16,6,0,0,0,0,0,10,9,0,0,0,3,4,4,15,5,3,0,0,9,16,16,16,16,8,0,0,1,12,14,5,2,0,0,0,1,14,6,0,0,0,0,0,4,14,0,0,0,0,0,7 +0,1,14,16,6,0,0,0,0,4,15,10,13,0,0,0,0,1,3,4,15,0,0,0,0,0,0,2,15,0,0,0,0,0,0,9,9,0,0,0,0,0,1,15,4,0,0,0,0,1,13,14,7,2,0,0,0,3,15,16,16,16,13,1,2 +0,0,0,1,13,2,0,0,0,0,0,8,16,4,0,0,0,0,0,16,8,0,0,0,0,0,3,16,2,0,0,0,0,0,13,16,11,5,0,0,0,1,15,12,6,12,4,0,0,0,1,13,8,12,10,0,0,0,0,2,12,13,8,0,6 +0,1,12,14,16,11,0,0,0,3,15,11,13,16,2,0,0,0,2,11,16,5,0,0,0,0,8,16,16,6,0,0,0,0,0,3,12,16,3,0,0,0,0,0,0,12,15,0,0,3,10,1,3,14,15,0,0,3,11,16,16,16,4,0,3 +0,0,11,12,16,15,2,0,0,0,14,13,4,4,0,0,0,0,14,9,0,0,0,0,0,1,16,16,11,1,0,0,0,0,3,4,14,7,0,0,0,0,0,0,6,12,0,0,0,2,9,2,14,11,0,0,0,0,15,16,15,3,0,0,5 +0,3,16,16,16,16,0,0,0,0,13,12,8,4,0,0,0,0,12,12,0,0,0,0,0,0,16,16,8,0,0,0,0,0,11,5,14,7,0,0,0,0,0,0,8,16,0,0,0,2,11,4,14,13,0,0,0,2,15,16,15,2,0,0,5 +0,0,9,15,16,6,0,0,0,7,16,12,11,14,1,0,0,7,16,4,9,16,5,0,0,0,11,16,16,16,3,0,0,0,0,3,8,16,2,0,0,0,0,0,3,16,4,0,0,0,9,2,5,16,3,0,0,0,10,16,16,13,1,0,9 +0,2,13,16,14,3,0,0,0,11,15,8,14,14,0,0,0,9,6,0,6,15,2,0,0,0,0,0,11,13,0,0,0,0,0,2,16,6,0,0,0,0,1,12,13,0,0,0,0,1,12,16,8,4,4,0,0,2,15,16,16,16,16,5,2 +0,0,9,16,16,8,0,0,0,0,15,13,12,12,0,0,0,0,11,2,13,11,0,0,0,0,0,4,16,8,0,0,0,0,5,15,10,0,0,0,0,3,16,13,0,0,0,0,0,7,16,10,4,4,7,0,0,0,8,16,16,16,14,0,2 +0,3,16,14,12,13,7,0,0,0,7,8,13,15,5,0,0,0,0,3,15,4,0,0,0,0,0,9,10,0,0,0,0,9,14,16,13,12,9,0,0,3,13,13,9,8,5,0,0,2,16,2,0,0,0,0,0,5,14,0,0,0,0,0,7 +0,0,0,14,6,0,0,0,0,0,4,16,6,0,0,0,0,0,8,15,0,0,0,0,0,0,12,11,4,3,0,0,0,0,15,16,16,16,10,0,0,0,16,12,4,3,12,6,0,0,9,13,1,0,11,12,0,0,0,9,15,16,16,7,6 +0,0,0,15,2,0,0,0,0,0,10,12,0,0,0,0,0,5,15,1,0,6,6,0,0,12,9,0,3,16,11,0,0,6,16,12,14,13,1,0,0,0,3,11,16,2,0,0,0,0,0,10,12,0,0,0,0,0,0,15,7,0,0,0,4 +0,0,5,16,16,10,0,0,0,0,14,12,10,13,0,0,0,0,11,12,15,10,0,0,0,0,3,16,14,1,0,0,0,0,7,16,15,0,0,0,0,0,16,9,14,6,0,0,0,1,16,4,10,12,0,0,0,0,5,16,16,8,0,0,8 +0,5,15,15,3,0,0,0,0,12,14,13,12,0,0,0,0,7,9,3,16,2,0,0,0,0,0,0,16,4,0,0,0,0,0,5,16,0,0,0,0,0,0,9,12,0,0,0,0,1,8,16,12,4,1,0,0,8,16,16,16,16,13,0,2 +0,0,7,10,10,2,0,0,0,0,12,16,16,4,0,0,0,0,5,16,16,4,0,0,0,0,4,16,16,5,0,0,0,0,1,15,16,7,0,0,0,0,4,16,16,12,0,0,0,0,9,16,16,12,0,0,0,0,6,12,12,11,0,0,1 +0,7,16,13,2,0,0,0,0,8,14,12,14,0,0,0,0,1,2,4,16,0,0,0,0,0,0,0,16,4,0,0,0,0,0,5,16,2,0,0,0,0,0,11,15,0,0,0,0,1,11,16,16,13,11,0,0,4,16,16,12,12,9,0,2 +0,0,8,15,16,8,0,0,0,5,16,12,11,16,0,0,0,3,8,1,12,13,0,0,0,0,0,11,16,13,1,0,0,0,0,5,8,15,9,0,0,0,2,1,0,9,11,0,0,0,14,12,5,15,7,0,0,0,9,16,16,13,1,0,3 +0,0,8,16,12,2,0,0,0,0,13,4,6,12,0,0,0,3,15,1,3,13,0,0,0,6,14,10,4,16,1,0,0,0,8,12,11,13,4,0,0,0,0,0,0,6,8,0,0,0,6,4,0,6,7,0,0,0,6,14,14,15,1,0,9 +0,0,5,16,16,6,0,0,0,1,15,13,10,15,1,0,0,5,16,9,0,12,7,0,0,8,11,0,0,7,12,0,0,12,9,0,0,4,12,0,0,9,13,0,0,6,13,0,0,1,15,10,4,11,11,0,0,0,6,16,16,13,1,0,0 +0,0,3,11,9,3,0,0,0,0,8,16,16,15,1,0,0,0,1,16,16,16,0,0,0,0,2,16,16,15,2,0,0,0,4,16,16,16,3,0,0,0,6,16,16,16,0,0,0,0,9,16,16,14,0,0,0,0,7,12,11,5,0,0,1 +0,3,16,16,7,0,0,0,0,4,16,11,16,5,0,0,0,0,3,1,12,12,0,0,0,0,0,0,10,12,0,0,0,0,0,0,10,11,0,0,0,0,0,3,15,8,0,0,0,0,4,14,16,11,6,0,0,1,16,16,16,14,16,5,2 
+0,1,12,12,13,13,4,0,0,3,14,6,4,2,1,0,0,5,11,6,2,0,0,0,0,6,16,12,15,4,0,0,0,1,1,0,5,14,0,0,0,0,0,0,2,16,0,0,0,0,4,3,7,14,0,0,0,2,14,16,14,3,0,0,5 +0,2,11,16,15,4,0,0,0,11,15,8,15,10,0,0,0,3,3,8,16,7,0,0,0,0,4,16,16,9,0,0,0,0,0,4,7,16,4,0,0,0,0,0,0,13,11,0,0,2,15,6,1,14,10,0,0,2,16,16,16,13,3,0,3 +0,0,9,16,10,1,0,0,0,9,16,9,16,6,0,0,0,16,9,3,16,5,0,0,0,3,6,12,16,10,0,0,0,0,1,10,9,15,8,0,0,0,0,0,0,9,15,0,0,0,3,9,2,13,12,0,0,0,7,16,16,15,4,0,3 +0,0,2,10,13,3,0,0,0,1,13,7,7,13,0,0,0,7,7,0,2,16,1,0,0,2,11,2,0,13,4,0,0,0,6,15,13,16,8,0,0,0,0,0,0,8,8,0,0,0,0,10,4,9,7,0,0,0,0,11,16,14,2,0,9 +0,0,0,1,14,7,0,0,0,0,0,5,16,1,0,0,0,0,2,14,7,0,0,0,0,0,10,15,1,6,12,0,0,3,15,3,1,14,6,0,1,15,10,4,10,15,0,0,10,16,16,16,16,12,0,0,0,2,2,0,16,2,0,0,4 +0,0,1,12,7,0,0,0,0,0,10,13,0,0,0,0,0,2,15,3,0,0,0,0,0,4,15,0,3,0,0,0,0,4,14,15,16,15,2,0,0,0,16,11,2,6,12,0,0,0,13,10,1,5,13,0,0,0,2,10,16,16,6,0,6 +0,0,0,9,15,10,0,0,0,0,6,10,4,14,4,0,0,0,12,2,0,12,5,0,0,5,15,3,6,13,2,0,0,0,5,16,14,1,0,0,0,0,2,14,12,5,0,0,0,0,0,13,1,13,3,0,0,0,0,9,13,13,2,0,8 +0,1,12,16,15,3,0,0,0,11,13,5,13,8,0,0,0,3,1,8,16,5,0,0,0,0,3,16,16,13,1,0,0,0,0,0,1,14,6,0,0,0,1,0,0,9,11,0,0,2,15,1,0,13,9,0,0,0,13,16,15,15,2,0,3 +0,0,8,12,13,14,4,0,0,0,15,8,2,4,1,0,0,1,16,5,2,0,0,0,0,4,12,13,14,8,0,0,0,0,0,0,1,12,4,0,0,0,0,0,0,8,5,0,0,0,6,3,2,14,2,0,0,0,9,16,14,5,0,0,5 +0,0,6,12,0,0,0,0,0,0,12,9,0,0,0,0,0,0,15,3,0,0,0,0,0,1,16,1,0,0,0,0,0,3,16,15,10,4,2,0,0,2,16,9,0,6,12,0,0,0,13,10,0,6,15,0,0,0,6,16,16,15,7,0,6 +0,0,0,1,12,10,0,0,0,0,0,1,16,7,0,0,0,0,0,12,12,0,4,0,0,0,5,15,3,7,13,0,0,2,15,5,1,15,6,0,0,12,14,8,11,16,5,0,1,15,16,15,15,14,2,0,0,0,0,0,12,10,0,0,4 +0,0,0,11,6,0,0,0,0,0,5,16,3,0,0,0,0,0,10,14,0,0,0,0,0,0,14,8,0,0,0,0,0,0,16,15,16,14,5,0,0,0,15,9,0,4,14,0,0,0,9,15,0,3,16,2,0,0,0,9,14,12,7,0,6 +0,0,12,15,16,8,0,0,0,0,11,13,0,2,0,0,0,0,11,6,1,0,0,0,0,2,16,16,15,8,0,0,0,0,3,0,0,11,2,0,0,0,0,0,0,8,8,0,0,4,5,0,1,12,2,0,0,1,11,15,16,8,0,0,5 +0,0,0,10,2,0,0,0,0,0,2,16,5,0,0,0,0,0,6,11,0,0,0,0,0,0,12,13,7,1,0,0,0,1,16,14,12,14,2,0,0,2,16,10,0,6,13,0,0,0,10,14,2,1,16,2,0,0,1,10,16,15,14,2,6 +0,0,6,12,9,4,0,0,0,0,2,14,16,14,0,0,0,0,0,12,16,16,0,0,0,0,0,15,16,13,0,0,0,0,0,16,16,12,0,0,0,0,5,16,16,5,0,0,0,0,7,16,16,5,0,0,0,0,9,12,11,2,0,0,1 +0,0,3,13,0,0,0,0,0,0,11,9,0,0,0,0,0,0,14,6,0,0,0,0,0,0,16,1,0,0,0,0,0,2,16,15,16,8,0,0,0,1,16,13,5,11,10,0,0,0,13,11,0,2,16,3,0,0,2,15,16,16,13,1,6 +0,0,13,16,15,13,4,0,0,0,11,14,4,2,1,0,0,0,6,12,15,7,0,0,0,0,0,0,1,11,2,0,0,0,0,0,0,5,7,0,0,1,3,0,0,5,8,0,0,7,9,2,4,13,4,0,0,1,10,14,14,7,0,0,5 +0,0,9,12,12,12,7,0,0,0,10,12,4,4,2,0,0,0,14,9,5,0,0,0,0,3,16,15,14,12,0,0,0,3,9,0,0,15,5,0,0,0,0,0,0,10,7,0,0,0,7,6,1,15,2,0,0,0,8,16,16,10,0,0,5 +0,0,3,10,15,13,2,0,0,3,14,12,9,12,7,0,0,4,14,4,2,14,2,0,0,0,5,15,14,8,0,0,0,0,3,15,12,13,1,0,0,0,10,8,0,12,7,0,0,0,11,7,0,9,8,0,0,0,3,11,15,14,2,0,8 +0,0,9,16,16,12,14,0,0,0,4,8,11,14,13,0,0,0,0,0,3,15,2,0,0,0,0,6,13,15,8,0,0,0,6,16,15,8,2,0,0,0,1,14,9,0,0,0,0,0,4,16,1,0,0,0,0,0,12,9,0,0,0,0,7 +0,0,13,15,16,16,3,0,0,1,14,4,4,0,0,0,0,2,14,11,7,1,0,0,0,2,15,9,13,11,0,0,0,0,0,0,0,15,2,0,0,0,0,0,0,14,3,0,0,3,13,4,6,14,0,0,0,0,13,16,12,3,0,0,5 +0,0,12,16,12,1,0,0,0,8,15,10,15,6,0,0,0,3,6,1,15,3,0,0,0,0,0,10,16,9,1,0,0,0,2,14,12,16,6,0,0,0,4,0,0,10,9,0,0,0,16,8,6,15,7,0,0,0,10,16,16,13,1,0,3 +0,0,0,0,12,7,0,0,0,0,0,2,16,4,0,0,0,0,0,12,13,9,3,0,0,0,8,15,4,16,1,0,0,1,15,8,4,16,0,0,0,14,14,8,11,15,1,0,0,9,16,16,16,16,4,0,0,0,0,0,13,6,0,0,4 +0,0,10,16,15,14,8,0,0,0,7,9,8,14,13,0,0,0,0,0,3,15,2,0,0,0,0,6,13,13,6,0,0,0,1,16,15,12,4,0,0,0,0,12,6,0,0,0,0,0,6,13,1,0,0,0,0,0,12,6,0,0,0,0,7 
+0,0,6,13,15,6,0,0,0,1,16,5,3,13,0,0,0,8,13,0,4,12,0,0,0,3,12,5,15,4,0,0,0,0,7,16,11,0,0,0,0,0,14,7,12,7,0,0,0,0,13,1,1,16,2,0,0,0,4,13,13,10,1,0,8 +0,3,15,15,3,0,0,0,0,6,16,10,14,2,0,0,0,0,4,0,16,5,0,0,0,0,0,0,15,7,0,0,0,0,0,3,16,2,0,0,0,0,0,8,14,0,0,0,0,0,10,16,7,6,7,1,0,2,16,16,16,16,13,2,2 +0,0,0,13,6,0,0,0,0,0,4,15,1,0,0,0,0,0,11,9,0,0,0,0,0,1,15,6,0,0,0,0,0,1,16,12,16,12,3,0,0,0,15,16,5,6,14,0,0,0,12,15,1,1,15,2,0,0,1,12,15,15,10,0,6 +0,0,4,12,9,0,0,0,0,0,8,15,9,9,0,0,0,0,15,6,0,12,2,0,0,3,12,0,0,8,7,0,0,7,11,0,0,5,8,0,0,6,12,0,0,7,8,0,0,3,16,7,5,15,4,0,0,0,5,13,13,8,0,0,0 +0,0,5,12,15,11,2,0,0,7,11,5,5,16,4,0,0,7,6,0,11,13,0,0,0,0,13,14,11,1,0,0,0,0,14,14,13,2,0,0,0,3,13,0,8,12,2,0,0,0,14,3,0,11,12,0,0,0,8,16,16,14,3,0,8 +0,0,6,12,8,4,0,0,0,0,6,16,16,10,0,0,0,0,2,16,16,14,0,0,0,0,0,14,16,16,1,0,0,0,1,14,16,13,0,0,0,0,7,16,16,10,0,0,0,0,8,16,16,7,0,0,0,0,6,12,12,11,0,0,1 +0,2,14,16,10,1,0,0,0,11,15,8,15,9,0,0,0,8,5,0,11,12,0,0,0,0,0,0,11,15,0,0,0,0,0,1,16,7,0,0,0,0,0,8,16,4,0,0,0,0,7,16,14,4,1,0,0,2,16,16,16,16,15,1,2 +0,0,7,16,16,10,0,0,0,0,13,16,16,7,0,0,0,0,9,16,16,14,1,0,0,0,7,16,16,13,0,0,0,0,2,16,16,10,0,0,0,0,6,16,16,10,0,0,0,0,12,16,16,6,0,0,0,0,8,16,16,10,0,0,1 +0,0,3,14,3,0,0,0,0,0,12,11,0,0,0,0,0,0,15,4,0,0,0,0,0,4,16,10,6,0,0,0,0,3,16,15,13,11,1,0,0,2,16,4,0,9,9,0,0,0,13,11,0,6,15,0,0,0,4,14,13,16,7,0,6 +0,0,5,12,14,5,0,0,0,0,6,16,16,13,1,0,0,0,0,16,16,16,0,0,0,0,0,13,16,16,1,0,0,0,0,14,16,10,0,0,0,0,2,16,16,8,0,0,0,0,7,16,16,5,0,0,0,0,9,16,16,7,0,0,1 +0,0,0,6,14,1,0,0,0,0,0,13,7,0,0,0,0,0,6,14,2,0,6,0,0,1,14,5,0,9,15,0,0,9,14,0,2,16,6,0,1,15,15,12,14,15,3,0,1,12,12,10,16,4,0,0,0,0,0,7,13,0,0,0,4 +0,0,2,10,16,13,0,0,0,2,15,9,1,15,3,0,0,8,10,0,6,14,0,0,0,6,14,12,14,3,0,0,0,2,16,13,11,0,0,0,0,7,10,0,11,8,0,0,0,0,14,3,0,13,1,0,0,0,2,14,16,14,0,0,8 +0,0,9,15,12,2,0,0,0,4,15,5,8,15,0,0,0,7,13,0,4,16,1,0,0,2,16,7,8,16,4,0,0,0,5,12,11,14,6,0,0,0,0,0,0,12,8,0,0,0,3,2,1,13,4,0,0,0,11,16,16,10,0,0,9 +0,1,14,15,2,0,0,0,0,2,13,12,12,0,0,0,0,0,0,4,16,3,0,0,0,0,0,1,16,6,0,0,0,0,0,5,16,0,0,0,0,0,0,8,12,0,0,0,0,0,5,16,10,8,7,1,0,1,15,16,16,14,12,2,2 +0,0,4,15,0,0,0,0,0,0,11,9,0,0,0,0,0,0,16,3,0,0,0,0,0,3,16,4,7,0,0,0,0,3,16,16,14,11,1,0,0,3,16,9,0,8,10,0,0,0,14,9,0,2,16,0,0,0,3,14,16,16,10,1,6 +0,2,12,13,15,16,7,0,0,2,16,6,0,0,1,0,0,1,16,12,8,0,0,0,0,2,12,6,11,13,0,0,0,0,0,0,0,11,6,0,0,1,1,0,0,8,8,0,0,6,10,0,3,15,2,0,0,2,15,16,12,5,0,0,5 +0,0,3,12,13,1,0,0,0,1,14,11,10,8,0,0,0,4,16,3,0,12,2,0,0,4,13,1,0,9,6,0,0,6,12,0,0,5,8,0,0,3,13,0,0,4,9,0,0,0,15,8,4,13,8,0,0,0,5,15,15,9,0,0,0 +0,0,7,16,16,4,0,0,0,5,15,14,8,13,0,0,0,8,12,0,0,12,6,0,0,8,8,0,0,6,12,0,0,12,8,0,0,4,12,0,0,8,10,0,0,7,11,0,0,2,16,7,6,14,7,0,0,0,6,15,15,8,0,0,0 +0,0,3,11,13,10,0,0,0,3,15,7,4,14,2,0,0,8,13,1,1,14,2,0,0,7,13,14,10,13,0,0,0,0,1,14,16,1,0,0,0,0,10,11,10,10,0,0,0,0,11,6,3,13,0,0,0,0,3,13,16,9,0,0,8 +0,0,0,4,15,2,0,0,0,0,0,9,12,0,1,2,0,0,1,15,4,0,12,8,0,1,12,9,0,5,15,1,0,8,13,0,0,12,10,0,4,16,13,12,14,15,1,0,7,14,12,12,16,6,0,0,0,0,0,6,14,0,0,0,4 +0,0,0,0,15,11,0,0,0,0,0,1,16,8,0,0,0,0,0,12,13,1,0,0,0,0,7,15,2,12,3,0,0,1,15,7,2,16,4,0,0,10,16,11,11,16,3,0,0,15,12,12,15,16,5,0,0,0,0,0,13,7,0,0,4 +0,0,14,16,16,15,5,0,0,0,13,8,4,5,2,0,0,0,13,6,2,0,0,0,0,0,16,13,14,2,0,0,0,0,0,0,5,11,0,0,0,0,0,0,0,16,0,0,0,4,11,1,5,12,0,0,0,1,11,16,15,3,0,0,5 +0,1,6,14,11,0,0,0,0,7,10,2,15,4,0,0,0,5,10,1,13,9,0,0,0,2,14,4,12,16,0,0,0,0,5,12,5,11,7,0,0,0,0,0,0,8,8,0,0,0,3,1,0,11,6,0,0,0,10,13,12,8,0,0,9 
+0,0,6,14,16,9,0,0,0,5,16,10,12,12,0,0,0,1,7,0,9,12,0,0,0,0,2,13,16,8,0,0,0,0,2,13,14,16,7,0,0,0,1,0,0,10,9,0,0,0,11,9,5,15,7,0,0,0,7,16,16,10,0,0,3 +0,0,11,15,0,0,0,0,0,0,9,16,1,0,0,0,0,0,9,16,0,0,0,0,0,0,11,13,0,9,5,0,0,0,13,11,0,11,13,0,0,1,15,10,6,12,16,1,0,4,16,16,16,15,16,4,0,0,7,7,4,0,15,7,4 +0,0,0,10,15,6,0,0,0,0,7,16,10,12,2,0,0,1,15,5,1,11,4,0,0,3,14,0,0,5,6,0,0,7,10,0,0,3,8,0,0,4,12,0,0,4,11,0,0,0,15,4,0,10,8,0,0,0,2,12,13,13,1,0,0 +0,0,0,1,16,6,0,0,0,0,0,9,15,2,0,0,0,0,3,16,4,1,4,0,0,1,13,11,0,11,13,0,0,6,15,2,2,16,7,0,0,13,15,12,15,16,6,0,0,4,8,8,14,11,1,0,0,0,0,0,15,6,0,0,4 +0,0,4,13,14,3,0,0,0,6,15,6,5,11,0,0,0,11,8,0,4,12,0,0,0,8,11,4,16,5,0,0,0,1,10,16,15,2,0,0,0,1,14,7,6,15,4,0,0,1,15,2,0,6,15,0,0,0,5,12,13,15,6,0,8 +0,0,3,16,13,6,0,0,0,1,9,13,12,16,2,0,0,10,16,10,6,16,4,0,0,7,16,16,16,16,8,0,0,0,4,8,5,10,10,0,0,0,0,0,0,8,12,0,0,0,1,10,5,15,8,0,0,0,2,14,16,12,0,0,9 +0,2,12,16,16,13,1,0,0,11,15,4,3,16,7,0,0,7,11,1,13,13,0,0,0,0,0,10,16,15,3,0,0,0,0,1,4,14,11,0,0,1,4,0,0,12,12,0,0,8,14,0,3,16,5,0,0,3,13,16,16,10,0,0,3 +0,0,7,15,16,11,1,0,0,0,0,15,16,16,1,0,0,0,0,13,16,16,6,0,0,0,0,13,16,16,3,0,0,0,0,14,16,14,2,0,0,0,2,16,16,12,0,0,0,0,5,16,16,14,0,0,0,0,8,16,16,8,0,0,1 +0,0,12,16,14,6,0,0,0,2,16,16,16,6,0,0,0,4,16,16,12,2,0,0,0,3,16,16,16,1,0,0,0,2,16,16,16,0,0,0,0,4,16,16,12,0,0,0,0,3,16,16,15,4,0,0,0,2,14,16,12,8,0,0,1 +0,0,0,8,16,5,0,0,0,0,9,13,7,14,0,0,0,0,15,2,0,9,4,0,0,3,13,0,0,4,8,0,0,4,7,0,0,4,8,0,0,3,12,0,0,4,9,0,0,0,11,10,1,7,9,0,0,0,1,9,15,14,3,0,0 +0,0,14,16,12,16,16,0,0,0,8,12,8,16,11,0,0,0,0,0,7,15,3,0,0,0,2,7,14,14,3,0,0,0,12,16,14,12,6,0,0,0,1,15,5,0,0,0,0,0,9,16,0,0,0,0,0,0,16,9,0,0,0,0,7 +0,6,16,12,2,0,0,0,0,1,13,16,12,0,0,0,0,0,0,12,12,0,0,0,0,0,1,15,9,0,0,0,0,0,4,16,1,0,0,0,0,0,14,11,0,0,0,0,0,5,16,15,16,15,4,0,0,4,15,13,12,11,8,0,2 +0,0,0,5,16,4,0,0,0,0,1,13,13,1,0,0,0,0,7,16,5,2,3,0,0,3,16,15,10,15,13,0,0,10,16,16,16,16,8,0,0,1,4,5,16,15,3,0,0,0,0,4,16,11,0,0,0,0,0,7,16,3,0,0,4 +0,0,6,9,13,7,0,0,0,7,16,12,13,14,0,0,0,5,3,1,15,7,0,0,0,0,0,5,16,1,0,0,0,0,0,4,16,6,0,0,0,0,0,0,10,16,10,0,0,0,0,1,7,16,10,0,0,0,4,15,13,8,0,0,3 +0,5,14,15,9,1,0,0,0,7,14,12,16,15,1,0,0,0,1,0,9,16,4,0,0,0,0,0,7,16,4,0,0,0,0,0,12,14,0,0,0,0,1,11,16,4,0,0,0,3,14,16,13,8,2,0,0,4,12,16,12,10,4,0,2 +0,0,3,10,12,0,0,0,0,0,9,16,16,11,0,0,0,0,14,12,2,15,3,0,0,1,16,9,0,9,7,0,0,4,15,1,0,8,8,0,0,3,16,0,0,7,11,0,0,2,15,10,9,15,6,0,0,0,3,15,15,7,0,0,0 +0,0,1,14,10,0,0,0,0,0,10,13,1,0,0,0,0,2,15,4,0,0,0,0,0,5,16,2,0,0,0,0,0,8,15,2,0,0,0,0,0,4,16,16,16,14,2,0,0,0,12,16,14,16,10,0,0,0,2,11,14,13,7,0,6 +0,0,1,10,2,3,0,0,0,0,8,16,15,16,0,0,0,0,12,16,16,4,0,0,0,0,12,16,7,0,0,0,0,3,16,13,13,1,0,0,0,1,16,4,12,10,0,0,0,0,11,12,11,16,2,0,0,0,2,12,14,9,1,0,8 +0,0,9,15,4,0,0,0,0,8,16,11,7,0,0,0,0,11,11,0,2,10,0,0,0,6,16,6,13,14,1,0,0,0,7,16,16,4,0,0,0,0,5,16,16,9,0,0,0,0,10,16,16,14,0,0,0,0,6,16,9,4,0,0,8 +0,0,7,15,10,8,1,0,0,4,16,7,11,16,8,0,0,8,16,2,7,16,5,0,0,0,14,16,16,9,0,0,0,0,6,16,16,3,0,0,0,0,9,14,15,14,2,0,0,0,15,12,11,16,4,0,0,0,7,15,14,7,0,0,8 +0,0,1,11,10,0,0,0,0,0,11,16,13,3,0,0,0,2,16,14,14,16,10,0,0,2,13,16,16,16,8,0,0,0,0,2,0,16,7,0,0,0,0,0,2,16,3,0,0,0,0,4,15,12,0,0,0,0,0,13,8,0,0,0,9 +0,0,11,8,0,0,0,0,0,0,11,12,0,0,0,0,0,0,14,8,0,0,0,0,0,4,16,10,5,1,0,0,0,3,16,16,16,13,2,0,0,0,16,10,10,16,8,0,0,0,16,11,12,16,6,0,0,0,8,16,16,9,0,0,6 +0,1,8,14,13,2,0,0,0,8,15,12,16,10,0,0,0,3,3,3,16,4,0,0,0,0,0,4,14,0,0,0,0,0,0,3,16,5,0,0,0,0,0,0,8,16,5,0,0,0,2,7,9,16,6,0,0,0,7,15,11,5,0,0,3 
+0,1,3,15,14,4,0,0,0,6,13,16,14,14,0,0,0,10,6,9,2,14,3,0,0,8,4,0,0,7,8,0,0,7,6,0,0,8,8,0,0,1,13,1,0,13,5,0,0,0,10,11,9,15,1,0,0,0,2,12,16,6,0,0,0 +0,0,0,7,15,0,0,0,0,0,0,14,13,0,0,0,0,0,5,16,3,0,0,0,0,1,13,13,0,6,11,0,0,8,16,9,2,15,10,0,1,14,16,16,16,16,6,0,0,4,8,9,16,14,0,0,0,0,0,7,16,10,0,0,4 +0,0,4,15,10,7,0,0,0,0,8,16,16,16,1,0,0,0,8,16,16,16,0,0,0,0,3,16,16,10,1,0,0,0,10,16,16,4,0,0,0,3,16,14,15,11,0,0,0,3,15,11,13,15,0,0,0,0,5,13,16,8,0,0,8 +0,0,4,12,12,1,0,0,0,1,15,16,16,8,0,0,0,6,16,16,16,16,1,0,0,1,4,5,7,16,4,0,0,0,0,0,1,16,8,0,0,0,0,0,2,16,7,0,0,0,10,8,13,14,1,0,0,0,7,16,9,1,0,0,9 +0,0,5,12,15,10,1,0,0,6,16,16,16,15,2,0,0,4,14,16,16,5,0,0,0,0,5,16,8,0,0,0,0,0,3,16,16,10,1,0,0,0,0,3,16,16,7,0,0,0,4,14,16,12,0,0,0,0,4,14,8,0,0,0,3 +0,0,10,10,7,0,0,0,0,0,13,16,16,12,1,0,0,3,16,14,6,15,7,0,0,6,12,2,0,8,8,0,0,4,8,0,1,14,7,0,0,4,10,0,8,15,1,0,0,2,16,14,15,4,0,0,0,0,10,16,9,0,0,0,0 +0,0,6,9,0,0,0,0,0,0,5,16,12,0,0,0,0,4,5,3,13,11,0,0,0,6,11,0,1,14,5,0,0,3,13,0,0,8,9,0,0,0,15,1,5,12,12,0,0,0,15,14,16,14,4,0,0,0,5,16,10,2,0,0,0 +0,0,3,14,12,0,0,0,0,1,16,16,16,7,0,0,0,6,16,12,16,16,2,0,0,3,16,16,12,16,4,0,0,0,0,0,0,16,6,0,0,0,0,0,2,16,7,0,0,0,1,11,15,15,2,0,0,0,4,13,9,3,0,0,9 +0,0,0,3,13,8,0,0,0,0,0,12,16,10,0,0,0,0,0,15,16,11,0,0,0,1,15,16,16,12,0,0,0,7,16,16,16,12,0,0,0,4,13,14,16,13,0,0,0,0,0,8,16,16,2,0,0,0,0,6,14,11,3,0,1 +0,0,0,9,8,0,0,0,0,0,2,15,16,8,0,0,0,0,9,16,16,8,0,0,0,0,13,16,16,12,0,0,0,1,13,16,16,12,0,0,0,0,1,16,16,15,0,0,0,0,0,16,16,16,3,0,0,0,0,9,12,12,7,0,1 +0,0,6,11,16,8,0,0,0,1,15,15,11,2,0,0,0,8,16,7,0,0,0,0,0,6,16,5,0,0,0,0,0,0,10,16,13,4,0,0,0,0,0,1,14,16,0,0,0,0,2,8,15,10,0,0,0,0,10,14,7,0,0,0,5 +0,0,3,13,16,16,2,0,0,0,10,14,13,16,7,0,0,0,0,0,6,16,4,0,0,0,2,11,15,16,12,0,0,0,9,14,16,9,1,0,0,0,0,9,15,0,0,0,0,0,2,15,6,0,0,0,0,0,7,10,0,0,0,0,7 +0,0,5,15,2,0,0,0,0,0,16,15,0,0,0,0,0,5,16,6,0,0,0,0,0,8,16,11,11,4,0,0,0,8,16,16,16,16,2,0,0,6,16,8,6,16,5,0,0,2,16,14,16,16,2,0,0,0,9,16,14,7,0,0,6 +0,0,0,11,15,6,0,0,0,0,11,16,16,11,0,0,0,2,16,16,16,12,0,0,0,0,12,16,16,9,0,0,0,0,5,16,16,15,1,0,0,0,7,16,3,15,2,0,0,0,1,16,6,16,0,0,0,0,1,11,15,7,0,0,8 +0,0,4,14,16,8,0,0,0,3,16,16,16,16,0,0,0,3,9,4,16,16,4,0,0,0,2,16,16,16,14,0,0,0,2,13,16,12,2,0,0,0,0,7,16,4,0,0,0,0,0,13,13,0,0,0,0,0,5,13,3,0,0,0,7 +0,0,0,0,13,4,0,0,0,0,0,9,12,1,0,0,0,0,4,14,4,0,0,0,0,1,14,12,4,4,1,0,0,7,16,16,16,16,7,0,0,4,7,2,9,16,3,0,0,0,0,0,12,12,0,0,0,0,0,0,14,5,0,0,4 +0,0,9,15,8,2,0,0,0,3,15,15,16,9,0,0,0,0,11,16,16,12,0,0,0,0,0,0,2,14,2,0,0,0,0,0,0,15,8,0,0,0,0,0,0,9,12,0,0,0,14,5,7,13,14,0,0,0,10,16,15,12,1,0,9 +0,0,11,3,8,7,0,0,0,1,15,11,16,16,7,0,0,0,16,4,5,13,10,0,0,3,13,0,0,8,9,0,0,5,13,0,0,8,8,0,0,6,13,0,7,16,2,0,0,2,16,12,16,10,0,0,0,0,9,13,8,0,0,0,0 +0,0,0,1,11,7,0,0,0,0,0,13,16,15,2,0,0,0,4,16,16,16,4,0,0,2,16,16,16,16,0,0,0,1,8,9,16,16,1,0,0,0,0,10,16,16,1,0,0,0,0,9,16,16,0,0,0,0,0,2,11,7,0,0,1 +0,0,0,1,13,9,0,0,0,0,0,8,16,2,0,0,0,0,7,16,4,0,0,0,0,4,16,7,3,11,4,0,0,12,16,13,16,16,5,0,0,11,12,12,16,15,0,0,0,0,0,0,15,13,0,0,0,0,0,1,16,5,0,0,4 +0,0,0,1,13,7,0,0,0,0,0,12,14,0,0,0,0,0,8,15,3,0,2,0,0,6,16,7,4,10,13,0,0,13,16,16,16,16,10,0,0,1,8,8,9,15,2,0,0,0,0,0,10,10,0,0,0,0,0,0,15,7,0,0,4 +0,0,4,15,6,0,0,0,0,3,16,16,16,5,0,0,0,7,11,4,8,14,1,0,0,7,10,0,0,14,6,0,0,7,10,0,0,11,9,0,0,2,14,0,0,9,10,0,0,0,12,10,9,15,6,0,0,0,2,13,16,7,0,0,0 +0,0,8,13,1,0,0,0,0,0,12,16,0,0,0,0,0,0,15,11,0,0,0,0,0,0,15,13,6,3,0,0,0,0,15,16,16,16,5,0,0,0,15,16,14,16,12,0,0,0,13,16,15,16,7,0,0,0,5,15,13,7,0,0,6 
+0,1,10,12,16,12,3,0,0,0,12,16,10,11,3,0,0,0,7,14,0,0,0,0,0,0,7,16,7,1,0,0,0,0,1,12,15,12,0,0,0,2,11,13,3,16,2,0,0,8,15,6,15,11,0,0,0,3,13,14,9,1,0,0,5 +0,2,15,12,12,12,7,0,0,0,12,16,14,14,9,0,0,0,10,10,0,0,0,0,0,0,8,13,3,0,0,0,0,0,0,7,15,4,0,0,0,0,0,0,10,11,0,0,0,2,7,9,14,3,0,0,0,4,15,11,1,0,0,0,5 +0,0,3,13,16,15,2,0,0,0,10,12,10,16,10,3,0,0,0,0,8,16,15,4,0,0,0,8,16,15,1,0,0,0,0,2,10,10,0,0,0,0,0,4,15,1,0,0,0,0,0,11,9,0,0,0,0,0,4,15,0,0,0,0,7 +0,0,11,16,14,9,1,0,0,0,15,13,13,16,4,0,0,0,0,0,4,16,5,0,0,0,0,0,10,15,2,0,0,0,0,8,16,6,0,0,0,0,8,16,8,0,0,0,0,6,16,16,16,10,0,0,0,1,8,8,12,13,0,0,2 +0,4,16,13,0,0,0,0,0,10,16,16,2,0,0,0,0,8,7,16,4,0,0,0,0,0,5,16,2,0,0,0,0,0,10,14,0,0,0,0,0,1,15,10,0,0,0,0,0,8,16,16,16,16,6,0,0,3,13,16,16,15,5,0,2 +0,0,8,0,8,7,0,0,0,0,16,4,13,16,2,0,0,2,14,2,7,16,6,0,0,7,12,0,0,13,7,0,0,5,13,0,0,12,8,0,0,5,16,8,4,15,7,0,0,3,16,16,16,16,1,0,0,0,7,14,10,3,0,0,0 +0,4,16,14,1,0,0,0,0,9,16,16,8,0,0,0,0,5,9,12,7,0,0,0,0,0,1,16,3,0,0,0,0,0,6,15,0,0,0,0,0,1,15,10,0,0,0,0,0,6,16,14,10,6,1,0,0,4,15,16,16,16,13,0,2 +0,0,5,16,10,0,0,0,0,0,12,15,15,5,0,0,0,0,10,3,11,9,0,0,0,0,0,0,12,8,0,0,0,0,0,0,15,6,0,0,0,0,2,7,16,2,0,0,0,0,11,16,16,15,10,2,0,0,6,8,4,9,15,3,2 +0,0,5,13,3,0,0,0,0,0,12,16,11,0,0,0,0,0,5,13,16,1,0,0,0,0,0,3,16,3,0,0,0,0,0,9,16,1,0,0,0,0,1,16,9,0,0,0,0,0,7,16,13,8,4,0,0,0,5,16,16,16,16,5,2 +0,0,4,13,12,3,0,0,0,0,15,16,16,7,0,0,0,0,16,16,16,16,3,0,0,0,11,16,16,5,0,0,0,0,15,10,12,15,1,0,0,2,16,4,1,16,10,0,0,1,15,12,11,16,5,0,0,0,6,15,9,1,0,0,8 +0,0,0,6,16,2,0,0,0,0,0,12,13,0,0,0,0,0,8,15,2,0,0,0,0,3,16,8,1,11,7,0,0,14,16,16,16,16,10,0,0,5,12,12,16,16,4,0,0,0,0,3,16,9,0,0,0,0,0,7,16,5,0,0,4 +0,0,6,15,16,9,0,0,0,2,15,14,15,16,0,0,0,0,2,0,13,15,0,0,0,0,2,6,14,15,2,0,0,0,12,16,16,16,10,0,0,0,2,11,16,2,0,0,0,0,2,15,9,0,0,0,0,0,8,13,1,0,0,0,7 +0,0,8,13,16,11,1,0,0,0,16,8,8,12,2,0,0,4,15,0,0,0,0,0,0,4,12,0,0,0,0,0,0,2,15,14,9,1,0,0,0,0,2,7,12,11,1,0,0,2,10,5,7,16,2,0,0,0,10,16,14,5,0,0,5 +0,0,6,14,16,10,0,0,0,3,15,14,16,16,1,0,0,0,3,0,10,16,4,0,0,0,0,5,12,16,8,0,0,0,0,13,16,15,6,0,0,0,0,5,16,10,0,0,0,0,0,11,15,0,0,0,0,0,8,15,2,0,0,0,7 +0,0,1,11,14,10,4,0,0,0,8,16,16,16,12,0,0,1,15,14,14,16,12,0,0,7,16,16,16,16,8,0,0,0,4,3,8,16,1,0,0,0,0,0,16,12,0,0,0,0,0,6,16,9,0,0,0,0,2,16,7,0,0,0,9 +0,0,1,10,14,7,1,0,0,4,16,12,11,16,4,0,0,6,6,0,5,15,1,0,0,0,0,3,16,6,0,0,0,0,0,6,16,11,0,0,0,0,0,0,10,16,4,0,0,0,0,7,10,16,4,0,0,0,0,7,13,5,0,0,3 +0,0,11,15,8,0,0,0,0,4,16,9,15,5,0,0,0,6,15,1,15,13,0,0,0,1,15,15,16,16,4,0,0,0,1,7,5,16,6,0,0,0,0,0,0,16,5,0,0,0,15,9,11,15,3,0,0,1,13,16,14,3,0,0,9 +0,0,5,12,16,9,0,0,0,1,16,15,15,13,0,0,0,0,4,1,13,14,2,0,0,0,0,5,16,16,13,0,0,0,0,12,16,11,4,0,0,0,0,8,15,1,0,0,0,0,0,15,12,0,0,0,0,0,4,15,2,0,0,0,7 +0,0,5,14,10,0,0,0,0,2,15,14,13,0,0,0,0,6,16,3,9,14,1,0,0,8,16,16,16,16,7,0,0,0,8,5,6,16,8,0,0,0,0,0,8,15,6,0,0,0,3,12,16,7,0,0,0,0,6,16,7,0,0,0,9 +0,0,5,10,16,8,0,0,0,1,16,16,16,6,0,0,0,4,16,15,4,0,0,0,0,3,16,10,0,0,0,0,0,0,11,16,6,0,0,0,0,0,1,15,15,2,0,0,0,0,5,13,16,8,0,0,0,0,7,16,13,3,0,0,5 +0,0,5,11,14,2,0,0,0,8,16,16,14,4,0,0,0,12,15,8,1,0,0,0,0,2,15,10,0,0,0,0,0,0,2,13,13,3,0,0,0,0,0,1,9,15,4,0,0,0,2,10,15,15,3,0,0,0,3,12,8,3,0,0,5 +0,1,10,13,16,9,0,0,0,11,16,14,16,16,2,0,0,7,6,3,16,11,0,0,0,0,0,5,16,9,0,0,0,0,0,3,16,12,0,0,0,0,0,0,9,16,5,0,0,0,3,9,16,15,1,0,0,0,12,15,11,1,0,0,3 +0,0,0,8,15,10,0,0,0,0,1,16,16,16,0,0,0,0,6,16,16,15,1,0,0,3,14,16,16,12,0,0,0,8,16,16,16,12,0,0,0,5,7,13,16,13,0,0,0,0,0,8,16,16,4,0,0,0,0,6,15,15,9,0,1 
+0,0,7,15,8,0,0,0,0,7,16,16,12,0,0,0,0,12,14,11,11,0,0,0,0,8,3,16,7,0,0,0,0,0,5,16,3,0,0,0,0,0,9,14,0,0,0,0,0,0,12,15,12,8,3,0,0,0,6,16,16,16,11,0,2 +0,0,7,9,15,6,0,0,0,2,16,16,16,16,3,0,0,7,15,8,1,13,8,0,0,8,10,0,0,8,8,0,0,8,9,0,0,8,8,0,0,7,14,0,3,15,5,0,0,2,16,14,16,13,1,0,0,0,8,13,8,0,0,0,0 +0,0,8,12,5,0,0,0,0,3,15,13,15,0,0,0,0,0,0,9,13,0,0,0,0,0,0,13,12,0,0,0,0,0,0,7,16,11,3,0,0,0,0,0,3,14,14,0,0,0,4,7,7,15,12,0,0,0,8,13,12,6,0,0,3 +0,0,0,1,14,4,0,0,0,0,0,9,15,2,0,0,0,0,2,16,8,0,0,0,0,0,13,14,0,3,2,0,0,7,16,7,10,16,9,0,1,14,16,16,16,16,6,0,0,7,8,4,15,14,1,0,0,0,0,0,16,10,0,0,4 +0,1,9,12,14,6,0,0,0,6,16,10,16,12,0,0,0,1,14,13,12,16,0,0,0,0,0,0,2,16,4,0,0,0,0,0,0,15,9,0,0,0,0,0,0,10,9,0,0,0,0,0,5,14,11,0,0,0,10,16,16,7,0,0,9 +0,0,0,11,9,0,0,0,0,0,3,16,3,0,0,0,0,0,10,14,0,1,2,0,0,3,16,5,0,13,9,0,0,10,16,8,10,16,8,0,3,16,16,16,16,15,4,0,0,4,3,7,16,3,0,0,0,0,0,11,11,0,0,0,4 +0,0,0,6,14,3,0,0,0,0,2,14,16,12,0,0,0,0,4,16,16,16,0,0,0,1,11,16,16,12,0,0,0,6,16,16,16,12,0,0,0,0,8,16,16,9,0,0,0,0,4,16,16,10,1,0,0,0,0,9,14,11,2,0,1 +0,0,1,11,14,13,0,0,0,1,13,16,9,5,0,0,0,2,16,9,1,8,6,0,0,0,13,16,15,16,6,0,0,0,1,13,16,13,0,0,0,0,1,16,16,16,2,0,0,0,5,16,16,14,1,0,0,0,2,14,11,1,0,0,8 +0,0,1,11,5,0,0,0,0,0,8,16,1,0,0,0,0,2,15,10,0,0,0,0,0,6,16,5,3,0,0,0,0,8,16,16,16,14,0,0,0,3,16,9,1,16,10,0,0,0,11,16,12,16,6,0,0,0,0,10,14,11,1,0,6 +0,0,3,12,16,10,0,0,0,0,16,10,5,16,1,0,0,4,16,11,10,14,0,0,0,0,14,16,16,13,1,0,0,0,2,8,8,16,7,0,0,0,0,0,1,16,3,0,0,0,0,10,16,7,0,0,0,0,2,10,3,0,0,0,9 +0,0,1,14,6,2,6,0,0,0,10,13,1,10,10,0,0,4,16,3,3,16,5,0,0,10,16,12,14,16,9,0,0,3,16,16,16,10,2,0,0,0,0,5,15,1,0,0,0,0,0,11,10,0,0,0,0,0,0,13,8,0,0,0,4 +0,0,0,14,4,1,1,0,0,0,4,16,1,12,7,0,0,1,15,8,5,16,3,0,0,9,14,0,10,11,0,0,0,11,16,14,16,14,4,0,0,6,8,14,16,14,5,0,0,0,0,11,9,0,0,0,0,0,0,13,4,0,0,0,4 +0,0,0,0,8,16,2,0,0,0,0,6,16,16,3,0,0,0,1,15,16,16,0,0,0,0,13,16,16,12,0,0,0,4,16,7,16,12,0,0,0,5,4,3,16,9,0,0,0,0,0,2,16,8,0,0,0,0,0,0,12,14,0,0,1 +0,0,0,4,14,5,9,0,0,0,4,16,6,11,13,0,0,2,14,9,2,16,4,0,0,7,16,16,16,15,0,0,0,3,16,16,16,13,2,0,0,0,2,9,16,1,0,0,0,0,0,6,16,2,0,0,0,0,0,6,16,5,0,0,4 +0,0,9,12,13,15,16,3,0,1,14,13,12,16,14,1,0,0,0,0,6,15,3,0,0,0,5,13,16,16,10,0,0,1,16,16,15,12,3,0,0,0,5,16,7,0,0,0,0,0,7,16,0,0,0,0,0,0,11,14,0,0,0,0,7 +0,0,6,13,16,14,1,0,0,5,15,4,1,12,4,0,0,1,16,10,15,10,2,0,0,0,10,13,3,0,0,0,0,0,9,15,1,0,0,0,0,0,13,11,10,0,0,0,0,0,15,3,13,6,0,0,0,0,5,12,14,9,0,0,8 +0,0,7,12,15,13,3,0,0,2,16,6,2,11,7,0,0,7,14,9,13,11,1,0,0,2,16,12,6,7,2,0,0,2,16,15,8,2,0,0,0,2,16,14,2,0,0,0,0,0,14,16,8,0,0,0,0,0,4,12,11,0,0,0,8 +0,0,0,0,10,12,1,0,0,0,0,1,15,16,3,0,0,0,2,10,16,11,0,0,0,1,11,16,16,12,0,0,0,5,8,3,16,9,0,0,0,0,0,1,16,10,0,0,0,0,0,0,15,11,0,0,0,0,0,0,7,13,1,0,1 +0,0,0,8,11,3,7,0,0,0,6,15,4,10,9,0,0,3,15,6,1,16,5,0,0,7,16,12,14,16,14,0,0,5,16,16,16,14,7,0,0,0,4,7,16,5,0,0,0,0,0,9,15,0,0,0,0,0,0,7,16,0,0,0,4 +0,0,0,0,2,15,3,0,0,0,0,0,12,16,4,0,0,0,0,6,16,16,0,0,0,0,7,16,16,14,0,0,0,6,16,11,16,12,0,0,0,1,3,0,15,13,0,0,0,0,0,0,10,16,0,0,0,0,0,0,2,14,5,0,1 +0,0,0,7,12,2,0,0,0,1,10,16,15,6,0,0,0,0,15,13,1,0,0,0,0,2,16,8,0,0,0,0,0,2,16,11,12,9,0,0,0,2,16,14,10,14,10,0,0,0,11,15,8,11,16,1,0,0,1,9,16,16,13,1,6 +0,0,4,14,15,6,0,0,0,2,16,12,15,14,0,0,0,7,9,0,0,12,4,0,0,8,6,0,0,6,7,0,0,5,8,0,0,4,8,0,0,2,14,1,0,7,7,0,0,0,12,16,16,16,2,0,0,0,2,11,15,8,0,0,0 +0,0,0,9,16,7,0,0,0,0,7,16,8,0,0,0,0,0,14,10,0,0,0,0,0,1,16,4,0,0,0,0,0,2,16,8,8,2,0,0,0,1,14,14,9,15,7,0,0,0,9,14,4,8,14,0,0,0,0,9,15,15,7,0,6 
+0,0,0,0,3,13,3,0,0,0,0,0,11,16,4,0,0,0,0,11,16,16,2,0,0,0,9,16,16,16,0,0,0,3,16,5,13,16,0,0,0,3,2,0,12,15,1,0,0,0,0,0,8,16,0,0,0,0,0,0,2,14,3,0,1 +0,0,5,15,13,2,0,0,0,3,16,16,16,16,0,0,0,8,7,1,3,14,7,0,0,3,1,0,0,5,8,0,0,5,10,0,0,5,8,0,0,3,16,12,8,14,8,0,0,0,13,16,16,16,5,0,0,0,3,14,16,10,0,0,0 +0,0,3,13,0,0,0,0,0,0,11,14,0,0,0,0,0,2,16,3,0,0,0,0,0,7,14,0,0,0,0,0,0,8,13,8,12,7,1,0,0,6,16,10,10,16,6,0,0,0,13,14,12,15,8,0,0,0,3,13,16,12,3,0,6 +0,0,3,15,16,16,9,0,0,0,1,9,14,16,5,0,0,0,0,1,11,15,0,0,0,0,5,16,16,16,14,0,0,0,1,12,14,5,2,0,0,0,1,15,8,0,0,0,0,0,5,16,2,0,0,0,0,0,7,15,2,0,0,0,7 +0,0,0,0,9,10,0,0,0,0,0,5,16,15,0,0,0,0,3,14,16,12,0,0,0,3,15,16,16,10,0,0,0,4,4,8,16,8,0,0,0,0,0,3,16,9,0,0,0,0,0,3,15,10,0,0,0,0,0,0,7,14,2,0,1 +0,0,4,15,8,0,0,0,0,1,14,12,16,1,0,0,0,2,10,1,16,0,0,0,0,0,3,11,8,0,0,0,0,0,0,10,15,6,0,0,0,0,0,2,12,15,2,0,0,0,1,10,8,15,3,0,0,0,2,15,16,11,0,0,3 +0,0,0,4,13,15,0,0,0,0,6,16,9,1,0,0,0,0,15,9,0,0,0,0,0,3,15,0,0,0,0,0,0,8,12,0,0,0,0,0,0,4,15,12,12,7,1,0,0,0,9,15,9,13,11,0,0,0,0,6,12,16,10,0,6 +0,0,0,12,15,4,0,0,0,0,7,9,8,15,0,0,0,2,12,0,0,9,4,0,0,5,11,0,0,4,8,0,0,8,4,0,0,8,6,0,0,2,12,0,1,14,5,0,0,0,13,15,16,12,1,0,0,0,2,10,12,1,0,0,0 +0,0,0,9,14,13,4,0,0,0,6,14,6,10,12,0,0,0,9,12,6,15,7,0,0,1,11,16,15,5,0,0,0,7,16,16,10,0,0,0,0,7,16,9,16,9,0,0,0,1,11,8,7,16,0,0,0,0,2,11,12,13,0,0,8 +0,0,8,15,16,16,12,0,0,0,4,12,13,16,8,0,0,0,0,0,10,14,0,0,0,1,9,12,16,13,4,0,0,5,16,16,16,14,5,0,0,0,2,16,5,0,0,0,0,0,10,14,0,0,0,0,0,0,11,11,0,0,0,0,7 +0,0,2,11,16,9,0,0,0,0,9,14,15,14,0,0,0,0,0,0,13,10,0,0,0,0,4,9,16,12,5,0,0,9,16,16,16,16,11,0,0,3,8,12,11,0,0,0,0,0,0,16,5,0,0,0,0,0,2,16,2,0,0,0,7 +0,0,1,10,16,16,7,0,0,0,13,14,10,7,3,0,0,6,16,2,0,0,0,0,0,10,16,16,16,8,1,0,0,1,6,11,13,16,9,0,0,0,0,0,4,16,8,0,0,0,0,1,9,16,5,0,0,0,2,15,16,8,0,0,5 +0,0,0,6,15,16,6,0,0,0,3,16,5,9,12,0,0,0,4,15,7,13,4,0,0,0,14,16,16,10,0,0,0,5,16,13,11,0,0,0,0,5,15,1,16,3,0,0,0,0,10,13,15,6,0,0,0,0,0,5,13,5,0,0,8 +0,0,1,11,15,2,0,0,0,6,6,16,16,9,0,0,0,8,14,13,3,13,4,0,0,7,16,1,0,6,6,0,0,3,12,0,0,6,9,0,0,0,14,5,2,13,12,0,0,0,3,16,16,16,8,0,0,0,0,9,16,13,2,0,0 +0,0,4,12,13,11,0,0,0,3,15,3,7,16,1,0,0,4,12,9,15,4,0,0,0,0,11,15,1,0,0,0,0,1,13,16,12,0,0,0,0,3,14,1,14,3,0,0,0,0,12,3,4,12,0,0,0,0,3,12,12,13,0,0,8 +0,0,10,15,2,0,0,0,0,5,16,16,8,0,0,0,0,9,13,8,12,0,0,0,0,12,7,4,14,0,0,0,0,3,1,7,12,0,0,0,0,0,0,15,13,1,0,0,0,0,9,16,16,16,6,0,0,0,10,16,16,16,10,0,2 +0,0,2,11,16,16,11,0,0,0,13,11,4,13,13,0,0,0,12,14,15,14,2,0,0,0,12,16,15,5,0,0,0,0,8,16,11,0,0,0,0,2,16,9,16,4,0,0,0,4,16,6,14,10,0,0,0,0,3,12,15,6,0,0,8 +0,0,0,8,13,1,0,0,0,0,6,16,11,1,0,0,0,0,12,12,0,0,0,0,0,2,16,6,0,0,0,0,0,0,15,10,9,6,2,0,0,2,15,12,10,16,11,0,0,0,8,15,10,14,14,0,0,0,0,9,13,12,7,0,6 +0,2,15,14,3,0,0,0,0,7,15,14,13,0,0,0,0,9,11,2,16,4,0,0,0,7,4,0,14,6,0,0,0,0,0,0,14,7,0,0,0,0,0,3,16,3,0,0,0,0,4,15,16,8,4,0,0,1,16,16,16,16,16,5,2 +0,0,0,8,13,10,5,0,0,0,8,16,11,14,15,0,0,0,9,16,16,14,11,0,0,0,3,10,16,16,7,0,0,0,0,0,2,16,4,0,0,0,0,0,4,16,2,0,0,0,0,1,11,11,0,0,0,0,0,14,15,2,0,0,9 +0,0,3,12,13,9,0,0,0,0,12,8,2,12,2,0,0,0,11,8,12,11,0,0,0,0,7,16,5,0,0,0,0,3,12,6,11,0,0,0,0,4,8,0,7,9,0,0,0,1,12,2,0,13,4,0,0,0,2,12,11,12,6,0,8 +0,0,9,15,16,15,8,0,0,5,16,16,13,12,13,1,0,6,16,5,1,0,0,0,0,11,16,16,10,0,0,0,0,8,15,11,16,0,0,0,0,0,0,10,12,0,0,0,0,0,2,15,7,0,0,0,0,0,9,13,0,0,0,0,5 +0,0,7,11,15,13,0,0,0,1,14,12,16,9,0,0,0,0,0,3,16,6,0,0,0,0,2,11,16,8,1,0,0,1,15,16,16,16,10,0,0,0,10,16,9,4,1,0,0,0,8,13,0,0,0,0,0,0,11,9,0,0,0,0,7 
+0,0,0,4,13,10,0,0,0,0,0,12,10,14,4,0,0,0,0,14,4,15,4,0,0,1,9,15,14,8,0,0,0,5,14,10,8,0,0,0,0,4,8,2,12,0,0,0,0,0,9,8,12,4,0,0,0,0,1,7,13,2,0,0,8 +0,0,2,14,1,0,0,0,0,0,9,11,0,0,0,0,0,0,16,4,0,0,0,0,0,4,16,0,0,0,0,0,0,3,13,2,8,8,3,0,0,0,16,14,8,12,12,0,0,1,10,12,4,10,13,0,0,0,2,11,14,14,4,0,6 +0,0,0,2,10,16,2,0,0,0,0,10,13,16,6,0,0,0,0,0,5,16,7,0,0,1,7,8,10,16,3,0,0,5,16,13,16,13,3,0,0,0,0,2,16,9,0,0,0,0,0,4,16,5,0,0,0,0,0,0,11,8,0,0,7 +0,0,6,12,16,16,11,0,0,0,5,15,13,4,0,0,0,0,1,14,0,0,0,0,0,0,7,16,14,4,0,0,0,0,1,6,12,15,0,0,0,0,0,0,0,16,2,0,0,0,3,0,3,13,0,0,0,0,10,16,14,4,0,0,5 +0,1,13,16,1,0,0,0,0,7,16,16,3,0,0,0,0,12,9,16,4,0,0,0,0,5,3,15,4,0,0,0,0,0,6,15,1,0,0,0,0,0,14,10,0,0,0,0,0,4,16,13,11,9,3,0,0,1,15,16,16,15,5,0,2 +0,0,0,10,10,1,0,0,0,0,5,16,5,11,7,0,0,2,16,7,3,16,4,0,0,7,16,2,9,15,2,0,0,7,16,12,16,15,7,0,0,1,10,14,16,10,1,0,0,0,0,11,15,1,0,0,0,0,0,12,13,0,0,0,4 +0,0,0,3,10,13,12,4,0,0,3,16,6,5,14,9,0,0,12,16,13,12,14,4,0,0,11,16,16,16,12,0,0,0,0,0,2,15,2,0,0,0,0,0,10,6,0,0,0,0,0,1,16,4,0,0,0,0,0,1,15,1,0,0,9 +0,0,7,15,16,11,0,0,0,0,9,11,13,12,0,0,0,0,0,0,12,8,0,0,0,0,5,8,16,7,0,0,0,10,16,16,16,16,7,0,0,5,6,16,11,4,1,0,0,0,4,16,2,0,0,0,0,0,8,14,1,0,0,0,7 +0,0,7,14,0,0,0,0,0,0,9,16,2,0,0,0,0,1,14,16,4,0,0,0,0,8,16,16,9,0,0,0,0,12,13,9,16,0,0,0,0,1,2,3,16,5,0,0,0,0,4,10,16,13,7,1,0,0,7,16,16,16,16,3,1 +0,2,14,4,0,0,0,0,0,6,16,10,0,0,0,0,0,8,8,14,0,0,0,0,0,2,3,15,0,0,0,0,0,0,4,12,0,0,0,0,0,1,13,8,0,0,0,0,0,6,16,16,16,12,4,0,0,3,15,16,14,7,2,0,2 +0,0,4,13,16,16,5,0,0,0,13,16,12,7,0,0,0,0,8,15,0,0,0,0,0,0,10,16,16,15,2,0,0,0,6,13,15,16,7,0,0,0,0,0,1,15,6,0,0,0,0,7,9,16,2,0,0,0,5,16,15,7,0,0,5 +0,0,1,12,2,0,0,0,0,0,10,8,0,9,9,0,0,1,16,1,0,15,5,0,0,3,16,5,7,16,2,0,0,2,16,16,16,16,7,0,0,0,6,10,16,1,0,0,0,0,0,9,7,0,0,0,0,0,0,14,1,0,0,0,4 +0,1,9,14,16,16,13,0,0,8,16,12,7,4,2,0,0,10,15,4,2,0,0,0,0,8,16,16,15,3,0,0,0,0,3,6,15,13,0,0,0,0,0,0,5,16,2,0,0,0,2,4,8,16,4,0,0,0,13,16,16,10,0,0,5 +0,0,12,9,0,0,0,0,0,4,15,15,5,0,0,0,0,7,10,3,11,0,0,0,0,10,6,0,12,2,0,0,0,5,3,0,12,5,0,0,0,0,1,3,16,4,0,0,0,0,14,16,16,16,14,0,0,0,12,13,10,8,4,0,2 +0,0,1,10,7,0,0,0,0,0,9,15,2,0,0,0,0,0,13,7,0,0,0,0,0,0,16,1,0,0,0,0,0,5,16,16,16,10,0,0,0,2,14,8,5,13,7,0,0,0,4,15,9,12,10,0,0,0,1,9,15,13,3,0,6 +0,1,13,16,7,0,0,0,0,8,15,15,9,0,0,0,0,12,8,8,12,0,0,0,0,10,7,8,12,0,0,0,0,1,0,11,10,0,0,0,0,0,3,16,5,0,0,0,0,0,13,15,6,6,1,0,0,1,16,16,16,16,8,0,2 +0,0,0,4,10,13,12,0,0,0,9,16,13,10,16,0,0,2,15,16,16,16,10,0,0,0,0,0,0,14,6,0,0,0,0,0,5,15,1,0,0,0,0,0,11,9,0,0,0,0,0,0,15,3,0,0,0,0,0,2,11,0,0,0,9 +0,0,6,15,10,0,0,0,0,0,15,16,9,0,0,0,0,5,16,16,3,0,0,0,0,8,16,16,16,9,0,0,0,0,4,8,13,16,4,0,0,0,0,2,4,16,6,0,0,0,10,15,14,14,1,0,0,0,8,16,14,2,0,0,3 +0,0,2,16,16,11,0,0,0,0,13,15,15,16,5,0,0,4,14,3,3,14,9,0,0,8,15,0,0,6,8,0,0,4,12,0,0,6,8,0,0,1,16,11,10,16,7,0,0,0,14,16,16,11,1,0,0,0,2,12,11,2,0,0,0 +0,0,6,16,15,3,0,0,0,0,16,13,8,1,0,0,0,0,12,7,4,0,0,0,0,0,14,16,16,13,2,0,0,0,3,4,4,13,8,0,0,0,0,0,0,9,4,0,0,0,2,8,13,15,3,0,0,0,4,14,12,5,0,0,5 +0,0,0,1,12,14,0,0,0,0,0,5,16,12,0,0,0,0,1,14,16,12,0,0,0,3,15,16,16,8,0,0,0,9,15,7,16,8,0,0,0,1,2,6,16,5,0,0,0,0,0,2,16,10,0,0,0,0,0,0,11,16,4,0,1 +0,0,4,13,16,16,9,0,0,1,15,14,13,16,7,0,0,0,6,0,10,15,2,0,0,0,0,5,16,7,0,0,0,0,0,4,15,15,1,0,0,0,0,1,8,16,5,0,0,0,4,15,13,16,2,0,0,0,3,15,15,5,0,0,3 +0,0,3,12,14,16,2,0,0,0,8,12,15,16,1,0,0,0,0,1,16,8,0,0,0,2,8,13,16,8,3,0,0,9,16,16,16,16,10,0,0,1,9,16,5,4,1,0,0,0,8,16,1,0,0,0,0,0,4,16,3,0,0,0,7 +0,0,13,13,2,0,0,0,0,2,16,15,11,0,0,0,0,2,16,9,16,1,0,0,0,2,16,8,16,0,0,0,0,0,8,9,15,0,0,0,0,0,2,15,13,4,0,0,0,0,12,16,16,16,11,3,0,0,13,11,0,6,9,3,2 
+0,0,0,9,10,0,0,0,0,0,6,16,7,0,0,0,0,0,15,7,0,0,0,0,0,3,16,2,0,0,0,0,0,5,16,16,16,7,0,0,0,2,16,12,10,16,4,0,0,0,8,15,9,14,7,0,0,0,0,7,14,16,3,0,6 +0,0,5,9,13,13,0,0,0,0,11,16,9,4,0,0,0,0,7,12,0,0,0,0,0,0,10,15,12,12,2,0,0,0,3,12,8,14,7,0,0,0,0,0,0,13,4,0,0,0,1,9,14,12,1,0,0,0,3,16,10,1,0,0,5 +0,0,9,16,16,16,12,0,0,0,8,12,10,14,10,0,0,0,2,5,4,15,1,0,0,0,9,16,16,16,13,0,0,0,2,14,15,7,1,0,0,0,0,14,7,0,0,0,0,0,6,14,0,0,0,0,0,0,10,9,0,0,0,0,7 +0,0,16,13,16,16,10,0,0,0,11,7,4,2,2,0,0,0,11,1,0,0,0,0,0,5,15,6,1,0,0,0,0,2,8,10,15,4,0,0,0,0,0,0,4,11,0,0,0,2,9,0,8,8,0,0,0,0,11,16,13,1,0,0,5 +0,0,3,14,10,1,0,0,0,0,12,9,9,12,0,0,0,2,16,5,0,8,6,0,0,4,8,1,0,3,7,0,0,5,7,0,0,4,8,0,0,2,12,0,0,7,5,0,0,0,12,7,5,13,2,0,0,0,3,14,15,6,0,0,0 +0,0,8,16,15,4,0,0,0,2,16,5,7,8,0,0,0,0,6,0,5,8,0,0,0,0,0,0,12,5,0,0,0,0,0,9,14,0,0,0,0,1,11,15,4,0,0,0,0,3,16,13,4,0,0,0,0,0,8,13,16,15,5,0,2 +0,0,6,10,16,12,0,0,0,1,16,13,11,12,0,0,0,1,4,0,10,8,1,0,0,0,5,8,15,16,13,0,0,1,16,16,14,8,1,0,0,0,4,16,4,0,0,0,0,0,5,13,1,0,0,0,0,0,9,10,0,0,0,0,7 +0,1,11,13,12,4,0,0,0,1,8,8,12,11,0,0,0,0,0,1,11,10,0,0,0,0,7,12,13,1,0,0,0,7,16,16,8,0,0,0,0,0,4,9,14,12,2,0,0,1,10,7,5,16,7,0,0,2,15,16,15,9,1,0,3 +0,0,0,0,11,16,3,0,0,0,1,11,16,16,8,0,0,3,13,16,16,16,5,0,0,10,16,11,9,16,6,0,0,1,4,0,11,16,4,0,0,0,0,0,12,16,2,0,0,0,0,0,13,15,1,0,0,0,0,0,8,16,5,0,1 +0,0,1,11,12,4,0,0,0,1,13,11,6,15,0,0,0,7,13,0,3,15,0,0,0,1,12,13,15,6,0,0,0,0,4,15,13,11,0,0,0,2,15,4,1,14,6,0,0,3,14,3,0,12,7,0,0,0,4,13,16,15,1,0,8 +0,0,4,14,8,0,0,0,0,0,15,13,15,8,0,0,0,3,14,0,1,14,5,0,0,4,12,0,0,9,8,0,0,4,12,0,0,8,8,0,0,4,13,0,0,14,6,0,0,0,15,10,10,13,1,0,0,0,5,15,12,3,0,0,0 +0,0,10,13,16,16,12,0,0,0,6,12,8,13,11,0,0,0,0,0,2,16,5,0,0,0,6,8,14,16,13,0,0,0,15,16,15,9,1,0,0,0,0,13,6,0,0,0,0,0,6,14,1,0,0,0,0,0,14,9,0,0,0,0,7 +0,6,16,16,13,1,0,0,0,13,11,8,15,9,0,0,0,5,1,0,10,14,0,0,0,0,0,0,12,11,0,0,0,0,0,3,16,2,0,0,0,0,0,13,13,0,0,0,0,3,14,16,12,8,7,0,0,5,16,16,16,16,10,0,2 +0,0,0,9,14,4,0,0,0,0,10,14,4,1,0,0,0,0,14,4,0,0,0,0,0,6,16,16,10,3,0,0,0,3,16,2,5,14,4,0,0,0,14,2,0,12,10,0,0,0,7,12,0,13,9,0,0,0,0,8,16,14,2,0,6 +0,0,0,1,12,14,0,0,0,0,0,6,16,4,0,0,0,0,3,16,4,9,3,0,0,2,13,15,6,16,6,0,0,11,16,16,16,16,9,0,0,3,4,4,10,16,1,0,0,0,0,0,11,13,0,0,0,0,0,0,12,10,0,0,4 +0,0,7,16,16,16,3,0,0,0,8,15,12,16,3,0,0,0,13,12,0,0,0,0,0,1,13,16,14,5,0,0,0,0,1,4,13,13,0,0,0,1,5,0,4,16,3,0,0,4,15,8,11,15,0,0,0,0,7,16,16,8,0,0,5 +0,0,0,4,13,5,0,0,0,0,2,14,12,5,0,0,0,0,7,12,1,0,0,0,0,0,11,7,0,0,0,0,0,0,12,14,12,8,0,0,0,1,14,14,8,12,8,0,0,0,2,14,5,9,14,0,0,0,0,3,15,15,6,0,6 +0,3,13,16,16,12,1,0,0,1,8,4,5,15,6,0,0,0,0,1,9,15,2,0,0,0,6,16,15,5,0,0,0,0,5,14,16,8,0,0,0,0,0,0,8,16,2,0,0,6,12,6,12,15,1,0,0,4,13,12,11,2,0,0,3 +0,0,6,14,16,9,0,0,0,5,15,5,8,16,1,0,0,4,14,1,3,16,6,0,0,0,7,16,15,14,8,0,0,0,0,0,0,8,8,0,0,0,0,0,0,8,9,0,0,0,12,3,0,11,9,0,0,0,8,14,16,13,1,0,9 +0,0,0,4,16,11,0,0,0,0,0,13,16,10,0,0,0,0,12,16,16,7,0,0,0,5,14,13,16,5,0,0,0,0,0,8,16,4,0,0,0,0,0,4,16,7,0,0,0,0,0,4,16,8,0,0,0,0,0,3,14,8,0,0,1 +0,0,1,10,16,9,0,0,0,0,10,13,5,3,0,0,0,1,16,3,0,0,0,0,0,4,13,0,0,0,0,0,0,3,15,15,13,5,0,0,0,1,16,11,4,15,4,0,0,0,8,14,5,14,2,0,0,0,0,8,16,12,1,0,6 +0,0,4,16,16,16,16,12,0,0,2,8,8,11,16,4,0,0,0,0,1,15,8,0,0,0,2,4,10,16,11,0,0,11,16,16,16,6,0,0,0,9,9,13,11,0,0,0,0,0,2,15,2,0,0,0,0,0,8,12,0,0,0,0,7 +0,1,13,16,16,3,0,0,0,0,2,2,10,14,0,0,0,0,0,0,5,14,0,0,0,0,0,11,16,3,0,0,0,0,0,11,15,8,1,0,0,0,0,0,9,16,4,0,0,0,3,4,7,15,1,0,0,0,15,16,12,5,0,0,3 +0,0,10,15,13,8,0,0,0,1,12,4,11,10,0,0,0,0,0,8,13,3,0,0,0,3,13,16,5,0,0,0,0,1,8,13,16,13,2,0,0,0,0,0,1,13,6,0,0,5,10,8,8,16,2,0,0,0,10,16,16,6,0,0,3 
+0,0,2,13,16,7,0,0,0,0,7,15,12,16,2,0,0,0,4,16,11,16,8,0,0,0,0,8,16,13,10,0,0,5,4,0,0,7,13,0,0,4,13,0,0,9,11,0,0,1,12,10,4,13,10,0,0,0,2,12,16,16,4,0,9 +0,0,0,5,16,1,0,0,0,0,1,13,8,2,7,0,0,0,9,12,0,10,10,0,0,7,16,4,4,16,11,0,0,11,16,16,16,16,7,0,0,2,8,7,14,8,0,0,0,0,0,2,16,2,0,0,0,0,0,6,12,0,0,0,4 +0,0,6,10,10,15,3,0,0,4,13,6,9,8,8,0,0,3,11,0,7,13,1,0,0,0,14,14,9,0,0,0,0,1,14,13,3,0,0,0,0,3,8,1,14,1,0,0,0,0,13,0,6,9,0,0,0,0,5,15,15,5,0,0,8 +0,0,0,6,15,10,1,0,0,0,0,12,16,14,2,0,0,1,12,16,16,7,0,0,0,7,16,16,16,7,0,0,0,1,4,10,16,4,0,0,0,0,0,5,16,7,0,0,0,0,0,8,16,11,0,0,0,0,0,6,16,12,0,0,1 +0,2,13,16,16,16,2,0,0,1,10,8,10,16,0,0,0,0,7,8,12,15,7,0,0,3,16,16,16,12,5,0,0,0,2,11,14,0,0,0,0,0,3,15,3,0,0,0,0,0,10,12,0,0,0,0,0,0,15,4,0,0,0,0,7 +0,0,0,6,15,2,2,0,0,0,3,15,10,4,15,0,0,2,14,9,0,12,11,0,0,9,16,4,3,16,14,0,0,11,16,16,16,15,7,0,0,1,10,11,16,8,0,0,0,0,0,3,16,1,0,0,0,0,0,8,11,0,0,0,4 +0,2,15,16,10,0,0,0,0,8,16,10,16,2,0,0,0,4,7,0,16,6,0,0,0,0,0,2,16,4,0,0,0,0,0,8,14,0,0,0,0,0,2,16,7,0,0,0,0,2,15,16,9,8,8,0,0,3,16,16,16,16,12,0,2 +0,4,15,16,12,0,0,0,0,12,15,7,16,5,0,0,0,3,3,0,15,4,0,0,0,0,0,0,13,7,0,0,0,0,0,5,15,2,0,0,0,0,1,14,11,0,0,0,0,1,13,16,9,8,3,0,0,3,16,16,16,16,16,1,2 +0,0,7,15,13,3,0,0,0,0,11,6,8,14,0,0,0,0,0,3,8,12,0,0,0,0,0,16,14,2,0,0,0,0,0,10,15,7,0,0,0,0,0,0,2,11,10,0,0,1,15,3,1,11,10,0,0,0,4,15,16,13,2,0,3 +0,0,10,16,12,0,0,0,0,1,14,13,16,4,0,0,0,0,2,3,16,5,0,0,0,0,0,4,16,2,0,0,0,0,0,11,14,0,0,0,0,0,2,16,8,0,3,0,0,1,13,16,14,16,16,3,0,0,12,16,16,13,7,0,2 +0,0,5,16,16,15,1,0,0,0,10,9,10,16,2,0,0,0,0,1,12,10,0,0,0,0,0,9,16,8,0,0,0,0,0,2,14,16,5,0,0,0,0,4,0,16,5,0,0,0,3,14,8,13,0,0,0,0,4,16,13,3,0,0,3 +0,0,11,16,15,2,0,0,0,0,12,10,14,8,0,0,0,0,0,0,11,8,0,0,0,0,0,1,15,3,0,0,0,0,1,11,11,0,0,0,0,6,15,15,2,0,0,0,0,9,16,14,9,3,0,0,0,1,9,12,15,16,13,0,2 +0,0,1,9,15,12,0,0,0,0,13,8,5,14,4,0,0,0,14,1,5,14,1,0,0,0,7,13,16,4,0,0,0,0,11,12,14,12,1,0,0,4,13,0,0,11,7,0,0,1,13,7,2,8,8,0,0,0,1,9,16,13,2,0,8 +0,3,15,16,11,0,0,0,0,10,15,13,16,2,0,0,0,10,7,4,16,0,0,0,0,1,0,9,12,0,0,0,0,0,1,16,6,0,0,0,0,0,10,15,0,0,0,0,0,7,16,14,12,14,11,0,0,6,16,16,16,11,3,0,2 +0,0,9,13,15,10,1,0,0,0,7,4,4,12,13,0,0,0,0,0,0,7,11,0,0,0,2,12,13,12,2,0,0,0,0,10,15,1,0,0,0,0,0,1,11,9,0,0,0,0,12,3,3,15,0,0,0,0,8,16,16,3,0,0,3 +0,2,16,10,9,8,0,0,0,0,10,16,16,12,0,0,0,1,5,10,16,10,6,0,0,11,16,16,16,15,7,0,0,3,5,14,9,2,0,0,0,0,6,16,0,0,0,0,0,0,10,10,0,0,0,0,0,3,16,1,0,0,0,0,7 +0,0,3,14,16,12,1,0,0,3,16,14,3,11,4,0,0,2,13,1,3,15,4,0,0,0,10,16,16,8,0,0,0,2,15,10,14,4,0,0,0,5,14,0,3,14,1,0,0,0,14,4,1,14,2,0,0,0,2,13,16,10,0,0,8 +0,0,0,9,14,0,0,0,0,0,5,16,4,4,9,0,0,4,16,3,0,13,9,0,0,6,16,15,12,16,13,0,0,0,8,11,15,14,3,0,0,0,0,0,16,5,0,0,0,0,0,6,13,0,0,0,0,0,0,9,10,0,0,0,4 +0,0,0,0,8,13,1,0,0,0,1,9,15,14,1,0,0,6,14,16,16,10,0,0,0,5,11,4,16,9,0,0,0,0,0,3,16,5,0,0,0,0,0,3,16,7,0,0,0,0,0,0,16,10,0,0,0,0,0,0,11,13,0,0,1 +0,0,1,13,14,2,0,0,0,0,7,16,10,12,0,0,0,3,15,10,0,12,3,0,0,7,9,0,0,8,7,0,0,5,9,0,0,8,7,0,0,1,13,0,0,11,2,0,0,0,13,7,5,10,0,0,0,0,3,14,15,3,0,0,0 +0,0,0,0,15,15,1,0,0,0,0,2,16,16,4,0,0,0,2,11,16,15,1,0,0,9,16,16,16,12,0,0,0,3,10,8,16,8,0,0,0,0,0,0,16,11,0,0,0,0,0,0,15,13,0,0,0,0,0,0,15,15,0,0,1 +0,0,2,14,15,8,0,0,0,0,8,10,3,15,1,0,0,0,5,11,1,15,4,0,0,0,0,12,15,16,5,0,0,0,0,0,2,8,6,0,0,1,3,0,0,7,5,0,0,1,12,4,0,10,7,0,0,0,4,14,14,13,1,0,9 +0,0,9,15,14,1,0,0,0,2,16,8,15,10,0,0,0,2,14,4,13,14,1,0,0,0,7,15,15,14,6,0,0,0,0,1,1,7,9,0,0,0,0,0,0,3,12,0,0,0,6,0,0,6,12,0,0,0,9,16,16,16,6,0,9 
+0,0,7,10,13,5,0,0,0,6,16,12,16,12,0,0,0,6,15,6,16,14,1,0,0,1,13,16,12,16,2,0,0,0,0,0,0,13,4,0,0,0,0,0,0,11,7,0,0,0,11,6,4,13,8,0,0,0,9,16,16,12,3,0,9 +0,0,8,16,16,16,16,2,0,0,8,10,7,12,13,0,0,0,0,0,3,15,2,0,0,0,4,11,15,16,13,0,0,0,11,16,14,6,0,0,0,0,1,14,6,0,0,0,0,0,7,13,1,0,0,0,0,0,12,8,0,0,0,0,7 +0,0,10,12,13,11,2,0,0,0,14,9,8,8,2,0,0,0,15,0,0,0,0,0,0,6,16,6,0,0,0,0,0,3,12,13,15,5,0,0,0,0,0,0,6,13,1,0,0,0,9,5,4,14,0,0,0,0,8,15,15,7,0,0,5 +0,0,2,13,13,4,0,0,0,0,12,13,11,14,0,0,0,0,9,13,13,14,1,0,0,0,0,4,8,13,3,0,0,0,0,0,0,7,9,0,0,0,0,0,0,3,13,0,0,0,14,6,0,6,12,0,0,0,2,12,16,16,7,0,9 +0,0,0,5,12,15,7,0,0,0,3,14,4,13,6,0,0,0,4,13,0,14,7,0,0,0,0,13,14,16,8,0,0,0,0,0,3,8,8,0,0,1,3,0,0,10,8,0,0,5,16,7,0,12,4,0,0,0,2,6,13,15,1,0,9 +0,0,6,12,15,15,9,0,0,0,8,12,4,4,3,0,0,0,12,3,0,0,0,0,0,1,15,6,3,0,0,0,0,8,16,12,16,6,0,0,0,3,4,0,4,11,0,0,0,4,15,5,13,6,0,0,0,0,6,16,10,0,0,0,5 +0,0,3,10,13,5,0,0,0,0,15,12,5,14,1,0,0,4,12,1,0,10,4,0,0,5,8,0,0,8,7,0,0,5,8,0,0,8,8,0,0,4,11,0,0,11,5,0,0,1,14,6,7,12,0,0,0,0,4,15,14,4,0,0,0 +0,0,0,2,15,7,0,0,0,0,0,11,12,1,0,0,0,0,6,15,1,14,2,0,0,5,15,5,3,16,1,0,0,7,16,14,13,16,8,0,0,1,6,12,15,14,3,0,0,0,0,1,13,7,0,0,0,0,0,3,16,1,0,0,4 +0,0,5,13,12,15,2,0,0,0,13,13,8,8,2,0,0,3,16,5,0,0,0,0,0,7,16,13,5,0,0,0,0,1,7,10,16,10,0,0,0,0,0,0,4,16,4,0,0,0,8,6,5,16,4,0,0,0,7,13,13,10,0,0,5 +0,0,0,2,14,5,1,0,0,0,1,12,13,1,15,4,0,0,9,15,2,6,16,2,0,7,16,13,8,14,12,0,0,9,16,16,16,16,10,0,0,0,0,3,11,15,1,0,0,0,0,0,14,10,0,0,0,0,0,2,16,4,0,0,4 +0,0,7,13,12,9,1,0,0,3,15,5,5,16,4,0,0,3,15,2,7,16,1,0,0,0,5,15,16,15,4,0,0,0,0,0,0,11,7,0,0,1,4,0,0,9,7,0,0,3,13,1,0,10,8,0,0,0,9,15,16,16,1,0,9 +0,0,2,14,12,2,0,0,0,0,12,13,7,13,0,0,0,5,16,1,0,12,3,0,0,6,12,2,0,4,8,0,0,7,8,0,0,7,8,0,0,1,14,1,0,9,6,0,0,0,9,11,7,16,1,0,0,0,2,12,16,8,0,0,0 +0,0,2,8,15,7,0,0,0,0,11,15,6,14,3,0,0,0,16,6,0,4,9,0,0,8,4,0,0,4,8,0,0,6,7,0,0,5,8,0,0,3,13,0,0,13,1,0,0,0,10,11,8,11,0,0,0,0,1,13,11,2,0,0,0 +0,3,16,16,10,0,0,0,0,2,12,11,16,3,0,0,0,0,0,2,16,5,0,0,0,0,0,3,16,2,0,0,0,0,1,15,10,0,0,0,0,0,13,16,1,0,0,0,0,7,16,13,10,12,13,0,0,2,15,16,16,12,6,0,2 +0,0,14,16,14,5,0,0,0,0,3,0,2,16,3,0,0,0,0,4,10,15,1,0,0,6,16,16,14,2,0,0,0,1,4,6,12,12,0,0,0,0,0,0,0,12,9,0,0,0,10,0,4,14,5,0,0,0,13,16,13,4,0,0,3 +0,0,0,13,16,16,16,1,0,0,0,7,8,13,11,0,0,0,0,0,0,14,10,1,0,6,12,13,16,14,9,0,0,6,14,10,16,6,0,0,0,0,0,7,13,1,0,0,0,0,0,13,10,0,0,0,0,0,0,15,3,0,0,0,7 +0,0,6,12,13,12,6,0,0,0,6,8,2,5,7,0,0,0,12,2,0,0,0,0,0,7,15,12,7,2,0,0,0,2,8,8,9,15,4,0,0,0,0,0,0,7,7,0,0,0,4,5,4,13,2,0,0,0,4,15,16,6,0,0,5 +0,0,1,8,15,8,0,0,0,0,14,11,14,16,0,0,0,3,12,0,6,9,0,0,0,1,15,10,14,1,0,0,0,0,1,15,16,5,0,0,0,0,5,12,3,15,3,0,0,0,6,11,1,13,4,0,0,0,0,8,16,13,1,0,8 +0,0,5,8,12,12,13,0,0,0,14,12,8,4,4,0,0,0,14,0,2,0,0,0,0,3,16,16,16,10,0,0,0,0,5,1,2,15,2,0,0,0,0,0,0,16,0,0,0,0,4,10,8,9,0,0,0,0,3,16,13,1,0,0,5 +0,0,4,15,10,0,0,0,0,3,16,9,10,7,0,0,0,7,15,3,1,11,2,0,0,8,6,0,0,6,7,0,0,8,5,0,0,4,8,0,0,4,10,0,0,7,8,0,0,0,13,6,5,15,3,0,0,0,4,15,14,6,0,0,0 +0,0,0,3,15,4,0,0,0,0,0,13,9,0,0,0,0,0,8,12,0,7,4,0,0,4,14,3,0,15,5,0,0,11,16,16,16,16,10,0,0,5,9,9,13,12,0,0,0,0,0,0,12,7,0,0,0,0,0,2,16,3,0,0,4 +0,0,0,2,11,1,0,0,0,0,0,7,13,1,0,0,0,0,6,15,2,4,7,0,0,1,14,11,0,14,8,0,0,7,16,16,16,16,3,0,0,0,0,3,13,14,0,0,0,0,0,0,15,9,0,0,0,0,0,3,15,5,0,0,4 +0,0,4,14,5,0,0,0,0,0,12,15,14,0,0,0,0,0,12,8,15,9,0,0,0,0,5,16,15,16,4,0,0,0,0,0,6,12,10,0,0,0,0,0,0,5,15,0,0,0,3,8,4,9,16,4,0,0,5,12,15,12,11,0,9 +0,0,0,3,12,0,0,0,0,0,0,12,9,1,1,0,0,0,3,16,3,10,7,0,0,1,13,10,1,16,3,0,0,8,16,13,13,16,2,0,0,3,6,8,16,14,2,0,0,0,0,2,16,2,0,0,0,0,0,3,13,0,0,0,4 
+0,0,2,13,14,2,0,0,0,1,15,16,16,9,0,0,0,7,16,8,5,16,1,0,0,6,16,3,0,11,7,0,0,5,16,4,0,8,8,0,0,0,16,9,0,10,11,0,0,0,10,15,10,16,8,0,0,0,2,13,16,14,2,0,0 +0,0,3,15,8,0,0,0,0,0,8,16,16,7,0,0,0,0,7,11,9,12,0,0,0,0,2,5,9,11,0,0,0,0,0,0,13,7,0,0,0,0,7,10,16,4,0,0,0,0,14,16,16,16,15,0,0,0,1,4,4,7,11,1,2 +0,0,0,6,11,6,0,0,0,0,8,13,9,16,3,0,0,2,15,4,0,13,3,0,0,0,7,14,9,13,0,0,0,0,1,15,16,6,0,0,0,0,11,10,10,12,0,0,0,0,9,10,1,16,3,0,0,0,0,9,12,10,1,0,8 +0,0,4,16,7,1,0,0,0,0,10,13,15,11,0,0,0,2,15,3,4,15,3,0,0,4,16,0,0,12,8,0,0,5,16,1,0,9,8,0,0,4,16,2,1,13,7,0,0,0,14,9,9,14,1,0,0,0,5,14,15,6,0,0,0 +0,0,5,12,6,0,0,0,0,0,11,16,15,6,0,0,0,0,10,11,11,15,2,0,0,0,4,15,15,16,6,0,0,0,0,4,8,13,9,0,0,0,0,0,0,8,13,0,0,0,9,8,8,12,13,0,0,0,5,10,13,12,5,0,9 +0,0,1,13,10,0,0,0,0,0,9,16,11,0,0,0,0,3,16,11,0,0,0,0,0,5,16,11,7,1,0,0,0,4,16,16,16,15,3,0,0,3,16,12,2,12,11,0,0,0,12,14,5,9,15,0,0,0,0,10,15,16,11,0,6 +0,0,2,9,3,0,0,0,0,5,14,9,13,9,0,0,0,7,14,0,10,10,0,0,0,0,9,14,15,6,0,0,0,0,7,16,16,0,0,0,0,0,12,6,7,12,1,0,0,0,15,5,0,14,6,0,0,0,2,8,12,11,3,0,8 +0,0,6,12,8,4,0,0,0,1,14,5,7,16,1,0,0,2,16,4,6,13,0,0,0,0,9,14,15,2,0,0,0,0,7,14,12,1,0,0,0,0,15,2,9,11,0,0,0,0,15,7,6,16,0,0,0,0,3,8,9,6,0,0,8 +0,1,8,15,10,0,0,0,0,6,15,13,16,8,0,0,0,0,0,3,14,12,0,0,0,0,4,15,16,10,0,0,0,0,7,12,13,16,6,0,0,0,0,0,0,14,8,0,0,0,8,10,13,16,3,0,0,0,10,16,12,5,0,0,3 +0,1,13,12,0,0,0,0,0,7,16,16,8,0,0,0,0,8,10,4,14,0,0,0,0,2,6,2,15,0,0,0,0,0,0,9,10,0,0,0,0,0,5,16,5,0,0,0,0,2,16,16,16,16,11,0,0,1,11,11,8,9,9,0,2 +0,0,8,12,9,1,0,0,0,3,16,16,16,10,0,0,0,3,16,16,16,10,0,0,0,0,8,16,16,4,0,0,0,0,12,16,16,8,0,0,0,1,15,16,16,9,0,0,0,0,13,16,16,9,0,0,0,0,6,9,11,3,0,0,1 +0,0,6,14,16,5,0,0,0,3,16,13,13,12,0,0,0,1,4,1,12,12,0,0,0,0,4,14,16,6,0,0,0,0,6,14,16,15,2,0,0,0,0,0,8,16,2,0,0,2,16,10,13,15,0,0,0,0,9,14,8,2,0,0,3 +0,0,6,12,12,8,0,0,0,0,11,16,16,16,0,0,0,0,9,16,16,16,1,0,0,0,12,16,16,15,2,0,0,0,12,16,16,16,4,0,0,0,12,16,16,11,1,0,0,0,8,16,16,1,0,0,0,1,11,12,5,0,0,0,1 +0,0,1,12,14,16,10,0,0,0,3,10,8,16,6,0,0,0,0,0,3,15,1,0,0,0,1,7,14,14,3,0,0,0,4,13,16,12,5,0,0,0,0,7,13,0,0,0,0,0,0,13,7,0,0,0,0,0,2,14,2,0,0,0,7 +0,0,10,16,6,0,0,0,0,0,15,14,16,0,0,0,0,0,14,4,16,0,0,0,0,0,0,2,15,0,0,0,0,0,0,6,10,0,0,0,0,0,1,14,6,0,0,0,0,1,14,16,13,12,6,0,0,0,8,8,8,11,11,0,2 +0,0,0,4,15,0,0,0,0,0,1,13,11,0,0,0,0,0,7,16,7,12,0,0,0,2,16,11,10,16,2,0,0,6,16,16,16,16,3,0,0,0,4,4,15,13,1,0,0,0,0,1,16,7,0,0,0,0,0,4,11,1,0,0,4 +0,0,2,16,15,6,0,0,0,7,11,15,12,16,0,0,0,9,16,10,10,16,1,0,0,0,11,16,16,6,0,0,0,0,15,16,15,2,0,0,0,0,14,5,10,13,1,0,0,0,11,11,6,16,7,0,0,0,3,11,16,12,3,0,8 +0,0,0,4,15,0,0,0,0,0,3,15,10,0,0,0,0,0,11,15,0,4,0,0,0,5,16,8,4,16,3,0,0,3,16,14,13,16,2,0,0,0,4,6,15,14,2,0,0,0,0,0,15,6,0,0,0,0,0,2,15,2,0,0,4 +0,1,9,13,12,6,0,0,0,5,16,13,8,10,2,0,0,4,14,1,0,0,0,0,0,2,16,14,4,0,0,0,0,5,16,15,16,4,0,0,0,0,0,1,11,16,3,0,0,0,6,6,15,15,1,0,0,0,7,13,10,3,0,0,5 +0,0,9,16,15,5,0,0,0,4,16,12,12,16,7,0,0,5,16,4,0,1,1,0,0,8,16,15,11,3,0,0,0,4,14,12,14,15,2,0,0,0,0,0,4,16,4,0,0,0,5,8,15,15,2,0,0,0,11,15,9,2,0,0,5 +0,0,0,0,9,6,0,0,0,0,0,9,14,2,0,0,0,0,3,15,4,3,6,0,0,1,13,11,0,13,7,0,0,7,16,13,13,16,3,0,0,0,4,4,13,13,1,0,0,0,0,1,16,7,0,0,0,0,0,1,14,1,0,0,4 +0,1,15,12,3,0,0,0,0,1,12,16,15,4,0,0,0,0,0,4,16,10,0,0,0,0,0,15,16,5,0,0,0,0,0,12,14,13,1,0,0,0,0,0,0,8,12,0,0,0,7,8,6,13,13,0,0,0,13,16,16,13,7,0,3 +0,3,14,14,4,0,0,0,0,8,15,11,16,6,0,0,0,1,4,4,16,7,0,0,0,0,5,16,16,4,0,0,0,0,2,9,15,16,3,0,0,0,0,0,4,16,8,0,0,0,7,4,11,16,5,0,0,2,14,16,14,7,0,0,3 
+0,0,4,14,15,4,0,0,0,1,16,14,15,13,0,0,0,4,16,7,4,16,3,0,0,5,16,3,0,12,4,0,0,4,16,2,0,11,8,0,0,2,16,6,1,15,4,0,0,0,12,13,13,14,1,0,0,0,3,16,15,4,0,0,0 +0,0,6,14,9,0,0,0,0,3,16,13,16,6,0,0,0,2,16,3,9,13,0,0,0,1,16,1,1,15,4,0,0,3,16,0,0,12,6,0,0,0,16,1,0,13,4,0,0,0,16,8,8,15,1,0,0,0,7,15,14,5,0,0,0 +0,0,4,14,11,0,0,0,0,3,16,9,15,7,0,0,0,9,15,0,4,14,2,0,0,8,13,0,0,12,6,0,0,8,12,0,0,11,7,0,0,6,14,1,0,14,4,0,0,0,14,11,8,16,1,0,0,0,5,16,14,6,0,0,0 +0,0,1,8,6,0,0,0,0,0,14,16,16,16,0,0,0,0,16,16,16,16,0,0,0,0,16,16,16,16,0,0,0,0,16,16,16,15,0,0,0,0,16,16,16,13,0,0,0,0,9,16,16,9,0,0,0,0,6,8,6,1,0,0,1 +0,2,10,15,8,0,0,0,0,6,16,15,16,8,0,0,0,8,16,5,13,16,0,0,0,4,16,12,16,16,5,0,0,0,5,11,13,16,7,0,0,0,0,0,1,16,8,0,0,0,2,4,10,16,8,0,0,0,12,16,14,11,0,0,9 +0,2,15,13,2,0,0,0,0,6,16,14,15,1,0,0,0,7,12,0,15,8,0,0,0,4,7,0,16,8,0,0,0,0,0,1,16,3,0,0,0,0,0,9,15,0,0,0,0,2,15,16,16,14,5,0,0,2,10,12,13,16,8,0,2 +0,0,13,14,4,0,0,0,0,4,16,13,14,2,0,0,0,0,16,4,16,14,2,0,0,0,11,14,16,14,0,0,0,0,0,8,11,15,0,0,0,0,0,0,4,16,2,0,0,0,6,6,7,16,2,0,0,0,12,16,16,10,0,0,9 +0,1,12,16,15,11,3,0,0,7,16,14,11,9,4,0,0,3,16,8,2,0,0,0,0,5,16,16,15,3,0,0,0,2,14,16,16,14,0,0,0,0,0,0,12,16,1,0,0,2,12,12,16,16,0,0,0,1,14,16,10,3,0,0,5 +0,0,8,16,15,5,0,0,0,0,7,8,15,10,0,0,0,0,0,0,14,10,0,0,0,0,5,12,16,14,2,0,0,0,16,16,15,10,6,0,0,0,1,14,7,0,0,0,0,0,4,16,0,0,0,0,0,0,4,12,0,0,0,0,7 +0,0,8,12,3,0,0,0,0,5,16,16,14,4,0,0,0,10,15,2,14,16,2,0,0,3,16,10,13,16,7,0,0,0,5,8,10,16,8,0,0,0,0,0,0,12,12,0,0,0,10,7,4,14,12,0,0,0,10,16,16,14,7,0,9 +0,0,3,8,6,1,0,0,0,2,15,16,16,12,0,0,0,0,13,16,16,13,0,0,0,0,10,16,16,16,0,0,0,0,12,16,16,16,0,0,0,0,14,16,16,16,2,0,0,0,12,16,16,15,2,0,0,0,1,6,6,0,0,0,1 +0,0,5,14,12,1,0,0,0,0,14,15,16,10,0,0,0,4,13,1,3,15,0,0,0,5,7,0,0,11,7,0,0,4,13,0,0,6,8,0,0,4,16,2,0,9,8,0,0,0,13,15,10,16,3,0,0,0,4,13,16,11,0,0,0 +0,1,9,13,9,2,0,0,0,6,16,10,16,7,0,0,0,0,0,2,16,6,0,0,0,0,2,13,14,0,0,0,0,0,2,13,16,13,0,0,0,0,0,0,4,15,8,0,0,0,8,4,4,14,11,0,0,1,14,16,16,12,1,0,3 +0,0,8,13,16,10,3,0,0,5,16,10,8,10,6,0,0,6,12,0,0,0,0,0,0,6,15,12,3,0,0,0,0,4,12,14,16,1,0,0,0,0,0,0,16,8,0,0,0,0,4,7,16,7,0,0,0,0,11,14,10,0,0,0,5 +0,0,0,6,7,0,0,0,0,0,2,15,6,0,0,0,0,0,10,11,1,1,0,0,0,4,16,5,10,9,0,0,0,8,16,16,16,15,2,0,0,1,4,7,16,10,0,0,0,0,0,5,16,1,0,0,0,0,0,9,11,0,0,0,4 +0,0,2,12,5,0,0,0,0,1,13,16,13,2,0,0,0,3,16,10,14,11,0,0,0,7,16,8,5,16,4,0,0,7,16,8,1,14,5,0,0,2,15,9,1,15,5,0,0,0,11,14,11,15,2,0,0,0,3,16,15,3,0,0,0 +0,0,1,8,12,7,0,0,0,2,15,8,5,14,0,0,0,0,11,5,3,15,0,0,0,0,1,14,16,12,0,0,0,0,5,15,15,9,0,0,0,1,15,6,1,12,1,0,0,0,12,11,1,12,5,0,0,0,1,8,8,9,3,0,8 +0,0,2,12,16,14,10,0,0,0,1,8,4,11,13,0,0,0,0,0,0,14,5,0,0,0,0,3,9,15,0,0,0,0,5,16,16,15,3,0,0,0,2,10,13,3,0,0,0,0,0,13,7,0,0,0,0,0,1,15,2,0,0,0,7 +0,0,0,12,7,0,0,0,0,0,7,16,3,0,0,0,0,2,15,6,0,0,0,0,0,4,16,10,7,0,0,0,0,5,16,16,16,12,0,0,0,6,16,9,5,16,3,0,0,1,14,15,11,16,3,0,0,0,1,10,16,7,0,0,6 +0,0,9,16,8,1,0,0,0,5,16,9,14,11,1,0,0,10,10,0,8,16,4,0,0,4,16,15,15,16,8,0,0,0,4,8,7,13,8,0,0,0,0,0,0,9,11,0,0,0,11,6,9,16,7,0,0,0,11,13,12,6,0,0,9 +0,0,0,11,12,0,0,0,0,0,7,16,11,1,0,0,0,2,16,12,0,0,0,0,0,2,16,7,0,0,0,0,0,8,16,16,8,1,0,0,0,4,16,13,13,14,3,0,0,0,13,14,7,16,12,0,0,0,0,10,16,12,6,0,6 +0,0,7,14,7,0,0,0,0,0,15,14,16,3,0,0,0,0,15,12,16,6,0,0,0,0,6,16,16,4,0,0,0,0,8,16,16,11,0,0,0,0,15,12,3,16,8,0,0,2,15,11,8,16,10,0,0,0,9,16,16,12,6,0,8 +0,0,1,14,13,1,0,0,0,0,12,16,16,12,0,0,0,1,11,14,11,16,5,0,0,3,8,16,2,8,10,0,0,0,5,14,0,8,7,0,0,0,7,12,0,15,3,0,0,0,7,14,11,11,0,0,0,0,2,16,13,2,0,0,0 
+0,3,12,16,15,4,0,0,0,4,11,8,14,11,0,0,0,0,0,6,16,3,0,0,0,0,3,15,15,1,0,0,0,0,1,11,15,14,1,0,0,0,0,0,4,12,6,0,0,1,8,12,15,16,6,0,0,2,12,12,11,7,0,0,3 +0,0,2,6,9,4,0,0,0,3,15,5,8,13,0,0,0,4,15,3,5,16,0,0,0,0,9,13,15,7,0,0,0,0,3,16,12,0,0,0,0,0,9,9,13,6,0,0,0,0,10,8,11,16,1,0,0,0,5,12,12,7,0,0,8 +0,0,7,15,14,3,0,0,0,0,8,11,10,12,0,0,0,0,0,0,5,13,0,0,0,0,0,5,15,9,0,0,0,0,0,5,12,15,6,0,0,0,0,0,0,8,12,0,0,0,8,1,1,12,12,0,0,0,9,15,16,13,1,0,3 +0,1,8,15,16,12,3,0,0,4,16,13,5,6,2,0,0,0,16,7,1,0,0,0,0,6,16,16,15,8,0,0,0,1,12,8,12,16,5,0,0,0,0,0,5,16,5,0,0,0,3,8,15,12,0,0,0,0,8,14,8,0,0,0,5 +0,0,4,15,6,0,0,0,0,0,13,13,16,0,0,0,0,4,15,1,16,2,0,0,0,0,0,2,16,0,0,0,0,0,0,8,12,0,0,0,0,1,12,16,6,4,0,0,0,7,16,16,16,16,6,0,0,0,2,0,4,8,3,0,2 +0,0,0,8,12,1,0,0,0,0,8,16,14,8,0,0,0,6,16,2,2,16,0,0,0,8,16,2,0,10,6,0,0,4,16,3,0,8,8,0,0,0,10,9,0,8,8,0,0,0,4,16,12,16,2,0,0,0,0,7,13,8,0,0,0 +0,0,10,12,13,9,4,0,0,2,16,11,8,5,3,0,0,3,16,1,0,0,0,0,0,7,16,16,15,3,0,0,0,6,12,9,14,15,1,0,0,0,0,0,6,16,2,0,0,0,4,4,13,15,1,0,0,0,13,15,9,2,0,0,5 +0,0,13,15,3,0,0,0,0,2,16,11,15,4,0,0,0,4,16,2,16,16,0,0,0,2,13,16,16,16,2,0,0,0,0,4,5,15,2,0,0,0,0,0,0,12,7,0,0,0,2,4,4,11,12,0,0,0,11,16,16,15,10,0,9 +0,0,12,16,6,0,0,0,0,6,16,11,15,5,0,0,0,8,16,0,11,15,1,0,0,8,14,0,2,16,5,0,0,8,14,0,0,12,7,0,0,5,16,2,1,16,4,0,0,3,15,10,11,14,2,0,0,0,6,12,10,1,0,0,0 +0,0,11,16,12,1,0,0,0,0,16,12,16,12,0,0,0,0,3,2,12,12,0,0,0,0,5,16,16,6,0,0,0,0,4,12,16,15,2,0,0,0,0,0,5,15,7,0,0,1,11,12,12,16,7,0,0,2,12,15,12,5,1,0,3 +0,0,3,11,7,0,0,0,0,4,16,16,16,0,0,0,0,4,16,16,16,0,0,0,0,4,16,16,10,0,0,0,0,1,14,16,16,0,0,0,0,0,12,16,16,6,0,0,0,0,11,16,16,11,0,0,0,0,2,11,12,6,0,0,1 +0,0,6,11,14,3,0,0,0,2,16,12,11,16,0,0,0,4,16,9,7,15,0,0,0,0,14,15,16,12,0,0,0,0,10,16,16,3,0,0,0,1,16,6,11,15,1,0,0,3,16,3,7,16,3,0,0,0,7,14,16,12,1,0,8 +0,2,12,16,16,10,2,0,0,7,16,8,6,8,2,0,0,6,15,0,0,0,0,0,0,6,16,13,8,0,0,0,0,6,15,12,16,6,0,0,0,0,0,0,11,13,0,0,0,0,4,6,16,12,0,0,0,2,15,14,9,0,0,0,5 +0,0,6,16,5,0,0,0,0,6,16,16,16,7,0,0,0,11,15,2,16,14,0,0,0,9,16,10,16,16,7,0,0,2,13,16,16,16,8,0,0,0,0,0,0,13,12,0,0,0,3,8,12,16,7,0,0,0,5,16,12,5,0,0,9 +0,0,3,15,13,2,0,0,0,2,15,14,16,8,0,0,0,5,15,6,4,15,0,0,0,1,15,1,1,15,3,0,0,3,16,2,0,12,5,0,0,3,16,3,0,12,6,0,0,0,12,14,15,15,0,0,0,0,2,14,14,4,0,0,0 +0,0,2,14,1,0,0,0,0,0,11,13,0,0,0,0,0,2,16,1,0,0,0,0,0,4,16,1,3,0,0,0,0,8,12,12,16,13,0,0,0,4,16,9,4,13,6,0,0,0,13,12,8,12,11,0,0,0,2,10,13,14,4,0,6 +0,0,8,12,11,6,0,0,0,1,14,16,16,13,0,0,0,4,16,16,16,8,0,0,0,1,15,16,16,3,0,0,0,3,15,16,16,7,0,0,0,2,13,16,16,8,0,0,0,1,14,16,16,7,0,0,0,0,7,12,9,0,0,0,1 +0,0,1,12,14,1,0,0,0,0,8,15,7,0,0,0,0,1,14,7,0,0,0,0,0,1,16,16,16,10,2,0,0,1,16,15,5,11,10,0,0,1,16,4,0,8,13,0,0,0,11,11,9,16,8,0,0,0,2,11,15,5,0,0,6 +0,1,10,16,12,1,0,0,0,7,16,10,13,5,0,0,0,1,6,0,9,8,0,0,0,0,0,7,15,10,0,0,0,0,0,6,12,14,7,0,0,0,0,0,0,2,14,0,0,0,7,6,4,9,14,0,0,0,7,15,16,13,7,0,3 +0,0,8,15,10,1,0,0,0,0,15,13,15,10,0,0,0,0,16,2,0,14,1,0,0,0,14,5,7,16,2,0,0,0,7,12,11,15,3,0,0,0,0,0,0,13,4,0,0,0,6,6,9,16,2,0,0,0,7,13,14,3,0,0,9 +0,4,16,16,16,16,13,0,0,5,12,12,13,16,14,0,0,0,0,0,10,15,4,0,0,0,0,3,16,9,0,0,0,0,0,7,16,2,0,0,0,0,3,15,9,0,0,0,0,0,13,16,2,0,0,0,0,2,16,12,0,0,0,0,7 +0,0,1,16,15,5,0,0,0,0,9,16,16,12,0,0,0,2,16,16,16,7,0,0,0,0,12,16,16,2,0,0,0,0,8,16,16,5,0,0,0,0,15,16,16,11,0,0,0,0,15,16,16,16,8,0,0,0,2,10,15,11,4,0,1 +0,0,2,16,10,1,0,0,0,0,8,15,13,7,0,0,0,0,3,9,4,13,0,0,0,0,0,0,6,13,0,0,0,0,0,0,12,8,0,0,0,0,0,4,16,3,0,0,0,0,2,15,14,8,14,5,0,0,2,15,16,16,12,7,2 
+0,0,1,12,13,2,0,0,0,0,12,16,10,2,0,0,0,2,16,7,0,0,0,0,0,1,16,12,8,2,0,0,0,2,16,13,13,14,2,0,0,0,13,7,0,12,12,0,0,0,9,13,6,15,13,0,0,0,1,11,15,14,6,0,6 +0,0,4,9,9,0,0,0,0,1,16,15,12,10,0,0,0,3,16,5,0,0,0,0,0,6,16,16,7,0,0,0,0,3,16,15,16,1,0,0,0,0,1,0,12,5,0,0,0,0,13,10,16,6,0,0,0,0,8,16,9,0,0,0,5 +0,0,12,14,8,3,0,0,0,3,16,15,16,16,3,0,0,0,14,12,14,13,1,0,0,0,7,16,16,3,0,0,0,0,12,16,16,2,0,0,0,1,15,4,9,14,0,0,0,3,15,4,5,16,1,0,0,1,12,16,16,12,0,0,8 +0,0,2,9,16,6,0,0,0,1,15,16,11,5,0,0,0,2,16,7,0,0,0,0,0,5,16,0,3,0,0,0,0,4,16,16,16,14,3,0,0,2,16,6,1,10,10,0,0,0,10,12,8,15,10,0,0,0,1,11,16,12,4,0,6 +0,1,11,9,14,11,1,0,0,8,16,14,14,16,2,0,0,7,16,0,11,12,0,0,0,1,1,1,15,5,0,0,0,0,0,10,14,0,0,0,0,0,2,15,5,0,0,0,0,0,12,16,4,0,0,0,0,0,15,11,1,0,0,0,7 +0,0,11,15,16,16,16,7,0,0,14,14,12,16,16,3,0,0,1,0,4,16,6,0,0,0,0,0,13,12,0,0,0,0,0,7,15,4,0,0,0,0,3,16,9,0,0,0,0,0,11,16,5,0,0,0,0,0,15,15,0,0,0,0,7 +0,0,11,12,2,0,0,0,0,3,16,12,7,0,0,0,0,2,14,3,10,0,0,0,0,0,0,5,8,0,0,0,0,0,0,11,5,0,0,0,0,0,3,16,1,0,0,0,0,0,11,14,9,15,15,0,0,0,8,13,12,8,10,2,2 +0,0,2,16,11,0,0,0,0,1,11,16,16,7,0,0,0,10,16,16,16,9,0,0,0,2,9,15,16,11,0,0,0,0,0,12,16,8,0,0,0,0,4,16,16,5,0,0,0,0,10,16,16,15,4,0,0,0,3,15,16,12,0,0,1 +0,4,8,8,12,16,5,0,0,8,16,16,16,15,3,0,0,2,3,7,16,7,0,0,0,0,0,12,14,1,0,0,0,0,9,16,7,0,0,0,0,0,14,16,0,0,0,0,0,6,16,12,0,0,0,0,0,6,14,7,0,0,0,0,7 +0,0,9,16,16,7,0,0,0,0,13,15,13,11,0,0,0,1,15,11,1,0,0,0,0,2,16,16,15,3,0,0,0,0,7,12,12,10,0,0,0,0,0,0,9,14,0,0,0,0,4,8,15,15,0,0,0,0,10,16,16,7,0,0,5 +0,0,5,7,13,7,0,0,0,7,16,16,10,15,0,0,0,5,15,5,6,11,0,0,0,1,9,15,15,4,0,0,0,0,5,16,16,3,0,0,0,0,14,7,13,8,0,0,0,0,15,11,13,11,0,0,0,0,5,12,12,4,0,0,8 +0,0,4,15,9,1,0,0,0,0,11,16,16,9,0,0,0,0,1,10,16,11,0,0,0,0,0,8,16,12,0,0,0,0,0,11,16,10,0,0,0,0,1,16,16,7,0,0,0,0,5,16,16,12,0,0,0,0,5,16,14,6,0,0,1 +0,4,15,9,8,8,1,0,0,4,16,16,16,16,8,0,0,9,15,2,15,14,1,0,0,5,5,5,16,4,0,0,0,0,0,12,12,0,0,0,0,0,7,15,2,0,0,0,0,3,16,7,0,0,0,0,0,6,16,4,0,0,0,0,7 +0,0,3,13,14,1,0,0,0,0,8,15,7,2,0,0,0,0,10,12,2,0,0,0,0,0,13,16,16,10,1,0,0,0,15,14,11,10,10,0,0,0,13,5,0,6,14,0,0,0,8,12,8,12,10,0,0,0,1,10,12,12,4,0,6 +0,0,0,10,13,3,0,0,0,0,7,16,12,6,0,0,0,0,12,13,1,0,0,0,0,0,16,16,16,10,0,0,0,2,16,15,1,12,8,0,0,0,16,4,0,6,15,0,0,0,11,14,8,15,14,1,0,0,2,11,16,11,2,0,6 +0,0,10,13,11,10,0,0,0,0,12,16,16,16,0,0,0,0,13,11,0,2,0,0,0,0,14,16,13,1,0,0,0,0,5,10,14,9,0,0,0,0,0,0,10,13,0,0,0,0,14,16,16,11,0,0,0,0,10,16,13,1,0,0,5 +0,0,3,14,3,0,0,0,0,3,15,16,15,1,0,0,0,6,15,5,8,11,0,0,0,7,10,0,0,12,5,0,0,4,11,0,0,6,10,0,0,2,14,0,0,6,12,0,0,0,12,9,5,13,8,0,0,0,2,15,16,13,2,0,0 +0,0,8,12,13,2,0,0,0,7,16,10,10,15,2,0,0,10,11,0,1,16,4,0,0,6,15,12,16,16,7,0,0,0,5,8,4,12,8,0,0,0,0,0,0,12,12,0,0,0,11,10,12,16,8,0,0,0,9,12,15,9,1,0,9 +0,0,5,13,12,2,0,0,0,0,14,12,12,13,0,0,0,0,11,4,2,15,0,0,0,0,8,8,4,16,1,0,0,0,2,14,16,16,6,0,0,0,0,0,0,7,10,0,0,0,8,9,8,15,10,0,0,0,4,16,16,13,2,0,9 +0,0,0,15,12,3,0,0,0,0,0,16,16,8,0,0,0,0,9,16,16,9,0,0,0,0,0,10,16,13,0,0,0,0,0,4,16,16,2,0,0,0,1,8,16,16,10,0,0,0,8,16,16,16,16,5,0,0,1,9,12,14,12,4,1 +0,0,14,15,12,12,6,0,0,1,16,12,12,16,7,0,0,0,6,1,12,12,0,0,0,0,0,8,15,2,0,0,0,0,0,13,7,0,0,0,0,0,6,15,1,0,0,0,0,0,15,10,0,0,0,0,0,0,15,6,0,0,0,0,7 +0,1,12,13,9,5,0,0,0,5,16,11,15,16,0,0,0,4,16,5,8,16,4,0,0,2,13,16,16,16,5,0,0,0,0,3,4,15,6,0,0,0,0,0,1,15,6,0,0,0,11,9,12,16,2,0,0,0,11,16,14,8,0,0,9 +0,0,0,7,8,0,0,0,0,0,0,14,4,5,0,0,0,0,2,14,4,12,0,0,0,0,9,7,7,9,0,0,0,1,16,2,10,12,3,0,0,10,16,16,16,16,3,0,0,4,8,8,15,4,0,0,0,0,0,7,11,0,0,0,4 
+0,0,4,16,14,2,0,0,0,0,16,11,11,10,0,0,0,1,4,1,11,10,0,0,0,0,1,11,16,6,0,0,0,0,6,16,14,13,2,0,0,0,1,2,1,10,11,0,0,0,7,11,6,13,14,0,0,0,3,12,16,15,7,0,3 +0,0,9,11,8,5,0,0,0,2,16,14,16,15,0,0,0,6,16,11,2,0,0,0,0,8,16,16,13,2,0,0,0,1,3,0,12,9,0,0,0,0,0,0,9,12,0,0,0,0,5,8,16,6,0,0,0,0,8,15,10,0,0,0,5 +0,0,4,14,10,0,0,0,0,0,14,15,15,6,0,0,0,1,16,5,5,11,0,0,0,0,5,4,6,13,0,0,0,0,0,0,14,6,0,0,0,0,0,3,16,2,0,0,0,0,3,15,16,12,16,3,0,0,4,16,12,12,12,5,2 +0,0,6,15,16,6,0,0,0,5,15,16,16,11,0,0,0,8,16,16,16,9,0,0,0,0,4,16,16,6,0,0,0,0,2,16,16,0,0,0,0,0,8,16,16,2,0,0,0,0,11,16,16,13,2,0,0,0,5,13,11,8,2,0,1 +0,0,5,3,10,10,0,0,0,4,16,13,14,13,0,0,0,4,14,0,8,8,0,0,0,1,13,13,15,0,0,0,0,0,0,14,16,1,0,0,0,0,13,13,8,5,0,0,0,1,13,10,8,10,0,0,0,0,2,13,16,6,0,0,8 +0,0,5,10,12,7,0,0,0,1,14,16,16,16,0,0,0,11,16,4,7,15,0,0,0,5,12,0,11,14,0,0,0,0,0,5,16,9,0,0,0,0,2,14,13,5,1,0,0,0,10,16,16,16,15,0,0,0,7,12,12,9,12,1,2 +0,0,9,14,9,2,0,0,0,2,16,10,14,8,0,0,0,2,16,2,6,16,1,0,0,0,14,9,11,16,5,0,0,0,2,11,11,14,9,0,0,0,0,0,0,13,11,0,0,0,5,9,11,16,6,0,0,0,7,13,12,8,0,0,9 +0,0,11,16,14,7,1,0,0,7,15,7,13,16,4,0,0,10,12,0,5,16,6,0,0,3,16,12,15,16,8,0,0,0,2,4,7,16,6,0,0,0,0,0,3,16,4,0,0,0,7,10,10,15,2,0,0,0,11,16,14,6,0,0,9 +0,0,5,15,12,3,0,0,0,0,9,12,10,3,0,0,0,0,14,6,0,0,0,0,0,0,15,16,16,3,0,0,0,0,7,8,10,10,0,0,0,0,0,0,5,13,0,0,0,0,4,12,16,11,0,0,0,0,6,16,11,2,0,0,5 +0,0,5,12,11,2,0,0,0,1,14,15,13,14,0,0,0,2,15,4,6,16,0,0,0,0,0,2,13,12,0,0,0,0,0,11,16,13,2,0,0,0,0,1,5,12,12,0,0,0,8,11,8,14,12,0,0,0,7,16,12,12,3,0,3 +0,0,0,11,15,5,0,0,0,0,8,16,13,6,0,0,0,0,11,14,0,0,0,0,0,0,15,15,12,8,0,0,0,0,16,14,12,15,9,0,0,0,16,6,0,11,14,0,0,0,10,14,9,16,11,0,0,0,1,11,13,12,1,0,6 +0,0,11,12,16,10,1,0,0,5,16,15,7,15,4,0,0,5,16,6,8,15,1,0,0,0,7,16,16,10,0,0,0,0,6,16,16,7,0,0,0,0,14,10,10,12,0,0,0,4,16,9,12,14,0,0,0,1,11,16,15,5,0,0,8 +0,0,2,13,16,6,0,0,0,0,11,16,11,5,0,0,0,0,15,6,0,0,0,0,0,4,16,4,10,2,0,0,0,3,14,15,16,14,1,0,0,3,16,8,0,14,9,0,0,0,13,8,5,16,5,0,0,0,4,13,16,10,0,0,6 +0,0,0,5,4,1,0,0,0,0,0,14,8,12,0,0,0,0,4,13,4,12,0,0,0,0,12,5,7,9,0,0,0,2,16,4,13,16,7,0,0,10,16,16,16,11,1,0,0,5,6,7,15,0,0,0,0,0,0,7,7,0,0,0,4 +0,0,0,11,8,0,0,0,0,0,0,13,7,0,0,0,0,0,3,16,6,15,0,0,0,0,9,11,7,14,0,0,0,1,15,6,12,13,1,0,0,7,16,16,16,16,11,0,0,5,12,13,16,8,3,0,0,0,0,12,12,0,0,0,4 +0,0,10,16,16,8,0,0,0,0,15,14,9,9,0,0,0,3,16,5,0,0,0,0,0,2,16,16,10,0,0,0,0,0,11,13,15,6,0,0,0,0,0,0,10,11,0,0,0,0,5,11,15,13,0,0,0,0,7,16,15,4,0,0,5 +0,0,5,12,8,2,0,0,0,0,12,6,8,13,0,0,0,2,10,0,0,12,2,0,0,0,14,12,12,10,0,0,0,0,1,15,16,7,0,0,0,0,14,8,0,9,1,0,0,0,14,2,1,12,2,0,0,0,8,12,12,6,0,0,8 +0,0,4,11,4,0,0,0,0,0,12,15,16,14,0,0,0,4,15,0,5,12,6,0,0,6,10,0,0,7,8,0,0,7,8,0,0,8,8,0,0,6,13,0,1,13,3,0,0,2,16,10,12,13,0,0,0,0,6,13,12,1,0,0,0 +0,0,3,16,11,0,0,0,0,0,5,16,16,0,0,0,0,1,11,16,15,0,0,0,0,3,15,16,15,0,0,0,0,0,2,16,14,0,0,0,0,0,0,15,16,0,0,0,0,0,1,16,16,2,0,0,0,0,1,11,14,5,0,0,1 +0,0,3,14,7,1,0,0,0,0,11,15,16,12,0,0,0,0,14,8,0,13,4,0,0,1,16,2,0,12,6,0,0,1,16,2,0,13,7,0,0,2,16,0,4,16,1,0,0,0,13,10,15,13,0,0,0,0,6,15,12,6,0,0,0 +0,0,9,13,4,0,0,0,0,1,16,9,11,0,0,0,0,2,11,0,13,0,0,0,0,0,2,3,13,0,0,0,0,0,0,11,5,0,0,0,0,0,3,14,1,0,0,0,0,0,11,14,10,8,11,0,0,0,11,13,12,12,14,2,2 +0,0,7,11,13,7,0,0,0,1,15,15,13,15,2,0,0,4,16,4,5,14,4,0,0,0,10,16,16,13,2,0,0,0,7,15,16,3,0,0,0,1,16,9,8,15,0,0,0,3,15,6,9,16,0,0,0,0,6,16,15,6,0,0,8 +0,0,4,14,15,8,1,0,0,0,14,14,12,15,7,0,0,3,15,1,0,9,7,0,0,6,12,0,0,8,8,0,0,8,11,0,0,9,8,0,0,8,12,0,0,14,5,0,0,3,16,9,14,11,0,0,0,0,6,14,11,0,0,0,0 
+0,2,12,16,16,16,14,0,0,9,16,16,15,16,6,0,0,11,13,0,11,14,0,0,0,0,0,2,16,6,0,0,0,0,0,12,14,1,0,0,0,0,4,16,6,0,0,0,0,0,11,16,1,0,0,0,0,2,15,13,0,0,0,0,7 +0,0,1,7,12,5,0,0,0,0,4,16,9,6,0,0,0,0,11,8,0,0,0,0,0,0,15,8,8,5,0,0,0,0,16,16,12,16,2,0,0,0,15,5,0,15,5,0,0,0,11,9,8,16,4,0,0,0,2,14,15,8,0,0,6 +0,0,5,10,12,2,0,0,0,2,16,13,11,11,0,0,0,7,14,0,4,15,0,0,0,1,6,0,10,12,0,0,0,0,0,2,16,6,0,0,0,0,0,9,12,0,0,0,0,0,4,16,16,16,16,2,0,0,7,15,11,8,8,1,2 +0,0,7,13,16,8,0,0,0,9,16,12,8,16,2,0,0,4,7,0,6,15,1,0,0,0,4,13,16,8,0,0,0,0,6,13,15,16,3,0,0,0,0,0,2,11,12,0,0,0,7,8,4,14,12,0,0,0,10,16,16,14,4,0,3 +0,0,10,15,10,0,0,0,0,10,15,10,16,6,0,0,0,2,2,13,15,1,0,0,0,0,4,16,16,5,0,0,0,0,0,3,7,15,5,0,0,0,0,0,0,9,13,0,0,0,13,7,5,11,14,0,0,0,7,16,16,14,3,0,3 +0,0,0,11,14,2,0,0,0,0,9,16,12,6,0,0,0,2,15,8,0,0,0,0,0,0,16,4,3,1,0,0,0,0,16,14,16,15,3,0,0,0,13,14,2,4,14,1,0,0,7,14,5,8,16,4,0,0,1,9,15,14,7,0,6 +0,0,10,14,11,9,0,0,0,0,9,12,8,9,0,0,0,0,11,9,1,0,0,0,0,0,15,16,14,2,0,0,0,0,12,2,10,6,0,0,0,0,0,0,0,13,0,0,0,0,7,5,8,12,0,0,0,0,10,16,15,4,0,0,5 +0,1,11,14,12,6,0,0,0,8,15,7,11,15,0,0,0,11,9,0,8,16,4,0,0,8,14,13,16,16,7,0,0,0,6,9,4,15,8,0,0,0,0,0,0,11,10,0,0,0,9,8,11,16,3,0,0,0,14,14,11,5,0,0,9 +0,0,8,11,15,4,0,0,0,0,14,14,12,6,0,0,0,0,11,9,0,0,0,0,0,0,10,16,14,2,0,0,0,0,4,8,9,14,2,0,0,0,0,0,0,15,2,0,0,0,4,11,13,16,1,0,0,0,4,15,12,9,0,0,5 +0,0,8,12,14,15,15,1,0,3,15,16,14,16,15,2,0,0,10,2,5,16,7,0,0,0,0,1,14,12,0,0,0,0,0,6,16,5,0,0,0,0,0,15,12,0,0,0,0,0,6,16,7,0,0,0,0,0,11,16,3,0,0,0,7 +0,0,7,8,8,8,0,0,0,0,12,16,14,12,0,0,0,0,15,5,2,0,0,0,0,0,14,16,13,2,0,0,0,0,7,4,12,9,0,0,0,0,0,0,6,12,0,0,0,0,11,12,16,10,0,0,0,0,8,13,8,1,0,0,5 +0,0,3,12,15,3,0,0,0,0,14,15,14,4,0,0,0,1,16,15,1,0,0,0,0,3,16,16,5,2,1,0,0,2,16,11,4,8,10,0,0,2,16,2,0,6,13,0,0,0,11,11,4,11,10,0,0,0,1,10,13,10,3,0,6 +0,0,5,15,11,9,1,0,0,0,10,16,12,14,7,0,0,0,15,2,0,10,8,0,0,2,12,0,0,11,8,0,0,4,10,0,0,12,5,0,0,8,10,0,3,15,1,0,0,2,15,12,14,9,0,0,0,0,6,14,9,1,0,0,0 +0,0,0,10,14,0,0,0,0,0,4,16,7,0,0,0,0,0,8,16,6,8,0,0,0,0,14,9,8,15,0,0,0,4,16,3,11,15,6,0,0,10,16,16,16,16,9,0,0,6,12,13,16,7,0,0,0,0,0,12,16,2,0,0,4 +0,0,1,10,14,13,2,0,0,0,9,15,13,16,5,0,0,0,0,0,0,11,5,0,0,0,0,0,4,13,0,0,0,0,8,16,16,13,2,0,0,0,8,14,16,10,1,0,0,0,0,12,7,0,0,0,0,0,0,14,2,0,0,0,7 +0,0,7,15,8,2,0,0,0,0,12,16,14,5,0,0,0,0,12,16,16,4,0,0,0,0,12,16,16,2,0,0,0,0,13,16,16,2,0,0,0,0,14,16,16,0,0,0,0,1,14,16,16,14,1,0,0,1,8,16,11,8,0,0,1 +0,0,6,9,4,1,0,0,0,0,6,16,16,9,1,0,0,0,4,16,1,13,7,0,0,0,0,14,5,12,8,0,0,1,10,15,16,10,0,0,0,6,15,9,16,1,0,0,0,7,13,8,14,4,0,0,0,0,6,10,13,1,0,0,8 +0,0,5,12,8,1,0,0,0,0,15,16,16,8,0,0,0,0,15,16,16,6,0,0,0,1,14,16,16,4,0,0,0,2,15,16,16,9,0,0,0,0,6,16,16,14,1,0,0,0,8,16,16,15,0,0,0,0,4,12,10,4,0,0,1 +0,0,0,2,15,3,0,0,0,0,0,8,14,0,0,0,0,0,0,12,8,2,4,0,0,0,4,15,2,15,8,0,0,4,13,14,9,16,6,0,3,16,16,16,16,14,0,0,1,6,4,4,13,9,0,0,0,0,0,3,16,4,0,0,4 +0,0,7,16,10,0,0,0,0,1,13,14,16,8,0,0,0,5,16,4,7,13,0,0,0,4,16,4,0,13,5,0,0,7,16,0,0,12,8,0,0,2,15,7,0,10,11,0,0,0,12,15,12,16,5,0,0,0,6,15,16,8,0,0,0 +0,0,4,14,16,11,0,0,0,0,12,12,16,15,0,0,0,0,0,1,16,11,0,0,0,0,2,9,16,9,0,0,0,0,13,16,16,16,9,0,0,0,2,15,12,8,1,0,0,0,4,16,9,0,0,0,0,0,5,15,7,0,0,0,7 +0,0,3,12,15,3,0,0,0,0,9,15,15,7,0,0,0,0,12,14,12,1,0,0,0,0,8,16,16,14,0,0,0,0,0,0,0,13,7,0,0,0,0,0,0,7,12,0,0,2,16,10,8,13,10,0,0,0,6,11,16,14,3,0,5 +0,0,0,3,15,5,0,0,0,0,0,6,16,2,0,0,0,0,0,6,16,3,0,0,0,0,2,13,5,12,2,0,0,5,13,13,9,16,1,0,4,16,16,16,16,15,0,0,2,9,7,4,13,9,0,0,0,0,0,5,16,5,0,0,4 
+0,3,13,16,16,6,0,0,0,4,16,9,14,12,0,0,0,0,8,3,14,7,0,0,0,0,0,12,16,3,0,0,0,0,0,5,15,15,1,0,0,0,0,0,1,16,10,0,0,2,12,8,11,16,7,0,0,2,15,16,15,9,0,0,3 +0,1,14,16,9,0,0,0,0,8,16,12,15,6,0,0,0,6,16,7,5,14,0,0,0,4,16,4,0,14,4,0,0,4,16,3,0,10,6,0,0,6,16,1,0,10,8,0,0,3,16,9,7,15,6,0,0,1,15,16,15,9,0,0,0 +0,0,8,16,11,0,0,0,0,1,15,15,14,9,0,0,0,1,15,12,0,12,6,0,0,0,16,7,0,10,4,0,0,0,16,6,0,10,8,0,0,3,16,3,0,15,3,0,0,1,14,14,12,16,0,0,0,0,7,13,16,8,0,0,0 +0,0,8,14,12,2,0,0,0,3,16,12,15,6,0,0,0,3,7,7,13,9,0,0,0,0,5,15,16,3,0,0,0,0,0,7,14,14,1,0,0,0,0,0,3,16,5,0,0,0,7,5,10,16,5,0,0,0,13,10,8,3,0,0,3 +0,1,9,15,5,0,0,0,0,6,16,13,15,3,0,0,0,12,11,0,12,14,0,0,0,5,15,11,15,16,3,0,0,0,4,8,10,16,5,0,0,0,0,0,0,12,12,0,0,0,6,4,7,14,13,0,0,0,7,14,14,12,5,0,9 +0,2,14,16,11,0,0,0,0,8,15,7,16,0,0,0,0,1,2,0,15,1,0,0,0,0,2,12,14,0,0,0,0,0,1,11,14,11,0,0,0,0,0,0,2,14,5,0,0,2,8,3,4,10,12,0,0,1,13,16,16,14,4,0,3 +0,0,3,11,16,16,10,0,0,0,11,10,8,16,7,0,0,0,0,0,4,16,2,0,0,0,1,4,13,11,0,0,0,0,10,16,16,16,9,0,0,0,4,13,13,8,2,0,0,0,2,14,6,0,0,0,0,0,4,12,1,0,0,0,7 +0,0,1,13,12,14,7,0,0,0,5,12,9,16,2,0,0,0,0,0,3,13,0,0,0,0,0,0,10,7,0,0,0,0,7,16,16,10,1,0,0,0,9,14,16,11,1,0,0,0,0,15,5,0,0,0,0,0,1,15,2,0,0,0,7 +0,0,0,0,10,7,0,0,0,0,0,0,15,5,0,0,0,0,0,4,16,3,0,0,0,0,0,7,15,3,0,0,0,3,7,14,12,16,1,0,1,14,16,16,16,15,0,0,0,9,10,14,16,16,0,0,0,0,0,0,12,9,0,0,4 +0,0,4,16,16,8,0,0,0,2,16,10,12,16,4,0,0,4,16,4,10,16,4,0,0,3,16,16,16,16,1,0,0,0,0,0,4,16,4,0,0,0,0,0,2,16,1,0,0,0,0,6,11,16,0,0,0,0,4,13,9,3,0,0,9 +0,1,11,16,15,2,0,0,0,4,12,8,14,8,0,0,0,1,1,2,15,6,0,0,0,0,0,7,16,4,0,0,0,0,0,1,13,13,0,0,0,0,0,0,2,16,7,0,0,0,10,11,8,15,8,0,0,0,8,14,16,12,1,0,3 +0,0,0,8,14,5,0,0,0,0,0,9,16,14,2,0,0,0,0,11,16,13,1,0,0,0,6,16,16,7,0,0,0,3,13,16,16,4,0,0,0,3,11,15,16,12,0,0,0,0,0,10,16,15,3,0,0,0,0,8,16,15,6,0,1 +0,0,3,15,12,2,0,0,0,1,16,14,13,11,0,0,0,5,16,12,0,11,5,0,0,4,11,11,0,3,8,0,0,6,8,0,0,4,8,0,0,4,11,0,0,9,7,0,0,0,15,13,9,13,5,0,0,0,3,12,14,12,0,0,0 +0,0,0,10,13,3,0,0,0,0,0,16,16,8,0,0,0,0,4,16,16,7,0,0,0,2,15,16,16,10,0,0,0,6,16,16,16,13,0,0,0,0,0,8,16,14,0,0,0,0,0,11,16,16,1,0,0,0,0,12,12,7,0,0,1 +0,0,0,9,14,14,12,6,0,0,3,8,8,11,16,7,0,0,0,0,1,12,12,0,0,0,3,4,11,14,0,0,0,0,13,16,16,15,4,0,0,0,0,10,11,0,0,0,0,0,0,14,9,0,0,0,0,0,0,15,4,0,0,0,7 +0,0,2,14,16,13,0,0,0,0,15,9,7,9,0,0,0,0,14,6,5,4,0,0,0,0,11,16,16,16,3,0,0,0,1,5,4,11,8,0,0,0,3,0,0,7,9,0,0,2,16,11,4,8,10,0,0,0,1,10,14,15,3,0,5 +0,0,4,13,16,15,4,0,0,3,16,10,8,16,4,0,0,3,10,0,3,16,4,0,0,0,0,11,16,15,0,0,0,0,0,3,14,16,6,0,0,0,0,0,2,16,9,0,0,0,2,12,8,16,8,0,0,0,3,16,16,13,1,0,3 +0,0,8,14,13,1,0,0,0,3,16,16,16,7,0,0,0,4,16,3,11,8,0,0,0,0,4,0,15,4,0,0,0,0,0,3,15,1,0,0,0,0,7,14,15,5,2,0,0,1,16,16,16,16,15,0,0,0,6,4,6,9,11,0,2 +0,0,6,11,4,0,0,0,0,0,12,16,14,0,0,0,0,0,12,16,6,1,0,0,0,0,5,16,16,5,0,0,0,0,12,16,16,4,0,0,0,0,12,16,16,9,0,0,0,0,13,16,16,12,0,0,0,0,8,12,11,6,0,0,1 +0,0,0,16,11,4,0,0,0,0,2,16,16,12,0,0,0,0,10,16,16,8,0,0,0,2,16,16,16,4,0,0,0,2,11,16,16,4,0,0,0,0,1,16,16,6,0,0,0,0,0,14,16,16,6,0,0,0,0,12,16,15,6,0,1 +0,0,0,9,15,10,0,0,0,0,2,16,16,16,0,0,0,0,0,15,16,16,0,0,0,0,2,16,16,12,0,0,0,0,14,16,16,12,0,0,0,4,16,16,16,11,0,0,0,0,0,12,16,16,4,0,0,0,0,14,15,10,2,0,1 +0,0,13,14,5,0,0,0,0,3,16,15,16,3,0,0,0,7,11,1,13,4,0,0,0,1,2,2,15,2,0,0,0,0,0,9,12,0,0,0,0,0,8,16,4,1,1,0,0,0,16,16,16,16,8,0,0,0,11,12,10,11,7,0,2 +0,0,7,13,14,6,0,0,0,2,16,16,16,14,0,0,0,3,16,9,4,2,0,0,0,2,16,16,14,5,0,0,0,0,6,8,9,15,7,0,0,0,0,0,0,8,12,0,0,0,11,4,5,14,11,0,0,0,8,16,16,13,1,0,5 
+0,0,9,16,14,1,0,0,0,4,16,14,15,8,0,0,0,8,16,5,3,15,2,0,0,7,13,0,0,15,4,0,0,8,12,0,0,12,5,0,0,5,14,1,2,14,4,0,0,0,16,16,16,13,1,0,0,0,8,14,11,3,0,0,0 +0,0,0,7,8,2,0,0,0,0,2,15,16,8,0,0,0,0,4,16,16,8,0,0,0,0,8,16,16,9,0,0,0,3,16,16,16,16,0,0,0,1,6,9,16,16,0,0,0,0,0,7,16,16,2,0,0,0,0,7,11,3,0,0,1 +0,0,3,14,8,0,0,0,0,8,16,16,16,6,0,0,0,11,14,4,7,14,0,0,0,9,10,0,0,12,7,0,0,6,11,0,0,6,8,0,0,0,16,4,0,4,12,0,0,0,10,14,10,14,10,0,0,0,2,12,16,13,2,0,0 +0,0,12,13,9,2,0,0,0,3,16,16,16,7,0,0,0,2,16,5,0,0,0,0,0,0,14,16,14,3,0,0,0,0,1,4,10,15,3,0,0,0,0,0,0,9,12,0,0,0,13,12,12,13,13,0,0,0,6,12,14,14,5,0,5 +0,0,2,11,15,8,0,0,0,0,14,13,13,8,0,0,0,0,14,8,8,4,0,0,0,0,7,16,16,14,2,0,0,0,1,4,1,13,6,0,0,0,0,0,0,7,13,0,0,0,10,7,2,7,12,0,0,0,2,10,14,16,7,0,5 +0,0,12,10,2,0,0,0,0,2,16,8,10,2,0,0,0,0,16,0,10,10,0,0,0,0,5,10,12,15,0,0,0,0,0,6,8,14,3,0,0,0,0,0,0,5,12,0,0,0,0,1,4,9,13,0,0,0,10,15,16,15,5,0,9 +0,0,1,12,7,0,0,0,0,0,14,16,6,0,0,0,0,2,16,5,0,0,0,0,0,1,16,2,0,0,0,0,0,3,16,11,5,3,0,0,0,1,16,15,14,16,8,0,0,0,11,16,5,7,16,5,0,0,1,10,16,16,13,1,6 +0,1,11,14,14,3,0,0,0,9,14,10,11,14,0,0,0,8,8,0,9,12,0,0,0,3,3,8,16,6,0,0,0,0,0,8,14,16,2,0,0,0,0,0,0,10,12,0,0,0,5,6,4,12,13,0,0,0,11,12,15,14,6,0,3 +0,0,0,1,16,8,0,0,0,0,0,3,16,6,0,0,0,0,0,7,15,9,3,0,0,0,3,14,8,16,4,0,0,7,15,14,12,16,3,0,3,15,16,16,16,16,1,0,1,4,4,4,14,13,0,0,0,0,0,1,16,7,0,0,4 +0,0,3,15,16,16,6,0,0,0,11,15,12,16,2,0,0,0,0,0,11,13,0,0,0,0,4,8,16,8,2,0,0,0,14,16,16,16,10,0,0,0,3,13,12,6,1,0,0,0,1,16,6,0,0,0,0,0,7,15,2,0,0,0,7 +0,0,0,4,16,1,0,0,0,0,0,6,14,0,0,0,0,0,0,11,10,9,2,0,0,0,4,16,8,16,4,0,0,5,14,11,11,16,2,0,3,15,16,16,16,14,0,0,3,12,11,14,16,13,0,0,0,0,0,7,16,7,0,0,4 +0,0,10,16,14,6,0,0,0,0,14,16,9,15,3,0,0,0,10,16,5,15,4,0,0,0,3,16,16,13,1,0,0,0,12,16,16,10,0,0,0,2,16,5,10,14,0,0,0,7,16,8,7,16,3,0,0,0,9,14,16,11,1,0,8 +0,1,12,8,0,0,0,0,0,9,16,14,9,0,0,0,0,9,8,0,16,0,0,0,0,1,0,4,13,0,0,0,0,0,0,9,7,0,0,0,0,0,1,13,3,0,0,0,0,1,15,15,11,10,7,0,0,0,11,12,12,12,11,0,2 +0,0,0,1,16,5,0,0,0,0,0,4,16,6,0,0,0,0,0,7,15,3,2,0,0,0,1,14,8,16,7,0,0,3,12,15,9,16,2,0,2,15,16,16,16,15,0,0,0,3,7,8,15,12,0,0,0,0,0,1,16,9,0,0,4 +0,0,6,8,0,0,0,0,0,0,11,14,0,0,0,0,0,0,13,7,0,0,0,0,0,0,16,7,0,0,0,0,0,0,16,9,6,0,0,0,0,4,16,16,16,14,2,0,0,2,14,14,8,16,9,0,0,0,6,15,16,16,6,0,6 +0,0,0,9,9,1,0,0,0,0,10,16,10,12,1,0,0,0,5,14,1,12,4,0,0,0,0,11,15,16,2,0,0,0,3,12,16,9,0,0,0,1,12,10,4,16,7,0,0,2,15,5,0,11,12,0,0,0,1,10,13,13,6,0,8 +0,0,13,16,5,0,0,0,0,4,15,13,13,0,0,0,0,7,8,3,16,1,0,0,0,9,3,4,16,0,0,0,0,0,0,8,10,0,0,0,0,0,2,15,5,0,0,0,0,0,12,16,16,16,7,0,0,0,13,12,9,15,7,0,2 +0,0,10,16,14,3,0,0,0,2,16,10,14,12,0,0,0,0,7,4,14,8,0,0,0,0,0,8,16,9,0,0,0,0,0,0,7,16,4,0,0,0,0,0,0,13,7,0,0,8,11,5,4,16,4,0,0,1,11,15,16,8,1,0,3 +0,0,0,9,14,0,0,0,0,0,0,13,11,0,4,0,0,0,2,16,6,6,15,0,0,0,9,14,0,14,10,0,0,12,16,14,13,15,1,0,0,11,14,14,16,12,0,0,0,0,0,7,16,5,0,0,0,0,0,14,13,0,0,0,4 +0,0,11,15,6,0,0,0,0,0,10,16,8,0,0,0,0,0,8,16,16,0,0,0,0,2,12,16,14,2,0,0,0,0,6,16,16,7,0,0,0,0,8,16,16,9,0,0,0,0,12,16,16,15,3,0,0,0,8,15,12,5,1,0,1 +0,0,6,9,13,11,2,0,0,7,16,16,16,16,3,0,0,8,16,10,6,0,0,0,0,5,16,16,16,13,1,0,0,0,0,3,6,16,4,0,0,3,0,0,0,12,5,0,0,5,16,9,8,14,6,0,0,0,5,10,12,10,0,0,5 +0,1,10,10,6,0,0,0,0,4,15,11,15,7,0,0,0,2,14,1,11,14,0,0,0,0,11,16,16,16,0,0,0,0,0,0,0,16,3,0,0,0,0,0,0,11,8,0,0,2,8,8,8,12,12,0,0,2,12,12,16,13,5,0,9 +0,0,8,16,15,4,0,0,0,0,8,16,16,10,0,0,0,0,8,16,16,7,0,0,0,0,8,16,16,4,0,0,0,2,16,16,16,4,0,0,0,0,12,16,16,4,0,0,0,0,7,16,16,11,0,0,0,0,7,16,16,13,4,0,1 
+0,0,7,11,15,6,0,0,0,3,16,15,12,4,0,0,0,5,16,13,14,8,0,0,0,6,15,11,8,15,6,0,0,0,0,0,0,8,8,0,0,0,0,0,0,4,8,0,0,0,13,9,8,13,5,0,0,0,6,11,14,9,0,0,5 +0,1,12,16,16,8,0,0,0,4,10,8,8,4,0,0,0,9,11,6,7,0,0,0,0,4,16,16,16,11,0,0,0,0,0,0,1,16,4,0,0,0,0,0,0,16,6,0,0,0,5,8,11,14,2,0,0,0,15,13,10,1,0,0,5 +0,0,7,13,16,8,0,0,0,0,16,14,10,16,2,0,0,0,9,14,4,16,1,0,0,0,1,16,16,14,0,0,0,0,8,16,16,3,0,0,0,2,16,13,14,15,1,0,0,4,16,7,5,16,6,0,0,0,10,15,16,15,2,0,8 +0,0,6,16,8,0,0,0,0,0,14,10,11,7,0,0,0,0,16,1,3,16,1,0,0,0,11,10,13,16,5,0,0,0,2,8,8,9,10,0,0,0,0,0,0,2,14,0,0,0,0,0,1,5,15,1,0,0,6,15,16,14,5,0,9 +0,1,9,15,2,0,0,0,0,10,14,14,7,0,0,0,0,5,3,10,8,0,0,0,0,0,0,14,4,0,0,0,0,0,2,16,0,0,0,0,0,0,10,12,1,0,0,0,0,2,16,16,16,14,8,0,0,0,7,9,14,16,7,0,2 +0,0,5,15,10,0,0,0,0,1,15,12,13,10,0,0,0,5,16,2,1,11,3,0,0,5,14,2,0,4,8,0,0,4,12,0,0,4,8,0,0,3,14,2,0,9,9,0,0,0,12,12,7,14,9,0,0,0,5,16,16,10,0,0,0 +0,0,0,8,11,0,0,0,0,0,5,14,7,0,0,0,0,0,12,10,0,0,0,0,0,4,16,3,0,0,0,0,0,5,15,12,10,3,0,0,0,4,16,12,8,15,5,0,0,0,10,15,6,10,15,0,0,0,0,7,12,12,8,0,6 +0,0,2,14,11,4,0,0,0,1,14,16,13,16,2,0,0,4,16,10,3,15,3,0,0,0,9,16,14,14,2,0,0,1,12,16,16,4,0,0,0,5,16,9,11,13,0,0,0,1,14,13,10,16,7,0,0,0,4,9,13,11,1,0,8 +0,0,9,14,10,1,0,0,0,3,13,2,8,9,0,0,0,4,12,0,4,16,1,0,0,1,14,8,11,16,6,0,0,0,1,6,3,10,7,0,0,0,0,0,0,8,8,0,0,3,8,3,1,10,8,0,0,1,9,11,16,12,1,0,9 +0,0,11,12,4,0,0,0,0,4,16,8,14,7,0,0,0,5,16,7,6,15,1,0,0,0,12,16,16,16,2,0,0,0,0,1,0,7,9,0,0,0,0,0,0,4,12,0,0,0,4,8,10,14,12,0,0,0,10,12,9,7,0,0,9 +0,0,10,16,16,16,5,0,0,0,2,4,10,16,5,0,0,0,0,0,9,12,0,0,0,0,4,14,16,13,6,0,0,0,6,15,16,15,7,0,0,0,0,14,10,0,0,0,0,0,6,16,7,0,0,0,0,0,11,15,0,0,0,0,7 +0,1,12,12,11,5,0,0,0,5,15,8,12,15,2,0,0,7,12,0,9,16,3,0,0,3,15,9,10,16,7,0,0,0,5,11,16,16,8,0,0,0,0,0,1,14,10,0,0,0,7,8,11,16,2,0,0,0,12,16,13,6,0,0,9 +0,1,10,16,13,4,0,0,0,9,13,4,11,15,0,0,0,0,0,0,4,16,1,0,0,0,0,3,13,11,0,0,0,0,0,7,16,14,2,0,0,0,0,0,4,15,8,0,0,0,5,4,5,14,9,0,0,0,15,16,14,9,0,0,3 +0,0,8,16,8,0,0,0,0,0,10,16,16,0,0,0,0,0,6,10,16,4,0,0,0,0,0,0,16,2,0,0,0,0,0,7,13,0,0,0,0,0,1,14,8,0,0,0,0,0,13,16,16,16,14,0,0,0,8,12,11,8,14,0,2 +0,0,3,12,6,0,0,0,0,0,16,16,16,8,0,0,0,0,14,14,7,15,0,0,0,0,5,16,15,8,0,0,0,0,0,13,16,0,0,0,0,2,14,10,13,7,0,0,0,3,15,7,9,15,0,0,0,0,5,12,11,5,0,0,8 +0,1,11,13,5,0,0,0,0,7,12,7,15,4,0,0,0,8,4,0,8,12,0,0,0,2,6,0,0,13,0,0,0,0,0,1,8,3,0,0,0,0,0,11,8,0,0,0,0,1,12,16,13,8,2,0,0,2,12,12,12,12,12,0,2 +0,3,13,16,13,1,0,0,0,11,11,8,14,8,0,0,0,5,1,0,5,14,0,0,0,0,0,0,5,11,0,0,0,0,0,1,14,3,0,0,0,0,0,9,11,0,0,0,0,0,11,16,15,12,7,0,0,1,8,8,9,13,7,0,2 +0,3,14,16,12,2,0,0,0,11,12,4,12,8,0,0,0,1,1,0,12,7,0,0,0,0,0,9,16,6,0,0,0,0,0,4,11,16,2,0,0,0,0,0,0,8,11,0,0,1,8,6,7,14,6,0,0,2,11,16,12,8,0,0,3 +0,0,8,10,16,13,0,0,0,4,16,16,16,16,1,0,0,6,16,12,8,15,5,0,0,6,16,1,0,12,8,0,0,5,13,0,1,14,6,0,0,4,14,2,12,16,2,0,0,2,16,16,16,8,0,0,0,0,9,12,7,0,0,0,0 +0,0,6,15,8,0,0,0,0,0,13,15,16,4,0,0,0,0,15,6,15,13,0,0,0,0,15,12,16,16,1,0,0,0,3,11,7,12,6,0,0,0,0,0,0,6,11,0,0,0,1,4,2,7,14,0,0,0,7,14,16,15,9,0,9 +0,2,16,14,2,0,0,0,0,6,14,12,14,0,0,0,0,7,12,8,15,0,0,0,0,2,5,8,12,0,0,0,0,0,0,15,1,0,0,0,0,0,5,13,0,0,0,0,0,1,13,14,9,8,2,0,0,2,14,15,12,16,10,0,2 +0,1,5,11,13,5,0,0,0,10,16,16,15,3,0,0,0,10,13,0,0,0,0,0,0,10,14,8,8,3,0,0,0,2,12,13,13,16,5,0,0,0,0,0,0,11,14,0,0,0,2,11,8,14,15,0,0,0,2,11,16,14,6,0,5 +0,1,10,12,12,2,0,0,0,7,15,8,12,8,0,0,0,0,2,0,13,7,0,0,0,0,0,7,16,4,0,0,0,0,0,5,13,13,1,0,0,0,0,0,1,11,9,0,0,0,8,4,6,14,8,0,0,2,12,16,14,9,1,0,3 
+0,0,14,14,13,15,5,0,0,0,16,14,12,6,0,0,0,4,16,11,8,1,0,0,0,3,16,16,16,9,0,0,0,0,0,0,5,16,2,0,0,0,0,0,3,16,4,0,0,0,6,9,15,14,0,0,0,0,12,16,10,2,0,0,5 +0,0,0,1,16,14,1,0,0,0,0,10,16,14,0,0,0,3,9,16,16,3,0,0,0,5,16,16,16,4,0,0,0,0,0,14,16,4,0,0,0,0,0,9,16,8,0,0,0,0,0,5,16,12,0,0,0,0,0,2,15,14,2,0,1 +0,1,9,12,12,2,0,0,0,7,12,4,11,10,0,0,0,0,1,0,11,8,0,0,0,0,2,15,16,2,0,0,0,0,2,8,12,12,0,0,0,0,0,0,1,16,6,0,0,1,16,3,6,16,6,0,0,1,11,15,12,4,0,0,3 +0,0,2,12,16,10,0,0,0,0,11,12,11,16,4,0,0,0,1,1,1,16,4,0,0,0,0,0,8,14,2,0,0,0,0,4,15,5,0,0,0,0,8,16,5,0,0,0,0,5,16,16,14,9,1,0,0,0,3,8,10,15,3,0,2 +0,0,6,15,12,10,8,0,0,0,11,16,16,16,7,0,0,1,14,11,12,6,0,0,0,4,16,16,16,13,0,0,0,2,11,8,10,16,2,0,0,0,0,0,1,16,1,0,0,0,0,11,13,15,0,0,0,0,4,16,12,1,0,0,5 +0,0,7,12,8,0,0,0,0,0,15,16,15,2,0,0,0,0,11,16,16,2,0,0,0,0,12,16,16,4,0,0,0,0,12,16,16,7,0,0,0,0,9,16,16,11,0,0,0,0,11,16,16,13,0,0,0,0,2,8,12,6,0,0,1 +0,0,12,16,12,2,0,0,0,12,15,9,15,12,0,0,0,12,7,0,8,16,0,0,0,3,1,0,8,14,0,0,0,0,0,4,16,6,0,0,0,0,1,13,15,1,0,0,0,0,11,16,13,12,8,0,0,0,11,12,12,15,14,0,2 +0,0,14,14,16,15,0,0,0,4,16,13,12,8,0,0,0,6,13,0,0,0,0,0,0,5,16,12,4,0,0,0,0,3,14,16,14,0,0,0,0,0,0,0,12,12,0,0,0,0,10,8,15,10,0,0,0,0,15,16,11,1,0,0,5 +0,0,0,2,13,10,0,0,0,0,0,6,16,4,0,0,0,0,0,14,12,0,0,0,0,0,11,15,1,6,1,0,0,6,16,5,6,16,6,0,4,15,16,12,14,16,2,0,1,8,12,13,16,10,0,0,0,0,0,4,15,4,0,0,4 +0,0,6,15,16,14,0,0,0,2,11,5,7,16,2,0,0,0,0,0,4,16,0,0,0,0,8,13,16,6,0,0,0,0,5,13,16,14,3,0,0,0,0,14,8,14,4,0,0,0,6,16,2,0,0,0,0,0,7,9,1,0,0,0,7 +0,0,6,16,16,16,12,0,0,0,6,12,11,15,16,1,0,0,0,0,0,15,12,0,0,0,1,11,12,16,3,0,0,0,3,16,16,16,4,0,0,0,0,9,16,13,3,0,0,0,1,16,9,0,0,0,0,0,7,15,4,0,0,0,7 +0,0,9,15,7,1,0,0,0,5,16,7,12,14,0,0,0,10,10,0,8,16,0,0,0,6,12,0,5,16,4,0,0,0,12,13,15,16,3,0,0,0,0,0,1,13,10,0,0,0,2,4,5,13,11,0,0,0,8,16,12,8,1,0,9 +0,1,7,12,10,1,0,0,0,1,14,16,16,12,0,0,0,0,12,16,16,14,0,0,0,0,7,16,16,14,1,0,0,0,5,16,16,13,0,0,0,0,7,16,16,14,0,0,0,1,14,16,16,15,2,0,0,1,8,12,10,8,0,0,1 +0,0,4,14,13,4,0,0,0,0,13,14,6,15,0,0,0,0,4,15,8,16,0,0,0,0,0,3,14,14,0,0,0,0,0,9,16,12,0,0,0,0,8,15,4,15,3,0,0,0,15,8,4,14,7,0,0,0,4,13,13,12,2,0,8 +0,0,6,13,12,4,0,0,0,2,16,9,12,13,0,0,0,0,9,0,13,10,0,0,0,0,0,6,16,4,0,0,0,0,0,0,8,14,1,0,0,0,0,0,0,14,9,0,0,0,4,9,5,10,13,0,0,0,7,12,13,12,5,0,3 +0,1,15,16,14,2,0,0,0,1,11,12,16,8,0,0,0,0,0,5,16,4,0,0,0,0,2,15,16,3,0,0,0,0,4,16,16,14,4,0,0,0,5,16,12,16,7,0,0,0,11,11,0,0,0,0,0,0,16,6,0,0,0,0,7 +0,0,11,16,13,12,3,0,0,3,16,11,8,8,1,0,0,5,16,9,9,1,0,0,0,8,16,16,16,6,0,0,0,6,11,3,9,11,0,0,0,0,0,0,4,12,0,0,0,0,10,14,11,16,0,0,0,0,9,12,12,6,0,0,5 +0,0,4,6,14,14,1,0,0,0,16,16,16,16,5,0,0,3,16,7,3,12,8,0,0,4,16,0,0,9,8,0,0,4,16,0,0,8,8,0,0,1,15,1,1,13,7,0,0,0,14,13,15,12,0,0,0,0,6,14,14,4,0,0,0 +0,0,5,15,12,2,0,0,0,0,7,16,16,1,0,0,0,0,10,16,16,6,0,0,0,0,11,16,16,2,0,0,0,0,7,16,16,3,0,0,0,0,7,16,16,7,0,0,0,0,8,16,16,8,0,0,0,0,12,16,13,3,0,0,1 +0,1,13,14,6,0,0,0,0,3,16,9,15,4,0,0,0,2,16,8,15,4,0,0,0,0,0,0,16,4,0,0,0,0,0,6,12,0,0,0,0,0,0,14,6,0,0,0,0,0,14,16,9,6,4,0,0,2,11,12,12,12,12,0,2 +0,0,0,8,15,0,0,0,0,0,0,16,10,0,0,0,0,0,4,16,4,0,0,0,0,0,12,12,0,0,0,0,0,9,16,5,7,14,0,0,3,16,16,13,16,13,0,0,1,7,9,15,16,7,0,0,0,0,0,12,14,2,0,0,4 +0,0,0,5,16,2,0,0,0,0,0,12,14,0,0,0,0,0,4,16,5,0,0,0,0,1,12,12,0,4,6,0,0,8,16,3,2,16,10,0,2,16,16,16,15,16,2,0,1,4,7,11,16,11,0,0,0,0,0,7,16,4,0,0,4 +0,0,8,15,11,4,0,0,0,7,15,13,14,16,2,0,0,8,14,0,10,16,4,0,0,0,14,15,16,12,1,0,0,0,9,16,13,1,0,0,0,0,14,16,11,0,0,0,0,0,15,16,16,2,0,0,0,0,8,16,15,3,0,0,8 
+0,0,9,12,9,0,0,0,0,0,16,16,16,4,0,0,0,0,13,16,16,4,0,0,0,0,12,16,16,2,0,0,0,0,12,16,16,4,0,0,0,0,13,16,16,4,0,0,0,0,16,16,16,9,0,0,0,0,4,8,10,4,0,0,1 +0,0,7,12,14,7,0,0,0,0,12,11,12,16,0,0,0,0,0,2,4,12,0,0,0,0,0,14,15,14,1,0,0,0,0,12,15,15,6,0,0,0,0,12,8,1,1,0,0,0,5,16,1,0,0,0,0,0,7,7,0,0,0,0,7 +0,0,3,14,13,2,0,0,0,0,16,14,14,11,0,0,0,7,15,2,1,16,1,0,0,5,12,0,0,12,8,0,0,8,10,0,0,11,8,0,0,1,15,0,0,8,11,0,0,0,11,10,7,15,6,0,0,0,3,12,16,11,1,0,0 +0,1,14,13,9,2,0,0,0,2,16,16,16,14,2,0,0,0,14,10,3,16,3,0,0,0,5,15,14,13,0,0,0,0,1,15,15,3,0,0,0,0,8,16,16,2,0,0,0,0,16,13,16,8,0,0,0,0,12,12,9,2,0,0,8 +0,0,9,16,4,0,0,0,0,0,14,14,13,7,0,0,0,4,15,12,16,16,5,0,0,4,12,7,8,14,8,0,0,7,12,0,0,12,8,0,0,5,13,0,0,13,8,0,0,2,16,12,15,16,5,0,0,0,9,14,12,8,0,0,0 +0,0,8,14,10,2,0,0,0,0,15,14,11,13,0,0,0,0,0,0,5,16,1,0,0,0,0,0,11,14,0,0,0,0,0,0,3,15,3,0,0,0,0,0,0,9,13,0,0,0,3,9,8,15,11,0,0,0,6,16,14,7,1,0,3 +0,0,0,8,16,1,0,0,0,0,0,14,16,2,0,0,0,0,3,15,13,0,0,0,0,0,9,16,3,3,1,0,0,1,14,11,4,16,7,0,1,12,16,14,14,16,2,0,1,11,12,14,16,11,0,0,0,0,0,9,16,5,0,0,4 +0,0,0,2,13,7,0,0,0,0,0,9,15,3,0,0,0,0,2,14,10,0,0,0,0,0,13,13,2,6,0,0,0,8,15,2,14,14,0,0,4,15,16,16,16,12,0,0,1,4,8,10,16,7,0,0,0,0,0,5,16,6,0,0,4 +0,0,0,4,15,0,0,0,0,0,0,11,13,0,0,0,0,0,1,15,7,0,0,0,0,0,11,11,0,0,0,0,0,2,16,2,11,8,0,0,2,13,14,10,16,8,0,0,2,6,12,15,16,4,0,0,0,0,0,5,14,1,0,0,4 +0,0,8,16,15,11,0,0,0,0,5,7,11,16,3,0,0,0,0,0,9,13,0,0,0,0,7,15,15,5,0,0,0,0,8,14,16,14,4,0,0,0,0,10,9,4,1,0,0,0,4,14,1,0,0,0,0,0,10,7,0,0,0,0,7 +0,0,0,0,10,13,2,0,0,0,0,4,16,16,2,0,0,0,0,10,16,16,2,0,0,1,13,16,16,16,4,0,0,5,12,10,16,16,0,0,0,0,0,0,15,16,1,0,0,0,0,0,14,16,5,0,0,0,0,0,11,16,6,0,1 +0,0,4,12,10,3,0,0,0,0,9,16,16,5,0,0,0,0,10,16,16,3,0,0,0,0,12,16,16,3,0,0,0,0,4,16,15,2,0,0,0,0,7,16,16,4,0,0,0,0,8,16,16,6,0,0,0,0,6,12,9,4,0,0,1 +0,0,5,14,0,0,0,0,0,0,12,14,0,0,0,0,0,0,15,12,0,0,0,0,0,1,16,9,5,2,0,0,0,5,16,16,16,15,2,0,0,7,16,15,10,16,9,0,0,1,15,13,13,16,5,0,0,0,5,13,13,9,0,0,6 +0,0,8,15,12,2,0,0,0,4,16,13,11,12,1,0,0,9,14,1,13,15,2,0,0,9,15,0,14,15,0,0,0,2,15,16,16,16,0,0,0,0,1,4,4,16,4,0,0,0,4,8,4,16,5,0,0,0,5,14,16,15,5,0,9 +0,1,12,15,16,9,0,0,0,2,16,16,12,9,0,0,0,6,14,1,0,0,0,0,0,7,15,5,1,0,0,0,0,7,16,16,12,0,0,0,0,0,4,4,15,4,0,0,0,0,8,5,16,6,0,0,0,0,13,16,12,1,0,0,5 +0,0,15,14,14,15,10,0,0,0,16,8,11,10,6,0,0,5,14,0,0,0,0,0,0,8,16,16,16,4,0,0,0,4,14,9,12,12,0,0,0,0,0,0,5,15,0,0,0,0,11,4,11,9,0,0,0,0,14,16,14,1,0,0,5 +0,2,14,12,16,13,1,0,0,4,16,15,4,10,7,0,0,2,16,5,2,14,2,0,0,0,9,12,14,6,0,0,0,0,2,16,11,0,0,0,0,0,11,13,12,0,0,0,0,2,13,0,15,0,0,0,0,2,14,15,10,0,0,0,8 +0,0,0,2,16,6,0,0,0,0,0,6,16,2,0,0,0,0,0,13,13,0,0,0,0,0,6,16,4,9,4,0,0,3,15,10,4,16,6,0,2,15,16,16,14,16,5,0,1,8,13,16,16,15,0,0,0,0,0,2,16,9,0,0,4 +0,0,0,7,15,6,0,0,0,0,10,13,14,13,0,0,0,2,13,0,12,6,0,0,0,0,2,3,12,1,0,0,0,0,1,15,3,0,0,0,0,2,13,8,0,0,0,0,0,10,16,14,12,8,0,0,0,2,4,7,9,14,0,0,2 +0,1,8,15,16,11,1,0,0,5,16,13,10,13,7,0,0,4,13,0,0,9,8,0,0,1,13,8,6,15,4,0,0,0,2,15,16,9,0,0,0,0,8,14,15,8,0,0,0,0,14,8,9,14,0,0,0,0,11,16,14,8,0,0,8 +0,0,6,9,16,10,1,0,0,2,15,15,9,15,8,0,0,6,16,1,0,12,8,0,0,0,14,13,14,12,2,0,0,0,4,16,15,1,0,0,0,0,11,11,16,3,0,0,0,2,16,8,13,14,0,0,0,0,8,16,12,10,0,0,8 +0,0,9,16,13,2,0,0,0,3,16,9,12,12,0,0,0,4,16,0,0,16,0,0,0,1,8,0,2,16,0,0,0,0,0,0,5,13,0,0,0,0,0,1,11,9,0,0,0,0,4,11,16,10,7,0,0,0,9,12,8,9,13,0,2 +0,0,5,14,16,14,1,0,0,2,15,4,7,16,1,0,0,0,5,0,12,12,0,0,0,0,0,6,16,3,0,0,0,0,0,0,7,15,1,0,0,0,0,0,2,16,2,0,0,0,1,4,8,16,4,0,0,0,8,15,13,8,0,0,3 
+0,0,7,15,13,3,0,0,0,0,16,16,16,16,1,0,0,5,15,7,7,16,5,0,0,8,12,0,0,15,5,0,0,6,16,0,0,13,7,0,0,5,16,1,2,16,4,0,0,3,16,9,14,15,0,0,0,0,9,13,12,3,0,0,0 +0,0,0,7,12,7,0,0,0,0,13,13,13,12,0,0,0,3,12,0,9,9,0,0,0,0,0,7,16,9,0,0,0,0,0,2,8,16,0,0,0,0,0,0,1,15,3,0,0,0,0,12,9,15,0,0,0,0,1,12,15,5,0,0,3 +0,0,7,14,0,0,0,0,0,0,12,15,0,0,0,0,0,0,16,9,0,0,0,0,0,1,15,7,2,0,0,0,0,1,16,16,15,10,2,0,0,3,16,12,4,14,12,0,0,0,12,10,1,14,9,0,0,0,5,16,16,11,3,0,6 +0,0,9,15,16,10,0,0,0,0,16,16,16,16,4,0,0,0,14,11,14,16,2,0,0,0,7,16,16,7,0,0,0,0,10,16,11,0,0,0,0,2,16,15,14,1,0,0,0,4,16,14,16,4,0,0,0,0,9,15,13,1,0,0,8 +0,1,14,16,16,12,1,0,0,0,13,8,4,4,0,0,0,0,12,4,0,0,0,0,0,0,8,16,16,9,0,0,0,0,4,7,5,14,0,0,0,0,0,0,0,13,0,0,0,0,5,0,3,15,0,0,0,2,14,16,14,7,0,0,5 +0,0,0,13,7,0,0,0,0,5,14,16,16,8,0,0,0,12,10,0,6,16,0,0,0,2,14,12,5,16,3,0,0,0,0,11,16,11,0,0,0,0,2,15,9,16,5,0,0,0,10,14,4,13,12,0,0,0,1,9,14,16,11,0,8 +0,0,7,12,14,6,0,0,0,7,16,14,14,6,0,0,0,5,16,10,3,0,0,0,0,6,16,16,16,6,0,0,0,3,7,4,10,14,0,0,0,0,0,0,10,15,1,0,0,0,2,15,16,8,0,0,0,0,5,16,9,0,0,0,5 +0,0,5,15,2,0,0,0,0,0,15,16,16,13,2,0,0,4,16,16,14,16,8,0,0,7,12,0,0,8,8,0,0,4,12,0,0,8,8,0,0,4,15,0,0,9,7,0,0,1,15,5,7,15,4,0,0,0,5,13,12,7,0,0,0 +0,0,2,12,14,5,0,0,0,2,15,6,8,14,6,0,0,5,9,0,5,16,4,0,0,5,9,4,15,12,0,0,0,0,14,13,16,5,0,0,0,0,0,4,12,0,0,0,0,0,0,11,5,0,0,0,0,0,2,15,4,0,0,0,9 +0,0,0,12,15,0,0,0,0,0,5,16,9,0,0,0,0,3,14,10,0,1,0,0,0,10,14,1,4,15,9,0,0,11,15,15,16,16,4,0,0,1,8,10,16,12,0,0,0,0,0,8,16,3,0,0,0,0,0,11,16,0,0,0,4 +0,6,16,16,13,3,0,0,0,12,16,12,15,16,5,0,0,10,14,1,0,4,1,0,0,2,15,8,0,0,0,0,0,0,7,16,2,0,0,0,0,0,2,15,9,0,0,0,0,1,5,15,10,0,0,0,0,7,16,15,1,0,0,0,5 +0,0,11,16,9,0,0,0,0,9,12,6,14,10,0,0,0,11,5,0,13,13,0,0,0,1,11,14,12,15,6,0,0,0,0,0,0,12,8,0,0,0,0,0,0,10,10,0,0,0,1,0,4,13,6,0,0,0,9,14,16,10,0,0,9 +0,0,12,16,16,10,0,0,0,0,15,13,13,16,1,0,0,0,3,14,16,14,1,0,0,0,0,15,14,2,0,0,0,0,0,7,16,5,0,0,0,0,0,0,9,16,2,0,0,0,6,5,11,16,8,0,0,0,10,16,16,15,3,0,3 +0,0,0,4,13,13,0,0,0,0,3,16,14,3,0,0,0,1,15,11,1,1,0,0,0,10,15,0,0,11,9,0,0,7,15,9,9,16,6,0,0,0,4,8,16,15,1,0,0,0,0,2,16,7,0,0,0,0,0,3,16,1,0,0,4 +0,2,13,16,15,7,0,0,0,10,16,15,6,14,3,0,0,8,16,1,0,0,0,0,0,3,14,8,0,0,0,0,0,0,4,15,4,0,0,0,0,0,0,5,16,0,0,0,0,0,5,9,16,3,0,0,0,1,15,16,10,0,0,0,5 +0,0,1,12,9,0,0,0,0,0,7,16,5,0,0,0,0,0,10,9,0,0,0,0,0,0,15,11,11,1,0,0,0,0,13,16,14,14,1,0,0,0,10,13,0,12,5,0,0,0,4,13,2,16,3,0,0,0,0,11,16,8,0,0,6 +0,0,3,16,8,1,0,0,0,0,4,16,16,2,0,0,0,7,16,16,16,0,0,0,0,9,10,15,14,0,0,0,0,0,1,16,13,0,0,0,0,0,0,16,11,0,0,0,0,0,3,16,13,0,0,0,0,0,1,13,15,0,0,0,1 +0,0,8,16,16,12,0,0,0,3,16,9,11,16,3,0,0,4,7,0,8,14,1,0,0,0,5,8,14,12,1,0,0,5,16,16,16,16,10,0,0,6,5,9,11,0,0,0,0,0,4,16,3,0,0,0,0,0,11,8,0,0,0,0,7 +0,2,12,15,16,14,2,0,0,6,16,15,11,12,5,0,0,4,16,2,0,0,0,0,0,1,13,5,0,0,0,0,0,0,11,8,0,0,0,0,0,0,5,16,0,0,0,0,0,2,12,14,0,0,0,0,0,2,16,10,0,0,0,0,5 +0,1,6,12,15,4,0,0,0,6,16,12,12,16,3,0,0,12,8,0,5,16,2,0,0,1,2,0,12,13,0,0,0,0,0,6,16,2,0,0,0,0,0,16,9,0,0,0,0,0,7,16,8,8,7,0,0,0,3,11,12,12,7,0,2 +0,0,7,16,6,5,0,0,0,0,16,12,9,14,1,0,0,4,15,0,0,16,2,0,0,4,14,0,0,15,3,0,0,5,12,0,3,14,0,0,0,3,16,0,6,10,0,0,0,1,15,6,16,3,0,0,0,0,8,15,8,0,0,0,0 +0,0,0,10,12,0,0,0,0,0,3,16,9,0,0,0,0,0,8,12,0,0,0,0,0,0,10,10,1,0,0,0,0,0,12,16,16,15,3,0,0,0,10,15,4,8,16,1,0,0,6,14,0,9,15,0,0,0,1,10,16,15,4,0,6 +0,3,15,16,16,15,1,0,0,14,13,7,8,16,8,0,0,2,1,0,8,16,7,0,0,0,0,7,16,10,0,0,0,0,0,14,13,0,0,0,0,0,0,10,14,2,0,0,0,2,8,5,16,8,0,0,0,2,16,16,13,3,0,0,3 
+0,1,10,16,16,13,0,0,0,6,15,6,9,16,3,0,0,3,5,0,8,16,3,0,0,0,1,10,16,14,3,0,0,0,6,16,16,9,8,0,0,0,0,15,9,0,0,0,0,0,6,15,1,0,0,0,0,0,14,5,0,0,0,0,7 +0,3,15,14,5,1,0,0,0,12,16,16,16,16,5,0,0,9,16,15,7,8,3,0,0,1,13,13,0,0,0,0,0,0,6,16,5,0,0,0,0,0,1,15,11,0,0,0,0,0,7,16,10,0,0,0,0,2,16,13,1,0,0,0,5 +0,0,0,13,3,0,0,0,0,0,5,16,7,0,0,0,0,0,9,14,0,0,0,0,0,0,10,14,8,3,0,0,0,0,15,15,12,14,6,0,0,0,15,9,0,4,15,1,0,0,7,14,2,9,16,0,0,0,1,11,15,14,6,0,6 +0,0,0,10,11,0,0,0,0,0,2,16,7,0,0,0,0,0,7,12,0,0,0,0,0,0,10,10,0,0,0,0,0,0,10,16,16,9,0,0,0,0,10,14,2,14,3,0,0,0,6,14,1,14,5,0,0,0,0,7,16,15,1,0,6 +0,0,8,16,15,5,0,0,0,0,14,16,11,15,0,0,0,0,16,2,0,8,4,0,0,2,12,0,0,8,6,0,0,3,11,0,0,11,5,0,0,3,12,0,2,15,1,0,0,1,15,2,14,8,0,0,0,0,7,15,12,0,0,0,0 +0,0,1,16,14,0,0,0,0,0,3,16,16,1,0,0,0,0,1,15,16,4,0,0,0,0,0,14,16,6,0,0,0,0,2,16,16,2,0,0,0,0,1,16,16,1,0,0,0,0,3,16,16,0,0,0,0,0,2,15,16,4,0,0,1 +0,0,6,15,13,3,0,0,0,5,15,6,13,15,3,0,0,10,11,0,13,16,5,0,0,4,15,14,16,15,1,0,0,0,0,1,12,9,0,0,0,0,0,8,15,1,0,0,0,0,1,14,8,0,0,0,0,0,6,15,0,0,0,0,9 +0,0,6,15,11,2,0,0,0,1,16,10,14,16,3,0,0,4,11,0,14,16,3,0,0,3,15,16,12,15,5,0,0,0,1,2,0,12,7,0,0,0,0,0,1,13,6,0,0,0,1,5,13,12,0,0,0,0,8,12,6,0,0,0,9 +0,4,16,16,12,1,0,0,0,8,14,5,15,6,0,0,0,3,5,0,13,8,0,0,0,0,0,3,15,7,0,0,0,0,1,13,13,0,0,0,0,0,11,15,2,0,0,0,0,6,16,9,4,4,1,0,0,4,15,16,16,16,15,1,2 +0,2,14,16,13,1,0,0,0,10,14,10,15,8,0,0,0,5,2,0,12,12,0,0,0,0,0,4,16,6,0,0,0,0,1,15,10,0,0,0,0,0,11,14,1,0,0,0,0,4,16,8,1,0,0,0,0,2,15,16,16,16,8,0,2 +0,1,9,12,15,14,4,0,0,6,14,4,4,14,8,0,0,3,8,0,7,15,2,0,0,0,0,6,14,2,0,0,0,0,0,8,11,0,0,0,0,0,0,1,15,2,0,0,0,0,5,0,12,9,0,0,0,0,11,16,14,3,0,0,3 +0,0,14,11,7,1,0,0,0,2,16,16,16,14,4,0,0,0,15,11,0,6,3,0,0,0,7,14,0,0,0,0,0,0,2,16,4,0,0,0,0,0,1,12,12,0,0,0,0,4,12,14,11,0,0,0,0,0,14,16,6,0,0,0,5 +0,0,15,2,0,0,0,0,0,5,16,15,11,5,0,0,0,7,16,15,12,13,4,0,0,1,15,7,0,0,0,0,0,0,5,16,5,0,0,0,0,0,0,10,14,0,0,0,0,0,1,12,15,0,0,0,0,0,11,16,6,0,0,0,5 +0,0,8,9,15,11,0,0,0,9,15,15,4,15,2,0,0,8,9,0,8,12,0,0,0,2,14,11,14,2,0,0,0,0,11,16,7,0,0,0,0,0,14,2,13,6,0,0,0,2,12,1,8,12,0,0,0,0,8,16,14,5,0,0,8 +0,0,6,15,8,0,0,0,0,2,15,7,16,5,0,0,0,6,11,1,10,14,2,0,0,7,7,0,0,10,6,0,0,7,5,0,0,8,8,0,0,4,10,0,0,10,7,0,0,0,12,8,4,15,2,0,0,0,2,12,12,8,0,0,0 +0,4,15,16,16,12,1,0,0,15,13,5,5,16,8,0,0,7,3,0,10,16,4,0,0,0,1,11,16,10,0,0,0,0,7,16,10,0,0,0,0,0,1,7,16,6,0,0,0,3,8,6,16,10,0,0,0,5,16,16,12,1,0,0,3 +0,0,8,16,16,15,0,0,0,3,16,9,8,16,4,0,0,9,8,0,6,16,2,0,0,1,8,13,16,16,5,0,0,0,15,12,16,13,8,0,0,0,0,9,15,1,0,0,0,0,2,16,4,0,0,0,0,0,11,11,0,0,0,0,7 +0,0,0,5,16,5,0,0,0,0,6,15,16,9,0,0,3,16,16,16,16,3,0,0,0,9,11,13,16,3,0,0,0,0,0,14,14,0,0,0,0,0,0,14,13,0,0,0,0,0,0,11,14,0,0,0,0,0,0,4,15,4,0,0,1 +0,0,6,15,16,5,0,0,0,5,16,13,15,11,0,0,0,10,12,1,16,9,0,0,0,3,3,9,16,2,0,0,0,0,4,16,7,0,0,0,0,0,11,13,0,0,0,0,0,0,12,14,8,8,2,0,0,0,5,16,16,16,11,0,2 +0,0,0,13,11,0,0,0,0,0,6,16,6,0,0,0,0,0,15,13,0,5,1,0,0,6,16,6,4,16,9,0,0,11,16,5,13,16,2,0,0,1,11,16,16,9,0,0,0,0,0,11,15,2,0,0,0,0,0,12,13,0,0,0,4 +0,0,9,13,15,10,0,0,0,2,15,5,6,16,6,0,0,0,3,0,2,16,6,0,0,0,0,0,11,13,0,0,0,0,0,8,14,1,0,0,0,0,5,15,3,0,0,0,0,0,12,6,0,0,0,0,0,0,8,16,16,12,0,0,2 +0,0,6,13,16,11,1,0,0,4,15,6,9,16,5,0,0,6,9,0,7,16,6,0,0,3,15,12,15,15,8,0,0,0,2,5,2,10,8,0,0,0,0,0,0,13,7,0,0,2,9,4,8,15,2,0,0,0,10,13,12,2,0,0,9 +0,0,0,1,15,9,0,0,0,0,0,9,16,11,0,0,0,4,8,16,16,6,0,0,0,15,16,16,16,4,0,0,0,5,9,4,16,4,0,0,0,0,0,5,16,2,0,0,0,0,0,4,16,5,0,0,0,0,0,2,14,9,0,0,1 +0,0,6,13,6,0,0,0,0,0,10,16,14,6,0,0,0,0,14,9,3,16,1,0,0,0,15,2,0,15,5,0,0,0,14,2,0,15,1,0,0,0,12,4,5,15,0,0,0,0,10,9,15,8,0,0,0,0,5,14,10,0,0,0,0 
+0,0,9,13,10,1,0,0,0,7,14,4,15,8,0,0,0,10,8,0,10,14,0,0,0,4,14,12,14,15,4,0,0,0,2,4,2,12,6,0,0,0,0,0,1,15,5,0,0,0,0,4,10,15,1,0,0,0,9,12,9,2,0,0,9 +0,0,0,5,16,4,0,0,0,0,0,13,15,1,0,0,0,0,10,16,3,2,0,0,0,5,16,9,0,13,10,0,0,12,16,6,6,16,6,0,0,1,15,16,16,16,1,0,0,0,0,4,16,11,0,0,0,0,0,4,16,11,0,0,4 +0,0,4,13,16,8,0,0,0,5,16,8,10,12,0,0,0,8,9,0,8,12,0,0,0,0,0,2,11,10,2,0,0,0,3,16,16,16,9,0,0,0,0,9,14,0,0,0,0,0,0,13,7,0,0,0,0,0,5,15,2,0,0,0,7 +0,0,2,13,3,0,0,0,0,0,11,15,4,0,0,0,0,0,11,8,0,0,0,0,0,0,14,5,0,0,0,0,0,0,16,13,16,14,4,0,0,0,15,15,9,13,12,0,0,0,13,11,2,15,9,0,0,0,2,14,15,11,1,0,6 +0,0,0,8,16,2,0,0,0,0,8,16,9,1,0,0,0,0,13,10,0,0,0,0,0,0,15,15,8,1,0,0,0,1,16,11,12,10,0,0,0,0,15,5,1,15,1,0,0,0,8,11,3,16,0,0,0,0,1,10,16,7,0,0,6 +0,0,0,11,13,0,0,0,0,0,5,15,13,0,0,0,0,3,16,16,10,0,0,0,0,0,7,16,10,0,0,0,0,0,0,15,10,0,0,0,0,0,0,15,8,0,0,0,0,0,0,16,6,0,0,0,0,0,0,12,11,0,0,0,1 +0,1,11,16,16,5,0,0,0,6,16,6,14,13,0,0,0,4,5,0,14,12,0,0,0,0,7,9,16,10,0,0,0,0,15,16,16,16,8,0,0,0,1,14,11,4,0,0,0,0,10,16,1,0,0,0,0,0,15,9,0,0,0,0,7 +0,0,8,16,16,10,0,0,0,1,16,14,11,16,1,0,0,0,12,8,13,15,1,0,0,0,1,7,16,7,0,0,0,0,0,4,16,6,0,0,0,0,0,0,11,14,0,0,0,0,0,4,13,16,2,0,0,0,10,16,16,12,0,0,3 +0,2,12,15,16,10,1,0,0,3,16,5,10,16,3,0,0,0,0,5,15,7,0,0,0,0,0,14,8,0,0,0,0,0,0,15,4,0,0,0,0,0,0,4,14,2,0,0,0,1,2,2,15,6,0,0,0,2,12,15,12,1,0,0,3 +0,0,6,15,16,10,0,0,0,0,14,11,10,15,0,0,0,0,2,0,9,14,0,0,0,0,5,9,15,14,2,0,0,11,16,16,16,16,11,0,0,1,3,11,13,1,0,0,0,0,5,16,3,0,0,0,0,0,8,12,0,0,0,0,7 +0,0,0,8,12,1,0,0,0,0,6,16,13,0,0,0,0,0,10,14,1,0,0,0,0,0,14,8,0,0,0,0,0,0,15,12,8,8,2,0,0,0,11,16,13,14,13,0,0,0,8,16,5,10,15,0,0,0,0,8,16,13,7,0,6 +0,0,7,15,16,16,8,0,0,4,16,12,5,13,13,0,0,1,5,0,0,16,10,0,0,0,0,1,8,16,6,0,0,0,9,15,16,16,11,0,0,2,12,14,15,4,1,0,0,0,1,15,6,0,0,0,0,0,10,11,0,0,0,0,7 +0,0,6,15,16,13,1,0,0,3,16,10,11,16,3,0,0,5,8,0,4,16,4,0,0,0,0,2,9,16,1,0,0,0,9,16,16,15,6,0,0,0,10,10,16,10,3,0,0,0,0,11,12,0,0,0,0,0,7,13,1,0,0,0,7 +0,0,9,13,11,9,1,0,0,0,11,10,10,14,4,0,0,0,5,14,7,13,0,0,0,0,0,10,15,3,0,0,0,0,8,16,12,0,0,0,0,0,14,3,12,4,0,0,0,0,13,1,12,7,0,0,0,0,10,16,13,1,0,0,8 +0,0,4,12,15,2,0,0,0,0,12,14,14,15,2,0,0,2,15,1,11,16,4,0,0,5,11,0,0,11,7,0,0,5,11,0,0,13,3,0,0,0,15,0,1,15,0,0,0,0,14,7,13,11,0,0,0,0,2,15,12,3,0,0,0 +0,0,0,14,13,1,0,0,0,0,2,16,16,2,0,0,0,0,4,16,15,0,0,0,0,0,3,16,13,0,0,0,0,0,7,16,12,0,0,0,0,0,7,16,12,0,0,0,0,0,6,16,14,0,0,0,0,0,1,11,16,4,0,0,1 +0,0,8,11,16,12,1,0,0,2,16,16,9,16,3,0,0,0,13,11,8,15,2,0,0,0,5,16,16,6,0,0,0,0,9,16,12,0,0,0,0,2,16,12,16,0,0,0,0,3,16,4,16,2,0,0,0,0,12,16,12,0,0,0,8 +0,0,0,12,11,0,0,0,0,0,4,16,5,0,0,0,0,0,9,8,0,0,0,0,0,0,13,7,0,0,0,0,0,0,14,11,12,5,0,0,0,0,12,16,10,15,4,0,0,0,6,13,2,11,9,0,0,0,0,10,13,15,3,0,6 +0,0,1,15,11,0,0,0,0,0,9,16,16,1,0,0,0,8,16,16,15,0,0,0,1,16,16,16,14,0,0,0,0,4,6,15,15,0,0,0,0,0,2,16,12,0,0,0,0,0,1,16,13,0,0,0,0,0,0,14,12,0,0,0,1 +0,0,0,9,11,0,0,0,0,0,2,16,9,0,0,0,0,0,6,14,1,0,0,0,0,0,9,11,0,0,0,0,0,0,11,16,16,9,0,0,0,0,10,15,4,10,8,0,0,0,4,14,3,11,12,0,0,0,0,8,16,12,4,0,6 +0,0,15,16,16,12,1,0,0,5,16,13,11,16,4,0,0,1,16,2,3,15,4,0,0,0,11,11,15,11,0,0,0,0,4,16,14,1,0,0,0,0,13,16,8,0,0,0,0,2,16,14,11,0,0,0,0,0,12,16,7,0,0,0,8 +0,0,2,13,11,5,0,0,0,2,15,10,15,16,0,0,0,4,13,1,0,11,4,0,0,6,6,0,0,6,6,0,0,5,8,0,0,6,7,0,0,0,12,0,0,11,6,0,0,0,10,8,2,16,1,0,0,0,2,11,13,6,0,0,0 +0,0,4,16,9,0,0,0,0,0,15,16,16,9,0,0,0,3,16,15,5,13,3,0,0,5,8,4,0,5,7,0,0,5,6,0,0,7,7,0,0,2,11,0,0,11,6,0,0,0,13,1,6,15,0,0,0,0,5,15,13,3,0,0,0 
+0,0,9,13,16,12,1,0,0,5,16,14,16,16,4,0,0,4,16,0,13,13,1,0,0,2,14,14,15,3,0,0,0,0,8,16,4,0,0,0,0,0,15,16,5,0,0,0,0,1,16,16,7,0,0,0,0,0,12,16,6,0,0,0,8 +0,0,8,3,11,11,2,0,0,2,16,6,7,12,8,0,0,1,16,1,7,14,1,0,0,0,11,14,12,1,0,0,0,0,11,16,1,0,0,0,0,3,12,11,8,0,0,0,0,5,10,5,12,0,0,0,0,1,10,16,9,0,0,0,8 +0,0,1,14,7,0,0,0,0,0,10,16,4,0,0,0,0,1,15,7,0,0,0,0,0,2,16,3,0,0,0,0,0,3,16,16,16,14,3,0,0,1,15,16,0,4,13,1,0,0,10,15,3,7,16,5,0,0,1,11,15,15,9,0,6 +0,0,6,14,0,0,12,6,0,0,15,13,0,6,16,6,0,2,16,12,4,16,12,0,0,1,14,16,16,16,7,0,0,0,1,8,16,7,0,0,0,0,0,12,16,2,0,0,0,0,3,16,6,0,0,0,0,0,7,15,0,0,0,0,4 +0,0,9,16,15,8,0,0,0,3,16,13,7,8,0,0,0,10,16,5,0,0,0,0,0,10,16,15,5,0,0,0,0,0,4,9,15,3,0,0,0,0,0,2,16,8,0,0,0,0,2,12,16,3,0,0,0,0,9,16,8,0,0,0,5 +0,0,5,12,15,15,5,0,0,0,8,7,4,13,7,0,0,0,0,0,2,16,1,0,0,0,2,4,12,10,0,0,0,7,16,14,16,12,2,0,0,2,0,11,7,0,0,0,0,0,3,14,1,0,0,0,0,0,8,7,0,0,0,0,7 +0,0,9,16,16,7,0,0,0,0,8,9,11,16,1,0,0,0,0,2,13,16,1,0,0,0,0,13,16,6,0,0,0,0,0,3,13,13,2,0,0,0,0,0,1,13,9,0,0,0,4,2,1,12,13,0,0,0,11,16,16,16,11,0,3 +0,0,9,16,15,4,0,0,0,0,12,14,12,16,1,0,0,0,0,10,16,14,0,0,0,0,2,16,16,10,1,0,0,0,0,4,8,13,13,0,0,0,0,0,0,4,16,2,0,0,9,6,9,15,15,0,0,0,10,16,16,14,3,0,3 +0,0,7,14,16,8,0,0,0,1,16,16,8,6,0,0,0,8,16,10,0,0,0,0,0,14,16,16,10,1,0,0,0,7,12,8,15,6,0,0,0,0,0,0,14,9,0,0,0,0,3,9,16,6,0,0,0,0,12,16,10,1,0,0,5 +0,0,0,5,12,11,1,0,0,0,3,14,6,7,11,0,0,5,16,2,1,9,9,0,0,8,14,8,14,16,1,0,0,1,7,8,13,10,0,0,0,0,0,1,14,4,0,0,0,0,0,5,13,0,0,0,0,0,0,11,7,0,0,0,9 +0,0,1,14,10,0,0,0,0,0,3,16,16,1,0,0,0,0,3,16,13,0,0,0,0,0,5,16,9,0,0,0,0,0,6,16,10,0,0,0,0,0,4,16,9,0,0,0,0,0,3,16,6,0,0,0,0,0,1,12,6,0,0,0,1 +0,1,12,10,0,0,0,0,0,4,16,3,0,9,9,0,0,8,14,0,3,16,6,0,0,9,15,8,13,14,5,0,0,1,12,16,16,15,5,0,0,0,1,14,6,0,0,0,0,0,8,13,0,0,0,0,0,2,12,3,0,0,0,0,4 +0,0,14,16,16,8,0,0,0,0,6,5,8,16,3,0,0,0,0,0,7,16,2,0,0,0,0,7,16,13,0,0,0,0,0,8,12,16,7,0,0,0,0,0,0,8,14,1,0,0,10,4,4,10,16,1,0,0,12,16,16,16,9,0,3 +0,0,2,15,16,12,2,0,0,0,13,12,4,12,12,0,0,6,16,1,3,13,12,0,0,11,16,15,16,16,9,0,0,1,7,9,16,11,0,0,0,0,0,9,15,1,0,0,0,0,0,14,10,0,0,0,0,0,4,16,3,0,0,0,9 +0,0,9,16,16,5,0,0,0,0,13,16,9,6,0,0,0,0,9,12,0,0,0,0,0,0,3,16,3,0,0,0,0,0,0,12,10,0,0,0,0,0,0,5,15,0,0,0,0,0,3,5,16,5,0,0,0,0,7,16,15,1,0,0,5 +0,3,15,16,13,4,0,0,0,4,6,4,10,16,0,0,0,0,0,5,14,11,0,0,0,0,10,16,12,0,0,0,0,0,6,9,15,12,0,0,0,0,0,0,0,12,8,0,0,1,6,4,6,15,8,0,0,4,13,13,11,7,0,0,3 +0,0,1,15,12,0,0,0,0,0,6,16,15,0,0,0,0,0,5,16,9,0,0,0,0,0,5,16,6,0,0,0,0,0,3,16,8,0,0,0,0,0,2,16,8,0,0,0,0,0,1,16,9,0,0,0,0,0,1,13,6,0,0,0,1 +0,0,10,11,9,1,0,0,0,0,14,16,16,6,0,0,0,0,14,12,6,15,0,0,0,1,16,6,0,14,3,0,0,3,16,6,0,12,6,0,0,3,16,0,6,15,4,0,0,1,16,12,16,15,2,0,0,0,7,16,14,5,0,0,0 +0,0,6,14,16,10,0,0,0,0,13,13,10,15,0,0,0,0,5,15,11,14,0,0,0,0,0,10,16,7,0,0,0,0,0,14,16,1,0,0,0,0,8,15,15,2,0,0,0,0,12,14,16,1,0,0,0,0,10,16,11,0,0,0,8 +0,5,14,16,8,1,0,0,0,4,11,4,12,11,0,0,0,1,2,1,11,11,0,0,0,0,1,15,13,1,0,0,0,0,1,10,16,9,0,0,0,0,0,0,1,13,8,0,0,5,5,0,2,12,8,0,0,5,16,16,16,13,1,0,3 +0,0,12,16,16,16,11,0,0,0,8,10,9,16,11,0,0,0,0,0,9,16,2,0,0,0,0,2,15,9,0,0,0,8,16,16,16,14,0,0,0,3,9,16,12,3,0,0,0,0,9,16,2,0,0,0,0,0,14,10,0,0,0,0,7 +0,0,2,10,16,13,1,0,0,8,16,15,5,12,6,0,0,10,16,10,0,7,10,0,0,12,16,12,10,16,8,0,0,3,12,16,16,12,0,0,0,0,0,5,16,4,0,0,0,0,0,7,15,0,0,0,0,0,0,11,11,0,0,0,9 +0,0,3,13,10,0,0,0,0,0,11,11,11,5,0,0,0,2,16,6,0,10,1,0,0,2,14,1,0,6,5,0,0,3,11,0,0,2,9,0,0,2,14,0,0,4,9,0,0,0,12,9,7,12,9,0,0,0,2,11,15,12,4,0,0 
+0,0,12,15,16,16,15,2,0,0,10,12,10,14,14,2,0,0,0,0,2,16,7,0,0,0,0,0,12,11,0,0,0,3,12,14,16,13,3,0,0,12,12,16,11,7,0,0,0,0,5,15,2,0,0,0,0,0,12,11,0,0,0,0,7 +0,0,3,16,9,1,0,0,0,0,12,16,16,1,0,0,0,0,15,16,10,0,0,0,0,0,14,16,5,0,0,0,0,0,13,16,4,0,0,0,0,0,13,16,4,0,0,0,0,0,9,16,5,0,0,0,0,0,2,16,6,0,0,0,1 +0,0,5,16,14,3,0,0,0,0,9,15,10,11,0,0,0,0,0,4,8,13,0,0,0,0,0,0,14,10,0,0,0,0,0,4,16,5,0,0,0,0,0,13,16,5,0,0,0,0,4,16,16,16,14,8,0,0,4,16,12,8,9,13,2 +0,0,9,16,16,10,0,0,0,0,15,13,11,16,1,0,0,0,3,1,13,13,0,0,0,0,0,5,16,5,0,0,0,0,1,15,11,0,0,0,0,0,8,16,2,2,2,0,0,0,15,13,14,16,15,3,0,0,9,16,15,10,8,1,2 +0,0,6,15,16,16,9,0,0,0,9,12,8,16,12,0,0,0,0,0,5,16,4,0,0,1,7,8,16,9,2,0,0,9,16,16,16,16,9,0,0,2,7,16,4,0,0,0,0,0,8,14,0,0,0,0,0,0,10,9,0,0,0,0,7 +0,0,0,6,13,0,0,0,0,0,0,13,9,0,0,0,0,0,1,16,3,0,0,0,0,0,3,15,0,0,0,0,0,0,6,14,8,7,1,0,0,0,7,16,12,12,13,2,0,0,4,16,3,3,13,7,0,0,0,7,16,16,11,1,6 +0,0,9,16,15,5,0,0,0,0,4,7,12,15,0,0,0,0,0,1,9,16,3,0,0,0,0,10,16,13,0,0,0,0,0,3,8,15,9,0,0,0,0,0,0,7,14,0,0,0,10,7,4,10,16,1,0,0,7,16,16,16,10,0,3 +0,0,15,16,15,7,0,0,0,2,16,12,11,9,0,0,0,0,12,12,0,0,0,0,0,0,3,15,5,0,0,0,0,0,0,8,14,0,0,0,0,0,0,2,16,2,0,0,0,0,7,8,16,4,0,0,0,0,12,16,14,1,0,0,5 +0,0,2,6,15,16,8,0,0,0,15,16,10,8,16,0,0,4,16,5,0,7,12,0,0,9,16,10,10,16,8,0,0,1,9,16,15,16,4,0,0,0,0,0,9,13,0,0,0,0,0,3,16,4,0,0,0,0,0,7,13,0,0,0,9 +0,1,15,16,16,13,0,0,0,0,7,8,12,16,0,0,0,0,0,0,10,14,0,0,0,1,4,6,16,5,0,0,0,11,16,16,16,15,8,0,0,4,9,16,8,8,2,0,0,0,11,12,0,0,0,0,0,2,16,4,0,0,0,0,7 +0,0,0,10,10,0,0,0,0,0,3,16,10,0,0,0,0,0,10,10,0,0,0,0,0,0,16,5,0,0,0,0,0,0,16,7,8,7,0,0,0,0,12,16,13,9,13,2,0,0,6,16,5,5,13,8,0,0,0,9,14,13,9,1,6 +0,0,15,15,8,1,0,0,0,0,4,6,11,15,2,0,0,0,0,0,0,16,4,0,0,0,4,9,15,13,1,0,0,0,4,12,16,13,1,0,0,0,1,0,2,10,8,0,0,4,11,3,5,12,8,0,0,1,9,15,15,10,0,0,3 +0,0,10,16,0,0,12,6,0,2,15,9,0,8,16,5,0,6,16,6,1,14,13,0,0,7,16,11,11,16,13,0,0,1,11,14,16,12,2,0,0,0,0,11,14,1,0,0,0,0,2,16,5,0,0,0,0,0,13,10,0,0,0,0,4 +0,0,2,16,10,0,0,0,0,0,5,16,15,1,0,0,0,0,4,16,12,0,0,0,0,0,4,16,11,0,0,0,0,0,1,16,12,0,0,0,0,0,1,16,12,0,0,0,0,0,1,16,10,0,0,0,0,0,0,15,5,0,0,0,1 +0,2,10,16,16,16,7,0,0,14,16,15,11,13,5,0,0,12,15,1,0,0,0,0,0,4,16,7,0,0,0,0,0,0,16,8,0,0,0,0,0,0,15,9,0,0,0,0,0,0,12,14,0,0,0,0,0,0,15,7,0,0,0,0,5 +0,0,1,11,16,16,6,0,0,0,13,14,7,10,15,0,0,6,16,4,2,11,14,0,0,7,16,16,16,16,10,0,0,0,5,8,9,16,2,0,0,0,0,2,14,8,0,0,0,0,0,11,13,0,0,0,0,0,0,16,7,0,0,0,9 +0,0,9,15,14,2,0,0,0,3,16,13,15,11,0,0,0,2,9,0,14,9,0,0,0,0,0,5,16,5,0,0,0,0,0,12,14,0,0,0,0,0,5,16,4,0,0,0,0,1,14,15,8,8,9,1,0,0,11,16,16,16,16,7,2 +0,0,0,7,16,16,5,0,0,0,8,15,6,13,11,0,0,2,15,5,0,13,10,0,0,10,16,10,13,16,11,0,0,2,11,16,16,16,6,0,0,0,0,1,16,8,0,0,0,0,0,7,16,1,0,0,0,0,0,10,12,0,0,0,9 +0,0,4,13,10,1,0,0,0,0,11,16,14,7,0,0,0,1,15,9,0,12,1,0,0,2,16,6,0,7,5,0,0,2,16,10,0,8,8,0,0,1,16,5,2,15,5,0,0,0,11,16,16,13,0,0,0,0,4,15,11,2,0,0,0 +0,0,7,16,2,0,4,0,0,0,15,12,0,4,16,3,0,6,16,2,2,15,10,0,0,10,14,0,12,12,1,0,0,9,16,16,16,10,0,0,0,1,8,16,9,1,0,0,0,0,7,14,1,0,0,0,0,0,10,8,0,0,0,0,4 +0,4,16,16,16,12,1,0,0,0,7,8,12,16,2,0,0,0,0,0,8,15,1,0,0,0,3,4,15,11,0,0,0,2,16,16,16,16,6,0,0,0,9,16,10,4,1,0,0,0,10,15,1,0,0,0,0,1,16,8,0,0,0,0,7 +0,0,9,16,16,9,0,0,0,0,13,13,15,16,0,0,0,0,9,16,16,7,0,0,0,0,4,16,12,0,0,0,0,0,10,15,14,1,0,0,0,0,14,7,12,5,0,0,0,0,15,9,16,5,0,0,0,0,9,16,14,1,0,0,8 +0,1,8,16,14,7,0,0,0,5,16,6,8,15,0,0,0,1,13,8,15,9,0,0,0,0,1,15,10,0,0,0,0,0,8,12,11,0,0,0,0,0,13,5,8,1,0,0,0,0,14,1,9,4,0,0,0,0,9,14,14,1,0,0,8 
+0,0,9,15,16,7,0,0,0,0,16,9,7,15,0,0,0,0,5,14,13,8,0,0,0,0,0,14,14,0,0,0,0,0,4,16,13,0,0,0,0,0,10,8,12,0,0,0,0,0,13,8,15,1,0,0,0,0,11,16,5,0,0,0,8 +0,1,12,16,14,6,0,0,0,2,13,7,5,15,2,0,0,0,6,15,9,13,0,0,0,0,0,10,15,1,0,0,0,0,4,15,8,0,0,0,0,0,12,4,12,0,0,0,0,3,14,10,11,0,0,0,0,0,14,13,3,0,0,0,8 +0,0,1,16,8,0,0,0,0,0,2,16,16,7,0,0,0,0,2,16,16,1,0,0,0,0,3,16,16,1,0,0,0,0,2,16,12,0,0,0,0,0,3,16,14,0,0,0,0,0,4,16,14,0,0,0,0,0,2,15,10,0,0,0,1 +0,0,0,9,16,10,0,0,0,0,3,15,12,15,4,0,0,1,12,14,1,15,11,0,0,8,16,12,14,16,7,0,0,7,16,16,16,12,0,0,0,0,7,8,16,5,0,0,0,0,0,8,13,0,0,0,0,0,0,9,11,0,0,0,9 +0,4,16,16,16,14,3,0,0,1,8,8,8,15,10,0,0,0,0,0,5,16,6,0,0,0,0,0,13,12,0,0,0,8,16,16,16,15,8,0,0,3,11,16,9,8,3,0,0,0,14,12,0,0,0,0,0,5,16,2,0,0,0,0,7 +0,0,15,7,0,2,11,1,0,3,16,6,1,12,13,2,0,4,16,5,8,15,3,0,0,2,16,16,16,13,0,0,0,0,4,14,15,3,0,0,0,0,7,16,3,0,0,0,0,1,13,10,0,0,0,0,0,1,16,3,0,0,0,0,4 +0,0,3,13,15,6,0,0,0,2,11,15,7,16,3,0,0,16,16,9,8,16,6,0,0,9,16,16,16,16,5,0,0,0,4,8,16,11,0,0,0,0,0,9,16,1,0,0,0,0,0,14,11,0,0,0,0,0,1,16,7,0,0,0,9 +0,0,0,3,15,16,3,0,0,0,0,12,10,15,3,0,0,0,0,2,6,14,0,0,0,0,0,1,15,5,0,0,0,0,1,13,9,0,0,0,0,1,12,12,1,0,0,0,0,9,16,16,16,13,5,0,0,2,4,5,10,13,12,0,2 +0,0,1,11,13,0,0,0,0,0,7,16,8,1,0,0,0,0,15,12,0,0,0,0,0,1,15,7,0,0,0,0,0,0,15,7,4,5,1,0,0,0,15,16,16,16,13,1,0,0,10,16,9,8,16,6,0,0,1,9,14,16,13,2,6 +0,0,4,13,15,4,0,0,0,0,11,13,8,13,0,0,0,0,5,2,4,13,0,0,0,0,0,0,12,7,0,0,0,0,0,6,15,2,0,0,0,0,0,15,5,0,0,0,0,0,9,14,6,11,5,0,0,0,9,16,12,12,10,0,2 +0,0,3,15,16,6,0,0,0,0,6,11,11,16,5,0,0,0,0,0,7,16,7,0,0,0,0,0,13,16,2,0,0,9,13,13,16,12,2,0,0,2,12,14,16,16,15,0,0,0,0,15,13,0,0,0,0,0,3,16,8,0,0,0,7 +0,2,16,16,16,10,0,0,0,0,11,8,12,16,2,0,0,0,0,0,14,13,0,0,0,2,9,12,16,4,0,0,0,11,16,16,16,16,8,0,0,0,10,14,1,4,0,0,0,2,16,7,0,0,0,0,0,2,15,6,0,0,0,0,7 +0,0,0,14,15,4,0,0,0,0,7,16,14,2,0,0,0,0,14,13,1,0,0,0,0,0,3,7,0,0,0,0,0,3,16,6,8,3,0,0,0,2,15,16,16,15,3,0,0,0,9,16,15,15,11,0,0,0,0,10,16,16,11,0,6 +0,1,10,16,11,0,0,0,0,8,16,12,16,3,0,0,0,1,12,3,16,4,0,0,0,0,0,6,15,0,0,0,0,0,0,11,13,0,0,0,0,0,3,16,7,0,0,0,0,0,11,16,16,16,14,5,0,0,11,13,9,12,15,11,2 +0,1,15,8,0,1,2,0,0,8,16,8,0,12,15,0,0,7,16,11,10,16,7,0,0,2,15,16,16,16,2,0,0,0,2,12,16,9,0,0,0,0,1,16,12,0,0,0,0,0,10,15,1,0,0,0,0,0,12,11,0,0,0,0,4 +0,0,0,11,13,0,0,0,0,0,4,16,8,1,0,0,0,0,13,10,0,0,0,0,0,0,15,5,0,0,0,0,0,1,16,3,4,2,0,0,0,0,13,16,15,14,7,0,0,0,7,16,7,4,16,1,0,0,0,9,16,16,11,0,6 +0,0,1,11,16,15,3,0,0,1,13,13,5,13,8,0,0,6,16,8,2,15,8,0,0,10,16,16,16,16,7,0,0,1,1,6,15,14,1,0,0,0,0,3,16,5,0,0,0,0,0,9,14,0,0,0,0,0,1,15,7,0,0,0,9 +0,4,14,16,16,12,2,0,0,10,16,10,8,14,7,0,0,10,12,0,0,0,0,0,0,2,16,7,0,0,0,0,0,0,15,11,0,0,0,0,0,0,9,15,3,0,0,0,0,1,12,16,4,0,0,0,0,5,16,15,1,0,0,0,5 +0,0,6,10,12,4,0,0,0,2,15,10,8,9,0,0,0,0,6,0,3,12,0,0,0,0,0,0,8,7,0,0,0,0,0,6,14,1,0,0,0,0,4,14,6,0,0,0,0,1,16,11,7,11,8,0,0,0,13,16,13,12,12,0,2 +0,0,1,11,6,0,0,0,0,0,7,13,2,0,0,0,0,0,13,6,0,0,0,0,0,1,13,1,0,0,0,0,0,1,12,0,4,3,0,0,0,1,11,11,14,14,10,0,0,0,12,15,5,4,15,3,0,0,1,13,16,12,9,1,6 +0,0,5,13,11,2,0,0,0,2,14,15,15,12,0,0,0,2,16,5,0,7,4,0,0,4,16,3,0,3,6,0,0,3,14,0,0,8,7,0,0,2,16,0,2,14,6,0,0,1,15,15,16,11,0,0,0,0,7,14,10,1,0,0,0 +0,0,0,10,12,0,0,0,0,0,4,16,7,1,0,0,0,0,11,8,0,0,0,0,0,0,12,5,0,0,0,0,0,0,14,2,3,4,1,0,0,0,11,10,16,13,13,0,0,0,7,16,7,0,11,5,0,0,0,8,15,16,13,1,6 +0,1,7,11,15,11,1,0,0,4,9,4,0,13,5,0,0,0,0,0,9,15,1,0,0,0,4,15,11,3,0,0,0,0,3,16,8,2,0,0,0,0,0,2,7,16,6,0,0,0,0,6,9,16,5,0,0,0,13,12,7,3,0,0,3 
+0,0,1,16,14,2,0,0,0,0,4,16,16,8,0,0,0,0,7,16,16,3,0,0,0,0,10,16,16,3,0,0,0,0,13,16,12,0,0,0,0,0,15,16,12,0,0,0,0,0,11,16,11,0,0,0,0,0,3,15,16,9,0,0,1 +0,0,0,2,9,15,9,0,0,0,8,14,7,9,12,0,0,3,16,5,4,13,6,0,0,2,10,12,15,14,0,0,0,0,0,0,1,13,3,0,0,0,0,0,2,16,0,0,0,0,0,0,7,12,0,0,0,0,0,0,12,6,0,0,9 +0,3,12,16,16,14,0,0,0,13,16,14,8,5,0,0,0,10,10,0,0,0,0,0,0,5,15,3,0,0,0,0,0,0,14,9,0,0,0,0,0,0,7,14,1,0,0,0,0,0,10,16,4,0,0,0,0,0,15,15,2,0,0,0,5 +0,0,4,11,14,12,0,0,0,4,16,9,5,16,1,0,0,11,8,0,6,13,0,0,0,5,6,1,13,5,0,0,0,0,0,11,9,0,0,0,0,0,4,16,1,0,0,0,0,0,10,9,0,0,1,0,0,0,3,16,16,14,6,0,2 +0,2,12,16,15,8,0,0,0,2,7,4,10,16,2,0,0,0,0,3,16,13,0,0,0,0,0,15,15,1,0,0,0,0,0,10,13,0,0,0,0,0,0,0,14,10,1,0,0,0,1,5,10,16,4,0,0,1,13,13,12,8,0,0,3 +0,0,1,11,8,0,0,0,0,0,8,15,3,0,0,0,0,1,15,4,0,0,0,0,0,4,16,8,15,14,3,0,0,4,14,11,7,5,10,0,0,1,13,0,0,1,13,0,0,0,11,5,0,9,13,0,0,0,2,11,16,12,1,0,6 +0,0,0,10,15,4,0,0,0,0,6,16,10,1,0,0,0,0,11,15,1,0,0,0,0,0,16,10,6,3,0,0,0,4,16,16,16,16,7,0,0,0,15,16,11,6,16,1,0,0,11,13,2,4,16,4,0,0,0,10,16,16,13,1,6 +0,0,5,14,16,14,0,0,0,7,16,15,8,8,0,0,0,14,13,0,0,0,0,0,0,13,16,13,3,0,0,0,0,0,5,11,15,1,0,0,0,0,0,0,16,9,0,0,0,0,5,11,16,8,0,0,0,0,11,16,10,1,0,0,5 +0,0,0,5,15,0,0,0,0,0,0,11,10,3,8,0,0,0,6,15,1,13,9,0,0,3,16,8,9,16,5,0,0,12,16,15,15,13,8,0,0,3,4,0,16,2,0,0,0,0,0,4,11,0,0,0,0,0,0,6,11,0,0,0,4 +0,0,5,14,16,8,0,0,0,6,15,10,12,16,1,0,0,9,5,0,13,8,0,0,0,6,15,13,15,1,0,0,0,0,11,16,4,0,0,0,0,0,15,16,7,0,0,0,0,0,16,13,14,0,0,0,0,0,5,16,15,4,0,0,8 +0,2,16,16,15,3,0,0,0,0,3,3,14,8,0,0,0,0,0,3,16,2,0,0,0,0,0,10,14,0,0,0,0,3,16,16,16,16,8,0,0,0,11,15,4,4,1,0,0,0,14,6,0,0,0,0,0,3,16,3,0,0,0,0,7 +0,1,10,16,16,14,6,0,0,11,16,7,4,16,10,0,0,9,16,10,10,16,4,0,0,0,5,12,16,3,0,0,0,0,0,7,16,0,0,0,0,0,0,13,15,0,0,0,0,0,6,16,7,0,0,0,0,2,15,10,0,0,0,0,9 +0,1,9,16,16,14,2,0,0,11,16,7,5,16,6,0,0,16,14,10,16,16,4,0,0,4,8,10,16,13,0,0,0,0,1,13,13,1,0,0,0,0,6,16,6,0,0,0,0,0,12,10,0,0,0,0,0,0,11,11,0,0,0,0,9 +0,0,5,14,13,2,0,0,0,3,14,5,8,12,0,0,0,3,9,0,12,4,0,0,0,1,14,9,13,0,0,0,0,0,2,14,12,0,0,0,0,0,4,14,14,3,0,0,0,0,10,7,12,6,0,0,0,0,6,16,13,1,0,0,8 +0,0,0,12,8,0,6,3,0,0,4,16,3,3,15,3,0,0,12,10,0,12,8,0,0,8,15,5,8,16,4,0,0,10,16,16,16,16,10,0,0,0,4,5,15,1,0,0,0,0,0,8,11,0,0,0,0,0,0,12,6,0,0,0,4 +0,3,13,16,4,0,0,0,0,7,14,16,8,0,0,0,0,0,1,16,7,0,0,0,0,0,9,15,1,0,0,0,0,1,16,8,0,0,0,0,0,7,16,1,0,0,0,0,0,9,16,4,4,4,4,0,0,4,16,16,16,16,8,0,2 +0,0,2,16,15,7,0,0,0,0,3,12,13,13,0,0,0,0,0,6,16,10,0,0,0,0,0,6,16,3,0,0,0,0,3,15,10,12,0,0,0,1,13,5,0,13,6,0,0,0,16,5,0,7,15,0,0,0,3,11,16,16,12,2,8 +0,0,9,13,16,9,0,0,0,0,4,5,11,15,0,0,0,0,0,6,15,8,0,0,0,0,0,13,10,0,0,0,0,0,0,8,12,1,0,0,0,0,0,0,9,14,2,0,0,0,0,3,12,16,4,0,0,0,11,16,11,3,0,0,3 +0,0,2,13,9,1,0,0,0,1,13,7,15,5,0,0,1,15,9,0,15,2,0,0,0,6,13,7,13,0,0,0,0,0,5,16,8,0,0,0,0,0,1,15,14,2,0,0,0,0,4,12,6,11,0,0,0,0,0,11,16,8,0,0,8 +0,0,2,13,6,0,4,0,0,0,13,11,0,2,15,3,0,4,16,2,0,13,8,0,0,3,16,16,12,16,7,0,0,0,2,7,15,10,1,0,0,0,0,4,13,0,0,0,0,0,0,13,5,0,0,0,0,0,2,12,0,0,0,0,4 +0,4,12,14,12,6,0,0,0,2,4,4,5,16,6,0,0,0,0,0,6,16,4,0,0,0,0,5,16,6,0,0,0,0,0,5,16,3,0,0,0,0,0,0,11,13,0,0,0,0,0,5,13,16,0,0,0,5,16,12,11,1,0,0,3 +0,7,16,16,16,9,0,0,0,3,8,4,13,16,0,0,0,0,0,8,16,6,0,0,0,0,0,4,16,4,0,0,0,0,0,0,11,15,5,0,0,0,0,0,1,13,15,0,0,0,5,9,15,16,10,0,0,4,16,16,10,3,0,0,3 +0,0,1,14,11,0,0,0,0,0,12,13,1,0,0,0,0,5,16,3,0,7,15,1,0,6,16,11,12,16,10,0,0,0,11,14,16,11,0,0,0,0,0,7,15,2,0,0,0,0,0,15,12,0,0,0,0,0,1,15,7,0,0,0,4 +0,0,11,16,15,3,0,0,0,0,2,4,16,7,0,0,0,0,0,4,16,3,0,0,0,2,4,10,14,4,2,0,0,9,16,16,16,16,8,0,0,0,7,15,2,0,0,0,0,0,10,9,0,0,0,0,0,0,12,7,0,0,0,0,7 
+0,0,7,15,15,3,0,0,0,0,10,14,12,14,0,0,0,0,1,6,16,12,0,0,0,0,1,15,14,3,0,0,0,0,0,9,15,5,0,0,0,0,0,0,5,15,6,0,0,0,7,6,7,13,16,1,0,0,7,16,16,15,8,0,3 +0,0,1,14,6,0,0,0,0,0,6,15,3,0,0,0,0,1,14,7,0,0,0,0,0,0,16,9,8,3,0,0,0,7,16,16,12,15,3,0,0,5,14,7,0,4,9,0,0,3,15,6,1,12,9,0,0,0,4,13,16,14,3,0,6 +0,0,2,13,14,2,0,0,0,0,10,16,13,4,0,0,0,0,13,15,1,0,0,0,0,1,16,9,0,0,0,0,0,1,16,15,12,2,0,0,0,1,15,16,11,13,0,0,0,0,11,12,8,16,1,0,0,0,2,13,16,15,0,0,6 +0,0,0,10,14,0,3,1,0,0,5,16,6,1,14,5,0,2,15,8,3,10,13,0,0,8,16,14,16,16,8,0,0,5,11,6,15,13,3,0,0,0,0,1,15,0,0,0,0,0,0,7,10,0,0,0,0,0,0,13,3,0,0,0,4 +0,1,8,16,16,11,0,0,0,2,10,6,12,12,0,0,0,0,0,0,11,11,0,0,0,0,0,4,16,8,3,0,0,3,15,16,16,16,11,0,0,4,10,16,6,6,2,0,0,0,9,13,0,0,0,0,0,1,14,7,0,0,0,0,7 +0,0,0,12,14,2,0,0,0,0,6,15,16,4,0,0,0,0,13,16,12,0,0,0,0,1,14,16,8,0,0,0,0,1,15,16,2,0,0,0,0,1,15,16,4,0,0,0,0,0,10,16,11,4,0,0,0,0,0,11,13,5,0,0,1 +0,0,9,16,13,3,0,0,0,2,16,16,16,12,0,0,0,0,0,6,16,7,0,0,0,0,1,15,12,0,0,0,0,0,13,15,2,0,0,0,0,4,16,7,0,0,0,0,0,8,16,8,6,8,3,0,0,1,11,16,16,12,4,0,2 +0,0,10,16,16,9,0,0,0,4,13,5,9,12,0,0,0,0,0,1,13,5,0,0,0,0,5,13,16,16,9,0,0,0,11,16,11,11,9,0,0,0,1,13,3,0,0,0,0,0,8,10,0,0,0,0,0,0,11,8,0,0,0,0,7 +0,0,1,12,16,3,0,0,0,0,8,8,4,0,0,0,0,0,14,1,0,0,0,0,0,2,16,9,8,6,1,0,0,4,16,14,11,12,6,0,0,2,16,4,1,7,11,0,0,0,11,11,5,13,9,0,0,0,0,10,13,10,3,0,6 +0,0,5,16,16,6,0,0,0,0,16,9,11,13,0,0,0,0,11,11,14,8,0,0,0,0,1,15,12,1,0,0,0,0,3,16,15,4,0,0,0,0,12,10,7,13,1,0,0,0,15,4,3,16,6,0,0,0,8,16,16,13,1,0,8 +0,0,1,11,16,15,2,0,0,2,14,15,11,16,6,0,0,11,14,2,8,15,1,0,0,8,16,12,16,16,9,0,0,0,3,7,9,16,8,0,0,0,0,0,13,15,1,0,0,0,0,7,16,4,0,0,0,0,0,11,13,0,0,0,9 +0,0,3,11,15,15,4,0,0,2,14,8,4,15,5,0,0,7,11,1,13,15,1,0,0,1,11,12,13,16,5,0,0,0,0,0,8,12,0,0,0,0,0,3,15,2,0,0,0,0,0,11,7,0,0,0,0,0,0,13,4,0,0,0,9 +0,0,1,8,15,16,7,0,0,1,13,14,9,16,8,0,0,8,16,5,11,15,2,0,0,5,16,16,16,10,1,0,0,0,1,4,10,16,9,0,0,0,0,0,3,16,7,0,0,0,0,3,14,14,1,0,0,0,0,11,15,3,0,0,9 +0,2,12,16,16,9,0,0,0,2,15,10,7,16,4,0,0,0,0,0,5,16,6,0,0,0,0,6,16,13,0,0,0,0,0,5,13,16,6,0,0,0,0,0,0,9,16,0,0,0,6,4,5,12,16,2,0,2,13,16,16,16,10,0,3 +0,0,5,16,14,1,0,0,0,0,13,10,9,10,0,0,0,1,16,4,0,16,0,0,0,3,16,7,0,13,6,0,0,5,12,0,0,13,8,0,0,4,16,0,0,14,5,0,0,0,15,13,14,12,0,0,0,0,5,14,10,4,0,0,0 +0,0,0,9,12,0,0,0,0,0,5,16,6,0,0,0,0,0,10,11,0,0,0,0,0,0,12,8,4,0,0,0,0,0,13,16,15,11,1,0,0,0,13,14,1,6,6,0,0,0,5,12,1,11,6,0,0,0,0,9,15,10,0,0,6 +0,1,12,16,16,8,0,0,0,1,8,5,9,12,0,0,0,0,0,0,11,8,0,0,0,0,5,14,14,10,5,0,0,0,3,15,14,12,5,0,0,0,2,16,3,0,0,0,0,0,10,9,0,0,0,0,0,0,13,2,0,0,0,0,7 +0,0,1,14,13,0,0,0,0,0,6,16,8,0,0,0,0,0,12,9,0,0,0,0,0,0,14,10,16,12,1,0,0,1,16,10,14,8,13,0,0,0,13,9,10,0,12,4,0,0,8,10,0,5,16,5,0,0,1,11,16,16,11,0,6 +0,2,12,16,15,9,0,0,0,1,4,7,15,14,0,0,0,0,3,15,14,3,0,0,0,0,6,14,5,0,0,0,0,0,1,11,16,7,0,0,0,0,0,0,6,16,7,0,0,0,3,9,14,15,5,0,0,1,14,12,8,1,0,0,3 +0,0,4,14,16,7,0,0,0,1,16,14,8,5,0,0,0,8,10,0,0,0,0,0,0,3,13,0,0,0,0,0,0,0,12,6,0,0,0,0,0,0,4,13,0,0,0,0,0,0,0,16,5,0,0,0,0,0,7,16,4,0,0,0,5 +0,0,0,12,14,5,0,0,0,0,3,16,16,12,0,0,0,0,14,16,16,2,0,0,0,1,15,16,8,0,0,0,0,4,16,15,4,0,0,0,0,3,16,16,1,0,0,0,0,0,10,16,13,0,0,0,0,0,0,11,16,5,0,0,1 +0,0,6,13,16,16,13,0,0,6,16,10,7,15,14,0,0,1,4,0,10,16,6,0,0,0,0,10,16,4,0,0,0,0,3,16,9,0,0,0,0,0,15,11,0,0,0,0,0,0,16,10,0,0,0,0,0,0,9,16,16,12,1,0,2 +0,0,1,16,10,0,0,0,0,0,3,16,16,4,0,0,0,0,6,16,14,0,0,0,0,0,6,16,14,0,0,0,0,0,8,16,11,0,0,0,0,0,6,16,12,0,0,0,0,0,6,16,11,0,0,0,0,0,1,11,13,0,0,0,1 +0,0,3,13,16,16,3,0,0,0,8,10,5,5,0,0,0,7,15,0,0,0,0,0,0,10,13,2,0,0,0,0,0,3,15,14,1,0,0,0,0,0,0,11,11,0,0,0,0,0,0,7,13,0,0,0,0,0,3,16,11,0,0,0,5 
+0,5,15,13,0,0,0,0,0,3,10,16,5,0,0,0,0,0,0,14,4,0,0,0,0,0,6,14,0,0,0,0,0,1,14,7,0,0,0,0,0,7,13,1,0,0,0,0,0,13,14,10,11,9,5,0,0,5,15,16,14,10,3,0,2 +0,1,10,16,16,15,2,0,0,9,16,11,4,16,11,0,0,6,16,15,12,16,7,0,0,0,4,9,16,14,1,0,0,0,0,1,15,8,0,0,0,0,0,13,15,2,0,0,0,0,9,16,5,0,0,0,0,3,16,7,0,0,0,0,9 +0,0,1,11,13,1,0,0,0,0,11,13,9,11,0,0,0,2,16,2,0,10,2,0,0,5,12,0,0,5,7,0,0,8,8,0,0,4,8,0,0,2,12,0,0,7,7,0,0,0,13,5,8,16,2,0,0,0,2,15,14,5,0,0,0 +0,0,3,15,10,1,0,0,0,0,11,14,10,7,0,0,0,2,16,3,0,11,1,0,0,4,15,0,0,7,5,0,0,3,12,0,0,3,9,0,0,2,11,0,0,7,9,0,0,0,11,8,9,16,4,0,0,0,2,10,16,9,0,0,0 +0,0,1,15,14,0,0,0,0,0,6,16,16,8,0,0,0,0,12,16,16,2,0,0,0,0,12,16,13,0,0,0,0,0,15,16,7,0,0,0,0,0,14,16,7,0,0,0,0,0,11,16,11,0,0,0,0,0,2,14,13,0,0,0,1 +0,0,2,15,12,1,0,0,0,0,6,16,15,1,0,0,0,0,14,16,8,0,0,0,0,2,16,16,2,0,0,0,0,3,16,15,0,0,0,0,0,2,16,13,0,0,0,0,0,0,9,16,2,0,0,0,0,0,2,13,12,0,0,0,1 +0,0,2,14,14,4,0,0,0,0,15,16,16,15,1,0,0,6,14,16,5,12,6,0,0,2,11,12,0,6,8,0,0,0,12,4,0,9,6,0,0,0,11,5,0,12,3,0,0,0,8,12,11,15,2,0,0,0,1,12,10,2,0,0,0 +0,0,0,8,15,1,0,0,0,0,2,16,7,0,0,0,0,0,7,15,1,0,0,0,0,0,10,11,0,0,0,0,0,0,14,16,16,13,2,0,0,0,15,16,5,6,14,1,0,0,9,12,2,4,14,5,0,0,0,8,15,16,13,4,6 +0,0,7,13,0,0,3,8,0,0,15,7,0,3,15,7,0,0,16,12,6,14,9,0,0,0,6,12,16,12,0,0,0,0,0,2,14,2,0,0,0,0,0,9,9,0,0,0,0,0,2,13,1,0,0,0,0,0,8,5,0,0,0,0,4 +0,0,3,12,16,10,0,0,0,3,14,5,0,12,0,0,0,9,8,1,9,16,4,0,0,5,15,15,15,14,2,0,0,0,0,0,15,3,0,0,0,0,0,8,10,0,0,0,0,0,0,11,6,0,0,0,0,0,3,15,2,0,0,0,9 +0,0,1,13,16,5,0,0,0,0,6,16,11,2,0,0,0,0,13,15,0,0,0,0,0,3,16,9,0,0,0,0,0,4,16,5,5,10,3,0,0,4,16,5,16,14,15,4,0,0,13,11,7,8,16,7,0,0,2,12,16,16,12,2,6 +0,0,0,8,14,0,0,0,0,0,9,16,5,0,0,0,0,2,14,12,4,7,8,0,0,5,16,16,16,16,13,0,0,1,7,10,12,16,6,0,0,0,0,2,15,10,0,0,0,0,0,10,16,2,0,0,0,0,0,10,15,3,0,0,4 +0,0,3,16,14,3,0,0,0,0,6,16,16,5,0,0,0,0,12,16,9,0,0,0,0,0,16,16,5,0,0,0,0,2,16,16,1,0,0,0,0,1,16,16,1,0,0,0,0,0,13,16,1,0,0,0,0,0,4,15,0,0,0,0,1 +0,0,4,12,13,2,0,0,0,0,16,9,12,9,0,0,0,2,12,1,15,5,0,0,0,0,1,6,14,1,0,0,0,0,0,15,7,0,0,0,0,0,9,12,0,0,0,0,0,0,9,9,4,8,14,2,0,0,4,13,13,9,2,0,2 +0,0,2,15,15,6,0,0,0,0,9,13,4,15,3,0,0,0,14,7,0,8,4,0,0,0,15,4,0,5,8,0,0,4,13,0,0,7,6,0,0,2,12,0,0,12,5,0,0,0,15,9,12,14,0,0,0,0,3,13,13,0,0,0,0 +0,0,4,12,15,6,0,0,0,4,16,8,8,15,0,0,0,10,9,0,3,15,2,0,0,6,15,7,10,16,4,0,0,0,7,8,8,13,8,0,0,0,0,0,0,7,9,0,0,0,2,4,4,10,11,0,0,0,8,16,15,10,2,0,9 +0,0,0,0,7,13,1,0,0,0,0,0,9,16,2,0,0,0,1,9,16,16,0,0,0,4,12,16,16,16,0,0,0,5,10,3,12,15,0,0,0,0,0,0,10,15,0,0,0,0,0,0,12,15,0,0,0,0,0,0,9,12,0,0,1 +0,3,13,14,7,1,0,0,0,6,12,8,13,6,0,0,0,1,0,0,10,7,0,0,0,0,0,9,16,4,0,0,0,0,0,9,14,15,4,0,0,0,0,0,0,13,8,0,0,1,8,8,10,16,3,0,0,4,14,16,11,2,0,0,3 +0,0,2,11,0,0,0,0,0,0,11,13,0,0,0,0,0,0,16,6,0,0,0,0,0,0,15,7,4,1,0,0,0,4,16,16,16,15,3,0,0,1,16,13,8,14,12,0,0,0,14,15,12,15,12,0,0,0,2,15,16,14,4,0,6 +0,0,0,10,13,0,0,0,0,0,1,16,11,2,0,0,0,0,8,16,0,0,0,0,0,0,13,12,0,0,0,0,0,0,14,16,14,9,0,0,0,0,12,15,5,9,13,0,0,0,3,16,4,6,16,2,0,0,0,8,13,14,9,0,6 +0,0,5,13,16,3,0,0,0,1,15,15,16,8,0,0,0,5,8,5,16,11,0,0,0,2,12,16,16,16,10,0,0,2,14,16,15,10,5,0,0,0,1,16,11,0,0,0,0,0,5,16,8,0,0,0,0,0,8,14,2,0,0,0,7 +0,0,11,15,10,0,0,0,0,3,15,6,16,3,0,0,0,0,0,3,14,2,0,0,0,0,1,14,13,2,0,0,0,0,3,13,14,15,3,0,0,0,0,0,0,14,7,0,0,0,8,1,7,15,2,0,0,0,12,16,14,4,0,0,3 +0,0,3,13,13,4,0,0,0,1,14,13,10,16,1,0,0,6,12,1,3,16,6,0,0,7,15,4,10,16,8,0,0,2,11,15,12,16,8,0,0,0,0,0,0,15,8,0,0,0,5,10,4,16,8,0,0,0,2,12,16,11,1,0,9 +0,1,14,13,0,0,0,0,0,9,16,16,2,0,0,0,0,7,11,16,8,0,0,0,0,0,1,13,8,0,0,0,0,0,2,16,5,0,0,0,0,0,6,16,4,0,0,0,0,1,15,16,16,12,6,0,0,0,16,16,16,16,16,3,2 
+0,0,10,16,15,1,0,0,0,5,15,8,15,6,0,0,0,3,6,1,14,4,0,0,0,0,0,10,16,13,0,0,0,0,0,1,7,16,7,0,0,0,1,2,0,15,7,0,0,0,12,13,6,16,5,0,0,0,9,16,16,9,0,0,3 +0,0,4,13,14,1,0,0,0,1,16,9,16,3,0,0,0,2,5,0,12,6,0,0,0,0,3,6,16,4,0,0,0,6,16,16,16,16,6,0,0,4,6,13,12,4,2,0,0,0,0,14,4,0,0,0,0,0,5,11,0,0,0,0,7 +0,0,6,14,13,1,0,0,0,2,15,15,16,7,0,0,0,1,5,4,16,6,0,0,0,0,0,7,16,10,3,0,0,1,9,16,16,16,10,0,0,1,15,16,13,8,1,0,0,0,2,16,8,0,0,0,0,0,8,13,0,0,0,0,7 +0,0,8,15,15,5,0,0,0,4,16,13,16,9,0,0,0,1,4,4,16,6,0,0,0,0,0,11,16,5,0,0,0,1,13,16,16,15,5,0,0,5,13,16,13,12,5,0,0,0,9,16,3,0,0,0,0,0,12,10,0,0,0,0,7 +0,0,5,13,16,15,3,0,0,3,16,16,15,16,5,0,0,0,4,0,6,16,2,0,0,0,3,12,16,16,4,0,0,0,9,16,16,16,8,0,0,0,2,13,15,2,0,0,0,0,2,16,9,0,0,0,0,0,5,14,1,0,0,0,7 +0,0,6,14,3,0,0,0,0,0,12,14,14,0,0,0,0,0,14,7,14,2,0,0,0,0,10,5,10,6,0,0,0,0,0,0,13,5,0,0,0,0,0,2,14,5,3,0,0,0,8,16,16,16,16,0,0,0,9,14,8,8,8,2,2 +0,0,3,15,4,0,0,0,0,0,8,13,15,7,0,0,0,2,16,6,5,16,3,0,0,4,14,0,0,11,8,0,0,6,12,0,0,8,8,0,0,4,15,1,0,9,7,0,0,1,15,11,9,16,3,0,0,0,4,10,15,8,0,0,0 +0,1,8,15,13,2,0,0,0,5,14,8,14,7,0,0,0,1,2,2,15,2,0,0,0,0,0,15,15,3,0,0,0,0,0,9,14,16,3,0,0,0,0,0,0,13,8,0,0,0,6,8,8,15,4,0,0,0,11,12,12,5,0,0,3 +0,0,14,11,1,0,0,0,0,7,15,14,8,0,0,0,0,7,8,4,8,0,0,0,0,0,4,0,12,0,0,0,0,0,0,4,10,0,0,0,0,0,0,8,6,0,0,0,0,1,13,16,13,8,4,0,0,1,14,12,16,16,10,0,2 +0,0,5,14,16,10,0,0,0,3,15,13,8,15,6,0,0,0,14,13,7,16,9,0,0,0,10,16,16,12,3,0,0,0,6,16,16,3,0,0,0,0,14,13,16,7,0,0,0,0,14,12,14,9,0,0,0,0,6,14,11,2,0,0,8 +0,0,3,10,14,9,0,0,0,3,15,11,9,8,0,0,0,6,12,8,7,1,0,0,0,4,16,14,16,15,1,0,0,2,8,4,3,14,8,0,0,0,0,0,0,8,9,0,0,0,0,7,8,13,9,0,0,0,0,14,14,9,0,0,5 +0,0,0,4,13,2,0,0,0,0,0,14,8,0,0,0,0,0,5,14,0,9,7,0,0,2,16,3,5,16,5,0,0,7,14,0,12,13,0,0,0,7,16,13,15,11,0,0,0,0,5,13,16,16,2,0,0,0,0,3,14,2,0,0,4 +0,0,4,15,7,0,0,0,0,2,15,5,13,5,0,0,0,5,11,0,2,13,2,0,0,5,8,0,0,6,8,0,0,7,8,0,0,5,8,0,0,3,11,0,0,9,7,0,0,0,14,10,6,15,2,0,0,0,6,12,13,6,0,0,0 +0,0,4,16,1,0,0,0,0,0,10,12,0,0,0,0,0,1,14,8,0,0,0,0,0,3,16,16,14,4,0,0,0,3,16,8,6,15,5,0,0,1,14,3,0,7,12,0,0,0,11,11,7,16,8,0,0,0,5,14,14,9,0,0,6 +0,0,9,13,16,5,0,0,0,7,16,12,9,12,0,0,0,6,14,1,1,7,1,0,0,1,13,16,16,12,3,0,0,0,9,16,12,0,0,0,0,1,16,8,16,2,0,0,0,4,14,2,16,4,0,0,0,0,13,16,10,0,0,0,8 +0,0,3,11,0,0,0,0,0,0,11,10,0,0,0,0,0,0,16,5,0,0,0,0,0,2,16,5,4,2,0,0,0,3,16,16,16,16,6,0,0,2,16,6,0,6,13,0,0,0,15,7,3,12,11,0,0,0,4,14,16,11,3,0,6 +0,0,9,16,11,5,0,0,0,5,16,10,9,16,3,0,0,8,12,0,0,12,2,0,0,5,16,12,15,16,7,0,0,0,11,16,16,5,0,0,0,1,15,11,13,10,0,0,0,3,16,6,11,12,0,0,0,0,10,16,13,4,0,0,8 +0,0,0,9,4,0,0,0,0,0,0,16,3,0,0,0,0,0,8,12,0,1,0,0,0,4,15,2,1,15,3,0,0,8,13,0,8,16,3,0,0,8,16,16,16,13,4,0,0,0,4,10,13,0,0,0,0,0,0,7,12,0,0,0,4 +0,0,3,10,13,1,0,0,0,2,15,10,16,3,0,0,0,2,4,1,16,4,0,0,0,0,0,7,16,0,0,0,0,0,7,14,16,12,3,0,0,2,13,15,13,9,2,0,0,0,0,13,8,0,0,0,0,0,1,15,3,0,0,0,7 +0,0,0,0,15,16,3,0,0,0,0,0,15,16,2,0,0,0,0,4,16,16,2,0,0,0,8,16,16,13,0,0,0,8,16,16,16,9,0,0,0,1,8,6,16,12,0,0,0,0,0,3,16,13,0,0,0,0,0,0,13,13,0,0,1 +0,0,0,0,13,12,0,0,0,0,0,2,16,16,2,0,0,0,0,7,16,16,2,0,0,4,14,16,16,15,0,0,0,9,16,13,16,10,0,0,0,0,0,4,16,12,0,0,0,0,0,1,16,15,0,0,0,0,0,0,13,16,4,0,1 +0,2,15,13,0,0,0,0,0,12,16,16,6,0,0,0,0,14,7,11,10,0,0,0,0,3,1,10,12,0,0,0,0,0,1,15,6,0,0,0,0,0,12,14,0,0,0,0,0,4,16,15,12,12,5,0,0,2,15,16,16,16,13,0,2 +0,0,3,11,12,12,1,0,0,0,15,12,4,14,2,0,0,0,15,12,5,16,4,0,0,0,6,16,16,13,1,0,0,0,8,15,14,13,1,0,0,1,15,4,3,16,6,0,0,3,16,5,6,15,1,0,0,0,9,14,13,6,0,0,8 +0,0,8,15,16,3,0,0,0,3,15,14,16,5,0,0,0,0,3,6,16,2,0,0,0,0,3,12,16,8,3,0,0,1,14,16,16,16,10,0,0,0,9,16,12,5,0,0,0,0,8,16,4,0,0,0,0,0,11,11,0,0,0,0,7 
+0,0,0,8,15,1,0,0,0,0,1,15,11,0,0,0,0,0,12,14,1,8,15,1,0,7,16,6,4,16,10,0,0,9,16,16,16,16,1,0,0,1,5,12,16,11,1,0,0,0,0,10,15,1,0,0,0,0,0,10,10,0,0,0,4 +0,0,0,10,16,7,0,0,0,0,0,9,16,16,2,0,0,0,0,11,16,14,0,0,0,0,0,14,16,12,0,0,0,0,7,16,16,8,0,0,0,2,15,16,16,7,0,0,0,3,10,16,16,8,0,0,0,0,0,11,16,15,3,0,1 +0,0,6,12,14,15,2,0,0,0,14,9,8,5,0,0,0,3,14,6,0,0,0,0,0,6,16,16,15,9,0,0,0,0,4,4,7,15,7,0,0,0,0,0,0,11,8,0,0,0,2,9,13,16,4,0,0,0,7,15,12,6,0,0,5 +0,0,6,13,15,9,0,0,0,4,15,8,4,16,4,0,0,8,9,0,0,15,8,0,0,7,15,9,10,16,8,0,0,0,5,8,5,12,8,0,0,0,0,0,0,12,8,0,0,0,11,9,3,14,3,0,0,0,4,15,16,7,0,0,9 +0,0,8,12,1,0,0,0,0,0,15,16,11,0,0,0,0,0,10,3,14,1,0,0,0,0,0,0,11,2,0,0,0,0,0,0,14,3,0,0,0,0,0,4,16,1,0,0,0,0,6,16,16,16,11,1,0,0,5,16,13,12,12,6,2 +0,0,9,16,10,0,0,0,0,1,15,15,16,3,0,0,0,1,16,6,13,8,0,0,0,0,10,5,11,9,0,0,0,0,0,0,14,8,0,0,0,0,0,3,16,5,0,0,0,0,9,16,16,16,14,4,0,0,9,16,14,12,16,8,2 +0,0,0,15,3,0,0,0,0,0,3,16,2,0,0,0,0,0,7,15,0,0,0,0,0,0,10,12,3,0,0,0,0,0,14,16,16,11,2,0,0,1,15,11,1,11,11,0,0,0,12,13,4,8,13,0,0,0,2,11,14,13,5,0,6 +0,0,5,14,16,6,0,0,0,2,15,10,7,15,0,0,0,0,3,0,9,7,0,0,0,0,0,6,16,5,0,0,0,0,0,2,11,16,3,0,0,0,0,0,0,11,7,0,0,0,8,8,7,15,3,0,0,0,7,15,13,6,0,0,3 +0,0,4,11,0,0,0,0,0,0,8,9,0,0,0,0,0,0,15,4,0,0,0,0,0,2,16,8,7,2,0,0,0,6,16,12,13,14,4,0,0,5,16,2,1,10,11,0,0,0,16,9,4,16,9,0,0,0,7,14,15,8,0,0,6 +0,0,9,15,15,7,0,0,0,3,16,7,7,11,0,0,0,0,16,10,3,6,0,0,0,0,7,16,16,15,4,0,0,0,9,16,16,3,0,0,0,1,15,4,11,10,0,0,0,4,14,3,9,11,0,0,0,0,8,15,15,4,0,0,8 +0,1,8,16,14,0,0,0,0,4,15,9,16,3,0,0,0,1,4,7,16,0,0,0,0,0,0,11,16,9,0,0,0,0,0,0,5,15,5,0,0,0,0,0,0,15,5,0,0,0,15,9,9,15,2,0,0,0,6,13,10,4,0,0,3 +0,0,0,0,7,7,0,0,0,0,0,4,15,1,0,0,0,0,0,13,6,9,1,0,0,0,8,12,0,14,3,0,0,3,16,3,2,16,0,0,0,5,16,16,13,16,4,0,0,0,1,7,12,14,2,0,0,0,0,0,9,10,0,0,4 +0,0,7,12,16,16,7,0,0,0,14,14,11,16,7,0,0,0,2,0,7,16,3,0,0,0,2,11,15,16,4,0,0,0,13,16,16,16,7,0,0,0,4,15,12,0,0,0,0,0,6,16,3,0,0,0,0,0,9,15,2,0,0,0,7 +0,0,0,6,16,0,0,0,0,0,0,9,15,0,0,0,0,0,2,15,8,14,0,0,0,0,12,14,8,16,0,0,0,9,16,12,14,14,3,0,0,8,16,16,16,15,8,0,0,1,4,10,16,5,0,0,0,0,0,5,16,4,0,0,4 +0,0,6,13,12,9,1,0,0,4,16,11,6,13,7,0,0,7,16,6,2,13,8,0,0,1,14,15,16,13,3,0,0,0,7,16,16,5,0,0,0,2,16,9,13,14,0,0,0,1,15,8,7,16,3,0,0,0,6,16,15,7,0,0,8 +0,2,10,13,3,0,0,0,0,10,15,10,14,8,0,0,0,11,14,4,13,14,0,0,0,2,9,12,12,16,4,0,0,0,0,0,0,16,7,0,0,0,0,0,0,12,12,0,0,2,12,4,5,15,11,0,0,0,9,13,12,11,1,0,9 +0,0,2,15,8,0,0,0,0,0,9,14,16,6,0,0,0,0,15,9,3,14,3,0,0,4,16,2,0,9,7,0,0,8,12,0,0,7,8,0,0,3,14,1,0,8,9,0,0,0,15,15,13,15,7,0,0,0,3,14,15,9,0,0,0 +0,0,0,7,14,1,0,0,0,0,0,9,11,0,0,0,0,0,0,12,7,4,8,0,0,0,5,15,2,16,5,0,0,3,16,7,6,16,0,0,0,11,16,15,15,16,2,0,0,0,2,8,16,9,0,0,0,0,0,5,16,2,0,0,4 +0,3,15,16,11,1,0,0,0,12,13,10,16,4,0,0,0,5,1,8,16,1,0,0,0,0,0,16,16,6,0,0,0,0,0,7,13,16,5,0,0,0,0,0,1,15,13,0,0,4,11,8,8,15,13,0,0,2,14,16,16,10,1,0,3 +0,1,12,14,6,0,0,0,0,8,14,5,15,7,0,0,0,6,11,1,11,15,2,0,0,1,14,16,13,16,8,0,0,0,0,0,0,12,8,0,0,1,3,0,0,11,9,0,0,4,13,2,1,13,6,0,0,0,10,16,16,9,0,0,9 +0,0,0,12,11,0,0,0,0,0,5,16,8,0,0,0,0,0,12,11,0,0,0,0,0,0,15,8,4,1,0,0,0,3,16,16,16,15,5,0,0,2,16,3,1,9,13,0,0,0,12,9,4,13,13,0,0,0,0,11,16,13,3,0,6 +0,0,4,15,15,5,0,0,0,1,16,12,11,15,3,0,0,7,16,2,4,16,4,0,0,5,16,16,16,16,5,0,0,0,0,1,0,16,8,0,0,0,0,0,0,14,8,0,0,0,10,13,8,16,5,0,0,0,7,13,14,9,1,0,9 +0,0,5,15,8,1,0,0,0,1,15,13,15,9,0,0,0,8,15,1,3,14,1,0,0,4,14,0,0,11,8,0,0,5,12,0,0,12,8,0,0,5,15,1,0,12,10,0,0,1,16,10,9,15,3,0,0,0,5,15,15,4,0,0,0 +0,0,0,2,13,2,0,0,0,0,0,14,10,0,0,0,0,0,11,10,0,10,4,0,0,4,16,3,1,14,3,0,0,6,16,16,16,16,5,0,0,0,5,8,14,11,1,0,0,0,0,0,15,2,0,0,0,0,0,1,13,0,0,0,4 
+0,0,0,1,15,5,0,0,0,0,0,11,15,0,7,1,0,0,8,15,2,5,16,0,0,3,15,8,4,12,11,0,0,10,16,16,16,16,7,0,0,2,8,8,13,16,1,0,0,0,0,0,14,10,0,0,0,0,0,2,14,3,0,0,4 +0,2,16,10,1,0,0,0,0,3,16,16,8,0,0,0,0,1,11,11,12,0,0,0,0,0,0,4,16,0,0,0,0,0,0,9,12,0,0,0,0,0,5,15,8,0,0,0,0,5,16,16,16,16,13,0,0,2,16,16,16,16,14,0,2 +0,1,7,9,12,14,1,0,0,11,16,15,10,5,1,0,0,2,16,7,0,0,0,0,0,0,9,16,16,9,0,0,0,0,0,2,8,13,6,0,0,0,0,0,0,8,11,0,0,0,7,13,14,16,4,0,0,0,10,16,12,3,0,0,5 +0,1,10,16,14,3,0,0,0,4,16,13,15,14,0,0,0,1,1,0,9,16,2,0,0,0,5,14,16,13,0,0,0,0,7,16,16,15,2,0,0,0,0,0,4,15,9,0,0,1,13,8,13,16,5,0,0,1,14,16,14,7,0,0,3 +0,0,0,11,16,8,0,0,0,0,9,15,5,3,0,0,0,0,14,6,0,0,0,0,0,4,14,0,0,0,0,0,0,5,11,2,4,1,0,0,0,4,14,15,15,15,3,0,0,1,11,16,11,11,15,0,0,0,1,11,15,16,12,0,6 +0,3,16,14,10,5,0,0,0,1,8,8,11,16,2,0,0,0,2,6,12,11,1,0,0,0,12,16,16,6,0,0,0,0,0,3,11,16,2,0,0,0,0,0,0,14,7,0,0,1,8,9,14,13,0,0,0,3,16,12,9,1,0,0,3 +0,0,2,11,13,1,0,0,0,0,11,15,8,1,0,0,0,1,16,5,0,0,0,0,0,4,15,0,0,0,0,0,0,3,16,10,12,10,2,0,0,2,16,15,11,12,14,0,0,0,14,12,7,11,16,2,0,0,3,11,16,14,6,0,6 +0,0,7,13,5,0,0,0,0,4,14,4,14,13,0,0,0,0,13,8,12,16,4,0,0,0,5,12,11,6,6,0,0,0,0,0,0,5,9,0,0,0,0,0,0,9,7,0,0,0,2,2,8,16,2,0,0,0,10,16,12,3,0,0,9 +0,0,8,15,15,1,0,0,0,4,16,16,16,10,0,0,0,7,12,0,1,16,0,0,0,6,11,0,0,9,7,0,0,8,12,0,0,9,4,0,0,4,16,1,2,15,8,0,0,0,15,16,16,16,1,0,0,0,7,16,16,8,0,0,0 +0,3,15,16,3,0,0,0,0,11,16,15,12,0,0,0,0,1,1,6,16,0,0,0,0,0,0,5,16,0,0,0,0,0,0,14,12,0,0,0,0,0,9,16,9,8,5,0,0,3,16,16,16,16,11,0,0,5,16,16,13,8,1,0,2 +0,0,7,16,16,16,13,0,0,0,6,9,11,15,13,0,0,0,0,0,2,16,5,0,0,0,0,5,13,16,4,0,0,0,9,16,16,16,8,0,0,0,3,14,11,0,0,0,0,0,3,16,5,0,0,0,0,0,7,15,0,0,0,0,7 +0,0,4,12,15,3,0,0,0,3,16,14,14,13,0,0,0,5,16,1,2,15,5,0,0,8,16,0,0,9,11,0,0,5,16,0,0,8,12,0,0,0,15,2,0,11,13,0,0,0,10,12,12,16,9,0,0,0,4,16,16,9,0,0,0 +0,0,6,11,15,16,12,0,0,0,6,8,5,12,10,0,0,0,0,0,2,15,2,0,0,0,7,12,13,15,4,0,0,0,6,10,16,9,4,0,0,0,0,13,7,0,0,0,0,0,5,15,2,0,0,0,0,0,10,10,0,0,0,0,7 +0,0,2,11,12,12,15,8,0,0,5,12,12,13,15,2,0,0,0,0,0,7,10,0,0,0,0,4,12,16,8,0,0,0,1,16,16,12,3,0,0,0,0,4,13,1,0,0,0,0,1,12,6,0,0,0,0,0,2,14,1,0,0,0,7 +0,0,9,15,16,16,10,0,0,0,15,11,11,16,4,0,0,0,0,0,12,11,0,0,0,0,2,8,16,16,12,0,0,0,14,16,13,8,2,0,0,0,5,16,6,0,0,0,0,0,8,14,0,0,0,0,0,0,13,9,0,0,0,0,7 +0,0,8,13,11,4,0,0,0,3,16,8,8,15,2,0,0,6,16,8,9,16,8,0,0,0,10,16,13,13,8,0,0,0,0,0,0,11,7,0,0,0,0,0,2,16,3,0,0,0,3,9,15,7,0,0,0,0,10,10,3,0,0,0,9 +0,3,12,11,6,0,0,0,0,11,15,8,16,8,0,0,0,10,11,1,13,15,1,0,0,2,14,16,15,15,8,0,0,0,1,4,0,14,2,0,0,0,0,0,3,16,1,0,0,1,11,9,14,8,0,0,0,2,15,13,10,0,0,0,9 +0,0,7,16,15,5,0,0,0,0,15,7,6,15,2,0,0,3,15,4,5,13,2,0,0,2,15,16,16,15,0,0,0,0,8,16,15,15,3,0,0,0,11,13,1,6,7,0,0,0,16,12,8,11,4,0,0,0,7,15,16,12,2,0,8 +0,0,4,14,15,9,0,0,0,0,14,9,0,2,0,0,0,2,15,1,0,0,0,0,0,5,9,0,0,0,0,0,0,7,12,10,15,7,0,0,0,4,16,15,3,11,6,0,0,0,13,15,14,11,10,0,0,0,5,14,16,11,2,0,6 +0,0,6,12,15,10,0,0,0,5,15,5,4,11,6,0,0,7,11,4,5,14,6,0,0,2,16,16,16,6,0,0,0,0,15,15,12,14,1,0,0,0,14,3,0,9,6,0,0,0,16,7,6,14,6,0,0,0,7,15,12,9,0,0,8 +0,4,14,15,7,1,0,0,0,8,14,12,16,4,0,0,0,0,0,0,12,8,0,0,0,0,2,9,16,7,0,0,0,0,3,13,13,16,5,0,0,0,0,0,0,12,8,0,0,3,11,7,12,16,4,0,0,4,14,16,11,4,0,0,3 +0,0,5,14,15,6,0,0,0,1,16,8,8,15,2,0,0,0,16,10,10,15,5,0,0,0,11,16,16,9,0,0,0,0,6,16,16,16,3,0,0,0,9,13,0,10,7,0,0,0,12,10,8,15,2,0,0,0,7,16,13,5,0,0,8 +0,1,8,14,16,5,0,0,0,5,15,7,11,11,0,0,0,0,1,0,11,9,0,0,0,0,3,11,16,3,0,0,0,0,12,16,16,15,3,0,0,0,2,1,1,12,8,0,0,0,5,9,14,15,3,0,0,0,11,12,8,1,0,0,3 
+0,0,3,14,13,1,0,0,0,0,12,12,3,2,0,0,0,1,16,2,0,0,0,0,0,4,14,0,0,0,0,0,0,2,14,0,2,2,0,0,0,2,14,14,16,16,8,0,0,0,11,16,11,11,16,2,0,0,1,10,12,12,9,0,6 +0,3,7,13,16,12,2,0,0,10,16,12,6,2,0,0,0,9,16,8,1,0,0,0,0,4,11,16,15,6,0,0,0,0,0,1,8,15,5,0,0,0,0,0,1,14,7,0,0,0,11,13,16,15,1,0,0,0,10,12,8,3,0,0,5 +0,0,5,11,15,5,0,0,0,6,13,2,3,15,3,0,0,7,13,8,11,16,8,0,0,1,9,12,8,8,8,0,0,0,0,0,1,13,4,0,0,0,0,0,9,12,0,0,0,0,0,8,13,0,0,0,0,0,7,10,1,0,0,0,9 +0,0,0,6,15,1,0,0,0,0,6,16,8,3,7,0,0,1,16,12,0,12,11,0,0,5,16,16,13,16,12,0,0,3,12,15,16,16,7,0,0,0,0,0,14,10,0,0,0,0,0,3,16,3,0,0,0,0,0,8,14,1,0,0,4 +0,0,0,10,14,9,0,0,0,0,10,14,6,4,0,0,0,1,16,5,0,0,0,0,0,6,12,0,0,0,0,0,0,7,10,1,4,1,0,0,0,2,15,16,14,14,1,0,0,0,12,12,4,14,11,0,0,0,1,10,13,12,4,0,6 +0,1,9,12,12,13,16,4,0,1,11,12,12,16,13,1,0,0,0,0,7,16,2,0,0,0,8,12,15,13,0,0,0,0,16,16,16,12,0,0,0,0,0,13,10,0,0,0,0,0,7,16,5,0,0,0,0,0,12,15,2,0,0,0,7 +0,0,6,16,16,13,7,0,0,0,14,12,16,16,9,0,0,0,0,0,5,15,1,0,0,0,0,4,11,14,6,0,0,0,3,16,16,16,10,0,0,0,1,12,13,4,1,0,0,0,3,15,5,0,0,0,0,0,7,15,0,0,0,0,7 +0,0,3,15,13,1,0,0,0,1,15,12,13,10,0,0,0,3,16,1,1,12,2,0,0,6,12,0,0,4,6,0,0,4,11,0,0,4,8,0,0,0,13,3,5,13,8,0,0,0,10,15,16,16,5,0,0,0,2,14,12,5,0,0,0 +0,0,0,10,10,0,0,0,0,0,2,15,7,0,7,1,0,1,13,13,0,5,16,3,0,6,16,12,8,14,14,0,0,4,15,16,16,16,9,0,0,0,0,3,15,13,0,0,0,0,0,6,16,4,0,0,0,0,0,12,8,0,0,0,4 +0,0,8,16,8,0,0,0,0,6,14,5,13,6,0,0,0,7,12,2,7,16,2,0,0,0,12,16,16,16,6,0,0,0,0,3,3,9,8,0,0,0,0,0,5,16,3,0,0,0,3,9,16,8,0,0,0,0,10,11,4,0,0,0,9 +0,1,15,15,1,0,0,0,0,7,16,16,11,0,0,0,0,3,7,8,13,0,0,0,0,0,0,7,16,2,0,0,0,0,2,15,9,0,0,0,0,0,8,16,5,0,3,0,0,3,15,16,16,16,16,0,0,2,16,16,16,16,10,0,2 +0,0,7,12,12,7,0,0,0,0,11,16,16,11,0,0,0,0,10,16,16,7,0,0,0,0,12,16,16,6,0,0,0,0,12,16,16,7,0,0,0,0,13,16,16,11,0,0,0,0,16,16,16,13,0,0,0,0,6,10,8,3,0,0,1 +0,0,7,12,13,1,0,0,0,6,16,6,8,7,0,0,0,0,16,10,11,8,0,0,0,0,8,15,16,5,0,0,0,0,10,15,12,14,1,0,0,0,15,7,0,11,7,0,0,0,15,8,8,15,7,0,0,0,6,12,12,8,1,0,8 +0,0,8,15,11,0,0,0,0,7,16,15,15,9,0,0,0,10,14,0,3,12,2,0,0,5,12,0,0,5,7,0,0,4,11,0,0,7,8,0,0,3,11,0,6,16,6,0,0,0,14,13,16,14,1,0,0,0,8,15,11,2,0,0,0 +0,0,9,16,14,2,0,0,0,0,10,16,16,4,0,0,0,4,16,16,16,0,0,0,0,0,14,16,16,2,0,0,0,1,16,16,16,3,0,0,0,1,16,16,16,0,0,0,0,3,16,16,16,7,0,0,0,1,9,15,16,8,0,0,1 +0,0,9,15,2,0,0,0,0,3,16,14,13,1,0,0,0,0,6,0,14,2,0,0,0,0,0,1,15,0,0,0,0,0,0,5,14,0,0,0,0,0,1,12,9,0,0,0,0,0,12,16,16,16,16,2,0,0,8,13,11,8,7,0,2 +0,0,4,12,7,0,0,0,0,2,13,16,16,6,0,0,0,5,16,3,5,15,0,0,0,8,13,0,0,11,1,0,0,7,12,0,0,8,4,0,0,5,10,0,0,9,5,0,0,1,15,13,13,16,3,0,0,0,5,14,16,10,0,0,0 +0,0,8,15,14,6,0,0,0,4,16,5,6,16,3,0,0,7,16,2,3,16,8,0,0,2,14,16,16,16,8,0,0,0,0,2,0,9,8,0,0,0,0,0,5,16,2,0,0,0,0,8,14,5,0,0,0,0,9,8,1,0,0,0,9 +0,0,11,12,7,0,0,0,0,0,5,16,16,9,0,0,0,0,4,16,16,12,0,0,0,0,4,16,16,13,0,0,0,0,3,15,16,12,0,0,0,0,2,13,16,12,0,0,0,0,11,16,16,5,0,0,0,0,8,10,8,0,0,0,1 +0,0,5,8,8,10,13,4,0,0,10,12,12,14,14,1,0,0,0,0,0,12,5,0,0,0,2,4,7,16,0,0,0,0,10,16,16,10,0,0,0,0,0,8,11,0,0,0,0,0,0,16,7,0,0,0,0,0,7,16,3,0,0,0,7 +0,0,7,13,10,0,0,0,0,0,16,16,16,9,0,0,0,3,12,2,9,16,4,0,0,6,9,0,0,13,7,0,0,8,12,0,0,8,8,0,0,7,15,4,2,12,8,0,0,2,16,16,16,16,5,0,0,0,7,14,12,7,0,0,0 +0,0,3,12,11,7,0,0,0,0,8,16,16,6,0,0,0,3,16,16,16,6,0,0,0,4,16,16,15,1,0,0,0,1,8,9,16,4,0,0,0,1,15,16,16,9,0,0,0,0,8,16,16,12,0,0,0,0,4,11,11,6,0,0,1 +0,0,10,16,16,16,11,0,0,0,9,11,8,16,9,0,0,0,0,0,5,16,3,0,0,0,2,11,15,16,11,0,0,0,6,16,16,10,5,0,0,0,0,13,11,0,0,0,0,0,7,16,3,0,0,0,0,0,13,11,0,0,0,0,7 
+0,0,9,15,8,0,0,0,0,6,14,2,7,10,0,0,0,9,10,0,2,16,3,0,0,5,16,16,16,14,2,0,0,6,16,16,16,14,1,0,0,5,16,0,0,9,11,0,0,1,16,9,7,14,6,0,0,0,9,12,13,8,0,0,8 +0,0,3,11,12,4,0,0,0,6,15,6,5,13,0,0,0,7,13,0,2,16,1,0,0,3,16,16,16,14,0,0,0,0,15,13,8,13,6,0,0,0,12,1,0,3,9,0,0,0,9,12,8,14,11,0,0,0,5,13,12,9,1,0,8 +0,0,0,10,16,6,0,0,0,0,4,16,16,4,0,0,0,0,14,16,16,4,0,0,0,0,16,16,16,8,0,0,0,0,16,16,16,8,0,0,0,0,15,16,16,14,3,0,0,0,10,16,16,16,8,0,0,0,1,12,12,15,7,0,1 +0,0,0,4,15,0,0,0,0,0,2,13,12,0,3,0,0,0,11,15,2,6,16,0,0,8,16,12,8,15,12,0,0,7,16,16,16,16,7,0,0,0,4,7,14,13,0,0,0,0,0,1,16,9,0,0,0,0,0,3,16,3,0,0,4 +0,3,12,15,16,14,2,0,0,7,16,11,4,7,1,0,0,9,16,6,0,0,0,0,0,4,15,16,11,0,0,0,0,0,0,6,16,6,0,0,0,0,0,0,9,12,0,0,0,1,12,12,16,6,0,0,0,3,16,16,7,0,0,0,5 +0,0,7,16,13,2,0,0,0,2,15,16,16,11,0,0,0,7,14,4,6,16,0,0,0,4,12,0,0,7,7,0,0,6,12,0,1,11,8,0,0,2,15,8,13,16,9,0,0,0,15,16,16,15,2,0,0,0,5,14,11,1,0,0,0 +0,0,8,14,5,0,0,0,0,8,13,5,14,5,0,0,0,8,13,2,5,14,3,0,0,4,16,14,15,16,8,0,0,0,2,5,7,10,7,0,0,0,0,0,3,15,3,0,0,0,0,4,14,10,0,0,0,0,10,13,6,0,0,0,9 +0,1,8,15,10,0,0,0,0,4,16,16,16,9,0,0,0,0,12,16,16,12,0,0,0,0,10,16,16,8,0,0,0,0,8,16,16,10,0,0,0,0,9,16,16,13,0,0,0,0,8,16,16,12,0,0,0,0,5,15,16,9,1,0,1 +0,3,9,14,15,6,0,0,0,7,13,7,8,16,0,0,0,0,0,0,7,12,0,0,0,0,1,9,16,10,0,0,0,0,5,15,12,15,7,0,0,0,0,0,0,13,9,0,0,0,8,8,14,16,4,0,0,3,16,14,9,1,0,0,3 +0,3,11,13,8,1,0,0,0,3,11,8,15,7,0,0,0,0,0,2,14,6,0,0,0,0,7,16,16,11,0,0,0,0,5,12,11,16,8,0,0,0,0,0,6,16,7,0,0,2,12,16,16,7,0,0,0,4,13,11,2,0,0,0,3 +0,0,5,14,12,2,0,0,0,1,16,11,5,11,0,0,0,4,14,0,0,9,4,0,0,8,10,0,0,5,8,0,0,8,8,0,0,8,8,0,0,4,11,0,0,10,5,0,0,2,16,11,12,14,0,0,0,0,5,14,14,3,0,0,0 +0,0,4,15,12,2,0,0,0,0,13,16,16,14,0,0,0,7,16,3,2,15,2,0,0,8,16,4,0,4,8,0,0,8,15,3,0,6,8,0,0,4,15,4,2,13,10,0,0,0,13,16,16,16,6,0,0,0,4,15,16,9,0,0,0 +0,0,0,10,12,2,0,0,0,0,11,16,16,10,0,0,0,3,16,5,6,12,2,0,0,5,12,0,0,6,8,0,0,4,16,0,0,6,8,0,0,2,15,7,2,13,8,0,0,0,9,16,14,16,5,0,0,0,0,12,16,10,0,0,0 +0,0,4,16,3,0,0,0,0,0,8,16,2,0,1,0,0,0,11,13,3,15,5,0,0,2,16,6,11,15,0,0,0,11,16,12,16,13,4,0,1,12,12,15,15,11,2,0,0,0,1,16,5,0,0,0,0,0,6,16,0,0,0,0,4 +0,0,0,14,7,1,6,0,0,0,6,15,2,11,15,0,0,2,14,10,2,16,8,0,0,7,16,10,13,16,14,0,0,6,16,16,16,11,5,0,0,0,0,7,15,1,0,0,0,0,0,10,10,0,0,0,0,0,1,15,4,0,0,0,4 +0,0,0,8,12,0,0,0,0,0,0,15,5,11,7,0,0,0,8,12,1,16,3,0,0,3,15,5,6,14,0,0,0,12,15,10,15,16,7,0,0,12,16,16,16,10,3,0,0,0,2,7,13,0,0,0,0,0,0,11,10,0,0,0,4 +0,0,4,13,15,6,0,0,0,4,15,6,7,15,2,0,0,8,14,0,4,16,5,0,0,2,15,10,14,7,0,0,0,0,7,16,14,0,0,0,0,0,12,9,15,6,0,0,0,0,12,8,11,8,0,0,0,0,3,15,15,3,0,0,8 +0,0,4,8,13,6,0,0,0,5,16,11,9,12,1,0,0,7,13,0,12,16,4,0,0,6,14,9,15,12,3,0,0,0,8,11,10,14,0,0,0,0,0,0,16,6,0,0,0,0,0,8,12,0,0,0,0,0,6,10,0,0,0,0,9 +0,0,7,12,15,4,0,0,0,4,16,9,7,12,0,0,0,0,5,0,10,8,0,0,0,0,0,13,16,4,0,0,0,0,0,10,9,16,3,0,0,0,0,0,0,11,7,0,0,0,8,6,6,15,3,0,0,0,8,15,12,3,0,0,3 +0,0,11,13,1,0,0,0,0,5,16,14,10,0,0,0,0,9,10,8,12,0,0,0,0,5,3,10,7,0,0,0,0,0,2,15,2,0,0,0,0,0,11,10,1,5,3,0,0,0,16,12,14,16,9,0,0,0,13,16,11,3,0,0,2 +0,0,6,12,14,16,16,4,0,3,16,15,12,15,16,4,0,1,5,0,1,16,11,0,0,0,0,0,9,15,3,0,0,0,0,4,16,8,0,0,0,0,0,12,16,0,0,0,0,0,6,16,6,0,0,0,0,0,10,15,0,0,0,0,7 +0,0,4,16,13,13,5,0,0,0,9,15,7,4,2,0,0,3,14,9,0,0,0,0,0,5,16,16,13,5,0,0,0,0,0,2,10,15,1,0,0,0,0,0,5,15,1,0,0,0,1,4,14,6,0,0,0,0,2,16,10,0,0,0,5 +0,0,0,16,14,5,0,0,0,0,7,16,16,7,0,0,0,0,7,16,16,1,0,0,0,0,12,16,13,0,0,0,0,0,14,16,10,0,0,0,0,0,14,16,8,0,0,0,0,0,11,16,12,0,0,0,0,0,2,10,16,7,0,0,1 
+0,1,15,16,16,16,15,2,0,0,12,10,9,16,14,2,0,0,0,0,9,16,4,0,0,0,0,5,15,6,0,0,0,0,0,13,12,0,0,0,0,0,9,15,2,0,0,0,0,1,16,10,0,0,0,0,0,3,16,7,0,0,0,0,7 +0,0,2,13,16,4,0,0,0,0,12,12,10,13,0,0,0,4,16,2,0,15,2,0,0,5,16,1,0,8,8,0,0,8,12,0,0,8,8,0,0,5,15,1,0,9,8,0,0,1,15,11,8,16,3,0,0,0,4,15,14,5,0,0,0 +0,0,8,16,9,0,0,0,0,4,15,10,16,0,0,0,0,5,5,7,12,0,0,0,0,0,0,16,14,9,0,0,0,0,0,8,8,15,7,0,0,0,0,0,0,11,8,0,0,0,15,5,7,15,3,0,0,0,6,13,13,6,0,0,3 +0,0,0,0,7,16,11,0,0,0,0,1,16,16,7,0,0,0,4,15,16,5,0,0,0,4,16,16,15,0,0,0,0,1,8,16,16,3,0,0,0,0,0,10,16,9,0,0,0,0,0,4,16,12,0,0,0,0,0,0,7,12,0,0,1 +0,0,5,14,16,9,0,0,0,0,8,15,12,14,3,0,0,3,15,12,0,0,0,0,0,12,16,7,0,0,0,0,0,8,14,16,10,1,0,0,0,0,1,8,16,4,0,0,0,0,3,11,16,3,0,0,0,0,5,16,12,0,0,0,5 +0,0,0,11,13,1,3,0,0,0,0,15,8,13,13,0,0,0,9,15,4,16,7,0,0,5,16,13,12,16,8,0,0,8,16,16,16,15,7,0,0,0,0,6,16,4,0,0,0,0,0,7,13,0,0,0,0,0,0,11,7,0,0,0,4 +0,1,7,15,10,0,0,0,0,6,14,7,16,2,0,0,0,6,14,12,16,13,0,0,0,1,12,16,11,0,0,0,0,0,0,13,16,3,0,0,0,0,4,12,3,14,2,0,0,0,8,9,0,11,8,0,0,0,6,16,16,11,2,0,8 +0,0,0,10,8,0,0,0,0,0,2,16,11,1,0,0,0,0,6,15,1,0,0,0,0,0,10,11,0,0,0,0,0,0,13,12,8,6,0,0,0,0,13,16,16,16,12,1,0,0,9,16,13,11,16,4,0,0,0,10,15,12,5,0,6 +0,0,0,9,12,2,0,0,0,0,2,13,16,3,0,0,0,0,9,16,16,1,0,0,0,5,15,14,16,5,0,0,0,2,1,12,16,5,0,0,0,0,0,10,16,4,0,0,0,0,0,12,15,3,0,0,0,0,0,7,15,13,1,0,1 +0,0,2,8,12,13,9,1,0,0,15,16,14,16,16,0,0,4,16,14,13,16,12,0,0,0,13,14,11,16,10,0,0,0,0,0,5,16,7,0,0,0,0,0,13,15,3,0,0,0,0,10,15,2,0,0,0,0,1,16,5,0,0,0,9 +0,0,9,15,6,0,0,0,0,1,16,6,14,2,0,0,0,0,14,1,8,8,0,0,0,0,7,7,5,9,0,0,0,0,0,0,9,5,0,0,0,0,0,1,14,2,0,0,0,0,5,15,12,11,6,0,0,0,15,14,12,8,8,0,2 +0,0,11,16,16,13,4,0,0,2,16,14,9,8,8,0,0,7,16,4,0,0,0,0,0,12,16,16,12,1,0,0,0,2,8,10,16,9,0,0,0,0,1,0,12,12,0,0,0,0,15,12,16,6,0,0,0,0,13,16,9,0,0,0,5 +0,1,11,16,13,1,0,0,0,9,16,10,15,8,0,0,0,7,13,1,12,11,0,0,0,0,0,5,15,9,0,0,0,0,0,16,16,16,7,0,0,0,0,7,3,13,12,0,0,0,7,8,10,16,5,0,0,0,13,16,16,6,0,0,3 +0,2,13,15,5,0,0,0,0,9,16,13,14,0,0,0,0,7,6,2,16,0,0,0,0,0,0,3,16,1,0,0,0,0,0,11,12,0,0,0,0,0,5,16,7,0,0,0,0,2,16,16,9,11,11,1,0,2,15,16,16,16,11,1,2 +0,0,0,9,13,1,0,0,0,0,3,15,6,12,0,0,0,1,10,9,0,10,3,0,0,4,16,5,0,5,7,0,0,5,16,3,0,6,8,0,0,0,16,5,0,6,9,0,0,0,8,14,7,15,3,0,0,0,0,10,16,9,0,0,0 +0,0,2,8,12,5,0,0,0,0,3,16,16,12,0,0,0,0,5,16,16,8,0,0,0,0,5,16,16,11,0,0,0,0,9,16,16,5,0,0,0,0,8,16,16,8,0,0,0,0,6,16,16,10,0,0,0,0,1,10,10,7,0,0,1 +0,0,11,16,13,2,0,0,0,7,16,11,12,14,0,0,0,9,13,0,11,14,0,0,0,0,0,11,16,14,2,0,0,0,0,10,9,15,10,0,0,0,0,0,0,9,12,0,0,0,14,8,9,16,6,0,0,0,11,16,16,10,0,0,3 +0,0,0,4,9,15,2,0,0,0,6,15,11,13,4,0,0,3,16,4,4,15,0,0,0,0,15,16,16,16,1,0,0,0,2,4,3,15,6,0,0,0,0,0,1,16,1,0,0,0,0,3,12,5,0,0,0,0,0,7,10,0,0,0,9 +0,0,0,0,8,16,7,0,0,0,0,6,16,16,12,0,0,0,8,16,16,16,12,0,0,5,12,8,12,16,8,0,0,0,0,0,12,16,5,0,0,0,0,0,15,16,1,0,0,0,0,0,16,16,0,0,0,0,0,0,11,16,2,0,1 +0,2,15,11,1,0,0,0,0,13,15,15,8,0,0,0,0,16,9,8,11,0,0,0,0,7,1,10,9,0,0,0,0,0,0,13,7,0,0,0,0,0,2,16,5,0,0,0,0,1,14,15,16,16,12,2,0,2,13,16,14,11,6,0,2 +0,0,0,9,16,10,1,0,0,0,9,13,4,14,8,0,0,4,15,6,10,16,7,0,0,5,16,14,11,16,4,0,0,0,0,0,5,15,1,0,0,0,0,1,14,7,0,0,0,0,0,7,12,0,0,0,0,0,0,11,8,0,0,0,9 +0,0,4,11,16,11,1,0,0,3,15,7,5,14,4,0,0,8,13,0,14,16,5,0,0,0,15,16,11,16,5,0,0,0,1,2,5,13,0,0,0,0,0,1,13,4,0,0,0,0,1,13,7,0,0,0,0,0,6,14,0,0,0,0,9 +0,0,1,12,15,5,0,0,0,0,12,11,4,3,0,0,0,1,15,2,0,0,0,0,0,5,12,0,0,0,0,0,0,3,16,16,11,2,0,0,0,2,16,15,8,12,0,0,0,0,10,9,1,15,5,0,0,0,0,12,16,10,0,0,6 
+0,0,1,12,16,14,1,0,0,0,4,16,16,16,4,0,0,2,14,16,16,16,6,0,0,2,12,16,16,10,0,0,0,0,8,16,16,8,0,0,0,0,11,16,16,5,0,0,0,0,8,16,16,4,0,0,0,0,1,15,16,8,0,0,1 +0,0,0,10,10,0,0,0,0,0,3,15,5,5,0,0,0,0,11,10,8,12,0,0,0,5,16,5,13,10,2,0,0,11,16,16,16,15,8,0,0,0,4,9,14,1,0,0,0,0,0,9,9,0,0,0,0,0,0,13,4,0,0,0,4 +0,0,2,14,7,0,0,0,0,1,9,12,13,6,0,0,0,5,16,8,2,14,0,0,0,6,16,2,0,12,6,0,0,5,15,1,0,9,9,0,0,0,16,1,0,12,8,0,0,0,11,12,8,15,1,0,0,0,3,13,15,7,0,0,0 +0,0,0,14,4,5,1,0,0,0,6,13,1,15,5,0,0,0,12,7,2,16,2,0,0,7,15,6,10,16,6,0,0,12,16,16,16,13,6,0,0,2,7,8,15,3,0,0,0,0,0,8,10,0,0,0,0,0,0,16,4,0,0,0,4 +0,1,10,14,16,16,15,1,0,1,12,11,8,11,15,2,0,0,0,0,1,14,8,0,0,0,0,0,11,11,1,0,0,0,0,5,16,2,0,0,0,0,1,16,6,0,0,0,0,0,7,14,1,0,0,0,0,0,15,11,0,0,0,0,7 +0,0,5,12,10,0,0,0,0,2,16,9,11,2,0,0,0,0,16,5,8,14,0,0,0,1,14,16,16,3,0,0,0,0,3,16,16,4,0,0,0,0,11,12,15,11,0,0,0,2,16,7,2,16,2,0,0,0,9,15,13,11,1,0,8 +0,0,5,14,0,0,0,0,0,0,9,11,1,3,0,0,0,2,14,4,10,11,0,0,0,8,16,12,15,14,6,0,0,9,16,13,16,10,5,0,0,0,0,9,11,0,0,0,0,0,0,15,5,0,0,0,0,0,3,16,3,0,0,0,4 +0,0,2,11,16,16,16,12,0,0,7,15,9,8,12,13,0,0,0,0,0,1,14,5,0,0,0,0,0,11,9,0,0,0,0,0,6,13,0,0,0,0,0,1,14,3,0,0,0,0,0,10,9,0,0,0,0,0,1,16,5,0,0,0,7 +0,0,4,14,11,1,0,0,0,4,16,12,10,8,0,0,0,9,14,1,7,7,0,0,0,1,2,0,15,9,0,0,0,0,0,0,7,15,2,0,0,0,0,0,0,5,11,0,0,0,12,3,4,10,11,0,0,0,6,15,15,11,1,0,3 +0,0,1,11,15,16,5,0,0,0,13,15,13,16,9,0,0,1,16,14,15,16,8,0,0,0,13,16,13,15,15,0,0,0,0,0,1,14,9,0,0,0,0,0,9,14,1,0,0,0,0,9,15,2,0,0,0,0,0,15,7,0,0,0,9 +0,0,5,11,13,10,3,0,0,1,13,11,5,6,3,0,0,6,15,0,0,0,0,0,0,7,13,4,1,0,0,0,0,1,14,16,15,4,0,0,0,0,0,0,8,13,0,0,0,0,8,5,5,15,0,0,0,0,4,12,16,9,0,0,5 +0,2,11,16,16,16,13,0,0,5,12,10,8,13,16,0,0,0,0,0,1,15,11,0,0,0,0,0,13,13,0,0,0,0,0,8,16,2,0,0,0,0,3,15,8,0,0,0,0,0,11,15,0,0,0,0,0,2,16,10,0,0,0,0,7 +0,0,0,6,14,9,0,0,0,0,8,15,8,2,0,0,0,3,15,9,0,0,0,0,0,7,15,0,0,0,0,0,0,7,16,4,6,3,0,0,0,1,16,15,12,15,5,0,0,0,7,14,6,11,14,0,0,0,0,5,13,15,8,0,6 +0,0,8,11,16,14,3,0,0,0,16,14,8,8,3,0,0,4,16,8,0,0,0,0,0,4,16,14,4,0,0,0,0,0,0,9,16,7,0,0,0,0,0,0,14,12,0,0,0,3,15,5,15,7,0,0,0,0,11,16,11,1,0,0,5 +0,0,12,8,2,0,0,0,0,0,16,6,14,6,0,0,0,2,16,10,16,0,0,0,0,10,13,14,10,4,1,0,0,8,16,16,16,16,10,0,0,0,13,15,4,4,1,0,0,0,12,12,0,0,0,0,0,1,14,5,0,0,0,0,4 +0,0,0,3,13,14,1,0,0,0,3,15,16,16,0,0,0,5,16,16,16,14,0,0,0,0,0,4,16,14,0,0,0,0,0,4,16,14,0,0,0,0,0,4,16,10,0,0,0,0,0,4,16,8,0,0,0,0,0,2,12,16,5,0,1 +0,0,7,12,14,11,3,0,0,0,12,13,5,5,3,0,0,1,16,7,0,0,0,0,0,5,16,16,12,1,0,0,0,0,0,1,12,10,0,0,0,0,0,0,1,14,0,0,0,0,11,4,8,11,0,0,0,0,10,16,11,2,0,0,5 +0,0,7,15,15,5,0,0,0,0,15,13,12,15,5,0,0,0,10,14,12,16,3,0,0,0,5,16,16,9,0,0,0,2,15,16,16,3,0,0,0,7,14,2,12,14,1,0,0,3,15,9,12,16,4,0,0,0,5,12,13,8,1,0,8 +0,0,9,16,8,0,0,0,0,9,13,4,13,0,0,0,0,7,6,1,14,0,0,0,0,0,0,9,16,9,0,0,0,0,0,8,6,13,6,0,0,0,0,0,0,4,11,0,0,0,6,0,3,12,5,0,0,0,9,16,16,7,0,0,3 +0,0,6,15,16,16,5,0,0,3,16,14,8,4,1,0,0,10,16,11,3,0,0,0,0,11,16,16,15,3,0,0,0,1,2,1,14,12,0,0,0,0,0,0,7,16,1,0,0,0,5,13,11,15,0,0,0,0,7,16,15,3,0,0,5 +0,0,1,7,12,14,5,0,0,0,8,13,1,8,8,0,0,2,16,6,16,16,9,0,0,0,7,8,6,12,10,0,0,0,0,0,0,12,7,0,0,0,0,0,0,16,2,0,0,0,0,0,9,10,0,0,0,0,0,8,11,4,0,0,9 +0,0,0,9,15,13,2,0,0,0,10,12,4,11,8,0,0,3,15,3,6,15,6,0,0,5,16,14,12,15,7,0,0,0,0,0,0,14,5,0,0,0,0,0,9,12,0,0,0,0,0,8,12,0,0,0,0,0,0,12,5,0,0,0,9 +0,0,4,16,13,2,0,0,0,0,11,7,2,13,0,0,0,3,16,4,0,9,1,0,0,5,13,2,0,5,7,0,0,4,8,0,0,4,8,0,0,4,14,0,0,6,8,0,0,0,13,10,1,14,3,0,0,0,4,12,16,10,0,0,0 +0,1,12,16,6,0,0,0,0,9,16,14,14,3,0,0,0,12,12,0,16,7,0,0,0,2,0,0,15,6,0,0,0,0,0,4,15,1,0,0,0,0,1,12,14,0,0,0,0,2,15,16,14,9,2,0,0,1,13,16,16,16,16,3,2 
+0,0,0,13,4,0,0,0,0,0,4,16,9,9,0,0,0,0,12,8,10,9,0,0,0,7,15,4,15,8,1,0,0,11,16,16,16,16,10,0,0,1,4,10,12,0,0,0,0,0,0,12,8,0,0,0,0,0,0,12,5,0,0,0,4 +0,0,2,12,14,1,0,0,0,0,14,9,11,14,7,0,0,5,12,0,8,11,1,0,0,4,14,8,13,1,0,0,0,0,9,16,4,0,0,0,0,0,7,13,13,1,0,0,0,0,8,7,14,6,0,0,0,0,4,14,12,4,0,0,8 +0,0,2,12,8,0,0,0,0,0,13,11,1,0,0,0,0,2,15,1,0,0,0,0,0,6,13,0,0,0,0,0,0,8,14,10,11,5,0,0,0,4,16,15,8,12,7,0,0,0,15,11,4,11,10,0,0,0,3,13,15,12,3,0,6 +0,0,1,14,10,0,0,0,0,0,9,16,6,0,0,0,0,0,13,16,1,0,0,0,0,0,16,11,0,0,0,0,0,2,16,15,11,6,1,0,0,3,16,16,16,16,11,0,0,0,12,16,14,16,11,0,0,0,1,10,15,11,2,0,6 +0,0,2,14,9,1,0,0,0,1,14,10,11,10,0,0,0,4,14,1,0,12,3,0,0,7,8,0,0,3,6,0,0,7,7,0,0,1,9,0,0,3,12,0,0,5,8,0,0,0,11,5,3,12,6,0,0,0,2,14,16,9,0,0,0 +0,0,0,6,13,9,0,0,0,1,15,16,7,3,0,0,0,7,15,3,0,0,0,0,0,7,16,16,8,0,0,0,0,2,11,5,12,7,0,0,0,0,0,0,2,14,0,0,0,0,0,5,11,16,1,0,0,0,0,4,15,9,0,0,5 +0,0,1,15,4,0,0,0,0,0,9,12,0,0,0,0,0,1,14,6,0,0,0,0,0,1,16,4,3,3,0,0,0,2,16,8,16,15,2,0,0,1,16,16,10,5,12,0,0,0,13,15,8,12,10,0,0,0,0,13,15,10,1,0,6 +0,0,8,13,6,0,0,0,0,2,16,12,11,10,0,0,0,0,16,16,14,8,1,0,0,0,13,11,0,0,0,0,0,2,15,4,0,0,0,0,0,0,16,13,0,0,0,0,0,2,15,16,2,0,0,0,0,0,11,16,4,0,0,0,8 +0,0,8,13,11,5,0,0,0,4,16,14,16,10,0,0,0,1,14,16,7,0,0,0,0,0,11,16,14,2,0,0,0,2,16,3,8,15,1,0,0,6,15,2,1,16,5,0,0,5,16,9,11,16,3,0,0,0,9,15,15,7,0,0,8 +0,0,6,13,10,0,0,0,0,3,16,11,14,6,0,0,0,9,11,0,12,12,0,0,0,4,4,0,9,12,0,0,0,0,0,4,16,3,0,0,0,0,0,13,15,3,0,0,0,0,8,16,15,16,11,0,0,0,4,14,10,2,0,0,2 +0,0,5,16,4,0,0,0,0,0,13,16,1,0,0,0,0,2,16,11,0,0,0,0,0,8,16,12,10,16,6,0,0,2,13,16,16,16,6,0,0,0,1,14,16,6,0,0,0,0,5,16,11,0,0,0,0,0,5,16,7,0,0,0,4 +0,0,0,10,15,4,0,0,0,0,5,16,14,3,0,0,0,0,12,15,1,0,0,0,0,1,15,8,0,0,0,0,0,4,16,12,2,0,0,0,0,3,16,16,15,4,0,0,0,0,12,14,16,10,0,0,0,0,1,9,15,10,0,0,6 +0,0,0,13,7,0,0,0,0,0,3,16,9,0,0,0,0,0,11,15,2,6,4,0,0,4,16,14,13,16,12,0,0,11,16,16,16,16,3,0,0,3,8,10,16,9,0,0,0,0,0,11,16,2,0,0,0,0,1,13,13,0,0,0,4 +0,0,10,13,16,15,4,0,0,0,0,6,9,15,12,0,0,0,0,0,0,16,8,0,0,0,0,0,8,16,2,0,0,1,4,9,15,12,0,0,0,10,16,16,15,1,0,0,0,3,9,16,6,0,0,0,0,0,10,13,0,0,0,0,7 +0,0,2,16,14,2,0,0,0,0,7,15,15,11,0,0,0,0,5,16,15,16,2,0,0,0,0,6,15,16,8,0,0,0,0,0,2,12,11,0,0,0,0,0,0,9,14,0,0,0,0,0,7,15,13,0,0,0,1,12,16,14,7,0,9 +0,0,2,12,14,4,0,0,0,0,9,15,13,13,0,0,0,2,15,3,1,14,4,0,0,4,13,0,0,12,6,0,0,5,9,0,0,12,8,0,0,5,9,0,0,13,5,0,0,1,13,9,13,14,1,0,0,0,4,9,16,6,0,0,0 +0,0,6,16,8,0,0,0,0,3,16,6,16,1,0,0,0,3,7,2,16,2,0,0,0,0,0,7,13,0,0,0,0,0,0,12,8,0,0,0,0,0,2,15,4,1,0,0,0,0,8,12,4,13,6,0,0,0,5,16,15,8,1,0,2 +0,2,11,3,0,2,0,0,0,2,13,12,5,15,6,0,0,0,11,14,14,1,0,0,0,0,7,15,1,0,0,0,0,0,15,12,5,0,0,0,0,4,10,4,8,0,0,0,0,7,9,7,8,0,0,0,0,2,14,15,5,0,0,0,8 +0,0,7,12,9,0,0,0,0,0,13,13,14,9,0,0,0,0,11,12,14,14,0,0,0,0,1,10,12,15,4,0,0,0,0,0,0,11,9,0,0,0,0,0,0,9,9,0,0,0,0,0,1,14,6,0,0,0,5,12,14,11,1,0,9 +0,0,0,1,12,4,0,0,0,0,1,14,16,11,0,0,0,0,9,13,5,15,4,0,0,2,16,2,0,16,3,0,0,1,16,7,7,16,4,0,0,0,13,16,16,16,1,0,0,0,1,13,16,13,0,0,0,0,0,3,12,5,0,0,0 +0,0,7,15,15,8,3,0,0,3,14,3,0,13,8,0,0,6,10,1,6,14,8,0,0,0,8,12,6,8,8,0,0,0,0,0,0,8,6,0,0,0,0,0,0,12,3,0,0,0,0,0,7,13,0,0,0,0,7,15,14,2,0,0,9 +0,0,0,2,14,10,0,0,0,0,1,12,16,13,0,0,0,6,13,16,16,6,0,0,0,5,10,12,16,5,0,0,0,0,0,7,16,5,0,0,0,0,0,4,16,6,0,0,0,0,0,4,16,7,0,0,0,0,0,2,11,14,0,0,1 +0,3,15,12,11,12,2,0,0,12,16,16,16,16,6,0,0,12,13,0,3,4,0,0,0,3,15,13,2,0,0,0,0,0,5,15,11,0,0,0,0,0,0,6,16,3,0,0,0,0,5,12,16,3,0,0,0,4,16,16,12,0,0,0,5 +0,0,2,15,11,1,0,0,0,0,6,16,16,14,0,0,0,0,3,15,16,16,5,0,0,0,0,5,7,11,9,0,0,0,0,0,0,13,13,0,0,0,0,0,1,16,8,0,0,0,0,2,13,16,5,0,0,0,5,15,16,6,0,0,9 
+0,0,0,9,12,0,0,0,0,0,2,16,10,0,0,0,0,0,9,16,3,0,0,0,0,0,14,12,0,0,0,0,0,1,16,5,0,2,0,0,0,2,16,13,16,16,6,0,0,0,8,16,8,12,16,3,0,0,0,5,12,16,15,4,6 +0,0,2,11,16,10,0,0,0,0,10,15,2,14,4,0,0,3,15,1,0,10,8,0,0,7,10,0,0,12,5,0,0,8,8,0,0,14,4,0,0,3,12,0,9,14,1,0,0,0,13,10,16,5,0,0,0,0,2,10,12,2,0,0,0 +0,0,0,2,11,16,8,0,0,1,9,15,16,16,12,0,0,3,15,16,13,16,10,0,0,0,0,3,16,16,6,0,0,0,0,0,16,16,4,0,0,0,0,4,16,16,3,0,0,0,0,4,16,16,1,0,0,0,0,1,13,16,1,0,1 +0,0,3,10,13,4,0,0,0,0,9,13,14,10,0,0,0,0,9,11,16,15,0,0,0,0,1,11,12,16,0,0,0,0,0,0,0,15,3,0,0,0,0,0,1,16,3,0,0,0,0,0,6,16,1,0,0,0,4,16,16,10,0,0,9 +0,2,4,9,13,13,0,0,0,5,15,11,12,16,0,0,0,0,0,0,8,13,0,0,0,0,0,0,12,12,0,0,0,0,0,0,9,16,1,0,0,0,0,0,0,15,8,0,0,0,9,14,11,16,6,0,0,0,1,10,15,9,0,0,3 +0,0,0,5,10,0,0,0,0,0,1,14,12,0,0,0,0,0,4,16,5,0,0,0,0,0,9,16,2,0,0,0,0,0,11,10,0,0,0,0,0,0,11,16,13,12,5,0,0,0,6,16,16,16,16,2,0,0,0,3,12,15,11,3,6 +0,8,12,16,13,0,0,0,0,2,10,10,16,8,0,0,0,0,0,0,10,16,4,0,0,0,0,0,10,16,4,0,0,0,0,4,16,14,0,0,0,0,0,0,12,16,8,0,0,0,4,8,15,16,8,0,0,10,16,16,11,3,0,0,3 +0,0,11,12,12,13,16,8,0,0,9,12,12,13,16,4,0,0,0,0,2,16,11,0,0,0,0,0,13,14,1,0,0,0,0,0,14,14,0,0,0,0,0,0,6,16,6,0,0,0,1,4,9,16,4,0,0,0,14,16,15,6,0,0,3 +0,0,0,9,16,5,0,0,0,1,12,15,15,8,0,0,0,7,13,5,15,4,0,0,0,0,1,6,16,0,0,0,0,0,0,8,16,0,0,0,0,0,0,9,13,0,0,0,0,0,0,12,8,4,3,0,0,0,0,11,16,15,5,0,2 +0,1,6,11,16,6,0,0,0,0,5,13,11,16,6,0,0,0,4,16,8,0,0,0,0,0,5,15,0,0,0,0,0,1,13,10,4,0,0,0,0,6,10,4,8,0,0,0,0,8,7,12,7,0,0,0,0,2,13,14,0,0,0,0,8 +0,0,1,16,14,1,0,0,0,0,3,16,16,4,0,0,0,0,1,16,16,5,0,0,0,0,0,16,16,7,0,0,0,0,7,16,16,0,0,0,0,0,8,16,14,0,0,0,0,0,7,16,15,0,0,0,0,0,2,13,16,0,0,0,1 +0,0,2,15,12,0,0,0,0,0,3,16,14,10,0,0,0,0,0,5,13,16,2,0,0,0,0,0,0,12,8,0,0,0,0,0,0,7,11,0,0,0,0,0,0,7,13,0,0,0,2,0,2,11,11,0,0,0,3,13,15,16,6,0,9 +0,0,0,2,14,0,0,0,0,0,1,13,14,1,0,0,0,0,5,16,4,0,0,0,0,0,11,8,0,0,0,0,0,0,12,8,1,0,0,0,0,0,8,16,16,14,5,0,0,0,4,16,4,8,16,3,0,0,0,4,11,15,14,6,6 +0,0,10,16,15,6,0,0,0,0,0,4,16,15,0,0,0,0,0,0,14,13,0,0,0,0,0,0,14,16,3,0,0,0,0,0,3,15,12,0,0,0,0,0,1,15,15,0,0,0,0,2,12,16,8,0,0,0,7,15,15,7,0,0,3 +0,0,0,0,11,8,0,0,0,0,0,2,16,8,0,0,0,0,0,6,16,2,0,0,0,0,0,7,15,0,0,0,0,0,0,10,14,0,0,0,0,0,13,16,16,11,0,0,0,0,7,7,16,16,10,0,0,0,0,0,8,16,15,0,6 +0,1,13,16,16,16,12,1,0,0,0,3,4,12,16,2,0,0,0,0,4,16,7,0,0,0,0,2,15,12,0,0,0,1,8,13,16,16,5,0,0,3,15,16,12,7,0,0,0,0,13,10,0,0,0,0,0,2,15,7,0,0,0,0,7 +0,2,11,16,10,1,0,0,0,7,14,7,16,14,2,0,0,6,9,7,15,1,0,0,0,1,13,16,7,0,0,0,0,0,9,16,9,0,0,0,0,0,12,16,11,0,0,0,0,3,16,16,3,0,0,0,0,0,14,9,0,0,0,0,8 +0,0,0,10,16,10,0,0,0,0,9,16,12,8,0,0,0,1,15,13,1,0,0,0,0,2,16,5,0,0,0,0,0,2,16,8,0,0,0,0,0,2,16,16,9,0,0,0,0,1,12,16,16,3,0,0,0,0,0,12,16,7,0,0,6 +0,0,1,14,12,1,0,0,0,0,8,16,9,0,0,0,0,0,15,16,0,0,0,0,0,0,16,12,0,0,0,0,0,1,16,11,0,0,0,0,0,0,16,16,15,4,0,0,0,0,11,13,13,16,1,0,0,0,2,12,16,14,2,0,6 +0,0,13,16,12,0,0,0,0,8,16,14,16,0,0,0,0,12,13,7,16,1,0,0,0,5,8,10,15,0,0,0,0,0,1,16,9,0,0,0,0,0,8,16,3,0,0,0,0,0,14,15,8,9,6,0,0,0,16,16,15,11,3,0,2 +0,0,9,16,11,0,0,0,0,5,16,16,16,7,0,0,0,3,16,16,16,15,0,0,0,0,2,6,3,11,5,0,0,0,0,0,0,11,7,0,0,0,0,0,0,13,11,0,0,0,4,5,11,16,5,0,0,0,5,12,12,6,0,0,9 +0,0,1,11,15,2,0,0,0,0,11,14,10,13,0,0,0,2,15,4,0,15,3,0,0,3,13,0,0,9,7,0,0,6,9,0,0,10,8,0,0,2,12,0,0,11,8,0,0,0,13,5,6,16,3,0,0,0,2,14,16,11,1,0,0 +0,0,9,15,1,0,0,0,0,2,16,14,10,0,0,0,0,9,13,8,12,0,0,0,0,9,8,12,9,0,0,0,0,1,1,14,6,0,0,0,0,0,2,16,2,0,0,0,0,0,9,14,12,15,8,0,0,0,7,16,15,5,2,0,2 +0,2,8,9,14,10,0,0,0,3,11,9,13,16,0,0,0,0,0,0,15,14,0,0,0,0,0,0,16,8,0,0,0,0,0,0,15,13,1,0,0,0,0,0,6,16,8,0,0,0,1,6,15,14,3,0,0,0,14,11,7,1,0,0,3 
+0,0,3,14,13,1,0,0,0,0,12,15,11,9,0,0,0,1,16,4,0,15,0,0,0,5,13,0,0,13,6,0,0,7,9,0,0,13,8,0,0,4,12,0,0,13,9,0,0,3,13,4,10,16,5,0,0,0,3,8,13,15,2,0,0 +0,2,5,10,16,6,0,0,0,4,12,13,16,5,0,0,0,0,0,5,16,0,0,0,0,0,0,11,11,0,0,0,0,0,0,11,13,2,0,0,0,0,0,1,12,15,4,0,0,0,0,0,7,16,8,0,0,0,1,13,16,11,2,0,3 +0,0,5,14,12,8,2,0,0,0,1,10,16,16,12,0,0,0,5,14,16,16,2,0,0,0,10,16,16,12,0,0,0,6,16,16,14,4,0,0,0,9,16,16,11,0,0,0,0,10,16,16,6,0,0,0,0,1,10,14,12,4,0,0,1 +0,0,9,16,13,15,5,0,0,4,16,11,10,13,5,0,0,12,11,0,0,0,0,0,0,8,14,3,0,0,0,0,0,0,10,15,0,0,0,0,0,0,0,15,5,0,0,0,0,0,2,16,7,0,0,0,0,0,14,13,1,0,0,0,5 +0,3,16,15,7,0,0,0,0,5,16,16,16,13,2,0,0,0,7,8,10,16,6,0,0,0,0,0,2,16,4,0,0,0,0,0,4,16,1,0,0,0,0,0,12,9,0,0,0,0,4,11,15,3,0,0,0,2,15,12,3,0,0,0,9 +0,0,13,16,16,16,2,0,0,1,16,16,12,9,0,0,0,8,16,8,0,0,0,0,0,10,16,1,0,0,0,0,0,7,16,13,0,0,0,0,0,0,9,16,10,0,0,0,0,0,11,16,12,0,0,0,0,0,15,14,6,0,0,0,5 +0,0,0,3,10,10,0,0,0,8,14,16,14,15,4,0,0,0,3,1,1,15,4,0,0,0,0,7,14,16,6,0,0,0,9,16,15,10,0,0,0,0,2,2,14,4,0,0,0,0,0,7,11,0,0,0,0,0,0,9,4,0,0,0,7 +0,0,12,16,6,0,0,0,0,3,15,12,12,0,0,0,0,6,12,8,12,0,0,0,0,3,14,11,10,0,0,0,0,0,5,16,3,0,0,0,0,0,13,12,0,0,0,0,0,8,16,12,7,5,2,0,0,0,12,13,10,10,4,0,2 +0,0,0,8,15,1,0,0,0,0,1,15,15,1,0,0,0,0,8,16,5,0,0,0,0,0,12,15,1,0,0,0,0,0,15,7,0,0,0,0,0,0,14,14,12,7,0,0,0,0,8,16,12,16,7,0,0,0,0,7,15,16,13,0,6 +0,0,3,15,14,12,12,5,0,0,0,9,12,14,16,7,0,0,0,0,1,13,14,0,0,4,9,10,11,16,13,0,0,3,12,14,16,14,5,0,0,0,0,8,16,4,0,0,0,0,1,15,8,0,0,0,0,0,4,15,0,0,0,0,7 +0,0,10,7,0,0,0,0,0,1,14,8,0,0,0,0,0,5,16,3,2,8,4,0,0,7,16,14,16,15,5,0,0,0,4,8,16,12,0,0,0,0,0,6,16,2,0,0,0,0,5,16,7,0,0,0,0,0,11,9,0,0,0,0,4 +0,0,0,8,15,2,0,0,0,0,0,13,16,3,0,0,0,0,4,16,14,0,0,0,0,0,7,16,7,0,0,0,0,0,9,16,2,0,0,0,0,0,12,16,4,0,0,0,0,0,8,16,16,8,0,0,0,0,0,6,15,15,0,0,6 +0,1,7,13,16,16,8,0,0,5,16,12,16,16,5,0,0,0,1,4,16,8,0,0,0,0,0,11,16,9,1,0,0,0,0,0,9,15,10,0,0,0,0,0,0,13,9,0,0,0,0,5,11,16,5,0,0,0,6,16,14,7,0,0,3 +0,0,6,16,12,1,0,0,0,8,15,5,16,4,0,0,0,13,6,4,16,3,0,0,0,3,3,5,16,0,0,0,0,0,0,13,12,0,0,0,0,0,3,16,9,0,0,0,0,0,10,16,6,4,1,0,0,0,4,15,16,16,14,0,2 +0,2,15,16,16,12,2,0,0,0,2,10,16,16,5,0,0,0,0,11,16,5,0,0,0,0,0,11,16,10,0,0,0,0,0,1,10,16,7,0,0,0,0,0,8,16,9,0,0,0,5,11,16,15,2,0,0,3,16,14,10,2,0,0,3 +0,2,14,13,16,13,0,0,0,0,7,8,14,14,0,0,0,0,0,0,13,12,0,0,0,0,11,13,16,14,5,0,0,0,8,16,16,12,5,0,0,0,3,16,7,0,0,0,0,0,11,14,1,0,0,0,0,2,16,8,0,0,0,0,7 +0,0,6,12,14,16,14,0,0,1,12,12,10,12,16,0,0,0,0,0,1,11,15,0,0,0,0,9,16,16,10,0,0,0,0,9,16,11,0,0,0,0,0,7,15,0,0,0,0,0,1,16,5,0,0,0,0,0,8,13,0,0,0,0,7 +0,0,3,12,0,0,0,0,0,0,7,16,2,0,0,0,0,0,13,11,1,7,1,0,0,6,16,16,16,16,8,0,0,2,11,14,16,13,0,0,0,0,0,13,15,3,0,0,0,0,4,16,12,0,0,0,0,0,5,12,6,0,0,0,4 +0,0,11,16,10,0,0,0,0,0,3,16,16,9,0,0,0,0,0,14,16,9,0,0,0,0,2,16,16,4,0,0,0,0,7,16,16,3,0,0,0,0,8,16,13,1,0,0,0,0,12,16,12,0,0,0,0,0,9,14,16,0,0,0,1 +0,0,5,10,14,8,0,0,0,8,16,13,16,15,0,0,0,0,0,1,16,8,0,0,0,0,0,6,16,2,0,0,0,0,0,5,16,14,2,0,0,0,0,0,6,16,8,0,0,0,1,10,14,15,1,0,0,0,2,13,10,2,0,0,3 +0,0,4,13,4,0,0,0,0,0,11,10,11,7,0,0,0,0,14,3,1,15,0,0,0,0,15,1,0,12,5,0,0,1,15,0,0,13,5,0,0,0,16,0,4,16,4,0,0,0,11,10,15,11,0,0,0,0,3,12,14,1,0,0,0 +0,0,2,15,12,1,0,0,0,0,7,15,15,9,0,0,0,0,5,15,7,16,2,0,0,0,1,13,14,16,7,0,0,0,0,4,13,15,10,0,0,0,0,0,0,14,11,0,0,0,0,1,8,16,10,0,0,0,2,13,16,15,3,0,9 +0,0,0,12,16,2,0,0,0,0,4,16,12,1,0,0,0,0,14,16,2,0,0,0,0,0,16,10,0,0,0,0,0,1,16,8,5,0,0,0,0,0,14,16,16,13,1,0,0,0,7,15,7,15,13,0,0,0,1,9,16,16,16,3,6 
+0,0,3,12,15,2,0,0,0,2,15,6,0,0,0,0,0,4,14,2,9,14,8,0,0,1,13,16,14,4,0,0,0,0,8,16,6,0,0,0,0,0,11,11,15,0,0,0,0,0,11,5,12,2,0,0,0,0,3,12,15,3,0,0,8 +0,0,5,15,16,7,0,0,0,0,8,12,16,16,1,0,0,0,2,7,13,15,0,0,0,0,10,16,16,15,8,0,0,0,2,15,16,16,9,0,0,0,0,8,16,2,0,0,0,0,0,15,9,0,0,0,0,0,4,16,1,0,0,0,7 +0,3,15,16,8,0,0,0,0,10,16,15,13,0,0,0,0,6,9,12,12,0,0,0,0,0,0,14,8,0,0,0,0,0,5,16,4,0,0,0,0,0,13,12,0,1,4,0,0,4,16,14,12,15,9,0,0,3,16,16,13,8,0,0,2 +0,0,8,12,15,16,6,0,0,6,16,16,14,8,0,0,0,12,14,4,1,0,0,0,0,8,15,9,1,0,0,0,0,0,13,16,10,0,0,0,0,0,1,9,15,0,0,0,0,0,3,11,14,0,0,0,0,0,11,15,3,0,0,0,5 +0,0,7,10,0,0,0,0,0,0,12,13,11,6,0,0,0,3,16,16,16,16,6,0,0,8,13,4,5,15,6,0,0,8,12,0,0,10,8,0,0,6,14,1,5,15,4,0,0,4,16,16,16,13,0,0,0,1,12,14,9,1,0,0,0 +0,0,9,15,6,0,0,0,0,3,14,16,16,3,0,0,0,7,11,1,15,4,0,0,0,1,2,0,14,4,0,0,0,0,0,9,13,0,0,0,0,0,3,16,4,0,0,0,0,0,14,16,12,11,4,0,0,0,9,15,14,12,5,0,2 +0,0,6,12,11,6,0,0,0,0,8,16,16,16,3,0,0,0,8,16,16,13,0,0,0,0,8,16,16,12,0,0,0,0,10,16,16,12,0,0,0,3,15,16,16,7,0,0,0,3,15,16,16,2,0,0,0,0,3,8,9,5,0,0,1 +0,0,0,4,11,12,5,0,0,0,1,16,16,16,1,0,0,0,8,16,16,13,0,0,0,1,14,16,16,4,0,0,0,4,16,16,16,4,0,0,0,7,16,16,16,1,0,0,0,1,12,16,16,3,0,0,0,0,0,7,12,8,0,0,1 +0,7,16,15,4,0,0,0,0,11,10,7,13,0,0,0,0,2,3,4,12,0,0,0,0,0,0,12,12,0,0,0,0,0,4,16,6,0,0,0,0,0,13,13,0,0,0,0,0,6,16,16,16,16,8,0,0,4,15,16,16,13,3,0,2 +0,0,8,16,14,2,0,0,0,1,14,6,11,8,0,0,0,8,16,0,4,16,0,0,0,2,11,15,15,16,6,0,0,0,0,6,7,12,6,0,0,0,0,0,0,8,8,0,0,0,12,6,4,13,10,0,0,0,6,14,16,10,2,0,9 +0,0,6,15,15,2,0,0,0,0,9,16,16,5,0,0,0,0,9,16,16,1,0,0,0,0,11,16,16,1,0,0,0,0,14,16,14,2,0,0,0,0,14,16,14,0,0,0,0,0,15,16,15,4,0,0,0,0,5,16,12,0,0,0,1 +0,0,1,10,13,8,0,0,0,1,14,10,7,15,0,0,0,1,16,7,7,16,3,0,0,0,7,16,13,10,8,0,0,0,0,0,0,6,8,0,0,0,0,0,0,12,4,0,0,0,1,10,5,13,3,0,0,0,1,11,16,7,0,0,9 +0,0,0,6,15,1,0,0,0,0,2,15,11,0,0,0,0,0,8,15,1,0,0,0,0,7,16,3,0,6,6,0,0,9,16,16,13,15,12,0,0,1,6,10,16,16,8,0,0,0,0,1,16,10,1,0,0,0,0,6,16,2,0,0,4 +0,0,11,16,6,0,0,0,0,2,16,15,16,3,0,0,0,0,8,4,16,4,0,0,0,0,0,7,14,1,0,0,0,0,0,14,9,0,0,0,0,0,11,11,0,0,0,0,0,1,16,9,2,5,1,0,0,0,9,14,12,9,0,0,2 +0,2,13,11,5,0,0,0,0,1,8,13,16,8,0,0,0,0,0,0,12,16,0,0,0,0,0,7,16,11,0,0,0,0,0,9,16,7,0,0,0,0,0,1,9,16,5,0,0,2,8,5,7,16,6,0,0,3,14,16,13,8,0,0,3 +0,0,0,8,15,0,0,0,0,0,3,16,10,0,0,0,0,1,14,15,0,0,0,0,0,4,16,10,0,0,0,0,0,2,14,16,16,16,6,0,0,0,3,12,16,15,2,0,0,0,0,3,16,10,0,0,0,0,0,4,13,3,0,0,4 +0,0,1,13,11,0,0,0,0,0,11,16,3,0,0,0,0,2,16,11,0,1,2,0,0,8,16,13,8,14,10,0,0,3,15,16,16,16,3,0,0,0,3,9,16,11,0,0,0,0,0,12,16,6,0,0,0,0,0,15,13,3,0,0,4 +0,0,11,15,8,0,0,0,0,5,16,16,16,11,0,0,0,8,13,2,9,16,3,0,0,8,15,0,0,15,4,0,0,8,12,0,0,13,6,0,0,8,15,0,1,16,3,0,0,4,16,14,16,9,0,0,0,0,11,16,12,0,0,0,0 +0,0,7,7,4,4,0,0,0,0,16,16,16,16,4,0,0,1,16,16,8,12,8,0,0,6,15,7,0,12,5,0,0,6,12,0,0,13,4,0,0,5,12,0,8,14,1,0,0,4,16,11,15,7,0,0,0,0,8,15,7,0,0,0,0 +0,0,3,12,2,0,0,0,0,0,9,16,3,0,0,0,0,0,16,10,0,0,0,0,0,1,16,6,0,0,0,0,0,5,16,10,8,3,0,0,0,6,16,16,16,16,5,0,0,0,13,16,8,16,4,0,0,0,1,11,15,14,0,0,6 +0,0,4,8,16,13,0,0,0,3,16,16,14,16,2,0,0,4,16,6,0,12,4,0,0,4,12,0,0,12,6,0,0,5,12,0,0,12,3,0,0,4,16,6,6,14,0,0,0,0,12,16,16,7,0,0,0,0,4,12,8,0,0,0,0 +0,0,7,16,7,0,0,0,0,4,16,9,10,11,2,0,0,11,10,0,11,14,1,0,0,6,14,7,14,7,0,0,0,0,11,16,12,0,0,0,0,0,13,12,15,1,0,0,0,4,16,7,14,2,0,0,0,0,8,16,10,0,0,0,8 +0,0,7,16,15,0,0,0,0,5,16,16,16,0,0,0,0,3,8,8,16,0,0,0,0,0,2,15,8,0,0,0,0,0,5,16,3,0,0,0,0,0,15,10,0,2,1,0,0,0,15,16,12,16,8,0,0,0,6,16,16,15,3,0,2 
+0,0,0,9,15,0,0,0,0,0,5,16,12,0,0,0,0,0,13,15,2,0,0,0,0,5,16,8,0,0,0,0,0,11,16,2,2,8,3,0,0,13,16,14,15,16,6,0,0,3,15,16,16,12,0,0,0,0,2,12,16,5,0,0,4 +0,0,1,11,13,0,0,0,0,0,5,16,3,0,0,0,0,1,15,9,0,0,0,0,0,6,15,1,0,4,6,0,0,9,16,11,9,16,8,0,0,1,9,12,16,12,1,0,0,0,0,4,16,4,0,0,0,0,0,14,11,0,0,0,4 +0,0,0,13,7,0,0,0,0,0,9,16,4,0,0,0,0,1,14,11,0,0,0,0,0,7,16,7,6,16,5,0,0,0,10,14,16,14,1,0,0,0,0,8,16,6,0,0,0,0,0,15,6,0,0,0,0,0,0,16,9,0,0,0,4 +0,0,12,13,9,6,2,0,0,4,16,16,16,16,7,0,0,7,13,3,5,3,0,0,0,7,14,5,0,0,0,0,0,4,16,16,5,0,0,0,0,1,9,16,12,0,0,0,0,0,5,12,12,0,0,0,0,0,8,15,5,0,0,0,5 +0,0,0,6,14,2,0,0,0,0,2,14,12,0,0,0,0,0,7,15,1,0,0,0,0,0,13,10,0,0,0,0,0,1,16,11,8,4,0,0,0,1,15,16,16,16,8,0,0,0,8,13,2,4,15,1,0,0,0,7,14,16,14,1,6 +0,0,8,16,10,0,0,0,0,2,16,13,16,0,0,0,0,2,8,4,14,0,0,0,0,0,0,8,10,0,0,0,0,0,0,13,6,0,0,0,0,0,6,15,0,0,0,0,0,0,12,15,12,8,2,0,0,0,8,15,10,8,1,0,2 +0,0,3,11,12,4,0,0,0,2,15,10,16,12,0,0,0,5,11,0,11,14,4,0,0,8,8,0,0,5,8,0,0,6,8,0,0,4,8,0,0,4,10,0,0,10,5,0,0,1,14,5,7,14,0,0,0,0,7,15,10,1,0,0,0 +0,0,0,7,10,0,0,0,0,0,7,16,5,0,0,0,0,0,14,11,0,0,0,0,0,4,16,5,4,8,0,0,0,3,16,16,16,14,0,0,0,1,6,8,16,7,0,0,0,0,0,3,16,5,0,0,0,0,0,9,13,2,0,0,4 +0,0,4,12,0,0,0,0,0,0,14,6,0,0,0,0,0,4,16,4,0,0,0,0,0,7,16,1,0,0,0,0,0,8,16,16,16,13,1,0,0,5,16,7,9,16,5,0,0,1,14,12,4,16,5,0,0,0,3,15,16,8,0,0,6 +0,0,8,12,14,10,1,0,0,3,16,12,9,15,8,0,0,1,6,0,8,14,4,0,0,0,0,4,16,7,0,0,0,0,0,2,15,7,0,0,0,0,0,0,6,16,1,0,0,0,8,7,8,16,3,0,0,0,6,14,11,6,0,0,3 +0,0,4,16,14,3,0,0,0,3,16,16,16,15,2,0,0,8,16,2,1,14,6,0,0,8,16,0,0,5,8,0,0,5,15,0,0,4,8,0,0,0,16,6,0,9,7,0,0,0,14,14,8,16,3,0,0,0,3,14,16,13,0,0,0 +0,2,12,13,11,1,0,0,0,4,12,12,16,7,0,0,0,0,0,5,16,5,0,0,0,0,0,16,16,1,0,0,0,0,0,4,15,15,0,0,0,0,0,0,1,16,5,0,0,2,8,5,8,16,3,0,0,2,10,16,14,8,0,0,3 +0,0,0,8,15,4,0,0,0,0,0,13,16,13,0,0,0,1,1,10,3,13,6,0,0,5,8,0,0,6,8,0,0,7,11,0,0,4,8,0,0,3,15,7,0,4,9,0,0,0,7,16,13,11,12,0,0,0,0,6,15,16,4,0,0 +0,0,4,13,16,12,0,0,0,0,7,9,13,15,0,0,0,0,0,3,11,11,0,0,0,0,7,16,16,14,6,0,0,0,5,13,16,12,5,0,0,0,1,11,4,0,0,0,0,0,6,14,0,0,0,0,0,0,6,9,0,0,0,0,7 +0,0,3,13,15,16,16,3,0,0,10,16,13,13,11,1,0,0,11,16,11,4,0,0,0,0,4,13,16,15,1,0,0,0,0,2,13,16,1,0,0,0,0,0,6,16,0,0,0,0,0,5,15,7,0,0,0,0,3,16,9,0,0,0,5 +0,0,0,8,14,15,2,0,0,0,9,12,4,13,7,0,0,0,14,13,8,14,3,0,0,0,3,16,16,16,2,0,0,0,0,0,0,15,0,0,0,0,3,0,1,16,1,0,0,0,14,13,9,14,0,0,0,0,2,10,12,4,0,0,9 +0,5,14,10,4,0,0,0,0,3,12,16,16,6,0,0,0,0,1,13,16,5,0,0,0,0,8,16,13,2,0,0,0,0,5,16,13,2,0,0,0,0,0,6,16,15,4,0,0,1,8,9,15,16,5,0,0,4,15,14,8,4,0,0,3 +0,0,7,16,16,7,0,0,0,3,16,16,16,11,0,0,0,1,6,4,16,7,0,0,0,0,0,9,15,2,0,0,0,0,1,14,8,0,0,0,0,0,7,16,5,4,1,0,0,0,10,16,16,16,5,0,0,0,7,16,16,7,0,0,2 +0,1,8,16,15,1,0,0,0,8,12,14,16,0,0,0,0,0,1,16,11,0,0,0,0,0,4,16,12,2,0,0,0,0,1,9,15,16,2,0,0,0,2,0,0,15,8,0,0,1,16,14,5,15,7,0,0,0,7,13,14,10,1,0,3 +0,3,15,16,13,5,0,0,0,7,16,12,14,15,1,0,0,1,6,0,11,15,1,0,0,0,0,16,16,5,0,0,0,0,0,6,15,15,2,0,0,0,0,0,2,16,7,0,0,3,8,2,6,16,4,0,0,2,12,16,16,9,0,0,3 +0,0,3,12,14,16,14,0,0,0,1,8,7,10,14,0,0,0,0,5,4,13,9,0,0,0,5,16,16,16,10,0,0,0,3,8,16,5,0,0,0,0,0,11,9,0,0,0,0,0,3,16,1,0,0,0,0,0,7,9,0,0,0,0,7 +0,0,3,13,7,0,0,0,0,0,12,16,16,11,0,0,0,1,16,16,10,16,3,0,0,7,16,14,0,14,4,0,0,1,16,9,0,12,5,0,0,0,15,8,4,16,4,0,0,0,12,16,16,12,1,0,0,0,5,15,11,1,0,0,0 +0,0,7,16,15,5,0,0,0,0,5,8,11,15,4,0,0,0,0,1,14,13,1,0,0,0,0,10,16,3,0,0,0,0,0,5,16,5,0,0,0,0,2,0,11,12,0,0,0,3,15,11,12,15,0,0,0,0,8,13,11,3,0,0,3 +0,0,3,9,14,4,0,0,0,1,16,15,13,10,2,0,0,1,15,5,1,13,12,0,0,0,15,12,11,16,3,0,0,0,4,16,16,9,0,0,0,0,11,16,16,7,0,0,0,0,15,16,16,8,0,0,0,0,3,13,15,4,0,0,8 
+0,2,10,15,11,4,0,0,0,2,10,6,13,12,0,0,0,0,0,2,13,9,0,0,0,0,0,15,16,0,0,0,0,0,0,11,16,9,0,0,0,0,0,0,2,15,11,0,0,0,1,3,11,14,2,0,0,2,15,16,11,1,0,0,3 +0,0,5,15,8,0,0,0,0,2,15,9,10,3,1,0,0,6,9,7,11,14,1,0,0,2,15,13,8,2,0,0,0,1,15,6,0,0,0,0,0,1,16,12,0,0,0,0,0,1,16,13,1,0,0,0,0,0,8,15,3,0,0,0,8 +0,0,5,14,11,0,0,0,0,0,13,16,14,0,0,0,0,0,6,9,12,0,0,0,0,0,0,10,8,0,0,0,0,0,1,15,3,0,0,0,0,0,8,14,0,0,0,0,0,0,11,16,12,15,1,0,0,0,6,16,16,7,0,0,2 +0,0,4,15,3,0,0,0,0,0,8,16,1,0,0,0,0,0,14,13,0,0,0,0,0,3,16,10,4,3,0,0,0,8,16,16,16,16,5,0,0,8,16,6,4,14,8,0,0,5,16,11,8,16,5,0,0,0,6,15,16,11,0,0,6 +0,0,7,14,4,0,0,0,0,1,16,16,16,7,0,0,0,4,16,16,16,14,0,0,0,0,13,16,16,11,0,0,0,0,8,16,16,1,0,0,0,0,10,16,16,6,0,0,0,0,11,16,16,13,0,0,0,0,6,12,13,10,0,0,8 +0,0,2,11,11,3,0,0,0,0,2,16,16,16,4,0,0,0,1,16,16,16,4,0,0,0,3,16,16,16,3,0,0,0,7,16,16,15,2,0,0,0,10,16,16,14,1,0,0,0,8,16,16,12,0,0,0,0,1,5,8,9,2,0,1 +0,1,7,12,12,2,0,0,0,10,16,16,16,10,0,0,0,0,2,1,16,8,0,0,0,0,0,1,16,9,0,0,0,0,0,2,16,14,0,0,0,0,0,0,4,15,5,0,0,0,3,7,7,16,10,0,0,0,7,16,16,12,0,0,3 +0,0,5,12,14,5,0,0,0,2,16,13,16,6,0,0,0,0,0,3,16,4,0,0,0,0,0,15,16,5,0,0,0,0,0,4,11,16,4,0,0,0,0,0,1,16,4,0,0,0,6,12,13,15,1,0,0,0,8,12,11,3,0,0,3 +0,0,5,11,15,8,0,0,0,4,14,8,10,16,0,0,0,8,13,1,15,12,0,0,0,4,16,15,16,13,0,0,0,0,4,12,13,16,4,0,0,0,4,1,0,14,8,0,0,0,13,13,6,14,7,0,0,0,2,14,14,9,2,0,9 +0,0,9,16,7,0,0,0,0,0,16,16,14,0,0,0,0,0,3,13,16,0,0,0,0,0,7,16,16,12,8,0,0,0,8,16,16,16,9,0,0,0,0,14,11,0,0,0,0,0,4,16,5,0,0,0,0,0,7,12,0,0,0,0,7 +0,1,14,16,14,4,0,0,0,3,16,8,8,14,0,0,0,0,15,14,13,9,0,0,0,0,3,11,16,4,0,0,0,0,0,0,12,12,0,0,0,0,0,0,4,16,3,0,0,0,3,4,3,16,1,0,0,0,12,14,16,14,1,0,9 +0,0,8,15,16,12,0,0,0,5,16,12,15,14,0,0,0,1,5,1,15,8,0,0,0,0,4,14,16,4,0,0,0,0,3,16,16,14,1,0,0,0,0,0,7,16,4,0,0,0,4,15,16,14,1,0,0,0,9,16,12,3,0,0,3 +0,0,1,7,11,13,11,5,0,0,7,16,16,13,16,4,0,0,14,8,0,0,0,0,0,4,16,9,8,5,0,0,0,8,16,16,16,16,2,0,0,2,4,4,12,15,0,0,0,0,0,7,16,5,0,0,0,0,0,12,5,0,0,0,5 +0,0,6,15,16,7,0,0,0,1,16,12,15,13,0,0,0,0,0,3,16,11,0,0,0,0,5,16,11,0,0,0,0,0,5,13,16,12,0,0,0,0,0,0,9,15,1,0,0,0,5,9,14,15,0,0,0,0,5,16,11,4,0,0,3 +0,1,11,16,15,6,0,0,0,2,16,7,6,13,2,0,0,0,10,13,14,16,3,0,0,0,2,9,9,12,3,0,0,0,0,0,0,12,4,0,0,0,0,0,0,9,7,0,0,0,0,0,3,14,3,0,0,0,9,16,16,11,2,0,9 +0,0,9,16,6,0,0,0,0,0,15,10,15,2,0,0,0,0,5,2,16,2,0,0,0,0,2,7,16,3,0,0,0,7,16,16,16,16,8,0,0,1,5,14,6,0,1,0,0,0,9,12,0,0,0,0,0,0,10,8,0,0,0,0,7 +0,0,0,7,15,0,0,0,0,0,7,16,10,0,0,0,0,1,16,9,0,12,8,0,0,9,14,1,5,16,7,0,0,8,15,8,12,16,9,0,0,3,15,16,16,11,1,0,0,0,0,7,16,1,0,0,0,0,0,7,13,0,0,0,4 +0,0,6,14,14,2,0,0,0,0,15,11,9,10,0,0,0,3,14,0,0,7,5,0,0,4,12,0,0,4,8,0,0,4,13,0,0,11,8,0,0,5,13,0,4,16,3,0,0,0,16,14,16,7,0,0,0,0,10,15,7,0,0,0,0 +0,0,9,16,14,4,0,0,0,1,10,8,16,13,0,0,0,0,0,0,15,11,0,0,0,0,1,12,16,3,0,0,0,0,2,14,16,13,0,0,0,0,0,0,7,16,2,0,0,0,1,4,9,15,2,0,0,0,11,16,13,3,0,0,3 +0,0,2,10,12,14,16,12,0,0,8,16,16,16,14,4,0,0,2,16,12,4,0,0,0,0,1,16,15,2,0,0,0,0,0,8,16,11,0,0,0,0,0,0,13,16,0,0,0,0,0,7,16,11,0,0,0,0,1,16,11,1,0,0,5 +0,0,0,9,14,16,12,0,0,0,10,14,6,11,15,1,0,0,11,15,16,16,8,0,0,0,0,0,1,15,9,0,0,0,0,0,5,15,0,0,0,0,0,1,14,5,0,0,0,0,0,6,12,0,0,0,0,0,0,12,5,0,0,0,9 +0,0,3,15,12,1,0,0,0,0,11,14,8,11,0,0,0,0,15,1,0,13,1,0,0,5,14,0,0,9,5,0,0,5,12,0,0,11,2,0,0,0,16,1,2,15,2,0,0,0,14,13,14,12,0,0,0,0,2,14,8,1,0,0,0 +0,0,5,14,16,9,0,0,0,0,8,16,14,16,5,0,0,0,7,16,13,16,4,0,0,8,13,16,16,12,0,0,0,3,15,16,12,2,0,0,0,0,14,16,12,1,0,0,0,0,15,16,16,4,0,0,0,0,7,16,11,1,0,0,8 
+0,0,0,5,8,0,0,0,0,0,4,16,2,0,14,0,0,0,9,8,0,8,8,0,0,2,15,0,1,15,0,0,0,10,9,4,9,15,1,0,0,11,16,16,16,11,1,0,0,3,4,7,9,0,0,0,0,0,0,8,5,0,0,0,4 +0,0,3,14,16,9,0,0,0,0,8,14,11,16,1,0,0,10,11,15,15,11,0,0,0,4,13,16,12,1,0,0,0,2,16,14,14,0,0,0,0,4,14,0,13,8,0,0,0,2,13,4,13,16,0,0,0,0,3,14,12,7,0,0,8 +0,0,9,16,12,3,0,0,0,0,9,16,16,7,0,0,0,0,8,16,14,2,0,0,0,0,14,16,14,0,0,0,0,0,15,16,16,1,0,0,0,2,15,16,14,1,0,0,0,0,14,16,8,0,0,0,0,0,12,16,10,0,0,0,1 +0,0,7,13,16,5,0,0,0,6,8,8,14,9,0,0,0,0,0,0,15,5,0,0,0,0,0,12,11,0,0,0,0,0,6,15,1,0,0,0,0,0,12,4,0,0,0,0,0,0,11,10,8,8,4,0,0,0,6,15,16,12,4,0,2 +0,0,4,12,14,6,0,0,0,5,16,11,9,16,2,0,0,12,16,2,4,16,3,0,0,6,16,14,14,14,0,0,0,0,1,11,16,1,0,0,0,0,1,14,16,9,0,0,0,0,5,16,9,15,1,0,0,0,2,12,13,8,0,0,8 +0,0,0,13,9,1,0,0,0,0,9,16,16,11,0,0,0,0,8,14,5,16,3,0,0,1,7,10,0,12,6,0,0,6,14,14,2,10,9,0,0,4,16,16,12,16,10,0,0,0,11,16,16,15,4,0,0,0,2,13,16,7,0,0,0 +0,0,8,16,16,8,0,0,0,5,15,8,16,14,0,0,0,1,2,2,15,11,0,0,0,0,3,13,16,9,1,0,0,0,8,16,15,15,10,0,0,0,2,2,3,16,6,0,0,0,7,14,16,12,1,0,0,0,14,16,7,0,0,0,3 +0,0,10,16,16,9,0,0,0,2,15,12,14,16,1,0,0,1,4,4,13,16,5,0,0,0,10,16,16,16,13,0,0,0,5,15,16,6,1,0,0,0,0,16,11,0,0,0,0,0,9,16,3,0,0,0,0,0,11,12,0,0,0,0,7 +0,0,3,13,1,0,0,0,0,0,13,12,0,0,0,0,0,3,16,1,0,0,0,0,0,2,14,1,1,4,0,0,0,5,14,6,14,16,7,0,0,1,16,16,11,5,15,0,0,0,10,16,14,16,11,0,0,0,2,12,16,13,0,0,6 +0,0,7,15,16,16,13,0,0,2,16,9,8,14,16,0,0,1,2,0,4,16,8,0,0,0,0,0,10,15,2,0,0,0,0,0,7,16,8,0,0,0,0,0,3,16,8,0,0,0,0,5,15,13,1,0,0,0,9,16,11,2,0,0,3 +0,0,3,14,16,8,0,0,0,2,15,15,16,14,5,0,0,3,16,16,16,16,14,1,0,0,6,5,8,16,6,0,0,0,0,0,12,13,0,0,0,0,0,5,16,3,0,0,0,0,0,14,8,0,0,0,0,0,4,15,1,0,0,0,9 +0,0,2,15,16,14,5,0,0,0,5,15,8,13,11,0,0,2,11,12,7,15,8,0,0,7,16,16,16,8,1,0,0,2,15,16,8,0,0,0,0,0,7,16,12,0,0,0,0,0,4,16,16,0,0,0,0,0,2,14,14,0,0,0,8 +0,0,6,8,10,12,11,0,0,1,16,16,16,12,3,0,0,4,16,8,1,0,0,0,0,6,14,0,0,0,0,0,0,1,14,12,3,0,0,0,0,0,5,15,16,6,0,0,0,0,0,4,16,10,0,0,0,0,6,14,7,1,0,0,5 +0,0,5,10,12,15,5,0,0,1,13,16,15,8,2,0,0,7,16,5,0,0,0,0,0,1,13,12,3,0,0,0,0,0,1,9,16,5,0,0,0,0,0,0,11,12,0,0,0,0,0,8,15,6,0,0,0,0,6,12,3,0,0,0,5 +0,0,8,15,15,5,0,0,0,6,16,7,5,14,2,0,0,8,16,6,6,16,3,0,0,0,9,16,15,9,0,0,0,1,12,16,13,0,0,0,0,3,15,4,13,7,0,0,0,3,8,0,10,9,0,0,0,0,7,14,14,4,0,0,8 +0,0,12,4,0,0,0,0,0,0,15,4,0,0,0,0,0,2,15,2,0,0,0,0,0,2,15,6,12,8,0,0,0,7,16,14,9,10,6,0,0,4,16,14,3,1,10,0,0,3,16,13,15,16,5,0,0,0,10,15,10,3,0,0,6 +0,0,1,9,15,16,14,1,0,0,12,15,11,14,13,1,0,0,16,16,16,16,6,0,0,0,4,8,8,14,14,0,0,0,0,0,3,14,7,0,0,0,0,1,12,11,0,0,0,0,0,8,14,1,0,0,0,0,0,15,7,0,0,0,9 +0,0,11,3,0,0,0,0,0,2,16,4,0,0,0,0,0,4,15,0,0,0,0,0,0,4,14,6,12,11,1,0,0,7,16,16,15,14,8,0,0,4,16,15,3,9,11,0,0,1,16,16,16,16,6,0,0,0,7,14,13,5,0,0,6 +0,0,0,3,15,6,0,0,0,0,1,13,13,1,0,0,0,0,10,16,9,8,1,0,0,3,16,10,12,16,1,0,0,9,16,5,16,13,3,0,0,7,16,16,16,16,11,0,0,0,0,7,16,7,0,0,0,0,0,3,16,8,0,0,4 +0,0,6,15,16,16,16,11,0,0,10,15,16,14,10,2,0,0,3,16,8,0,0,0,0,0,3,16,13,1,0,0,0,0,0,9,16,9,0,0,0,0,0,0,15,15,1,0,0,0,0,10,15,7,0,0,0,0,5,15,3,0,0,0,5 +0,0,2,11,13,16,11,0,0,0,9,16,13,16,10,0,0,0,14,16,16,16,16,4,0,0,6,11,9,14,14,0,0,0,0,0,6,16,4,0,0,0,0,2,15,7,0,0,0,0,0,13,13,0,0,0,0,0,3,15,4,0,0,0,9 +0,0,6,15,12,1,0,0,0,8,13,4,10,8,0,0,0,2,0,0,15,6,0,0,0,0,0,6,16,5,0,0,0,0,0,16,16,16,5,0,0,0,4,5,1,16,7,0,0,2,16,10,14,15,0,0,0,0,6,13,9,1,0,0,3 +0,0,5,10,13,16,9,0,0,2,16,16,11,8,2,0,0,1,15,9,0,0,0,0,0,0,9,16,9,1,0,0,0,0,0,1,13,11,0,0,0,0,0,0,2,15,0,0,0,0,0,4,15,5,0,0,0,0,7,14,3,0,0,0,5 
+0,0,7,16,13,2,0,0,0,2,16,12,13,12,0,0,0,14,15,4,15,8,0,0,0,5,15,16,15,4,0,0,0,0,10,16,13,0,0,0,0,0,15,10,14,2,0,0,0,0,13,3,12,6,0,0,0,0,4,15,15,3,0,0,8 +0,0,0,3,11,15,1,0,0,0,6,16,16,16,9,0,0,2,16,16,16,13,14,0,0,0,11,8,1,9,9,0,0,0,0,0,1,15,0,0,0,0,0,0,6,10,0,0,0,0,0,0,12,3,0,0,0,0,0,0,13,0,0,0,9 +0,0,7,8,12,16,12,0,0,1,16,16,16,15,6,0,0,10,16,7,1,0,0,0,0,8,16,9,1,0,0,0,0,0,9,16,12,0,0,0,0,0,0,8,16,4,0,0,0,0,5,14,14,1,0,0,0,0,9,15,3,0,0,0,5 +0,0,5,15,12,1,0,0,0,0,14,14,14,13,0,0,0,3,13,0,1,14,3,0,0,4,11,0,0,10,8,0,0,5,15,0,0,5,8,0,0,2,15,2,0,7,8,0,0,0,12,14,13,16,4,0,0,0,4,14,16,8,0,0,0 +0,2,15,16,6,0,0,0,0,3,14,14,16,2,0,0,0,0,0,6,16,2,0,0,0,0,0,11,16,1,0,0,0,8,14,16,16,16,5,0,0,10,16,15,11,12,5,0,0,5,16,7,0,0,0,0,0,5,14,1,0,0,0,0,7 +0,2,15,16,11,0,0,0,0,4,15,14,16,3,0,0,0,0,0,10,16,2,0,0,0,0,0,13,13,0,0,0,0,0,6,16,4,0,0,0,0,1,11,13,0,0,0,0,0,8,16,14,13,16,6,0,0,3,16,16,11,6,0,0,2 +0,0,2,15,9,2,0,0,0,0,5,16,15,0,0,0,0,0,8,16,10,0,0,0,0,0,7,16,9,0,0,0,0,0,13,16,10,0,0,0,0,0,8,16,8,0,0,0,0,0,8,16,10,0,0,0,0,0,4,14,8,0,0,0,1 +0,0,1,8,15,16,9,0,0,1,12,15,9,13,14,0,0,5,13,2,0,13,11,0,0,3,5,0,1,15,7,0,0,0,0,2,10,16,9,0,0,0,0,14,16,11,1,0,0,0,0,13,14,0,0,0,0,0,0,12,5,0,0,0,7 +0,0,2,15,13,3,0,0,0,0,2,16,16,6,0,0,0,0,3,16,16,5,0,0,0,0,4,16,16,0,0,0,0,0,7,16,13,0,0,0,0,0,9,16,7,0,0,0,0,0,11,16,9,0,0,0,0,0,4,13,12,1,0,0,1 +0,0,6,15,16,16,7,0,0,0,8,13,15,16,7,0,0,0,1,15,14,8,0,0,0,0,2,16,16,13,2,0,0,0,0,3,11,16,5,0,0,0,0,0,7,16,4,0,0,0,1,10,15,11,0,0,0,0,6,16,9,0,0,0,3 +0,0,7,16,15,4,0,0,0,0,14,6,5,15,2,0,0,1,16,0,6,15,0,0,0,2,16,10,16,8,0,0,0,0,9,16,13,0,0,0,0,0,10,12,15,7,0,0,0,1,16,3,5,13,0,0,0,0,9,15,14,7,0,0,8 +0,0,4,14,4,0,0,0,0,0,13,10,1,0,0,0,0,6,15,1,0,0,0,0,0,7,12,0,0,0,0,0,0,8,7,10,16,14,0,0,0,5,14,16,14,16,7,0,0,2,16,16,12,16,8,0,0,0,6,15,14,8,1,0,6 +0,0,10,16,12,0,0,0,0,7,16,13,16,1,0,0,0,6,5,6,15,2,0,0,0,0,0,10,12,0,0,0,0,0,0,14,9,0,0,0,0,0,6,15,5,0,0,0,0,0,16,15,16,12,3,0,0,0,12,16,15,12,5,0,2 +0,0,2,8,9,12,12,0,0,0,10,16,15,10,4,0,0,0,13,14,1,0,0,0,0,3,16,12,2,0,0,0,0,0,3,10,15,12,0,0,0,0,0,0,8,16,0,0,0,0,0,8,14,6,0,0,0,0,0,13,4,0,0,0,5 +0,1,7,12,14,5,0,0,0,3,13,8,14,10,0,0,0,0,0,3,16,2,0,0,0,0,0,13,12,3,0,0,0,0,0,8,13,16,4,0,0,0,0,0,4,14,3,0,0,0,1,12,16,9,0,0,0,0,9,13,5,0,0,0,3 +0,0,1,15,13,2,0,0,0,0,8,16,13,9,0,0,0,0,9,11,2,12,2,0,0,5,16,7,0,9,6,0,0,4,16,8,0,9,9,0,0,1,15,4,0,13,8,0,0,0,10,14,12,16,5,0,0,0,1,13,15,8,0,0,0 +0,0,8,16,14,1,0,0,0,0,6,10,16,8,0,0,0,0,0,0,14,7,0,0,0,0,0,1,16,9,2,0,0,4,15,16,16,16,10,0,0,1,8,15,11,4,1,0,0,0,7,15,1,0,0,0,0,0,11,11,0,0,0,0,7 +0,0,0,7,16,11,0,0,0,0,9,16,16,16,16,7,0,0,10,16,16,12,16,4,0,0,3,8,4,13,9,0,0,0,0,0,7,14,0,0,0,0,0,0,15,4,0,0,0,0,0,5,13,0,0,0,0,0,0,8,16,1,0,0,9 +0,0,6,16,0,0,0,0,0,0,13,7,0,0,0,0,0,2,16,0,0,0,0,0,0,0,16,3,0,0,0,0,0,4,15,10,10,4,0,0,0,5,16,14,8,13,8,0,0,2,16,13,11,15,11,0,0,0,5,12,13,7,0,0,6 +0,0,7,16,14,0,0,0,0,0,11,16,16,0,0,0,0,0,1,10,15,0,0,0,0,0,0,13,11,0,0,0,0,0,2,16,3,0,0,0,0,0,6,15,0,2,0,0,0,0,10,16,16,14,0,0,0,0,6,16,16,9,0,0,2 +0,0,4,15,16,5,0,0,0,0,7,16,16,13,0,0,0,0,15,16,16,9,0,0,0,0,13,16,16,7,0,0,0,2,14,16,16,7,0,0,0,0,10,16,16,5,0,0,0,0,6,16,16,10,2,0,0,0,2,10,14,3,0,0,1 +0,0,4,14,1,0,0,0,0,0,12,10,0,0,0,0,0,0,15,5,0,0,0,0,0,3,16,2,8,2,0,0,0,4,16,16,13,15,2,0,0,4,16,12,0,8,9,0,0,1,13,13,8,14,8,0,0,0,3,12,12,8,0,0,6 +0,0,2,11,15,16,10,0,0,0,13,14,10,15,11,0,0,0,11,2,1,16,3,0,0,0,0,0,8,15,2,0,0,0,0,11,16,16,10,0,0,0,1,16,13,4,1,0,0,0,0,14,6,0,0,0,0,0,2,16,2,0,0,0,7 
+0,0,0,3,14,8,0,0,0,0,0,13,12,1,2,0,0,1,11,13,1,7,14,0,0,5,15,3,0,13,8,0,0,13,14,8,11,16,7,0,0,10,16,16,16,12,3,0,0,0,0,3,16,5,0,0,0,0,0,3,15,0,0,0,4 +0,0,3,12,15,9,1,0,0,2,16,13,9,14,10,0,0,6,16,8,0,11,10,0,0,4,16,16,14,16,4,0,0,0,4,12,16,15,0,0,0,0,1,11,16,16,2,0,0,0,7,16,16,14,0,0,0,0,3,15,13,2,0,0,8 +0,2,11,15,16,10,0,0,0,5,12,9,16,15,0,0,0,0,0,11,15,5,0,0,0,0,11,16,3,0,0,0,0,0,8,15,16,8,0,0,0,0,0,2,9,16,5,0,0,0,7,11,15,14,3,0,0,0,15,13,7,0,0,0,3 +0,0,8,14,14,4,0,0,0,0,6,14,16,11,1,0,0,0,3,15,16,15,3,0,0,0,1,16,16,13,0,0,0,0,2,16,16,8,0,0,0,0,10,16,16,6,0,0,0,1,14,16,15,2,0,0,0,0,12,16,8,0,0,0,1 +0,0,7,16,16,1,0,0,0,0,7,16,16,7,0,0,0,0,9,16,16,9,0,0,0,0,11,16,16,13,0,0,0,0,6,16,16,16,2,0,0,0,2,16,16,15,1,0,0,0,6,16,16,16,2,0,0,0,6,14,15,9,2,0,1 +0,0,0,11,16,2,7,0,0,0,7,16,7,10,13,0,0,2,16,7,4,16,9,0,0,6,16,13,12,16,15,0,0,1,11,16,16,12,3,0,0,0,0,2,16,2,0,0,0,0,0,7,12,0,0,0,0,0,0,11,9,0,0,0,4 +0,0,5,16,11,2,0,0,0,6,16,8,10,14,2,0,0,2,16,16,16,16,8,0,0,0,9,12,7,16,4,0,0,0,0,0,0,16,0,0,0,0,0,0,7,11,0,0,0,0,0,7,15,3,0,0,0,0,8,13,1,0,0,0,9 +0,0,5,13,15,5,0,0,0,0,13,16,12,16,2,0,0,5,16,4,0,15,4,0,0,6,16,1,0,11,8,0,0,8,16,1,1,14,6,0,0,4,16,8,6,16,6,0,0,0,14,16,16,10,0,0,0,0,7,15,14,0,0,0,0 +0,0,8,12,0,0,0,0,0,1,13,8,0,0,0,0,0,4,13,0,0,0,0,0,0,2,14,0,0,0,0,0,0,5,12,7,12,8,0,0,0,5,16,16,16,16,8,0,0,1,14,16,13,15,8,0,0,0,6,15,16,11,0,0,6 +0,5,16,12,0,0,0,0,0,11,16,16,5,0,0,0,0,0,2,16,8,0,0,0,0,0,2,16,8,0,0,0,0,0,7,16,4,0,0,0,0,2,15,11,0,0,0,0,0,10,16,16,16,16,9,0,0,8,16,14,9,8,3,0,2 +0,0,2,14,8,0,0,0,0,0,10,16,16,2,0,0,0,0,9,16,16,1,0,0,0,0,10,16,14,1,0,0,0,0,7,16,16,4,0,0,0,0,5,16,13,2,0,0,0,0,4,16,15,1,0,0,0,0,0,8,15,2,0,0,1 +0,0,3,14,9,1,0,0,0,0,8,16,13,0,0,0,0,0,10,16,15,1,0,0,0,0,8,16,15,0,0,0,0,0,10,16,15,1,0,0,0,0,6,16,16,1,0,0,0,0,3,16,15,2,0,0,0,0,2,12,16,3,0,0,1 +0,0,9,13,13,9,1,0,0,0,16,9,4,14,6,0,0,0,2,0,8,13,0,0,0,0,0,10,16,7,0,0,0,0,0,2,6,15,4,0,0,0,0,0,0,8,8,0,0,6,4,0,2,15,4,0,0,2,10,15,15,7,0,0,3 +0,0,10,16,16,15,5,0,0,0,5,5,7,15,10,0,0,0,0,0,1,16,7,0,0,0,6,12,13,16,3,0,0,0,7,14,16,13,7,0,0,0,0,13,11,0,0,0,0,0,6,16,2,0,0,0,0,0,13,11,0,0,0,0,7 +0,0,7,16,16,16,5,0,0,0,3,6,4,13,14,0,0,0,0,0,0,11,12,0,0,0,0,0,3,15,3,0,0,0,7,16,16,16,9,0,0,0,2,10,14,4,1,0,0,0,2,16,6,0,0,0,0,0,9,14,0,0,0,0,7 +0,0,4,13,15,8,0,0,0,2,13,4,0,12,0,0,0,6,9,0,2,10,0,0,0,3,13,1,4,15,5,0,0,0,5,14,14,3,0,0,0,0,3,16,13,0,0,0,0,0,8,6,14,4,0,0,0,0,6,13,15,3,0,0,8 +0,0,5,12,4,0,0,0,0,2,16,14,13,2,0,0,0,0,16,7,14,15,0,0,0,0,8,15,16,16,3,0,0,0,0,0,0,13,6,0,0,0,0,0,0,10,9,0,0,0,3,4,7,16,8,0,0,0,9,14,12,8,1,0,9 +0,0,15,16,7,0,0,0,0,3,16,6,16,3,0,0,0,0,12,6,12,9,0,0,0,0,0,0,12,7,0,0,0,0,0,0,15,4,0,0,0,0,0,6,15,2,0,0,0,1,11,15,13,2,8,1,0,0,13,14,15,16,16,3,2 +0,0,0,0,12,13,1,0,0,0,0,2,16,16,3,0,0,0,0,5,16,16,4,0,0,0,0,12,16,16,5,0,0,0,5,16,16,16,4,0,0,3,15,14,15,16,3,0,0,3,8,2,13,16,0,0,0,0,0,0,12,11,1,0,1 +0,6,15,16,10,0,0,0,0,15,13,9,16,2,0,0,0,7,9,0,14,7,0,0,0,0,0,0,12,8,0,0,0,0,0,2,16,5,0,0,0,0,0,11,15,1,0,0,0,1,11,16,12,2,0,0,0,6,16,16,16,16,14,0,2 +0,0,13,16,5,0,0,0,0,0,16,7,15,5,0,0,0,0,15,3,11,9,0,0,0,0,7,6,9,11,0,0,0,0,0,0,9,11,0,0,0,0,0,0,12,7,0,0,0,0,7,9,16,3,0,0,0,0,11,16,16,16,16,8,2 +0,1,10,16,12,1,0,0,0,8,15,5,12,11,0,0,0,11,12,3,13,16,3,0,0,4,14,16,13,14,9,0,0,0,0,4,0,8,13,0,0,0,1,0,0,3,16,1,0,2,15,1,0,8,15,1,0,0,9,16,16,16,6,0,9 +0,1,10,16,16,8,0,0,0,8,13,6,14,8,0,0,0,1,1,10,15,2,0,0,0,0,3,16,15,8,0,0,0,0,0,2,7,15,6,0,0,0,0,0,0,8,12,0,0,0,2,1,2,13,10,0,0,0,11,16,16,11,1,0,3 
+0,0,10,8,0,0,0,0,0,0,13,11,0,0,0,0,0,1,16,7,0,0,0,0,0,4,16,3,0,0,0,0,0,4,16,9,11,10,1,0,0,8,16,16,16,16,7,0,0,4,16,16,16,16,5,0,0,0,8,15,16,12,0,0,6 +0,0,3,14,10,0,0,0,0,0,13,14,15,10,0,0,0,2,16,5,8,16,3,0,0,4,14,0,4,16,1,0,0,4,16,0,3,15,1,0,0,4,16,1,0,12,4,0,0,1,14,11,9,16,3,0,0,0,3,12,12,4,0,0,0 +0,0,11,16,11,0,0,0,0,0,13,12,14,6,0,0,0,0,6,14,10,10,0,0,0,0,0,1,6,14,0,0,0,0,0,0,6,14,0,0,0,0,0,0,9,14,0,0,0,0,9,9,15,12,2,0,0,0,10,16,16,16,16,4,2 +0,0,4,8,0,0,0,0,0,0,13,10,0,0,0,0,0,3,16,5,0,0,0,0,0,2,16,3,0,0,0,0,0,5,16,10,12,7,1,0,0,0,16,16,16,16,8,0,0,0,12,16,13,16,9,0,0,0,3,9,13,10,0,0,6 +0,0,0,2,16,12,0,0,0,0,0,6,16,15,0,0,0,0,0,11,16,11,0,0,0,1,8,16,16,11,0,0,0,9,16,16,16,10,0,0,0,1,8,8,16,8,0,0,0,0,0,4,16,9,0,0,0,0,0,1,16,10,0,0,1 +0,0,11,16,15,3,0,0,0,0,6,8,14,12,0,0,0,0,0,0,7,10,0,0,0,0,0,3,13,9,5,0,0,0,4,16,16,15,8,0,0,0,1,14,11,2,0,0,0,0,1,16,2,0,0,0,0,0,8,12,0,0,0,0,7 +0,2,15,16,13,1,0,0,0,8,16,9,15,8,0,0,0,8,16,0,8,12,0,0,0,0,5,1,7,13,0,0,0,0,0,0,12,10,0,0,0,0,0,4,16,7,0,0,0,5,15,15,16,2,0,0,0,3,11,12,16,16,13,2,2 +0,0,2,15,16,16,10,0,0,0,1,6,4,11,13,0,0,0,0,0,0,9,8,0,0,0,0,4,8,15,3,0,0,0,3,16,16,16,9,0,0,0,0,3,14,0,0,0,0,0,0,13,3,0,0,0,0,0,4,12,0,0,0,0,7 +0,4,16,16,6,0,0,0,0,7,13,8,15,1,0,0,0,0,0,6,16,2,0,0,0,0,3,16,14,1,0,0,0,0,1,11,15,13,1,0,0,0,0,0,3,14,8,0,0,5,6,0,0,7,16,0,0,3,14,16,16,16,10,0,3 +0,0,6,12,14,10,1,0,0,2,13,4,0,10,9,0,0,2,14,0,3,14,6,0,0,1,14,11,14,5,0,0,0,0,9,16,6,0,0,0,0,2,14,10,12,0,0,0,0,2,11,1,14,0,0,0,0,0,11,13,9,0,0,0,8 +0,0,3,15,15,5,0,0,0,0,13,13,10,15,0,0,0,0,12,14,13,16,5,0,0,0,1,8,8,14,6,0,0,0,0,0,0,8,12,0,0,0,1,0,0,3,16,1,0,1,14,2,0,3,16,2,0,0,3,15,16,16,13,1,9 +0,0,12,12,0,0,0,0,0,1,15,11,0,0,0,0,0,6,16,3,0,0,0,0,0,6,16,1,0,0,0,0,0,11,13,3,14,15,3,0,0,9,15,16,13,13,15,0,0,5,16,7,1,11,15,0,0,1,11,16,16,15,6,0,6 +0,0,7,13,0,0,0,0,0,2,15,11,0,0,0,0,0,9,16,2,0,0,0,0,0,7,16,0,0,0,0,0,0,9,16,2,4,4,0,0,0,5,16,16,16,16,12,0,0,3,16,16,9,13,16,0,0,0,5,13,16,16,7,0,6 +0,0,0,0,1,14,4,0,0,0,0,0,2,16,8,0,0,0,0,0,8,16,6,0,0,0,0,0,15,16,5,0,0,0,0,8,16,16,3,0,0,0,6,16,9,16,0,0,0,2,16,8,4,16,0,0,0,2,4,0,2,15,3,0,1 +0,0,1,12,16,16,12,0,0,0,0,7,4,11,13,0,0,0,0,0,0,8,11,0,0,0,0,1,4,12,8,0,0,0,0,12,16,16,8,0,0,0,0,3,12,6,0,0,0,0,0,6,14,0,0,0,0,0,0,15,5,0,0,0,7 +0,0,8,16,16,16,12,3,0,0,2,2,0,5,16,8,0,0,0,0,0,5,15,1,0,0,0,0,1,13,8,0,0,1,8,8,10,16,1,0,0,3,12,15,16,11,3,0,0,0,2,15,7,0,0,0,0,0,11,11,0,0,0,0,7 +0,0,10,14,16,16,8,0,0,0,16,12,5,5,11,0,0,4,15,11,6,0,0,0,0,10,16,16,16,11,0,0,0,5,7,0,5,15,0,0,0,0,0,0,7,14,0,0,0,0,5,4,14,7,0,0,0,0,8,16,14,1,0,0,5 +0,0,8,16,5,0,0,0,0,0,16,9,0,0,0,0,0,6,16,2,0,0,0,0,0,7,16,0,1,0,0,0,0,10,13,9,16,14,3,0,0,8,15,16,9,9,15,0,0,4,16,13,0,9,16,1,0,0,6,16,16,16,10,0,6 +0,0,4,15,15,4,0,0,0,0,9,16,16,9,0,0,0,0,7,16,16,9,0,0,0,0,7,16,16,13,0,0,0,0,10,16,16,10,0,0,0,1,15,16,16,4,0,0,0,0,13,16,16,3,0,0,0,0,4,10,16,4,0,0,1 +0,0,10,12,2,0,0,0,0,4,15,13,13,0,0,0,0,5,12,0,14,2,0,0,0,2,10,0,13,3,0,0,0,0,0,2,14,0,0,0,0,0,0,9,9,0,0,0,0,0,11,16,8,4,2,0,0,0,13,15,16,16,7,0,2 +0,0,3,14,16,16,7,0,0,0,11,9,4,12,15,0,0,0,0,0,0,8,14,0,0,0,0,0,0,12,9,0,0,0,5,11,12,16,5,0,0,0,11,16,16,12,3,0,0,0,0,9,13,1,0,0,0,0,3,16,2,0,0,0,7 +0,0,8,15,8,0,0,0,0,4,16,16,16,5,0,0,0,8,16,2,6,15,0,0,0,3,15,4,4,16,0,0,0,0,1,0,7,15,0,0,0,0,0,3,15,7,0,0,0,2,11,16,15,7,4,0,0,1,13,16,15,12,4,0,2 +0,0,7,14,16,16,7,0,0,0,14,13,8,5,1,0,0,3,16,10,3,0,0,0,0,10,16,16,16,2,0,0,0,3,4,0,11,8,0,0,0,0,0,0,10,8,0,0,0,0,14,5,16,4,0,0,0,0,9,16,14,0,0,0,5 
+0,0,0,6,15,1,0,0,0,0,4,16,3,0,0,0,0,0,12,8,0,0,0,0,0,7,15,1,0,5,9,0,0,9,16,0,6,15,10,0,0,10,16,16,16,14,0,0,0,2,5,7,16,7,0,0,0,0,0,6,15,1,0,0,4 +0,0,8,12,12,1,0,0,0,6,12,2,6,8,0,0,0,5,11,0,3,16,7,0,0,0,14,6,10,10,1,0,0,0,6,16,8,0,0,0,0,0,7,16,5,0,0,0,0,0,12,10,10,0,0,0,0,0,11,14,9,0,0,0,8 +0,0,6,15,8,0,0,0,0,4,16,13,16,2,0,0,0,6,15,0,13,13,0,0,0,8,10,0,7,16,5,0,0,8,8,0,6,16,3,0,0,7,11,0,7,15,0,0,0,0,14,10,13,11,0,0,0,0,6,14,12,2,0,0,0 +0,0,11,16,13,2,0,0,0,0,10,15,11,13,0,0,0,0,3,15,7,15,5,0,0,0,0,1,1,13,6,0,0,0,0,0,0,16,5,0,0,0,0,0,9,16,2,0,0,0,15,16,16,11,0,0,0,0,9,15,13,16,16,5,2 +0,0,3,12,7,0,0,0,0,0,14,11,11,6,0,0,0,4,12,0,2,16,0,0,0,5,9,0,0,10,7,0,0,6,8,0,0,8,8,0,0,3,12,0,0,3,10,0,0,1,13,5,3,13,5,0,0,0,3,13,16,9,1,0,0 +0,0,8,14,11,2,0,0,0,2,16,10,13,9,0,0,0,4,16,4,1,15,5,0,0,2,14,12,11,16,7,0,0,0,6,16,16,6,0,0,0,0,12,16,12,0,0,0,0,0,16,16,15,0,0,0,0,0,8,15,8,0,0,0,8 +0,0,15,8,0,0,0,0,0,2,16,15,2,0,0,0,0,0,16,8,10,0,0,0,0,0,12,8,12,0,0,0,0,0,0,8,8,0,0,0,0,0,0,12,8,0,0,0,0,0,12,16,15,15,5,0,0,0,15,11,6,2,0,0,2 +0,0,4,13,13,8,0,0,0,0,16,1,1,11,2,0,0,4,13,0,0,6,7,0,0,3,14,0,9,14,2,0,0,0,9,15,14,1,0,0,0,0,5,16,9,0,0,0,0,0,8,12,15,2,0,0,0,0,4,15,13,2,0,0,8 +0,0,14,16,11,1,0,0,0,1,12,16,16,6,0,0,0,0,0,4,16,7,0,0,0,0,1,10,16,13,2,0,0,0,5,16,16,16,8,0,0,0,1,16,9,1,0,0,0,0,9,16,1,0,0,0,0,0,13,12,0,0,0,0,7 +0,1,14,16,16,16,5,0,0,4,16,7,4,5,3,0,0,7,16,5,1,0,0,0,0,8,16,16,15,1,0,0,0,0,2,4,15,7,0,0,0,0,0,0,10,13,0,0,0,6,9,1,13,14,0,0,0,1,14,16,15,3,0,0,5 +0,0,2,13,1,0,0,0,0,0,5,15,0,0,0,0,0,0,13,6,0,0,0,0,0,5,14,0,0,0,0,0,0,8,12,3,7,6,1,0,0,7,16,14,8,14,7,0,0,2,15,9,5,12,7,0,0,0,4,9,13,13,1,0,6 +0,2,15,16,15,3,0,0,0,4,16,7,13,13,0,0,0,0,7,12,16,8,0,0,0,0,7,16,15,6,0,0,0,0,0,4,11,16,4,0,0,1,0,0,0,14,11,0,0,9,9,0,6,14,9,0,0,3,14,16,16,13,1,0,3 +0,0,8,16,16,9,0,0,0,0,15,9,6,14,2,0,0,0,16,5,1,16,8,0,0,0,9,16,16,16,11,0,0,0,0,6,7,10,12,0,0,1,3,0,0,8,14,0,0,4,13,5,0,11,12,0,0,0,9,16,16,16,6,0,9 +0,0,0,8,14,0,0,0,0,0,5,16,3,0,0,0,0,0,15,9,0,0,2,0,0,8,15,1,0,11,14,0,0,11,13,4,7,16,5,0,0,8,16,16,16,13,0,0,0,0,0,5,16,3,0,0,0,0,0,10,14,0,0,0,4 +0,0,0,15,10,0,0,0,0,0,4,16,16,7,0,0,0,0,3,16,16,10,0,0,0,0,3,16,16,14,0,0,0,0,4,16,16,16,2,0,0,0,10,16,16,16,5,0,0,0,9,16,16,16,9,0,0,0,1,8,3,7,16,2,1 +0,0,3,13,0,0,0,0,0,0,12,6,0,0,0,0,0,2,16,1,0,0,0,0,0,5,12,0,0,0,0,0,0,8,10,5,11,7,0,0,0,6,16,16,9,12,8,0,0,0,14,9,4,11,8,0,0,0,3,12,13,9,1,0,6 +0,1,13,16,16,6,0,0,0,3,13,6,12,13,0,0,0,0,0,5,15,7,0,0,0,0,2,16,15,2,0,0,0,0,0,4,14,14,1,0,0,0,0,0,3,15,5,0,0,1,10,1,2,13,7,0,0,1,13,16,16,13,1,0,3 +0,0,1,14,15,3,0,0,0,0,0,15,16,9,0,0,0,0,0,15,16,7,0,0,0,0,2,15,16,5,0,0,0,0,4,16,15,1,0,0,0,0,7,16,10,0,0,0,0,0,8,16,8,0,0,0,0,0,2,12,16,5,0,0,1 +0,0,2,10,12,13,6,0,0,0,15,14,8,7,0,0,0,4,16,0,0,0,0,0,0,7,16,16,15,2,0,0,0,3,14,8,15,6,0,0,0,0,0,0,12,7,0,0,0,0,0,6,14,4,0,0,0,0,0,13,12,0,0,0,5 +0,0,6,16,6,0,0,0,0,2,15,14,16,5,0,0,0,6,15,1,9,14,0,0,0,4,16,0,1,16,5,0,0,7,13,0,1,16,4,0,0,5,15,2,0,14,5,0,0,0,14,10,12,13,1,0,0,0,4,14,14,3,0,0,0 +0,0,7,16,16,4,0,0,0,5,16,7,8,13,1,0,0,10,13,0,6,16,7,0,0,5,16,12,15,16,9,0,0,0,5,8,3,11,12,0,0,0,0,0,0,9,12,0,0,0,1,4,0,13,12,0,0,0,9,16,16,13,3,0,9 +0,0,8,16,16,12,4,0,0,0,12,10,8,8,4,0,0,4,16,13,3,0,0,0,0,6,16,16,15,1,0,0,0,0,0,0,12,6,0,0,0,0,0,0,9,6,0,0,0,0,4,7,16,3,0,0,0,0,4,16,6,0,0,0,5 +0,0,15,15,4,0,0,0,0,0,16,13,15,2,0,0,0,0,9,15,12,8,0,0,0,0,0,2,11,10,0,0,0,0,0,0,13,8,0,0,0,0,0,1,14,5,0,0,0,0,16,16,16,6,1,0,0,1,11,15,12,15,15,4,2 +0,0,5,13,12,1,0,0,0,2,15,5,6,7,0,0,0,6,9,0,0,16,4,0,0,6,10,0,0,14,6,0,0,0,9,13,14,12,9,0,0,0,0,0,0,0,12,0,0,0,3,0,0,3,14,0,0,0,5,12,13,14,5,0,9 
+0,0,1,10,10,1,0,0,0,0,7,16,16,5,0,0,0,0,13,16,16,3,0,0,0,0,14,16,16,8,0,0,0,0,14,16,7,2,0,0,0,0,10,16,16,6,0,0,0,0,7,16,16,5,0,0,0,0,2,10,12,5,0,0,1 +0,1,6,9,15,16,14,0,0,4,16,16,10,7,2,0,0,8,16,13,3,0,0,0,0,10,14,14,14,0,0,0,0,1,1,1,16,0,0,0,0,0,0,1,16,0,0,0,0,0,3,10,14,0,0,0,0,0,15,14,3,0,0,0,5 +0,0,3,14,16,6,0,0,0,1,13,5,4,13,0,0,0,5,12,0,0,9,4,0,0,5,9,0,0,5,8,0,0,8,7,0,0,3,8,0,0,6,9,0,0,2,9,0,0,0,15,3,1,9,8,0,0,0,5,15,14,9,0,0,0 +0,0,5,15,16,16,10,0,0,0,11,14,8,4,1,0,0,4,16,16,15,3,0,0,0,8,15,9,13,13,1,0,0,0,2,0,3,16,4,0,0,0,0,0,3,16,2,0,0,0,0,3,13,9,0,0,0,0,5,16,12,1,0,0,5 +0,0,6,15,16,15,2,0,0,0,4,6,6,15,7,0,0,0,0,0,0,16,3,0,0,0,2,8,10,16,4,0,0,0,7,12,16,13,5,0,0,0,0,5,14,0,0,0,0,0,1,15,5,0,0,0,0,0,8,12,0,0,0,0,7 +0,0,4,14,5,0,0,0,0,0,13,7,11,4,2,0,0,2,13,0,6,14,4,0,0,3,9,0,3,14,7,0,0,5,8,0,4,11,8,0,0,4,8,0,0,1,12,0,0,1,14,2,0,10,7,0,0,0,3,15,16,13,2,0,0 +0,1,12,16,12,0,0,0,0,11,13,5,16,3,8,0,0,10,13,4,16,15,3,0,0,2,16,12,15,6,0,0,0,0,14,14,2,0,0,0,0,2,16,16,3,0,0,0,0,4,15,12,11,0,0,0,0,0,14,16,13,0,0,0,8 +0,0,7,14,15,13,10,0,0,1,14,8,3,0,2,0,0,4,16,10,15,6,0,0,0,4,16,12,11,16,3,0,0,2,7,0,0,12,6,0,0,0,0,0,1,14,3,0,0,0,9,9,11,12,0,0,0,0,8,13,8,0,0,0,5 +0,0,0,11,9,0,0,0,0,0,6,16,8,0,0,0,0,0,13,12,0,0,0,0,0,0,16,6,4,2,0,0,0,0,16,16,15,15,4,0,0,0,15,6,0,0,15,1,0,0,9,13,0,6,14,5,0,0,0,9,16,16,10,0,6 +0,0,3,13,9,2,0,0,0,0,5,16,16,7,0,0,0,0,10,16,16,0,0,0,0,1,15,16,14,1,0,0,0,6,16,16,14,0,0,0,0,0,12,16,10,0,0,0,0,0,7,16,14,0,0,0,0,0,3,15,16,12,0,0,1 +0,0,0,1,16,9,0,0,0,0,0,6,16,11,0,0,0,0,0,11,16,6,0,0,0,1,7,15,14,1,0,0,0,6,14,16,15,0,0,0,0,0,1,13,15,0,0,0,0,0,0,14,16,4,0,0,0,0,0,0,12,15,6,0,1 +0,0,9,14,15,4,0,0,0,0,16,9,9,16,0,0,0,0,0,1,13,12,0,0,0,0,2,15,16,3,0,0,0,0,2,12,12,12,1,0,0,0,0,0,0,9,9,0,0,0,14,9,5,13,12,0,0,0,9,16,16,14,1,0,3 +0,0,7,14,15,2,0,0,0,0,8,4,4,15,0,0,0,0,0,0,8,11,0,0,0,0,2,16,16,4,0,0,0,0,1,8,13,13,0,0,0,0,0,0,0,7,10,0,0,2,14,2,3,12,9,0,0,1,12,16,16,10,1,0,3 +0,0,0,1,13,13,0,0,0,0,5,15,15,16,0,0,0,3,15,8,7,14,0,0,0,11,15,9,15,16,9,0,0,8,16,14,15,15,5,0,0,0,0,0,12,8,0,0,0,0,0,0,12,6,0,0,0,0,0,0,12,4,0,0,4 +0,0,0,12,4,0,0,0,0,0,6,16,10,0,0,0,0,0,14,11,0,0,0,0,0,0,13,7,0,0,0,0,0,0,14,16,15,9,1,0,0,0,12,11,4,10,13,0,0,0,4,14,4,10,16,3,0,0,0,9,16,14,9,0,6 +0,0,0,2,12,6,14,0,0,0,1,15,13,8,10,0,0,2,15,7,0,15,4,0,0,9,15,7,7,16,5,0,0,8,16,16,16,16,10,0,0,1,4,4,13,8,0,0,0,0,0,3,15,1,0,0,0,0,0,3,14,0,0,0,4 +0,0,0,4,13,12,1,0,0,0,3,16,9,15,14,2,0,0,14,8,2,10,14,0,0,3,16,12,13,16,7,0,0,0,8,3,2,16,2,0,0,0,0,0,8,12,0,0,0,0,0,0,14,6,0,0,0,0,0,3,14,2,0,0,9 +0,0,4,14,10,0,0,0,0,3,16,9,8,7,0,0,0,8,16,0,2,11,0,0,0,6,14,0,0,6,6,0,0,5,14,0,0,3,9,0,0,0,15,1,0,5,13,0,0,0,12,9,2,13,10,0,0,0,2,14,15,10,1,0,0 +0,0,8,13,12,2,0,0,0,4,16,3,2,13,0,0,0,10,13,5,12,12,0,0,0,5,14,16,16,3,0,0,0,0,8,15,9,9,0,0,0,0,12,6,0,8,5,0,0,0,11,8,4,12,6,0,0,0,5,12,13,10,0,0,8 +0,0,6,13,12,2,0,0,0,7,14,4,9,10,0,0,0,8,3,0,10,7,0,0,0,0,2,9,16,2,0,0,0,0,10,12,12,14,1,0,0,0,0,0,0,6,12,0,0,0,11,5,4,12,10,0,0,0,6,13,13,10,1,0,3 +0,0,2,8,11,16,16,14,0,0,10,15,10,11,16,7,0,0,0,0,0,13,9,0,0,0,4,8,11,16,4,0,0,0,13,16,16,11,5,0,0,0,0,4,16,1,0,0,0,0,0,12,11,0,0,0,0,0,0,15,5,0,0,0,7 +0,0,5,13,12,3,0,0,0,0,14,9,8,13,0,0,0,3,16,4,0,13,6,0,0,4,14,1,0,8,9,0,0,1,15,0,0,4,8,0,0,2,13,1,2,13,4,0,0,0,15,13,16,11,0,0,0,0,8,11,5,0,0,0,0 +0,0,0,12,13,1,0,0,0,0,7,16,10,1,0,0,0,0,11,13,0,0,0,0,0,0,13,10,4,1,0,0,0,0,13,16,16,13,3,0,0,2,16,14,6,10,15,2,0,2,13,14,8,13,15,1,0,0,1,13,16,14,3,0,6 +0,0,0,7,13,14,4,0,0,0,15,13,4,0,1,0,0,1,16,2,0,0,0,0,0,5,16,9,14,13,1,0,0,7,15,8,1,11,4,0,0,0,0,0,0,13,4,0,0,0,0,5,7,12,0,0,0,0,0,8,14,3,0,0,5 
+0,0,7,11,4,0,0,0,0,0,15,16,15,5,0,0,0,3,16,5,3,10,2,0,0,4,16,2,0,7,7,0,0,8,10,0,0,6,8,0,0,5,9,0,0,10,6,0,0,0,15,4,10,14,2,0,0,0,7,16,10,2,0,0,0 +0,0,2,12,6,0,0,0,0,1,12,10,11,3,0,0,0,1,16,7,3,13,1,0,0,2,16,1,0,9,5,0,0,2,16,2,0,2,10,0,0,0,14,8,0,7,13,0,0,0,10,7,7,16,5,0,0,0,2,13,12,6,0,0,0 +0,0,3,10,13,16,7,0,0,0,10,10,8,12,13,0,0,0,0,0,0,10,10,0,0,5,12,12,12,15,8,0,0,5,12,12,15,11,1,0,0,0,0,5,15,3,0,0,0,0,0,11,9,0,0,0,0,0,3,16,2,0,0,0,7 +0,0,6,12,16,16,13,0,0,0,13,12,8,14,16,2,0,0,1,5,7,15,11,0,0,0,13,16,16,16,11,0,0,0,3,9,16,3,0,0,0,0,0,11,13,0,0,0,0,0,5,16,7,0,0,0,0,0,9,15,2,0,0,0,7 +0,0,4,16,13,16,13,0,0,0,12,11,5,4,2,0,0,1,16,1,9,8,2,0,0,8,16,14,10,13,6,0,0,1,4,0,0,9,6,0,0,0,0,0,5,11,1,0,0,0,1,6,14,4,0,0,0,0,6,15,5,0,0,0,5 +0,0,0,2,12,13,8,1,0,0,3,12,5,14,16,2,0,0,11,1,0,12,14,0,0,2,11,3,7,14,3,0,0,3,15,11,5,10,0,0,0,0,0,0,9,6,0,0,0,0,0,1,14,2,0,0,0,0,0,0,14,1,0,0,9 +0,0,1,16,12,2,0,0,0,0,4,15,16,5,0,0,0,0,8,16,13,0,0,0,0,1,12,16,11,0,0,0,0,5,16,16,11,0,0,0,0,0,9,16,9,0,0,0,0,0,4,16,12,1,0,0,0,0,1,14,16,10,0,0,1 +0,0,0,1,13,12,3,0,0,0,6,13,11,12,10,0,0,6,16,11,4,13,7,0,0,8,16,16,16,16,10,0,0,0,4,4,7,16,5,0,0,0,0,0,12,8,0,0,0,0,0,0,13,7,0,0,0,0,0,0,15,4,0,0,4 +0,4,15,16,7,0,0,0,0,4,16,11,16,2,0,0,0,2,6,4,16,3,0,0,0,0,0,1,16,4,0,0,0,0,0,8,14,1,0,0,0,0,2,15,10,0,0,0,0,3,15,16,13,12,11,0,0,5,16,16,15,12,12,0,2 +0,0,3,11,16,15,5,0,0,0,10,6,4,12,10,0,0,0,0,0,0,12,8,0,0,4,12,10,12,16,3,0,0,2,8,5,16,9,0,0,0,0,0,6,11,0,0,0,0,0,0,13,6,0,0,0,0,0,0,15,1,0,0,0,7 +0,0,7,15,6,0,0,0,0,1,15,12,15,0,0,0,0,3,15,1,12,2,0,0,0,0,11,0,13,3,0,0,0,0,0,1,14,1,0,0,0,0,0,7,12,0,0,0,0,0,7,16,16,16,14,3,0,0,6,12,8,8,8,3,2 +0,1,8,13,16,8,0,0,0,11,13,6,8,15,1,0,0,2,1,9,14,5,0,0,0,0,14,16,14,3,0,0,0,0,8,4,10,15,1,0,0,0,0,0,0,13,7,0,0,2,15,8,9,15,1,0,0,1,10,13,13,4,0,0,3 +0,0,0,0,8,16,11,0,0,0,0,13,9,8,14,1,0,0,10,8,0,5,16,4,0,2,15,8,14,14,12,0,0,2,12,9,2,10,6,0,0,0,0,0,2,14,0,0,0,0,0,0,9,7,0,0,0,0,0,0,13,1,0,0,9 +0,0,9,14,12,8,0,0,0,8,13,0,2,16,2,0,0,11,14,14,14,6,0,0,0,1,14,16,13,1,0,0,0,3,15,4,6,12,0,0,0,1,15,0,0,8,9,0,0,3,16,1,3,13,6,0,0,0,8,16,13,6,0,0,8 +0,0,7,16,15,11,5,0,0,0,14,11,8,8,5,0,0,1,16,2,8,5,0,0,0,8,15,15,15,15,3,0,0,8,15,5,0,12,4,0,0,0,0,0,2,15,1,0,0,0,6,8,13,8,0,0,0,0,8,15,10,0,0,0,5 +0,0,0,2,15,2,0,0,0,0,0,12,9,0,0,0,0,0,5,15,2,2,0,0,0,1,13,6,0,14,3,0,0,5,15,0,8,16,1,0,0,9,16,16,16,16,3,0,0,0,4,4,16,7,0,0,0,0,0,3,15,4,0,0,4 +0,0,0,7,15,14,2,0,0,4,13,9,8,16,4,0,0,7,16,6,3,16,3,0,0,0,7,16,16,8,0,0,0,0,1,16,14,14,0,0,0,0,7,11,0,15,8,0,0,0,6,15,5,15,5,0,0,0,0,9,14,7,0,0,8 +0,0,8,14,9,0,0,0,0,6,15,12,16,3,0,0,0,1,3,0,8,4,0,0,0,0,0,0,9,5,0,0,0,0,0,1,15,3,0,0,0,0,0,12,11,0,0,0,0,0,10,16,13,12,6,0,0,0,11,12,12,9,4,0,2 +0,0,0,6,12,12,0,0,0,0,0,15,16,13,0,0,0,0,7,16,16,12,0,0,0,3,16,16,16,11,0,0,0,3,12,16,16,9,0,0,0,0,0,11,16,9,0,0,0,0,0,13,16,10,0,0,0,0,0,9,13,11,0,0,1 +0,0,4,12,12,6,0,0,0,3,16,9,8,15,0,0,0,9,16,3,0,8,6,0,0,6,14,0,0,6,8,0,0,4,11,0,0,9,5,0,0,4,10,0,0,15,2,0,0,0,15,5,9,14,0,0,0,0,7,14,13,2,0,0,0 +0,0,0,1,15,2,0,0,0,0,0,8,16,0,0,0,0,0,2,16,6,6,6,0,0,0,11,13,2,14,9,0,0,6,16,9,10,16,10,0,0,15,16,16,16,16,5,0,0,2,4,4,16,9,0,0,0,0,0,2,16,9,0,0,4 +0,0,7,12,8,0,0,0,0,6,16,11,15,3,0,0,0,10,8,0,11,5,0,0,0,0,0,0,11,3,0,0,0,0,0,0,14,1,0,0,0,0,0,7,13,0,0,0,0,0,8,16,16,14,9,0,0,0,11,13,11,8,9,0,2 +0,0,1,8,13,12,1,0,0,1,15,8,4,14,6,0,0,6,10,0,1,10,8,0,0,7,12,5,11,16,7,0,0,2,11,12,10,16,2,0,0,0,0,0,9,10,0,0,0,0,0,4,16,1,0,0,0,0,0,12,7,0,0,0,9 +0,2,5,7,10,13,4,0,0,3,16,13,12,11,3,0,0,0,16,1,0,0,0,0,0,6,16,16,15,2,0,0,0,3,8,4,10,9,0,0,0,0,0,0,8,11,0,0,0,0,1,9,15,3,0,0,0,2,16,12,2,0,0,0,5 
+0,1,10,16,11,0,0,0,0,5,16,9,14,6,0,0,0,1,3,0,11,8,0,0,0,0,0,0,11,8,0,0,0,0,0,4,15,0,0,0,0,0,0,9,11,0,0,0,0,0,11,16,10,12,8,0,0,0,11,12,12,12,14,0,2 +0,0,0,4,13,0,0,0,0,0,0,13,9,0,0,0,0,0,4,15,2,2,4,0,0,1,15,6,0,9,10,0,0,10,16,8,9,16,4,0,0,3,12,12,16,12,0,0,0,0,0,2,16,6,0,0,0,0,0,5,16,3,0,0,4 +0,0,6,9,12,12,8,0,0,2,16,12,8,8,6,0,0,0,15,8,4,0,0,0,0,0,14,16,16,13,1,0,0,0,9,7,5,16,7,0,0,0,0,0,2,16,5,0,0,0,0,6,15,9,0,0,0,0,15,15,7,0,0,0,5 +0,0,2,14,16,15,9,0,0,1,14,11,5,9,14,0,0,4,14,0,3,12,11,0,0,6,13,9,15,15,1,0,0,2,13,15,15,11,0,0,0,0,0,4,15,0,0,0,0,0,1,13,11,0,0,0,0,0,3,16,5,0,0,0,9 +0,0,4,14,12,7,1,0,0,4,13,9,7,16,4,0,0,12,12,0,9,15,1,0,0,3,16,10,15,6,0,0,0,0,5,16,16,3,0,0,0,0,5,13,5,15,3,0,0,0,8,8,7,16,4,0,0,0,3,15,16,9,0,0,8 +0,0,1,9,15,13,1,0,0,0,11,12,6,16,4,0,0,10,15,1,7,14,0,0,0,3,15,14,15,3,0,0,0,0,1,12,15,3,0,0,0,0,0,12,8,13,1,0,0,0,0,12,6,16,2,0,0,0,0,9,15,6,0,0,8 +0,0,4,15,14,7,0,0,0,2,14,10,8,15,2,0,0,8,16,2,1,16,5,0,0,1,14,14,14,12,0,0,0,0,1,15,16,7,0,0,0,0,7,14,8,15,2,0,0,0,8,9,3,14,7,0,0,0,5,16,16,11,1,0,8 +0,0,6,10,11,15,4,0,0,0,10,15,12,12,3,0,0,0,14,6,0,0,0,0,0,5,16,9,9,7,0,0,0,2,12,12,12,16,7,0,0,0,0,0,1,13,7,0,0,0,0,7,13,13,0,0,0,0,9,12,10,1,0,0,5 +0,0,1,9,16,13,1,0,0,0,13,9,5,14,8,0,0,3,14,0,0,13,12,0,0,2,15,12,16,16,3,0,0,0,3,8,11,12,0,0,0,0,0,3,14,2,0,0,0,0,0,12,9,0,0,0,0,0,1,15,3,0,0,0,9 +0,0,0,8,16,10,0,0,0,0,1,14,16,14,0,0,0,1,10,16,16,5,0,0,0,6,16,16,16,3,0,0,0,1,12,16,16,0,0,0,0,0,4,16,15,0,0,0,0,0,4,16,16,4,0,0,0,0,0,11,12,11,0,0,1 +0,0,3,14,9,0,0,0,0,1,16,15,13,10,0,0,0,7,16,2,1,15,2,0,0,7,16,2,0,9,7,0,0,5,16,0,0,9,8,0,0,0,16,4,0,9,11,0,0,0,13,11,9,16,5,0,0,0,3,14,16,9,1,0,0 +0,0,13,12,1,0,0,0,0,11,16,14,12,0,0,0,0,5,8,1,16,1,0,0,0,0,0,0,16,3,0,0,0,0,0,8,16,0,0,0,0,0,3,14,9,0,0,0,0,0,15,16,13,13,11,0,0,0,11,12,12,14,16,2,2 +0,0,0,9,15,7,0,0,0,0,7,16,10,6,0,0,0,0,14,8,0,0,0,0,0,1,16,5,11,8,0,0,0,3,16,16,10,15,9,0,0,1,15,9,0,4,13,0,0,0,11,14,6,11,15,0,0,0,0,10,14,13,8,0,6 +0,0,9,12,14,16,8,0,0,1,12,12,10,14,11,0,0,0,0,0,3,16,2,0,0,1,10,8,12,16,7,0,0,2,16,16,16,16,10,0,0,0,2,14,10,5,1,0,0,0,6,16,2,0,0,0,0,0,11,11,0,0,0,0,7 +0,0,4,14,16,11,1,0,0,0,15,10,4,13,11,0,0,3,16,3,4,12,10,0,0,1,13,16,16,16,7,0,0,0,0,0,5,16,1,0,0,0,0,1,13,9,0,0,0,0,2,13,14,0,0,0,0,0,5,16,7,0,0,0,9 +0,0,10,16,16,9,0,0,0,1,6,4,5,15,4,0,0,0,0,0,3,15,2,0,0,1,4,0,12,8,0,0,0,3,16,16,16,16,8,0,0,0,2,13,7,0,0,0,0,0,7,12,0,0,0,0,0,0,12,6,0,0,0,0,7 +0,0,1,14,6,0,0,0,0,0,9,15,3,0,0,0,0,0,12,6,0,0,0,0,0,0,16,3,0,0,0,0,0,3,16,16,16,14,4,0,0,1,16,7,4,6,13,0,0,0,10,10,0,4,14,0,0,0,1,11,14,16,7,0,6 +0,0,0,3,14,12,0,0,0,0,1,11,16,14,0,0,0,0,8,16,16,14,0,0,0,6,16,16,16,10,0,0,0,1,5,16,16,8,0,0,0,0,0,15,16,9,0,0,0,0,0,10,16,14,5,0,0,0,0,2,15,15,7,0,1 +0,1,11,13,7,0,0,0,0,6,15,13,15,4,0,0,0,2,2,0,9,9,0,0,0,0,0,0,8,8,0,0,0,0,0,0,13,5,0,0,0,0,0,8,12,0,0,0,0,0,8,16,10,9,3,0,0,0,11,15,15,16,7,0,2 +0,0,4,10,12,15,16,5,0,0,15,14,12,13,16,7,0,0,3,0,0,8,15,2,0,0,1,8,8,14,9,0,0,0,5,16,16,16,4,0,0,0,0,7,16,4,0,0,0,0,0,12,14,0,0,0,0,0,3,16,5,0,0,0,7 +0,2,8,12,12,15,8,0,0,4,16,12,7,5,2,0,0,4,15,0,0,0,0,0,0,7,15,9,5,0,0,0,0,1,8,12,15,5,0,0,0,0,0,0,11,12,0,0,0,2,8,9,16,4,0,0,0,3,14,14,6,0,0,0,5 +0,0,1,11,15,1,0,0,0,0,11,15,5,0,0,0,0,3,16,6,0,0,0,0,0,6,7,0,5,3,0,0,0,8,10,4,13,15,2,0,0,4,16,4,0,8,10,0,0,0,13,10,5,14,9,0,0,0,1,10,13,13,4,0,6 +0,0,2,11,11,2,0,0,0,0,13,13,11,12,0,0,0,7,13,1,0,12,1,0,0,7,9,0,0,7,5,0,0,6,10,0,0,7,8,0,0,2,13,0,0,10,7,0,0,0,10,10,6,16,3,0,0,0,2,11,15,9,0,0,0 
+0,0,5,12,12,3,0,0,0,0,9,16,16,12,0,0,0,0,9,16,16,15,0,0,0,0,11,16,16,10,0,0,0,0,11,16,16,9,0,0,0,0,13,16,16,3,0,0,0,0,12,16,15,2,0,0,0,0,2,9,11,2,0,0,1 +0,0,7,16,16,13,0,0,0,0,13,11,11,16,4,0,0,0,0,0,5,16,3,0,0,0,0,0,9,15,0,0,0,1,10,16,16,15,3,0,0,3,12,16,15,12,4,0,0,0,3,16,7,0,0,0,0,0,11,13,0,0,0,0,7 +0,0,6,12,13,7,0,0,0,0,13,13,8,7,0,0,0,2,16,1,0,0,0,0,0,5,16,12,12,7,0,0,0,2,9,8,8,15,1,0,0,0,0,0,0,13,4,0,0,0,4,4,12,14,1,0,0,0,7,13,11,3,0,0,5 +0,0,5,15,5,0,0,0,0,0,13,11,12,0,0,0,0,0,14,7,2,4,4,0,0,0,9,15,14,13,5,0,0,1,13,16,13,0,0,0,0,7,14,4,14,9,0,0,0,0,15,4,2,14,7,0,0,0,3,13,16,16,7,0,8 +0,0,2,12,12,3,0,0,0,0,4,16,16,13,0,0,0,0,3,16,16,11,0,0,0,0,5,16,16,11,0,0,0,0,9,16,16,9,0,0,0,0,9,16,16,8,0,0,0,0,7,16,16,10,0,0,0,0,1,11,12,9,0,0,1 +0,0,6,15,12,0,0,0,0,0,13,16,16,13,0,0,0,2,15,16,7,13,4,0,0,5,11,7,0,5,7,0,0,8,7,0,0,7,8,0,0,4,12,0,1,11,9,0,0,2,13,8,12,15,1,0,0,0,5,15,12,3,0,0,0 +0,0,6,15,16,16,9,0,0,2,16,11,8,15,12,0,0,0,2,0,0,14,11,0,0,0,3,8,8,16,7,0,0,3,16,16,16,16,10,0,0,0,6,6,15,4,0,0,0,0,0,15,7,0,0,0,0,0,8,12,0,0,0,0,7 +0,0,4,12,12,0,0,0,0,2,16,8,12,10,0,0,0,7,10,4,14,14,0,0,0,2,13,14,11,14,0,0,0,0,0,0,0,13,2,0,0,0,0,0,0,6,7,0,0,0,4,2,0,7,8,0,0,0,5,14,16,11,2,0,9 +0,0,0,5,15,5,0,0,0,0,3,15,11,1,0,0,0,1,13,14,1,2,6,0,0,6,16,6,0,12,15,0,0,13,15,0,4,16,6,0,0,13,16,16,16,16,1,0,0,0,8,11,16,8,0,0,0,0,0,7,16,3,0,0,4 +0,3,11,16,7,0,0,0,0,5,12,8,16,0,0,0,0,0,0,10,14,0,0,0,0,0,10,14,3,0,0,0,0,0,10,16,16,15,3,0,0,0,0,0,3,10,8,0,0,0,4,7,10,15,5,0,0,5,13,12,10,3,0,0,3 +0,0,7,16,12,3,0,0,0,0,12,16,16,8,0,0,0,0,15,16,16,1,0,0,0,1,14,16,16,1,0,0,0,1,15,16,16,4,0,0,0,0,13,16,16,3,0,0,0,0,11,16,16,9,0,0,0,0,4,9,14,6,0,0,1 +0,2,11,13,14,11,1,0,0,1,10,5,4,16,4,0,0,0,0,1,10,13,0,0,0,0,0,7,16,1,0,0,0,0,0,5,16,13,2,0,0,0,0,0,3,12,7,0,0,5,8,4,6,15,4,0,0,3,12,14,13,9,0,0,3 +0,1,11,16,14,8,0,0,0,1,16,15,12,15,3,0,0,2,16,10,0,2,0,0,0,5,16,8,2,0,0,0,0,3,15,16,15,3,0,0,0,0,2,4,13,11,0,0,0,0,6,8,15,11,0,0,0,0,13,16,16,5,0,0,5 +0,0,5,15,3,0,0,0,0,0,12,16,14,8,0,0,0,1,16,16,12,16,3,0,0,3,14,1,0,6,7,0,0,6,8,0,0,4,8,0,0,4,12,0,0,10,12,0,0,1,14,10,13,16,5,0,0,0,6,16,14,8,0,0,0 +0,1,9,12,13,7,0,0,0,6,13,8,7,13,0,0,0,0,2,0,5,12,0,0,0,0,0,5,16,5,0,0,0,0,0,6,16,9,0,0,0,0,0,0,3,13,6,0,0,1,7,3,6,12,6,0,0,0,12,13,13,8,1,0,3 +0,0,0,5,13,0,0,0,0,0,0,14,6,0,0,0,0,0,9,13,0,2,0,0,0,1,16,4,5,14,0,0,0,9,14,0,7,10,0,0,0,10,16,12,14,14,0,0,0,2,8,12,15,1,0,0,0,0,0,4,11,0,0,0,4 +0,0,10,16,9,0,0,0,0,2,16,16,16,2,0,0,0,0,8,1,15,8,0,0,0,0,0,0,14,9,0,0,0,0,0,1,16,10,0,0,0,0,0,9,16,16,7,0,0,0,8,16,15,12,15,5,0,0,13,15,1,0,5,6,2 +0,0,0,6,12,0,0,0,0,0,2,16,10,0,0,0,0,0,7,14,1,0,0,0,0,0,11,10,0,0,0,0,0,0,14,11,1,0,0,0,0,0,15,15,15,16,10,1,0,0,8,14,3,7,16,7,0,0,0,8,15,16,12,3,6 +0,0,6,16,16,15,5,0,0,0,8,6,7,16,11,0,0,0,0,0,3,16,9,0,0,0,0,0,9,15,0,0,0,1,8,8,15,14,3,0,0,3,16,16,16,16,4,0,0,0,3,16,8,0,0,0,0,0,8,15,2,0,0,0,7 +0,0,2,13,11,0,0,0,0,0,10,14,2,0,0,0,0,0,12,5,0,0,0,0,0,0,14,4,0,0,0,0,0,0,15,8,8,8,2,0,0,0,14,16,12,12,13,0,0,0,10,10,4,6,16,0,0,0,3,11,15,12,7,0,6 +0,0,5,16,16,10,1,0,0,8,14,14,8,9,1,0,0,14,16,9,1,0,0,0,0,12,16,16,15,3,0,0,0,1,10,12,13,13,0,0,0,0,0,0,4,16,0,0,0,0,1,4,12,14,0,0,0,0,4,15,16,4,0,0,5 +0,0,6,11,7,2,0,0,0,0,15,16,15,12,0,0,0,3,16,7,0,10,3,0,0,4,8,0,0,2,8,0,0,8,5,0,0,5,8,0,0,5,7,0,0,12,5,0,0,0,14,5,13,13,1,0,0,0,7,16,11,0,0,0,0 +0,0,8,7,0,0,0,0,0,0,15,16,16,7,0,0,0,2,16,16,14,14,2,0,0,4,16,0,0,11,6,0,0,3,13,0,0,11,9,0,0,3,15,0,5,16,7,0,0,0,15,12,14,16,3,0,0,0,5,16,14,7,0,0,0 
+0,0,1,13,10,0,0,0,0,0,6,16,4,0,0,0,0,0,10,12,0,0,0,0,0,0,14,10,0,0,0,0,0,0,16,14,12,12,2,0,0,4,16,14,12,13,14,0,0,0,11,14,4,7,16,1,0,0,1,12,16,16,13,0,6 +0,1,8,14,13,2,0,0,0,4,16,6,12,8,0,0,0,3,16,4,3,4,0,0,0,0,9,15,4,11,6,0,0,0,5,16,16,13,6,0,0,4,15,12,16,7,0,0,0,7,14,2,12,11,0,0,0,1,10,16,14,5,0,0,8 +0,0,6,9,15,14,2,0,0,8,13,12,6,4,1,0,0,10,13,8,3,0,0,0,0,2,11,15,16,5,0,0,0,0,0,0,7,12,0,0,0,0,0,0,1,13,0,0,0,0,0,1,9,12,0,0,0,0,8,12,10,1,0,0,5 +0,0,1,11,16,11,0,0,0,0,14,15,8,9,1,0,0,3,15,1,0,0,0,0,0,7,14,8,8,0,0,0,0,4,16,16,16,4,0,0,0,0,0,0,8,8,0,0,0,0,1,6,15,5,0,0,0,0,1,14,9,0,0,0,5 +0,0,6,15,16,13,1,0,0,0,12,12,14,16,1,0,0,0,0,0,13,14,0,0,0,0,4,4,16,13,2,0,0,8,16,16,16,16,10,0,0,5,8,15,14,4,1,0,0,0,3,16,8,0,0,0,0,0,7,16,0,0,0,0,7 +0,2,12,16,13,5,0,0,0,5,12,12,16,8,0,0,0,0,0,4,16,4,0,0,0,0,0,12,14,1,0,0,0,0,0,3,14,16,2,0,0,0,0,0,1,10,8,0,0,0,8,4,6,13,6,0,0,0,12,16,14,9,0,0,3 +0,0,3,14,16,5,0,0,0,2,14,11,14,9,0,0,0,5,16,7,14,15,0,0,0,2,16,16,16,16,6,0,0,0,1,4,2,16,7,0,0,0,0,0,0,15,6,0,0,0,0,5,7,16,2,0,0,0,4,16,16,9,0,0,9 +0,0,2,13,13,2,0,0,0,0,9,16,6,13,0,0,0,1,14,1,0,9,4,0,0,4,9,0,0,6,7,0,0,8,8,0,0,4,8,0,0,4,9,0,0,4,8,0,0,0,15,8,4,12,7,0,0,0,2,10,16,15,2,0,0 +0,0,9,15,16,13,0,0,0,0,10,5,13,14,0,0,0,0,0,0,12,12,0,0,0,0,0,2,16,6,0,0,0,1,12,16,16,13,7,0,0,5,14,16,12,8,3,0,0,0,5,16,2,0,0,0,0,0,11,12,0,0,0,0,7 +0,0,2,12,0,0,0,0,0,0,11,11,0,0,0,0,0,5,12,1,0,0,0,0,0,4,10,0,0,0,0,0,0,5,9,2,11,12,3,0,0,1,15,16,16,14,11,0,0,0,11,13,6,15,8,0,0,0,3,15,14,8,0,0,6 +0,0,0,0,12,3,0,0,0,0,0,2,15,0,0,0,0,0,0,9,9,0,0,0,0,0,3,15,0,3,10,0,0,1,14,3,0,14,6,0,0,10,16,15,13,16,1,0,0,9,10,8,14,11,0,0,0,0,0,0,12,2,0,0,4 +0,0,0,9,8,0,0,0,0,0,7,14,1,0,0,0,0,0,14,3,0,0,0,0,0,3,14,0,0,0,0,0,0,7,10,1,5,3,0,0,0,2,16,12,16,16,6,0,0,0,8,16,11,11,13,0,0,0,0,7,12,16,9,0,6 +0,0,0,10,15,3,0,0,0,2,11,15,12,12,0,0,0,5,16,4,0,10,4,0,0,7,14,0,0,6,6,0,0,7,11,0,0,9,5,0,0,0,16,1,0,11,5,0,0,0,9,13,8,15,2,0,0,0,1,11,16,8,0,0,0 +0,0,5,13,14,5,0,0,0,0,9,8,9,12,0,0,0,3,12,2,11,9,0,0,0,3,13,15,14,1,0,0,0,0,0,11,12,12,2,0,0,0,4,10,0,9,9,0,0,0,10,5,2,8,13,0,0,0,5,14,15,11,2,0,8 +0,0,0,7,16,0,0,0,0,0,0,14,12,0,0,0,0,0,5,15,4,2,3,0,0,1,15,8,1,15,8,0,0,10,15,8,10,16,3,0,2,16,16,16,16,16,1,0,0,6,6,10,16,3,0,0,0,0,0,9,11,0,0,0,4 +0,0,0,4,14,0,0,0,0,0,2,15,7,0,0,0,0,0,10,11,0,0,0,0,0,3,15,2,0,0,0,0,0,6,12,7,10,4,0,0,0,5,15,16,13,15,4,0,0,0,8,16,8,13,12,0,0,0,0,8,14,13,6,0,6 +0,0,3,14,13,1,0,0,0,0,13,16,15,9,0,0,0,6,16,3,0,13,0,0,0,6,13,0,0,7,6,0,0,8,8,0,0,4,8,0,0,0,14,0,0,2,12,0,0,0,12,11,8,15,8,0,0,0,2,14,16,13,3,0,0 +0,0,7,13,3,0,0,0,0,0,5,15,3,0,0,0,0,0,12,7,0,0,0,0,0,3,14,0,0,0,0,0,0,4,13,8,12,10,1,0,0,5,16,14,10,13,8,0,0,2,15,11,5,13,7,0,0,0,5,13,14,11,1,0,6 +0,0,0,6,12,0,0,0,0,0,0,11,11,0,0,0,0,0,1,16,3,4,3,0,0,1,11,12,0,14,7,0,0,8,16,10,9,15,2,0,0,10,16,16,16,15,0,0,0,0,0,5,16,3,0,0,0,0,0,5,14,0,0,0,4 +0,0,10,15,16,6,0,0,0,1,13,5,7,15,2,0,0,0,0,0,7,12,2,0,0,0,0,6,15,3,0,0,0,0,7,12,1,0,0,0,0,5,14,2,0,0,0,0,0,9,14,10,11,7,1,0,0,1,7,8,8,11,4,0,2 +0,0,0,1,15,10,0,0,0,0,0,6,16,7,0,0,0,0,0,11,16,0,0,0,0,0,2,16,12,15,11,0,0,1,15,14,6,16,8,0,0,8,16,15,16,16,6,0,0,5,12,12,16,13,3,0,0,0,0,2,14,3,0,0,4 +0,0,0,6,14,16,7,0,0,0,10,15,6,15,7,0,0,4,16,3,2,12,0,0,0,3,15,11,7,16,3,0,0,0,2,9,15,13,0,0,0,0,0,4,15,5,0,0,0,0,0,8,16,0,0,0,0,0,0,5,16,0,0,0,9 +0,0,6,12,16,10,0,0,0,0,12,8,11,13,0,0,0,5,13,3,15,5,0,0,0,4,15,16,11,0,0,0,0,0,8,14,15,11,0,0,0,0,11,5,2,15,7,0,0,0,12,5,4,13,8,0,0,0,6,15,16,14,3,0,8 +0,0,0,0,2,14,4,0,0,0,0,1,11,16,5,0,0,0,6,14,16,16,1,0,0,5,16,12,16,13,0,0,0,3,10,0,14,13,0,0,0,0,0,0,15,14,0,0,0,0,0,0,8,16,1,0,0,0,0,0,4,15,0,0,1 
+0,0,2,12,13,4,0,0,0,0,12,11,8,13,0,0,0,0,13,0,13,5,0,0,0,11,14,7,11,0,0,0,0,0,7,16,5,0,0,0,0,0,7,12,12,4,0,0,0,0,9,6,10,10,0,0,0,0,2,14,15,6,0,0,8 +0,3,15,13,2,0,0,0,0,11,14,12,11,0,0,0,0,8,2,4,15,0,0,0,0,0,0,4,15,0,0,0,0,0,0,13,7,0,0,0,0,0,6,15,1,0,0,0,0,2,14,14,12,15,8,0,0,2,13,13,12,12,5,0,2 +0,0,4,6,14,10,2,0,0,0,15,16,12,12,12,0,0,4,14,1,0,0,0,0,0,5,16,16,12,1,0,0,0,0,4,8,15,10,0,0,0,0,0,0,5,9,0,0,0,0,0,5,14,3,0,0,0,0,5,14,5,0,0,0,5 +0,0,1,13,13,2,0,0,0,0,5,15,13,13,0,0,0,1,13,5,0,14,1,0,0,3,16,7,0,8,8,0,0,4,9,0,0,8,8,0,0,2,13,1,0,7,11,0,0,0,14,14,9,16,7,0,0,0,3,12,16,9,0,0,0 +0,3,15,16,13,0,0,0,0,10,14,15,16,4,0,0,0,3,3,1,16,8,0,0,0,0,0,6,16,4,0,0,0,0,1,14,12,0,0,0,0,0,10,16,3,0,0,0,0,3,16,15,12,11,1,0,0,2,16,16,16,16,8,0,2 +0,0,5,15,9,0,0,0,0,2,13,8,15,5,0,0,0,2,2,0,8,5,0,0,0,0,5,9,15,2,0,0,0,0,11,15,15,11,0,0,0,0,0,0,1,14,1,0,0,0,4,9,4,11,3,0,0,0,5,12,13,9,0,0,3 +0,0,3,9,14,8,0,0,0,0,13,16,14,16,0,0,0,0,10,0,12,10,0,0,0,0,0,7,16,8,0,0,0,0,0,3,13,15,1,0,0,0,0,1,1,13,4,0,0,0,3,13,11,14,1,0,0,0,3,16,10,1,0,0,3 +0,0,0,0,12,16,2,0,0,0,3,11,16,16,5,0,0,3,15,16,16,14,0,0,0,4,12,12,16,14,0,0,0,0,0,5,16,11,0,0,0,0,0,7,16,11,0,0,0,0,0,4,16,13,0,0,0,0,0,1,13,13,0,0,1 +0,2,12,11,1,0,0,0,0,6,14,16,8,0,0,0,0,5,5,10,11,0,0,0,0,0,0,11,6,0,0,0,0,0,3,16,1,0,0,0,0,1,15,7,2,1,0,0,0,5,16,16,16,16,7,0,0,3,12,12,8,6,3,0,2 +0,0,0,1,8,15,10,0,0,0,3,15,15,16,12,0,0,0,16,9,6,16,2,0,0,4,16,9,13,16,1,0,0,0,10,12,15,16,0,0,0,0,0,0,10,12,0,0,0,0,0,0,10,11,0,0,0,0,0,0,6,6,0,0,9 +0,0,2,14,15,3,0,0,0,0,11,14,14,13,0,0,0,0,16,4,1,13,4,0,0,3,16,4,0,7,8,0,0,5,13,0,0,4,8,0,0,3,13,1,0,8,9,0,0,0,12,11,10,15,6,0,0,0,4,15,15,8,0,0,0 +0,0,0,1,9,13,5,0,0,0,3,13,10,12,9,0,0,0,11,8,0,13,3,0,0,1,15,14,11,16,2,0,0,0,1,8,10,14,0,0,0,0,0,0,7,13,0,0,0,0,0,0,10,9,0,0,0,0,0,0,13,5,0,0,9 +0,1,14,16,8,0,0,0,0,7,12,9,16,0,0,0,0,4,2,0,16,0,0,0,0,0,1,10,9,0,0,0,0,0,7,13,1,0,0,0,0,0,16,1,0,0,0,0,0,3,13,4,7,8,5,0,0,1,13,16,15,12,10,0,2 +0,0,2,10,16,14,3,0,0,3,13,16,11,8,2,0,0,6,16,6,0,0,0,0,0,2,14,16,15,7,0,0,0,0,0,4,10,16,5,0,0,0,0,0,2,15,4,0,0,0,5,7,15,12,0,0,0,0,3,15,10,1,0,0,5 +0,1,13,11,0,0,0,0,0,5,14,16,6,0,0,0,0,7,3,12,6,0,0,0,0,0,0,14,0,0,0,0,0,0,6,12,0,0,0,0,0,2,15,4,0,0,0,0,0,4,16,9,8,10,5,0,0,1,13,16,16,15,7,0,2 +0,0,0,0,6,14,2,0,0,0,0,0,10,16,4,0,0,0,0,6,16,16,2,0,0,0,8,15,16,15,0,0,0,4,16,11,12,16,0,0,0,1,4,1,8,15,0,0,0,0,0,0,8,12,0,0,0,0,0,0,6,16,2,0,1 +0,0,0,13,3,0,0,0,0,0,7,15,3,0,0,0,0,0,15,8,0,0,0,0,0,2,16,1,0,0,0,0,0,3,14,1,4,4,0,0,0,2,16,15,16,16,9,0,0,0,11,16,12,16,15,2,0,0,1,10,15,13,10,0,6 +0,0,3,11,14,16,4,0,0,9,16,16,16,9,3,0,0,15,14,6,2,0,0,0,0,10,16,5,0,0,0,0,0,3,13,15,5,0,0,0,0,0,1,10,15,6,0,0,0,0,0,7,15,15,0,0,0,0,2,15,16,15,0,0,5 +0,0,6,12,16,14,3,0,0,6,14,6,3,15,8,0,0,7,14,9,16,16,0,0,0,0,5,11,10,14,0,0,0,0,0,0,10,8,0,0,0,0,0,3,16,3,0,0,0,0,1,15,9,0,0,0,0,0,10,11,0,0,0,0,9 +0,0,1,15,11,1,0,0,0,0,5,16,16,7,0,0,0,0,0,14,16,14,0,0,0,0,0,14,16,16,2,0,0,0,2,16,16,16,2,0,0,0,0,14,16,15,1,0,0,0,1,16,16,16,3,0,0,0,2,12,16,15,5,0,1 +0,0,10,7,12,3,0,0,0,0,16,16,16,11,0,0,0,0,15,5,0,12,7,0,0,8,8,0,0,11,8,0,0,8,8,0,0,8,8,0,0,9,9,0,2,15,5,0,0,5,16,12,15,11,0,0,0,0,13,16,10,1,0,0,0 +0,0,0,2,11,13,2,0,0,0,2,13,16,16,2,0,0,3,15,14,13,16,0,0,0,0,6,0,15,9,0,0,0,0,0,0,16,6,0,0,0,0,0,4,16,0,0,0,0,0,0,5,16,3,0,0,0,0,0,3,16,6,0,0,1 +0,0,4,11,14,7,0,0,0,0,15,12,16,3,0,0,0,0,8,16,10,0,0,0,0,0,9,16,7,0,0,0,0,3,15,4,14,4,0,0,0,2,13,0,3,15,6,0,0,0,8,9,4,8,14,0,0,0,0,7,12,12,8,0,8 +0,0,3,12,16,14,3,0,0,0,7,10,11,16,10,0,0,0,0,0,0,16,12,0,0,0,0,2,7,16,11,0,0,3,10,16,16,16,6,0,0,10,12,10,16,10,0,0,0,0,1,15,15,0,0,0,0,0,5,15,5,0,0,0,7 
+0,0,7,16,15,6,0,0,0,0,2,9,15,14,0,0,0,0,0,0,13,12,0,0,0,0,0,3,16,14,8,0,0,0,9,16,16,16,9,0,0,0,5,14,14,2,0,0,0,0,2,15,6,0,0,0,0,0,9,15,0,0,0,0,7 +0,0,6,13,16,16,7,0,0,9,16,15,10,8,1,0,0,13,15,3,0,0,0,0,0,4,16,13,1,0,0,0,0,0,7,16,12,0,0,0,0,0,0,9,16,7,0,0,0,0,4,16,16,12,0,0,0,0,5,16,15,5,0,0,5 +0,4,15,15,8,1,0,0,0,4,16,11,16,15,0,0,0,1,5,0,11,16,4,0,0,0,0,6,16,13,1,0,0,0,0,12,16,1,0,0,0,0,0,6,16,5,0,0,0,0,0,4,16,8,0,0,0,4,16,16,16,6,0,0,3 +0,0,7,12,12,0,0,0,0,9,14,10,14,7,0,0,0,1,0,0,12,12,0,0,0,0,0,5,16,5,0,0,0,0,2,15,10,0,0,0,0,0,10,14,1,0,0,0,0,0,12,16,8,11,10,0,0,0,8,14,13,7,0,0,2 +0,0,1,12,11,0,0,0,0,0,9,15,3,0,0,0,0,2,16,9,0,0,4,0,0,7,16,10,4,11,15,0,0,4,16,16,16,16,9,0,0,0,5,9,15,13,0,0,0,0,0,8,16,3,0,0,0,0,0,11,13,0,0,0,4 +0,0,7,12,15,5,0,0,0,0,10,8,13,12,0,0,0,0,0,0,12,10,0,0,0,0,0,5,16,14,7,0,0,3,16,16,15,9,2,0,0,0,4,13,10,0,0,0,0,0,5,14,1,0,0,0,0,0,13,6,0,0,0,0,7 +0,0,8,13,12,2,0,0,0,0,10,12,13,10,0,0,0,0,1,0,8,12,0,0,0,0,0,0,9,12,0,0,0,0,0,3,16,5,0,0,0,0,1,13,12,2,3,0,0,0,12,16,16,16,9,0,0,0,10,12,9,2,0,0,2 +0,0,6,13,15,5,0,0,0,7,16,11,11,16,0,0,0,10,12,0,3,16,2,0,0,4,3,0,5,13,0,0,0,0,0,0,14,6,0,0,0,0,0,6,14,1,0,0,0,0,2,16,14,16,13,0,0,0,8,16,12,8,0,0,2 +0,0,0,5,14,16,16,5,0,0,10,16,15,16,16,7,0,4,15,6,1,16,12,0,0,0,2,0,5,16,7,0,0,0,0,0,4,16,2,0,0,0,0,0,6,16,1,0,0,0,0,2,15,14,0,0,0,0,0,3,15,7,0,0,3 +0,0,6,14,16,13,0,0,0,6,14,3,0,13,4,0,0,5,12,0,4,16,3,0,0,0,14,14,15,5,0,0,0,1,14,12,1,0,0,0,0,5,16,13,2,0,0,0,0,2,16,16,13,0,0,0,0,0,2,12,12,0,0,0,8 +0,0,0,7,16,5,0,0,0,0,3,15,16,11,0,0,0,2,15,16,16,1,0,0,0,13,16,16,12,0,0,0,0,1,11,16,11,0,0,0,0,0,6,16,11,0,0,0,0,0,0,14,15,3,0,0,0,0,0,4,15,12,0,0,1 +0,0,0,6,6,0,0,0,0,0,0,13,9,0,0,0,0,0,5,16,4,0,0,0,0,0,7,15,0,0,0,0,0,0,8,12,12,8,2,0,0,0,12,16,12,12,14,1,0,0,6,16,6,8,16,6,0,0,0,4,14,16,15,1,6 +0,2,8,9,6,0,0,0,0,0,6,16,16,14,1,0,0,0,4,16,16,16,4,0,0,0,4,16,16,16,4,0,0,0,4,16,16,16,3,0,0,0,7,16,16,12,0,0,0,1,14,16,16,11,0,0,0,2,10,9,5,1,0,0,1 +0,0,2,13,13,0,0,0,0,1,14,13,2,0,0,0,0,6,16,3,0,0,0,0,0,7,16,6,0,10,7,0,0,1,14,16,14,16,10,0,0,0,5,15,16,11,0,0,0,0,0,11,16,0,0,0,0,0,3,16,8,0,0,0,4 +0,0,9,15,16,12,1,0,0,0,9,10,14,16,9,0,0,0,0,0,2,16,10,0,0,0,0,0,8,15,2,0,0,0,0,10,15,4,0,0,0,0,6,16,6,0,0,0,0,0,15,16,12,12,0,0,0,0,14,16,13,5,0,0,2 +0,0,3,10,13,16,12,0,0,3,15,13,8,10,12,0,0,1,12,13,16,16,10,0,0,0,0,0,4,16,5,0,0,0,0,0,10,16,0,0,0,0,0,1,15,12,0,0,0,0,0,9,15,0,0,0,0,0,0,14,9,0,0,0,9 +0,0,1,11,12,0,0,0,0,0,8,16,3,3,1,0,0,1,16,11,0,15,8,0,0,11,15,5,10,16,1,0,0,6,16,16,16,16,8,0,0,0,3,12,15,12,4,0,0,0,0,13,11,0,0,0,0,0,0,14,5,0,0,0,4 +0,1,10,16,16,12,2,0,0,8,11,8,5,12,8,0,0,1,0,0,5,15,7,0,0,0,10,15,16,9,0,0,0,0,5,9,16,9,0,0,0,0,0,0,3,16,0,0,0,1,8,2,5,16,0,0,0,2,14,16,12,5,0,0,3 +0,0,0,0,3,16,6,0,0,0,0,5,15,16,9,0,0,1,10,16,15,16,8,0,0,10,16,12,2,16,5,0,0,1,6,0,4,16,4,0,0,0,0,0,5,16,1,0,0,0,0,0,6,16,3,0,0,0,0,0,3,16,9,0,1 +0,0,1,15,8,1,0,0,0,0,5,16,16,1,0,0,0,0,2,16,16,3,0,0,0,0,5,16,16,3,0,0,0,0,4,16,16,4,0,0,0,0,3,16,16,5,0,0,0,0,2,15,16,9,0,0,0,0,2,11,16,13,0,0,1 +0,1,8,16,16,6,0,0,0,12,16,15,12,6,0,0,1,16,13,2,0,0,0,0,0,7,16,2,0,0,0,0,0,0,7,15,7,0,0,0,0,0,0,4,14,11,0,0,0,0,1,8,14,16,2,0,0,0,5,16,16,9,0,0,5 +0,0,6,12,13,16,14,1,0,0,6,8,6,10,15,1,0,0,0,0,0,10,12,0,0,0,0,0,5,16,2,0,0,0,1,14,16,14,3,0,0,0,2,13,13,1,0,0,0,0,2,16,6,0,0,0,0,0,9,12,0,0,0,0,7 +0,0,5,15,16,16,9,0,0,0,4,6,5,8,16,2,0,0,0,0,0,9,12,0,0,0,0,6,12,16,11,0,0,0,0,12,16,9,0,0,0,0,0,5,13,1,0,0,0,0,3,14,5,0,0,0,0,0,6,9,0,0,0,0,7 
+0,0,0,11,10,1,0,0,0,0,10,16,9,3,0,0,0,0,15,14,0,0,0,0,0,2,16,7,0,0,0,0,0,0,16,5,4,0,0,0,0,0,15,16,16,13,1,0,0,0,6,16,16,16,11,0,0,0,0,6,16,16,9,0,6 +0,1,11,16,11,11,0,0,0,10,14,4,8,14,0,0,0,5,16,1,13,11,0,0,0,0,8,16,15,1,0,0,0,0,7,16,10,0,0,0,0,0,14,11,16,0,0,0,0,2,16,11,12,0,0,0,0,0,12,15,4,0,0,0,8 +0,1,10,15,15,6,0,0,0,8,16,15,14,15,3,0,0,3,6,3,15,15,0,0,0,0,0,12,16,5,0,0,0,0,0,8,16,2,0,0,0,0,0,4,16,8,0,0,0,0,4,15,16,4,0,0,0,0,14,16,7,0,0,0,3 +0,0,7,15,15,4,0,0,0,9,16,10,16,10,0,0,0,0,3,0,14,12,0,0,0,0,0,2,16,5,0,0,0,0,1,14,11,0,0,0,0,0,12,16,0,0,0,0,0,0,12,16,5,4,3,0,0,0,5,14,16,15,8,0,2 +0,0,5,11,16,13,6,0,0,0,5,8,8,14,15,0,0,0,0,0,0,12,15,0,0,0,2,8,12,16,11,0,0,0,8,16,16,16,4,0,0,0,0,8,16,6,0,0,0,0,6,13,15,0,0,0,0,0,6,14,5,0,0,0,7 +0,0,6,14,11,3,0,0,0,1,15,10,16,14,0,0,0,5,14,0,0,11,6,0,0,7,11,0,0,8,8,0,0,8,8,0,0,12,8,0,0,7,9,0,4,16,2,0,0,0,16,12,15,12,0,0,0,0,6,16,11,1,0,0,0 +0,0,0,4,15,0,0,0,0,0,1,14,8,0,0,0,0,0,7,14,0,0,0,0,0,4,16,6,0,11,4,0,0,9,16,0,8,15,2,0,0,13,16,16,16,11,0,0,0,4,8,6,16,8,0,0,0,0,0,2,14,0,0,0,4 +0,0,5,12,13,1,0,0,0,0,7,16,16,10,0,0,0,0,1,13,16,13,0,0,0,0,0,0,10,15,0,0,0,0,0,0,9,15,0,0,0,0,2,5,15,12,0,0,0,0,11,16,16,7,0,0,0,0,4,13,9,0,0,0,9 +0,0,0,6,11,14,3,0,0,0,9,13,8,15,12,0,0,0,0,0,0,10,11,0,0,0,0,0,0,13,9,0,0,0,4,11,13,16,4,0,0,0,3,8,14,12,0,0,0,0,0,3,16,3,0,0,0,0,0,9,14,0,0,0,7 +0,0,6,15,15,4,0,0,0,4,15,16,16,14,0,0,0,12,16,13,16,16,1,0,0,9,16,15,14,16,2,0,0,0,4,2,7,16,1,0,0,0,0,2,15,14,0,0,0,0,1,14,16,6,0,0,0,0,5,15,6,0,0,0,9 +0,0,7,16,10,0,0,0,0,1,16,14,16,9,0,0,0,0,11,16,16,15,0,0,0,0,0,7,13,14,0,0,0,0,0,0,11,14,0,0,0,0,0,0,13,12,0,0,0,0,1,11,15,3,0,0,0,0,6,16,6,0,0,0,9 +0,0,0,10,10,0,0,0,0,1,8,2,12,4,0,0,0,7,14,1,2,12,0,0,0,7,10,0,0,12,3,0,0,3,12,0,0,11,8,0,0,1,15,1,1,15,8,0,0,0,6,14,13,16,4,0,0,0,0,9,14,7,0,0,0 +0,0,3,15,9,1,0,0,0,0,11,14,16,14,1,0,0,4,14,1,3,11,8,0,0,8,9,0,0,8,8,0,0,8,8,0,1,15,4,0,0,7,9,0,7,13,0,0,0,3,14,10,15,3,0,0,0,0,5,13,4,0,0,0,0 +0,2,7,14,16,12,0,0,0,9,16,16,15,13,2,0,0,3,16,7,2,0,0,0,0,0,13,9,0,0,0,0,0,0,7,15,1,0,0,0,0,0,2,15,5,0,0,0,0,1,16,16,7,0,0,0,0,0,9,16,4,0,0,0,5 +0,0,0,11,8,0,0,0,0,0,4,16,15,5,0,0,0,0,6,11,4,15,0,0,0,0,6,4,0,14,4,0,0,0,16,2,0,14,6,0,0,0,14,3,4,16,5,0,0,0,9,14,15,16,1,0,0,0,1,11,14,4,0,0,0 +0,0,9,15,16,9,0,0,0,11,16,9,11,16,2,0,0,5,3,1,14,14,1,0,0,0,0,7,16,5,0,0,0,0,0,4,16,5,0,0,0,0,0,0,10,16,0,0,0,0,1,6,15,13,0,0,0,0,14,15,9,1,0,0,3 +0,0,2,11,11,2,0,0,0,0,11,16,16,5,0,0,0,0,10,16,16,5,0,0,0,0,2,11,14,9,0,0,0,0,0,0,8,12,0,0,0,0,0,0,8,16,0,0,0,0,0,8,15,10,0,0,0,0,1,14,14,3,0,0,9 +0,0,3,12,16,16,16,12,0,0,9,12,8,8,16,11,0,0,1,0,1,8,16,6,0,0,1,12,16,16,14,1,0,0,0,12,15,14,3,0,0,0,0,4,16,6,0,0,0,0,0,15,13,0,0,0,0,0,5,16,3,0,0,0,7 +0,0,8,16,2,3,0,0,0,0,15,11,5,16,1,0,0,0,16,4,9,16,1,0,0,0,8,13,15,12,0,0,0,0,8,16,13,0,0,0,0,0,13,16,7,0,0,0,0,1,16,16,7,0,0,0,0,0,8,13,1,0,0,0,8 +0,0,1,13,15,5,0,0,0,0,10,15,8,6,0,0,0,2,16,6,0,0,0,0,0,4,15,1,4,0,0,0,0,5,13,3,16,11,1,0,0,0,15,1,5,15,5,0,0,0,11,12,8,16,6,0,0,0,0,10,16,10,0,0,6 +0,0,7,9,15,12,1,0,0,8,15,10,9,16,4,0,0,6,3,1,12,14,0,0,0,0,0,6,16,5,0,0,0,0,0,5,15,8,0,0,0,0,0,0,8,16,3,0,0,0,1,7,15,9,0,0,0,0,7,16,8,0,0,0,3 +0,0,3,9,16,13,0,0,0,4,15,12,12,16,0,0,0,8,4,4,14,9,0,0,0,0,0,8,16,7,0,0,0,0,0,1,16,15,0,0,0,0,0,0,0,16,4,0,0,0,0,7,14,15,2,0,0,0,1,15,12,2,0,0,3 +0,0,0,9,15,2,0,0,0,0,8,16,9,1,0,0,0,1,15,12,0,0,0,0,0,3,16,4,0,0,0,0,0,1,16,2,5,0,0,0,0,0,14,16,16,11,1,0,0,0,5,16,16,16,7,0,0,0,0,8,12,13,1,0,6 +0,0,1,8,12,1,0,0,0,0,7,16,11,0,0,0,0,0,13,14,0,0,0,0,0,2,16,10,0,0,0,0,0,1,16,8,5,3,0,0,0,0,14,16,16,16,10,0,0,0,7,16,9,15,16,2,0,0,0,6,12,14,7,0,6 
+0,0,10,16,16,16,3,0,0,0,14,16,16,13,5,0,0,0,8,16,4,0,0,0,0,0,1,15,9,0,0,0,0,0,0,11,12,0,0,0,0,0,3,9,16,0,0,0,0,4,16,15,16,0,0,0,0,0,10,16,10,0,0,0,5 +0,0,5,14,16,8,0,0,0,3,16,14,15,15,0,0,0,0,8,3,14,11,0,0,0,0,0,9,15,3,0,0,0,0,3,15,8,0,0,0,0,0,10,14,1,0,0,0,0,0,16,13,4,9,8,0,0,0,5,14,15,12,3,0,2 +0,0,0,12,9,1,0,0,0,0,7,14,12,10,0,0,0,0,11,3,3,16,1,0,0,6,10,0,1,16,2,0,0,5,13,0,2,16,3,0,0,2,14,2,7,16,1,0,0,0,10,14,16,11,0,0,0,0,1,12,12,1,0,0,0 +0,1,8,15,16,8,0,0,0,14,16,16,11,4,0,0,0,4,15,12,0,0,0,0,0,0,6,16,3,0,0,0,0,0,0,10,12,0,0,0,0,0,0,6,16,1,0,0,0,0,12,15,16,1,0,0,0,0,9,16,13,1,0,0,5 +0,0,3,15,13,2,0,0,0,1,13,16,16,0,0,0,0,9,16,16,16,1,0,0,0,0,3,15,16,1,0,0,0,0,0,14,16,6,0,0,0,0,0,9,16,11,0,0,0,0,5,12,16,9,0,0,0,0,4,15,10,1,0,0,1 +0,0,0,1,15,3,0,0,0,0,0,14,15,0,0,0,0,0,10,13,1,3,4,0,0,3,13,3,0,11,9,0,0,9,12,0,7,15,3,0,0,8,16,16,16,10,0,0,0,1,7,6,16,5,0,0,0,0,0,1,12,2,0,0,4 +0,0,2,13,13,3,0,0,0,0,11,12,12,12,0,0,0,2,15,0,1,15,4,0,0,7,9,0,0,13,5,0,0,6,11,0,0,11,8,0,0,5,12,0,0,16,4,0,0,0,15,9,8,15,1,0,0,0,5,15,13,2,0,0,0 +0,0,4,13,16,7,0,0,0,0,11,9,15,11,0,0,0,0,0,0,12,9,0,0,0,0,0,4,16,3,0,0,0,0,12,16,16,16,5,0,0,0,6,14,16,15,2,0,0,0,1,16,3,0,0,0,0,0,6,14,2,0,0,0,7 +0,0,0,4,13,7,0,0,0,0,0,5,15,16,2,0,0,0,1,14,16,16,0,0,0,2,14,16,16,16,0,0,0,5,12,10,16,16,0,0,0,0,0,4,16,16,0,0,0,0,0,5,16,16,4,0,0,0,0,4,14,15,4,0,1 +0,0,5,13,16,6,0,0,0,0,12,11,5,16,1,0,0,0,10,8,5,16,6,0,0,0,2,16,16,14,0,0,0,0,7,16,16,5,0,0,0,4,16,7,8,13,0,0,0,2,16,3,2,16,6,0,0,0,6,15,16,15,4,0,8 +0,0,0,6,16,4,0,0,0,0,0,16,16,5,0,0,0,0,5,16,16,4,0,0,0,5,15,16,16,4,0,0,0,8,16,16,16,7,0,0,0,0,0,7,16,16,0,0,0,0,0,6,16,16,5,0,0,0,0,3,16,16,3,0,1 +0,0,6,13,14,3,0,0,0,7,15,8,9,12,0,0,0,6,15,5,4,16,1,0,0,0,8,16,16,16,6,0,0,0,0,11,16,12,0,0,0,0,5,15,9,15,0,0,0,0,10,13,1,16,4,0,0,0,5,12,16,15,2,0,8 +0,3,10,16,14,14,3,0,0,8,16,9,12,12,3,0,0,7,15,8,7,1,0,0,0,5,16,16,16,11,0,0,0,0,1,0,4,15,3,0,0,0,2,0,4,16,4,0,0,2,15,10,14,16,2,0,0,1,11,12,12,4,0,0,5 +0,0,2,14,16,7,0,0,0,0,8,16,8,16,2,0,0,0,4,16,13,16,6,0,0,0,0,15,16,9,0,0,0,0,5,16,16,6,0,0,0,1,15,8,9,12,0,0,0,1,16,11,9,16,2,0,0,0,3,12,13,11,0,0,8 +0,0,5,13,0,0,0,0,0,0,12,11,0,0,0,0,0,3,16,2,0,0,0,0,0,5,15,0,0,0,0,0,0,7,14,0,4,1,0,0,0,5,16,16,16,13,0,0,0,3,16,16,10,16,5,0,0,0,6,13,16,12,0,0,6 +0,0,15,8,0,0,0,0,0,6,15,16,3,0,0,0,0,6,9,13,5,0,0,0,0,0,1,8,8,0,0,0,0,0,0,13,7,0,0,0,0,0,5,15,4,0,2,0,0,4,16,16,16,16,10,0,0,1,8,8,11,12,13,0,2 +0,0,2,14,16,14,2,0,0,0,0,11,13,16,3,0,0,0,0,0,10,14,0,0,0,0,2,8,14,12,2,0,0,0,10,16,16,16,9,0,0,0,6,16,14,12,2,0,0,0,3,15,11,0,0,0,0,0,6,15,3,0,0,0,7 +0,0,2,12,13,3,0,0,0,0,14,12,12,14,2,0,0,4,16,2,8,16,7,0,0,3,15,14,16,7,0,0,0,0,3,15,16,9,0,0,0,0,10,13,9,16,0,0,0,0,11,11,7,16,2,0,0,0,2,13,12,8,1,0,8 +0,0,0,9,7,0,0,0,0,0,2,16,2,5,1,0,0,0,9,8,3,15,1,0,0,3,14,1,8,12,0,0,0,6,16,12,16,16,5,0,0,8,15,13,16,9,0,0,0,0,0,5,14,0,0,0,0,0,0,10,7,0,0,0,4 +0,0,0,11,4,0,0,0,0,0,0,15,1,0,0,0,0,0,5,9,3,7,0,0,0,0,13,5,7,10,0,0,0,7,15,2,11,15,1,0,0,9,16,16,16,14,2,0,0,0,4,8,16,1,0,0,0,0,0,8,12,0,0,0,4 +0,0,1,13,13,0,0,0,0,0,11,14,15,11,0,0,0,4,16,4,2,15,5,0,0,8,9,0,0,12,7,0,0,5,10,0,0,12,7,0,0,5,14,1,2,15,1,0,0,1,14,15,12,12,0,0,0,0,2,13,12,3,0,0,0 +0,0,0,10,14,6,0,0,0,0,0,8,16,10,1,0,0,0,0,11,16,6,0,0,0,0,6,16,16,4,0,0,0,4,12,14,16,4,0,0,0,0,0,8,16,4,0,0,0,0,0,11,16,5,0,0,0,0,0,6,16,16,7,0,1 +0,0,0,1,13,8,0,0,0,0,0,6,16,9,0,0,0,0,2,15,16,8,0,0,0,3,16,16,16,10,0,0,0,0,3,3,16,12,0,0,0,0,0,0,16,12,0,0,0,0,0,2,16,16,4,0,0,0,0,1,16,14,3,0,1 
+0,0,4,11,14,12,2,0,0,0,11,12,4,8,0,0,0,1,16,6,0,0,0,0,0,3,16,8,7,2,0,0,0,5,16,16,16,15,2,0,0,0,3,0,1,15,6,0,0,0,3,8,9,15,9,0,0,0,7,16,12,10,2,0,5 +0,0,4,12,2,0,0,0,0,0,14,8,0,0,0,0,0,5,16,2,0,0,0,0,0,8,12,0,0,0,0,0,0,8,13,9,14,10,0,0,0,7,16,13,9,16,5,0,0,0,14,11,5,16,7,0,0,0,4,14,14,8,0,0,6 +0,2,14,14,3,0,0,0,0,8,15,15,11,0,0,0,0,9,14,10,14,2,0,0,0,1,7,8,16,0,0,0,0,0,0,11,14,0,0,0,0,0,0,16,10,0,0,0,0,2,13,16,13,10,7,0,0,4,16,16,16,16,15,0,2 +0,0,7,15,12,4,0,0,0,5,15,4,9,14,0,0,0,10,8,0,4,16,2,0,0,6,11,2,12,11,0,0,0,0,13,16,16,6,0,0,0,0,5,16,16,8,0,0,0,0,10,14,5,16,7,0,0,0,9,15,16,16,8,0,8 +0,1,12,15,3,0,0,0,0,8,15,14,13,0,0,0,0,3,5,9,14,0,0,0,0,0,2,16,14,2,0,0,0,0,3,16,16,15,2,0,0,0,1,0,2,14,5,0,0,0,14,11,9,16,6,0,0,3,16,16,14,7,1,0,3 +0,0,10,15,6,0,0,0,0,7,15,11,14,0,0,0,0,3,9,2,16,0,0,0,0,0,0,6,16,0,0,0,0,0,0,14,16,14,1,0,0,0,0,1,5,16,5,0,0,3,11,2,7,16,5,0,0,1,13,16,13,7,0,0,3 +0,0,2,13,15,5,0,0,0,0,8,13,12,14,1,0,0,1,14,5,0,11,6,0,0,6,16,1,0,9,7,0,0,6,16,2,0,13,4,0,0,3,16,8,4,14,0,0,0,0,8,16,16,8,0,0,0,0,0,11,10,0,0,0,0 +0,0,10,15,9,0,0,0,0,6,15,11,16,0,0,0,0,2,4,4,16,0,0,0,0,0,0,11,16,7,0,0,0,0,0,7,12,16,4,0,0,0,0,0,0,16,7,0,0,3,16,12,10,16,3,0,0,1,10,14,12,6,0,0,3 +0,0,5,14,4,0,0,0,0,0,12,12,0,0,0,0,0,0,16,9,0,0,0,0,0,8,16,0,0,0,0,0,0,8,16,13,16,9,1,0,0,11,16,14,10,16,2,0,0,7,16,14,12,15,1,0,0,1,9,13,10,3,0,0,6 +0,0,0,14,6,0,0,0,0,0,6,16,5,5,0,0,0,0,14,10,10,10,0,0,0,4,16,3,13,10,0,0,0,11,16,9,16,15,7,0,0,16,16,16,16,15,4,0,0,4,4,13,16,2,0,0,0,0,1,15,11,0,0,0,4 +0,0,0,3,10,0,0,0,0,0,0,10,8,0,0,0,0,0,5,14,0,7,0,0,0,1,14,5,1,15,0,0,0,8,10,0,6,10,0,0,0,14,14,12,14,15,1,0,0,6,12,9,16,6,0,0,0,0,0,4,14,0,0,0,4 +0,0,12,15,9,0,0,0,0,8,13,10,16,2,0,0,0,4,4,0,15,6,0,0,0,0,0,7,16,9,0,0,0,0,0,5,10,16,4,0,0,0,0,0,0,8,9,0,0,0,8,5,4,9,12,0,0,1,14,14,16,14,3,0,3 +0,0,9,6,0,0,0,0,0,0,16,6,0,0,0,0,0,1,16,0,0,0,0,0,0,4,16,0,0,0,0,0,0,4,15,12,13,9,1,0,0,6,16,11,8,14,5,0,0,3,16,12,10,15,6,0,0,0,8,15,14,7,0,0,6 +0,0,0,9,8,0,0,0,0,0,1,15,5,0,0,0,0,0,9,11,1,2,0,0,0,1,16,3,9,11,0,0,0,10,14,4,14,13,3,0,0,15,16,16,16,16,5,0,0,2,4,10,13,2,0,0,0,0,0,11,12,0,0,0,4 +0,0,4,14,16,15,1,0,0,0,6,9,14,16,0,0,0,0,0,0,14,12,2,0,0,0,6,13,16,16,12,0,0,0,6,15,15,11,5,0,0,0,1,15,9,0,0,0,0,0,2,16,7,0,0,0,0,0,7,14,2,0,0,0,7 +0,0,13,15,0,0,0,0,0,4,16,4,0,0,0,0,0,7,16,0,0,0,0,0,0,10,14,0,3,1,0,0,0,9,12,10,16,14,3,0,0,8,16,13,5,7,15,0,0,2,16,9,0,7,16,0,0,0,13,16,16,16,7,0,6 +0,0,9,15,5,0,0,0,0,7,16,13,14,0,0,0,0,14,10,4,16,0,0,0,0,7,6,2,16,1,0,0,0,0,0,6,13,0,0,0,0,0,0,9,11,0,0,0,0,0,5,16,15,9,4,0,0,0,10,14,11,15,16,2,2 +0,0,11,16,8,0,0,0,0,5,16,12,15,0,0,0,0,10,12,9,11,0,0,0,0,7,15,15,6,0,0,0,0,0,14,16,11,1,0,0,0,1,14,11,12,15,3,0,0,7,16,2,0,11,12,0,0,1,10,15,16,16,6,0,8 +0,3,12,12,3,0,0,0,0,8,10,11,9,0,0,0,0,0,2,14,4,0,0,0,0,0,12,13,1,0,0,0,0,0,3,13,15,3,0,0,0,0,0,0,7,14,1,0,0,2,4,4,3,11,7,0,0,5,12,12,14,16,8,0,3 +0,0,5,16,10,0,0,0,0,0,8,16,12,0,0,0,0,0,8,16,9,0,0,0,0,5,15,16,8,0,0,0,0,3,12,16,9,0,0,0,0,0,0,13,16,2,0,0,0,0,0,9,16,11,2,0,0,0,4,16,16,16,11,0,1 +0,0,2,15,11,0,0,0,0,0,8,16,11,0,0,0,0,1,13,16,7,0,0,0,0,8,16,16,7,0,0,0,0,11,9,16,12,0,0,0,0,0,0,7,16,6,0,0,0,0,0,5,16,15,3,0,0,0,3,16,16,16,15,1,1 +0,0,0,5,13,0,0,0,0,0,1,14,5,0,10,3,0,0,8,15,0,8,16,1,0,2,16,6,0,13,11,0,1,13,16,10,12,16,4,0,6,15,14,12,15,12,0,0,0,0,0,1,15,5,0,0,0,0,0,8,12,0,0,0,4 +0,2,15,15,2,0,0,0,0,10,11,16,6,0,0,0,0,9,5,12,9,0,0,0,0,0,0,13,8,0,0,0,0,0,1,16,3,0,0,0,0,0,9,13,0,0,0,0,0,1,14,16,15,10,5,0,0,5,15,8,12,15,12,0,2 
+0,0,0,2,15,15,2,0,0,0,1,13,16,14,0,0,0,0,8,16,16,10,0,0,0,7,16,16,16,6,0,0,0,1,0,14,15,1,0,0,0,0,0,14,15,0,0,0,0,0,0,10,16,4,0,0,0,0,0,6,16,9,0,0,1 +0,0,0,8,15,16,7,0,0,1,14,9,1,0,8,0,0,6,12,0,0,10,8,0,0,4,14,12,13,14,8,0,0,0,0,0,1,4,8,0,0,0,0,0,0,4,8,0,0,0,0,0,0,7,8,0,0,0,0,8,12,15,6,0,9 +0,0,2,13,16,14,2,0,0,0,12,13,8,15,8,0,0,8,14,0,0,11,12,0,0,7,16,13,11,16,13,0,0,0,6,8,8,14,10,0,0,0,0,0,0,15,8,0,0,0,3,12,5,15,5,0,0,0,1,12,16,14,2,0,9 +0,0,7,10,13,10,0,0,0,0,14,11,4,11,3,0,0,2,15,1,0,5,8,0,0,7,9,0,0,3,8,0,0,5,12,0,0,4,8,0,0,5,11,0,0,8,4,0,0,0,12,2,2,12,0,0,0,0,6,15,14,2,0,0,0 +0,7,16,15,2,0,0,0,0,13,10,12,11,0,0,0,0,13,4,8,12,0,0,0,0,0,1,7,13,0,0,0,0,0,1,14,8,0,0,0,0,0,5,16,1,0,0,0,0,1,13,15,12,11,7,0,0,5,16,16,15,14,16,1,2 +0,0,0,5,14,16,16,1,0,0,8,14,8,13,16,1,0,0,1,0,0,11,12,0,0,0,0,2,4,14,8,0,0,2,11,16,16,16,8,0,0,7,10,5,11,10,0,0,0,0,0,1,15,3,0,0,0,0,0,8,12,0,0,0,7 +0,0,13,16,16,16,6,0,0,6,16,8,5,5,5,0,0,11,16,0,0,0,0,0,0,5,16,14,9,1,0,0,0,0,1,6,14,10,0,0,0,0,0,0,5,16,0,0,0,0,4,2,13,12,0,0,0,1,13,16,14,1,0,0,5 +0,0,0,2,16,1,0,0,0,0,1,13,7,0,4,5,0,0,8,12,0,1,15,5,0,4,16,2,0,11,10,0,0,12,15,12,12,16,1,0,0,9,12,12,14,13,0,0,0,0,0,1,14,5,0,0,0,0,0,4,12,0,0,0,4 +0,0,9,15,16,3,0,0,0,5,15,10,16,4,0,0,0,0,2,7,13,0,0,0,0,0,4,16,5,0,0,0,0,0,2,15,16,11,1,0,0,0,0,0,6,15,6,0,0,0,5,5,4,13,8,0,0,0,14,16,16,15,3,0,3 +0,0,4,16,16,6,0,0,0,3,11,11,5,15,2,0,0,8,16,8,0,13,7,0,0,11,16,3,0,10,11,0,0,9,16,0,0,4,12,0,0,6,16,2,0,7,11,0,0,1,16,10,5,15,5,0,0,0,4,15,16,10,0,0,0 +0,0,2,11,15,5,0,0,0,0,12,8,9,9,0,0,0,0,12,5,9,7,0,0,0,0,9,15,10,0,0,0,0,0,6,15,11,1,0,0,0,8,13,2,11,11,0,0,0,7,13,7,0,13,3,0,0,0,2,11,15,13,2,0,8 +0,0,6,9,12,16,10,0,0,0,11,13,5,4,4,0,0,0,12,4,0,0,0,0,0,3,15,3,4,2,0,0,0,2,16,14,12,14,7,0,0,0,1,0,0,10,4,0,0,12,13,0,1,13,2,0,0,0,6,16,16,7,0,0,5 +0,0,10,16,15,3,0,0,0,6,16,6,9,14,1,0,0,8,16,3,0,13,6,0,0,11,14,0,0,7,10,0,0,8,12,0,0,4,12,0,0,8,13,0,0,9,9,0,0,4,16,8,9,16,3,0,0,0,10,16,15,6,0,0,0 +0,0,1,12,15,6,0,0,0,0,5,13,1,12,0,0,0,0,2,11,1,15,0,0,0,0,0,10,11,9,0,0,0,0,0,6,16,6,0,0,0,0,10,15,4,13,3,0,0,0,12,2,0,1,14,0,0,0,1,10,10,8,15,1,8 +0,0,0,5,16,1,0,0,0,0,2,15,10,1,9,2,0,0,12,14,1,8,15,1,0,4,16,6,1,15,8,0,0,13,16,16,16,16,9,0,0,8,10,8,16,13,0,0,0,0,0,1,16,9,0,0,0,0,0,5,16,2,0,0,4 +0,3,15,16,12,1,0,0,0,12,14,12,16,7,0,0,0,12,6,0,14,10,0,0,0,1,1,1,16,8,0,0,0,0,0,10,15,1,0,0,0,0,5,16,7,0,0,0,0,3,14,16,9,6,4,0,0,3,15,16,16,16,12,0,2 +0,0,12,14,12,12,3,0,0,0,15,12,10,9,1,0,0,2,16,6,7,2,0,0,0,4,16,16,16,15,1,0,0,0,4,4,4,16,8,0,0,0,1,0,0,10,7,0,0,4,12,0,3,14,4,0,0,1,13,16,16,6,0,0,5 +0,3,16,9,0,0,0,0,0,7,15,16,2,0,0,0,0,8,9,16,4,0,0,0,0,2,4,13,7,0,0,0,0,0,0,14,7,0,0,0,0,0,1,15,7,0,0,0,0,1,14,16,16,13,9,1,0,4,16,11,8,8,11,2,2 +0,2,12,16,16,8,0,0,0,8,10,9,16,6,0,0,0,0,1,13,13,0,0,0,0,0,8,16,5,0,0,0,0,0,2,14,15,3,0,0,0,0,0,0,9,15,3,0,0,0,0,3,4,14,11,0,0,3,15,16,16,16,9,0,3 +0,0,0,0,14,9,0,0,0,0,0,8,15,3,7,1,0,0,6,15,4,5,16,6,0,5,16,5,0,13,14,0,0,11,16,12,12,16,8,0,0,6,9,8,12,15,2,0,0,0,0,0,10,14,0,0,0,0,0,0,15,7,0,0,4 +0,0,0,10,16,15,3,0,0,0,6,13,10,16,2,0,0,0,3,1,2,14,0,0,0,0,0,0,8,8,0,0,0,0,1,8,14,15,8,0,0,1,15,16,13,3,0,0,0,0,2,9,7,0,0,0,0,0,0,14,1,0,0,0,7 +0,2,15,9,10,8,6,0,0,10,16,16,16,12,8,0,0,8,16,6,0,0,0,0,0,1,12,16,12,1,0,0,0,0,1,6,15,12,0,0,0,0,0,0,5,16,2,0,0,10,12,6,10,16,1,0,0,1,13,16,14,5,0,0,5 +0,0,0,10,6,0,0,0,0,0,5,16,9,0,0,0,0,0,10,16,10,0,0,0,0,4,15,12,12,0,0,0,0,2,4,3,16,2,0,0,0,0,0,0,11,9,0,0,0,0,0,7,12,15,4,0,0,0,0,12,16,16,8,0,1 
+0,0,12,16,15,1,0,0,0,2,16,6,16,2,0,0,0,1,16,13,13,0,0,0,0,0,10,16,5,0,0,0,0,2,15,16,12,1,0,0,0,10,12,2,11,12,0,0,0,8,13,4,0,13,9,0,0,0,7,15,16,16,6,0,8 +0,0,14,8,0,0,0,0,0,3,16,15,4,0,0,0,0,1,10,14,9,0,0,0,0,0,0,9,13,0,0,0,0,0,0,8,13,0,0,0,0,0,1,12,15,3,0,0,0,0,11,16,16,16,12,2,0,0,13,14,2,1,3,1,2 +0,0,0,1,13,10,0,0,0,0,0,3,16,15,0,0,0,0,3,9,16,15,0,0,0,7,16,15,16,12,0,0,0,0,0,1,16,15,0,0,0,0,0,4,16,16,0,0,0,0,0,4,16,16,3,0,0,0,0,0,12,16,4,0,1 +0,0,3,15,15,12,12,4,0,0,9,10,2,9,15,2,0,0,7,2,2,14,2,0,0,0,3,4,11,10,0,0,0,0,13,16,16,16,5,0,0,0,0,10,9,0,0,0,0,0,2,16,3,0,0,0,0,0,7,13,0,0,0,0,7 +0,0,12,15,6,0,0,0,0,8,14,7,16,3,0,0,0,8,6,0,8,8,0,0,0,5,8,0,8,10,0,0,0,0,1,0,12,9,0,0,0,0,0,7,15,3,0,0,0,0,7,16,12,5,7,0,0,0,11,16,16,16,15,0,2 +0,0,7,12,12,6,0,0,0,0,16,11,15,15,2,0,0,4,12,0,1,10,6,0,0,8,10,0,0,10,6,0,0,4,12,0,0,8,8,0,0,3,13,0,0,9,7,0,0,2,15,4,5,15,2,0,0,0,9,16,16,9,0,0,0 +0,0,6,15,10,0,0,0,0,0,16,9,16,7,0,0,0,4,10,0,6,15,0,0,0,8,8,0,0,11,3,0,0,8,8,0,0,8,5,0,0,7,10,0,0,5,6,0,0,1,15,4,1,12,4,0,0,0,8,16,16,11,0,0,0 +0,0,0,0,12,15,1,0,0,3,8,8,14,16,0,0,0,11,16,16,16,16,0,0,0,1,4,4,15,16,0,0,0,0,0,0,12,16,4,0,0,0,0,0,12,16,3,0,0,0,0,0,12,16,10,0,0,0,0,0,9,16,11,0,1 +0,0,13,13,4,0,0,0,0,4,14,7,14,0,0,0,0,7,8,0,10,4,0,0,0,1,4,0,13,3,0,0,0,0,0,1,15,0,0,0,0,0,0,11,9,0,0,0,0,0,11,16,12,12,4,0,0,0,14,14,12,12,6,0,2 +0,0,0,4,14,0,0,0,0,0,0,12,12,0,0,0,0,0,1,16,9,0,0,0,0,0,10,16,7,13,0,0,0,3,16,9,9,16,0,0,0,14,16,13,15,16,5,0,0,5,13,14,16,15,3,0,0,0,0,3,16,10,0,0,4 +0,0,7,14,10,2,0,0,0,1,16,12,15,8,0,0,0,7,13,0,5,13,2,0,0,8,12,0,0,10,6,0,0,8,12,0,0,8,8,0,0,6,13,0,0,10,8,0,0,0,14,10,6,14,6,0,0,0,5,16,16,9,0,0,0 +0,0,1,13,14,11,0,0,0,0,10,12,12,16,4,0,0,0,15,0,2,9,6,0,0,5,13,0,0,6,9,0,0,5,12,0,0,5,7,0,0,1,13,2,0,9,4,0,0,0,12,11,7,11,0,0,0,0,2,15,12,3,0,0,0 +0,1,8,16,16,11,0,0,0,6,15,10,14,16,4,0,0,6,13,2,7,15,2,0,0,0,11,16,16,7,0,0,0,0,11,15,15,8,0,0,0,1,16,3,6,16,4,0,0,0,14,9,4,13,8,0,0,0,5,15,16,15,4,0,8 +0,0,7,16,15,1,0,0,0,1,16,5,12,16,6,0,0,2,15,1,4,16,6,0,0,1,13,16,16,16,5,0,0,0,0,1,0,11,8,0,0,0,0,0,0,11,7,0,0,7,14,9,10,16,3,0,0,1,9,14,15,9,0,0,9 +0,0,0,6,16,0,0,0,0,0,0,14,11,0,0,0,0,0,4,16,3,0,0,0,0,0,12,11,3,0,0,0,0,4,16,5,16,7,0,0,1,15,16,13,16,14,4,0,0,9,15,15,16,15,1,0,0,0,0,6,15,0,0,0,4 +0,0,0,10,16,0,0,0,0,0,6,14,2,0,0,0,0,0,12,7,0,0,0,0,0,4,16,8,7,1,0,0,0,3,16,15,13,13,3,0,0,0,16,2,0,8,11,0,0,0,9,9,1,4,14,0,0,0,1,10,16,16,8,0,6 +0,0,6,15,9,0,0,0,0,1,15,14,16,7,0,0,0,4,13,0,7,16,1,0,0,6,12,0,0,13,8,0,0,7,12,0,0,12,8,0,0,6,13,0,0,12,8,0,0,2,15,10,8,15,4,0,0,0,5,12,13,6,0,0,0 +0,0,0,4,16,2,0,0,0,0,0,9,13,0,0,0,0,0,2,16,5,0,0,0,0,0,9,13,4,0,0,0,0,3,16,6,15,5,0,0,0,10,15,9,16,10,1,0,1,16,16,16,16,16,5,0,0,3,4,9,16,4,0,0,4 +0,0,4,10,12,12,11,1,0,0,8,10,11,16,10,0,0,0,0,2,7,15,1,0,0,0,5,16,16,15,5,0,0,0,3,11,13,8,4,0,0,0,1,14,3,0,0,0,0,0,6,14,0,0,0,0,0,0,9,7,0,0,0,0,7 +0,0,6,13,13,6,0,0,0,1,14,5,7,13,0,0,0,0,0,0,2,15,0,0,0,0,0,10,14,9,0,0,0,0,0,8,9,16,3,0,0,1,5,0,0,16,4,0,0,1,16,4,5,15,3,0,0,0,6,16,14,5,0,0,3 +0,0,3,13,10,8,9,2,0,0,10,11,8,11,15,0,0,0,13,3,1,12,6,0,0,0,5,3,7,14,0,0,0,0,9,16,16,16,5,0,0,0,0,7,11,1,0,0,0,0,0,14,6,0,0,0,0,0,4,15,2,0,0,0,7 +0,0,0,1,12,8,0,0,0,0,0,4,16,4,0,0,0,0,0,9,16,1,0,0,0,0,5,16,8,1,0,0,0,0,10,16,11,15,0,0,0,5,16,11,15,15,3,0,0,12,16,16,16,16,5,0,0,0,0,0,15,14,0,0,4 +0,0,0,7,12,0,0,0,0,0,0,11,11,0,0,0,0,0,2,16,3,0,0,0,0,0,9,12,9,3,0,0,0,1,15,7,16,7,0,0,0,9,16,14,16,16,8,0,0,8,12,14,16,12,5,0,0,0,0,7,15,0,0,0,4 +0,0,5,14,16,16,14,0,0,0,14,14,12,15,13,0,0,0,9,2,5,16,5,0,0,0,1,9,15,16,7,0,0,0,3,16,16,16,9,0,0,0,0,10,14,4,0,0,0,0,0,15,7,0,0,0,0,0,5,15,2,0,0,0,7 
+0,0,3,13,13,14,6,0,0,2,15,8,16,15,11,0,0,3,14,3,5,15,4,0,0,1,13,16,16,3,0,0,0,0,13,13,14,9,0,0,0,4,12,0,6,11,0,0,0,3,14,5,7,15,0,0,0,0,7,15,15,9,0,0,8 +0,0,9,13,3,0,0,0,0,1,15,14,12,0,0,0,0,0,16,0,16,2,0,0,0,0,3,2,16,2,0,0,0,0,0,2,16,3,0,0,0,0,2,11,15,2,0,0,0,0,13,16,14,8,7,0,0,0,8,13,12,12,13,0,2 +0,0,1,7,16,12,0,0,0,0,3,15,16,12,0,0,0,5,13,16,16,9,0,0,0,5,14,16,16,8,0,0,0,0,4,16,16,7,0,0,0,0,0,11,16,12,0,0,0,0,0,8,16,16,5,0,0,0,0,6,14,13,5,0,1 +0,0,0,0,7,15,2,0,0,0,0,0,8,16,5,0,0,0,0,1,13,14,0,0,0,8,16,16,16,12,0,0,0,0,3,0,16,12,0,0,0,0,0,0,12,15,0,0,0,0,0,0,9,16,2,0,0,0,0,0,6,16,8,0,1 +0,4,13,16,16,12,0,0,0,5,14,4,4,3,0,0,0,2,16,8,6,1,0,0,0,2,16,13,14,11,0,0,0,0,5,0,0,15,5,0,0,0,0,0,0,11,9,0,0,0,1,0,0,11,12,0,0,2,15,16,16,14,3,0,5 +0,0,9,12,9,0,0,0,0,3,15,10,16,1,0,0,0,0,1,1,16,4,0,0,0,0,3,13,16,2,0,0,0,0,7,12,13,14,1,0,0,3,1,0,4,16,5,0,0,12,12,4,11,15,2,0,0,4,13,15,11,3,0,0,3 +0,0,8,14,11,0,0,0,0,0,14,4,11,8,0,0,0,0,0,0,9,8,0,0,0,0,2,10,16,3,0,0,0,0,5,9,9,15,2,0,0,0,0,0,0,10,7,0,0,7,12,1,1,14,7,0,0,0,8,16,16,10,1,0,3 +0,0,4,15,15,8,1,0,0,2,12,3,10,14,11,0,0,3,14,5,3,11,11,0,0,0,7,15,16,14,2,0,0,0,5,16,11,15,2,0,0,0,13,1,2,16,1,0,0,0,10,11,8,15,0,0,0,0,3,13,14,3,0,0,8 +0,1,10,15,14,2,0,0,0,7,10,3,11,10,0,0,0,1,1,0,8,14,0,0,0,0,3,9,15,9,0,0,0,0,4,8,9,15,2,0,0,0,0,0,0,14,6,0,0,0,6,1,0,13,8,0,0,0,9,16,16,12,3,0,3 +0,0,1,9,12,8,0,0,0,1,12,9,13,13,0,0,0,6,11,0,2,15,0,0,0,1,14,14,14,8,0,0,0,0,0,11,16,13,1,0,0,0,4,13,2,14,6,0,0,0,8,9,2,14,7,0,0,0,4,16,16,8,1,0,8 +0,0,10,14,12,0,0,0,0,2,16,8,3,0,0,0,0,5,16,12,11,1,0,0,0,2,12,9,10,13,1,0,0,0,0,0,0,12,6,0,0,0,0,0,0,4,12,0,0,2,14,4,4,13,9,0,0,0,10,14,15,11,1,0,5 +0,0,9,15,12,6,0,0,0,3,16,15,13,8,0,0,0,2,16,4,12,5,0,0,0,0,13,16,10,0,0,0,0,0,8,14,11,1,0,0,0,1,12,0,14,6,0,0,0,0,13,5,2,16,2,0,0,0,7,14,16,16,2,0,8 +0,0,0,12,16,10,0,0,0,0,10,15,15,13,0,0,0,0,9,6,16,7,0,0,0,0,0,8,15,3,0,0,0,0,5,16,6,0,0,0,0,8,15,16,3,0,0,0,5,16,16,16,16,6,0,0,1,4,4,9,16,11,0,0,2 +0,0,8,14,14,2,0,0,0,5,15,5,9,15,6,0,0,6,15,6,4,16,6,0,0,0,5,13,16,16,4,0,0,0,0,0,0,14,4,0,0,0,0,0,0,11,4,0,0,0,8,6,1,10,8,0,0,0,10,16,16,16,4,0,9 +0,0,5,15,7,0,0,0,0,1,16,16,10,7,0,0,0,8,13,0,3,16,3,0,0,9,10,0,0,9,7,0,0,8,12,0,0,8,8,0,0,4,14,0,0,5,12,0,0,2,15,5,6,14,6,0,0,0,6,13,15,10,2,0,0 +0,1,10,16,10,1,0,0,0,8,14,5,10,13,2,0,0,1,4,0,11,15,2,0,0,0,0,0,16,7,0,0,0,0,0,0,14,11,0,0,0,0,0,0,0,14,11,0,0,0,10,5,4,10,13,0,0,0,10,14,16,14,9,0,3 +0,2,6,11,8,0,0,0,0,9,16,14,5,0,0,0,0,9,16,12,8,2,0,0,0,8,15,8,11,13,1,0,0,0,0,0,0,8,10,0,0,0,0,0,0,5,14,0,0,0,6,6,4,8,14,0,0,0,5,15,16,15,7,0,5 +0,0,3,12,15,6,0,0,0,1,14,16,14,16,5,0,0,6,16,9,0,13,8,0,0,8,16,0,0,9,8,0,0,7,16,0,0,9,8,0,0,5,16,0,3,15,2,0,0,2,14,15,15,9,0,0,0,0,4,14,9,1,0,0,0 +0,0,0,7,15,0,0,0,0,0,0,9,15,0,0,0,0,0,2,15,7,0,0,0,0,0,9,16,11,5,0,0,0,9,16,16,16,15,7,0,0,5,12,14,16,16,8,0,0,0,0,8,16,0,0,0,0,0,0,8,15,1,0,0,4 +0,0,9,12,2,0,0,0,0,3,16,16,16,16,3,0,0,4,15,13,14,16,6,0,0,2,15,11,16,11,1,0,0,1,14,16,11,0,0,0,0,5,14,6,16,5,0,0,0,7,15,8,13,16,0,0,0,1,7,11,16,13,0,0,8 +0,0,9,14,9,0,0,0,0,6,15,5,16,6,0,0,0,5,14,1,6,13,0,0,0,0,10,14,13,16,0,0,0,0,0,5,5,12,2,0,0,0,0,0,0,9,5,0,0,0,16,9,7,10,8,0,0,0,6,10,14,16,5,0,9 +0,2,15,16,9,0,0,0,0,7,15,12,16,2,0,0,0,9,9,2,16,2,0,0,0,1,0,7,16,0,0,0,0,0,0,14,10,0,0,0,0,1,12,15,2,0,0,0,0,8,16,14,12,12,3,0,0,3,14,16,16,16,7,0,2 +0,0,12,10,0,0,0,0,0,3,13,16,4,0,0,0,0,6,16,16,7,0,0,0,0,0,8,15,9,0,0,0,0,0,0,10,14,0,0,0,0,0,0,6,16,6,0,0,0,0,9,13,16,13,8,3,0,0,9,16,16,16,16,9,1 
+0,0,0,10,16,3,0,0,0,0,2,16,7,0,0,0,0,0,9,15,0,0,0,0,0,0,15,12,4,2,0,0,0,1,16,10,0,11,2,0,0,0,16,6,0,5,13,0,0,0,7,14,5,8,16,1,0,0,0,3,12,12,9,0,6 +0,0,2,8,13,13,2,0,0,6,14,14,11,15,3,0,0,10,14,4,10,6,0,0,0,1,14,12,13,0,0,0,0,0,9,15,11,0,0,0,0,0,13,2,11,8,0,0,0,0,8,10,4,14,3,0,0,0,1,8,14,14,1,0,8 +0,0,6,15,15,4,0,0,0,0,11,10,4,2,0,0,0,0,13,11,2,0,0,0,0,0,12,14,12,5,0,0,0,0,0,0,3,15,3,0,0,0,0,0,0,4,11,0,0,0,15,8,3,2,15,0,0,0,5,12,16,16,12,0,5 +0,0,0,11,11,0,0,0,0,0,0,14,11,0,0,0,0,1,12,15,2,0,0,0,0,8,16,8,11,6,2,0,0,5,15,16,16,16,11,0,0,0,1,9,16,7,1,0,0,0,0,8,16,3,0,0,0,0,0,9,16,1,0,0,4 +0,0,5,15,14,12,9,0,0,0,2,4,4,9,15,0,0,0,0,0,1,13,6,0,0,0,3,11,12,16,1,0,0,0,5,13,16,16,4,0,0,0,0,10,12,2,0,0,0,0,4,15,4,0,0,0,0,0,8,9,0,0,0,0,7 +0,0,12,7,0,0,0,0,0,0,9,15,0,0,0,0,0,0,11,16,5,0,0,0,0,0,10,16,7,0,0,0,0,0,0,11,14,0,0,0,0,0,0,6,16,2,0,0,0,0,14,16,16,13,12,3,0,0,8,15,15,14,16,11,1 +0,1,13,10,0,0,0,0,0,7,16,16,7,0,0,0,0,3,13,8,16,0,0,0,0,0,1,5,16,2,0,0,0,0,0,5,16,3,0,0,0,0,4,13,15,1,0,0,0,4,16,16,16,16,8,0,0,2,15,16,14,16,16,5,2 +0,0,8,12,13,3,0,0,0,0,11,15,7,1,0,0,0,0,15,16,14,3,0,0,0,0,4,5,6,13,2,0,0,0,0,0,0,9,7,0,0,0,6,0,0,2,12,0,0,3,15,8,4,10,11,0,0,0,8,12,13,12,3,0,5 +0,0,8,15,7,0,0,0,0,2,16,6,9,3,0,0,0,4,14,1,12,10,0,0,0,0,8,16,16,15,0,0,0,0,0,0,0,14,0,0,0,0,0,0,0,8,7,0,0,1,8,4,1,4,13,0,0,0,5,13,16,16,13,0,9 +0,0,8,12,12,0,0,0,0,0,8,11,2,0,0,0,0,0,15,13,11,3,0,0,0,1,13,8,10,15,1,0,0,0,0,0,0,9,7,0,0,1,1,0,0,8,8,0,0,6,13,4,4,11,8,0,0,1,7,14,16,13,2,0,5 +0,0,7,15,3,0,0,0,0,0,1,16,9,0,0,0,0,0,7,15,15,0,0,0,0,0,10,16,13,0,0,0,0,0,0,6,16,2,0,0,0,0,0,4,16,6,0,0,0,0,9,13,16,13,8,3,0,0,6,15,16,16,16,12,1 +0,0,1,12,14,3,0,0,0,0,10,15,7,0,0,0,0,0,13,8,0,0,0,0,0,0,15,16,11,5,0,0,0,0,14,13,9,14,7,0,0,0,14,6,0,3,14,0,0,0,10,11,4,7,16,1,0,0,1,11,15,14,7,1,6 +0,0,5,15,13,8,3,0,0,0,14,14,10,16,11,0,0,0,11,13,5,12,12,0,0,0,6,16,16,8,1,0,0,1,13,13,14,0,0,0,0,6,10,1,15,7,0,0,0,5,15,7,11,13,0,0,0,0,6,12,16,10,0,0,8 +0,0,5,15,13,7,0,0,0,4,15,8,7,16,4,0,0,6,12,1,10,14,1,0,0,0,0,12,12,2,0,0,0,0,0,14,11,3,0,0,0,0,0,1,10,14,0,0,0,0,0,0,4,14,8,0,0,0,4,16,16,14,4,0,3 +0,0,9,13,7,0,0,0,0,7,14,4,14,4,0,0,0,2,2,3,15,2,0,0,0,0,0,16,11,0,0,0,0,0,0,8,15,11,0,0,0,0,0,0,0,12,7,0,0,0,10,4,2,10,8,0,0,0,7,13,16,16,4,0,3 +0,0,7,14,12,4,0,0,0,0,10,12,12,12,2,0,0,0,9,5,3,14,3,0,0,0,4,13,15,9,0,0,0,0,8,16,8,0,0,0,0,0,15,4,14,1,0,0,0,2,13,5,9,7,0,0,0,0,4,12,16,10,0,0,8 +0,0,0,0,15,7,0,0,0,0,0,2,16,8,0,0,0,0,0,10,16,3,0,0,0,7,16,16,4,12,12,0,0,1,13,16,16,16,9,0,0,0,1,7,14,16,0,0,0,0,0,0,13,15,0,0,0,0,0,0,16,11,0,0,4 +0,0,11,16,10,1,0,0,0,0,16,8,11,13,1,0,0,0,13,7,7,16,1,0,0,0,4,13,14,16,1,0,0,0,0,0,0,15,4,0,0,0,0,0,0,8,11,0,0,1,7,4,5,14,12,0,0,0,8,15,14,11,3,0,9 +0,0,6,14,15,1,0,0,0,0,11,12,4,0,0,0,0,0,15,10,4,1,0,0,0,0,13,13,14,12,1,0,0,0,0,0,0,10,10,0,0,0,0,0,0,4,14,0,0,0,0,2,1,9,14,0,0,0,6,16,16,16,6,0,5 +0,0,0,8,10,0,0,0,0,0,6,16,12,0,0,0,0,0,12,12,1,0,0,0,0,0,15,5,0,0,0,0,0,1,16,3,13,14,2,0,0,0,14,16,15,14,13,0,0,0,9,16,12,10,15,1,0,0,0,6,12,15,8,0,6 +0,0,6,16,2,0,0,0,0,3,16,13,11,0,0,0,0,12,14,2,14,0,0,0,0,2,2,0,16,0,0,0,0,0,0,2,16,1,0,0,0,0,0,10,14,4,3,0,0,0,5,16,16,16,16,0,0,0,6,15,10,8,11,2,2 +0,0,5,13,12,1,0,0,0,2,15,14,15,9,0,0,0,7,16,4,13,16,1,0,0,6,16,14,16,16,6,0,0,0,12,14,9,16,7,0,0,0,0,0,1,16,7,0,0,0,3,4,8,15,5,0,0,0,8,15,15,9,0,0,9 +0,1,14,15,2,0,0,0,0,7,16,16,7,0,0,0,0,14,9,12,11,0,0,0,0,7,4,13,10,0,0,0,0,0,4,16,4,0,0,0,0,0,10,15,2,0,0,0,0,3,16,14,12,12,10,0,0,0,12,16,16,16,16,3,2 
+0,0,2,12,16,4,0,0,0,1,13,15,11,12,0,0,0,8,16,8,4,16,0,0,0,2,5,0,6,13,0,0,0,0,0,0,11,8,0,0,0,0,0,7,16,8,1,0,0,0,1,16,16,16,12,0,0,0,2,8,5,6,13,0,2 +0,0,7,15,14,2,0,0,0,0,9,16,16,11,0,0,0,0,3,16,16,11,0,0,0,0,5,16,16,13,0,0,0,0,6,16,16,14,0,0,0,0,6,16,16,12,0,0,0,0,7,16,16,15,6,0,0,0,4,16,16,15,7,0,1 +0,0,4,13,4,0,0,0,0,2,13,16,15,1,0,0,0,9,16,5,10,9,0,0,0,11,16,1,1,14,1,0,0,6,16,3,0,6,9,0,0,2,16,2,0,5,11,0,0,0,12,12,8,15,9,0,0,0,2,12,13,9,1,0,0 +0,0,6,13,15,6,0,0,0,1,15,16,16,14,0,0,0,4,16,8,4,14,4,0,0,4,16,2,0,9,7,0,0,4,15,0,0,13,5,0,0,2,16,0,5,15,5,0,0,1,15,13,16,11,0,0,0,0,6,15,12,3,0,0,0 +0,0,5,15,16,7,0,0,0,0,15,15,12,15,0,0,0,0,8,1,5,16,2,0,0,0,0,0,8,16,0,0,0,0,0,2,15,16,8,0,0,0,0,0,3,12,13,0,0,0,14,12,13,16,8,0,0,0,5,12,13,10,1,0,3 +0,0,3,10,12,16,5,0,0,0,8,8,8,15,11,0,0,0,0,0,2,16,7,0,0,0,1,6,14,16,6,0,0,0,4,14,16,12,4,0,0,0,0,9,13,2,0,0,0,0,2,16,6,0,0,0,0,0,5,16,2,0,0,0,7 +0,0,0,8,4,0,0,0,0,0,0,12,4,1,0,0,0,0,4,13,2,16,1,0,0,0,14,7,7,14,4,0,0,5,16,16,16,16,6,0,0,3,10,7,15,6,0,0,0,0,0,5,10,0,0,0,0,0,0,7,10,0,0,0,4 +0,0,6,16,6,0,0,0,0,2,16,14,15,0,0,0,0,9,13,0,14,3,0,0,0,4,6,1,16,5,0,0,0,0,0,8,16,3,0,0,0,0,3,16,14,0,2,0,0,0,13,16,16,16,15,0,0,0,7,14,12,12,12,1,2 +0,0,0,7,13,5,0,0,0,0,2,16,15,3,0,0,0,0,10,15,4,0,0,0,0,1,16,8,0,0,0,0,0,5,16,14,16,14,2,0,0,3,16,14,12,14,10,0,0,0,11,13,5,12,14,0,0,0,1,8,14,15,5,0,6 +0,0,5,14,16,9,0,0,0,6,16,9,13,12,0,0,0,3,7,1,14,12,0,0,0,0,1,15,16,15,2,0,0,0,1,11,9,14,11,0,0,0,1,3,0,13,9,0,0,0,10,13,9,16,6,0,0,0,3,16,16,10,0,0,3 +0,0,2,12,9,1,0,0,0,0,7,16,16,7,0,0,0,0,12,16,16,2,0,0,0,1,16,16,16,1,0,0,0,0,14,16,15,1,0,0,0,0,8,16,16,3,0,0,0,0,8,16,16,7,0,0,0,0,3,12,16,16,5,0,1 +0,0,0,10,13,0,0,0,0,0,5,16,8,3,0,0,0,0,11,13,7,16,0,0,0,7,16,10,14,15,8,0,0,11,16,16,16,16,9,0,0,1,4,10,16,3,0,0,0,0,0,12,15,0,0,0,0,0,0,13,8,0,0,0,4 +0,0,9,12,15,10,0,0,0,6,16,14,12,10,0,0,0,5,12,1,6,0,0,0,0,7,15,15,16,4,0,0,0,6,13,11,13,9,0,0,0,0,0,0,12,9,0,0,0,0,6,8,16,10,0,0,0,0,13,15,11,2,0,0,5 +0,0,0,5,12,0,0,0,0,0,5,16,14,2,0,0,0,0,15,15,1,0,0,0,0,2,16,9,0,0,0,0,0,0,16,10,13,15,4,0,0,0,13,16,16,16,15,1,0,0,5,16,16,13,16,4,0,0,0,5,14,16,12,0,6 +0,0,2,11,15,2,0,0,0,1,15,14,11,10,0,0,0,7,16,2,1,16,0,0,0,6,16,12,14,16,1,0,0,0,10,16,16,16,4,0,0,0,4,16,12,9,10,0,0,0,7,16,5,15,8,0,0,0,2,12,12,13,2,0,8 +0,0,2,13,9,0,0,0,0,0,8,16,16,0,0,0,0,14,16,16,8,0,0,0,0,6,12,16,6,0,0,0,0,0,8,16,6,0,0,0,0,0,6,16,7,0,0,0,0,0,4,16,11,0,0,0,0,0,1,14,14,0,0,0,1 +0,1,10,14,5,0,0,0,0,9,16,14,16,0,0,0,0,3,5,9,16,0,0,0,0,0,5,16,16,14,0,0,0,0,8,14,13,16,6,0,0,0,0,0,0,16,11,0,0,0,11,5,11,16,7,0,0,1,12,16,14,5,0,0,3 +0,0,0,13,13,1,0,0,0,0,11,14,15,8,0,0,0,6,16,3,10,7,0,0,0,8,16,4,12,8,0,0,0,1,3,4,15,1,0,0,0,0,2,13,12,0,0,0,0,0,11,16,16,12,7,0,0,0,3,9,8,11,13,0,2 +0,0,2,10,7,0,0,0,0,3,16,11,8,6,0,0,0,4,13,0,0,8,0,0,0,4,16,9,11,11,0,0,0,0,10,16,16,15,2,0,0,0,4,16,4,3,8,0,0,0,8,9,0,2,9,0,0,0,3,11,8,8,4,0,8 +0,0,9,16,16,13,12,1,0,0,6,6,8,14,15,0,0,0,0,0,5,16,4,0,0,0,2,8,15,16,3,0,0,0,2,13,15,8,3,0,0,0,0,15,7,0,0,0,0,0,6,16,1,0,0,0,0,0,11,8,0,0,0,0,7 +0,0,8,14,15,8,0,0,0,5,15,11,8,10,0,0,0,7,12,0,0,0,0,0,0,9,16,16,4,0,0,0,0,3,8,9,13,0,0,0,0,0,0,1,13,0,0,0,0,0,6,9,14,0,0,0,0,0,7,13,8,0,0,0,5 +0,0,0,7,3,0,0,0,0,0,0,15,5,0,0,0,0,0,6,14,2,8,0,0,0,0,12,8,5,14,0,0,0,6,16,12,14,15,4,0,0,2,10,15,16,14,5,0,0,0,0,12,11,0,0,0,0,0,0,11,9,0,0,0,4 +0,0,5,15,10,2,0,0,0,0,9,16,16,3,0,0,0,0,13,16,16,3,0,0,0,0,13,16,16,1,0,0,0,0,12,16,16,2,0,0,0,0,10,16,16,2,0,0,0,0,5,16,16,11,0,0,0,0,4,15,16,15,3,0,1 
+0,0,0,3,14,0,0,0,0,0,1,14,12,8,2,0,0,0,9,15,6,16,4,0,0,3,16,14,12,16,5,0,0,3,13,16,16,16,8,0,0,0,0,0,16,11,1,0,0,0,0,2,16,4,0,0,0,0,0,3,12,4,0,0,4 +0,0,7,11,15,10,0,0,0,4,16,15,13,12,4,0,0,0,7,1,5,16,4,0,0,0,0,6,15,14,0,0,0,0,0,15,16,16,4,0,0,1,2,3,5,16,8,0,0,3,14,5,7,16,6,0,0,0,8,15,16,12,1,0,3 +0,0,0,2,16,2,0,0,0,0,0,9,13,0,2,0,0,0,4,16,5,11,11,0,0,0,12,13,3,14,10,0,0,5,16,14,13,16,5,0,0,15,16,16,16,16,8,0,0,3,4,2,12,13,1,0,0,0,0,1,16,7,0,0,4 +0,0,4,11,12,15,13,0,0,0,10,11,11,14,14,0,0,0,1,4,5,16,8,0,0,0,13,16,16,16,6,0,0,0,0,7,16,7,1,0,0,0,0,9,12,0,0,0,0,0,2,15,7,0,0,0,0,0,3,14,2,0,0,0,7 +0,0,5,11,12,0,0,0,0,0,16,16,16,13,7,0,0,0,12,16,16,15,4,0,0,0,4,15,14,8,0,0,0,0,12,4,1,4,0,0,0,1,14,0,3,7,0,0,0,1,16,12,16,4,0,0,0,0,3,9,5,0,0,0,8 +0,1,11,16,16,12,0,0,0,7,15,10,6,3,0,0,0,10,10,3,0,0,0,0,0,11,16,16,7,0,0,0,0,10,11,6,15,1,0,0,0,0,0,0,12,6,0,0,0,0,0,3,15,3,0,0,0,0,14,16,10,0,0,0,5 +0,0,3,16,16,9,0,0,0,0,13,14,9,16,3,0,0,0,15,6,5,16,3,0,0,0,8,16,16,16,3,0,0,0,0,6,7,16,2,0,0,0,1,0,3,16,3,0,0,1,14,3,6,15,1,0,0,0,3,15,16,13,0,0,9 +0,0,10,16,5,0,0,0,0,6,16,13,11,0,0,0,0,7,9,4,12,0,0,0,0,0,0,5,15,0,0,0,0,0,0,8,11,0,0,0,0,0,3,15,8,0,0,0,0,4,16,16,10,8,7,1,0,0,7,11,16,16,15,2,2 +0,0,2,13,15,4,0,0,0,0,9,10,5,11,0,0,0,0,10,7,13,10,0,0,0,0,7,12,16,3,0,0,0,0,6,16,7,0,0,0,0,0,14,13,2,0,0,0,0,0,11,2,10,0,0,0,0,0,0,13,15,0,0,0,8 +0,0,3,12,15,3,0,0,0,0,15,16,6,11,0,0,0,7,16,5,0,12,1,0,0,5,14,0,0,9,7,0,0,8,10,0,0,13,8,0,0,4,12,0,2,15,6,0,0,1,14,8,13,12,0,0,0,0,3,15,10,3,0,0,0 +0,0,5,15,16,6,0,0,0,0,13,12,11,11,0,0,0,0,13,11,16,5,0,0,0,0,7,16,14,1,0,0,0,0,10,16,4,0,0,0,0,1,15,12,13,0,0,0,0,0,15,5,15,5,0,0,0,0,5,16,16,6,0,0,8 +0,0,1,15,10,0,0,0,0,0,11,16,4,0,0,0,0,3,16,8,0,10,9,0,0,10,16,3,5,16,10,0,0,10,16,12,15,13,1,0,0,1,10,13,16,9,0,0,0,0,0,9,16,2,0,0,0,0,0,16,9,0,0,0,4 +0,0,3,16,0,0,0,0,0,0,10,10,3,5,0,0,0,0,16,5,12,10,0,0,0,4,15,2,15,4,0,0,0,10,13,9,15,10,11,0,0,3,14,16,14,8,2,0,0,0,0,15,6,0,0,0,0,0,3,15,0,0,0,0,4 +0,0,2,12,11,1,0,0,0,1,16,13,9,8,0,0,0,4,14,1,0,11,1,0,0,6,9,0,0,7,5,0,0,4,9,0,0,6,7,0,0,2,12,0,0,10,6,0,0,0,10,9,13,16,2,0,0,0,2,12,14,3,0,0,0 +0,1,10,8,12,15,1,0,0,0,16,16,16,9,1,0,0,0,13,13,3,0,0,0,0,1,16,16,13,2,0,0,0,0,3,1,13,10,0,0,0,0,0,0,8,16,0,0,0,0,8,1,8,16,1,0,0,2,13,16,16,11,0,0,5 +0,0,5,15,16,12,0,0,0,1,16,9,5,16,2,0,0,2,16,2,11,13,0,0,0,0,13,15,15,2,0,0,0,0,12,16,6,0,0,0,0,2,16,8,13,0,0,0,0,2,16,0,12,6,0,0,0,0,6,15,14,8,0,0,8 +0,0,5,14,16,13,2,0,0,0,15,10,5,15,4,0,0,0,0,1,7,15,1,0,0,0,0,11,16,6,0,0,0,0,0,2,10,14,1,0,0,0,2,0,0,9,8,0,0,1,14,6,4,11,9,0,0,0,5,12,14,10,1,0,3 +0,0,4,15,13,1,0,0,0,1,15,11,9,7,0,0,0,3,16,3,5,16,3,0,0,0,8,16,16,16,3,0,0,0,0,2,4,14,6,0,0,0,0,0,0,9,11,0,0,0,0,0,0,7,15,0,0,0,5,12,12,16,8,0,9 +0,0,6,15,1,0,0,0,0,2,16,7,0,0,0,0,0,8,15,0,0,7,3,0,0,11,13,4,8,16,5,0,0,5,16,16,16,10,0,0,0,0,1,14,15,0,0,0,0,0,4,16,6,0,0,0,0,0,10,13,0,0,0,0,4 +0,0,0,4,16,2,0,0,0,0,0,14,11,8,5,0,0,0,10,13,1,12,8,0,0,6,16,1,0,14,6,0,0,10,14,8,13,16,11,0,0,3,11,12,13,14,2,0,0,0,0,1,14,6,0,0,0,0,0,8,13,1,0,0,4 +0,0,4,14,15,5,0,0,0,7,14,7,6,16,0,0,0,10,6,0,0,11,3,0,0,4,2,0,1,14,0,0,0,0,0,0,10,7,0,0,0,0,0,5,13,1,0,0,0,0,6,16,8,0,0,0,0,0,5,9,12,12,16,2,2 +0,0,5,12,14,7,0,0,0,5,16,7,5,16,2,0,0,8,10,0,4,16,4,0,0,2,11,15,11,13,4,0,0,0,0,0,0,8,4,0,0,0,0,0,0,5,8,0,0,1,7,1,0,11,5,0,0,0,5,13,10,9,1,0,9 +0,0,9,16,16,10,0,0,0,2,16,11,6,6,0,0,0,3,16,2,9,15,1,0,0,0,12,13,15,16,2,0,0,0,0,8,9,16,4,0,0,0,0,0,0,16,4,0,0,0,0,0,6,16,3,0,0,0,8,16,16,13,1,0,9 +0,0,0,15,5,0,0,0,0,0,8,16,1,0,0,0,0,1,16,11,0,2,1,0,0,9,16,2,2,15,11,0,0,11,14,9,15,15,3,0,0,3,12,13,16,6,0,0,0,0,0,7,16,1,0,0,0,0,0,13,13,0,0,0,4 
+0,6,16,16,16,12,0,0,0,0,7,9,15,13,0,0,0,0,0,4,16,5,0,0,0,6,12,14,16,9,5,0,0,8,13,16,11,11,5,0,0,0,8,14,0,0,0,0,0,2,16,6,0,0,0,0,0,5,16,1,0,0,0,0,7 +0,0,1,15,16,8,0,0,0,0,6,16,15,13,0,0,0,0,5,16,16,11,0,0,0,0,0,15,16,7,0,0,0,0,6,16,15,0,0,0,0,0,13,11,11,7,0,0,0,0,13,10,5,16,0,0,0,0,3,13,16,12,0,0,8 +0,0,15,16,16,16,12,0,0,0,4,8,11,16,9,0,0,0,0,0,11,13,0,0,0,2,4,3,16,4,0,0,0,10,16,16,16,16,4,0,0,1,8,16,9,7,0,0,0,0,8,13,0,0,0,0,0,0,15,6,0,0,0,0,7 +0,0,5,16,16,6,0,0,0,0,13,12,11,13,0,0,0,0,13,10,14,8,0,0,0,0,7,16,14,1,0,0,0,0,8,16,6,0,0,0,0,0,16,13,14,0,0,0,0,0,15,3,12,9,0,0,0,0,5,15,16,7,0,0,8 +0,0,1,14,11,0,0,0,0,0,11,10,8,8,0,0,0,3,16,1,0,10,2,0,0,6,11,0,0,6,6,0,0,7,9,0,0,3,9,0,0,3,13,0,0,5,10,0,0,0,12,5,4,12,4,0,0,0,1,13,16,7,0,0,0 +0,0,1,16,13,0,0,0,0,0,6,16,15,0,0,0,0,7,16,16,12,0,0,0,1,12,16,16,12,0,0,0,0,0,2,10,14,0,0,0,0,0,0,8,16,1,0,0,0,0,0,10,16,0,0,0,0,0,0,12,16,0,0,0,1 +0,0,6,10,15,11,1,0,0,1,15,9,6,12,4,0,0,5,16,0,0,5,6,0,0,4,14,0,0,6,8,0,0,6,9,0,0,12,2,0,0,7,9,0,6,11,0,0,0,3,15,8,15,4,0,0,0,0,10,14,5,0,0,0,0 +0,0,0,3,16,7,0,0,0,0,2,12,16,12,0,0,0,6,15,16,16,8,0,0,0,12,15,6,16,7,0,0,0,0,2,0,15,9,0,0,0,0,0,0,16,7,0,0,0,0,0,2,16,9,0,0,0,0,0,2,15,10,0,0,1 +0,0,1,12,16,7,0,0,0,0,8,16,11,4,0,0,0,0,13,12,0,0,0,0,0,2,16,6,0,0,0,0,0,4,16,13,12,5,0,0,0,6,16,14,12,15,4,0,0,0,11,14,5,14,11,0,0,0,0,12,16,16,8,0,6 +0,0,0,12,16,1,0,0,0,0,2,16,8,2,0,0,0,0,8,16,5,15,0,0,0,0,15,7,8,9,0,0,0,7,16,12,15,16,11,0,0,9,15,10,15,7,5,0,0,0,0,10,7,0,0,0,0,0,0,16,4,0,0,0,4 +0,3,16,16,16,16,3,0,0,0,7,8,12,14,1,0,0,0,0,1,15,4,0,0,0,1,8,11,15,8,8,0,0,10,16,16,16,12,5,0,0,0,5,15,1,0,0,0,0,0,13,7,0,0,0,0,0,3,15,1,0,0,0,0,7 +0,0,7,16,15,3,0,0,0,0,14,13,13,13,1,0,0,0,12,11,8,16,6,0,0,0,2,14,16,16,8,0,0,0,0,2,4,13,9,0,0,0,0,0,0,12,10,0,0,1,13,4,4,14,10,0,0,0,5,15,16,16,8,0,9 +0,0,1,9,12,8,0,0,0,0,11,13,8,12,0,0,0,0,11,7,14,6,0,0,0,0,5,16,9,0,0,0,0,0,10,15,5,0,0,0,0,0,13,1,10,1,0,0,0,0,13,1,8,5,0,0,0,0,3,14,15,3,0,0,8 +0,5,16,15,5,0,0,0,0,3,9,13,15,1,0,0,0,0,0,1,15,6,0,0,0,0,0,2,16,4,0,0,0,0,0,10,14,0,0,0,0,0,3,16,7,0,0,0,0,0,12,16,13,10,7,0,0,7,16,14,12,12,9,0,2 +0,0,0,9,6,0,0,0,0,0,2,15,6,0,0,0,0,0,6,10,0,0,0,0,0,0,11,8,7,2,0,0,0,0,14,15,8,14,5,0,0,0,16,5,0,4,13,0,0,0,9,13,0,7,13,0,0,0,0,11,16,14,2,0,6 +0,0,8,16,14,1,0,0,0,0,6,16,16,8,0,0,0,0,4,16,16,8,0,0,0,0,2,16,16,14,0,0,0,0,1,16,16,11,0,0,0,0,5,16,16,10,0,0,0,0,9,16,16,9,0,0,0,0,13,16,16,9,0,0,1 +0,6,16,15,4,0,0,0,0,8,15,14,14,1,0,0,0,0,2,2,16,6,0,0,0,0,0,1,16,7,0,0,0,0,0,6,15,1,0,0,0,0,1,15,8,0,0,0,0,1,12,16,9,12,8,0,0,10,16,16,15,12,7,0,2 +0,2,14,16,14,2,0,0,0,9,16,5,12,11,0,0,0,2,11,0,8,16,0,0,0,0,0,0,7,16,0,0,0,0,0,1,13,10,0,0,0,0,0,5,16,5,0,0,0,0,7,16,13,8,8,1,0,2,16,16,16,13,11,1,2 +0,0,10,16,16,16,16,6,0,0,4,9,8,11,15,2,0,0,0,0,1,14,6,0,0,0,0,4,8,15,6,0,0,0,8,16,16,11,5,0,0,0,3,14,11,0,0,0,0,0,4,16,1,0,0,0,0,0,12,6,0,0,0,0,7 +0,1,9,15,14,3,0,0,0,7,12,2,10,12,0,0,0,8,11,0,10,16,1,0,0,3,16,8,9,16,3,0,0,0,4,11,11,16,5,0,0,0,0,0,0,9,8,0,0,0,6,4,0,9,8,0,0,0,8,15,16,12,2,0,9 +0,0,7,13,9,1,0,0,0,1,13,5,9,8,0,0,0,7,10,0,5,12,0,0,0,5,15,8,10,16,4,0,0,0,7,9,7,5,8,0,0,0,0,0,0,1,11,0,0,0,3,0,0,5,10,0,0,0,9,16,16,14,3,0,9 +0,0,4,16,16,16,16,16,0,0,3,8,8,8,15,9,0,0,0,0,0,6,14,1,0,0,0,0,1,15,3,0,0,0,3,15,16,13,0,0,0,0,2,11,15,12,2,0,0,0,2,14,6,0,0,0,0,0,8,12,0,0,0,0,7 +0,0,4,12,16,7,0,0,0,0,15,5,4,14,0,0,0,0,15,0,3,15,4,0,0,5,16,4,5,16,4,0,0,0,10,12,11,11,8,0,0,0,0,0,0,11,5,0,0,0,2,8,0,15,1,0,0,0,2,13,15,5,0,0,9 +0,0,9,16,16,5,0,0,0,2,16,7,9,15,1,0,0,6,15,0,0,13,7,0,0,3,16,0,0,8,12,0,0,8,16,4,0,8,12,0,0,11,13,1,0,10,11,0,0,5,16,6,5,15,7,0,0,0,10,16,16,9,0,0,0 
+0,0,6,16,16,16,16,12,0,0,4,9,8,8,16,7,0,0,0,0,0,11,13,0,0,0,0,2,6,16,3,0,0,0,9,16,16,16,8,0,0,0,3,9,15,2,0,0,0,0,0,15,8,0,0,0,0,0,8,16,0,0,0,0,7 +0,0,0,0,10,8,0,0,0,0,0,4,15,2,0,0,0,0,1,13,9,5,3,0,0,0,8,14,2,14,6,0,0,5,15,4,1,16,3,0,0,14,15,14,16,16,4,0,0,8,12,9,12,13,0,0,0,0,0,0,12,8,0,0,4 +0,0,4,15,16,16,16,15,0,0,2,8,8,5,13,9,0,0,0,0,0,5,15,2,0,0,0,0,3,14,8,0,0,0,2,15,16,16,8,0,0,0,3,9,16,4,0,0,0,0,0,11,13,0,0,0,0,0,5,15,3,0,0,0,7 +0,0,4,12,10,1,0,0,0,0,14,9,9,13,0,0,0,7,16,0,6,16,4,0,0,4,15,10,4,16,8,0,0,0,5,12,12,14,8,0,0,0,0,0,0,5,11,0,0,0,2,3,0,7,11,0,0,0,5,14,16,16,4,0,9 +0,0,9,12,14,14,4,0,0,0,15,7,4,4,2,0,0,0,14,0,0,0,0,0,0,3,15,14,14,6,0,0,0,4,7,2,2,15,0,0,0,0,0,0,0,12,2,0,0,0,9,2,2,14,0,0,0,0,11,16,16,5,0,0,5 +0,0,6,14,9,0,0,0,0,2,16,8,13,8,0,0,0,6,12,0,2,12,2,0,0,4,9,0,0,7,7,0,0,6,12,0,0,4,8,0,0,6,15,0,0,3,9,0,0,0,15,6,4,8,11,0,0,0,6,14,16,12,1,0,0 +0,0,14,9,0,0,0,0,0,0,8,16,0,0,0,0,0,0,8,12,0,2,1,0,0,0,12,8,0,13,9,0,0,1,16,4,0,16,8,0,0,5,16,0,0,15,6,0,0,8,16,16,16,16,12,0,0,1,8,8,8,13,7,0,4 +0,0,11,12,8,4,0,0,0,0,11,16,16,8,0,0,0,0,10,16,16,8,0,0,0,0,12,16,16,6,0,0,0,0,12,16,16,4,0,0,0,0,13,16,16,6,0,0,0,0,16,16,16,8,0,0,0,0,12,12,12,7,0,0,1 +0,0,7,16,13,4,0,0,0,1,11,0,6,15,3,0,0,6,9,0,0,13,4,0,0,2,14,5,8,16,5,0,0,0,4,12,10,6,8,0,0,0,0,0,0,4,8,0,0,0,2,4,0,8,7,0,0,0,10,16,16,10,1,0,9 +0,1,13,16,16,11,0,0,0,6,16,7,8,6,0,0,0,4,16,3,4,3,0,0,0,5,16,16,16,16,3,0,0,8,13,5,0,11,12,0,0,0,0,0,0,10,11,0,0,2,11,4,6,16,4,0,0,1,15,16,16,8,0,0,5 +0,0,1,14,15,9,1,0,0,1,9,14,1,7,8,0,0,7,16,6,0,11,6,0,0,0,8,14,13,10,0,0,0,0,1,14,15,7,0,0,0,0,11,7,1,16,1,0,0,0,11,6,0,13,3,0,0,0,3,13,14,11,0,0,8 +0,0,7,16,16,16,12,0,0,0,4,9,8,16,6,0,0,0,0,0,6,12,0,0,0,0,0,4,13,9,0,0,0,0,8,16,16,16,9,0,0,0,5,16,8,4,1,0,0,0,3,13,1,0,0,0,0,0,10,8,0,0,0,0,7 +0,0,8,14,13,3,0,0,0,6,13,3,6,11,0,0,0,1,2,0,4,13,0,0,0,0,0,11,16,11,0,0,0,0,0,3,4,11,7,0,0,0,0,0,0,5,9,0,0,2,13,1,0,7,10,0,0,0,9,16,13,12,1,0,3 +0,0,3,11,15,5,0,0,0,1,11,9,10,15,1,0,0,4,14,7,6,16,3,0,0,1,14,8,11,16,2,0,0,0,1,4,3,9,8,0,0,0,0,0,0,8,8,0,0,0,0,2,1,13,2,0,0,0,2,14,13,11,0,0,9 +0,0,8,16,15,4,0,0,0,2,15,16,13,15,0,0,0,9,15,4,0,15,5,0,0,8,13,0,0,6,12,0,0,10,12,0,0,4,12,0,0,7,15,0,0,4,14,0,0,3,16,8,3,12,11,0,0,0,8,16,16,15,1,0,0 +0,0,5,16,16,16,16,7,0,0,3,8,8,8,14,10,0,0,0,0,0,4,15,2,0,0,0,0,2,14,7,0,0,0,3,13,16,16,9,0,0,0,4,13,15,3,0,0,0,0,0,12,10,0,0,0,0,0,8,15,2,0,0,0,7 +0,1,9,15,12,2,0,0,0,8,10,0,7,14,0,0,0,8,4,0,0,16,4,0,0,2,12,9,9,16,7,0,0,0,1,5,5,9,8,0,0,0,0,0,0,4,11,0,0,0,6,1,0,10,8,0,0,0,9,13,14,11,2,0,9 +0,0,1,9,15,15,2,0,0,1,15,9,5,16,2,0,0,8,10,1,12,8,0,0,0,7,15,12,11,1,0,0,0,0,7,16,13,3,0,0,0,0,6,8,8,15,3,0,0,0,2,15,2,12,8,0,0,0,0,10,16,13,4,0,8 +0,0,5,15,14,1,0,0,0,0,15,5,8,9,0,0,0,4,14,0,0,11,2,0,0,8,16,0,0,6,7,0,0,8,12,2,0,4,8,0,0,4,10,0,0,4,9,0,0,0,14,2,1,10,5,0,0,0,6,14,13,8,0,0,0 +0,3,16,15,2,0,0,0,0,4,14,10,11,0,0,0,0,2,8,4,16,0,0,0,0,0,0,0,16,2,0,0,0,0,0,4,15,0,0,0,0,0,0,9,13,0,0,0,0,0,7,16,14,12,11,0,0,3,16,16,12,9,7,0,2 +0,1,7,12,13,7,0,0,0,4,14,4,5,16,2,0,0,8,9,0,8,13,1,0,0,3,11,14,15,3,0,0,0,0,11,13,16,2,0,0,0,0,16,1,6,11,0,0,0,0,14,1,5,14,0,0,0,0,7,15,15,7,0,0,8 +0,0,0,10,7,1,0,0,0,0,7,16,16,9,0,0,0,2,15,16,16,13,0,0,0,1,9,16,16,12,0,0,0,0,0,16,16,12,0,0,0,0,1,16,16,12,0,0,0,0,3,16,16,16,0,0,0,0,0,9,12,11,0,0,1 +0,0,4,12,8,3,0,0,0,0,7,16,16,3,0,0,0,0,6,16,16,6,0,0,0,0,7,16,16,7,0,0,0,0,7,16,16,9,0,0,0,0,5,16,16,12,0,0,0,0,6,16,16,10,0,0,0,0,5,12,12,4,0,0,1 +0,0,5,13,10,2,0,0,0,3,16,7,10,11,0,0,0,10,16,8,15,16,0,0,0,4,12,12,10,16,4,0,0,0,0,0,0,16,5,0,0,0,0,0,0,12,8,0,0,0,8,5,7,15,6,0,0,0,6,15,15,9,0,0,9 
+0,0,5,8,0,0,0,0,0,0,12,6,12,7,0,0,0,2,10,6,9,15,1,0,0,7,8,0,0,12,8,0,0,8,8,0,0,12,6,0,0,5,13,0,0,13,4,0,0,0,14,10,10,13,0,0,0,0,4,12,11,1,0,0,0 +0,0,10,16,14,5,0,0,0,0,7,16,16,5,0,0,0,0,3,16,16,9,0,0,0,0,3,16,16,13,0,0,0,0,2,16,16,16,0,0,0,0,2,16,16,16,2,0,0,0,6,16,16,14,2,0,0,0,4,15,16,6,0,0,1 +0,0,0,11,12,6,0,0,0,0,4,16,16,16,0,0,0,0,9,16,16,16,0,0,0,0,13,16,16,16,4,0,0,0,5,16,16,16,4,0,0,0,1,16,16,16,2,0,0,0,2,16,16,16,5,0,0,0,0,5,11,9,2,0,1 +0,2,9,12,13,4,0,0,0,14,16,13,16,11,0,0,0,5,4,7,16,4,0,0,0,0,0,10,16,2,0,0,0,0,0,6,16,6,0,0,0,0,0,0,14,16,1,0,0,0,7,11,16,13,1,0,0,0,8,12,7,2,0,0,3 +0,0,3,14,0,0,0,0,0,0,3,16,1,0,0,0,0,0,8,13,0,0,0,0,0,0,7,13,0,0,0,0,0,0,11,11,8,4,0,0,0,0,14,16,16,16,8,0,0,0,12,16,7,10,15,0,0,0,2,14,16,15,8,0,6 +0,0,3,12,15,11,0,0,0,0,14,15,15,16,1,0,0,0,3,2,14,16,12,1,0,0,0,13,16,14,9,0,0,0,0,2,14,8,0,0,0,0,0,2,16,5,0,0,0,0,0,13,13,0,0,0,0,0,3,16,3,0,0,0,7 +0,1,15,6,0,0,0,0,0,7,16,16,0,0,0,0,0,11,14,16,0,0,0,0,0,2,8,15,0,0,0,0,0,0,14,10,0,0,0,0,0,5,16,3,0,0,0,0,0,10,16,16,16,12,5,0,0,3,15,16,12,14,14,0,2 +0,0,10,1,0,0,0,0,0,0,16,0,0,0,0,0,0,3,13,0,0,0,0,0,0,5,12,1,4,2,0,0,0,8,14,15,16,15,4,0,0,8,15,5,5,14,8,0,0,8,14,4,10,16,4,0,0,1,12,15,10,2,0,0,6 +0,0,4,14,10,6,0,0,0,0,12,16,16,16,4,0,0,7,16,4,15,16,4,0,0,7,16,16,16,16,4,0,0,0,5,8,14,16,5,0,0,0,0,0,12,16,2,0,0,0,6,16,16,9,0,0,0,0,5,16,11,0,0,0,9 +0,1,8,12,3,0,0,0,0,1,16,16,14,2,0,0,0,2,16,16,16,16,1,0,0,0,12,16,16,4,0,0,0,0,12,16,16,7,0,0,0,2,16,16,16,14,0,0,0,4,16,16,15,4,0,0,0,1,12,10,5,0,0,0,8 +0,0,2,11,15,13,0,0,0,5,15,15,13,15,0,0,0,9,13,3,16,7,0,0,0,3,3,4,16,4,0,0,0,0,0,2,16,11,0,0,0,0,0,0,7,16,6,0,0,0,0,9,8,15,12,0,0,0,0,13,16,13,3,0,3 +0,0,8,14,12,2,0,0,0,10,14,12,14,0,0,0,0,6,2,11,7,0,0,0,0,0,0,12,5,0,0,0,0,0,0,8,15,2,0,0,0,0,0,0,9,14,3,0,0,0,6,2,7,14,8,0,0,0,14,15,11,4,0,0,3 +0,2,10,15,16,15,0,0,0,4,16,9,4,6,0,0,0,8,15,0,0,0,0,0,0,6,14,5,0,0,0,0,0,1,12,16,13,5,0,0,0,0,0,2,9,16,9,0,0,0,9,8,10,16,9,0,0,0,7,16,12,8,1,0,5 +0,0,8,4,0,0,0,0,0,0,15,3,0,0,0,0,0,2,13,0,0,0,0,0,0,4,14,11,9,1,0,0,0,4,9,13,16,14,1,0,0,2,12,0,0,9,11,0,0,0,16,8,5,13,11,0,0,0,4,15,16,12,1,0,6 +0,1,13,16,8,1,0,0,0,8,14,10,16,8,0,0,0,11,14,15,16,9,0,0,0,0,8,7,6,16,4,0,0,0,0,0,0,16,4,0,0,0,1,0,0,14,8,0,0,3,16,2,10,15,1,0,0,2,13,16,12,1,0,0,9 +0,0,6,12,14,14,2,0,0,12,16,11,4,5,2,0,0,4,14,5,0,0,0,0,0,0,9,13,1,0,0,0,0,0,2,13,15,6,0,0,0,0,0,2,11,15,6,0,0,0,8,6,6,15,8,0,0,0,6,16,16,9,0,0,5 +0,0,3,14,16,11,0,0,0,2,15,13,14,13,0,0,0,0,4,0,8,14,5,0,0,0,5,13,16,16,14,0,0,0,12,15,16,9,2,0,0,0,1,9,13,0,0,0,0,0,0,16,6,0,0,0,0,0,4,13,0,0,0,0,7 +0,0,2,11,8,1,0,0,0,0,9,16,16,9,0,0,0,0,11,16,16,8,0,0,0,0,15,16,16,10,0,0,0,0,13,16,16,8,0,0,0,0,7,16,16,9,0,0,0,0,5,16,16,13,0,0,0,0,1,11,12,10,0,0,1 +0,0,5,15,15,6,0,0,0,0,13,11,16,16,4,0,0,3,14,0,8,11,7,0,0,8,7,0,0,7,8,0,0,7,7,0,0,5,8,0,0,9,9,0,1,11,4,0,0,7,13,8,14,12,0,0,0,0,6,15,10,1,0,0,0 +0,0,11,13,4,0,0,0,0,7,16,16,15,0,0,0,0,7,11,5,15,0,0,0,0,0,2,10,11,0,0,0,0,0,2,15,2,0,0,0,0,0,11,8,0,2,3,0,0,1,16,13,14,16,12,0,0,1,12,13,10,5,0,0,2 +0,0,8,2,0,0,0,0,0,0,12,6,0,0,0,0,0,2,16,4,0,0,0,0,0,0,16,8,0,0,0,0,0,4,16,14,11,6,0,0,0,2,16,16,16,16,9,0,0,0,15,16,6,16,12,0,0,0,7,15,16,14,3,0,6 +0,0,0,2,15,8,0,0,0,0,0,10,16,3,0,0,0,0,6,15,4,0,0,0,0,3,15,10,4,10,8,0,0,12,16,16,16,16,10,0,0,13,12,12,15,16,6,0,0,0,0,0,14,15,1,0,0,0,0,2,16,9,0,0,4 +0,0,5,9,12,15,9,0,0,0,13,16,16,16,9,0,0,0,3,3,5,16,3,0,0,0,0,3,12,16,10,0,0,0,2,16,16,13,5,0,0,0,3,15,13,0,0,0,0,0,3,16,5,0,0,0,0,0,7,11,0,0,0,0,7 
+0,0,3,14,13,2,0,0,0,1,15,16,16,3,0,0,0,2,14,15,5,0,0,0,0,0,5,15,6,0,0,0,0,0,0,5,16,7,0,0,0,0,0,0,4,15,7,0,0,0,3,8,12,16,13,0,0,0,3,12,15,14,4,0,5 +0,0,6,10,0,0,0,0,0,0,10,11,0,0,0,0,0,1,15,4,0,0,0,0,0,4,16,5,4,0,0,0,0,6,16,16,16,14,3,0,0,7,16,4,6,14,8,0,0,2,15,8,9,14,3,0,0,0,5,14,13,3,0,0,6 +0,2,8,15,15,3,0,0,0,11,16,12,15,12,0,0,0,4,7,0,10,14,0,0,0,0,0,0,8,16,0,0,0,0,0,0,7,16,2,0,0,0,0,0,1,15,8,0,0,0,1,6,11,16,7,0,0,0,8,16,10,5,0,0,3 +0,0,4,13,6,0,0,0,0,4,16,13,4,3,0,0,0,12,10,1,12,16,3,0,0,7,14,9,16,5,0,0,0,0,12,16,13,1,0,0,0,0,5,15,15,15,3,0,0,0,7,14,12,16,10,0,0,0,3,16,11,8,2,0,8 +0,0,4,10,16,12,1,0,0,3,15,14,8,16,5,0,0,4,7,0,5,15,2,0,0,0,0,0,13,8,0,0,0,0,0,3,16,10,0,0,0,0,0,0,10,16,4,0,0,0,4,9,11,15,2,0,0,0,5,13,11,1,0,0,3 +0,0,3,14,8,4,0,0,0,0,15,14,14,16,0,0,0,4,16,13,16,16,0,0,0,3,11,11,10,16,3,0,0,0,0,0,3,15,5,0,0,0,1,5,1,13,6,0,0,0,6,15,16,11,0,0,0,0,2,14,9,1,0,0,9 +0,0,12,7,0,0,0,0,0,3,16,16,3,0,0,0,0,7,11,8,6,0,0,0,0,3,14,7,8,0,0,0,0,0,0,14,3,0,0,0,0,0,4,15,0,0,0,0,0,2,15,16,16,13,9,0,0,0,12,16,16,16,7,0,2 +0,0,3,12,16,15,4,0,0,0,14,16,14,16,6,0,0,1,11,3,13,13,1,0,0,0,1,14,12,1,0,0,0,0,3,16,14,3,0,0,0,0,1,6,16,14,1,0,0,0,14,13,14,15,0,0,0,0,4,16,15,5,0,0,3 +0,0,0,7,12,0,12,5,0,0,8,14,1,7,14,0,0,1,16,5,0,14,6,0,0,5,16,12,13,16,10,0,0,3,15,15,16,13,5,0,0,0,0,4,15,1,0,0,0,0,0,9,10,0,0,0,0,0,0,11,4,0,0,0,4 +0,0,1,12,12,13,8,0,0,0,12,10,4,7,13,0,0,0,15,10,4,9,14,0,0,0,5,12,12,16,9,0,0,0,0,0,2,15,0,0,0,0,0,0,11,7,0,0,0,0,0,4,14,0,0,0,0,0,0,15,2,0,0,0,9 +0,0,2,7,12,8,0,0,0,0,11,16,16,6,0,0,0,0,6,12,0,0,0,0,0,0,13,8,4,0,0,0,0,0,14,16,16,14,1,0,0,0,3,8,9,16,2,0,0,0,0,9,10,14,0,0,0,0,0,13,13,4,0,0,5 +0,0,1,11,16,6,0,0,0,0,12,14,11,16,3,0,0,0,11,5,2,16,5,0,0,0,0,0,7,14,2,0,0,0,0,7,16,7,0,0,0,0,0,3,16,10,0,0,0,0,0,12,16,15,0,0,0,0,1,12,16,7,0,0,3 +0,0,0,2,13,8,4,0,0,0,0,11,13,8,16,2,0,0,7,16,3,14,10,0,0,6,16,8,7,16,6,0,0,11,16,16,16,16,7,0,0,2,8,9,16,9,0,0,0,0,0,2,16,6,0,0,0,0,0,1,16,2,0,0,4 +0,0,8,12,16,16,5,0,0,0,9,16,16,16,1,0,0,0,0,1,13,10,0,0,0,0,3,7,16,13,8,0,0,2,16,16,16,12,7,0,0,0,6,16,4,0,0,0,0,0,11,16,0,0,0,0,0,0,11,13,0,0,0,0,7 +0,0,1,12,16,16,4,0,0,0,10,16,13,8,1,0,0,0,10,13,0,0,0,0,0,0,13,12,4,0,0,0,0,0,14,16,16,6,0,0,0,0,6,9,15,14,0,0,0,0,0,0,9,14,0,0,0,0,4,12,15,6,0,0,5 +0,0,0,2,12,11,0,0,0,0,0,13,16,10,0,0,0,0,9,16,16,9,0,0,0,4,16,14,16,6,0,0,0,1,6,7,16,6,0,0,0,0,0,5,16,6,0,0,0,0,0,3,16,4,0,0,0,0,0,0,12,4,0,0,1 +0,0,3,15,1,0,0,0,0,0,13,13,0,0,0,0,0,2,16,1,0,0,0,0,0,7,10,0,1,3,0,0,0,8,9,4,15,15,2,0,0,3,16,6,0,9,8,0,0,0,16,14,9,15,7,0,0,0,3,13,16,9,0,0,6 +0,0,9,0,0,0,0,0,0,2,16,6,8,5,0,0,0,4,16,16,16,15,3,0,0,6,16,10,12,15,8,0,0,4,11,0,0,7,8,0,0,4,16,9,10,15,8,0,0,2,15,16,16,16,3,0,0,0,8,16,15,7,0,0,0 +0,0,0,4,12,0,0,10,0,0,3,15,5,0,11,13,0,0,11,12,0,4,16,4,0,2,16,12,3,14,10,0,0,4,16,16,16,15,1,0,0,0,10,10,16,8,0,0,0,0,0,3,15,2,0,0,0,0,0,7,11,0,0,0,4 +0,0,5,12,16,13,4,0,0,0,12,13,8,8,6,0,0,2,15,3,0,0,0,0,0,4,16,12,12,6,0,0,0,4,16,15,13,14,0,0,0,0,0,0,4,15,0,0,0,0,3,4,13,8,0,0,0,0,6,12,13,1,0,0,5 +0,0,0,0,13,5,0,0,0,0,0,8,15,2,2,0,0,0,9,15,5,13,7,0,0,5,16,6,2,16,4,0,0,11,16,15,11,16,3,0,0,3,12,13,15,15,8,0,0,0,0,0,12,9,0,0,0,0,0,0,10,9,0,0,4 +0,0,2,13,13,2,0,0,0,1,15,13,15,4,0,0,0,5,13,1,14,2,0,0,0,0,2,12,14,0,0,0,0,0,0,8,14,14,1,0,0,0,0,0,0,13,7,0,0,0,3,8,8,15,2,0,0,0,2,14,11,4,0,0,3 +0,0,0,5,9,13,14,6,0,0,4,16,8,4,10,9,0,0,6,16,16,16,15,4,0,0,1,11,12,16,6,0,0,0,0,0,2,16,2,0,0,0,0,0,10,6,0,0,0,0,0,2,15,0,0,0,0,0,0,5,14,0,0,0,9 +0,0,1,6,12,14,4,0,0,0,4,13,8,3,0,0,0,0,13,4,0,0,0,0,0,3,15,5,2,0,0,0,0,8,16,16,16,12,0,0,0,2,4,3,7,15,3,0,0,0,0,2,7,14,1,0,0,0,0,8,12,4,0,0,5 
+0,0,0,15,2,0,0,0,0,0,9,13,0,0,0,0,0,0,16,6,0,0,0,0,0,4,14,2,0,0,0,0,0,4,16,6,6,7,1,0,0,4,16,15,8,14,6,0,0,0,9,15,8,15,7,0,0,0,2,14,15,8,0,0,6 +0,0,0,1,14,12,0,0,0,4,6,15,14,12,0,0,0,5,16,16,13,13,1,0,0,0,11,16,16,16,4,0,0,0,0,3,2,9,8,0,0,0,0,0,0,9,8,0,0,0,0,0,11,16,4,0,0,0,0,0,11,5,1,0,9 +0,1,8,14,13,3,0,0,0,10,13,5,10,12,0,0,0,3,0,2,14,9,0,0,0,0,0,15,8,0,0,0,0,0,0,13,13,1,0,0,0,0,0,0,7,14,4,0,0,1,6,1,0,11,10,0,0,0,10,12,16,16,7,0,3 +0,0,0,0,3,14,1,0,0,0,0,1,14,15,0,0,0,0,0,9,16,10,0,0,0,0,9,16,16,8,0,0,0,4,16,7,16,8,0,0,0,1,4,0,15,10,0,0,0,0,0,0,11,12,0,0,0,0,0,0,3,15,3,0,1 +0,0,2,8,15,11,0,0,0,2,15,14,12,16,4,0,0,2,7,0,10,15,0,0,0,0,0,3,15,3,0,0,0,0,0,13,9,0,0,0,0,0,0,4,14,5,0,0,0,2,10,8,7,16,6,0,0,0,4,9,13,15,7,0,3 +0,0,0,8,8,8,13,8,0,0,6,15,13,14,15,1,0,0,13,6,0,12,5,0,0,0,14,5,5,10,0,0,0,0,1,10,16,14,2,0,0,0,0,12,12,6,1,0,0,0,0,14,4,0,0,0,0,0,0,12,1,0,0,0,7 +0,0,13,16,13,1,0,0,0,3,16,13,16,8,0,0,0,0,9,11,16,7,0,0,0,0,0,14,14,0,0,0,0,0,0,11,15,12,1,0,0,0,0,0,1,14,10,0,0,1,11,6,4,8,16,3,0,0,13,16,16,16,16,2,3 +0,0,6,16,9,0,0,0,0,2,15,16,14,0,0,0,0,2,10,16,6,0,0,0,0,2,16,16,9,2,0,0,0,0,3,10,16,14,1,0,0,0,0,0,10,16,5,0,0,0,4,8,8,16,7,0,0,0,8,16,16,13,0,0,3 +0,1,0,6,16,8,0,0,0,11,7,13,3,13,2,0,0,11,14,4,0,8,4,0,0,9,7,0,0,5,7,0,0,6,9,0,0,5,8,0,0,0,13,4,0,11,9,0,0,0,6,14,11,16,7,0,0,0,0,5,16,10,0,0,0 +0,0,0,0,4,16,5,0,0,0,0,2,15,16,4,0,0,0,1,13,16,16,0,0,0,1,14,12,14,12,0,0,0,6,14,1,10,13,0,0,0,4,2,0,9,13,0,0,0,0,0,0,8,12,0,0,0,0,0,0,4,14,0,0,1 +0,1,13,16,10,0,0,0,0,2,15,13,16,2,0,0,0,0,5,1,14,5,0,0,0,0,0,0,14,7,0,0,0,0,0,4,16,3,0,0,0,0,0,13,13,1,0,0,0,1,14,16,13,12,11,1,0,1,15,16,14,10,8,1,2 +0,0,7,11,16,13,4,0,0,4,16,16,16,16,11,0,0,4,16,16,16,16,8,0,0,0,4,11,14,13,1,0,0,0,0,0,8,12,0,0,0,0,0,0,9,11,0,0,0,0,4,10,16,5,0,0,0,0,9,12,7,0,0,0,9 +0,0,5,12,12,0,0,0,0,3,16,12,15,4,0,0,0,4,14,0,14,6,0,0,0,1,12,11,12,0,0,0,0,0,7,16,8,1,0,0,0,0,1,5,12,13,1,0,0,0,8,4,0,16,4,0,0,0,8,15,15,11,1,0,3 +0,4,9,11,13,16,7,0,0,9,16,13,14,16,1,0,0,0,0,0,13,10,0,0,0,5,12,13,16,15,8,0,0,5,12,16,13,6,2,0,0,0,5,16,3,0,0,0,0,0,12,13,0,0,0,0,0,3,16,4,0,0,0,0,7 +0,3,15,16,7,0,0,0,0,8,15,14,12,0,0,0,0,1,3,3,14,0,0,0,0,0,0,5,15,0,0,0,0,0,0,11,10,0,0,0,0,0,5,16,6,0,1,0,0,5,16,16,13,16,15,0,0,4,16,16,14,12,9,0,2 +0,0,1,8,16,8,0,0,0,2,14,13,12,16,2,0,0,10,9,0,7,15,1,0,0,3,14,9,15,2,0,0,0,0,10,16,12,2,0,0,0,0,12,6,5,15,3,0,0,0,9,8,0,13,7,0,0,0,0,8,16,12,1,0,8 +0,0,9,13,4,0,0,0,0,4,15,9,14,1,0,0,0,2,7,0,10,6,0,0,0,0,0,0,9,5,0,0,0,0,0,3,16,2,0,0,0,0,0,14,13,0,0,0,0,0,11,16,9,8,14,0,0,0,10,16,12,12,7,1,2 +0,0,0,15,14,6,0,0,0,0,3,16,16,6,0,0,0,2,13,16,12,0,0,0,0,8,16,16,7,0,0,0,0,1,10,16,5,0,0,0,0,0,7,16,6,0,0,0,0,0,5,16,9,0,0,0,0,0,0,13,15,0,0,0,1 +0,0,0,7,12,2,0,0,0,0,5,14,9,3,0,0,0,0,16,8,0,0,0,0,0,3,13,0,0,0,0,0,0,4,9,12,14,7,0,0,0,1,11,12,8,7,11,1,0,0,4,10,2,0,11,5,0,0,0,6,13,14,14,1,6 +0,0,0,6,13,0,0,0,0,0,1,15,5,1,2,0,0,0,12,8,0,13,7,0,0,6,14,0,1,15,4,0,0,13,13,8,11,16,9,0,0,8,16,16,16,8,0,0,0,0,0,7,13,0,0,0,0,0,0,8,8,0,0,0,4 +0,0,8,14,12,9,1,0,0,0,15,5,8,8,1,0,0,0,14,1,0,0,0,0,0,0,15,4,0,0,0,0,0,5,16,16,16,9,0,0,0,1,4,0,1,15,5,0,0,0,1,4,4,14,3,0,0,0,5,16,15,3,0,0,5 +0,0,4,15,15,14,1,0,0,1,15,16,10,15,6,0,0,5,16,11,0,6,11,0,0,8,12,0,0,4,12,0,0,10,12,0,0,5,12,0,0,6,16,1,0,9,9,0,0,0,14,14,8,15,6,0,0,0,4,15,16,11,1,0,0 +0,0,1,14,16,12,1,0,0,0,14,7,3,12,4,0,0,2,14,0,1,16,1,0,0,0,12,10,16,6,0,0,0,0,11,14,15,4,0,0,0,4,14,0,5,14,2,0,0,1,15,6,1,11,7,0,0,0,3,8,14,15,1,0,8 +0,0,1,12,16,5,0,0,0,0,9,16,9,2,0,0,0,3,16,6,0,0,0,0,0,6,16,8,1,0,0,0,0,10,16,11,13,3,0,0,0,4,16,1,9,13,1,0,0,0,9,11,9,16,1,0,0,0,1,12,16,12,0,0,6 
+0,0,0,5,14,0,0,0,0,0,0,13,7,2,8,0,0,0,5,13,1,11,10,0,0,2,14,4,1,16,4,0,0,12,14,8,13,16,10,0,0,15,16,16,16,12,2,0,0,3,3,5,15,0,0,0,0,0,0,6,13,0,0,0,4 +0,0,3,13,8,1,0,0,0,0,11,16,14,9,0,0,0,4,16,5,4,14,0,0,0,4,15,0,0,12,8,0,0,5,12,0,0,12,8,0,0,2,15,0,0,12,6,0,0,0,14,10,8,15,2,0,0,0,4,13,16,6,0,0,0 +0,0,0,4,15,3,0,0,0,0,0,12,8,0,10,1,0,0,8,11,0,6,14,0,0,5,15,13,14,16,15,0,0,7,15,12,10,16,3,0,0,0,0,0,9,9,0,0,0,0,0,0,15,1,0,0,0,0,0,6,12,0,0,0,4 +0,0,10,16,16,16,16,5,0,0,5,9,8,15,12,1,0,0,0,0,7,13,1,0,0,0,6,8,16,16,13,0,0,0,14,16,12,8,1,0,0,0,2,16,3,0,0,0,0,0,7,13,0,0,0,0,0,0,13,6,0,0,0,0,7 +0,0,4,11,16,12,5,0,0,0,6,7,3,11,9,0,0,0,0,0,0,12,4,0,0,0,0,3,10,10,0,0,0,0,4,16,15,2,0,0,0,0,1,7,12,15,1,0,0,0,8,7,5,15,0,0,0,0,7,12,15,6,0,0,3 +0,0,0,8,16,5,0,0,0,0,5,15,10,2,0,0,0,2,15,6,0,0,0,0,0,5,12,0,0,0,0,0,0,4,15,14,16,14,5,0,0,0,16,15,1,5,12,0,0,0,7,15,1,7,14,0,0,0,0,8,16,15,4,0,6 +0,1,11,12,12,12,4,0,0,0,15,8,8,11,7,0,0,0,12,0,0,0,0,0,0,0,13,1,0,0,0,0,0,3,16,15,11,5,0,0,0,0,4,4,10,15,2,0,0,0,12,5,5,14,3,0,0,0,11,13,12,8,0,0,5 +0,0,7,16,16,16,11,0,0,0,12,12,8,8,5,0,0,3,16,2,0,0,0,0,0,9,16,16,10,1,0,0,0,3,7,8,14,13,0,0,0,0,0,0,4,16,3,0,0,0,9,7,11,15,0,0,0,0,7,16,15,3,0,0,5 +0,0,0,3,11,16,5,0,0,0,0,11,16,16,5,0,0,3,13,16,16,14,0,0,0,3,11,12,16,12,0,0,0,0,0,9,16,9,0,0,0,0,0,12,16,10,0,0,0,0,0,9,16,11,0,0,0,0,0,5,13,8,0,0,1 +0,0,0,5,15,2,0,0,0,0,1,15,8,0,0,0,0,0,8,12,0,0,0,0,0,0,12,8,0,0,0,0,0,0,16,10,8,7,0,0,0,1,14,12,8,9,10,0,0,0,5,13,1,8,14,0,0,0,0,6,14,15,5,0,6 +0,0,10,16,16,13,0,0,0,0,13,6,11,16,3,0,0,0,0,6,16,8,0,0,0,0,11,16,12,0,0,0,0,0,6,16,16,13,0,0,0,0,0,0,7,16,7,0,0,3,15,5,10,16,3,0,0,0,14,16,15,5,0,0,3 +0,0,0,4,12,11,1,0,0,0,12,15,10,13,4,0,0,8,10,0,3,15,1,0,0,3,14,5,15,6,0,0,0,0,13,16,11,0,0,0,0,3,15,6,11,12,3,0,0,1,14,10,2,10,10,0,0,0,1,6,11,16,6,0,8 +0,0,4,10,14,3,0,0,0,6,14,8,11,15,0,0,0,8,8,0,7,14,0,0,0,3,14,12,14,2,0,0,0,1,15,15,8,0,0,0,0,3,13,0,10,10,0,0,0,0,11,3,1,15,3,0,0,0,3,12,13,14,0,0,8 +0,0,1,9,14,5,0,0,0,0,8,15,9,14,1,0,0,0,3,14,0,16,4,0,0,0,0,8,14,16,4,0,0,0,0,0,3,13,5,0,0,0,3,0,0,8,7,0,0,3,15,6,2,14,6,0,0,0,1,10,14,14,1,0,9 +0,0,0,10,16,3,0,0,0,0,5,15,5,0,0,0,0,0,12,9,0,0,0,0,0,0,15,3,0,0,0,0,0,2,15,10,8,2,0,0,0,1,13,13,10,14,0,0,0,0,5,12,2,14,6,0,0,0,0,7,16,16,2,0,6 +0,0,0,10,12,4,0,0,0,0,7,12,6,13,0,0,0,0,7,9,2,13,0,0,0,0,1,15,15,6,0,0,0,0,7,15,11,9,0,0,0,0,16,1,0,10,9,0,0,0,13,5,0,0,15,0,0,0,1,9,14,14,12,0,8 +0,3,16,15,3,0,0,0,0,2,10,12,10,0,0,0,0,0,0,7,12,0,0,0,0,0,0,8,12,0,0,0,0,0,1,15,6,0,0,0,0,0,5,15,1,0,0,0,0,2,15,12,7,4,0,0,0,2,15,16,16,16,16,3,2 +0,0,12,12,16,16,9,0,0,0,8,6,4,13,8,0,0,0,0,4,8,16,6,0,0,3,16,16,16,11,4,0,0,0,3,9,9,0,0,0,0,0,3,14,0,0,0,0,0,0,11,7,0,0,0,0,0,0,14,3,0,0,0,0,7 +0,0,0,1,15,3,0,0,0,0,1,11,11,1,0,0,0,0,10,13,0,10,0,0,0,6,16,6,6,15,3,0,0,8,16,16,16,16,9,0,0,0,2,4,15,4,0,0,0,0,0,1,16,1,0,0,0,0,0,2,15,0,0,0,4 +0,0,0,8,14,3,4,0,0,0,1,15,8,9,10,0,0,0,11,12,1,15,4,0,0,4,16,4,5,16,4,0,0,11,16,16,16,16,10,0,0,4,12,13,16,8,1,0,0,0,0,6,14,0,0,0,0,0,0,11,8,0,0,0,4 +0,0,0,11,13,1,0,0,0,0,12,14,9,2,0,0,0,4,15,3,0,0,0,0,0,7,12,12,12,6,1,0,0,7,16,13,8,12,7,0,0,4,16,6,0,7,11,0,0,0,12,10,5,16,6,0,0,0,2,10,15,10,0,0,6 +0,0,7,13,3,0,0,0,0,2,16,16,10,0,0,0,0,6,14,8,14,0,0,0,0,7,9,4,16,1,0,0,0,0,0,7,12,0,0,0,0,0,4,15,10,6,1,0,0,0,8,16,16,16,6,0,0,0,5,11,12,15,3,0,2 +0,0,9,16,9,1,0,0,0,2,16,13,16,11,0,0,0,9,13,0,10,16,4,0,0,4,15,12,13,16,6,0,0,0,5,12,13,16,7,0,0,0,0,0,0,15,8,0,0,0,9,6,11,16,2,0,0,0,9,13,11,3,0,0,9 +0,3,14,14,2,0,0,0,0,10,15,14,11,0,0,0,0,11,7,5,16,0,0,0,0,1,2,0,16,3,0,0,0,0,0,7,12,0,0,0,0,0,1,12,15,7,0,0,0,1,15,16,16,16,11,0,0,1,10,10,5,6,7,0,2 
+0,1,12,16,11,4,0,0,0,4,16,10,11,12,0,0,0,0,16,6,0,0,0,0,0,2,16,16,7,0,0,0,0,0,3,9,15,6,0,0,0,0,0,0,8,13,0,0,0,0,2,4,9,15,0,0,0,0,11,16,15,6,0,0,5 +0,0,4,11,0,0,0,0,0,0,11,13,0,0,0,0,0,2,15,8,0,0,0,0,0,3,16,4,0,0,0,0,0,7,16,16,16,9,0,0,0,4,16,12,9,16,7,0,0,2,15,9,6,16,9,0,0,0,4,15,16,13,2,0,6 +0,0,0,14,6,0,0,0,0,0,4,16,6,0,0,0,0,0,10,14,1,0,0,0,0,0,14,15,3,0,0,0,0,2,16,16,16,13,1,0,0,3,16,13,6,15,11,0,0,0,10,16,5,15,13,0,0,0,1,10,16,15,7,0,6 +0,0,2,14,6,0,0,0,0,0,11,16,7,0,0,0,0,0,16,16,0,0,0,0,0,3,16,12,0,0,0,0,0,5,16,16,16,11,1,0,0,4,16,16,14,16,6,0,0,0,12,16,11,16,10,0,0,0,1,11,15,11,2,0,6 +0,0,7,14,13,13,8,0,0,0,9,12,14,16,13,0,0,0,0,0,9,16,3,0,0,0,2,10,16,15,7,0,0,1,14,16,16,13,7,0,0,0,3,16,13,0,0,0,0,0,5,16,7,0,0,0,0,0,9,13,2,0,0,0,7 +0,0,6,14,6,0,0,0,0,1,16,13,16,6,0,0,0,4,16,6,14,16,2,0,0,0,11,16,16,16,6,0,0,0,0,3,6,15,9,0,0,0,0,0,0,10,13,0,0,0,15,9,6,13,11,0,0,0,4,14,15,10,3,0,9 +0,5,12,14,16,14,4,0,0,7,16,11,7,4,2,0,0,7,16,0,0,0,0,0,0,8,16,16,13,1,0,0,0,5,12,12,16,8,0,0,0,0,0,0,9,15,0,0,0,2,8,8,15,11,0,0,0,3,14,11,6,0,0,0,5 +0,0,0,7,16,3,0,0,0,0,0,15,12,0,0,0,0,0,9,16,2,7,13,0,0,4,15,7,1,15,8,0,0,9,16,16,16,16,7,0,0,3,8,8,15,13,1,0,0,0,0,3,16,4,0,0,0,0,0,10,9,0,0,0,4 +0,0,0,7,8,0,0,0,0,0,3,16,4,0,0,0,0,0,14,10,0,6,3,0,0,4,16,9,4,16,8,0,0,3,16,16,16,16,6,0,0,0,4,6,16,12,0,0,0,0,0,3,16,6,0,0,0,0,0,7,13,0,0,0,4 +0,0,6,16,16,14,4,0,0,0,4,7,8,16,7,0,0,0,0,0,4,16,5,0,0,0,9,16,16,16,3,0,0,0,14,15,16,10,4,0,0,0,1,14,10,0,0,0,0,0,5,16,6,0,0,0,0,0,7,15,2,0,0,0,7 +0,0,6,12,12,9,0,0,0,0,11,16,16,13,0,0,0,0,9,16,16,12,0,0,0,0,14,16,16,12,0,0,0,0,12,16,16,12,0,0,0,2,16,16,16,9,0,0,0,1,15,16,15,2,0,0,0,0,7,11,3,0,0,0,1 +0,0,6,14,15,0,0,0,0,1,16,9,16,3,0,0,0,0,1,3,16,2,0,0,0,0,4,16,15,4,0,0,0,0,4,8,14,16,3,0,0,0,0,0,0,16,4,0,0,0,3,7,10,15,2,0,0,0,7,15,9,1,0,0,3 +0,0,9,16,16,16,5,0,0,0,6,8,11,16,10,0,0,0,0,0,9,16,5,0,0,0,1,8,15,15,3,0,0,0,9,16,16,16,7,0,0,0,2,15,14,0,0,0,0,0,9,16,7,0,0,0,0,0,9,16,5,0,0,0,7 +0,3,11,16,15,6,0,0,0,5,10,8,14,11,0,0,0,0,0,1,15,6,0,0,0,0,0,11,16,5,0,0,0,0,0,1,13,15,2,0,0,0,0,0,0,13,6,0,0,1,7,5,4,14,7,0,0,2,12,12,15,11,1,0,3 +0,0,1,11,15,4,0,0,0,1,13,13,14,16,0,0,0,8,15,1,10,16,4,0,0,1,15,14,16,14,2,0,0,0,6,16,15,2,0,0,0,0,8,15,14,9,0,0,0,0,11,16,10,16,3,0,0,0,2,12,13,12,1,0,8 +0,0,11,16,16,14,4,0,0,3,16,10,11,9,5,0,0,5,15,3,0,0,0,0,0,4,16,16,15,3,0,0,0,3,13,12,15,14,0,0,0,0,0,0,6,16,0,0,0,0,4,12,16,10,0,0,0,0,9,11,6,0,0,0,5 +0,0,9,14,16,16,5,0,0,0,6,16,7,11,10,0,0,0,0,15,7,9,9,0,0,0,0,12,15,15,3,0,0,0,7,16,16,6,0,0,0,3,14,2,16,2,0,0,0,5,15,5,16,4,0,0,0,1,11,16,12,0,0,0,8 +0,0,6,11,16,14,2,0,0,1,16,15,7,4,1,0,0,4,16,13,2,0,0,0,0,6,16,16,16,8,0,0,0,1,6,5,11,16,3,0,0,0,0,0,0,16,5,0,0,0,1,5,11,16,2,0,0,0,7,15,10,5,0,0,5 +0,0,5,14,7,1,0,0,0,2,15,14,16,10,0,0,0,2,16,6,12,16,0,0,0,1,11,5,10,16,0,0,0,0,0,0,16,9,0,0,0,0,3,5,16,5,0,0,0,0,13,16,15,7,5,0,0,0,5,8,10,13,13,0,2 +0,0,0,11,4,0,0,0,0,0,6,16,9,0,0,0,0,0,12,15,1,0,0,0,0,0,16,10,3,0,0,0,0,2,16,16,16,10,1,0,0,0,15,14,4,11,11,0,0,0,9,15,2,6,16,0,0,0,0,9,16,16,12,2,6 +0,0,0,8,15,0,0,0,0,0,3,16,10,0,6,0,0,1,13,14,2,10,15,0,0,7,16,16,13,15,13,0,0,3,10,14,16,16,10,0,0,0,0,0,13,15,0,0,0,0,0,3,16,8,0,0,0,0,0,9,15,1,0,0,4 +0,0,5,12,13,11,3,0,0,2,16,12,8,6,3,0,0,4,16,9,3,0,0,0,0,7,16,16,16,7,0,0,0,3,8,4,11,14,0,0,0,0,0,0,4,16,2,0,0,0,2,6,12,16,2,0,0,0,8,12,8,3,0,0,5 +0,0,9,16,7,0,0,0,0,0,13,15,14,7,0,0,0,0,15,10,7,15,0,0,0,0,5,4,5,13,0,0,0,0,0,0,8,12,0,0,0,0,1,4,13,10,0,0,0,0,12,16,16,13,8,1,0,0,4,12,9,9,12,5,2 
+0,0,10,15,14,4,0,0,0,1,15,13,13,15,2,0,0,0,0,4,14,14,0,0,0,0,6,16,16,6,0,0,0,0,3,10,16,12,0,0,0,0,0,0,9,16,8,0,0,1,15,8,13,16,4,0,0,0,10,14,11,6,1,0,3 +0,0,4,12,15,16,16,2,0,0,3,6,4,13,15,1,0,0,0,0,2,15,6,0,0,0,0,1,12,13,0,0,0,4,16,16,16,12,4,0,0,1,3,12,14,0,0,0,0,0,3,16,6,0,0,0,0,0,5,15,2,0,0,0,7 +0,0,0,5,14,1,0,0,0,0,1,14,7,0,3,0,0,0,9,14,1,6,15,0,0,3,16,12,4,12,8,0,0,0,7,10,14,16,4,0,0,0,0,0,10,13,0,0,0,0,0,1,16,3,0,0,0,0,0,6,12,0,0,0,4 +0,1,15,15,2,0,0,0,0,9,15,12,10,0,0,0,0,8,10,8,12,0,0,0,0,1,1,5,15,0,0,0,0,0,0,6,14,0,0,0,0,0,0,10,14,1,0,0,0,0,11,16,16,16,10,0,0,0,14,13,8,10,7,0,2 +0,0,3,14,12,15,11,0,0,0,2,8,8,15,14,0,0,0,0,0,0,15,5,0,0,0,2,4,9,16,1,0,0,4,15,16,16,16,6,0,0,2,4,11,13,1,0,0,0,0,1,16,7,0,0,0,0,0,4,14,2,0,0,0,7 +0,1,11,16,14,7,0,0,0,6,16,10,8,4,0,0,0,0,16,1,0,0,0,0,0,1,16,9,5,0,0,0,0,0,12,12,15,2,0,0,0,0,0,0,9,8,0,0,0,0,9,8,14,9,0,0,0,0,9,15,9,1,0,0,5 +0,1,10,14,2,0,0,0,0,4,16,14,6,0,0,0,0,5,9,8,8,0,0,0,0,0,1,13,7,0,0,0,0,0,3,16,1,0,0,0,0,0,6,14,0,2,2,0,0,0,16,16,16,16,12,0,0,0,15,13,10,9,8,0,2 +0,0,11,13,8,0,0,0,0,5,16,11,16,16,6,0,0,7,16,1,9,15,3,0,0,1,13,14,13,14,0,0,0,0,4,16,16,5,0,0,0,0,14,10,13,10,0,0,0,4,16,6,10,12,0,0,0,1,11,16,15,5,0,0,8 +0,0,0,13,9,0,0,0,0,0,8,16,13,0,0,0,0,4,16,16,15,1,0,0,0,0,3,10,16,4,0,0,0,0,0,14,16,3,0,0,0,0,0,15,16,3,0,0,0,0,5,16,16,10,0,0,0,0,0,10,12,11,1,0,1 +0,0,0,6,9,0,0,0,0,0,0,12,12,8,0,0,0,0,0,16,7,12,0,0,0,0,6,11,7,10,0,0,0,1,14,4,13,13,5,0,0,5,16,16,16,13,5,0,0,0,4,6,16,9,0,0,0,0,0,8,16,7,0,0,4 +0,0,9,14,7,1,0,0,0,6,16,10,15,5,0,0,0,7,15,0,12,8,0,0,0,0,0,3,16,4,0,0,0,0,0,7,14,0,0,0,0,0,2,16,5,0,1,0,0,0,8,15,13,16,15,2,0,0,8,14,11,6,10,2,2 +0,0,3,12,11,1,0,0,0,0,12,15,11,10,0,0,0,0,12,7,2,14,0,0,0,0,0,0,3,16,0,0,0,0,0,0,13,11,0,0,0,0,0,10,15,1,0,0,0,0,5,16,12,9,8,1,0,0,3,11,15,12,16,4,2 +0,0,2,11,12,4,0,0,0,0,11,15,14,11,0,0,0,0,3,9,12,11,0,0,0,0,0,10,16,14,2,0,0,0,0,2,5,13,7,0,0,0,6,0,0,9,11,0,0,0,14,13,8,14,9,0,0,0,5,15,14,11,1,0,3 +0,0,0,6,10,0,0,0,0,0,0,12,5,0,0,0,0,0,2,14,2,5,0,0,0,0,12,7,4,14,0,0,0,3,16,4,10,14,5,0,0,7,16,16,16,12,5,0,0,0,0,2,16,1,0,0,0,0,0,5,14,1,0,0,4 +0,0,0,11,7,0,0,0,0,0,0,16,8,2,0,0,0,0,6,16,9,15,0,0,0,0,11,10,11,15,0,0,0,4,16,12,16,16,9,0,0,11,16,16,16,13,6,0,0,1,4,11,16,0,0,0,0,0,0,11,13,0,0,0,4 +0,0,2,14,13,5,0,0,0,0,8,16,16,10,0,0,0,6,16,16,16,7,0,0,0,9,16,16,16,3,0,0,0,0,7,16,16,2,0,0,0,0,8,16,16,6,0,0,0,0,10,16,16,11,0,0,0,0,3,13,14,7,0,0,1 +0,0,3,15,12,3,0,0,0,0,9,16,16,5,0,0,0,3,14,16,16,5,0,0,0,7,16,16,16,3,0,0,0,0,3,16,16,3,0,0,0,0,0,16,16,4,0,0,0,0,7,15,16,6,0,0,0,0,6,16,14,3,0,0,1 +0,0,5,15,8,2,0,0,0,0,15,16,14,12,0,0,0,5,13,2,0,14,3,0,0,5,11,0,0,8,8,0,0,8,8,0,0,12,5,0,0,3,14,0,0,15,4,0,0,0,13,12,10,14,0,0,0,0,4,14,15,4,0,0,0 +0,0,0,9,16,4,0,0,0,0,6,16,16,4,0,0,0,0,11,15,1,0,0,0,0,0,14,13,0,0,0,0,0,0,16,16,16,9,0,0,0,1,16,12,8,14,5,0,0,0,11,15,9,15,9,0,0,0,0,10,13,15,3,0,6 +0,0,9,13,7,0,0,0,0,2,16,12,15,12,2,0,0,8,11,0,4,16,4,0,0,8,13,1,8,16,7,0,0,1,15,16,13,15,8,0,0,0,0,2,0,9,12,0,0,0,6,9,9,15,9,0,0,0,6,16,14,8,1,0,9 +0,2,11,14,12,9,0,0,0,8,13,6,9,14,4,0,0,5,16,5,1,14,6,0,0,0,8,16,16,13,1,0,0,0,3,16,16,10,0,0,0,1,14,9,7,13,0,0,0,1,16,5,7,16,1,0,0,0,14,16,16,9,0,0,8 +0,0,6,12,14,2,0,0,0,1,16,14,13,11,0,0,0,0,5,3,9,13,0,0,0,0,3,9,16,8,0,0,0,0,4,12,12,15,7,0,0,0,0,0,0,9,14,0,0,0,4,8,6,12,14,1,0,0,6,16,16,15,4,0,3 +0,0,4,12,10,1,0,0,0,1,15,9,14,4,0,0,0,0,7,3,13,4,0,0,0,0,0,16,16,6,0,0,0,0,0,2,5,14,4,0,0,0,3,0,0,8,12,0,0,0,15,8,5,15,9,0,0,0,3,13,16,10,0,0,3 
+0,0,10,15,11,2,0,0,0,2,16,7,14,10,0,0,0,3,16,1,9,16,1,0,0,0,11,14,15,16,5,0,0,0,1,4,5,10,7,0,0,0,0,0,0,11,9,0,0,0,9,8,9,15,6,0,0,0,7,15,12,6,0,0,9 +0,0,5,15,4,0,0,0,0,0,15,14,15,0,0,0,0,2,15,3,14,2,0,0,0,0,3,0,14,4,0,0,0,0,0,2,15,0,0,0,0,0,0,10,10,0,0,0,0,0,6,16,8,10,14,0,0,0,7,12,12,12,12,1,2 +0,0,0,11,4,0,0,0,0,0,0,14,5,1,0,0,0,0,4,13,8,8,0,0,0,0,10,5,10,6,0,0,0,3,16,5,14,12,3,0,0,8,16,16,15,14,6,0,0,0,4,8,8,0,0,0,0,0,0,8,9,0,0,0,4 +0,0,5,11,15,5,0,0,0,0,14,13,9,6,0,0,0,1,15,9,1,0,0,0,0,4,16,16,14,2,0,0,0,0,6,3,6,10,0,0,0,0,0,0,1,14,0,0,0,0,5,10,10,14,0,0,0,0,5,15,16,9,0,0,5 +0,0,0,6,8,0,0,0,0,0,0,15,3,0,0,0,0,0,2,14,2,11,0,0,0,0,9,8,5,10,0,0,0,0,14,2,8,7,2,0,0,4,16,9,14,16,8,0,0,2,8,12,16,5,2,0,0,0,0,6,13,2,0,0,4 +0,0,2,8,14,2,0,0,0,0,9,16,10,1,0,0,0,1,16,9,1,0,0,0,0,2,16,16,14,3,0,0,0,0,15,10,7,14,0,0,0,0,14,1,0,13,4,0,0,0,7,10,5,15,4,0,0,0,0,9,16,12,0,0,6 +0,0,0,2,9,0,0,0,0,0,0,5,10,0,0,0,0,0,0,12,4,8,0,0,0,0,4,12,6,13,0,0,0,0,10,8,8,10,0,0,0,7,16,13,16,15,2,0,0,6,10,11,16,8,0,0,0,0,0,4,13,0,0,0,4 +0,0,11,10,7,4,0,0,0,2,15,8,16,16,2,0,0,9,11,0,8,16,1,0,0,5,16,9,15,16,6,0,0,0,5,10,4,12,8,0,0,0,0,0,0,10,9,0,0,0,6,7,10,15,5,0,0,1,12,15,12,3,0,0,9 +0,0,8,14,8,0,0,0,0,6,15,12,15,4,0,0,0,6,8,0,11,8,0,0,0,0,0,14,16,8,0,0,0,0,1,11,10,15,6,0,0,0,0,0,0,8,12,0,0,0,7,8,6,13,14,0,0,0,12,14,16,13,1,0,3 +0,0,2,12,14,1,0,0,0,0,13,16,12,12,0,0,0,1,16,5,0,15,4,0,0,3,16,3,0,11,7,0,0,6,12,0,0,12,5,0,0,4,16,3,2,15,7,0,0,0,15,12,14,13,1,0,0,0,4,15,13,2,0,0,0 +0,0,0,5,15,0,0,0,0,0,1,15,11,0,0,0,0,0,3,16,16,7,0,0,0,0,10,15,13,12,0,0,0,1,15,14,15,16,9,0,0,8,16,16,16,14,6,0,0,2,10,10,16,7,0,0,0,0,0,6,15,2,0,0,4 +0,0,3,14,12,1,0,0,0,0,14,15,13,12,0,0,0,0,16,2,0,14,2,0,0,1,14,0,0,10,6,0,0,2,15,0,0,13,5,0,0,3,16,1,1,15,6,0,0,0,13,10,13,15,1,0,0,0,2,12,14,6,0,0,0 +0,8,16,16,16,16,9,0,0,5,12,12,14,16,9,0,0,0,0,2,15,13,0,0,0,0,0,9,16,5,0,0,0,0,2,16,13,0,0,0,0,0,11,16,4,0,0,0,0,7,16,15,0,0,0,0,0,6,16,11,0,0,0,0,7 +0,0,0,11,15,4,0,0,0,3,7,16,16,8,0,0,0,12,16,16,16,5,0,0,0,3,8,13,16,5,0,0,0,0,0,8,16,10,0,0,0,0,0,8,16,14,1,0,0,0,0,10,16,16,2,0,0,0,0,6,14,12,5,0,1 +0,0,9,12,14,7,0,0,0,0,12,14,9,8,0,0,0,0,12,8,1,0,0,0,0,0,15,16,14,1,0,0,0,0,6,8,10,10,0,0,0,0,0,0,8,11,0,0,0,0,7,13,16,8,0,0,0,0,8,15,8,0,0,0,5 +0,0,5,16,14,3,0,0,0,0,2,14,16,10,0,0,0,0,0,12,16,8,0,0,0,0,0,14,16,6,0,0,0,0,1,16,16,3,0,0,0,0,3,15,16,0,0,0,0,0,0,15,16,9,0,0,0,0,0,13,16,11,0,0,1 +0,2,13,10,0,0,0,0,0,8,15,14,7,0,0,0,0,8,5,4,12,0,0,0,0,2,3,2,15,0,0,0,0,0,0,4,9,0,0,0,0,0,0,11,9,0,0,0,0,0,9,16,14,12,10,0,0,0,16,13,12,14,11,0,2 +0,0,13,14,2,0,0,0,0,5,16,15,10,0,0,0,0,4,8,0,16,0,0,0,0,0,2,3,12,0,0,0,0,0,0,5,11,0,0,0,0,0,2,13,6,0,0,0,0,0,13,16,14,15,14,0,0,0,8,8,8,12,15,0,2 +0,0,0,0,13,8,0,0,0,0,0,2,16,6,0,0,0,0,0,7,16,7,3,0,0,0,1,13,12,15,7,0,0,3,12,14,5,16,3,0,2,15,16,16,16,16,1,0,5,12,13,16,16,15,0,0,0,0,0,0,15,6,0,0,4 +0,0,0,1,12,1,0,0,0,0,0,4,15,0,0,0,0,0,0,6,11,8,2,0,0,0,1,13,4,16,1,0,0,4,12,10,9,14,0,0,0,15,16,16,16,12,0,0,1,8,7,7,15,9,0,0,0,0,0,0,16,5,0,0,4 +0,0,4,16,15,7,0,0,0,0,6,16,10,13,2,0,0,0,2,15,12,15,6,0,0,0,3,14,15,13,2,0,0,2,15,15,15,9,0,0,0,6,16,2,7,13,0,0,0,3,16,11,8,16,4,0,0,0,4,12,16,13,1,0,8 +0,0,10,16,10,3,0,0,0,1,16,9,13,12,0,0,0,1,13,2,7,16,3,0,0,0,12,16,16,16,8,0,0,0,1,4,2,8,8,0,0,0,0,0,0,7,9,0,0,1,14,9,8,14,6,0,0,0,8,13,13,7,0,0,9 +0,2,12,13,1,0,0,0,0,10,15,14,11,0,0,0,0,12,9,5,12,0,0,0,0,4,5,4,16,0,0,0,0,0,0,9,9,0,0,0,0,0,1,13,8,0,1,0,0,1,16,16,14,14,11,0,0,2,13,12,12,12,6,0,2 +0,0,1,14,4,0,0,0,0,0,7,14,2,0,0,0,0,0,10,14,0,0,0,0,0,0,13,9,0,0,0,0,0,0,14,10,8,1,0,0,0,0,14,16,16,14,4,0,0,0,12,16,9,14,15,0,0,0,1,11,15,13,11,2,6 
+0,0,15,16,12,4,0,0,0,0,11,16,11,16,4,0,0,0,4,16,11,16,6,0,0,0,3,16,16,14,1,0,0,0,12,14,16,5,0,0,0,3,16,1,14,12,0,0,0,6,15,4,11,13,0,0,0,1,10,16,11,2,0,0,8 +0,0,0,3,14,5,0,0,0,0,0,6,16,13,1,0,0,0,0,8,16,13,1,0,0,0,0,9,16,11,0,0,0,5,14,16,16,12,0,0,0,0,2,9,16,16,4,0,0,0,0,5,16,16,8,0,0,0,0,3,13,14,5,0,1 +0,0,4,15,0,0,0,0,0,0,13,5,0,0,0,0,0,0,16,0,0,0,0,0,0,4,13,0,0,0,0,0,0,7,15,8,9,3,0,0,0,6,16,12,13,16,4,0,0,0,15,8,2,14,7,0,0,0,4,15,13,6,0,0,6 +0,0,3,12,16,8,0,0,0,0,12,12,15,12,0,0,0,0,0,0,7,12,0,0,0,0,0,0,13,7,0,0,0,0,4,15,16,9,1,0,0,0,5,15,16,16,3,0,0,0,0,14,14,5,0,0,0,0,4,15,3,0,0,0,7 +0,0,0,14,5,0,0,0,0,0,9,13,1,0,0,0,0,0,15,6,0,0,0,0,0,3,16,3,0,0,0,0,0,7,12,8,16,8,0,0,0,4,16,15,9,15,4,0,0,0,12,12,4,10,12,0,0,0,1,12,15,16,6,0,6 +0,0,14,15,16,7,0,0,0,4,15,8,12,14,0,0,0,0,2,4,15,9,0,0,0,0,0,8,16,8,0,0,0,0,0,1,11,16,3,0,0,0,0,0,0,10,10,0,0,1,11,11,8,14,11,0,0,1,10,12,14,13,4,0,3 +0,0,6,11,2,0,0,0,0,1,16,12,14,3,0,0,0,0,13,3,11,15,0,0,0,0,6,16,15,16,5,0,0,0,0,4,8,10,10,0,0,0,0,0,0,1,15,0,0,0,1,1,2,7,15,5,0,0,4,12,16,16,13,2,9 +0,0,7,11,12,11,3,0,0,0,15,16,7,10,11,0,0,0,10,12,5,13,9,0,0,0,3,16,16,10,1,0,0,0,8,13,13,3,0,0,0,0,15,0,11,5,0,0,0,1,13,0,10,9,0,0,0,0,10,16,12,1,0,0,8 +0,0,0,0,13,7,0,0,0,0,0,0,14,7,0,0,0,0,0,3,15,2,0,0,0,0,1,13,7,11,2,0,0,5,14,16,10,16,2,0,0,15,16,16,16,15,1,0,0,0,0,0,10,12,0,0,0,0,0,0,15,7,0,0,4 +0,1,9,13,11,4,0,0,0,2,16,12,12,15,0,0,0,0,14,13,13,11,0,0,0,0,12,16,13,0,0,0,0,0,14,16,11,0,0,0,0,6,13,7,16,4,0,0,0,7,14,1,13,15,1,0,0,1,13,15,12,9,0,0,8 +0,0,5,16,15,6,0,0,0,0,9,15,13,10,0,0,0,0,11,15,11,4,0,0,0,0,2,12,13,16,2,0,0,0,0,0,0,7,10,0,0,0,0,0,0,4,13,0,0,0,13,11,8,14,11,0,0,0,4,11,15,15,4,0,5 +0,1,13,12,4,0,0,0,0,1,16,12,16,6,0,0,0,0,16,7,14,10,0,0,0,0,9,14,16,13,0,0,0,0,0,7,9,15,1,0,0,0,0,0,0,10,8,0,0,0,4,5,4,10,15,0,0,0,6,13,16,14,6,0,9 +0,0,11,15,13,1,0,0,0,5,15,9,15,12,0,0,0,8,12,0,12,16,0,0,0,5,16,9,15,16,0,0,0,0,4,10,13,16,2,0,0,0,0,0,1,16,6,0,0,0,1,5,8,16,7,0,0,0,12,12,10,7,1,0,9 +0,0,9,15,15,3,0,0,0,0,11,9,16,10,0,0,0,0,0,0,14,10,0,0,0,0,2,5,16,6,0,0,0,1,15,16,16,13,5,0,0,1,10,16,16,15,5,0,0,0,5,16,11,2,0,0,0,0,9,16,5,0,0,0,7 +0,0,6,13,14,6,0,0,0,0,16,13,6,16,3,0,0,0,13,10,5,16,2,0,0,0,4,16,16,12,1,0,0,0,12,16,13,0,0,0,0,2,16,7,14,9,0,0,0,3,16,5,9,14,0,0,0,0,8,15,16,8,0,0,8 +0,0,3,10,14,15,3,0,0,0,15,16,14,11,1,0,0,3,16,11,8,2,0,0,0,3,16,16,16,13,0,0,0,0,0,0,0,15,7,0,0,0,1,0,0,10,7,0,0,0,14,13,9,16,4,0,0,0,4,13,15,7,0,0,5 +0,0,11,16,6,0,0,0,0,9,15,14,16,0,0,0,0,10,12,3,16,2,0,0,0,5,6,0,15,2,0,0,0,0,0,7,11,0,0,0,0,0,4,16,8,4,1,0,0,0,11,16,16,16,9,0,0,0,8,12,10,13,7,0,2 +0,1,10,13,15,11,1,0,0,4,16,12,8,7,0,0,0,5,16,4,5,1,0,0,0,3,16,16,15,15,2,0,0,0,0,1,5,14,5,0,0,0,0,0,0,15,5,0,0,7,11,6,6,16,4,0,0,2,10,14,14,8,0,0,5 +0,0,9,16,10,0,0,0,0,0,16,13,16,5,0,0,0,3,16,4,7,14,0,0,0,4,15,3,0,12,7,0,0,7,12,0,0,10,8,0,0,5,13,0,4,15,3,0,0,2,16,13,16,9,0,0,0,0,8,13,9,1,0,0,0 +0,0,7,16,12,1,0,0,0,0,16,11,13,11,0,0,0,3,16,2,4,14,0,0,0,7,13,0,0,13,1,0,0,4,15,0,0,12,6,0,0,2,16,4,0,10,7,0,0,1,15,10,8,14,2,0,0,0,5,16,15,5,0,0,0 +0,0,12,16,13,11,3,0,0,4,16,6,12,16,5,0,0,0,15,11,14,16,6,0,0,0,6,8,8,13,8,0,0,0,0,0,0,8,8,0,0,0,0,0,0,9,8,0,0,6,13,5,6,16,3,0,0,0,9,13,12,6,0,0,9 +0,0,3,16,6,0,0,0,0,0,10,14,4,0,0,0,0,0,15,10,0,0,0,0,0,2,16,4,0,0,0,0,0,5,16,15,14,6,0,0,0,3,16,16,12,16,4,0,0,0,15,14,6,13,12,0,0,0,3,10,13,10,4,0,6 +0,0,2,11,16,15,5,0,0,0,13,14,11,16,6,0,0,0,2,0,1,15,1,0,0,0,0,2,10,11,0,0,0,0,5,16,16,16,6,0,0,0,4,12,15,9,2,0,0,0,1,14,9,0,0,0,0,0,3,15,4,0,0,0,7 
+0,1,11,14,12,3,0,0,0,7,16,13,16,15,0,0,0,8,16,5,14,16,2,0,0,6,16,10,15,16,6,0,0,0,5,8,12,16,6,0,0,0,0,0,2,16,11,0,0,0,5,8,7,16,9,0,0,0,8,16,15,10,1,0,9 +0,0,5,13,9,7,0,0,0,0,14,16,16,16,6,0,0,2,14,7,6,10,10,0,0,5,11,0,0,8,8,0,0,4,12,0,0,8,8,0,0,4,15,1,0,9,7,0,0,0,14,9,6,16,2,0,0,0,5,15,16,7,0,0,0 +0,0,1,12,0,0,0,0,0,0,4,14,0,0,0,0,0,0,11,9,0,0,0,0,0,0,13,10,2,0,0,0,0,4,16,16,16,13,1,0,0,1,16,10,4,13,11,0,0,0,8,13,2,11,11,0,0,0,0,11,16,14,3,0,6 +0,0,2,13,1,0,0,0,0,0,7,16,6,0,0,0,0,0,12,16,3,0,0,0,0,0,9,16,5,0,0,0,0,0,15,16,16,15,5,0,0,0,13,16,10,14,15,0,0,0,10,14,9,16,14,0,0,0,2,11,15,15,4,0,6 +0,0,7,14,12,5,0,0,0,0,10,16,16,12,0,0,0,0,10,16,16,8,0,0,0,0,10,16,16,6,0,0,0,0,9,16,16,3,0,0,0,0,12,16,16,2,0,0,0,0,12,16,16,11,0,0,0,0,14,16,13,8,0,0,1 +0,0,2,9,15,12,0,0,0,1,13,13,10,16,1,0,0,0,10,2,8,16,0,0,0,0,0,4,16,14,1,0,0,0,0,0,4,15,4,0,0,0,0,0,0,15,5,0,0,0,0,12,15,16,1,0,0,0,0,9,13,7,0,0,3 +0,0,0,1,12,12,0,0,0,0,0,2,16,16,2,0,0,0,0,4,16,16,0,0,0,6,16,16,16,13,0,0,0,1,12,14,16,12,0,0,0,0,0,0,16,15,0,0,0,0,0,0,15,16,2,0,0,0,0,0,8,13,5,0,1 +0,0,9,11,0,0,0,0,0,7,16,16,8,0,0,0,0,8,11,7,11,0,0,0,0,2,4,8,11,0,0,0,0,0,1,14,3,0,0,0,0,0,9,12,0,0,0,0,0,0,14,16,15,14,7,0,0,0,8,12,12,15,10,0,2 +0,0,6,15,2,0,0,0,0,0,14,11,0,0,0,0,0,0,16,8,0,0,0,0,0,4,16,4,0,0,0,0,0,6,16,11,8,3,0,0,0,7,16,16,14,15,3,0,0,0,16,13,8,16,7,0,0,0,7,16,16,10,1,0,6 +0,0,15,16,16,14,0,0,0,0,3,4,13,13,0,0,0,0,0,0,14,10,0,0,0,0,5,13,16,5,0,0,0,0,10,16,16,16,8,0,0,0,2,16,7,7,1,0,0,0,8,16,0,0,0,0,0,0,14,11,0,0,0,0,7 +0,0,0,1,15,11,0,0,0,0,0,6,16,6,0,0,0,0,0,13,14,1,0,0,0,0,8,15,5,9,4,0,0,7,16,8,6,16,7,0,0,13,16,16,16,16,5,0,0,0,0,7,15,15,0,0,0,0,0,1,16,10,0,0,4 +0,0,6,11,0,0,0,0,0,0,8,16,1,0,0,0,0,0,11,16,1,0,0,0,0,0,14,13,2,0,0,0,0,0,13,16,16,13,3,0,0,0,15,14,8,14,12,0,0,0,14,11,7,15,10,0,0,0,4,13,16,10,2,0,6 +0,0,6,15,11,8,3,0,0,2,16,16,16,16,11,0,0,0,15,11,7,16,8,0,0,0,8,16,15,4,0,0,0,0,5,16,7,0,0,0,0,0,15,15,13,0,0,0,0,1,16,11,16,2,0,0,0,0,12,16,12,0,0,0,8 +0,0,13,12,10,12,8,0,0,2,16,16,16,14,5,0,0,3,16,5,2,0,0,0,0,7,16,10,7,0,0,0,0,5,12,12,16,15,1,0,0,0,0,0,7,16,4,0,0,0,4,6,7,15,3,0,0,0,10,16,16,8,0,0,5 +0,0,8,16,14,4,0,0,0,5,16,11,12,13,1,0,0,8,16,0,8,16,0,0,0,6,16,7,8,16,5,0,0,0,8,12,12,16,8,0,0,0,0,0,0,12,9,0,0,0,5,10,1,15,5,0,0,0,10,16,14,12,2,0,9 +0,0,0,7,16,0,0,0,0,0,0,16,11,0,0,0,0,0,6,16,6,0,0,0,0,0,14,13,0,11,7,0,0,11,16,2,8,16,1,0,5,16,16,16,16,14,0,0,0,4,9,14,16,7,0,0,0,0,0,9,15,1,0,0,4 +0,0,2,14,9,2,0,0,0,0,10,16,16,13,0,0,0,0,12,6,3,14,4,0,0,1,13,2,0,6,8,0,0,7,14,0,0,7,7,0,0,2,16,2,0,12,4,0,0,0,11,12,12,15,0,0,0,0,2,14,14,2,0,0,0 +0,1,16,16,15,3,0,0,0,0,7,6,16,10,0,0,0,0,0,1,16,8,0,0,0,0,6,15,15,2,0,0,0,0,7,16,16,11,3,0,0,0,5,16,14,16,7,0,0,0,10,12,1,4,1,0,0,0,16,6,0,0,0,0,7 +0,0,0,2,12,1,0,0,0,0,0,10,13,0,0,0,0,0,0,15,4,0,0,0,0,0,8,12,0,7,1,0,0,5,16,2,7,16,4,0,0,15,16,13,16,11,0,0,1,6,8,9,16,7,0,0,0,0,0,3,15,1,0,0,4 +0,0,14,16,15,10,0,0,0,0,8,12,14,16,0,0,0,0,0,0,12,12,0,0,0,0,7,12,16,9,0,0,0,0,16,16,16,16,7,0,0,0,2,16,12,10,3,0,0,0,8,16,1,0,0,0,0,1,16,11,0,0,0,0,7 +0,0,0,4,8,0,0,0,0,0,0,11,8,0,0,0,0,0,0,13,6,0,0,0,0,0,2,15,2,3,0,0,0,0,11,10,4,16,2,0,0,11,14,0,9,16,2,0,0,9,15,12,15,16,0,0,0,0,2,6,15,6,0,0,4 +0,0,11,16,16,9,0,0,0,0,10,8,12,11,0,0,0,0,0,0,12,6,0,0,0,0,8,13,16,6,0,0,0,0,7,14,14,13,6,0,0,0,0,13,3,0,0,0,0,0,6,15,0,0,0,0,0,0,11,10,0,0,0,0,7 +0,0,5,12,11,4,0,0,0,3,16,16,16,16,1,0,0,9,13,1,14,16,1,0,0,4,16,4,13,16,3,0,0,0,12,16,16,16,4,0,0,0,0,0,1,13,7,0,0,0,3,8,6,14,8,0,0,0,5,16,16,14,3,0,9 
+0,0,4,12,14,12,3,0,0,0,15,14,10,16,3,0,0,2,16,8,8,5,0,0,0,7,16,16,16,15,2,0,0,3,7,2,2,14,5,0,0,0,0,0,2,15,3,0,0,0,3,11,14,13,0,0,0,0,8,15,11,4,0,0,5 +0,1,13,16,16,15,4,0,0,0,7,8,10,16,4,0,0,0,0,0,9,16,0,0,0,0,8,12,16,16,7,0,0,0,12,15,16,14,10,0,0,0,0,14,8,0,0,0,0,0,8,14,2,0,0,0,0,2,16,9,0,0,0,0,7 +0,1,9,15,12,5,0,0,0,10,16,14,16,16,1,0,0,2,14,12,15,14,0,0,0,0,7,16,15,5,0,0,0,0,7,16,12,0,0,0,0,0,14,16,13,0,0,0,0,0,16,15,16,4,0,0,0,0,12,16,13,2,0,0,8 +0,0,5,16,16,7,0,0,0,1,9,16,16,8,0,0,0,0,4,16,16,12,0,0,0,0,8,16,16,8,0,0,0,0,5,16,16,12,0,0,0,0,7,16,16,13,0,0,0,0,8,16,16,16,4,0,0,0,7,13,15,10,2,0,1 +0,1,12,16,16,16,6,0,0,0,7,8,6,16,13,0,0,0,0,0,3,16,7,0,0,0,3,7,11,14,1,0,0,0,10,16,16,15,3,0,0,0,1,15,15,15,3,0,0,0,7,16,6,0,0,0,0,0,15,13,0,0,0,0,7 +0,0,9,11,4,1,0,0,0,1,16,16,16,1,0,0,0,0,16,16,16,0,0,0,0,1,15,16,16,4,0,0,0,0,14,16,16,4,0,0,0,1,16,16,16,7,0,0,0,0,16,16,16,8,0,0,0,0,6,12,12,7,2,0,1 +0,0,7,16,16,16,8,0,0,0,9,7,4,14,11,0,0,0,0,2,5,16,3,0,0,0,1,15,16,16,5,0,0,0,0,13,16,15,9,0,0,0,0,10,10,3,1,0,0,0,3,16,3,0,0,0,0,0,9,12,0,0,0,0,7 +0,0,14,3,0,0,0,0,0,6,16,2,0,0,0,0,0,8,16,0,0,0,0,0,0,8,16,4,3,0,0,0,0,8,16,16,16,14,1,0,0,8,16,5,5,16,8,0,0,4,16,2,7,16,2,0,0,0,12,16,15,7,0,0,6 +0,0,14,11,1,0,0,0,0,11,14,15,8,0,0,0,0,6,6,1,16,0,0,0,0,0,0,2,15,0,0,0,0,0,0,5,15,0,0,0,0,0,0,8,11,0,0,0,0,0,8,16,13,8,7,0,0,0,11,16,15,13,15,1,2 +0,0,6,12,13,5,0,0,0,0,14,9,8,16,2,0,0,0,5,1,6,15,3,0,0,0,0,3,16,8,0,0,0,0,2,14,8,0,0,0,0,0,11,13,0,0,0,0,0,1,16,5,0,0,0,0,0,0,9,15,13,10,8,0,2 +0,0,0,13,14,0,0,0,0,0,5,16,7,0,0,0,0,0,8,13,0,0,0,0,0,0,12,11,0,0,0,0,0,1,16,16,14,2,0,0,0,0,10,11,10,14,0,0,0,0,7,13,9,15,0,0,0,0,1,11,15,8,0,0,6 +0,4,12,13,16,16,4,0,0,12,16,16,11,8,5,0,0,16,13,5,0,0,0,0,0,8,14,0,0,0,0,0,0,1,15,6,0,0,0,0,0,0,7,15,0,0,0,0,0,0,8,16,0,0,0,0,0,3,16,12,0,0,0,0,5 +0,0,0,9,12,0,0,0,0,0,4,16,6,0,0,0,0,3,15,10,0,10,7,0,0,10,14,0,6,16,4,0,0,10,15,12,14,13,0,0,0,0,5,9,16,7,0,0,0,0,0,5,16,2,0,0,0,0,0,9,12,0,0,0,4 +0,0,0,16,14,1,0,0,0,0,7,16,15,2,0,0,0,9,16,16,11,0,0,0,1,15,15,16,10,0,0,0,0,2,3,16,9,0,0,0,0,0,0,16,9,0,0,0,0,0,2,16,6,0,0,0,0,0,0,16,8,0,0,0,1 +0,1,11,16,16,4,0,0,0,8,12,4,14,8,0,0,0,5,2,0,12,8,0,0,0,0,1,7,16,11,1,0,0,0,10,16,16,16,11,0,0,0,0,14,8,0,1,0,0,0,6,15,1,0,0,0,0,1,15,4,0,0,0,0,7 +0,0,7,13,16,11,4,0,0,1,16,5,2,13,12,0,0,4,14,0,4,15,4,0,0,2,15,13,16,8,0,0,0,0,1,8,15,1,0,0,0,0,1,13,5,0,0,0,0,0,9,10,0,0,0,0,0,0,12,6,0,0,0,0,9 +0,0,0,4,16,9,0,0,0,0,0,11,16,9,0,0,0,0,6,16,16,3,0,0,0,7,15,16,16,2,0,0,0,9,16,13,15,0,0,0,0,0,0,10,13,0,0,0,0,0,0,10,15,0,0,0,0,0,0,6,16,6,0,0,1 +0,3,12,16,16,16,4,0,0,8,11,6,4,12,15,0,0,1,0,0,6,15,10,0,0,0,0,7,16,7,0,0,0,0,0,10,14,1,0,0,0,0,0,1,15,9,0,0,0,0,6,2,13,12,0,0,0,2,16,16,14,3,0,0,3 +0,0,2,15,12,0,0,0,0,0,10,15,3,0,0,0,0,8,16,4,0,13,7,0,0,10,16,1,2,16,10,0,0,9,16,12,14,14,1,0,0,0,6,12,16,7,0,0,0,0,0,14,13,0,0,0,0,0,2,16,8,0,0,0,4 +0,0,4,14,11,3,0,0,0,1,14,15,16,14,0,0,0,3,16,2,3,11,4,0,0,6,11,0,0,6,6,0,0,7,13,0,0,8,7,0,0,2,15,0,0,14,3,0,0,0,12,6,11,13,0,0,0,0,4,12,13,2,0,0,0 +0,0,5,14,1,0,0,0,0,0,10,14,0,0,0,0,0,1,16,6,1,9,3,0,0,8,16,0,11,15,1,0,0,10,16,8,16,6,0,0,0,0,6,14,16,4,0,0,0,0,0,13,16,2,0,0,0,0,2,16,9,1,0,0,4 +0,0,0,12,12,1,0,0,0,0,6,16,8,0,0,0,0,0,11,11,0,0,0,0,0,0,13,6,0,0,0,0,0,0,15,16,16,15,5,0,0,0,14,14,4,5,15,1,0,0,8,14,2,6,16,3,0,0,0,8,13,15,9,0,6 +0,2,13,16,16,13,1,0,0,14,15,8,10,16,4,0,0,5,2,0,6,16,4,0,0,0,8,16,16,16,9,0,0,0,5,14,16,9,2,0,0,0,2,16,11,0,0,0,0,0,8,16,3,0,0,0,0,2,16,9,0,0,0,0,7 
+0,0,5,9,16,13,4,0,0,1,15,8,5,14,12,0,0,0,4,0,4,14,8,0,0,0,0,2,14,10,0,0,0,0,0,12,14,0,0,0,0,0,7,14,3,0,0,0,0,0,15,10,0,0,0,0,0,0,4,12,12,6,0,0,2 +0,0,11,14,4,0,0,0,0,6,15,14,16,5,0,0,0,11,10,12,16,12,0,0,0,2,12,10,3,14,6,0,0,0,0,0,0,12,8,0,0,0,0,0,0,11,10,0,0,0,0,1,7,14,4,0,0,0,12,16,12,4,0,0,9 +0,0,4,14,15,6,0,0,0,1,15,9,8,15,0,0,0,5,12,0,12,15,3,0,0,3,16,16,12,16,6,0,0,0,1,3,0,13,8,0,0,0,0,0,2,16,5,0,0,0,0,4,14,11,0,0,0,0,10,16,9,0,0,0,9 +0,2,15,16,15,7,0,0,0,8,16,14,12,14,3,0,0,6,16,5,0,0,0,0,0,2,15,11,0,0,0,0,0,0,5,16,6,0,0,0,0,0,0,12,15,0,0,0,0,0,6,14,14,0,0,0,0,2,13,16,4,0,0,0,5 +0,0,0,9,13,0,0,0,0,0,5,16,13,4,0,0,0,0,12,12,0,0,0,0,0,0,14,5,0,0,0,0,0,0,16,11,15,11,0,0,0,0,13,16,13,15,8,0,0,0,7,16,5,13,10,0,0,0,0,9,13,13,3,0,6 +0,0,0,0,15,5,0,0,0,0,0,5,16,9,0,0,0,0,1,15,16,5,0,0,0,6,13,16,16,7,0,0,0,7,16,10,16,6,0,0,0,0,0,2,16,6,0,0,0,0,0,2,16,6,0,0,0,0,0,1,15,12,0,0,1 +0,0,0,7,12,1,0,0,0,0,3,16,12,0,0,0,0,0,8,13,0,0,0,0,0,0,14,9,1,0,0,0,0,0,14,16,16,16,5,0,0,0,12,16,11,6,16,1,0,0,6,15,2,7,15,2,0,0,0,8,13,12,6,0,6 +0,0,10,15,10,5,0,0,0,0,14,16,16,16,3,0,0,0,13,14,1,1,0,0,0,0,5,14,2,0,0,0,0,0,0,13,8,0,0,0,0,0,0,7,14,1,0,0,0,0,9,13,16,2,0,0,0,0,12,16,15,1,0,0,5 +0,0,1,10,9,0,0,0,0,0,7,16,14,0,0,0,0,4,14,16,15,0,0,0,0,12,14,16,16,0,0,0,0,0,0,15,16,2,0,0,0,0,0,14,16,2,0,0,0,0,0,12,15,0,0,0,0,0,0,6,16,2,0,0,1 +0,3,13,16,16,12,0,0,0,13,16,15,8,10,3,0,0,12,16,2,0,0,0,0,0,3,15,9,0,0,0,0,0,0,7,16,2,0,0,0,0,0,1,16,8,0,0,0,0,0,10,16,5,0,0,0,0,3,16,13,0,0,0,0,5 +0,0,0,9,12,0,0,0,0,0,1,14,12,0,0,0,0,0,11,13,0,6,8,0,0,4,16,4,2,15,7,0,0,10,14,4,11,14,1,0,0,3,14,16,16,6,0,0,0,0,0,10,14,0,0,0,0,0,0,10,10,0,0,0,4 +0,0,2,10,16,13,2,0,0,0,13,10,4,13,9,0,0,1,16,1,0,12,12,0,0,0,13,16,14,16,9,0,0,0,0,0,1,11,8,0,0,0,0,0,0,15,4,0,0,0,0,0,4,16,3,0,0,0,3,13,15,6,0,0,9 +0,0,8,16,6,1,0,0,0,0,13,16,16,15,0,0,0,3,16,2,0,13,3,0,0,6,14,0,0,11,6,0,0,3,13,0,0,13,5,0,0,0,16,0,6,15,1,0,0,0,13,10,15,9,0,0,0,0,4,12,11,0,0,0,0 +0,1,10,12,14,9,0,0,0,11,11,5,5,16,4,0,0,4,3,0,2,16,2,0,0,0,0,0,12,9,0,0,0,0,1,13,10,0,0,0,0,0,9,13,0,0,0,0,0,2,16,4,0,0,0,0,0,1,13,15,12,2,0,0,2 +0,0,6,14,6,1,0,0,0,0,14,16,14,9,0,0,0,3,16,3,1,15,1,0,0,4,13,0,0,11,5,0,0,2,14,0,0,11,8,0,0,2,16,2,2,16,4,0,0,0,12,9,12,12,0,0,0,0,4,13,11,1,0,0,0 +0,4,16,16,15,5,0,0,0,7,15,13,13,14,5,0,0,2,15,8,0,0,1,0,0,0,6,15,4,0,0,0,0,0,0,15,7,0,0,0,0,0,0,11,12,0,0,0,0,0,2,15,9,0,0,0,0,1,16,15,3,0,0,0,5 +0,0,0,14,12,0,0,0,0,0,1,16,16,2,0,0,0,0,0,16,15,0,0,0,0,0,1,16,15,0,0,0,0,0,0,15,16,0,0,0,0,0,1,16,15,0,0,0,0,0,1,16,14,0,0,0,0,0,0,11,16,3,0,0,1 +0,3,13,15,16,6,0,0,0,15,16,13,9,16,5,0,0,11,16,2,0,4,3,0,0,2,15,9,0,0,0,0,0,0,7,16,1,0,0,0,0,0,3,15,5,0,0,0,0,0,6,16,5,0,0,0,0,4,16,10,0,0,0,0,5 +0,0,5,16,1,0,0,0,0,0,11,11,0,0,6,5,0,0,14,7,0,2,15,6,0,3,16,10,1,12,12,0,0,0,14,16,16,16,4,0,0,0,1,11,16,7,0,0,0,0,0,13,10,0,0,0,0,0,7,15,1,0,0,0,4 +0,0,11,13,12,7,0,0,0,2,15,4,5,16,4,0,0,0,16,5,13,11,0,0,0,0,7,16,10,1,0,0,0,0,8,16,4,0,0,0,0,0,15,5,11,0,0,0,0,3,13,4,12,0,0,0,0,1,11,16,8,0,0,0,8 +0,0,0,8,16,13,1,0,0,0,4,16,11,13,9,0,0,6,15,10,0,11,11,0,0,8,16,13,14,16,9,0,0,1,9,8,11,16,4,0,0,0,0,0,12,12,0,0,0,0,0,6,16,2,0,0,0,0,0,11,10,0,0,0,9 +0,0,2,8,12,13,2,0,0,2,16,15,6,8,8,0,0,7,11,5,2,13,7,0,0,6,15,13,15,15,1,0,0,0,3,2,9,6,0,0,0,0,0,1,14,1,0,0,0,0,0,7,10,0,0,0,0,0,0,13,3,0,0,0,9 +0,2,15,16,13,2,0,0,0,3,15,10,14,9,0,0,0,0,0,0,14,10,0,0,0,0,0,10,16,2,0,0,0,0,2,16,10,0,0,0,0,0,12,13,1,0,0,0,0,2,16,12,8,10,13,2,0,2,13,16,16,16,16,3,2 +0,0,6,15,13,1,0,0,0,1,16,10,5,12,0,0,0,8,16,5,0,9,1,0,0,7,15,0,0,7,5,0,0,5,14,0,0,5,9,0,0,2,14,0,1,12,6,0,0,0,10,11,15,16,3,0,0,0,2,11,11,4,0,0,0 
+0,0,1,16,9,0,0,0,0,0,3,16,16,2,0,0,0,0,5,16,15,1,0,0,0,0,4,16,11,0,0,0,0,0,3,16,15,0,0,0,0,0,1,16,12,0,0,0,0,0,2,16,14,0,0,0,0,0,0,14,9,0,0,0,1 +0,0,3,14,10,0,0,0,0,0,10,13,11,9,0,0,0,0,15,9,0,9,1,0,0,0,16,6,0,6,5,0,0,0,15,4,0,10,6,0,0,0,14,0,2,16,3,0,0,0,12,11,15,12,0,0,0,0,2,13,10,1,0,0,0 +0,0,12,11,0,0,0,0,0,8,16,4,0,5,3,0,0,11,16,0,4,16,9,0,0,11,16,8,13,14,1,0,0,2,13,16,16,8,0,0,0,0,1,15,10,0,0,0,0,0,5,16,3,0,0,0,0,0,12,12,0,0,0,0,4 +0,0,0,16,7,0,0,0,0,0,0,15,16,3,0,0,0,0,0,14,16,4,0,0,0,0,0,16,16,2,0,0,0,0,0,15,16,2,0,0,0,0,3,16,14,0,0,0,0,0,0,14,14,0,0,0,0,0,0,13,12,0,0,0,1 +0,0,2,13,9,1,0,0,0,0,13,14,10,10,0,0,0,0,14,3,0,8,0,0,0,0,15,9,0,5,3,0,0,0,16,8,0,7,5,0,0,0,16,7,2,16,2,0,0,0,11,9,13,14,1,0,0,0,2,15,13,1,0,0,0 +0,0,5,15,13,1,0,0,0,0,12,16,12,10,0,0,0,0,14,8,0,13,3,0,0,0,16,9,0,10,9,0,0,0,16,13,0,8,9,0,0,1,16,6,2,16,9,0,0,0,13,12,15,15,2,0,0,0,6,16,16,7,0,0,0 +0,0,11,14,10,2,0,0,0,0,12,13,13,15,0,0,0,0,3,11,12,12,1,0,0,0,0,15,12,0,0,0,0,0,5,15,10,0,0,0,0,0,12,4,11,0,0,0,0,0,14,1,12,2,0,0,0,0,10,16,10,0,0,0,8 +0,0,4,10,13,3,0,0,0,0,16,15,14,11,0,0,0,0,5,1,10,12,0,0,0,0,0,2,16,5,0,0,0,0,0,11,13,0,0,0,0,0,4,16,3,0,0,0,0,0,9,14,4,4,4,1,0,0,4,14,16,15,12,5,2 +0,0,0,10,13,5,0,0,0,0,11,12,8,15,2,0,0,6,16,1,1,14,4,0,0,6,16,8,13,16,5,0,0,2,13,12,16,12,0,0,0,0,0,0,14,6,0,0,0,0,0,4,14,1,0,0,0,0,0,11,5,0,0,0,9 +0,0,11,16,16,8,0,0,0,0,11,12,8,13,0,0,0,0,7,10,0,0,0,0,0,0,3,13,0,0,0,0,0,0,0,14,2,0,0,0,0,0,0,13,6,0,0,0,0,0,6,12,10,0,0,0,0,0,11,16,9,0,0,0,5 +0,0,0,12,1,0,0,0,0,0,4,16,3,0,0,0,0,0,7,14,0,0,0,0,0,0,9,10,4,2,0,0,0,0,12,16,16,15,5,0,0,0,10,9,0,0,12,1,0,0,7,11,1,5,15,2,0,0,0,11,16,15,8,0,6 +0,0,12,12,0,0,5,1,0,2,16,9,0,4,16,5,0,1,16,13,5,13,12,0,0,0,7,16,16,16,6,0,0,0,0,3,16,9,0,0,0,0,1,12,12,0,0,0,0,0,7,16,1,0,0,0,0,0,12,7,0,0,0,0,4 +0,0,10,13,11,2,0,0,0,0,9,11,10,15,0,0,0,0,0,1,12,14,2,0,0,0,0,15,16,5,0,0,0,0,0,4,7,14,6,0,0,0,0,0,0,6,12,0,0,0,11,4,5,14,10,0,0,0,13,16,16,10,0,0,3 +0,0,11,16,16,16,9,0,0,0,3,8,8,15,12,0,0,0,0,0,4,16,6,0,0,3,8,8,12,14,0,0,0,10,16,16,16,15,1,0,0,1,4,11,15,2,0,0,0,0,1,15,8,0,0,0,0,0,10,13,1,0,0,0,7 +0,0,0,10,13,0,0,0,0,0,4,16,7,0,0,0,0,0,12,13,0,0,0,0,0,0,13,8,0,0,0,0,0,0,13,6,3,8,1,0,0,0,12,14,16,14,14,2,0,0,7,16,13,6,11,8,0,0,0,10,15,16,13,2,6 +0,0,12,16,12,16,3,0,0,0,14,14,8,8,1,0,0,0,7,15,1,0,0,0,0,0,1,14,11,0,0,0,0,0,0,6,16,1,0,0,0,0,0,0,12,11,0,0,0,2,7,6,14,13,0,0,0,0,12,16,16,6,0,0,5 +0,1,15,10,0,0,0,0,0,7,16,5,1,13,6,0,0,9,16,3,9,16,4,0,0,3,15,16,16,11,0,0,0,0,2,14,15,3,0,0,0,0,3,16,8,0,0,0,0,0,12,15,0,0,0,0,0,1,16,5,0,0,0,0,4 +0,0,2,16,7,0,0,0,0,0,5,16,16,2,0,0,0,0,4,16,16,1,0,0,0,0,3,16,16,0,0,0,0,0,1,16,16,0,0,0,0,0,3,16,16,0,0,0,0,0,4,16,16,0,0,0,0,0,0,14,16,0,0,0,1 +0,2,16,16,16,16,4,0,0,1,8,8,12,16,6,0,0,0,0,1,14,13,0,0,0,1,4,6,16,8,0,0,0,9,16,16,16,13,2,0,0,2,11,16,10,7,0,0,0,0,12,13,0,0,0,0,0,2,16,7,0,0,0,0,7 +0,0,8,15,11,1,0,0,0,1,14,14,14,10,0,0,0,3,16,3,0,9,3,0,0,5,14,0,0,6,6,0,0,5,13,0,0,7,7,0,0,4,12,0,0,13,6,0,0,1,16,13,16,12,0,0,0,0,7,16,12,1,0,0,0 +0,0,0,7,12,1,0,0,0,0,7,16,9,1,0,0,0,0,12,11,0,0,0,0,0,0,14,4,0,0,0,0,0,0,15,1,0,0,0,0,0,0,11,13,16,16,8,0,0,0,9,16,13,11,16,3,0,0,0,9,12,13,9,0,6 +0,0,2,16,11,1,0,0,0,0,0,16,16,2,0,0,0,0,1,16,16,6,0,0,0,0,0,15,16,3,0,0,0,0,1,15,16,2,0,0,0,0,2,16,15,1,0,0,0,0,1,16,14,0,0,0,0,0,2,16,8,0,0,0,1 +0,1,14,7,0,0,0,0,0,8,16,2,0,3,5,0,0,10,12,0,1,14,11,0,0,9,15,1,9,16,3,0,0,1,15,16,16,8,0,0,0,0,5,16,13,5,0,0,0,0,12,13,0,0,0,0,0,2,16,6,0,0,0,0,4 +0,2,16,16,16,7,0,0,0,0,7,8,11,16,3,0,0,0,0,2,12,16,4,0,0,0,1,16,16,6,0,0,0,0,2,12,16,10,0,0,0,0,0,0,4,16,4,0,0,0,10,8,11,16,7,0,0,1,14,16,16,12,1,0,3 
+0,0,9,14,16,13,2,0,0,8,16,16,14,9,1,0,0,15,16,14,4,0,0,0,0,4,12,13,16,6,0,0,0,0,0,0,8,15,0,0,0,0,0,0,5,16,3,0,0,1,11,10,15,11,0,0,0,0,9,16,13,3,0,0,5 +0,2,7,16,15,8,0,0,0,5,16,11,15,12,0,0,0,0,11,15,13,0,0,0,0,0,3,16,1,0,0,0,0,0,9,16,1,0,0,0,0,0,13,10,5,0,0,0,0,0,14,9,8,0,0,0,0,0,6,16,5,0,0,0,8 +0,0,3,12,10,0,0,0,0,0,10,11,5,10,0,0,0,1,16,7,0,10,2,0,0,2,16,2,0,6,6,0,0,3,15,10,0,7,7,0,0,0,12,7,0,10,5,0,0,0,13,2,6,15,1,0,0,0,3,15,14,7,0,0,0 +0,0,0,12,16,13,0,0,0,0,6,16,10,5,0,0,1,14,2,6,0,0,0,0,0,12,11,0,0,0,0,0,0,3,14,11,1,0,0,0,0,0,3,13,11,0,0,0,0,0,0,5,16,6,0,0,0,0,0,13,16,6,0,0,5 +0,0,1,16,12,1,0,0,0,0,0,16,16,5,0,0,0,0,2,16,16,7,0,0,0,0,4,16,16,0,0,0,0,0,9,16,12,0,0,0,0,0,13,16,8,0,0,0,0,0,10,16,10,0,0,0,0,0,2,15,16,5,0,0,1 +0,0,3,12,15,7,0,0,0,0,4,9,3,12,0,0,0,0,13,5,11,5,0,0,0,0,3,16,11,0,0,0,0,0,2,16,9,0,0,0,0,0,10,6,11,6,0,0,0,0,13,3,7,12,0,0,0,0,4,15,13,8,0,0,8 +0,0,0,13,9,0,5,1,0,0,11,13,1,4,15,2,0,4,16,1,0,13,10,0,0,11,14,8,10,16,4,0,0,5,15,16,16,13,2,0,0,0,0,7,14,0,0,0,0,0,0,11,8,0,0,0,0,0,0,14,2,0,0,0,4 +0,0,4,14,11,1,0,0,0,0,9,8,10,9,0,0,0,0,7,0,14,4,0,0,0,0,9,11,14,1,0,0,0,0,1,15,8,0,0,0,0,0,3,13,12,5,0,0,0,0,8,8,4,14,0,0,0,0,3,15,16,13,0,0,8 +0,0,9,16,16,10,1,0,0,10,16,9,4,16,4,0,0,13,12,0,8,14,5,0,0,7,16,15,16,12,0,0,0,0,1,4,15,13,0,0,0,0,0,10,16,6,0,0,0,0,5,16,10,0,0,0,0,0,9,16,1,0,0,0,9 +0,0,2,13,16,16,9,0,0,0,12,10,4,7,12,0,0,1,15,8,2,6,4,0,0,0,4,10,16,9,0,0,0,0,0,0,2,16,0,0,0,0,0,0,9,11,0,0,0,0,0,5,15,4,0,0,0,0,0,13,9,0,0,0,9 +0,0,3,13,12,2,0,0,0,0,14,16,13,12,0,0,0,2,16,12,0,12,4,0,0,6,15,0,0,10,6,0,0,3,13,0,0,7,9,0,0,3,12,0,1,12,6,0,0,1,13,9,13,16,2,0,0,0,5,14,13,4,0,0,0 +0,0,0,9,9,0,0,0,0,0,7,15,3,0,0,0,0,0,11,8,0,0,0,0,0,0,14,4,2,3,0,0,0,0,12,12,16,12,10,0,0,0,9,14,8,0,8,4,0,0,4,9,1,2,14,5,0,0,0,6,11,14,8,0,6 +0,0,2,7,14,14,2,0,0,2,15,9,5,15,3,0,0,2,16,8,8,15,0,0,0,0,4,7,11,15,2,0,0,0,0,0,0,16,6,0,0,0,0,0,10,13,1,0,0,0,0,6,14,2,0,0,0,0,0,10,4,0,0,0,9 +0,0,0,11,12,1,0,0,0,0,2,14,16,3,0,0,0,0,5,16,11,0,0,0,0,0,7,16,8,0,0,0,0,0,6,16,8,0,0,0,0,0,10,16,7,0,0,0,0,0,7,16,9,4,0,0,0,0,0,10,16,12,1,0,1 +0,3,13,16,16,16,15,1,0,3,9,5,6,16,11,0,0,0,0,0,10,14,1,0,0,0,0,6,16,3,0,0,0,0,0,14,11,0,0,0,0,0,8,15,2,0,0,0,0,1,14,12,0,0,0,0,0,5,16,5,0,0,0,0,7 +0,1,13,16,16,7,0,0,0,0,10,8,15,12,0,0,0,0,0,1,16,7,0,0,0,1,13,15,16,13,7,0,0,0,8,16,14,12,8,0,0,0,8,16,1,0,0,0,0,0,14,11,0,0,0,0,0,1,16,4,0,0,0,0,7 +0,0,2,13,12,1,0,0,0,0,15,12,9,10,0,0,0,2,16,2,0,9,1,0,0,4,16,1,0,4,5,0,0,2,16,4,0,2,9,0,0,0,14,0,0,6,8,0,0,0,12,7,5,15,4,0,0,0,2,13,15,6,0,0,0 +0,0,3,14,3,0,1,8,0,0,9,15,0,1,13,11,0,0,14,14,0,9,14,2,0,0,14,16,16,16,7,0,0,0,3,13,16,11,1,0,0,0,0,7,15,1,0,0,0,0,0,14,10,0,0,0,0,0,2,15,4,0,0,0,4 +0,0,1,11,7,0,2,1,0,0,10,13,0,1,13,6,0,4,16,3,0,10,12,0,0,3,16,11,9,16,3,0,0,0,9,16,16,8,0,0,0,0,0,9,11,0,0,0,0,0,0,14,3,0,0,0,0,0,0,14,4,0,0,0,4 +0,0,2,15,9,0,0,0,0,0,3,16,12,8,0,0,0,0,8,14,3,10,2,0,0,0,14,5,0,6,6,0,0,2,15,0,0,3,9,0,0,1,15,0,0,1,12,0,0,0,12,9,5,11,11,0,0,0,2,14,16,10,3,0,0 +0,0,3,15,14,3,0,0,0,0,13,8,2,11,0,0,0,3,16,7,0,10,1,0,0,4,14,0,0,7,5,0,0,2,12,0,0,5,7,0,0,2,13,0,0,10,5,0,0,0,13,3,6,14,1,0,0,0,3,15,14,4,0,0,0 +0,0,2,13,1,0,0,0,0,0,11,10,1,0,0,0,0,2,15,1,0,0,0,0,0,6,11,1,4,1,0,0,0,7,10,9,16,14,1,0,0,4,12,7,6,7,10,0,0,1,13,6,2,10,14,0,0,0,3,12,13,13,3,0,6 +0,0,5,14,12,5,0,0,0,0,3,16,16,9,0,0,0,0,2,16,16,7,0,0,0,0,5,16,16,3,0,0,0,0,10,16,16,4,0,0,0,0,9,16,14,1,0,0,0,0,8,16,16,2,0,0,0,0,8,15,13,4,0,0,1 +0,0,11,14,10,3,0,0,0,0,6,8,12,15,3,0,0,0,0,0,6,16,5,0,0,0,0,6,15,12,1,0,0,0,5,16,13,1,0,0,0,0,15,13,0,0,0,0,0,5,16,8,4,2,0,0,0,1,10,16,16,10,0,0,2 
+0,0,2,10,16,16,2,0,0,1,16,15,5,1,0,0,0,8,13,0,0,0,0,0,0,12,11,0,0,0,0,0,0,5,16,7,0,0,0,0,0,0,3,13,12,0,0,0,0,0,0,4,16,0,0,0,0,0,0,14,12,0,0,0,5 +0,0,0,3,15,1,0,0,0,0,0,12,11,1,5,0,0,0,6,16,2,6,16,0,0,3,16,7,0,16,6,0,0,10,16,12,14,16,5,0,0,11,14,12,16,13,1,0,0,0,0,0,16,6,0,0,0,0,0,5,14,2,0,0,4 +0,1,8,12,15,6,0,0,0,2,12,8,13,15,2,0,0,0,0,0,13,16,1,0,0,0,0,6,16,10,0,0,0,0,6,16,11,0,0,0,0,0,13,16,3,0,0,0,0,0,16,12,2,6,0,0,0,0,8,14,16,10,2,0,2 +0,0,13,16,16,8,0,0,0,0,7,5,10,8,0,0,0,0,0,0,14,4,0,0,0,0,7,10,16,13,7,0,0,0,15,16,13,15,10,0,0,0,0,16,1,0,0,0,0,0,8,12,0,0,0,0,0,0,16,3,0,0,0,0,7 +0,1,10,13,13,12,1,0,0,6,13,4,4,13,8,0,0,0,1,1,7,16,3,0,0,0,0,15,13,4,0,0,0,0,0,8,15,3,0,0,0,0,0,0,10,13,1,0,0,0,0,1,9,16,2,0,0,1,13,16,12,7,0,0,3 +0,3,11,16,16,12,0,0,0,5,9,10,16,16,3,0,0,0,0,8,16,7,0,0,0,0,0,12,14,0,0,0,0,0,0,4,16,9,0,0,0,0,0,0,5,16,6,0,0,0,6,8,12,16,7,0,0,1,13,11,8,3,0,0,3 +0,5,16,16,16,3,0,0,0,8,7,5,16,2,0,0,0,3,5,10,14,0,0,0,0,7,16,16,15,12,6,0,0,0,9,14,11,14,6,0,0,0,16,6,0,0,0,0,0,3,15,2,0,0,0,0,0,7,12,0,0,0,0,0,7 +0,0,0,9,16,7,0,0,0,0,3,16,16,7,0,0,0,0,11,16,16,0,0,0,0,1,14,16,16,1,0,0,0,0,14,16,14,0,0,0,0,0,10,16,13,1,0,0,0,0,6,16,16,6,0,0,0,0,3,8,15,13,1,0,1 +0,0,8,15,16,11,0,0,0,13,16,12,6,4,0,0,1,16,7,0,0,0,0,0,0,12,14,1,0,0,0,0,0,1,14,10,0,0,0,0,0,0,4,16,4,0,0,0,0,0,2,14,8,0,0,0,0,0,11,15,3,0,0,0,5 +0,0,13,3,0,0,0,0,0,4,16,4,0,6,12,0,0,9,15,0,6,16,7,0,0,6,16,11,16,9,0,0,0,0,9,16,14,3,0,0,0,0,6,15,3,0,0,0,0,0,13,9,0,0,0,0,0,0,15,7,0,0,0,0,4 +0,0,0,0,12,15,2,0,0,0,0,0,12,16,4,0,0,2,4,6,16,16,2,0,0,9,16,16,16,16,0,0,0,1,8,5,15,16,0,0,0,0,0,0,16,16,4,0,0,0,0,0,15,16,5,0,0,0,0,0,9,16,8,0,1 +0,0,8,13,12,6,0,0,0,1,14,13,12,6,0,0,0,4,12,2,2,0,0,0,0,5,16,16,16,13,1,0,0,0,3,1,2,15,7,0,0,0,0,0,0,12,8,0,0,0,6,8,8,15,4,0,0,0,7,13,12,5,0,0,5 +0,0,6,14,16,16,4,0,0,0,11,12,13,16,7,0,0,0,0,0,9,15,3,0,0,0,1,6,14,14,2,0,0,0,10,16,16,16,9,0,0,0,2,15,11,4,1,0,0,0,4,16,5,0,0,0,0,0,9,13,0,0,0,0,7 +0,0,8,11,14,10,0,0,0,0,14,15,7,8,0,0,0,1,14,4,0,0,0,0,0,8,16,9,8,3,0,0,0,4,12,12,14,16,4,0,0,0,0,0,0,13,8,0,0,0,4,8,12,15,4,0,0,0,15,14,12,3,0,0,5 +0,2,10,15,9,1,0,0,0,10,13,9,16,4,0,0,0,2,0,5,16,0,0,0,0,0,0,12,16,9,0,0,0,0,0,3,7,16,7,0,0,0,0,0,0,11,9,0,0,0,12,9,9,16,8,0,0,0,13,12,12,6,0,0,3 +0,0,0,0,9,7,0,0,0,0,0,9,12,0,0,0,0,0,3,15,2,2,1,0,0,2,15,4,0,14,4,0,0,7,14,0,4,16,0,0,0,8,16,16,16,16,4,0,0,0,0,3,16,5,0,0,0,0,0,0,13,6,0,0,4 +0,0,3,16,14,3,0,0,0,0,12,14,14,12,0,0,0,2,16,6,0,15,2,0,0,7,10,0,0,10,4,0,0,8,8,0,0,11,5,0,0,7,10,0,0,13,3,0,0,5,16,13,15,11,0,0,0,0,7,14,13,1,0,0,0 +0,0,0,0,8,11,1,0,0,0,0,0,10,16,4,0,0,0,0,8,16,16,0,0,0,0,6,15,16,16,0,0,0,5,15,8,16,14,0,0,0,0,0,0,13,15,0,0,0,0,0,0,12,16,1,0,0,0,0,0,7,15,9,0,1 +0,0,4,12,12,6,0,0,0,0,3,6,4,8,0,0,0,3,13,6,8,3,0,0,0,4,16,13,12,15,2,0,0,0,3,0,0,10,8,0,0,0,0,0,0,8,6,0,0,0,7,6,5,14,2,0,0,0,3,14,15,6,0,0,5 +0,0,9,14,13,6,0,0,0,4,12,5,8,16,0,0,0,0,0,0,4,13,0,0,0,0,0,3,15,7,0,0,0,0,0,10,16,15,2,0,0,0,0,0,0,13,7,0,0,0,12,4,5,13,2,0,0,0,13,16,14,6,0,0,3 +0,0,8,16,12,0,0,0,0,6,16,15,10,0,0,0,0,4,16,14,14,16,6,0,0,0,12,16,15,5,0,0,0,0,12,16,11,0,0,0,0,0,16,10,16,6,0,0,0,0,16,5,13,12,0,0,0,0,10,15,11,1,0,0,8 +0,0,0,0,8,11,0,0,0,0,0,2,14,6,0,0,0,0,0,11,9,2,3,0,0,0,9,12,1,11,9,0,0,4,16,6,0,13,5,0,0,8,16,16,16,16,0,0,0,0,0,3,8,15,0,0,0,0,0,0,6,12,0,0,4 +0,0,10,16,16,15,0,0,0,3,15,9,8,4,0,0,0,9,13,6,2,0,0,0,0,8,16,16,16,11,0,0,0,1,8,5,10,16,7,0,0,0,0,0,0,12,9,0,0,0,4,9,14,16,4,0,0,0,11,13,11,5,0,0,5 +0,0,9,14,6,0,0,0,0,0,16,14,16,6,0,0,0,0,13,12,13,16,1,0,0,0,6,13,10,16,2,0,0,0,0,0,0,12,7,0,0,0,5,1,0,12,8,0,0,1,16,10,8,15,7,0,0,0,8,15,16,11,1,0,9 
+0,0,7,13,11,3,0,0,0,2,15,8,8,2,0,0,0,6,11,3,3,0,0,0,0,7,16,16,16,13,2,0,0,1,8,5,3,11,10,0,0,0,0,0,0,2,14,0,0,0,2,5,4,10,15,0,0,0,8,16,16,14,4,0,5 +0,0,0,10,9,0,0,0,0,0,0,15,9,0,0,0,0,0,6,14,2,11,4,0,0,2,14,7,9,15,2,0,0,11,16,8,14,13,1,0,0,12,16,16,16,16,6,0,0,2,4,10,16,3,0,0,0,0,0,11,15,0,0,0,4 +0,0,7,16,10,1,0,0,0,4,16,11,14,13,0,0,0,6,16,4,10,16,5,0,0,2,15,16,12,14,8,0,0,0,0,0,0,12,8,0,0,0,0,0,0,10,8,0,0,0,4,8,8,14,8,0,0,0,8,15,14,11,0,0,9 +0,0,3,10,16,7,0,0,0,0,15,12,8,15,0,0,0,5,14,2,10,16,6,0,0,2,14,16,11,3,0,0,0,0,6,16,6,0,0,0,0,0,12,14,15,2,0,0,0,0,11,8,16,8,0,0,0,0,3,14,14,3,0,0,8 +0,0,12,16,15,5,0,0,0,3,15,8,7,5,0,0,0,9,13,8,8,2,0,0,0,4,15,12,15,16,3,0,0,0,0,0,1,14,8,0,0,0,1,0,0,11,10,0,0,2,16,9,9,15,3,0,0,0,8,11,13,4,0,0,5 +0,1,10,16,6,0,0,0,0,4,12,8,12,0,0,0,0,1,0,2,11,0,0,0,0,0,0,9,16,9,0,0,0,0,0,5,8,16,5,0,0,0,0,0,0,8,9,0,0,2,8,8,7,11,9,0,0,1,13,16,16,10,1,0,3 +0,0,9,14,14,9,1,0,0,7,16,10,4,12,0,0,0,8,16,6,6,15,4,0,0,0,11,16,16,9,1,0,0,0,7,16,16,0,0,0,0,1,15,13,15,12,0,0,0,4,16,8,12,15,0,0,0,1,8,13,13,4,0,0,8 +0,0,0,8,14,0,0,0,0,0,0,12,12,0,0,0,0,0,1,15,9,5,0,0,0,0,11,15,15,12,0,0,0,3,16,11,16,10,2,0,0,13,16,16,16,16,8,0,0,5,8,12,16,7,0,0,0,0,0,8,16,2,0,0,4 +0,0,6,15,13,2,0,0,0,5,15,7,9,10,0,0,0,11,9,1,10,12,0,0,0,2,13,16,13,15,4,0,0,0,0,0,0,13,8,0,0,0,0,0,0,8,11,0,0,0,6,8,4,11,11,0,0,0,5,14,16,14,1,0,9 +0,0,13,16,10,0,0,0,0,3,16,11,16,5,0,0,0,0,15,13,15,13,0,0,0,0,5,12,11,16,2,0,0,0,0,0,0,14,10,0,0,0,0,0,0,5,16,0,0,0,7,8,11,13,16,2,0,0,15,16,16,16,11,0,9 +0,0,10,16,9,0,0,0,0,8,14,8,16,0,0,0,0,10,2,7,13,0,0,0,0,0,0,10,16,9,0,0,0,0,0,2,9,16,5,0,0,0,0,0,0,9,11,0,0,1,11,5,7,16,6,0,0,0,11,15,14,7,0,0,3 +0,0,8,14,13,7,0,0,0,1,16,12,9,16,2,0,0,0,11,11,1,9,0,0,0,0,3,16,16,12,0,0,0,0,6,16,16,2,0,0,0,1,15,10,13,8,0,0,0,1,16,5,9,12,0,0,0,0,11,12,11,3,0,0,8 +0,0,0,0,8,15,1,0,0,0,0,0,14,16,5,0,0,0,2,10,16,16,4,0,0,9,16,16,16,16,5,0,0,5,7,4,12,16,4,0,0,0,0,0,8,16,5,0,0,0,0,0,9,16,4,0,0,0,0,0,7,16,5,0,1 +0,0,0,0,10,9,0,0,0,0,0,5,15,3,0,0,0,0,3,16,6,2,1,0,0,0,13,9,0,11,6,0,0,6,16,1,1,16,4,0,0,8,16,14,14,16,4,0,0,1,6,7,15,13,0,0,0,0,0,0,10,9,0,0,4 +0,0,0,6,14,10,0,0,0,0,5,16,11,7,0,0,0,0,12,13,0,0,0,0,0,5,16,3,0,0,0,0,0,4,16,16,15,7,0,0,0,4,16,11,8,15,5,0,0,1,13,14,8,11,14,0,0,0,0,7,14,16,9,0,6 +0,0,0,0,9,13,0,0,0,0,0,7,16,3,0,0,0,0,5,16,4,3,5,0,0,3,13,7,0,10,14,0,0,10,16,16,16,16,11,0,0,5,8,10,13,16,7,0,0,0,0,0,7,16,4,0,0,0,0,0,10,11,0,0,4 +0,0,9,14,11,1,0,0,0,3,15,5,5,10,0,0,0,6,14,4,7,12,0,0,0,1,10,16,16,7,0,0,0,0,11,14,12,13,1,0,0,0,16,1,0,8,6,0,0,0,16,5,4,10,8,0,0,0,7,16,16,12,1,0,8 +0,0,0,1,15,3,0,0,0,0,0,12,15,0,0,0,0,0,8,16,1,5,12,0,0,4,15,11,3,12,13,0,0,11,16,16,16,16,7,0,0,3,8,10,15,16,1,0,0,0,0,0,15,10,0,0,0,0,0,1,16,5,0,0,4 +0,2,8,12,15,15,2,0,0,13,16,16,13,12,3,0,0,12,16,9,0,0,0,0,0,6,16,16,7,0,0,0,0,0,4,10,15,0,0,0,0,0,2,4,16,4,0,0,0,4,16,16,13,0,0,0,0,0,15,15,5,0,0,0,5 +0,1,9,16,13,2,0,0,0,7,13,7,8,12,0,0,0,10,11,0,4,16,0,0,0,2,16,16,16,12,0,0,0,0,14,15,16,14,1,0,0,0,15,6,0,9,9,0,0,0,13,12,8,12,9,0,0,0,6,13,12,11,4,0,8 +0,0,0,9,12,0,0,0,0,0,4,15,6,1,8,0,0,1,13,12,0,11,14,1,0,10,16,6,5,16,9,0,0,9,16,16,16,16,10,0,0,0,1,5,16,11,0,0,0,0,0,4,16,5,0,0,0,0,0,12,11,0,0,0,4 +0,1,8,10,14,15,6,0,0,6,16,11,7,5,2,0,0,3,16,8,0,0,0,0,0,2,16,16,10,0,0,0,0,0,1,3,13,2,0,0,0,0,0,0,9,4,0,0,0,0,11,11,14,1,0,0,0,0,11,9,2,0,0,0,5 +0,0,4,12,7,0,0,0,0,0,13,11,12,4,0,0,0,0,8,0,3,10,0,0,0,0,0,0,2,13,0,0,0,0,0,0,9,8,0,0,0,0,0,3,16,1,0,0,0,0,8,16,15,14,14,1,0,0,6,12,12,10,10,2,2 
+0,0,5,16,16,10,0,0,0,0,9,16,16,15,0,0,0,0,7,16,16,11,0,0,0,0,9,16,16,11,0,0,0,0,8,16,16,12,0,0,0,0,10,16,16,10,0,0,0,0,14,16,16,12,0,0,0,0,8,13,16,8,0,0,1 +0,0,3,4,4,2,0,0,0,1,14,16,15,2,0,0,0,4,16,16,14,0,0,0,0,4,16,16,15,0,0,0,0,2,15,16,15,1,0,0,0,0,11,16,16,5,0,0,0,0,5,16,16,12,0,0,0,0,6,12,8,5,0,0,1 +0,3,14,14,2,0,0,0,0,11,16,16,13,0,0,0,0,3,2,4,14,6,0,0,0,0,0,0,13,8,0,0,0,0,0,7,15,2,0,0,0,0,6,15,7,0,0,0,0,4,16,16,10,8,5,0,0,2,11,15,16,15,12,0,2 +0,0,6,16,11,1,0,0,0,0,16,13,16,11,0,0,0,5,10,0,3,13,2,0,0,7,8,0,0,4,7,0,0,8,8,0,0,6,8,0,0,4,11,2,4,15,5,0,0,0,14,16,16,15,1,0,0,0,5,12,10,2,0,0,0 +0,0,8,16,8,0,0,0,0,2,15,16,16,6,0,0,0,4,13,1,6,15,1,0,0,7,13,0,0,13,5,0,0,8,12,0,0,8,8,0,0,7,14,1,4,12,8,0,0,2,15,16,16,16,4,0,0,0,7,16,16,10,0,0,0 +0,0,4,10,14,14,8,0,0,7,16,15,11,8,4,0,0,2,15,9,3,0,0,0,0,0,12,16,16,7,0,0,0,0,0,4,9,16,4,0,0,0,0,0,0,13,8,0,0,0,7,12,11,16,5,0,0,0,4,16,10,5,0,0,5 +0,0,7,14,16,16,7,0,0,0,12,14,9,8,2,0,0,0,14,4,0,0,0,0,0,3,16,16,9,0,0,0,0,8,16,14,16,3,0,0,0,2,2,0,12,8,0,0,0,0,6,15,15,2,0,0,0,0,11,11,3,0,0,0,5 +0,0,3,10,11,5,0,0,0,8,16,10,8,5,0,0,0,11,11,0,0,0,0,0,0,8,14,11,8,4,0,0,0,0,3,7,9,14,9,0,0,0,0,0,0,5,13,0,0,0,1,4,10,16,5,0,0,0,2,13,10,2,0,0,5 +0,2,13,13,1,0,0,0,0,3,14,13,13,0,0,0,0,0,2,0,16,0,0,0,0,0,0,0,16,0,0,0,0,0,0,9,8,0,0,0,0,0,4,15,3,0,0,0,0,2,16,16,9,7,6,0,0,0,11,12,15,16,12,0,2 +0,0,8,14,15,11,0,0,0,1,16,14,8,8,0,0,0,2,16,10,2,0,0,0,0,1,12,16,14,2,0,0,0,0,0,0,11,10,0,0,0,0,0,0,7,13,0,0,0,0,14,14,16,3,0,0,0,0,12,10,5,0,0,0,5 +0,0,2,11,16,13,1,0,0,1,16,11,4,12,8,0,0,0,14,8,3,11,8,0,0,0,13,16,16,14,1,0,0,0,10,16,10,15,3,0,0,0,14,7,0,10,5,0,0,0,11,12,8,14,4,0,0,0,3,14,12,10,1,0,8 +0,0,1,10,11,0,0,0,0,0,7,15,3,0,9,1,0,2,16,7,0,6,15,2,0,8,16,7,4,13,11,0,0,8,16,16,16,16,2,0,0,1,4,7,16,12,0,0,0,0,0,4,16,4,0,0,0,0,0,14,9,0,0,0,4 +0,1,10,16,16,15,0,0,0,2,15,10,12,15,0,0,0,0,0,1,15,8,0,0,0,0,8,16,16,16,13,0,0,0,6,16,10,4,2,0,0,0,4,16,4,0,0,0,0,0,9,16,1,0,0,0,0,0,13,13,0,0,0,0,7 +0,0,10,15,10,1,0,0,0,5,16,4,11,13,1,0,0,5,16,9,12,16,7,0,0,1,10,12,8,14,8,0,0,0,0,0,0,14,5,0,0,0,0,0,10,13,1,0,0,0,5,12,15,2,0,0,0,0,13,10,2,0,0,0,9 +0,0,1,12,12,11,0,0,0,0,12,16,16,7,0,0,0,0,12,16,16,4,0,0,0,0,12,16,16,0,0,0,0,0,12,16,16,4,0,0,0,0,7,16,16,7,0,0,0,0,4,16,16,15,1,0,0,0,3,10,10,4,0,0,1 +0,0,3,8,10,12,6,0,0,0,13,15,11,8,2,0,0,0,12,11,2,0,0,0,0,0,13,16,16,9,1,0,0,0,0,4,6,13,6,0,0,0,0,0,0,13,4,0,0,0,5,5,13,11,0,0,0,0,13,14,7,0,0,0,5 +0,1,9,15,5,0,0,0,0,4,16,15,13,0,0,0,0,1,2,1,15,2,0,0,0,0,0,0,16,0,0,0,0,0,0,5,13,0,0,0,0,0,4,15,3,0,1,0,0,0,12,16,12,14,4,0,0,0,11,16,13,9,2,0,2 +0,3,8,12,13,3,0,0,0,5,11,8,12,10,0,0,0,0,0,0,13,7,0,0,0,0,5,14,16,13,0,0,0,0,7,13,7,14,8,0,0,0,0,0,2,16,7,0,0,1,6,11,16,7,0,0,0,3,13,11,3,0,0,0,3 +0,2,12,15,6,0,0,0,0,6,13,12,16,5,0,0,0,0,3,9,16,5,0,0,0,0,7,16,16,12,0,0,0,0,0,4,8,16,5,0,0,0,0,0,0,13,8,0,0,2,13,8,14,15,3,0,0,2,15,15,10,1,0,0,3 +0,0,0,7,12,5,0,0,0,0,5,16,16,12,0,0,0,0,12,16,16,15,0,0,0,0,8,16,16,15,0,0,0,0,10,16,16,10,0,0,0,0,6,16,16,10,0,0,0,0,0,13,16,6,0,0,0,0,0,8,6,0,0,0,1 +0,0,8,13,13,16,8,0,0,1,15,15,12,16,4,0,0,0,0,0,10,14,0,0,0,0,1,5,16,10,3,0,0,0,8,16,16,13,6,0,0,0,1,13,9,0,0,0,0,0,4,16,7,0,0,0,0,0,9,13,1,0,0,0,7 +0,0,9,16,7,2,0,0,0,1,16,10,15,11,0,0,0,4,16,4,1,15,3,0,0,6,16,2,0,10,6,0,0,4,14,0,0,9,8,0,0,4,14,0,0,14,6,0,0,2,16,12,15,12,0,0,0,0,6,15,13,1,0,0,0 +0,0,2,7,13,12,2,0,0,0,10,13,6,12,6,0,0,2,16,4,1,13,8,0,0,0,14,14,15,15,10,0,0,0,0,3,1,12,5,0,0,0,0,0,5,11,0,0,0,0,0,2,14,2,0,0,0,0,0,9,8,0,0,0,9 
+0,0,4,15,16,5,0,0,0,4,16,10,10,15,0,0,0,10,16,7,4,15,0,0,0,1,4,3,13,9,0,0,0,0,0,9,16,16,4,0,0,0,0,1,0,11,11,0,0,0,2,12,4,13,9,0,0,0,4,16,16,12,1,0,3 +0,0,9,12,14,14,12,1,0,0,15,15,12,14,16,6,0,0,3,0,0,10,15,3,0,0,0,0,4,16,6,0,0,0,0,3,15,9,0,0,0,0,2,13,12,0,0,0,0,0,10,14,1,0,0,0,0,0,13,9,0,0,0,0,7 +0,0,5,15,14,2,0,0,0,2,15,14,13,15,0,0,0,7,16,2,1,16,5,0,0,8,14,0,0,12,10,0,0,8,13,0,0,8,12,0,0,8,15,0,0,14,10,0,0,2,16,11,10,16,2,0,0,0,8,16,16,6,0,0,0 +0,0,0,13,7,0,0,0,0,0,5,13,3,13,2,0,0,0,14,6,4,16,0,0,0,5,16,9,14,16,10,0,0,5,16,15,16,7,2,0,0,0,0,10,11,0,0,0,0,0,0,13,6,0,0,0,0,0,0,16,1,0,0,0,4 +0,0,3,16,15,4,0,0,0,0,11,13,10,16,2,0,0,6,16,6,0,12,8,0,0,10,16,3,0,11,10,0,0,10,16,1,0,13,8,0,0,4,16,4,3,15,4,0,0,0,12,11,14,15,0,0,0,0,3,15,16,4,0,0,0 +0,0,6,12,16,16,15,3,0,0,14,16,12,14,16,8,0,0,0,0,0,14,16,3,0,0,0,0,3,16,11,0,0,0,0,0,12,16,2,0,0,0,0,6,16,11,0,0,0,0,2,15,14,1,0,0,0,0,6,16,9,0,0,0,7 +0,0,10,16,16,16,15,4,0,0,10,12,8,11,16,7,0,0,0,0,1,12,14,1,0,0,0,1,13,13,1,0,0,0,0,13,12,1,0,0,0,0,6,14,3,0,0,0,0,0,10,10,0,0,0,0,0,0,14,2,0,0,0,0,7 +0,0,12,16,9,0,0,0,0,6,16,12,15,5,0,0,0,7,16,7,10,8,0,0,0,1,11,4,10,9,0,0,0,0,0,1,16,3,0,0,0,0,0,8,14,1,0,0,0,1,12,16,14,12,6,0,0,1,15,16,16,16,16,3,2 +0,0,4,11,13,6,0,0,0,2,16,12,5,12,0,0,0,2,14,0,8,12,0,0,0,0,11,14,13,3,0,0,0,0,14,14,11,0,0,0,0,2,13,1,10,6,0,0,0,0,14,4,7,11,0,0,0,0,2,13,15,5,0,0,8 +0,0,8,13,4,0,0,0,0,1,14,10,15,3,0,0,0,2,16,1,4,14,0,0,0,4,16,0,0,12,5,0,0,4,13,0,0,7,9,0,0,4,16,1,0,10,8,0,0,1,15,8,9,15,3,0,0,0,7,15,14,4,0,0,0 +0,0,9,14,0,0,0,0,0,4,16,8,0,0,0,0,0,7,14,1,0,0,0,0,0,9,11,0,4,5,1,0,0,9,12,13,16,16,11,0,0,4,16,15,8,11,14,0,0,3,16,11,9,16,6,0,0,0,8,16,16,8,0,0,6 +0,1,11,12,15,16,13,1,0,1,15,11,8,10,16,2,0,0,0,0,0,13,10,0,0,0,0,0,11,12,1,0,0,0,0,8,16,2,0,0,0,0,3,16,5,0,0,0,0,0,9,13,0,0,0,0,0,0,14,5,0,0,0,0,7 +0,0,2,10,14,2,0,0,0,0,10,13,6,0,0,0,0,0,14,4,0,0,0,0,0,1,16,0,0,0,0,0,0,4,16,16,16,11,0,0,0,5,16,6,5,13,9,0,0,0,15,9,6,13,7,0,0,0,4,14,14,6,0,0,6 +0,0,12,16,8,0,0,0,0,7,16,10,16,2,0,0,0,10,12,0,15,7,0,0,0,5,8,0,15,6,0,0,0,0,0,1,16,2,0,0,0,0,0,8,12,0,0,0,0,0,6,16,9,4,2,0,0,0,12,16,16,16,9,0,2 +0,0,2,12,7,0,0,0,0,0,11,16,5,0,0,0,0,0,16,9,0,0,0,0,0,0,15,11,4,3,0,0,0,0,16,16,16,16,6,0,0,0,15,13,0,10,14,0,0,0,11,15,8,16,11,0,0,0,1,11,14,11,1,0,6 +0,0,5,14,3,0,0,0,0,0,15,15,3,0,0,0,0,2,16,4,0,0,0,0,0,4,16,4,3,1,0,0,0,6,16,9,16,15,3,0,0,4,16,16,9,13,11,0,0,0,14,15,5,16,6,0,0,0,4,14,14,9,0,0,6 +0,0,0,8,13,2,0,0,0,0,12,14,9,0,0,0,0,1,15,5,0,0,0,0,0,5,15,0,0,0,0,0,0,3,15,13,16,15,4,0,0,2,16,11,4,8,13,0,0,0,9,12,5,11,12,0,0,0,0,9,14,13,4,0,6 +0,3,15,13,0,0,0,0,0,12,14,16,6,0,0,0,0,14,9,10,9,0,0,0,0,2,1,12,8,0,0,0,0,0,0,16,2,0,0,0,0,0,6,15,0,0,0,0,0,1,13,14,12,12,11,0,0,4,16,16,16,15,5,0,2 +0,0,0,8,13,16,9,0,0,0,9,12,4,13,8,0,0,1,14,4,10,16,3,0,0,1,15,16,10,14,9,0,0,0,0,0,0,11,7,0,0,0,0,0,3,14,1,0,0,0,0,1,14,6,0,0,0,0,0,11,8,0,0,0,9 +0,0,12,16,6,0,0,0,0,7,12,4,14,0,0,0,0,11,14,0,6,4,0,0,0,6,8,0,7,4,0,0,0,0,0,0,10,3,0,0,0,0,0,5,13,0,0,0,0,0,3,14,10,0,0,0,0,0,14,13,12,15,13,1,2 +0,0,9,16,16,16,7,0,0,5,16,13,6,2,1,0,0,11,15,14,8,0,0,0,0,6,15,12,16,8,0,0,0,0,0,0,10,15,0,0,0,1,13,3,5,15,0,0,0,3,16,9,15,9,0,0,0,0,8,16,14,1,0,0,5 +0,2,14,16,10,1,0,0,0,15,15,8,16,8,0,0,0,9,10,0,13,10,0,0,0,0,1,5,16,3,0,0,0,0,0,13,14,0,0,0,0,0,4,16,5,0,0,0,0,1,14,16,13,8,4,0,0,2,13,16,16,16,16,5,2 +0,0,7,16,16,10,1,0,0,0,8,16,16,13,1,0,0,0,6,16,16,12,0,0,0,0,4,16,16,8,0,0,0,0,7,16,16,5,0,0,0,1,14,16,16,3,0,0,0,4,16,16,8,0,0,0,0,2,14,16,14,2,0,0,1 
+0,0,9,12,16,7,0,0,0,10,11,2,0,16,0,0,0,3,2,0,10,8,0,0,0,0,0,7,15,5,0,0,0,0,0,7,9,14,2,0,0,0,0,0,0,8,10,0,0,0,2,2,4,14,5,0,0,0,14,16,14,2,0,0,3 +0,0,2,11,13,11,3,0,0,0,12,11,6,14,13,0,0,3,16,0,10,16,4,0,0,2,16,16,16,13,0,0,0,0,2,4,9,8,0,0,0,0,0,3,15,0,0,0,0,0,0,11,7,0,0,0,0,0,1,14,0,0,0,0,9 +0,0,7,15,12,3,0,0,0,0,6,16,16,8,0,0,0,0,7,16,16,7,0,0,0,0,7,16,16,4,0,0,0,0,6,16,16,6,0,0,0,0,2,16,16,9,0,0,0,0,4,16,16,6,0,0,0,0,5,16,10,3,0,0,1 +0,0,7,16,16,16,13,2,0,0,8,11,8,11,16,7,0,0,0,0,0,12,14,0,0,0,0,0,6,15,3,0,0,0,0,2,14,7,0,0,0,0,0,9,14,1,0,0,0,0,3,16,4,0,0,0,0,0,9,15,0,0,0,0,7 +0,0,6,14,9,0,0,0,0,3,16,9,15,12,3,0,0,1,16,6,16,10,0,0,0,0,6,16,16,6,0,0,0,1,13,10,10,15,1,0,0,3,16,1,0,11,11,0,0,1,15,2,3,14,5,0,0,0,5,15,15,6,0,0,8 +0,0,3,8,12,4,0,0,0,5,16,13,9,13,0,0,0,8,13,9,16,13,0,0,0,0,13,16,12,0,0,0,0,3,15,9,16,10,0,0,0,3,11,0,2,13,11,0,0,2,14,2,4,12,12,0,0,0,6,13,13,11,3,0,8 +0,0,15,12,7,15,6,0,0,8,16,16,16,14,5,0,0,10,16,5,1,0,0,0,0,8,16,9,1,0,0,0,0,0,5,15,13,0,0,0,0,0,0,5,16,4,0,0,0,0,0,10,15,0,0,0,0,0,15,16,6,0,0,0,5 +0,0,5,10,15,11,1,0,0,6,16,8,7,16,5,0,0,6,15,2,8,14,1,0,0,0,11,14,16,3,0,0,0,0,0,13,15,1,0,0,0,0,5,15,15,6,0,0,0,0,11,16,16,7,0,0,0,0,6,13,11,2,0,0,8 +0,0,0,9,12,0,0,0,0,0,4,16,11,0,0,0,0,0,12,15,2,1,5,0,0,5,16,8,1,14,13,0,0,10,16,12,15,16,9,0,0,6,14,14,16,14,0,0,0,0,0,3,16,9,0,0,0,0,0,7,16,4,0,0,4 +0,1,13,16,16,12,1,0,0,2,16,16,16,15,2,0,0,0,5,12,16,14,0,0,0,0,0,0,10,15,0,0,0,0,0,0,12,12,0,0,0,0,0,3,16,9,0,0,0,1,8,13,15,2,0,0,0,2,13,16,6,0,0,0,9 +0,2,12,15,16,12,1,0,0,0,8,16,6,14,2,0,0,0,14,11,0,0,0,0,0,1,15,6,0,0,0,0,0,0,8,14,4,0,0,0,0,0,0,8,15,0,0,0,0,0,2,10,16,2,0,0,0,0,12,16,6,0,0,0,5 +0,0,3,15,10,0,0,0,0,0,9,16,6,0,0,0,0,2,16,13,3,8,1,0,0,11,16,16,16,16,10,0,0,3,12,11,16,15,2,0,0,0,0,4,16,9,0,0,0,0,0,11,16,3,0,0,0,0,2,15,16,0,0,0,4 +0,0,4,16,16,4,0,0,0,0,10,16,16,13,0,0,0,0,3,13,11,16,2,0,0,0,0,0,0,13,8,0,0,0,0,0,0,11,9,0,0,0,0,0,0,13,9,0,0,0,0,0,8,16,5,0,0,0,3,16,16,11,0,0,9 +0,0,2,13,12,1,0,0,0,0,10,14,9,12,0,0,0,1,15,1,0,11,1,0,0,4,13,0,0,10,5,0,0,3,10,0,0,11,8,0,0,2,10,0,3,16,5,0,0,0,11,13,16,8,0,0,0,0,3,13,8,0,0,0,0 +0,2,16,16,12,12,11,0,0,0,3,9,13,16,14,2,0,0,0,1,13,16,4,0,0,0,0,14,15,3,0,0,0,0,0,9,16,6,0,0,0,0,0,1,16,12,0,0,0,0,3,9,16,10,0,0,0,4,16,16,7,0,0,0,3 +0,0,10,16,2,0,0,0,0,0,11,15,0,0,0,0,0,3,16,7,0,0,0,0,0,10,16,5,7,12,5,0,0,5,16,16,16,15,3,0,0,0,4,16,15,3,0,0,0,0,7,16,3,0,0,0,0,0,8,15,2,0,0,0,4 +0,1,9,14,5,0,0,0,0,7,9,7,11,8,4,0,0,4,9,5,14,7,2,0,0,0,11,14,4,0,0,0,0,0,10,15,5,0,0,0,0,2,14,2,15,1,0,0,0,4,13,2,15,2,0,0,0,0,12,16,7,0,0,0,8 +0,0,4,14,8,0,0,0,0,0,14,12,15,9,0,0,0,0,14,10,15,9,1,0,0,0,4,16,11,0,0,0,0,0,9,16,12,0,0,0,0,0,12,12,16,1,0,0,0,1,15,14,16,1,0,0,0,0,7,16,8,0,0,0,8 +0,0,7,16,15,3,0,0,0,0,14,16,16,13,0,0,0,0,9,16,16,16,3,0,0,0,0,6,9,16,9,0,0,0,0,0,0,16,11,0,0,0,0,0,6,16,8,0,0,0,0,4,14,16,2,0,0,0,8,16,15,7,0,0,9 +0,0,1,8,12,1,0,0,0,0,4,15,16,12,0,0,0,0,0,10,16,15,0,0,0,0,5,16,16,11,0,0,0,0,7,16,16,13,0,0,0,0,4,16,16,11,0,0,0,0,0,16,16,9,0,0,0,0,1,11,12,10,5,0,1 +0,0,6,16,13,11,5,0,0,0,1,8,10,16,15,0,0,0,0,0,1,14,13,0,0,0,0,0,7,16,5,0,0,3,4,1,12,16,0,0,0,12,16,16,16,13,2,0,0,0,3,14,15,0,0,0,0,0,6,16,7,0,0,0,7 +0,0,5,12,15,14,10,3,0,0,4,7,4,5,14,3,0,0,0,0,0,9,11,0,0,0,0,0,4,16,4,0,0,2,8,9,16,10,0,0,0,3,12,16,9,0,0,0,0,0,5,14,0,0,0,0,0,0,6,9,0,0,0,0,7 +0,0,8,15,11,3,0,0,0,4,15,8,16,16,6,0,0,7,13,4,15,11,1,0,0,0,14,14,10,1,0,0,0,0,8,16,6,0,0,0,0,0,11,13,16,3,0,0,0,0,16,6,16,4,0,0,0,0,11,16,13,2,0,0,8 
+0,0,0,12,16,0,0,0,0,0,3,16,13,0,0,0,0,1,11,15,1,2,6,0,0,6,16,6,1,13,13,0,0,5,16,14,12,16,5,0,0,0,7,14,16,7,0,0,0,0,0,11,13,0,0,0,0,0,0,15,8,0,0,0,4 +0,0,2,15,11,10,16,2,0,0,10,16,16,16,9,0,0,2,16,11,5,2,0,0,0,12,15,4,2,0,0,0,0,5,16,16,15,5,0,0,0,0,2,7,16,11,0,0,0,0,0,3,16,10,0,0,0,0,4,15,12,2,0,0,5 +0,0,10,15,9,12,5,0,0,8,16,14,16,16,5,0,0,12,14,3,0,0,0,0,0,3,15,16,9,0,0,0,0,0,0,8,16,3,0,0,0,0,0,3,16,5,0,0,0,0,1,10,15,3,0,0,0,0,10,16,4,0,0,0,5 +0,0,6,8,11,15,0,0,0,0,15,16,16,10,0,0,0,0,15,12,4,0,0,0,0,0,7,15,3,0,0,0,0,0,0,7,12,0,0,0,0,0,0,4,15,0,0,0,0,0,0,11,11,0,0,0,0,0,10,16,6,0,0,0,5 +0,0,6,11,0,0,0,0,0,0,13,7,0,5,16,2,0,1,16,7,11,16,9,0,0,0,6,12,16,11,0,0,0,0,0,7,14,2,0,0,0,0,0,14,8,0,0,0,0,0,6,16,2,0,0,0,0,0,9,13,0,0,0,0,4 +0,0,9,4,10,15,7,0,0,3,16,16,16,12,3,0,0,7,16,9,2,0,0,0,0,10,16,3,0,0,0,0,0,5,15,15,2,0,0,0,0,0,2,12,12,0,0,0,0,0,5,12,11,0,0,0,0,0,9,16,4,0,0,0,5 +0,0,9,16,11,1,0,0,0,1,16,9,12,13,7,0,0,2,16,5,13,11,6,0,0,0,9,16,12,0,0,0,0,0,5,16,6,0,0,0,0,0,13,15,10,0,0,0,0,0,16,16,8,0,0,0,0,0,11,15,1,0,0,0,8 +0,0,2,7,12,16,11,0,0,0,10,15,16,16,16,0,0,0,0,10,16,16,12,0,0,0,0,4,16,16,4,0,0,0,0,5,16,15,4,0,0,0,0,10,16,12,0,0,0,0,2,15,16,3,0,0,0,0,1,13,14,1,0,0,1 +0,0,0,11,2,0,0,0,0,0,4,16,0,0,0,0,0,0,11,7,0,2,0,0,0,3,15,2,7,15,9,0,0,9,16,16,16,15,3,0,0,6,8,6,16,8,0,0,0,0,0,8,10,0,0,0,0,0,0,15,5,0,0,0,4 +0,0,6,12,16,15,1,0,0,0,0,4,6,16,3,0,0,0,0,0,6,16,2,0,0,0,2,5,12,15,4,0,0,1,11,15,16,12,4,0,0,0,0,8,12,0,0,0,0,0,0,16,6,0,0,0,0,0,5,13,0,0,0,0,7 +0,0,7,12,15,15,2,0,0,3,16,12,8,16,5,0,0,0,0,0,6,16,0,0,0,0,0,0,14,15,6,0,0,0,1,15,16,16,9,0,0,0,0,13,10,2,0,0,0,0,2,16,3,0,0,0,0,0,10,15,0,0,0,0,7 +0,0,0,10,16,5,0,0,0,0,2,16,13,2,0,0,0,0,7,16,4,0,0,0,0,0,8,16,1,0,0,0,0,0,11,16,10,1,0,0,0,0,13,16,16,13,4,0,0,0,7,16,7,15,14,0,0,0,0,9,16,16,12,0,6 +0,0,8,16,6,0,0,0,0,3,15,13,1,0,0,0,0,8,16,4,0,2,1,0,0,9,16,8,10,16,11,0,0,1,13,16,16,15,5,0,0,0,4,16,16,3,0,0,0,0,8,16,10,0,0,0,0,0,7,16,10,0,0,0,4 +0,0,0,9,14,4,0,0,0,10,14,16,16,6,0,0,0,3,9,16,16,2,0,0,0,0,0,16,10,0,0,0,0,0,0,14,9,0,0,0,0,0,2,16,9,0,0,0,0,0,1,16,12,0,0,0,0,0,0,7,15,1,0,0,1 +0,0,1,15,0,0,0,0,0,0,7,16,0,0,0,0,0,0,10,11,0,0,0,0,0,0,14,10,0,0,0,0,0,0,16,13,12,9,1,0,0,1,16,15,12,15,11,0,0,0,9,14,4,13,10,0,0,0,0,11,16,14,2,0,6 +0,0,7,14,16,12,0,0,0,0,15,11,15,15,0,0,0,0,0,1,16,10,0,0,0,0,7,13,16,14,10,0,0,0,12,16,15,12,4,0,0,0,5,16,6,0,0,0,0,0,7,16,2,0,0,0,0,0,10,14,0,0,0,0,7 +0,0,7,9,13,14,4,0,0,0,9,7,6,16,8,0,0,0,0,1,13,10,0,0,0,0,0,14,14,1,0,0,0,0,0,10,16,4,0,0,0,0,0,2,11,14,0,0,0,1,10,9,8,16,3,0,0,1,9,13,12,8,0,0,3 +0,0,5,12,0,0,0,0,0,0,11,12,0,0,0,0,0,0,14,10,0,0,0,0,0,0,14,14,7,1,0,0,0,3,16,16,16,14,1,0,0,5,16,16,10,15,5,0,0,0,13,16,11,15,11,0,0,0,4,12,14,14,3,0,6 +0,0,4,12,12,15,7,0,0,0,7,12,12,15,9,0,0,0,0,0,1,13,5,0,0,0,3,8,10,16,1,0,0,0,8,16,16,16,5,0,0,0,0,6,12,1,0,0,0,0,0,14,5,0,0,0,0,0,3,15,0,0,0,0,7 +0,0,12,16,14,2,0,0,0,7,11,2,7,12,0,0,0,11,15,12,14,11,0,0,0,2,8,8,13,12,0,0,0,0,0,0,7,14,0,0,0,0,0,0,1,14,5,0,0,0,12,12,5,10,12,0,0,0,8,8,12,16,6,0,9 +0,0,0,8,12,11,0,0,0,0,7,16,16,8,0,0,0,0,7,16,16,9,0,0,0,0,8,16,16,6,0,0,0,0,5,16,16,8,0,0,0,0,9,16,16,8,0,0,0,0,5,16,16,11,0,0,0,0,0,12,12,5,0,0,1 +0,2,11,16,5,0,0,0,0,11,16,16,9,0,0,0,0,2,6,16,9,0,0,0,0,0,8,16,5,0,0,0,0,0,14,11,0,0,0,0,0,6,16,5,0,0,2,0,0,6,16,14,13,15,12,0,0,0,15,16,13,11,3,0,2 +0,0,3,9,15,8,0,0,0,1,15,16,16,7,0,0,0,0,5,16,16,10,4,0,0,0,3,16,16,16,9,0,0,0,0,15,14,4,0,0,0,0,0,13,5,0,0,0,0,0,1,15,3,0,0,0,0,0,4,13,0,0,0,0,7 
+0,0,4,15,16,7,0,0,0,0,13,12,16,9,0,0,0,0,2,1,16,7,0,0,0,0,0,5,15,3,0,0,0,0,0,14,10,0,0,0,0,0,6,16,2,0,0,0,0,0,9,16,11,7,0,0,0,0,5,16,16,10,0,0,2 +0,0,4,13,11,2,0,0,0,0,2,13,16,7,0,0,0,0,0,7,16,15,0,0,0,0,0,3,16,15,4,0,0,0,0,7,16,16,1,0,0,0,0,10,16,14,0,0,0,0,1,13,15,4,0,0,0,0,8,16,14,0,0,0,1 +0,0,4,14,16,10,0,0,0,0,3,8,14,15,2,0,0,0,0,0,10,16,5,0,0,0,6,11,15,16,7,0,0,0,7,14,16,13,7,0,0,0,0,12,12,0,0,0,0,0,4,16,6,0,0,0,0,0,5,14,3,0,0,0,7 +0,0,4,14,3,0,0,0,0,0,12,11,0,0,0,0,0,1,16,4,0,0,0,0,0,0,16,0,0,0,0,0,0,4,16,9,12,10,2,0,0,6,16,9,8,14,9,0,0,0,13,7,4,16,9,0,0,0,3,15,16,8,0,0,6 +0,0,0,11,16,14,3,0,0,0,8,16,16,16,6,0,0,1,16,6,0,0,0,0,0,5,16,8,7,1,0,0,0,3,16,16,16,11,0,0,0,0,1,5,10,16,2,0,0,0,0,7,12,14,0,0,0,0,0,13,15,3,0,0,5 +0,0,8,15,16,8,0,0,0,0,16,13,14,16,4,0,0,5,14,1,2,16,6,0,0,7,12,0,0,12,5,0,0,4,16,1,0,11,8,0,0,1,16,9,4,13,6,0,0,1,16,16,16,11,0,0,0,0,7,15,16,2,0,0,0 +0,0,0,12,12,0,0,0,0,0,2,16,10,0,0,0,0,0,9,16,6,0,0,0,0,3,16,11,3,12,2,0,0,12,16,16,16,16,7,0,0,3,8,13,16,13,1,0,0,0,0,9,16,9,0,0,0,0,0,11,16,5,0,0,4 +0,0,0,10,6,0,0,0,0,0,7,16,16,4,0,0,0,0,12,16,16,3,0,0,0,0,13,16,16,3,0,0,0,2,15,16,16,2,0,0,0,0,12,16,16,5,0,0,0,0,4,16,16,14,1,0,0,0,0,6,12,9,1,0,1 +0,0,4,15,7,0,0,0,0,3,16,11,2,0,0,0,0,11,11,0,8,11,1,0,0,7,14,11,15,2,0,0,0,1,13,16,4,0,0,0,0,0,8,16,8,0,0,0,0,0,8,16,16,2,0,0,0,0,2,12,15,5,0,0,8 +0,0,8,13,9,0,0,0,0,5,16,13,12,2,0,0,0,8,13,2,7,10,2,0,0,4,15,15,16,9,0,0,0,1,14,16,0,0,0,0,0,0,16,14,5,0,0,0,0,2,13,10,12,0,0,0,0,0,9,16,8,0,0,0,8 +0,0,3,11,16,16,13,0,0,0,15,16,13,11,5,0,0,3,16,8,1,0,0,0,0,5,16,11,4,0,0,0,0,1,8,15,16,3,0,0,0,0,0,5,16,5,0,0,0,0,1,10,16,2,0,0,0,0,5,11,7,0,0,0,5 +0,0,3,15,12,12,6,0,0,0,13,13,13,16,12,0,0,5,11,0,0,0,1,0,0,5,16,15,7,0,0,0,0,0,9,14,16,0,0,0,0,0,0,1,15,2,0,0,0,0,1,12,11,0,0,0,0,0,3,14,1,0,0,0,5 +0,0,5,12,9,1,0,0,0,4,16,9,11,6,1,0,0,1,15,4,1,12,4,0,0,0,6,6,13,14,0,0,0,0,3,16,16,4,0,0,0,0,7,16,16,1,0,0,0,0,10,16,16,7,0,0,0,0,8,12,12,4,0,0,8 +0,0,0,2,13,11,0,0,0,0,4,11,16,16,6,0,0,2,15,16,16,14,0,0,0,0,3,10,16,12,0,0,0,0,0,12,16,10,0,0,0,0,0,6,16,8,0,0,0,0,0,4,16,13,0,0,0,0,0,3,14,15,0,0,1 +0,0,4,13,11,0,0,0,0,0,1,13,16,7,0,0,0,0,0,0,14,9,0,0,0,0,1,6,16,14,5,0,0,1,13,16,16,10,3,0,0,2,8,13,10,0,0,0,0,0,1,16,2,0,0,0,0,0,4,13,1,0,0,0,7 +0,0,7,16,16,16,2,0,0,0,14,14,9,16,2,0,0,2,16,6,0,9,7,0,0,4,16,1,0,8,8,0,0,6,16,0,0,12,8,0,0,5,16,4,2,16,5,0,0,0,15,16,16,13,0,0,0,0,8,13,10,2,0,0,0 +0,0,13,16,7,0,0,0,0,2,16,16,15,0,0,0,0,0,4,14,16,0,0,0,0,0,0,12,13,0,0,0,0,0,0,15,12,0,0,0,0,0,11,16,1,1,1,0,0,0,16,16,16,16,6,0,0,0,13,16,16,12,0,0,2 +0,0,4,15,16,16,10,0,0,0,13,15,12,14,14,0,0,0,14,7,0,0,0,0,0,0,16,16,12,8,0,0,0,0,2,7,10,16,4,0,0,0,0,0,2,16,3,0,0,0,1,9,12,13,0,0,0,0,2,16,11,3,0,0,5 +0,0,3,14,16,16,6,0,0,0,8,16,16,16,9,0,0,0,0,0,7,16,6,0,0,0,0,0,12,15,1,0,0,0,2,13,16,16,7,0,0,0,2,16,16,14,2,0,0,0,6,16,9,0,0,0,0,0,7,14,2,0,0,0,7 +0,0,0,1,15,10,0,0,0,0,0,9,16,4,0,0,0,0,1,15,12,0,0,0,0,0,9,16,3,0,0,0,0,1,15,10,0,15,11,0,0,8,16,16,16,16,8,0,0,0,4,8,16,12,0,0,0,0,0,4,16,7,0,0,4 +0,1,11,16,16,13,2,0,0,6,16,16,8,13,8,0,0,0,10,16,14,15,8,0,0,0,0,7,12,14,5,0,0,0,0,0,0,8,4,0,0,0,0,0,0,11,4,0,0,0,8,8,9,15,4,0,0,2,13,16,16,9,1,0,9 +0,0,0,8,12,15,6,0,0,0,8,16,6,5,14,0,0,0,14,15,12,16,13,0,0,0,3,6,7,15,8,0,0,0,0,0,1,15,2,0,0,0,0,0,10,9,0,0,0,0,0,4,13,1,0,0,0,0,0,11,2,0,0,0,9 +0,0,8,13,16,14,0,0,0,3,16,10,16,13,0,0,0,0,2,11,16,6,0,0,0,0,2,16,15,8,0,0,0,0,1,13,16,16,7,0,0,0,0,1,8,16,7,0,0,0,5,16,16,11,0,0,0,0,9,14,5,0,0,0,3 
+0,0,1,7,14,15,2,0,0,2,12,14,8,14,7,0,0,3,14,13,5,14,5,0,0,0,5,15,16,15,2,0,0,0,0,10,16,10,0,0,0,0,2,14,14,12,0,0,0,0,4,16,14,12,0,0,0,0,0,12,13,6,0,0,8 +0,0,8,12,14,3,0,0,0,10,16,14,15,13,0,0,0,7,5,0,9,15,0,0,0,0,0,0,12,11,0,0,0,0,0,0,9,16,2,0,0,0,0,0,0,14,9,0,0,0,11,12,14,16,5,0,0,0,8,14,9,4,0,0,3 +0,0,2,14,5,0,0,0,0,0,10,13,4,0,0,0,0,0,12,9,0,0,0,0,0,0,12,8,0,0,0,0,0,0,12,14,16,15,3,0,0,0,13,14,6,9,10,0,0,0,13,12,7,14,8,0,0,0,2,15,15,10,1,0,6 +0,0,4,16,5,0,0,0,0,0,9,16,2,0,0,0,0,0,11,13,0,0,0,0,0,0,11,12,0,0,0,0,0,0,15,15,13,12,5,0,0,0,13,16,15,11,15,2,0,0,12,16,9,14,15,2,0,0,5,15,16,13,2,0,6 +0,2,11,14,4,0,0,0,0,0,16,13,16,4,0,0,0,0,0,3,16,3,0,0,0,0,0,11,12,0,0,0,0,0,4,16,5,0,0,0,0,1,15,12,0,0,0,0,0,5,16,10,8,11,2,0,0,2,14,16,13,9,5,0,2 +0,0,7,15,13,3,0,0,0,0,16,8,9,13,0,0,0,3,13,0,1,13,3,0,0,7,14,4,0,5,8,0,0,8,16,8,0,10,8,0,0,7,16,9,0,15,3,0,0,2,16,16,11,14,1,0,0,0,5,15,15,3,0,0,0 +0,0,4,15,10,2,0,0,0,0,3,16,15,4,0,0,0,0,6,16,16,2,0,0,0,0,7,16,11,0,0,0,0,0,9,16,7,0,0,0,0,0,12,16,4,0,0,0,0,0,9,16,4,0,0,0,0,0,4,15,8,0,0,0,1 +0,0,1,12,15,2,0,0,0,0,11,13,8,10,0,0,0,4,16,13,1,10,0,0,0,8,15,3,0,6,3,0,0,8,12,0,0,8,5,0,0,2,16,8,0,8,8,0,0,0,10,11,7,15,4,0,0,0,0,13,15,6,0,0,0 +0,0,7,15,14,3,0,0,0,4,16,13,14,13,0,0,0,8,16,5,0,12,4,0,0,8,16,8,0,8,8,0,0,8,16,5,0,9,8,0,0,5,16,10,7,16,3,0,0,0,13,16,16,7,0,0,0,0,6,15,11,1,0,0,0 +0,0,0,13,14,0,0,0,0,0,8,15,3,5,3,0,0,2,16,8,1,16,10,0,0,11,16,8,10,16,6,0,0,5,16,16,16,16,6,0,0,0,0,9,15,0,0,0,0,0,0,12,10,0,0,0,0,0,0,16,2,0,0,0,4 +0,1,4,8,14,16,6,0,0,11,16,16,16,13,3,0,0,9,16,5,2,0,0,0,0,3,16,11,3,0,0,0,0,0,6,16,16,5,0,0,0,0,0,2,16,8,0,0,0,0,0,12,16,3,0,0,0,0,0,12,11,0,0,0,5 +0,3,9,12,12,16,10,0,0,7,16,16,16,14,6,0,0,7,16,11,2,0,0,0,0,10,16,10,2,0,0,0,0,1,11,16,16,4,0,0,0,0,1,12,16,5,0,0,0,0,13,16,6,0,0,0,0,2,15,9,0,0,0,0,5 +0,0,5,10,12,16,16,2,0,0,10,16,16,10,6,0,0,0,5,16,5,0,0,0,0,0,0,12,16,2,0,0,0,0,0,0,12,12,0,0,0,0,0,0,13,9,0,0,0,0,1,12,13,2,0,0,0,0,7,13,2,0,0,0,5 +0,0,0,12,13,0,0,0,0,0,4,16,16,9,0,0,0,0,2,16,6,14,0,0,0,4,3,13,7,9,5,0,0,8,12,3,16,12,8,0,0,0,14,5,1,12,8,0,0,0,10,14,13,16,2,0,0,0,0,13,16,6,0,0,0 +0,0,0,5,14,13,3,0,0,0,11,16,16,16,9,0,0,2,16,16,16,16,11,0,0,0,6,8,8,13,8,0,0,0,0,0,1,14,3,0,0,0,0,0,9,9,0,0,0,0,0,1,13,2,0,0,0,0,0,5,6,0,0,0,9 +0,0,6,16,16,16,9,0,0,0,5,8,4,14,13,0,0,0,0,0,0,15,9,0,0,0,0,4,11,16,2,0,0,0,0,14,16,16,10,0,0,0,0,16,12,8,2,0,0,0,8,16,3,0,0,0,0,0,9,14,1,0,0,0,7 +0,0,6,16,15,3,0,0,0,4,16,13,16,6,0,0,0,3,6,0,16,5,0,0,0,0,0,6,16,1,0,0,0,0,0,14,11,0,0,0,0,0,8,14,1,0,0,0,0,0,12,14,8,6,2,0,0,0,8,16,16,16,3,0,2 +0,0,12,13,3,0,0,0,0,4,16,14,10,0,0,0,0,2,4,6,10,0,0,0,0,0,0,9,11,0,0,0,0,0,2,15,1,0,0,0,0,0,10,11,0,0,0,0,0,4,16,10,6,4,3,0,0,1,10,15,16,16,9,0,2 +0,1,9,16,16,7,0,0,0,7,15,12,15,12,0,0,0,0,2,0,9,14,0,0,0,0,0,0,15,8,0,0,0,0,5,13,16,14,6,0,0,5,16,16,14,8,2,0,0,0,11,16,6,0,0,0,0,0,11,13,2,0,0,0,7 +0,0,0,2,15,2,0,0,0,0,0,10,8,0,8,3,0,0,6,12,0,3,15,0,0,2,14,1,0,12,8,0,0,9,15,12,16,16,5,0,0,3,8,5,11,10,0,0,0,0,0,1,15,4,0,0,0,0,0,3,9,0,0,0,4 +0,0,1,13,3,0,0,0,0,0,4,15,1,0,0,0,0,0,9,10,0,0,0,0,0,0,10,11,0,0,0,0,0,0,11,12,6,5,0,0,0,0,14,16,16,16,8,0,0,0,16,16,11,12,13,0,0,0,3,8,14,10,6,0,6 +0,0,4,11,12,16,10,0,0,3,16,16,14,7,0,0,0,0,16,8,0,0,0,0,0,0,12,10,0,0,0,0,0,0,2,14,10,1,0,0,0,0,0,1,14,11,0,0,0,0,0,1,13,14,2,0,0,0,7,14,10,2,0,0,5 +0,0,3,10,14,14,3,0,0,0,15,6,9,16,4,0,0,0,1,5,14,6,0,0,0,0,7,16,9,1,0,0,0,0,2,9,13,15,1,0,0,0,0,0,4,16,2,0,0,0,0,7,15,6,0,0,0,0,0,14,7,0,0,0,3 +0,0,0,5,8,12,10,0,0,0,11,14,4,15,8,0,0,4,16,14,11,16,7,0,0,7,16,16,16,14,7,0,0,1,8,6,0,7,8,0,0,0,0,0,0,5,8,0,0,0,0,0,3,13,6,0,0,0,0,4,16,8,0,0,9 
+0,1,13,5,0,0,0,0,0,4,14,0,0,0,0,0,0,4,8,0,1,6,1,0,0,5,8,1,11,16,5,0,0,4,10,7,16,16,7,0,0,4,14,10,16,16,3,0,0,1,16,16,16,10,0,0,0,0,10,14,9,2,0,0,6 +0,0,2,13,15,1,0,0,0,2,13,15,5,9,0,0,0,7,15,2,0,8,0,0,0,8,15,0,0,5,4,0,0,7,16,0,0,9,6,0,0,0,14,7,0,13,7,0,0,0,10,15,14,14,0,0,0,0,0,14,14,5,0,0,0 +0,0,10,15,16,6,0,0,0,7,16,14,16,11,0,0,0,6,5,11,16,3,0,0,0,0,0,12,16,6,0,0,0,0,0,1,13,16,5,0,0,0,0,0,2,16,9,0,0,0,3,8,13,16,7,0,0,0,12,16,15,4,0,0,3 +0,0,6,14,14,3,0,0,0,9,16,3,8,13,0,0,0,8,15,13,16,16,2,0,0,1,12,12,9,14,5,0,0,0,0,0,0,10,6,0,0,0,0,0,0,10,5,0,0,0,0,1,6,13,2,0,0,0,7,12,7,2,0,0,9 +0,1,12,14,2,0,0,0,0,8,16,16,7,0,0,0,0,2,8,10,8,0,0,0,0,0,0,13,8,0,0,0,0,0,3,16,2,0,0,0,0,0,14,8,0,0,0,0,0,0,16,10,9,10,6,0,0,2,15,13,12,8,1,0,2 +0,0,5,15,10,2,0,0,0,0,5,16,16,11,0,0,0,0,1,16,16,16,1,0,0,0,3,14,16,11,0,0,0,0,6,16,16,10,0,0,0,0,6,16,16,2,0,0,0,0,5,16,16,9,0,0,0,0,4,16,14,10,0,0,1 +0,0,15,15,2,0,0,0,0,4,16,11,13,1,0,0,0,3,15,5,15,6,0,0,0,0,3,1,8,12,0,0,0,0,0,0,6,15,0,0,0,0,0,0,9,12,0,0,0,0,5,9,16,10,1,0,0,0,13,16,16,16,16,6,2 +0,0,0,9,13,0,0,0,0,0,5,16,6,0,0,0,0,1,13,11,1,0,2,0,0,8,16,1,0,10,13,0,0,11,16,4,10,16,5,0,0,12,16,16,16,10,0,0,0,0,4,5,16,3,0,0,0,0,0,10,16,0,0,0,4 +0,0,5,15,8,0,0,0,0,0,13,8,12,5,0,0,0,1,16,1,6,15,2,0,0,1,16,1,2,16,6,0,0,3,14,0,0,11,9,0,0,3,16,1,0,7,10,0,0,0,15,4,2,15,6,0,0,0,5,14,15,10,1,0,0 +0,0,12,7,0,0,0,0,0,3,16,4,0,0,0,0,0,7,15,0,0,0,0,0,0,8,12,3,8,2,0,0,0,12,14,16,16,16,4,0,0,9,16,13,3,9,13,0,0,3,16,9,5,13,12,0,0,0,10,16,16,13,1,0,6 +0,0,4,10,16,16,11,1,0,0,12,13,11,8,7,0,0,3,16,16,16,4,0,0,0,5,16,9,15,6,0,0,0,0,0,0,6,11,0,0,0,0,0,0,11,8,0,0,0,0,1,8,13,2,0,0,0,0,4,13,4,0,0,0,5 +0,0,0,9,13,0,0,0,0,0,8,15,1,0,0,0,0,2,14,10,0,2,15,3,0,6,16,4,1,13,13,0,0,2,16,15,13,14,3,0,0,0,5,11,16,8,0,0,0,0,0,10,14,1,0,0,0,0,0,13,12,0,0,0,4 +0,0,8,16,16,12,0,0,0,4,16,16,16,15,2,0,0,2,11,16,16,11,0,0,0,0,0,15,16,7,0,0,0,0,0,0,11,16,2,0,0,0,7,0,0,15,5,0,0,2,16,7,7,15,7,0,0,0,8,14,12,5,0,0,3 +0,0,10,7,0,0,0,0,0,0,15,9,0,0,0,0,0,2,16,2,0,0,0,0,0,7,16,1,5,3,0,0,0,8,16,13,16,15,2,0,0,8,16,16,12,16,8,0,0,3,16,15,12,16,4,0,0,0,9,16,15,8,0,0,6 +0,2,13,16,16,11,0,0,0,7,13,4,11,14,0,0,0,0,1,9,16,7,0,0,0,0,7,16,14,1,0,0,0,0,0,9,15,13,1,0,0,0,0,0,3,16,5,0,0,0,6,0,5,15,7,0,0,0,15,16,16,10,1,0,3 +0,0,5,16,5,0,0,0,0,0,11,15,2,0,0,0,0,1,16,10,0,0,0,0,0,6,16,3,0,0,0,0,0,8,16,15,16,14,2,0,0,8,16,12,8,13,11,0,0,8,16,12,2,13,9,0,0,0,5,13,16,15,1,0,6 +0,1,11,16,16,15,1,0,0,3,15,7,10,16,3,0,0,0,0,4,16,8,0,0,0,0,0,5,15,10,0,0,0,0,0,0,3,16,8,0,0,0,0,0,0,11,13,0,0,4,11,1,1,11,14,0,0,1,13,16,16,15,6,0,3 +0,0,7,16,14,2,0,0,0,0,13,13,9,13,0,0,0,0,11,10,0,14,5,0,0,0,5,14,0,12,9,0,0,0,0,0,0,12,8,0,0,0,0,0,4,16,5,0,0,0,2,8,14,15,0,0,0,0,8,16,16,16,16,4,2 +0,0,8,15,16,13,0,0,0,3,16,4,1,16,4,0,0,0,15,5,1,16,8,0,0,0,14,11,16,8,0,0,0,0,6,16,8,0,0,0,0,0,10,16,8,0,0,0,0,0,16,14,16,2,0,0,0,0,8,16,15,2,0,0,8 +0,1,12,16,15,2,0,0,0,6,16,8,14,10,0,0,0,1,8,3,15,7,0,0,0,0,1,16,16,5,0,0,0,0,0,3,13,16,4,0,0,0,6,0,2,14,8,0,0,1,16,7,7,15,7,0,0,0,12,16,16,10,1,0,3 +0,2,16,15,4,0,0,0,0,4,16,14,9,3,5,0,0,4,15,7,16,16,8,0,0,1,15,16,15,6,0,0,0,0,10,16,4,0,0,0,0,1,14,16,2,0,0,0,0,7,10,16,0,0,0,0,0,2,14,11,0,0,0,0,8 +0,1,14,16,9,0,0,0,0,1,11,15,15,2,0,0,0,0,1,13,16,2,0,0,0,0,10,16,16,15,9,0,0,0,9,16,13,10,3,0,0,0,5,16,1,0,0,0,0,0,15,13,0,0,0,0,0,1,16,4,0,0,0,0,7 +0,0,12,16,14,4,0,0,0,2,16,16,16,6,0,0,0,0,12,16,16,4,0,0,0,0,15,16,16,5,0,0,0,0,16,16,16,4,0,0,0,4,16,16,15,3,0,0,0,1,14,16,16,5,0,0,0,0,10,16,16,10,0,0,1 
+0,0,2,14,16,16,14,0,0,0,2,6,4,9,16,6,0,0,0,0,0,4,16,4,0,0,0,0,0,8,15,1,0,0,2,15,13,15,8,0,0,0,3,8,14,14,2,0,0,0,0,5,16,1,0,0,0,0,2,16,6,0,0,0,7 +0,3,14,16,7,1,0,0,0,12,16,9,16,11,0,0,0,12,16,11,16,12,0,0,0,1,10,12,15,16,0,0,0,0,0,0,4,16,4,0,0,0,0,0,0,13,11,0,0,1,6,4,8,16,11,0,0,2,9,16,16,10,1,0,9 +0,0,12,16,15,1,0,0,0,5,16,7,11,13,0,0,0,4,16,1,7,16,8,0,0,2,13,16,16,16,8,0,0,0,1,6,2,8,13,0,0,0,0,0,0,4,16,0,0,1,5,2,4,8,16,1,0,1,11,16,16,16,8,0,9 +0,0,0,12,16,15,3,0,0,0,4,10,12,11,2,0,0,6,14,0,0,0,0,0,0,8,16,15,5,0,0,0,0,1,8,13,16,2,0,0,0,0,0,0,10,9,0,0,0,0,5,11,13,11,0,0,0,0,1,14,12,2,0,0,5 +0,3,14,15,16,15,8,0,0,4,16,12,8,7,2,0,0,6,16,15,8,0,0,0,0,8,16,13,16,4,0,0,0,1,1,0,10,10,0,0,0,0,0,0,8,12,0,0,0,5,8,6,15,10,0,0,0,3,16,16,13,1,0,0,5 +0,0,5,15,15,11,0,0,0,0,16,5,0,11,5,0,0,0,15,3,0,12,10,0,0,0,5,13,0,12,6,0,0,0,0,10,16,9,0,0,0,0,0,10,16,4,0,0,0,0,6,12,8,9,0,0,0,0,6,16,12,4,0,0,8 +0,0,0,0,14,15,2,0,0,0,0,6,16,16,4,0,0,0,3,13,16,16,0,0,0,1,13,16,16,16,3,0,0,9,16,6,14,16,0,0,0,2,4,0,14,15,0,0,0,0,0,0,16,12,0,0,0,0,0,0,14,13,0,0,1 +0,0,0,10,12,0,0,0,0,0,6,16,3,0,0,0,0,0,14,10,0,0,3,1,0,6,16,2,0,6,16,2,0,14,16,5,9,15,6,0,0,10,16,14,16,12,0,0,0,0,0,4,16,2,0,0,0,0,0,10,11,0,0,0,4 +0,0,5,12,16,16,9,0,0,3,16,9,3,0,0,0,0,7,16,11,5,0,0,0,0,2,11,9,16,1,0,0,0,0,0,0,9,9,0,0,0,0,0,0,9,9,0,0,0,0,9,6,16,6,0,0,0,0,5,12,6,0,0,0,5 +0,0,0,9,9,0,0,0,0,0,4,16,2,0,0,0,0,0,12,9,0,1,12,4,0,8,15,1,0,11,12,1,0,10,14,6,13,16,5,0,0,8,16,11,16,10,0,0,0,0,0,5,15,2,0,0,0,0,0,12,7,0,0,0,4 +0,0,5,12,14,3,0,0,0,3,14,2,4,11,0,0,0,4,12,0,5,16,4,0,0,1,14,6,14,4,0,0,0,0,7,16,4,0,0,0,0,0,7,16,4,0,0,0,0,0,10,13,11,0,0,0,0,0,7,16,11,0,0,0,8 +0,0,5,14,15,5,0,0,0,2,16,9,9,15,2,0,0,2,10,0,6,16,3,0,0,0,2,14,16,7,0,0,0,0,0,11,16,14,1,0,0,0,6,3,6,16,1,0,0,0,13,11,10,15,0,0,0,0,9,16,16,7,0,0,3 +0,0,0,1,11,10,0,0,0,0,0,5,16,16,2,0,0,0,0,6,16,16,2,0,0,0,0,12,16,16,0,0,0,0,7,16,16,8,0,0,0,6,16,16,16,9,0,0,0,2,12,12,16,11,0,0,0,0,0,0,12,11,0,0,1 +0,0,6,16,3,0,0,0,0,0,13,13,0,0,0,0,0,1,16,8,0,0,0,0,0,6,16,1,0,0,0,0,0,8,14,6,9,6,0,0,0,7,16,15,12,14,12,0,0,2,16,9,0,1,16,3,0,0,5,15,16,16,14,1,6 +0,3,16,16,16,16,3,0,0,6,16,10,6,2,0,0,0,10,14,2,0,0,0,0,0,11,16,16,5,0,0,0,0,1,2,10,13,0,0,0,0,0,0,1,16,3,0,0,0,1,6,6,16,2,0,0,0,2,13,16,11,0,0,0,5 +0,0,0,0,11,12,0,0,0,0,0,0,11,16,2,0,0,0,0,0,15,13,0,0,0,0,6,16,16,12,0,0,0,7,16,16,16,8,0,0,0,1,4,7,16,11,0,0,0,0,0,2,15,13,0,0,0,0,0,0,8,16,4,0,1 +0,0,0,1,10,16,6,0,0,0,3,14,8,6,16,6,0,0,11,4,0,6,16,2,0,4,12,2,5,16,11,0,0,6,16,15,8,15,4,0,0,0,0,0,4,9,0,0,0,0,0,0,10,4,0,0,0,0,0,1,13,1,0,0,9 +0,0,1,15,16,15,5,0,0,0,8,13,4,3,1,0,0,2,14,9,7,1,0,0,0,5,16,16,15,10,0,0,0,0,0,0,1,14,3,0,0,0,0,0,1,14,2,0,0,0,1,4,13,10,0,0,0,0,1,14,9,1,0,0,5 +0,0,4,10,12,12,13,4,0,0,6,8,4,6,15,2,0,0,0,0,0,12,6,0,0,0,0,0,7,12,0,0,0,5,16,16,16,13,1,0,0,2,4,13,10,0,0,0,0,0,3,16,2,0,0,0,0,0,7,14,0,0,0,0,7 +0,0,6,15,14,4,0,0,0,0,16,6,4,15,0,0,0,5,16,6,13,11,0,0,0,1,16,16,16,13,1,0,0,6,15,7,0,11,8,0,0,4,12,0,0,9,11,0,0,0,16,4,9,15,2,0,0,0,8,14,9,2,0,0,8 +0,0,0,0,6,11,0,0,0,0,0,3,15,12,0,0,0,0,5,14,10,12,0,0,0,2,15,6,5,9,0,0,0,6,15,12,15,16,8,0,0,1,8,8,12,12,2,0,0,0,0,0,8,8,0,0,0,0,0,0,5,11,0,0,4 +0,2,8,16,11,1,0,0,0,10,15,5,11,5,0,0,0,11,10,13,16,6,0,0,0,9,16,16,12,13,2,0,0,1,15,8,0,6,9,0,0,0,12,5,0,5,11,0,0,0,9,9,7,16,8,0,0,0,4,14,15,6,0,0,8 +0,2,9,15,12,1,0,0,0,6,16,10,14,6,0,0,0,1,1,6,16,4,0,0,0,3,13,16,16,8,0,0,0,3,12,7,7,16,4,0,0,0,0,0,0,13,9,0,0,3,15,6,12,16,4,0,0,0,10,16,14,7,1,0,3 +0,0,4,16,16,15,4,0,0,0,10,10,5,3,0,0,0,6,14,1,4,2,0,0,0,8,15,14,15,15,2,0,0,5,12,5,0,9,8,0,0,0,0,0,0,8,8,0,0,0,5,9,7,14,2,0,0,0,3,16,11,4,0,0,5 
+0,4,15,16,16,16,5,0,0,7,16,13,6,4,1,0,0,10,16,6,2,0,0,0,0,9,16,16,16,4,0,0,0,0,0,1,14,10,0,0,0,0,0,0,12,12,0,0,0,3,9,11,16,7,0,0,0,3,15,16,6,0,0,0,5 +0,0,3,15,11,4,0,0,0,0,11,7,6,13,0,0,0,0,12,2,0,15,1,0,0,0,11,1,4,16,5,0,0,0,3,15,13,12,6,0,0,0,0,0,0,8,8,0,0,0,6,2,3,13,5,0,0,0,4,13,16,11,0,0,9 +0,0,4,14,15,2,0,0,0,4,16,7,8,9,0,0,0,6,14,0,0,12,2,0,0,3,16,0,0,7,7,0,0,0,14,0,0,4,8,0,0,0,11,4,0,5,9,0,0,0,6,15,8,14,7,0,0,0,1,13,15,10,0,0,0 +0,0,2,12,13,4,0,0,0,5,16,6,1,14,0,0,0,13,11,5,12,15,0,0,0,9,16,16,16,12,0,0,0,1,12,14,3,7,7,0,0,0,8,7,0,0,13,0,0,0,8,9,1,12,13,0,0,0,0,14,14,12,3,0,8 +0,0,8,16,10,1,0,0,0,7,16,11,15,8,0,0,0,15,13,0,11,12,0,0,0,10,7,0,11,10,0,0,0,0,0,1,15,8,0,0,0,0,0,9,15,1,0,0,0,0,7,16,10,6,8,3,0,0,9,16,16,16,16,7,2 +0,0,5,15,4,0,0,0,0,2,16,11,1,0,0,0,0,6,14,0,0,0,0,0,0,6,12,4,4,1,0,0,0,7,16,16,16,15,3,0,0,0,16,9,0,9,13,0,0,0,10,12,8,12,9,0,0,0,2,12,14,8,1,0,6 +0,0,7,15,16,12,0,0,0,0,9,6,3,15,2,0,0,0,0,4,13,9,0,0,0,0,9,16,16,6,0,0,0,0,5,5,6,15,4,0,0,0,8,0,0,4,12,0,0,1,13,1,3,12,8,0,0,0,8,16,15,10,0,0,3 +0,0,6,16,12,3,0,0,0,3,15,7,6,14,1,0,0,7,13,0,1,14,1,0,0,4,13,8,13,13,0,0,0,2,15,15,7,13,5,0,0,0,12,5,0,9,8,0,0,0,12,5,6,14,2,0,0,0,5,16,12,3,0,0,8 +0,2,15,16,7,0,0,0,0,7,16,13,16,3,0,0,0,3,16,1,16,7,0,0,0,0,2,5,16,3,0,0,0,0,0,11,13,0,0,0,0,0,3,15,7,0,0,0,0,0,13,16,12,10,7,0,0,2,15,16,16,16,16,5,2 +0,0,4,10,0,0,0,0,0,0,2,15,2,0,0,0,0,0,2,16,4,0,0,0,0,0,7,16,9,0,0,0,0,0,14,14,14,0,0,0,0,0,3,3,16,3,0,0,0,0,1,7,14,11,4,0,0,0,4,14,16,13,14,8,1 +0,0,9,14,2,0,0,0,0,0,5,16,7,0,0,0,0,0,6,16,10,0,0,0,0,2,15,16,10,0,0,0,0,6,16,15,14,0,0,0,0,0,0,10,16,2,0,0,0,0,9,15,16,14,12,3,0,0,6,15,16,16,12,3,1 +0,0,4,14,4,0,0,0,0,0,13,12,2,0,0,0,0,0,16,2,0,0,0,0,0,1,16,16,16,9,0,0,0,2,16,5,0,8,9,0,0,0,14,4,0,1,14,0,0,0,10,9,4,12,13,0,0,0,4,15,15,9,2,0,6 +0,0,7,16,6,0,0,0,0,0,16,13,1,0,0,0,0,2,16,3,0,0,0,0,0,3,16,4,4,2,0,0,0,5,16,16,16,15,4,0,0,4,16,6,4,7,16,1,0,0,16,8,4,12,15,2,0,0,6,16,16,14,6,0,6 +0,1,8,14,16,12,0,0,0,1,12,9,11,16,0,0,0,0,0,9,15,5,0,0,0,2,14,16,7,0,0,0,0,1,11,13,16,9,0,0,0,0,0,0,2,16,4,0,0,0,11,8,14,13,0,0,0,0,11,14,11,1,0,0,3 +0,0,0,6,15,2,11,2,0,0,2,16,10,5,16,3,0,1,15,10,0,9,13,0,0,9,16,9,9,16,12,0,0,7,16,16,15,15,7,0,0,0,3,2,14,8,0,0,0,0,0,6,14,1,0,0,0,0,0,11,9,0,0,0,4 +0,2,13,16,14,3,0,0,0,2,8,11,16,8,0,0,0,0,3,16,12,1,0,0,0,0,1,11,14,2,0,0,0,0,0,0,7,13,0,0,0,0,0,0,0,11,7,0,0,7,11,4,4,13,7,0,0,2,11,16,12,10,0,0,3 +0,0,0,1,14,1,14,0,0,0,0,12,11,6,13,0,0,0,8,13,2,10,9,0,0,6,14,2,1,14,11,0,0,11,15,12,16,16,9,0,0,7,14,12,14,12,0,0,0,0,0,0,12,8,0,0,0,0,0,0,14,3,0,0,4 +0,0,2,12,16,16,11,0,0,0,12,15,9,6,8,0,0,3,16,5,3,2,0,0,0,10,16,15,16,13,0,0,0,8,16,13,10,16,0,0,0,0,3,0,11,13,0,0,0,0,0,9,16,5,0,0,0,0,1,16,11,0,0,0,5 +0,0,4,10,14,5,0,0,0,0,13,10,0,14,4,0,0,6,15,0,9,15,2,0,0,6,16,16,16,13,0,0,0,0,12,15,7,12,4,0,0,0,10,9,0,10,7,0,0,0,11,9,7,15,3,0,0,0,5,15,12,3,0,0,8 +0,1,13,16,12,1,0,0,0,6,16,11,14,7,0,0,0,5,16,0,10,10,0,0,0,0,2,0,13,10,0,0,0,0,0,2,16,7,0,0,0,0,0,11,15,1,0,0,0,0,11,16,16,12,12,2,0,0,15,16,12,12,12,3,2 +0,1,9,14,14,3,0,0,0,2,12,10,12,11,0,0,0,0,0,3,14,11,0,0,0,0,11,16,16,7,0,0,0,0,10,7,11,16,5,0,0,0,0,0,0,5,12,0,0,0,8,5,6,12,12,0,0,0,12,16,15,10,3,0,3 +0,0,3,9,8,0,0,0,0,0,15,9,1,0,0,0,0,4,16,8,2,0,0,0,0,5,16,12,12,11,1,0,0,4,12,0,0,5,10,0,0,1,14,3,0,1,14,1,0,0,10,6,5,14,13,0,0,0,4,13,14,8,0,0,6 +0,1,6,8,13,8,0,0,0,5,16,10,5,15,0,0,0,8,15,6,13,13,0,0,0,5,16,16,14,15,1,0,0,0,14,9,0,6,6,0,0,0,16,1,0,6,8,0,0,0,16,5,9,15,2,0,0,0,11,16,10,2,0,0,8 
+0,0,3,16,3,0,0,0,0,0,8,14,2,0,0,0,0,0,14,7,0,0,0,0,0,1,16,4,3,0,0,0,0,4,16,16,16,15,2,0,0,3,16,9,1,10,13,0,0,0,15,10,4,9,13,0,0,0,3,12,14,14,7,0,6 +0,0,2,9,15,16,7,0,0,0,15,9,8,15,9,0,0,2,15,3,4,16,2,0,0,4,15,15,16,2,0,0,0,0,1,16,16,9,0,0,0,0,10,10,10,16,3,0,0,0,10,12,10,16,4,0,0,0,2,13,12,8,1,0,8 +0,0,10,13,2,0,0,0,0,6,16,14,9,0,0,0,0,3,12,2,13,0,0,0,0,0,0,4,14,0,0,0,0,0,0,8,14,0,0,0,0,0,1,15,6,0,0,0,0,0,10,16,9,8,10,2,0,0,6,12,13,12,11,2,2 +0,0,3,10,14,7,0,0,0,4,16,12,14,12,0,0,0,0,7,0,11,9,0,0,0,0,0,11,16,3,0,0,0,0,0,12,16,16,5,0,0,0,0,0,0,14,8,0,0,0,1,8,11,15,3,0,0,0,5,13,10,2,0,0,3 +0,0,3,12,15,2,0,0,0,5,16,12,9,13,0,0,0,7,15,2,0,12,4,0,0,6,12,0,0,7,6,0,0,6,13,0,0,7,7,0,0,0,16,0,0,10,7,0,0,0,10,10,8,16,1,0,0,0,2,10,16,6,0,0,0 +0,0,5,13,16,7,0,0,0,2,16,7,8,11,0,0,0,8,15,5,10,8,0,0,0,2,6,15,16,7,0,0,0,0,1,13,10,16,4,0,0,0,4,13,1,9,12,0,0,0,7,10,5,15,7,0,0,0,2,13,15,8,1,0,8 +0,0,6,14,12,6,0,0,0,3,16,11,10,15,5,0,0,6,16,4,3,16,4,0,0,0,9,16,16,12,0,0,0,0,2,15,7,15,6,0,0,0,8,6,0,14,8,0,0,0,13,10,13,13,1,0,0,0,5,12,9,1,0,0,8 +0,0,1,11,14,3,0,0,0,0,6,16,12,2,0,0,0,0,13,11,0,0,0,0,0,1,14,9,3,0,0,0,0,6,16,16,15,7,0,0,0,4,16,11,7,15,6,0,0,0,12,14,6,11,10,0,0,0,3,11,16,15,6,0,6 +0,0,0,5,16,12,0,0,0,0,1,15,16,16,0,0,0,2,10,16,16,11,0,0,0,7,16,16,16,7,0,0,0,6,12,16,16,8,0,0,0,0,0,12,16,12,0,0,0,0,0,11,16,13,0,0,0,0,0,2,15,12,0,0,1 +0,0,0,7,13,5,0,0,0,0,6,15,11,5,0,0,0,0,15,8,0,0,0,0,0,4,16,3,7,7,1,0,0,3,16,16,15,15,6,0,0,1,15,8,0,8,11,0,0,0,11,13,7,14,10,0,0,0,0,9,16,12,3,0,6 +0,0,0,0,13,12,0,0,0,0,0,4,16,6,0,0,0,0,1,13,10,1,8,0,0,0,7,15,2,9,15,0,0,6,16,4,4,14,9,0,2,15,16,16,16,16,6,0,2,8,8,4,13,13,0,0,0,0,0,0,15,8,0,0,4 +0,0,2,13,7,0,0,0,0,0,12,14,4,0,0,0,0,0,15,3,0,0,0,0,0,6,14,2,6,0,0,0,0,7,16,16,16,13,1,0,0,4,16,8,1,12,10,0,0,1,12,9,4,9,15,0,0,0,2,11,16,15,4,0,6 +0,2,11,10,12,13,7,0,0,7,16,13,11,12,5,0,0,4,16,4,4,1,0,0,0,4,16,16,16,16,5,0,0,0,2,2,1,13,8,0,0,0,0,0,0,14,6,0,0,0,9,5,13,14,0,0,0,0,15,12,9,2,0,0,5 +0,1,10,16,16,16,4,0,0,1,14,9,8,16,4,0,0,0,0,0,6,15,0,0,0,2,11,12,15,15,9,0,0,0,13,16,16,13,6,0,0,0,0,16,8,0,0,0,0,0,9,15,1,0,0,0,0,0,13,9,0,0,0,0,7 +0,0,6,8,12,15,4,0,0,1,15,13,12,12,4,0,0,0,16,5,0,0,0,0,0,3,16,16,16,14,1,0,0,2,11,5,5,15,8,0,0,0,0,0,0,16,4,0,0,0,5,8,13,11,0,0,0,0,8,15,10,1,0,0,5 +0,0,8,14,11,3,0,0,0,4,16,9,13,10,0,0,0,2,5,0,8,11,0,0,0,0,0,0,12,8,0,0,0,0,0,5,15,0,0,0,0,0,8,16,6,0,0,0,0,4,16,15,8,4,1,0,0,0,9,12,14,16,5,0,2 +0,0,2,13,15,7,0,0,0,3,15,9,10,15,3,0,0,8,12,0,1,15,6,0,0,6,15,9,12,16,7,0,0,1,8,8,10,16,0,0,0,0,0,0,12,12,0,0,0,0,0,8,16,5,0,0,0,0,0,14,13,0,0,0,9 +0,0,2,12,16,13,2,0,0,1,14,12,6,15,7,0,0,7,16,2,7,15,6,0,0,2,16,16,16,16,3,0,0,0,3,6,10,13,0,0,0,0,0,1,14,5,0,0,0,0,0,8,14,1,0,0,0,0,0,15,9,0,0,0,9 +0,2,10,13,10,1,0,0,0,11,12,10,15,9,0,0,0,0,0,0,12,9,0,0,0,0,3,11,16,6,0,0,0,0,12,16,16,16,4,0,0,0,0,0,0,13,9,0,0,0,1,7,9,16,5,0,0,0,13,15,11,3,0,0,3 +0,1,10,12,14,16,5,0,0,0,14,10,8,8,1,0,0,0,16,4,3,1,0,0,0,2,16,16,16,15,3,0,0,0,2,0,0,11,8,0,0,0,0,0,3,14,3,0,0,0,3,11,15,9,0,0,0,1,12,7,3,0,0,0,5 +0,0,4,12,10,1,0,0,0,0,13,12,11,10,0,0,0,2,16,3,0,12,3,0,0,4,13,0,0,10,6,0,0,6,11,0,0,10,7,0,0,4,12,0,1,14,4,0,0,0,11,9,12,13,0,0,0,0,4,12,11,3,0,0,0 +0,2,13,12,13,16,4,0,0,4,15,9,8,8,0,0,0,4,12,0,0,0,0,0,0,6,15,12,5,0,0,0,0,2,11,10,16,2,0,0,0,0,0,0,12,6,0,0,0,0,2,9,15,3,0,0,0,1,13,10,2,0,0,0,5 +0,0,0,13,7,0,0,0,0,0,8,15,5,0,0,0,0,1,16,7,0,0,0,0,0,2,16,4,7,6,0,0,0,4,16,16,16,16,8,0,0,3,16,9,0,9,11,0,0,0,11,12,5,12,12,0,0,0,1,10,13,13,5,0,6 
+0,0,8,12,16,16,2,0,0,1,16,14,13,16,1,0,0,0,3,0,8,14,0,0,0,0,5,16,16,16,4,0,0,0,7,16,15,14,8,0,0,0,1,14,9,0,0,0,0,0,5,16,2,0,0,0,0,0,11,11,0,0,0,0,7 +0,0,4,11,14,6,0,0,0,4,14,7,6,15,0,0,0,4,5,0,5,11,0,0,0,0,0,7,15,11,0,0,0,0,0,11,10,15,4,0,0,0,0,0,0,10,8,0,0,0,1,4,7,15,1,0,0,0,3,16,11,2,0,0,3 +0,0,3,13,13,2,0,0,0,4,16,15,14,10,0,0,0,3,16,2,0,16,4,0,0,6,13,0,0,13,6,0,0,4,13,0,0,11,9,0,0,0,16,0,0,11,7,0,0,0,9,12,9,15,2,0,0,0,2,11,15,7,0,0,0 +0,2,9,13,16,12,2,0,0,5,16,10,7,16,5,0,0,0,0,0,11,15,1,0,0,0,0,9,16,11,1,0,0,0,0,1,10,15,8,0,0,0,0,0,3,15,8,0,0,0,2,9,16,11,1,0,0,1,12,11,3,0,0,0,3 +0,0,5,8,13,13,10,1,0,0,7,8,5,12,14,1,0,0,0,0,0,14,8,0,0,0,11,16,16,16,9,0,0,0,2,6,16,11,5,0,0,0,0,10,8,0,0,0,0,0,3,16,1,0,0,0,0,0,8,10,0,0,0,0,7 +0,0,8,13,11,2,0,0,0,7,15,9,13,11,0,0,0,12,15,0,0,15,4,0,0,4,16,0,0,13,6,0,0,5,13,0,0,11,7,0,0,4,16,0,0,14,7,0,0,1,15,8,10,16,4,0,0,0,6,15,16,10,0,0,0 +0,0,0,4,16,9,2,0,0,0,1,14,16,16,7,0,0,5,14,16,16,16,6,0,0,5,12,13,16,16,5,0,0,0,0,4,16,16,1,0,0,0,0,7,16,16,1,0,0,0,0,8,16,16,3,0,0,0,0,5,14,16,6,0,1 +0,0,2,13,13,0,0,0,0,0,10,14,3,0,0,0,0,1,15,6,0,0,0,0,0,4,16,3,4,2,0,0,0,5,16,16,13,16,3,0,0,2,16,6,0,6,12,0,0,0,11,8,0,8,12,0,0,0,1,13,16,14,3,0,6 +0,0,0,0,10,14,2,0,0,0,0,2,16,16,3,0,0,0,1,15,16,16,2,0,0,3,16,16,16,16,1,0,0,2,8,5,16,16,0,0,0,0,0,0,15,16,1,0,0,0,0,0,12,16,6,0,0,0,0,0,10,15,6,0,1 +0,1,11,16,15,1,0,0,0,5,16,16,16,5,0,0,0,0,3,8,16,2,0,0,0,0,0,15,11,0,0,0,0,0,7,16,6,0,0,0,0,0,14,12,0,0,0,0,0,1,16,15,9,8,2,0,0,1,12,16,16,16,3,0,2 +0,0,3,10,8,1,0,0,0,0,11,16,16,9,0,0,0,3,16,16,16,3,0,0,0,4,16,16,16,3,0,0,0,4,16,16,12,0,0,0,0,1,16,16,15,0,0,0,0,0,9,16,16,0,0,0,0,0,2,10,12,1,0,0,1 +0,1,8,12,12,6,0,0,0,9,16,11,8,10,1,0,0,6,14,0,0,0,0,0,0,1,12,15,8,0,0,0,0,0,0,7,15,5,0,0,0,0,0,0,4,14,0,0,0,0,0,0,11,11,0,0,0,0,12,16,12,4,0,0,5 +0,0,4,14,16,14,1,0,0,1,16,13,14,16,5,0,0,1,5,0,3,16,3,0,0,0,0,0,7,15,0,0,0,2,13,16,16,14,4,0,0,3,10,14,14,8,1,0,0,0,2,14,8,0,0,0,0,0,6,14,1,0,0,0,7 +0,0,10,15,3,0,0,0,0,0,16,7,8,0,0,0,0,0,11,5,4,10,3,0,0,0,3,13,15,10,1,0,0,0,2,15,14,1,0,0,0,0,13,9,10,6,0,0,0,1,14,4,8,8,0,0,0,0,8,14,13,4,0,0,8 +0,0,2,12,12,2,0,0,0,0,8,16,16,5,0,0,0,0,10,16,16,8,0,0,0,0,12,16,16,4,0,0,0,0,11,16,16,6,0,0,0,0,7,16,16,8,0,0,0,0,3,16,16,13,0,0,0,0,2,8,12,7,0,0,1 +0,0,7,13,6,0,0,0,0,3,16,8,13,0,0,0,0,2,14,8,0,0,0,0,0,0,6,16,16,14,6,0,0,1,14,14,13,0,1,0,0,4,16,2,13,6,0,0,0,1,15,4,4,13,0,0,0,0,5,14,16,10,0,0,8 +0,0,0,0,6,9,0,0,0,0,0,7,15,4,0,0,0,0,3,14,7,0,0,0,0,0,9,9,0,13,3,0,0,2,16,2,2,16,0,0,0,8,16,14,16,16,6,0,0,2,4,4,12,8,0,0,0,0,0,0,9,11,0,0,4 +0,0,0,1,15,5,0,0,0,0,0,11,12,0,0,0,0,0,8,15,1,0,0,0,0,3,15,6,2,13,3,0,0,11,12,0,5,16,4,0,0,15,14,14,16,15,5,0,0,3,8,9,16,6,0,0,0,0,0,1,16,5,0,0,4 +0,0,7,14,5,0,0,0,0,3,16,15,12,0,0,0,0,0,12,0,12,0,0,0,0,0,1,0,12,2,0,0,0,0,0,1,16,2,0,0,0,0,1,10,15,3,0,0,0,0,13,16,16,15,7,0,0,0,9,6,3,4,13,1,2 +0,0,15,16,16,15,3,0,0,0,7,8,10,16,6,0,0,0,0,0,4,16,4,0,0,0,0,0,11,12,1,0,0,3,13,16,16,14,8,0,0,3,13,16,13,9,3,0,0,0,6,15,2,0,0,0,0,0,15,12,0,0,0,0,7 +0,1,9,13,15,3,0,0,0,8,14,8,12,16,0,0,0,1,1,0,10,14,0,0,0,0,0,6,15,7,0,0,0,0,10,16,16,10,2,0,0,0,5,8,8,14,8,0,0,1,6,4,7,15,6,0,0,1,11,15,12,4,0,0,3 +0,0,8,16,6,0,0,0,0,2,14,8,12,2,0,0,0,7,14,9,15,9,0,0,0,1,12,13,7,14,0,0,0,0,0,0,0,11,4,0,0,0,0,0,0,8,8,0,0,0,2,4,5,13,7,0,0,0,6,15,15,10,0,0,9 +0,0,9,11,12,12,1,0,0,0,11,12,13,16,4,0,0,0,0,0,12,10,0,0,0,0,2,11,14,2,0,0,0,0,9,16,5,0,0,0,0,0,6,15,16,14,2,0,0,0,3,6,14,16,1,0,0,0,11,16,13,5,0,0,3 +0,0,9,16,16,16,13,1,0,0,14,8,8,11,16,5,0,0,0,0,0,7,16,2,0,0,3,4,5,15,10,0,0,0,16,16,16,16,8,0,0,0,3,11,15,8,1,0,0,0,3,16,8,0,0,0,0,0,12,14,0,0,0,0,7 
+0,0,8,16,16,7,0,0,0,2,15,3,10,16,0,0,0,2,15,12,14,11,0,0,0,0,8,9,8,12,0,0,0,0,0,0,1,15,0,0,0,0,0,0,0,14,2,0,0,0,1,1,5,16,4,0,0,0,9,13,16,11,0,0,9 +0,0,9,15,16,10,0,0,0,3,16,13,14,16,1,0,0,0,0,0,12,16,0,0,0,0,4,6,14,14,2,0,0,1,16,16,16,16,11,0,0,0,4,15,12,4,1,0,0,0,6,16,3,0,0,0,0,0,10,11,0,0,0,0,7 +0,0,4,12,1,0,0,0,0,0,11,16,16,7,0,0,0,0,15,15,10,16,4,0,0,3,16,2,0,10,6,0,0,3,16,1,0,7,9,0,0,2,16,2,2,12,12,0,0,0,13,11,14,16,7,0,0,0,3,12,13,5,0,0,0 +0,0,0,6,11,0,0,0,0,0,1,16,14,1,0,0,0,0,5,16,2,0,0,0,0,0,8,14,0,0,0,0,0,0,8,12,2,0,0,0,0,0,11,16,16,16,11,2,0,0,2,16,4,3,12,10,0,0,0,4,14,16,13,8,6 +0,2,14,16,13,7,0,0,0,3,12,8,13,16,0,0,0,0,0,5,14,11,0,0,0,2,12,16,10,1,0,0,0,3,16,15,9,1,0,0,0,0,3,11,15,15,1,0,0,3,6,4,5,16,8,0,0,3,13,16,14,12,2,0,3 +0,0,2,12,3,0,0,0,0,0,11,14,0,0,0,0,0,0,15,6,0,0,0,0,0,0,15,2,4,0,0,0,0,2,16,16,16,12,2,0,0,2,15,7,1,8,9,0,0,0,12,9,1,9,12,0,0,0,1,15,16,13,4,0,6 +0,0,7,15,15,5,0,0,0,4,16,16,16,13,1,0,0,2,16,16,16,6,0,0,0,0,16,16,16,6,0,0,0,0,16,16,16,0,0,0,0,1,16,16,16,1,0,0,0,4,16,16,15,3,0,0,0,1,8,15,12,2,0,0,1 +0,0,3,12,14,2,0,0,0,0,13,9,11,12,0,0,0,1,16,12,13,16,0,0,0,0,7,8,9,13,0,0,0,0,0,0,0,13,3,0,0,0,0,0,0,10,4,0,0,0,1,3,4,12,3,0,0,0,4,16,15,8,0,0,9 +0,0,8,15,11,0,0,0,0,3,15,13,16,3,0,0,0,8,16,6,15,13,0,0,0,4,15,16,16,15,0,0,0,0,2,4,5,16,5,0,0,0,0,0,1,16,5,0,0,0,0,0,4,16,5,0,0,0,10,16,16,10,0,0,9 +0,0,5,16,6,0,0,0,0,0,14,16,3,0,0,0,0,0,16,13,0,0,0,0,0,0,3,4,0,0,0,0,0,1,8,10,12,11,3,0,0,4,16,14,12,14,14,2,0,0,14,13,8,13,16,4,0,0,5,15,16,16,13,1,6 +0,0,1,11,16,9,0,0,0,0,13,11,6,10,0,0,0,4,14,0,9,11,0,0,0,7,15,15,16,15,1,0,0,0,7,6,0,14,8,0,0,0,0,0,0,15,5,0,0,0,0,3,10,15,2,0,0,0,2,12,10,1,0,0,9 +0,0,4,11,1,0,0,0,0,0,8,13,0,0,0,0,0,0,12,10,0,0,0,0,0,0,14,4,0,0,0,0,0,1,16,13,13,8,0,0,0,0,14,13,8,10,10,0,0,0,11,11,0,2,16,0,0,0,2,11,16,16,12,1,6 +0,0,2,11,14,5,0,0,0,1,14,13,13,8,0,0,0,5,16,0,1,1,0,0,0,3,16,6,12,16,5,0,0,0,9,16,16,8,2,0,0,2,14,12,12,13,1,0,0,2,15,7,2,14,7,0,0,0,3,13,14,12,1,0,8 +0,0,5,12,12,3,0,0,0,4,16,7,8,10,0,0,0,8,13,1,3,14,4,0,0,2,14,16,16,12,4,0,0,0,3,16,16,8,0,0,0,0,11,12,5,15,2,0,0,0,16,5,1,16,4,0,0,0,7,15,16,10,0,0,8 +0,0,5,14,13,4,0,0,0,0,12,16,16,10,0,0,0,0,11,16,16,7,0,0,0,0,8,16,16,5,0,0,0,0,8,16,16,6,0,0,0,0,11,16,15,4,0,0,0,0,11,16,16,5,0,0,0,0,5,12,16,11,3,0,1 +0,0,4,15,5,0,0,0,0,0,12,16,14,1,0,0,0,1,15,12,13,14,0,0,0,4,13,0,0,9,4,0,0,3,10,0,0,3,9,0,0,4,12,0,0,2,12,0,0,0,13,10,10,16,14,0,0,0,4,13,16,14,3,0,0 +0,0,3,10,14,15,2,0,0,1,15,11,8,16,4,0,0,0,5,0,8,12,0,0,0,0,0,0,15,2,0,0,0,0,0,0,16,7,0,0,0,0,0,0,9,16,5,0,0,0,1,4,6,16,2,0,0,0,2,16,15,7,0,0,3 +0,0,0,9,15,5,0,0,0,0,8,16,16,6,0,0,0,2,16,16,16,4,0,0,0,1,14,16,16,3,0,0,0,0,0,12,16,4,0,0,0,0,0,16,16,1,0,0,0,0,0,13,16,7,0,0,0,0,0,7,12,12,0,0,1 +0,1,10,5,0,0,0,0,0,2,16,16,12,1,0,0,0,0,10,5,11,6,0,0,0,0,0,1,12,7,0,0,0,0,6,16,16,6,0,0,0,0,3,8,10,15,8,0,0,0,3,4,7,14,15,0,0,0,6,15,15,11,4,0,3 +0,0,5,12,10,4,0,0,0,0,15,13,14,12,0,0,0,0,2,0,12,7,0,0,0,0,2,13,15,2,0,0,0,0,2,13,16,13,0,0,0,0,0,0,3,16,3,0,0,0,12,5,8,15,1,0,0,0,7,13,10,4,0,0,3 +0,0,3,13,9,2,0,0,0,0,14,16,16,11,0,0,0,5,16,7,4,16,3,0,0,4,14,1,0,12,8,0,0,5,12,0,0,8,8,0,0,1,16,4,0,9,11,0,0,0,11,16,12,16,7,0,0,0,3,11,15,10,0,0,0 +0,0,8,15,8,0,0,0,0,2,16,12,16,1,0,0,0,3,9,0,14,2,0,0,0,0,1,3,13,0,0,0,0,0,0,10,7,0,0,0,0,0,5,14,1,0,0,0,0,0,13,16,12,11,1,0,0,0,6,9,12,13,3,0,2 +0,0,3,10,7,15,13,0,0,0,12,9,16,16,13,0,0,1,16,12,14,8,4,0,0,1,14,16,16,16,10,0,0,0,0,1,0,11,6,0,0,0,0,0,4,15,2,0,0,0,2,13,15,3,0,0,0,0,4,13,3,0,0,0,5 
+0,0,0,8,14,15,9,0,0,0,8,15,9,8,9,0,0,0,16,1,0,0,0,0,0,7,9,0,0,0,0,0,0,5,16,16,16,15,1,0,0,0,4,3,5,16,3,0,0,0,0,7,9,12,0,0,0,0,0,13,12,1,0,0,5 +0,0,3,13,16,13,1,0,0,0,12,13,8,16,3,0,0,2,15,16,16,8,0,0,0,6,8,11,15,10,0,0,0,0,4,14,4,16,0,0,0,0,8,9,1,13,7,0,0,0,7,9,0,14,6,0,0,0,3,14,14,14,1,0,8 +0,0,0,4,15,1,0,0,0,0,2,16,14,4,0,0,0,0,11,15,2,0,0,0,0,1,16,9,0,0,0,0,0,2,16,16,16,9,1,0,0,2,15,16,13,16,11,0,0,0,8,16,10,12,16,0,0,0,0,6,14,15,8,0,6 +0,0,3,14,11,1,0,0,0,0,14,15,13,10,0,0,0,1,16,5,1,14,2,0,0,3,16,2,0,7,6,0,0,4,13,1,0,7,8,0,0,3,13,0,0,11,7,0,0,0,14,6,7,14,1,0,0,0,4,16,14,4,0,0,0 +0,0,0,6,13,0,0,0,0,0,0,13,12,0,0,0,0,0,1,13,4,0,0,0,0,0,9,9,1,9,1,0,0,4,13,0,4,14,0,0,2,16,16,16,16,13,0,0,3,8,7,9,14,2,0,0,0,0,0,10,5,0,0,0,4 +0,0,0,0,5,14,3,0,0,0,0,0,11,16,8,0,0,0,0,6,16,16,2,0,0,0,7,16,16,16,0,0,0,3,16,14,14,16,0,0,0,4,12,4,13,15,0,0,0,0,0,0,14,16,0,0,0,0,0,0,6,16,1,0,1 +0,0,3,10,14,2,0,0,0,0,15,16,14,10,0,0,0,6,16,5,0,14,2,0,0,8,15,1,0,8,6,0,0,8,8,0,0,6,8,0,0,5,13,0,0,6,8,0,0,0,15,12,8,14,5,0,0,0,3,15,16,11,0,0,0 +0,0,4,15,13,1,0,0,0,0,11,12,13,9,0,0,0,2,15,4,1,16,1,0,0,4,16,3,0,10,6,0,0,3,15,2,0,8,8,0,0,3,16,1,0,12,6,0,0,0,13,11,9,15,1,0,0,0,4,13,13,2,0,0,0 +0,1,13,16,10,3,0,0,0,2,13,8,13,15,0,0,0,0,1,8,11,14,2,0,0,0,0,16,16,7,0,0,0,0,0,0,9,16,1,0,0,0,1,0,0,14,7,0,0,5,12,4,7,16,3,0,0,1,11,14,12,5,0,0,3 +0,0,3,15,10,1,0,0,0,2,14,9,13,11,0,0,0,7,15,0,2,16,2,0,0,4,16,2,0,6,8,0,0,6,9,0,0,6,8,0,0,5,11,0,0,10,8,0,0,0,15,6,7,16,3,0,0,0,3,12,16,5,0,0,0 +0,0,7,10,8,11,16,3,0,0,5,10,9,14,11,0,0,0,0,0,2,15,3,0,0,0,0,0,8,10,0,0,0,4,15,16,16,16,7,0,0,2,4,11,10,3,0,0,0,0,2,16,1,0,0,0,0,0,8,12,0,0,0,0,7 +0,0,0,0,12,5,0,0,0,0,0,2,16,2,0,0,0,0,0,9,12,0,0,0,0,0,6,14,1,5,4,0,0,3,15,3,2,14,6,0,0,12,16,14,16,16,1,0,0,7,8,5,14,10,0,0,0,0,0,0,12,6,0,0,4 +0,0,1,11,11,1,0,0,0,0,9,16,15,3,0,0,0,1,15,6,0,0,0,0,0,5,14,0,0,0,0,0,0,7,11,0,1,2,0,0,0,4,14,5,15,16,7,0,0,1,12,16,16,12,15,0,0,0,2,11,16,14,9,0,6 +0,0,0,5,12,16,8,0,0,1,10,16,15,16,5,0,0,10,16,13,13,16,2,0,0,3,12,13,14,16,2,0,0,0,0,0,8,15,0,0,0,0,0,0,12,15,0,0,0,0,0,1,16,8,0,0,0,0,0,6,16,7,0,0,9 +0,0,5,8,14,14,8,0,0,4,16,13,12,9,4,0,0,4,16,0,0,0,0,0,0,5,16,8,2,0,0,0,0,0,6,14,16,8,0,0,0,0,0,0,6,16,2,0,0,0,11,8,10,15,1,0,0,0,8,13,10,1,0,0,5 +0,0,1,6,14,16,12,0,0,1,15,16,8,16,11,0,0,10,15,4,5,16,10,0,0,3,12,16,16,16,8,0,0,0,0,0,11,16,0,0,0,0,0,0,13,11,0,0,0,0,0,5,16,5,0,0,0,0,0,9,16,3,0,0,9 +0,2,0,0,9,7,0,0,0,15,16,16,16,9,0,0,0,1,5,4,16,7,0,0,0,1,4,4,16,7,0,0,0,5,16,16,16,15,2,0,0,0,3,1,14,6,0,0,0,0,0,0,13,7,0,0,0,0,0,0,10,11,0,0,7 +0,1,10,13,5,0,0,0,0,10,14,12,14,0,0,0,0,5,4,2,15,0,0,0,0,0,0,7,10,0,0,0,0,0,2,15,4,0,0,0,0,0,6,15,0,0,0,0,0,1,16,13,12,13,12,0,0,0,15,12,12,10,4,0,2 +0,0,9,14,10,4,0,0,0,1,15,12,14,16,2,0,0,0,1,0,0,15,4,0,0,0,0,14,16,8,0,0,0,0,0,6,13,14,1,0,0,0,0,0,1,16,5,0,0,0,5,4,6,15,4,0,0,0,12,15,12,6,0,0,3 +0,0,7,12,8,10,15,2,0,0,4,8,10,16,8,0,0,0,0,0,9,10,0,0,0,0,0,5,14,2,0,0,0,6,16,16,16,13,1,0,0,3,6,16,0,0,0,0,0,0,7,13,0,0,0,0,0,0,10,10,0,0,0,0,7 +0,0,0,6,12,2,0,0,0,0,2,15,10,4,0,0,0,0,13,9,0,0,0,0,0,1,15,1,0,0,0,0,0,2,13,6,9,2,0,0,0,0,15,16,12,13,1,0,0,0,6,12,8,15,4,0,0,0,0,7,15,11,1,0,6 +0,0,6,12,14,6,0,0,0,12,14,6,4,15,0,0,0,11,1,0,5,12,0,0,0,6,11,5,15,3,0,0,0,0,7,15,12,2,0,0,0,0,5,13,8,13,1,0,0,0,11,5,6,14,7,0,0,0,5,16,12,7,1,0,8 +0,0,9,16,15,5,0,0,0,0,15,14,16,9,0,0,0,0,5,1,13,10,0,0,0,0,0,0,13,7,0,0,0,0,0,8,13,0,0,0,0,0,3,15,7,1,0,0,0,0,10,16,16,15,0,0,0,0,13,16,12,12,0,0,2 +0,0,1,12,15,12,1,0,0,0,2,12,10,16,4,0,0,6,10,6,14,9,0,0,0,5,13,16,16,7,0,0,0,0,5,15,10,15,0,0,0,0,5,12,0,11,7,0,0,0,4,14,4,13,7,0,0,0,2,12,15,10,1,0,8 
+0,0,0,12,1,0,0,0,0,0,5,14,1,0,0,0,0,0,12,9,0,0,0,0,0,3,16,2,0,0,0,0,0,3,13,6,8,8,2,0,0,2,16,16,12,13,14,0,0,0,11,14,6,5,16,2,0,0,1,10,14,14,10,0,6 +0,0,5,14,16,10,0,0,0,0,15,15,14,13,0,0,0,0,4,3,15,8,0,0,0,0,1,16,16,15,2,0,0,0,1,7,6,16,6,0,0,0,0,1,0,14,6,0,0,0,2,15,12,14,1,0,0,0,7,16,12,1,0,0,3 +0,0,0,5,16,10,0,0,0,0,0,13,14,0,0,0,0,0,3,16,9,0,0,0,0,0,7,16,4,0,0,0,0,0,10,16,12,8,1,0,0,2,16,16,16,16,11,0,0,1,9,16,11,12,16,1,0,0,0,6,16,16,10,0,6 +0,0,2,12,16,3,0,0,0,3,16,6,8,8,0,0,0,5,14,11,15,2,0,0,0,0,0,10,11,0,0,0,0,0,1,16,14,2,0,0,0,0,8,12,6,9,0,0,0,0,10,6,6,16,1,0,0,0,3,14,13,8,0,0,8 +0,0,0,9,14,14,1,0,0,2,15,16,16,11,1,0,0,6,16,15,5,0,0,0,0,12,16,9,1,0,0,0,0,3,13,16,13,2,0,0,0,0,0,3,13,15,0,0,0,0,0,11,10,16,7,0,0,0,0,10,16,15,1,0,5 +0,1,6,14,12,0,0,0,0,10,15,12,16,2,0,0,0,7,4,4,16,0,0,0,0,0,0,9,10,0,0,0,0,0,3,16,1,0,0,0,0,0,9,10,0,0,0,0,0,0,14,12,8,11,14,0,0,0,5,16,15,9,8,1,2 +0,0,9,15,16,12,1,0,0,0,3,4,4,15,6,0,0,0,0,0,0,16,4,0,0,0,0,0,8,13,0,0,0,3,12,16,16,15,4,0,0,3,5,12,9,0,0,0,0,0,3,15,1,0,0,0,0,0,11,6,0,0,0,0,7 +0,0,5,11,0,0,0,0,0,1,14,5,0,0,0,0,0,4,12,0,0,0,0,0,0,4,12,3,4,0,0,0,0,8,16,16,16,14,1,0,0,5,16,4,3,9,8,0,0,0,14,7,5,11,12,0,0,0,5,15,16,11,4,0,6 +0,0,3,13,11,1,0,0,0,10,15,10,11,10,0,0,0,6,16,1,1,13,2,0,0,4,8,0,0,8,8,0,0,4,8,0,0,8,8,0,0,3,12,0,0,14,8,0,0,0,13,10,12,16,3,0,0,0,4,15,10,3,0,0,0 +0,0,5,11,15,16,5,0,0,6,16,10,5,16,8,0,0,0,2,1,12,16,1,0,0,0,1,14,15,2,0,0,0,0,2,9,16,10,0,0,0,0,2,0,7,16,3,0,0,0,13,10,5,16,7,0,0,0,7,13,14,15,2,0,3 +0,0,6,15,16,12,1,0,0,3,16,9,8,15,7,0,0,3,3,0,0,15,6,0,0,0,0,0,6,15,2,0,0,0,0,1,15,6,0,0,0,0,1,12,10,0,0,0,0,0,8,16,13,13,6,0,0,0,6,14,12,4,0,0,2 +0,0,1,8,14,13,2,0,0,2,13,12,8,15,2,0,0,6,15,2,11,6,0,0,0,0,5,14,15,1,0,0,0,0,0,12,13,0,0,0,0,0,0,15,15,5,0,0,0,0,0,16,9,15,0,0,0,0,0,9,16,10,0,0,8 +0,0,6,12,16,16,9,0,0,0,12,5,1,14,10,0,0,0,0,2,12,12,0,0,0,0,4,15,14,1,0,0,0,0,4,13,16,8,0,0,0,0,0,0,2,16,1,0,0,0,4,8,12,13,0,0,0,0,7,14,7,1,0,0,3 +0,0,4,12,15,9,1,0,0,7,14,8,13,15,2,0,0,5,7,12,11,1,0,0,0,0,4,16,5,0,0,0,0,0,10,15,10,0,0,0,0,0,9,6,12,4,0,0,0,0,7,9,10,9,0,0,0,0,2,12,16,7,0,0,8 +0,0,2,10,15,5,0,0,0,3,9,12,7,16,0,0,0,12,9,5,6,14,1,0,0,4,12,6,16,4,0,0,0,0,8,16,10,0,0,0,0,0,8,14,16,7,0,0,0,0,12,6,5,16,11,0,0,0,3,14,16,13,7,0,8 +0,0,4,15,15,16,16,15,0,0,5,12,12,11,16,11,0,0,0,0,0,7,16,3,0,0,2,11,16,16,14,0,0,0,14,12,16,13,0,0,0,0,6,4,15,1,0,0,0,0,1,12,10,0,0,0,0,0,8,14,1,0,0,0,7 +0,0,1,10,13,16,14,0,0,0,9,15,12,15,16,1,0,0,1,2,0,10,14,0,0,0,0,2,4,16,12,0,0,0,5,16,16,16,5,0,0,0,7,12,16,8,0,0,0,0,0,6,16,3,0,0,0,0,0,13,11,1,0,0,7 +0,0,2,9,12,13,3,0,0,1,14,10,4,14,5,0,0,6,12,4,8,16,2,0,0,1,11,12,12,16,3,0,0,0,0,0,8,11,0,0,0,0,0,1,14,4,0,0,0,0,0,5,14,0,0,0,0,0,0,7,10,0,0,0,9 +0,0,15,16,16,11,3,0,0,0,11,7,7,13,13,0,0,0,0,0,5,15,6,0,0,0,2,11,16,7,0,0,0,0,3,16,16,13,1,0,0,0,0,0,8,16,2,0,0,0,14,12,15,11,0,0,0,0,11,9,6,0,0,0,3 +0,0,0,1,7,15,5,0,0,0,5,13,16,16,8,0,0,4,15,11,5,16,7,0,0,1,4,0,3,16,4,0,0,0,0,0,4,16,0,0,0,0,0,0,8,16,0,0,0,0,0,0,12,16,0,0,0,0,0,0,12,12,0,0,1 +0,0,6,16,15,3,0,0,0,2,16,11,13,13,0,0,0,7,14,1,1,14,4,0,0,8,12,0,0,8,12,0,0,9,11,0,0,8,12,0,0,8,13,1,0,14,11,0,0,1,16,13,14,16,6,0,0,0,6,16,15,7,0,0,0 +0,2,12,14,16,12,1,0,0,1,16,16,14,11,1,0,0,0,12,13,0,0,0,0,0,0,7,15,3,0,0,0,0,0,0,15,9,0,0,0,0,0,0,9,15,0,0,0,0,0,3,8,16,3,0,0,0,1,15,16,16,3,0,0,5 +0,0,1,12,10,2,0,0,0,0,0,9,16,9,0,0,0,0,0,14,16,12,0,0,0,0,1,16,16,12,0,0,0,0,4,16,16,10,0,0,0,0,6,16,16,10,0,0,0,0,1,16,16,12,0,0,0,0,3,11,13,12,3,0,1 
+0,0,0,1,12,4,0,0,0,0,0,9,14,0,0,0,0,0,2,16,5,0,0,0,0,0,5,16,9,2,0,0,0,2,16,16,13,16,8,0,0,0,9,15,0,2,15,0,0,0,0,15,8,8,16,3,0,0,0,3,11,13,10,0,6 +0,0,0,9,10,0,0,0,0,0,6,16,5,0,0,0,0,1,15,9,0,0,0,0,0,4,16,4,0,0,0,0,0,7,16,14,15,6,0,0,0,2,15,5,2,12,6,0,0,0,9,14,3,6,16,0,0,0,0,8,14,14,12,0,6 +0,1,13,16,10,0,0,0,0,2,14,15,16,2,0,0,0,0,0,8,16,4,0,0,0,0,0,13,16,1,0,0,0,0,1,16,11,0,0,0,0,0,11,16,3,5,1,0,0,1,15,16,14,16,4,0,0,1,13,16,15,5,0,0,2 +0,0,2,16,10,0,0,0,0,0,7,16,14,1,0,0,0,0,9,16,13,0,0,0,0,0,12,16,8,0,0,0,0,0,14,16,9,0,0,0,0,0,14,16,6,0,0,0,0,0,7,16,10,0,0,0,0,0,1,11,16,4,0,0,1 +0,0,1,13,10,0,0,0,0,0,9,16,4,0,0,0,0,1,16,8,0,4,0,0,0,4,16,16,16,16,6,0,0,0,8,8,12,16,5,0,0,0,0,1,16,13,0,0,0,0,0,13,16,3,0,0,0,0,2,15,5,0,0,0,4 +0,0,5,13,2,0,0,0,0,1,15,16,15,3,0,0,0,4,14,1,5,14,2,0,0,4,9,0,0,6,8,0,0,5,8,0,0,4,8,0,0,3,10,0,0,9,9,0,0,0,15,10,10,16,6,0,0,0,7,15,14,5,0,0,0 +0,0,8,16,13,0,0,0,0,1,13,9,16,4,0,0,0,0,0,0,14,7,5,0,0,0,3,8,15,16,12,0,0,11,16,16,14,7,1,0,0,7,2,15,4,0,0,0,0,0,5,15,2,0,0,0,0,0,12,11,0,0,0,0,7 +0,0,2,16,14,1,0,0,0,0,6,16,16,5,0,0,0,2,16,16,16,3,0,0,0,0,3,9,16,4,0,0,0,0,0,2,16,10,0,0,0,0,0,0,15,16,0,0,0,0,2,9,16,16,3,0,0,0,2,14,16,10,1,0,1 +0,1,12,13,16,15,1,0,0,3,16,16,10,5,0,0,0,0,12,12,0,0,0,0,0,0,8,14,1,0,0,0,0,0,3,15,5,0,0,0,0,0,0,13,8,0,0,0,0,0,11,16,9,0,0,0,0,1,13,16,3,0,0,0,5 +0,0,0,9,13,3,0,0,0,0,9,14,14,4,0,0,0,1,14,13,0,0,0,0,0,4,16,6,0,0,0,0,0,4,16,6,4,2,0,0,0,2,16,16,16,15,2,0,0,0,7,16,13,16,10,0,0,0,0,7,13,10,5,0,6 +0,0,7,12,14,10,1,0,0,6,16,11,10,16,4,0,0,3,5,0,8,13,1,0,0,0,0,5,15,5,0,0,0,0,2,15,11,0,0,0,0,0,13,14,1,0,0,0,0,1,15,11,6,8,3,0,0,0,7,14,12,12,6,0,2 +0,0,2,7,14,12,1,0,0,2,14,15,10,16,7,0,0,4,9,0,5,16,3,0,0,0,0,2,16,13,0,0,0,0,0,2,15,11,0,0,0,0,0,0,6,16,4,0,0,0,0,5,16,13,0,0,0,0,0,9,11,2,0,0,3 +0,0,0,8,15,4,0,0,0,0,4,16,11,4,0,0,0,0,15,13,0,0,0,0,0,5,16,7,0,0,0,0,0,7,16,13,12,12,1,0,0,3,16,15,12,15,12,0,0,0,11,15,9,15,13,0,0,0,0,8,15,13,6,0,6 +0,1,13,16,13,5,2,0,0,2,16,9,13,14,0,0,0,0,11,13,15,5,0,0,0,0,3,16,13,0,0,0,0,0,3,16,8,0,0,0,0,0,10,16,12,0,0,0,0,1,15,16,12,0,0,0,0,1,16,10,1,0,0,0,8 +0,1,8,14,16,8,0,0,0,3,12,9,11,14,0,0,0,0,0,0,7,14,0,0,0,0,0,0,14,11,0,0,0,0,0,9,13,2,0,0,0,0,7,16,3,0,0,0,0,0,15,16,8,8,3,0,0,0,9,13,12,8,3,0,2 +0,1,11,16,13,3,0,0,0,11,16,11,16,12,0,0,0,4,5,0,13,15,0,0,0,0,0,0,16,10,0,0,0,0,0,8,15,1,0,0,0,0,5,16,5,0,0,0,0,0,14,16,10,8,5,0,0,0,13,16,13,12,5,0,2 +0,0,0,0,6,13,1,0,0,0,0,3,16,7,0,0,0,0,0,15,13,1,0,0,0,0,12,15,1,0,0,0,0,5,16,7,7,14,5,0,0,6,16,16,16,15,3,0,0,0,0,1,14,12,0,0,0,0,0,0,9,11,0,0,4 +0,0,7,16,16,10,0,0,0,4,16,13,16,15,0,0,0,0,8,2,16,12,0,0,0,0,0,8,16,6,0,0,0,0,4,16,9,0,0,0,0,1,13,15,2,0,0,0,0,1,16,15,12,12,5,0,0,0,9,16,14,7,1,0,2 +0,0,8,16,16,12,0,0,0,5,16,15,10,3,0,0,0,3,16,7,0,0,0,0,0,0,7,16,2,0,0,0,0,0,0,15,8,0,0,0,0,0,0,11,13,0,0,0,0,2,12,15,10,0,0,0,0,0,11,15,3,0,0,0,5 +0,0,0,13,7,0,0,0,0,0,9,16,15,0,0,0,0,8,16,16,14,0,0,0,0,7,8,11,16,4,0,0,0,0,0,2,16,10,0,0,0,0,0,0,10,16,4,0,0,0,1,7,11,16,12,0,0,0,0,11,16,14,8,0,1 +0,0,1,9,15,7,1,0,0,4,13,15,13,16,4,0,0,11,13,1,11,14,0,0,0,0,0,0,16,12,0,0,0,0,0,0,14,12,0,0,0,0,0,0,9,14,1,0,0,0,0,3,13,15,0,0,0,0,0,12,13,3,0,0,3 +0,0,0,2,15,6,0,0,0,0,0,13,15,1,0,0,0,0,9,16,4,2,1,0,0,4,16,9,2,14,11,0,0,10,15,5,13,16,4,0,0,15,16,16,16,14,0,0,0,9,12,8,16,8,0,0,0,0,0,2,15,3,0,0,4 +0,0,1,15,10,0,0,0,0,0,9,10,10,7,0,0,0,2,15,1,2,14,0,0,0,2,16,1,0,11,4,0,0,2,15,0,0,9,7,0,0,0,13,4,1,15,7,0,0,0,8,12,11,16,3,0,0,0,1,12,16,7,0,0,0 +0,0,0,5,15,16,6,0,0,1,15,15,10,15,14,0,0,0,1,0,0,8,12,0,0,0,0,2,5,15,9,0,0,0,0,8,16,16,6,0,0,0,0,0,8,13,0,0,0,0,0,0,12,8,0,0,0,0,0,2,13,2,0,0,7 
+0,0,1,8,12,12,2,0,0,0,13,12,12,15,9,0,0,0,0,0,0,14,10,0,0,0,0,2,8,16,8,0,0,0,2,16,16,15,5,0,0,0,0,1,16,8,0,0,0,0,0,8,16,4,0,0,0,0,0,9,12,0,0,0,7 +0,0,3,9,13,2,0,0,0,6,16,16,16,8,0,0,0,9,5,3,16,6,0,0,0,0,0,7,15,1,0,0,0,0,0,13,10,0,0,0,0,0,6,16,3,0,0,0,0,0,8,16,14,12,7,0,0,0,3,13,16,14,8,0,2 +0,0,2,16,9,0,0,0,0,0,8,16,15,0,0,0,0,6,16,16,15,1,0,0,0,0,0,5,16,8,0,0,0,0,0,0,14,14,0,0,0,0,0,0,2,16,6,0,0,0,1,4,7,16,13,0,0,0,0,11,16,16,15,1,1 +0,0,2,13,16,3,0,0,0,4,15,14,15,12,0,0,0,4,7,1,14,7,0,0,0,0,0,6,16,3,0,0,0,0,4,16,5,0,0,0,0,1,13,10,0,0,0,0,0,0,14,12,5,8,3,0,0,0,3,12,16,10,2,0,2 +0,0,5,13,8,3,0,0,0,0,14,16,16,12,0,0,0,3,15,1,3,12,5,0,0,4,10,0,0,7,8,0,0,4,8,0,0,9,8,0,0,4,11,1,1,15,6,0,0,0,15,12,11,13,0,0,0,0,3,13,13,3,0,0,0 +0,0,0,11,13,1,0,0,0,1,2,15,13,8,0,0,0,5,13,0,3,13,0,0,0,9,11,0,0,15,1,0,0,6,11,0,0,14,5,0,0,2,15,2,3,16,4,0,0,0,8,14,14,16,2,0,0,0,0,7,14,6,0,0,0 +0,0,9,15,16,12,1,0,0,11,16,16,13,8,1,0,0,3,15,11,1,0,0,0,0,0,8,14,0,0,0,0,0,0,3,16,4,0,0,0,0,0,1,16,7,0,0,0,0,3,15,16,4,0,0,0,0,0,13,15,0,0,0,0,5 +0,2,14,14,1,0,0,0,0,4,16,11,4,6,0,0,0,4,16,4,12,16,2,0,0,0,13,16,16,6,0,0,0,0,13,16,10,0,0,0,0,0,15,16,4,0,0,0,0,4,16,16,7,0,0,0,0,2,13,15,1,0,0,0,8 +0,0,0,2,13,2,0,0,0,0,0,9,15,1,0,0,0,0,1,15,7,0,0,0,0,0,11,12,0,6,6,0,0,7,15,7,5,16,4,0,0,10,16,16,16,13,0,0,0,0,4,3,14,7,0,0,0,0,0,3,12,0,0,0,4 +0,0,0,1,12,6,0,0,0,0,0,6,16,2,0,0,0,0,1,16,5,0,0,0,0,0,11,12,0,2,0,0,0,6,15,2,2,16,3,0,0,11,14,9,15,15,0,0,0,3,11,14,16,8,0,0,0,0,0,1,13,2,0,0,4 +0,1,8,12,11,4,0,0,0,12,16,11,15,14,0,0,0,3,3,3,16,11,0,0,0,0,1,13,16,1,0,0,0,0,0,5,16,7,0,0,0,0,0,0,12,14,0,0,0,0,0,7,16,9,0,0,0,0,10,13,7,0,0,0,3 +0,0,6,12,8,0,0,0,0,7,16,8,15,1,0,0,0,9,4,0,13,4,0,0,0,0,0,1,16,1,0,0,0,0,0,6,10,0,0,0,0,0,0,12,8,0,0,0,0,0,6,15,6,4,6,0,0,0,7,16,15,12,12,0,2 +0,0,1,8,12,16,15,0,0,1,14,16,15,15,13,0,0,0,4,1,0,14,11,0,0,0,1,7,14,16,14,0,0,0,8,16,16,14,2,0,0,0,1,8,16,6,0,0,0,0,0,8,16,0,0,0,0,0,0,15,7,0,0,0,7 +0,0,6,16,13,2,0,0,0,1,11,16,15,3,0,0,0,11,16,16,16,2,0,0,0,6,11,11,16,12,1,0,0,0,0,1,14,16,7,0,0,0,0,0,6,16,13,0,0,0,0,0,7,16,16,3,0,0,2,12,16,15,8,1,1 +0,0,4,15,12,3,0,0,0,3,16,16,16,13,0,0,0,5,16,16,16,13,0,0,0,0,2,2,7,16,1,0,0,0,0,0,7,16,3,0,0,0,0,1,15,13,0,0,0,0,2,13,16,8,0,0,0,0,6,15,5,0,0,0,9 +0,1,11,12,12,13,2,0,0,7,16,9,8,8,2,0,0,7,14,8,8,1,0,0,0,3,15,11,11,13,0,0,0,0,0,0,4,16,0,0,0,0,0,0,9,13,0,0,0,0,2,11,14,4,0,0,0,0,12,9,1,0,0,0,5 +0,0,6,13,16,14,0,0,0,0,14,12,14,13,0,0,0,0,0,0,13,7,0,0,0,0,0,3,16,10,4,0,0,0,13,16,16,16,8,0,0,0,10,15,14,12,4,0,0,0,3,16,5,0,0,0,0,0,6,14,0,0,0,0,7 +0,0,0,9,12,5,0,0,0,0,0,16,9,15,2,0,0,0,0,16,6,16,4,0,0,0,0,4,16,16,8,0,0,0,0,0,2,15,4,0,0,0,0,0,0,10,7,0,0,0,16,11,3,13,4,0,0,0,3,6,13,15,2,0,9 +0,0,0,3,13,2,0,0,0,0,0,9,16,2,0,0,0,0,2,15,16,4,0,0,0,2,13,16,16,2,0,0,0,4,12,8,16,7,0,0,0,0,0,4,16,9,0,0,0,0,0,5,16,16,0,0,0,0,0,2,15,9,0,0,1 +0,0,5,14,11,2,0,0,0,3,16,10,15,13,0,0,0,8,13,0,14,16,1,0,0,3,16,13,16,15,3,0,0,0,3,16,16,3,0,0,0,0,1,16,16,14,0,0,0,0,6,16,13,16,4,0,0,0,5,15,12,12,1,0,8 +0,0,0,6,16,6,0,0,0,0,0,12,16,13,0,0,0,0,9,16,16,16,0,0,0,8,16,16,16,13,0,0,0,0,4,3,16,14,0,0,0,0,0,3,16,13,0,0,0,0,0,4,16,16,2,0,0,0,0,4,16,11,2,0,1 +0,0,2,13,15,6,0,0,0,0,8,14,10,16,3,0,0,0,15,6,0,10,8,0,0,6,15,1,0,11,7,0,0,8,13,0,0,10,8,0,0,7,14,1,0,16,2,0,0,1,15,14,11,14,0,0,0,0,3,15,12,1,0,0,0 +0,0,2,15,15,7,0,0,0,0,10,12,6,16,5,0,0,2,16,3,0,8,10,0,0,7,16,0,0,8,8,0,0,3,16,4,0,12,5,0,0,0,16,7,1,15,3,0,0,0,12,13,13,10,0,0,0,0,2,12,13,1,0,0,0 +0,1,10,15,9,0,0,0,0,5,16,8,16,7,0,0,0,5,14,0,12,9,0,0,0,1,12,13,16,16,2,0,0,0,1,9,12,14,7,0,0,0,0,0,0,4,13,0,0,0,14,12,8,11,16,0,0,0,7,16,16,16,9,0,9 
+0,0,5,11,0,0,0,0,0,0,13,6,0,0,0,0,0,2,16,4,0,0,0,0,0,8,13,0,0,0,0,0,0,8,16,15,12,6,0,0,0,8,16,14,12,16,2,0,0,2,16,12,5,16,4,0,0,0,5,14,15,10,0,0,6 +0,0,4,13,1,0,0,0,0,0,10,12,0,0,0,0,0,0,13,9,0,0,0,0,0,0,15,6,0,0,0,0,0,2,16,9,4,2,0,0,0,3,16,15,12,15,6,0,0,1,14,15,4,13,14,0,0,0,3,13,16,14,5,0,6 +0,4,16,9,0,0,0,0,0,9,16,16,3,0,0,0,0,12,8,16,4,0,0,0,0,5,2,16,5,0,0,0,0,0,0,16,4,0,0,0,0,0,3,16,4,0,0,0,0,5,15,16,13,12,8,0,0,4,16,16,16,16,12,0,2 +0,1,7,6,12,14,7,0,0,4,16,13,7,4,1,0,0,5,14,4,2,0,0,0,0,5,16,16,15,3,0,0,0,1,7,3,14,8,0,0,0,0,0,0,12,6,0,0,0,1,3,5,15,2,0,0,0,1,13,13,5,0,0,0,5 +0,0,10,16,10,2,0,0,0,5,15,5,16,15,0,0,0,6,14,2,10,16,2,0,0,0,12,16,16,16,4,0,0,0,0,3,8,14,7,0,0,0,0,0,0,11,9,0,0,2,14,9,8,13,12,0,0,1,9,14,14,12,3,0,9 +0,0,3,14,1,0,0,0,0,0,10,14,1,7,0,0,0,0,15,9,7,16,0,0,0,5,16,4,13,13,0,0,0,12,16,16,16,16,9,0,0,3,8,15,16,12,2,0,0,0,1,16,11,0,0,0,0,0,3,16,5,0,0,0,4 +0,0,1,15,6,0,0,0,0,0,8,15,2,0,0,0,0,0,15,9,0,0,0,0,0,2,16,10,0,0,0,0,0,8,16,11,10,4,0,0,0,5,16,16,11,16,6,0,0,2,15,16,11,16,11,0,0,0,3,11,15,13,1,0,6 +0,0,0,8,13,0,0,0,0,0,5,16,5,6,2,0,0,0,14,10,2,16,6,0,0,6,16,3,9,15,0,0,0,11,16,16,16,16,6,0,0,4,12,14,16,12,2,0,0,0,0,8,16,4,0,0,0,0,0,11,11,1,0,0,4 +0,0,2,12,12,2,0,0,0,3,14,13,11,11,0,0,0,5,16,2,2,14,4,0,0,7,14,2,0,12,7,0,0,4,12,0,0,12,5,0,0,2,14,1,2,15,2,0,0,0,11,10,13,8,0,0,0,0,3,13,12,2,0,0,0 +0,0,3,13,16,15,1,0,0,0,6,12,13,16,4,0,0,0,0,0,9,16,1,0,0,0,2,5,14,14,3,0,0,0,14,16,16,16,10,0,0,0,5,10,16,7,1,0,0,0,1,16,11,0,0,0,0,0,5,16,5,0,0,0,7 +0,0,4,15,10,5,0,0,0,0,15,10,13,16,2,0,0,4,15,1,9,16,4,0,0,0,14,14,16,16,4,0,0,0,1,4,2,15,5,0,0,0,1,0,0,13,6,0,0,7,16,8,9,15,5,0,0,0,6,12,13,11,1,0,9 +0,0,7,15,13,1,0,0,0,0,15,9,13,10,0,0,0,0,16,5,12,12,0,0,0,0,4,15,16,12,0,0,0,0,0,2,7,15,1,0,0,0,0,0,0,15,4,0,0,4,12,7,5,14,4,0,0,0,7,13,13,9,1,0,9 +0,0,1,14,7,0,0,0,0,1,13,16,14,11,0,0,0,4,16,12,0,14,3,0,0,6,14,14,0,8,8,0,0,4,16,14,0,8,8,0,0,1,15,3,0,10,8,0,0,0,7,15,10,16,1,0,0,0,1,9,16,8,0,0,0 +0,0,7,12,13,12,10,0,0,1,16,10,4,8,4,0,0,1,16,9,8,2,0,0,0,7,15,12,14,9,0,0,0,0,0,0,6,12,0,0,0,0,0,0,7,14,0,0,0,0,6,8,12,12,0,0,0,0,12,13,9,0,0,0,5 +0,1,11,12,0,0,0,0,0,7,14,15,6,0,0,0,0,7,3,12,10,0,0,0,0,0,0,15,14,5,0,0,0,0,0,8,10,16,2,0,0,0,0,0,0,12,8,0,0,0,9,8,8,15,10,0,0,0,13,16,14,8,1,0,3 +0,1,10,15,8,0,0,0,0,4,16,10,13,14,1,0,0,8,16,0,10,16,4,0,0,1,16,12,15,16,4,0,0,0,2,11,10,16,6,0,0,0,0,0,0,16,6,0,0,0,12,11,11,16,4,0,0,0,7,12,12,7,0,0,9 +0,0,0,8,10,0,0,0,0,0,4,15,2,0,0,0,0,0,11,9,0,12,3,0,0,2,15,0,3,15,0,0,0,6,16,6,10,14,0,0,0,2,16,16,16,14,0,0,0,0,0,6,15,1,0,0,0,0,0,10,9,0,0,0,4 +0,0,7,15,4,0,0,0,0,0,14,16,12,0,0,0,0,0,12,6,16,0,0,0,0,0,0,10,16,2,0,0,0,0,0,8,14,15,3,0,0,0,0,0,0,9,11,0,0,0,5,9,9,15,11,0,0,0,9,14,12,9,1,0,3 +0,0,4,14,0,0,0,0,0,0,15,7,0,0,0,0,0,3,13,0,0,0,0,0,0,6,12,4,3,0,0,0,0,8,16,16,16,13,1,0,0,4,16,8,2,16,6,0,0,2,16,13,10,16,4,0,0,0,5,15,13,3,0,0,6 +0,1,11,14,6,0,0,0,0,7,12,5,15,0,0,0,0,6,6,1,16,0,0,0,0,0,0,5,16,0,0,0,0,0,0,13,16,12,0,0,0,0,0,0,3,14,6,0,0,0,5,7,6,14,8,0,0,0,14,16,16,11,1,0,3 +0,5,16,5,0,0,0,0,0,9,16,14,0,0,0,0,0,12,14,14,0,0,0,0,0,7,13,12,0,0,0,0,0,0,11,8,0,3,1,0,0,0,16,8,7,14,11,0,0,8,16,16,16,16,11,0,0,6,16,16,11,6,1,0,2 +0,0,9,12,11,0,0,0,0,7,13,4,14,2,0,0,0,2,6,0,14,4,0,0,0,0,0,8,14,0,0,0,0,0,0,10,14,13,1,0,0,0,0,0,1,14,5,0,0,0,8,5,10,14,2,0,0,1,15,16,11,1,0,0,3 +0,0,10,15,9,1,0,0,0,5,16,10,16,9,0,0,0,7,10,0,14,10,0,0,0,3,15,7,12,15,1,0,0,0,4,11,14,16,4,0,0,0,0,0,0,12,11,0,0,0,11,9,5,12,13,0,0,0,10,12,12,15,11,0,9 
+0,0,5,15,15,9,0,0,0,0,15,8,5,13,5,0,0,6,16,9,0,6,10,0,0,8,16,4,0,6,12,0,0,0,16,4,0,4,12,0,0,0,16,3,0,9,9,0,0,0,15,8,7,15,1,0,0,0,6,16,16,6,0,0,0 +0,0,1,15,14,0,0,0,0,0,6,16,8,0,0,0,0,0,14,16,13,0,0,0,0,9,15,14,16,1,0,0,0,5,3,6,16,5,0,0,0,0,0,3,16,11,0,0,0,0,0,5,14,15,0,0,0,0,3,15,16,16,6,0,1 +0,0,5,12,8,0,0,0,0,0,13,6,1,0,0,0,0,2,14,1,0,0,0,0,0,4,14,0,3,1,0,0,0,7,11,13,13,13,5,0,0,4,16,8,0,2,12,0,0,1,15,6,2,12,3,0,0,0,6,13,12,4,0,0,6 +0,0,13,16,16,5,0,0,0,1,10,8,16,6,0,0,0,0,0,10,14,0,0,0,0,0,7,16,6,0,0,0,0,0,3,13,16,6,0,0,0,0,0,0,10,15,1,0,0,0,2,5,7,16,7,0,0,0,15,16,16,14,2,0,3 +0,0,0,11,16,10,0,0,0,0,6,11,2,12,0,0,0,0,5,15,3,11,0,0,0,0,1,13,16,1,0,0,0,0,1,12,16,7,0,0,0,7,13,3,7,14,0,0,0,10,13,8,5,10,0,0,0,0,0,14,15,3,0,0,8 +0,0,6,15,16,13,0,0,0,5,16,8,6,15,3,0,0,10,16,4,0,9,8,0,0,5,16,4,0,7,11,0,0,4,16,3,0,6,12,0,0,3,16,3,0,9,9,0,0,0,15,11,8,16,3,0,0,0,11,16,14,5,0,0,0 +0,0,2,9,15,12,0,0,0,0,12,15,14,13,0,0,0,0,2,0,11,7,0,0,0,0,0,1,16,9,4,0,0,1,9,16,16,11,5,0,0,4,13,13,9,0,0,0,0,0,0,13,3,0,0,0,0,0,2,14,1,0,0,0,7 +0,0,7,13,16,13,2,0,0,2,16,15,5,13,10,0,0,6,16,3,0,7,11,0,0,11,14,0,0,5,12,0,0,8,16,0,0,4,12,0,0,8,15,0,0,7,11,0,0,7,16,8,8,15,2,0,0,0,12,16,16,5,0,0,0 +0,0,10,16,16,16,14,0,0,4,16,11,7,3,2,0,0,10,11,0,0,0,0,0,0,7,15,9,8,2,0,0,0,1,11,12,15,14,1,0,0,0,0,0,1,15,6,0,0,2,12,6,0,15,6,0,0,0,9,14,16,15,2,0,5 +0,0,7,16,4,0,0,0,0,0,9,16,6,0,0,0,0,1,13,16,6,0,0,0,0,9,16,16,11,0,0,0,0,8,6,10,16,1,0,0,0,0,0,2,16,11,0,0,0,0,1,7,16,16,7,0,0,0,4,15,16,15,15,3,1 +0,0,9,16,13,0,0,0,0,0,15,11,0,0,0,0,0,4,16,6,0,0,0,0,0,3,16,7,4,3,0,0,0,2,16,16,16,16,10,0,0,0,15,16,2,3,14,2,0,0,15,16,6,4,16,3,0,0,8,6,15,16,8,0,6 +0,0,0,13,16,16,16,8,0,0,0,7,4,6,15,9,0,0,0,0,0,6,15,0,0,0,0,2,4,14,6,0,0,0,3,16,16,16,6,0,0,0,0,1,15,4,0,0,0,0,0,8,12,0,0,0,0,0,1,15,4,0,0,0,7 +0,0,0,6,14,16,8,0,0,0,4,13,8,16,9,0,0,0,0,0,0,15,6,0,0,0,0,5,9,16,9,0,0,0,10,16,16,15,4,0,0,1,9,3,13,7,0,0,0,0,0,1,16,2,0,0,0,0,0,8,9,0,0,0,7 +0,0,0,7,15,0,0,0,0,0,0,14,11,3,11,0,0,0,6,16,2,14,9,0,0,2,14,7,6,16,2,0,1,14,15,11,15,16,4,0,2,13,12,11,16,7,0,0,0,0,0,5,16,1,0,0,0,0,0,11,12,0,0,0,4 +0,0,10,16,16,6,0,0,0,3,13,6,16,4,0,0,0,0,0,10,12,0,0,0,0,0,6,16,5,0,0,0,0,0,1,12,15,5,0,0,0,0,0,0,10,15,3,0,0,0,0,2,7,16,5,0,0,1,13,16,14,10,0,0,3 +0,0,7,16,15,3,0,0,0,1,15,10,12,4,0,0,0,7,16,1,0,0,0,0,0,8,13,0,4,3,0,0,0,9,14,13,16,16,5,0,0,7,16,13,2,9,12,0,0,2,16,10,1,11,12,0,0,0,8,15,16,15,3,0,6 +0,0,0,6,16,1,0,0,0,0,3,16,8,4,15,0,0,1,14,11,0,10,14,0,0,9,16,13,12,16,12,0,0,3,12,11,12,16,6,0,0,0,0,0,12,12,1,0,0,0,0,2,16,6,0,0,0,0,0,7,13,1,0,0,4 +0,0,0,7,14,7,0,0,0,0,3,13,4,12,1,0,0,0,13,3,0,12,4,0,0,1,13,0,6,16,6,0,0,0,15,15,9,12,3,0,0,0,1,1,0,9,2,0,0,0,2,9,2,12,0,0,0,0,1,9,15,7,0,0,9 +0,0,7,13,15,3,0,0,0,3,16,4,8,4,0,0,0,1,16,3,10,0,0,0,0,0,8,16,6,0,0,0,0,0,6,11,11,10,1,0,0,1,13,3,0,9,6,0,0,2,14,1,0,9,3,0,0,0,4,13,14,9,0,0,8 +0,2,16,13,1,0,0,0,0,7,14,10,3,0,0,0,0,8,12,0,0,0,0,0,0,9,13,1,6,3,0,0,0,7,14,15,16,15,4,0,0,4,16,11,4,5,16,0,0,1,16,11,0,7,15,1,0,2,14,13,16,16,4,0,6 +0,0,3,11,13,8,0,0,0,1,14,5,1,12,3,0,0,4,12,0,0,8,11,0,0,1,15,6,4,15,4,0,0,0,2,8,10,14,4,0,0,0,0,0,0,11,4,0,0,0,0,2,2,11,4,0,0,0,2,14,11,7,1,0,9 +0,0,8,16,10,1,0,0,0,0,15,6,13,4,0,0,0,0,15,3,12,2,0,0,0,0,6,16,13,0,0,0,0,0,2,14,14,6,0,0,0,1,13,3,1,13,3,0,0,3,14,0,0,5,7,0,0,0,6,13,12,15,5,0,8 +0,0,2,11,16,15,1,0,0,0,4,8,10,16,4,0,0,0,0,0,3,16,4,0,0,0,0,3,8,16,3,0,0,5,12,16,16,16,7,0,0,8,8,8,16,3,0,0,0,0,0,14,9,0,0,0,0,0,3,16,2,0,0,0,7 +0,1,10,16,15,4,0,0,0,9,16,7,7,15,2,0,0,12,13,0,0,12,8,0,0,12,12,0,0,6,11,0,0,10,12,0,0,4,12,0,0,7,14,0,0,6,11,0,0,2,16,5,3,14,4,0,0,0,10,16,16,12,0,0,0 
+0,0,4,16,13,0,0,0,0,0,14,9,15,0,0,0,0,5,14,2,16,6,0,0,0,9,12,7,15,10,0,0,0,3,16,15,9,16,1,0,0,0,4,0,1,14,5,0,0,0,0,6,6,12,7,0,0,0,2,16,16,13,1,0,9 +0,3,16,16,16,16,14,0,0,9,15,9,7,3,2,0,0,10,12,0,0,0,0,0,0,10,16,15,9,0,0,0,0,1,8,9,16,7,0,0,0,0,0,0,10,12,0,0,0,5,7,0,8,14,0,0,0,4,16,16,16,8,0,0,5 +0,1,12,16,14,5,0,0,0,3,13,9,16,12,0,0,0,0,1,11,16,6,0,0,0,0,8,16,9,0,0,0,0,0,3,15,13,0,0,0,0,0,0,2,15,9,0,0,0,0,3,8,12,16,3,0,0,0,14,16,16,16,7,0,3 +0,0,11,15,10,0,0,0,0,2,13,0,9,3,0,0,0,3,12,0,10,0,0,0,0,0,11,12,12,0,0,0,0,0,0,11,14,6,0,0,0,1,9,5,0,10,5,0,0,4,10,0,0,8,4,0,0,1,14,11,14,10,0,0,8 +0,0,9,16,16,6,0,0,0,3,16,6,8,16,4,0,0,10,12,0,6,16,6,0,0,10,14,5,13,16,4,0,0,1,11,12,7,14,8,0,0,0,0,0,0,10,10,0,0,0,0,0,0,13,8,0,0,0,11,16,16,16,5,0,9 +0,0,1,10,13,1,0,0,0,0,14,12,8,2,0,0,0,5,14,1,0,0,0,0,0,6,11,0,0,0,0,0,0,8,14,14,12,11,0,0,0,1,16,10,0,2,10,0,0,0,13,11,1,0,13,0,0,0,1,8,15,16,9,0,6 +0,2,15,16,5,0,0,0,0,6,16,10,10,0,0,0,0,3,15,6,12,0,0,0,0,0,1,7,13,0,0,0,0,0,0,13,7,0,0,0,0,0,3,15,3,0,0,0,0,0,14,15,10,8,4,0,0,3,16,16,16,16,16,3,2 +0,0,13,13,0,0,0,0,0,6,16,7,0,0,0,0,0,10,13,0,0,0,0,0,0,8,16,13,16,10,0,0,0,8,16,14,9,15,6,0,0,5,16,6,0,2,15,0,0,7,16,12,0,3,15,0,0,1,6,13,16,16,8,0,6 +0,0,10,13,12,5,0,0,0,3,16,7,10,16,6,0,0,3,16,3,0,14,8,0,0,1,11,15,14,16,8,0,0,0,0,3,6,14,8,0,0,0,0,0,1,13,7,0,0,0,5,4,7,16,2,0,0,0,13,16,16,10,0,0,9 +0,0,0,0,7,13,2,0,0,0,0,0,8,16,4,0,0,3,8,9,15,15,1,0,0,4,12,11,14,12,0,0,0,0,0,0,12,12,0,0,0,0,0,0,11,13,0,0,0,0,0,0,8,16,5,0,0,0,0,0,6,16,5,0,1 +0,0,10,14,8,0,0,0,0,7,15,5,15,9,0,0,0,8,11,0,5,15,2,0,0,4,16,6,5,16,4,0,0,0,8,12,11,14,7,0,0,0,0,0,0,11,9,0,0,0,4,2,0,11,11,0,0,0,11,16,16,15,4,0,9 +0,0,14,5,0,0,0,0,0,0,13,8,0,0,0,0,0,0,16,7,0,0,0,0,0,0,15,8,0,0,0,0,0,2,16,5,15,3,0,0,0,2,16,9,14,11,1,0,0,6,16,16,16,16,15,0,0,1,8,5,7,16,8,0,4 +0,1,10,13,10,1,0,0,0,6,14,4,10,15,2,0,0,8,12,0,2,16,3,0,0,2,14,8,10,16,4,0,0,0,2,4,4,13,6,0,0,0,0,0,0,12,8,0,0,6,12,2,6,16,4,0,0,1,10,16,13,5,0,0,9 +0,0,6,15,11,1,0,0,0,0,15,7,13,15,2,0,0,5,14,0,2,16,4,0,0,3,15,8,10,16,4,0,0,0,6,12,10,14,8,0,0,2,3,0,0,12,7,0,0,9,15,8,8,16,3,0,0,0,7,15,15,4,0,0,9 +0,0,7,15,14,9,0,0,0,4,15,5,12,16,4,0,0,7,13,1,9,14,3,0,0,1,14,16,16,2,0,0,0,0,12,12,14,12,0,0,0,2,16,1,2,15,4,0,0,1,15,7,0,14,8,0,0,0,6,16,16,13,2,0,8 +0,1,10,15,6,0,0,0,0,7,13,6,13,2,0,0,0,7,8,0,13,4,0,0,0,0,1,0,13,4,0,0,0,0,0,3,14,0,0,0,0,0,0,11,10,0,0,0,0,0,7,16,5,4,2,0,0,0,15,16,14,16,15,0,2 +0,0,6,16,16,15,14,0,0,0,2,4,10,16,5,0,0,0,0,1,12,10,0,0,0,1,6,11,16,13,6,0,0,7,14,16,14,9,4,0,0,0,1,16,8,0,0,0,0,0,4,16,8,0,0,0,0,0,5,15,2,0,0,0,7 +0,0,4,13,1,0,0,0,0,0,13,11,0,0,0,0,0,2,16,3,0,0,0,0,0,4,16,11,5,1,0,0,0,4,16,15,15,13,4,0,0,2,16,2,0,7,14,0,0,0,14,10,8,9,16,2,0,0,4,13,16,12,10,0,6 +0,0,10,14,12,1,0,0,0,4,11,4,10,9,0,0,0,1,3,0,7,11,0,0,0,0,0,4,14,6,0,0,0,0,0,11,13,15,1,0,0,0,0,0,0,14,7,0,0,3,6,2,4,15,6,0,0,1,11,15,14,8,0,0,3 +0,0,4,14,15,6,0,0,0,0,13,5,9,11,0,0,0,0,0,0,5,11,0,0,0,0,1,9,16,4,0,0,0,0,3,9,9,15,1,0,0,0,0,0,0,15,4,0,0,8,14,8,5,16,4,0,0,0,6,14,15,8,0,0,3 +0,0,2,14,11,8,12,1,0,0,8,10,9,14,14,0,0,0,10,1,0,12,4,0,0,0,1,2,7,13,1,0,0,0,1,15,16,16,5,0,0,0,0,3,13,2,0,0,0,0,0,9,7,0,0,0,0,0,0,15,4,0,0,0,7 +0,1,13,12,1,0,0,0,0,5,13,12,9,0,0,0,0,1,11,4,16,0,0,0,0,0,3,1,16,3,0,0,0,0,0,2,16,0,0,0,0,0,0,5,15,1,0,0,0,0,7,16,11,4,5,0,0,0,15,16,16,16,16,0,2 +0,0,0,7,9,0,0,0,0,0,4,15,6,0,0,0,0,0,11,11,0,0,0,0,0,1,16,14,12,4,0,0,0,3,16,16,13,14,3,0,0,0,16,6,1,8,11,0,0,0,11,11,1,1,16,0,0,0,2,9,14,16,16,1,6 
+0,0,6,15,12,1,0,0,0,0,16,9,15,14,2,0,0,6,14,0,2,16,6,0,0,5,15,5,6,16,4,0,0,0,6,12,12,15,8,0,0,0,0,0,0,13,7,0,0,2,14,4,7,16,2,0,0,0,7,15,15,5,0,0,9 +0,0,0,4,16,2,0,0,0,0,0,8,16,0,0,0,0,0,0,14,11,0,0,0,0,0,7,15,1,0,0,0,0,0,15,11,9,3,0,0,0,9,16,11,16,9,0,0,0,13,16,16,16,16,6,0,0,1,0,8,16,5,0,0,4 +0,0,5,16,14,8,0,0,0,5,15,6,11,16,1,0,0,8,14,0,5,16,1,0,0,1,12,14,16,8,0,0,0,0,6,14,15,10,0,0,0,0,12,4,1,13,6,0,0,0,12,7,1,13,8,0,0,0,4,15,16,10,1,0,8 +0,0,8,14,14,9,0,0,0,1,16,9,14,15,9,0,0,2,16,6,9,15,6,0,0,0,8,16,16,4,0,0,0,0,10,12,13,12,0,0,0,1,15,2,1,16,6,0,0,2,14,4,6,16,4,0,0,0,10,16,16,10,1,0,8 +0,3,14,15,5,0,0,0,0,8,11,7,15,2,0,0,0,9,4,0,16,4,0,0,0,1,3,0,14,8,0,0,0,0,0,4,15,0,0,0,0,0,0,12,10,0,0,0,0,1,14,16,10,10,6,0,0,2,14,15,13,16,12,0,2 +0,0,13,12,12,12,5,0,0,3,16,7,4,4,2,0,0,5,16,8,5,0,0,0,0,5,14,11,16,6,0,0,0,0,0,0,6,14,0,0,0,0,0,0,0,16,0,0,0,2,4,1,9,16,0,0,0,1,13,16,13,2,0,0,5 +0,0,6,15,15,11,5,0,0,0,14,8,12,15,13,0,0,2,16,3,0,11,12,0,0,0,13,16,15,13,1,0,0,0,6,16,16,7,0,0,0,0,13,5,11,12,0,0,0,0,13,11,9,16,2,0,0,0,6,14,15,7,0,0,8 +0,0,0,5,15,0,0,0,0,0,0,12,12,0,0,0,0,0,0,16,8,0,0,0,0,0,9,14,4,1,0,0,0,0,16,7,16,8,0,0,0,11,16,15,16,16,3,0,0,13,16,16,16,16,7,0,0,0,0,5,16,2,0,0,4 +0,0,6,13,6,0,0,0,0,0,14,11,16,10,0,0,0,2,14,0,9,15,0,0,0,5,9,0,0,12,3,0,0,8,8,0,0,8,5,0,0,7,11,0,0,8,8,0,0,1,15,7,5,14,4,0,0,0,6,16,16,9,0,0,0 +0,0,0,11,10,5,8,5,0,0,5,14,13,16,13,3,0,0,12,5,1,15,3,0,0,0,7,1,9,9,0,0,0,0,9,16,16,16,6,0,0,0,0,6,12,4,1,0,0,0,0,13,5,0,0,0,0,0,2,13,1,0,0,0,7 +0,0,0,9,10,0,0,0,0,0,2,15,9,0,0,0,0,0,6,16,6,0,0,0,0,0,13,13,5,1,0,0,0,3,16,7,16,8,0,0,1,13,16,10,16,12,0,0,0,13,16,16,16,16,3,0,0,2,4,10,16,2,0,0,4 +0,0,8,13,10,0,0,0,0,1,16,7,14,10,1,0,0,5,12,0,2,16,4,0,0,3,16,10,9,16,4,0,0,0,3,8,8,14,6,0,0,0,0,0,0,12,8,0,0,3,12,3,3,14,6,0,0,1,9,15,16,9,0,0,9 +0,0,0,4,13,4,0,0,0,0,0,9,16,11,0,0,0,5,16,16,16,8,0,0,0,0,4,11,16,9,0,0,0,0,0,4,16,12,0,0,0,0,0,4,16,14,0,0,0,0,0,12,16,16,5,0,0,0,0,3,16,16,4,0,1 +0,0,5,14,9,3,0,0,0,0,14,11,16,15,2,0,0,5,11,0,11,15,4,0,0,7,8,0,0,9,7,0,0,4,9,0,0,8,8,0,0,4,13,0,0,9,5,0,0,2,16,5,7,15,0,0,0,0,7,15,15,6,0,0,0 +0,0,11,13,2,0,0,0,0,5,13,9,14,0,0,0,0,4,8,0,12,8,0,0,0,1,5,0,11,8,0,0,0,0,0,0,12,7,0,0,0,0,0,6,15,1,0,0,0,0,5,16,11,2,0,0,0,0,11,16,16,16,16,1,2 +0,1,10,13,4,0,0,0,0,9,13,8,14,2,0,0,0,5,10,0,14,4,0,0,0,1,2,0,12,7,0,0,0,0,0,0,13,4,0,0,0,0,0,1,15,3,0,0,0,0,6,15,11,4,2,0,0,0,12,16,16,16,15,0,2 +0,1,11,13,16,1,0,0,0,3,16,5,4,4,1,0,0,5,15,3,4,0,0,0,0,4,15,12,13,15,2,0,0,0,0,0,0,10,8,0,0,0,0,0,0,10,8,0,0,2,8,7,5,15,5,0,0,0,8,12,16,12,0,0,5 +0,1,14,13,4,0,0,0,0,6,15,11,15,0,0,0,0,8,14,9,16,4,0,0,0,2,8,11,15,6,0,0,0,0,0,0,7,13,1,0,0,0,0,0,0,11,10,0,0,0,10,4,0,3,15,0,0,0,10,14,16,16,15,0,9 +0,0,0,1,13,9,0,0,0,0,0,7,16,5,0,0,0,0,1,14,12,0,0,0,0,4,14,13,2,13,6,0,0,6,16,16,16,16,9,0,0,0,6,8,13,16,0,0,0,0,0,0,15,11,0,0,0,0,0,2,16,9,0,0,4 +0,0,3,14,8,8,8,0,0,0,2,11,12,15,10,0,0,0,0,0,0,13,4,0,0,1,16,16,16,15,3,0,0,2,8,8,16,16,6,0,0,0,0,8,13,3,1,0,0,0,1,16,7,0,0,0,0,0,5,11,1,0,0,0,7 +0,0,5,16,5,0,0,0,0,0,2,16,13,0,0,0,0,0,11,16,16,2,0,0,0,0,3,12,16,9,0,0,0,0,0,0,14,14,0,0,0,0,0,0,9,16,3,0,0,0,9,12,14,16,8,5,0,0,4,12,13,16,16,15,1 +0,0,4,6,14,6,0,0,0,8,16,15,13,10,0,0,0,3,12,3,2,13,0,0,0,0,11,11,13,11,0,0,0,0,6,16,16,3,0,0,0,0,16,9,6,13,2,0,0,0,9,8,0,3,14,0,0,0,0,6,15,16,16,4,8 +0,0,8,16,16,11,2,0,0,0,5,8,9,16,9,0,0,0,0,0,2,16,5,0,0,0,4,8,12,12,0,0,0,0,11,16,16,16,4,0,0,0,2,14,11,14,5,0,0,0,6,16,2,0,0,0,0,0,10,14,0,0,0,0,7 +0,0,0,4,16,10,0,0,0,0,0,7,16,6,0,0,0,0,0,15,11,1,0,0,0,0,9,15,2,1,0,0,0,11,16,9,5,15,7,0,0,8,16,16,16,16,1,0,0,0,2,8,16,11,0,0,0,0,0,6,16,6,0,0,4 
+0,0,7,16,8,0,0,0,0,0,13,16,16,7,0,0,0,4,16,7,11,15,0,0,0,6,12,0,0,14,6,0,0,8,12,0,0,14,8,0,0,7,13,0,6,15,7,0,0,4,16,16,16,16,1,0,0,0,7,14,7,0,0,0,0 +0,0,16,10,0,0,0,0,0,3,16,16,6,0,0,0,0,0,16,12,12,0,0,0,0,0,13,11,14,0,0,0,0,0,1,6,16,0,0,0,0,0,0,10,12,0,0,0,0,0,13,16,16,14,8,0,0,1,13,16,16,15,16,5,2 +0,0,10,14,14,11,1,0,0,4,16,12,5,14,11,0,0,5,14,2,4,13,8,0,0,1,11,11,14,6,0,0,0,0,9,16,5,0,0,0,0,4,13,11,9,0,0,0,0,5,11,2,16,2,0,0,0,0,13,16,16,6,0,0,8 +0,0,7,13,14,1,0,0,0,0,16,10,5,0,0,0,0,1,16,13,8,3,0,0,0,0,12,10,11,15,3,0,0,0,0,0,0,9,8,0,0,0,0,0,0,3,11,0,0,1,7,4,4,11,11,0,0,1,11,14,15,12,2,0,5 +0,0,0,8,14,3,0,0,0,0,1,16,15,7,0,0,0,0,7,15,5,0,0,0,0,3,14,14,5,0,0,0,0,3,16,10,12,10,0,0,0,0,11,5,0,14,2,0,0,0,3,11,6,14,1,0,0,0,0,7,15,6,0,0,6 +0,0,0,4,13,3,0,0,0,0,6,16,13,2,0,0,0,2,16,9,0,0,0,0,0,3,16,10,2,0,0,0,0,2,16,16,16,14,3,0,0,1,13,9,2,8,11,0,0,0,3,15,8,9,16,1,0,0,0,3,11,14,10,0,6 +0,0,10,16,16,12,6,0,0,0,6,9,16,16,15,1,0,0,0,0,3,13,9,0,0,1,12,8,10,16,2,0,0,1,15,16,16,16,8,0,0,0,1,13,11,10,4,0,0,0,3,16,2,0,0,0,0,0,9,14,0,0,0,0,7 +0,0,8,15,14,1,0,0,0,0,15,15,7,0,0,0,0,0,14,15,9,2,0,0,0,0,10,15,14,14,3,0,0,0,1,1,0,11,12,0,0,0,0,0,0,3,16,1,0,2,14,6,4,7,16,3,0,0,9,16,16,16,13,1,5 +0,0,0,1,14,10,0,0,0,0,0,2,16,9,0,0,0,0,1,11,14,3,0,0,0,1,10,16,6,9,6,0,0,10,16,15,13,16,9,0,0,5,11,12,16,15,1,0,0,0,0,0,16,12,0,0,0,0,0,0,16,11,0,0,4 +0,4,15,16,4,0,0,0,0,8,16,11,15,0,0,0,0,6,14,4,16,4,0,0,0,0,2,4,16,3,0,0,0,0,0,11,15,0,0,0,0,0,9,16,4,0,0,0,0,10,16,16,16,13,3,0,0,5,14,12,13,16,12,0,2 +0,0,0,12,11,0,0,0,0,0,11,16,10,0,0,0,0,0,16,13,0,0,0,0,0,3,16,3,0,0,0,0,0,2,16,16,11,1,0,0,0,0,14,11,3,10,0,0,0,0,5,14,2,9,6,0,0,0,0,7,16,16,6,0,6 +0,0,8,13,8,0,0,0,0,4,13,6,14,3,0,0,0,7,8,0,12,5,0,0,0,3,10,8,16,1,0,0,0,0,0,5,14,12,0,0,0,0,0,0,0,13,8,0,0,0,10,10,0,6,12,0,0,0,4,15,16,16,6,0,3 +0,0,12,16,16,16,3,0,0,0,6,9,12,16,7,0,0,0,0,0,8,15,3,0,0,3,15,16,16,7,0,0,0,1,8,16,16,15,2,0,0,0,3,16,10,14,6,0,0,0,11,16,0,0,0,0,0,0,12,11,0,0,0,0,7 +0,0,5,12,10,0,0,0,0,2,15,14,16,1,0,0,0,0,11,3,16,1,0,0,0,0,0,0,11,5,0,0,0,0,0,0,7,14,0,0,0,0,0,0,0,9,10,0,0,0,10,7,5,12,12,0,0,0,6,14,16,13,4,0,3 +0,0,5,11,12,9,0,0,0,0,12,14,5,16,0,0,0,1,16,5,6,11,0,0,0,0,16,8,15,3,0,0,0,0,9,16,9,0,0,0,0,1,11,4,13,4,0,0,0,3,13,2,4,14,0,0,0,0,5,8,11,11,0,0,8 +0,0,1,7,12,7,0,0,0,0,6,16,10,5,0,0,0,0,14,16,14,4,0,0,0,0,3,4,6,13,0,0,0,0,0,0,0,11,6,0,0,0,0,0,0,7,9,0,0,0,10,5,2,8,11,0,0,0,4,14,16,16,4,0,5 +0,0,9,16,15,1,0,0,0,0,3,8,16,5,0,0,0,0,0,0,14,7,0,0,0,2,11,9,16,3,0,0,0,11,16,16,16,12,1,0,0,0,0,13,15,16,5,0,0,0,5,16,2,0,0,0,0,0,10,10,0,0,0,0,7 +0,0,11,16,7,0,0,0,0,4,15,8,16,9,0,0,0,8,10,0,9,12,0,0,0,4,15,11,13,13,0,0,0,0,0,7,6,16,0,0,0,0,0,0,0,13,2,0,0,0,7,7,2,12,4,0,0,0,6,13,16,16,6,0,9 +0,0,0,12,8,0,0,0,0,0,0,16,8,0,0,0,0,0,10,16,3,0,0,0,0,7,16,12,12,4,0,0,0,8,16,16,16,16,9,0,0,1,9,15,16,10,6,0,0,0,0,12,13,0,0,0,0,0,0,14,12,0,0,0,4 +0,0,11,16,16,4,0,0,0,0,12,16,12,3,0,0,0,2,16,16,15,4,0,0,0,0,5,8,11,15,3,0,0,0,0,0,0,10,12,0,0,0,3,1,0,3,16,0,0,0,16,12,8,11,16,1,0,0,8,14,16,16,11,0,5 +0,0,4,10,15,4,0,0,0,0,8,13,6,1,0,0,0,1,15,14,10,1,0,0,0,0,11,8,10,12,1,0,0,0,0,0,0,10,6,0,0,0,0,0,0,2,14,0,0,0,6,7,3,3,15,2,0,0,4,13,16,16,15,1,5 +0,0,9,16,16,16,7,0,0,0,4,8,8,14,14,0,0,0,0,0,2,15,6,0,0,0,10,12,14,14,0,0,0,0,8,16,16,16,6,0,0,0,0,15,9,13,4,0,0,0,7,16,1,0,0,0,0,0,11,10,0,0,0,0,7 +0,2,12,15,11,3,0,0,0,10,13,5,13,13,0,0,0,2,2,0,12,14,0,0,0,0,0,7,15,3,0,0,0,0,0,5,15,3,0,0,0,0,0,0,8,15,1,0,0,0,8,5,4,12,10,0,0,0,14,16,16,16,5,0,3 
+0,0,5,13,6,1,0,0,0,0,8,16,16,15,2,0,0,0,3,13,1,13,8,0,0,0,2,14,15,14,2,0,0,2,14,15,15,3,0,0,0,7,11,0,6,10,0,0,0,5,11,1,1,16,0,0,0,0,8,13,16,13,1,0,8 +0,0,0,1,15,6,0,0,0,0,0,6,15,2,0,0,0,0,1,16,9,0,0,0,0,1,11,14,2,9,7,0,0,12,16,14,15,16,9,0,0,9,12,12,14,14,2,0,0,0,0,0,14,15,0,0,0,0,0,0,16,14,0,0,4 +0,0,2,10,16,12,0,0,0,2,15,13,9,16,2,0,0,1,9,0,8,14,1,0,0,0,0,4,16,10,0,0,0,0,0,1,10,16,3,0,0,0,2,0,0,14,7,0,0,0,9,12,11,16,3,0,0,0,1,11,14,6,0,0,3 +0,0,0,10,5,0,0,0,0,0,0,15,5,0,0,0,0,0,10,12,1,14,1,0,0,5,16,4,8,15,0,0,0,7,16,16,16,16,5,0,0,0,3,5,15,8,1,0,0,0,0,9,12,0,0,0,0,0,0,12,8,0,0,0,4 +0,0,1,11,3,0,0,0,0,0,13,15,13,0,0,0,0,3,16,2,8,5,0,0,0,6,16,14,15,14,2,0,0,2,14,16,16,16,5,0,0,0,1,4,3,9,6,0,0,0,4,6,10,16,5,0,0,0,1,11,10,4,0,0,9 +0,0,0,3,11,0,0,0,0,0,0,11,7,0,0,0,0,0,4,14,2,5,0,0,0,0,12,9,7,11,0,0,0,7,16,12,14,10,1,0,0,3,10,13,16,16,8,0,0,0,0,5,14,0,0,0,0,0,0,7,10,0,0,0,4 +0,0,0,3,9,0,0,0,0,0,0,9,11,0,0,0,0,0,0,15,5,0,2,0,0,0,11,12,0,14,8,0,0,3,16,12,11,16,8,0,0,7,12,12,15,16,4,0,0,0,0,0,12,7,0,0,0,0,0,5,13,2,0,0,4 +0,1,13,16,14,1,0,0,0,14,15,8,14,10,0,0,0,5,5,0,13,10,0,0,0,0,0,12,16,8,0,0,0,0,4,16,16,16,5,0,0,0,0,3,0,16,10,0,0,0,3,4,10,16,8,0,0,0,11,16,16,8,0,0,3 +0,0,3,14,14,2,0,0,0,4,16,16,16,7,0,0,0,4,16,8,3,15,1,0,0,3,16,6,0,14,5,0,0,1,16,5,0,13,7,0,0,0,15,4,1,16,9,0,0,0,11,13,13,16,4,0,0,0,2,15,15,8,0,0,0 +0,0,4,12,14,2,0,0,0,4,15,10,10,10,0,0,0,9,16,0,5,12,0,0,0,5,16,9,14,16,4,0,0,0,7,8,7,15,6,0,0,0,0,0,0,10,12,0,0,0,1,4,4,14,12,0,0,0,3,13,16,11,4,0,9 +0,0,8,16,12,0,0,0,0,4,16,9,8,9,0,0,0,6,16,3,1,16,0,0,0,0,14,16,16,11,0,0,0,0,3,16,16,14,0,0,0,0,9,16,3,14,2,0,0,0,10,15,7,16,2,0,0,0,5,16,15,7,0,0,8 +0,0,2,11,9,0,0,0,0,0,12,7,8,8,0,0,0,6,16,3,9,11,0,0,0,1,13,16,16,14,2,0,0,0,4,16,8,10,5,0,0,0,8,11,0,4,6,0,0,0,10,9,0,8,5,0,0,0,3,11,12,6,0,0,8 +0,0,0,9,10,0,0,0,0,0,2,16,16,3,0,0,0,0,13,16,9,0,0,0,0,1,15,13,0,0,0,0,0,2,16,13,11,12,4,0,0,1,15,16,16,16,15,1,0,0,8,16,16,16,16,4,0,0,0,7,12,13,14,3,6 +0,0,11,12,0,0,0,0,0,4,16,15,3,0,0,0,0,2,11,9,6,0,0,0,0,0,2,10,5,0,0,0,0,0,0,14,3,0,0,0,0,0,3,16,0,0,0,0,0,0,12,16,12,15,14,0,0,0,10,16,14,10,10,1,2 +0,0,4,13,10,3,0,0,0,0,16,15,13,13,0,0,0,1,16,9,8,15,0,0,0,2,16,15,15,16,1,0,0,0,7,12,12,16,6,0,0,0,0,0,0,14,10,0,0,0,3,2,1,14,11,0,0,0,5,13,14,12,2,0,9 +0,0,8,12,16,9,1,0,0,0,14,11,7,16,4,0,0,0,0,0,4,16,2,0,0,0,0,4,14,16,3,0,0,0,0,8,14,15,8,0,0,0,0,0,0,9,11,0,0,3,12,4,6,14,7,0,0,1,9,16,16,11,0,0,3 +0,0,4,13,13,0,0,0,0,0,12,14,12,7,0,0,0,0,12,12,5,14,0,0,0,0,12,10,0,12,3,0,0,4,16,5,0,8,8,0,0,4,16,2,0,9,8,0,0,3,14,9,4,15,5,0,0,0,4,13,14,10,0,0,0 +0,0,4,14,12,1,0,0,0,0,11,11,10,5,0,0,0,0,0,0,12,9,0,0,0,0,0,7,16,12,2,0,0,0,0,0,4,10,10,0,0,0,1,1,0,3,14,0,0,0,11,6,3,12,13,0,0,0,6,14,16,11,1,0,3 +0,0,3,16,14,4,0,0,0,0,10,16,16,15,0,0,0,2,16,12,5,14,2,0,0,5,16,4,0,10,6,0,0,6,16,1,0,7,9,0,0,3,16,3,0,9,12,0,0,0,13,14,13,16,9,0,0,0,3,13,16,12,2,0,0 +0,0,6,13,13,3,0,0,0,2,16,16,16,11,0,0,0,3,16,15,6,16,4,0,0,3,16,9,0,16,8,0,0,8,16,3,1,16,6,0,0,7,16,2,8,16,3,0,0,1,14,16,16,13,0,0,0,0,4,13,12,2,0,0,0 +0,0,5,14,7,0,0,0,0,2,16,16,16,3,0,0,0,1,16,8,13,9,0,0,0,0,6,3,16,7,0,0,0,0,0,0,16,7,0,0,0,0,0,11,15,0,0,0,0,0,9,16,14,8,10,0,0,0,6,15,12,16,16,7,2 +0,0,5,9,13,12,0,0,0,10,16,15,12,16,2,0,0,8,7,0,11,14,0,0,0,0,0,2,16,14,1,0,0,0,0,1,11,16,7,0,0,0,0,0,0,8,12,0,0,0,14,10,10,16,8,0,0,0,6,15,16,10,1,0,3 +0,0,11,12,7,0,0,0,0,3,16,14,16,3,0,0,0,6,16,6,16,6,0,0,0,2,15,16,16,14,0,0,0,0,4,9,8,15,6,0,0,0,0,0,0,11,11,0,0,0,2,4,9,16,7,0,0,0,10,16,13,8,1,0,9 
+0,0,0,0,10,7,0,0,0,0,0,2,16,13,0,0,0,0,3,16,16,9,0,0,0,8,16,16,16,10,0,0,0,1,3,3,16,11,0,0,0,0,0,1,16,13,0,0,0,0,0,0,16,16,3,0,0,0,0,0,8,16,10,0,1 +0,0,3,13,14,3,0,0,0,0,15,16,16,12,0,0,0,4,16,8,6,16,0,0,0,5,16,7,1,15,3,0,0,8,16,6,0,16,5,0,0,4,16,7,0,15,6,0,0,3,15,14,12,16,4,0,0,0,3,15,15,8,0,0,0 +0,0,4,13,8,0,0,0,0,0,14,16,16,5,0,0,0,4,16,7,8,15,0,0,0,0,15,12,15,16,4,0,0,0,5,14,14,16,6,0,0,0,0,0,0,13,8,0,0,0,0,0,0,10,13,0,0,0,5,14,16,15,8,0,9 +0,0,0,11,14,2,0,0,0,0,5,16,16,11,0,0,0,2,16,10,6,14,0,0,0,2,16,10,2,14,2,0,0,3,16,10,0,9,8,0,0,3,16,4,0,11,11,0,0,0,12,13,9,16,8,0,0,0,1,11,15,9,2,0,0 +0,0,6,16,6,0,0,0,0,4,16,13,16,2,0,0,0,8,15,1,15,5,0,0,0,5,16,16,16,16,6,0,0,0,14,16,14,13,8,0,0,0,9,16,3,14,8,0,0,0,12,14,11,15,2,0,0,0,4,10,14,7,0,0,8 +0,0,8,16,14,6,0,0,0,5,16,13,12,14,0,0,0,6,16,12,4,0,0,0,0,9,16,16,15,3,0,0,0,0,6,5,14,10,0,0,0,0,0,0,12,14,0,0,0,0,15,9,16,14,0,0,0,0,6,15,15,6,0,0,5 +0,0,8,16,8,0,0,0,0,2,16,16,15,5,0,0,0,2,16,5,12,10,0,0,0,0,8,1,11,10,0,0,0,0,0,4,16,4,0,0,0,0,0,13,15,5,3,0,0,0,10,16,16,16,16,0,0,0,10,14,9,8,11,2,2 +0,0,0,2,11,0,0,0,0,0,0,11,8,0,0,0,0,0,1,16,3,9,0,0,0,0,11,11,2,16,0,0,0,4,16,10,12,15,5,0,0,3,11,12,15,13,3,0,0,0,0,0,14,4,0,0,0,0,0,2,16,2,0,0,4 +0,0,2,15,14,4,0,0,0,0,8,16,16,11,0,0,0,0,7,16,16,10,0,0,0,0,7,16,16,9,0,0,0,0,10,16,16,6,0,0,0,0,10,16,16,8,0,0,0,0,13,16,16,13,0,0,0,0,2,11,15,7,0,0,1 +0,0,6,11,12,3,0,0,0,0,9,16,16,13,0,0,0,0,5,16,16,9,0,0,0,0,5,16,16,7,0,0,0,0,7,16,16,6,0,0,0,0,8,16,16,6,0,0,0,0,9,16,16,6,0,0,0,0,5,12,11,3,0,0,1 +0,0,0,11,12,2,0,0,0,0,6,16,16,10,0,0,0,0,12,14,6,16,1,0,0,2,16,12,0,14,4,0,0,3,16,9,0,14,6,0,0,0,16,8,1,16,5,0,0,0,9,14,12,16,2,0,0,0,1,11,16,6,0,0,0 +0,0,5,15,13,2,0,0,0,0,14,13,9,13,0,0,0,0,10,0,0,10,2,0,0,0,0,0,0,11,3,0,0,0,0,0,2,15,0,0,0,0,0,1,12,9,0,0,0,0,10,12,16,14,2,0,0,0,5,14,12,11,4,0,2 +0,0,13,13,13,8,0,0,0,0,7,8,16,5,0,0,0,0,0,2,16,1,0,0,0,1,3,9,14,5,8,0,0,10,16,16,16,15,6,0,0,3,11,15,3,0,0,0,0,0,10,9,0,0,0,0,0,0,13,4,0,0,0,0,7 +0,0,12,16,16,6,0,0,0,9,16,9,12,15,0,0,0,8,10,0,3,16,4,0,0,0,0,0,6,16,0,0,0,0,0,1,14,11,0,0,0,0,1,10,14,3,0,0,0,2,16,16,9,4,4,0,0,1,10,16,16,16,16,2,2 +0,0,2,13,15,2,0,0,0,0,11,16,13,10,0,0,0,0,14,9,8,12,0,0,0,0,8,14,16,10,0,0,0,0,5,16,13,1,0,0,0,0,9,15,14,2,0,0,0,0,7,14,14,6,0,0,0,0,1,13,16,2,0,0,8 +0,1,13,16,16,6,0,0,0,10,16,11,11,13,0,0,0,5,12,0,4,16,2,0,0,0,0,0,6,16,1,0,0,0,0,0,9,14,0,0,0,0,0,4,16,5,0,0,0,0,6,16,15,9,8,3,0,0,12,16,16,14,13,6,2 +0,0,0,8,11,0,0,0,0,0,7,16,11,9,0,0,0,0,14,15,4,10,2,0,0,1,16,5,0,6,6,0,0,3,12,0,3,8,9,0,0,3,13,0,0,13,8,0,0,0,11,5,6,15,3,0,0,0,1,11,13,9,0,0,0 +0,0,9,14,16,9,1,0,0,4,15,7,2,14,4,0,0,0,1,1,8,14,0,0,0,0,0,2,16,6,0,0,0,0,0,0,2,14,0,0,0,0,0,0,0,6,10,0,0,3,9,4,4,10,10,0,0,1,10,13,12,9,0,0,3 +0,0,10,16,16,12,0,0,0,0,6,16,9,7,0,0,0,0,7,16,4,0,0,0,0,0,11,16,16,10,1,0,0,0,3,8,6,13,11,0,0,0,0,0,0,7,16,0,0,0,0,0,3,15,15,0,0,0,10,16,16,13,2,0,5 +0,0,12,12,14,13,1,0,0,2,16,16,11,10,1,0,0,6,16,5,0,0,0,0,0,8,16,15,8,0,0,0,0,3,14,9,16,3,0,0,0,0,0,0,14,8,0,0,0,1,7,4,14,11,0,0,0,0,13,16,16,8,0,0,5 +0,1,10,16,16,14,0,0,0,10,16,10,6,4,0,0,0,7,14,3,0,0,0,0,0,9,16,16,11,1,0,0,0,5,11,4,13,5,0,0,0,0,0,0,9,8,0,0,0,0,1,3,15,4,0,0,0,0,14,14,10,0,0,0,5 +0,1,13,16,16,4,0,0,0,2,15,9,12,12,0,0,0,0,0,5,15,9,0,0,0,0,0,16,16,14,3,0,0,0,0,4,4,11,12,0,0,3,6,0,0,5,16,0,0,7,14,4,8,15,10,0,0,0,13,16,16,11,1,0,3 +0,0,0,6,13,0,0,0,0,0,1,13,9,0,0,0,0,0,4,16,4,0,0,0,0,0,8,14,0,0,0,0,0,0,13,15,12,8,0,0,0,0,12,14,5,10,12,0,0,0,8,13,4,4,15,2,0,0,0,5,12,14,7,0,6 
+0,0,7,13,14,4,0,0,0,8,15,7,8,16,0,0,0,12,12,0,2,16,4,0,0,3,13,16,16,16,2,0,0,0,0,0,0,11,9,0,0,0,0,0,0,6,12,0,0,0,5,0,0,10,12,0,0,0,8,12,14,10,2,0,9 +0,0,3,15,15,3,0,0,0,0,11,14,7,13,0,0,0,0,11,11,12,16,0,0,0,0,3,16,16,16,0,0,0,0,6,16,14,1,0,0,0,0,16,10,15,1,0,0,0,0,14,6,14,3,0,0,0,0,4,15,16,4,0,0,8 +0,0,0,10,15,0,0,0,0,0,8,15,5,7,11,0,0,2,16,6,0,13,11,0,0,10,16,3,6,16,3,0,0,4,16,16,16,16,8,0,0,0,4,12,16,3,0,0,0,0,0,10,15,0,0,0,0,0,0,12,10,0,0,0,4 +0,0,3,16,10,0,0,0,0,1,10,16,16,1,0,0,0,10,16,16,15,0,0,0,0,3,11,16,16,0,0,0,0,0,0,14,16,3,0,0,0,0,0,14,16,3,0,0,0,0,1,16,16,5,0,0,0,0,1,14,13,0,0,0,1 +0,0,5,14,14,6,0,0,0,0,11,5,2,12,0,0,0,0,11,1,8,11,0,0,0,0,7,13,16,6,0,0,0,0,3,16,8,0,0,0,0,0,11,10,12,1,0,0,0,0,14,2,4,12,0,0,0,0,5,15,16,12,0,0,8 +0,0,1,15,13,1,0,0,0,0,8,16,16,8,0,0,3,13,16,16,16,6,0,0,0,9,8,9,16,6,0,0,0,0,0,10,16,1,0,0,0,0,0,12,16,0,0,0,0,0,0,16,14,1,0,0,0,0,0,12,16,1,0,0,1 +0,0,6,16,16,8,0,0,0,1,16,11,8,15,0,0,0,0,15,10,14,14,0,0,0,0,8,16,16,3,0,0,0,0,10,16,10,0,0,0,0,3,16,7,15,2,0,0,0,1,16,6,12,7,0,0,0,0,5,16,15,5,0,0,8 +0,0,4,13,13,2,0,0,0,2,15,7,4,7,0,0,0,6,12,0,5,9,0,0,0,3,11,3,7,14,1,0,0,0,6,15,16,14,4,0,0,0,0,0,0,4,11,0,0,0,0,0,0,4,15,0,0,0,3,13,16,16,8,0,9 +0,1,16,16,15,5,0,0,0,1,16,13,9,15,3,0,0,3,16,0,5,16,5,0,0,0,12,16,16,16,6,0,0,0,1,4,4,12,5,0,0,0,0,0,0,12,9,0,0,0,1,0,2,14,7,0,0,0,12,16,16,15,3,0,9 +0,0,1,15,13,0,0,0,0,0,3,16,16,2,0,0,0,1,12,16,12,0,0,0,0,8,16,16,8,0,0,0,0,2,12,16,6,0,0,0,0,0,7,16,7,0,0,0,0,0,4,16,8,0,0,0,0,0,1,14,9,0,0,0,1 +0,0,7,15,15,9,1,0,0,7,15,5,4,14,4,0,0,2,2,0,7,13,1,0,0,0,2,12,15,5,0,0,0,0,1,5,10,13,0,0,0,0,0,0,0,10,7,0,0,0,11,1,1,12,7,0,0,0,8,14,13,8,0,0,3 +0,2,7,12,16,15,1,0,1,15,16,13,10,6,1,0,0,5,16,6,0,0,0,0,0,6,16,16,8,0,0,0,0,5,15,5,14,7,0,0,0,0,2,0,8,14,0,0,0,0,5,2,12,13,0,0,0,0,8,16,15,4,0,0,5 +0,0,3,16,3,0,0,0,0,0,11,13,1,1,0,0,0,6,14,2,9,12,0,0,0,9,12,0,14,5,1,0,0,7,16,12,16,16,11,0,0,0,6,11,15,5,1,0,0,0,0,11,10,0,0,0,0,0,3,16,4,0,0,0,4 +0,0,0,6,16,10,0,0,0,0,3,15,15,4,0,0,0,0,5,16,8,0,0,0,0,0,10,16,4,0,0,0,0,0,10,16,8,1,0,0,0,1,15,16,16,15,2,0,0,1,13,16,15,16,7,0,0,0,0,5,15,15,5,0,6 +0,0,3,14,15,5,0,0,0,0,11,14,5,11,2,0,0,0,14,7,0,7,4,0,0,1,16,2,0,3,7,0,0,4,13,0,0,5,8,0,0,2,12,0,0,11,7,0,0,0,14,8,13,14,0,0,0,0,3,12,10,3,0,0,0 +0,0,12,16,16,10,0,0,0,0,13,13,8,16,5,0,0,0,0,0,0,16,8,0,0,0,1,8,13,15,2,0,0,0,7,16,16,15,3,0,0,0,0,3,4,11,15,0,0,1,11,0,2,13,15,0,0,1,13,16,16,14,6,0,3 +0,0,9,16,11,2,0,0,0,9,16,8,13,8,0,0,0,8,6,0,0,12,0,0,0,0,0,0,8,10,0,0,0,0,0,0,14,6,0,0,0,0,0,8,11,0,0,0,0,0,8,16,6,4,4,0,0,0,12,15,15,16,13,0,2 +0,2,15,16,16,16,11,0,0,1,8,8,9,16,9,0,0,0,0,0,8,12,0,0,0,1,4,4,15,9,0,0,0,11,16,16,16,15,2,0,0,2,5,16,5,0,0,0,0,0,11,12,0,0,0,0,0,3,16,5,0,0,0,0,7 +0,0,2,16,15,4,0,0,0,0,10,15,11,11,0,0,0,0,10,12,15,10,0,0,0,0,7,16,14,2,0,0,0,0,12,16,7,0,0,0,0,0,14,7,15,1,0,0,0,0,11,7,13,7,0,0,0,0,2,14,15,7,0,0,8 +0,0,7,16,16,12,2,0,0,0,12,16,13,14,7,0,0,2,16,11,0,0,0,0,0,5,16,16,11,0,0,0,0,11,16,11,16,3,0,0,0,1,2,0,13,8,0,0,0,0,2,4,13,8,0,0,0,0,8,16,16,4,0,0,5 +0,0,8,14,12,8,6,0,0,0,12,11,6,8,8,0,0,0,14,5,4,1,0,0,0,4,16,14,12,14,1,0,0,5,7,0,0,9,7,0,0,0,0,0,0,10,4,0,0,0,5,2,4,13,1,0,0,0,8,16,14,1,0,0,5 +0,0,0,0,14,4,0,0,0,0,0,5,15,1,0,0,0,0,0,14,8,0,2,0,0,0,8,14,2,7,13,0,0,3,15,5,0,13,7,0,0,12,15,8,10,16,2,0,1,9,12,12,15,15,1,0,0,0,0,0,15,4,0,0,4 +0,0,1,12,6,0,0,0,0,0,10,13,1,0,0,0,0,0,16,1,0,0,0,0,0,2,12,0,0,0,0,0,0,4,15,14,16,15,4,0,0,3,16,4,0,2,13,0,0,0,11,7,0,4,15,0,0,0,0,10,16,14,5,0,6 
+0,3,13,16,14,2,0,0,0,14,11,3,16,8,0,0,0,7,3,7,16,8,0,0,0,0,5,16,16,16,5,0,0,0,2,10,6,11,12,0,0,0,0,0,0,11,13,0,0,1,10,1,4,15,9,0,0,2,15,16,16,11,0,0,3 +0,0,10,13,9,0,0,0,0,5,11,1,11,5,0,0,0,6,7,0,6,14,0,0,0,2,14,8,9,16,1,0,0,0,2,8,5,13,4,0,0,0,0,0,0,7,9,0,0,0,3,2,0,5,11,0,0,0,8,15,13,14,3,0,9 +0,3,16,15,3,0,0,0,0,8,14,12,11,0,0,0,0,3,11,2,16,3,0,0,0,0,0,2,16,2,0,0,0,0,0,5,16,0,0,0,0,0,0,10,12,0,0,0,0,0,11,16,13,13,10,1,0,2,14,16,15,12,12,1,2 +0,0,12,16,16,16,14,0,0,0,5,8,8,15,10,0,0,0,0,0,5,15,1,0,0,0,2,8,14,16,9,0,0,0,13,16,15,12,7,0,0,0,1,14,6,0,0,0,0,0,4,14,0,0,0,0,0,0,12,9,0,0,0,0,7 +0,0,12,12,12,5,0,0,0,0,4,16,16,8,0,0,0,0,2,16,16,9,0,0,0,0,4,16,16,9,0,0,0,0,4,16,16,4,0,0,0,0,5,16,16,3,0,0,0,0,9,16,16,0,0,0,0,0,11,12,12,4,0,0,1 +0,3,15,15,3,0,0,0,0,8,15,14,14,0,0,0,0,4,9,2,16,5,0,0,0,0,0,2,16,5,0,0,0,0,0,7,15,1,0,0,0,0,0,14,9,0,0,0,0,1,11,16,10,8,4,0,0,5,16,16,16,16,11,0,2 +0,0,10,13,16,10,0,0,0,0,15,9,2,2,0,0,0,4,16,16,14,4,0,0,0,0,3,1,4,16,3,0,0,0,0,0,0,12,7,0,0,0,0,0,0,12,7,0,0,5,8,0,4,15,1,0,0,1,11,14,15,2,0,0,5 +0,0,1,14,2,0,0,0,0,0,5,14,0,0,0,0,0,0,8,10,0,0,0,0,0,0,12,6,0,0,0,0,0,0,16,9,12,9,2,0,0,0,16,13,8,10,13,0,0,0,11,12,0,4,15,0,0,0,0,13,16,16,9,0,6 +0,0,10,14,15,12,5,0,0,0,14,4,2,2,0,0,0,3,14,10,10,5,0,0,0,6,15,10,6,15,2,0,0,1,0,0,0,8,8,0,0,0,0,0,0,6,8,0,0,0,11,2,3,12,3,0,0,0,10,16,15,5,0,0,5 +0,0,6,12,16,10,0,0,0,0,0,12,16,16,1,0,0,0,0,9,16,16,4,0,0,0,0,13,16,16,3,0,0,0,0,14,16,14,0,0,0,0,0,14,16,10,0,0,0,0,2,16,16,13,0,0,0,0,7,16,15,2,0,0,1 +0,0,4,15,16,16,16,14,0,0,6,12,12,13,16,8,0,0,0,0,0,8,14,1,0,0,0,6,8,16,4,0,0,0,2,16,16,13,2,0,0,0,0,7,14,1,0,0,0,0,2,14,6,0,0,0,0,0,8,13,1,0,0,0,7 +0,0,13,16,16,6,0,0,0,6,16,6,10,16,0,0,0,1,7,5,14,8,0,0,0,0,1,15,16,13,1,0,0,0,0,3,4,13,10,0,0,0,0,0,0,9,11,0,0,2,14,5,6,16,4,0,0,1,15,16,16,10,0,0,3 +0,0,3,14,4,0,0,0,0,0,12,15,12,4,0,0,0,3,15,5,2,12,0,0,0,5,9,0,0,8,5,0,0,8,8,0,0,4,8,0,0,4,12,0,0,2,12,0,0,0,14,5,0,9,8,0,0,0,3,15,16,13,1,0,0 +0,0,13,16,16,10,0,0,0,1,11,6,9,16,6,0,0,0,0,5,10,16,5,0,0,0,2,15,16,15,0,0,0,0,0,0,5,15,7,0,0,0,0,0,0,13,8,0,0,1,13,10,6,14,7,0,0,1,11,16,16,12,0,0,3 +0,0,0,14,4,0,0,0,0,0,7,15,1,0,0,0,0,0,11,11,0,0,0,0,0,0,13,6,0,0,0,0,0,0,16,8,8,5,0,0,0,0,15,15,9,11,12,0,0,0,9,15,0,0,15,4,0,0,1,11,13,12,12,1,6 +0,0,3,12,7,2,0,0,0,0,9,16,16,8,0,0,0,0,7,16,16,10,0,0,0,0,6,16,16,12,0,0,0,0,6,16,16,11,0,0,0,0,4,16,16,10,0,0,0,0,8,16,16,12,0,0,0,0,3,11,12,6,0,0,1 +0,0,13,16,5,0,0,0,0,0,16,12,15,0,0,0,0,0,3,3,12,4,0,0,0,0,0,0,12,6,0,0,0,0,0,0,14,7,0,0,0,0,0,2,16,5,0,0,0,0,5,14,16,9,4,0,0,0,15,16,13,12,16,2,2 +0,5,16,16,15,2,0,0,0,11,13,8,11,15,0,0,0,1,1,0,7,16,0,0,0,0,0,11,16,14,1,0,0,0,0,12,12,15,11,0,0,0,0,0,0,8,12,0,0,5,11,4,3,12,11,0,0,4,14,16,16,16,4,0,3 +0,0,8,16,13,13,16,8,0,0,7,12,12,10,16,4,0,0,0,0,0,11,8,0,0,0,0,2,6,15,1,0,0,0,9,16,16,16,6,0,0,0,5,14,13,2,0,0,0,0,4,16,5,0,0,0,0,0,12,11,0,0,0,0,7 +0,0,0,1,14,10,0,0,0,0,0,9,15,2,0,0,0,0,1,14,5,0,2,0,0,0,11,9,0,8,14,0,0,7,14,0,0,14,10,0,4,16,15,11,9,16,4,0,1,12,14,16,16,12,0,0,0,0,0,0,14,6,0,0,4 +0,0,7,16,15,2,0,0,0,7,15,7,12,10,0,0,0,8,6,0,10,12,0,0,0,0,0,6,16,12,1,0,0,0,0,5,12,15,10,0,0,0,0,0,0,8,12,0,0,0,6,9,4,13,10,0,0,0,5,16,16,13,1,0,3 +0,0,10,14,15,10,2,0,0,2,15,5,4,16,8,0,0,0,13,0,8,13,1,0,0,3,15,12,14,2,0,0,0,1,12,14,13,1,0,0,0,6,11,0,9,12,0,0,0,7,11,0,1,16,3,0,0,1,10,13,14,12,0,0,8 +0,0,13,16,15,2,0,0,0,5,16,7,11,13,0,0,0,8,16,2,1,14,6,0,0,8,15,0,0,8,11,0,0,10,12,0,0,4,12,0,0,11,12,0,0,7,12,0,0,7,15,4,5,15,8,0,0,0,13,16,16,10,0,0,0 
+0,0,4,13,14,3,0,0,0,0,14,7,4,14,0,0,0,3,10,0,0,8,3,0,0,8,4,0,0,4,8,0,0,5,12,0,0,2,8,0,0,0,15,1,0,4,7,0,0,0,10,8,1,12,6,0,0,0,2,12,16,15,0,0,0 +0,0,5,11,16,12,2,0,0,0,15,7,4,13,4,0,0,1,14,1,4,16,2,0,0,1,7,8,14,5,0,0,0,5,16,16,7,0,0,0,0,0,14,6,14,6,0,0,0,0,11,6,5,16,0,0,0,0,5,15,14,8,0,0,8 +0,0,0,0,11,12,0,0,0,0,0,2,15,8,0,0,0,0,0,9,15,3,0,0,0,0,5,15,4,15,5,0,0,0,13,10,0,16,4,0,0,10,15,4,10,16,3,0,0,15,16,16,16,15,3,0,0,0,1,0,11,12,0,0,4 +0,0,1,13,3,0,0,0,0,0,11,13,0,0,0,0,0,0,14,5,0,0,0,0,0,0,16,0,0,0,0,0,0,2,16,16,16,15,4,0,0,1,16,10,4,8,13,0,0,0,10,13,1,5,14,0,0,0,0,13,16,16,8,0,6 +0,0,5,13,12,9,0,0,0,2,14,4,1,16,4,0,0,7,10,0,3,16,1,0,0,5,14,8,15,6,0,0,0,0,6,16,13,0,0,0,0,0,11,8,13,5,0,0,0,0,13,1,1,15,0,0,0,0,6,14,14,10,0,0,8 +0,0,3,11,13,14,16,8,0,0,11,14,12,12,16,3,0,0,0,0,1,15,8,0,0,0,0,2,9,15,1,0,0,0,2,15,16,16,8,0,0,0,1,12,14,0,0,0,0,0,0,12,9,0,0,0,0,0,4,15,2,0,0,0,7 +0,0,7,15,6,0,0,0,0,0,14,14,13,0,0,0,0,0,3,4,13,0,0,0,0,0,0,8,9,0,0,0,0,0,0,12,5,0,0,0,0,0,3,16,1,0,0,0,0,0,11,14,10,12,11,2,0,0,7,15,11,8,9,3,2 +0,0,8,15,16,12,0,0,0,12,16,14,12,6,0,0,0,10,16,5,0,0,0,0,0,6,16,5,0,0,0,0,0,0,8,16,8,0,0,0,0,0,0,6,16,10,0,0,0,0,6,8,15,12,0,0,0,0,7,14,8,0,0,0,5 +0,0,9,12,10,5,0,0,0,5,16,16,16,13,0,0,0,1,16,16,16,16,0,0,0,0,8,16,16,16,3,0,0,0,4,16,16,16,4,0,0,0,2,16,16,16,1,0,0,0,9,16,16,16,1,0,0,0,5,12,12,5,0,0,1 +0,0,6,13,16,15,2,0,0,0,14,14,12,16,9,0,0,0,3,1,7,16,10,0,0,0,1,15,16,16,13,0,0,0,1,7,15,14,1,0,0,0,0,4,16,10,0,0,0,0,0,12,13,1,0,0,0,0,7,16,3,0,0,0,7 +0,0,7,14,3,0,0,0,0,3,16,16,12,0,0,0,0,10,15,10,16,16,2,0,0,8,16,16,16,10,2,0,0,4,16,15,2,0,0,0,0,4,16,16,4,0,0,0,0,3,16,16,15,0,0,0,0,0,9,16,15,0,0,0,8 +0,0,0,0,11,12,0,0,0,0,0,8,16,4,0,0,0,0,2,16,12,1,0,0,0,4,15,16,13,14,11,0,0,13,16,16,16,16,10,0,0,0,0,0,10,16,6,0,0,0,0,0,11,16,2,0,0,0,0,0,14,13,0,0,4 +0,0,2,9,15,11,0,0,0,3,15,16,16,16,0,0,0,3,13,3,9,16,2,0,0,0,0,6,14,16,13,0,0,0,5,16,16,13,2,0,0,0,5,11,16,4,0,0,0,0,0,11,15,0,0,0,0,0,0,16,7,0,0,0,7 +0,0,2,10,13,4,0,0,0,0,11,16,15,5,0,0,0,5,16,11,2,0,0,0,0,10,15,2,0,0,0,0,0,3,13,15,5,0,0,0,0,0,0,8,16,6,0,0,0,0,2,10,16,7,0,0,0,0,3,15,10,0,0,0,5 +0,0,3,15,15,1,0,0,0,0,9,16,12,2,0,0,0,4,16,9,6,9,2,0,0,8,16,16,16,10,3,0,0,2,16,16,14,1,0,0,0,4,16,16,16,4,0,0,0,1,14,16,16,12,0,0,0,0,3,14,12,5,0,0,8 +0,0,7,11,0,0,0,0,0,0,14,13,0,0,0,0,0,3,16,10,3,0,0,0,0,3,16,16,16,11,0,0,0,6,16,16,16,16,1,0,0,4,16,6,1,16,9,0,0,2,15,14,14,16,3,0,0,0,5,16,14,6,0,0,6 +0,0,2,10,16,3,0,0,0,0,15,16,16,4,0,0,0,3,12,1,16,15,9,0,0,0,0,7,16,16,13,0,0,0,0,16,15,2,0,0,0,0,0,8,12,0,0,0,0,0,0,13,9,0,0,0,0,0,0,14,3,0,0,0,7 +0,0,3,12,9,9,0,0,0,0,12,16,16,16,0,0,0,0,12,5,14,16,4,0,0,0,12,0,2,13,2,0,0,2,15,0,0,12,3,0,0,2,16,2,0,12,0,0,0,0,15,14,13,9,0,0,0,0,2,10,8,0,0,0,0 +0,0,5,13,13,2,0,0,0,0,12,14,16,8,0,0,0,0,0,3,16,5,0,0,0,0,0,4,16,5,0,0,0,0,0,0,10,14,2,0,0,0,0,0,3,16,6,0,0,0,1,0,5,16,4,0,0,0,6,13,16,12,3,0,3 +0,0,0,6,12,6,0,0,0,0,5,16,16,13,0,0,0,2,15,16,16,12,0,0,0,4,16,16,16,14,1,0,0,0,8,16,16,13,0,0,0,0,4,16,16,14,0,0,0,0,6,16,16,16,4,0,0,0,1,12,12,6,0,0,1 +0,0,11,13,1,1,0,0,0,3,16,10,16,16,2,0,0,8,14,14,12,2,0,0,0,1,15,16,11,0,0,0,0,0,9,15,16,9,0,0,0,0,12,7,1,14,7,0,0,0,12,11,9,15,3,0,0,0,4,12,10,1,0,0,8 +0,0,1,11,0,0,0,0,0,0,8,16,2,0,0,0,0,0,11,12,0,0,0,0,0,0,12,10,0,0,0,0,0,0,15,14,9,6,0,0,0,0,15,16,16,16,9,0,0,0,11,16,11,16,14,0,0,0,1,12,13,10,2,0,6 +0,0,7,14,16,14,1,0,0,5,16,13,8,6,0,0,0,4,16,4,0,0,0,0,0,7,16,16,15,2,0,0,0,3,8,5,15,9,0,0,0,0,0,0,3,16,3,0,0,2,15,10,11,16,3,0,0,0,8,15,15,4,0,0,5 
+0,0,0,4,15,7,0,0,0,0,0,15,15,1,0,0,0,0,8,16,2,0,0,0,0,4,16,7,6,9,0,0,0,10,16,13,16,16,0,0,2,16,16,12,15,11,0,0,4,9,4,3,16,7,0,0,0,0,0,5,16,5,0,0,4 +0,0,2,12,10,2,5,0,0,0,10,16,11,13,15,0,0,2,14,15,14,16,14,0,0,0,13,16,14,16,10,0,0,0,0,0,4,15,4,0,0,0,0,2,15,11,0,0,0,0,0,14,16,3,0,0,0,0,2,13,4,0,0,0,9 +0,0,9,16,9,0,0,0,0,3,16,16,16,1,0,0,0,0,6,12,16,14,4,0,0,0,4,14,16,16,13,0,0,0,8,16,16,7,0,0,0,0,0,13,14,0,0,0,0,0,5,16,8,0,0,0,0,0,9,13,1,0,0,0,7 +0,0,8,12,16,8,0,0,0,5,16,10,8,8,0,0,0,2,16,4,0,0,0,0,0,1,15,12,5,0,0,0,0,0,4,13,16,6,0,0,0,0,0,0,10,16,6,0,0,0,7,9,15,15,4,0,0,0,10,13,8,4,0,0,5 +0,0,0,1,14,8,0,0,0,0,0,12,14,2,0,0,0,0,7,16,4,0,0,0,0,5,16,11,3,10,5,0,0,11,16,16,16,16,6,0,0,7,12,11,16,13,1,0,0,0,0,5,16,10,0,0,0,0,0,2,16,9,0,0,4 +0,0,1,7,10,7,0,0,0,0,8,16,16,16,0,0,0,0,15,16,16,16,0,0,0,1,7,16,16,16,3,0,0,0,1,16,16,16,4,0,0,0,3,16,16,16,1,0,0,0,4,16,16,16,0,0,0,0,1,9,12,6,0,0,1 +0,0,5,12,16,4,0,0,0,2,16,15,10,4,0,0,0,4,16,14,16,5,0,0,0,4,15,16,16,9,0,0,0,0,7,16,16,13,2,0,0,0,10,15,6,16,8,0,0,0,14,14,13,14,1,0,0,0,7,16,11,5,0,0,8 +0,0,0,0,13,5,0,0,0,0,0,9,13,1,0,0,0,0,8,14,1,5,7,0,0,4,16,14,13,16,8,0,0,2,8,8,12,16,2,0,0,0,0,0,10,13,0,0,0,0,0,0,14,9,0,0,0,0,0,0,13,3,0,0,4 +0,0,10,13,2,0,0,0,0,0,14,16,4,0,0,0,0,2,16,14,0,0,0,0,0,6,16,14,2,0,0,0,0,7,16,16,16,13,1,0,0,2,16,16,16,16,7,0,0,2,16,16,16,16,4,0,0,0,8,15,16,13,0,0,6 +0,0,13,15,15,4,0,0,0,3,16,14,16,13,0,0,0,3,16,3,5,16,5,0,0,7,16,4,0,13,8,0,0,5,16,2,0,15,8,0,0,3,16,5,7,16,6,0,0,0,14,16,16,14,1,0,0,0,11,16,12,3,0,0,0 +0,0,10,13,7,1,0,0,0,6,16,15,16,5,0,0,0,9,9,8,16,2,0,0,0,0,2,1,16,8,0,0,0,0,0,0,9,16,1,0,0,0,0,0,2,16,10,0,0,0,7,11,14,15,7,0,0,0,11,13,12,3,0,0,3 +0,0,13,16,5,0,0,0,0,6,16,16,14,0,0,0,0,10,12,4,16,0,0,0,0,8,5,7,14,0,0,0,0,0,1,14,8,0,0,0,0,0,7,15,2,0,0,0,0,0,16,15,12,11,3,0,0,1,16,16,15,16,8,0,2 +0,0,0,13,10,1,0,0,0,0,8,16,11,1,0,0,0,0,12,14,0,0,0,0,0,0,16,10,0,0,0,0,0,3,16,13,8,3,0,0,0,2,16,16,16,16,3,0,0,0,10,16,16,16,15,0,0,0,0,10,16,15,7,0,6 +0,0,11,12,5,0,0,0,0,0,12,16,16,10,0,0,0,0,16,4,8,16,3,0,0,2,16,1,0,9,8,0,0,4,13,0,0,10,8,0,0,1,15,0,3,16,9,0,0,2,16,13,16,13,1,0,0,0,13,13,7,0,0,0,0 +0,0,13,16,6,0,0,0,0,4,16,16,16,1,0,0,0,2,15,4,16,5,0,0,0,0,1,1,16,7,0,0,0,0,0,2,16,7,0,0,0,0,0,11,16,2,0,0,0,0,10,16,16,16,15,4,0,0,13,16,11,8,14,4,2 +0,0,3,12,15,16,8,0,0,0,11,14,13,16,4,0,0,0,0,0,11,11,0,0,0,0,0,3,16,5,0,0,0,2,12,16,16,16,7,0,0,0,10,16,10,5,1,0,0,0,5,16,3,0,0,0,0,0,3,16,1,0,0,0,7 +0,0,2,12,16,7,0,0,0,5,13,13,14,14,0,0,0,6,15,0,0,10,3,0,0,6,12,0,0,5,8,0,0,6,10,0,0,11,5,0,0,3,13,2,13,16,5,0,0,0,11,16,16,15,2,0,0,0,3,16,15,4,0,0,0 +0,0,0,6,15,6,0,0,0,0,0,12,10,13,0,0,0,0,0,11,15,14,0,0,0,0,9,11,14,0,0,0,0,4,11,0,10,6,0,0,0,3,9,0,1,14,1,0,0,0,11,8,2,9,7,0,0,0,0,6,12,13,7,0,8 +0,0,0,0,15,12,0,0,0,0,0,5,16,15,0,0,0,0,3,14,16,10,0,0,0,3,15,16,16,9,0,0,0,11,15,9,16,10,0,0,0,0,0,6,16,7,0,0,0,0,0,7,16,8,0,0,0,0,0,1,13,16,6,0,1 +0,0,0,6,10,14,0,0,0,0,10,16,16,16,6,0,0,1,16,10,4,15,11,0,0,1,12,5,0,9,9,0,0,0,8,9,0,8,8,0,0,0,9,16,16,16,4,0,0,0,2,16,16,14,2,0,0,0,0,6,10,3,0,0,0 +0,0,0,7,12,14,2,0,0,0,3,14,7,13,8,0,0,0,6,14,11,16,5,0,0,2,13,16,15,8,1,0,0,5,12,6,15,0,0,0,0,7,10,0,8,10,0,0,0,2,12,7,5,16,3,0,0,0,0,9,13,14,2,0,8 +0,0,0,7,13,0,0,0,0,0,6,16,5,0,0,0,0,0,15,7,0,0,0,0,0,2,16,1,0,0,0,0,0,4,15,4,4,0,0,0,0,6,16,14,14,14,3,0,0,0,8,15,8,14,12,0,0,0,0,6,12,13,3,0,6 +0,0,6,15,16,16,11,0,0,2,16,11,7,15,16,3,0,2,15,16,16,16,13,0,0,0,5,11,16,12,1,0,0,0,0,8,16,4,0,0,0,0,0,12,11,0,0,0,0,0,5,16,4,0,0,0,0,0,10,14,0,0,0,0,9 
+0,0,3,11,16,16,14,1,0,0,12,10,4,7,15,0,0,0,7,14,15,16,4,0,0,0,0,0,7,12,0,0,0,0,0,1,14,4,0,0,0,0,0,5,13,0,0,0,0,0,0,12,7,0,0,0,0,0,2,14,0,0,0,0,9 +0,0,4,14,16,11,0,0,0,0,13,12,9,15,4,0,0,0,11,0,0,8,6,0,0,4,12,0,0,6,8,0,0,6,8,0,0,10,7,0,0,3,13,4,10,16,2,0,0,0,13,16,16,15,0,0,0,0,5,14,12,3,0,0,0 +0,0,1,12,15,1,0,0,0,0,10,16,16,0,0,0,0,3,16,15,10,0,0,0,0,6,16,15,15,2,0,0,0,0,4,2,14,15,1,0,0,0,0,0,3,15,5,0,0,0,2,14,14,16,5,0,0,0,0,15,16,13,1,0,3 +0,0,0,7,12,14,5,0,0,0,11,10,4,10,16,0,0,1,16,12,11,16,14,0,0,1,10,12,12,16,10,0,0,0,0,0,1,15,3,0,0,0,0,0,11,9,0,0,0,0,0,6,13,0,0,0,0,0,0,11,9,0,0,0,9 +0,2,15,12,0,0,0,0,0,5,16,16,4,0,0,0,0,8,14,13,8,0,0,0,0,1,8,6,12,0,0,0,0,0,0,9,12,0,0,0,0,0,4,14,13,4,3,0,0,7,16,16,16,16,15,0,0,3,11,15,11,8,2,0,2 +0,0,0,8,12,11,3,0,0,0,6,15,6,8,12,0,0,2,15,10,0,3,11,0,0,4,16,14,12,14,4,0,0,0,8,16,16,14,0,0,0,0,0,0,5,16,2,0,0,0,0,4,11,15,0,0,0,0,0,13,12,6,0,0,9 +0,0,3,16,15,3,0,0,0,0,6,16,15,3,0,0,0,0,10,16,6,2,0,0,0,0,4,14,16,3,0,0,0,4,13,2,12,6,0,0,0,5,11,0,1,12,3,0,0,0,13,3,0,7,11,0,0,0,3,11,11,14,10,0,8 +0,0,10,10,12,15,12,0,0,0,8,16,13,16,8,0,0,0,0,0,11,14,0,0,0,0,9,12,16,14,7,0,0,1,16,16,15,11,5,0,0,0,7,16,6,0,0,0,0,0,7,16,1,0,0,0,0,0,10,11,0,0,0,0,7 +0,0,5,12,14,0,0,0,0,2,14,10,15,0,0,0,0,3,15,11,11,0,0,0,0,2,12,15,11,2,0,0,0,0,0,0,8,15,1,0,0,0,0,0,1,14,4,0,0,0,2,7,4,14,3,0,0,0,3,13,14,10,0,0,3 +0,0,1,11,14,4,0,0,0,1,7,14,14,15,1,0,0,6,13,0,0,11,5,0,0,8,9,0,0,4,8,0,0,5,12,0,0,4,8,0,0,1,15,2,0,10,8,0,0,0,9,16,16,16,2,0,0,0,1,12,14,9,0,0,0 +0,0,6,13,12,11,2,0,0,0,16,13,5,13,13,0,0,0,15,11,5,15,8,0,0,0,11,16,16,9,0,0,0,1,10,16,15,0,0,0,0,4,16,12,16,6,0,0,0,1,15,16,16,12,0,0,0,0,3,13,16,11,0,0,8 +0,0,0,10,16,5,0,0,0,0,9,16,14,2,0,0,0,1,16,13,0,0,0,0,0,4,16,11,4,0,0,0,0,7,16,16,16,15,2,0,0,1,15,10,4,13,13,0,0,0,7,16,15,16,15,0,0,0,0,7,16,16,7,0,6 +0,0,7,15,10,0,0,0,0,2,16,9,14,3,0,0,0,5,9,0,8,4,0,0,0,2,10,0,13,1,0,0,0,0,0,9,12,0,0,0,0,0,4,15,1,0,0,0,0,0,16,7,4,4,2,0,0,0,9,16,14,12,6,0,2 +0,0,0,6,10,1,15,6,0,0,5,16,4,8,15,1,0,1,15,8,2,15,8,0,0,4,16,14,13,15,0,0,0,1,15,16,16,13,0,0,0,0,3,10,15,0,0,0,0,0,0,11,14,1,0,0,0,0,0,10,15,0,0,0,4 +0,0,0,11,7,5,8,0,0,0,5,16,1,15,8,0,0,1,14,9,5,16,2,0,0,6,14,1,12,11,2,0,0,12,14,8,16,16,10,0,0,9,16,16,15,5,0,0,0,0,1,10,12,0,0,0,0,0,0,11,9,0,0,0,4 +0,0,13,13,1,0,0,0,0,2,16,16,9,0,0,0,0,9,16,12,12,0,0,0,0,12,10,8,13,0,0,0,0,5,7,10,14,0,0,0,0,0,2,15,12,0,0,0,0,1,15,16,16,14,6,0,0,0,11,16,16,14,6,0,2 +0,0,0,7,5,0,1,0,0,0,5,13,1,0,14,4,0,1,16,3,0,9,7,0,0,3,16,7,4,16,3,0,0,2,15,16,16,14,4,0,0,0,0,0,14,6,0,0,0,0,0,3,16,1,0,0,0,0,0,8,7,0,0,0,4 +0,0,0,1,11,10,0,0,0,0,0,8,16,13,0,0,0,0,7,16,16,7,0,0,0,5,16,16,16,0,0,0,0,4,10,16,16,0,0,0,0,0,0,15,16,0,0,0,0,0,0,11,16,4,0,0,0,0,0,0,11,13,1,0,1 +0,0,2,10,13,5,0,0,0,0,13,15,11,1,0,0,0,0,14,4,0,0,0,0,0,6,14,0,0,0,0,0,0,6,16,16,16,16,2,0,0,1,4,4,1,15,2,0,0,0,0,7,10,16,0,0,0,0,1,16,15,7,0,0,5 +0,0,0,0,6,11,0,0,0,0,0,4,16,12,0,0,0,0,1,12,16,11,0,0,0,1,12,13,16,7,0,0,0,9,16,13,16,3,0,0,0,2,3,6,16,3,0,0,0,0,0,6,16,3,0,0,0,0,0,0,9,8,0,0,1 +0,0,7,16,16,10,0,0,0,1,16,16,16,16,4,0,0,4,10,5,3,11,10,0,0,5,12,4,0,6,8,0,0,0,12,2,0,5,8,0,0,0,15,5,5,14,3,0,0,0,11,16,16,12,0,0,0,0,7,16,12,4,0,0,0 +0,0,1,9,16,15,4,0,0,0,4,16,10,14,10,0,0,0,3,16,7,14,11,0,0,0,9,16,16,13,0,0,0,5,16,16,16,2,0,0,0,6,14,9,16,2,0,0,0,0,12,16,16,4,0,0,0,0,0,12,11,3,0,0,8 +0,0,0,6,15,15,4,0,0,0,12,15,8,12,7,0,0,0,14,13,12,16,3,0,0,0,11,16,16,16,4,0,0,0,1,4,4,16,5,0,0,0,0,0,2,16,2,0,0,0,0,3,12,11,0,0,0,0,0,10,13,2,0,0,9 
+0,0,8,16,16,14,0,0,0,1,16,12,8,5,0,0,0,3,16,6,0,0,0,0,0,10,16,8,2,0,0,0,0,3,12,14,15,7,0,0,0,0,0,0,8,16,3,0,0,0,11,5,6,16,5,0,0,0,10,16,16,12,0,0,5 +0,0,10,15,14,16,12,0,0,0,5,12,11,13,11,0,0,0,0,0,2,16,3,0,0,0,9,12,14,16,10,0,0,0,12,15,15,9,2,0,0,0,0,14,7,0,0,0,0,0,7,14,1,0,0,0,0,0,13,6,0,0,0,0,7 +0,0,6,11,16,11,1,0,0,4,14,4,9,16,0,0,0,0,15,2,11,15,2,0,0,0,8,15,9,12,5,0,0,0,0,0,0,8,8,0,0,0,0,0,0,8,5,0,0,5,13,2,0,14,2,0,0,0,8,16,16,8,0,0,9 +0,0,0,9,15,4,0,0,0,0,4,15,8,1,0,0,0,0,12,11,0,0,0,0,0,0,15,5,0,0,0,0,0,2,16,15,16,12,3,0,0,0,16,13,5,8,13,0,0,0,7,13,4,6,15,0,0,0,0,8,15,16,11,0,6 +0,4,16,16,9,1,0,0,0,2,11,9,15,7,0,0,0,0,0,0,14,6,0,0,0,0,0,5,16,2,0,0,0,0,1,14,12,0,0,0,0,3,15,13,1,0,0,0,0,11,16,13,8,6,3,0,0,4,14,16,16,16,11,0,2 +0,0,2,13,12,4,0,0,0,1,15,12,3,13,2,0,0,8,13,8,0,6,4,0,0,8,4,3,0,3,8,0,0,5,5,0,0,6,6,0,0,1,14,1,0,8,3,0,0,0,8,8,5,13,1,0,0,0,0,11,15,6,0,0,0 +0,0,3,14,9,2,0,0,0,3,16,12,7,13,0,0,0,3,14,1,8,14,0,0,0,0,6,14,16,2,0,0,0,0,2,16,15,9,0,0,0,0,12,10,1,10,9,0,0,0,11,7,0,7,13,0,0,0,1,11,16,15,4,0,8 +0,0,0,2,15,9,0,0,0,0,0,7,16,15,0,0,0,0,5,15,16,10,0,0,0,3,16,16,16,6,0,0,0,3,12,14,16,5,0,0,0,0,0,8,16,8,0,0,0,0,0,5,16,15,2,0,0,0,0,2,13,15,4,0,1 +0,0,0,9,12,1,0,0,0,0,10,15,12,15,2,0,0,2,16,4,0,11,6,0,0,5,13,0,0,8,8,0,0,8,12,0,0,7,7,0,0,6,14,0,0,13,4,0,0,0,15,10,7,15,1,0,0,0,2,11,13,3,0,0,0 +0,0,0,15,12,5,0,0,0,0,4,16,16,8,0,0,0,0,9,16,15,3,0,0,0,1,16,16,9,0,0,0,0,6,16,16,9,0,0,0,0,0,11,16,11,0,0,0,0,0,4,16,14,1,0,0,0,0,0,13,16,0,0,0,1 +0,1,13,16,15,5,0,0,0,6,15,12,15,11,0,0,0,0,2,0,9,12,0,0,0,0,0,1,16,4,0,0,0,0,1,12,14,0,0,0,0,5,15,14,2,0,0,0,0,9,16,12,7,2,3,0,0,1,15,16,16,16,6,0,2 +0,0,8,14,16,12,1,0,0,2,11,6,5,15,4,0,0,0,0,1,9,14,0,0,0,0,0,11,11,0,0,0,0,0,0,2,15,7,0,0,0,0,0,0,4,16,3,0,0,0,9,5,4,15,3,0,0,0,9,15,13,5,0,0,3 +0,0,3,16,13,2,0,0,0,0,11,7,7,12,0,0,0,2,16,1,0,9,3,0,0,5,13,1,0,6,6,0,0,7,6,0,0,5,8,0,0,4,8,0,0,10,5,0,0,1,13,5,8,15,1,0,0,0,4,15,15,4,0,0,0 +0,0,12,16,16,16,10,0,0,0,4,8,11,16,13,0,0,0,2,0,4,16,5,0,0,6,15,12,15,16,11,0,0,1,11,16,15,11,3,0,0,0,1,13,8,0,0,0,0,0,8,16,1,0,0,0,0,0,10,10,0,0,0,0,7 +0,0,0,12,15,2,0,0,0,0,7,16,7,1,0,0,0,0,15,10,0,0,0,0,0,1,16,9,1,0,0,0,0,3,16,16,15,5,0,0,0,1,15,11,4,12,9,0,0,0,10,12,3,11,14,0,0,0,1,9,16,16,6,0,6 +0,0,6,12,12,12,4,0,0,3,16,8,6,16,7,0,0,4,13,0,5,16,4,0,0,1,13,13,15,16,4,0,0,0,1,8,4,12,4,0,0,0,2,0,0,12,7,0,0,6,15,4,0,13,4,0,0,0,8,15,16,16,2,0,9 +0,0,7,14,14,13,3,0,0,0,14,7,4,9,6,0,0,4,13,0,0,0,0,0,0,6,16,14,8,1,0,0,0,0,0,3,9,13,0,0,0,0,0,0,0,13,0,0,0,0,9,0,3,14,0,0,0,0,11,16,14,3,0,0,5 +0,0,0,0,14,10,0,0,0,0,1,11,16,12,0,0,0,2,12,16,16,12,0,0,0,4,11,5,15,11,0,0,0,0,0,1,16,7,0,0,0,0,0,3,16,7,0,0,0,0,0,1,16,11,0,0,0,0,0,0,11,9,0,0,1 +0,0,0,9,12,0,0,0,0,0,1,15,5,1,5,0,0,1,12,10,0,13,9,0,0,7,16,9,10,16,7,0,0,4,16,16,16,16,10,0,0,0,0,0,14,11,1,0,0,0,0,5,16,1,0,0,0,0,0,12,6,0,0,0,4 +0,0,0,14,15,3,0,0,0,0,7,16,9,2,0,0,0,0,14,12,0,0,0,0,0,2,16,7,0,0,0,0,0,7,16,16,12,2,0,0,0,1,15,8,9,15,1,0,0,0,9,12,7,16,3,0,0,0,0,12,16,14,1,0,6 +0,0,0,4,12,13,2,0,0,0,2,13,16,15,0,0,0,4,16,16,16,9,0,0,0,3,10,15,16,6,0,0,0,0,0,13,16,0,0,0,0,0,0,14,16,1,0,0,0,0,0,9,16,10,0,0,0,0,0,2,15,13,1,0,1 +0,0,0,8,13,6,0,0,0,0,5,14,6,14,2,0,0,0,8,8,1,14,4,0,0,0,2,13,11,16,5,0,0,0,0,3,7,9,4,0,0,1,2,0,0,8,8,0,0,1,13,3,0,14,5,0,0,0,0,11,16,13,1,0,9 +0,0,8,15,12,6,0,0,0,1,16,6,5,13,2,0,0,0,16,4,6,15,3,0,0,0,9,16,16,8,0,0,0,0,7,15,14,10,0,0,0,0,13,12,0,13,7,0,0,3,16,5,4,15,8,0,0,0,9,14,16,12,1,0,8 +0,0,1,9,16,16,12,1,0,0,10,13,8,12,16,3,0,0,2,0,1,13,10,0,0,0,0,9,16,11,0,0,0,0,0,14,16,15,0,0,0,0,0,7,10,16,2,0,0,0,2,16,14,13,0,0,0,0,0,14,14,2,0,0,3 
+0,0,8,15,15,12,4,0,0,0,15,4,3,10,16,0,0,0,8,14,12,16,12,0,0,0,0,2,4,9,8,0,0,0,0,0,0,9,7,0,0,1,1,0,0,12,4,0,0,7,11,1,4,12,1,0,0,0,10,16,13,3,0,0,9 +0,0,4,16,13,12,11,0,0,0,7,13,4,7,11,0,0,0,12,1,0,0,0,0,0,5,15,12,8,1,0,0,0,2,8,8,12,12,0,0,0,0,0,0,1,13,0,0,0,0,7,7,7,11,0,0,0,0,4,15,14,2,0,0,5 +0,0,0,6,14,2,0,0,0,0,4,16,9,3,0,0,0,0,13,8,0,0,0,0,0,0,15,10,2,0,0,0,0,4,16,11,13,9,0,0,0,0,10,8,0,9,8,0,0,0,2,13,3,10,8,0,0,0,0,4,14,16,2,0,6 +0,0,1,11,14,10,1,0,0,1,13,10,8,12,8,0,0,6,11,0,1,13,4,0,0,2,15,12,15,6,0,0,0,0,13,16,15,2,0,0,0,1,14,0,7,12,0,0,0,0,9,9,4,16,0,0,0,0,1,9,15,10,0,0,8 +0,1,9,12,14,10,1,0,0,7,15,7,6,15,8,0,0,1,1,0,3,16,5,0,0,0,2,11,16,9,0,0,0,0,2,14,16,8,0,0,0,0,0,1,7,15,7,0,0,0,12,6,6,15,7,0,0,0,8,15,14,9,0,0,3 +0,0,2,14,10,0,0,0,0,0,9,10,10,6,0,0,0,0,16,2,0,12,2,0,0,4,12,0,0,8,5,0,0,5,8,0,0,9,8,0,0,3,12,0,0,14,3,0,0,0,11,6,7,13,0,0,0,0,3,16,15,3,0,0,0 +0,0,0,6,13,6,0,0,0,0,0,12,16,13,0,0,0,0,10,16,16,10,0,0,0,5,16,16,16,7,0,0,0,2,5,13,16,4,0,0,0,0,0,9,16,4,0,0,0,0,0,12,16,1,0,0,0,0,0,8,14,2,0,0,1 +0,0,0,0,5,16,8,0,0,0,0,0,13,16,12,0,0,0,0,9,16,16,9,0,0,0,10,16,14,16,8,0,0,4,15,11,8,16,5,0,0,0,7,1,9,16,3,0,0,0,0,0,12,16,0,0,0,0,0,0,8,15,2,0,1 +0,0,8,12,12,6,0,0,0,0,15,16,16,16,0,0,0,0,8,16,16,16,0,0,0,0,6,16,16,16,4,0,0,0,5,16,16,16,4,0,0,0,4,16,16,16,7,0,0,0,15,16,16,16,8,0,0,0,4,9,11,8,4,0,1 +0,0,3,12,14,2,0,0,0,0,12,11,13,11,0,0,0,7,16,1,4,16,2,0,0,6,16,7,14,16,1,0,0,0,13,16,15,3,0,0,0,0,11,16,16,10,0,0,0,0,14,16,10,16,8,0,0,0,2,11,16,16,12,0,8 +0,0,7,16,11,1,0,0,0,2,15,12,15,12,0,0,0,6,16,6,6,16,3,0,0,8,16,4,1,16,7,0,0,4,16,4,0,13,8,0,0,4,16,4,2,16,5,0,0,2,15,11,14,11,0,0,0,0,6,15,11,1,0,0,0 +0,0,5,16,16,13,5,0,0,0,4,8,8,14,15,0,0,0,0,0,1,16,7,0,0,0,0,0,7,15,0,0,0,2,15,16,16,16,4,0,0,1,4,12,12,0,0,0,0,0,0,15,5,0,0,0,0,0,5,15,0,0,0,0,7 +0,1,15,6,0,0,0,0,0,7,15,16,3,0,0,0,0,11,5,12,4,0,0,0,0,2,3,10,7,0,0,0,0,0,1,16,2,0,0,0,0,0,6,14,0,0,0,0,0,0,14,16,16,15,6,0,0,0,8,8,8,10,13,0,2 +0,0,3,11,12,13,14,3,0,0,8,14,9,12,16,2,0,0,1,0,0,13,11,0,0,0,1,12,13,16,7,0,0,0,1,8,16,11,2,0,0,0,0,6,16,2,0,0,0,0,0,14,11,0,0,0,0,0,2,13,4,0,0,0,7 +0,0,0,5,15,7,0,0,0,0,6,15,10,16,2,0,0,4,16,5,0,10,8,0,0,1,15,4,0,8,8,0,0,0,14,4,0,8,8,0,0,0,12,7,0,14,6,0,0,0,7,16,13,15,1,0,0,0,0,6,13,5,0,0,0 +0,0,1,13,9,0,0,0,0,0,7,16,10,0,0,0,0,0,14,15,1,0,0,0,0,1,16,14,4,0,0,0,0,3,16,16,16,10,1,0,0,0,13,15,8,15,9,0,0,0,8,16,7,11,13,0,0,0,0,8,13,16,13,1,6 +0,0,3,12,10,1,0,0,0,0,12,16,16,10,0,0,0,0,7,16,16,8,0,0,0,0,3,16,16,12,0,0,0,0,2,15,16,14,0,0,0,0,0,16,16,15,0,0,0,0,2,16,16,14,0,0,0,0,4,8,12,3,0,0,1 +0,0,3,13,16,9,1,0,0,3,13,14,7,16,6,0,0,6,16,4,5,16,3,0,0,6,16,8,15,11,0,0,0,1,16,16,13,1,0,0,0,4,16,16,14,2,0,0,0,2,16,15,16,10,0,0,0,0,5,14,16,11,0,0,8 +0,0,0,9,16,1,0,0,0,0,3,16,13,0,0,0,0,0,10,16,7,10,6,0,0,6,16,16,13,16,8,0,0,4,15,16,16,16,6,0,0,0,0,7,16,12,0,0,0,0,0,11,16,2,0,0,0,0,0,14,10,0,0,0,4 +0,1,12,3,0,0,0,0,0,10,16,15,1,0,0,0,0,8,3,13,5,0,0,0,0,1,0,9,9,0,0,0,0,0,0,13,6,0,0,0,0,0,2,15,2,0,0,0,0,0,13,16,14,14,12,0,0,0,8,12,12,10,11,0,2 +0,0,4,15,2,0,0,0,0,0,12,15,1,0,0,0,0,2,16,10,0,0,0,0,0,3,16,8,1,0,0,0,0,6,16,16,15,9,2,0,0,3,16,14,8,15,10,0,0,0,10,15,8,13,15,0,0,0,4,8,13,13,6,0,6 +0,0,0,2,15,3,0,0,0,0,0,9,13,0,2,0,0,0,7,15,1,5,12,0,0,2,16,10,4,12,7,0,0,6,13,14,16,16,5,0,0,0,0,0,8,13,0,0,0,0,0,0,15,5,0,0,0,0,0,3,12,0,0,0,4 +0,0,1,8,12,7,0,0,0,0,7,16,16,16,10,0,0,0,5,16,16,16,5,0,0,0,5,16,16,15,1,0,0,0,4,16,16,16,4,0,0,0,9,16,16,16,0,0,0,0,9,16,16,14,0,0,0,0,2,10,12,9,0,0,1 
+0,0,3,13,16,16,9,0,0,0,3,9,11,16,6,0,0,0,0,0,8,14,1,0,0,3,12,13,16,15,4,0,0,4,13,14,16,12,3,0,0,0,0,9,12,0,0,0,0,0,0,15,8,0,0,0,0,0,2,16,3,0,0,0,7 +0,2,12,15,10,0,0,0,0,4,14,8,16,7,0,0,0,0,0,2,16,10,0,0,0,0,2,16,16,9,0,0,0,0,1,8,13,14,2,0,0,0,0,0,2,13,8,0,0,2,9,8,8,12,11,0,0,2,11,14,14,10,3,0,3 +0,0,6,13,3,0,0,0,0,0,15,16,15,5,0,0,0,0,14,4,6,13,0,0,0,0,0,0,4,15,0,0,0,0,0,0,8,12,0,0,0,0,1,6,15,8,0,0,0,0,16,16,16,16,12,0,0,0,5,8,9,10,15,3,2 +0,0,1,14,5,0,0,0,0,0,7,16,6,0,0,0,0,0,14,16,0,0,0,0,0,1,16,9,0,0,0,0,0,0,16,16,14,5,0,0,0,0,13,16,12,16,5,0,0,0,8,16,8,11,16,2,0,0,1,8,13,16,14,2,6 +0,0,1,10,12,11,5,0,0,0,8,16,16,16,3,0,0,0,13,16,16,11,0,0,0,1,16,16,16,12,0,0,0,2,16,16,16,10,0,0,0,2,16,16,16,6,0,0,0,0,7,16,16,8,0,0,0,0,2,11,12,4,0,0,1 +0,0,0,1,13,15,6,0,0,0,0,7,16,16,8,0,0,0,3,14,16,16,6,0,0,5,15,16,16,16,4,0,0,5,12,13,16,16,4,0,0,0,0,4,16,16,8,0,0,0,0,4,16,16,8,0,0,0,0,1,12,16,5,0,1 +0,0,0,8,13,0,0,0,0,0,1,15,12,0,0,0,0,0,9,15,3,0,0,0,0,0,15,11,0,0,0,0,0,3,16,14,12,4,0,0,0,2,16,15,13,16,6,0,0,0,9,15,5,13,13,0,0,0,0,7,14,14,6,0,6 +0,0,0,8,11,3,0,0,0,0,6,16,16,15,1,0,0,0,5,16,16,16,1,0,0,0,5,16,16,16,1,0,0,0,1,16,16,16,3,0,0,0,3,16,16,16,0,0,0,0,0,16,16,12,0,0,0,0,0,11,11,2,0,0,1 +0,0,1,11,12,12,10,0,0,0,3,10,8,16,10,0,0,0,0,0,3,16,3,0,0,0,3,10,15,14,2,0,0,0,14,16,16,16,6,0,0,0,1,14,9,0,0,0,0,0,3,16,3,0,0,0,0,0,6,12,0,0,0,0,7 +0,0,14,16,4,0,0,0,0,2,16,16,16,4,0,0,0,7,16,5,16,16,6,0,0,2,16,14,16,16,8,0,0,0,4,8,6,16,8,0,0,0,0,0,2,16,8,0,0,0,8,8,10,16,7,0,0,0,9,12,15,9,0,0,9 +0,1,12,13,7,0,0,0,0,2,15,13,16,7,0,0,0,0,0,6,16,8,0,0,0,0,7,16,16,3,0,0,0,0,4,12,16,12,0,0,0,0,0,0,4,16,8,0,0,1,8,10,12,16,10,0,0,2,12,14,12,9,1,0,3 +0,0,2,8,12,4,0,0,0,1,14,11,5,16,1,0,0,1,15,3,10,14,0,0,0,0,7,16,13,1,0,0,0,0,9,16,11,0,0,0,0,0,12,4,12,7,0,0,0,0,12,13,8,12,0,0,0,0,2,9,12,6,0,0,8 +0,0,2,15,4,0,0,0,0,0,10,16,2,0,0,0,0,0,12,11,0,0,0,0,0,0,14,12,0,0,0,0,0,2,16,16,16,9,0,0,0,3,16,15,12,16,6,0,0,1,14,16,6,14,10,0,0,0,3,14,16,13,3,0,6 +0,0,10,12,6,0,0,0,0,0,16,14,15,4,0,0,0,1,16,8,16,14,0,0,0,0,7,15,16,16,5,0,0,0,0,3,4,15,9,0,0,0,0,0,0,12,11,0,0,0,5,7,9,16,9,0,0,0,7,16,13,9,2,0,9 +0,0,5,14,10,3,0,0,0,1,16,14,16,11,0,0,0,4,16,6,12,16,7,0,0,1,13,15,15,16,8,0,0,0,0,2,7,16,7,0,0,0,0,0,4,16,4,0,0,0,2,4,7,16,3,0,0,0,6,16,15,8,0,0,9 +0,0,3,12,12,12,2,0,0,0,3,13,9,16,7,0,0,0,0,1,2,16,4,0,0,0,0,1,10,9,0,0,0,0,8,16,16,16,5,0,0,0,3,14,10,0,0,0,0,0,4,16,3,0,0,0,0,0,3,12,1,0,0,0,7 +0,0,0,3,12,2,0,0,0,0,0,12,12,0,0,0,0,0,6,16,4,8,13,0,0,1,15,8,1,15,8,0,0,6,16,14,13,16,1,0,0,2,4,10,16,10,0,0,0,0,0,5,16,2,0,0,0,0,0,3,10,0,0,0,4 +0,0,4,12,15,5,0,0,0,2,15,13,11,11,0,0,0,2,10,2,5,16,0,0,0,0,0,4,14,14,0,0,0,0,0,5,12,14,7,0,0,0,0,0,0,3,14,0,0,0,9,10,3,8,15,2,0,0,2,9,16,16,7,0,3 +0,0,7,16,15,7,0,0,0,0,16,10,8,15,0,0,0,0,5,2,9,15,0,0,0,0,5,16,16,8,0,0,0,0,2,5,11,14,3,0,0,0,0,0,0,8,12,0,0,0,10,10,7,13,12,0,0,0,6,12,16,11,4,0,3 +0,0,3,11,16,5,0,0,0,0,12,15,12,7,0,0,0,0,15,6,0,0,0,0,0,0,16,15,11,1,0,0,0,0,15,15,14,9,0,0,0,0,15,6,2,16,2,0,0,0,12,12,12,16,5,0,0,0,3,13,15,9,0,0,6 +0,0,7,12,5,3,0,0,0,3,15,8,13,16,3,0,0,4,14,1,0,9,0,0,0,0,9,15,13,9,0,0,0,0,3,14,15,6,0,0,0,0,14,5,3,14,2,0,0,0,16,5,5,15,3,0,0,0,6,11,11,6,0,0,8 +0,0,8,15,16,8,0,0,0,2,16,15,13,11,0,0,0,3,16,3,0,0,0,0,0,6,16,15,7,0,0,0,0,2,11,12,16,7,0,0,0,0,0,0,7,14,0,0,0,0,10,15,15,15,1,0,0,0,8,16,15,6,0,0,5 +0,0,8,14,13,13,10,0,0,4,16,13,13,16,9,0,0,4,16,2,6,16,1,0,0,0,0,1,14,5,0,0,0,0,0,6,15,0,0,0,0,0,1,15,6,0,0,0,0,0,11,16,3,0,0,0,0,0,11,11,1,0,0,0,7 
+0,0,4,12,12,1,0,0,0,0,14,16,16,11,0,0,0,0,16,3,3,14,4,0,0,1,15,0,0,10,6,0,0,3,13,0,0,11,5,0,0,2,16,0,1,14,3,0,0,1,14,7,12,16,0,0,0,0,6,15,15,5,0,0,0 +0,4,16,14,14,15,5,0,0,8,16,16,16,16,9,0,0,11,14,0,11,15,1,0,0,5,5,3,16,8,0,0,0,0,0,11,14,1,0,0,0,0,4,16,5,0,0,0,0,0,14,14,0,0,0,0,0,3,16,10,0,0,0,0,7 +0,0,9,9,1,0,0,0,0,0,10,16,3,0,0,0,0,0,13,16,7,0,0,0,0,0,6,16,13,0,0,0,0,0,0,14,16,0,0,0,0,0,0,13,16,13,4,0,0,0,10,16,16,16,15,2,0,0,8,16,13,8,5,0,1 +0,0,5,15,10,3,0,0,0,0,14,16,12,14,2,0,0,3,16,5,0,13,4,0,0,5,14,0,0,13,7,0,0,6,12,0,1,16,3,0,0,3,16,2,4,15,2,0,0,0,14,11,15,10,0,0,0,0,6,15,12,3,0,0,0 +0,0,4,15,11,1,0,0,0,2,13,16,16,0,0,0,0,2,12,16,12,0,0,0,0,0,0,14,14,0,0,0,0,0,0,15,14,0,0,0,0,0,0,16,13,0,0,0,0,0,3,16,15,0,0,0,0,0,2,15,16,1,0,0,1 +0,0,5,12,12,1,0,0,0,11,16,12,14,9,0,0,0,14,7,0,10,11,0,0,0,0,3,15,16,8,0,0,0,0,2,12,12,14,3,0,0,0,0,0,0,10,14,0,0,0,9,10,8,13,16,0,0,0,6,12,16,12,4,0,3 +0,0,8,15,13,8,0,0,0,1,16,15,16,15,0,0,0,5,16,1,5,16,2,0,0,2,16,6,15,16,2,0,0,0,9,16,14,16,5,0,0,0,0,0,4,16,3,0,0,0,9,12,15,14,0,0,0,0,7,14,12,3,0,0,9 +0,0,8,12,8,14,3,0,0,2,15,13,11,16,4,0,0,3,16,10,11,9,0,0,0,0,4,16,16,4,0,0,0,0,2,15,16,8,0,0,0,3,15,7,12,10,0,0,0,4,16,4,13,8,0,0,0,1,9,16,15,3,0,0,8 +0,1,15,16,16,16,9,0,0,7,16,13,11,16,9,0,0,9,16,4,10,15,1,0,0,8,9,3,15,8,0,0,0,0,0,10,15,0,0,0,0,0,1,16,9,0,0,0,0,0,10,16,2,0,0,0,0,0,16,13,0,0,0,0,7 +0,0,8,16,7,0,0,0,0,1,15,11,15,0,0,0,0,2,14,0,11,2,0,0,0,0,1,0,13,4,0,0,0,0,0,6,13,0,0,0,0,0,0,13,8,0,0,0,0,0,13,16,10,10,11,0,0,0,10,15,12,12,12,1,2 +0,0,7,16,10,0,0,0,0,0,14,11,13,10,0,0,0,1,16,4,6,13,0,0,0,0,4,0,8,10,0,0,0,0,0,2,15,4,0,0,0,0,0,9,13,0,0,0,0,0,9,16,16,16,9,0,0,0,11,14,12,12,15,2,2 +0,0,12,16,9,5,0,0,0,0,16,9,13,15,2,0,0,4,16,0,4,16,4,0,0,0,14,13,15,16,8,0,0,0,5,11,8,16,8,0,0,0,0,0,0,14,7,0,0,0,12,10,12,16,2,0,0,0,10,13,12,4,0,0,9 +0,0,4,10,12,8,0,0,0,0,9,16,12,11,0,0,0,0,12,9,3,0,0,0,0,0,13,16,16,6,0,0,0,0,9,6,8,13,1,0,0,0,0,0,4,16,3,0,0,0,13,12,14,13,1,0,0,0,7,14,10,0,0,0,5 +0,0,4,16,11,4,0,0,0,1,14,13,13,16,0,0,0,2,15,2,0,12,4,0,0,4,13,0,0,13,1,0,0,3,13,0,0,15,1,0,0,1,16,0,4,15,1,0,0,0,14,10,11,13,0,0,0,0,5,15,15,4,0,0,0 +0,0,6,9,14,10,1,0,0,4,16,15,5,13,4,0,0,7,13,4,6,15,2,0,0,0,9,16,16,9,0,0,0,0,11,13,15,10,0,0,0,0,15,4,5,15,0,0,0,1,16,9,10,14,0,0,0,0,7,13,14,5,0,0,8 +0,0,8,13,7,5,0,0,0,2,16,14,14,15,2,0,0,8,10,4,3,8,4,0,0,8,8,0,0,4,8,0,0,8,8,0,0,8,8,0,0,4,13,0,0,13,7,0,0,0,14,10,10,15,3,0,0,0,6,12,14,5,0,0,0 +0,0,0,10,11,0,0,0,0,0,0,15,11,9,0,0,0,0,5,15,10,16,2,0,0,0,13,9,12,13,0,0,0,3,16,7,13,14,5,0,0,12,16,16,16,16,8,0,0,5,8,13,16,4,0,0,0,0,0,11,15,0,0,0,4 +0,0,4,14,10,2,0,0,0,0,12,13,14,13,0,0,0,0,11,5,3,13,0,0,0,0,7,12,11,16,1,0,0,0,1,10,9,14,3,0,0,0,0,0,0,13,5,0,0,0,4,8,8,16,5,0,0,0,7,16,16,11,0,0,9 +0,0,6,14,9,0,0,0,0,3,16,15,16,6,0,0,0,6,14,0,8,10,0,0,0,0,2,0,11,10,0,0,0,0,0,5,16,4,0,0,0,0,2,14,12,1,0,0,0,0,10,16,16,16,16,0,0,0,10,13,12,8,10,1,2 +0,0,9,12,10,3,0,0,0,0,14,16,13,13,0,0,0,4,16,2,1,16,3,0,0,4,16,0,0,12,8,0,0,6,16,0,0,15,8,0,0,5,15,0,1,16,5,0,0,2,16,11,14,13,0,0,0,0,8,16,13,6,0,0,0 +0,0,9,16,16,9,0,0,0,1,16,12,16,16,0,0,0,0,2,7,16,9,0,0,0,0,7,16,15,8,1,0,0,0,5,9,9,15,8,0,0,0,0,0,0,13,12,0,0,0,6,8,12,16,10,0,0,0,9,15,12,4,0,0,3 +0,0,6,11,8,12,1,0,0,2,16,12,16,14,8,0,0,3,15,4,1,13,4,0,0,0,9,15,14,12,0,0,0,0,6,16,16,9,0,0,0,2,15,8,6,12,0,0,0,4,16,10,12,15,0,0,0,0,5,12,12,7,0,0,8 +0,0,10,8,10,12,6,0,0,2,16,14,12,15,11,0,0,3,9,2,4,14,2,0,0,0,0,1,14,4,0,0,0,0,0,5,14,0,0,0,0,0,1,13,6,0,0,0,0,1,12,16,1,0,0,0,0,0,14,10,0,0,0,0,7 
+0,0,0,7,11,0,0,0,0,0,0,11,5,2,0,0,0,0,3,14,3,14,0,0,0,0,8,6,7,7,0,0,0,1,15,4,12,9,4,0,0,5,16,16,16,15,8,0,0,1,1,6,10,0,0,0,0,0,0,10,9,0,0,0,4 +0,0,11,12,7,0,0,0,0,6,14,8,15,16,3,0,0,3,15,9,4,16,3,0,0,0,3,14,16,12,0,0,0,0,3,12,15,15,2,0,0,0,15,10,0,13,8,0,0,1,16,5,5,12,11,0,0,0,14,16,16,9,2,0,8 +0,0,2,11,16,7,0,0,0,0,8,16,16,2,0,0,0,2,16,16,16,0,0,0,0,0,5,16,16,0,0,0,0,0,2,16,16,1,0,0,0,0,8,16,16,3,0,0,0,0,9,16,16,13,0,0,0,0,2,12,13,9,0,0,1 +0,0,5,15,13,2,0,0,0,0,10,13,12,11,0,0,0,0,3,2,4,15,0,0,0,0,0,9,16,10,0,0,0,0,0,3,13,16,6,0,0,0,0,0,0,7,14,0,0,0,5,8,8,10,16,1,0,0,4,9,14,13,7,0,3 +0,0,8,16,16,15,1,0,0,0,16,14,14,11,0,0,0,4,16,16,13,4,0,0,0,3,14,12,14,15,1,0,0,0,0,0,0,15,6,0,0,0,0,0,1,14,7,0,0,0,15,12,14,13,0,0,0,0,11,16,13,2,0,0,5 +0,0,16,14,12,7,0,0,0,0,16,8,12,10,0,0,0,4,16,4,0,0,0,0,0,6,16,16,13,3,0,0,0,0,1,5,10,16,3,0,0,0,0,0,0,14,5,0,0,2,14,8,8,16,3,0,0,1,10,16,15,8,0,0,5 +0,0,6,13,16,3,0,0,0,1,15,16,16,9,0,0,0,0,3,4,16,7,0,0,0,0,0,5,16,6,0,0,0,0,9,16,16,4,1,0,0,0,14,16,16,16,6,0,0,0,9,16,9,5,0,0,0,0,9,13,2,0,0,0,7 +0,0,12,5,0,0,0,0,0,0,16,2,0,0,0,0,0,3,12,0,0,0,0,0,0,4,12,0,0,0,0,0,0,7,14,16,14,3,0,0,0,3,16,8,9,16,4,0,0,2,16,3,4,16,4,0,0,0,11,12,11,4,0,0,6 +0,1,7,13,7,0,0,0,0,8,16,15,16,5,0,0,0,6,16,8,8,13,1,0,0,4,12,0,0,13,7,0,0,8,12,0,0,8,8,0,0,7,13,0,0,14,7,0,0,4,16,8,10,16,5,0,0,1,15,16,14,7,0,0,0 +0,0,9,8,1,0,0,0,0,5,16,16,7,0,0,0,0,8,10,8,8,0,0,0,0,0,2,4,12,0,0,0,0,0,0,6,10,0,0,0,0,0,0,7,9,0,0,0,0,0,6,16,15,11,5,0,0,0,9,14,12,14,10,0,2 +0,0,11,11,2,0,0,0,0,0,12,14,15,2,0,0,0,0,10,8,6,16,0,0,0,0,7,13,7,16,2,0,0,0,0,11,12,14,5,0,0,0,0,0,0,8,10,0,0,0,7,10,10,14,15,0,0,0,7,12,16,16,12,1,9 +0,0,0,14,6,0,0,0,0,0,9,11,2,0,0,0,0,1,16,6,0,0,0,0,0,4,14,0,0,0,0,0,0,6,15,11,6,2,0,0,0,2,16,13,8,15,3,0,0,0,10,13,3,5,14,0,0,0,1,10,13,16,10,0,6 +0,0,8,16,16,8,0,0,0,0,8,11,13,16,0,0,0,0,0,0,13,11,0,0,0,0,0,1,16,7,0,0,0,0,10,16,16,13,6,0,0,0,13,16,13,8,1,0,0,0,6,16,3,0,0,0,0,0,11,15,0,0,0,0,7 +0,0,8,15,16,14,2,0,0,1,16,14,12,16,7,0,0,0,8,1,3,16,5,0,0,0,0,8,16,11,0,0,0,0,0,6,14,13,0,0,0,0,0,0,3,16,6,0,0,0,10,8,12,16,2,0,0,0,9,16,15,5,0,0,3 +0,0,3,13,0,0,0,0,0,0,9,13,0,0,0,0,0,0,12,6,0,0,0,0,0,0,15,7,0,0,0,0,0,3,16,14,8,2,0,0,0,4,16,12,12,16,4,0,0,2,15,12,8,15,13,0,0,0,3,11,15,12,8,0,6 +0,0,4,12,12,11,1,0,0,1,16,10,1,14,0,0,0,1,14,7,0,14,0,0,0,0,4,16,13,10,0,0,0,0,2,16,16,3,0,0,0,1,13,11,15,7,0,0,0,2,16,5,14,8,0,0,0,0,6,9,12,3,0,0,8 +0,0,6,16,7,0,0,0,0,0,16,11,14,6,0,0,0,2,15,0,3,16,1,0,0,7,8,0,0,13,2,0,0,5,8,0,0,11,4,0,0,7,7,0,0,12,4,0,0,5,16,7,7,16,0,0,0,0,7,16,12,4,0,0,0 +0,0,8,13,12,2,0,0,0,3,16,13,12,14,0,0,0,0,6,1,9,13,0,0,0,0,0,5,16,9,0,0,0,0,0,1,12,16,2,0,0,0,0,0,0,12,9,0,0,1,10,6,7,15,9,0,0,0,9,12,15,11,2,0,3 +0,2,11,14,5,0,0,0,0,5,15,12,16,2,0,0,0,4,12,0,12,4,0,0,0,0,1,0,12,4,0,0,0,0,0,1,14,1,0,0,0,0,0,9,7,0,0,0,0,1,9,15,10,8,4,0,0,2,15,15,12,12,7,0,2 +0,0,8,15,12,1,0,0,0,4,16,14,12,11,0,0,0,4,16,5,1,14,4,0,0,4,14,1,0,12,4,0,0,6,12,0,0,10,5,0,0,8,13,0,1,13,4,0,0,4,16,16,16,14,1,0,0,0,8,13,11,3,0,0,0 +0,0,4,13,14,6,0,0,0,0,8,15,6,16,4,0,0,0,8,15,3,16,1,0,0,0,0,13,16,15,1,0,0,0,7,16,16,4,0,0,0,4,16,9,12,10,0,0,0,2,14,9,11,16,0,0,0,0,5,12,15,7,0,0,8 +0,1,6,16,13,6,1,0,0,1,12,16,16,16,1,0,0,0,10,16,16,12,0,0,0,2,14,16,16,12,0,0,0,2,9,16,16,12,0,0,0,0,1,14,16,14,0,0,0,0,6,16,16,14,2,0,0,0,4,12,13,8,2,0,1 +0,0,8,14,13,3,0,0,0,0,16,16,16,15,0,0,0,0,13,12,4,16,2,0,0,0,2,15,14,15,2,0,0,0,4,15,16,7,0,0,0,0,15,14,8,15,4,0,0,4,16,12,6,16,7,0,0,0,6,13,12,8,0,0,8 
+0,1,10,16,9,0,0,0,0,9,15,13,16,0,0,0,0,4,2,0,14,2,0,0,0,0,0,0,12,4,0,0,0,0,0,1,14,1,0,0,0,0,0,9,7,0,0,0,0,0,14,16,16,16,11,0,0,0,8,10,12,14,10,0,2 +0,0,1,14,8,0,0,0,0,0,10,15,3,0,0,0,0,0,13,12,0,0,0,0,0,0,16,8,0,0,0,0,0,3,16,14,11,2,0,0,0,4,16,14,12,15,1,0,0,0,12,15,8,16,11,0,0,0,1,10,13,12,5,0,6 +0,0,0,4,16,2,0,0,0,0,0,14,10,2,8,0,0,0,10,15,1,8,12,0,0,4,16,5,1,15,8,0,0,13,16,14,16,16,2,0,1,12,12,12,16,10,0,0,0,0,0,5,16,8,0,0,0,0,0,8,15,4,0,0,4 +0,0,8,10,0,0,0,0,0,0,14,16,5,0,0,0,0,0,15,11,11,0,0,0,0,0,10,6,12,0,0,0,0,0,1,4,14,0,0,0,0,0,0,6,10,0,0,0,0,0,5,15,14,9,9,2,0,0,10,16,15,15,12,1,2 +0,0,6,16,16,13,0,0,0,0,13,12,13,16,3,0,0,0,0,0,8,14,0,0,0,0,1,7,14,10,0,0,0,0,9,16,16,16,7,0,0,0,1,15,12,9,3,0,0,0,6,16,4,0,0,0,0,0,9,13,1,0,0,0,7 +0,0,3,14,5,0,0,0,0,0,12,16,3,0,0,0,0,2,12,8,0,0,0,0,0,3,3,0,0,0,0,0,0,7,4,0,6,0,0,0,0,4,16,15,13,15,3,0,0,2,14,14,4,14,11,0,0,0,3,13,16,16,12,0,6 +0,0,0,2,16,4,0,0,0,0,0,9,13,0,0,0,0,0,2,15,5,9,7,0,0,0,12,10,1,16,5,0,0,7,15,8,10,16,2,0,3,16,16,16,16,13,0,0,1,8,7,5,16,14,0,0,0,0,0,1,15,6,0,0,4 +0,0,9,14,15,4,0,0,0,7,16,16,16,10,0,0,0,5,10,4,16,4,0,0,0,0,0,16,16,7,0,0,0,0,0,3,9,16,3,0,0,0,2,0,1,15,7,0,0,0,15,13,13,16,4,0,0,0,9,15,15,8,0,0,3 +0,0,0,10,15,0,0,0,0,0,5,15,5,0,0,0,0,0,13,8,0,0,0,0,0,0,16,4,0,0,0,0,0,5,16,12,9,3,0,0,0,2,16,15,12,15,8,0,0,0,12,14,5,12,11,0,0,0,1,8,14,15,5,0,6 +0,0,7,11,14,4,0,0,0,12,16,12,15,8,0,0,0,6,9,0,12,7,0,0,0,0,1,9,16,2,0,0,0,0,8,16,16,15,1,0,0,0,0,0,2,15,9,0,0,2,10,4,2,13,10,0,0,0,7,12,15,12,1,0,3 +0,0,0,4,15,7,0,0,0,0,0,6,15,1,0,0,0,0,0,12,8,4,15,0,0,0,4,14,1,12,12,0,0,8,15,13,8,16,6,0,6,16,16,16,16,14,0,0,3,7,6,9,16,9,0,0,0,0,0,5,13,2,0,0,4 +0,0,9,14,4,0,0,0,0,4,16,16,16,7,0,0,0,5,16,10,10,14,1,0,0,8,14,0,1,13,4,0,0,8,12,0,0,10,8,0,0,8,12,0,0,11,7,0,0,5,15,9,5,13,4,0,0,0,8,16,16,13,1,0,0 +0,0,3,14,16,14,3,0,0,3,15,10,14,16,6,0,0,7,13,0,10,16,7,0,0,8,12,0,1,16,4,0,0,7,12,0,0,13,4,0,0,3,14,1,0,13,4,0,0,0,11,11,9,15,2,0,0,0,2,11,12,4,0,0,0 +0,1,10,14,13,0,0,0,0,6,12,8,16,7,0,0,0,0,0,0,14,8,0,0,0,0,8,8,16,6,0,0,0,0,9,16,16,16,6,0,0,0,2,15,5,4,2,0,0,0,11,14,0,0,0,0,0,0,12,5,0,0,0,0,7 +0,0,9,16,16,10,0,0,0,1,16,7,8,16,6,0,0,4,16,7,9,16,6,0,0,0,9,12,12,16,4,0,0,0,0,0,0,16,6,0,0,0,0,0,0,14,7,0,0,0,4,6,4,16,4,0,0,0,9,13,12,10,0,0,9 +0,0,6,8,0,0,0,0,0,0,10,15,0,0,0,0,0,0,10,15,0,0,0,0,0,0,12,14,0,0,0,0,0,2,16,16,16,15,4,0,0,0,13,13,9,16,12,0,0,0,13,12,12,16,7,0,0,0,8,16,15,8,0,0,6 +0,2,11,15,8,6,0,0,0,10,14,8,15,16,0,0,0,9,14,0,10,16,2,0,0,2,16,10,16,16,3,0,0,0,4,8,8,16,4,0,0,0,0,0,0,15,7,0,0,2,11,12,15,16,8,0,0,2,12,9,8,4,0,0,9 +0,0,8,14,7,0,0,0,0,3,16,13,16,10,0,0,0,7,14,1,7,14,1,0,0,7,11,0,0,13,4,0,0,4,8,0,0,8,8,0,0,4,12,0,0,9,8,0,0,3,15,5,6,15,6,0,0,0,8,16,16,9,0,0,0 +0,1,9,16,15,1,0,0,0,8,14,8,14,8,0,0,0,8,5,3,15,5,0,0,0,0,0,9,16,6,0,0,0,0,0,0,9,16,3,0,0,0,0,0,0,10,10,0,0,0,5,7,6,16,7,0,0,0,9,16,14,9,1,0,3 +0,1,8,10,3,0,0,0,0,4,16,16,16,11,1,0,0,3,16,9,8,11,4,0,0,0,10,7,3,10,2,0,0,0,3,16,13,5,0,0,0,0,13,15,8,0,0,0,0,4,15,5,13,0,0,0,0,1,7,12,11,0,0,0,8 +0,0,9,16,12,4,0,0,0,3,16,16,16,16,3,0,0,5,16,1,1,13,8,0,0,1,14,10,10,16,6,0,0,0,6,16,16,8,0,0,0,1,14,16,12,0,0,0,0,4,16,10,16,4,0,0,0,1,11,16,13,1,0,0,8 +0,0,1,14,15,3,0,0,0,2,15,16,16,13,0,0,0,6,15,12,10,16,3,0,0,8,15,0,0,10,8,0,0,6,12,0,0,11,8,0,0,1,15,6,2,14,8,0,0,0,10,16,14,15,3,0,0,0,2,9,16,7,0,0,0 +0,0,2,15,12,0,0,0,0,0,11,8,15,4,0,0,0,0,13,4,12,4,0,0,0,0,1,1,15,0,0,0,0,0,0,10,9,0,0,0,0,0,8,15,1,0,0,0,0,6,16,13,8,8,2,0,0,0,4,10,12,13,6,0,2 
+0,0,6,14,14,4,0,0,0,0,16,15,15,16,2,0,0,5,12,1,0,15,5,0,0,8,12,0,0,8,8,0,0,5,12,0,0,8,8,0,0,4,13,0,0,10,7,0,0,1,16,6,7,16,3,0,0,0,5,15,16,11,0,0,0 +0,0,5,14,16,7,0,0,0,3,14,2,5,12,0,0,0,5,9,0,7,11,0,0,0,0,0,5,15,5,0,0,0,0,0,7,12,14,1,0,0,0,0,0,0,11,7,0,0,0,3,8,3,12,6,0,0,0,7,16,13,8,0,0,3 +0,0,0,1,13,12,1,0,0,0,0,6,16,14,1,0,0,8,12,16,16,11,0,0,0,4,8,9,16,15,0,0,0,0,0,0,16,15,0,0,0,0,0,0,13,16,3,0,0,0,0,0,12,16,4,0,0,0,0,0,11,16,5,0,1 +0,0,6,15,5,0,0,0,0,0,15,16,13,3,0,0,0,1,16,11,15,15,3,0,0,8,9,3,5,16,4,0,0,8,9,0,0,10,8,0,0,8,10,0,0,12,5,0,0,3,15,12,13,16,1,0,0,0,8,16,13,6,0,0,0 +0,1,10,16,13,4,0,0,0,7,16,6,12,12,1,0,0,10,13,0,9,16,4,0,0,4,16,9,14,16,5,0,0,0,5,8,11,16,4,0,0,0,0,0,0,16,9,0,0,0,9,8,1,14,9,0,0,0,12,16,16,15,7,0,9 +0,1,7,15,13,2,0,0,0,7,14,7,13,8,0,0,0,0,0,1,13,7,0,0,0,0,0,7,16,5,0,0,0,0,0,1,9,12,0,0,0,0,0,0,2,14,6,0,0,0,5,5,2,14,8,0,0,0,11,16,12,10,1,0,3 +0,0,0,14,8,0,0,0,0,0,0,16,8,0,0,0,0,0,6,16,3,0,0,0,0,0,7,15,0,0,0,0,0,0,15,16,15,8,1,0,0,0,16,12,4,11,12,0,0,0,10,13,0,2,16,4,0,0,1,9,15,16,13,0,6 +0,2,11,14,7,1,0,0,0,10,15,13,16,4,0,0,0,11,12,5,16,3,0,0,0,1,2,15,14,0,0,0,0,0,5,12,15,11,1,0,0,0,0,0,2,14,10,0,0,0,8,12,12,16,11,0,0,0,9,15,11,8,1,0,3 +0,0,15,16,16,7,0,0,0,7,16,10,15,16,3,0,0,10,16,2,14,14,0,0,0,1,13,16,16,16,3,0,0,0,0,0,5,16,4,0,0,0,0,0,1,16,10,0,0,0,7,8,8,16,10,0,0,0,9,12,13,9,1,0,9 +0,2,9,12,16,14,1,0,0,5,14,6,6,7,0,0,0,4,14,6,8,3,0,0,0,4,16,16,15,16,2,0,0,1,6,2,0,13,8,0,0,0,0,0,0,12,7,0,0,1,8,8,9,15,2,0,0,2,12,13,9,3,0,0,5 +0,0,7,12,11,9,0,0,0,7,16,15,11,13,0,0,0,0,12,7,0,0,0,0,0,0,12,10,6,0,0,0,0,0,8,12,13,14,0,0,0,0,0,0,0,8,8,0,0,0,10,6,4,12,9,0,0,0,10,13,12,10,2,0,5 +0,0,7,10,0,0,0,0,0,0,12,13,0,0,0,0,0,2,16,8,0,0,0,0,0,0,16,5,0,0,0,0,0,4,16,10,6,0,0,0,0,4,16,16,16,14,5,0,0,3,16,10,11,16,11,0,0,0,7,14,12,9,2,0,6 +0,2,10,14,13,6,0,0,0,4,16,8,13,15,3,0,0,2,16,4,9,16,4,0,0,0,12,10,12,16,4,0,0,0,2,9,12,16,0,0,0,0,0,0,4,16,1,0,0,0,2,4,11,16,4,0,0,3,16,13,11,4,0,0,9 +0,0,0,1,16,8,0,0,0,0,0,9,16,2,0,0,0,0,2,15,8,0,0,0,0,0,10,14,0,0,0,0,0,6,16,5,2,13,6,0,0,14,16,15,13,16,3,0,0,4,8,8,15,11,0,0,0,0,0,2,16,7,0,0,4 +0,0,0,4,12,0,0,0,0,0,0,10,14,0,0,0,0,0,1,15,5,0,0,0,0,0,10,13,0,12,5,0,0,3,16,2,6,16,3,0,0,11,16,10,13,16,1,0,0,1,7,12,16,11,0,0,0,0,0,5,16,5,0,0,4 +0,0,0,7,13,3,0,0,0,0,1,15,16,15,0,0,0,9,16,16,16,16,2,0,0,4,8,12,16,10,0,0,0,0,0,11,16,13,0,0,0,0,0,7,16,15,0,0,0,0,0,6,16,16,2,0,0,0,0,6,16,16,7,0,1 +0,0,8,15,7,0,0,0,0,0,14,14,16,1,0,0,0,0,8,2,13,5,0,0,0,0,0,0,14,4,0,0,0,0,0,1,15,3,0,0,0,0,0,4,15,0,0,0,0,0,4,15,15,12,10,1,0,0,7,15,14,12,16,3,2 +0,0,4,11,0,0,0,0,0,0,11,9,0,0,0,0,0,0,16,4,0,0,0,0,0,1,15,2,4,2,0,0,0,5,14,13,16,14,1,0,0,4,16,6,0,10,10,0,0,1,15,3,1,13,8,0,0,0,3,12,14,10,1,0,6 +0,0,9,15,15,4,0,0,0,4,15,7,14,12,0,0,0,7,12,2,14,13,0,0,0,2,14,16,14,16,1,0,0,0,1,3,3,16,4,0,0,0,1,0,0,14,4,0,0,0,14,11,6,14,5,0,0,0,8,12,14,11,2,0,9 +0,0,0,10,11,0,0,0,0,0,0,15,8,0,0,0,0,0,3,16,2,0,0,0,0,0,6,14,0,0,0,0,0,0,13,16,16,13,3,0,0,2,14,16,4,11,10,0,0,0,1,14,6,12,12,0,0,0,0,9,16,14,4,0,6 +0,0,4,10,0,0,0,0,0,0,9,15,2,0,0,0,0,0,12,10,0,0,0,0,0,0,15,8,3,0,0,0,0,1,16,16,16,14,2,0,0,0,16,10,5,15,8,0,0,0,11,13,11,16,4,0,0,0,3,11,13,5,0,0,6 +0,0,8,15,16,7,0,0,0,5,16,11,1,12,0,0,0,0,14,4,12,7,0,0,0,0,3,14,12,1,0,0,0,0,2,15,7,0,0,0,0,0,11,10,12,0,0,0,0,0,13,3,16,0,0,0,0,0,7,14,12,0,0,0,8 +0,1,9,13,13,5,0,0,0,10,14,6,9,16,3,0,0,7,10,0,0,16,6,0,0,0,0,0,10,14,1,0,0,0,0,9,15,3,0,0,0,0,7,16,2,0,0,0,0,0,14,9,0,3,1,0,0,0,14,16,15,12,2,0,2 
+0,1,10,16,16,15,6,0,0,5,15,8,6,16,11,0,0,0,2,5,15,13,3,0,0,0,0,16,14,0,0,0,0,0,0,6,16,11,1,0,0,0,0,0,6,16,8,0,0,0,3,5,12,16,5,0,0,0,16,16,13,7,0,0,3 +0,0,3,14,16,13,6,0,0,0,13,7,1,13,16,1,0,0,12,5,0,10,12,0,0,0,8,14,12,14,2,0,0,0,0,4,14,6,0,0,0,0,0,5,13,0,0,0,0,0,0,13,6,0,0,0,0,0,2,11,0,0,0,0,9 +0,0,6,13,11,1,0,0,0,0,12,14,11,11,0,0,0,0,15,2,0,14,2,0,0,3,15,0,0,8,6,0,0,5,13,0,0,7,7,0,0,3,14,0,0,12,7,0,0,0,12,7,8,14,2,0,0,0,4,12,12,5,0,0,0 +0,1,8,16,16,12,0,0,0,7,15,9,14,15,0,0,0,3,3,0,12,14,0,0,0,0,6,10,15,14,1,0,0,8,16,16,16,15,8,0,0,5,5,10,14,1,0,0,0,0,1,15,8,0,0,0,0,0,13,11,1,0,0,0,7 +0,0,4,9,16,8,0,0,0,3,16,16,16,13,0,0,0,3,16,8,16,8,0,0,0,0,11,16,12,0,0,0,0,0,10,16,4,0,0,0,0,0,14,16,10,0,0,0,0,0,14,14,13,0,0,0,0,0,6,16,9,0,0,0,8 +0,0,13,16,13,6,0,0,0,0,15,14,15,15,3,0,0,0,10,11,7,13,1,0,0,0,0,13,16,7,0,0,0,0,10,16,13,0,0,0,0,0,15,4,14,3,0,0,0,3,12,4,16,1,0,0,0,1,12,15,6,0,0,0,8 +0,3,15,15,8,9,0,0,0,7,14,11,16,16,6,0,0,2,16,2,7,15,3,0,0,0,8,14,16,5,0,0,0,0,7,16,6,0,0,0,0,1,15,14,8,0,0,0,0,6,14,14,7,0,0,0,0,3,16,15,2,0,0,0,8 +0,0,7,12,0,0,0,0,0,0,15,15,10,13,0,0,0,5,14,3,3,14,2,0,0,4,10,0,0,10,5,0,0,3,9,0,0,15,3,0,0,2,9,0,3,16,1,0,0,0,14,4,13,8,0,0,0,0,5,16,12,1,0,0,0 +0,0,4,13,16,16,9,0,0,2,12,6,1,12,12,0,0,1,3,2,15,10,0,0,0,0,0,11,9,0,0,0,0,0,0,4,14,2,0,0,0,0,0,0,13,10,0,0,0,0,0,0,10,11,0,0,0,6,9,13,12,1,0,0,3 +0,0,9,10,15,14,0,0,0,2,16,16,16,15,0,0,0,2,16,4,15,5,0,0,0,0,12,16,12,0,0,0,0,0,13,16,4,0,0,0,0,2,16,14,12,0,0,0,0,1,16,11,16,4,0,0,0,0,9,16,15,3,0,0,8 +0,0,0,3,15,5,0,0,0,0,3,14,13,1,0,0,0,1,14,13,1,1,2,0,0,8,16,3,0,12,12,0,0,12,14,8,8,16,7,0,0,5,15,16,16,16,1,0,0,0,0,0,15,11,0,0,0,0,0,2,16,7,0,0,4 +0,5,14,15,12,10,1,0,0,13,16,16,16,16,7,0,0,12,16,3,0,1,1,0,0,8,16,4,0,0,0,0,0,0,15,13,0,0,0,0,0,0,5,16,2,0,0,0,0,0,8,16,5,0,0,0,0,6,16,13,0,0,0,0,5 +0,3,13,15,16,9,1,0,0,6,12,2,3,16,6,0,0,0,0,2,13,11,0,0,0,0,0,9,13,0,0,0,0,0,0,8,12,0,0,0,0,0,0,3,15,5,0,0,0,0,4,3,14,11,0,0,0,3,15,15,9,1,0,0,3 +0,0,3,12,14,8,5,0,0,2,14,8,10,16,14,0,0,4,12,0,4,16,11,0,0,2,16,10,13,16,4,0,0,0,5,8,9,16,0,0,0,0,0,0,11,9,0,0,0,0,0,5,15,2,0,0,0,0,2,13,6,0,0,0,9 +0,0,0,4,14,14,0,0,0,0,3,15,13,3,0,0,0,0,15,15,1,0,0,0,0,7,16,5,0,9,6,0,0,11,16,8,12,16,9,0,0,3,11,12,16,14,0,0,0,0,0,6,16,9,0,0,0,0,0,7,16,4,0,0,4 +0,3,14,16,16,13,0,0,0,8,15,8,13,16,0,0,0,0,1,2,16,10,0,0,0,0,0,12,15,1,0,0,0,0,9,15,3,0,0,0,0,2,16,8,0,0,0,0,0,6,16,4,4,7,7,0,0,3,15,16,16,13,7,0,2 +0,0,5,14,16,16,12,0,0,5,16,13,8,13,16,0,0,3,7,0,3,14,10,0,0,0,0,1,13,14,0,0,0,0,0,9,15,3,0,0,0,0,4,16,7,0,0,0,0,0,12,13,0,0,0,0,0,0,6,16,16,10,0,0,2 +0,0,2,14,15,2,0,0,0,0,13,15,14,8,0,0,0,0,16,6,11,10,0,0,0,1,9,8,15,9,0,0,0,10,16,16,16,16,9,0,0,1,4,8,16,5,5,0,0,0,0,14,13,0,0,0,0,0,2,16,6,0,0,0,7 +0,0,9,14,16,12,1,0,0,3,14,5,4,12,8,0,0,1,2,0,0,13,6,0,0,0,4,8,10,16,4,0,0,5,14,11,16,8,1,0,0,3,2,9,11,0,0,0,0,0,3,15,2,0,0,0,0,0,9,8,0,0,0,0,7 +0,0,9,16,14,12,3,0,0,3,13,4,11,8,11,0,0,1,15,5,0,12,4,0,0,0,3,14,13,7,0,0,0,0,0,10,13,0,0,0,0,0,5,12,15,0,0,0,0,0,12,2,12,0,0,0,0,0,10,16,6,0,0,0,8 +0,0,0,7,16,2,0,0,0,0,4,16,9,0,0,0,0,0,13,12,0,0,1,0,0,6,16,2,0,10,11,0,0,10,16,6,13,16,8,0,0,5,16,16,16,14,2,0,0,0,0,5,16,6,0,0,0,0,0,8,16,2,0,0,4 +0,0,1,9,8,1,0,0,0,0,8,16,14,9,0,0,0,0,14,14,1,16,0,0,0,1,16,3,0,11,4,0,0,0,16,0,0,15,4,0,0,0,14,1,5,16,0,0,0,0,11,7,14,7,0,0,0,0,4,15,13,1,0,0,0 +0,6,15,16,16,15,6,0,0,9,16,6,5,15,12,0,0,0,1,1,13,15,3,0,0,0,0,5,16,5,0,0,0,0,0,0,14,10,0,0,0,0,0,0,12,14,0,0,0,3,7,5,14,13,0,0,0,9,16,16,13,2,0,0,3 
+0,0,0,11,12,0,0,0,0,0,6,16,6,0,0,0,0,3,16,6,0,5,3,0,0,10,16,0,2,15,10,0,0,6,16,14,14,14,1,0,0,0,2,4,16,10,0,0,0,0,0,8,16,4,0,0,0,0,0,10,14,0,0,0,4 +0,0,10,15,16,16,9,0,0,1,10,2,0,10,14,0,0,0,0,0,7,15,3,0,0,0,0,13,11,1,0,0,0,0,0,13,11,0,0,0,0,0,0,5,15,5,0,0,0,0,5,4,12,11,0,0,0,0,9,16,10,2,0,0,3 +0,0,11,16,12,5,0,0,0,4,16,11,16,16,3,0,0,4,16,11,16,14,0,0,0,0,10,16,16,6,0,0,0,0,0,15,14,0,0,0,0,0,7,16,5,0,0,0,0,0,15,13,0,0,0,0,0,0,12,11,0,0,0,0,9 +0,0,2,16,13,0,0,0,0,0,4,16,15,0,0,0,0,0,2,16,16,2,0,0,0,0,1,16,16,2,0,0,0,0,0,15,16,4,0,0,0,0,0,14,16,3,0,0,0,0,0,16,16,3,0,0,0,0,0,14,16,6,0,0,1 +0,1,9,15,16,13,0,0,0,5,11,4,2,13,3,0,0,1,3,0,1,15,3,0,0,0,0,0,11,11,0,0,0,0,0,11,11,0,0,0,0,0,7,14,1,0,0,0,0,0,15,6,0,0,0,0,0,0,11,16,16,15,6,0,2 +0,0,0,9,16,6,0,0,0,0,8,16,11,1,0,0,0,3,16,8,0,0,0,0,0,11,15,0,0,9,9,0,0,8,16,16,16,16,8,0,0,0,6,8,15,14,1,0,0,0,0,5,16,5,0,0,0,0,0,12,15,1,0,0,4 +0,1,7,11,16,11,0,0,0,8,14,8,10,16,3,0,0,4,4,3,15,12,0,0,0,0,0,8,13,0,0,0,0,0,0,5,16,4,0,0,0,0,0,0,10,11,0,0,0,0,0,5,13,13,0,0,0,0,13,13,8,0,0,0,3 +0,0,4,16,12,1,0,0,0,0,13,16,13,12,0,0,0,5,16,11,0,14,2,0,0,5,16,12,0,9,7,0,0,5,16,6,0,9,6,0,0,1,16,4,0,14,6,0,0,0,10,12,14,16,4,0,0,0,3,14,16,7,0,0,0 +0,3,16,5,0,3,5,0,0,7,16,5,0,12,14,0,0,10,16,2,9,16,5,0,0,7,16,14,16,13,0,0,0,0,7,16,15,5,0,0,0,0,5,16,6,0,0,0,0,0,14,11,0,0,0,0,0,3,16,9,0,0,0,0,4 +0,0,12,8,0,0,0,0,0,1,16,8,0,1,5,0,0,7,16,2,0,12,13,0,0,8,16,8,10,16,6,0,0,3,15,16,16,13,3,0,0,0,0,11,14,1,0,0,0,0,5,16,3,0,0,0,0,0,14,8,0,0,0,0,4 +0,2,14,16,12,1,0,0,0,8,15,12,16,2,0,0,0,0,3,4,16,2,0,0,0,0,0,13,10,0,0,0,0,0,4,16,4,0,0,0,0,0,12,11,0,0,0,0,0,2,16,8,6,8,9,1,0,2,13,16,16,16,16,5,2 +0,0,12,14,12,11,4,0,0,0,4,10,12,15,14,0,0,0,0,0,0,12,10,0,0,0,2,4,5,16,3,0,0,2,14,16,16,16,8,0,0,0,4,11,14,2,0,0,0,0,3,14,3,0,0,0,0,0,14,10,0,0,0,0,7 +0,0,4,16,12,1,0,0,0,0,10,15,13,11,0,0,0,0,5,12,5,16,0,0,0,0,0,0,8,15,0,0,0,0,0,1,15,12,0,0,0,0,0,11,16,10,0,0,0,0,5,16,16,16,16,6,0,0,6,16,5,5,10,13,2 +0,0,0,7,11,0,0,0,0,0,0,16,10,0,0,0,0,0,4,16,0,0,0,0,0,0,4,12,0,0,0,0,0,0,8,12,12,11,2,0,0,0,8,16,12,8,12,0,0,0,3,16,5,8,15,2,0,0,0,9,16,14,5,0,6 +0,2,15,16,16,15,1,0,0,2,13,13,11,16,1,0,0,0,2,16,15,7,0,0,0,0,0,14,14,0,0,0,0,0,9,14,16,0,0,0,0,1,14,8,12,5,0,0,0,3,16,6,15,3,0,0,0,2,16,16,10,0,0,0,8 +0,0,7,13,12,3,0,0,0,0,14,8,11,12,0,0,0,0,0,1,9,12,0,0,0,0,0,13,16,1,0,0,0,0,0,9,12,12,3,0,0,0,0,0,0,6,12,0,0,0,8,1,2,8,13,0,0,0,10,16,16,11,2,0,3 +0,0,2,16,12,0,0,0,0,0,2,16,16,3,0,0,0,0,2,16,16,3,0,0,0,0,1,16,16,2,0,0,0,0,5,16,13,0,0,0,0,0,1,16,14,0,0,0,0,0,4,16,12,0,0,0,0,0,2,14,10,0,0,0,1 +0,0,5,15,9,0,0,0,0,1,13,13,15,6,0,0,0,2,16,4,3,15,2,0,0,5,16,0,0,10,6,0,0,5,14,0,0,9,9,0,0,4,16,0,1,11,6,0,0,1,14,11,12,16,2,0,0,0,5,13,14,4,0,0,0 +0,0,2,11,15,3,0,0,0,0,9,13,5,12,0,0,0,0,14,3,0,5,4,0,0,5,11,0,0,4,6,0,0,7,12,0,0,7,6,0,0,4,14,0,1,13,5,0,0,0,13,10,13,14,0,0,0,0,3,11,13,2,0,0,0 +0,0,1,14,7,0,0,0,0,0,6,14,12,6,0,0,0,0,11,11,0,12,0,0,0,0,13,8,0,7,5,0,0,0,15,6,0,5,8,0,0,0,12,3,0,10,9,0,0,0,10,10,9,15,4,0,0,0,2,11,14,7,0,0,0 +0,0,0,12,11,0,0,0,0,0,3,16,13,1,0,0,0,0,9,16,3,0,0,0,0,0,13,13,0,0,0,0,0,0,12,12,8,6,0,0,0,0,11,16,16,16,9,0,0,0,8,16,11,9,16,2,0,0,0,11,16,16,15,1,6 +0,0,8,13,12,7,0,0,0,0,12,9,9,15,2,0,0,0,0,7,14,11,0,0,0,0,4,16,10,0,0,0,0,0,0,9,16,6,0,0,0,0,0,0,2,15,5,0,0,0,9,1,4,16,4,0,0,0,12,16,16,8,1,0,3 +0,1,7,11,13,16,14,0,0,0,8,7,4,11,10,0,0,0,0,0,1,16,1,0,0,0,0,0,11,6,0,0,0,2,11,12,16,13,4,0,0,5,9,16,6,2,0,0,0,0,4,12,0,0,0,0,0,0,13,3,0,0,0,0,7 
+0,0,8,16,15,3,0,0,0,0,10,16,15,10,0,0,0,0,3,11,13,12,0,0,0,0,0,1,16,9,0,0,0,0,0,6,16,4,0,0,0,0,0,12,14,0,0,0,0,0,7,16,15,12,12,3,0,0,8,15,13,16,16,11,2 +0,0,7,15,12,0,0,0,0,6,15,8,14,3,0,0,0,3,6,0,15,3,0,0,0,0,0,4,12,0,0,0,0,0,0,11,6,0,0,0,0,0,3,15,1,0,0,0,0,0,13,13,9,12,10,0,0,0,12,13,12,10,7,2,2 +0,0,12,16,16,10,1,0,0,4,16,9,8,9,1,0,0,9,14,1,0,0,0,0,0,9,16,11,1,0,0,0,0,0,7,12,11,0,0,0,0,0,0,2,16,1,0,0,0,1,12,12,15,0,0,0,0,0,13,16,6,0,0,0,5 +0,0,9,16,15,9,0,0,0,4,16,15,12,14,2,0,0,9,16,12,3,0,0,0,0,5,16,16,13,2,0,0,0,0,3,4,14,5,0,0,0,0,0,0,12,9,0,0,0,0,7,11,16,7,0,0,0,0,13,16,13,1,0,0,5 +0,0,3,14,16,13,0,0,0,1,14,11,5,14,7,0,0,9,16,3,0,13,10,0,0,11,16,8,14,16,7,0,0,1,11,12,15,14,1,0,0,0,0,3,16,7,0,0,0,0,0,12,15,1,0,0,0,0,5,16,4,0,0,0,9 +0,0,2,16,10,0,0,0,0,0,6,16,16,4,0,0,0,0,2,15,16,7,0,0,0,0,0,11,16,4,0,0,0,0,0,15,16,3,0,0,0,0,0,13,15,0,0,0,0,0,1,16,13,0,0,0,0,0,2,15,11,0,0,0,1 +0,0,11,16,16,8,0,0,0,1,13,7,9,16,3,0,0,0,0,1,10,15,1,0,0,0,0,15,16,10,0,0,0,0,0,14,16,16,5,0,0,0,0,0,0,8,14,0,0,1,12,4,5,13,15,0,0,0,11,16,16,15,5,0,3 +0,3,6,15,13,1,0,0,0,10,16,5,11,10,0,0,0,1,11,10,11,11,0,0,0,0,0,10,16,3,0,0,0,0,1,13,13,3,0,0,0,0,7,15,6,11,0,0,0,0,10,12,7,14,0,0,0,0,4,16,15,3,0,0,8 +0,0,11,7,10,3,0,0,0,0,9,12,12,14,0,0,0,0,0,15,15,11,1,0,0,0,1,14,9,0,0,0,0,0,9,13,8,0,0,0,0,0,14,5,10,0,0,0,0,0,14,2,12,0,0,0,0,0,10,16,10,0,0,0,8 +0,4,16,16,16,15,2,0,0,3,11,8,9,16,8,0,0,0,0,1,14,15,1,0,0,0,2,15,16,6,0,0,0,0,2,12,14,14,0,0,0,0,0,0,3,16,1,0,0,5,10,4,7,16,2,0,0,2,16,16,16,8,0,0,3 +0,0,7,16,12,5,0,0,0,0,13,10,7,15,0,0,0,0,10,15,12,13,0,0,0,0,0,13,16,5,0,0,0,0,8,15,10,8,0,0,0,0,13,5,0,13,1,0,0,0,14,2,4,15,1,0,0,0,6,16,16,6,0,0,8 +0,0,5,13,15,6,0,0,0,1,15,15,10,11,0,0,0,7,16,1,0,0,0,0,0,10,16,16,12,2,0,0,0,3,8,4,9,10,0,0,0,0,0,0,5,14,0,0,0,0,4,4,13,13,0,0,0,0,11,16,16,4,0,0,5 +0,0,12,16,16,9,1,0,0,8,16,14,11,14,3,0,0,8,16,4,0,0,0,0,0,3,15,16,6,0,0,0,0,0,0,6,14,1,0,0,0,0,0,1,16,4,0,0,0,0,9,11,16,1,0,0,0,1,13,16,10,0,0,0,5 +0,0,1,15,11,1,0,0,0,0,3,16,16,2,0,0,0,0,3,16,16,1,0,0,0,0,1,16,16,2,0,0,0,0,4,16,14,0,0,0,0,0,2,16,16,0,0,0,0,0,2,16,14,0,0,0,0,0,1,15,7,0,0,0,1 +0,0,6,14,13,5,0,0,0,0,5,6,14,10,0,0,0,0,9,16,14,1,0,0,0,0,7,16,10,0,0,0,0,0,10,7,12,2,0,0,0,0,13,3,6,6,0,0,0,0,12,5,11,9,0,0,0,0,5,16,15,3,0,0,8 +0,0,0,16,6,0,0,0,0,0,0,16,14,0,0,0,0,0,0,14,16,0,0,0,0,0,0,15,15,0,0,0,0,0,0,14,15,0,0,0,0,0,0,15,14,0,0,0,0,0,0,13,14,0,0,0,0,0,0,10,13,0,0,0,1 +0,0,5,14,10,1,0,0,0,2,15,15,15,10,0,0,0,7,16,11,1,16,1,0,0,6,16,2,0,14,6,0,0,3,15,0,0,13,6,0,0,3,16,0,3,16,1,0,0,1,15,12,15,14,0,0,0,0,5,13,13,4,0,0,0 +0,0,3,12,14,3,0,0,0,0,11,11,8,13,0,0,0,5,15,1,0,7,2,0,0,3,15,3,0,3,6,0,0,3,11,2,0,4,7,0,0,1,9,0,0,5,7,0,0,0,11,4,4,14,2,0,0,0,2,13,15,9,0,0,0 +0,0,8,15,16,8,0,0,0,3,13,13,6,2,0,0,0,13,8,0,0,0,0,0,0,5,16,5,0,0,0,0,0,0,8,14,2,0,0,0,0,0,0,13,11,0,0,0,0,0,4,13,16,0,0,0,0,0,7,16,8,0,0,0,5 +0,0,16,16,16,10,0,0,0,0,6,9,15,16,1,0,0,0,0,8,16,7,0,0,0,0,0,4,16,6,0,0,0,0,0,0,8,15,1,0,0,0,0,0,0,15,10,0,0,0,1,4,9,15,12,0,0,0,15,16,16,11,2,0,3 +0,2,12,16,16,6,0,0,0,11,15,8,15,12,0,0,0,10,2,7,16,10,0,0,0,0,0,12,15,2,0,0,0,0,0,3,16,11,0,0,0,0,0,0,6,15,10,0,0,0,7,5,10,16,15,0,0,3,16,16,16,10,2,0,3 +0,0,1,12,8,0,0,0,0,0,3,16,6,0,0,0,0,0,9,12,0,0,0,0,0,0,12,11,4,2,0,0,0,0,16,16,16,16,4,0,0,0,12,10,0,2,14,1,0,0,7,11,2,6,15,5,0,0,1,10,15,16,9,0,6 +0,4,16,16,16,8,0,0,0,0,4,2,11,12,0,0,0,0,0,3,15,6,0,0,0,2,4,11,15,1,0,0,0,10,16,16,16,16,9,0,0,1,12,12,4,4,2,0,0,2,16,2,0,0,0,0,0,7,14,0,0,0,0,0,7 
+0,1,12,16,16,12,0,0,0,3,15,9,14,14,0,0,0,0,0,0,14,12,0,0,0,0,6,11,16,11,3,0,0,0,11,16,16,16,12,0,0,0,5,16,6,7,3,0,0,0,11,15,0,0,0,0,0,1,15,8,0,0,0,0,7 +0,0,9,16,16,14,0,0,0,3,15,10,2,5,0,0,0,11,9,0,0,0,0,0,0,8,14,1,0,0,0,0,0,1,15,5,0,0,0,0,0,0,11,9,0,0,0,0,0,0,9,12,0,0,0,0,0,0,14,9,0,0,0,0,5 +0,2,16,16,16,13,1,0,0,0,3,3,3,15,5,0,0,0,0,0,5,15,0,0,0,0,3,5,14,3,0,0,0,0,15,16,16,14,5,0,0,0,6,14,4,9,5,0,0,0,12,7,0,0,0,0,0,4,15,0,0,0,0,0,7 +0,0,8,16,16,11,0,0,0,5,15,7,2,16,3,0,0,12,10,0,4,16,1,0,0,11,14,2,12,12,0,0,0,0,11,14,16,2,0,0,0,0,1,16,13,1,0,0,0,0,7,13,16,8,0,0,0,0,13,9,12,12,0,0,8 +0,2,0,13,12,3,0,0,5,14,1,13,16,5,0,0,0,8,16,14,16,4,0,0,0,0,8,16,16,2,0,0,0,0,0,16,16,9,0,0,0,0,3,16,16,16,4,0,0,0,11,16,16,16,6,0,0,0,1,15,16,10,0,0,8 +0,0,8,16,16,4,0,0,0,5,16,8,13,10,0,0,0,6,11,1,14,7,0,0,0,0,0,6,16,1,0,0,0,0,0,15,9,0,0,0,0,0,8,14,2,0,0,0,0,0,16,9,0,2,3,0,0,0,10,16,16,12,3,0,2 +0,0,3,13,12,0,0,0,0,1,16,13,16,4,0,0,0,8,6,7,16,1,0,0,0,0,2,15,9,0,0,0,0,0,11,14,0,0,0,0,0,0,16,10,0,0,0,0,0,0,15,13,8,9,4,0,0,0,3,13,15,13,2,0,2 +0,3,13,16,8,0,0,0,0,14,11,12,16,0,0,0,0,10,1,7,16,1,0,0,0,0,0,12,12,0,0,0,0,0,7,15,1,0,0,0,0,0,15,8,0,0,0,0,0,4,16,9,7,8,8,0,0,2,15,16,16,16,11,0,2 +0,0,5,13,16,7,0,0,0,0,15,8,11,12,0,0,0,0,12,5,14,5,0,0,0,0,10,16,8,0,0,0,0,0,9,16,2,0,0,0,0,0,15,15,6,0,0,0,0,0,14,13,10,0,0,0,0,0,8,16,4,0,0,0,8 +0,0,4,16,12,1,0,0,0,0,10,16,14,9,0,0,0,0,15,14,2,16,2,0,0,1,16,15,1,12,7,0,0,1,16,8,3,10,10,0,0,0,15,4,0,12,10,0,0,0,11,8,4,16,6,0,0,0,2,13,16,14,1,0,0 +0,0,8,16,13,4,0,0,0,0,5,6,12,14,0,0,0,0,0,3,15,14,0,0,0,0,0,13,15,4,0,0,0,0,0,9,12,1,0,0,0,0,0,1,11,12,0,0,0,0,3,4,9,16,4,0,0,0,8,15,14,12,3,0,3 +0,0,4,16,8,0,0,0,0,0,13,12,1,0,0,0,0,3,16,6,0,0,0,0,0,4,16,3,0,0,0,0,0,6,16,1,9,11,3,0,0,2,16,10,15,12,14,2,0,0,11,16,9,7,16,6,0,0,3,13,16,16,10,0,6 +0,0,5,16,16,8,0,0,0,3,11,11,7,4,0,0,0,11,11,0,0,0,0,0,0,13,15,7,0,0,0,0,0,3,8,13,8,0,0,0,0,0,0,2,15,2,0,0,0,0,8,5,15,6,0,0,0,0,7,16,15,3,0,0,5 +0,0,2,9,15,16,7,0,0,0,13,12,4,10,12,0,0,1,16,6,5,14,6,0,0,0,8,16,16,11,0,0,0,0,0,2,4,16,2,0,0,0,0,0,8,13,1,0,0,0,0,2,16,4,0,0,0,0,0,10,7,0,0,0,9 +0,0,0,6,14,16,10,0,0,0,12,16,9,14,11,0,0,7,15,3,8,13,3,0,0,6,16,14,16,8,0,0,0,0,3,4,11,13,0,0,0,0,0,0,14,10,0,0,0,0,0,4,16,5,0,0,0,0,0,7,16,0,0,0,9 +0,0,0,14,14,0,0,0,0,0,5,16,12,0,0,0,0,0,7,16,6,0,0,0,0,0,12,16,4,0,0,0,0,0,12,16,1,0,0,0,0,0,12,16,0,0,0,0,0,0,9,16,4,0,0,0,0,0,1,13,15,2,0,0,1 +0,0,5,11,14,5,0,0,0,7,13,5,4,14,2,0,0,0,4,8,2,14,2,0,0,0,1,13,14,3,0,0,0,0,1,14,8,0,0,0,0,0,9,9,10,2,0,0,0,0,13,1,8,8,0,0,0,0,6,13,14,7,0,0,8 +0,0,8,11,0,0,13,9,0,3,15,6,0,12,13,1,0,3,16,5,7,16,5,0,0,0,12,16,16,16,8,0,0,0,1,11,13,4,0,0,0,0,1,15,4,0,0,0,0,0,7,13,0,0,0,0,0,0,11,5,0,0,0,0,4 +0,2,7,12,15,13,0,0,2,15,16,15,12,5,0,0,2,16,9,1,0,0,0,0,0,10,13,1,0,0,0,0,0,2,15,7,0,0,0,0,0,0,7,14,1,0,0,0,0,0,9,16,3,0,0,0,0,0,11,13,1,0,0,0,5 +0,0,8,15,16,16,2,0,0,7,16,12,8,5,0,0,0,10,10,0,0,0,0,0,0,11,10,0,0,0,0,0,0,2,16,4,0,0,0,0,0,0,6,14,0,0,0,0,0,0,5,14,0,0,0,0,0,0,14,11,0,0,0,0,5 +0,0,2,14,12,0,0,0,0,0,4,16,12,0,0,0,0,0,5,16,6,0,0,0,0,0,6,16,6,0,0,0,0,0,9,16,5,0,0,0,0,0,10,16,4,0,0,0,0,0,8,16,7,0,0,0,0,0,2,14,16,3,0,0,1 +0,1,8,15,15,6,0,0,0,10,16,15,15,14,0,0,0,4,13,7,16,7,0,0,0,2,14,16,14,1,0,0,0,0,3,16,7,0,0,0,0,0,11,16,15,0,0,0,0,0,13,16,16,3,0,0,0,0,7,16,15,1,0,0,8 +0,0,8,16,8,0,0,0,0,9,16,13,16,5,0,0,0,7,3,6,16,4,0,0,0,0,0,15,13,0,0,0,0,0,9,16,2,0,0,0,0,0,14,9,0,0,0,0,0,0,16,11,4,6,8,0,0,0,7,16,16,11,7,0,2 +0,0,3,10,15,8,0,0,0,3,15,9,8,12,0,0,0,8,8,0,5,11,0,0,0,2,6,0,10,7,0,0,0,0,0,5,14,1,0,0,0,0,0,14,5,0,0,0,0,0,4,16,4,5,11,0,0,0,1,14,15,11,2,0,2 
+0,0,6,12,14,5,0,0,0,4,16,11,8,15,6,0,0,4,16,7,2,12,6,0,0,0,13,15,15,13,2,0,0,0,1,16,16,6,0,0,0,0,8,15,14,15,1,0,0,3,16,10,10,16,1,0,0,1,12,16,14,5,0,0,8 +0,0,1,12,16,16,11,0,0,0,5,15,8,8,5,0,0,2,15,15,12,7,0,0,0,11,16,16,16,16,4,0,0,3,5,2,1,13,9,0,0,0,0,0,0,11,9,0,0,0,2,10,9,16,4,0,0,0,2,16,16,8,0,0,5 +0,0,0,0,6,16,4,0,0,0,0,0,9,16,8,0,0,0,1,5,16,16,8,0,0,8,16,16,16,16,6,0,0,5,12,12,12,16,4,0,0,0,0,0,9,16,5,0,0,0,0,0,8,16,4,0,0,0,0,0,5,16,2,0,1 +0,0,5,12,15,15,1,0,0,6,16,13,14,16,0,0,0,1,4,0,8,14,0,0,0,0,0,7,15,11,4,0,0,0,7,16,16,16,11,0,0,0,6,15,13,4,0,0,0,0,0,16,9,0,0,0,0,0,5,16,5,0,0,0,7 +0,1,9,15,16,7,0,0,0,6,14,7,14,8,0,0,0,3,2,0,14,6,0,0,0,0,1,16,16,4,0,0,0,0,0,8,15,16,4,0,0,0,0,0,0,13,8,0,0,0,6,8,11,16,4,0,0,0,15,12,10,3,0,0,3 +0,0,0,0,14,12,1,0,0,0,0,0,16,16,2,0,0,0,0,1,15,16,2,0,0,1,8,11,16,11,0,0,0,10,16,16,16,8,0,0,0,1,8,9,16,8,0,0,0,0,0,1,16,14,0,0,0,0,0,0,12,16,2,0,1 +0,0,0,0,5,13,3,0,0,0,0,1,11,16,4,0,0,2,8,14,16,16,5,0,0,2,7,4,12,16,3,0,0,0,0,0,8,16,4,0,0,0,0,0,8,16,5,0,0,0,0,0,8,16,8,0,0,0,0,0,8,16,6,0,1 +0,0,6,11,4,0,0,0,0,0,14,12,14,1,0,0,0,0,7,0,16,6,0,0,0,0,0,0,11,9,0,0,0,0,0,0,13,3,0,0,0,0,0,3,14,2,0,0,0,0,9,16,12,8,7,0,0,0,11,16,16,15,12,0,2 +0,0,1,15,4,0,0,0,0,4,7,9,16,4,0,0,0,8,7,0,4,15,0,0,0,6,6,0,0,11,6,0,0,5,9,0,0,3,9,0,0,0,15,0,0,5,9,0,0,0,11,11,5,15,5,0,0,0,3,13,16,11,0,0,0 +0,0,0,0,7,15,0,0,0,0,0,0,10,16,1,0,0,0,2,11,16,16,0,0,0,7,16,14,11,16,3,0,0,1,2,0,8,16,3,0,0,0,0,0,4,16,4,0,0,0,0,0,4,16,3,0,0,0,0,0,6,15,2,0,1 +0,0,0,4,16,4,0,0,0,0,0,9,16,3,0,0,0,0,4,16,8,4,11,0,0,1,12,14,2,13,14,0,0,11,16,13,13,16,7,0,0,9,16,16,16,13,3,0,0,0,0,6,16,6,0,0,0,0,0,6,16,4,0,0,4 +0,0,0,0,12,15,2,0,0,0,0,1,15,16,0,0,0,0,3,12,16,16,0,0,0,6,16,16,16,15,0,0,0,6,11,6,16,12,0,0,0,0,0,0,16,12,0,0,0,0,0,0,15,13,0,0,0,0,0,0,9,16,3,0,1 +0,0,8,16,15,5,0,0,0,3,16,13,16,6,0,0,0,1,5,3,16,8,0,0,0,0,1,6,16,5,0,0,0,2,15,16,16,16,6,0,0,5,12,16,15,12,3,0,0,0,1,16,6,0,0,0,0,0,10,15,0,0,0,0,7 +0,0,1,10,16,8,0,0,0,0,13,11,10,15,2,0,0,5,16,0,6,16,4,0,0,3,16,16,14,16,4,0,0,0,0,0,0,13,6,0,0,0,0,0,0,12,8,0,0,0,2,13,5,13,7,0,0,0,1,14,13,9,1,0,9 +0,0,9,16,12,1,0,0,0,7,16,13,16,9,0,0,0,9,6,0,12,12,0,0,0,0,0,0,11,10,0,0,0,0,0,2,15,6,0,0,0,0,0,11,12,0,0,0,0,0,12,16,13,9,2,0,0,0,11,16,16,16,11,0,2 +0,0,6,16,15,10,1,0,0,1,14,8,8,11,2,0,0,5,12,11,12,6,0,0,0,7,16,14,12,15,3,0,0,1,2,0,0,10,8,0,0,0,1,0,0,9,6,0,0,0,14,4,5,14,1,0,0,0,8,16,13,3,0,0,5 +0,0,0,14,3,0,0,0,0,0,5,15,2,0,0,0,0,0,10,11,0,0,0,0,0,0,13,6,0,0,0,0,0,0,14,16,16,11,3,0,0,0,13,7,2,7,15,0,0,0,8,13,1,5,16,1,0,0,1,11,16,15,7,0,6 +0,0,2,15,1,0,0,0,0,0,10,10,0,0,0,0,0,1,14,3,0,0,0,0,0,7,13,0,0,0,0,0,0,7,15,16,12,6,1,0,0,4,16,11,8,13,9,0,0,1,15,10,4,10,14,0,0,0,4,12,16,12,5,0,6 +0,2,14,16,9,0,0,0,0,6,16,14,14,2,0,0,0,5,12,4,16,1,0,0,0,0,1,3,16,1,0,0,0,0,0,8,12,0,0,0,0,0,1,13,9,0,0,0,0,1,15,16,16,13,8,1,0,2,16,16,16,16,16,3,2 +0,0,5,16,16,5,0,0,0,5,16,11,7,1,0,0,0,11,16,14,5,0,0,0,0,4,8,11,16,13,1,0,0,0,0,0,4,16,7,0,0,0,2,0,0,9,11,0,0,0,8,11,8,15,7,0,0,0,3,12,16,9,0,0,5 +0,0,9,14,11,9,0,0,0,1,16,11,9,15,0,0,0,4,16,3,6,14,7,0,0,1,14,16,16,10,1,0,0,0,10,16,12,0,0,0,0,1,14,10,16,7,0,0,0,2,16,4,11,15,0,0,0,0,10,14,15,11,0,0,8 +0,0,5,14,10,0,0,0,0,1,16,11,11,11,0,0,0,4,15,3,1,13,4,0,0,5,9,0,0,6,8,0,0,6,8,0,0,4,8,0,0,4,14,0,0,7,6,0,0,1,16,11,10,15,2,0,0,0,5,12,13,6,0,0,0 +0,0,11,16,15,10,0,0,0,0,10,16,8,7,0,0,0,2,16,3,0,0,0,0,0,6,16,16,15,7,0,0,0,1,8,8,9,15,6,0,0,0,0,0,0,10,8,0,0,0,2,7,12,16,4,0,0,0,11,13,11,2,0,0,5 
+0,0,5,16,8,1,0,0,0,4,13,14,15,6,0,0,0,8,13,0,8,16,1,0,0,8,12,0,0,12,8,0,0,8,12,0,0,12,8,0,0,5,15,2,0,13,9,0,0,0,15,11,12,15,4,0,0,0,6,16,14,5,0,0,0 +0,2,14,14,1,0,0,0,0,9,13,13,6,0,0,0,0,9,5,3,10,0,0,0,0,0,1,3,9,0,0,0,0,0,0,8,4,0,0,0,0,0,2,15,4,2,0,0,0,1,13,16,16,16,9,0,0,0,13,8,8,7,2,0,2 +0,1,8,16,16,15,3,0,0,4,15,9,6,4,0,0,0,7,15,12,8,1,0,0,0,8,15,12,14,15,1,0,0,1,2,0,1,14,8,0,0,0,0,0,0,12,8,0,0,0,12,6,7,15,3,0,0,0,10,16,11,3,0,0,5 +0,0,3,14,16,14,0,0,0,0,15,11,14,16,0,0,0,5,10,0,13,10,0,0,0,0,3,8,16,10,1,0,0,0,13,16,16,16,10,0,0,0,4,14,9,5,1,0,0,0,2,16,4,0,0,0,0,0,3,15,0,0,0,0,7 +0,2,10,13,7,0,0,0,0,8,12,9,16,3,0,0,0,1,0,0,16,5,0,0,0,0,0,6,16,2,0,0,0,0,0,4,14,13,0,0,0,0,0,0,3,16,7,0,0,0,3,5,10,16,5,0,0,3,16,14,12,5,0,0,3 +0,0,4,12,9,0,0,0,0,4,13,7,15,4,0,0,0,7,11,0,8,10,0,0,0,2,16,16,16,7,0,0,0,0,6,16,16,15,2,0,0,0,8,11,1,6,8,0,0,0,8,12,8,13,6,0,0,0,4,16,13,8,0,0,8 +0,2,10,16,14,5,0,0,0,6,13,8,15,12,0,0,0,0,0,1,13,10,0,0,0,0,7,15,16,8,0,0,0,0,6,12,13,16,5,0,0,0,0,0,0,14,8,0,0,0,11,14,16,15,3,0,0,1,13,10,8,2,0,0,3 +0,0,11,15,4,0,0,0,0,6,16,16,16,1,0,0,0,1,6,2,15,7,0,0,0,0,0,0,14,8,0,0,0,0,0,4,16,6,0,0,0,0,0,11,16,2,0,0,0,0,12,16,15,12,13,4,0,0,12,16,16,16,15,6,2 +0,0,0,0,12,10,0,0,0,0,0,5,16,2,0,0,0,0,2,15,8,3,10,0,0,1,12,11,1,6,14,0,0,11,16,16,16,15,10,0,0,5,8,8,10,16,3,0,0,0,0,0,11,11,0,0,0,0,0,0,13,6,0,0,4 +0,1,9,13,7,0,0,0,0,7,12,6,15,6,0,0,0,0,1,4,13,10,0,0,0,0,4,16,16,13,0,0,0,0,0,0,2,13,6,0,0,0,0,0,0,10,8,0,0,0,9,7,9,15,1,0,0,0,13,15,9,1,0,0,3 +0,2,14,16,7,0,0,0,0,2,11,11,16,4,0,0,0,0,0,0,13,8,0,0,0,0,4,9,15,7,0,0,0,0,7,16,16,12,1,0,0,0,0,0,3,16,8,0,0,1,6,11,15,16,7,0,0,2,13,14,11,5,0,0,3 +0,0,3,12,15,2,0,0,0,0,14,10,0,0,0,0,0,4,14,0,0,0,0,0,0,6,10,0,0,0,0,0,0,8,9,8,10,7,1,0,0,4,16,14,6,8,10,0,0,2,15,9,5,11,11,0,0,0,3,13,16,11,1,0,6 +0,0,1,10,15,4,0,0,0,4,13,12,8,13,0,0,0,10,14,0,1,14,0,0,0,8,15,8,14,14,0,0,0,1,14,16,16,15,1,0,0,0,11,10,0,10,9,0,0,0,4,14,5,10,12,0,0,0,1,13,16,13,6,0,8 +0,2,8,9,13,16,7,0,0,4,16,10,5,5,2,0,0,4,16,0,0,0,0,0,0,4,16,16,11,3,0,0,0,1,6,8,13,13,0,0,0,0,0,0,7,16,0,0,0,3,11,12,16,6,0,0,0,3,15,14,4,0,0,0,5 +0,0,7,13,16,7,0,0,0,4,16,7,4,14,6,0,0,2,15,7,4,13,12,0,0,0,12,16,14,14,8,0,0,0,0,0,0,8,8,0,0,0,0,0,1,13,4,0,0,0,0,4,14,11,0,0,0,1,12,12,6,0,0,0,9 +0,0,4,13,15,2,0,0,0,1,13,3,7,10,0,0,0,6,8,0,0,12,0,0,0,4,10,3,11,11,0,0,0,0,14,16,15,15,1,0,0,0,8,16,1,5,8,0,0,0,8,10,5,13,7,0,0,0,4,15,11,6,0,0,8 +0,0,10,14,4,0,0,0,0,4,16,12,16,2,0,0,0,0,7,0,8,8,0,0,0,0,0,0,8,8,0,0,0,0,0,0,14,5,0,0,0,0,0,7,13,1,0,0,0,0,6,16,14,12,9,0,0,0,14,14,12,12,15,1,2 +0,1,9,16,13,7,0,0,0,7,13,4,5,16,3,0,0,7,13,4,7,16,7,0,0,1,14,14,12,11,6,0,0,0,0,0,0,12,3,0,0,0,0,0,5,13,0,0,0,0,7,7,15,4,0,0,0,0,11,13,5,0,0,0,9 +0,0,5,12,10,2,0,0,0,0,12,16,16,5,0,0,0,0,16,16,16,3,0,0,0,0,13,16,16,3,0,0,0,3,16,16,16,3,0,0,0,0,16,16,16,5,0,0,0,0,14,16,16,12,1,0,0,0,3,8,10,8,2,0,1 +0,5,16,14,2,0,0,0,0,7,16,16,10,0,0,0,0,0,0,12,10,0,0,0,0,0,0,15,7,0,0,0,0,0,5,16,2,0,0,0,0,1,12,11,0,0,0,0,0,6,16,16,16,12,3,0,0,5,16,16,16,15,5,0,2 +0,0,9,13,16,15,0,0,0,1,12,12,14,13,0,0,0,0,0,1,14,9,0,0,0,0,5,10,16,13,3,0,0,0,16,16,16,12,7,0,0,0,4,16,7,0,0,0,0,0,5,16,0,0,0,0,0,0,11,12,0,0,0,0,7 +0,0,3,12,6,0,0,0,0,0,11,11,1,0,0,0,0,1,13,1,0,0,0,0,0,3,12,0,4,3,0,0,0,6,10,13,16,15,2,0,0,4,16,12,1,2,10,0,0,0,14,4,7,13,7,0,0,0,3,16,14,8,0,0,6 +0,1,14,16,5,0,0,0,0,2,13,13,16,4,0,0,0,0,4,13,16,3,0,0,0,0,9,16,16,13,0,0,0,0,4,8,8,16,8,0,0,0,0,1,5,14,11,0,0,1,13,16,16,15,3,0,0,0,15,11,5,1,0,0,3 
+0,0,3,11,11,6,0,0,0,3,14,16,16,12,0,0,0,0,7,16,16,12,0,0,0,0,13,16,16,12,0,0,0,0,13,16,16,14,0,0,0,2,15,16,16,13,0,0,0,0,12,16,16,12,0,0,0,0,3,11,11,7,0,0,1 +0,0,2,13,16,6,0,0,0,0,11,10,1,0,0,0,0,0,14,1,0,0,0,0,0,1,15,0,0,0,0,0,0,1,14,6,8,2,0,0,0,0,15,13,8,11,1,0,0,0,9,12,0,9,7,0,0,0,0,13,16,15,6,0,6 +0,0,0,1,13,10,0,0,0,0,0,10,15,3,0,0,0,0,7,16,6,1,14,1,0,3,16,8,0,9,15,0,0,14,16,16,12,16,10,0,0,6,13,16,16,16,6,0,0,0,0,0,14,12,0,0,0,0,0,2,15,3,0,0,4 +0,0,3,11,14,16,14,0,0,0,13,15,12,15,13,0,0,0,3,0,1,16,5,0,0,0,0,4,16,16,10,0,0,0,0,7,16,16,7,0,0,0,0,6,15,2,0,0,0,0,0,15,9,0,0,0,0,0,3,15,2,0,0,0,7 +0,0,3,8,12,12,16,11,0,0,14,16,16,15,16,9,0,0,6,3,0,9,15,0,0,0,0,9,14,16,12,0,0,0,1,16,16,12,3,0,0,0,0,3,16,4,0,0,0,0,0,13,13,0,0,0,0,0,2,15,5,0,0,0,7 +0,0,5,12,12,14,5,0,0,0,13,13,11,8,4,0,0,5,16,1,0,0,0,0,0,8,16,16,12,2,0,0,0,1,7,8,15,15,0,0,0,0,0,0,1,16,1,0,0,0,5,9,11,15,0,0,0,0,5,14,11,3,0,0,5 +0,0,0,2,16,4,0,0,0,0,1,15,14,1,3,0,0,0,11,16,3,4,16,0,0,9,16,8,4,13,12,0,0,3,15,16,16,16,11,0,0,0,1,8,14,14,4,0,0,0,0,0,16,8,0,0,0,0,0,2,16,6,0,0,4 +0,1,9,15,9,0,0,0,0,4,13,7,15,4,0,0,0,0,0,2,15,3,0,0,0,0,12,16,16,11,1,0,0,0,5,5,5,13,8,0,0,0,0,0,1,12,6,0,0,0,9,8,14,12,1,0,0,0,15,12,6,0,0,0,3 +0,0,4,12,15,8,0,0,0,3,16,6,4,15,2,0,0,2,15,1,2,12,6,0,0,0,13,16,16,15,1,0,0,2,15,12,10,14,7,0,0,6,15,1,0,5,9,0,0,0,15,8,4,10,8,0,0,0,2,15,15,11,1,0,8 +0,0,0,9,12,5,0,0,0,0,15,16,16,9,0,0,0,0,13,16,16,15,0,0,0,0,13,16,16,12,0,0,0,0,11,16,16,13,0,0,0,0,7,16,16,11,1,0,0,0,7,16,16,8,0,0,0,0,0,10,12,5,0,0,1 +0,0,6,14,13,2,0,0,0,10,16,9,16,12,0,0,0,5,16,16,16,16,3,0,0,0,5,9,5,8,8,0,0,0,0,0,0,8,8,0,0,0,0,0,1,12,8,0,0,0,5,16,16,15,3,0,0,0,4,15,9,2,0,0,9 +0,0,13,16,10,1,0,0,0,8,15,8,16,6,0,0,0,11,13,0,11,12,0,0,0,5,7,0,10,12,0,0,0,0,0,0,12,10,0,0,0,0,0,3,16,5,0,0,0,0,7,15,15,12,12,2,0,1,12,16,13,11,11,1,2 +0,0,7,16,14,8,0,0,0,0,5,14,13,12,0,0,0,0,8,16,16,10,0,0,0,0,9,16,16,6,0,0,0,0,11,16,16,5,0,0,0,2,14,16,16,4,0,0,0,1,16,16,16,3,0,0,0,0,7,14,16,10,1,0,1 +0,0,4,15,12,2,0,0,0,0,12,11,10,14,0,0,0,6,11,0,0,15,4,0,0,4,14,0,0,8,8,0,0,6,12,0,0,9,8,0,0,1,15,3,0,10,10,0,0,0,12,13,12,16,3,0,0,0,4,13,15,6,0,0,0 +0,0,5,16,16,16,4,0,0,0,12,15,9,5,1,0,0,12,16,14,7,0,0,0,0,9,16,16,16,15,0,0,0,0,0,0,2,16,4,0,0,0,6,5,0,14,7,0,0,0,12,10,9,16,2,0,0,0,5,16,16,7,0,0,5 +0,1,8,10,3,0,0,0,0,2,15,16,16,12,0,0,0,1,14,16,16,12,0,0,0,4,16,16,16,8,0,0,0,4,16,16,16,6,0,0,0,4,16,16,15,3,0,0,0,4,16,16,13,2,0,0,0,1,7,10,8,2,0,0,1 +0,0,4,13,11,1,0,0,0,9,15,10,12,8,0,0,0,8,10,0,6,9,0,0,0,0,0,1,16,3,0,0,0,0,0,14,16,13,3,0,0,0,6,2,4,8,12,0,0,1,15,4,5,12,12,0,0,0,7,13,16,13,1,0,3 +0,0,8,15,14,1,0,0,0,1,15,15,14,11,0,0,0,7,16,3,1,16,3,0,0,8,16,0,0,13,7,0,0,5,16,0,0,13,8,0,0,2,16,3,1,16,4,0,0,0,16,14,16,12,0,0,0,0,9,16,10,1,0,0,0 +0,0,0,13,6,2,0,0,0,0,2,16,8,16,3,0,0,1,14,13,12,16,0,0,0,9,16,16,16,16,10,0,0,3,12,14,16,13,6,0,0,0,0,8,16,1,0,0,0,0,0,11,10,0,0,0,0,0,0,13,7,0,0,0,4 +0,0,9,12,12,1,0,0,0,6,14,5,9,11,3,0,0,7,12,9,15,9,1,0,0,1,16,14,2,0,0,0,0,0,14,12,14,2,0,0,0,2,12,0,9,13,0,0,0,0,15,1,6,14,0,0,0,0,6,14,15,3,0,0,8 +0,0,4,16,7,2,0,0,0,0,8,16,16,4,0,0,0,0,11,16,16,3,0,0,0,0,9,16,16,0,0,0,0,0,11,16,16,2,0,0,0,0,9,16,15,0,0,0,0,0,9,16,15,0,0,0,0,0,6,12,3,0,0,0,1 +0,0,4,16,10,1,0,0,0,0,11,10,11,12,0,0,0,6,16,0,1,12,3,0,0,6,14,1,0,5,7,0,0,8,8,0,0,5,8,0,0,4,13,0,1,12,9,0,0,0,14,10,14,16,2,0,0,0,4,15,12,3,0,0,0 +0,0,0,5,12,15,12,0,0,0,8,11,3,4,14,1,0,2,15,2,4,16,13,0,0,1,15,12,12,12,10,0,0,0,1,2,0,13,7,0,0,0,0,0,2,14,1,0,0,0,0,0,12,7,0,0,0,0,0,8,11,0,0,0,9 
+0,0,1,12,12,1,0,0,0,0,10,13,12,9,0,0,0,2,16,6,1,13,3,0,0,6,16,2,0,12,8,0,0,2,16,1,0,8,8,0,0,0,14,5,0,9,9,0,0,0,5,15,4,14,6,0,0,0,0,14,15,7,0,0,0 +0,3,12,12,15,16,16,2,0,2,14,13,12,16,11,0,0,0,0,0,11,15,2,0,0,0,0,11,16,3,0,0,0,0,1,16,12,0,0,0,0,0,8,16,2,0,0,0,0,0,14,12,0,0,0,0,0,3,16,9,0,0,0,0,7 +0,0,0,3,13,16,5,0,0,0,3,15,7,13,13,0,0,1,13,6,14,16,11,0,0,1,16,14,10,11,10,0,0,0,0,0,0,14,6,0,0,0,0,0,2,15,2,0,0,0,0,0,10,8,0,0,0,0,0,4,13,0,0,0,9 +0,0,0,2,15,10,0,0,0,0,0,10,16,16,0,0,0,0,4,16,16,3,0,0,0,4,15,16,16,1,0,0,0,4,12,16,16,0,0,0,0,0,0,12,16,4,0,0,0,0,0,9,16,4,0,0,0,0,0,4,15,4,0,0,1 +0,0,2,15,15,11,3,0,0,2,15,6,4,4,3,0,0,5,16,15,7,0,0,0,0,1,4,6,13,13,0,0,0,0,0,0,0,14,4,0,0,0,3,3,0,11,3,0,0,0,7,7,10,12,0,0,0,0,3,14,9,1,0,0,5 +0,0,7,13,15,6,0,0,0,1,16,6,10,8,8,0,0,4,16,3,4,11,8,0,0,0,11,11,13,9,0,0,0,0,2,15,16,3,0,0,0,0,9,9,7,15,2,0,0,0,12,7,0,15,4,0,0,0,4,15,16,13,1,0,8 +0,0,1,14,6,9,5,0,0,0,11,12,3,16,7,0,0,5,16,2,12,11,3,0,0,10,16,8,16,16,14,0,0,7,16,16,14,11,3,0,0,0,3,13,8,0,0,0,0,0,0,15,5,0,0,0,0,0,2,16,2,0,0,0,4 +0,0,8,16,16,16,9,0,0,5,15,9,5,14,16,2,0,0,0,0,3,16,9,0,0,0,0,0,14,11,1,0,0,0,0,7,16,5,0,0,0,0,3,16,8,0,0,0,0,0,9,16,3,0,0,0,0,0,12,16,1,0,0,0,7 +0,0,5,14,8,0,0,0,0,0,15,11,15,0,0,0,0,0,5,2,11,3,0,0,0,0,0,9,16,5,0,0,0,0,1,12,12,15,5,0,0,0,0,0,0,5,12,0,0,0,6,4,7,12,10,0,0,0,9,15,12,9,1,0,3 +0,0,6,16,9,1,0,0,0,3,15,8,13,3,0,0,0,8,8,0,11,4,0,0,0,1,0,7,16,6,0,0,0,0,2,16,14,16,5,0,0,0,0,5,0,11,8,0,0,0,4,6,9,15,2,0,0,0,4,15,11,3,0,0,3 +0,0,4,12,14,2,0,0,0,4,15,7,11,14,5,0,0,7,12,0,3,15,4,0,0,2,14,12,14,6,0,0,0,0,3,16,16,2,0,0,0,0,6,12,10,11,0,0,0,0,7,12,9,11,0,0,0,0,2,12,10,1,0,0,8 +0,2,4,7,13,15,2,0,0,14,16,16,15,16,5,0,0,1,4,0,14,14,0,0,0,0,0,0,16,12,0,0,0,0,0,3,16,7,0,0,0,0,0,5,16,7,0,0,0,0,0,5,16,6,0,0,0,0,0,7,16,9,0,0,7 +0,0,2,10,15,5,0,0,0,0,10,15,5,1,0,0,0,1,16,2,0,0,0,0,0,4,14,0,0,0,0,0,0,5,12,2,7,9,2,0,0,1,16,15,13,12,12,0,0,0,15,11,7,15,6,0,0,0,4,13,15,7,0,0,6 +0,0,12,14,1,0,0,0,0,8,16,12,6,0,0,0,0,8,7,7,8,0,0,0,0,0,0,9,7,0,0,0,0,0,0,11,5,0,0,0,0,0,0,14,1,0,0,0,0,0,9,13,4,7,4,0,0,0,15,16,16,16,13,0,2 +0,0,2,12,15,5,0,0,0,0,13,5,5,10,4,0,0,0,14,1,3,15,7,0,0,0,12,10,10,7,0,0,0,0,1,15,11,0,0,0,0,0,4,14,14,0,0,0,0,0,6,10,12,1,0,0,0,0,1,15,11,0,0,0,8 +0,0,3,14,11,0,0,0,0,3,14,9,14,0,0,0,0,8,11,0,4,11,7,0,0,3,15,13,14,11,1,0,0,0,0,14,16,12,1,0,0,0,7,14,2,14,8,0,0,0,8,10,5,14,5,0,0,0,4,15,13,6,0,0,8 +0,0,5,13,8,3,5,0,0,2,16,11,3,2,3,0,0,7,13,4,4,0,0,0,0,5,16,12,14,11,0,0,0,0,0,0,2,14,2,0,0,0,0,0,0,13,3,0,0,0,10,4,6,13,0,0,0,0,6,14,13,3,0,0,5 +0,0,10,16,12,1,0,0,0,9,16,12,16,7,0,0,0,4,7,1,16,4,0,0,0,0,0,14,16,8,0,0,0,0,0,10,14,16,4,0,0,5,7,0,0,14,9,0,0,3,15,5,6,15,7,0,0,0,8,16,16,8,0,0,3 +0,0,0,12,14,1,0,0,0,0,7,16,6,0,0,0,0,1,14,8,0,0,0,0,0,3,16,10,8,2,0,0,0,2,16,16,16,15,4,0,0,1,14,16,6,8,15,0,0,0,5,16,16,13,16,1,0,0,0,9,16,15,7,0,6 +0,1,13,16,15,1,0,0,0,10,15,7,16,4,0,0,0,11,15,7,16,4,0,0,0,1,11,12,15,0,0,0,0,0,1,16,11,0,0,0,0,0,7,16,4,0,0,0,0,0,14,16,5,3,0,0,0,0,13,16,16,16,10,0,2 +0,0,1,13,16,6,0,0,0,6,14,16,16,7,0,0,2,14,16,16,16,2,0,0,0,0,5,16,16,2,0,0,0,0,2,16,16,2,0,0,0,0,0,16,16,1,0,0,0,0,0,14,16,2,0,0,0,0,0,13,16,1,0,0,1 +0,5,14,12,15,15,3,0,0,0,2,4,11,16,5,0,0,0,0,1,15,9,0,0,0,0,0,7,16,1,0,0,0,0,0,3,16,11,0,0,0,0,0,0,15,16,0,0,0,1,7,10,16,7,0,0,0,2,12,13,4,0,0,0,3 +0,0,2,12,13,0,0,0,0,3,15,13,16,0,0,0,0,10,13,1,16,0,0,0,0,0,1,4,16,0,0,0,0,0,0,6,13,0,0,0,0,0,0,9,10,7,8,0,0,0,0,14,15,15,3,0,0,0,0,15,12,3,0,0,2 +0,0,0,9,16,12,2,0,0,0,3,15,10,16,4,0,0,1,14,8,0,13,7,0,0,7,15,1,0,14,6,0,0,4,15,0,0,15,5,0,0,1,12,4,9,15,0,0,0,0,5,16,16,9,0,0,0,0,0,9,14,2,0,0,0 
+0,0,0,11,15,4,0,0,0,0,4,16,14,2,0,0,0,0,11,16,3,0,0,0,0,0,14,13,0,0,0,0,0,1,16,12,3,0,0,0,0,2,16,16,16,6,0,0,0,0,11,16,16,15,0,0,0,0,0,8,16,15,1,0,6 +0,0,2,10,13,16,6,0,0,9,16,14,12,9,2,0,0,5,16,3,0,0,0,0,0,0,10,14,6,1,0,0,0,0,1,13,16,11,0,0,0,0,0,0,8,16,0,0,0,0,0,2,15,12,0,0,0,0,0,13,16,2,0,0,5 +0,0,3,11,15,8,0,0,0,0,9,14,8,9,0,0,0,0,6,14,5,0,0,0,0,0,0,12,16,7,0,0,0,0,4,14,11,11,0,0,0,0,12,8,0,16,1,0,0,0,14,11,11,15,1,0,0,0,3,13,13,6,0,0,8 +0,3,13,12,9,12,1,0,0,4,16,16,16,14,1,0,0,4,16,7,3,0,0,0,0,5,16,10,0,0,0,0,0,0,10,16,4,0,0,0,0,0,0,12,12,0,0,0,0,0,6,16,7,0,0,0,0,1,15,12,1,0,0,0,5 +0,0,7,16,16,16,16,3,0,0,2,10,9,9,16,7,0,0,0,0,0,10,15,0,0,0,0,6,12,16,11,0,0,0,0,11,16,16,6,0,0,0,0,9,16,8,0,0,0,0,5,16,10,0,0,0,0,0,11,15,0,0,0,0,7 +0,0,2,12,16,7,0,0,0,0,0,10,16,3,0,0,0,0,0,3,15,5,0,0,0,0,0,0,6,15,3,0,0,0,0,0,0,8,14,0,0,0,0,0,0,11,16,0,0,0,0,3,13,16,9,0,0,0,6,16,12,8,1,0,3 +0,0,0,6,14,15,3,0,0,1,9,16,16,14,2,0,0,6,16,16,16,14,0,0,0,0,6,16,16,8,0,0,0,0,6,16,16,8,0,0,0,0,4,16,16,9,0,0,0,0,1,16,16,15,3,0,0,0,0,10,16,16,4,0,1 +0,0,10,14,14,6,0,0,0,0,8,16,16,11,1,0,0,0,4,16,16,14,2,0,0,0,1,16,16,16,7,0,0,0,0,16,16,16,2,0,0,0,2,14,16,15,3,0,0,0,5,16,16,11,0,0,0,0,8,12,14,6,0,0,1 +0,0,6,16,8,0,0,0,0,2,16,14,13,0,0,0,0,8,14,5,15,0,0,0,0,9,11,8,12,0,0,0,0,3,6,14,8,0,0,0,0,0,4,16,2,0,0,0,0,0,8,16,12,10,5,0,0,0,8,16,16,16,8,0,2 +0,0,0,11,15,10,1,0,0,0,9,10,8,16,5,0,0,0,15,2,0,11,10,0,0,7,10,0,0,9,9,0,0,8,8,0,0,9,8,0,0,8,6,1,5,16,2,0,0,3,14,13,16,14,0,0,0,0,1,9,14,3,0,0,0 +0,0,8,16,9,0,0,0,0,2,15,16,16,4,0,0,0,7,16,4,16,3,0,0,0,9,12,7,14,0,0,0,0,1,4,16,7,0,0,0,0,0,9,16,2,0,0,0,0,0,12,16,15,15,3,0,0,0,7,13,15,14,14,0,2 +0,0,1,7,14,12,0,0,0,0,13,11,12,14,0,0,0,9,16,12,14,14,0,0,0,2,4,2,5,15,0,0,0,0,0,0,3,15,0,0,0,0,0,0,3,16,0,0,0,0,0,0,14,7,0,0,0,0,2,11,11,1,0,0,9 +0,0,2,10,14,3,0,0,0,3,16,16,16,15,1,0,0,12,12,5,16,4,0,0,0,2,15,16,12,0,0,0,0,0,5,16,13,0,0,0,0,0,4,14,14,9,0,0,0,0,7,15,15,12,0,0,0,0,0,12,16,6,0,0,8 +0,0,0,6,11,1,0,0,0,0,5,15,11,1,0,0,0,0,9,12,0,0,0,0,0,0,13,4,0,0,0,0,0,0,14,3,3,0,0,0,0,0,11,16,16,14,4,0,0,0,5,16,11,14,13,0,0,0,0,4,9,14,13,0,6 +0,0,6,11,15,16,13,0,0,0,9,8,4,9,16,0,0,0,0,0,0,10,10,0,0,0,4,8,8,16,4,0,0,0,5,12,16,12,0,0,0,0,0,6,15,1,0,0,0,0,2,16,3,0,0,0,0,0,8,8,0,0,0,0,7 +0,0,11,15,2,0,0,0,0,1,16,16,1,0,0,0,0,6,16,13,6,8,2,0,0,10,16,16,16,16,8,0,0,1,5,10,16,15,0,0,0,0,0,8,15,3,0,0,0,0,2,16,9,0,0,0,0,0,10,16,3,0,0,0,4 +0,0,9,12,14,16,16,2,0,0,4,12,9,12,16,1,0,0,0,0,1,14,8,0,0,0,0,0,8,14,1,0,0,0,0,2,16,12,0,0,0,0,0,0,13,16,5,0,0,0,0,1,13,14,1,0,0,0,10,16,13,1,0,0,3 +0,1,12,14,1,0,0,0,0,10,16,16,4,0,0,0,0,8,11,16,0,0,0,0,0,0,6,13,0,0,0,0,0,0,11,11,0,0,0,0,0,0,16,8,0,0,0,0,0,4,16,15,16,16,9,0,0,1,16,16,16,14,8,0,2 +0,0,4,15,11,1,0,0,0,0,11,14,13,12,0,0,0,3,15,3,1,16,4,0,0,4,14,0,0,12,8,0,0,7,12,0,1,15,7,0,0,5,11,0,5,16,6,0,0,0,12,12,16,10,0,0,0,0,5,12,11,0,0,0,0 +0,0,9,13,10,5,0,0,0,0,4,15,16,13,0,0,0,0,1,14,16,15,0,0,0,0,8,16,16,14,1,0,0,0,6,16,16,14,0,0,0,0,12,16,16,14,0,0,0,0,6,16,16,6,0,0,0,0,4,14,15,3,0,0,1 +0,0,6,15,14,5,0,0,0,0,4,15,16,13,0,0,0,0,3,16,16,16,1,0,0,0,5,16,16,15,2,0,0,0,6,16,16,15,1,0,0,0,6,16,16,16,2,0,0,0,4,16,16,12,0,0,0,0,8,16,16,8,0,0,1 +0,0,5,9,14,16,11,0,0,3,16,16,12,8,3,0,0,7,15,1,0,0,0,0,0,12,13,7,4,0,0,0,0,5,16,16,16,6,0,0,0,0,0,4,16,7,0,0,0,0,0,10,16,2,0,0,0,0,9,15,6,0,0,0,5 +0,0,4,14,11,3,0,0,0,0,14,4,8,3,0,0,0,0,15,1,7,14,10,0,0,0,5,16,12,0,0,0,0,0,3,16,9,0,0,0,0,0,9,7,13,0,0,0,0,0,11,10,13,0,0,0,0,0,5,15,11,0,0,0,8 
+0,0,1,16,12,1,0,0,0,0,11,16,16,10,0,0,0,0,13,7,3,12,2,0,0,1,15,1,0,10,6,0,0,2,13,0,0,11,8,0,0,3,15,1,0,13,9,0,0,0,9,10,3,16,11,0,0,0,0,8,12,12,3,0,0 +0,0,5,9,13,8,0,0,0,0,4,16,15,3,0,0,0,0,13,14,2,6,3,0,0,5,16,5,11,16,8,0,0,7,16,16,16,14,1,0,0,3,8,16,16,4,0,0,0,0,5,16,10,0,0,0,0,0,10,12,3,0,0,0,4 +0,0,2,6,12,16,16,2,0,2,15,16,9,5,16,5,0,0,5,1,0,2,16,2,0,0,0,0,0,8,10,0,0,0,0,0,4,16,7,0,0,0,10,13,16,13,1,0,0,0,9,13,16,1,0,0,0,0,0,7,12,0,0,0,7 +0,0,4,11,12,7,0,0,0,1,16,13,14,12,0,0,0,2,16,5,8,14,0,0,0,1,15,16,16,14,0,0,0,0,6,12,12,16,6,0,0,0,1,3,2,15,5,0,0,0,7,16,16,16,2,0,0,0,1,13,12,5,0,0,9 +0,0,2,11,16,16,8,0,0,0,10,16,16,16,8,0,0,0,2,0,8,16,2,0,0,0,0,6,14,16,5,0,0,0,0,14,16,16,10,0,0,0,0,10,16,7,0,0,0,0,0,13,13,0,0,0,0,0,1,16,7,0,0,0,7 +0,0,0,2,14,14,1,0,0,0,0,12,16,16,1,0,0,1,10,16,16,14,0,0,0,6,16,15,16,10,0,0,0,1,3,11,16,7,0,0,0,0,0,7,16,8,0,0,0,0,0,7,16,13,1,0,0,0,0,3,15,15,0,0,1 +0,0,2,14,3,0,0,0,0,0,9,14,0,0,0,0,0,0,11,10,0,0,0,0,0,0,16,7,0,0,0,0,0,2,16,12,8,4,0,0,0,0,16,16,16,16,11,0,0,0,12,10,4,7,14,0,0,0,3,11,16,16,7,0,6 +0,0,3,15,16,14,2,0,0,3,15,13,5,14,4,0,0,4,15,13,10,16,4,0,0,0,7,15,16,16,4,0,0,0,0,0,0,12,4,0,0,0,0,0,0,11,5,0,0,2,13,10,5,12,8,0,0,0,5,13,16,14,5,0,9 +0,0,9,14,8,8,0,0,0,4,16,16,16,16,2,0,0,8,16,4,1,0,0,0,0,1,16,8,0,0,0,0,0,0,7,16,9,0,0,0,0,0,0,3,16,6,0,0,0,0,4,9,15,13,0,0,0,0,14,13,12,5,0,0,5 +0,0,6,14,14,4,0,0,0,0,14,15,6,13,2,0,0,0,7,16,16,15,1,0,0,0,0,10,10,0,0,0,0,0,0,0,14,1,0,0,0,0,0,0,4,11,0,0,0,3,10,4,0,13,0,0,0,0,9,12,16,12,0,0,9 +0,0,7,11,0,0,0,0,0,0,12,10,0,0,0,0,0,1,14,6,0,0,0,0,0,4,16,7,5,2,0,0,0,5,16,16,16,15,2,0,0,2,16,15,13,16,7,0,0,1,16,14,8,16,3,0,0,0,7,15,16,7,0,0,6 +0,0,3,8,10,12,15,0,0,0,8,10,9,15,13,0,0,0,0,0,5,16,5,0,0,0,0,5,13,14,3,0,0,0,4,14,16,16,5,0,0,0,0,11,12,1,0,0,0,0,0,16,7,0,0,0,0,0,2,13,0,0,0,0,7 +0,0,4,12,6,0,0,0,0,0,16,10,16,1,0,0,0,0,16,5,14,13,0,0,0,0,7,16,15,4,0,0,0,0,4,16,9,0,0,0,0,0,13,13,15,4,0,0,0,0,16,8,9,15,2,0,0,0,5,12,12,8,1,0,8 +0,0,10,15,9,2,0,0,0,7,16,9,12,13,0,0,0,8,11,0,0,13,2,0,0,5,12,0,0,8,8,0,0,8,10,0,0,10,6,0,0,5,11,0,0,14,2,0,0,2,16,9,14,12,0,0,0,0,7,14,9,0,0,0,0 +0,0,6,15,14,8,0,0,0,3,13,0,3,14,3,0,0,6,14,0,2,14,4,0,0,0,13,16,16,16,4,0,0,0,0,4,4,13,4,0,0,0,0,0,0,12,5,0,0,0,14,6,0,13,4,0,0,0,6,15,16,9,0,0,9 +0,0,2,16,16,4,0,0,0,0,8,16,16,12,0,0,0,0,14,16,16,0,0,0,0,2,16,16,14,4,0,0,0,8,16,16,10,2,0,0,0,8,16,16,14,2,0,0,0,0,16,16,16,14,2,0,0,0,2,8,16,10,0,0,1 +0,0,7,14,7,0,0,0,0,6,16,16,8,0,0,0,0,3,7,13,10,0,0,0,0,0,0,13,8,0,0,0,0,0,6,15,4,0,0,0,0,0,12,11,0,0,0,0,0,0,14,15,12,16,6,0,0,0,6,14,11,6,1,0,2 +0,0,6,16,14,0,0,0,0,1,11,16,16,4,0,0,0,0,12,16,16,4,0,0,0,0,15,16,16,1,0,0,0,0,16,16,16,0,0,0,0,4,16,16,15,3,0,0,0,0,9,16,16,13,0,0,0,0,4,14,16,6,0,0,1 +0,0,2,13,16,9,0,0,0,0,10,16,16,16,2,0,0,4,16,9,1,15,4,0,0,4,16,3,0,12,5,0,0,5,16,1,0,12,8,0,0,1,16,9,0,12,8,0,0,0,10,16,12,15,3,0,0,0,2,13,16,9,0,0,0 +0,0,14,10,10,4,0,0,0,0,16,8,8,15,4,0,0,0,13,5,0,7,8,0,0,0,6,14,8,15,7,0,0,0,8,16,16,9,0,0,0,2,15,9,15,6,0,0,0,5,15,8,14,10,0,0,0,0,8,10,16,7,0,0,8 +0,0,1,10,2,0,0,0,0,0,3,16,5,0,0,0,0,0,8,15,0,0,0,0,0,0,12,8,0,0,0,0,0,0,13,9,3,0,0,0,0,2,16,16,16,16,7,0,0,0,14,8,2,8,16,2,0,0,2,15,16,16,13,1,6 +0,0,0,5,8,3,0,0,0,0,15,16,16,16,0,0,0,0,16,16,16,13,0,0,0,0,16,16,16,5,0,0,0,0,16,16,16,9,0,0,0,0,16,16,16,4,0,0,0,0,15,16,16,10,0,0,0,0,2,8,8,7,0,0,1 +0,0,0,9,10,0,0,0,0,0,2,16,4,0,0,0,0,0,11,12,0,0,0,0,0,3,16,4,0,6,1,0,0,9,16,3,9,16,4,0,0,9,16,16,16,10,0,0,0,0,4,9,15,2,0,0,0,0,0,8,13,0,0,0,4 
+0,0,0,2,15,4,0,0,0,0,0,7,16,5,0,0,0,0,8,16,14,1,0,0,0,7,16,16,12,0,0,0,0,2,8,14,12,0,0,0,0,0,0,9,13,0,0,0,0,0,0,6,16,13,2,0,0,0,0,2,12,13,2,0,1 +0,0,6,16,16,16,8,0,0,6,16,16,15,11,5,0,0,12,13,3,0,0,0,0,0,9,15,3,0,0,0,0,0,3,15,15,2,0,0,0,0,0,5,15,7,0,0,0,0,0,0,14,6,0,0,0,0,0,6,15,1,0,0,0,5 +0,0,5,14,16,14,0,0,0,0,10,13,13,16,1,0,0,0,0,0,12,14,0,0,0,0,2,9,16,16,11,0,0,0,8,16,16,11,3,0,0,0,0,15,13,0,0,0,0,0,3,16,6,0,0,0,0,0,8,16,2,0,0,0,7 +0,0,0,9,14,1,0,0,0,0,7,16,6,0,0,0,0,4,16,10,0,0,0,0,0,8,16,3,0,6,4,0,0,4,16,16,16,16,8,0,0,0,5,8,16,12,0,0,0,0,0,6,16,2,0,0,0,0,0,8,15,2,0,0,4 +0,0,2,15,0,0,0,0,0,0,8,14,0,0,0,0,0,0,12,8,0,0,0,0,0,2,16,5,0,0,0,0,0,6,16,8,8,1,0,0,0,8,16,16,16,14,0,0,0,3,15,10,6,16,6,0,0,0,4,11,16,14,1,0,6 +0,0,6,12,7,0,0,0,0,2,15,13,15,13,0,0,0,8,13,0,9,15,0,0,0,1,15,7,14,11,0,0,0,0,6,16,16,1,0,0,0,0,5,16,16,14,0,0,0,0,11,16,16,16,4,0,0,0,3,7,12,12,3,0,8 +0,3,12,15,9,1,0,0,0,3,16,8,13,10,0,0,0,0,11,12,12,12,0,0,0,0,3,15,16,12,0,0,0,0,0,0,6,14,1,0,0,0,0,0,0,15,6,0,0,7,14,7,4,15,7,0,0,3,9,12,15,9,0,0,9 +0,0,0,4,14,0,0,0,0,0,0,13,9,0,0,0,0,0,2,16,2,0,0,0,0,1,12,11,1,6,2,0,0,9,16,16,16,16,2,0,0,0,7,9,16,8,0,0,0,0,0,6,16,5,0,0,0,0,0,7,13,1,0,0,4 +0,0,8,13,15,10,3,0,0,0,13,15,12,12,9,0,0,3,14,7,0,0,0,0,0,7,8,4,2,0,0,0,0,2,16,16,15,0,0,0,0,0,1,5,13,0,0,0,0,0,0,10,6,0,0,0,0,0,6,13,1,0,0,0,5 +0,0,0,10,9,0,0,0,0,0,1,15,7,0,0,0,0,0,11,13,0,0,0,0,0,1,16,7,0,0,0,0,0,7,16,6,7,15,3,0,0,6,16,16,16,15,2,0,0,0,0,8,16,4,0,0,0,0,0,9,12,0,0,0,4 +0,0,6,12,4,0,0,0,0,0,12,16,15,1,0,0,0,2,16,6,5,14,0,0,0,9,16,4,0,14,3,0,0,7,16,6,0,14,6,0,0,4,16,15,2,16,6,0,0,0,12,16,16,11,0,0,0,0,5,14,14,5,0,0,0 +0,0,7,15,10,0,0,0,0,0,12,10,15,3,0,0,0,0,0,0,12,11,0,0,0,0,0,0,14,9,0,0,0,1,5,8,16,12,2,0,0,10,16,16,15,10,3,0,0,0,2,16,4,0,0,0,0,0,8,12,0,0,0,0,7 +0,0,1,12,6,0,0,0,0,0,5,15,2,3,8,0,0,1,13,11,1,14,9,0,0,6,15,3,6,15,0,0,0,12,15,12,15,15,6,0,0,6,12,14,16,16,5,0,0,0,0,10,14,0,0,0,0,0,0,14,6,0,0,0,4 +0,6,16,6,0,0,0,0,0,9,16,16,2,0,0,0,0,1,2,16,7,0,0,0,0,0,2,16,3,0,0,0,0,0,10,15,0,0,0,0,0,4,16,6,0,0,0,0,0,10,16,12,14,16,9,0,0,7,16,16,12,10,3,0,2 +0,0,0,9,13,5,0,0,0,0,7,16,15,1,0,0,0,0,7,16,8,0,0,0,0,0,11,16,9,0,0,0,0,0,9,16,6,0,0,0,0,0,10,16,5,0,0,0,0,0,11,16,11,0,0,0,0,0,1,12,10,0,0,0,1 +0,0,4,14,9,0,0,0,0,2,15,16,16,0,0,0,0,5,11,7,14,0,0,0,0,0,0,8,11,0,0,0,0,0,0,11,5,0,0,0,0,0,4,15,0,4,0,0,0,0,8,15,12,16,2,0,0,0,7,16,12,7,0,0,2 +0,0,0,3,15,4,0,0,0,0,2,14,13,1,6,1,0,1,13,13,0,5,16,3,0,5,16,3,0,11,13,0,0,12,16,12,13,16,2,0,0,6,12,13,16,14,1,0,0,0,0,1,16,5,0,0,0,0,0,4,15,0,0,0,4 +0,0,7,16,14,2,0,0,0,6,14,8,16,9,0,0,0,1,2,2,16,8,0,0,0,0,0,12,16,5,0,0,0,0,0,5,13,16,3,0,0,0,0,0,1,16,8,0,0,0,1,8,14,14,1,0,0,0,7,15,6,1,0,0,3 +0,0,0,6,16,3,0,0,0,0,2,15,10,11,5,0,0,0,13,9,4,16,3,0,0,4,15,4,9,16,3,0,0,11,15,12,16,16,10,0,0,5,15,14,16,9,0,0,0,0,0,7,13,1,0,0,0,0,0,9,9,0,0,0,4 +0,0,4,14,16,10,0,0,0,0,13,8,11,15,0,0,0,0,1,0,8,11,0,0,0,0,0,0,13,7,0,0,0,0,4,8,16,9,1,0,0,0,7,16,15,10,1,0,0,0,3,15,1,0,0,0,0,0,6,14,0,0,0,0,7 +0,0,0,3,15,4,0,0,0,0,0,12,11,3,5,0,0,0,9,14,2,13,11,0,0,3,16,5,3,16,3,0,0,11,15,6,12,16,3,0,0,11,16,16,16,15,4,0,0,1,5,6,16,2,0,0,0,0,0,5,16,5,0,0,4 +0,0,2,11,14,5,0,0,0,0,0,10,16,16,0,0,0,0,0,10,16,16,4,0,0,0,0,12,16,15,1,0,0,0,1,14,16,13,0,0,0,0,1,16,16,9,0,0,0,0,11,16,16,5,0,0,0,0,6,13,14,0,0,0,1 +0,0,8,14,16,5,0,0,0,2,16,8,10,8,0,0,0,0,4,2,13,6,0,0,0,0,0,7,12,0,0,0,0,0,1,15,7,0,0,0,0,0,9,11,0,0,0,0,0,4,16,9,8,12,2,0,0,0,10,14,9,2,0,0,2 +0,0,0,0,11,15,2,0,0,0,0,9,16,16,3,0,0,1,10,16,16,12,0,0,0,9,16,16,16,13,0,0,0,11,14,8,16,9,0,0,0,0,0,0,16,12,0,0,0,0,0,0,15,13,0,0,0,0,0,0,12,10,0,0,1 
+0,0,0,5,12,16,7,0,0,0,9,15,4,13,12,0,0,2,16,16,15,16,13,0,0,3,16,16,14,16,7,0,0,0,6,7,1,15,3,0,0,0,0,0,9,11,0,0,0,0,0,0,13,5,0,0,0,0,0,6,13,0,0,0,9 +0,0,0,12,15,5,0,0,0,0,3,13,9,16,0,0,0,12,14,10,1,14,4,0,0,3,12,13,11,13,0,0,0,0,2,10,16,9,0,0,0,0,0,5,16,8,0,0,0,0,0,13,16,9,0,0,0,0,0,12,12,2,0,0,8 +0,0,0,5,16,3,0,0,0,0,1,16,10,1,14,4,0,0,11,12,1,9,14,2,0,6,15,3,4,16,5,0,0,13,15,14,16,16,8,0,0,12,13,14,16,9,0,0,0,0,0,5,16,2,0,0,0,0,0,7,11,0,0,0,4 +0,0,0,14,10,1,0,0,0,0,8,16,16,11,0,0,0,0,10,10,2,15,1,0,0,4,12,6,0,11,4,0,0,6,16,10,0,9,8,0,0,0,14,16,5,16,9,0,0,0,8,16,16,16,4,0,0,0,0,12,16,9,0,0,0 +0,0,6,14,15,1,0,0,0,0,14,16,16,8,0,0,0,0,4,2,10,10,0,0,0,0,0,0,10,8,0,0,0,0,7,16,16,16,4,0,0,0,9,13,15,8,1,0,0,0,0,12,9,0,0,0,0,0,5,13,0,0,0,0,7 +0,0,2,9,5,0,0,0,0,0,8,16,16,13,0,0,0,2,11,16,16,4,0,0,0,1,11,16,12,0,0,0,0,0,10,12,16,6,0,0,0,0,13,1,7,15,1,0,0,0,14,6,7,13,2,0,0,0,2,11,11,4,0,0,8 +0,0,0,5,14,16,7,0,0,0,6,16,8,12,13,3,0,4,16,14,10,16,16,3,0,1,15,16,16,16,12,0,0,0,3,5,6,16,3,0,0,0,0,0,10,10,0,0,0,0,0,2,15,5,0,0,0,0,0,8,14,0,0,0,9 +0,4,16,13,6,0,0,0,0,1,9,15,16,8,0,0,0,0,0,1,12,16,3,0,0,0,0,1,15,13,1,0,0,0,11,13,16,14,5,0,0,3,16,16,16,14,5,0,0,1,14,13,1,0,0,0,0,3,16,7,0,0,0,0,7 +0,1,12,16,5,0,0,0,0,11,16,16,11,0,0,0,0,6,5,16,11,0,0,0,0,0,2,16,7,0,0,0,0,0,8,16,3,0,0,0,0,1,13,14,0,2,1,0,0,4,16,16,16,16,10,0,0,0,15,16,14,9,3,0,2 +0,0,0,0,13,15,3,0,0,0,0,9,16,16,7,0,0,1,7,16,16,16,1,0,0,6,16,15,14,16,2,0,0,5,11,2,16,15,0,0,0,0,0,0,14,14,0,0,0,0,0,0,16,12,0,0,0,0,0,0,11,13,0,0,1 +0,0,1,8,14,16,12,0,0,0,10,15,13,16,12,0,0,1,12,3,9,15,1,0,0,0,0,8,16,8,0,0,0,0,0,8,16,16,4,0,0,0,0,1,8,16,6,0,0,0,0,5,15,12,1,0,0,0,0,13,10,1,0,0,3 +0,0,3,10,14,14,0,0,0,10,16,14,10,15,0,0,0,0,8,0,0,0,0,0,0,6,16,8,1,0,0,0,0,2,11,15,15,5,0,0,0,0,0,0,15,15,0,0,0,0,0,12,13,1,0,0,0,0,0,11,4,0,0,0,5 +0,0,0,14,2,0,0,0,0,0,2,16,2,0,0,0,0,0,7,13,0,0,0,0,0,0,8,12,0,0,0,0,0,0,9,11,4,2,0,0,0,0,11,16,16,15,8,0,0,0,13,15,2,2,16,3,0,0,0,11,13,12,10,0,6 +0,0,2,11,16,10,0,0,0,0,14,12,8,12,9,0,0,4,16,5,2,14,9,0,0,4,16,16,15,10,2,0,0,0,3,16,15,0,0,0,0,0,8,15,14,0,0,0,0,0,9,16,16,4,0,0,0,0,1,14,11,2,0,0,8 +0,0,4,13,4,0,0,0,0,0,12,15,6,0,0,0,0,0,15,10,0,0,0,0,0,0,16,10,4,1,0,0,0,5,16,16,16,15,1,0,0,5,16,16,3,13,8,0,0,1,16,16,14,16,9,0,0,0,5,14,13,5,1,0,6 +0,0,9,16,16,16,3,0,0,1,14,15,10,7,1,0,0,7,16,2,0,0,0,0,0,2,16,8,0,0,0,0,0,0,2,13,14,0,0,0,0,0,0,4,16,7,0,0,0,0,5,14,13,0,0,0,0,0,11,10,0,0,0,0,5 +0,0,0,3,10,1,0,0,0,0,3,15,9,0,4,0,0,0,12,12,0,4,11,0,0,2,14,3,0,13,6,0,0,7,14,8,9,16,2,0,0,5,12,12,14,15,1,0,0,0,0,0,11,6,0,0,0,0,0,0,13,3,0,0,4 +0,0,3,13,14,2,0,0,0,0,7,9,14,12,0,0,0,0,0,0,7,11,0,0,0,0,0,0,9,7,0,0,0,2,13,16,16,13,3,0,0,2,16,16,14,7,1,0,0,0,0,16,8,0,0,0,0,0,2,13,2,0,0,0,7 +0,0,8,13,11,1,0,0,0,0,16,12,15,8,0,0,0,3,13,0,5,15,2,0,0,6,9,0,1,15,5,0,0,8,8,0,0,12,6,0,0,5,10,0,1,14,3,0,0,2,16,8,12,13,0,0,0,0,8,16,8,1,0,0,0 +0,0,6,14,6,0,0,0,0,0,15,6,12,3,0,0,0,5,12,0,3,14,3,0,0,4,12,0,0,11,8,0,0,4,12,0,0,9,8,0,0,5,15,0,0,8,9,0,0,1,15,2,4,13,2,0,0,0,5,15,15,5,0,0,0 +0,0,15,15,16,14,1,0,0,3,16,15,8,7,0,0,0,8,16,9,1,0,0,0,0,9,16,16,12,0,0,0,0,1,2,0,16,4,0,0,0,0,0,0,13,8,0,0,0,2,7,7,16,4,0,0,0,0,15,16,12,0,0,0,5 +0,0,2,13,16,15,4,0,0,0,9,16,16,16,6,0,0,0,0,0,3,16,7,0,0,0,0,0,4,16,1,0,0,0,2,11,14,16,4,0,0,0,10,16,16,15,2,0,0,0,3,14,11,0,0,0,0,0,3,14,1,0,0,0,7 +0,0,12,16,7,0,0,0,0,2,16,14,16,5,0,0,0,0,14,5,11,13,0,0,0,0,2,1,8,13,0,0,0,0,0,0,8,15,0,0,0,0,0,0,13,10,0,0,0,0,10,10,16,6,0,0,0,0,11,16,16,16,16,8,2 
+0,0,0,3,13,0,0,0,0,0,0,12,8,0,0,0,0,0,5,14,2,0,0,0,0,1,15,4,0,3,8,0,0,13,15,12,9,13,10,0,0,5,8,8,15,14,2,0,0,0,0,2,14,5,0,0,0,0,0,6,12,0,0,0,4 +0,0,0,12,8,0,0,0,0,0,6,15,1,0,0,0,0,2,15,6,0,0,6,0,0,8,14,1,0,7,15,1,0,13,10,0,6,16,6,0,0,15,14,14,16,12,0,0,0,7,12,12,16,2,0,0,0,0,0,14,6,0,0,0,4 +0,0,0,9,12,0,0,0,0,0,2,16,6,0,0,0,0,0,13,14,0,0,7,1,0,2,16,9,0,9,16,1,0,6,16,11,11,16,6,0,0,9,16,16,16,12,0,0,0,1,4,9,16,4,0,0,0,0,0,10,11,0,0,0,4 +0,0,5,16,13,1,0,0,0,0,12,14,15,5,0,0,0,0,12,10,16,7,0,0,0,0,3,15,16,12,0,0,0,0,0,0,2,15,2,0,0,0,0,0,0,8,9,0,0,2,12,8,8,13,12,0,0,1,6,10,12,12,2,0,9 +0,0,6,16,8,0,0,0,0,2,16,14,15,0,0,0,0,2,16,13,16,9,0,0,0,0,8,15,16,14,1,0,0,0,0,0,0,13,7,0,0,0,0,0,0,3,13,0,0,0,3,4,4,7,15,0,0,0,7,13,16,13,9,0,9 +0,0,5,16,11,0,0,0,0,1,16,5,12,11,3,0,0,3,13,0,5,16,2,0,0,4,12,0,0,16,4,0,0,5,11,0,1,16,3,0,0,5,13,0,0,12,1,0,0,0,14,7,6,12,0,0,0,0,4,14,12,2,0,0,0 +0,1,12,16,14,4,0,0,0,8,11,0,10,8,0,0,0,1,1,4,14,4,0,0,0,0,2,16,11,0,0,0,0,0,0,3,13,11,0,0,0,0,0,0,1,11,7,0,0,0,10,0,1,10,8,0,0,0,13,15,15,10,2,0,3 +0,0,3,14,13,3,0,0,0,0,12,10,7,12,0,0,0,3,15,1,0,15,7,0,0,5,12,0,0,12,5,0,0,6,12,0,0,9,4,0,0,4,13,0,0,10,4,0,0,1,14,6,5,14,2,0,0,0,3,13,13,6,0,0,0 +0,0,12,16,16,10,1,0,0,0,13,7,4,16,7,0,0,0,0,5,12,15,2,0,0,0,0,7,16,12,0,0,0,0,0,0,7,16,5,0,0,1,3,0,0,9,15,0,0,6,12,4,2,11,16,0,0,1,10,16,16,16,7,0,3 +0,0,0,8,11,0,0,0,0,0,1,15,4,0,0,0,0,0,12,9,0,0,3,0,0,2,16,2,0,9,14,0,0,11,13,0,7,15,3,0,0,13,16,12,16,8,0,0,0,6,12,12,14,1,0,0,0,0,0,7,12,0,0,0,4 +0,1,9,16,14,6,0,0,0,5,14,1,6,15,3,0,0,3,14,2,5,14,4,0,0,0,9,14,15,3,0,0,0,0,4,16,5,0,0,0,0,0,11,13,9,0,0,0,0,0,15,4,13,0,0,0,0,0,11,16,7,0,0,0,8 +0,0,4,14,15,3,0,0,0,0,12,10,5,13,1,0,0,3,13,0,0,16,4,0,0,4,11,0,0,13,6,0,0,5,10,0,0,12,4,0,0,2,13,0,0,9,6,0,0,0,14,3,4,12,0,0,0,0,3,14,14,6,0,0,0 +0,2,15,16,15,1,0,0,0,3,9,5,14,7,0,0,0,0,0,3,15,5,0,0,0,0,6,16,8,0,0,0,0,0,6,13,16,10,0,0,0,0,0,0,6,16,1,0,0,0,0,0,2,14,7,0,0,0,14,13,16,11,1,0,3 +0,0,5,15,16,6,0,0,0,0,15,12,5,16,2,0,0,4,16,3,0,12,8,0,0,8,14,0,0,10,10,0,0,8,13,0,0,10,13,0,0,4,16,0,0,4,15,0,0,1,15,9,3,12,10,0,0,0,5,15,16,15,1,0,0 +0,0,4,16,11,1,0,0,0,0,5,16,16,7,0,0,0,0,0,14,16,10,0,0,0,0,2,16,16,6,0,0,0,0,3,16,16,9,0,0,0,0,1,16,16,11,0,0,0,0,5,16,16,5,0,0,0,0,2,12,16,11,1,0,1 +0,0,0,0,7,11,1,0,0,0,0,0,12,16,3,0,0,0,0,4,16,16,0,0,0,0,0,2,16,16,0,0,0,0,1,13,16,12,0,0,0,0,7,16,15,14,0,0,0,2,15,5,12,11,0,0,0,0,2,0,7,11,0,0,1 +0,0,0,6,13,0,0,0,0,0,3,15,5,0,0,0,0,0,12,9,0,0,4,5,0,5,16,2,0,3,16,2,0,9,14,2,9,15,8,0,0,7,16,15,13,11,1,0,0,0,2,1,14,2,0,0,0,0,0,6,9,0,0,0,4 +0,0,0,6,8,0,0,0,0,0,2,14,3,0,0,0,0,0,9,8,0,0,0,0,0,4,15,1,0,10,5,0,0,7,13,0,2,16,3,0,0,11,14,8,15,9,0,0,0,12,16,14,15,0,0,0,0,1,4,9,8,0,0,0,4 +0,0,9,16,6,0,0,0,0,2,14,16,16,8,3,0,0,0,14,16,16,16,11,0,0,0,14,16,12,5,1,0,0,0,9,16,5,0,0,0,0,0,15,16,5,0,0,0,0,0,16,16,8,0,0,0,0,0,10,13,1,0,0,0,8 +0,0,9,16,12,1,0,0,0,3,16,10,15,9,0,0,0,3,16,4,11,16,1,0,0,0,12,16,16,15,7,0,0,0,0,3,2,6,14,0,0,0,6,0,0,3,16,1,0,1,16,7,4,9,16,0,0,0,8,16,16,16,8,0,9 +0,1,10,16,16,4,0,0,0,8,13,12,16,10,0,0,0,2,0,14,15,3,0,0,0,0,0,11,16,8,0,0,0,0,0,0,7,16,3,0,0,2,1,0,0,13,8,0,0,8,12,4,10,16,5,0,0,2,13,16,12,5,0,0,3 +0,0,6,16,11,0,0,0,0,1,16,2,12,9,0,0,0,4,15,5,13,10,0,0,0,0,9,10,9,15,0,0,0,0,0,0,0,11,5,0,0,0,0,0,0,7,7,0,0,3,9,1,0,8,4,0,0,1,7,14,16,12,1,0,9 +0,0,5,13,11,2,0,0,0,2,15,6,5,12,0,0,0,6,12,0,1,16,2,0,0,1,12,5,5,16,6,0,0,0,1,9,9,12,8,0,0,0,0,0,0,3,13,0,0,0,0,0,1,8,10,0,0,0,8,13,15,10,1,0,9 +0,0,0,1,12,1,0,0,0,0,0,14,10,0,0,0,0,0,10,14,2,0,0,0,0,2,16,7,0,5,6,0,0,12,15,0,9,15,1,0,0,12,16,14,16,8,0,0,0,7,12,10,15,1,0,0,0,0,0,4,9,0,0,0,4 
+0,0,3,15,0,0,0,0,0,0,11,14,0,0,0,0,0,0,13,8,0,0,0,0,0,0,16,8,4,0,0,0,0,1,16,16,16,15,2,0,0,6,16,14,11,15,7,0,0,0,15,15,9,15,4,0,0,0,4,14,16,9,0,0,6 +0,0,6,16,2,0,0,0,0,0,15,10,0,0,0,0,0,6,16,3,0,0,0,0,0,9,14,0,0,0,0,0,0,12,13,11,12,12,3,0,0,7,16,15,12,13,13,0,0,2,15,12,2,8,15,0,0,0,5,16,16,16,5,0,6 +0,0,2,15,16,13,1,0,0,0,3,7,10,16,10,0,0,0,0,0,0,11,11,0,0,0,0,2,8,15,5,0,0,0,0,9,16,16,8,0,0,0,0,2,16,5,0,0,0,0,0,12,7,0,0,0,0,0,4,14,1,0,0,0,7 +0,0,5,13,9,1,0,0,0,0,13,15,10,15,5,0,0,3,15,2,0,11,8,0,0,4,12,0,0,8,8,0,0,5,8,0,0,9,8,0,0,4,11,0,1,12,7,0,0,2,14,5,10,12,0,0,0,0,6,13,10,0,0,0,0 +0,0,0,12,13,5,0,0,0,0,0,11,16,9,0,0,0,0,3,15,16,6,0,0,0,7,15,16,16,2,0,0,0,0,1,16,16,3,0,0,0,0,1,16,16,6,0,0,0,0,1,16,16,6,0,0,0,0,0,11,16,10,0,0,1 +0,0,0,4,15,12,0,0,0,0,3,16,15,14,0,0,0,0,8,13,8,16,0,0,0,0,1,6,15,11,0,0,0,1,8,13,15,1,0,0,0,9,16,16,5,0,0,0,0,3,13,16,16,11,5,0,0,0,0,3,11,16,9,0,2 +0,0,7,15,13,1,0,0,0,8,13,6,15,4,0,0,0,2,1,13,13,0,0,0,0,0,2,15,11,1,0,0,0,0,0,1,12,12,1,0,0,0,0,0,1,10,8,0,0,0,8,4,5,14,9,0,0,0,7,13,13,9,0,0,3 +0,0,0,1,11,0,0,0,0,0,0,7,8,0,0,0,0,0,1,13,6,2,2,0,0,0,7,15,0,9,8,0,0,5,16,10,0,16,6,0,0,4,15,16,13,16,1,0,0,0,0,3,15,10,0,0,0,0,0,2,16,4,0,0,4 +0,0,12,10,0,0,0,0,0,0,14,16,16,14,0,0,0,0,13,16,15,10,1,0,0,0,11,16,16,7,0,0,0,0,0,4,7,16,7,0,0,0,0,0,4,16,9,0,0,0,5,4,12,16,4,0,0,0,9,16,16,10,0,0,5 +0,0,0,12,13,0,0,0,0,0,5,16,8,0,0,0,0,0,13,16,3,0,0,0,0,0,14,13,0,0,0,0,0,0,15,12,7,2,0,0,0,0,13,16,13,16,3,0,0,0,7,16,11,15,8,0,0,0,1,9,15,11,3,0,6 +0,0,7,8,13,16,15,1,0,0,7,7,4,11,12,0,0,0,0,0,8,13,1,0,0,4,8,8,15,15,6,0,0,2,11,15,15,4,0,0,0,0,0,16,5,0,0,0,0,0,9,15,1,0,0,0,0,0,13,5,0,0,0,0,7 +0,0,9,14,8,1,0,0,0,0,12,14,14,12,0,0,0,0,9,10,0,15,4,0,0,0,3,16,12,14,2,0,0,0,4,16,16,2,0,0,0,3,16,8,10,13,2,0,0,1,15,1,3,16,8,0,0,0,11,16,15,11,1,0,8 +0,0,11,12,0,0,0,0,0,2,16,16,16,13,0,0,0,3,16,12,10,14,0,0,0,1,16,1,12,15,0,0,0,0,13,16,9,15,2,0,0,0,0,3,0,9,11,0,0,0,0,0,9,15,4,0,0,0,9,12,13,3,0,0,9 +0,0,1,9,15,11,0,0,0,0,11,16,8,14,6,0,0,2,16,10,0,9,9,0,0,1,16,4,0,8,8,0,0,4,16,4,0,8,8,0,0,1,16,5,1,11,3,0,0,0,12,12,10,10,0,0,0,0,1,10,13,3,0,0,0 +0,0,0,0,14,13,1,0,0,0,0,5,16,16,2,0,0,0,0,14,16,12,0,0,0,1,10,16,16,12,0,0,0,3,12,14,16,9,0,0,0,0,0,5,16,15,0,0,0,0,0,4,16,14,0,0,0,0,0,1,13,16,1,0,1 +0,0,5,12,1,0,0,0,0,0,15,14,7,0,0,0,0,0,13,1,12,0,0,0,0,2,10,0,14,0,0,0,0,0,2,0,16,1,0,0,0,0,0,6,15,0,0,0,0,0,9,16,15,9,8,2,0,0,3,11,8,13,12,4,2 +0,2,9,15,14,9,3,0,0,4,13,8,9,16,8,0,0,0,0,6,14,15,3,0,0,0,0,11,14,2,0,0,0,0,0,2,15,11,0,0,0,0,0,0,2,15,4,0,0,1,5,6,13,16,6,0,0,2,12,12,13,11,0,0,3 +0,0,0,8,15,1,0,0,0,0,1,14,13,1,1,0,0,0,10,15,3,15,11,0,0,7,16,7,1,16,8,0,0,9,16,13,14,16,5,0,0,1,10,15,16,14,0,0,0,0,0,1,16,10,0,0,0,0,0,10,15,4,0,0,4 +0,5,12,13,16,16,2,0,0,11,16,15,8,4,0,0,0,8,14,11,1,0,0,0,0,8,16,16,14,0,0,0,0,1,6,6,16,0,0,0,0,0,0,5,16,3,0,0,0,1,5,15,13,0,0,0,0,4,15,16,2,0,0,0,5 +0,0,0,8,15,1,0,0,0,0,0,12,14,0,0,0,0,0,3,16,7,0,0,0,0,0,6,16,2,0,0,0,0,0,7,16,16,13,5,0,0,0,15,16,9,9,14,0,0,0,3,14,9,2,16,2,0,0,0,7,15,16,11,0,6 +0,0,1,8,15,10,0,0,0,3,13,15,14,14,0,0,0,5,10,0,10,12,0,0,0,0,3,5,15,10,2,0,0,0,16,16,16,16,12,0,0,1,8,12,14,8,3,0,0,0,0,10,13,0,0,0,0,0,0,11,9,0,0,0,7 +0,0,10,7,13,9,0,0,0,0,9,10,12,15,2,0,0,0,4,11,10,11,0,0,0,0,1,16,10,1,0,0,0,0,12,13,4,0,0,0,0,0,12,1,12,0,0,0,0,1,10,2,14,0,0,0,0,0,11,14,5,0,0,0,8 +0,0,6,14,4,0,0,0,0,0,11,16,10,0,0,0,0,0,8,14,16,2,0,0,0,0,1,12,12,11,0,0,0,0,0,0,0,11,3,0,0,0,0,0,0,5,11,0,0,0,1,4,4,7,16,2,0,0,7,16,16,13,11,1,9 
+0,0,3,13,11,7,0,0,0,0,11,16,16,16,2,0,0,4,16,9,1,14,2,0,0,4,16,0,0,16,2,0,0,0,16,1,0,12,8,0,0,0,15,9,0,13,6,0,0,0,9,14,9,14,1,0,0,0,2,12,13,4,0,0,0 +0,0,0,2,16,16,2,0,0,0,0,4,16,16,2,0,0,1,4,12,16,12,0,0,0,7,16,16,16,12,0,0,0,0,3,10,16,14,0,0,0,0,0,8,16,12,0,0,0,0,0,6,16,16,2,0,0,0,0,2,12,15,4,0,1 +0,0,8,16,5,0,0,0,0,1,13,11,16,0,0,0,0,0,10,0,13,3,0,0,0,0,3,1,16,1,0,0,0,0,0,9,12,0,0,0,0,0,3,15,5,0,0,0,0,0,14,15,8,8,3,0,0,0,7,12,12,12,13,1,2 +0,1,8,12,15,14,4,0,0,3,11,8,8,12,12,0,0,0,0,0,2,13,7,0,0,0,0,2,15,12,1,0,0,0,0,0,13,5,0,0,0,0,0,0,9,13,0,0,0,0,7,8,14,15,0,0,0,0,14,15,11,2,0,0,3 +0,0,0,0,12,2,0,0,0,0,0,6,14,1,0,0,0,0,4,16,7,8,0,0,0,0,13,9,0,16,6,0,0,6,16,10,11,16,0,0,0,0,5,10,13,16,0,0,0,0,0,0,6,16,0,0,0,0,0,0,12,8,0,0,4 +0,0,12,8,8,7,0,0,0,3,16,16,11,7,0,0,0,2,14,1,0,0,0,0,0,5,14,5,0,0,0,0,0,2,15,16,9,0,0,0,0,0,0,2,16,2,0,0,0,0,4,8,16,4,0,0,0,0,11,14,9,0,0,0,5 +0,0,1,13,14,3,0,0,0,0,8,16,13,2,0,0,0,2,16,16,3,0,0,0,0,3,16,12,1,0,0,0,0,5,16,14,5,0,0,0,0,3,16,16,16,16,6,0,0,1,14,16,16,16,12,0,0,0,3,12,15,14,7,0,6 +0,0,0,8,14,14,2,0,0,0,0,6,10,15,11,0,0,0,0,0,0,14,10,0,0,2,8,11,12,16,8,0,0,8,16,16,16,16,7,0,0,0,0,0,11,15,1,0,0,0,0,9,16,7,0,0,0,0,0,12,13,1,0,0,7 +0,0,10,11,4,0,0,0,0,0,10,15,13,13,1,0,0,0,8,11,0,14,4,0,0,0,0,13,15,13,0,0,0,1,11,16,16,0,0,0,0,1,15,3,9,10,0,0,0,0,14,6,15,10,0,0,0,0,8,14,7,1,0,0,8 +0,0,9,13,7,0,0,0,0,0,12,16,16,2,0,0,0,0,12,13,16,6,0,0,0,0,6,16,16,14,0,0,0,0,0,0,2,16,3,0,0,0,0,0,0,9,10,0,0,0,3,7,12,14,16,2,0,0,7,12,12,12,11,0,9 +0,0,10,14,11,3,0,0,0,4,16,13,6,14,1,0,0,4,16,2,0,11,7,0,0,8,16,0,0,10,5,0,0,8,16,0,0,14,4,0,0,8,16,0,1,16,1,0,0,4,16,1,11,15,0,0,0,0,11,16,12,3,0,0,0 +0,0,2,13,8,0,0,0,0,0,6,16,16,6,0,0,0,0,5,15,13,11,0,0,0,0,0,7,16,15,0,0,0,0,0,0,0,14,3,0,0,0,0,0,0,7,11,0,0,0,0,3,4,4,16,2,0,0,2,15,13,14,13,2,9 +0,2,13,16,16,16,11,0,0,5,16,10,5,4,1,0,0,6,16,7,3,0,0,0,0,9,16,16,16,6,0,0,0,3,8,4,11,15,0,0,0,0,0,1,12,15,0,0,0,0,4,13,16,6,0,0,0,2,16,15,8,0,0,0,5 +0,6,13,5,8,8,1,0,0,8,16,16,16,16,6,0,0,6,16,9,6,4,0,0,0,6,16,16,15,5,0,0,0,0,4,5,15,12,0,0,0,0,0,3,16,9,0,0,0,1,8,13,15,3,0,0,0,4,16,15,3,0,0,0,5 +0,0,0,5,14,2,0,0,0,0,1,13,11,0,0,0,0,0,5,16,2,0,0,0,0,0,6,15,5,0,0,0,0,1,15,16,15,11,1,0,0,2,13,14,1,12,9,0,0,0,4,16,7,13,9,0,0,0,0,5,16,15,3,0,6 +0,3,15,8,8,6,0,0,0,4,16,16,16,13,2,0,0,3,16,9,2,0,0,0,0,2,16,16,15,3,0,0,0,0,7,6,12,9,0,0,0,0,0,1,14,10,0,0,0,0,5,14,15,2,0,0,0,1,15,14,1,0,0,0,5 +0,0,6,14,10,2,0,0,0,0,15,15,13,15,3,0,0,2,16,10,0,13,9,0,0,1,16,5,0,12,5,0,0,0,16,3,0,13,6,0,0,1,15,5,6,13,1,0,0,0,16,11,14,10,0,0,0,0,7,16,11,1,0,0,0 +0,0,13,10,1,0,0,0,0,5,16,14,7,0,0,0,0,4,16,8,14,0,0,0,0,2,14,16,16,6,0,0,0,0,1,4,9,13,1,0,0,0,0,0,0,13,6,0,0,0,5,8,5,9,14,0,0,0,13,13,15,16,13,0,9 +0,0,7,7,13,16,4,0,0,0,13,13,6,12,7,0,0,0,10,4,10,11,1,0,0,0,8,16,10,0,0,0,0,3,14,16,0,0,0,0,0,8,8,11,5,0,0,0,0,4,10,9,8,0,0,0,0,1,11,16,6,0,0,0,8 +0,1,9,16,13,7,0,0,0,7,14,4,10,12,0,0,0,6,15,9,16,11,0,0,0,0,9,11,7,14,0,0,0,0,0,0,0,15,2,0,0,0,0,0,0,11,6,0,0,3,13,8,5,14,5,0,0,0,9,14,13,10,1,0,9 +0,0,11,10,12,4,0,0,0,0,12,13,9,16,1,0,0,0,7,13,11,16,0,0,0,0,1,16,14,4,0,0,0,0,10,16,13,0,0,0,0,0,14,7,12,7,0,0,0,4,14,4,12,13,0,0,0,1,11,14,12,4,0,0,8 +0,0,0,9,15,1,0,0,0,0,4,16,12,0,0,0,0,0,15,14,2,11,3,0,0,4,16,9,4,16,10,0,0,9,16,11,13,16,2,0,0,0,9,16,16,14,0,0,0,0,0,8,16,6,0,0,0,0,0,9,16,2,0,0,4 +0,0,0,0,12,5,0,0,0,0,0,2,16,12,0,0,0,0,1,12,16,11,0,0,0,2,12,16,16,10,0,0,0,6,11,5,15,6,0,0,0,0,0,1,16,9,0,0,0,0,0,2,16,11,0,0,0,0,0,3,16,8,0,0,1 
+0,0,0,9,15,12,0,0,0,0,4,7,7,14,0,0,0,0,0,0,0,13,3,0,0,4,9,8,10,13,1,0,0,4,16,15,16,16,6,0,0,0,0,0,14,3,0,0,0,0,0,9,12,0,0,0,0,0,0,11,7,0,0,0,7 +0,0,9,16,16,16,5,0,0,1,14,10,8,16,8,0,0,0,0,0,7,16,3,0,0,3,8,11,15,16,11,0,0,8,16,16,15,11,3,0,0,0,2,16,7,0,0,0,0,0,8,16,1,0,0,0,0,0,13,10,0,0,0,0,7 +0,0,9,16,13,6,0,0,0,0,6,5,16,16,0,0,0,0,0,8,15,5,0,0,0,0,0,5,14,3,0,0,0,0,0,0,9,15,2,0,0,0,0,0,0,11,12,0,0,0,4,8,11,15,12,0,0,0,11,14,12,8,0,0,3 +0,1,15,4,0,0,0,0,0,2,16,16,16,14,2,0,0,6,16,11,8,8,3,0,0,5,16,11,5,0,0,0,0,0,11,14,14,1,0,0,0,0,0,5,16,7,0,0,0,0,6,16,16,4,0,0,0,0,14,14,4,0,0,0,5 +0,0,0,1,11,9,0,0,0,0,0,7,16,13,0,0,0,0,4,14,16,9,0,0,0,10,16,11,16,8,0,0,0,0,0,3,16,6,0,0,0,0,0,3,16,8,0,0,0,0,0,5,16,10,0,0,0,0,0,2,14,6,0,0,1 +0,0,2,15,13,3,0,0,0,0,10,15,11,15,0,0,0,3,16,6,0,10,0,0,0,4,16,8,0,3,8,0,0,8,14,3,0,4,8,0,0,3,15,1,0,3,7,0,0,0,14,11,6,14,5,0,0,0,4,12,15,6,0,0,0 +0,0,1,15,13,1,0,0,0,0,7,16,14,8,0,0,0,8,12,9,2,13,2,0,0,7,9,1,0,6,6,0,0,5,9,0,0,3,9,0,0,0,15,2,0,8,12,0,0,0,9,15,13,16,6,0,0,0,0,13,14,8,0,0,0 +0,0,0,5,14,12,2,0,0,0,7,15,8,14,4,0,0,0,6,2,3,13,1,0,0,0,0,1,13,4,0,0,0,0,1,11,9,0,0,0,0,8,16,13,0,0,0,0,0,5,14,16,11,2,0,0,0,0,0,6,12,13,3,0,2 +0,0,0,3,15,10,1,0,0,0,0,11,10,16,4,0,0,0,0,12,1,15,6,0,0,0,0,3,4,15,4,0,0,0,0,6,15,6,0,0,0,4,15,16,9,0,0,0,0,0,13,16,15,9,3,0,0,0,0,4,9,14,7,0,2 +0,0,3,12,16,16,6,0,0,0,10,11,7,16,11,0,0,0,0,0,2,14,10,0,0,5,11,8,9,16,3,0,0,9,16,16,16,16,9,0,0,1,4,9,16,6,0,0,0,0,0,11,14,0,0,0,0,0,4,16,5,0,0,0,7 +0,0,4,8,16,5,0,0,0,0,9,16,8,11,0,0,0,0,5,10,0,13,2,0,0,0,0,13,4,15,2,0,0,0,0,9,16,8,0,0,0,0,8,15,14,5,0,0,0,0,16,5,14,4,0,0,0,0,6,16,12,1,0,0,8 +0,0,0,1,14,14,3,0,0,0,0,10,11,13,8,0,0,0,0,7,0,13,8,0,0,0,0,0,7,15,1,0,0,4,8,12,15,4,0,0,0,6,16,16,6,0,0,0,0,0,2,12,12,4,2,0,0,0,0,1,13,16,5,0,2 +0,0,2,14,15,5,0,0,0,0,10,16,16,15,1,0,0,3,16,10,10,16,4,0,0,5,16,0,0,14,6,0,0,5,16,6,0,12,7,0,0,1,15,13,4,13,6,0,0,0,11,16,16,15,0,0,0,0,2,11,13,4,0,0,0 +0,0,0,0,12,13,1,0,0,0,0,8,16,15,2,0,0,0,10,16,16,12,0,0,0,4,16,16,16,13,0,0,0,4,7,4,16,6,0,0,0,0,0,1,16,8,0,0,0,0,0,1,16,8,0,0,0,0,0,0,12,12,0,0,1 +0,0,0,1,9,11,0,0,0,0,0,13,16,16,0,0,0,0,0,12,7,14,0,0,0,0,0,0,14,7,0,0,0,0,5,12,12,0,0,0,0,7,16,16,6,0,0,0,0,4,9,13,16,11,4,0,0,0,0,0,9,13,3,0,2 +0,0,0,10,13,1,0,0,0,1,11,12,7,0,0,0,0,2,16,12,0,0,0,0,0,4,16,11,0,0,0,0,0,4,16,15,8,4,0,0,0,4,16,16,13,16,6,0,0,0,7,16,7,13,14,0,0,0,0,7,15,15,5,0,6 +0,1,10,15,11,1,0,0,0,3,8,8,11,12,0,0,0,0,0,5,14,15,1,0,0,0,0,11,15,2,0,0,0,0,0,4,15,2,0,0,0,0,0,0,12,10,0,0,0,0,3,4,10,16,1,0,0,0,13,16,15,10,0,0,3 +0,0,10,15,14,4,0,0,0,0,4,6,13,16,2,0,0,0,0,3,16,9,0,0,0,0,0,1,16,6,0,0,0,0,0,0,10,12,0,0,0,0,0,0,1,16,4,0,0,1,9,5,6,16,7,0,0,0,14,12,15,11,2,0,3 +0,0,6,13,16,6,0,0,0,3,16,14,15,16,1,0,0,0,5,0,8,16,2,0,0,0,0,0,8,16,3,0,0,3,15,16,16,16,9,0,0,5,13,14,16,11,3,0,0,0,0,12,15,1,0,0,0,0,4,16,7,0,0,0,7 +0,0,14,16,14,6,0,0,0,0,7,10,16,16,3,0,0,0,0,5,16,16,1,0,0,0,0,2,16,8,0,0,0,0,0,0,12,13,1,0,0,0,0,0,4,16,7,0,0,0,5,9,14,16,7,0,0,0,13,16,16,10,1,0,3 +0,3,16,16,14,7,1,0,0,1,9,9,15,16,4,0,0,0,0,7,16,12,1,0,0,0,0,9,16,2,0,0,0,0,0,3,15,7,0,0,0,0,0,0,9,15,0,0,0,1,10,10,16,16,3,0,0,2,13,16,12,5,0,0,3 +0,0,0,6,16,4,0,0,0,0,1,13,15,1,0,0,0,1,11,16,5,0,0,0,0,8,16,10,0,10,6,0,0,12,16,8,9,16,12,0,0,2,15,16,16,16,7,0,0,0,0,4,16,11,0,0,0,0,0,7,16,3,0,0,4 +0,0,0,9,10,0,0,0,0,0,7,16,7,0,0,0,0,0,13,13,1,0,0,0,0,0,15,7,0,0,0,0,0,4,16,15,12,7,0,0,0,2,16,12,4,11,10,0,0,0,8,14,5,9,14,0,0,0,0,6,12,14,9,0,6 +0,0,0,10,11,0,0,0,0,0,9,16,6,0,0,0,0,0,15,13,0,0,0,0,0,0,14,10,0,0,0,0,0,1,15,12,8,2,0,0,0,0,12,16,16,16,10,1,0,0,7,16,12,12,16,4,0,0,0,9,15,12,5,0,6 
+0,0,5,14,0,0,0,0,0,0,12,9,0,0,0,0,0,0,15,3,0,0,0,0,0,1,16,0,0,0,0,0,0,1,16,2,7,4,0,0,0,3,16,16,16,16,9,0,0,0,15,15,4,10,16,0,0,0,4,14,16,12,7,0,6 +0,0,0,9,9,0,0,0,0,0,3,16,9,0,0,0,0,3,14,10,0,2,0,0,0,10,16,5,7,15,1,0,0,2,11,15,16,13,1,0,0,0,0,7,16,3,0,0,0,0,0,6,15,0,0,0,0,0,0,4,16,5,0,0,4 +0,0,6,12,13,6,0,0,0,6,16,9,12,16,2,0,0,7,16,9,15,13,0,0,0,0,11,15,16,4,0,0,0,0,0,12,10,0,0,0,0,0,3,16,4,0,0,0,0,0,1,16,2,0,0,0,0,0,6,11,0,0,0,0,9 +0,0,0,0,14,7,0,0,0,0,0,13,16,9,0,0,0,0,10,16,16,7,0,0,0,7,16,8,16,2,0,0,0,1,5,6,16,6,0,0,0,0,0,4,16,6,0,0,0,0,0,2,16,6,0,0,0,0,0,0,12,11,0,0,1 +0,1,13,15,12,12,5,0,0,4,16,8,8,6,0,0,0,7,13,0,0,0,0,0,0,8,15,13,15,7,0,0,0,1,6,5,8,12,0,0,0,0,0,0,12,11,0,0,0,0,2,13,14,1,0,0,0,3,14,10,1,0,0,0,5 +0,0,1,13,10,0,0,0,0,7,16,16,16,7,0,0,0,8,16,13,10,15,0,0,0,8,16,2,2,15,3,0,0,5,15,2,0,12,7,0,0,1,15,6,2,16,3,0,0,0,11,15,13,16,0,0,0,0,1,15,14,8,0,0,0 +0,1,12,13,4,0,0,0,0,4,16,16,16,3,0,0,0,4,16,16,16,10,0,0,0,0,6,16,14,16,0,0,0,0,0,0,0,16,4,0,0,0,0,0,0,13,7,0,0,1,2,3,7,14,10,0,0,2,12,16,14,12,3,0,9 +0,0,13,13,8,2,0,0,0,5,16,16,16,12,0,0,0,1,15,12,0,0,0,0,0,0,12,13,7,1,0,0,0,0,8,16,16,12,0,0,0,0,0,4,9,16,3,0,0,0,1,5,14,15,1,0,0,0,10,16,16,6,0,0,5 +0,0,0,0,9,13,0,0,0,0,0,2,16,16,1,0,0,0,0,5,9,15,0,0,0,0,0,0,5,14,0,0,0,0,0,3,15,7,0,0,0,7,16,16,11,0,0,0,0,0,11,14,16,7,3,0,0,0,0,0,9,15,9,0,2 +0,3,5,14,13,6,0,0,0,9,16,12,10,12,0,0,0,6,16,3,12,11,0,0,0,1,13,10,16,6,0,0,0,0,10,16,10,0,0,0,0,1,15,16,10,0,0,0,0,0,16,12,16,0,0,0,0,0,3,15,16,5,0,0,8 +0,0,0,0,11,15,4,0,0,0,0,3,16,16,12,0,0,0,0,8,14,16,12,0,0,0,0,5,10,16,6,0,0,1,7,11,16,13,0,0,0,9,16,16,14,1,0,0,0,3,8,14,16,9,0,0,0,0,0,1,11,16,12,0,2 +0,0,10,12,10,0,0,0,0,3,16,16,16,4,0,0,0,7,15,3,8,13,0,0,0,8,12,0,0,14,1,0,0,8,12,0,0,7,8,0,0,5,13,0,0,4,8,0,0,0,14,8,0,10,8,0,0,0,7,12,13,12,4,0,0 +0,0,4,14,11,0,0,0,0,3,15,15,16,9,0,0,0,8,13,0,3,15,1,0,0,8,12,0,0,8,6,0,0,8,12,0,0,8,8,0,0,5,13,1,0,8,8,0,0,2,15,14,12,15,6,0,0,0,5,16,15,8,0,0,0 +0,0,0,1,14,13,1,0,0,0,0,1,16,16,3,0,0,5,11,15,16,16,0,0,0,4,15,16,16,15,0,0,0,0,0,8,16,7,0,0,0,0,0,10,16,3,0,0,0,0,0,8,16,6,0,0,0,0,0,2,13,15,2,0,1 +0,0,3,14,16,14,0,0,0,0,13,13,13,16,2,0,0,0,1,0,9,15,0,0,0,0,9,12,15,16,10,0,0,4,16,16,16,11,3,0,0,0,4,9,14,2,0,0,0,0,2,15,9,0,0,0,0,0,4,13,1,0,0,0,7 +0,0,0,10,15,3,0,0,0,0,7,16,11,0,0,0,0,0,13,15,1,0,0,0,0,0,15,11,0,0,0,0,0,0,16,13,8,1,0,0,0,0,15,16,16,15,6,0,0,0,10,16,14,16,14,2,0,0,1,9,15,16,11,0,6 +0,2,13,15,10,4,0,0,0,0,5,4,13,15,2,0,0,0,0,0,11,16,4,0,0,0,0,0,16,12,0,0,0,0,0,0,13,11,0,0,0,0,0,0,8,13,0,0,0,1,6,8,14,12,0,0,0,2,12,14,11,1,0,0,3 +0,1,13,15,2,0,0,0,0,6,15,15,9,0,0,0,0,9,8,10,13,0,0,0,0,5,3,12,12,0,0,0,0,0,3,16,6,0,0,0,0,5,15,15,1,0,0,0,0,6,16,15,12,12,11,0,0,1,11,13,16,16,12,0,2 +0,0,0,1,16,5,0,0,0,0,0,5,16,11,0,0,0,0,0,12,16,11,0,0,0,7,12,16,16,7,0,0,0,4,8,12,16,4,0,0,0,0,0,9,16,2,0,0,0,0,0,10,16,2,0,0,0,0,0,3,13,5,0,0,1 +0,0,2,7,15,13,1,0,0,0,14,12,9,14,8,0,0,0,2,0,0,12,8,0,0,0,0,0,0,13,6,0,0,5,16,16,16,16,5,0,0,2,5,7,13,14,2,0,0,0,0,1,15,5,0,0,0,0,0,11,9,0,0,0,7 +0,0,0,9,16,4,0,0,0,1,9,16,13,2,0,0,0,14,16,14,8,0,0,0,1,15,15,5,16,9,0,0,0,5,16,16,16,8,0,0,0,0,2,13,16,1,0,0,0,0,0,11,13,0,0,0,0,0,0,11,13,0,0,0,4 +0,0,0,10,11,0,0,0,0,0,3,16,10,0,0,0,0,0,8,16,0,0,0,0,0,0,12,14,0,0,0,0,0,0,14,16,15,6,0,0,0,0,12,16,12,15,6,0,0,0,7,16,10,13,14,0,0,0,0,9,13,11,6,0,6 +0,0,13,16,15,4,0,0,0,0,9,8,13,16,3,0,0,0,0,0,13,16,7,0,0,0,0,1,16,12,0,0,0,0,0,0,15,10,0,0,0,0,0,0,8,15,0,0,0,0,3,6,15,16,7,0,0,0,15,16,16,11,1,0,3 
+0,0,0,1,12,8,1,0,0,0,0,4,16,16,1,0,0,0,1,13,16,11,0,0,0,1,11,16,16,12,0,0,0,2,12,8,16,10,0,0,0,0,0,0,15,8,0,0,0,0,0,4,16,4,0,0,0,0,0,3,13,4,0,0,1 +0,4,14,16,16,12,1,0,0,2,12,7,14,16,6,0,0,0,0,5,16,10,0,0,0,0,0,4,16,7,0,0,0,0,0,4,16,6,0,0,0,0,0,1,15,11,0,0,0,1,8,10,16,10,0,0,0,5,16,16,15,1,0,0,3 +0,0,9,13,14,5,0,0,0,4,16,10,13,16,0,0,0,0,13,15,14,16,1,0,0,0,0,3,7,16,3,0,0,0,0,0,4,16,0,0,0,0,0,0,1,16,3,0,0,1,15,5,8,16,2,0,0,0,7,15,16,9,0,0,9 +0,0,0,11,16,5,0,0,0,0,0,10,16,5,0,0,0,0,4,16,16,5,0,0,0,11,16,16,16,3,0,0,0,5,8,14,16,2,0,0,0,0,0,14,16,2,0,0,0,0,0,11,16,2,0,0,0,0,0,8,16,8,0,0,1 +0,0,3,12,16,10,0,0,0,2,14,12,12,12,0,0,0,5,10,0,10,11,0,0,0,0,0,1,14,9,2,0,0,0,8,16,16,16,10,0,0,0,6,16,13,7,0,0,0,0,0,16,5,0,0,0,0,0,5,13,0,0,0,0,7 +0,0,0,11,16,8,0,0,0,0,6,16,13,3,0,0,0,0,8,16,8,0,0,0,0,0,13,16,2,0,0,0,0,0,15,16,5,0,0,0,0,2,16,16,16,5,0,0,0,1,10,16,16,14,0,0,0,0,0,12,16,15,0,0,6 +0,1,9,16,15,10,0,0,0,6,16,8,7,16,3,0,0,0,11,14,16,11,1,0,0,1,13,16,6,0,0,0,0,8,15,16,3,0,0,0,0,5,14,10,11,0,0,0,0,0,15,7,16,3,0,0,0,0,11,16,8,0,0,0,8 +0,0,0,3,14,1,0,0,0,0,0,13,12,1,0,0,0,0,7,16,5,3,0,0,0,3,15,11,5,16,2,0,0,5,16,11,11,16,6,0,0,0,6,12,16,13,3,0,0,0,0,1,15,7,0,0,0,0,0,2,16,7,0,0,4 +0,2,15,16,16,13,2,0,0,1,10,8,14,16,8,0,0,0,0,0,16,15,1,0,0,0,0,0,16,8,0,0,0,0,0,0,14,14,0,0,0,0,0,0,11,16,1,0,0,2,14,13,16,16,3,0,0,2,15,16,14,5,0,0,3 +0,0,1,15,13,0,0,0,0,0,1,16,16,5,0,0,0,0,7,16,16,0,0,0,0,0,13,16,13,0,0,0,0,7,16,16,13,0,0,0,0,1,11,16,13,0,0,0,0,0,2,16,16,0,0,0,0,0,1,14,16,3,0,0,1 +0,0,0,2,13,0,0,0,0,0,0,8,15,0,0,0,0,0,5,16,5,2,0,0,0,0,15,12,1,16,4,0,0,4,16,2,9,16,8,0,0,0,10,14,16,16,4,0,0,0,0,0,13,8,0,0,0,0,0,0,13,6,0,0,4 +0,0,1,12,5,0,0,0,0,0,9,16,14,3,0,0,0,2,16,14,11,13,0,0,0,2,16,10,0,14,4,0,0,4,16,0,0,12,4,0,0,4,16,3,0,11,10,0,0,0,13,12,8,14,6,0,0,0,3,10,16,12,1,0,0 +0,0,12,16,16,8,0,0,0,3,16,13,8,5,0,0,0,2,16,3,0,0,0,0,0,0,16,13,9,0,0,0,0,0,10,16,16,7,0,0,0,0,0,1,10,13,0,0,0,0,2,11,16,10,0,0,0,0,11,16,12,0,0,0,5 +0,4,13,16,16,12,3,0,0,3,7,4,13,16,6,0,0,0,0,8,15,5,0,0,0,0,0,12,8,0,0,0,0,0,0,7,12,0,0,0,0,0,0,4,12,0,0,0,0,1,7,12,11,0,0,0,0,3,15,12,2,0,0,0,3 +0,0,0,8,14,4,0,0,0,0,7,16,7,0,0,0,0,0,14,10,0,0,0,0,0,1,16,6,0,0,0,0,0,3,16,16,10,0,0,0,0,2,16,12,14,6,0,0,0,0,12,15,11,10,0,0,0,0,0,10,13,8,0,0,6 +0,0,9,15,5,0,0,0,0,0,13,14,13,7,0,0,0,0,6,14,10,13,1,0,0,0,0,9,12,15,5,0,0,0,0,0,0,10,10,0,0,0,0,0,0,7,14,0,0,0,3,8,9,15,15,0,0,0,5,12,12,9,1,0,9 +0,0,0,5,11,1,0,0,0,0,0,14,14,2,0,0,0,0,5,16,5,0,0,0,0,0,8,15,2,0,0,0,0,0,10,13,0,0,0,0,0,0,14,16,16,8,0,0,0,0,6,16,9,15,6,0,0,0,0,6,14,16,8,0,6 +0,0,0,0,6,10,0,0,0,0,0,0,14,15,0,0,0,0,0,10,16,16,0,0,0,0,7,16,16,15,0,0,0,3,15,7,15,10,0,0,0,0,0,0,13,11,0,0,0,0,0,0,15,9,0,0,0,0,0,0,8,15,0,0,1 +0,0,2,11,16,4,0,0,0,0,12,9,11,15,1,0,0,0,2,0,4,16,0,0,0,0,0,2,8,15,1,0,0,4,16,16,16,15,7,0,0,3,6,4,16,3,0,0,0,0,0,6,11,0,0,0,0,0,0,12,7,0,0,0,7 +0,0,15,16,16,12,2,0,0,2,16,15,12,12,3,0,0,4,16,8,0,0,0,0,0,8,16,12,4,0,0,0,0,0,12,16,16,0,0,0,0,0,0,3,16,8,0,0,0,2,6,9,16,8,0,0,0,1,15,16,15,3,0,0,5 +0,0,0,10,15,0,0,0,0,0,11,15,3,0,0,0,0,7,15,4,0,0,0,0,0,12,11,1,3,8,2,0,0,4,12,15,15,16,9,0,0,0,0,8,16,8,2,0,0,0,0,10,12,0,0,0,0,0,0,12,9,0,0,0,4 +0,0,1,10,9,0,0,0,0,0,9,15,4,0,0,0,0,1,16,5,0,0,0,0,0,4,16,1,4,14,4,0,0,4,16,12,14,16,5,0,0,0,1,7,16,9,0,0,0,0,0,2,16,4,0,0,0,0,0,10,13,0,0,0,4 +0,0,3,11,16,13,0,0,0,3,15,15,13,16,0,0,0,6,8,2,9,14,0,0,0,0,4,7,15,14,5,0,0,4,16,16,16,14,6,0,0,1,8,13,12,0,0,0,0,0,1,16,4,0,0,0,0,0,3,15,1,0,0,0,7 +0,0,2,16,13,1,0,0,0,0,6,15,16,7,0,0,0,0,3,6,16,8,0,0,0,0,0,3,16,6,0,0,0,0,1,13,13,0,0,0,0,6,14,16,4,0,0,0,0,9,16,16,13,10,3,0,0,0,4,11,15,16,10,0,2 
+0,0,4,9,13,5,0,0,0,1,16,16,12,11,0,0,0,0,11,8,5,16,0,0,0,0,7,10,13,10,0,0,0,0,4,16,13,0,0,0,0,0,13,15,12,0,0,0,0,0,11,9,14,5,0,0,0,0,0,10,16,8,0,0,8 +0,0,0,8,15,8,0,0,0,0,3,16,12,16,4,0,0,0,2,10,1,16,4,0,0,0,0,0,8,14,0,0,0,0,0,9,15,3,0,0,0,3,16,14,4,0,0,0,0,4,15,14,7,1,0,0,0,0,0,9,12,14,4,0,2 +0,0,1,10,16,8,0,0,0,0,11,13,10,16,0,0,0,0,12,1,4,16,1,0,0,0,1,0,13,7,0,0,0,0,0,9,12,0,0,0,0,2,13,15,1,0,0,0,0,4,15,14,7,4,0,0,0,0,1,11,14,15,5,0,2 +0,2,15,16,16,14,2,0,0,3,16,14,9,10,1,0,0,7,16,6,0,0,0,0,0,3,14,15,6,0,0,0,0,0,1,13,12,0,0,0,0,0,0,8,16,0,0,0,0,0,9,16,11,0,0,0,0,3,16,13,1,0,0,0,5 +0,0,9,12,15,15,2,0,0,1,15,14,11,16,7,0,0,0,2,0,2,16,4,0,0,0,2,4,10,15,2,0,0,0,13,16,16,16,9,0,0,0,13,16,10,1,0,0,0,0,6,16,1,0,0,0,0,0,11,9,0,0,0,0,7 +0,0,0,12,14,1,0,0,0,0,9,16,10,5,0,0,0,0,8,13,5,14,0,0,0,0,2,14,16,16,4,0,0,0,0,0,4,10,10,0,0,0,0,0,0,4,16,0,0,0,2,6,4,9,16,0,0,0,1,11,16,15,7,0,9 +0,0,6,13,2,0,0,0,0,4,16,16,16,11,0,0,0,0,12,11,1,6,1,0,0,0,12,14,10,2,0,0,0,0,1,8,12,12,0,0,0,0,0,0,9,14,0,0,0,0,4,9,16,5,0,0,0,0,9,14,4,0,0,0,5 +0,0,0,12,13,0,0,0,0,0,5,16,10,0,0,0,0,6,16,13,11,12,2,0,0,12,16,10,15,16,9,0,0,4,14,16,16,12,4,0,0,0,0,14,13,0,0,0,0,0,0,14,12,0,0,0,0,0,0,11,13,0,0,0,4 +0,0,3,6,14,5,0,0,0,0,7,15,15,13,0,0,0,0,3,14,13,12,0,0,0,0,0,16,13,1,0,0,0,0,2,16,8,0,0,0,0,0,10,9,14,0,0,0,0,0,12,4,15,2,0,0,0,0,4,15,14,0,0,0,8 +0,0,5,14,15,4,0,0,0,0,8,16,16,14,0,0,0,0,5,16,16,9,0,0,0,0,0,15,16,1,0,0,0,0,1,16,13,0,0,0,0,0,11,15,14,5,0,0,0,0,12,12,8,15,1,0,0,0,5,16,16,16,2,0,8 +0,0,0,8,14,0,0,0,0,0,5,16,11,0,0,0,0,1,15,14,1,6,0,0,0,7,16,5,3,16,8,0,0,8,16,8,14,16,2,0,0,0,6,14,16,11,0,0,0,0,0,6,16,4,0,0,0,0,0,10,15,0,0,0,4 +0,0,12,16,7,0,0,0,0,2,16,5,12,3,0,0,0,0,14,6,3,16,2,0,0,0,2,14,16,12,0,0,0,0,0,0,10,10,0,0,0,0,0,0,10,8,0,0,0,0,8,2,13,7,0,0,0,0,11,16,16,3,0,0,9 +0,0,3,12,10,1,0,0,0,1,16,16,16,10,0,0,0,5,16,13,6,16,1,0,0,5,16,7,0,13,3,0,0,5,16,4,0,13,7,0,0,1,16,8,0,14,7,0,0,0,13,14,13,16,3,0,0,0,2,13,15,6,0,0,0 +0,0,5,4,9,10,0,0,0,0,10,8,11,16,2,0,0,0,8,12,14,14,1,0,0,0,5,15,7,0,0,0,0,0,14,12,0,0,0,0,0,1,14,13,3,0,0,0,0,0,12,13,5,0,0,0,0,0,7,16,5,0,0,0,8 +0,0,1,11,15,8,0,0,0,0,12,14,10,16,5,0,0,0,16,7,13,16,4,0,0,0,9,15,13,16,4,0,0,0,0,0,1,15,4,0,0,0,0,0,0,16,4,0,0,8,16,14,9,16,4,0,0,0,2,10,15,15,2,0,9 +0,0,7,13,15,5,0,0,0,0,8,16,16,12,0,0,0,0,7,16,15,3,0,0,0,0,6,16,5,0,0,0,0,0,5,16,2,0,0,0,0,0,8,16,6,0,0,0,0,0,12,12,13,0,0,0,0,0,5,13,10,0,0,0,8 +0,0,2,12,9,0,0,0,0,0,11,15,12,5,0,0,0,0,15,5,0,14,0,0,0,2,15,1,0,9,7,0,0,4,10,0,0,7,8,0,0,0,12,0,0,8,10,0,0,2,15,5,10,16,1,0,0,0,5,14,12,4,0,0,0 +0,0,0,0,5,15,8,0,0,0,0,2,15,16,9,0,0,0,3,15,16,16,10,0,0,7,16,10,8,16,7,0,0,0,1,0,8,16,4,0,0,0,0,0,11,16,1,0,0,0,0,0,9,16,1,0,0,0,0,0,8,14,0,0,1 +0,2,15,16,6,0,0,0,0,5,16,15,14,0,0,0,0,5,13,10,14,0,0,0,0,0,0,12,12,0,0,0,0,0,1,16,7,0,0,0,0,0,10,15,2,0,0,0,0,3,16,10,8,6,1,0,0,2,15,16,16,16,7,0,2 +0,3,16,16,12,12,6,0,0,0,4,4,5,14,8,0,0,0,0,0,11,11,0,0,0,0,0,4,16,3,0,0,0,0,0,0,12,11,0,0,0,0,0,0,9,14,0,0,0,0,3,7,15,4,0,0,0,3,16,14,4,0,0,0,3 +0,0,0,3,16,3,0,0,0,0,0,12,16,2,0,0,0,0,8,16,16,4,0,0,0,7,16,15,16,12,11,0,0,8,16,16,16,13,3,0,0,0,0,7,14,1,0,0,0,0,0,6,16,0,0,0,0,0,0,4,14,0,0,0,4 +0,4,16,16,16,10,3,0,0,12,16,9,8,12,3,0,0,10,16,2,0,0,0,0,0,3,16,12,0,0,0,0,0,0,7,16,9,0,0,0,0,0,0,12,16,1,0,0,0,3,10,15,15,1,0,0,0,4,16,13,3,0,0,0,5 +0,0,2,15,4,0,0,0,0,0,11,13,0,0,0,0,0,0,16,6,0,0,0,0,0,3,16,7,0,0,0,0,0,4,16,16,15,5,0,0,0,4,16,5,3,13,7,0,0,1,14,9,0,8,13,0,0,0,2,13,16,16,8,0,6 +0,0,2,15,15,3,0,0,0,0,8,14,16,11,0,0,0,0,0,0,11,14,0,0,0,0,0,0,11,14,3,0,0,0,4,12,16,16,7,0,0,0,11,16,12,1,0,0,0,0,1,14,6,0,0,0,0,0,4,12,1,0,0,0,7 
+0,0,5,12,13,2,0,0,0,3,16,14,16,13,1,0,0,4,16,9,16,12,1,0,0,1,9,16,15,1,0,0,0,1,13,16,16,5,0,0,0,3,16,5,12,16,0,0,0,3,15,7,14,12,0,0,0,0,6,16,13,3,0,0,8 +0,0,5,15,5,0,0,0,0,0,12,11,13,8,0,0,0,0,11,9,10,16,0,0,0,0,3,15,16,16,1,0,0,0,0,0,0,14,6,0,0,0,0,0,0,13,8,0,0,0,6,3,0,14,6,0,0,0,2,13,16,15,3,0,9 +0,0,12,9,12,1,0,0,0,0,14,16,16,8,0,0,0,3,16,9,3,15,2,0,0,4,16,1,0,16,5,0,0,5,12,0,0,16,5,0,0,3,14,1,4,16,4,0,0,0,15,12,14,14,0,0,0,0,7,12,12,2,0,0,0 +0,0,0,0,7,13,5,0,0,0,0,0,15,16,8,0,0,0,1,13,16,16,3,0,0,0,12,16,16,16,0,0,0,8,16,3,16,13,0,0,0,2,3,0,16,12,0,0,0,0,0,0,16,13,0,0,0,0,0,0,8,12,0,0,1 +0,0,13,14,8,1,0,0,0,1,16,16,16,6,0,0,0,0,3,5,16,8,0,0,0,0,0,9,16,2,0,0,0,0,1,16,9,0,0,0,0,1,16,13,0,0,0,0,0,5,16,11,4,4,0,0,0,1,13,14,12,12,0,0,2 +0,6,16,16,15,7,0,0,0,2,11,12,16,16,5,0,0,0,0,0,14,15,2,0,0,0,0,1,16,11,0,0,0,0,0,0,14,14,0,0,0,0,0,0,5,16,9,0,0,0,3,7,12,16,7,0,0,3,16,16,15,11,1,0,3 +0,0,0,5,14,0,0,0,0,0,1,15,13,0,0,0,0,0,9,16,13,0,0,0,0,5,16,15,13,0,0,0,0,8,16,16,16,15,9,0,0,0,4,10,14,8,5,0,0,0,0,8,12,0,0,0,0,0,0,7,10,0,0,0,4 +0,4,15,16,16,16,4,0,0,4,16,15,9,7,1,0,0,0,15,14,1,0,0,0,0,0,5,16,9,0,0,0,0,0,0,14,16,0,0,0,0,0,0,9,16,5,0,0,0,3,10,13,16,4,0,0,0,5,16,16,12,0,0,0,5 +0,0,0,6,14,1,0,0,0,0,2,16,10,0,0,0,0,0,11,15,2,0,0,0,0,0,13,13,8,5,0,0,0,2,16,14,12,16,3,0,0,1,16,11,0,5,12,0,0,0,11,15,5,12,12,0,0,0,0,9,15,14,6,0,6 +0,0,3,15,14,1,0,0,0,0,13,16,16,6,0,0,0,0,4,4,16,8,0,0,0,0,0,1,16,10,1,0,0,0,7,16,16,16,8,0,0,0,12,15,16,6,0,0,0,0,0,14,11,0,0,0,0,0,4,12,1,0,0,0,7 +0,0,3,13,13,3,0,0,0,2,14,12,12,11,0,0,0,4,16,8,5,15,3,0,0,1,13,14,16,7,0,0,0,0,11,16,13,1,0,0,0,1,16,12,13,14,1,0,0,0,13,13,9,16,7,0,0,0,3,13,16,10,1,0,8 +0,0,6,13,10,4,0,0,0,4,16,15,13,13,0,0,0,4,16,14,16,16,1,0,0,2,10,16,16,16,5,0,0,0,0,0,3,16,5,0,0,0,0,0,0,16,8,0,0,0,0,2,8,16,8,0,0,0,7,16,14,9,1,0,9 +0,0,2,12,4,0,0,0,0,1,12,16,16,3,0,0,0,7,16,6,4,13,0,0,0,8,16,6,0,13,5,0,0,1,16,5,0,7,9,0,0,0,16,8,0,8,12,0,0,0,13,14,14,16,10,0,0,0,4,14,15,7,0,0,0 +0,0,0,0,5,14,3,0,0,0,0,0,9,16,8,0,0,0,0,9,16,16,5,0,0,1,13,15,12,16,1,0,0,4,12,3,10,15,0,0,0,0,0,0,11,12,0,0,0,0,0,0,8,12,0,0,0,0,0,0,5,13,4,0,1 +0,0,6,13,10,3,0,0,0,5,15,11,16,11,0,0,0,2,6,0,16,12,0,0,0,0,0,0,16,12,0,0,0,0,0,7,16,6,0,0,0,0,5,15,10,0,0,0,0,0,11,16,8,4,0,0,0,0,8,14,13,12,4,0,2 +0,1,11,12,14,6,0,0,0,1,6,4,8,16,7,0,0,0,0,0,3,16,5,0,0,0,0,2,14,9,0,0,0,0,0,4,16,7,0,0,0,0,0,1,14,13,0,0,0,0,0,1,12,14,0,0,0,5,12,15,9,1,0,0,3 +0,0,0,3,16,3,0,0,0,0,0,10,16,11,0,0,0,0,4,16,16,8,0,0,0,2,14,12,16,5,0,0,0,10,16,14,16,16,11,0,0,5,12,13,16,8,3,0,0,0,0,2,15,3,0,0,0,0,0,4,12,0,0,0,4 +0,2,14,16,16,13,5,0,0,7,16,13,8,8,1,0,0,10,15,0,0,0,0,0,0,10,16,0,0,0,0,0,0,7,16,6,0,0,0,0,0,1,12,16,8,0,0,0,0,1,8,16,10,0,0,0,0,3,16,15,1,0,0,0,5 +0,0,0,16,7,0,0,0,0,0,6,16,4,0,0,0,0,0,11,15,0,0,0,0,0,0,12,12,0,0,0,0,0,0,15,16,16,8,0,0,0,0,12,16,13,15,8,0,0,0,12,16,7,13,15,0,0,0,1,11,16,15,9,0,6 +0,0,10,16,14,1,0,0,0,0,14,16,16,7,0,0,0,0,1,5,16,7,0,0,0,0,2,10,16,11,6,0,0,7,16,16,15,12,7,0,0,11,10,15,10,0,0,0,0,0,6,16,2,0,0,0,0,0,13,9,0,0,0,0,7 +0,0,2,13,13,1,0,0,0,0,8,16,14,4,0,0,0,0,5,16,10,8,4,0,0,0,1,16,16,10,2,0,0,2,15,13,12,0,0,0,0,5,12,3,15,1,0,0,0,0,14,3,13,4,0,0,0,0,3,15,13,1,0,0,8 +0,0,5,13,4,0,0,0,0,0,15,14,14,5,0,0,0,0,12,7,8,16,1,0,0,0,4,13,16,16,1,0,0,0,0,0,0,15,2,0,0,0,0,0,0,14,6,0,0,0,2,7,8,16,4,0,0,0,6,12,13,10,0,0,9 +0,0,5,14,10,7,0,0,0,0,16,16,16,16,3,0,0,3,16,10,2,16,7,0,0,7,16,3,0,12,8,0,0,8,16,1,0,12,8,0,0,7,16,5,2,16,4,0,0,2,16,15,14,13,0,0,0,0,7,15,13,2,0,0,0 
+0,0,2,13,6,0,0,0,0,0,4,16,15,5,0,0,0,0,1,15,12,15,0,0,0,0,0,10,16,16,1,0,0,0,0,0,2,16,2,0,0,0,0,0,0,15,5,0,0,0,4,4,6,16,3,0,0,0,2,14,16,10,0,0,9 +0,5,16,16,16,11,1,0,0,4,16,15,10,8,1,0,0,4,16,12,0,0,0,0,0,1,13,16,5,0,0,0,0,0,6,16,13,0,0,0,0,0,0,10,16,4,0,0,0,0,4,13,16,2,0,0,0,2,15,16,9,0,0,0,5 +0,6,16,16,16,15,10,0,0,9,16,13,8,6,5,0,0,12,16,1,0,0,0,0,0,10,16,7,0,0,0,0,0,3,15,15,3,0,0,0,0,0,3,16,13,0,0,0,0,1,5,16,9,0,0,0,0,9,16,11,0,0,0,0,5 +0,0,5,13,1,0,0,0,0,0,12,13,0,0,0,0,0,0,16,8,0,0,0,0,0,5,16,2,0,0,0,0,0,4,16,8,15,9,1,0,0,4,16,16,12,15,11,0,0,1,15,14,4,14,11,0,0,0,5,14,14,10,1,0,6 +0,2,15,16,16,13,2,0,0,7,16,13,8,8,3,0,0,4,16,4,0,0,0,0,0,0,16,11,0,0,0,0,0,0,12,16,3,0,0,0,0,0,2,13,11,0,0,0,0,3,8,15,8,0,0,0,0,2,15,15,1,0,0,0,5 +0,0,5,16,15,1,0,0,0,1,15,9,10,12,0,0,0,3,16,1,0,16,4,0,0,6,16,0,0,11,6,0,0,3,16,1,0,11,8,0,0,4,16,4,3,15,4,0,0,1,13,13,13,14,1,0,0,0,4,13,14,1,0,0,0 +0,0,2,13,8,6,0,0,0,0,7,14,14,16,0,0,0,0,7,9,3,16,4,0,0,0,5,14,15,16,4,0,0,0,0,3,2,13,5,0,0,0,3,0,0,12,6,0,0,1,12,6,0,11,7,0,0,0,3,12,16,16,1,0,9 +0,0,6,15,13,3,0,0,0,0,13,14,15,12,0,0,0,0,14,11,13,15,5,0,0,0,9,16,15,8,0,0,0,2,16,16,16,2,0,0,0,8,16,2,14,10,0,0,0,5,16,9,14,11,0,0,0,0,8,14,13,2,0,0,8 +0,0,6,15,6,0,0,0,0,0,14,11,13,4,0,0,0,0,11,7,7,13,0,0,0,0,5,16,16,16,1,0,0,0,0,0,0,14,7,0,0,0,0,0,0,10,13,0,0,0,2,11,0,11,12,0,0,0,5,16,14,9,4,0,9 +0,0,2,13,4,0,0,0,0,0,8,14,11,0,0,0,0,0,10,6,14,5,2,0,0,0,2,14,12,14,0,0,0,0,1,15,13,2,0,0,0,0,11,13,14,1,0,0,0,0,13,8,10,4,0,0,0,0,2,11,16,7,0,0,8 +0,0,0,8,13,0,0,0,0,0,1,16,16,0,0,0,0,0,7,16,16,0,0,0,0,1,15,16,16,0,0,0,0,6,16,15,16,9,2,0,0,6,15,16,16,16,11,0,0,0,0,11,16,0,0,0,0,0,0,10,14,0,0,0,4 +0,0,0,0,14,16,7,0,0,0,0,5,16,16,8,0,0,0,1,12,16,16,8,0,0,5,14,16,16,16,5,0,0,1,4,7,16,16,8,0,0,0,0,2,16,16,7,0,0,0,0,2,16,16,3,0,0,0,0,0,16,16,0,0,1 +0,0,7,16,16,16,13,0,0,0,8,9,8,15,15,0,0,0,0,0,4,16,7,0,0,0,4,8,12,16,5,0,0,3,16,16,16,14,7,0,0,0,3,8,16,3,0,0,0,0,0,15,13,0,0,0,0,0,7,16,3,0,0,0,7 +0,0,1,8,10,15,11,0,0,2,14,15,13,16,7,0,0,0,6,0,6,14,2,0,0,0,0,3,11,12,2,0,0,0,2,16,16,15,8,0,0,0,3,13,15,2,0,0,0,0,0,13,10,0,0,0,0,0,1,16,5,0,0,0,7 +0,1,10,16,16,11,0,0,0,5,10,8,12,16,4,0,0,0,0,1,10,14,0,0,0,0,0,6,16,4,0,0,0,0,0,7,16,5,0,0,0,0,0,1,12,16,4,0,0,0,2,4,9,16,4,0,0,1,15,14,11,4,0,0,3 +0,7,16,16,16,11,2,0,0,5,16,12,8,6,1,0,0,9,16,1,0,0,0,0,0,2,16,15,3,0,0,0,0,0,5,16,14,1,0,0,0,0,0,2,16,10,0,0,0,1,7,13,16,3,0,0,0,4,15,16,6,0,0,0,5 +0,0,0,0,12,11,1,0,0,0,0,1,16,16,7,0,0,0,1,14,16,16,7,0,0,1,14,16,14,16,8,0,0,5,12,3,8,16,7,0,0,0,0,0,8,16,4,0,0,0,0,0,8,16,1,0,0,0,0,0,11,12,0,0,1 +0,0,6,15,15,3,0,0,0,3,16,14,14,13,0,0,0,6,15,2,1,14,5,0,0,8,14,2,0,9,8,0,0,8,16,4,0,8,8,0,0,5,16,6,0,11,9,0,0,1,16,16,14,16,9,0,0,0,5,14,15,10,1,0,0 +0,0,0,10,15,1,0,0,0,0,5,16,16,10,0,0,0,0,14,6,6,15,0,0,0,4,16,4,0,15,3,0,0,5,15,5,0,11,5,0,0,0,12,11,0,13,5,0,0,0,8,16,16,16,3,0,0,0,1,8,13,8,0,0,0 +0,0,7,15,15,4,0,0,0,8,16,16,16,4,0,0,0,8,15,8,16,4,0,0,0,0,0,10,15,0,0,0,0,0,1,15,9,0,0,0,0,0,6,16,2,0,0,0,0,0,8,16,8,11,9,0,0,0,9,16,16,12,3,0,2 +0,1,12,14,10,0,0,0,0,5,16,16,16,4,0,0,0,0,0,4,16,4,0,0,0,0,0,7,15,1,0,0,0,0,2,15,9,0,0,0,0,0,9,15,2,0,0,0,0,0,16,14,8,8,3,0,0,0,10,15,13,9,4,0,2 +0,0,5,16,12,2,0,0,0,0,4,11,16,10,0,0,0,0,0,0,14,11,0,0,0,0,2,4,14,14,2,0,0,0,13,16,16,10,4,0,0,0,3,10,14,0,0,0,0,0,0,15,5,0,0,0,0,0,6,11,0,0,0,0,7 +0,0,5,15,13,3,0,0,0,0,12,15,12,14,0,0,0,0,12,12,14,14,0,0,0,0,3,16,14,3,0,0,0,0,9,14,15,3,0,0,0,1,15,5,8,12,1,0,0,0,16,4,4,16,4,0,0,0,5,16,16,12,2,0,8 
+0,0,11,12,0,0,0,0,0,7,16,16,5,0,0,0,0,5,13,16,8,0,0,0,0,0,1,16,8,0,0,0,0,0,7,16,5,0,0,0,0,0,9,15,1,0,0,0,0,0,16,15,9,7,2,0,0,0,12,14,13,12,5,0,2 +0,0,2,15,13,0,0,0,0,0,13,16,16,9,0,0,0,5,16,9,6,16,4,0,0,5,16,3,1,14,7,0,0,6,16,4,0,16,8,0,0,3,16,12,6,16,12,0,0,0,14,16,16,16,4,0,0,0,2,13,15,8,0,0,0 +0,0,0,1,11,14,5,0,0,0,0,9,16,16,5,0,0,0,10,16,16,16,1,0,0,5,16,16,16,16,0,0,0,1,5,11,16,16,0,0,0,0,0,5,16,16,0,0,0,0,0,0,11,16,0,0,0,0,0,0,11,10,0,0,1 +0,0,9,16,5,0,0,0,0,6,16,16,15,0,0,0,0,7,16,14,16,2,0,0,0,3,6,12,16,0,0,0,0,0,0,15,12,0,0,0,0,0,7,16,6,1,3,0,0,0,9,16,13,15,8,0,0,0,7,16,16,8,1,0,2 +0,0,4,14,6,0,0,0,0,0,10,15,2,0,0,0,0,0,16,11,0,0,0,0,0,0,13,11,0,0,0,0,0,3,16,12,8,3,0,0,0,2,16,14,8,12,9,0,0,0,16,13,4,12,12,0,0,0,6,15,16,12,1,0,6 +0,4,12,13,13,6,0,0,0,6,14,8,13,16,0,0,0,0,0,0,11,16,2,0,0,0,0,0,16,15,0,0,0,0,0,0,12,16,3,0,0,0,0,0,4,16,9,0,0,0,1,4,12,16,8,0,0,2,13,16,12,6,0,0,3 +0,2,15,15,6,0,0,0,0,0,10,13,16,5,0,0,0,0,0,2,16,9,0,0,0,0,0,3,16,11,0,0,0,0,0,0,13,14,1,0,0,0,0,0,7,16,5,0,0,1,4,6,13,15,1,0,0,3,15,14,11,2,0,0,3 +0,0,15,15,2,0,0,0,0,0,8,15,11,0,0,0,0,0,0,12,14,0,0,0,0,0,7,12,16,13,9,0,0,7,16,16,16,10,5,0,0,1,5,16,14,0,0,0,0,0,9,15,3,0,0,0,0,2,16,4,0,0,0,0,7 +0,1,10,15,16,13,3,0,0,5,14,5,5,15,8,0,0,0,0,0,2,15,6,0,0,0,0,0,9,16,0,0,0,0,0,0,9,16,0,0,0,0,0,0,4,16,6,0,0,0,2,3,13,12,0,0,0,0,15,13,7,0,0,0,3 +0,2,14,16,14,4,0,0,0,4,11,5,13,12,0,0,0,0,0,2,15,7,0,0,0,0,0,4,16,4,0,0,0,0,0,1,13,13,1,0,0,0,0,0,2,16,4,0,0,0,1,3,11,15,2,0,0,1,12,16,9,2,0,0,3 +0,0,0,0,11,5,0,0,0,0,0,8,16,2,0,0,0,0,4,15,16,3,0,0,0,0,12,6,16,3,0,0,0,5,15,0,15,5,3,0,0,6,16,16,16,11,4,0,0,0,0,0,14,0,0,0,0,0,0,0,12,2,0,0,4 +0,0,0,6,14,3,0,0,0,0,2,16,11,0,0,0,0,0,11,15,2,0,0,0,0,1,15,13,2,0,0,0,0,3,16,16,16,7,0,0,0,4,16,14,8,13,7,0,0,0,12,16,5,12,10,0,0,0,0,8,14,13,5,0,6 +0,0,1,14,6,0,0,0,0,0,7,16,1,0,0,0,0,0,13,10,0,0,0,0,0,0,14,7,0,0,0,0,0,3,16,10,8,3,0,0,0,0,16,13,12,14,7,0,0,0,14,9,4,11,13,0,0,0,2,12,16,12,4,0,6 +0,0,2,14,11,0,0,0,0,0,10,16,1,0,0,0,0,1,16,9,0,0,0,0,0,4,16,5,0,0,0,0,0,5,16,12,12,11,1,0,0,4,16,14,10,14,11,0,0,2,15,10,6,16,10,0,0,0,3,15,16,10,1,0,6 +0,0,0,2,15,4,0,0,0,0,0,8,15,1,0,0,0,0,1,14,12,0,0,0,0,0,6,16,12,0,0,0,0,0,13,16,15,4,2,0,0,9,16,16,16,16,11,0,0,3,8,8,16,3,0,0,0,0,0,3,15,0,0,0,4 +0,0,6,15,5,0,0,0,0,1,15,15,15,3,0,0,0,2,16,6,11,14,0,0,0,0,15,14,15,16,1,0,0,0,3,8,10,16,5,0,0,0,0,0,3,16,8,0,0,0,0,4,10,16,8,0,0,0,7,12,13,12,1,0,9 +0,0,0,0,11,12,0,0,0,0,0,3,15,14,0,0,0,0,0,11,16,11,0,0,0,0,9,16,16,10,0,0,0,4,16,12,16,12,0,0,0,3,10,3,16,11,0,0,0,0,0,0,16,14,0,0,0,0,0,0,11,11,0,0,1 +0,2,16,16,16,14,5,0,0,9,16,11,6,8,3,0,0,9,16,2,0,0,0,0,0,3,16,13,1,0,0,0,0,0,8,16,12,0,0,0,0,0,0,12,16,4,0,0,0,0,1,11,16,6,0,0,0,4,16,16,8,0,0,0,5 +0,0,4,15,11,1,0,0,0,2,14,14,16,8,0,0,0,8,15,2,3,13,0,0,0,4,16,0,0,12,7,0,0,7,16,0,0,12,8,0,0,3,16,6,1,14,9,0,0,0,15,16,16,16,2,0,0,0,4,13,14,6,0,0,0 +0,0,10,15,7,0,0,0,0,4,16,13,11,11,0,0,0,10,16,12,15,16,4,0,0,3,12,12,14,16,4,0,0,0,0,0,10,16,4,0,0,0,0,0,9,16,4,0,0,0,4,4,15,15,0,0,0,1,12,15,12,3,0,0,9 +0,4,16,16,16,16,5,0,0,11,16,8,5,8,3,0,0,10,16,2,0,0,0,0,0,3,16,6,0,0,0,0,0,0,16,9,0,0,0,0,0,0,12,16,2,0,0,0,0,0,6,16,11,0,0,0,0,4,16,12,1,0,0,0,5 +0,0,3,13,12,2,0,0,0,0,14,13,15,11,0,0,0,0,7,0,8,15,0,0,0,0,0,0,13,6,0,0,0,0,0,3,16,4,0,0,0,0,0,13,11,0,0,0,0,0,3,16,12,8,1,0,0,0,3,16,11,8,0,0,2 +0,0,10,13,0,0,0,0,0,0,14,15,11,0,0,0,0,0,12,9,16,8,2,0,0,0,5,14,16,11,1,0,0,0,3,16,10,0,0,0,0,0,12,11,16,0,0,0,0,1,16,7,16,5,0,0,0,0,11,16,13,1,0,0,8 
+0,0,5,14,11,0,0,0,0,0,15,16,15,0,0,0,0,0,10,8,16,1,0,0,0,0,0,6,14,0,0,0,0,0,0,12,9,0,0,0,0,0,1,16,3,0,0,0,0,0,8,16,12,11,0,0,0,0,7,16,12,7,0,0,2 +0,0,2,12,12,2,0,0,0,0,10,16,16,9,0,0,0,3,16,16,8,15,3,0,0,6,16,6,0,13,8,0,0,8,16,4,0,15,8,0,0,5,16,8,12,16,6,0,0,0,15,16,16,15,2,0,0,0,3,13,12,3,0,0,0 +0,0,0,14,7,0,0,0,0,1,9,16,16,3,0,0,0,4,16,8,11,11,0,0,0,3,16,7,4,16,4,0,0,8,16,4,0,16,8,0,0,5,16,10,0,13,11,0,0,0,13,16,16,16,9,0,0,0,2,10,13,6,0,0,0 +0,0,0,0,7,14,7,0,0,0,0,3,16,16,9,0,0,0,6,15,16,16,6,0,0,5,16,16,16,16,2,0,0,4,8,8,16,16,0,0,0,0,0,3,16,16,3,0,0,0,0,1,13,16,0,0,0,0,0,0,8,15,0,0,1 +0,0,8,16,13,0,0,0,0,0,7,14,16,4,0,0,0,0,1,12,16,13,12,1,0,0,11,16,16,14,9,0,0,0,10,16,14,1,0,0,0,0,0,14,9,0,0,0,0,0,3,16,1,0,0,0,0,0,12,9,0,0,0,0,7 +0,0,3,14,3,0,0,0,0,0,13,13,0,0,0,0,0,0,16,7,0,0,0,0,0,5,16,3,0,0,0,0,0,3,16,7,4,2,0,0,0,4,16,16,16,16,7,0,0,1,14,15,4,11,15,0,0,0,5,14,16,12,6,0,6 +0,2,14,16,12,6,0,0,0,1,10,8,14,16,1,0,0,0,0,0,10,15,2,0,0,0,0,2,16,12,0,0,0,0,0,3,16,12,0,0,0,0,0,0,11,16,2,0,0,0,7,10,15,15,2,0,0,3,13,11,7,2,0,0,3 +0,0,10,16,9,0,0,0,0,4,16,16,16,0,0,0,0,2,12,9,15,0,0,0,0,0,0,11,11,0,0,0,0,0,1,16,7,0,0,0,0,0,9,16,2,0,0,0,0,0,15,16,8,5,0,0,0,0,11,16,16,16,2,0,2 +0,0,0,0,7,15,1,0,0,0,0,0,11,16,0,0,0,0,0,0,16,14,0,0,0,0,0,10,16,15,0,0,0,0,12,16,16,11,0,0,0,5,16,6,15,12,0,0,0,0,1,0,12,16,0,0,0,0,0,0,4,15,4,0,1 +0,0,9,16,3,0,0,0,0,0,11,16,14,1,0,0,0,0,0,11,16,4,0,0,0,0,0,8,16,10,1,0,0,1,12,16,16,16,9,0,0,1,11,16,11,4,0,0,0,0,6,16,4,0,0,0,0,0,11,11,0,0,0,0,7 +0,0,15,16,11,3,0,0,0,0,4,10,15,15,3,0,0,0,0,0,14,16,5,0,0,0,0,5,16,12,0,0,0,0,0,3,16,11,1,0,0,0,0,2,13,16,9,0,0,0,6,15,16,12,3,0,0,0,15,14,7,1,0,0,3 +0,0,0,0,3,15,6,0,0,0,0,0,11,16,7,0,0,0,0,9,16,16,4,0,0,0,10,16,16,16,4,0,0,4,16,7,8,16,4,0,0,1,4,0,10,16,2,0,0,0,0,0,7,16,1,0,0,0,0,0,3,16,1,0,1 +0,1,12,12,13,8,1,0,0,0,8,9,15,16,2,0,0,0,0,3,16,10,0,0,0,0,0,7,16,6,0,0,0,0,0,5,16,10,0,0,0,0,0,0,7,16,7,0,0,0,3,8,15,13,2,0,0,2,14,16,10,1,0,0,3 +0,0,11,16,8,0,0,0,0,6,16,11,13,9,0,0,0,7,16,0,9,16,0,0,0,2,15,12,16,16,3,0,0,0,5,7,7,16,4,0,0,0,0,0,5,16,5,0,0,0,3,7,16,11,0,0,0,0,13,16,11,1,0,0,9 +0,0,0,4,14,14,5,0,0,0,0,9,16,16,7,0,0,0,5,15,16,15,3,0,0,4,15,16,16,12,0,0,0,5,16,16,16,12,0,0,0,0,0,12,16,13,1,0,0,0,0,8,16,16,7,0,0,0,0,4,14,15,6,0,1 +0,0,4,15,16,16,5,0,0,0,6,9,11,16,11,0,0,0,0,0,3,16,5,0,0,0,0,3,14,16,10,0,0,0,7,16,16,11,3,0,0,0,8,15,13,0,0,0,0,0,5,16,7,0,0,0,0,0,7,14,2,0,0,0,7 +0,0,0,8,12,1,0,0,0,0,1,15,12,1,0,0,0,0,6,16,3,0,0,0,0,0,7,16,1,0,0,0,0,1,16,16,14,5,0,0,0,1,12,16,6,14,9,0,0,0,2,16,6,10,15,0,0,0,0,6,16,16,11,0,6 +0,0,0,10,12,3,0,0,0,0,8,16,15,14,0,0,0,0,5,16,10,16,1,0,0,0,5,16,16,10,1,0,0,1,16,12,16,8,0,0,0,1,16,3,4,16,4,0,0,0,12,11,4,16,9,0,0,0,2,10,14,13,4,0,8 +0,0,0,7,15,1,0,0,0,0,0,15,16,1,0,0,0,0,9,16,16,2,0,0,0,2,16,16,16,0,0,0,0,10,16,16,16,16,8,0,0,8,15,15,14,8,5,0,0,0,0,11,12,0,0,0,0,0,0,6,15,1,0,0,4 +0,2,11,16,16,8,1,0,0,2,12,9,9,16,10,0,0,0,0,0,4,16,9,0,0,0,0,2,15,16,0,0,0,0,0,3,16,14,0,0,0,0,0,0,13,16,3,0,0,0,4,11,16,8,0,0,0,3,15,12,4,0,0,0,3 +0,0,0,1,16,11,0,0,0,0,0,1,16,16,4,0,0,0,0,8,16,16,0,0,0,0,10,16,16,16,1,0,0,6,16,14,16,15,0,0,0,1,3,5,16,12,0,0,0,0,0,4,16,12,0,0,0,0,0,2,15,12,0,0,1 +0,0,0,2,15,8,0,0,0,0,0,11,16,4,0,0,0,0,9,16,16,0,0,0,0,3,15,16,16,0,0,0,0,12,16,16,16,14,6,0,0,6,12,14,16,12,5,0,0,0,0,8,13,0,0,0,0,0,0,2,14,1,0,0,4 +0,0,4,12,10,1,0,0,0,3,16,13,15,10,0,0,0,5,16,2,1,14,3,0,0,8,13,0,0,10,8,0,0,8,12,0,0,8,8,0,0,8,14,0,0,11,8,0,0,3,16,14,13,16,2,0,0,0,8,16,13,5,0,0,0 
+0,3,15,13,12,8,1,0,0,4,16,14,12,12,2,0,0,0,16,4,0,0,0,0,0,0,12,9,0,0,0,0,0,0,7,16,3,0,0,0,0,0,0,14,8,0,0,0,0,0,5,15,10,0,0,0,0,2,15,16,2,0,0,0,5 +0,0,5,11,16,16,8,0,0,0,15,14,8,12,15,0,0,0,0,0,2,14,9,0,0,0,0,0,11,12,1,0,0,0,0,1,16,5,0,0,0,0,0,1,14,9,0,0,0,0,1,4,15,9,0,0,0,0,7,16,11,2,0,0,3 +0,0,0,11,14,0,0,0,0,0,10,16,4,0,0,0,0,1,15,11,0,0,0,0,0,3,16,12,8,2,0,0,0,7,16,16,12,14,3,0,0,4,16,8,0,10,9,0,0,1,12,15,9,14,10,0,0,0,2,10,13,11,1,0,6 +0,0,5,15,7,0,0,0,0,0,14,16,16,5,0,0,0,0,13,14,14,15,0,0,0,0,3,11,14,16,3,0,0,0,0,0,6,16,2,0,0,0,0,0,4,16,7,0,0,0,0,1,11,16,6,0,0,0,4,15,16,10,0,0,9 +0,0,3,15,4,0,0,0,0,0,12,16,2,0,0,0,0,1,16,7,1,0,0,0,0,3,16,1,6,1,0,0,0,3,15,8,12,13,3,0,0,2,16,2,0,7,12,0,0,0,13,9,4,9,15,0,0,0,3,13,16,15,5,0,6 +0,0,0,0,10,13,7,0,0,0,0,5,16,16,11,0,0,0,4,14,16,16,7,0,0,3,14,16,16,16,4,0,0,7,16,16,16,16,4,0,0,0,2,13,16,16,3,0,0,0,0,11,16,16,0,0,0,0,0,2,13,16,1,0,1 +0,0,9,15,16,5,0,0,0,0,16,16,16,13,0,0,0,0,0,3,16,11,0,0,0,0,2,7,16,13,10,0,0,2,15,16,16,12,4,0,0,3,13,16,10,0,0,0,0,0,7,16,2,0,0,0,0,0,12,13,0,0,0,0,7 +0,0,13,16,12,7,0,0,0,4,16,15,12,12,3,0,0,4,16,5,0,0,0,0,0,3,16,9,0,0,0,0,0,0,15,16,2,0,0,0,0,0,4,16,14,0,0,0,0,1,9,14,16,0,0,0,0,1,13,16,10,0,0,0,5 +0,0,0,3,13,1,0,0,0,0,0,8,16,3,0,0,0,0,1,15,16,4,0,0,0,0,8,16,16,1,0,0,0,2,16,14,16,5,1,0,0,11,16,16,16,16,10,0,0,5,8,11,16,4,1,0,0,0,0,2,16,2,0,0,4 +0,0,0,0,15,9,0,0,0,0,0,8,16,5,0,0,0,0,1,16,16,5,0,0,0,0,11,16,16,1,1,0,0,6,16,16,16,15,9,0,0,7,15,16,16,10,1,0,0,0,1,7,16,1,0,0,0,0,0,1,15,5,0,0,4 +0,0,7,15,6,0,0,0,0,0,2,14,15,2,0,0,0,0,0,5,16,6,0,0,0,0,0,5,16,9,2,0,0,5,14,16,15,11,4,0,0,5,7,12,11,0,0,0,0,0,4,15,1,0,0,0,0,0,10,11,0,0,0,0,7 +0,2,15,13,0,0,0,0,0,12,16,16,3,0,0,0,0,5,13,16,4,0,0,0,0,0,8,16,4,0,0,0,0,0,8,16,4,0,0,0,0,0,12,16,0,0,0,0,0,0,16,16,16,14,6,0,0,1,16,16,16,12,7,0,2 +0,0,2,12,12,0,0,0,0,0,7,16,15,9,1,0,0,0,6,14,13,15,3,0,0,0,1,16,16,4,0,0,0,0,7,16,14,0,0,0,0,1,15,9,16,5,0,0,0,2,13,13,16,10,0,0,0,0,1,11,12,5,0,0,8 +0,2,15,15,5,0,0,0,0,5,16,16,11,0,0,0,0,9,15,16,12,0,0,0,0,0,4,16,6,0,0,0,0,0,8,16,2,0,0,0,0,1,14,13,0,0,0,0,0,4,16,14,14,16,5,0,0,2,14,16,13,9,1,0,2 +0,0,4,14,16,4,0,0,0,3,16,16,16,6,0,0,0,8,16,12,16,7,0,0,0,3,5,12,15,0,0,0,0,0,0,15,12,0,0,0,0,0,6,16,2,0,0,0,0,0,8,16,12,5,1,0,0,0,4,16,16,15,4,0,2 +0,3,11,15,12,7,1,0,0,4,16,13,11,9,6,0,0,4,15,0,0,0,0,0,0,3,16,8,0,0,0,0,0,0,11,16,8,0,0,0,0,0,0,13,12,0,0,0,0,1,7,16,3,0,0,0,0,5,13,6,0,0,0,0,5 +0,4,13,9,8,3,0,0,0,5,16,14,12,12,6,0,0,8,16,1,0,0,0,0,0,3,16,11,1,0,0,0,0,0,8,16,9,0,0,0,0,0,0,10,16,0,0,0,0,1,1,12,14,0,0,0,0,4,14,14,3,0,0,0,5 +0,0,0,0,13,11,0,0,0,0,0,8,16,5,0,0,0,0,3,15,16,4,0,0,0,0,8,15,16,10,1,0,0,4,16,14,16,16,11,0,0,7,16,13,15,14,3,0,0,0,0,0,16,11,0,0,0,0,0,0,12,11,0,0,4 +0,0,3,14,13,1,0,0,0,0,9,16,16,9,0,0,0,0,8,16,11,15,1,0,0,0,2,14,16,15,2,0,0,0,6,16,16,6,0,0,0,0,15,11,10,12,0,0,0,1,16,11,11,15,0,0,0,0,5,13,15,7,0,0,8 +0,0,2,16,10,1,0,0,0,0,7,16,16,12,0,0,0,0,3,16,16,15,0,0,0,0,2,16,14,0,0,0,0,0,8,15,16,6,0,0,0,0,13,8,9,13,0,0,0,0,12,10,7,16,0,0,0,0,3,13,15,10,0,0,8 +0,0,0,5,16,3,0,0,0,0,1,15,16,4,0,0,0,0,10,16,16,1,0,0,0,3,16,16,15,4,2,0,0,10,16,16,16,16,12,0,0,1,7,14,13,6,5,0,0,0,0,11,12,0,0,0,0,0,0,4,10,0,0,0,4 +0,0,7,16,11,1,0,0,0,1,15,12,12,12,0,0,0,2,16,2,6,16,2,0,0,1,16,6,6,16,6,0,0,0,7,16,15,16,9,0,0,0,0,0,0,12,11,0,0,0,3,3,6,16,5,0,0,0,8,16,14,6,0,0,9 +0,0,2,14,13,0,0,0,0,0,10,15,15,8,0,0,0,2,16,7,4,15,0,0,0,4,16,4,0,13,7,0,0,4,16,1,0,10,8,0,0,4,16,5,1,12,11,0,0,1,15,14,13,16,3,0,0,0,3,12,13,5,0,0,0 
+0,0,0,12,15,6,0,0,0,0,5,16,13,15,0,0,0,0,2,16,16,12,1,0,0,0,0,11,16,14,1,0,0,0,7,16,15,10,0,0,0,1,16,8,2,14,5,0,0,0,12,10,4,12,7,0,0,0,2,11,16,13,3,0,8 +0,0,3,12,5,0,0,0,0,1,15,14,16,6,0,0,0,6,16,0,6,16,6,0,0,5,16,11,14,16,4,0,0,0,8,10,12,16,0,0,0,0,1,1,7,15,1,0,0,0,8,10,10,16,2,0,0,0,2,13,14,7,0,0,9 +0,0,5,14,15,2,0,0,0,0,13,14,9,10,0,0,0,0,15,8,2,15,3,0,0,0,11,12,9,14,2,0,0,0,7,16,14,2,0,0,0,0,13,14,16,4,0,0,0,3,15,8,14,10,0,0,0,0,6,16,16,8,0,0,8 +0,0,4,11,15,8,0,0,0,0,13,16,11,13,7,0,0,3,16,12,0,4,8,0,0,6,16,5,0,4,8,0,0,7,9,0,0,9,7,0,0,4,10,0,2,15,2,0,0,1,16,12,14,10,0,0,0,0,4,14,14,1,0,0,0 +0,0,0,13,15,8,0,0,0,0,1,16,16,10,0,0,0,0,1,16,16,8,0,0,0,0,4,16,16,2,0,0,0,0,11,16,14,0,0,0,0,2,16,16,10,0,0,0,0,0,13,16,15,2,0,0,0,0,1,13,16,4,0,0,1 +0,0,6,14,8,0,0,0,0,6,16,10,14,4,0,0,0,11,5,0,11,4,0,0,0,4,6,2,16,2,0,0,0,0,0,13,7,0,0,0,0,0,7,14,0,0,0,0,0,0,12,10,8,8,4,0,0,0,7,14,14,14,13,0,2 +0,0,10,16,10,1,0,0,0,6,14,6,16,3,0,0,0,5,4,5,15,0,0,0,0,0,0,12,14,1,0,0,0,0,0,3,11,15,1,0,0,0,0,0,0,10,8,0,0,0,9,8,8,15,6,0,0,0,9,15,15,9,0,0,3 +0,0,0,12,4,0,0,0,0,0,6,15,2,0,0,0,0,0,16,5,0,4,4,0,0,4,15,2,3,15,9,0,0,2,15,16,16,16,4,0,0,0,2,8,16,8,0,0,0,0,0,8,15,0,0,0,0,0,0,11,9,0,0,0,4 +0,0,6,8,12,14,0,0,0,5,16,15,12,7,0,0,0,8,16,13,4,0,0,0,0,2,11,8,14,11,0,0,0,0,0,0,0,15,1,0,0,0,0,0,0,14,5,0,0,1,9,8,12,14,1,0,0,0,10,15,12,3,0,0,5 +0,0,3,13,6,0,0,0,0,0,10,15,2,0,0,0,0,2,15,3,0,0,0,0,0,4,14,0,0,0,0,0,0,4,14,14,16,13,2,0,0,3,16,9,1,4,12,0,0,0,14,10,5,11,11,0,0,0,3,13,15,8,0,0,6 +0,0,6,16,16,16,16,10,0,0,6,10,8,14,16,3,0,0,0,0,4,16,8,0,0,0,2,10,14,15,6,0,0,0,6,16,16,16,7,0,0,0,0,15,9,0,0,0,0,0,6,16,6,0,0,0,0,0,10,13,1,0,0,0,7 +0,0,1,10,13,13,1,0,0,1,13,10,4,14,4,0,0,8,13,0,7,12,0,0,0,2,12,14,15,2,0,0,0,0,5,15,15,2,0,0,0,0,8,7,3,13,3,0,0,0,8,8,0,13,4,0,0,0,1,11,16,16,2,0,8 +0,0,6,12,13,5,0,0,0,2,16,9,8,15,2,0,0,8,12,0,3,15,8,0,0,4,15,12,16,13,1,0,0,0,2,2,16,6,0,0,0,0,0,1,16,1,0,0,0,0,0,5,16,0,0,0,0,0,3,16,14,0,0,0,9 +0,0,4,13,8,0,0,0,0,0,13,16,15,7,0,0,0,3,16,9,6,15,6,0,0,8,14,0,0,4,8,0,0,8,12,0,0,4,8,0,0,4,12,0,0,11,6,0,0,0,14,10,12,14,1,0,0,0,7,15,11,2,0,0,0 +0,0,0,0,14,4,0,0,0,0,0,3,16,11,0,0,0,0,0,10,16,9,0,0,0,0,0,13,16,7,0,0,0,0,12,16,16,4,0,0,0,1,13,12,16,5,0,0,0,0,0,4,16,9,0,0,0,0,0,0,14,10,0,0,1 +0,0,10,16,11,0,0,0,0,8,15,10,15,2,0,0,0,11,11,2,16,5,0,0,0,7,5,6,16,3,0,0,0,0,1,15,10,0,0,0,0,0,10,16,2,0,0,0,0,0,12,14,8,10,5,0,0,0,12,16,16,16,15,0,2 +0,0,6,15,16,10,0,0,0,3,16,11,15,10,0,0,0,4,10,10,16,4,0,0,0,0,4,16,15,3,0,0,0,0,1,9,16,15,2,0,0,0,0,0,3,16,6,0,0,0,3,9,12,16,5,0,0,0,9,16,16,8,0,0,3 +0,0,0,13,12,0,0,0,0,0,6,16,7,0,0,0,0,0,15,15,1,1,4,0,0,6,16,10,9,15,14,0,0,9,16,16,16,16,4,0,0,2,8,12,16,9,0,0,0,0,0,13,16,0,0,0,0,0,0,14,15,1,0,0,4 +0,0,15,16,16,15,9,0,0,6,16,13,12,12,11,2,0,3,15,14,2,0,0,0,0,0,6,16,5,0,0,0,0,0,0,14,11,0,0,0,0,0,0,12,8,0,0,0,0,1,14,14,10,0,0,0,0,0,13,16,3,0,0,0,5 +0,0,0,13,9,0,0,0,0,0,8,15,5,0,0,0,0,0,14,8,0,0,0,0,0,2,16,3,0,2,0,0,0,0,16,13,16,16,6,0,0,1,16,11,4,7,12,0,0,0,11,12,5,13,9,0,0,0,1,12,15,11,2,0,6 +0,0,4,12,16,10,0,0,0,0,15,12,12,11,0,0,0,0,1,0,9,7,0,0,0,0,4,7,15,13,7,0,0,6,16,16,15,10,3,0,0,1,4,12,7,0,0,0,0,0,2,16,2,0,0,0,0,0,6,12,0,0,0,0,7 +0,0,2,12,16,10,0,0,0,0,12,7,1,13,4,0,0,3,16,0,8,12,0,0,0,4,16,11,14,1,0,0,0,0,7,16,6,0,0,0,0,0,6,12,15,2,0,0,0,0,8,7,13,4,0,0,0,0,3,13,16,3,0,0,8 +0,0,4,14,16,15,1,0,0,5,16,8,4,16,7,0,0,8,13,0,4,16,12,0,0,7,16,15,16,13,3,0,0,0,6,12,16,4,0,0,0,0,0,11,12,0,0,0,0,0,0,16,7,0,0,0,0,0,3,16,2,0,0,0,9 
+0,0,5,15,12,8,0,0,0,0,13,16,10,13,3,0,0,5,16,9,0,8,4,0,0,4,13,1,0,4,8,0,0,4,8,0,0,8,4,0,0,1,14,0,0,11,3,0,0,0,12,9,9,15,0,0,0,0,4,14,15,4,0,0,0 +0,0,0,4,13,13,0,0,0,0,0,10,16,16,1,0,0,0,3,14,16,13,0,0,0,0,8,16,16,5,0,0,0,3,15,16,16,4,0,0,0,4,16,16,16,6,0,0,0,2,8,15,16,9,0,0,0,0,0,4,14,12,0,0,1 +0,1,13,16,16,4,0,0,0,9,15,6,13,8,0,0,0,5,10,0,12,10,0,0,0,0,0,6,16,5,0,0,0,0,5,16,10,0,0,0,0,2,16,11,0,0,0,0,0,5,16,13,8,8,5,0,0,1,10,14,16,16,16,0,2 +0,0,8,14,14,4,0,0,0,5,12,4,7,12,0,0,0,4,2,3,13,5,0,0,0,0,0,16,15,1,0,0,0,0,0,3,9,14,1,0,0,0,0,0,0,7,8,0,0,0,3,4,4,13,7,0,0,0,11,16,15,5,0,0,3 +0,0,1,11,3,0,0,0,0,0,9,16,0,0,0,0,0,1,16,5,0,1,2,0,0,6,16,2,1,13,10,0,0,7,16,9,15,13,0,0,0,2,9,12,16,1,0,0,0,0,0,14,9,0,0,0,0,0,2,16,7,0,0,0,4 +0,0,11,7,12,15,1,0,0,1,16,14,9,6,0,0,0,8,12,0,0,0,0,0,0,5,14,15,15,5,0,0,0,1,6,4,10,9,0,0,0,0,0,0,4,11,0,0,0,0,10,4,13,8,0,0,0,0,12,16,10,1,0,0,5 +0,0,4,12,1,0,0,0,0,0,14,13,0,0,0,0,0,2,16,3,0,0,0,0,0,7,13,0,0,0,0,0,0,7,12,7,12,6,2,0,0,4,15,15,12,13,11,0,0,1,13,16,5,11,12,0,0,0,5,13,16,11,1,0,6 +0,0,4,13,16,16,16,10,0,0,11,15,12,13,16,5,0,0,0,0,0,12,11,0,0,0,1,0,5,15,2,0,0,0,14,13,15,15,6,0,0,0,15,16,15,9,2,0,0,0,1,16,7,0,0,0,0,0,6,14,2,0,0,0,7 +0,0,2,11,16,12,2,0,0,0,11,7,4,7,8,0,0,5,14,4,0,8,4,0,0,2,15,9,6,11,0,0,0,0,3,16,11,0,0,0,0,0,9,13,11,0,0,0,0,0,12,10,16,1,0,0,0,0,2,12,16,3,0,0,8 +0,0,9,16,12,2,0,0,0,0,16,3,5,10,0,0,0,0,13,4,14,16,4,0,0,0,4,16,16,16,7,0,0,0,0,3,4,10,4,0,0,0,0,0,0,8,6,0,0,0,12,1,1,13,3,0,0,0,8,15,16,9,0,0,9 +0,0,2,12,15,12,1,0,0,1,14,14,14,11,8,0,0,5,16,3,0,2,8,0,0,8,14,0,0,6,8,0,0,4,12,0,0,9,4,0,0,1,16,1,1,14,1,0,0,0,11,9,11,8,0,0,0,0,2,13,14,1,0,0,0 +0,0,3,11,13,5,0,0,0,0,10,12,5,16,0,0,0,0,7,10,6,15,4,0,0,0,2,13,16,14,6,0,0,0,0,0,0,7,9,0,0,0,0,0,0,4,11,0,0,0,9,7,0,8,11,0,0,0,3,9,16,16,6,0,9 +0,0,6,8,8,13,3,0,0,1,14,14,12,9,3,0,0,4,16,8,2,0,0,0,0,4,16,13,15,7,0,0,0,0,0,0,3,15,0,0,0,0,0,0,3,15,0,0,0,0,6,8,13,8,0,0,0,0,9,15,8,0,0,0,5 +0,0,3,8,9,9,0,0,0,6,16,12,8,5,0,0,0,11,13,0,0,0,0,0,0,9,16,10,5,0,0,0,0,0,3,8,13,10,1,0,0,0,0,0,0,10,7,0,0,0,5,2,4,13,8,0,0,0,7,16,14,8,0,0,5 +0,0,0,8,15,2,0,0,0,0,6,16,5,0,0,0,0,0,12,8,0,0,0,0,0,0,13,6,0,0,0,0,0,0,12,12,16,14,0,0,0,0,14,15,6,8,11,0,0,3,12,14,5,10,13,0,0,0,0,9,16,13,5,0,6 +0,1,5,11,15,4,0,0,0,8,16,13,6,2,0,0,0,11,7,0,0,0,0,0,0,11,16,16,11,2,0,0,0,0,4,4,5,12,3,0,0,0,0,0,0,5,11,0,0,0,1,6,0,10,11,0,0,0,2,12,16,15,2,0,5 +0,0,9,16,15,14,1,0,0,1,15,15,5,10,7,0,0,6,16,1,0,1,8,0,0,8,13,0,0,4,8,0,0,7,6,0,0,6,6,0,0,5,9,0,0,13,1,0,0,0,16,5,12,12,0,0,0,0,8,15,10,1,0,0,0 +0,0,7,13,16,7,0,0,0,2,16,6,5,12,1,0,0,4,12,0,1,16,4,0,0,1,12,12,13,16,3,0,0,0,0,4,8,13,0,0,0,0,0,0,4,13,0,0,0,0,8,3,10,10,0,0,0,0,6,15,15,3,0,0,9 +0,0,4,15,14,10,1,0,0,0,7,9,0,9,8,0,0,0,11,9,2,13,7,0,0,0,4,15,14,4,0,0,0,0,6,15,15,1,0,0,0,4,14,1,13,7,0,0,0,7,13,1,5,13,0,0,0,0,7,14,16,16,1,0,8 +0,0,4,12,16,12,0,0,0,5,16,8,4,12,2,0,0,12,6,0,0,13,4,0,0,6,16,13,16,16,7,0,0,0,3,4,1,8,8,0,0,0,0,0,0,4,12,0,0,0,8,9,2,9,9,0,0,0,2,13,16,15,3,0,9 +0,0,5,12,15,10,1,0,0,2,14,7,4,9,7,0,0,7,15,7,0,9,8,0,0,1,5,15,11,13,3,0,0,0,3,15,16,5,0,0,0,0,15,9,12,7,0,0,0,0,15,5,8,12,0,0,0,0,4,14,16,11,0,0,8 +0,0,0,7,10,0,0,0,0,0,3,15,5,0,0,0,0,0,11,11,0,2,2,0,0,5,14,2,1,13,7,0,0,7,15,2,8,16,3,0,0,3,14,16,16,8,0,0,0,0,0,7,16,0,0,0,0,0,0,6,16,2,0,0,4 +0,0,7,12,9,0,0,0,0,0,12,16,16,1,0,0,0,0,11,16,16,0,0,0,0,0,12,16,16,0,0,0,0,0,13,16,16,0,0,0,0,0,13,16,16,0,0,0,0,1,14,16,16,1,0,0,0,0,4,12,12,9,0,0,1 +0,0,6,16,16,9,0,0,0,0,14,9,12,11,0,0,0,0,1,0,9,6,0,0,0,0,1,6,16,10,6,0,0,0,10,16,14,11,5,0,0,0,5,15,2,0,0,0,0,0,3,13,0,0,0,0,0,0,7,8,0,0,0,0,7 
+0,0,1,11,16,16,10,0,0,0,13,14,8,12,11,0,0,0,4,0,0,13,4,0,0,0,0,0,3,15,0,0,0,0,2,15,16,16,9,0,0,0,3,13,16,8,1,0,0,0,0,7,10,0,0,0,0,0,0,13,3,0,0,0,7 +0,0,7,15,16,12,0,0,0,4,16,11,12,12,0,0,0,2,7,1,13,11,0,0,0,0,0,13,16,6,0,0,0,0,0,11,15,16,3,0,0,0,0,0,1,15,8,0,0,0,5,16,12,15,8,0,0,0,7,16,16,13,2,0,3 +0,0,6,14,14,13,11,0,0,0,14,12,5,4,2,0,0,3,16,16,4,0,0,0,0,2,11,11,16,3,0,0,0,0,0,0,7,10,0,0,0,0,0,0,1,11,0,0,0,0,3,7,12,8,0,0,0,0,7,14,11,1,0,0,5 +0,0,0,10,13,0,0,0,0,0,0,13,16,5,0,0,0,0,0,16,16,4,0,0,0,0,3,16,16,7,0,0,0,0,7,16,16,9,0,0,0,0,9,16,16,10,0,0,0,0,10,16,16,14,0,0,0,0,1,5,7,15,8,0,1 +0,0,2,13,10,3,0,0,0,0,10,15,12,13,1,0,0,0,16,4,0,6,4,0,0,2,16,3,0,1,7,0,0,5,13,5,0,2,8,0,0,4,12,0,0,3,8,0,0,0,13,5,6,13,5,0,0,0,5,14,13,8,1,0,0 +0,0,5,13,13,5,0,0,0,0,16,16,10,15,3,0,0,5,16,2,1,8,4,0,0,4,13,0,0,4,8,0,0,8,12,0,0,6,7,0,0,5,15,0,0,7,7,0,0,0,16,8,5,15,3,0,0,0,5,14,15,9,0,0,0 +0,2,15,16,7,0,0,0,0,10,15,10,16,2,0,0,0,9,11,5,16,0,0,0,0,0,0,12,11,0,0,0,0,0,5,16,2,0,0,0,0,3,15,8,0,0,0,0,0,8,15,5,5,8,3,0,0,3,15,16,16,16,10,0,2 +0,0,5,15,15,2,0,0,0,3,16,9,16,5,0,0,0,5,9,1,16,1,0,0,0,0,0,10,9,0,0,0,0,0,1,16,3,0,0,0,0,0,9,9,0,0,0,0,0,0,11,14,7,6,2,0,0,0,6,16,16,15,2,0,2 +0,0,3,11,16,16,4,0,0,0,9,12,12,16,9,0,0,0,0,0,1,16,8,0,0,0,0,9,14,16,9,0,0,0,1,16,16,14,5,0,0,0,0,6,16,4,0,0,0,0,0,11,14,0,0,0,0,0,1,15,7,0,0,0,7 +0,0,4,13,16,14,2,0,0,2,15,5,4,14,4,0,0,8,15,6,1,15,1,0,0,4,16,16,13,10,0,0,0,0,1,9,16,10,0,0,0,0,4,14,3,14,6,0,0,0,9,10,3,13,8,0,0,0,3,15,16,11,1,0,8 +0,0,7,16,16,3,0,0,0,7,16,9,14,7,0,0,0,10,9,0,14,5,0,0,0,3,3,4,16,2,0,0,0,0,0,12,11,0,0,0,0,0,6,16,3,0,0,0,0,0,12,15,8,8,3,0,0,0,10,16,16,16,9,0,2 +0,0,4,13,11,7,0,0,0,0,14,16,13,16,2,0,0,5,16,4,0,5,7,0,0,8,14,0,0,4,8,0,0,6,9,0,0,4,8,0,0,2,14,1,0,8,6,0,0,0,13,12,9,15,2,0,0,0,3,16,12,5,0,0,0 +0,0,0,0,11,14,3,0,0,0,0,2,16,16,2,0,0,0,0,11,16,14,0,0,0,0,3,16,16,15,0,0,0,1,13,16,16,13,0,0,0,6,16,9,15,13,0,0,0,0,0,0,12,16,1,0,0,0,0,0,9,14,1,0,1 +0,2,11,16,12,1,0,0,0,9,16,9,16,4,0,0,0,14,7,4,16,1,0,0,0,6,5,9,14,0,0,0,0,0,3,16,5,0,0,0,0,0,13,14,0,0,0,0,0,3,16,11,8,12,9,0,0,2,15,16,16,13,16,1,2 +0,0,3,12,3,0,0,0,0,0,13,14,2,0,0,0,0,3,10,0,0,0,0,0,0,0,0,3,5,3,0,0,0,4,10,16,16,16,4,0,0,6,16,4,0,8,9,0,0,0,15,12,4,9,12,0,0,0,2,13,16,14,4,0,6 +0,2,11,16,15,2,0,0,0,12,15,12,16,4,0,0,0,3,3,6,16,2,0,0,0,0,2,15,12,0,0,0,0,0,3,16,16,12,1,0,0,0,0,1,6,15,10,0,0,0,6,12,8,14,11,0,0,1,16,16,16,11,3,0,3 +0,0,7,14,16,11,0,0,0,2,16,11,11,16,2,0,0,0,3,3,15,9,0,0,0,0,0,10,16,8,0,0,0,0,0,3,14,15,2,0,0,0,0,0,1,16,6,0,0,0,3,12,13,15,2,0,0,0,6,16,12,5,0,0,3 +0,0,5,16,16,16,16,11,0,0,6,9,5,5,15,8,0,0,0,0,0,9,11,1,0,0,0,3,6,16,3,0,0,0,5,16,16,16,7,0,0,0,4,8,16,4,1,0,0,0,1,13,10,0,0,0,0,0,3,16,2,0,0,0,7 +0,2,13,16,16,11,0,0,0,10,11,4,12,12,0,0,0,1,1,4,14,8,0,0,0,0,2,16,16,8,0,0,0,0,0,7,9,16,8,0,0,0,0,0,0,10,12,0,0,0,5,9,10,16,9,0,0,0,15,16,13,7,0,0,3 +0,1,10,16,16,6,0,0,0,7,14,9,12,12,0,0,0,1,1,5,15,5,0,0,0,0,3,16,14,3,0,0,0,0,1,11,14,16,6,0,0,0,0,0,0,12,11,0,0,0,7,8,13,16,5,0,0,0,15,16,12,5,0,0,3 +0,0,1,14,8,0,0,0,0,0,8,16,4,0,0,0,0,1,16,9,0,1,5,0,0,8,16,5,1,12,15,0,0,10,16,12,11,16,6,0,0,3,14,16,16,8,0,0,0,0,0,11,16,1,0,0,0,0,0,13,14,0,0,0,4 +0,0,2,12,8,0,0,0,0,0,12,13,5,0,0,0,0,1,16,1,0,0,0,0,0,2,14,0,0,0,0,0,0,2,16,5,10,10,4,0,0,0,16,14,8,6,13,0,0,0,13,9,2,4,14,0,0,0,3,10,16,16,7,0,6 +0,0,2,13,13,1,0,0,0,0,9,13,5,0,0,0,0,0,13,5,0,0,0,0,0,0,15,2,0,0,0,0,0,0,15,10,9,9,2,0,0,0,16,11,8,11,12,0,0,1,14,11,1,4,13,0,0,0,3,11,16,15,4,0,6 +0,0,1,13,4,0,0,0,0,0,5,15,2,0,0,0,0,0,12,7,0,0,0,0,0,0,14,6,0,0,0,0,0,0,16,12,15,15,7,0,0,0,14,14,6,4,14,1,0,0,9,14,3,4,14,2,0,0,1,7,14,16,11,0,6 
+0,0,4,16,6,0,0,0,0,0,12,15,1,0,0,0,0,1,16,11,0,0,0,0,0,8,16,3,0,7,4,0,0,12,16,6,11,16,7,0,0,7,16,16,15,3,0,0,0,0,4,16,10,0,0,0,0,0,4,16,6,0,0,0,4 +0,0,0,9,15,16,9,0,0,0,10,13,4,12,7,0,0,5,14,1,2,15,3,0,0,4,14,12,16,15,0,0,0,0,1,1,13,7,0,0,0,0,0,4,15,1,0,0,0,0,0,11,8,0,0,0,0,0,0,12,4,0,0,0,9 +0,0,1,15,11,1,0,0,0,0,2,16,16,7,0,0,0,0,6,16,16,5,0,0,0,0,8,16,16,4,0,0,0,0,9,16,13,0,0,0,0,0,11,16,13,0,0,0,0,0,11,16,11,0,0,0,0,0,1,14,16,3,0,0,1 +0,0,2,10,16,4,0,0,1,10,16,16,15,4,0,0,0,16,16,10,1,0,0,0,0,15,16,16,7,0,0,0,0,5,11,5,15,2,0,0,0,0,0,0,11,9,0,0,0,0,3,10,16,9,0,0,0,0,2,16,15,2,0,0,5 +0,0,5,8,11,5,0,0,0,0,13,16,12,12,0,0,0,1,16,9,0,9,3,0,0,3,16,6,0,6,6,0,0,3,11,1,0,5,6,0,0,0,12,0,0,11,6,0,0,0,14,5,12,15,1,0,0,0,6,16,13,2,0,0,0 +0,0,3,14,16,8,0,0,0,3,15,8,4,15,1,0,0,8,10,0,3,16,8,0,0,3,15,13,16,14,1,0,0,0,2,5,16,4,0,0,0,0,0,8,11,0,0,0,0,0,0,15,2,0,0,0,0,0,2,13,0,0,0,0,9 +0,0,5,8,12,16,4,0,0,3,16,11,7,1,0,0,0,3,14,6,4,0,0,0,0,5,16,12,14,6,0,0,0,0,2,0,4,12,0,0,0,0,0,0,4,10,0,0,0,0,6,8,14,7,0,0,0,0,7,13,7,0,0,0,5 +0,0,11,16,15,5,0,0,0,8,16,14,16,8,0,0,0,11,10,0,16,8,0,0,0,3,1,6,16,2,0,0,0,0,1,15,10,0,0,0,0,0,11,15,1,0,0,0,0,0,16,14,12,12,8,0,0,1,15,16,16,14,8,0,2 +0,0,4,13,15,9,0,0,0,4,14,6,5,16,0,0,0,7,12,2,2,16,0,0,0,4,16,15,14,7,0,0,0,0,9,16,16,5,0,0,0,0,10,6,8,15,2,0,0,0,11,9,4,13,11,0,0,0,2,14,16,15,6,0,8 +0,2,15,16,15,2,0,0,0,8,14,8,14,8,0,0,0,7,5,2,16,5,0,0,0,0,0,12,13,0,0,0,0,0,8,15,1,0,0,0,0,1,15,7,0,0,0,0,0,4,16,9,8,8,2,0,0,2,15,16,16,16,13,0,2 +0,0,3,11,16,10,0,0,0,0,10,16,10,14,6,0,0,0,15,7,0,11,8,0,0,3,16,2,0,8,8,0,0,4,12,0,0,9,8,0,0,6,15,1,0,12,8,0,0,3,15,10,8,15,4,0,0,0,5,12,14,9,0,0,0 +0,0,7,15,9,5,0,0,0,0,14,16,14,15,0,0,0,1,16,8,4,6,4,0,0,4,15,1,0,6,5,0,0,3,11,0,0,7,5,0,0,3,11,0,1,13,2,0,0,1,13,8,13,13,0,0,0,0,6,15,11,1,0,0,0 +0,0,0,2,13,9,0,0,0,0,0,7,16,15,0,0,0,0,0,11,16,7,0,0,0,0,0,15,16,2,0,0,0,0,7,16,14,0,0,0,0,0,13,16,14,0,0,0,0,0,8,15,16,0,0,0,0,0,0,3,13,8,0,0,1 +0,0,3,11,16,15,1,0,0,1,16,14,10,16,2,0,0,5,12,0,8,12,0,0,0,0,1,1,13,9,0,0,0,0,11,16,16,13,2,0,0,0,11,14,15,12,5,0,0,0,0,15,9,0,0,0,0,0,0,15,5,0,0,0,7 +0,0,0,4,15,1,0,0,0,0,0,13,8,1,0,0,0,0,6,12,0,0,0,0,0,0,8,10,0,0,0,0,0,0,13,12,11,10,0,0,0,4,16,15,8,11,10,0,0,1,7,15,4,3,12,0,0,0,0,7,15,16,8,0,6 +0,0,8,14,15,6,0,0,0,4,16,12,14,11,0,0,0,4,8,1,14,7,0,0,0,0,0,14,16,5,0,0,0,0,0,11,16,16,2,0,0,0,0,0,1,15,7,0,0,0,4,11,8,14,7,0,0,0,11,16,16,10,0,0,3 +0,0,10,16,16,7,0,0,0,6,15,9,14,12,0,0,0,3,5,0,13,8,0,0,0,0,0,10,13,0,0,0,0,0,2,16,4,0,0,0,0,0,12,8,0,0,0,0,0,0,16,13,11,8,3,0,0,0,12,16,16,16,5,0,2 +0,0,0,0,10,11,0,0,0,0,0,0,16,13,0,0,0,0,0,7,16,9,0,0,0,0,0,13,16,3,0,0,0,0,10,16,16,0,0,0,0,0,15,16,16,4,0,0,0,3,12,6,16,6,0,0,0,0,0,0,10,13,7,0,1 +0,0,3,13,16,11,0,0,0,0,14,12,14,16,1,0,0,0,6,0,7,15,0,0,0,0,0,0,12,11,0,0,0,0,8,13,16,14,4,0,0,5,16,16,14,12,4,0,0,0,3,16,3,0,0,0,0,0,8,13,0,0,0,0,7 +0,0,2,13,13,0,0,0,0,0,9,16,6,0,0,0,0,0,14,16,2,0,4,0,0,5,16,10,1,13,15,0,0,7,16,16,16,16,4,0,0,0,8,15,16,8,0,0,0,0,0,14,10,0,0,0,0,0,1,16,11,0,0,0,4 +0,0,3,11,0,0,0,0,0,0,12,11,0,0,0,0,0,1,14,1,0,0,0,0,0,2,15,0,0,0,0,0,0,4,15,15,16,15,2,0,0,1,16,8,4,8,11,0,0,1,16,11,7,10,12,0,0,0,5,10,12,15,7,0,6 +0,0,12,16,16,13,1,0,0,4,15,9,12,16,2,0,0,0,2,0,11,15,0,0,0,0,0,12,16,4,0,0,0,0,0,8,16,13,1,0,0,0,0,0,6,16,6,0,0,0,4,6,6,16,6,0,0,0,12,16,16,9,0,0,3 +0,0,0,12,15,2,0,0,0,0,0,13,16,8,0,0,0,0,0,14,16,9,0,0,0,0,0,13,16,6,0,0,0,0,7,16,16,3,0,0,0,1,16,16,16,1,0,0,0,0,8,16,16,2,0,0,0,0,0,9,16,7,0,0,1 +0,2,10,14,11,1,0,0,0,7,15,8,16,4,0,0,0,1,1,6,15,1,0,0,0,0,0,15,9,0,0,0,0,0,1,16,15,9,1,0,0,0,0,1,6,15,8,0,0,2,8,4,6,15,7,0,0,2,13,16,15,9,0,0,3 
+0,0,2,10,16,13,0,0,0,3,16,8,2,16,1,0,0,8,13,0,2,16,6,0,0,6,16,12,16,16,7,0,0,0,2,4,8,12,1,0,0,0,0,1,15,3,0,0,0,0,0,9,10,0,0,0,0,0,1,16,3,0,0,0,9 +0,0,1,14,7,0,0,0,0,0,6,16,16,1,0,0,0,0,7,16,15,0,0,0,0,0,6,16,15,0,0,0,0,0,6,16,15,0,0,0,0,0,4,16,16,1,0,0,0,0,4,16,16,6,0,0,0,0,1,15,16,8,0,0,1 +0,0,6,15,16,3,0,0,0,3,16,12,15,8,0,0,0,0,4,0,14,6,0,0,0,0,0,2,16,6,2,0,0,0,4,14,16,16,8,0,0,0,15,16,7,0,0,0,0,0,6,16,0,0,0,0,0,0,7,9,0,0,0,0,7 +0,0,0,11,16,2,0,0,0,0,7,16,8,2,0,0,0,0,15,7,0,0,0,0,0,0,16,4,3,1,0,0,0,1,16,16,16,16,4,0,0,0,14,12,4,6,12,0,0,0,10,16,5,10,15,0,0,0,2,11,16,12,8,0,6 +0,0,3,10,12,12,2,0,0,1,13,12,6,13,8,0,0,8,16,8,8,14,1,0,0,5,14,16,16,3,0,0,0,0,12,16,15,2,0,0,0,3,16,2,15,10,0,0,0,4,16,8,12,12,0,0,0,0,9,16,15,3,0,0,8 +0,0,0,9,11,0,0,0,0,0,2,15,8,0,0,0,0,0,11,15,1,3,8,0,0,6,16,4,0,14,12,0,0,12,16,4,11,16,5,0,0,9,16,16,16,11,0,0,0,0,6,11,16,7,0,0,0,0,0,10,16,4,0,0,4 +0,1,12,16,10,0,0,0,0,7,11,7,14,1,0,0,0,2,2,3,14,0,0,0,0,0,3,14,6,0,0,0,0,0,12,16,16,6,0,0,0,0,2,0,5,15,6,0,0,1,11,4,4,13,8,0,0,2,14,16,16,13,1,0,3 +0,0,3,16,11,0,0,0,0,0,0,16,16,6,0,0,0,0,0,13,16,7,0,0,0,0,0,11,16,10,0,0,0,0,0,12,16,6,0,0,0,0,3,16,16,2,0,0,0,0,5,16,15,0,0,0,0,0,3,13,15,0,0,0,1 +0,0,2,15,12,0,0,0,0,0,9,16,5,0,2,0,0,2,15,10,0,11,16,1,0,10,16,4,6,16,10,0,0,6,16,16,16,15,1,0,0,0,6,13,16,4,0,0,0,0,0,15,13,0,0,0,0,0,6,16,5,0,0,0,4 +0,0,3,14,7,0,0,0,0,0,14,16,14,9,0,0,0,4,16,16,11,15,3,0,0,5,15,6,0,4,8,0,0,8,8,0,0,4,8,0,0,5,11,0,0,6,6,0,0,0,13,10,5,15,5,0,0,0,2,12,14,8,0,0,0 +0,0,5,11,16,12,0,0,0,0,16,12,4,3,0,0,0,4,16,6,3,0,0,0,0,4,15,16,16,9,0,0,0,0,0,0,3,16,6,0,0,0,0,0,0,11,6,0,0,0,0,1,8,15,1,0,0,0,6,16,13,1,0,0,5 +0,0,6,13,16,10,0,0,0,4,13,5,4,16,0,0,0,0,0,1,10,8,0,0,0,0,0,12,16,3,0,0,0,0,0,3,7,15,1,0,0,0,0,0,0,9,8,0,0,0,8,6,3,11,7,0,0,0,4,14,16,11,1,0,3 +0,0,1,13,7,0,0,0,0,0,7,14,2,0,0,0,0,0,13,5,0,0,0,0,0,0,16,3,0,0,0,0,0,3,16,10,12,12,3,0,0,3,16,11,5,9,12,0,0,1,13,11,4,13,11,0,0,0,1,12,16,11,2,0,6 +0,0,3,11,15,13,2,0,0,2,15,11,8,14,7,0,0,8,14,0,2,13,2,0,0,3,13,16,16,15,1,0,0,0,0,0,14,5,0,0,0,0,0,7,14,0,0,0,0,0,1,15,4,0,0,0,0,0,2,16,1,0,0,0,9 +0,0,1,10,14,0,0,0,0,0,8,14,11,3,0,0,0,0,16,2,2,0,0,0,0,3,13,0,0,0,0,0,0,4,13,0,6,10,3,0,0,3,15,13,12,10,12,0,0,0,10,16,4,5,14,0,0,0,0,9,15,14,9,0,6 +0,0,8,16,15,8,0,0,0,1,16,16,16,2,0,0,0,2,16,16,10,0,0,0,0,2,16,16,12,0,0,0,0,6,16,16,13,0,0,0,0,1,16,16,11,0,0,0,0,0,16,16,10,0,0,0,0,0,7,15,15,0,0,0,1 +0,0,8,16,16,1,0,0,0,1,12,10,16,5,0,0,0,0,0,3,16,6,0,0,0,0,0,6,16,2,0,0,0,4,12,14,16,12,5,0,0,12,16,16,14,12,5,0,0,0,6,13,0,0,0,0,0,0,11,8,0,0,0,0,7 +0,2,12,13,16,15,1,0,0,8,16,14,11,7,0,0,0,8,16,7,0,0,0,0,0,7,16,16,11,1,0,0,0,0,2,6,15,9,0,0,0,0,0,0,9,15,0,0,0,0,1,7,16,11,0,0,0,1,16,16,13,1,0,0,5 +0,0,0,10,14,0,0,0,0,0,1,16,10,0,0,0,0,0,10,16,1,0,0,0,0,4,16,8,0,3,5,0,0,10,15,0,2,15,10,0,0,12,16,14,16,13,1,0,0,2,11,14,16,3,0,0,0,0,0,8,16,2,0,0,4 +0,0,0,7,12,0,0,0,0,0,3,14,6,0,0,0,0,0,11,8,0,0,1,0,0,4,16,3,1,10,10,0,0,8,16,12,14,13,3,0,0,2,12,10,16,5,0,0,0,0,0,6,16,0,0,0,0,0,0,6,13,0,0,0,4 +0,0,2,13,16,13,0,0,0,0,14,15,14,16,0,0,0,0,5,0,10,15,0,0,0,0,0,0,13,13,0,0,0,0,7,14,16,16,7,0,0,0,14,16,14,10,3,0,0,0,3,15,5,0,0,0,0,0,4,15,0,0,0,0,7 +0,0,7,12,12,2,0,0,0,5,15,6,10,9,0,0,0,11,4,0,11,6,0,0,0,3,0,2,15,2,0,0,0,0,1,13,6,0,0,0,0,0,11,11,1,0,0,0,0,1,16,7,4,4,2,0,0,0,11,12,13,14,11,0,2 +0,0,7,13,16,11,0,0,0,3,16,5,4,14,2,0,0,8,11,1,4,15,2,0,0,3,12,14,16,8,0,0,0,0,8,16,15,1,0,0,0,1,15,5,11,12,0,0,0,3,16,5,7,16,1,0,0,0,5,14,16,15,2,0,8 
+0,3,15,16,13,1,0,0,0,10,13,9,16,4,0,0,0,1,1,0,16,6,0,0,0,0,0,10,15,1,0,0,0,0,10,16,3,0,0,0,0,3,16,7,0,0,0,0,0,5,16,13,12,7,2,0,0,2,13,13,13,16,15,0,2 +0,3,13,16,9,0,0,0,0,10,15,13,15,2,0,0,0,15,4,4,16,1,0,0,0,0,0,5,16,2,0,0,0,0,1,14,13,0,0,0,0,0,10,16,5,0,0,0,0,4,16,13,8,10,9,1,0,2,16,16,14,12,9,1,2 +0,0,7,11,12,14,2,0,0,8,16,9,4,3,0,0,0,10,15,5,0,0,0,0,0,3,12,16,14,4,0,0,0,0,0,2,13,16,2,0,0,0,0,0,0,15,9,0,0,0,2,4,8,15,9,0,0,0,10,16,13,8,0,0,5 +0,0,1,9,16,16,3,0,0,0,14,11,8,16,8,0,0,0,4,0,0,15,6,0,0,0,0,0,7,16,3,0,0,0,6,12,16,16,9,0,0,1,16,14,16,5,0,0,0,0,2,8,16,0,0,0,0,0,0,12,7,0,0,0,7 +0,0,0,5,13,16,8,0,0,0,8,15,6,7,14,0,0,2,16,1,1,11,10,0,0,4,16,15,16,16,6,0,0,0,4,4,5,15,1,0,0,0,0,0,9,8,0,0,0,0,0,2,15,1,0,0,0,0,0,6,10,0,0,0,9 +0,4,10,15,16,16,14,0,0,11,16,14,8,5,2,0,0,6,16,8,0,0,0,0,0,0,10,15,1,0,0,0,0,0,2,16,5,0,0,0,0,0,0,13,10,0,0,0,0,5,10,14,10,0,0,0,0,3,16,15,3,0,0,0,5 +0,0,1,15,7,0,0,0,0,0,6,16,3,0,0,0,0,1,16,10,0,1,3,0,0,5,16,3,1,12,15,0,0,11,16,8,14,15,3,0,0,6,16,16,16,5,0,0,0,0,1,14,11,0,0,0,0,0,2,16,12,0,0,0,4 +0,0,4,11,14,4,0,0,0,5,13,4,9,7,0,0,0,7,10,10,13,2,0,0,0,1,9,16,15,2,0,0,0,0,8,7,9,12,0,0,0,0,12,0,1,14,5,0,0,0,11,6,0,7,8,0,0,0,2,15,16,15,4,0,8 +0,0,3,12,15,14,3,0,0,1,16,5,0,8,12,0,0,6,16,11,2,13,7,0,0,2,9,15,16,4,0,0,0,0,3,14,16,7,0,0,0,0,9,5,6,15,0,0,0,0,11,10,7,16,2,0,0,0,3,12,16,13,0,0,8 +0,0,0,6,16,2,0,0,0,0,2,15,15,0,0,0,0,0,15,16,3,2,3,0,0,7,16,7,3,15,11,0,0,7,16,14,14,16,5,0,0,1,7,12,16,10,0,0,0,0,0,7,16,4,0,0,0,0,0,10,15,0,0,0,4 +0,0,0,2,9,16,10,0,0,0,7,15,8,7,12,0,0,1,15,3,0,11,12,0,0,8,14,9,13,16,8,0,0,1,7,7,3,13,4,0,0,0,0,0,5,13,0,0,0,0,0,0,10,9,0,0,0,0,0,0,14,4,0,0,9 +0,0,5,11,4,1,0,0,0,0,15,16,16,11,0,0,0,2,16,9,2,12,4,0,0,6,13,0,0,6,6,0,0,3,13,0,0,5,9,0,0,3,16,0,0,6,8,0,0,0,13,12,8,16,7,0,0,0,4,13,12,10,0,0,0 +0,0,1,13,16,14,4,0,0,2,11,8,4,11,7,0,0,6,16,3,3,13,2,0,0,0,9,14,14,4,0,0,0,0,7,16,10,0,0,0,0,0,12,10,16,1,0,0,0,0,11,10,15,4,0,0,0,0,1,14,15,1,0,0,8 +0,0,0,9,15,12,1,0,0,1,11,12,5,15,4,0,0,6,14,0,0,13,7,0,0,5,16,12,12,16,4,0,0,0,3,8,14,8,0,0,0,0,0,2,15,1,0,0,0,0,0,9,10,0,0,0,0,0,0,10,9,0,0,0,9 +0,1,9,15,16,9,0,0,0,6,12,1,2,16,0,0,0,0,1,0,8,14,0,0,0,0,0,10,15,3,0,0,0,0,0,7,15,5,0,0,0,0,0,0,4,13,2,0,0,2,7,4,4,14,3,0,0,0,9,16,16,10,0,0,3 +0,0,9,15,5,0,0,0,0,3,15,15,16,4,0,0,0,10,14,0,9,14,0,0,0,8,12,0,0,12,5,0,0,8,8,0,0,10,8,0,0,5,14,0,0,12,8,0,0,0,16,7,12,16,4,0,0,0,9,16,15,7,0,0,0 +0,0,0,0,13,16,6,0,0,0,3,11,16,16,5,0,0,5,16,16,16,16,4,0,0,4,10,9,16,16,4,0,0,0,0,0,13,16,4,0,0,0,0,0,12,16,4,0,0,0,0,2,16,16,7,0,0,0,0,1,12,14,5,0,1 +0,0,7,14,5,0,0,0,0,5,16,16,11,0,0,0,0,2,14,2,14,0,0,0,0,0,0,5,16,0,0,0,0,0,0,8,16,0,0,0,0,0,0,11,12,0,0,0,0,0,11,16,14,8,10,0,0,0,7,12,12,12,15,2,2 +0,0,8,12,12,14,3,0,0,0,11,11,10,16,2,0,0,0,0,0,9,13,0,0,0,0,0,14,16,13,0,0,0,0,0,8,8,16,4,0,0,0,3,0,0,16,4,0,0,1,16,9,9,15,2,0,0,1,11,14,15,3,0,0,3 +0,0,0,2,13,1,0,0,0,0,0,9,15,2,0,0,0,0,4,16,16,8,0,0,0,0,12,9,14,6,0,0,0,5,14,0,13,7,1,0,0,9,15,12,16,16,4,0,0,2,8,9,16,10,1,0,0,0,0,1,13,2,0,0,4 +0,0,12,13,12,12,12,0,0,0,16,13,12,11,11,0,0,0,16,13,11,2,0,0,0,3,16,14,16,7,0,0,0,0,0,0,11,11,0,0,0,0,0,0,8,11,0,0,0,1,14,11,15,9,0,0,0,0,10,14,12,0,0,0,5 +0,0,1,11,14,0,0,0,0,0,9,16,12,0,0,0,0,1,16,7,0,0,0,0,0,7,16,5,5,4,0,0,0,7,16,16,16,16,5,0,0,0,16,13,4,13,7,0,0,0,9,16,14,16,4,0,0,0,1,11,14,9,0,0,6 +0,0,2,12,8,7,6,2,0,0,9,16,15,16,16,5,0,0,13,11,0,10,14,0,0,0,11,3,2,15,4,0,0,0,0,0,11,9,0,0,0,0,0,8,16,4,0,0,0,0,0,15,12,0,0,0,0,0,4,16,2,0,0,0,7 
+0,0,1,6,8,9,3,0,0,0,13,15,12,11,7,0,0,0,13,11,0,9,7,0,0,0,5,15,15,15,0,0,0,0,1,14,16,16,0,0,0,0,11,9,0,16,1,0,0,0,9,10,10,13,0,0,0,0,3,11,9,2,0,0,8 +0,0,7,14,13,8,0,0,0,1,15,13,14,14,0,0,0,0,13,13,13,16,3,0,0,0,4,14,13,16,4,0,0,0,0,0,0,12,4,0,0,0,0,0,0,14,4,0,0,7,16,9,10,15,2,0,0,1,8,13,15,8,0,0,9 +0,0,4,13,12,1,0,0,0,2,15,14,16,13,0,0,0,6,16,4,6,16,5,0,0,8,15,1,0,12,8,0,0,8,12,0,0,12,8,0,0,5,13,0,1,13,8,0,0,1,15,10,12,16,3,0,0,0,6,16,13,4,0,0,0 +0,0,1,8,16,7,0,0,0,0,3,16,16,12,0,0,0,1,15,16,16,12,0,0,0,3,12,15,16,12,0,0,0,0,0,8,16,10,0,0,0,0,0,11,16,14,0,0,0,0,0,11,16,16,1,0,0,0,0,5,13,7,0,0,1 +0,0,7,13,3,0,0,0,0,0,15,16,11,0,0,0,0,0,14,5,15,3,0,0,0,0,6,2,14,5,0,0,0,0,0,0,12,8,0,0,0,0,0,6,16,4,4,0,0,0,7,16,16,16,16,3,0,0,6,15,6,9,9,1,2 +0,0,10,16,10,0,0,0,0,8,16,14,16,2,0,0,0,3,15,8,16,3,0,0,0,0,0,11,16,7,0,0,0,0,0,3,10,15,2,0,0,0,10,0,0,14,8,0,0,1,16,6,8,13,8,0,0,1,15,16,13,10,1,0,3 +0,0,0,11,4,0,0,0,0,0,1,16,4,3,0,0,0,0,10,9,16,4,0,0,0,2,14,5,16,2,0,0,0,8,13,7,16,11,2,0,0,10,16,16,16,14,1,0,0,0,0,11,13,0,0,0,0,0,0,11,7,0,0,0,4 +0,1,8,8,9,13,8,0,0,2,16,16,16,14,9,0,0,3,16,2,0,0,0,0,0,5,16,16,15,1,0,0,0,5,10,8,15,5,0,0,0,0,0,0,13,7,0,0,0,0,15,12,16,2,0,0,0,0,11,16,9,0,0,0,5 +0,0,2,11,13,4,0,0,0,0,12,16,13,15,0,0,0,0,16,9,1,3,0,0,0,4,16,6,14,9,1,0,0,7,16,16,16,16,6,0,0,1,16,14,4,16,8,0,0,0,12,16,13,16,2,0,0,0,2,10,16,7,0,0,6 +0,0,1,8,8,9,12,7,0,0,8,16,12,13,16,5,0,0,11,6,0,8,11,0,0,0,15,3,1,15,3,0,0,0,1,0,10,9,0,0,0,0,0,3,13,1,0,0,0,0,0,13,7,0,0,0,0,0,1,11,1,0,0,0,7 +0,0,5,11,8,7,0,0,0,3,16,11,9,16,4,0,0,0,14,3,7,15,0,0,0,0,5,15,15,3,0,0,0,0,10,15,14,1,0,0,0,4,13,0,9,7,0,0,0,3,11,5,13,7,0,0,0,0,6,10,6,0,0,0,8 +0,0,5,16,11,0,0,0,0,0,12,13,13,11,0,0,0,0,13,8,6,16,0,0,0,0,7,14,16,16,4,0,0,0,0,7,8,14,7,0,0,0,4,0,0,8,12,0,0,1,15,11,8,13,11,0,0,0,5,11,12,14,3,0,9 +0,0,3,12,11,1,0,0,0,1,14,14,15,8,0,0,0,3,16,2,5,16,1,0,0,4,16,0,0,14,6,0,0,4,16,0,0,11,8,0,0,3,16,2,0,10,8,0,0,0,10,15,13,16,3,0,0,0,1,15,14,6,0,0,0 +0,0,0,5,15,13,1,0,0,0,2,14,16,16,4,0,0,0,8,16,16,16,4,0,0,6,15,16,16,16,1,0,0,3,7,10,16,16,4,0,0,0,0,11,16,16,1,0,0,0,0,12,16,16,1,0,0,0,0,6,14,12,1,0,1 +0,0,9,11,2,0,0,0,0,8,16,14,12,0,0,0,0,9,10,5,15,0,0,0,0,5,10,4,16,1,0,0,0,0,0,5,15,0,0,0,0,0,2,13,9,2,1,0,0,0,10,16,15,14,15,0,0,0,7,9,9,12,4,0,2 +0,0,3,13,13,1,0,0,0,0,10,15,16,7,0,0,0,0,5,3,15,10,0,0,0,0,0,0,16,15,1,0,0,2,7,0,4,16,8,0,0,5,13,0,0,14,9,0,0,0,14,11,9,16,8,0,0,0,3,12,13,8,0,0,3 +0,0,0,4,12,0,0,0,0,0,0,12,16,8,0,0,0,0,4,16,15,8,0,0,0,1,15,8,14,7,0,0,0,6,16,8,14,14,4,0,0,10,16,16,16,13,1,0,0,0,0,2,16,4,0,0,0,0,0,4,13,2,0,0,4 +0,0,6,8,11,14,14,0,0,1,16,16,13,12,7,0,0,0,16,7,1,0,0,0,0,4,16,16,15,1,0,0,0,1,5,6,13,9,0,0,0,0,6,0,12,9,0,0,0,0,12,10,16,4,0,0,0,0,10,14,8,0,0,0,5 +0,0,1,11,14,6,0,0,0,0,4,16,14,4,0,0,0,0,12,16,2,0,0,0,0,2,16,16,12,5,0,0,0,7,16,16,16,16,3,0,0,3,15,5,0,15,13,0,0,0,11,16,14,16,10,0,0,0,0,12,13,9,1,0,6 +0,0,2,10,10,12,15,10,0,0,9,16,12,8,15,6,0,0,13,9,0,4,12,1,0,1,16,3,1,13,2,0,0,0,5,0,9,7,0,0,0,0,0,3,13,1,0,0,0,0,0,12,7,0,0,0,0,0,0,14,2,0,0,0,7 +0,0,1,11,14,5,0,0,0,1,16,14,6,13,1,0,0,9,14,2,0,16,4,0,0,5,13,0,6,16,1,0,0,1,15,16,16,12,0,0,0,0,5,14,3,13,4,0,0,0,3,15,7,16,1,0,0,0,0,11,16,8,0,0,8 +0,0,3,14,10,3,0,0,0,0,10,14,13,15,1,0,0,0,12,4,4,16,4,0,0,0,6,15,15,16,8,0,0,0,1,8,8,14,8,0,0,0,2,0,0,9,11,0,0,0,16,10,8,12,12,0,0,0,7,12,14,14,6,0,9 +0,0,4,14,9,0,0,0,0,0,13,16,16,10,0,0,0,4,16,4,5,16,6,0,0,8,14,0,1,15,5,0,0,6,16,0,0,13,4,0,0,4,15,1,7,16,1,0,0,2,15,14,16,7,0,0,0,0,4,13,9,0,0,0,0 
+0,0,5,14,11,1,0,0,0,0,16,10,14,13,0,0,0,0,14,2,8,16,6,0,0,0,12,7,10,16,8,0,0,0,5,16,16,15,8,0,0,1,3,1,2,11,9,0,0,1,15,6,4,12,11,0,0,0,6,16,14,12,3,0,9 +0,1,14,13,12,8,5,0,0,4,16,11,12,15,7,0,0,8,16,16,13,1,0,0,0,3,9,7,15,7,0,0,0,0,0,0,8,9,0,0,0,0,2,0,6,12,0,0,0,0,16,8,12,11,0,0,0,0,12,14,12,4,0,0,5 +0,1,8,12,16,16,7,0,0,7,16,12,12,12,5,0,0,4,13,3,0,0,0,0,0,4,16,16,13,0,0,0,0,2,8,6,15,6,0,0,0,0,7,0,9,12,0,0,0,0,16,11,13,12,0,0,0,0,5,13,12,5,0,0,5 +0,0,1,9,14,11,1,0,0,0,10,15,9,13,5,0,0,3,16,7,0,0,0,0,0,5,16,16,16,10,0,0,0,7,16,11,10,16,5,0,0,2,16,5,0,12,8,0,0,0,10,15,13,16,5,0,0,0,0,9,12,7,0,0,6 +0,0,11,10,12,14,11,0,0,0,16,16,16,16,7,0,0,1,16,16,16,12,0,0,0,1,5,2,11,15,0,0,0,0,1,0,2,16,0,0,0,3,12,0,3,15,0,0,0,6,15,8,13,11,0,0,0,0,9,14,9,2,0,0,5 +0,0,10,15,6,0,0,0,0,2,16,14,16,7,0,0,0,4,16,8,5,16,1,0,0,5,12,0,0,12,8,0,0,8,9,0,0,12,8,0,0,5,12,0,1,15,3,0,0,4,13,4,12,13,0,0,0,0,9,16,13,4,0,0,0 +0,0,9,16,11,1,0,0,0,5,16,10,16,9,0,0,0,6,14,1,9,15,0,0,0,1,15,6,11,16,2,0,0,0,7,16,15,16,7,0,0,0,0,3,1,11,9,0,0,3,14,9,9,14,12,0,0,0,12,16,16,13,3,0,9 +0,0,4,15,13,3,0,0,0,1,16,13,16,15,1,0,0,6,15,0,4,16,4,0,0,3,15,14,16,14,0,0,0,0,4,16,16,11,0,0,0,0,11,12,8,16,5,0,0,0,16,10,12,16,3,0,0,0,7,16,13,7,0,0,8 +0,1,7,12,13,3,0,0,0,7,13,6,15,14,0,0,0,6,10,0,13,16,0,0,0,1,13,13,15,16,1,0,0,0,0,4,1,12,8,0,0,0,0,0,0,12,8,0,0,0,12,13,5,14,8,0,0,0,5,12,16,11,1,0,9 +0,0,5,12,9,1,0,0,0,0,16,9,15,9,0,0,0,2,14,1,10,12,0,0,0,0,9,14,16,11,0,0,0,0,3,15,16,9,0,0,0,0,12,10,3,13,1,0,0,0,11,8,5,16,3,0,0,0,4,10,8,3,0,0,8 +0,0,0,5,14,0,0,0,0,0,0,13,14,12,0,0,0,0,7,13,6,13,0,0,0,2,16,3,10,11,0,0,0,6,16,13,16,16,5,0,0,2,8,9,16,11,2,0,0,0,0,3,16,0,0,0,0,0,0,7,12,0,0,0,4 +0,0,0,1,12,7,0,0,0,0,0,9,16,16,1,0,0,1,7,15,16,14,0,0,0,4,16,16,16,16,0,0,0,0,0,3,16,16,0,0,0,0,0,2,16,16,3,0,0,0,0,6,16,16,0,0,0,0,0,3,15,13,0,0,1 +0,0,1,8,11,13,15,3,0,0,7,16,10,10,16,5,0,1,13,3,0,9,14,0,0,3,15,0,2,15,4,0,0,5,8,0,10,11,0,0,0,0,0,2,15,4,0,0,0,0,0,8,14,0,0,0,0,0,0,13,7,0,0,0,7 +0,0,6,12,12,15,16,6,0,2,15,16,14,16,15,3,0,3,16,6,6,16,6,0,0,7,15,4,14,11,0,0,0,1,2,8,15,3,0,0,0,0,1,16,9,0,0,0,0,0,6,16,4,0,0,0,0,0,8,16,3,0,0,0,7 +0,0,6,15,14,2,0,0,0,5,16,11,14,12,0,0,0,5,11,3,16,5,0,0,0,0,0,14,16,7,0,0,0,0,0,8,10,16,3,0,0,1,4,0,0,12,7,0,0,7,16,5,6,16,5,0,0,1,8,15,16,12,1,0,3 +0,0,3,8,9,11,14,1,0,0,9,16,16,16,13,0,0,0,16,5,8,0,0,0,0,4,16,16,16,6,0,0,0,2,9,2,9,10,0,0,0,0,0,0,8,8,0,0,0,0,8,12,13,5,0,0,0,0,5,13,10,1,0,0,5 +0,0,0,5,16,8,0,0,0,0,4,16,16,8,0,0,0,2,15,16,16,8,0,0,0,4,8,12,16,5,0,0,0,0,0,6,16,11,0,0,0,0,0,6,16,12,0,0,0,0,0,6,16,15,1,0,0,0,0,4,15,11,2,0,1 +0,0,2,15,10,1,0,0,0,2,13,12,14,9,0,0,0,6,16,1,1,14,2,0,0,8,16,0,0,10,5,0,0,8,14,2,0,8,8,0,0,6,14,0,0,8,8,0,0,1,14,12,8,15,6,0,0,0,3,13,16,8,1,0,0 +0,0,2,15,8,0,0,0,0,0,7,14,15,8,0,0,0,7,15,3,3,15,0,0,0,6,16,1,0,9,8,0,0,4,12,0,0,8,8,0,0,0,12,3,0,12,7,0,0,0,9,13,13,15,1,0,0,0,1,9,12,5,0,0,0 +0,0,11,15,4,0,0,0,0,5,16,15,15,0,0,0,0,0,14,11,16,2,0,0,0,0,0,4,16,5,0,0,0,0,0,4,16,6,0,0,0,0,0,7,16,10,3,0,0,0,11,16,16,16,16,6,0,0,11,16,10,5,13,6,2 +0,0,12,15,3,0,0,0,0,6,15,12,14,0,0,0,0,7,5,1,16,2,0,0,0,0,0,0,11,7,0,0,0,0,0,2,15,3,0,0,0,0,0,8,14,1,0,0,0,0,12,16,12,8,5,0,0,0,9,8,13,15,7,0,2 +0,0,2,12,12,12,9,2,0,0,9,15,12,13,16,5,0,0,12,8,0,8,10,0,0,1,16,3,3,15,2,0,0,1,3,0,12,7,0,0,0,0,0,4,13,0,0,0,0,0,0,13,9,0,0,0,0,0,3,15,3,0,0,0,7 +0,0,3,9,14,7,0,0,0,3,15,11,8,15,2,0,0,4,16,5,2,16,7,0,0,0,4,15,13,16,7,0,0,0,0,6,16,16,1,0,0,0,2,15,8,16,7,0,0,0,4,16,4,15,7,0,0,0,0,10,15,10,0,0,8 
+0,0,7,13,2,0,0,0,0,11,15,12,13,0,0,0,0,12,7,0,16,4,0,0,0,4,4,0,14,8,0,0,0,0,0,0,14,7,0,0,0,0,0,4,16,3,0,0,0,0,12,16,16,12,9,0,0,0,9,12,8,10,14,0,2 +0,0,4,15,12,2,0,0,0,2,15,7,11,10,0,0,0,4,16,0,0,15,1,0,0,6,10,0,0,10,8,0,0,8,8,0,0,6,8,0,0,5,12,0,0,11,8,0,0,2,16,7,8,16,2,0,0,0,6,15,16,8,0,0,0 +0,0,7,10,0,0,0,0,0,0,9,16,0,0,0,0,0,0,6,16,5,0,0,0,0,0,9,16,9,0,0,0,0,0,0,6,14,1,0,0,0,0,0,2,16,4,0,0,0,0,1,4,14,12,4,1,0,0,7,16,16,16,16,5,1 +0,0,7,15,6,0,0,0,0,4,16,9,14,3,0,0,0,2,14,0,13,6,0,0,0,0,2,0,11,10,0,0,0,0,0,0,13,6,0,0,0,0,0,5,15,7,0,0,0,0,9,16,16,16,15,0,0,0,6,15,7,4,6,1,2 +0,0,0,11,12,1,0,0,0,0,8,16,9,4,0,0,0,0,3,4,0,0,0,0,0,1,0,2,8,2,0,0,0,5,16,16,16,14,2,0,0,2,16,9,3,13,7,0,0,0,11,14,7,16,9,0,0,0,1,10,14,10,2,0,6 +0,0,3,10,13,7,0,0,0,1,14,13,15,14,0,0,0,0,15,5,14,9,0,0,0,0,0,10,16,5,0,0,0,0,0,7,14,16,3,0,0,4,5,0,2,16,4,0,0,10,16,10,8,16,3,0,0,0,5,12,14,8,0,0,3 +0,0,3,14,13,0,0,0,0,2,16,9,16,2,0,0,0,4,12,3,16,0,0,0,0,2,9,15,16,10,1,0,0,0,0,11,8,16,6,0,0,0,6,0,0,12,8,0,0,0,14,10,5,16,7,0,0,0,3,13,16,11,1,0,3 +0,0,0,10,12,15,16,13,0,0,6,15,6,4,14,9,0,0,10,6,0,3,14,2,0,1,14,1,0,12,6,0,0,0,3,0,5,13,0,0,0,0,0,1,13,3,0,0,0,0,0,6,13,0,0,0,0,0,0,14,6,0,0,0,7 +0,0,4,13,14,2,0,0,0,0,15,10,11,10,0,0,0,3,15,2,12,6,0,0,0,0,3,8,16,7,0,0,0,0,0,4,9,16,2,0,0,0,10,3,0,13,6,0,0,0,16,5,7,16,3,0,0,0,7,13,13,8,0,0,3 +0,0,5,14,9,0,0,0,0,1,16,13,16,0,0,0,0,2,13,10,14,0,0,0,0,0,4,16,16,7,0,0,0,0,2,4,5,16,4,0,0,0,0,0,0,14,7,0,0,0,11,8,8,16,4,0,0,0,8,13,15,10,0,0,3 +0,0,0,5,8,0,0,0,0,0,1,15,10,5,0,0,0,0,9,11,10,10,0,0,0,2,15,2,14,6,0,0,0,8,13,5,14,13,4,0,0,11,16,16,16,14,3,0,0,0,0,3,16,0,0,0,0,0,0,7,10,0,0,0,4 +0,0,0,12,15,1,0,0,0,0,4,16,13,1,0,0,0,1,14,15,2,0,0,0,0,4,16,15,10,7,0,0,0,7,16,16,12,16,6,0,0,3,16,13,0,16,12,0,0,0,11,16,13,16,12,0,0,0,1,9,13,12,4,0,6 +0,0,2,15,13,3,0,0,0,0,12,16,9,4,0,0,0,3,16,9,0,0,0,0,0,3,16,14,12,5,0,0,0,8,16,16,16,16,0,0,0,5,16,15,6,16,9,0,0,1,13,14,13,16,3,0,0,0,3,12,14,10,0,0,6 +0,0,2,12,16,6,0,0,0,0,5,16,12,4,0,0,0,0,15,14,0,0,0,0,0,2,16,14,8,8,0,0,0,4,16,16,13,15,8,0,0,4,16,11,1,12,12,0,0,0,11,16,12,14,15,0,0,0,1,8,12,12,6,0,6 +0,0,0,4,9,0,0,0,0,0,0,12,11,0,0,0,0,0,4,13,16,4,0,0,0,0,12,6,14,4,0,0,0,4,16,9,15,13,3,0,0,4,12,12,16,14,6,0,0,0,0,0,16,4,0,0,0,0,0,2,16,3,0,0,4 +0,0,10,16,13,5,0,0,0,7,13,5,14,12,0,0,0,9,10,0,13,14,0,0,0,4,15,13,16,15,3,0,0,0,2,6,3,12,8,0,0,0,0,0,0,12,8,0,0,0,12,6,5,15,4,0,0,0,9,13,16,8,0,0,9 +0,0,0,2,16,15,3,0,0,0,0,8,16,16,4,0,0,0,9,16,16,14,0,0,0,7,16,16,16,12,0,0,0,0,0,8,16,12,0,0,0,0,0,7,16,12,0,0,0,0,0,4,16,16,7,0,0,0,0,0,13,16,7,0,1 +0,0,4,12,16,16,11,2,0,0,15,13,8,11,8,1,0,2,15,13,16,8,0,0,0,6,16,13,13,16,2,0,0,7,11,2,2,16,6,0,0,0,0,0,5,15,2,0,0,0,9,6,13,10,0,0,0,0,7,14,13,1,0,0,5 +0,0,3,14,10,1,0,0,0,2,14,12,15,14,1,0,0,6,13,0,3,14,8,0,0,5,12,0,0,11,8,0,0,4,14,0,0,12,7,0,0,1,14,4,3,16,3,0,0,0,8,12,12,12,0,0,0,0,2,15,10,3,0,0,0 +0,0,7,14,10,7,0,0,0,5,16,14,16,14,0,0,0,7,11,0,9,14,1,0,0,4,14,7,11,16,5,0,0,0,9,15,15,12,8,0,0,0,0,1,1,8,9,0,0,0,14,11,10,15,9,0,0,0,9,13,13,9,0,0,9 +0,0,9,13,14,15,13,0,0,0,16,13,12,12,5,0,0,4,16,0,0,0,0,0,0,8,16,16,11,1,0,0,0,1,7,8,16,12,0,0,0,0,0,0,9,13,0,0,0,0,12,8,12,10,0,0,0,0,10,16,13,3,0,0,5 +0,0,10,13,2,0,0,0,0,0,12,16,10,0,0,0,0,0,7,14,16,0,0,0,0,0,0,2,16,1,0,0,0,0,0,2,16,1,0,0,0,0,4,8,15,0,0,0,0,0,16,16,16,13,11,0,0,0,8,14,8,11,14,1,2 +0,0,1,7,6,11,1,0,0,0,13,11,15,16,7,0,0,0,13,6,11,16,4,0,0,0,3,15,16,7,0,0,0,0,4,15,14,7,0,0,0,1,14,3,1,13,0,0,0,2,12,2,3,12,0,0,0,0,1,10,8,1,0,0,8 
+0,0,13,14,3,0,0,0,0,4,16,15,11,0,0,0,0,7,12,4,16,0,0,0,0,3,6,4,16,0,0,0,0,0,0,9,12,0,0,0,0,0,1,14,7,0,0,0,0,1,14,16,10,10,2,0,0,0,11,12,14,14,6,0,2 +0,0,4,15,12,1,0,0,0,0,12,11,13,13,1,0,0,3,12,0,0,14,6,0,0,8,12,0,0,11,8,0,0,8,12,0,0,8,8,0,0,6,13,0,0,11,7,0,0,4,16,7,10,15,2,0,0,0,7,13,12,2,0,0,0 +0,0,3,16,9,0,0,0,0,4,15,15,16,7,0,0,0,8,16,3,7,12,0,0,0,6,16,3,0,13,3,0,0,8,10,0,0,12,8,0,0,1,15,2,0,9,11,0,0,0,13,14,10,15,12,0,0,0,3,10,16,14,3,0,0 +0,0,0,5,16,9,0,0,0,0,1,13,16,6,0,0,0,0,13,16,16,4,0,0,0,5,15,16,16,5,0,0,0,0,0,10,16,7,0,0,0,0,0,9,16,8,0,0,0,0,0,9,16,13,0,0,0,0,0,5,14,9,0,0,1 +0,0,1,11,12,13,14,5,0,0,7,15,11,10,16,6,0,0,10,7,0,2,16,2,0,1,16,1,0,12,8,0,0,2,11,0,4,14,1,0,0,0,0,1,14,4,0,0,0,0,0,8,14,0,0,0,0,0,0,15,7,0,0,0,7 +0,0,0,9,14,4,0,0,0,0,6,16,12,4,0,0,0,1,16,11,0,0,0,0,0,2,16,7,3,0,0,0,0,4,16,14,16,10,0,0,0,3,16,15,10,16,6,0,0,0,12,16,7,13,9,0,0,0,1,11,16,16,9,0,6 +0,0,7,13,11,0,0,0,0,6,16,11,16,0,0,0,0,3,8,5,16,0,0,0,0,0,3,15,16,6,0,0,0,0,0,7,8,16,5,0,0,6,8,0,0,11,9,0,0,0,16,6,6,14,6,0,0,0,6,15,16,10,0,0,3 +0,0,11,11,2,0,0,0,0,3,16,16,8,0,0,0,0,6,12,8,8,0,0,0,0,0,5,6,12,0,0,0,0,0,0,11,9,0,0,0,0,0,0,15,6,3,5,0,0,0,13,16,13,15,9,0,0,1,12,12,12,12,1,0,2 +0,0,0,6,16,6,0,0,0,0,0,13,16,10,0,0,0,0,9,16,16,6,0,0,0,3,16,16,16,4,0,0,0,5,16,16,16,6,0,0,0,0,0,9,16,10,0,0,0,0,0,8,16,15,0,0,0,0,0,4,13,11,2,0,1 +0,0,6,15,8,10,12,2,0,0,10,14,10,12,16,1,0,0,9,10,1,13,7,0,0,0,4,4,8,12,1,0,0,0,0,2,15,5,0,0,0,0,0,11,8,0,0,0,0,0,3,16,0,0,0,0,0,0,7,14,0,0,0,0,7 +0,0,0,0,7,6,0,0,0,0,0,6,15,6,0,0,0,0,1,15,5,14,3,0,0,0,12,8,4,16,0,0,0,5,16,9,10,16,4,0,0,1,11,12,14,14,4,0,0,0,0,0,8,8,0,0,0,0,0,0,11,7,0,0,4 +0,0,0,9,14,2,0,0,0,0,2,16,12,0,0,0,0,0,10,16,7,0,0,0,0,0,16,16,16,8,0,0,0,4,16,14,8,15,3,0,0,1,15,6,0,11,11,0,0,0,10,15,7,12,16,0,0,0,1,9,15,15,10,0,6 +0,0,6,15,11,0,0,0,0,6,16,13,16,0,0,0,0,2,7,13,9,0,0,0,0,0,3,16,15,5,0,0,0,0,0,3,11,16,2,0,0,0,0,0,0,16,7,0,0,0,16,11,10,16,7,0,0,0,5,16,15,9,0,0,3 +0,0,0,0,12,8,0,0,0,0,0,3,16,16,0,0,0,0,1,13,16,11,0,0,0,3,15,16,16,8,0,0,0,0,3,7,16,8,0,0,0,0,0,4,16,8,0,0,0,0,0,4,16,14,0,0,0,0,0,0,14,15,5,0,1 +0,0,9,10,2,0,0,0,0,8,16,16,10,0,0,0,0,7,7,4,16,2,0,0,0,0,0,8,16,5,0,0,0,0,0,10,16,14,2,0,0,0,0,0,2,14,7,0,0,0,11,10,4,11,12,0,0,0,8,14,16,15,6,0,3 +0,0,4,10,13,3,0,0,0,4,16,13,16,8,0,0,0,5,15,0,14,11,0,0,0,3,15,15,16,16,1,0,0,0,5,9,8,14,8,0,0,0,0,0,0,12,8,0,0,0,9,15,10,14,7,0,0,0,4,12,14,11,2,0,9 +0,0,0,7,16,16,7,0,0,0,0,14,16,16,4,0,0,2,13,16,16,12,0,0,0,7,16,16,16,12,0,0,0,0,0,10,16,8,0,0,0,0,0,11,16,13,0,0,0,0,0,10,16,16,2,0,0,0,0,9,16,12,2,0,1 +0,0,4,6,11,14,6,0,0,4,16,16,12,16,7,0,0,6,16,2,1,16,3,0,0,5,16,0,5,14,0,0,0,0,2,0,11,10,0,0,0,0,0,2,15,4,0,0,0,0,0,8,16,0,0,0,0,0,0,7,12,0,0,0,7 +0,0,0,6,14,8,0,0,0,0,8,16,12,8,0,0,0,3,16,14,3,0,0,0,0,6,16,16,16,11,1,0,0,8,16,13,4,14,5,0,0,2,16,9,0,8,12,0,0,0,10,15,6,13,9,0,0,0,0,8,14,16,9,0,6 +0,0,4,6,11,5,0,0,0,2,14,7,2,15,0,0,0,4,8,0,0,10,2,0,0,0,14,8,8,13,1,0,0,0,15,10,16,7,0,0,0,1,10,0,1,10,4,0,0,0,12,2,0,6,8,0,0,0,6,10,11,7,1,0,8 +0,0,0,5,9,0,0,0,0,0,2,14,14,2,0,0,0,0,6,14,11,12,0,0,0,3,15,3,11,10,0,0,0,8,11,0,13,10,2,0,0,10,16,16,16,15,3,0,0,0,4,10,15,0,0,0,0,0,0,8,9,0,0,0,4 +0,2,13,16,4,0,0,0,0,12,12,12,15,0,0,0,0,5,2,7,14,0,0,0,0,0,3,15,15,7,0,0,0,0,4,12,12,16,3,0,0,0,0,0,0,15,7,0,0,3,16,8,9,16,6,0,0,1,11,12,14,9,0,0,3 +0,0,0,7,16,12,0,0,0,0,7,16,16,12,0,0,0,3,16,16,16,8,0,0,0,7,16,16,16,8,0,0,0,0,0,11,16,12,0,0,0,0,0,7,16,15,0,0,0,0,0,6,16,16,5,0,0,0,0,6,15,15,2,0,1 
+0,0,0,7,6,0,0,0,0,0,1,15,16,7,0,0,0,0,8,13,15,6,0,0,0,3,16,3,12,7,1,0,0,4,14,9,15,16,8,0,0,4,12,12,16,10,2,0,0,0,0,5,16,0,0,0,0,0,0,4,14,0,0,0,4 +0,0,3,11,6,0,0,0,0,0,10,14,16,2,0,0,0,3,15,1,11,11,0,0,0,4,12,0,2,16,2,0,0,7,12,0,0,12,8,0,0,4,14,0,1,15,8,0,0,2,15,14,15,15,1,0,0,0,5,13,14,5,0,0,0 +0,2,16,12,12,14,7,0,0,3,16,9,8,8,4,0,0,2,16,10,4,0,0,0,0,4,16,13,16,4,0,0,0,0,3,0,12,10,0,0,0,6,3,0,9,11,0,0,0,11,11,9,16,3,0,0,0,3,12,15,7,0,0,0,5 +0,0,6,14,10,0,0,0,0,6,16,14,16,0,0,0,0,5,10,11,16,0,0,0,0,0,0,9,16,12,0,0,0,0,0,0,3,16,7,0,0,4,6,0,3,16,8,0,0,5,15,9,16,13,1,0,0,0,9,15,8,0,0,0,3 +0,0,0,6,14,1,0,0,0,0,1,16,10,0,0,0,0,0,13,14,1,0,0,0,0,2,16,12,10,3,0,0,0,5,16,15,14,16,1,0,0,3,16,12,0,15,8,0,0,0,11,16,9,16,8,0,0,0,0,11,15,11,1,0,6 +0,0,6,12,11,0,0,0,0,2,16,14,14,11,0,0,0,8,15,1,8,16,0,0,0,3,15,5,11,16,5,0,0,0,11,16,15,14,8,0,0,0,4,2,3,6,12,0,0,2,16,13,10,14,12,0,0,0,8,12,13,13,5,0,9 +0,0,0,11,16,12,1,0,0,0,5,16,10,16,4,0,0,2,15,10,0,8,1,0,0,5,16,9,1,0,0,0,0,8,16,16,9,0,0,0,0,2,16,10,16,6,0,0,0,0,11,16,16,7,0,0,0,0,1,8,13,0,0,0,6 +0,0,0,4,14,14,4,0,0,0,1,15,16,16,2,0,0,2,13,16,16,16,0,0,0,8,16,16,16,16,0,0,0,2,7,8,16,16,1,0,0,0,0,4,16,16,0,0,0,0,0,4,16,16,2,0,0,0,0,4,16,15,4,0,1 +0,0,2,13,16,16,16,12,0,0,9,15,8,9,16,7,0,0,10,10,0,6,14,1,0,1,16,5,1,16,4,0,0,0,4,0,9,13,0,0,0,0,0,4,15,1,0,0,0,0,0,14,10,0,0,0,0,0,3,15,4,0,0,0,7 +0,1,8,8,11,15,10,0,0,4,16,16,11,12,6,0,0,4,16,4,0,0,0,0,0,1,16,15,8,0,0,0,0,0,4,10,16,6,0,0,0,0,0,0,12,12,0,0,0,6,15,9,13,10,0,0,0,1,13,16,13,4,0,0,5 +0,0,0,2,16,4,0,0,0,0,0,10,16,6,0,0,0,0,4,16,12,15,0,0,0,2,13,8,9,14,0,0,0,9,16,16,16,16,6,0,0,2,4,5,14,15,0,0,0,0,0,0,15,11,0,0,0,0,0,0,13,8,0,0,4 +0,0,0,2,14,0,0,0,0,0,0,10,9,0,0,0,0,0,1,15,8,11,0,0,0,0,12,7,12,8,0,0,0,5,16,12,15,14,5,0,0,8,13,9,16,13,3,0,0,0,0,0,16,4,0,0,0,0,0,0,16,6,0,0,4 +0,0,2,5,5,11,15,5,0,0,12,16,14,13,16,3,0,1,14,9,0,6,11,0,0,0,16,5,1,13,4,0,0,0,1,0,7,11,0,0,0,0,0,2,12,2,0,0,0,0,0,10,10,0,0,0,0,0,0,15,5,0,0,0,7 +0,1,11,13,2,0,0,0,0,8,15,15,6,0,0,0,0,10,9,6,14,0,0,0,0,3,10,4,10,0,0,0,0,0,0,11,7,0,0,0,0,0,2,15,3,0,0,0,0,2,13,16,13,11,2,0,0,1,12,12,12,15,11,0,2 +0,0,3,10,14,3,0,0,0,8,16,11,10,13,0,0,0,7,14,0,1,15,2,0,0,2,16,9,16,16,1,0,0,0,12,16,15,15,2,0,0,0,12,10,0,8,8,0,0,0,9,12,4,7,12,0,0,0,2,11,16,16,9,0,8 +0,1,11,12,1,0,0,0,0,8,16,12,9,0,0,0,0,7,8,7,12,0,0,0,0,1,1,4,14,0,0,0,0,0,0,5,11,0,0,0,0,0,0,11,9,0,0,0,0,1,14,16,16,15,10,0,0,0,13,11,8,12,8,0,2 +0,0,0,3,15,13,1,0,0,0,2,15,16,16,2,0,0,0,13,10,5,15,0,0,0,0,6,2,11,8,0,0,0,0,0,6,14,2,0,0,0,3,8,16,8,0,0,0,3,16,16,16,16,8,0,0,1,4,4,5,13,6,0,0,2 +0,0,11,15,15,16,9,0,0,4,16,14,8,9,3,0,0,4,12,0,0,0,0,0,0,6,16,15,3,0,0,0,0,3,11,11,12,0,0,0,0,0,0,2,16,0,0,0,0,2,12,9,16,0,0,0,0,0,11,16,8,0,0,0,5 +0,0,2,8,8,8,12,2,0,0,12,16,14,14,15,1,0,0,14,9,0,12,6,0,0,0,10,2,8,11,0,0,0,0,0,2,14,3,0,0,0,0,0,9,8,0,0,0,0,0,0,14,4,0,0,0,0,0,3,15,0,0,0,0,7 +0,0,3,10,14,3,0,0,0,4,16,13,15,11,0,0,0,8,13,1,13,16,2,0,0,6,16,14,14,14,6,0,0,0,5,7,1,11,8,0,0,1,8,1,0,8,8,0,0,2,16,11,8,14,7,0,0,0,5,12,14,9,1,0,9 +0,0,3,8,11,13,14,0,0,2,13,16,13,13,13,0,0,1,16,0,0,0,0,0,0,3,16,11,10,1,0,0,0,3,16,14,14,10,0,0,0,0,8,3,9,11,0,0,0,0,7,15,14,11,0,0,0,0,2,12,13,2,0,0,5 +0,0,0,0,13,3,0,0,0,0,0,11,13,4,0,0,0,0,6,14,4,16,1,0,0,2,14,3,6,14,0,0,0,6,16,11,12,12,0,0,0,2,7,14,16,14,0,0,0,0,0,0,12,8,0,0,0,0,0,0,15,3,0,0,4 +0,0,6,10,9,4,0,0,0,0,14,10,16,16,1,0,0,4,15,1,9,16,0,0,0,3,16,16,16,8,0,0,0,0,12,14,16,5,0,0,0,0,12,1,9,12,0,0,0,0,16,6,14,9,0,0,0,0,8,12,6,1,0,0,8 
+0,0,5,15,15,7,0,0,0,2,16,11,16,16,8,0,0,2,16,5,4,16,8,0,0,1,12,16,16,10,0,0,0,0,7,16,16,5,0,0,0,0,15,9,14,10,0,0,0,0,14,12,16,8,0,0,0,0,5,14,12,1,0,0,8 +0,0,0,5,11,0,0,0,0,0,0,10,13,0,0,0,0,0,0,16,16,6,0,0,0,0,9,12,16,5,0,0,0,2,16,4,16,7,0,0,0,9,16,14,16,16,3,0,0,3,8,11,16,8,1,0,0,0,0,5,13,0,0,0,4 +0,0,2,10,16,10,0,0,0,0,14,9,6,16,16,0,0,0,16,6,5,14,11,0,0,0,5,14,14,16,6,0,0,0,0,0,1,16,3,0,0,0,3,1,4,16,3,0,0,2,15,13,11,13,1,0,0,0,3,12,13,4,0,0,9 +0,0,2,12,14,4,0,0,0,0,15,12,11,13,0,0,0,4,16,4,1,14,6,0,0,4,12,0,0,8,8,0,0,6,9,0,0,5,8,0,0,3,12,1,0,12,8,0,0,0,8,12,9,16,3,0,0,0,0,10,13,3,0,0,0 +0,0,5,13,13,8,0,0,0,0,16,11,13,16,6,0,0,1,16,5,2,14,9,0,0,0,9,16,16,15,0,0,0,0,10,16,14,14,0,0,0,5,15,4,0,16,6,0,0,6,14,7,6,16,4,0,0,0,7,15,16,10,0,0,8 +0,0,5,13,15,6,0,0,0,2,16,9,16,13,0,0,0,4,14,0,10,16,2,0,0,4,15,11,15,16,1,0,0,0,7,10,3,13,8,0,0,0,3,0,0,12,5,0,0,0,13,11,4,16,4,0,0,0,7,14,16,11,1,0,9 +0,0,8,11,8,10,0,0,0,3,15,8,12,16,4,0,0,3,12,0,3,16,2,0,0,0,11,10,15,10,0,0,0,0,4,16,16,6,0,0,0,0,7,9,4,16,0,0,0,0,12,11,5,16,0,0,0,0,3,10,9,3,0,0,8 +0,0,6,15,13,2,0,0,0,1,15,10,11,14,0,0,0,2,16,3,1,16,4,0,0,4,12,0,1,14,4,0,0,4,10,0,0,15,3,0,0,4,12,0,0,15,3,0,0,1,13,9,11,16,2,0,0,0,4,12,14,5,0,0,0 +0,0,7,14,2,0,0,0,0,0,2,16,6,0,0,0,0,0,2,15,9,0,0,0,0,0,2,16,12,0,0,0,0,0,2,16,16,5,0,0,0,0,0,3,13,13,0,0,0,0,6,12,14,16,12,5,0,0,5,16,16,16,16,15,1 +0,0,2,13,11,0,0,0,0,0,11,14,11,9,0,0,0,0,15,7,6,12,0,0,0,0,8,5,9,12,0,0,0,0,0,1,14,7,0,0,0,0,0,7,16,0,0,0,0,0,1,12,12,4,4,1,0,0,1,15,16,16,16,7,2 +0,0,1,9,15,5,0,0,0,0,14,11,5,11,0,0,0,4,15,1,4,14,0,0,0,0,6,1,13,9,0,0,0,0,0,0,10,13,1,0,0,0,0,0,0,8,10,0,0,0,12,9,4,4,15,0,0,0,1,10,16,15,11,1,3 +0,0,1,14,3,0,0,0,0,0,8,14,0,3,0,0,0,1,16,4,10,12,0,0,0,7,14,2,15,5,0,0,0,13,14,11,16,16,9,0,0,8,16,16,14,4,0,0,0,0,0,15,9,0,0,0,0,0,2,16,5,0,0,0,4 +0,0,9,14,16,13,2,0,0,0,13,8,2,6,4,0,0,0,16,2,9,8,0,0,0,3,15,15,11,14,4,0,0,5,16,6,0,12,2,0,0,5,7,0,3,13,0,0,0,0,5,7,13,6,0,0,0,0,10,16,9,0,0,0,5 +0,0,1,14,15,4,0,0,0,0,9,16,10,5,0,0,0,1,16,10,0,0,0,0,0,3,16,12,5,0,0,0,0,4,16,16,16,8,0,0,0,1,15,7,4,16,4,0,0,0,11,13,4,16,9,0,0,0,1,12,16,16,9,0,6 +0,0,9,15,16,2,0,0,0,0,13,9,16,4,0,0,0,0,2,6,16,16,12,0,0,0,14,16,14,8,7,0,0,0,3,14,9,0,0,0,0,0,3,16,3,0,0,0,0,0,10,16,0,0,0,0,0,0,12,11,0,0,0,0,7 +0,0,0,5,15,13,2,0,0,0,0,12,7,11,6,0,0,0,0,9,12,15,1,0,0,0,1,8,16,4,0,0,0,3,15,8,13,0,0,0,0,7,12,0,10,7,0,0,0,0,12,11,10,8,0,0,0,0,0,6,13,10,0,0,8 +0,0,3,11,15,8,0,0,0,3,14,10,5,15,2,0,0,8,10,0,3,16,4,0,0,8,9,1,10,16,7,0,0,1,15,16,9,9,7,0,0,0,0,0,0,5,8,0,0,0,4,6,5,13,7,0,0,0,3,16,15,8,1,0,9 +0,0,0,9,14,6,0,0,0,0,10,13,4,13,2,0,0,2,14,0,0,10,6,0,0,4,9,0,0,6,8,0,0,5,8,0,0,8,7,0,0,2,11,1,0,9,5,0,0,0,6,11,4,13,3,0,0,0,1,11,16,12,0,0,0 +0,0,6,13,0,0,0,0,0,0,8,16,2,0,0,0,0,0,6,16,3,0,0,0,0,0,3,15,6,0,0,0,0,0,0,10,10,0,0,0,0,0,0,3,15,0,0,0,0,0,7,10,14,12,5,1,0,0,6,16,16,16,16,12,1 +0,0,5,16,14,2,0,0,0,1,13,14,16,8,0,0,0,9,15,3,16,5,0,0,0,10,13,3,16,3,0,0,0,3,3,11,13,0,0,0,0,0,0,13,10,0,0,0,0,0,2,16,16,16,10,0,0,0,6,16,14,12,9,0,2 +0,0,3,10,15,6,0,0,0,4,16,9,4,16,2,0,0,8,14,0,9,10,0,0,0,1,4,7,16,2,0,0,0,0,0,2,15,7,0,0,0,0,0,0,1,14,1,0,0,0,12,8,1,11,7,0,0,0,6,8,16,15,5,0,3 +0,0,3,16,4,0,0,0,0,0,12,13,2,5,0,0,0,2,16,6,10,15,1,0,0,9,15,3,16,11,7,0,0,12,16,16,15,11,5,0,0,3,9,16,3,0,0,0,0,0,2,16,3,0,0,0,0,0,6,14,0,0,0,0,4 +0,0,13,13,13,12,4,0,0,1,16,5,5,9,4,0,0,4,13,0,2,1,0,0,0,5,14,11,16,13,2,0,0,5,15,6,0,9,8,0,0,0,3,0,0,10,8,0,0,3,14,5,7,15,1,0,0,1,9,14,15,4,0,0,5 +0,0,0,11,16,6,0,0,0,0,10,16,10,0,0,0,0,0,16,3,0,0,0,0,0,5,14,0,3,0,0,0,0,2,16,16,13,12,1,0,0,2,15,3,0,8,7,0,0,0,8,8,0,10,7,0,0,0,1,11,12,15,4,0,6 
+0,0,5,11,16,16,5,0,0,3,15,11,10,16,4,0,0,0,4,0,10,14,0,0,0,0,7,15,16,16,12,0,0,0,9,16,14,4,1,0,0,0,1,14,7,0,0,0,0,0,4,16,4,0,0,0,0,0,8,16,0,0,0,0,7 +0,0,0,9,16,6,0,0,0,0,4,15,6,15,0,0,0,0,8,11,9,11,0,0,0,0,8,16,14,2,0,0,0,0,11,16,13,0,0,0,0,6,14,2,12,9,0,0,0,5,16,11,5,13,4,0,0,0,3,8,13,16,9,0,8 +0,0,1,12,16,14,2,0,0,0,13,11,3,16,5,0,0,4,14,0,0,15,6,0,0,6,12,8,13,16,5,0,0,0,9,12,4,10,8,0,0,0,3,0,0,11,5,0,0,0,16,14,5,15,4,0,0,0,3,12,16,11,1,0,9 +0,0,5,15,12,4,0,0,0,2,15,8,11,16,4,0,0,8,9,0,6,16,4,0,0,8,8,0,2,10,8,0,0,8,7,0,0,13,5,0,0,2,14,0,0,16,2,0,0,0,14,8,11,10,0,0,0,0,4,13,14,0,0,0,0 +0,0,8,14,1,0,0,0,0,0,7,16,3,0,0,0,0,0,6,16,6,0,0,0,0,0,6,16,9,0,0,0,0,0,2,16,14,0,0,0,0,0,0,8,16,3,0,0,0,0,12,12,16,16,12,4,0,0,7,16,16,16,12,5,1 +0,0,3,13,15,1,0,0,0,4,15,14,15,10,0,0,0,13,13,2,13,9,0,0,0,14,10,0,15,9,0,0,0,1,1,2,16,4,0,0,0,0,0,9,15,1,0,0,0,0,2,15,16,16,16,6,0,0,2,15,16,10,12,4,2 +0,0,2,11,12,1,0,0,0,2,14,9,9,8,0,0,0,10,12,0,13,6,0,0,0,6,5,2,13,2,0,0,0,0,0,10,9,0,0,0,0,0,0,1,10,9,1,0,0,0,6,7,0,12,6,0,0,0,1,12,16,16,5,0,3 +0,0,0,11,8,0,0,0,0,0,2,16,5,0,0,0,0,0,12,10,4,10,0,0,0,6,15,2,15,8,0,0,0,10,12,4,16,7,6,0,0,10,16,15,16,14,6,0,0,3,8,16,9,0,0,0,0,0,0,14,11,0,0,0,4 +0,1,13,14,16,15,7,0,0,4,15,3,3,4,1,0,0,4,13,5,8,5,0,0,0,6,16,12,8,14,2,0,0,0,4,0,0,12,4,0,0,0,0,0,1,14,2,0,0,2,12,3,11,9,0,0,0,1,11,16,13,1,0,0,5 +0,0,2,15,15,3,0,0,0,0,13,15,8,3,0,0,0,5,16,6,0,0,0,0,0,8,15,5,4,2,0,0,0,8,16,16,16,14,2,0,0,4,16,7,1,13,8,0,0,0,11,12,1,11,13,0,0,0,1,12,16,16,10,0,6 +0,0,4,16,16,16,12,0,0,0,4,12,11,14,13,0,0,0,0,0,0,15,9,0,0,0,2,8,10,16,9,0,0,0,7,13,16,14,5,0,0,0,0,3,16,5,0,0,0,0,0,10,15,0,0,0,0,0,3,16,9,0,0,0,7 +0,0,2,13,14,6,0,0,0,0,10,13,5,16,0,0,0,0,9,9,4,14,0,0,0,0,4,15,15,5,0,0,0,0,5,16,14,1,0,0,0,0,14,6,8,9,0,0,0,0,13,7,1,11,5,0,0,0,3,11,15,16,12,0,8 +0,0,8,12,13,1,0,0,0,5,12,2,6,13,0,0,0,11,5,0,6,12,0,0,0,7,10,4,13,15,0,0,0,1,11,12,7,12,4,0,0,0,0,0,0,4,9,0,0,0,10,5,0,3,13,0,0,0,6,12,16,13,10,0,9 +0,0,4,15,14,4,0,0,0,1,14,8,10,13,1,0,0,5,13,0,0,16,3,0,0,6,12,0,0,13,3,0,0,7,12,0,0,14,3,0,0,1,16,0,0,14,3,0,0,0,10,11,12,14,0,0,0,0,1,11,12,3,0,0,0 +0,0,2,10,16,11,1,0,0,0,13,13,10,16,8,0,0,4,14,1,8,14,1,0,0,4,15,12,15,8,0,0,0,0,6,7,14,5,0,0,0,1,2,0,12,5,0,0,0,8,15,6,13,4,0,0,0,0,5,11,16,3,0,0,9 +0,0,13,16,16,16,13,0,0,4,16,9,8,5,4,0,0,9,15,7,8,2,0,0,0,11,16,16,14,15,1,0,0,1,3,0,4,16,4,0,0,0,0,0,8,14,0,0,0,0,11,8,16,6,0,0,0,1,15,16,10,0,0,0,5 +0,0,9,8,12,13,1,0,0,3,15,8,5,4,0,0,0,6,9,2,6,2,0,0,0,6,16,14,9,13,4,0,0,2,7,0,0,7,8,0,0,0,0,0,0,7,10,0,0,0,8,5,6,14,3,0,0,0,10,14,15,5,0,0,5 +0,0,2,13,16,8,0,0,0,0,11,16,6,2,0,0,0,2,16,8,0,0,0,0,0,5,16,9,1,0,0,0,0,5,16,16,13,2,0,0,0,1,16,6,8,14,0,0,0,0,11,10,1,16,5,0,0,0,3,15,16,16,3,0,6 +0,0,8,12,16,16,4,0,0,0,15,6,10,5,0,0,0,4,12,2,8,6,0,0,0,8,14,14,8,13,5,0,0,3,7,0,0,8,8,0,0,0,0,0,0,12,2,0,0,0,5,2,5,12,0,0,0,0,7,15,15,2,0,0,5 +0,0,2,11,13,5,0,0,0,1,14,9,8,14,0,0,0,6,13,1,2,16,2,0,0,7,7,0,0,12,5,0,0,7,9,0,0,3,9,0,0,2,12,0,0,4,11,0,0,0,12,6,4,14,7,0,0,0,3,13,16,9,0,0,0 +0,0,3,10,15,14,4,0,0,2,14,7,9,16,8,0,0,7,12,3,14,16,0,0,0,2,14,16,13,16,0,0,0,0,0,0,0,16,0,0,0,1,3,0,0,14,0,0,0,5,15,8,2,16,0,0,0,0,4,11,16,15,0,0,9 +0,0,0,6,15,11,2,0,0,0,6,13,4,13,5,0,0,0,7,11,0,13,3,0,0,0,2,15,13,7,0,0,0,3,13,12,16,2,0,0,0,8,15,1,9,8,0,0,0,0,7,14,8,16,1,0,0,0,0,5,12,16,2,0,8 +0,0,2,10,14,10,0,0,0,1,15,9,9,16,1,0,0,7,9,0,9,12,0,0,0,7,7,3,15,15,0,0,0,2,15,15,7,16,1,0,0,0,1,2,0,9,4,0,0,0,5,13,4,8,9,0,0,0,1,10,15,16,6,0,9 +0,0,0,9,16,9,0,0,0,0,3,15,5,16,0,0,0,0,7,12,7,12,0,0,0,0,1,16,16,5,0,0,0,0,2,16,14,1,0,0,0,3,16,8,9,11,0,0,0,0,12,13,4,12,8,0,0,0,0,8,14,16,14,0,8 
+0,0,0,9,11,0,0,0,0,0,5,14,3,2,0,0,0,0,15,2,1,14,3,0,0,5,13,0,13,8,1,0,0,8,13,3,16,14,6,0,0,6,15,16,13,3,0,0,0,0,0,9,11,0,0,0,0,0,0,11,7,0,0,0,4 +0,0,5,14,1,0,0,0,0,0,9,16,4,0,0,0,0,0,12,16,4,0,0,0,0,0,12,16,8,0,0,0,0,0,9,16,11,0,0,0,0,0,0,8,16,2,0,0,0,0,3,8,15,13,11,8,0,0,5,16,16,16,16,10,1 +0,0,4,10,15,16,4,0,0,0,13,14,9,16,3,0,0,0,2,1,5,15,0,0,0,0,3,4,13,14,2,0,0,5,16,16,16,16,8,0,0,4,9,12,14,1,0,0,0,0,5,15,10,0,0,0,0,0,6,16,4,0,0,0,7 +0,0,3,15,16,16,6,0,0,0,3,14,7,15,3,0,0,0,0,0,4,14,0,0,0,0,2,9,15,16,14,0,0,0,7,16,14,6,2,0,0,0,0,8,8,0,0,0,0,0,2,16,2,0,0,0,0,0,5,12,0,0,0,0,7 +0,0,1,10,15,10,0,0,0,1,13,11,8,12,0,0,0,2,9,0,13,6,0,0,0,0,0,0,16,1,0,0,0,0,0,0,14,10,1,0,0,0,0,0,1,9,10,0,0,0,13,7,0,2,16,0,0,0,2,11,15,16,12,0,3 +0,0,11,13,12,12,3,0,0,5,14,4,4,7,2,0,0,7,10,1,4,1,0,0,0,8,15,14,12,15,2,0,0,2,7,0,0,12,4,0,0,0,0,0,1,16,3,0,0,1,8,3,10,12,0,0,0,1,12,16,12,2,0,0,5 +0,0,2,16,5,0,0,0,0,0,4,16,10,0,0,0,0,0,5,16,7,0,0,0,0,0,8,16,9,0,0,0,0,0,15,16,14,2,0,0,0,0,7,8,14,10,0,0,0,0,12,15,14,16,14,9,0,0,2,10,13,16,10,3,1 +0,0,1,15,11,1,0,0,0,0,9,12,8,12,0,0,0,1,15,1,6,16,2,0,0,2,12,0,1,11,6,0,0,5,10,0,0,11,4,0,0,2,13,0,0,10,3,0,0,0,13,2,3,13,3,0,0,0,1,13,16,15,1,0,0 +0,0,5,16,15,4,0,0,0,3,15,13,13,12,0,0,0,7,14,1,0,16,5,0,0,12,9,0,1,11,10,0,0,10,10,0,0,7,13,0,0,6,15,0,0,8,12,0,0,1,14,7,6,15,11,0,0,0,5,15,16,14,3,0,0 +0,0,3,11,13,1,0,0,0,6,16,11,13,6,0,0,1,16,8,0,11,4,0,0,0,4,4,0,16,0,0,0,0,0,0,5,11,0,0,0,0,0,0,8,9,0,0,0,0,0,0,12,9,4,5,0,0,0,1,14,13,12,15,5,2 +0,0,2,14,8,0,0,0,0,3,14,10,16,1,0,0,0,11,8,2,15,0,0,0,0,9,8,1,13,0,0,0,0,1,3,6,10,0,0,0,0,0,0,9,7,0,0,0,0,0,0,14,9,4,7,3,0,0,1,14,16,16,13,8,2 +0,0,9,14,16,10,0,0,0,0,10,6,12,13,0,0,0,0,0,0,13,13,5,0,0,2,12,15,16,15,14,0,0,2,12,16,7,0,1,0,0,0,3,15,0,0,0,0,0,0,8,14,0,0,0,0,0,0,9,11,0,0,0,0,7 +0,0,0,6,14,4,0,0,0,0,4,13,2,12,0,0,0,0,8,6,0,12,0,0,0,0,2,12,6,14,0,0,0,0,1,12,16,9,0,0,0,0,13,11,6,11,0,0,0,0,9,11,2,7,8,0,0,0,0,5,10,15,13,0,8 +0,0,7,13,14,1,0,0,0,7,15,9,13,7,0,0,0,5,15,3,8,8,0,0,0,0,1,0,12,5,0,0,0,0,0,1,14,0,0,0,0,0,0,10,6,0,0,0,0,0,2,15,5,4,4,0,0,0,6,16,16,13,16,6,2 +0,0,0,7,13,9,1,0,0,0,7,15,8,15,5,0,0,1,15,2,0,10,8,0,0,4,12,0,0,12,7,0,0,5,9,0,0,14,3,0,0,4,14,0,0,11,0,0,0,1,16,8,8,11,0,0,0,0,2,11,14,5,0,0,0 +0,0,10,12,0,0,0,0,0,0,8,16,1,0,0,0,0,0,9,16,1,0,0,0,0,0,12,16,5,0,0,0,0,0,13,16,10,0,0,0,0,0,1,10,15,0,0,0,0,0,7,12,16,12,12,4,0,0,7,16,16,16,16,11,1 +0,0,3,13,7,0,0,0,0,3,15,8,14,0,0,0,0,10,8,1,14,0,0,0,0,8,11,5,13,0,0,0,0,0,0,5,12,0,0,0,0,0,0,8,8,0,0,0,0,0,1,12,10,7,5,2,0,0,2,14,14,12,14,7,2 +0,0,3,12,16,6,0,0,0,0,13,16,12,4,0,0,0,3,16,9,0,0,0,0,0,6,13,5,4,0,0,0,0,8,14,3,16,2,0,0,0,4,16,4,13,7,0,0,0,0,11,11,11,14,0,0,0,0,3,12,16,15,0,0,6 +0,0,1,9,16,6,0,0,0,4,14,10,11,10,0,0,0,12,10,0,13,6,0,0,0,6,7,4,16,5,0,0,0,0,0,0,7,12,1,0,0,0,0,0,0,8,10,0,0,0,8,13,3,0,14,3,0,0,0,8,16,16,13,3,3 +0,0,4,12,16,14,7,0,0,2,16,6,0,7,12,0,0,0,7,0,3,13,3,0,0,0,0,1,16,6,0,0,0,0,0,0,9,12,0,0,0,0,2,0,0,13,5,0,0,3,16,7,1,12,4,0,0,0,3,12,16,15,2,0,3 +0,0,3,8,12,15,16,2,0,0,12,14,10,13,15,0,0,0,1,1,2,14,6,0,0,0,2,8,13,16,8,0,0,0,9,16,16,10,5,0,0,0,1,8,12,1,0,0,0,0,0,14,8,0,0,0,0,0,2,16,5,0,0,0,7 +0,0,0,6,15,12,1,0,0,4,12,16,12,16,3,0,0,15,16,6,4,16,3,0,0,4,5,1,15,12,0,0,0,0,0,7,16,10,1,0,0,0,3,2,4,15,7,0,0,0,12,15,8,11,14,0,0,0,1,8,15,16,11,0,3 +0,0,0,7,13,10,0,0,0,0,10,13,5,13,0,0,0,7,12,0,8,8,0,0,0,6,6,3,15,1,0,0,0,0,0,2,13,9,0,0,0,0,0,0,0,11,7,0,0,0,5,9,1,2,12,0,0,0,0,9,15,16,9,0,3 +0,0,0,12,10,0,0,0,0,0,4,16,5,0,0,0,0,0,15,7,2,14,1,0,0,6,16,2,9,16,11,0,0,9,14,9,16,15,6,0,0,5,16,16,16,1,0,0,0,0,2,11,13,0,0,0,0,0,0,12,13,0,0,0,4 
+0,0,0,10,16,6,0,0,0,0,11,14,5,0,0,0,0,3,16,2,0,0,0,0,0,8,10,0,0,0,0,0,0,6,16,14,11,3,0,0,0,2,14,0,7,13,0,0,0,0,10,9,1,15,2,0,0,0,0,8,16,15,1,0,6 +0,0,0,9,16,6,0,0,0,0,8,16,12,5,0,0,0,2,16,9,0,0,0,0,0,6,16,6,2,0,0,0,0,8,16,16,16,7,0,0,0,2,16,7,7,16,4,0,0,0,9,13,3,14,9,0,0,0,0,8,16,16,7,0,6 +0,0,0,8,15,10,0,0,0,0,8,13,6,1,0,0,0,1,16,2,0,0,0,0,0,4,11,0,0,0,0,0,0,4,16,12,12,9,2,0,0,1,15,1,0,9,10,0,0,0,10,9,4,13,3,0,0,0,0,11,15,5,0,0,6 +0,0,3,15,6,0,0,0,0,1,14,13,4,0,0,0,0,4,16,5,16,7,0,0,0,8,16,8,16,9,5,0,0,10,16,14,16,16,9,0,0,3,11,16,11,2,0,0,0,0,4,16,8,0,0,0,0,0,5,15,4,0,0,0,4 +0,0,1,8,14,15,5,0,0,1,14,8,1,14,8,0,0,7,12,0,7,16,8,0,0,4,14,12,12,9,8,0,0,0,1,3,0,9,8,0,0,0,0,0,0,13,6,0,0,0,12,10,4,16,0,0,0,0,2,8,16,7,0,0,9 +0,0,3,15,5,0,0,0,0,0,5,16,10,0,0,0,0,0,6,16,7,0,0,0,0,2,12,16,9,0,0,0,0,8,16,15,14,0,0,0,0,0,6,3,16,6,0,0,0,0,6,8,12,15,12,10,0,0,2,13,16,16,15,11,1 +0,1,9,12,13,11,0,0,0,3,15,4,3,3,0,0,0,5,12,7,6,0,0,0,0,5,16,14,13,7,0,0,0,1,8,0,2,12,0,0,0,0,0,0,2,14,0,0,0,0,6,2,10,6,0,0,0,0,11,16,13,1,0,0,5 +0,0,0,16,6,0,0,0,0,0,11,16,16,11,0,0,0,4,16,11,13,14,0,0,0,7,12,1,3,13,0,0,0,4,10,0,0,16,0,0,0,2,14,0,1,16,1,0,0,0,9,7,9,14,0,0,0,0,1,11,15,3,0,0,0 +0,0,6,11,13,6,0,0,0,7,14,6,7,13,0,0,0,10,7,0,7,10,0,0,0,4,13,12,15,10,0,0,0,0,1,4,0,12,0,0,0,0,0,0,0,11,1,0,0,0,8,2,0,12,0,0,0,0,6,14,15,12,0,0,9 +0,0,9,16,16,13,2,0,0,2,15,2,3,3,0,0,0,7,9,0,1,4,0,0,0,8,12,7,13,14,7,0,0,6,16,8,0,5,8,0,0,1,3,0,0,9,6,0,0,0,3,4,1,15,0,0,0,0,7,16,12,7,0,0,5 +0,0,7,15,13,0,0,0,0,9,16,15,16,5,0,0,0,12,16,5,15,6,0,0,0,0,7,2,13,9,0,0,0,0,0,1,16,7,0,0,0,0,0,6,16,4,0,0,0,0,1,15,16,12,15,7,0,0,5,16,14,12,12,11,2 +0,0,0,6,14,10,0,0,0,0,3,16,7,13,2,0,0,0,4,16,3,14,1,0,0,0,0,11,16,9,0,0,0,0,5,14,16,6,0,0,0,3,15,4,1,13,4,0,0,2,14,11,5,5,12,0,0,0,0,6,10,15,15,0,8 +0,0,9,15,13,0,0,0,0,5,14,7,13,2,0,0,0,12,10,1,13,0,0,0,0,4,7,6,11,0,0,0,0,0,0,10,6,0,0,0,0,0,1,15,0,0,0,0,0,0,9,11,0,6,5,0,0,0,11,16,16,16,16,3,2 +0,0,2,11,15,2,0,0,0,0,12,6,11,9,0,0,0,4,11,0,7,16,0,0,0,5,6,0,1,16,6,0,0,5,4,0,0,10,7,0,0,0,10,0,0,10,5,0,0,0,13,2,6,12,0,0,0,0,4,16,12,1,0,0,0 +0,0,1,12,14,3,0,0,0,1,13,11,9,13,0,0,0,7,11,0,1,16,4,0,0,8,6,0,2,15,0,0,0,4,12,0,0,15,0,0,0,0,15,1,1,15,0,0,0,0,7,10,7,13,0,0,0,0,1,13,16,7,0,0,0 +0,0,5,13,2,0,0,0,0,0,4,16,7,0,0,0,0,0,4,16,4,0,0,0,0,0,4,16,6,0,0,0,0,0,9,16,10,0,0,0,0,0,2,11,15,1,0,0,0,0,10,13,16,15,16,9,0,0,3,12,16,16,11,2,1 +0,0,6,14,16,13,0,0,0,0,9,9,9,15,0,0,0,0,0,0,14,9,0,0,0,0,2,10,16,16,12,0,0,0,13,16,12,7,3,0,0,0,3,14,6,0,0,0,0,0,6,16,2,0,0,0,0,0,10,13,0,0,0,0,7 +0,0,0,6,13,8,0,0,0,0,7,16,8,4,0,0,0,3,15,2,0,0,0,0,0,6,12,4,0,0,0,0,0,4,16,13,13,3,0,0,0,2,15,2,5,14,0,0,0,0,9,10,2,15,0,0,0,0,0,8,15,12,0,0,6 +0,0,1,7,12,3,0,0,0,4,16,12,12,10,0,0,0,14,9,0,11,8,0,0,0,7,5,0,15,4,0,0,0,0,0,2,14,7,0,0,0,0,0,0,2,13,9,0,0,0,5,10,4,0,14,5,0,0,1,9,15,16,16,8,3 +0,0,10,15,2,0,0,0,0,7,16,16,6,0,0,0,0,12,13,12,9,0,0,0,0,8,9,13,7,0,0,0,0,0,0,16,5,0,0,0,0,0,6,15,1,0,0,0,0,0,16,14,4,5,8,3,0,0,8,16,16,16,16,9,2 +0,0,6,16,4,0,0,0,0,0,4,16,9,0,0,0,0,0,7,16,12,0,0,0,0,0,13,16,15,0,0,0,0,0,13,15,16,2,0,0,0,0,1,2,15,8,0,0,0,0,6,9,14,15,13,7,0,0,5,15,16,16,15,3,1 +0,0,4,13,14,16,3,0,0,0,6,11,10,16,1,0,0,0,0,0,7,14,0,0,0,0,9,16,16,16,12,0,0,0,15,13,16,7,2,0,0,0,0,8,12,0,0,0,0,0,2,14,6,0,0,0,0,0,8,13,1,0,0,0,7 +0,0,2,15,6,0,0,0,0,0,10,14,0,5,0,0,0,0,13,9,9,16,3,0,0,6,15,6,16,3,0,0,0,9,13,12,15,12,8,0,0,9,16,16,14,7,2,0,0,1,7,16,7,0,0,0,0,0,2,16,7,0,0,0,4 +0,0,2,13,16,8,0,0,0,1,13,16,10,7,0,0,0,5,16,9,0,0,0,0,0,7,16,7,0,0,0,0,0,9,16,16,13,1,0,0,0,5,16,6,14,9,0,0,0,0,13,12,14,15,0,0,0,0,3,12,16,11,0,0,6 
+0,0,0,6,13,7,0,0,0,0,10,13,6,15,0,0,0,0,12,8,4,12,0,0,0,0,0,1,15,3,0,0,0,0,0,10,15,2,0,0,0,0,0,1,5,15,2,0,0,0,14,10,2,5,11,0,0,0,2,7,13,15,8,0,3 +0,0,2,15,3,0,0,0,0,0,4,16,4,0,0,0,0,0,4,16,5,0,0,0,0,0,14,16,6,0,0,0,0,0,7,15,7,0,0,0,0,0,2,10,9,0,0,0,0,0,16,16,15,9,16,5,0,0,3,15,16,15,7,1,1 +0,0,0,6,15,6,0,0,0,1,11,13,8,11,0,0,0,9,13,0,9,10,0,0,0,8,9,3,15,3,0,0,0,0,0,5,14,3,0,0,0,0,3,0,5,13,2,0,0,0,9,12,5,10,7,0,0,0,0,6,12,15,5,0,3 +0,0,5,11,13,6,0,0,0,4,15,8,7,16,3,0,0,8,7,0,4,16,1,0,0,4,11,1,10,16,4,0,0,2,15,15,8,16,4,0,0,0,0,0,0,13,6,0,0,1,16,9,0,12,5,0,0,0,4,11,16,16,2,0,9 +0,0,4,15,4,0,0,0,0,0,3,16,9,0,0,0,0,0,2,16,11,0,0,0,0,0,0,16,13,0,0,0,0,0,2,16,16,2,0,0,0,0,0,5,15,10,0,0,0,0,4,12,14,16,13,13,0,0,2,13,16,16,15,8,1 +0,0,4,15,16,12,0,0,0,0,6,9,12,10,0,0,0,0,0,0,10,9,0,0,0,0,2,4,15,10,4,0,0,2,15,16,16,15,7,0,0,0,8,13,9,0,0,0,0,0,1,16,4,0,0,0,0,0,6,13,0,0,0,0,7 +0,0,0,11,16,6,0,0,0,0,9,16,11,2,0,0,0,3,16,11,0,0,0,0,0,6,16,12,4,0,0,0,0,7,16,13,15,11,0,0,0,1,15,8,3,16,5,0,0,0,9,14,5,16,10,0,0,0,0,9,16,16,10,0,6 +0,0,0,7,13,8,0,0,0,0,2,16,8,15,0,0,0,0,4,12,8,11,0,0,0,0,2,16,16,3,0,0,0,0,5,16,16,2,0,0,0,2,16,7,9,11,0,0,0,0,9,12,1,14,6,0,0,0,0,6,15,15,12,0,8 +0,0,1,14,9,0,0,0,0,0,8,13,3,7,1,0,0,1,16,6,5,16,3,0,0,7,13,0,14,11,3,0,0,12,13,5,16,16,9,0,0,13,16,16,15,6,0,0,0,0,3,12,14,0,0,0,0,0,0,15,10,0,0,0,4 +0,0,3,11,15,8,0,0,0,4,14,8,13,14,0,0,0,8,11,3,15,6,0,0,0,1,1,9,14,0,0,0,0,0,0,0,13,10,0,0,0,0,0,0,1,13,7,0,0,0,9,8,2,6,11,0,0,0,4,10,14,16,10,0,3 +0,0,5,16,14,8,0,0,0,0,4,16,16,7,0,0,0,0,14,16,16,8,0,0,0,0,14,16,16,8,0,0,0,0,11,16,16,5,0,0,0,0,10,16,16,8,0,0,0,0,11,16,16,14,3,0,0,0,6,16,16,16,3,0,1 +0,0,0,15,8,0,0,0,0,0,5,15,2,13,5,0,0,0,13,9,2,15,2,0,0,4,14,1,10,12,2,0,0,10,14,8,16,16,10,0,0,10,16,16,15,5,0,0,0,0,2,12,8,0,0,0,0,0,0,16,5,0,0,0,4 +0,0,4,13,14,8,0,0,0,3,14,3,1,16,3,0,0,7,9,0,0,14,6,0,0,8,4,0,0,16,4,0,0,8,6,0,0,16,0,0,0,3,11,0,1,14,0,0,0,0,12,4,6,11,0,0,0,0,5,16,14,1,0,0,0 +0,0,8,12,14,12,3,0,0,0,12,5,0,3,0,0,0,0,16,2,4,1,0,0,0,4,16,14,12,15,4,0,0,0,4,0,0,8,8,0,0,1,0,0,0,11,5,0,0,6,14,1,2,15,1,0,0,0,8,14,16,4,0,0,5 +0,0,2,9,13,8,0,0,0,1,14,11,8,14,0,0,0,9,14,0,14,6,0,0,0,0,2,4,15,0,0,0,0,0,0,6,12,12,2,0,0,0,0,0,0,9,9,0,0,0,14,13,4,10,11,0,0,0,3,10,14,15,5,0,3 +0,0,0,13,15,4,0,0,0,0,11,16,9,4,0,0,0,1,16,14,0,0,0,0,0,5,16,7,0,0,0,0,0,5,16,16,14,4,0,0,0,2,15,9,7,15,5,0,0,0,11,13,4,12,13,0,0,0,1,13,16,16,10,0,6 +0,0,2,11,13,4,0,0,0,1,13,7,8,15,0,0,0,6,11,0,5,13,0,0,0,9,7,2,14,14,0,0,0,3,14,15,8,15,1,0,0,0,0,0,0,11,5,0,0,0,11,7,0,10,7,0,0,0,4,10,15,15,3,0,9 +0,0,0,11,16,10,0,0,0,0,9,16,10,7,0,0,0,3,16,8,0,0,0,0,0,9,16,13,4,0,0,0,0,10,16,8,16,7,0,0,0,4,16,3,7,16,2,0,0,0,13,13,8,16,5,0,0,0,1,11,16,16,1,0,6 +0,0,0,13,12,0,0,0,0,0,0,14,16,1,0,0,0,0,1,15,16,1,0,0,0,0,4,16,16,3,0,0,0,0,5,15,16,9,0,0,0,0,0,0,12,15,1,0,0,0,1,11,9,16,11,2,0,0,0,11,16,16,16,16,1 +0,0,4,13,16,14,0,0,0,0,13,10,11,15,0,0,0,0,0,0,9,11,0,0,0,0,1,6,14,16,8,0,0,0,11,16,15,8,5,0,0,0,2,11,10,0,0,0,0,0,1,14,6,0,0,0,0,0,5,16,2,0,0,0,7 +0,0,10,12,13,16,2,0,0,4,15,6,4,4,0,0,0,5,10,0,0,0,0,0,0,5,16,16,16,9,0,0,0,2,11,3,3,12,0,0,0,0,0,0,2,13,0,0,0,0,4,1,9,10,0,0,0,0,16,16,13,1,0,0,5 +0,0,0,12,10,0,0,0,0,0,4,16,3,9,3,0,0,0,14,7,6,16,2,0,0,3,15,2,10,10,0,0,0,10,9,1,16,12,10,0,0,14,11,14,16,11,1,0,0,9,16,15,9,0,0,0,0,0,0,14,8,0,0,0,4 +0,0,0,10,12,0,0,0,0,0,4,16,5,3,3,0,0,0,15,7,0,13,11,0,0,7,14,1,7,16,8,0,0,9,13,5,15,13,1,0,0,11,16,16,16,1,0,0,0,0,4,9,16,0,0,0,0,0,0,11,15,0,0,0,4 
+0,0,10,16,15,0,0,0,0,4,14,8,16,1,0,0,0,0,1,4,16,0,2,0,0,0,3,11,16,16,13,0,0,0,12,16,11,7,2,0,0,0,6,16,0,0,0,0,0,0,7,15,0,0,0,0,0,0,12,11,0,0,0,0,7 +0,0,6,15,9,0,0,0,0,9,16,14,16,1,0,0,0,14,12,3,16,4,0,0,0,9,11,3,16,3,0,0,0,0,2,9,16,0,0,0,0,0,0,13,11,0,0,0,0,0,4,16,12,9,10,3,0,0,8,16,16,16,16,14,2 +0,0,0,7,12,13,1,0,0,0,8,11,1,10,8,0,0,0,12,2,1,11,7,0,0,0,10,10,14,8,0,0,0,1,7,16,9,0,0,0,0,7,16,7,14,3,0,0,0,0,7,13,5,14,0,0,0,0,0,6,15,14,2,0,8 +0,0,10,16,16,4,0,0,0,9,16,11,14,8,0,0,0,13,8,0,14,6,0,0,0,0,0,1,16,4,0,0,0,0,0,8,13,0,0,0,0,0,1,14,7,0,0,0,0,0,12,16,9,12,6,0,0,1,14,16,16,16,14,0,2 +0,0,1,13,7,0,0,0,0,1,15,9,15,1,0,0,0,9,11,0,16,0,0,0,0,2,10,3,14,0,0,0,0,0,0,2,11,0,0,0,0,0,0,5,11,0,0,0,0,0,0,9,10,4,4,2,0,0,1,15,16,15,13,15,2 +0,0,10,10,14,16,14,0,0,0,14,8,4,0,0,0,0,0,16,0,6,11,5,0,0,3,16,14,10,10,9,0,0,3,14,5,0,9,8,0,0,0,0,0,6,13,0,0,0,0,3,9,13,3,0,0,0,0,8,13,1,0,0,0,5 +0,0,5,9,13,16,6,0,0,0,12,12,7,16,5,0,0,0,0,0,6,16,3,0,0,0,3,12,15,16,14,0,0,0,7,16,15,5,1,0,0,0,0,10,10,0,0,0,0,0,2,15,5,0,0,0,0,0,6,14,0,0,0,0,7 +0,0,4,13,12,6,0,0,0,4,15,5,10,16,0,0,0,4,16,1,11,16,0,0,0,1,10,16,13,16,2,0,0,0,0,4,0,15,3,0,0,0,0,0,0,12,4,0,0,0,6,6,0,9,8,0,0,0,5,12,15,16,7,0,9 +0,3,12,12,14,15,3,0,0,4,15,4,4,4,0,0,0,5,12,0,0,2,0,0,0,5,15,12,15,15,5,0,0,5,12,6,0,8,8,0,0,0,0,0,0,10,7,0,0,1,9,0,7,14,1,0,0,2,15,16,14,3,0,0,5 +0,0,3,15,2,0,0,0,0,0,12,12,1,7,0,0,0,2,16,4,9,13,0,0,0,8,11,6,16,1,2,0,0,12,10,12,14,12,11,0,0,11,16,16,14,7,1,0,0,1,7,16,0,0,0,0,0,0,5,16,1,0,0,0,4 +0,0,0,3,14,13,3,0,0,0,0,12,9,8,8,0,0,0,0,12,8,11,6,0,0,0,0,7,14,11,1,0,0,1,8,12,15,5,0,0,0,6,14,0,4,12,0,0,0,0,7,12,1,15,2,0,0,0,0,3,13,15,2,0,8 +0,0,0,3,12,10,0,0,0,0,1,14,6,15,0,0,0,0,0,16,6,10,0,0,0,0,0,14,16,2,0,0,0,0,3,14,15,3,0,0,0,1,16,4,9,9,0,0,0,0,4,13,4,7,8,0,0,0,0,3,10,11,15,2,8 +0,0,3,15,6,0,0,0,0,0,9,13,1,6,9,0,0,3,16,3,6,15,5,0,0,7,15,1,14,9,5,0,0,10,13,9,16,15,7,0,0,7,16,16,11,4,0,0,0,0,3,16,5,0,0,0,0,0,4,16,3,0,0,0,4 +0,0,5,14,14,8,2,0,0,3,15,3,0,13,8,0,0,5,12,0,2,15,8,0,0,2,15,9,14,14,8,0,0,0,1,3,0,12,5,0,0,0,0,0,0,12,4,0,0,6,15,2,0,14,1,0,0,1,7,14,12,9,0,0,9 +0,0,4,14,14,4,0,0,0,0,15,10,10,13,0,0,0,5,15,0,2,15,6,0,0,4,13,0,0,14,8,0,0,6,9,0,0,12,7,0,0,3,14,1,0,12,5,0,0,0,12,9,6,15,2,0,0,0,3,14,14,6,0,0,0 +0,0,0,2,13,13,0,0,0,0,0,12,10,16,0,0,0,0,7,13,8,11,0,0,0,0,5,16,16,4,0,0,0,0,3,16,16,4,0,0,0,2,14,9,7,13,1,0,0,1,11,8,3,9,8,0,0,0,0,5,10,15,16,0,8 +0,0,3,13,14,4,0,0,0,0,13,12,14,16,0,0,0,1,16,3,14,16,4,0,0,1,14,9,16,16,6,0,0,0,2,8,4,11,9,0,0,0,2,2,0,12,10,0,0,0,14,14,4,11,9,0,0,0,4,8,11,16,9,0,9 +0,0,0,4,12,15,4,0,0,0,3,14,4,10,8,0,0,0,4,12,5,14,2,0,0,0,4,16,14,3,0,0,0,1,12,15,13,0,0,0,0,6,13,1,12,6,0,0,0,0,10,13,5,14,1,0,0,0,0,4,10,16,8,0,8 +0,0,7,16,15,4,0,0,0,0,14,16,9,15,2,0,0,1,15,12,1,9,8,0,0,4,16,0,0,7,10,0,0,7,13,0,0,10,11,0,0,7,12,0,2,15,6,0,0,3,15,12,14,14,1,0,0,0,10,16,14,4,0,0,0 +0,0,5,10,8,8,0,0,0,0,0,16,16,15,2,0,0,0,7,16,16,13,0,0,0,0,10,16,16,4,0,0,0,0,9,16,16,0,0,0,0,0,10,16,16,0,0,0,0,0,8,16,15,0,0,0,0,0,2,11,9,0,0,0,1 +0,2,11,16,15,2,0,0,0,12,16,15,16,4,0,0,0,2,3,2,16,4,0,0,0,0,0,10,14,0,0,0,0,0,4,16,5,0,0,0,0,0,12,12,3,11,9,0,0,0,16,16,16,16,6,0,0,0,14,15,12,5,0,0,2 +0,0,3,12,16,14,0,0,0,3,15,16,15,14,0,0,0,3,12,1,15,8,0,0,0,0,0,9,16,8,0,0,0,0,0,10,16,16,8,0,0,0,0,2,5,13,8,0,0,0,2,11,11,15,5,0,0,0,3,16,16,9,0,0,3 +0,0,0,0,15,7,0,0,0,0,0,10,16,6,0,0,0,0,8,15,14,4,0,0,0,6,15,2,15,2,1,0,0,9,16,16,16,16,11,0,0,5,10,12,16,8,1,0,0,0,0,1,15,0,0,0,0,0,0,1,15,0,0,0,4 
+0,0,6,13,15,16,11,0,0,0,10,11,8,8,5,0,0,2,13,0,0,0,0,0,0,4,11,7,8,5,0,0,0,7,16,14,10,14,2,0,0,1,7,1,2,12,3,0,0,0,5,8,14,6,0,0,0,0,8,12,5,0,0,0,5 +0,0,0,13,3,0,0,0,0,0,8,13,1,0,0,0,0,0,12,5,0,0,0,0,0,1,13,0,0,0,0,0,0,1,12,6,11,9,3,0,0,1,15,16,12,8,11,0,0,0,9,13,2,6,16,2,0,0,0,11,16,14,7,0,6 +0,0,4,10,16,16,7,0,0,3,16,13,11,16,2,0,0,1,3,0,10,9,0,0,0,0,5,8,14,15,13,0,0,0,15,16,14,12,8,0,0,0,3,12,7,0,0,0,0,0,0,15,4,0,0,0,0,0,3,14,1,0,0,0,7 +0,0,4,11,15,2,0,0,0,2,16,9,8,9,0,0,0,4,15,0,5,16,3,0,0,0,11,11,16,9,0,0,0,0,4,16,15,1,0,0,0,0,13,9,6,12,1,0,0,0,15,3,0,9,5,0,0,0,5,13,13,12,5,0,8 +0,0,0,3,13,16,11,0,0,0,4,15,11,8,16,3,0,2,15,9,6,13,15,3,0,4,16,16,16,16,11,0,0,0,7,8,6,16,2,0,0,0,0,0,6,14,0,0,0,0,0,0,14,9,0,0,0,0,0,3,16,4,0,0,9 +0,0,7,8,12,6,0,0,0,1,14,11,12,15,0,0,0,3,15,0,0,10,5,0,0,4,9,0,0,8,4,0,0,8,8,0,0,13,0,0,0,7,9,0,9,11,0,0,0,2,14,10,14,5,0,0,0,0,9,15,6,0,0,0,0 +0,0,7,16,13,5,0,0,0,0,13,16,16,5,0,0,0,1,16,16,16,3,0,0,0,1,14,16,15,0,0,0,0,1,16,16,15,1,0,0,0,0,14,16,16,4,0,0,0,0,6,16,16,7,0,0,0,0,4,14,13,6,0,0,1 +0,0,6,15,15,1,0,0,0,4,16,13,16,4,0,0,0,10,11,2,16,2,0,0,0,1,1,10,14,0,0,0,0,0,1,14,6,0,0,0,0,0,6,14,1,12,9,0,0,0,11,15,14,16,9,0,0,0,8,16,12,5,0,0,2 +0,0,4,14,16,5,0,0,0,4,16,16,16,8,0,0,0,12,12,0,15,8,0,0,0,2,1,5,16,13,1,0,0,0,0,1,11,15,11,0,0,0,0,0,0,11,12,0,0,0,2,13,12,16,7,0,0,0,3,16,15,8,0,0,3 +0,0,0,1,15,5,0,0,0,0,0,12,16,0,0,0,0,0,7,16,16,3,0,0,0,5,16,8,16,8,3,0,0,11,16,12,16,16,12,0,0,11,16,15,16,7,2,0,0,1,4,2,16,0,0,0,0,0,0,2,14,0,0,0,4 +0,1,10,12,15,11,0,0,0,8,16,13,9,4,0,0,0,5,15,1,0,0,0,0,0,8,10,0,0,0,0,0,0,2,14,16,7,0,0,0,0,0,1,4,13,7,0,0,0,0,0,6,11,11,0,0,0,0,9,16,14,2,0,0,5 +0,0,1,12,2,0,0,0,0,0,6,13,0,0,0,0,0,0,11,8,0,0,0,0,0,1,15,1,0,0,0,0,0,2,15,2,14,13,4,0,0,2,15,16,10,5,14,0,0,0,9,13,4,9,14,0,0,0,0,10,13,12,3,0,6 +0,0,2,11,16,16,16,4,0,0,5,11,8,8,16,1,0,0,0,0,0,14,6,0,0,0,2,10,13,16,13,0,0,0,12,16,16,9,2,0,0,0,2,5,14,0,0,0,0,0,0,11,9,0,0,0,0,0,0,16,6,0,0,0,7 +0,0,5,12,16,7,0,0,0,5,14,4,9,15,5,0,0,4,13,6,14,6,2,0,0,1,14,16,2,0,0,0,0,3,15,12,9,0,0,0,0,5,12,0,10,7,0,0,0,3,15,4,2,15,0,0,0,0,5,14,14,7,0,0,8 +0,0,0,1,7,15,11,0,0,0,0,11,8,3,13,0,0,0,10,6,2,12,11,0,0,1,16,12,16,16,7,0,0,2,16,14,7,12,2,0,0,0,0,0,3,11,0,0,0,0,0,0,7,9,0,0,0,0,0,0,9,6,0,0,9 +0,0,5,15,14,3,0,0,0,0,13,15,9,15,2,0,0,4,16,12,0,10,6,0,0,8,16,9,0,8,10,0,0,7,15,5,0,12,11,0,0,7,13,0,5,16,6,0,0,0,16,12,15,13,1,0,0,0,6,16,12,2,0,0,0 +0,0,4,16,15,4,0,0,0,0,8,16,16,4,0,0,0,0,12,16,13,0,0,0,0,2,16,16,10,0,0,0,0,3,16,16,8,0,0,0,0,2,16,16,12,0,0,0,0,0,9,16,16,4,0,0,0,0,3,12,14,11,0,0,1 +0,0,8,15,12,1,0,0,0,8,13,8,12,6,0,0,0,4,2,0,8,6,0,0,0,0,0,1,13,2,0,0,0,0,0,9,7,0,0,0,0,0,5,13,0,4,4,0,0,0,10,12,9,15,11,0,0,0,9,16,9,7,1,0,2 +0,0,6,13,16,8,0,0,0,5,16,15,14,12,0,0,0,9,12,2,15,8,0,0,0,0,0,9,12,0,0,0,0,0,0,15,16,13,3,0,0,0,0,3,9,15,11,0,0,0,1,8,14,16,8,0,0,0,7,16,14,6,0,0,3 +0,0,0,0,6,15,2,0,0,0,0,5,16,16,2,0,0,0,4,16,12,16,0,0,0,4,15,6,7,13,0,0,0,11,15,15,16,16,9,0,0,9,13,12,13,14,3,0,0,0,0,0,9,8,0,0,0,0,0,0,8,8,0,0,4 +0,1,13,16,16,11,1,0,0,8,16,16,13,11,1,0,0,11,13,1,0,0,0,0,0,10,13,2,0,0,0,0,0,2,14,15,6,0,0,0,0,0,0,8,16,6,0,0,0,0,6,9,15,9,0,0,0,0,13,16,15,3,0,0,5 +0,0,1,10,0,0,0,0,0,0,7,12,0,0,0,0,0,0,12,7,0,0,0,0,0,0,14,3,0,0,0,0,0,0,15,9,12,10,2,0,0,0,16,13,8,8,11,0,0,0,13,10,4,9,15,0,0,0,3,10,15,9,2,0,6 +0,0,0,4,11,15,16,12,0,0,2,16,12,9,11,12,0,0,1,2,0,0,14,5,0,0,0,7,12,14,15,0,0,0,3,16,16,15,2,0,0,0,0,1,11,8,0,0,0,0,0,2,15,1,0,0,0,0,0,5,10,0,0,0,7 +0,0,3,13,14,4,0,0,0,0,15,8,7,11,0,0,0,0,16,0,0,11,3,0,0,0,10,6,14,14,1,0,0,0,5,16,14,1,0,0,0,0,12,10,8,12,0,0,0,0,14,3,0,9,8,0,0,0,4,14,15,12,4,0,8 
+0,0,0,1,7,14,14,0,0,0,3,15,7,1,14,0,0,2,16,10,5,14,8,0,0,4,15,16,12,16,5,0,0,0,5,3,1,15,0,0,0,0,0,0,4,12,0,0,0,0,0,0,7,10,0,0,0,0,0,0,7,12,0,0,9 +0,0,3,12,8,3,0,0,0,0,7,16,13,13,1,0,0,0,13,8,0,9,4,0,0,0,16,2,0,6,6,0,0,4,12,0,0,10,3,0,0,3,12,0,0,13,2,0,0,0,12,4,12,10,0,0,0,0,5,16,13,2,0,0,0 +0,0,0,0,8,14,10,0,0,0,0,9,7,9,12,0,0,0,9,8,0,12,9,0,0,4,16,8,12,16,2,0,0,5,16,16,10,15,0,0,0,0,4,0,5,11,0,0,0,0,0,0,8,9,0,0,0,0,0,0,10,10,0,0,9 +0,0,2,15,15,16,11,0,0,0,8,16,11,3,0,0,0,0,13,9,0,0,0,0,0,5,16,3,9,11,3,0,0,10,15,15,16,16,11,0,0,6,16,10,7,16,5,0,0,0,3,4,15,8,0,0,0,0,4,15,7,0,0,0,5 +0,0,13,16,16,16,8,0,0,2,16,13,8,4,1,0,0,7,16,1,0,0,0,0,0,11,15,12,5,0,0,0,0,5,16,16,16,3,0,0,0,0,0,6,16,2,0,0,0,0,3,15,9,0,0,0,0,0,11,14,0,0,0,0,5 +0,0,4,15,4,0,0,0,0,0,9,16,2,0,0,0,0,0,16,10,0,0,0,0,0,6,16,3,0,0,0,0,0,10,15,11,16,13,4,0,0,7,16,16,11,14,14,0,0,2,16,11,5,15,12,0,0,0,3,16,16,14,3,0,6 +0,0,15,12,11,6,2,0,0,4,16,15,12,12,10,0,0,7,14,1,0,0,0,0,0,10,12,3,1,0,0,0,0,8,16,16,14,2,0,0,0,1,8,8,16,8,0,0,0,0,1,11,15,2,0,0,0,0,13,16,6,0,0,0,5 +0,0,5,16,12,2,0,0,0,0,13,14,15,11,0,0,0,6,15,1,2,16,4,0,0,6,14,0,0,9,8,0,0,8,10,0,0,13,8,0,0,4,13,0,1,14,8,0,0,0,14,14,15,15,3,0,0,0,5,12,13,8,0,0,0 +0,0,0,1,12,16,14,0,0,0,3,14,13,15,13,0,0,4,16,15,13,16,4,0,0,3,16,16,16,16,3,0,0,0,7,7,14,14,0,0,0,0,0,0,12,11,0,0,0,0,0,0,13,10,0,0,0,0,0,0,13,12,0,0,9 +0,0,6,14,13,4,0,0,0,4,16,11,10,15,0,0,0,9,11,0,12,11,0,0,0,7,11,8,16,3,0,0,0,0,13,16,10,0,0,0,0,0,13,13,12,9,0,0,0,0,12,8,0,15,1,0,0,0,5,16,16,11,0,0,8 +0,0,0,0,8,15,9,0,0,0,1,12,8,2,11,0,0,0,10,11,0,11,8,0,0,5,16,14,15,15,3,0,0,2,12,10,4,14,0,0,0,0,0,0,6,9,0,0,0,0,0,0,9,6,0,0,0,0,0,0,9,6,0,0,9 +0,0,4,12,16,6,0,0,0,4,16,10,5,16,4,0,0,8,13,0,5,15,5,0,0,6,12,7,15,3,0,0,0,0,12,16,12,1,0,0,0,0,11,10,9,11,0,0,0,0,12,6,0,13,3,0,0,0,6,13,13,8,0,0,8 +0,0,0,4,15,11,0,0,0,0,2,15,16,13,0,0,0,0,13,13,11,10,0,0,0,7,14,3,14,12,6,0,0,8,16,16,16,15,8,0,0,1,8,9,16,4,0,0,0,0,0,3,16,0,0,0,0,0,0,3,14,0,0,0,4 +0,0,0,14,16,15,11,0,0,0,2,16,16,16,10,0,0,0,4,16,16,16,4,0,0,0,12,16,16,12,0,0,0,0,12,16,16,6,0,0,0,0,14,16,16,6,0,0,0,0,11,16,15,2,0,0,0,0,1,15,15,1,0,0,1 +0,0,0,4,13,16,15,2,0,0,2,15,13,13,16,6,0,0,7,7,0,3,16,4,0,0,0,4,4,8,14,0,0,0,14,16,16,16,6,0,0,0,11,9,10,12,0,0,0,0,0,0,13,3,0,0,0,0,0,4,10,0,0,0,7 +0,0,0,3,9,16,16,2,0,0,4,16,13,11,16,1,0,0,3,5,0,6,13,0,0,0,0,2,7,14,9,0,0,0,4,16,16,15,3,0,0,0,9,8,11,12,0,0,0,0,0,0,12,4,0,0,0,0,0,2,15,1,0,0,7 +0,0,1,9,15,15,1,0,0,0,13,14,8,12,4,0,0,5,11,1,2,13,1,0,0,1,4,0,11,6,0,0,0,0,0,0,15,14,1,0,0,0,0,0,3,13,6,0,0,0,0,4,10,16,2,0,0,0,0,12,13,4,0,0,3 +0,0,8,12,16,16,9,0,0,4,16,16,13,9,2,0,0,11,14,4,0,0,0,0,0,7,15,10,1,0,0,0,0,0,12,16,13,1,0,0,0,0,0,4,16,4,0,0,0,0,5,10,16,3,0,0,0,0,9,16,10,0,0,0,5 +0,0,1,10,15,11,7,0,0,0,5,16,16,16,11,0,0,0,6,16,16,16,6,0,0,0,12,16,16,12,0,0,0,2,16,16,16,6,0,0,0,2,12,16,12,0,0,0,0,0,9,16,16,7,0,0,0,0,3,12,16,2,0,0,1 +0,0,1,13,12,1,0,0,0,0,9,16,16,12,0,0,0,0,14,6,0,13,3,0,0,6,10,0,0,10,6,0,0,7,13,0,0,9,8,0,0,3,16,1,3,14,7,0,0,0,11,16,16,16,1,0,0,0,0,11,16,6,0,0,0 +0,0,4,16,16,4,0,0,0,0,10,15,12,14,0,0,0,2,11,0,0,9,6,0,0,5,6,0,0,4,5,0,0,4,9,0,0,7,4,0,0,4,10,0,2,14,0,0,0,0,14,15,16,8,0,0,0,0,4,13,10,0,0,0,0 +0,0,6,16,16,7,0,0,0,8,16,13,10,16,0,0,0,6,9,0,6,15,0,0,0,0,0,0,13,9,0,0,0,0,0,6,16,1,0,0,0,0,1,15,8,3,5,0,0,0,8,16,11,16,9,0,0,0,5,16,16,7,0,0,2 +0,0,6,15,15,3,0,0,0,5,16,13,15,8,0,0,0,8,13,0,13,8,0,0,0,0,0,3,16,3,0,0,0,0,0,11,12,0,0,0,0,0,3,16,5,9,8,0,0,0,8,15,15,15,3,0,0,0,5,16,12,1,0,0,2 
+0,0,1,11,16,16,7,0,0,0,7,13,8,16,5,0,0,0,0,1,1,16,4,0,0,0,2,7,13,16,15,0,0,1,15,16,16,12,3,0,0,1,8,4,16,2,0,0,0,0,0,9,11,0,0,0,0,0,1,14,4,0,0,0,7 +0,0,2,12,16,10,0,0,0,3,15,10,7,16,4,0,0,9,8,0,11,10,0,0,0,3,15,11,14,1,0,0,0,0,10,16,9,0,0,0,0,0,14,7,13,4,0,0,0,0,9,7,6,10,0,0,0,0,1,12,16,5,0,0,8 +0,0,3,12,16,16,3,0,0,2,16,16,11,16,4,0,0,8,14,2,10,16,1,0,0,5,5,3,16,4,0,0,0,0,0,11,12,0,0,0,0,0,3,16,5,2,3,0,0,0,3,16,12,15,6,0,0,0,0,15,16,8,0,0,2 +0,0,0,9,15,6,0,0,0,0,5,15,16,15,0,0,0,0,15,15,4,16,3,0,0,2,14,5,0,12,8,0,0,6,13,0,1,14,6,0,0,1,10,14,15,16,3,0,0,0,3,16,16,14,1,0,0,0,0,9,13,5,0,0,0 +0,0,3,8,11,11,1,0,0,0,3,16,16,12,0,0,0,0,2,15,16,12,0,0,0,0,0,16,16,7,0,0,0,0,1,15,16,10,0,0,0,0,1,16,16,6,0,0,0,0,3,16,16,5,0,0,0,0,2,15,16,6,0,0,1 +0,0,1,13,16,10,0,0,0,1,13,15,8,16,3,0,0,8,15,3,4,15,0,0,0,1,3,0,12,8,0,0,0,0,0,4,14,1,0,0,0,0,0,11,8,0,4,0,0,0,1,16,8,13,9,0,0,0,0,14,16,11,0,0,2 +0,0,2,14,1,0,0,0,0,0,8,12,0,0,0,0,0,0,12,5,0,0,0,0,0,2,14,0,0,0,0,0,0,0,10,0,6,7,2,0,0,4,12,13,15,14,12,0,0,0,13,12,2,11,14,0,0,0,3,13,16,13,1,0,6 +0,0,6,14,16,16,2,0,0,5,16,13,11,16,0,0,0,0,7,2,15,12,0,0,0,0,0,7,16,13,1,0,0,0,0,0,6,15,10,0,0,0,0,0,0,15,9,0,0,0,3,11,8,16,6,0,0,0,7,16,16,8,0,0,3 +0,1,7,13,16,11,0,0,0,11,16,13,15,16,0,0,0,3,8,2,16,9,0,0,0,0,0,8,16,4,0,0,0,0,0,5,16,16,5,0,0,0,0,0,3,14,11,0,0,0,3,8,14,16,8,0,0,0,7,16,12,7,0,0,3 +0,0,1,6,12,16,9,0,0,0,10,15,10,13,9,0,0,0,2,1,0,14,2,0,0,0,0,6,12,16,15,0,0,0,5,16,16,14,7,0,0,0,3,6,15,0,0,0,0,0,0,7,9,0,0,0,0,0,0,11,2,0,0,0,7 +0,0,4,13,16,11,0,0,0,9,16,9,10,15,0,0,0,5,4,0,12,11,0,0,0,0,0,5,16,12,1,0,0,0,0,1,9,15,8,0,0,0,0,0,0,8,12,0,0,0,1,6,8,16,8,0,0,0,5,16,15,9,1,0,3 +0,1,11,15,16,9,0,0,0,3,16,10,10,16,1,0,0,0,2,1,14,11,0,0,0,0,0,14,16,7,0,0,0,0,0,13,16,16,5,0,0,0,0,0,2,16,8,0,0,0,6,8,13,15,5,0,0,0,15,16,12,5,0,0,3 +0,0,0,6,15,1,0,0,0,0,3,16,9,15,3,0,0,1,15,7,5,15,0,0,0,9,16,4,11,14,10,0,0,9,16,16,16,16,9,0,0,0,2,4,16,2,0,0,0,0,0,6,14,0,0,0,0,0,0,7,10,0,0,0,4 +0,0,2,14,1,0,0,0,0,0,11,12,1,0,0,0,0,1,15,4,0,0,0,0,0,5,13,0,0,0,0,0,0,7,12,12,16,13,2,0,0,4,16,12,6,6,11,0,0,0,14,9,0,5,13,0,0,0,3,11,15,14,1,0,6 +0,0,1,10,0,0,0,0,0,0,4,15,0,0,0,0,0,0,10,11,0,0,0,0,0,0,13,9,3,2,0,0,0,0,13,16,16,15,4,0,0,0,13,13,6,4,12,0,0,0,9,11,5,9,15,2,0,0,2,12,16,12,6,0,6 +0,0,9,7,0,0,0,0,0,0,9,11,0,0,0,0,0,0,15,4,0,0,0,0,0,2,16,1,0,0,0,0,0,5,16,8,14,9,0,0,0,5,16,15,8,9,10,0,0,3,16,2,0,7,11,0,0,0,7,14,16,12,1,0,6 +0,0,0,1,11,7,0,0,0,0,0,11,16,5,0,0,0,0,9,15,15,7,0,0,0,5,16,3,16,4,0,0,0,10,13,9,16,14,8,0,0,3,15,16,16,13,6,0,0,0,0,0,16,3,0,0,0,0,0,0,14,2,0,0,4 +0,0,0,1,7,12,14,1,0,0,1,13,8,4,13,0,0,0,10,16,9,15,11,0,0,1,16,15,15,16,3,0,0,0,11,9,3,14,0,0,0,0,0,0,5,9,0,0,0,0,0,0,7,8,0,0,0,0,0,0,8,6,0,0,9 +0,0,0,10,12,8,1,0,0,0,5,16,16,16,0,0,0,0,10,16,16,9,0,0,0,2,15,16,13,2,0,0,0,4,16,16,8,0,0,0,0,1,15,16,7,0,0,0,0,0,9,16,11,1,0,0,0,0,0,6,12,6,0,0,1 +0,0,6,16,16,16,10,0,0,0,13,15,9,6,0,0,0,6,16,4,0,0,0,0,0,12,15,4,2,0,0,0,0,8,16,16,16,15,1,0,0,0,6,8,9,16,4,0,0,0,1,3,13,15,1,0,0,0,7,16,15,3,0,0,5 +0,0,0,9,13,3,0,0,0,0,8,15,12,15,2,0,0,0,12,8,0,15,4,0,0,3,13,0,0,10,7,0,0,8,9,0,0,13,7,0,0,2,16,4,7,16,5,0,0,0,14,14,16,15,1,0,0,0,1,12,14,4,0,0,0 +0,0,0,0,8,13,3,0,0,0,0,12,11,11,5,0,0,0,11,8,8,16,0,0,0,2,16,16,16,15,0,0,0,2,16,11,7,10,0,0,0,0,0,0,8,7,0,0,0,0,0,0,10,8,0,0,0,0,0,0,9,7,0,0,9 +0,0,2,16,15,15,8,0,0,0,7,16,15,12,7,0,0,3,15,8,1,0,0,0,0,9,15,4,4,2,0,0,0,5,16,16,16,15,2,0,0,0,5,6,8,16,3,0,0,0,0,1,14,10,0,0,0,0,2,16,13,1,0,0,5 +0,0,9,16,16,9,0,0,0,5,16,14,15,16,1,0,0,2,11,1,10,15,0,0,0,0,0,1,15,8,0,0,0,0,0,8,15,1,0,0,0,0,6,16,7,8,7,0,0,0,9,16,15,14,2,0,0,0,9,16,13,1,0,0,2 
+0,0,3,12,11,4,0,0,0,4,15,13,12,16,0,0,0,9,14,0,0,12,2,0,0,0,13,11,7,15,3,0,0,0,0,15,16,7,0,0,0,0,5,16,10,14,2,0,0,0,11,13,0,8,8,0,0,0,2,12,16,16,7,0,8 +0,0,4,14,16,5,0,0,0,4,16,16,16,8,0,0,0,10,15,9,16,4,0,0,0,1,2,13,14,0,0,0,0,0,2,16,6,0,0,0,0,0,7,16,0,5,7,0,0,0,8,16,13,16,6,0,0,0,2,15,16,6,0,0,2 +0,0,4,12,13,5,0,0,0,0,14,16,16,16,4,0,0,6,13,2,1,11,8,0,0,6,11,0,0,8,8,0,0,4,16,0,0,10,8,0,0,4,16,4,8,16,3,0,0,0,16,16,16,12,0,0,0,0,4,15,14,3,0,0,0 +0,0,3,11,7,1,0,0,0,0,10,15,14,14,0,0,0,2,16,10,1,12,4,0,0,2,16,3,0,4,8,0,0,5,12,0,0,6,8,0,0,1,12,0,0,11,9,0,0,0,15,9,14,15,1,0,0,0,4,15,15,4,0,0,0 +0,0,0,10,16,11,1,0,0,0,0,15,16,15,2,0,0,0,1,13,16,14,0,0,0,0,1,15,16,12,0,0,0,0,0,14,16,8,0,0,0,0,0,13,16,5,0,0,0,0,1,14,16,1,0,0,0,0,0,8,15,1,0,0,1 +0,0,0,7,14,16,5,0,0,0,7,16,12,16,8,0,0,0,4,2,1,16,4,0,0,0,3,12,12,16,8,0,0,0,12,16,16,15,5,0,0,0,5,5,13,6,0,0,0,0,0,2,14,0,0,0,0,0,0,9,8,0,0,0,7 +0,0,1,13,1,0,0,0,0,0,7,15,1,0,0,0,0,1,14,6,0,0,0,0,0,0,16,3,0,1,0,0,0,1,16,6,15,15,5,0,0,1,16,14,4,3,12,0,0,0,7,7,0,9,12,0,0,0,0,11,16,9,2,0,6 +0,1,5,12,16,14,2,0,0,8,16,16,16,16,3,0,0,6,9,2,12,12,0,0,0,0,0,5,16,8,0,0,0,0,0,1,13,16,9,0,0,0,0,0,1,14,10,0,0,0,1,11,15,15,5,0,0,0,6,16,12,5,0,0,3 +0,2,12,16,12,0,0,0,0,7,16,13,16,3,0,0,0,0,3,5,16,0,0,0,0,0,3,15,7,0,0,0,0,0,11,13,0,0,0,0,0,6,13,1,0,0,0,0,0,6,16,11,8,11,5,0,0,0,15,16,16,15,3,0,2 +0,0,5,15,13,12,4,0,0,0,11,16,16,14,0,0,0,0,16,16,16,8,0,0,0,4,16,16,15,3,0,0,0,2,16,16,8,0,0,0,0,0,16,15,3,0,0,0,0,0,10,16,4,0,0,0,0,0,8,15,3,0,0,0,1 +0,0,0,6,13,16,16,9,0,0,6,16,14,11,16,10,0,0,2,3,0,4,15,4,0,0,2,9,12,16,13,0,0,2,15,16,16,16,3,0,0,4,9,3,10,10,0,0,0,0,0,1,16,2,0,0,0,0,0,7,9,0,0,0,7 +0,0,0,2,14,2,0,0,0,0,1,13,15,6,0,0,0,0,12,15,12,11,0,0,0,5,16,4,15,6,0,0,0,12,15,8,16,16,11,0,0,6,16,16,16,8,2,0,0,0,2,6,16,0,0,0,0,0,0,2,14,0,0,0,4 +0,0,0,8,3,0,0,0,0,0,2,16,8,0,0,0,0,0,9,15,1,0,0,0,0,0,12,10,0,0,0,0,0,0,14,7,0,0,0,0,0,0,10,15,16,16,14,1,0,0,4,16,1,4,15,6,0,0,0,5,14,15,10,0,6 +0,0,6,9,11,9,0,0,0,13,16,15,15,15,0,0,0,4,5,2,15,6,0,0,0,0,0,3,15,6,0,0,0,0,0,0,6,15,6,0,0,0,0,0,0,5,12,0,0,0,0,5,13,16,9,0,0,0,3,13,12,7,1,0,3 +0,1,11,16,15,12,3,0,0,1,13,16,16,12,0,0,0,2,16,16,16,8,0,0,0,0,16,16,16,2,0,0,0,8,16,16,14,0,0,0,0,7,16,16,9,0,0,0,0,1,13,16,13,1,0,0,0,0,8,16,12,0,0,0,1 +0,0,7,15,12,0,0,0,0,3,15,8,14,2,0,0,0,0,5,2,11,0,0,0,0,0,1,11,8,2,0,0,0,0,8,16,16,15,4,0,0,0,1,4,2,12,6,0,0,0,2,4,13,12,0,0,0,0,5,13,9,1,0,0,3 +0,0,0,0,5,15,10,0,0,0,0,8,11,15,7,0,0,0,6,13,10,16,7,0,0,3,16,14,12,15,4,0,0,1,11,8,1,14,2,0,0,0,0,0,3,13,0,0,0,0,0,0,6,10,0,0,0,0,0,0,9,4,0,0,9 +0,0,5,12,12,8,1,0,0,0,10,16,16,15,0,0,0,0,11,16,16,8,0,0,0,4,16,16,16,4,0,0,0,3,16,16,10,0,0,0,0,0,13,16,16,3,0,0,0,0,13,16,16,0,0,0,0,0,2,10,12,0,0,0,1 +0,0,0,7,14,16,6,0,0,0,10,16,12,15,9,0,0,0,8,3,2,16,7,0,0,0,1,8,13,16,14,0,0,2,13,16,16,12,1,0,0,6,12,6,16,3,0,0,0,0,0,5,13,0,0,0,0,0,0,9,6,0,0,0,7 +0,0,3,11,0,0,0,0,0,0,9,13,0,0,0,0,0,0,15,4,0,0,0,0,0,2,15,0,1,0,0,0,0,4,15,14,16,13,2,0,0,3,16,11,3,7,12,0,0,0,13,6,3,8,14,0,0,0,4,14,16,14,7,0,6 +0,0,4,14,14,0,0,0,0,5,16,16,16,5,1,0,0,9,13,0,13,16,2,0,0,3,16,13,15,5,0,0,0,0,7,16,13,0,0,0,0,0,10,13,14,7,0,0,0,0,10,11,10,15,0,0,0,0,4,13,11,3,0,0,8 +0,0,0,12,8,0,0,0,0,0,6,16,3,12,4,0,0,1,16,5,8,14,0,0,0,9,15,0,13,10,2,0,0,10,15,12,16,16,9,0,0,6,16,16,15,9,1,0,0,0,0,14,5,0,0,0,0,0,0,15,0,0,0,0,4 +0,0,6,14,11,0,0,0,0,3,16,9,16,0,0,0,0,3,7,5,12,0,0,0,0,0,1,14,8,2,0,0,0,0,2,16,16,16,4,0,0,0,0,0,0,11,8,0,0,0,0,4,10,15,2,0,0,0,5,16,12,4,0,0,3 
+0,0,0,4,11,9,5,0,0,0,5,16,16,16,5,0,0,0,11,16,16,9,0,0,0,4,16,16,16,4,0,0,0,1,14,16,9,0,0,0,0,4,15,16,6,0,0,0,0,0,9,16,8,0,0,0,0,0,0,7,5,0,0,0,1 +0,0,1,14,8,8,1,0,0,0,10,13,8,16,1,0,0,2,16,4,10,11,0,0,0,7,15,6,14,16,13,0,0,3,16,16,15,9,2,0,0,0,3,11,9,0,0,0,0,0,0,12,4,0,0,0,0,0,0,12,0,0,0,0,4 +0,0,2,10,15,1,0,0,0,3,16,16,13,13,0,0,0,5,16,12,1,12,1,0,0,7,13,5,0,7,5,0,0,2,14,0,0,7,10,0,0,0,12,2,0,12,7,0,0,0,9,12,12,16,4,0,0,0,0,10,16,6,0,0,0 +0,0,10,16,16,13,0,0,0,4,16,15,12,4,0,0,0,8,16,4,0,0,0,0,0,4,16,11,6,1,0,0,0,0,8,16,16,13,2,0,0,0,0,1,7,14,12,0,0,0,0,6,13,16,10,0,0,0,12,16,14,6,0,0,5 +0,1,10,16,16,8,0,0,0,10,16,13,16,12,0,0,0,1,3,3,16,9,0,0,0,0,0,13,14,1,0,0,0,0,2,16,16,12,3,0,0,0,0,5,11,16,11,0,0,0,2,7,14,16,6,0,0,0,11,16,13,5,0,0,3 +0,0,0,6,11,0,0,0,0,0,0,15,10,0,0,0,0,0,7,15,2,0,0,0,0,0,16,6,0,0,0,0,0,3,16,7,5,5,0,0,0,2,16,13,9,13,11,0,0,0,8,13,7,5,15,3,0,0,0,5,11,13,12,2,6 +0,0,0,0,5,11,14,1,0,0,0,10,13,8,15,2,0,0,11,9,4,9,12,0,0,5,16,16,16,16,6,0,0,0,15,16,13,16,3,0,0,0,2,3,1,15,0,0,0,0,0,0,5,5,0,0,0,0,0,0,6,0,0,0,9 +0,0,0,5,11,0,0,0,0,0,1,14,9,0,0,0,0,0,4,14,1,0,0,0,0,0,10,8,0,0,0,0,0,0,13,8,4,6,2,0,0,0,11,16,13,12,13,0,0,0,12,14,4,5,16,2,0,0,1,8,16,13,9,1,6 +0,0,2,12,12,8,1,0,0,0,2,15,16,16,8,0,0,0,5,16,16,14,3,0,0,0,8,16,16,10,0,0,0,3,15,16,13,0,0,0,0,2,14,16,9,0,0,0,0,0,11,16,9,0,0,0,0,0,1,9,5,0,0,0,1 +0,0,1,9,15,12,5,0,0,0,8,16,16,16,13,0,0,0,3,1,1,14,10,0,0,0,3,10,13,16,15,0,0,2,16,16,16,15,3,0,0,3,8,2,13,6,0,0,0,0,0,5,13,0,0,0,0,0,0,11,5,0,0,0,7 +0,0,11,16,16,16,16,2,0,5,16,16,14,10,4,0,0,5,16,5,0,0,0,0,0,1,15,10,0,0,0,0,0,0,6,16,8,0,0,0,0,0,0,9,14,0,0,0,0,0,0,5,16,3,0,0,0,0,10,16,13,1,0,0,5 +0,0,0,9,13,0,0,0,0,0,3,15,6,12,0,0,0,1,12,8,5,14,0,0,0,6,14,0,12,7,0,0,0,14,6,2,16,9,5,0,0,16,13,13,16,15,4,0,1,15,16,16,12,2,0,0,0,3,3,13,4,0,0,0,4 +0,0,0,10,6,0,10,14,0,0,7,15,2,7,14,1,0,0,15,9,1,15,12,2,0,4,16,10,11,16,12,1,0,2,16,16,16,9,0,0,0,0,5,12,10,0,0,0,0,0,0,13,5,0,0,0,0,0,0,15,3,0,0,0,4 +0,0,0,8,14,15,7,0,0,0,4,16,12,15,14,0,0,0,1,1,0,11,12,0,0,0,2,4,6,14,15,0,0,4,16,16,16,16,5,0,0,8,12,7,14,12,0,0,0,0,0,4,16,3,0,0,0,0,0,11,7,0,0,0,7 +0,0,7,15,15,5,0,0,0,6,16,12,16,12,0,0,0,1,7,0,16,10,0,0,0,0,0,10,15,0,0,0,0,0,1,16,7,0,0,0,0,0,10,13,1,5,1,0,0,0,12,12,13,15,3,0,0,0,10,16,13,3,0,0,2 +0,0,0,8,15,9,1,0,0,0,11,14,12,15,8,0,0,0,15,5,6,14,2,0,0,0,14,14,15,1,0,0,0,1,13,16,6,0,0,0,0,6,16,9,13,0,0,0,0,2,13,15,16,4,0,0,0,0,1,9,15,2,0,0,8 +0,0,9,16,16,8,0,0,0,5,16,15,14,16,0,0,0,4,9,3,13,12,0,0,0,0,0,8,15,1,0,0,0,0,2,16,7,0,0,0,0,0,11,14,1,4,3,0,0,0,16,14,15,16,4,0,0,0,9,16,15,5,0,0,2 +0,1,8,16,16,3,0,0,0,6,16,12,16,4,0,0,0,1,7,0,16,4,0,0,0,0,0,7,15,0,0,0,0,0,0,14,9,0,0,0,0,0,10,14,1,4,5,0,0,0,13,12,11,15,3,0,0,0,12,16,12,3,0,0,2 +0,0,8,14,16,16,1,0,0,6,16,16,8,3,0,0,0,14,14,1,0,0,0,0,0,10,15,4,0,0,0,0,0,3,15,16,6,0,0,0,0,0,1,8,15,2,0,0,0,0,2,13,15,0,0,0,0,0,10,16,4,0,0,0,5 +0,0,4,15,16,11,0,0,0,0,7,9,9,16,0,0,0,0,0,0,4,13,0,0,0,0,1,9,15,16,10,0,0,0,13,15,16,8,2,0,0,0,3,7,13,0,0,0,0,0,1,13,4,0,0,0,0,0,6,11,0,0,0,0,7 +0,0,0,1,8,13,14,2,0,0,2,13,9,4,14,4,0,0,13,9,0,9,14,1,0,4,16,14,14,16,6,0,0,1,11,10,7,14,0,0,0,0,0,0,8,8,0,0,0,0,0,0,11,5,0,0,0,0,0,0,11,3,0,0,9 +0,3,10,16,16,16,2,0,0,14,16,14,9,3,0,0,0,16,12,0,0,0,0,0,0,12,14,0,0,0,0,0,0,6,16,3,0,0,0,0,0,0,9,16,3,0,0,0,0,0,4,14,13,0,0,0,0,2,15,16,8,0,0,0,5 +0,0,0,5,11,0,6,0,0,0,3,15,7,6,16,1,0,0,13,9,1,13,7,0,0,6,15,2,6,15,0,0,0,14,10,0,14,12,3,0,0,14,16,16,16,14,3,0,0,5,11,14,13,2,0,0,0,0,0,7,9,0,0,0,4 
+0,0,4,12,16,8,0,0,0,5,16,11,10,16,4,0,0,8,13,0,1,13,4,0,0,3,16,13,15,13,3,0,0,0,9,16,16,7,0,0,0,0,14,7,5,15,6,0,0,0,10,12,7,13,10,0,0,0,3,13,13,10,1,0,8 +0,0,4,15,7,0,0,0,0,1,13,12,16,2,2,0,0,7,11,0,11,12,1,0,0,4,8,6,13,3,0,0,0,3,16,15,1,0,0,0,0,2,16,14,6,0,0,0,0,3,16,10,14,3,0,0,0,0,2,9,12,3,0,0,8 +0,0,0,11,5,3,11,0,0,0,7,14,2,12,9,0,0,2,15,6,3,16,5,0,0,7,16,8,13,16,13,0,0,7,16,16,16,7,1,0,0,0,4,10,13,0,0,0,0,0,0,12,6,0,0,0,0,0,0,12,0,0,0,0,4 +0,0,0,1,9,16,9,0,0,0,1,11,13,14,12,1,0,1,15,13,4,16,16,3,0,2,16,16,16,15,12,0,0,0,7,8,4,14,5,0,0,0,0,0,5,14,0,0,0,0,0,0,8,9,0,0,0,0,0,0,12,6,0,0,9 +0,0,2,12,15,3,0,0,0,0,15,15,13,15,0,0,0,2,14,3,1,12,3,0,0,4,8,0,0,8,8,0,0,7,10,0,0,9,5,0,0,1,13,5,3,15,2,0,0,0,7,16,14,15,0,0,0,0,0,10,14,4,0,0,0 +0,0,3,13,15,5,0,0,0,1,15,13,10,15,0,0,0,2,16,3,2,9,0,0,0,0,12,13,14,7,0,0,0,0,10,16,9,0,0,0,0,1,16,4,9,11,1,0,0,0,15,3,0,8,8,0,0,0,3,12,15,12,7,0,8 +0,0,0,3,12,16,15,1,0,0,3,16,9,10,16,0,0,0,14,13,7,15,10,0,0,2,16,16,16,16,2,0,0,2,12,9,13,8,0,0,0,0,0,0,15,5,0,0,0,0,0,3,16,1,0,0,0,0,0,3,14,1,0,0,9 +0,0,4,11,15,7,0,0,0,2,15,14,9,15,1,0,0,8,15,1,6,16,5,0,0,6,14,13,15,6,0,0,0,1,16,16,6,0,0,0,0,4,15,11,15,1,0,0,0,1,12,3,7,9,0,0,0,0,4,14,16,6,0,0,8 +0,0,7,11,15,9,0,0,0,0,15,15,4,11,4,0,0,3,11,5,0,2,10,0,0,7,8,0,0,3,8,0,0,6,8,0,0,4,8,0,0,5,8,0,0,8,5,0,0,1,12,2,1,13,0,0,0,0,5,16,14,3,0,0,0 +0,0,4,14,11,0,0,0,0,0,2,16,16,3,0,0,0,0,0,14,16,5,0,0,0,0,0,16,16,3,0,0,0,0,1,15,16,2,0,0,0,0,2,15,13,0,0,0,0,0,4,16,11,0,0,0,0,0,5,16,14,1,0,0,1 +0,2,15,16,12,0,0,0,0,8,11,8,16,0,0,0,0,3,1,7,13,0,0,0,0,0,0,10,8,0,0,0,0,0,0,15,5,0,0,0,0,0,7,15,0,0,0,0,0,0,14,11,6,5,2,0,0,1,16,16,16,16,9,0,2 +0,1,13,16,12,1,0,0,0,1,9,5,16,1,0,0,0,0,0,9,5,0,0,0,0,0,9,10,0,0,0,0,0,0,8,15,16,11,1,0,0,0,0,0,2,12,7,0,0,0,2,4,6,15,3,0,0,0,14,16,11,5,0,0,3 +0,0,0,12,12,0,0,0,0,0,5,16,4,0,0,0,0,1,14,11,0,0,0,0,0,6,16,3,2,0,0,0,0,13,12,8,12,0,0,0,0,15,16,15,16,13,4,0,0,4,9,14,16,7,0,0,0,0,0,11,13,0,0,0,4 +0,2,13,16,16,16,15,2,0,8,16,12,8,4,1,0,0,5,16,13,1,0,0,0,0,0,8,16,8,0,0,0,0,0,0,10,16,0,0,0,0,0,0,9,16,0,0,0,0,0,3,13,12,0,0,0,0,2,16,16,6,0,0,0,5 +0,0,1,14,9,0,0,0,0,0,14,15,3,0,0,0,0,1,16,10,0,0,0,0,0,5,14,13,15,10,0,0,0,8,16,2,3,14,5,0,0,5,16,4,0,12,6,0,0,0,10,13,2,14,6,0,0,0,2,12,16,11,1,0,6 +0,0,5,15,16,14,1,0,0,0,11,13,9,16,5,0,0,0,0,0,5,16,2,0,0,0,0,0,9,11,0,0,0,0,7,13,15,12,1,0,0,0,7,14,14,12,4,0,0,0,0,14,3,0,0,0,0,0,7,10,0,0,0,0,7 +0,0,3,13,8,0,0,0,0,4,16,16,14,0,0,0,0,11,11,9,10,0,0,0,0,8,14,15,9,0,0,0,0,0,7,16,15,5,0,0,0,0,4,16,3,13,9,0,0,0,5,15,4,13,11,0,0,0,1,15,15,8,2,0,8 +0,0,5,11,13,3,0,0,0,0,16,13,15,9,0,0,0,4,16,0,13,13,0,0,0,1,11,16,15,15,3,0,0,0,0,0,0,12,7,0,0,0,0,0,0,6,12,0,0,0,6,4,2,9,11,0,0,0,6,13,16,16,6,0,9 +0,0,6,16,16,8,0,0,0,2,16,8,9,16,3,0,0,8,16,1,0,9,9,0,0,9,12,0,0,8,12,0,0,10,12,0,0,8,10,0,0,8,13,0,0,9,8,0,0,2,16,8,6,15,3,0,0,0,8,16,15,8,0,0,0 +0,0,4,12,13,3,0,0,0,0,7,14,16,9,0,0,0,0,0,12,16,8,0,0,0,0,0,6,16,6,0,0,0,0,0,9,16,6,0,0,0,0,0,12,16,3,0,0,0,0,0,13,16,3,0,0,0,0,0,15,16,11,0,0,1 +0,3,15,14,5,0,0,0,0,14,14,14,15,0,0,0,0,8,1,6,16,2,0,0,0,0,0,9,16,2,0,0,0,0,1,14,11,0,0,0,0,0,9,16,2,0,1,0,0,4,16,15,8,9,15,0,0,3,16,16,16,15,5,0,2 +0,1,12,16,13,2,0,0,0,5,14,6,13,12,0,0,0,0,0,3,15,7,0,0,0,0,2,16,8,0,0,0,0,0,1,12,16,11,1,0,0,0,0,0,5,15,7,0,0,0,6,0,4,14,7,0,0,0,16,16,15,8,1,0,3 +0,0,0,8,15,5,0,0,0,0,3,16,13,1,0,0,0,0,12,16,2,0,0,0,0,5,16,7,9,4,0,0,0,14,16,13,16,14,3,0,0,8,14,16,16,14,2,0,0,0,0,9,16,3,0,0,0,0,0,11,14,0,0,0,4 +0,1,8,16,16,16,10,0,0,8,16,14,8,5,1,0,0,9,16,2,0,0,0,0,0,2,16,15,2,0,0,0,0,0,3,15,4,0,0,0,0,1,3,12,4,0,0,0,0,5,14,15,4,0,0,0,0,1,13,12,0,0,0,0,5 
+0,0,7,15,0,0,0,0,0,0,15,15,0,0,0,0,0,3,16,12,4,1,0,0,0,6,16,16,16,16,5,0,0,8,16,7,1,15,8,0,0,7,16,0,0,16,4,0,0,2,16,7,10,12,0,0,0,0,4,15,13,3,0,0,6 +0,0,7,16,16,5,0,0,0,1,15,11,14,11,0,0,0,0,0,0,12,8,0,0,0,0,3,8,14,12,5,0,0,0,14,16,16,10,5,0,0,0,2,8,14,0,0,0,0,0,1,15,8,0,0,0,0,0,8,14,1,0,0,0,7 +0,0,0,2,15,5,0,0,0,0,2,4,10,12,0,0,0,3,15,14,10,8,0,0,0,8,15,1,11,4,0,0,0,1,8,15,16,0,0,0,0,0,0,6,16,12,1,0,0,0,0,4,14,15,4,0,0,0,0,2,14,11,0,0,8 +0,0,1,5,12,13,0,0,0,0,11,13,15,16,1,0,0,2,14,0,10,12,4,0,0,5,13,12,3,12,0,0,0,0,5,6,0,12,4,0,0,0,0,0,0,15,2,0,0,0,4,5,0,16,3,0,0,0,0,4,14,13,0,0,9 +0,0,2,13,15,8,0,0,0,0,10,14,10,11,8,0,0,0,16,1,0,0,9,0,0,3,13,0,0,0,8,0,0,4,12,0,0,1,8,0,0,5,12,0,0,10,0,0,0,0,15,8,7,10,0,0,0,0,4,14,14,1,0,0,0 +0,0,1,11,15,8,0,0,0,0,0,15,16,8,0,0,0,0,0,13,16,10,0,0,0,0,0,16,16,6,0,0,0,0,2,16,16,6,0,0,0,0,5,16,16,5,0,0,0,0,5,16,15,1,0,0,0,0,2,15,15,3,0,0,1 +0,3,16,15,6,0,0,0,0,5,14,14,16,0,0,0,0,0,0,6,14,0,0,0,0,0,0,13,11,0,0,0,0,0,5,16,3,0,0,0,0,1,14,10,0,0,0,0,0,9,16,8,8,10,5,0,0,4,16,16,16,14,3,0,2 +0,2,11,14,10,1,0,0,0,6,12,8,15,10,0,0,0,0,0,0,10,11,0,0,0,0,0,8,14,2,0,0,0,0,0,7,16,15,1,0,0,0,0,0,2,13,8,0,0,2,5,1,2,12,7,0,0,1,12,16,16,10,0,0,3 +0,0,0,13,9,0,0,0,0,0,6,16,2,0,0,0,0,0,12,9,0,2,0,0,0,7,15,1,5,15,1,0,0,14,10,4,11,12,3,0,2,16,16,16,16,13,2,0,0,3,4,11,14,0,0,0,0,0,0,15,4,0,0,0,4 +0,2,12,13,16,16,4,0,0,11,16,13,7,4,1,0,0,13,14,0,0,0,0,0,0,1,15,12,0,0,0,0,0,0,6,16,3,0,0,0,0,0,0,13,7,0,0,0,0,3,5,16,7,0,0,0,0,3,13,15,0,0,0,0,5 +0,0,0,11,13,5,0,0,0,0,3,16,13,3,0,0,0,0,10,16,2,0,0,0,0,4,16,16,13,7,0,0,0,4,16,11,8,16,2,0,0,0,15,8,0,15,6,0,0,0,9,14,4,15,4,0,0,0,1,10,16,11,1,0,6 +0,0,8,16,16,11,0,0,0,0,4,8,13,14,0,0,0,0,0,0,13,8,0,0,0,0,3,12,16,8,2,0,0,0,6,16,16,16,9,0,0,0,0,14,8,2,0,0,0,0,3,16,1,0,0,0,0,0,11,12,0,0,0,0,7 +0,0,0,8,14,9,0,0,0,0,9,15,16,15,0,0,0,4,15,5,8,14,0,0,0,8,14,1,14,7,0,0,0,1,15,13,12,0,0,0,0,0,13,16,13,0,0,0,0,0,12,10,15,7,0,0,0,0,2,10,16,5,0,0,8 +0,0,2,10,16,6,0,0,0,0,10,16,16,14,0,0,0,0,15,10,16,16,2,0,0,0,12,16,12,13,8,0,0,0,1,7,1,10,11,0,0,5,5,0,0,8,12,0,0,3,15,10,2,11,12,0,0,0,3,10,16,16,10,0,9 +0,0,1,13,12,5,0,0,0,0,11,16,4,13,2,0,0,2,16,4,0,8,5,0,0,7,12,0,0,8,8,0,0,6,12,0,0,5,8,0,0,3,16,0,0,8,7,0,0,1,15,8,6,15,3,0,0,0,2,13,15,6,0,0,0 +0,0,2,10,10,11,0,0,0,0,10,9,9,16,0,0,0,0,14,0,6,15,0,0,0,0,11,14,9,16,1,0,0,0,0,0,0,13,3,0,0,0,0,0,0,12,3,0,0,10,9,5,0,15,1,0,0,0,2,14,16,13,0,0,9 +0,0,13,10,8,8,7,0,0,4,16,16,16,16,15,2,0,0,10,16,5,0,0,0,0,0,0,13,12,0,0,0,0,0,0,6,15,0,0,0,0,0,0,8,15,0,0,0,0,1,6,10,12,0,0,0,0,1,13,16,5,0,0,0,5 +0,0,6,15,16,15,11,0,0,1,15,14,8,8,7,0,0,4,16,5,0,0,0,0,0,7,16,8,0,0,0,0,0,1,11,16,8,0,0,0,0,0,0,15,11,0,0,0,0,0,0,14,11,0,0,0,0,0,9,16,5,0,0,0,5 +0,0,6,13,0,0,0,0,0,0,15,12,0,0,0,0,0,0,16,6,0,0,0,0,0,3,16,14,11,5,0,0,0,5,16,12,11,16,6,0,0,6,16,9,2,16,9,0,0,0,13,14,8,16,8,0,0,0,4,15,16,13,2,0,6 +0,1,12,16,16,16,12,0,0,9,16,13,6,8,5,0,0,8,16,15,3,0,0,0,0,0,4,14,11,0,0,0,0,0,0,12,12,0,0,0,0,0,0,12,13,0,0,0,0,0,3,15,11,0,0,0,0,0,12,13,2,0,0,0,5 +0,0,2,10,15,7,0,0,0,0,14,15,7,15,2,0,0,4,16,3,0,11,4,0,0,4,14,0,0,7,8,0,0,7,12,0,0,6,7,0,0,4,16,1,0,12,4,0,0,1,14,12,10,16,1,0,0,0,1,14,13,5,0,0,0 +0,0,3,4,10,0,0,0,0,3,15,8,14,3,0,0,0,8,7,0,10,6,0,0,0,3,11,8,15,11,0,0,0,0,1,7,3,13,3,0,0,0,0,0,0,6,9,0,0,0,9,6,1,0,16,0,0,0,0,3,11,16,16,3,9 +0,0,2,12,14,8,0,0,0,0,13,13,15,12,0,0,0,5,15,2,10,6,0,0,0,2,14,13,14,1,0,0,0,0,0,11,15,13,1,0,0,0,1,15,3,14,7,0,0,0,6,13,1,16,4,0,0,0,1,12,16,11,0,0,8 
+0,0,3,14,16,9,0,0,0,0,13,10,6,16,7,0,0,5,16,3,2,14,6,0,0,0,10,16,16,16,4,0,0,0,0,0,0,12,5,0,0,0,0,0,0,13,4,0,0,3,11,2,5,15,0,0,0,0,4,12,16,10,0,0,9 +0,0,2,10,14,9,0,0,0,2,14,11,12,16,0,0,0,4,16,1,0,15,0,0,0,2,13,12,7,13,0,0,0,0,1,8,16,12,0,0,0,0,0,4,14,15,4,0,0,0,0,13,7,14,4,0,0,0,0,14,15,10,0,0,8 +0,0,0,3,15,5,0,0,0,0,1,15,11,0,0,0,0,0,10,15,2,3,0,0,0,5,16,4,6,16,1,0,0,10,15,4,9,16,2,0,0,12,16,16,16,13,2,0,0,1,4,7,16,4,0,0,0,0,0,4,15,0,0,0,4 +0,0,4,10,11,4,0,0,0,1,11,16,16,14,0,0,0,4,16,16,16,12,0,0,0,4,16,16,16,7,0,0,0,4,16,16,16,8,0,0,0,4,16,16,16,7,0,0,0,3,15,16,16,12,0,0,0,0,5,12,12,12,1,0,1 +0,0,13,16,15,2,0,0,0,5,14,5,15,7,0,0,0,0,2,0,12,7,0,0,0,0,5,9,16,7,0,0,0,0,8,16,16,16,10,0,0,0,2,16,3,0,0,0,0,0,8,13,0,0,0,0,0,0,15,7,0,0,0,0,7 +0,0,3,15,16,15,1,0,0,0,9,11,9,16,3,0,0,0,1,0,3,16,3,0,0,0,0,0,9,14,0,0,0,0,4,15,15,16,6,0,0,0,2,12,15,7,1,0,0,0,0,13,8,0,0,0,0,0,4,14,1,0,0,0,7 +0,1,11,16,13,4,0,0,0,1,15,7,14,14,1,0,0,0,0,0,6,15,1,0,0,0,1,10,15,6,0,0,0,0,5,15,14,7,0,0,0,0,1,0,5,16,3,0,0,5,11,1,1,16,4,0,0,0,10,15,16,10,1,0,3 +0,0,9,16,16,16,10,0,0,4,16,14,8,11,11,0,0,11,16,7,0,0,0,0,0,5,15,16,6,0,0,0,0,0,1,14,15,0,0,0,0,0,0,8,16,0,0,0,0,0,9,13,14,0,0,0,0,0,12,16,7,0,0,0,5 +0,0,5,14,12,5,0,0,0,0,13,16,16,9,0,0,0,0,11,16,16,9,0,0,0,0,11,16,16,7,0,0,0,0,10,16,16,2,0,0,0,0,13,16,15,0,0,0,0,0,14,16,13,0,0,0,0,0,7,13,16,8,0,0,1 +0,0,6,15,9,0,0,0,0,0,11,16,16,13,0,0,0,0,10,16,16,16,7,0,0,1,16,8,0,11,8,0,0,7,14,1,0,10,8,0,0,8,12,0,0,13,4,0,0,5,16,8,9,13,0,0,0,0,6,12,13,5,0,0,0 +0,0,2,13,15,7,1,0,0,0,7,16,15,16,10,0,0,0,14,16,10,10,10,0,0,2,16,3,0,8,8,0,0,5,13,0,0,9,8,0,0,6,13,0,0,12,3,0,0,2,16,6,9,10,0,0,0,0,3,14,14,1,0,0,0 +0,0,12,16,12,0,0,0,0,3,16,12,16,3,0,0,0,1,8,4,16,3,0,0,0,0,0,7,16,1,0,0,0,0,0,10,12,0,0,0,0,0,4,16,2,0,0,0,0,0,11,15,8,8,2,0,0,0,12,16,16,12,1,0,2 +0,3,15,15,2,0,0,0,0,7,16,16,6,0,0,0,0,1,9,16,6,0,0,0,0,0,6,16,1,0,0,0,0,0,10,12,0,0,0,0,0,3,15,8,0,0,0,0,0,8,16,13,15,15,5,0,0,4,16,16,16,13,3,0,2 +0,0,10,16,5,0,0,0,0,1,10,14,12,0,0,0,0,0,0,9,11,0,0,0,0,0,2,11,13,3,0,0,0,0,11,16,16,16,7,0,0,0,3,16,4,5,1,0,0,0,7,13,0,0,0,0,0,0,13,6,0,0,0,0,7 +0,0,0,9,13,10,1,0,0,0,9,12,4,15,5,0,0,0,16,4,0,12,4,0,0,3,15,9,3,14,1,0,0,0,2,9,16,10,0,0,0,0,0,4,14,15,2,0,0,0,0,10,8,14,3,0,0,0,0,10,16,12,0,0,8 +0,3,15,16,14,1,0,0,0,2,12,13,16,4,0,0,0,0,0,6,16,3,0,0,0,0,1,15,10,0,0,0,0,0,6,16,4,0,0,0,0,2,15,10,0,0,0,0,0,4,16,11,8,11,3,0,0,3,16,16,16,12,3,0,2 +0,0,7,15,14,8,0,0,0,1,15,7,5,14,5,0,0,0,15,8,0,10,7,0,0,3,16,6,0,12,8,0,0,5,16,2,0,12,8,0,0,4,16,3,1,16,4,0,0,5,16,10,14,12,0,0,0,0,8,15,15,2,0,0,0 +0,0,14,10,0,0,0,0,0,0,15,13,0,0,0,0,0,11,16,16,2,0,0,0,0,3,10,16,5,0,0,0,0,0,0,14,10,0,0,0,0,0,0,10,14,0,0,0,0,0,9,14,16,11,6,0,0,0,12,16,16,16,16,9,1 +0,1,12,16,5,0,0,0,0,7,15,14,11,0,0,0,0,8,13,10,12,0,0,0,0,0,1,12,12,0,0,0,0,0,0,14,9,0,0,0,0,0,4,16,8,4,0,0,0,0,13,16,16,16,9,0,0,2,16,13,11,9,3,0,2 +0,0,0,13,13,3,0,0,0,0,4,16,8,0,0,0,0,0,9,16,1,0,0,0,0,0,13,16,5,0,0,0,0,2,16,16,14,8,1,0,0,4,16,16,6,16,9,0,0,0,8,16,11,16,10,0,0,0,1,14,16,13,1,0,6 +0,1,8,14,15,2,0,0,0,2,13,9,14,8,0,0,0,0,0,0,12,9,0,0,0,0,2,13,13,0,0,0,0,0,3,15,16,6,0,0,0,1,1,0,12,14,0,0,0,5,13,5,6,16,1,0,0,1,9,12,13,9,0,0,3 +0,0,15,16,13,6,0,0,0,0,12,12,14,13,0,0,0,0,0,0,11,9,0,0,0,0,1,11,15,2,0,0,0,0,8,16,16,12,1,0,0,1,8,4,9,16,3,0,0,5,14,7,10,15,1,0,0,2,12,16,14,6,0,0,3 +0,0,10,16,16,8,0,0,0,0,5,8,13,13,0,0,0,0,0,0,9,13,0,0,0,0,0,2,13,12,0,0,0,0,2,15,16,16,7,0,0,0,0,13,13,5,1,0,0,0,1,14,5,0,0,0,0,0,9,13,1,0,0,0,7 
+0,0,7,16,16,16,10,0,0,0,10,10,5,12,16,2,0,0,0,0,7,15,6,0,0,0,1,13,16,13,0,0,0,0,0,7,12,16,6,0,0,0,0,0,2,16,6,0,0,0,2,9,11,14,1,0,0,0,5,16,15,5,0,0,3 +0,0,8,16,16,16,3,0,0,0,6,8,8,15,10,0,0,0,0,0,7,16,5,0,0,0,1,10,16,9,0,0,0,0,0,15,16,12,0,0,0,0,0,1,13,16,5,0,0,0,7,8,11,16,2,0,0,0,6,16,16,11,0,0,3 +0,0,0,8,15,2,0,0,0,0,2,16,10,0,0,0,0,0,14,13,6,11,0,0,0,6,16,3,13,13,2,0,0,14,16,8,15,16,10,0,0,12,16,16,16,11,1,0,0,0,1,6,16,3,0,0,0,0,0,10,14,0,0,0,4 +0,0,0,8,13,2,0,0,0,0,9,16,13,3,0,0,0,1,15,14,1,0,0,0,0,2,16,11,4,1,0,0,0,3,16,16,14,15,2,0,0,2,16,13,1,16,9,0,0,0,9,15,9,16,7,0,0,0,0,8,16,13,2,0,6 +0,0,8,7,0,0,0,0,0,0,11,12,0,0,0,0,0,0,15,9,0,0,0,0,0,7,16,16,9,4,0,0,0,5,16,14,11,16,5,0,0,2,16,16,0,12,8,0,0,0,15,15,1,15,6,0,0,0,7,14,16,13,1,0,6 +0,0,1,13,3,0,0,0,0,0,7,14,2,0,0,0,0,0,13,13,8,5,0,0,0,2,15,15,12,15,5,0,0,7,16,4,0,12,8,0,0,2,15,7,0,12,6,0,0,0,5,15,5,15,5,0,0,0,0,13,16,9,0,0,6 +0,0,0,10,10,0,0,0,0,0,6,16,6,0,0,0,0,1,14,10,0,0,0,0,0,7,16,3,11,7,0,0,0,12,16,8,16,9,1,0,0,10,16,16,16,16,6,0,0,0,0,10,16,0,0,0,0,0,0,10,11,0,0,0,4 +0,0,1,7,13,10,0,0,0,2,13,14,14,16,4,0,0,4,16,5,12,16,2,0,0,0,6,11,12,16,5,0,0,0,0,0,0,15,8,0,0,1,1,0,0,13,11,0,0,0,12,8,4,13,8,0,0,0,0,7,15,16,10,0,9 +0,0,5,12,10,4,0,0,0,0,5,16,16,16,3,0,0,0,0,16,16,16,0,0,0,0,3,16,16,13,0,0,0,0,4,16,16,12,0,0,0,0,8,16,16,8,0,0,0,0,10,16,16,7,0,0,0,0,8,12,12,4,0,0,1 +0,0,7,13,8,6,0,0,0,0,16,15,16,14,10,0,0,4,16,13,1,0,0,0,0,1,10,16,9,0,0,0,0,0,0,5,13,0,0,0,0,0,0,5,15,0,0,0,0,0,8,11,8,0,0,0,0,0,9,16,3,0,0,0,5 +0,0,2,16,15,5,0,0,0,0,10,16,14,15,0,0,0,0,15,10,0,16,7,0,0,4,16,1,0,12,5,0,0,4,15,0,0,12,5,0,0,5,16,6,0,16,0,0,0,0,14,13,8,15,0,0,0,0,3,14,16,6,0,0,0 +0,0,7,13,4,1,0,0,0,1,15,13,15,11,0,0,0,7,16,1,13,16,4,0,0,3,16,12,16,16,7,0,0,0,4,11,5,16,8,0,0,0,0,0,2,16,5,0,0,0,12,6,9,14,1,0,0,0,6,13,16,5,0,0,9 +0,2,13,13,11,9,0,0,0,10,16,16,16,15,10,0,0,11,16,9,0,0,0,0,0,3,15,16,8,0,0,0,0,0,2,11,14,0,0,0,0,0,0,8,16,0,0,0,0,0,1,11,11,0,0,0,0,1,16,15,4,0,0,0,5 +0,5,16,15,5,0,0,0,0,2,12,15,16,0,0,0,0,0,0,14,14,2,0,0,0,0,2,16,9,0,0,0,0,0,11,16,2,0,0,0,0,4,16,8,0,0,0,0,0,13,16,11,8,8,3,0,0,6,16,16,16,16,7,0,2 +0,0,11,14,10,1,0,0,0,0,16,15,14,13,0,0,0,1,14,8,3,16,2,0,0,0,7,16,13,16,2,0,0,0,0,12,16,9,0,0,0,0,1,14,16,12,0,0,0,0,10,16,15,16,0,0,0,0,7,14,15,11,0,0,8 +0,4,16,15,1,0,0,0,0,6,14,16,4,0,0,0,0,0,0,16,8,0,0,0,0,0,3,16,6,0,0,0,0,0,6,16,1,0,0,0,0,0,13,11,0,0,0,0,0,3,16,16,12,10,5,0,0,3,16,16,16,16,8,0,2 +0,0,4,12,14,5,0,0,0,0,11,16,16,16,3,0,0,3,16,14,2,16,7,0,0,8,16,7,0,16,6,0,0,4,16,4,3,16,4,0,0,4,16,5,10,14,0,0,0,0,14,16,16,10,0,0,0,0,4,14,14,2,0,0,0 +0,0,9,9,4,0,0,0,0,0,15,15,14,12,0,0,0,3,10,1,0,12,5,0,0,5,8,0,0,8,6,0,0,8,8,0,0,8,8,0,0,5,8,0,0,10,6,0,0,4,13,4,6,13,0,0,0,0,6,16,14,3,0,0,0 +0,1,13,13,10,0,0,0,0,1,13,16,15,0,0,0,0,0,12,16,16,0,0,0,0,0,16,16,12,0,0,0,0,0,15,16,13,1,0,0,0,0,15,16,11,0,0,0,0,0,16,16,16,5,0,0,0,0,14,16,15,8,1,0,1 +0,0,2,15,15,4,0,0,0,0,11,10,14,9,0,0,0,0,1,0,11,9,0,0,0,0,0,3,15,4,0,0,0,0,1,16,16,14,6,0,0,0,0,8,13,6,1,0,0,0,0,9,7,0,0,0,0,0,1,15,2,0,0,0,7 +0,0,2,16,8,0,0,0,0,0,8,16,6,0,0,0,0,0,15,10,0,0,0,0,0,4,16,2,0,0,0,0,0,8,16,16,16,14,2,0,0,8,16,7,4,16,8,0,0,1,16,9,6,16,4,0,0,0,3,12,16,12,0,0,6 +0,0,6,12,16,10,0,0,0,4,15,8,12,14,0,0,0,0,0,0,13,8,0,0,0,0,0,6,14,1,0,0,0,0,0,5,15,8,0,0,0,0,0,0,2,15,5,0,0,0,1,4,5,15,8,0,0,0,5,16,14,9,1,0,3 +0,0,9,16,16,13,1,0,0,0,12,13,14,16,7,0,0,0,0,0,6,16,4,0,0,0,0,0,13,14,1,0,0,0,1,10,16,6,0,0,0,0,7,16,8,0,0,0,0,2,15,16,12,7,0,0,0,0,9,14,16,16,2,0,2 
+0,0,2,14,15,4,0,0,0,0,2,16,16,11,0,0,0,0,2,16,16,10,0,0,0,0,5,16,16,7,0,0,0,0,14,16,14,2,0,0,0,4,16,16,8,0,0,0,0,3,15,16,8,0,0,0,0,0,5,15,13,2,0,0,1 +0,0,5,16,16,16,9,0,0,0,1,6,4,12,14,0,0,0,0,0,0,15,9,0,0,0,4,6,11,16,1,0,0,0,15,16,16,16,9,0,0,0,2,10,11,0,1,0,0,0,2,15,3,0,0,0,0,0,11,10,0,0,0,0,7 +0,0,0,1,15,4,0,0,0,0,1,13,14,1,0,0,0,0,9,15,5,7,7,0,0,4,16,6,1,16,8,0,0,14,15,0,6,16,2,0,0,11,16,13,14,16,4,0,0,0,5,8,15,14,1,0,0,0,0,0,15,12,0,0,4 +0,0,2,13,12,0,0,0,0,0,8,16,7,0,0,0,0,0,13,16,4,0,0,0,0,4,16,16,16,11,0,0,0,3,16,10,3,15,8,0,0,0,16,8,0,13,10,0,0,0,12,15,1,15,9,0,0,0,2,11,16,16,2,0,6 +0,1,11,14,9,1,0,0,0,3,16,8,16,4,0,0,0,0,0,3,16,3,0,0,0,0,1,14,13,0,0,0,0,0,0,7,14,10,0,0,0,0,0,0,3,16,4,0,0,1,3,1,8,16,4,0,0,3,10,16,16,8,0,0,3 +0,0,0,12,16,9,0,0,0,0,2,16,16,6,0,0,0,0,3,16,16,2,0,0,0,0,8,16,12,0,0,0,0,0,6,16,16,0,0,0,0,0,10,16,15,1,0,0,0,0,9,16,11,0,0,0,0,0,8,16,10,0,0,0,1 +0,0,10,16,16,4,0,0,0,0,9,8,13,10,0,0,0,0,0,4,15,6,0,0,0,0,0,13,16,7,0,0,0,0,0,5,13,16,1,0,0,0,0,0,0,16,4,0,0,0,7,3,5,16,2,0,0,0,11,16,16,10,0,0,3 +0,0,1,9,13,11,0,0,0,0,10,11,12,16,1,0,0,0,15,4,12,16,1,0,0,0,12,16,11,15,1,0,0,0,0,0,0,14,0,0,0,0,0,0,3,14,0,0,0,4,12,8,10,11,0,0,0,0,2,9,16,6,0,0,9 +0,0,4,16,15,7,0,0,0,0,6,16,16,6,0,0,0,0,5,16,16,4,0,0,0,0,7,16,15,0,0,0,0,0,11,16,14,0,0,0,0,0,6,16,14,0,0,0,0,0,6,16,16,5,0,0,0,0,2,12,16,3,0,0,1 +0,0,6,16,15,2,0,0,0,0,7,13,16,4,0,0,0,0,0,1,16,3,0,0,0,0,1,10,16,6,1,0,0,0,9,16,16,16,8,0,0,0,1,16,8,4,0,0,0,0,5,13,0,0,0,0,0,0,11,7,0,0,0,0,7 +0,0,0,11,7,0,0,0,0,0,8,15,7,0,0,0,0,0,13,8,0,0,0,0,0,0,16,14,8,1,0,0,0,5,16,10,10,14,1,0,0,2,15,3,0,12,7,0,0,0,10,13,1,10,11,0,0,0,0,10,16,15,5,0,6 +0,0,2,13,15,1,0,0,0,1,14,13,15,4,0,0,0,5,14,2,15,0,0,0,0,6,14,8,13,0,0,0,0,0,7,16,12,1,0,0,0,0,1,15,10,13,1,0,0,0,4,13,4,13,6,0,0,0,0,11,16,14,1,0,8 +0,0,0,6,14,0,0,0,0,0,4,16,6,0,0,0,0,0,14,10,1,2,0,0,0,6,16,4,12,10,0,0,0,14,11,0,16,8,0,0,4,16,16,16,16,10,0,0,1,11,12,12,16,5,0,0,0,0,0,8,16,4,0,0,4 +0,0,7,15,15,2,0,0,0,0,13,6,12,6,0,0,0,0,0,0,15,2,0,0,0,0,0,13,10,0,0,0,0,0,0,8,15,12,0,0,0,3,7,0,2,15,1,0,0,2,15,6,6,16,1,0,0,0,4,15,16,7,0,0,3 +0,0,4,14,11,3,0,0,0,0,1,15,16,6,0,0,0,0,0,16,16,9,0,0,0,0,1,14,16,3,0,0,0,0,6,16,16,2,0,0,0,0,8,16,15,0,0,0,0,0,7,16,11,0,0,0,0,0,6,15,14,4,0,0,1 +0,0,0,1,13,2,0,0,0,0,0,12,14,0,0,0,0,0,6,14,0,0,0,0,0,1,14,5,0,0,0,0,0,9,12,0,12,7,0,0,0,12,14,6,16,14,1,0,0,6,16,16,16,5,0,0,0,0,0,3,14,0,0,0,4 +0,0,7,13,8,4,0,0,0,1,15,11,9,15,2,0,0,4,16,6,0,8,7,0,0,4,10,0,0,7,8,0,0,4,10,0,0,8,8,0,0,5,12,0,0,12,5,0,0,3,15,5,9,14,2,0,0,0,8,14,12,3,0,0,0 +0,0,13,15,11,12,11,0,0,4,16,15,16,13,9,1,0,3,16,9,0,0,0,0,0,0,12,16,9,0,0,0,0,0,0,12,14,1,0,0,0,1,1,7,16,2,0,0,0,8,12,11,16,3,0,0,0,1,13,16,12,0,0,0,5 +0,0,6,12,13,12,0,0,0,0,14,12,7,16,1,0,0,0,6,6,14,9,0,0,0,0,0,14,11,1,0,0,0,0,0,5,16,5,0,0,0,0,0,0,6,14,1,0,0,0,10,8,3,16,1,0,0,0,4,14,16,12,0,0,3 +0,0,0,7,13,2,0,0,0,0,0,14,14,2,0,0,0,0,5,16,4,0,0,0,0,1,11,16,4,0,0,0,0,5,16,16,15,12,0,0,0,0,9,16,1,13,7,0,0,0,4,16,6,15,5,0,0,0,0,6,14,14,1,0,6 +0,0,2,14,13,8,0,0,0,0,12,13,12,13,0,0,0,0,11,6,6,16,4,0,0,0,5,16,15,16,8,0,0,0,0,2,4,11,8,0,0,0,0,0,0,11,9,0,0,2,13,7,1,11,10,0,0,0,2,10,15,16,2,0,9 +0,0,1,12,8,0,0,0,0,0,11,15,5,0,0,0,0,2,16,5,0,0,0,0,0,5,16,0,0,0,0,0,0,5,12,8,14,14,3,0,0,4,16,16,9,12,8,0,0,0,13,8,0,11,8,0,0,0,1,14,16,11,1,0,6 +0,0,1,8,10,8,3,0,0,0,1,16,16,16,8,0,0,0,0,14,16,16,3,0,0,0,1,16,16,15,0,0,0,0,6,16,16,10,0,0,0,0,10,16,15,4,0,0,0,0,8,16,14,0,0,0,0,0,1,8,8,1,0,0,1 
+0,0,12,16,14,4,0,0,0,0,8,14,16,10,0,0,0,0,0,0,14,13,0,0,0,0,0,0,13,10,0,0,0,2,15,16,16,13,3,0,0,1,8,12,15,12,4,0,0,0,2,15,8,0,0,0,0,0,12,13,0,0,0,0,7 +0,1,15,16,16,16,5,0,0,7,16,16,12,9,1,0,0,13,16,3,0,0,0,0,0,5,16,11,0,0,0,0,0,0,10,16,6,0,0,0,0,0,1,15,11,0,0,0,0,1,4,14,12,0,0,0,0,3,15,16,6,0,0,0,5 +0,0,0,8,15,0,0,0,0,0,3,15,3,0,0,0,0,0,12,10,0,1,0,0,0,4,16,4,11,11,0,0,0,11,15,2,14,10,1,0,0,13,16,16,16,13,1,0,0,0,4,12,12,0,0,0,0,0,0,11,9,0,0,0,4 +0,0,0,5,15,4,0,0,0,0,1,15,11,0,0,0,0,0,12,14,2,0,0,0,0,5,16,7,7,10,0,0,0,12,16,16,16,12,0,0,0,11,12,14,16,14,1,0,0,0,0,0,16,9,0,0,0,0,0,4,16,6,0,0,4 +0,0,5,16,16,7,0,0,0,0,6,9,13,11,0,0,0,0,0,0,10,12,0,0,0,0,1,6,13,8,0,0,0,0,8,16,16,15,6,0,0,0,1,11,14,8,2,0,0,0,0,13,7,0,0,0,0,0,4,16,2,0,0,0,7 +0,5,16,12,1,0,0,0,0,5,14,15,8,0,0,0,0,0,0,14,10,0,0,0,0,0,2,16,7,0,0,0,0,0,7,16,3,0,0,0,0,2,14,10,0,0,0,0,0,11,16,9,8,8,3,0,0,8,16,16,16,16,4,0,2 +0,0,1,8,14,14,2,0,0,1,13,16,16,16,5,0,0,7,16,10,10,16,4,0,0,3,16,14,15,12,0,0,0,0,3,12,16,10,0,0,0,0,0,9,16,16,3,0,0,0,0,15,16,16,4,0,0,0,0,11,16,12,2,0,8 +0,0,4,12,16,16,4,0,0,0,9,7,4,14,12,0,0,0,0,0,0,11,14,0,0,0,0,0,3,16,6,0,0,0,0,1,13,6,0,0,0,0,1,12,8,0,0,0,0,0,6,16,9,5,0,0,0,0,3,12,13,9,0,0,2 +0,0,10,15,13,1,0,0,0,4,16,7,13,7,0,0,0,2,11,0,12,6,0,0,0,0,0,4,14,0,0,0,0,0,1,15,6,0,0,0,0,0,9,12,0,0,0,0,0,4,16,7,7,13,3,0,0,0,10,16,12,3,0,0,2 +0,1,13,16,16,16,12,1,0,6,16,14,12,11,5,0,0,2,15,15,5,0,0,0,0,0,8,14,15,1,0,0,0,0,0,3,16,6,0,0,0,0,0,3,16,5,0,0,0,0,7,10,16,4,0,0,0,0,15,16,10,0,0,0,5 +0,0,6,16,16,7,0,0,0,0,13,12,15,10,0,0,0,0,3,6,13,9,0,0,0,0,8,16,16,15,6,0,0,0,1,9,14,8,5,0,0,0,0,11,9,0,0,0,0,0,4,16,3,0,0,0,0,0,10,10,0,0,0,0,7 +0,0,2,10,13,12,3,0,0,0,11,13,8,16,7,0,0,0,12,9,9,16,8,0,0,0,6,10,13,14,5,0,0,0,0,0,0,12,8,0,0,8,1,0,0,15,2,0,0,4,14,9,4,16,0,0,0,0,2,12,16,14,0,0,9 +0,1,12,12,15,16,7,0,0,7,16,16,13,6,1,0,0,12,16,3,0,0,0,0,0,3,14,15,1,0,0,0,0,0,1,16,7,0,0,0,0,0,0,15,8,0,0,0,0,5,7,16,7,0,0,0,0,3,15,16,5,0,0,0,5 +0,0,0,9,14,1,0,0,0,0,2,16,8,0,0,0,0,0,12,14,1,0,0,0,0,5,16,4,2,1,0,0,0,12,13,1,14,8,1,0,1,16,16,16,16,15,3,0,0,5,8,11,15,1,0,0,0,0,0,10,16,3,0,0,4 +0,0,1,11,14,15,3,0,0,1,13,16,12,16,8,0,0,8,16,4,6,16,5,0,0,5,15,11,13,14,0,0,0,0,2,12,16,13,0,0,0,0,0,13,16,16,6,0,0,0,0,16,16,16,7,0,0,0,0,11,13,12,1,0,8 +0,0,6,14,16,5,0,0,0,2,16,16,16,7,0,0,0,2,15,16,15,2,0,0,0,0,6,16,15,7,0,0,0,0,14,10,6,16,3,0,0,1,16,3,0,16,7,0,0,0,10,11,11,15,3,0,0,0,3,14,16,6,0,0,8 +0,0,0,4,15,6,0,0,0,0,0,13,13,1,0,0,0,0,7,16,2,0,0,0,0,4,15,8,0,5,0,0,0,11,14,1,6,16,5,0,1,16,14,12,16,16,3,0,0,10,12,10,16,10,0,0,0,0,0,6,16,2,0,0,4 +0,0,1,9,15,11,3,0,0,0,12,9,1,11,6,0,0,0,13,7,6,16,8,0,0,0,4,10,12,15,4,0,0,0,0,0,0,12,6,0,0,8,7,0,0,15,5,0,0,1,12,10,4,16,3,0,0,0,0,13,16,8,0,0,9 +0,0,0,14,12,2,0,0,0,0,0,6,8,14,1,0,0,0,9,11,0,13,5,0,0,2,16,8,0,8,8,0,0,5,13,0,0,8,7,0,0,6,13,0,0,11,4,0,0,0,12,10,6,14,0,0,0,0,1,11,14,7,0,0,0 +0,0,0,10,13,5,0,0,0,3,14,16,12,15,0,0,0,10,16,8,11,16,0,0,0,8,14,5,14,9,0,0,0,0,7,14,16,5,0,0,0,0,0,11,16,16,1,0,0,0,0,14,16,16,4,0,0,0,0,11,16,11,0,0,8 +0,0,4,11,12,14,0,0,0,0,15,12,14,16,4,0,0,0,16,9,16,13,3,0,0,0,5,12,11,12,7,0,0,0,0,0,0,8,8,0,0,0,0,0,0,10,7,0,0,6,13,4,0,14,4,0,0,0,7,13,16,14,1,0,9 +0,0,2,8,7,0,0,0,0,0,6,15,16,2,0,0,0,6,15,11,16,4,0,0,0,5,16,10,16,1,0,0,0,2,15,16,13,0,0,0,0,0,2,16,12,9,3,0,0,0,4,14,0,12,14,1,0,0,1,12,10,7,0,0,8 +0,0,10,15,1,0,0,0,0,0,11,16,1,0,0,0,0,1,16,16,1,0,0,0,0,0,8,16,5,0,0,0,0,0,0,14,10,0,0,0,0,0,0,10,14,0,0,0,0,0,5,11,15,6,4,1,0,0,10,16,16,16,16,10,1 
+0,1,15,16,10,0,0,0,0,4,16,9,16,4,0,0,0,2,12,5,16,3,0,0,0,0,0,6,16,3,0,0,0,0,1,15,13,0,0,0,0,0,8,16,4,0,0,0,0,2,16,13,4,4,3,0,0,2,13,16,16,16,16,2,2 +0,0,6,13,12,2,0,0,0,0,7,7,10,12,0,0,0,0,0,1,12,9,0,0,0,0,0,7,16,7,0,0,0,0,0,0,5,14,1,0,0,1,7,0,0,7,11,0,0,1,16,4,0,9,11,0,0,0,5,13,12,16,3,0,3 +0,0,0,4,15,2,0,0,0,0,1,16,9,0,0,0,0,0,9,15,1,11,9,0,0,3,14,8,0,14,10,0,0,10,16,12,12,16,8,0,0,13,16,14,15,16,5,0,0,0,0,0,15,13,0,0,0,0,0,4,16,9,0,0,4 +0,0,14,12,12,13,3,0,0,0,16,8,8,6,1,0,0,0,14,7,5,0,0,0,0,0,15,15,16,2,0,0,0,0,13,3,6,8,0,0,0,0,0,0,3,13,0,0,0,0,5,4,8,12,1,0,0,1,15,15,11,3,0,0,5 +0,0,1,10,10,0,0,0,0,1,13,10,1,0,0,0,0,4,14,0,0,0,0,0,0,6,12,0,0,0,0,0,0,8,11,5,10,11,1,0,0,5,16,13,6,10,8,0,0,0,10,9,0,7,11,0,0,0,1,12,16,14,2,0,6 +0,0,3,14,8,6,4,0,0,0,11,16,16,16,15,1,0,3,16,3,2,15,6,0,0,5,8,0,9,14,0,0,0,0,7,9,15,13,4,0,0,0,10,16,16,15,3,0,0,0,0,13,7,0,0,0,0,0,6,15,2,0,0,0,7 +0,0,7,15,14,6,0,0,0,5,16,5,10,16,4,0,0,6,15,2,10,14,1,0,0,1,13,16,14,1,0,0,0,0,10,13,15,8,0,0,0,0,15,2,3,15,6,0,0,0,15,3,8,15,6,0,0,0,6,16,11,4,0,0,8 +0,0,7,14,9,0,0,0,0,1,16,5,10,7,0,0,0,0,13,2,3,13,0,0,0,0,5,15,16,16,1,0,0,0,0,0,5,10,7,0,0,0,0,0,0,2,14,0,0,0,4,2,0,0,14,3,0,0,5,15,16,16,12,1,9 +0,0,3,13,13,3,0,0,0,0,14,8,7,15,1,0,0,3,16,0,0,9,6,0,0,6,13,0,0,4,8,0,0,4,9,0,0,4,8,0,0,1,13,0,0,5,8,0,0,0,14,7,0,11,4,0,0,0,3,15,16,14,0,0,0 +0,0,16,8,0,0,0,0,0,2,16,13,0,0,0,0,0,2,16,16,6,0,0,0,0,0,8,16,10,0,0,0,0,0,0,14,12,0,0,0,0,0,0,10,16,2,0,0,0,0,5,12,16,11,8,3,0,0,12,16,16,16,16,9,1 +0,4,16,15,1,0,0,0,0,8,14,16,4,0,0,0,0,5,8,16,4,0,0,0,0,0,0,12,8,0,0,0,0,0,1,15,7,0,0,0,0,0,5,16,3,6,9,0,0,3,15,15,8,13,15,0,0,4,15,16,16,16,7,0,2 +0,0,9,16,10,1,0,0,0,0,8,3,16,4,0,0,0,0,0,5,14,2,0,0,0,0,2,16,15,7,0,0,0,0,0,0,3,15,2,0,0,4,6,0,0,13,7,0,0,6,13,1,5,16,3,0,0,0,10,16,15,5,0,0,3 +0,0,0,2,14,5,0,0,0,0,0,13,15,0,0,0,0,0,3,16,3,9,12,0,0,1,14,8,0,15,13,0,0,11,16,10,8,16,10,0,3,16,16,16,16,15,3,0,0,0,0,2,16,12,0,0,0,0,0,4,16,7,0,0,4 +0,1,12,13,13,0,0,0,0,4,11,6,3,0,0,0,0,7,11,8,6,1,0,0,0,5,15,12,13,12,0,0,0,0,0,0,0,13,4,0,0,0,0,0,0,8,8,0,0,2,10,8,7,15,3,0,0,1,13,16,12,5,0,0,5 +0,0,1,13,0,0,0,0,0,0,7,10,0,0,0,0,0,1,16,2,0,0,0,0,0,4,13,0,0,0,0,0,0,7,12,4,11,9,1,0,0,4,16,15,8,12,7,0,0,2,14,10,3,13,7,0,0,0,2,13,16,8,1,0,6 +0,0,6,16,16,12,3,0,0,0,13,12,10,16,2,0,0,1,16,3,10,11,0,0,0,1,7,1,16,3,0,0,0,0,0,7,15,4,1,0,0,0,10,16,16,16,4,0,0,0,2,16,8,3,0,0,0,0,6,16,3,0,0,0,7 +0,0,7,13,11,1,0,0,0,6,14,12,14,9,0,0,0,5,14,3,10,9,0,0,0,0,8,15,14,2,0,0,0,0,1,14,16,6,0,0,0,0,9,9,3,15,4,0,0,0,12,5,1,11,8,0,0,0,7,16,16,9,1,0,8 +0,0,7,14,10,0,0,0,0,7,15,4,9,11,0,0,0,9,13,0,7,16,0,0,0,3,15,16,16,16,3,0,0,0,0,4,4,12,8,0,0,0,0,0,0,4,12,0,0,0,11,5,0,7,13,0,0,0,5,13,16,14,6,0,9 +0,0,6,14,13,3,0,0,0,0,14,10,7,13,0,0,0,4,13,0,0,12,3,0,0,5,11,0,0,7,6,0,0,4,11,0,0,4,8,0,0,2,12,0,0,6,6,0,0,0,12,8,2,14,2,0,0,0,4,15,16,9,0,0,0 +0,0,11,12,0,0,0,0,0,0,13,16,0,0,0,0,0,3,15,16,4,0,0,0,0,13,15,16,6,0,0,0,0,3,3,15,10,0,0,0,0,0,0,11,16,0,0,0,0,0,2,10,16,6,3,0,0,0,7,16,16,16,16,5,1 +0,2,13,16,10,0,0,0,0,12,15,9,16,2,0,0,0,10,8,1,16,6,0,0,0,1,1,2,16,6,0,0,0,0,0,10,15,2,0,0,0,0,2,15,9,0,0,0,0,2,15,16,9,8,6,0,0,1,13,16,16,16,16,3,2 +0,2,13,16,15,1,0,0,0,7,13,10,16,4,0,0,0,0,0,8,16,2,0,0,0,0,8,16,16,10,0,0,0,0,1,4,10,16,8,0,0,0,0,0,0,16,9,0,0,2,12,6,6,16,6,0,0,1,15,16,16,9,1,0,3 +0,0,0,2,15,7,0,0,0,0,0,11,15,2,5,0,0,0,5,16,6,6,16,0,0,2,16,10,4,13,13,0,0,13,16,16,16,16,10,0,0,6,4,4,11,16,4,0,0,0,0,0,14,14,0,0,0,0,0,3,16,7,0,0,4 +0,0,9,12,14,2,0,0,0,0,12,6,4,0,0,0,0,0,12,1,3,0,0,0,0,0,9,16,16,12,0,0,0,0,4,4,0,12,6,0,0,0,0,0,0,4,12,0,0,0,9,7,4,10,11,0,0,0,9,14,16,14,5,0,5 
+0,0,3,15,1,0,0,0,0,0,12,8,0,0,0,0,0,3,13,0,0,0,0,0,0,4,12,0,0,0,0,0,0,5,10,11,16,14,1,0,0,2,16,10,4,7,10,0,0,0,15,8,2,12,8,0,0,0,3,12,16,8,0,0,6 +0,0,3,14,13,12,14,0,0,0,11,14,12,15,9,0,0,0,16,5,3,16,2,0,0,1,9,1,10,12,0,0,0,0,0,7,16,14,6,0,0,0,4,16,16,11,1,0,0,0,0,15,5,0,0,0,0,0,6,13,0,0,0,0,7 +0,0,10,14,10,1,0,0,0,4,14,6,13,7,0,0,0,6,12,0,7,7,0,0,0,1,16,10,15,1,0,0,0,0,5,16,15,3,0,0,0,0,13,6,6,15,5,0,0,3,15,0,4,12,7,0,0,0,12,16,15,8,0,0,8 +0,1,10,15,15,3,0,0,0,6,13,4,10,12,0,0,0,4,11,0,7,15,0,0,0,2,14,16,16,14,2,0,0,0,1,4,3,10,6,0,0,0,0,0,0,2,12,0,0,0,3,3,0,2,13,0,0,0,10,16,16,16,11,0,9 +0,0,3,15,9,0,0,0,0,0,14,8,11,5,0,0,0,3,16,3,1,14,2,0,0,5,12,0,0,12,4,0,0,2,12,0,0,6,8,0,0,2,14,0,0,12,5,0,0,0,12,8,5,15,0,0,0,0,1,13,14,5,0,0,0 +0,0,9,15,14,8,0,0,0,6,16,4,2,16,3,0,0,5,16,5,5,16,4,0,0,0,9,16,16,16,4,0,0,0,0,0,0,9,8,0,0,0,0,0,0,8,8,0,0,2,10,2,1,12,6,0,0,1,13,14,14,11,1,0,9 +0,1,10,12,12,11,0,0,0,7,14,8,8,6,0,0,0,7,11,7,3,0,0,0,0,8,16,13,13,8,0,0,0,1,3,0,1,14,5,0,0,0,0,0,0,4,12,0,0,0,11,3,0,10,12,0,0,0,10,16,16,14,4,0,5 +0,0,10,12,12,15,4,0,0,0,16,8,8,5,3,0,0,4,15,8,6,0,0,0,0,6,15,12,14,8,0,0,0,0,1,0,2,16,0,0,0,0,0,0,0,14,3,0,0,0,11,4,8,15,3,0,0,0,10,16,15,5,0,0,5 +0,0,1,11,15,0,0,0,0,0,11,15,5,0,0,0,0,3,15,1,0,0,0,0,0,5,12,0,0,0,0,0,0,8,15,15,16,14,3,0,0,2,16,11,2,7,12,0,0,0,14,11,4,9,13,0,0,0,2,11,16,15,6,0,6 +0,3,12,12,14,4,0,0,0,1,13,4,4,0,0,0,0,4,14,4,3,0,0,0,0,5,13,12,14,10,0,0,0,0,0,0,0,11,6,0,0,0,0,0,0,4,8,0,0,0,6,2,0,8,8,0,0,2,13,16,16,16,2,0,5 +0,0,6,14,11,1,0,0,0,0,15,5,6,15,0,0,0,4,16,0,0,9,3,0,0,8,9,0,0,4,8,0,0,7,8,0,0,4,8,0,0,4,8,0,0,9,4,0,0,1,13,2,3,14,0,0,0,0,5,14,15,4,0,0,0 +0,0,6,14,15,7,0,0,0,3,15,6,2,14,3,0,0,4,13,0,1,16,4,0,0,0,10,11,9,16,6,0,0,0,1,8,10,14,5,0,0,0,0,0,0,8,11,0,0,1,12,5,0,10,11,0,0,0,7,13,16,16,4,0,9 +0,0,7,14,15,4,0,0,0,7,15,4,9,12,0,0,0,6,15,1,4,14,0,0,0,0,9,13,14,7,0,0,0,0,2,16,16,4,0,0,0,0,14,7,3,15,4,0,0,0,16,3,0,13,8,0,0,0,7,16,16,10,1,0,8 +0,0,7,13,10,1,0,0,0,1,15,3,9,10,0,0,0,3,16,4,13,11,0,0,0,0,6,12,12,16,0,0,0,0,0,0,0,12,5,0,0,0,0,0,0,5,11,0,0,1,11,2,0,7,11,0,0,0,7,13,16,15,4,0,9 +0,0,1,11,15,6,0,0,0,2,15,10,16,15,0,0,0,1,14,5,6,11,0,0,0,0,5,14,14,3,0,0,0,0,1,14,16,6,0,0,0,0,10,8,6,15,1,0,0,0,9,9,4,16,3,0,0,0,1,15,15,6,0,0,8 +0,0,0,7,8,0,0,0,0,0,0,15,2,0,3,1,0,0,8,10,0,2,16,2,0,1,15,4,3,9,12,0,0,8,16,16,16,16,6,0,0,1,4,3,9,14,0,0,0,0,0,0,15,3,0,0,0,0,0,9,10,0,0,0,4 +0,0,3,15,4,0,0,0,0,0,0,15,11,0,0,0,0,0,0,15,16,2,0,0,0,0,0,14,16,8,0,0,0,0,0,7,13,14,0,0,0,0,0,0,4,16,4,0,0,0,3,9,13,16,12,5,0,0,3,15,16,16,16,16,1 +0,0,7,16,14,13,10,0,0,0,10,12,10,16,4,0,0,0,15,5,8,13,0,0,0,1,7,1,16,3,0,0,0,2,11,13,16,12,6,0,0,4,12,15,14,11,2,0,0,0,3,16,3,0,0,0,0,0,9,13,0,0,0,0,7 +0,0,0,15,16,16,12,4,0,0,4,14,0,10,12,0,0,0,8,7,1,15,4,0,0,0,0,0,8,12,0,0,0,0,1,8,14,12,3,0,0,0,6,13,16,13,2,0,0,0,0,10,10,0,0,0,0,0,2,16,2,0,0,0,7 +0,1,10,16,15,1,0,0,0,3,15,10,16,4,0,0,0,0,1,11,15,0,0,0,0,0,12,16,15,3,0,0,0,0,0,1,11,15,1,0,0,8,3,0,3,16,7,0,0,13,15,6,8,16,6,0,0,0,12,16,16,7,0,0,3 +0,3,16,16,16,2,0,0,0,4,14,10,5,0,0,0,0,6,16,16,10,3,0,0,0,4,15,12,14,13,0,0,0,0,2,0,1,15,8,0,0,0,0,0,0,8,13,0,0,3,16,10,7,9,16,0,0,3,13,15,16,16,8,0,5 +0,0,10,9,0,0,0,0,0,0,8,16,2,0,0,0,0,0,8,16,6,0,0,0,0,0,5,16,13,1,0,0,0,0,1,5,14,6,0,0,0,0,0,0,8,11,0,0,0,0,8,12,9,16,6,4,0,0,7,16,16,16,16,14,1 +0,3,15,16,7,0,0,0,0,12,13,11,16,0,0,0,0,12,5,4,16,0,0,0,0,0,0,3,16,4,0,0,0,0,0,6,16,3,0,0,0,0,0,11,16,0,0,0,0,1,12,16,14,8,5,0,0,2,13,16,16,16,16,2,2 +0,0,7,16,16,16,8,0,0,0,10,12,10,16,2,0,0,0,13,6,7,13,0,0,0,0,10,1,13,5,0,0,0,0,9,10,16,8,3,0,0,1,12,15,16,16,5,0,0,0,1,16,2,3,0,0,0,0,9,14,0,0,0,0,7 
+0,0,7,14,12,1,0,0,0,7,14,5,8,10,0,0,0,8,11,1,7,10,0,0,0,1,9,16,15,4,0,0,0,0,1,14,14,12,0,0,0,0,7,11,0,12,7,0,0,0,11,5,0,11,8,0,0,0,4,14,16,12,1,0,8 +0,1,13,16,7,0,0,0,0,5,16,12,15,3,0,0,0,0,9,6,15,9,0,0,0,0,0,0,14,10,0,0,0,0,0,0,14,11,0,0,0,0,0,8,16,4,1,0,0,0,9,16,16,6,16,5,0,0,8,12,13,16,16,11,2 +0,0,3,12,9,0,0,0,0,0,12,12,11,13,0,0,0,2,15,2,0,12,5,0,0,4,8,0,0,6,8,0,0,8,7,0,0,4,8,0,0,7,7,0,0,9,7,0,0,3,13,4,7,16,2,0,0,0,6,16,15,5,0,0,0 +0,0,8,6,0,0,0,0,0,0,6,14,0,0,0,0,0,0,6,16,3,0,0,0,0,0,10,16,9,0,0,0,0,0,1,6,16,2,0,0,0,0,0,0,13,7,0,0,0,0,4,8,14,14,8,4,0,0,9,16,16,16,16,13,1 +0,0,11,16,7,0,0,0,0,1,16,11,15,0,0,0,0,2,16,5,16,4,0,0,0,0,2,2,16,3,0,0,0,0,0,5,16,0,0,0,0,0,0,9,14,0,0,0,0,0,9,16,14,7,6,0,0,0,13,14,14,16,16,6,2 +0,0,2,12,9,0,0,0,0,0,12,10,1,0,0,0,0,4,14,0,0,0,0,0,0,8,9,0,0,0,0,0,0,8,9,5,11,8,0,0,0,4,16,14,6,12,5,0,0,0,13,7,0,10,8,0,0,0,3,14,16,16,5,0,6 +0,0,8,15,11,1,0,0,0,0,10,4,10,6,0,0,0,0,0,1,13,6,0,0,0,0,0,15,16,2,0,0,0,0,0,4,8,15,1,0,0,1,1,0,0,9,7,0,0,4,13,5,3,10,8,0,0,0,7,14,16,15,2,0,3 +0,0,8,12,13,5,0,0,0,4,13,4,9,11,0,0,0,0,0,6,13,4,0,0,0,0,0,10,15,4,0,0,0,0,0,0,4,15,2,0,0,7,8,0,0,12,7,0,0,8,9,1,3,16,3,0,0,0,10,16,16,6,0,0,3 +0,0,4,15,16,16,16,1,0,0,10,13,8,15,8,0,0,0,14,5,3,16,2,0,0,0,1,0,12,11,0,0,0,0,2,5,16,9,1,0,0,0,15,16,16,14,3,0,0,0,1,15,9,0,0,0,0,0,7,14,2,0,0,0,7 +0,0,1,14,16,8,0,0,0,0,2,10,5,14,0,0,0,0,0,2,7,15,0,0,0,0,0,6,16,10,0,0,0,0,0,0,3,14,4,0,0,0,13,0,0,4,12,0,0,0,13,6,4,8,13,0,0,0,0,12,16,15,6,0,3 +0,0,7,16,12,1,0,0,0,0,16,11,16,8,0,0,0,0,3,9,16,6,0,0,0,0,0,13,16,15,1,0,0,0,1,2,5,14,8,0,0,5,14,0,0,9,15,0,0,4,16,7,6,13,14,0,0,0,7,16,16,16,4,0,3 +0,0,0,0,10,0,0,0,0,0,0,10,8,0,8,0,0,0,4,13,2,2,14,0,0,2,14,12,7,8,10,0,0,9,16,16,16,16,7,0,0,0,0,0,5,15,1,0,0,0,0,0,8,12,0,0,0,0,0,0,16,8,0,0,4 +0,0,1,9,13,1,0,0,0,1,12,14,5,0,0,0,0,2,16,5,0,0,0,0,0,5,15,0,3,0,0,0,0,3,16,16,16,15,3,0,0,2,16,11,1,9,11,0,0,0,11,13,6,12,11,0,0,0,0,6,16,15,2,0,6 +0,0,2,14,10,0,0,0,0,0,12,10,0,0,0,0,0,2,15,2,0,0,0,0,0,7,12,1,4,6,0,0,0,7,16,16,15,15,8,0,0,0,16,13,0,4,12,0,0,0,10,12,4,8,15,0,0,0,2,11,16,15,5,0,6 +0,0,1,11,10,0,0,0,0,0,13,10,0,0,0,0,0,3,13,0,0,0,0,0,0,5,11,0,0,0,0,0,0,5,14,12,12,7,0,0,0,0,16,12,5,11,10,0,0,0,10,11,4,10,12,0,0,0,1,12,16,12,3,0,6 +0,0,0,3,16,2,0,0,0,0,0,10,13,3,8,0,0,0,1,16,5,9,16,0,0,2,12,14,5,15,9,0,0,12,16,16,16,16,7,0,0,5,5,6,14,16,0,0,0,0,0,1,13,12,0,0,0,0,0,3,16,4,0,0,4 +0,0,5,15,14,3,0,0,0,2,14,7,4,13,0,0,0,2,15,5,5,16,1,0,0,0,7,15,16,16,3,0,0,0,0,1,3,7,10,0,0,0,0,0,0,2,14,0,0,0,8,9,4,2,16,1,0,0,4,11,13,16,11,0,9 +0,0,5,15,2,0,0,0,0,0,1,16,8,0,0,0,0,0,0,14,12,0,0,0,0,0,2,16,16,3,0,0,0,0,2,9,14,6,0,0,0,0,0,0,6,13,0,0,0,0,2,10,12,16,4,4,0,0,4,15,16,16,16,16,1 +0,0,12,12,14,15,1,0,0,1,15,11,6,5,0,0,0,6,15,12,4,0,0,0,0,6,11,8,13,6,0,0,0,0,0,0,1,13,0,0,0,0,0,0,0,9,3,0,0,2,6,1,6,14,3,0,0,1,11,16,13,8,0,0,5 +0,0,8,14,11,2,0,0,0,6,16,7,6,13,1,0,0,8,11,0,0,10,4,0,0,7,8,0,0,5,7,0,0,8,4,0,0,7,8,0,0,2,10,0,0,7,10,0,0,0,14,3,4,15,3,0,0,0,5,16,16,7,0,0,0 +0,0,8,16,11,1,0,0,0,0,14,2,5,9,0,0,0,0,14,1,5,12,0,0,0,0,6,16,16,14,1,0,0,0,0,3,7,10,7,0,0,0,0,0,0,4,12,0,0,0,6,1,0,2,14,0,0,0,9,16,16,16,12,0,9 +0,0,12,9,9,8,1,0,0,2,15,8,8,8,2,0,0,8,12,8,5,0,0,0,0,8,15,9,14,9,0,0,0,2,1,0,1,14,3,0,0,0,0,0,0,6,11,0,0,1,8,4,5,14,9,0,0,1,11,16,12,7,0,0,5 +0,1,14,16,12,0,0,0,0,5,16,9,16,6,0,0,0,3,11,0,14,9,0,0,0,0,0,0,10,10,0,0,0,0,0,0,14,10,0,0,0,0,0,10,16,5,0,0,0,2,15,16,14,8,12,2,0,0,11,16,16,16,15,5,2 +0,0,5,12,16,15,2,0,0,6,15,9,10,15,4,0,0,3,14,3,1,14,4,0,0,0,10,16,15,13,1,0,0,0,6,15,15,10,0,0,0,0,15,3,2,15,3,0,0,0,16,8,1,14,4,0,0,0,4,15,16,11,2,0,8 
+0,0,13,16,11,0,0,0,0,2,16,11,16,4,0,0,0,0,14,9,15,9,0,0,0,0,0,2,16,8,0,0,0,0,0,4,16,4,0,0,0,0,0,9,16,1,0,0,0,0,9,16,15,8,11,5,0,0,9,12,13,16,16,11,2 +0,0,10,10,12,7,0,0,0,0,15,13,5,12,5,0,0,4,13,4,0,2,8,0,0,8,4,0,0,3,8,0,0,8,4,0,0,7,5,0,0,6,6,0,0,11,2,0,0,1,13,3,3,12,0,0,0,0,7,15,16,7,0,0,0 +0,0,10,7,3,0,0,0,0,1,15,12,14,6,0,0,0,5,12,0,2,13,0,0,0,4,12,0,0,4,7,0,0,8,5,0,0,4,8,0,0,5,8,0,0,5,10,0,0,0,14,3,4,14,6,0,0,0,7,16,16,10,0,0,0 +0,0,8,11,0,0,0,0,0,0,7,16,3,0,0,0,0,0,6,16,10,0,0,0,0,0,10,16,15,1,0,0,0,0,0,2,16,2,0,0,0,0,0,0,15,9,0,0,0,0,6,12,16,15,8,5,0,0,4,15,16,16,16,16,1 +0,0,3,16,12,12,7,0,0,0,12,13,13,16,6,0,0,0,2,0,6,14,0,0,0,0,1,4,13,10,1,0,0,0,9,16,16,16,8,0,0,0,4,12,12,7,1,0,0,0,0,14,6,0,0,0,0,0,4,16,2,0,0,0,7 +0,0,1,12,9,0,0,0,0,0,11,10,2,0,0,0,0,4,14,0,0,0,0,0,0,5,9,0,0,0,0,0,0,8,10,11,16,14,1,0,0,2,16,10,3,7,11,0,0,0,13,8,1,8,12,0,0,0,2,12,16,15,5,0,6 +0,0,3,15,16,12,0,0,0,0,6,16,6,14,6,0,0,0,0,3,1,15,6,0,0,0,0,1,14,16,3,0,0,5,8,2,13,16,3,0,0,5,16,0,0,9,13,0,0,1,15,11,8,12,16,1,0,0,3,14,16,16,9,0,3 +0,3,15,15,3,0,0,0,0,8,14,12,10,0,0,0,0,5,11,6,14,0,0,0,0,0,0,7,14,0,0,0,0,0,0,10,12,0,0,0,0,0,0,15,9,0,0,0,0,1,11,16,12,8,5,0,0,5,16,16,16,16,16,0,2 +0,0,11,10,0,0,0,0,0,0,13,15,0,0,0,0,0,0,12,16,5,0,0,0,0,1,15,16,5,0,0,0,0,0,3,13,10,0,0,0,0,0,0,10,14,0,0,0,0,0,5,11,16,9,5,1,0,0,12,16,16,16,16,12,1 +0,0,0,4,15,2,0,0,0,0,0,13,13,0,0,0,0,0,3,16,6,0,10,1,0,0,12,12,1,7,15,1,0,5,16,3,0,14,10,0,2,16,13,8,8,16,3,0,8,16,16,16,16,13,0,0,0,0,0,7,16,6,0,0,4 +0,0,0,6,14,3,0,0,0,0,5,15,7,1,0,0,0,0,10,10,0,0,0,0,0,0,12,5,0,0,0,0,0,0,14,16,16,11,2,0,0,2,16,13,3,8,12,0,0,0,8,15,5,4,16,2,0,0,0,4,14,16,13,0,6 +0,0,6,14,13,3,0,0,0,0,12,2,3,14,0,0,0,0,0,0,8,13,0,0,0,0,0,12,16,3,0,0,0,0,0,0,8,13,1,0,0,1,7,0,0,7,11,0,0,3,13,2,0,7,13,0,0,0,5,14,14,15,6,0,3 +0,0,10,13,1,0,0,0,0,0,7,16,5,0,0,0,0,0,6,16,6,0,0,0,0,0,6,16,13,0,0,0,0,0,0,6,16,2,0,0,0,0,0,3,16,8,0,0,0,0,7,11,16,14,9,4,0,0,6,15,13,14,16,15,1 +0,0,2,15,16,9,0,0,0,0,3,13,11,16,0,0,0,0,0,2,13,12,0,0,0,0,0,9,16,11,0,0,0,3,3,1,6,15,8,0,0,11,13,0,0,10,12,0,0,3,16,12,7,16,8,0,0,0,3,15,16,10,0,0,3 +0,0,3,13,13,3,0,0,0,0,12,7,3,13,0,0,0,0,16,0,5,12,0,0,0,0,10,13,14,16,2,0,0,0,1,7,6,13,4,0,0,1,4,0,0,5,11,0,0,2,14,6,2,9,11,0,0,0,4,10,16,16,4,0,9 +0,0,2,13,1,0,0,0,0,0,0,15,6,0,0,0,0,0,0,15,10,0,0,0,0,0,0,13,16,1,0,0,0,0,0,6,15,6,0,0,0,0,0,0,12,9,0,0,0,0,5,12,14,16,9,2,0,0,2,12,12,12,13,8,1 +0,0,4,15,14,12,11,0,0,0,7,15,13,16,10,0,0,0,10,7,6,16,2,0,0,0,7,1,12,12,0,0,0,0,5,8,16,12,1,0,0,4,16,16,16,14,2,0,0,0,0,15,9,1,0,0,0,0,5,15,2,0,0,0,7 +0,0,0,5,12,12,0,0,0,0,5,16,6,1,0,0,0,0,15,5,0,0,0,0,0,5,13,2,7,4,0,0,0,7,15,16,13,15,3,0,0,3,16,9,0,1,12,0,0,0,10,12,2,6,13,0,0,0,0,8,15,16,5,0,6 +0,0,3,11,16,15,0,0,0,0,15,16,5,13,0,0,0,2,16,9,0,12,0,0,0,1,9,15,10,10,0,0,0,0,0,6,16,12,1,0,0,0,2,14,2,16,5,0,0,0,8,10,1,14,4,0,0,0,3,15,16,9,0,0,8 +0,0,0,1,15,3,0,0,0,0,0,8,13,0,9,7,0,0,2,15,4,0,15,5,0,2,13,14,11,10,15,0,0,11,15,13,16,16,10,0,0,0,0,0,3,16,5,0,0,0,0,0,9,14,0,0,0,0,0,2,16,6,0,0,4 +0,0,9,15,14,2,0,0,0,0,9,3,9,8,0,0,0,0,0,0,6,10,0,0,0,0,0,10,15,2,0,0,0,0,2,10,11,15,2,0,0,3,1,0,0,14,4,0,0,10,13,7,2,12,4,0,0,0,7,14,16,10,0,0,3 +0,0,1,14,2,0,0,0,0,0,0,16,5,0,0,0,0,0,0,14,10,0,0,0,0,0,0,11,16,1,0,0,0,0,0,3,14,6,0,0,0,0,0,0,8,12,0,0,0,0,10,14,13,16,8,3,0,0,2,11,12,15,16,15,1 +0,0,0,1,15,2,0,0,0,0,0,5,15,0,4,0,0,0,0,13,8,1,16,3,0,0,5,15,2,5,15,0,0,5,15,16,16,16,8,0,0,14,12,12,14,16,2,0,0,0,0,0,12,12,0,0,0,0,0,2,16,5,0,0,4 +0,0,6,16,12,1,0,0,0,3,16,5,9,13,0,0,0,5,12,0,0,12,6,0,0,8,14,2,0,7,8,0,0,7,12,2,0,4,8,0,0,4,12,0,0,9,7,0,0,3,16,5,7,14,2,0,0,0,7,16,13,3,0,0,0 
+0,3,10,11,12,12,6,0,0,8,14,11,8,8,4,0,0,8,10,7,3,0,0,0,0,8,16,14,15,4,0,0,0,2,2,0,6,9,0,0,0,0,0,0,4,12,0,0,0,1,8,4,10,10,0,0,0,2,15,16,13,2,0,0,5 +0,0,14,16,15,3,0,0,0,0,6,5,13,8,0,0,0,0,0,8,16,5,0,0,0,0,0,11,16,10,0,0,0,1,3,0,4,15,8,0,0,6,15,0,0,9,15,0,0,5,16,5,6,14,14,0,0,1,11,16,16,14,2,0,3 +0,0,2,14,5,0,0,0,0,0,9,12,0,0,0,0,0,1,15,1,0,0,0,0,0,3,15,0,0,0,0,0,0,6,16,16,16,13,1,0,0,2,16,8,4,7,11,0,0,0,12,11,1,8,11,0,0,0,3,12,16,15,4,0,6 +0,1,12,16,10,1,0,0,0,8,12,3,11,8,0,0,0,12,13,6,12,8,0,0,0,3,15,16,16,16,1,0,0,0,0,0,0,13,6,0,0,0,0,0,0,6,11,0,0,0,13,0,0,5,12,0,0,0,12,16,16,16,8,0,9 +0,0,0,12,4,0,0,0,0,0,6,14,1,0,0,0,0,0,14,2,0,0,0,0,0,2,14,1,4,2,0,0,0,4,16,15,12,15,5,0,0,3,16,6,0,5,11,0,0,0,9,11,4,13,5,0,0,0,1,11,16,9,0,0,6 +0,0,11,10,0,0,0,0,0,0,11,15,0,0,0,0,0,0,11,16,5,0,0,0,0,0,13,16,11,0,0,0,0,0,2,7,16,2,0,0,0,0,0,2,14,6,0,0,0,0,6,10,15,13,8,3,0,0,8,16,16,16,16,12,1 +0,0,4,15,16,13,13,10,0,0,12,13,10,15,14,2,0,2,16,6,2,14,6,0,0,1,5,0,9,11,0,0,0,0,7,12,16,14,6,0,0,0,8,15,15,11,2,0,0,0,2,16,8,0,0,0,0,0,7,15,4,0,0,0,7 +0,0,9,12,12,12,6,0,0,1,14,6,4,4,2,0,0,4,15,12,9,1,0,0,0,4,15,8,11,11,0,0,0,0,1,0,0,14,4,0,0,0,0,0,0,10,8,0,0,0,10,1,0,8,8,0,0,0,9,16,16,15,4,0,5 +0,0,0,6,16,0,0,0,0,0,0,12,13,0,0,0,0,0,5,15,3,6,15,0,0,1,14,11,0,13,13,0,0,10,16,13,12,16,5,0,0,11,12,12,16,14,2,0,0,0,0,3,16,9,0,0,0,0,0,8,16,3,0,0,4 +0,0,0,9,13,0,0,0,0,0,2,16,8,0,7,1,0,0,10,13,1,6,16,5,0,6,16,11,8,14,15,0,0,13,16,16,16,16,9,0,0,2,2,0,11,16,1,0,0,0,0,4,16,7,0,0,0,0,0,9,15,2,0,0,4 +0,0,6,16,16,16,12,0,0,0,13,10,8,16,5,0,0,1,15,1,9,12,0,0,0,0,4,0,13,7,0,0,0,0,10,16,16,16,9,0,0,0,7,14,12,8,3,0,0,0,3,15,5,0,0,0,0,0,8,15,0,0,0,0,7 +0,0,8,12,5,0,0,0,0,3,16,8,12,1,0,0,0,0,14,0,12,3,0,0,0,0,3,0,12,3,0,0,0,0,0,3,14,0,0,0,0,0,0,5,12,0,0,0,0,0,5,16,6,4,4,0,0,0,14,16,16,16,14,0,2 +0,0,10,16,8,0,0,0,0,7,13,4,14,7,0,0,0,7,13,2,7,8,0,0,0,0,7,16,16,5,0,0,0,1,12,13,15,6,0,0,0,3,16,2,4,13,6,0,0,4,16,4,1,11,12,0,0,0,7,15,16,14,2,0,8 +0,0,9,16,7,0,0,0,0,0,14,13,16,2,0,0,0,0,7,9,15,8,0,0,0,0,0,1,13,9,0,0,0,0,0,0,14,7,0,0,0,0,0,4,16,5,0,0,0,0,7,16,16,8,6,0,0,0,9,15,12,16,16,9,2 +0,3,15,16,8,0,0,0,0,9,16,11,15,2,0,0,0,11,10,4,16,2,0,0,0,2,4,6,16,1,0,0,0,0,0,10,13,0,0,0,0,0,2,14,13,0,0,0,0,3,16,16,16,16,13,1,0,3,16,12,8,12,11,1,2 +0,0,7,12,13,4,0,0,0,0,16,6,6,2,0,0,0,4,13,7,8,2,0,0,0,7,16,10,10,14,1,0,0,2,2,0,0,10,6,0,0,0,0,0,0,8,8,0,0,0,11,1,0,10,8,0,0,0,8,15,15,15,2,0,5 +0,0,4,16,8,11,7,0,0,0,10,16,15,16,6,0,0,3,16,4,6,15,0,0,0,3,8,0,13,8,0,0,0,0,6,16,16,13,6,0,0,0,3,14,13,9,3,0,0,0,0,14,6,0,0,0,0,0,4,15,2,0,0,0,7 +0,0,6,12,13,2,0,0,0,3,16,6,1,15,0,0,0,5,16,13,12,16,2,0,0,2,13,16,12,15,4,0,0,0,0,0,0,8,8,0,0,0,1,0,0,8,8,0,0,3,16,2,0,10,7,0,0,0,5,11,16,13,1,0,9 +0,2,16,16,16,16,4,0,0,4,16,6,8,7,1,0,0,4,16,7,2,0,0,0,0,4,16,16,16,6,0,0,0,0,5,4,10,15,0,0,0,0,0,0,1,14,6,0,0,2,14,4,4,16,8,0,0,3,13,16,16,15,1,0,5 +0,0,0,9,13,0,6,8,0,0,3,15,3,0,15,9,0,1,13,12,4,7,15,3,0,7,16,16,16,16,10,0,0,6,12,10,14,14,2,0,0,0,0,0,13,10,0,0,0,0,0,6,16,2,0,0,0,0,0,12,11,0,0,0,4 +0,0,0,10,9,0,0,0,0,0,5,15,0,0,9,5,0,0,14,10,0,7,16,4,0,5,16,7,5,16,6,0,0,11,16,16,16,14,0,0,0,3,4,11,16,8,0,0,0,0,0,7,16,2,0,0,0,0,0,12,12,0,0,0,4 +0,0,11,14,5,0,0,0,0,6,12,4,13,4,0,0,0,10,10,0,4,14,0,0,0,7,13,5,13,16,2,0,0,1,10,12,12,14,8,0,0,0,0,0,0,7,12,0,0,0,1,0,0,1,15,0,0,0,11,8,4,5,16,1,9 +0,0,9,13,16,5,0,0,0,3,16,8,4,13,0,0,0,6,10,1,0,9,2,0,0,5,4,0,0,4,8,0,0,8,4,0,0,4,8,0,0,6,6,0,0,4,9,0,0,0,13,2,0,7,8,0,0,0,8,12,13,15,2,0,0 +0,0,2,11,14,8,1,0,0,3,14,9,8,13,4,0,0,6,11,1,4,14,1,0,0,0,9,14,15,6,0,0,0,0,0,12,14,10,0,0,0,0,4,12,2,13,5,0,0,0,4,11,1,11,8,0,0,0,1,9,16,14,2,0,8 
+0,1,11,13,10,1,0,0,0,8,12,3,13,10,0,0,0,8,11,2,11,16,1,0,0,1,15,16,16,16,2,0,0,0,2,8,3,9,6,0,0,0,0,0,0,7,9,0,0,2,12,3,0,9,12,0,0,1,9,15,16,13,3,0,9 +0,0,8,16,15,6,0,0,0,5,14,4,4,15,0,0,0,6,13,0,1,15,2,0,0,1,11,11,13,10,0,0,0,0,1,16,16,3,0,0,0,0,12,9,5,13,2,0,0,0,16,2,1,13,8,0,0,0,8,15,16,14,1,0,8 +0,0,3,12,12,2,0,0,0,0,11,10,7,14,2,0,0,0,11,1,0,8,4,0,0,2,14,2,0,5,7,0,0,8,9,0,0,6,8,0,0,3,13,0,0,12,7,0,0,0,15,6,11,12,0,0,0,0,4,15,11,1,0,0,0 +0,0,5,12,12,9,3,0,0,0,8,16,16,16,4,0,0,0,9,16,16,14,1,0,0,0,11,16,16,12,0,0,0,0,12,16,16,12,0,0,0,0,11,16,16,12,0,0,0,0,4,16,16,12,0,0,0,0,6,12,12,6,0,0,1 +0,1,15,16,4,0,0,0,0,9,16,11,14,0,0,0,0,12,10,5,16,0,0,0,0,4,7,8,13,0,0,0,0,0,1,15,6,0,0,0,0,0,5,16,2,0,0,0,0,4,15,14,10,11,12,1,0,0,13,16,16,15,11,1,2 +0,0,6,12,13,9,0,0,0,7,14,6,7,16,3,0,0,4,6,5,14,6,0,0,0,0,0,12,14,4,0,0,0,0,0,0,3,14,2,0,0,0,0,0,0,9,7,0,0,0,3,1,0,9,8,0,0,0,5,14,12,13,2,0,3 +0,0,0,8,14,0,0,0,0,0,5,16,7,1,9,3,0,2,15,12,0,13,16,4,0,9,16,10,10,16,11,0,0,4,15,16,16,14,1,0,0,0,0,1,15,9,0,0,0,0,0,5,16,3,0,0,0,0,0,11,14,0,0,0,4 +0,1,8,15,16,16,9,0,0,8,16,12,8,8,5,0,0,8,14,7,0,0,0,0,0,9,16,16,12,0,0,0,0,8,13,8,16,3,0,0,0,0,0,1,16,4,0,0,0,0,0,8,15,1,0,0,0,0,12,15,5,0,0,0,5 +0,0,5,13,1,0,0,0,0,0,12,13,1,0,0,0,0,0,16,3,0,0,0,0,0,3,16,0,0,0,0,0,0,3,16,16,14,9,0,0,0,2,16,8,3,8,9,0,0,0,14,2,0,3,16,1,0,0,6,15,16,14,5,0,6 +0,0,6,12,10,14,8,0,0,0,15,14,13,16,3,0,0,1,12,0,9,11,0,0,0,0,0,4,16,8,2,0,0,0,9,16,16,16,9,0,0,0,2,15,6,0,0,0,0,0,3,15,1,0,0,0,0,0,8,11,0,0,0,0,7 +0,0,7,15,16,8,0,0,0,0,16,7,6,15,3,0,0,4,16,0,7,13,4,0,0,0,16,2,8,14,8,0,0,0,12,14,14,7,0,0,0,0,9,16,6,0,0,0,0,0,11,3,14,2,0,0,0,0,5,11,10,10,0,0,8 +0,1,11,16,11,1,0,0,0,6,11,16,16,7,0,0,0,1,2,9,16,11,0,0,0,2,14,12,16,12,0,0,0,0,3,8,4,13,4,0,0,0,0,0,0,10,8,0,0,0,4,12,16,14,6,0,0,0,14,8,4,0,0,0,9 +0,0,2,14,9,1,0,0,0,1,12,12,11,8,0,0,0,4,14,1,0,13,3,0,0,8,13,0,0,10,6,0,0,5,16,1,0,8,9,0,0,0,16,0,0,11,9,0,0,0,13,11,10,15,4,0,0,0,3,15,16,5,0,0,0 +0,0,6,10,8,3,0,0,0,0,6,16,16,9,0,0,0,0,9,16,16,6,0,0,0,0,7,16,16,10,0,0,0,0,11,16,16,8,0,0,0,0,7,16,16,9,0,0,0,0,10,16,16,6,0,0,0,0,4,9,12,11,2,0,1 +0,0,8,15,15,2,0,0,0,2,16,13,12,10,0,0,0,3,15,1,9,11,0,0,0,0,0,1,15,8,0,0,0,0,0,10,13,1,0,0,0,0,8,16,7,0,0,0,0,6,16,16,13,7,6,1,0,0,7,5,12,16,15,2,2 +0,0,7,13,16,5,0,0,0,6,15,7,6,14,0,0,0,9,5,1,10,9,0,0,0,0,0,8,16,5,0,0,0,0,0,1,6,15,1,0,0,0,0,0,0,1,12,0,0,0,4,5,2,5,13,0,0,0,6,12,16,14,5,0,3 +0,0,0,6,15,1,0,0,0,0,5,16,10,0,8,6,0,2,16,11,0,9,16,6,0,8,16,14,14,16,13,1,0,6,12,12,12,16,3,0,0,0,0,0,13,11,0,0,0,0,0,6,16,5,0,0,0,0,0,10,14,0,0,0,4 +0,1,7,15,16,16,14,0,0,10,16,11,6,3,1,0,0,7,16,16,12,0,0,0,0,8,16,12,16,4,0,0,0,1,4,0,13,8,0,0,0,0,0,0,15,8,0,0,0,0,0,7,16,2,0,0,0,0,13,15,5,0,0,0,5 +0,0,2,12,1,0,0,0,0,0,11,12,0,0,0,0,0,2,16,4,0,0,0,0,0,6,16,10,10,5,0,0,0,5,16,15,12,14,6,0,0,4,16,3,0,8,12,0,0,0,14,9,4,11,13,0,0,0,3,14,16,12,3,0,6 +0,0,3,15,16,16,12,0,0,0,12,12,7,16,6,0,0,4,12,0,9,13,0,0,0,0,1,1,13,7,0,0,0,0,8,13,16,16,6,0,0,0,12,15,12,6,1,0,0,0,0,15,5,0,0,0,0,0,3,16,2,0,0,0,7 +0,0,14,16,8,0,0,0,0,0,16,4,13,8,8,0,0,0,12,7,12,14,5,0,0,0,4,15,16,5,0,0,0,0,0,14,14,0,0,0,0,0,8,10,11,2,0,0,0,0,13,0,12,3,0,0,0,0,14,15,12,1,0,0,8 +0,0,12,15,13,2,0,0,0,1,16,5,5,13,0,0,0,1,7,13,0,8,4,0,0,6,11,13,13,15,4,0,0,1,9,12,12,13,1,0,0,0,0,0,0,11,6,0,0,0,0,0,0,5,14,0,0,0,10,13,12,15,6,0,9 +0,0,3,12,7,0,0,0,0,0,14,12,12,4,0,0,0,2,14,0,1,13,0,0,0,1,12,0,0,7,5,0,0,2,13,0,0,2,10,0,0,0,15,3,0,3,14,0,0,0,7,12,8,11,12,0,0,0,2,11,16,11,2,0,0 
+0,0,3,13,10,1,0,0,0,0,3,16,16,4,0,0,0,0,1,16,16,2,0,0,0,0,6,16,16,1,0,0,0,0,4,16,16,1,0,0,0,0,4,16,16,3,0,0,0,0,7,16,16,0,0,0,0,0,2,14,16,5,0,0,1 +0,0,13,16,13,1,0,0,0,6,16,10,15,5,0,0,0,3,15,0,11,9,0,0,0,0,4,0,12,8,0,0,0,0,0,0,15,8,0,0,0,0,0,8,16,4,0,0,0,0,5,16,16,13,10,1,0,0,13,16,16,16,16,9,2 +0,0,6,14,16,11,0,0,0,6,14,7,4,16,4,0,0,7,7,0,5,16,2,0,0,0,0,14,16,5,0,0,0,0,0,4,13,11,0,0,0,0,0,0,0,11,8,0,0,0,0,2,4,10,12,0,0,0,9,16,16,11,3,0,3 +0,0,0,8,16,0,0,0,0,0,3,16,8,0,0,0,0,1,13,12,0,4,13,1,0,6,16,9,7,15,10,0,0,9,16,16,16,15,2,0,0,0,4,1,14,10,0,0,0,0,0,3,16,5,0,0,0,0,0,11,13,0,0,0,4 +0,0,5,10,14,16,11,0,0,2,15,15,5,4,1,0,0,2,16,9,4,1,0,0,0,2,16,16,16,11,0,0,0,2,9,1,0,14,4,0,0,0,0,0,1,14,3,0,0,0,0,2,13,7,0,0,0,0,7,14,7,0,0,0,5 +0,0,1,9,15,1,0,0,0,0,9,14,4,0,0,0,0,0,16,3,0,0,0,0,0,6,13,0,0,0,0,0,0,4,14,12,16,13,3,0,0,2,15,13,4,3,13,0,0,0,9,8,2,4,16,1,0,0,0,9,12,12,8,0,6 +0,0,5,12,16,12,4,0,0,1,12,7,5,16,5,0,0,2,9,0,8,9,0,0,0,0,2,3,12,1,0,0,0,4,12,14,15,12,4,0,0,5,4,16,1,0,0,0,0,0,1,12,0,0,0,0,0,0,3,12,0,0,0,0,7 +0,2,15,12,1,0,0,0,0,4,16,13,13,0,0,0,0,0,14,3,15,12,5,0,0,0,5,16,16,11,0,0,0,2,13,13,14,2,0,0,0,5,13,0,6,8,0,0,0,4,11,0,1,15,0,0,0,2,12,16,16,7,0,0,8 +0,0,9,13,6,0,0,0,0,0,14,7,11,3,0,0,0,4,7,8,5,8,0,0,0,8,10,15,14,9,0,0,0,0,4,7,9,13,1,0,0,0,0,0,0,5,11,0,0,0,2,0,2,12,6,0,0,0,10,14,14,7,0,0,9 +0,0,5,15,9,0,0,0,0,0,15,6,11,6,0,0,0,7,9,0,0,14,0,0,0,5,9,0,0,8,6,0,0,4,13,0,0,4,8,0,0,1,16,0,0,4,11,0,0,0,15,7,5,16,4,0,0,0,2,15,15,5,0,0,0 +0,0,12,14,6,0,0,0,0,2,16,7,13,10,0,0,0,0,16,2,1,13,4,0,0,0,9,13,8,16,2,0,0,0,6,16,16,13,0,0,0,0,0,2,3,16,0,0,0,0,1,6,13,10,0,0,0,0,13,9,8,2,0,0,9 +0,0,8,15,16,16,6,0,0,2,16,11,5,0,0,0,0,3,16,5,0,0,0,0,0,5,16,16,13,0,0,0,0,10,13,6,15,5,0,0,0,3,1,0,11,8,0,0,0,0,0,6,16,4,0,0,0,0,9,16,8,0,0,0,5 +0,0,6,11,16,16,3,0,0,5,16,15,5,0,0,0,0,11,16,15,2,0,0,0,0,12,15,12,12,0,0,0,0,2,1,4,16,0,0,0,0,0,0,0,16,4,0,0,0,0,0,2,16,3,0,0,0,0,5,16,13,0,0,0,5 +0,0,0,6,13,3,0,0,0,0,1,14,11,0,0,0,0,0,7,15,2,0,0,0,0,0,10,12,0,0,0,0,0,0,13,15,16,13,5,0,0,0,10,16,5,11,14,0,0,0,7,15,5,10,14,0,0,0,0,3,14,16,9,0,6 +0,0,7,16,16,16,6,0,0,0,12,13,5,1,0,0,0,0,15,7,1,0,0,0,0,3,16,16,13,0,0,0,0,11,15,5,16,4,0,0,0,5,3,1,16,3,0,0,0,0,0,11,12,0,0,0,0,0,7,15,1,0,0,0,5 +0,0,0,12,6,0,0,0,0,0,8,15,13,4,0,0,0,5,16,6,3,12,0,0,0,7,14,1,0,11,5,0,0,3,14,0,0,7,10,0,0,1,14,2,0,9,9,0,0,0,9,11,6,15,5,0,0,0,0,10,16,11,0,0,0 +0,0,10,13,9,1,0,0,0,2,16,7,10,8,0,0,0,0,12,12,7,11,0,0,0,3,16,16,16,7,0,0,0,0,5,8,12,10,1,0,0,0,0,0,0,11,7,0,0,0,0,0,0,3,15,0,0,0,11,16,16,16,8,0,9 +0,1,13,14,2,0,0,0,0,7,14,9,5,0,0,0,0,6,13,3,12,6,4,0,0,1,14,12,14,16,4,0,0,0,2,16,16,7,0,0,0,0,11,14,8,13,0,0,0,4,16,4,2,14,2,0,0,1,12,14,13,6,0,0,8 +0,0,5,15,14,3,0,0,0,0,12,7,2,12,0,0,0,0,16,3,0,12,1,0,0,0,12,11,10,15,0,0,0,0,2,10,15,13,1,0,0,0,0,0,0,14,4,0,0,0,0,6,12,15,2,0,0,0,7,13,4,0,0,0,9 +0,0,4,15,16,6,0,0,0,0,13,11,11,15,0,0,0,0,15,13,15,16,7,0,0,0,7,16,16,11,2,0,0,0,5,15,16,2,0,0,0,0,16,9,12,11,0,0,0,2,16,6,8,16,0,0,0,0,7,14,13,8,0,0,8 +0,0,0,3,16,5,0,0,0,0,3,14,10,0,9,11,0,1,13,11,0,2,15,8,0,7,16,9,11,16,15,1,0,6,15,13,12,16,9,0,0,0,0,0,8,15,2,0,0,0,0,1,15,7,0,0,0,0,0,5,15,2,0,0,4 +0,0,6,15,12,5,0,0,0,0,8,16,16,13,1,0,0,0,8,16,16,12,0,0,0,0,8,16,16,10,0,0,0,0,16,16,16,5,0,0,0,5,16,16,16,1,0,0,0,3,15,16,16,2,0,0,0,0,10,16,15,3,0,0,1 +0,0,5,10,11,13,12,0,0,2,14,8,8,13,10,0,0,1,6,0,4,13,0,0,0,0,0,1,15,2,0,0,0,0,0,11,15,8,1,0,0,2,15,15,8,7,0,0,0,1,9,12,0,0,0,0,0,0,7,11,0,0,0,0,7 +0,0,2,15,16,15,0,0,0,0,12,9,11,12,0,0,0,5,15,0,13,7,0,0,0,5,6,3,14,5,2,0,0,0,0,9,16,16,9,0,0,0,7,16,9,2,0,0,0,0,1,15,3,0,0,0,0,0,3,16,0,0,0,0,7 
+0,0,7,14,15,7,0,0,0,6,16,8,7,16,4,0,0,11,6,1,10,14,1,0,0,1,0,4,16,6,0,0,0,0,0,2,11,13,1,0,0,0,0,0,0,11,7,0,0,0,3,4,8,14,3,0,0,0,10,13,12,4,0,0,3 +0,1,9,16,16,15,3,0,0,8,16,12,8,8,3,0,0,6,16,9,3,0,0,0,0,8,16,16,16,4,0,0,0,3,6,4,13,11,0,0,0,0,0,0,8,13,0,0,0,0,5,8,15,10,0,0,0,0,11,16,11,1,0,0,5 +0,0,2,16,10,0,0,0,0,0,4,16,16,5,0,0,0,0,8,16,16,3,0,0,0,0,9,16,16,3,0,0,0,0,8,16,16,3,0,0,0,0,8,16,16,1,0,0,0,0,5,16,14,0,0,0,0,0,1,12,16,3,0,0,1 +0,0,0,10,11,1,0,0,0,0,1,15,8,8,0,0,0,5,4,10,0,12,0,0,0,7,8,10,0,7,5,0,0,6,10,0,0,2,9,0,0,1,13,0,0,2,11,0,0,0,6,11,4,10,11,0,0,0,0,9,15,14,5,0,0 +0,2,0,8,9,0,0,0,0,13,5,14,8,7,0,0,0,12,5,2,0,9,0,0,0,7,5,0,0,3,5,0,0,3,10,0,0,2,10,0,0,1,13,0,0,1,12,0,0,0,5,13,5,9,13,0,0,0,0,9,16,16,7,0,0 +0,0,6,16,13,12,14,1,0,0,14,4,4,15,4,0,0,1,7,0,10,7,0,0,0,0,0,2,13,1,0,0,0,2,9,14,16,12,0,0,0,4,6,15,2,4,1,0,0,0,6,7,0,0,0,0,0,0,10,4,0,0,0,0,7 +0,0,9,16,6,0,0,0,0,3,16,1,16,10,8,0,0,0,15,6,16,8,0,0,0,0,3,16,11,0,0,0,0,0,1,14,12,0,0,0,0,0,6,9,11,2,0,0,0,0,12,1,13,0,0,0,0,0,12,14,3,0,0,0,8 +0,0,11,16,15,3,0,0,0,5,16,12,11,13,0,0,0,3,13,1,5,15,0,0,0,0,0,0,12,11,0,0,0,0,0,1,16,7,0,0,0,0,0,10,15,0,0,0,0,0,12,16,16,11,1,0,0,0,13,13,8,13,16,8,2 +0,0,6,16,15,5,0,0,0,1,16,14,8,15,1,0,0,9,13,1,0,12,6,0,0,5,9,0,0,9,10,0,0,6,9,0,0,9,11,0,0,7,16,1,0,11,11,0,0,3,16,11,13,16,8,0,0,0,8,16,16,12,1,0,0 +0,0,0,14,14,9,0,0,0,0,4,16,16,10,0,0,0,0,13,16,15,2,0,0,0,1,15,16,11,0,0,0,0,7,16,16,5,0,0,0,0,3,16,16,7,0,0,0,0,0,16,16,8,0,0,0,0,0,3,12,12,0,0,0,1 +0,0,9,16,14,0,0,0,0,0,16,8,13,7,0,0,0,0,12,0,8,8,0,0,0,0,0,0,12,8,0,0,0,0,0,0,16,5,0,0,0,0,0,9,13,0,0,0,0,0,10,16,15,10,9,1,0,0,12,14,13,16,16,5,2 +0,0,1,14,16,8,0,0,0,0,10,16,11,4,0,0,0,0,1,11,1,0,0,0,0,1,12,3,0,0,0,0,0,2,16,14,13,8,1,0,0,3,16,16,13,16,8,0,0,0,12,16,7,15,12,0,0,0,1,13,16,16,8,0,6 +0,0,11,16,16,10,0,0,1,14,16,9,11,16,1,0,1,14,3,0,12,14,0,0,0,0,0,6,16,7,0,0,0,0,0,0,8,16,5,0,0,0,0,0,0,12,10,0,0,0,2,4,5,14,13,0,0,0,11,16,16,16,4,0,3 +0,0,9,16,16,15,4,0,0,8,16,9,7,14,11,0,0,5,5,1,13,15,1,0,0,0,0,10,16,7,0,0,0,0,0,1,11,16,4,0,0,0,0,0,0,14,8,0,0,0,3,4,6,16,4,0,0,0,14,16,16,8,0,0,3 +0,0,10,16,13,12,15,5,0,4,16,8,12,16,6,0,0,6,12,2,16,7,0,0,0,1,5,9,14,1,0,0,0,1,7,16,12,2,0,0,0,8,16,16,12,5,0,0,0,1,11,10,0,0,0,0,0,0,14,6,0,0,0,0,7 +0,1,13,16,16,10,0,0,0,8,15,8,15,15,0,0,0,3,8,5,16,6,0,0,0,0,0,4,16,9,0,0,0,0,0,0,6,16,5,0,0,0,0,0,0,5,16,0,0,0,8,6,6,13,12,0,0,1,15,16,16,14,3,0,3 +0,1,12,16,16,9,0,0,0,11,15,9,7,16,3,0,0,13,3,1,10,15,1,0,0,0,0,11,16,8,0,0,0,0,0,5,15,16,5,0,0,0,0,0,0,10,13,0,0,0,7,4,8,15,9,0,0,0,13,16,16,12,1,0,3 +0,0,1,14,11,0,0,0,0,0,9,15,2,0,4,0,0,2,16,6,0,7,16,2,0,8,16,6,6,16,12,0,0,5,16,16,16,15,3,0,0,0,1,4,16,8,0,0,0,0,0,9,16,1,0,0,0,0,0,15,16,0,0,0,4 +0,0,1,14,6,0,0,0,0,0,7,15,1,0,0,0,0,0,13,7,0,0,0,0,0,0,13,5,0,0,0,0,0,0,14,7,5,4,1,0,0,0,10,16,13,14,14,0,0,0,9,14,1,4,16,3,0,0,1,12,13,16,9,1,6 +0,0,0,7,11,0,0,0,0,0,1,16,10,0,0,0,0,0,7,11,0,0,0,0,0,0,11,8,1,1,0,0,0,0,12,16,16,15,5,0,0,0,14,11,0,1,15,0,0,0,6,11,1,3,14,2,0,0,0,8,16,16,7,0,6 +0,0,0,13,13,0,0,0,0,0,7,16,3,0,0,0,0,0,12,11,0,0,0,0,0,0,14,6,0,0,0,0,0,1,16,12,16,11,3,0,0,2,16,15,9,9,15,2,0,0,11,12,1,3,16,6,0,0,1,13,16,16,15,1,6 +0,0,3,15,7,0,0,0,0,3,15,11,1,1,7,0,0,8,16,2,0,13,15,0,0,8,16,13,14,16,5,0,0,0,8,9,15,13,0,0,0,0,0,8,16,2,0,0,0,0,0,12,12,0,0,0,0,0,3,16,4,0,0,0,4 +0,0,7,12,11,1,0,0,0,0,12,10,5,14,0,0,0,6,13,13,3,15,0,0,0,8,9,11,16,8,0,0,0,1,11,10,9,11,1,0,0,0,0,0,0,13,6,0,0,0,0,0,0,10,12,0,0,0,8,12,16,13,2,0,9 +0,0,0,15,12,1,0,0,0,0,5,16,16,6,0,0,0,0,2,16,16,3,0,0,0,0,2,16,16,1,0,0,0,0,6,16,13,0,0,0,0,0,1,16,16,2,0,0,0,0,3,16,15,3,0,0,0,0,0,15,16,1,0,0,1 
+0,0,0,8,16,16,7,0,0,0,15,16,10,8,1,0,0,3,16,12,5,0,0,0,0,8,16,16,16,3,0,0,0,8,11,2,13,9,0,0,0,0,0,0,11,13,0,0,0,0,0,0,12,11,0,0,0,0,0,11,14,2,0,0,5 +0,0,5,16,8,0,0,0,0,0,10,5,12,6,0,0,0,4,14,0,2,13,0,0,0,4,10,0,0,9,8,0,0,5,8,0,0,8,8,0,0,2,11,0,0,9,6,0,0,0,15,6,8,15,1,0,0,0,4,13,12,3,0,0,0 +0,1,12,15,10,2,0,0,0,4,14,1,6,12,2,0,0,7,15,0,1,14,4,0,0,3,15,12,15,10,0,0,0,0,3,15,1,0,0,0,0,0,0,3,13,1,0,0,0,0,0,0,10,6,0,0,0,0,11,12,13,4,0,0,9 +0,1,12,16,16,16,4,0,0,4,16,10,4,1,1,0,0,6,13,0,0,0,0,0,0,6,16,12,5,0,0,0,0,7,11,11,15,0,0,0,0,0,0,4,16,2,0,0,0,0,2,13,10,0,0,0,0,2,16,11,0,0,0,0,5 +0,0,10,16,8,0,0,0,0,4,16,13,16,3,0,0,0,0,12,1,11,6,0,0,0,0,0,0,12,8,0,0,0,0,0,0,14,5,0,0,0,0,0,7,16,6,4,0,0,0,5,16,16,16,16,4,0,0,11,15,9,8,6,0,2 +0,1,14,14,6,0,0,0,0,2,16,1,14,2,0,0,0,0,10,7,10,6,4,0,0,0,1,12,16,14,5,0,0,0,3,14,16,4,0,0,0,1,15,5,6,12,0,0,0,4,10,0,1,15,0,0,0,1,12,12,12,5,0,0,8 +0,1,11,16,16,10,0,0,0,8,16,11,7,16,1,0,0,7,11,0,5,16,2,0,0,0,2,0,7,14,0,0,0,0,0,0,11,12,0,0,0,0,0,3,16,6,0,0,0,0,5,15,16,11,6,0,0,0,14,16,13,13,16,5,2 +0,0,2,14,14,6,0,0,0,0,10,15,11,15,2,0,0,3,16,3,0,12,6,0,0,3,9,0,0,9,10,0,0,10,11,0,0,8,12,0,0,7,16,1,0,11,13,0,0,0,15,14,12,15,10,0,0,0,3,14,16,13,5,0,0 +0,0,5,15,13,2,0,0,0,1,15,11,8,13,0,0,0,5,14,0,0,14,5,0,0,9,16,1,0,7,9,0,0,9,13,0,0,5,14,0,0,6,16,2,0,5,15,0,0,2,14,11,5,14,12,0,0,0,5,15,16,15,3,0,0 +0,0,4,15,12,2,0,0,0,0,9,16,14,2,0,0,0,0,12,16,15,0,0,0,0,0,11,16,12,1,0,0,0,0,9,16,14,0,0,0,0,0,10,16,12,0,0,0,0,0,9,16,14,0,0,0,0,0,4,12,12,0,0,0,1 +0,0,10,15,8,13,6,0,0,0,13,14,14,15,2,0,0,1,15,0,13,7,0,0,0,2,7,9,16,13,13,0,0,0,10,16,11,7,2,0,0,0,4,16,2,0,0,0,0,0,7,13,0,0,0,0,0,0,12,4,0,0,0,0,7 +0,0,0,9,15,2,0,0,0,0,5,16,11,1,0,0,0,0,13,15,1,0,0,0,0,2,16,11,0,0,0,0,0,2,16,11,4,4,0,0,0,2,15,16,16,14,10,1,0,0,9,16,7,3,15,6,0,0,0,7,15,16,16,6,6 +0,1,10,14,13,4,0,0,0,12,11,5,8,14,0,0,0,8,3,2,12,8,0,0,0,0,3,15,15,4,0,0,0,0,1,4,7,14,5,0,0,0,0,0,0,7,12,0,0,0,0,0,1,11,11,0,0,0,12,16,16,9,1,0,3 +0,1,15,16,10,0,0,0,0,7,15,10,16,0,0,0,0,4,12,1,16,4,0,0,0,0,2,3,16,1,0,0,0,0,0,4,15,0,0,0,0,0,0,11,12,0,0,0,0,0,11,16,14,14,15,3,0,1,15,16,16,16,16,5,2 +0,0,3,15,12,2,0,0,0,0,1,16,16,6,0,0,0,0,4,16,16,2,0,0,0,0,3,16,16,6,0,0,0,0,4,16,16,0,0,0,0,0,1,15,16,6,0,0,0,0,4,16,16,4,0,0,0,0,4,16,16,6,0,0,1 +0,0,11,8,4,13,16,3,0,2,16,16,16,14,9,1,0,4,13,6,16,4,0,0,0,0,2,15,10,0,0,0,0,5,13,16,14,12,2,0,0,8,15,15,12,12,2,0,0,0,15,9,0,0,0,0,0,0,16,7,0,0,0,0,7 +0,0,0,9,12,0,0,0,0,0,4,16,5,0,1,0,0,2,14,9,0,5,15,1,0,8,16,9,12,16,9,0,0,5,16,13,13,13,0,0,0,0,0,1,15,7,0,0,0,0,0,4,16,1,0,0,0,0,0,12,12,0,0,0,4 +0,0,1,13,2,0,0,0,0,0,8,15,1,0,0,0,0,0,14,7,0,0,0,0,0,0,14,6,0,0,0,0,0,0,16,5,9,9,3,0,0,0,12,16,13,9,14,1,0,0,8,15,0,1,14,5,0,0,1,11,16,16,13,1,6 +0,1,10,15,16,11,0,0,0,8,11,4,7,14,0,0,0,7,1,2,13,7,0,0,0,0,0,10,16,6,0,0,0,0,0,0,1,14,5,0,0,0,0,0,0,7,9,0,0,0,2,0,3,11,7,0,0,0,15,16,16,7,0,0,3 +0,0,13,14,10,2,0,0,0,0,6,16,16,16,0,0,0,0,0,16,16,16,4,0,0,0,4,16,16,14,2,0,0,0,8,16,16,7,0,0,0,3,15,16,16,4,0,0,0,1,16,16,14,1,0,0,0,0,14,16,13,3,0,0,1 +0,0,3,11,14,12,3,0,0,2,13,10,4,10,12,0,0,2,11,2,0,9,9,0,0,0,0,3,10,10,1,0,0,0,7,16,16,2,0,0,0,0,3,0,14,3,0,0,0,0,0,1,13,2,0,0,0,3,7,14,5,0,0,0,3 +0,0,11,12,2,0,0,0,0,0,15,8,13,2,0,0,0,0,7,11,9,9,0,0,0,4,12,12,16,7,0,0,0,2,10,12,9,12,1,0,0,0,0,0,0,7,11,0,0,0,0,0,0,0,16,2,0,0,6,12,12,13,11,0,9 +0,0,1,11,14,5,0,0,0,0,0,15,16,11,0,0,0,0,5,16,16,8,0,0,0,0,5,16,16,5,0,0,0,0,4,16,16,3,0,0,0,0,9,16,16,2,0,0,0,0,8,16,14,0,0,0,0,0,2,13,16,9,0,0,1 
+0,0,4,13,16,16,7,0,0,0,15,10,7,16,1,0,0,7,12,0,12,7,0,0,0,9,5,3,16,2,0,0,0,2,11,16,16,12,7,0,0,5,10,16,12,8,3,0,0,0,3,15,2,0,0,0,0,0,6,14,0,0,0,0,7 +0,0,0,13,6,0,0,0,0,0,5,16,8,0,0,0,0,0,11,8,0,0,0,0,0,0,13,4,0,0,0,0,0,0,14,15,16,14,5,0,0,0,13,7,0,0,13,1,0,0,10,6,0,5,14,0,0,0,2,13,12,15,4,0,6 +0,0,10,15,8,0,0,0,0,0,16,4,11,3,5,0,0,0,14,5,7,10,7,0,0,0,4,13,12,11,0,0,0,0,2,14,12,0,0,0,0,1,14,7,12,4,0,0,0,7,10,0,3,12,0,0,0,1,10,11,12,10,0,0,8 +0,0,0,8,12,0,0,0,0,0,5,16,3,0,2,0,0,1,16,5,1,10,15,1,0,9,16,4,9,16,7,0,0,7,16,16,16,7,0,0,0,0,2,8,16,2,0,0,0,0,0,10,13,0,0,0,0,0,0,12,10,0,0,0,4 +0,1,12,16,13,7,0,0,0,12,11,4,4,15,0,0,0,8,3,0,6,14,2,0,0,0,0,9,16,8,0,0,0,0,0,1,5,13,3,0,0,0,0,0,0,7,8,0,0,0,0,0,0,9,10,0,0,0,10,10,13,14,1,0,3 +0,0,1,12,11,1,0,0,0,0,1,16,16,4,0,0,0,0,3,16,15,2,0,0,0,0,9,16,12,0,0,0,0,0,12,16,5,0,0,0,0,0,14,16,6,0,0,0,0,0,9,16,8,0,0,0,0,0,2,11,13,1,0,0,1 +0,0,0,9,11,0,0,0,0,0,5,15,1,0,0,0,0,2,16,4,0,3,9,0,0,7,14,0,1,14,12,0,0,9,16,12,14,15,1,0,0,0,6,8,15,6,0,0,0,0,0,4,16,1,0,0,0,0,0,9,13,0,0,0,4 +0,0,5,15,12,1,0,0,0,0,8,8,6,12,0,0,0,2,5,0,0,12,2,0,0,4,14,0,0,8,6,0,0,7,11,0,0,9,7,0,0,3,13,0,0,12,3,0,0,0,16,5,9,14,1,0,0,0,5,15,11,4,0,0,0 +0,1,13,16,16,11,0,0,0,10,16,7,0,0,0,0,0,5,15,4,0,0,0,0,0,2,16,14,5,0,0,0,0,3,15,16,16,3,0,0,0,0,1,0,11,11,0,0,0,0,1,5,15,9,0,0,0,0,13,16,13,1,0,0,5 +0,1,13,16,16,12,1,0,0,12,15,6,12,16,3,0,0,13,10,10,16,9,0,0,0,0,5,16,15,4,0,0,0,0,0,2,10,14,1,0,0,0,0,0,1,16,7,0,0,0,3,1,5,16,9,0,0,1,14,16,16,11,1,0,3 +0,0,2,14,15,3,0,0,0,0,7,16,11,0,0,0,0,0,13,15,1,0,0,0,0,1,16,11,0,0,0,0,0,2,16,9,0,0,0,0,0,2,16,16,16,9,0,0,0,0,13,16,10,16,7,0,0,0,3,15,16,16,4,0,6 +0,0,12,16,15,6,0,0,0,0,15,12,7,15,1,0,0,1,15,15,7,16,4,0,0,1,12,16,16,14,1,0,0,0,0,4,10,13,0,0,0,0,0,0,1,15,3,0,0,0,3,0,2,16,6,0,0,0,13,16,16,15,1,0,9 +0,0,1,10,7,0,0,0,0,0,6,13,0,0,0,0,0,0,13,5,0,0,0,0,0,2,16,1,5,2,0,0,0,3,16,15,11,9,5,0,0,2,16,6,0,2,14,0,0,0,10,10,1,5,15,0,0,0,1,12,15,14,6,0,6 +0,0,3,16,12,2,0,0,0,0,6,16,16,3,0,0,0,0,9,16,16,0,0,0,0,0,9,16,16,1,0,0,0,0,9,16,16,0,0,0,0,0,9,16,13,0,0,0,0,0,7,16,13,0,0,0,0,0,3,14,16,3,0,0,1 +0,0,11,16,12,12,16,7,0,3,16,9,8,16,10,0,0,1,10,0,6,14,2,0,0,0,1,8,15,15,11,0,0,0,12,16,15,7,2,0,0,0,4,13,7,0,0,0,0,0,4,16,3,0,0,0,0,0,12,12,0,0,0,0,7 +0,0,2,10,16,12,0,0,0,2,15,14,8,1,0,0,0,2,16,4,0,0,0,0,0,6,16,14,13,3,0,0,0,10,16,7,11,12,0,0,0,0,1,0,4,16,0,0,0,0,0,0,7,16,2,0,0,0,2,14,16,8,0,0,5 +0,0,0,5,14,0,0,0,0,0,1,15,5,0,0,0,0,0,11,9,0,0,3,0,0,4,14,1,0,8,15,0,0,10,14,12,13,16,6,0,0,5,12,9,11,15,0,0,0,0,0,0,11,9,0,0,0,0,0,4,14,1,0,0,4 +0,0,0,11,8,0,0,0,0,0,8,15,2,0,2,1,0,2,16,5,0,4,16,3,0,5,16,8,11,16,9,0,0,4,15,14,13,16,2,0,0,0,0,0,10,10,0,0,0,0,0,3,16,3,0,0,0,0,0,9,11,0,0,0,4 +0,0,8,16,14,15,0,0,0,0,13,11,15,11,0,0,0,4,14,1,13,5,0,0,0,3,7,8,16,4,3,0,0,0,14,16,16,13,7,0,0,0,5,13,2,0,0,0,0,0,6,10,0,0,0,0,0,0,10,6,0,0,0,0,7 +0,2,15,15,3,0,0,0,0,11,15,11,12,0,0,0,0,8,10,0,16,0,0,0,0,1,3,2,16,0,0,0,0,0,0,4,12,0,0,0,0,0,1,12,9,0,0,0,0,0,14,16,13,13,15,3,0,2,13,14,12,12,8,1,2 +0,0,9,14,14,3,0,0,0,4,13,1,4,11,0,0,0,1,14,1,11,7,6,0,0,0,15,13,9,12,3,0,0,0,1,14,14,0,0,0,0,0,5,14,12,0,0,0,0,0,12,1,9,5,0,0,0,0,11,12,13,3,0,0,8 +0,0,5,16,15,3,0,0,0,0,13,15,14,7,0,0,0,0,13,4,13,8,0,0,0,0,0,0,16,5,0,0,0,0,0,7,16,0,0,0,0,0,6,15,9,0,0,0,0,2,16,16,16,9,13,3,0,0,5,2,9,16,14,3,2 +0,0,7,16,13,2,0,0,0,0,14,15,13,9,0,0,0,0,14,8,9,10,0,0,0,0,1,2,9,12,0,0,0,0,0,0,13,8,0,0,0,0,0,5,16,4,0,0,0,0,6,15,16,5,5,5,0,0,6,16,16,16,16,13,2 +0,0,10,16,15,8,2,0,0,9,16,12,8,9,3,0,0,13,16,9,0,0,0,0,0,7,16,16,10,0,0,0,0,7,13,8,16,4,0,0,0,0,0,1,16,5,0,0,0,0,2,12,15,1,0,0,0,0,13,16,6,0,0,0,5 
+0,0,8,11,8,10,14,8,0,3,15,11,12,16,5,1,0,1,16,0,11,8,0,0,0,0,1,5,16,5,0,0,0,0,3,15,16,14,0,0,0,0,7,16,3,0,0,0,0,0,6,13,0,0,0,0,0,0,11,7,0,0,0,0,7 +0,0,8,15,9,0,0,0,0,1,16,10,16,9,0,0,0,2,6,2,16,16,1,0,0,3,9,9,16,14,0,0,0,0,7,5,12,5,0,0,0,0,0,0,7,12,1,0,0,0,0,0,2,16,4,0,0,0,9,13,12,10,1,0,9 +0,0,1,10,16,13,0,0,0,2,15,15,9,6,0,0,0,5,16,1,0,0,0,0,0,8,16,14,5,0,0,0,0,11,16,13,15,3,0,0,0,1,3,0,11,12,0,0,0,0,0,2,13,10,0,0,0,0,1,14,15,3,0,0,5 +0,0,0,9,16,3,0,0,0,0,7,16,7,0,0,0,0,2,15,8,0,1,7,0,0,9,14,0,2,13,14,0,0,8,16,14,15,16,6,0,0,1,7,8,16,12,0,0,0,0,0,7,16,5,0,0,0,0,0,12,12,0,0,0,4 +0,0,15,12,3,0,0,0,0,0,15,9,14,1,0,0,0,0,12,10,15,1,0,0,0,0,5,16,4,10,7,0,0,0,0,11,14,11,0,0,0,0,1,12,13,11,0,0,0,0,12,6,0,9,4,0,0,0,15,4,5,13,6,0,8 +0,1,14,16,16,11,2,0,0,0,14,9,2,10,11,0,0,0,5,16,5,14,5,0,0,0,0,12,16,16,5,0,0,0,7,14,14,8,0,0,0,1,14,3,6,11,0,0,0,4,8,0,8,11,0,0,0,2,13,12,15,2,0,0,8 +0,0,1,10,15,2,0,0,0,0,7,16,7,3,5,0,0,3,16,7,3,16,11,0,0,9,14,1,10,14,2,0,0,11,16,16,16,10,0,0,0,2,4,8,16,3,0,0,0,0,0,9,13,0,0,0,0,0,0,12,9,0,0,0,4 +0,0,2,12,9,0,0,0,0,0,1,15,7,9,0,0,0,0,0,11,1,14,1,0,0,0,10,8,11,13,1,0,0,0,9,14,16,9,0,0,0,0,0,0,1,10,3,0,0,0,0,0,0,3,14,2,0,0,5,14,13,14,10,1,9 +0,0,2,12,12,1,0,0,0,2,12,12,6,11,0,0,0,10,13,0,0,10,2,0,0,8,11,0,0,6,6,0,0,7,9,0,0,4,9,0,0,3,13,0,0,8,12,0,0,0,12,12,9,16,7,0,0,0,1,12,16,9,1,0,0 +0,3,14,5,0,0,0,0,0,2,15,15,3,1,1,0,0,0,6,16,4,13,8,0,0,0,0,14,14,13,1,0,0,0,2,15,13,1,0,0,0,1,14,8,15,0,0,0,0,5,12,0,12,3,0,0,0,2,14,13,15,3,0,0,8 +0,0,15,15,10,2,0,0,0,0,16,5,8,11,0,0,0,0,9,12,1,14,4,0,0,0,6,16,16,15,2,0,0,0,0,4,11,1,0,0,0,0,0,0,7,7,0,0,0,0,0,0,2,11,0,0,0,0,10,12,13,11,0,0,9 +0,3,16,9,2,0,0,0,0,3,16,13,12,0,0,0,0,0,12,11,14,10,6,0,0,0,2,15,16,10,2,0,0,0,6,14,15,1,0,0,0,3,15,4,9,7,0,0,0,6,13,1,10,9,0,0,0,2,11,12,14,4,0,0,8 +0,0,4,14,14,2,0,0,0,5,16,11,7,10,0,0,0,4,16,4,0,11,1,0,0,7,14,1,0,7,5,0,0,4,10,0,0,7,7,0,0,1,12,0,0,12,3,0,0,0,9,6,6,13,0,0,0,0,0,10,14,4,0,0,0 +0,0,0,5,15,13,1,0,0,0,2,14,16,14,0,0,0,1,13,16,16,12,0,0,0,9,16,14,16,6,0,0,0,3,5,6,16,4,0,0,0,0,0,8,16,2,0,0,0,0,0,9,16,3,0,0,0,0,0,4,15,13,1,0,1 +0,2,14,16,6,0,0,0,0,12,14,12,15,0,0,0,0,11,2,8,12,0,0,0,0,0,0,11,11,0,0,0,0,0,2,16,4,0,0,0,0,0,7,13,0,0,0,0,0,1,14,13,8,8,7,0,0,2,15,16,16,15,8,0,2 +0,1,9,15,16,6,0,0,0,13,15,10,16,11,0,0,0,5,3,4,16,7,0,0,0,0,0,8,16,7,0,0,0,0,0,1,13,15,5,0,0,0,0,0,2,13,11,0,0,0,12,5,3,13,14,0,0,0,10,16,16,14,5,0,3 +0,0,0,8,15,0,0,0,0,0,5,16,6,0,0,0,0,1,14,10,0,7,7,0,0,5,16,3,1,16,7,0,0,8,16,11,13,16,3,0,0,1,11,15,16,13,3,0,0,0,0,7,16,1,0,0,0,0,0,12,10,0,0,0,4 +0,0,5,12,16,9,0,0,0,7,16,10,3,0,0,0,0,8,13,0,0,0,0,0,0,9,14,4,6,0,0,0,0,4,14,12,13,13,1,0,0,0,0,0,0,15,4,0,0,0,0,0,4,15,7,0,0,0,4,12,13,8,0,0,5 +0,0,6,15,4,0,0,0,0,1,14,7,0,0,0,0,0,4,15,1,0,0,0,0,0,5,11,0,2,1,0,0,0,4,13,12,16,13,3,0,0,1,16,2,1,8,10,0,0,0,12,4,0,11,12,0,0,0,4,13,12,14,2,0,6 +0,0,8,14,16,16,2,0,0,0,11,6,7,16,1,0,0,0,0,0,10,12,0,0,0,1,11,16,16,16,12,0,0,2,12,13,13,2,0,0,0,0,1,14,4,0,0,0,0,0,8,12,0,0,0,0,0,0,12,7,0,0,0,0,7 +0,1,10,9,11,5,0,0,1,14,8,1,2,11,0,0,0,14,3,0,11,5,0,0,0,2,12,12,11,0,0,0,0,0,2,15,14,4,0,0,0,0,11,5,1,12,2,0,0,0,13,0,0,14,2,0,0,0,9,13,16,9,0,0,8 +0,0,9,16,15,3,0,0,0,6,16,11,10,12,0,0,0,7,15,1,1,15,5,0,0,3,16,6,9,16,9,0,0,0,5,11,13,14,10,0,0,0,0,0,0,10,10,0,0,0,0,0,0,13,11,0,0,0,10,16,16,15,6,0,9 +0,0,3,14,9,3,0,0,0,0,11,13,8,14,2,0,0,2,16,3,0,9,4,0,0,5,12,0,0,4,8,0,0,8,8,0,0,7,8,0,0,5,11,0,2,15,3,0,0,0,14,5,13,7,0,0,0,0,5,14,5,0,0,0,0 +0,0,0,0,13,12,0,0,0,0,0,10,16,14,0,0,0,1,12,16,16,11,0,0,0,11,16,12,16,8,0,0,0,6,4,7,16,6,0,0,0,0,0,6,16,5,0,0,0,0,0,4,16,8,0,0,0,0,0,0,15,11,0,0,1 
+0,0,11,16,12,1,0,0,0,3,16,9,16,5,0,0,0,0,6,1,16,10,0,0,0,0,0,5,16,6,0,0,0,0,0,9,15,0,0,0,0,0,3,15,6,0,0,0,0,0,10,16,4,5,8,1,0,0,13,16,16,16,16,6,2 +0,0,7,14,16,13,1,0,0,9,15,8,10,16,7,0,0,5,1,0,14,14,1,0,0,0,0,4,16,12,0,0,0,0,0,2,16,13,0,0,0,0,0,0,6,16,7,0,0,0,9,7,6,16,9,0,0,0,5,15,16,11,3,0,3 +0,0,0,1,15,9,0,0,0,0,0,10,15,3,0,0,0,0,9,16,5,3,6,0,0,5,16,8,0,12,13,0,0,12,14,4,8,16,9,0,0,12,16,16,16,16,0,0,0,0,4,2,14,11,0,0,0,0,0,1,16,9,0,0,4 +0,1,12,16,16,15,0,0,0,7,16,9,4,3,0,0,0,10,15,1,0,0,0,0,0,11,15,8,7,1,0,0,0,2,13,16,16,15,5,0,0,0,0,0,0,13,14,0,0,0,2,7,13,16,9,0,0,0,14,16,12,5,0,0,5 +0,0,3,13,16,7,0,0,0,1,12,16,8,0,0,0,0,2,16,8,0,0,0,0,0,8,16,4,0,0,0,0,0,9,16,10,11,4,0,0,0,6,16,14,13,16,3,0,0,1,11,11,2,14,10,0,0,0,2,15,16,15,6,0,6 +0,0,5,13,16,16,6,0,0,0,12,6,4,13,9,0,0,0,0,0,1,15,3,0,0,0,0,2,10,13,2,0,0,2,12,16,16,12,5,0,0,1,6,9,11,0,0,0,0,0,0,15,1,0,0,0,0,0,6,11,0,0,0,0,7 +0,0,10,16,16,8,0,0,0,5,16,6,7,14,0,0,0,3,16,3,13,9,0,0,0,0,13,15,9,0,0,0,0,0,10,16,1,0,0,0,0,0,16,15,6,0,0,0,0,1,15,12,11,0,0,0,0,0,5,16,10,0,0,0,8 +0,0,8,16,10,2,0,0,0,0,12,13,14,11,0,0,0,0,10,13,8,16,2,0,0,0,4,15,15,16,8,0,0,0,0,3,8,11,13,0,0,0,0,0,0,5,16,4,0,0,1,2,2,7,16,5,0,0,3,14,16,16,11,1,9 +0,0,7,11,3,1,0,0,0,1,14,16,13,13,0,0,0,3,16,12,1,6,6,0,0,4,10,4,0,4,8,0,0,8,8,0,0,11,5,0,0,7,9,0,2,14,0,0,0,1,12,4,12,10,0,0,0,0,6,14,9,0,0,0,0 +0,0,0,8,13,3,0,0,0,0,8,16,16,8,0,0,0,12,16,16,16,2,0,0,0,9,9,16,10,0,0,0,0,0,2,15,8,0,0,0,0,0,4,16,7,0,0,0,0,0,3,16,10,0,0,0,0,0,0,7,13,2,0,0,1 +0,2,10,14,16,12,0,0,0,10,12,7,10,12,0,0,0,0,0,0,13,10,0,0,0,0,0,3,16,3,0,0,0,0,0,13,10,0,0,0,0,0,5,16,1,0,0,0,0,0,14,15,12,16,10,0,0,1,16,13,9,3,0,0,2 +0,1,8,10,14,10,0,0,0,12,10,6,6,16,2,0,0,3,0,0,7,14,1,0,0,0,0,0,9,11,0,0,0,0,0,0,2,16,1,0,0,0,0,0,0,8,12,0,0,0,4,0,2,11,9,0,0,0,12,16,14,12,4,0,3 +0,0,0,11,10,0,0,0,0,0,8,16,5,0,0,0,0,3,16,10,4,11,0,0,0,11,13,0,9,16,0,0,0,12,13,5,14,16,8,0,0,3,12,14,16,11,3,0,0,0,0,10,11,0,0,0,0,0,0,11,8,0,0,0,4 +0,5,15,12,12,12,4,0,0,10,14,12,12,9,7,0,0,12,11,0,0,0,0,0,0,5,15,10,0,0,0,0,0,0,3,16,3,0,0,0,0,0,0,15,4,0,0,0,0,1,7,16,4,0,0,0,0,4,15,12,0,0,0,0,5 +0,0,0,6,12,0,0,0,0,0,5,16,7,0,0,0,0,0,12,9,0,0,0,0,0,1,16,5,0,0,0,0,0,1,16,10,12,9,2,0,0,0,13,8,2,5,13,0,0,0,6,11,1,2,16,3,0,0,0,8,11,14,11,2,6 +0,0,2,9,14,16,15,0,0,3,16,13,8,10,16,0,0,1,2,0,0,11,9,0,0,0,3,11,12,16,11,0,0,0,12,14,16,12,2,0,0,0,0,3,16,3,0,0,0,0,0,7,10,0,0,0,0,0,0,14,5,0,0,0,7 +0,1,12,15,16,13,1,0,0,4,16,15,7,15,4,0,0,0,16,6,11,15,2,0,0,0,9,16,15,4,0,0,0,0,8,16,8,0,0,0,0,0,15,15,11,0,0,0,0,2,16,10,12,0,0,0,0,2,13,16,10,0,0,0,8 +0,0,13,14,9,1,0,0,0,5,16,12,12,12,1,0,0,6,16,2,2,16,5,0,0,3,16,5,3,16,9,0,0,0,9,16,16,16,10,0,0,0,0,6,7,15,9,0,0,0,0,0,6,16,5,0,0,0,10,16,16,12,0,0,9 +0,0,9,14,15,6,0,0,0,2,16,12,1,13,0,0,0,3,12,7,0,8,4,0,0,6,11,4,0,7,2,0,0,4,9,0,0,12,1,0,0,3,9,0,4,11,0,0,0,1,12,5,12,3,0,0,0,0,6,14,5,0,0,0,0 +0,1,9,15,13,4,0,0,0,8,12,4,8,8,0,0,0,9,11,0,5,16,3,0,0,4,13,8,16,16,6,0,0,0,4,8,2,11,9,0,0,0,0,0,0,10,8,0,0,0,7,0,3,14,6,0,0,0,7,16,14,8,0,0,9 +0,1,7,14,16,12,1,0,0,7,16,9,6,11,1,0,0,11,12,4,1,0,0,0,0,12,16,16,15,6,0,0,0,3,9,4,11,12,0,0,0,0,0,0,8,16,0,0,0,0,0,0,14,13,0,0,0,0,6,16,15,3,0,0,5 +0,3,16,13,15,16,11,0,0,5,16,14,12,8,10,0,0,2,16,12,0,0,0,0,0,0,7,16,12,0,0,0,0,0,0,7,16,8,0,0,0,0,0,0,13,11,0,0,0,3,6,8,16,7,0,0,0,2,15,15,9,0,0,0,5 +0,0,2,14,14,0,0,0,0,0,7,16,7,0,0,0,0,0,13,12,0,0,0,0,0,1,16,8,4,2,0,0,0,4,16,16,16,15,3,0,0,2,16,12,4,6,16,2,0,0,13,8,3,6,16,6,0,0,2,13,14,16,12,1,6 
+0,3,14,14,16,16,10,0,0,9,15,9,7,1,0,0,0,10,16,11,1,0,0,0,0,1,7,14,9,0,0,0,0,0,0,7,16,0,0,0,0,0,0,6,15,0,0,0,0,1,1,11,10,0,0,0,0,3,15,16,4,0,0,0,5 +0,0,7,14,16,13,1,0,0,0,15,13,3,13,8,0,0,6,15,2,0,8,8,0,0,6,16,0,0,6,11,0,0,9,13,0,0,13,9,0,0,5,15,0,5,16,5,0,0,4,16,9,16,12,0,0,0,0,8,16,12,3,0,0,0 +0,0,6,12,14,4,0,0,0,2,15,2,1,15,0,0,0,2,14,0,3,16,2,0,0,0,13,5,14,14,6,0,0,0,2,8,4,7,9,0,0,0,0,0,0,9,9,0,0,0,5,0,5,14,3,0,0,0,7,13,12,4,0,0,9 +0,1,13,9,8,13,2,0,0,10,7,0,0,12,2,0,0,9,6,0,9,6,0,0,0,1,13,10,10,0,0,0,0,0,9,16,2,0,0,0,0,2,12,5,12,1,0,0,0,4,9,0,9,5,0,0,0,1,12,11,8,0,0,0,8 +0,1,7,14,10,0,0,0,0,10,12,5,9,7,0,0,0,10,6,0,6,15,0,0,0,5,11,11,14,15,1,0,0,0,2,7,1,10,5,0,0,0,0,0,0,9,5,0,0,0,5,3,0,13,5,0,0,0,4,14,14,12,2,0,9 +0,0,13,14,12,15,4,0,0,0,16,5,5,16,5,0,0,0,13,7,15,4,0,0,0,0,11,16,2,0,0,0,0,2,13,10,6,0,0,0,0,8,5,1,15,0,0,0,0,5,8,1,16,0,0,0,0,1,10,16,8,0,0,0,8 +0,0,0,3,16,2,0,0,0,0,0,12,12,0,0,0,0,0,5,16,2,5,12,0,0,3,15,8,0,11,13,0,0,9,16,4,7,16,8,0,0,9,16,16,16,16,2,0,0,0,0,0,13,12,0,0,0,0,0,1,16,9,0,0,4 +0,0,7,16,13,4,0,0,0,0,7,16,16,9,0,0,0,0,1,16,16,9,0,0,0,0,3,16,16,9,0,0,0,0,8,16,16,8,0,0,0,0,10,16,16,4,0,0,0,0,9,16,14,1,0,0,0,0,4,12,14,8,0,0,1 +0,0,12,16,16,12,0,0,0,0,6,4,10,13,1,0,0,0,0,0,13,9,0,0,0,0,5,9,16,16,12,0,0,3,16,16,11,3,0,0,0,0,7,13,0,0,0,0,0,0,11,8,0,0,0,0,0,0,16,3,0,0,0,0,7 +0,1,9,12,16,16,4,0,0,1,11,8,7,16,4,0,0,0,0,0,8,13,0,0,0,0,5,11,15,15,9,0,0,0,16,15,13,5,2,0,0,0,2,16,5,0,0,0,0,0,9,14,1,0,0,0,0,0,14,10,0,0,0,0,7 +0,2,10,15,16,16,14,0,0,7,11,4,6,15,9,0,0,0,0,6,15,12,0,0,0,0,3,16,9,0,0,0,0,0,5,16,8,0,0,0,0,0,0,11,15,2,0,0,0,0,1,6,16,2,0,0,0,1,16,16,6,0,0,0,3 +0,0,2,12,16,16,7,0,0,0,10,13,7,8,3,0,0,2,15,6,0,0,0,0,0,11,14,7,5,1,0,0,0,5,16,16,16,15,6,0,0,0,4,4,4,14,8,0,0,0,0,4,7,14,5,0,0,0,2,15,15,5,0,0,5 +0,0,0,4,15,12,0,0,0,0,5,15,16,11,0,0,0,8,16,16,16,4,0,0,0,14,11,11,16,2,0,0,0,0,0,7,16,0,0,0,0,0,0,9,16,2,0,0,0,0,0,9,16,3,0,0,0,0,0,5,16,5,0,0,1 +0,2,8,15,16,10,0,0,0,11,16,10,4,15,1,0,0,12,13,0,0,13,5,0,0,11,13,0,0,15,7,0,0,8,16,0,0,15,6,0,0,6,16,1,5,16,2,0,0,2,16,6,15,12,0,0,0,0,7,16,14,1,0,0,0 +0,0,8,15,16,11,0,0,0,2,16,10,4,14,4,0,0,6,16,2,0,8,8,0,0,10,12,0,0,11,6,0,0,9,11,0,0,15,6,0,0,8,12,0,7,15,1,0,0,2,15,7,15,9,0,0,0,0,10,16,12,1,0,0,0 +0,1,13,16,15,6,0,0,0,10,15,9,11,15,0,0,0,7,9,0,12,12,0,0,0,0,0,0,15,6,0,0,0,0,0,8,15,1,0,0,0,0,2,15,10,0,0,0,0,0,10,16,10,9,16,2,0,0,13,16,15,11,4,0,2 +0,0,9,16,15,3,0,0,0,7,15,7,16,7,0,0,0,2,2,0,16,2,0,0,0,0,0,6,15,0,0,0,0,0,0,13,10,0,0,0,0,0,3,16,3,3,5,0,0,0,11,14,10,16,6,0,0,0,11,16,13,5,0,0,2 +0,0,3,12,16,13,0,0,0,1,14,9,10,13,0,0,0,0,2,0,10,10,0,0,0,0,3,7,15,16,10,0,0,0,16,16,15,3,0,0,0,0,3,13,7,0,0,0,0,0,0,16,2,0,0,0,0,0,4,15,0,0,0,0,7 +0,0,5,12,16,10,0,0,0,6,16,13,3,15,1,0,0,11,8,5,5,10,0,0,0,4,11,2,12,2,0,0,0,0,6,16,6,0,0,0,0,0,2,15,8,0,0,0,0,0,8,13,8,0,0,0,0,0,5,15,4,0,0,0,8 +0,4,15,16,13,1,0,0,0,9,14,10,16,6,0,0,0,1,1,6,16,2,0,0,0,0,0,8,14,1,0,0,0,0,5,16,5,0,0,0,0,0,13,14,0,0,0,0,0,5,16,9,8,8,10,0,0,4,15,16,16,16,9,0,2 +0,0,6,14,13,1,0,0,0,3,16,10,5,11,0,0,0,5,16,0,0,13,0,0,0,6,12,0,0,12,3,0,0,7,12,0,0,13,3,0,0,3,11,0,5,12,0,0,0,0,13,4,15,4,0,0,0,0,5,16,6,0,0,0,0 +0,0,1,16,11,0,0,0,0,0,0,15,16,1,0,0,0,0,0,15,14,0,0,0,0,0,2,16,14,0,0,0,0,0,1,16,15,0,0,0,0,0,0,14,13,0,0,0,0,0,0,13,10,0,0,0,0,0,0,12,11,0,0,0,1 +0,1,10,15,15,5,0,0,0,11,16,9,12,10,0,0,0,15,6,0,14,7,0,0,0,0,0,6,16,5,0,0,0,0,1,15,11,0,0,0,0,0,5,16,4,0,0,0,0,1,15,11,8,12,14,1,0,1,15,16,16,12,5,0,2 +0,0,0,14,13,1,0,0,0,0,4,16,11,0,0,0,0,0,12,16,1,0,0,0,0,1,15,16,14,1,0,0,0,4,16,12,8,12,7,0,0,2,15,8,0,8,16,2,0,0,10,14,9,15,15,1,0,0,1,14,16,14,2,0,6 
+0,2,9,15,16,15,2,0,0,11,11,5,9,16,3,0,0,0,0,0,9,12,0,0,0,0,0,0,6,15,1,0,0,0,0,0,0,14,9,0,0,0,0,0,0,12,12,0,0,0,5,3,6,15,7,0,0,0,12,16,15,9,1,0,3 +0,0,8,14,16,13,1,0,0,10,9,4,6,16,3,0,0,0,0,1,10,13,0,0,0,0,0,7,14,2,0,0,0,0,0,7,13,1,0,0,0,0,0,0,8,13,1,0,0,0,0,0,3,16,4,0,0,0,14,16,13,9,0,0,3 +0,0,4,11,16,16,2,0,0,0,8,8,9,14,0,0,0,0,0,0,11,12,3,0,0,6,15,16,16,15,6,0,0,3,7,11,13,0,0,0,0,0,0,15,2,0,0,0,0,0,3,16,0,0,0,0,0,0,5,12,0,0,0,0,7 +0,0,9,14,16,16,2,0,0,7,15,7,4,14,8,0,0,0,0,0,5,15,4,0,0,0,0,0,16,4,0,0,0,0,0,0,15,4,0,0,0,0,0,0,7,12,0,0,0,0,15,2,8,14,0,0,0,0,10,15,12,3,0,0,3 +0,2,13,16,16,15,4,0,0,7,12,8,8,16,12,0,0,0,0,0,8,16,7,0,0,0,0,0,14,10,0,0,0,0,0,0,12,15,3,0,0,0,0,0,2,16,11,0,0,0,4,4,7,16,10,0,0,2,15,16,16,12,1,0,3 +0,0,0,1,14,5,0,0,0,0,0,11,11,0,0,0,0,0,9,12,1,0,0,0,0,5,15,1,0,2,4,0,0,14,7,0,0,13,10,0,0,15,16,16,16,16,5,0,0,3,8,8,15,10,0,0,0,0,0,3,15,2,0,0,4 +0,0,0,9,9,0,0,0,0,0,3,15,4,0,0,0,0,0,10,12,0,0,0,0,0,0,12,8,4,3,0,0,0,0,14,16,12,14,5,0,0,0,12,10,0,4,13,0,0,0,9,11,0,6,16,1,0,0,0,8,14,15,8,0,6 +0,0,0,14,15,1,0,0,0,0,6,16,11,0,0,0,0,0,13,15,2,0,0,0,0,0,16,12,0,0,0,0,0,3,16,16,16,10,1,0,0,2,16,12,4,11,12,0,0,0,10,14,6,14,15,0,0,0,1,13,16,16,10,0,6 +0,0,0,13,15,2,0,0,0,0,4,16,11,1,0,0,0,0,13,15,1,0,0,0,0,0,15,9,0,0,0,0,0,4,16,14,15,8,0,0,0,1,16,15,8,13,9,0,0,0,11,10,0,11,16,0,0,0,2,14,16,16,13,0,6 +0,0,0,4,15,7,0,0,0,0,3,15,12,0,0,0,0,1,14,12,0,2,11,0,0,10,14,0,0,13,12,0,0,11,15,12,15,16,5,0,0,4,10,8,16,11,0,0,0,0,0,2,16,4,0,0,0,0,0,6,14,0,0,0,4 +0,0,9,16,10,2,0,0,0,0,16,14,11,10,0,0,0,1,16,9,12,15,0,0,0,0,7,15,16,16,8,0,0,0,0,10,6,16,10,0,0,0,0,0,1,15,9,0,0,0,0,1,7,16,8,0,0,0,6,16,16,14,1,0,9 +0,0,6,16,11,0,0,0,0,0,9,16,16,5,0,0,0,0,8,16,16,4,0,0,0,0,10,16,13,0,0,0,0,0,13,16,12,0,0,0,0,0,10,16,9,0,0,0,0,0,9,16,10,0,0,0,0,0,4,15,16,3,0,0,1 +0,0,10,12,16,16,8,0,0,4,16,16,11,5,4,0,0,10,12,3,0,0,0,0,0,12,11,0,0,0,0,0,0,6,16,14,8,0,0,0,0,0,5,10,16,3,0,0,0,0,4,8,16,3,0,0,0,0,13,16,11,0,0,0,5 +0,0,7,5,14,13,0,0,0,0,16,15,6,9,2,0,0,4,16,7,0,4,4,0,0,6,12,1,0,5,7,0,0,8,7,0,0,12,3,0,0,4,8,0,4,12,0,0,0,2,12,5,15,4,0,0,0,0,6,15,6,0,0,0,0 +0,0,9,16,16,7,0,0,0,2,16,11,4,15,2,0,0,4,16,2,2,16,6,0,0,0,13,11,13,16,10,0,0,0,1,10,13,16,6,0,0,0,0,0,3,16,7,0,0,0,0,2,13,14,1,0,0,0,11,15,15,6,0,0,9 +0,0,9,16,16,7,0,0,0,7,16,12,7,11,2,0,0,13,13,1,0,0,0,0,0,10,16,10,7,0,0,0,0,0,8,12,16,10,0,0,0,0,0,0,5,16,3,0,0,0,1,0,11,16,1,0,0,0,7,16,16,6,0,0,5 +0,2,11,16,15,6,0,0,0,11,15,9,14,13,0,0,0,7,1,0,13,9,0,0,0,0,0,1,16,8,0,0,0,0,1,11,13,1,0,0,0,0,5,16,5,0,0,0,0,2,15,9,2,4,4,0,0,2,15,16,16,16,16,1,2 +0,2,9,16,13,13,2,0,0,11,11,4,2,10,4,0,0,6,12,2,4,12,0,0,0,0,6,14,13,2,0,0,0,0,1,14,12,0,0,0,0,0,8,7,13,0,0,0,0,0,12,5,12,0,0,0,0,0,13,14,3,0,0,0,8 +0,1,12,16,14,8,0,0,0,4,16,8,10,15,3,0,0,0,0,0,5,16,3,0,0,0,0,1,12,15,0,0,0,0,0,10,16,5,0,0,0,0,5,16,10,0,0,0,0,1,14,15,6,10,11,0,0,0,13,16,16,14,8,1,2 +0,0,11,8,12,5,0,0,0,1,15,11,6,14,2,0,0,4,11,0,0,9,4,0,0,4,8,0,0,8,6,0,0,6,7,0,0,11,3,0,0,5,8,0,5,13,0,0,0,3,13,5,15,3,0,0,0,0,9,14,4,0,0,0,0 +0,0,4,13,15,6,0,0,0,0,15,11,2,14,2,0,0,3,14,1,0,12,4,0,0,5,12,0,0,9,5,0,0,5,5,0,0,12,2,0,0,4,9,0,2,13,2,0,0,0,13,2,14,7,0,0,0,0,5,16,7,0,0,0,0 +0,0,3,16,12,1,0,0,0,0,3,16,16,5,0,0,0,0,2,16,16,5,0,0,0,0,0,16,16,5,0,0,0,0,4,16,16,2,0,0,0,0,4,16,14,0,0,0,0,0,6,16,14,0,0,0,0,0,2,16,14,0,0,0,1 +0,1,13,16,16,12,0,0,0,3,12,6,11,14,0,0,0,0,0,0,9,12,0,0,0,1,9,15,16,16,9,0,0,2,12,15,14,5,5,0,0,0,5,15,1,0,0,0,0,0,11,9,0,0,0,0,0,0,15,3,0,0,0,0,7 +0,0,1,9,15,2,0,0,0,0,5,16,7,1,0,0,0,0,14,8,0,0,0,0,0,0,15,6,8,4,0,0,0,0,15,16,13,14,7,0,0,0,14,3,0,3,12,0,0,0,6,9,7,9,15,0,0,0,0,10,14,14,2,0,6 
+0,3,12,15,16,16,3,0,0,6,16,9,9,16,6,0,0,0,3,0,11,15,1,0,0,0,0,4,16,7,0,0,0,0,0,7,16,4,0,0,0,0,0,0,13,11,0,0,0,0,4,5,15,14,0,0,0,3,16,16,15,6,0,0,3 +0,1,13,16,16,5,0,0,0,7,10,4,10,12,0,0,0,0,0,0,11,7,0,0,0,0,0,2,15,2,0,0,0,0,0,12,7,0,0,0,0,0,6,12,1,0,0,0,0,0,16,3,1,5,3,0,0,1,15,16,16,15,3,0,2 +0,0,1,12,16,8,0,0,0,2,13,16,16,8,0,0,0,13,16,14,16,12,0,0,0,11,6,7,16,2,0,0,0,0,0,11,16,1,0,0,0,0,0,13,13,0,0,0,0,0,0,15,13,0,0,0,0,0,0,13,13,0,0,0,1 +0,0,9,15,15,2,0,0,0,3,11,4,15,6,0,0,0,0,0,0,16,2,0,0,0,2,12,13,16,16,11,0,0,2,12,15,11,6,1,0,0,0,3,16,0,0,0,0,0,0,5,14,0,0,0,0,0,0,11,7,0,0,0,0,7 +0,0,0,2,16,3,0,0,0,0,0,12,13,0,0,0,0,0,8,15,2,1,0,0,0,1,15,8,1,13,11,0,0,9,16,7,12,16,4,0,0,5,13,16,16,11,0,0,0,0,0,2,16,5,0,0,0,0,0,4,16,2,0,0,4 +0,0,0,7,12,0,0,0,0,0,4,16,8,0,0,0,0,0,12,11,0,0,0,0,0,0,15,10,8,6,1,0,0,0,15,16,8,10,8,0,0,0,14,7,0,0,12,0,0,0,8,11,0,5,16,2,0,0,0,9,14,14,5,0,6 +0,0,10,16,16,11,0,0,0,1,11,7,6,16,3,0,0,0,0,0,10,15,0,0,0,0,0,0,15,7,0,0,0,0,0,0,15,9,0,0,0,0,0,0,7,13,0,0,0,0,5,4,10,16,0,0,0,0,10,16,16,10,0,0,3 +0,0,4,12,13,2,0,0,0,0,5,16,16,5,0,0,0,0,5,16,16,6,0,0,0,0,9,16,15,0,0,0,0,0,10,16,14,0,0,0,0,0,12,16,12,0,0,0,0,0,5,16,11,0,0,0,0,0,6,16,13,0,0,0,1 +0,1,7,15,16,9,0,0,1,13,14,7,14,14,0,0,0,6,1,8,16,8,0,0,0,0,3,16,9,0,0,0,0,0,0,11,15,6,1,0,0,0,0,0,7,15,11,0,0,0,5,1,0,11,16,2,0,0,10,16,16,16,7,0,3 +0,1,7,13,14,3,0,0,0,10,13,2,5,10,0,0,0,12,4,0,7,16,0,0,0,6,10,9,13,15,1,0,0,0,2,4,0,14,4,0,0,0,0,0,0,13,2,0,0,0,6,0,5,14,0,0,0,0,5,14,14,5,0,0,9 +0,0,3,12,12,3,0,0,0,0,4,16,16,4,0,0,0,0,5,16,16,5,0,0,0,0,11,16,15,0,0,0,0,0,12,16,14,0,0,0,0,0,13,16,9,0,0,0,0,0,7,16,10,1,0,0,0,0,5,13,14,4,0,0,1 +0,0,5,14,15,2,0,0,0,6,16,10,15,8,0,0,0,1,4,0,8,8,0,0,0,0,1,7,16,16,8,0,0,0,13,16,16,4,0,0,0,0,6,10,9,0,0,0,0,0,0,13,4,0,0,0,0,0,5,15,2,0,0,0,7 +0,0,0,9,13,0,0,0,0,0,3,15,4,0,0,0,0,0,12,11,0,0,0,0,0,0,16,11,8,4,0,0,0,0,15,16,8,12,5,0,0,0,14,13,0,1,15,1,0,0,8,12,0,4,16,2,0,0,0,8,12,13,10,1,6 +0,0,11,16,9,8,0,0,0,0,14,13,6,15,2,0,0,0,9,6,6,10,0,0,0,0,1,14,13,0,0,0,0,0,1,13,8,0,0,0,0,0,5,7,12,0,0,0,0,0,13,0,10,0,0,0,0,0,13,14,8,0,0,0,8 +0,0,0,6,16,6,0,0,0,0,5,16,10,0,0,0,0,2,15,14,0,7,1,0,0,6,16,3,3,16,9,0,0,11,16,8,11,16,6,0,0,3,15,16,16,15,1,0,0,0,0,3,16,7,0,0,0,0,0,5,16,3,0,0,4 +0,0,5,12,16,10,0,0,0,8,15,5,12,13,0,0,0,0,0,3,16,4,0,0,0,0,0,5,16,3,0,0,0,0,0,2,15,8,0,0,0,0,0,0,2,14,7,0,0,0,0,0,0,13,11,0,0,0,5,16,16,11,3,0,3 +0,0,3,16,12,1,0,0,0,0,10,16,16,0,0,0,0,0,12,16,12,0,0,0,0,0,13,16,10,0,0,0,0,0,16,16,1,0,0,0,0,0,16,16,0,0,0,0,0,0,10,16,1,0,0,0,0,0,2,13,9,0,0,0,1 +0,0,0,8,14,0,0,0,0,0,2,16,10,0,0,0,0,0,12,16,1,3,5,0,0,5,16,6,2,16,9,0,0,11,16,0,8,16,7,0,0,10,16,16,16,11,1,0,0,1,7,9,16,4,0,0,0,0,0,10,11,0,0,0,4 +0,0,5,12,0,0,0,0,0,0,14,10,14,12,0,0,0,2,16,16,8,11,2,0,0,3,16,11,0,9,3,0,0,5,12,2,0,12,4,0,0,1,12,0,0,13,3,0,0,0,13,6,8,13,0,0,0,0,3,14,12,3,0,0,0 +0,7,16,16,16,16,11,0,0,8,16,9,6,0,1,0,0,3,16,3,0,0,0,0,0,0,11,12,0,0,0,0,0,0,4,16,1,0,0,0,0,4,0,14,4,0,0,0,0,9,9,16,3,0,0,0,0,5,15,13,0,0,0,0,5 +0,4,13,16,16,15,3,0,0,10,12,7,8,16,8,0,0,0,0,1,12,15,2,0,0,0,0,4,16,10,0,0,0,0,0,8,16,4,0,0,0,0,0,1,16,10,0,0,0,5,7,4,15,13,0,0,0,5,16,16,16,7,0,0,3 +0,0,0,12,15,4,0,0,0,0,7,16,9,2,0,0,0,0,12,14,0,0,0,0,0,0,16,11,3,0,0,0,0,3,16,14,15,8,0,0,0,1,16,6,0,12,8,0,0,0,12,12,4,13,12,0,0,0,1,11,16,16,4,0,6 +0,2,14,16,16,7,0,0,0,6,15,5,6,13,0,0,0,4,15,0,9,16,6,0,0,2,13,16,16,15,9,0,0,0,1,4,2,15,8,0,0,0,0,0,1,16,6,0,0,0,0,1,12,15,2,0,0,1,14,16,16,6,0,0,9 +0,0,0,12,16,1,0,0,0,0,4,16,11,0,0,0,0,0,12,15,1,0,0,0,0,0,15,13,8,4,0,0,0,3,16,15,11,15,7,0,0,2,15,10,0,4,15,3,0,0,8,12,4,6,16,5,0,0,1,11,16,16,13,0,6 
+0,0,0,14,10,0,0,0,0,3,14,16,13,0,0,0,2,16,16,16,5,0,0,0,1,11,14,15,1,0,0,0,0,0,8,16,1,0,0,0,0,0,7,16,1,0,0,0,0,0,1,15,5,0,0,0,0,0,0,13,13,0,0,0,1 +0,1,8,12,16,16,3,0,0,5,14,8,10,15,0,0,0,0,2,5,14,12,2,0,0,3,15,16,15,12,8,0,0,3,6,14,7,0,0,0,0,0,2,15,1,0,0,0,0,0,10,8,0,0,0,0,0,0,14,4,0,0,0,0,7 +0,0,15,13,12,12,2,0,0,4,16,16,12,6,0,0,0,0,16,4,0,0,0,0,0,0,11,7,0,0,0,0,0,0,7,14,0,0,0,0,0,0,0,14,6,0,0,0,0,4,5,14,8,0,0,0,0,1,15,16,4,0,0,0,5 +0,0,0,3,15,7,0,0,0,0,0,13,14,3,0,0,0,0,6,15,2,6,6,0,0,2,15,4,0,15,8,0,0,8,12,0,4,16,1,0,0,11,14,12,16,10,0,0,0,2,8,7,15,4,0,0,0,0,0,3,16,2,0,0,4 +0,0,0,7,14,0,0,0,0,0,4,16,5,0,0,0,0,0,14,9,0,0,10,3,0,7,15,0,0,9,15,0,0,12,15,8,10,15,10,0,0,8,15,12,16,12,1,0,0,0,0,5,15,3,0,0,0,0,0,9,13,0,0,0,4 +0,0,5,12,16,16,7,0,0,0,10,9,4,11,12,0,0,0,0,0,0,13,7,0,0,0,2,10,15,16,13,0,0,0,14,13,16,10,1,0,0,0,3,4,16,2,0,0,0,0,0,13,9,0,0,0,0,0,6,13,0,0,0,0,7 +0,2,13,16,10,0,0,0,0,6,13,10,16,0,0,0,0,0,0,8,14,0,0,0,0,0,1,14,9,0,0,0,0,0,9,14,1,0,0,0,0,1,14,7,0,0,1,0,0,4,16,5,7,12,14,0,0,3,15,16,16,10,1,0,2 +0,1,3,15,15,2,0,0,2,16,16,12,16,6,0,0,1,15,7,6,14,0,0,0,0,5,14,14,10,0,0,0,0,0,7,16,7,0,0,0,0,0,6,15,13,8,0,0,0,0,8,12,7,16,0,0,0,0,4,15,16,14,0,0,8 +0,2,15,16,15,1,0,0,0,3,11,5,16,4,0,0,0,0,0,0,15,5,0,0,0,0,0,4,15,1,0,0,0,0,2,14,5,0,0,0,0,0,15,10,0,0,0,0,0,5,16,3,3,4,3,0,0,4,16,16,16,16,11,0,2 +0,2,13,16,16,10,0,0,2,15,12,7,10,16,0,0,0,3,1,0,12,13,0,0,0,0,0,3,16,4,0,0,0,0,0,13,9,0,0,0,0,0,6,15,3,0,0,0,0,0,16,10,6,8,8,1,0,2,15,16,16,12,12,1,2 +0,4,7,13,16,16,4,0,0,11,16,14,9,2,0,0,0,14,5,0,0,0,0,0,0,12,8,0,0,0,0,0,0,9,12,0,0,0,0,0,0,2,13,16,9,0,0,0,0,1,3,14,16,3,0,0,0,4,14,16,13,0,0,0,5 +0,0,12,16,16,16,15,0,0,0,3,4,2,8,15,0,0,0,0,2,4,11,11,0,0,0,6,15,16,16,12,0,0,0,5,6,15,7,2,0,0,0,0,10,13,0,0,0,0,0,4,16,2,0,0,0,0,0,14,11,0,0,0,0,7 +0,1,14,16,15,4,0,0,0,4,16,9,11,15,3,0,0,6,16,1,8,16,2,0,0,2,14,10,15,16,6,0,0,0,3,11,8,15,5,0,0,0,0,0,1,16,5,0,0,0,3,3,11,15,1,0,0,0,13,16,14,4,0,0,9 +0,8,16,12,15,16,7,0,0,13,16,14,6,4,1,0,0,12,10,0,0,0,0,0,0,3,16,10,0,0,0,0,0,0,6,15,9,0,0,0,0,0,0,4,16,2,0,0,0,1,4,6,16,5,0,0,0,7,16,16,10,0,0,0,5 +0,0,0,3,14,6,0,0,0,0,3,14,10,1,0,0,0,1,13,10,0,0,0,0,0,9,14,1,0,7,5,0,0,11,15,8,9,16,10,0,0,7,16,16,15,15,2,0,0,0,0,0,12,11,0,0,0,0,0,3,16,2,0,0,4 +0,0,11,16,16,14,0,0,0,1,16,15,13,15,1,0,0,0,14,15,16,6,0,0,0,0,8,16,7,0,0,0,0,0,8,16,7,0,0,0,0,0,13,14,13,0,0,0,0,0,16,11,15,0,0,0,0,0,12,16,10,0,0,0,8 +0,0,13,16,16,15,2,0,0,0,14,13,11,16,2,0,0,0,11,13,15,6,0,0,0,0,5,16,10,0,0,0,0,0,10,14,15,0,0,0,0,1,14,3,15,7,0,0,0,6,11,0,15,6,0,0,0,1,13,16,15,3,0,0,8 +0,0,2,14,13,0,0,0,0,0,14,15,3,0,0,0,0,6,16,2,1,5,0,0,0,10,13,0,5,16,2,0,0,7,16,9,12,16,11,0,0,0,5,12,16,10,2,0,0,0,0,12,12,1,0,0,0,0,0,16,5,0,0,0,4 +0,0,9,16,16,10,0,0,0,2,16,9,11,11,0,0,0,0,15,7,12,16,3,0,0,0,7,16,15,15,7,0,0,0,0,0,0,10,10,0,0,0,0,0,0,11,10,0,0,0,11,2,6,16,6,0,0,0,9,16,16,11,1,0,9 +0,0,10,13,14,8,0,0,0,0,13,9,5,12,5,0,0,4,13,0,0,4,8,0,0,4,8,0,0,4,8,0,0,8,4,0,0,13,2,0,0,8,4,0,9,10,0,0,0,4,12,12,13,1,0,0,0,1,11,11,1,0,0,0,0 +0,0,14,16,13,9,1,0,0,0,12,10,9,16,3,0,0,0,9,14,13,13,1,0,0,0,3,13,16,6,0,0,0,0,5,16,12,9,0,0,0,0,13,8,14,6,0,0,0,1,16,11,15,1,0,0,0,1,13,16,6,0,0,0,8 +0,0,9,15,13,3,0,0,0,2,14,2,6,5,0,0,0,4,9,0,0,15,4,0,0,1,13,5,7,16,6,0,0,0,2,7,7,14,3,0,0,0,0,0,1,14,1,0,0,0,3,1,10,9,0,0,0,0,11,15,9,1,0,0,9 +0,0,13,16,9,4,0,0,0,0,15,9,9,15,1,0,0,0,11,9,13,11,0,0,0,0,5,16,14,1,0,0,0,0,7,16,10,0,0,0,0,0,14,10,16,2,0,0,0,0,16,4,15,7,0,0,0,0,11,16,16,3,0,0,8 
+0,0,0,12,11,0,0,0,0,0,12,12,9,10,0,0,0,2,16,2,1,11,1,0,0,1,15,0,0,5,8,0,0,2,14,0,0,5,10,0,0,0,13,2,0,2,13,0,0,0,7,9,0,7,11,0,0,0,0,11,13,16,2,0,0 +0,0,0,1,13,13,0,0,0,0,4,15,15,16,0,0,0,8,16,10,6,14,0,0,0,10,6,0,8,13,0,0,0,0,0,0,10,13,0,0,0,0,0,0,13,9,0,0,0,0,0,0,14,7,0,0,0,0,0,0,16,7,0,0,1 +0,0,2,12,16,10,0,0,0,2,15,10,6,15,0,0,0,2,7,1,4,14,0,0,0,0,0,0,9,10,0,0,0,0,0,0,12,3,0,0,0,0,0,8,14,0,0,0,0,0,15,16,15,10,5,0,0,0,1,8,8,11,16,6,2 +0,4,14,16,16,15,2,0,0,12,11,2,4,16,5,0,0,2,0,1,11,12,0,0,0,0,0,15,16,3,0,0,0,0,0,4,15,10,0,0,0,0,0,0,1,14,10,0,0,1,7,0,3,14,8,0,0,4,15,16,16,11,0,0,3 +0,0,0,2,15,2,0,0,0,0,0,12,12,0,0,0,0,0,5,16,2,0,0,0,0,0,11,10,0,3,8,0,0,5,16,1,2,15,5,0,0,9,13,7,14,16,2,0,0,5,15,14,16,10,0,0,0,0,0,2,16,5,0,0,4 +0,0,12,16,16,16,7,0,0,1,14,15,6,4,1,0,0,8,16,2,0,0,0,0,0,9,16,12,12,9,1,0,0,1,8,8,8,15,10,0,0,0,0,0,0,13,12,0,0,0,8,2,6,16,5,0,0,1,11,16,16,8,0,0,5 +0,0,0,11,13,0,0,0,0,0,1,16,12,0,0,0,0,0,4,16,6,0,0,0,0,0,7,16,16,7,0,0,0,0,8,16,16,14,11,0,0,0,9,16,3,5,16,4,0,0,6,15,5,14,16,2,0,0,1,11,16,15,4,0,6 +0,3,15,16,15,3,0,0,0,3,10,8,15,12,0,0,0,0,0,0,14,8,0,0,0,0,11,13,16,16,8,0,0,1,15,16,15,7,2,0,0,0,5,16,5,0,0,0,0,2,16,11,0,0,0,0,0,3,16,5,0,0,0,0,7 +0,0,4,15,13,3,0,0,0,4,13,14,10,13,0,0,0,8,16,8,8,13,0,0,0,0,15,12,15,11,0,0,0,0,6,16,13,1,0,0,0,0,11,15,15,2,0,0,0,0,12,8,15,8,0,0,0,0,5,15,16,5,0,0,8 +0,0,13,13,1,0,0,0,0,7,16,13,12,13,0,0,0,8,13,1,15,16,4,0,0,4,15,13,15,15,10,0,0,0,6,11,3,9,13,0,0,0,0,0,0,5,16,0,0,1,7,0,1,9,15,1,0,1,13,16,16,16,6,0,9 +0,0,0,14,12,1,0,0,0,0,4,15,7,10,0,0,0,2,16,15,5,12,2,0,0,7,16,0,0,11,5,0,0,5,12,0,0,12,4,0,0,1,15,0,0,14,2,0,0,0,9,6,7,15,0,0,0,0,1,13,16,8,0,0,0 +0,0,0,11,16,3,0,0,0,0,5,16,16,5,0,0,0,6,16,15,16,3,0,0,0,11,11,10,16,1,0,0,0,0,0,10,15,0,0,0,0,0,0,11,13,0,0,0,0,0,0,10,16,2,0,0,0,0,0,7,16,2,0,0,1 +0,0,6,16,8,0,0,0,0,0,8,13,16,3,0,0,0,0,2,12,10,8,0,0,0,0,0,0,7,11,0,0,0,0,0,0,9,10,0,0,0,0,0,0,13,7,0,0,0,0,1,10,16,10,8,3,0,0,4,16,16,15,16,16,2 +0,0,9,16,16,12,2,0,0,3,13,5,4,14,5,0,0,0,0,0,7,15,2,0,0,0,0,5,16,11,0,0,0,0,0,0,8,16,7,0,0,0,0,0,0,13,8,0,0,0,4,5,5,15,4,0,0,0,12,16,15,5,0,0,3 +0,0,0,2,14,1,0,0,0,0,0,14,8,0,0,0,0,0,8,15,1,0,0,0,0,0,13,6,0,5,11,0,0,3,15,0,0,10,9,0,0,9,13,4,7,16,3,0,0,7,16,16,16,13,0,0,0,0,3,2,16,6,0,0,4 +0,5,16,16,16,16,10,0,0,4,10,4,4,4,0,0,0,10,10,0,0,0,0,0,0,4,16,13,7,1,0,0,0,0,2,8,14,14,2,0,0,0,0,0,1,16,7,0,0,3,3,2,11,15,0,0,0,5,16,16,12,1,0,0,5 +0,0,0,12,13,0,0,0,0,0,2,16,12,0,0,0,0,0,4,16,6,0,0,0,0,0,10,16,8,0,0,0,0,0,8,16,16,15,5,0,0,0,8,16,0,6,15,1,0,0,7,16,4,10,16,3,0,0,1,11,16,16,12,0,6 +0,0,5,13,16,14,0,0,0,1,14,8,5,16,2,0,0,0,1,0,2,15,2,0,0,0,0,2,8,15,3,0,0,0,0,15,16,13,8,0,0,0,0,6,14,0,0,0,0,0,0,13,7,0,0,0,0,0,7,14,0,0,0,0,7 +0,0,4,16,14,3,0,0,2,14,16,12,10,11,0,0,0,13,12,9,15,10,0,0,0,3,14,14,16,4,0,0,0,0,9,16,8,0,0,0,0,0,12,15,14,1,0,0,0,0,12,12,16,4,0,0,0,0,4,15,16,3,0,0,8 +0,0,8,16,4,0,0,0,0,6,12,2,14,13,0,0,0,9,6,1,14,14,0,0,0,2,11,12,8,16,2,0,0,0,0,0,0,11,8,0,0,0,0,0,0,7,9,0,0,0,2,0,1,12,6,0,0,0,8,9,13,7,0,0,9 +0,0,0,16,11,1,0,0,0,0,13,16,10,9,0,0,0,2,16,11,1,14,2,0,0,3,16,1,0,8,7,0,0,4,13,0,0,8,9,0,0,3,16,1,0,10,9,0,0,0,10,8,3,16,4,0,0,0,1,11,16,13,1,0,0 +0,0,0,0,5,15,1,0,0,0,0,0,11,16,1,0,0,0,0,8,15,16,3,0,0,2,13,15,5,16,0,0,0,8,14,2,3,16,0,0,0,0,0,0,4,15,0,0,0,0,0,0,5,15,0,0,0,0,0,0,5,16,1,0,1 +0,0,4,16,16,4,0,0,0,0,12,11,7,11,0,0,0,0,9,2,5,12,0,0,0,0,0,0,7,11,0,0,0,0,0,0,13,6,0,0,0,0,0,2,16,1,0,0,0,0,1,15,15,10,1,0,0,0,5,16,8,11,11,0,2 +0,1,13,16,16,8,0,0,0,11,13,4,13,7,0,0,0,7,1,7,16,1,0,0,0,0,5,16,15,9,0,0,0,0,3,6,8,15,8,0,0,0,6,1,0,7,13,0,0,4,16,5,2,13,10,0,0,1,12,16,16,11,1,0,3 
+0,0,0,1,16,3,0,0,0,0,0,10,11,0,0,0,0,0,1,14,3,0,0,0,0,0,7,12,0,3,9,0,0,0,14,2,0,10,7,0,0,6,13,5,11,14,1,0,0,11,16,16,16,9,0,0,0,1,2,1,14,1,0,0,4 +0,0,6,15,16,16,3,0,0,0,14,16,6,6,1,0,0,3,14,5,0,0,0,0,0,11,15,8,4,0,0,0,0,1,10,12,16,8,0,0,0,0,0,0,5,16,0,0,0,0,0,5,13,14,0,0,0,0,4,16,14,3,0,0,5 +0,0,2,16,5,0,0,0,0,0,6,16,2,0,0,0,0,0,9,13,0,0,0,0,0,0,11,13,12,11,2,0,0,0,14,16,14,10,14,0,0,0,13,7,3,0,14,6,0,0,10,14,4,8,16,7,0,0,2,14,16,15,8,0,6 +0,0,9,13,16,16,4,0,0,2,12,5,4,14,4,0,0,0,0,0,5,13,0,0,0,0,0,3,13,12,5,0,0,0,0,13,14,12,8,0,0,0,0,13,6,0,0,0,0,0,4,14,1,0,0,0,0,0,14,7,0,0,0,0,7 +0,0,6,8,15,13,1,0,0,1,16,16,11,15,4,0,0,0,15,9,8,15,2,0,0,0,10,16,16,10,0,0,0,0,13,16,10,0,0,0,0,4,15,10,12,0,0,0,0,2,15,5,15,0,0,0,0,0,8,16,10,0,0,0,8 +0,1,9,14,8,0,0,0,0,8,11,3,7,11,1,0,0,12,7,0,3,16,4,0,0,3,13,12,14,14,7,0,0,0,0,0,0,6,7,0,0,0,0,0,0,9,9,0,0,0,0,0,2,16,1,0,0,0,8,13,14,5,0,0,9 +0,0,9,16,11,0,0,0,0,1,16,13,15,7,0,0,0,7,16,6,4,16,3,0,0,9,14,0,0,11,10,0,0,8,13,0,0,7,13,0,0,7,13,0,0,7,16,0,0,3,16,7,7,15,6,0,0,0,9,16,16,10,0,0,0 +0,0,6,14,7,6,0,0,0,1,14,6,13,16,1,0,0,5,12,0,9,16,3,0,0,1,15,14,13,11,8,0,0,0,0,0,0,8,8,0,0,0,0,0,0,7,8,0,0,1,14,2,1,12,1,0,0,0,4,13,15,5,0,0,9 +0,2,16,16,16,16,3,0,0,1,16,13,6,4,0,0,0,9,16,3,0,0,0,0,0,9,16,13,7,0,0,0,0,3,11,13,16,9,0,0,0,0,0,0,7,16,1,0,0,2,11,5,12,14,0,0,0,3,16,16,16,5,0,0,5 +0,0,8,16,16,9,0,0,0,1,16,15,11,8,0,0,0,1,14,10,0,0,0,0,0,7,16,10,6,0,0,0,0,2,12,16,16,10,0,0,0,0,0,1,7,15,0,0,0,0,11,5,13,13,0,0,0,0,11,16,16,6,0,0,5 +0,0,0,16,11,0,0,0,0,0,6,16,10,0,0,0,0,0,11,11,0,0,0,0,0,0,12,15,11,5,0,0,0,0,14,15,12,15,11,0,0,0,12,13,0,0,16,5,0,0,6,15,4,11,16,4,0,0,0,13,16,14,9,0,6 +0,1,14,16,16,14,1,0,0,0,10,13,6,4,0,0,0,3,15,11,3,0,0,0,0,5,16,16,16,6,0,0,0,0,0,1,10,15,0,0,0,0,0,0,11,11,0,0,0,0,7,12,16,5,0,0,0,2,15,15,5,0,0,0,5 +0,0,2,14,13,2,0,0,0,0,11,16,15,13,0,0,0,0,13,13,1,16,3,0,0,0,12,13,0,15,6,0,0,1,16,7,1,16,4,0,0,1,16,5,8,16,1,0,0,0,15,13,15,13,0,0,0,0,3,15,15,2,0,0,0 +0,0,3,13,6,0,0,0,0,0,14,11,15,8,0,0,0,0,15,1,14,16,1,0,0,0,11,13,12,13,5,0,0,0,3,8,1,8,10,0,0,0,0,0,0,9,6,0,0,0,9,4,3,16,2,0,0,0,4,14,14,7,0,0,9 +0,0,7,10,16,9,0,0,0,0,15,16,13,15,1,0,0,0,10,13,10,16,2,0,0,0,1,16,16,11,0,0,0,0,8,16,16,5,0,0,0,0,15,8,14,7,0,0,0,0,16,7,16,4,0,0,0,0,9,16,14,0,0,0,8 +0,0,3,12,12,1,0,0,0,3,13,6,9,12,0,0,0,9,5,0,2,15,0,0,0,7,9,4,12,16,1,0,0,0,9,11,3,10,2,0,0,0,0,0,0,11,3,0,0,0,10,2,1,13,0,0,0,0,3,13,16,4,0,0,9 +0,0,3,15,13,2,0,0,0,0,10,16,12,13,0,0,0,0,13,13,9,14,0,0,0,0,6,15,15,11,0,0,0,0,4,16,14,1,0,0,0,0,11,14,15,5,0,0,0,0,9,10,14,9,0,0,0,0,4,16,15,2,0,0,8 +0,0,0,2,15,8,0,0,0,0,1,15,13,3,0,0,0,0,9,13,1,0,0,0,0,1,15,6,0,5,11,0,0,7,14,0,1,15,8,0,0,8,15,9,15,16,3,0,0,1,11,16,16,10,0,0,0,0,0,2,15,5,0,0,4 +0,0,0,16,13,0,0,0,0,0,0,15,15,0,0,0,0,0,0,16,13,0,0,0,0,0,3,16,11,0,0,0,0,0,2,16,12,0,0,0,0,0,3,16,12,0,0,0,0,0,1,16,12,0,0,0,0,0,0,12,15,1,0,0,1 +0,0,3,11,16,15,2,0,0,4,16,10,4,16,4,0,0,7,6,0,5,16,1,0,0,0,0,0,10,12,0,0,0,0,0,9,16,16,10,0,0,0,0,6,15,6,1,0,0,0,0,13,9,0,0,0,0,0,1,15,2,0,0,0,7 +0,0,6,16,16,6,0,0,0,5,13,5,7,13,0,0,0,1,1,0,5,11,0,0,0,0,0,5,14,14,8,0,0,0,0,5,16,8,2,0,0,0,0,8,8,0,0,0,0,0,1,14,3,0,0,0,0,0,7,12,0,0,0,0,7 +0,0,9,16,16,12,0,0,0,2,16,8,9,16,0,0,0,1,8,0,13,14,0,0,0,0,0,13,16,5,0,0,0,0,0,8,14,15,5,0,0,0,0,0,0,9,14,0,0,0,9,6,0,11,15,0,0,0,8,16,16,16,6,0,3 +0,0,9,16,16,16,7,0,0,3,16,11,4,4,1,0,0,6,16,1,0,0,0,0,0,9,16,9,4,0,0,0,0,0,6,10,16,8,0,0,0,0,2,0,8,14,0,0,0,0,13,7,8,14,0,0,0,0,10,16,16,4,0,0,5 +0,0,0,14,11,0,0,0,0,0,0,12,16,2,0,0,0,0,0,12,16,2,0,0,0,0,0,15,16,1,0,0,0,0,0,16,16,1,0,0,0,0,3,16,15,0,0,0,0,0,4,16,14,0,0,0,0,0,1,14,16,4,0,0,1 
+0,0,1,13,14,1,0,0,0,1,11,16,10,12,0,0,0,6,16,15,0,13,3,0,0,7,14,5,0,8,9,0,0,6,13,0,0,8,11,0,0,4,15,0,1,14,9,0,0,0,14,8,12,16,3,0,0,0,3,15,15,4,0,0,0 +0,0,2,14,13,1,0,0,0,0,14,16,13,11,0,0,0,4,16,11,1,13,3,0,0,5,16,3,0,10,9,0,0,6,13,0,0,9,11,0,0,2,15,0,1,15,8,0,0,0,11,12,15,15,1,0,0,0,2,13,16,5,0,0,0 +0,0,10,16,16,10,0,0,0,4,16,6,1,16,2,0,0,0,10,0,1,16,3,0,0,0,0,0,5,15,0,0,0,0,0,0,11,11,0,0,0,0,0,2,15,4,0,0,0,0,2,13,16,12,8,0,0,0,13,15,11,8,14,7,2 +0,0,10,16,14,6,0,0,0,0,16,8,6,16,3,0,0,0,9,5,0,13,6,0,0,0,0,0,0,14,8,0,0,0,0,0,4,16,2,0,0,0,0,0,13,11,0,0,0,0,2,9,16,10,6,1,0,0,12,16,14,13,16,8,2 +0,0,7,15,16,15,0,0,0,2,15,2,5,16,1,0,0,0,0,0,10,13,0,0,0,0,0,3,14,11,2,0,0,0,6,16,16,16,8,0,0,0,5,13,7,0,0,0,0,0,3,15,1,0,0,0,0,0,10,12,0,0,0,0,7 +0,0,6,14,16,6,0,0,0,6,16,16,8,15,0,0,0,7,14,14,12,14,0,0,0,0,13,10,16,6,0,0,0,0,4,16,10,0,0,0,0,0,11,13,16,2,0,0,0,0,15,5,15,4,0,0,0,0,8,16,15,1,0,0,8 +0,0,10,16,13,3,0,0,0,0,6,15,12,13,0,0,0,0,0,6,6,16,0,0,0,0,0,0,3,16,1,0,0,0,0,0,7,14,0,0,0,0,0,0,13,8,0,0,0,0,4,15,16,13,8,5,0,0,6,16,10,9,12,15,2 +0,0,10,16,14,2,0,0,0,3,16,9,8,14,0,0,0,5,16,3,2,15,6,0,0,5,16,3,0,12,10,0,0,7,14,0,0,12,11,0,0,7,16,1,3,16,5,0,0,4,16,7,12,11,1,0,0,0,10,16,14,3,0,0,0 +0,0,0,1,15,12,0,0,0,0,0,12,16,13,0,0,0,0,11,16,16,13,0,0,0,11,16,11,13,13,0,0,0,3,7,0,12,14,0,0,0,0,0,0,11,13,0,0,0,0,0,0,15,13,0,0,0,0,0,2,15,13,0,0,1 +0,0,11,16,16,7,0,0,0,2,16,10,11,15,0,0,0,0,15,4,4,16,3,0,0,0,3,3,5,16,1,0,0,0,0,0,9,13,0,0,0,0,0,1,16,7,0,0,0,0,2,12,15,6,4,1,0,0,10,16,16,16,16,10,2 +0,0,0,14,14,1,0,0,0,0,3,16,14,0,0,0,0,0,10,16,2,0,0,0,0,0,16,16,8,3,0,0,0,3,16,15,8,14,2,0,0,0,16,11,0,11,10,0,0,0,9,14,7,16,10,0,0,0,0,12,16,14,1,0,6 +0,0,8,16,16,12,0,0,0,0,14,12,10,14,0,0,0,0,3,3,10,10,0,0,0,0,0,8,16,5,0,0,0,0,0,7,16,6,0,0,0,0,4,0,7,14,0,0,0,2,16,5,10,16,0,0,0,0,7,16,16,7,0,0,3 +0,0,7,16,16,14,0,0,0,0,16,12,10,15,1,0,0,0,10,4,16,10,0,0,0,0,0,9,16,11,1,0,0,0,0,0,7,16,8,0,0,0,0,0,0,16,7,0,0,0,8,4,10,15,2,0,0,0,12,16,16,6,0,0,3 +0,0,12,16,16,5,0,0,0,3,13,8,14,15,1,0,0,0,0,0,13,16,0,0,0,6,16,16,16,16,13,0,0,6,9,11,16,9,5,0,0,0,0,14,11,0,0,0,0,0,7,16,2,0,0,0,0,0,13,10,0,0,0,0,7 +0,0,5,15,16,15,1,0,0,10,16,11,8,16,5,0,0,12,10,1,10,15,1,0,0,0,0,8,16,11,1,0,0,0,0,1,10,16,10,0,0,0,0,2,0,7,16,0,0,0,8,13,5,15,12,0,0,0,5,15,16,14,3,0,3 +0,0,10,16,16,10,1,0,0,4,16,11,11,16,3,0,0,1,9,1,10,15,1,0,0,0,0,5,16,10,0,0,0,0,0,0,7,15,10,0,0,0,0,0,0,7,16,0,0,2,12,7,4,14,15,1,0,0,11,16,16,15,4,0,3 +0,0,0,1,13,7,0,0,0,0,0,10,14,1,0,0,0,0,3,16,5,0,0,0,0,0,13,11,0,3,8,0,0,6,15,2,0,14,7,0,0,8,16,12,13,16,4,0,0,3,11,11,15,12,0,0,0,0,0,0,16,9,0,0,4 +0,0,0,14,14,1,0,0,0,0,7,16,10,2,0,0,0,0,14,14,1,0,0,0,0,0,14,16,14,4,0,0,0,1,16,16,8,16,2,0,0,0,14,11,0,13,9,0,0,0,9,14,6,16,7,0,0,0,0,14,16,14,0,0,6 +0,0,0,14,14,1,0,0,0,0,6,16,12,0,0,0,0,0,12,16,2,0,0,0,0,0,16,16,16,9,0,0,0,1,16,15,8,14,9,0,0,0,14,12,0,12,13,0,0,0,6,14,7,16,10,0,0,0,1,13,16,13,1,0,6 +0,0,0,12,14,1,0,0,0,0,5,16,12,0,0,0,0,0,10,15,1,0,0,0,0,0,14,15,9,2,0,0,0,1,16,15,16,15,2,0,0,0,15,7,1,12,10,0,0,0,10,14,4,15,12,0,0,0,0,11,16,15,5,0,6 +0,0,0,4,14,2,0,0,0,0,1,15,7,0,0,0,0,0,8,14,0,2,8,0,0,1,15,5,0,10,11,0,0,6,14,1,6,16,5,0,0,12,16,16,16,14,2,0,0,3,12,13,16,3,0,0,0,0,0,6,10,0,0,0,4 +0,0,0,12,14,5,0,0,0,0,6,11,4,15,0,0,0,0,8,9,8,16,3,0,0,0,3,14,13,13,4,0,0,0,0,0,0,10,7,0,0,0,0,0,0,7,8,0,0,0,13,6,1,7,9,0,0,0,1,10,14,15,2,0,9 +0,0,1,15,15,2,0,0,0,0,0,12,16,7,0,0,0,0,0,14,16,5,0,0,0,0,0,13,16,2,0,0,0,0,2,16,13,0,0,0,0,0,6,16,13,0,0,0,0,0,6,16,11,0,0,0,0,0,1,14,16,7,0,0,1 
+0,0,7,16,16,10,0,0,0,0,14,13,7,4,0,0,0,5,16,6,0,0,0,0,0,14,15,8,6,1,0,0,0,4,12,12,16,13,2,0,0,0,0,0,1,15,6,0,0,0,5,6,6,16,4,0,0,0,7,16,16,11,1,0,5 +0,0,1,12,12,3,0,0,0,0,9,13,5,14,0,0,0,0,15,3,0,10,2,0,0,3,16,4,0,9,4,0,0,4,13,0,0,9,2,0,0,3,13,0,1,15,0,0,0,0,13,6,8,9,0,0,0,0,2,14,12,2,0,0,0 +0,0,8,16,6,3,0,0,0,2,13,5,10,14,0,0,0,4,14,1,9,16,0,0,0,0,12,13,8,13,0,0,0,0,0,3,0,11,2,0,0,0,0,0,0,12,2,0,0,0,4,1,0,14,1,0,0,0,6,15,16,10,0,0,9 +0,0,4,9,13,13,0,0,0,1,13,15,6,2,0,0,0,0,9,7,0,0,0,0,0,0,13,3,4,0,0,0,0,2,16,16,16,8,0,0,0,2,11,3,0,10,4,0,0,0,2,5,4,15,1,0,0,0,3,12,14,8,0,0,5 +0,1,13,16,9,0,0,0,0,8,15,8,15,5,0,0,0,11,9,0,12,8,0,0,0,0,0,0,11,8,0,0,0,0,0,2,16,3,0,0,0,0,0,6,15,1,0,0,0,0,7,16,16,16,10,1,0,1,16,14,10,8,11,1,2 +0,0,2,16,14,2,0,0,0,1,12,16,16,10,0,0,0,4,16,12,12,12,0,0,0,1,15,11,16,6,0,0,0,0,6,16,13,0,0,0,0,0,9,14,14,8,0,0,0,0,10,12,6,15,0,0,0,0,2,13,16,12,0,0,8 +0,0,7,16,14,3,0,0,0,0,9,14,11,15,0,0,0,0,1,5,0,15,5,0,0,0,0,0,0,16,5,0,0,0,0,0,3,16,4,0,0,0,0,1,12,14,1,0,0,0,5,12,16,16,14,1,0,0,8,16,14,10,13,3,2 +0,0,5,13,11,1,0,0,0,3,16,11,8,12,0,0,0,5,16,0,0,13,3,0,0,5,13,0,0,6,7,0,0,7,10,0,0,8,7,0,0,4,13,0,1,14,5,0,0,1,15,5,12,10,0,0,0,0,7,16,10,1,0,0,0 +0,0,4,15,13,2,0,0,0,4,16,15,7,11,0,0,0,8,14,14,0,14,2,0,0,7,9,12,4,8,7,0,0,6,11,0,0,7,9,0,0,2,15,1,0,10,8,0,0,0,11,8,4,15,4,0,0,0,2,14,16,10,0,0,0 +0,0,0,3,16,8,0,0,0,0,0,11,16,9,0,0,0,3,12,16,16,8,0,0,0,13,16,9,16,8,0,0,0,1,2,0,16,8,0,0,0,0,0,2,16,4,0,0,0,0,0,2,16,6,0,0,0,0,0,1,16,9,0,0,1 +0,1,12,16,16,15,0,0,0,7,13,7,8,16,0,0,0,0,1,0,8,14,0,0,0,0,7,15,16,16,11,0,0,3,15,12,15,4,2,0,0,0,1,12,7,0,0,0,0,0,2,16,2,0,0,0,0,0,13,9,0,0,0,0,7 +0,0,0,8,13,0,0,0,0,0,0,14,6,0,0,0,0,0,6,12,0,0,0,0,0,0,8,13,8,2,0,0,0,0,13,16,13,14,4,0,0,0,11,8,2,3,13,0,0,0,7,11,5,12,11,0,0,0,1,11,12,4,0,0,6 +0,0,13,16,16,9,0,0,0,2,16,7,7,16,0,0,0,0,4,0,11,10,0,0,0,0,1,13,14,3,0,0,0,0,0,7,15,11,1,0,0,0,0,1,2,13,10,0,0,0,8,9,1,12,11,0,0,0,11,16,16,15,1,0,3 +0,0,7,16,12,0,0,0,0,1,16,7,13,3,0,0,0,0,15,2,10,6,0,0,0,0,4,0,12,5,0,0,0,0,0,0,15,1,0,0,0,0,0,5,13,0,0,0,0,0,6,15,14,8,7,1,0,0,6,13,12,12,15,12,2 +0,0,1,14,14,2,0,0,0,0,1,14,16,3,0,0,0,0,0,10,16,2,0,0,0,0,0,11,16,5,0,0,0,0,0,15,16,5,0,0,0,0,0,15,16,5,0,0,0,0,0,10,16,12,1,0,0,0,0,14,16,11,0,0,1 +0,1,8,16,15,0,0,0,0,4,13,5,16,0,0,0,0,0,0,0,16,0,0,0,0,0,1,7,14,6,2,0,0,0,12,16,14,13,8,0,0,0,8,16,4,0,0,0,0,0,5,15,0,0,0,0,0,0,14,8,0,0,0,0,7 +0,0,0,7,14,0,0,0,0,0,1,14,8,0,1,0,0,0,8,14,0,9,11,0,0,1,15,6,1,14,10,0,0,8,15,0,8,16,1,0,0,10,15,9,15,15,0,0,0,5,15,14,16,6,0,0,0,0,0,8,15,2,0,0,4 +0,0,0,16,6,0,0,0,0,0,3,16,6,0,0,0,0,0,9,16,0,0,0,0,0,0,12,16,12,9,1,0,0,0,15,16,14,13,12,0,0,0,14,15,7,0,15,6,0,0,9,14,4,7,15,8,0,0,1,13,16,16,12,1,6 +0,0,10,16,16,10,0,0,0,8,15,7,6,14,0,0,0,1,3,0,15,8,0,0,0,0,0,12,13,1,0,0,0,0,0,7,15,4,0,0,0,0,0,0,8,12,0,0,0,0,6,0,12,10,0,0,0,0,13,16,15,3,0,0,3 +0,0,2,14,14,0,0,0,0,0,1,15,16,2,0,0,0,0,0,14,16,3,0,0,0,0,0,10,16,5,0,0,0,0,0,12,14,1,0,0,0,0,0,13,13,0,0,0,0,0,1,16,11,0,0,0,0,0,2,15,12,0,0,0,1 +0,1,12,16,14,2,0,0,0,7,15,4,13,7,0,0,0,0,2,0,12,7,0,0,0,0,0,6,16,8,0,0,0,0,0,15,13,15,9,0,0,0,0,1,0,8,15,0,0,0,8,4,1,12,10,0,0,1,13,16,16,13,1,0,3 +0,0,8,16,13,2,0,0,0,2,16,8,11,14,1,0,0,3,16,1,12,16,5,0,0,0,12,16,16,13,9,0,0,0,0,4,2,9,14,0,0,0,4,5,0,8,13,0,0,1,16,11,1,13,7,0,0,0,8,15,16,15,1,0,9 +0,0,2,16,10,0,0,0,0,0,3,16,16,1,0,0,0,0,5,16,14,0,0,0,0,0,3,16,13,0,0,0,0,0,1,16,15,0,0,0,0,0,1,16,16,0,0,0,0,0,2,16,15,2,0,0,0,0,0,15,16,11,0,0,1 +0,0,3,13,16,13,1,0,0,3,15,13,10,16,2,0,0,4,10,0,4,16,1,0,0,0,0,3,11,14,2,0,0,0,9,16,16,16,8,0,0,0,9,12,16,0,0,0,0,0,1,12,11,0,0,0,0,0,3,16,8,0,0,0,7 
+0,0,0,10,11,0,0,0,0,0,0,14,14,0,0,0,0,0,4,16,3,0,0,0,0,0,6,16,10,2,0,0,0,0,12,16,12,11,5,0,0,0,11,7,3,2,14,0,0,0,6,13,0,4,13,0,0,0,0,9,16,15,5,0,6 +0,0,6,12,13,7,0,0,0,0,16,10,16,12,0,0,0,0,13,7,16,9,0,0,0,0,1,15,13,0,0,0,0,0,2,16,13,0,0,0,0,0,10,7,10,2,0,0,0,0,13,4,13,3,0,0,0,0,7,15,9,0,0,0,8 +0,0,0,0,12,10,0,0,0,0,0,11,16,3,0,0,0,0,4,15,4,0,0,0,0,0,12,10,0,8,10,0,0,6,15,1,1,15,8,0,0,7,16,8,10,16,7,0,0,4,15,16,16,13,0,0,0,0,0,0,12,9,0,0,4 +0,0,5,16,16,10,0,0,0,2,16,14,14,14,0,0,0,2,14,4,14,10,0,0,0,0,0,8,16,8,0,0,0,0,0,0,8,16,6,0,0,0,0,0,0,12,13,0,0,0,7,11,8,16,11,0,0,0,8,16,16,10,1,0,3 +0,0,2,16,12,0,0,0,0,0,5,16,16,1,0,0,0,0,2,16,15,0,0,0,0,0,6,16,14,0,0,0,0,0,8,16,9,0,0,0,0,0,7,16,14,0,0,0,0,0,8,16,11,0,0,0,0,0,2,15,13,0,0,0,1 +0,0,0,6,16,6,0,0,0,0,3,16,9,0,0,0,0,1,13,13,1,2,1,0,0,7,16,5,1,14,10,0,0,12,16,8,12,16,2,0,0,2,12,15,16,11,0,0,0,0,0,8,16,4,0,0,0,0,0,8,15,0,0,0,4 +0,0,4,16,12,1,0,0,0,2,16,16,11,8,0,0,0,3,16,13,1,14,2,0,0,4,16,0,0,13,4,0,0,4,15,0,0,13,8,0,0,1,16,1,0,14,5,0,0,0,10,8,7,15,1,0,0,0,2,13,16,8,0,0,0 +0,0,7,16,15,4,0,0,0,0,3,13,4,2,0,0,0,0,9,9,0,0,0,0,0,0,14,7,0,0,0,0,0,0,13,16,13,2,0,0,0,0,0,4,11,8,0,0,0,0,0,1,13,6,0,0,0,0,6,16,14,0,0,0,5 +0,0,9,16,16,7,0,0,0,13,15,9,12,15,0,0,0,5,4,0,13,13,0,0,0,0,0,11,16,5,0,0,0,0,0,11,16,10,3,0,0,0,0,0,4,12,13,0,0,0,7,1,1,12,14,0,0,0,9,16,16,14,5,0,3 +0,0,0,15,2,0,0,0,0,0,0,16,4,0,0,0,0,0,6,9,0,0,0,0,0,0,12,16,16,9,1,0,0,0,14,11,0,8,9,0,0,0,11,14,3,2,14,0,0,0,8,11,4,14,7,0,0,0,1,12,14,6,0,0,6 +0,0,6,12,15,9,1,0,0,5,14,4,5,16,3,0,0,9,8,3,13,16,4,0,0,3,15,15,7,10,8,0,0,0,0,1,0,12,8,0,0,0,0,0,0,11,2,0,0,0,7,4,5,13,0,0,0,0,5,14,15,4,0,0,9 +0,0,3,16,14,1,0,0,0,0,11,16,6,0,0,0,0,0,15,9,1,0,0,0,0,3,16,14,16,12,2,0,0,4,16,14,13,11,14,0,0,0,16,5,3,7,16,3,0,0,11,12,8,16,10,0,0,0,2,14,16,12,0,0,6 +0,0,2,16,13,0,0,0,0,0,4,16,15,0,0,0,0,0,4,16,12,0,0,0,0,0,7,16,10,0,0,0,0,0,5,16,9,0,0,0,0,0,7,16,7,0,0,0,0,0,3,16,9,0,0,0,0,0,1,12,15,0,0,0,1 +0,0,8,16,15,4,0,0,0,10,16,11,13,12,0,0,0,12,7,0,13,8,0,0,0,0,1,7,16,12,5,0,0,5,15,16,16,14,9,0,0,2,8,15,10,0,0,0,0,0,7,16,2,0,0,0,0,0,10,13,0,0,0,0,7 +0,0,11,16,16,11,0,0,0,0,10,16,7,2,0,0,0,1,16,9,0,0,0,0,0,0,13,16,8,0,0,0,0,0,0,4,15,4,0,0,0,0,0,0,14,8,0,0,0,0,3,8,16,4,0,0,0,0,14,16,9,0,0,0,5 +0,0,0,1,11,12,0,0,0,0,0,9,13,1,0,0,0,0,8,15,3,0,0,0,0,2,16,6,1,5,2,0,0,12,13,8,13,16,9,0,0,16,16,13,11,16,6,0,0,3,4,0,11,14,0,0,0,0,0,1,16,12,0,0,4 +0,0,0,3,14,9,0,0,0,0,0,13,11,1,0,0,0,0,9,14,0,0,0,0,0,4,16,4,0,4,2,0,0,12,12,7,14,16,10,0,0,13,16,14,11,16,4,0,0,2,2,0,11,13,0,0,0,0,0,3,16,9,0,0,4 +0,0,3,13,16,16,5,0,0,4,15,13,10,16,6,0,0,1,8,1,4,16,4,0,0,0,1,6,11,16,10,0,0,0,13,16,16,13,3,0,0,0,10,7,16,4,0,0,0,0,0,11,13,0,0,0,0,0,4,16,8,0,0,0,7 +0,0,7,14,11,1,0,0,0,6,15,6,7,10,0,0,0,11,7,0,2,12,0,0,0,5,4,0,1,12,0,0,0,0,0,0,3,9,0,0,0,0,0,0,11,3,0,0,0,0,0,9,13,2,3,0,0,0,7,16,16,16,16,6,2 +0,0,8,15,14,7,0,0,0,0,12,16,14,16,0,0,0,0,3,15,14,14,1,0,0,0,0,11,16,1,0,0,0,0,5,14,14,4,0,0,0,0,12,6,8,8,0,0,0,1,16,2,9,7,0,0,0,0,9,16,11,1,0,0,8 +0,0,11,16,11,0,0,0,0,3,16,11,15,6,0,0,0,1,10,3,10,10,0,0,0,0,0,0,9,10,0,0,0,0,0,0,12,9,0,0,0,0,0,5,16,5,0,0,0,1,13,16,16,13,8,0,0,0,11,11,8,13,16,7,2 +0,0,9,16,13,1,0,0,0,8,16,8,11,8,0,0,0,11,10,0,8,10,0,0,0,1,5,0,11,11,0,0,0,0,0,0,13,9,0,0,0,0,0,4,16,2,0,0,0,0,4,14,15,7,4,1,0,0,8,16,16,16,16,12,2 +0,0,9,16,16,16,4,0,0,0,8,16,5,4,0,0,0,1,15,9,0,0,0,0,0,9,15,8,7,3,0,0,0,3,12,12,14,16,5,0,0,0,0,0,1,11,12,0,0,0,1,2,1,11,10,0,0,0,9,16,16,16,4,0,5 +0,0,2,13,16,8,0,0,0,0,13,7,7,13,0,0,0,1,7,0,7,13,2,0,0,0,1,10,16,16,13,0,0,0,6,13,16,2,0,0,0,0,1,5,11,0,0,0,0,0,0,12,4,0,0,0,0,0,2,14,0,0,0,0,7 
+0,1,8,11,13,12,0,0,0,5,9,0,4,16,1,0,0,7,5,0,5,16,5,0,0,1,13,11,13,6,8,0,0,0,3,4,1,4,8,0,0,0,0,0,0,7,5,0,0,2,8,1,2,14,0,0,0,0,8,13,11,3,0,0,9 +0,0,14,16,16,16,2,0,0,7,16,5,1,0,0,0,0,14,13,7,3,0,0,0,0,4,12,13,16,9,0,0,0,0,0,0,6,15,0,0,0,0,0,0,9,14,0,0,0,0,5,14,16,3,0,0,0,1,15,11,4,0,0,0,5 +0,0,0,2,14,5,0,0,0,0,1,11,11,0,0,0,0,0,7,14,1,0,0,0,0,5,16,3,4,6,2,0,0,14,16,14,16,16,10,0,0,9,12,7,8,16,4,0,0,0,0,0,13,12,0,0,0,0,0,3,15,6,0,0,4 +0,0,2,15,15,3,0,0,0,3,12,15,10,13,0,0,0,3,16,14,11,14,0,0,0,0,7,16,16,9,0,0,0,0,5,16,13,0,0,0,0,0,13,16,11,0,0,0,0,0,13,16,15,0,0,0,0,0,2,15,10,0,0,0,8 +0,0,4,15,15,4,0,0,0,6,16,16,12,14,0,0,0,11,11,6,14,12,0,0,0,3,14,13,14,1,0,0,0,0,12,16,5,0,0,0,0,1,16,13,9,0,0,0,0,0,13,10,15,0,0,0,0,0,3,15,15,0,0,0,8 +0,0,0,5,15,3,0,0,0,0,2,16,4,0,0,0,0,0,8,12,0,2,12,0,0,3,16,1,0,11,10,0,0,9,13,0,3,16,5,0,0,13,15,16,16,12,0,0,0,5,12,14,16,4,0,0,0,0,0,6,15,2,0,0,4 +0,0,1,11,12,9,5,0,0,0,14,6,1,15,10,0,0,2,12,4,12,7,10,0,0,1,13,12,3,4,8,0,0,0,0,0,0,4,8,0,0,0,3,4,0,7,6,0,0,0,12,7,3,11,0,0,0,0,3,13,15,3,0,0,9 +0,0,2,15,13,2,0,0,0,0,8,16,15,12,0,0,0,0,9,14,1,15,5,0,0,0,14,13,0,11,9,0,0,3,16,11,0,12,9,0,0,2,16,3,2,16,6,0,0,1,13,11,15,14,0,0,0,0,4,16,15,5,0,0,0 +0,0,10,9,14,10,0,0,0,2,15,15,4,14,2,0,0,0,13,5,9,12,0,0,0,0,4,16,14,2,0,0,0,0,11,12,10,0,0,0,0,0,15,1,12,0,0,0,0,3,12,2,13,0,0,0,0,0,9,16,7,0,0,0,8 +0,0,4,14,16,4,0,0,0,0,12,8,9,12,0,0,0,2,11,0,0,12,3,0,0,4,7,0,0,5,8,0,0,6,4,0,0,4,8,0,0,4,9,0,0,6,8,0,0,0,14,9,6,15,2,0,0,0,4,16,15,5,0,0,0 +0,0,0,3,15,16,1,0,0,0,7,15,16,14,0,0,0,4,16,16,15,13,0,0,0,0,8,2,15,13,0,0,0,0,0,0,16,15,0,0,0,0,0,0,16,12,0,0,0,0,0,1,16,16,2,0,0,0,0,3,15,13,1,0,1 +0,1,10,16,3,0,0,0,0,5,16,14,8,0,0,0,0,10,11,8,12,0,0,0,0,1,1,8,12,0,0,0,0,0,0,12,8,0,0,0,0,0,2,15,5,1,2,0,0,0,10,16,14,14,12,0,0,0,14,16,16,13,7,0,2 +0,0,8,16,8,0,0,0,0,0,16,6,15,1,0,0,0,4,10,0,12,2,0,0,0,0,0,7,16,3,0,0,0,0,0,5,13,16,3,0,0,0,0,0,0,11,6,0,0,0,12,9,9,16,2,0,0,0,8,16,15,7,0,0,3 +0,0,3,16,0,0,0,0,0,0,10,10,0,0,0,0,0,1,16,3,13,5,0,0,0,8,13,1,16,7,6,0,0,14,13,9,16,16,10,0,0,11,16,16,14,9,3,0,0,1,4,16,8,0,0,0,0,0,2,16,6,0,0,0,4 +0,1,12,12,12,15,6,0,0,1,14,5,5,4,1,0,0,0,12,0,0,0,0,0,0,8,16,16,15,8,0,0,0,1,4,4,5,12,7,0,0,0,0,0,0,11,4,0,0,2,7,2,10,12,0,0,0,2,16,15,8,1,0,0,5 +0,0,1,13,4,0,0,0,0,0,12,13,1,0,0,0,0,1,16,2,0,0,0,0,0,5,16,8,4,2,0,0,0,5,16,14,16,15,3,0,0,3,13,1,0,9,9,0,0,0,11,12,4,11,11,0,0,0,2,13,16,13,1,0,6 +0,0,0,11,16,16,10,0,0,0,10,12,9,15,9,0,0,0,13,8,0,12,5,0,0,0,6,0,4,12,0,0,0,0,2,15,16,16,9,0,0,0,6,13,15,9,1,0,0,0,0,9,9,0,0,0,0,0,1,14,4,0,0,0,7 +0,0,4,12,13,5,0,0,0,3,15,8,10,15,2,0,0,3,14,2,2,15,3,0,0,0,10,16,16,7,0,0,0,0,0,7,13,15,3,0,0,0,2,11,1,12,5,0,0,0,7,9,1,14,2,0,0,0,4,16,16,7,0,0,8 +0,0,5,12,13,12,0,0,0,7,13,5,8,15,0,0,0,4,14,4,13,16,3,0,0,0,6,12,8,9,4,0,0,0,0,0,0,8,8,0,0,0,0,0,0,6,8,0,0,0,1,3,2,13,6,0,0,0,6,16,16,8,1,0,9 +0,0,3,10,16,4,0,0,0,1,15,16,16,10,0,0,0,6,10,2,3,14,1,0,0,8,6,0,0,10,4,0,0,4,8,0,0,5,8,0,0,0,15,0,0,9,8,0,0,0,12,14,10,16,3,0,0,0,4,14,13,5,0,0,0 +0,0,0,0,9,15,0,0,0,0,1,10,16,16,1,0,0,5,16,15,14,16,0,0,0,1,8,0,10,16,0,0,0,0,0,0,11,16,0,0,0,0,0,0,10,15,0,0,0,0,0,0,12,16,3,0,0,0,0,0,8,16,3,0,1 +0,3,16,15,1,0,0,0,0,10,16,15,6,0,0,0,0,13,10,13,8,0,0,0,0,1,3,11,10,0,0,0,0,0,0,15,8,0,0,0,0,0,4,16,10,5,7,0,0,1,13,16,16,16,16,0,0,2,14,15,11,8,3,0,2 +0,0,12,16,15,1,0,0,0,5,16,10,15,8,0,0,0,1,7,3,14,6,0,0,0,0,0,12,16,7,0,0,0,0,0,3,13,16,6,0,0,0,4,0,0,15,11,0,0,2,16,10,11,16,7,0,0,0,10,16,16,10,1,0,3 +0,0,0,10,12,0,0,0,0,0,4,15,1,0,0,0,0,0,13,10,1,2,0,0,0,5,15,2,15,9,1,0,0,11,13,6,16,16,9,0,0,13,16,16,16,10,2,0,0,2,7,13,11,0,0,0,0,0,0,13,11,0,0,0,4 
+0,0,11,10,8,12,1,0,0,0,16,13,12,10,0,0,0,5,14,2,0,0,0,0,0,7,16,16,13,8,0,0,0,0,4,4,8,16,7,0,0,0,0,0,0,9,8,0,0,0,9,4,7,16,5,0,0,0,14,16,14,7,0,0,5 +0,0,2,13,0,0,0,0,0,0,12,12,0,0,0,0,0,2,16,2,0,0,0,0,0,4,14,8,7,2,0,0,0,8,15,12,13,15,2,0,0,2,15,1,0,7,11,0,0,0,13,8,5,13,9,0,0,0,2,13,16,11,1,0,6 +0,0,1,9,16,16,16,1,0,0,8,15,10,14,13,0,0,0,14,6,0,14,6,0,0,0,6,2,6,16,3,0,0,0,0,12,16,16,10,0,0,0,0,6,16,8,0,0,0,0,0,7,13,0,0,0,0,0,0,14,6,0,0,0,7 +0,0,3,12,15,11,2,0,0,0,11,13,7,13,8,0,0,7,15,1,5,15,3,0,0,1,12,16,16,5,0,0,0,0,0,13,15,15,2,0,0,0,2,13,0,10,5,0,0,0,4,11,4,11,6,0,0,0,2,13,16,12,0,0,8 +0,0,11,16,16,12,0,0,0,3,16,7,14,16,1,0,0,0,15,14,15,16,6,0,0,0,2,10,9,15,9,0,0,0,0,0,0,9,10,0,0,0,0,0,0,12,8,0,0,0,6,11,6,15,5,0,0,0,9,16,16,12,0,0,9 +0,0,2,9,14,12,0,0,0,0,12,16,10,15,1,0,0,4,14,3,2,6,6,0,0,5,7,0,0,3,8,0,0,4,7,0,0,1,8,0,0,3,12,1,0,5,8,0,0,0,10,12,7,14,3,0,0,0,1,12,16,8,0,0,0 +0,0,0,0,11,15,1,0,0,0,0,6,16,16,2,0,0,3,13,16,16,16,0,0,0,9,16,12,16,14,0,0,0,1,3,0,16,13,0,0,0,0,0,0,14,13,0,0,0,0,0,0,12,16,5,0,0,0,0,0,8,16,7,0,1 +0,4,16,15,2,0,0,0,0,11,15,15,7,0,0,0,0,9,10,6,14,0,0,0,0,0,0,7,15,0,0,0,0,0,0,13,10,0,0,0,0,0,1,16,7,2,2,0,0,1,12,16,15,16,15,0,0,4,16,16,16,12,11,0,2 +0,1,12,15,5,0,0,0,0,4,15,8,15,3,0,0,0,0,3,1,14,4,0,0,0,0,4,14,16,1,0,0,0,0,1,10,14,15,4,0,0,0,0,0,0,12,8,0,0,4,8,4,10,16,4,0,0,2,12,16,13,6,0,0,3 +0,0,0,12,7,0,0,0,0,0,6,15,1,1,0,0,0,1,14,8,10,8,0,0,0,6,15,0,13,12,6,0,0,14,15,12,16,16,9,0,0,10,16,15,16,8,1,0,0,0,0,10,16,1,0,0,0,0,0,12,9,0,0,0,4 +0,2,6,10,12,1,0,0,0,14,13,10,5,1,0,0,0,10,6,0,0,0,0,0,0,10,13,12,12,5,0,0,0,2,8,5,7,14,8,0,0,0,0,0,0,5,12,0,0,0,2,2,1,10,10,0,0,0,5,16,16,14,1,0,5 +0,0,6,14,1,0,0,0,0,0,12,10,0,0,0,0,0,3,16,1,0,0,0,0,0,5,14,0,4,2,0,0,0,7,16,16,16,16,5,0,0,4,16,6,0,9,11,0,0,0,14,8,5,13,9,0,0,0,5,13,16,12,1,0,6 +0,0,1,14,16,16,15,1,0,0,11,14,8,13,11,0,0,0,15,6,0,14,3,0,0,0,5,1,5,13,1,0,0,0,0,13,16,16,9,0,0,0,2,16,15,9,1,0,0,0,0,11,9,0,0,0,0,0,3,16,2,0,0,0,7 +0,0,1,14,13,4,0,0,0,3,15,12,11,15,0,0,0,8,11,1,7,13,0,0,0,1,13,14,16,1,0,0,0,0,0,14,13,14,2,0,0,0,2,12,0,9,8,0,0,0,3,13,4,12,6,0,0,0,0,9,14,13,1,0,8 +0,0,8,13,14,5,0,0,0,5,13,4,11,9,0,0,0,4,13,1,12,14,0,0,0,0,8,14,11,12,4,0,0,0,0,0,0,7,8,0,0,0,0,0,0,4,8,0,0,0,2,2,0,11,7,0,0,0,8,16,16,13,2,0,9 +0,0,2,12,7,0,0,0,0,0,15,14,15,6,0,0,0,6,10,4,2,14,1,0,0,8,8,0,0,8,5,0,0,7,8,0,0,4,8,0,0,2,14,0,0,5,6,0,0,0,9,12,4,14,3,0,0,0,1,13,15,9,0,0,0 +0,1,11,15,13,2,0,0,0,8,13,5,14,9,0,0,0,3,15,1,10,12,0,0,0,0,7,15,14,16,3,0,0,0,0,0,0,13,8,0,0,0,0,0,0,12,8,0,0,2,8,4,5,16,7,0,0,1,15,16,15,8,1,0,9 +0,0,14,12,12,12,6,0,0,2,15,8,8,8,4,0,0,5,12,0,0,0,0,0,0,8,16,12,11,7,0,0,0,1,4,4,9,15,7,0,0,0,0,0,0,8,8,0,0,1,11,4,5,14,7,0,0,0,12,16,16,8,1,0,5 +0,2,9,11,12,15,6,0,0,6,16,9,8,8,1,0,0,4,13,0,0,0,0,0,0,4,16,12,11,5,0,0,0,1,5,4,8,14,5,0,0,0,0,0,0,8,8,0,0,1,6,4,5,15,3,0,0,3,16,16,16,8,0,0,5 +0,0,0,9,15,1,0,0,0,0,10,13,4,0,0,0,0,2,15,1,0,0,0,0,0,5,11,4,4,0,0,0,0,4,16,16,16,16,4,0,0,0,16,2,0,10,8,0,0,0,8,12,4,13,7,0,0,0,1,9,16,11,1,0,6 +0,0,8,9,8,12,8,0,0,0,12,14,10,8,5,0,0,1,14,2,0,0,0,0,0,6,16,12,12,8,0,0,0,1,4,4,7,15,7,0,0,0,0,0,0,10,7,0,0,1,12,4,9,15,1,0,0,0,9,16,14,3,0,0,5 +0,0,2,15,5,0,0,0,0,0,11,16,16,6,0,0,0,3,14,3,7,16,3,0,0,7,8,0,0,8,8,0,0,5,8,0,0,4,8,0,0,4,12,0,0,8,8,0,0,0,14,9,8,16,2,0,0,0,3,14,15,6,0,0,0 +0,0,9,12,13,6,0,0,0,0,16,8,8,15,2,0,0,0,16,10,9,16,5,0,0,0,4,12,11,12,5,0,0,0,0,0,0,8,8,0,0,0,0,0,0,9,7,0,0,0,8,4,7,16,2,0,0,0,10,16,16,7,0,0,9 +0,0,1,12,12,4,1,0,0,0,13,13,13,14,8,0,0,6,15,0,0,12,7,0,0,2,16,13,12,13,1,0,0,0,0,14,16,9,0,0,0,0,6,10,2,16,0,0,0,0,11,8,5,16,0,0,0,0,3,12,16,7,0,0,8 
+0,1,14,16,14,8,0,0,0,5,12,5,14,9,0,0,0,2,15,9,13,12,0,0,0,0,3,8,8,16,2,0,0,0,0,0,0,11,6,0,0,0,4,0,0,8,8,0,0,4,14,4,5,14,7,0,0,1,13,16,16,10,1,0,9 +0,0,2,11,13,5,0,0,0,0,16,10,13,16,7,0,0,0,14,10,0,10,11,0,0,0,4,14,16,13,1,0,0,0,0,11,13,12,1,0,0,0,8,10,2,14,3,0,0,0,10,10,3,15,1,0,0,0,2,15,16,6,0,0,8 +0,0,0,5,15,2,0,0,0,0,0,14,10,2,0,0,0,0,12,14,4,15,0,0,0,6,16,4,9,15,8,0,0,14,15,11,15,16,9,0,0,7,15,15,16,11,0,0,0,0,0,3,16,6,0,0,0,0,0,6,16,2,0,0,4 +0,0,0,0,7,16,6,0,0,0,0,4,16,16,4,0,0,2,11,15,16,16,7,0,0,10,16,13,10,16,4,0,0,1,3,0,4,16,5,0,0,0,0,0,7,16,7,0,0,0,0,0,9,16,8,0,0,0,0,0,9,16,6,0,1 +0,0,3,15,16,16,5,0,0,0,10,12,10,16,6,0,0,2,15,2,3,16,1,0,0,0,2,3,10,13,2,0,0,0,3,16,16,16,10,0,0,0,0,12,13,7,1,0,0,0,1,16,6,0,0,0,0,0,5,14,2,0,0,0,7 +0,0,1,13,16,10,0,0,0,0,8,14,13,14,0,0,0,1,15,5,5,15,0,0,0,7,12,0,9,11,0,0,0,1,2,11,15,16,7,0,0,0,4,15,16,9,1,0,0,0,0,10,11,0,0,0,0,0,1,15,7,0,0,0,7 +0,0,8,13,12,3,0,0,0,6,15,7,9,12,0,0,0,0,0,0,7,11,0,0,0,0,0,4,15,3,0,0,0,0,0,10,16,9,0,0,0,0,0,0,4,15,7,0,0,0,11,2,1,15,7,0,0,0,8,16,16,12,0,0,3 +0,0,14,12,12,13,0,0,0,1,16,8,8,6,0,0,0,4,15,8,4,0,0,0,0,2,12,12,15,13,1,0,0,0,0,0,1,13,8,0,0,0,0,0,0,11,7,0,0,5,14,4,7,15,2,0,0,1,10,16,15,5,0,0,5 +0,0,0,0,3,14,3,0,0,0,0,1,14,16,5,0,0,1,9,15,16,16,4,0,0,4,12,7,3,16,4,0,0,0,0,0,4,16,4,0,0,0,0,0,4,16,4,0,0,0,0,0,6,16,4,0,0,0,0,0,5,16,4,0,1 +0,0,3,12,8,1,0,0,0,0,14,16,16,15,1,0,0,3,15,2,1,12,4,0,0,6,9,0,0,7,8,0,0,7,8,0,0,5,8,0,0,4,12,0,0,9,6,0,0,0,15,11,9,16,2,0,0,0,3,11,15,7,0,0,0 +0,0,3,13,9,1,0,0,0,0,13,14,15,13,0,0,0,2,14,1,2,13,4,0,0,4,8,0,0,5,8,0,0,4,8,0,0,4,8,0,0,4,10,0,0,5,8,0,0,0,14,11,10,14,5,0,0,0,4,12,13,9,0,0,0 +0,3,15,16,6,0,0,0,0,11,15,12,15,0,0,0,0,2,2,2,16,4,0,0,0,0,0,0,16,4,0,0,0,0,0,5,16,1,0,0,0,0,0,11,15,4,1,0,0,1,10,16,16,16,11,0,0,4,16,14,12,8,3,0,2 +0,1,15,15,2,0,0,0,0,7,14,13,9,0,0,0,0,9,9,6,13,0,0,0,0,1,0,8,12,0,0,0,0,0,0,8,12,0,0,0,0,0,0,11,6,0,0,0,0,0,7,16,16,16,7,0,0,0,16,15,12,12,3,0,2 +0,0,0,8,16,16,16,6,0,0,6,14,5,8,16,2,0,0,7,4,0,6,12,0,0,0,0,0,0,12,6,0,0,0,0,11,16,16,10,0,0,0,0,12,16,8,0,0,0,0,0,6,16,0,0,0,0,0,0,12,9,0,0,0,7 +0,0,0,4,14,14,1,0,0,0,3,14,12,10,4,0,0,3,13,4,0,8,6,0,0,3,15,9,2,15,1,0,0,0,2,10,16,13,0,0,0,0,0,0,12,14,4,0,0,0,0,2,12,9,3,0,0,0,0,2,16,14,0,0,8 +0,5,16,15,3,0,0,0,0,11,14,11,11,0,0,0,0,8,11,4,16,0,0,0,0,0,0,4,13,0,0,0,0,0,0,8,12,0,0,0,0,0,0,13,9,5,1,0,0,1,11,16,16,16,10,0,0,5,16,14,8,6,1,0,2 +0,0,11,16,10,1,0,0,0,1,15,14,15,11,0,0,0,7,14,1,4,16,3,0,0,7,13,0,0,10,11,0,0,9,12,0,0,8,12,0,0,5,14,0,0,7,13,0,0,1,16,10,5,15,8,0,0,0,7,16,16,15,0,0,0 +0,0,0,1,8,16,2,0,0,0,5,13,16,16,0,0,0,11,16,15,12,16,0,0,0,3,8,1,8,16,0,0,0,0,0,0,8,16,3,0,0,0,0,0,8,16,4,0,0,0,0,0,7,16,7,0,0,0,0,0,10,16,8,0,1 +0,1,15,15,3,0,0,0,0,8,15,12,14,0,0,0,0,8,8,6,16,0,0,0,0,0,0,8,15,0,0,0,0,0,0,12,12,0,0,0,0,0,1,16,10,10,5,0,0,1,13,16,16,16,11,0,0,3,16,12,8,5,1,0,2 +0,0,0,13,1,0,0,0,0,0,10,12,1,0,0,0,0,0,14,3,0,0,0,0,0,4,14,0,4,5,0,0,0,7,16,4,7,14,7,0,0,3,14,0,0,4,12,0,0,0,10,10,4,10,12,0,0,0,1,9,16,14,2,0,6 +0,2,10,16,16,2,0,0,0,10,15,9,16,4,0,0,0,5,3,6,16,2,0,0,0,0,2,15,16,5,0,0,0,0,1,10,14,16,3,0,0,0,5,1,0,13,10,0,0,0,16,13,10,15,11,0,0,0,11,16,16,13,3,0,3 +0,0,8,13,11,1,0,0,0,4,15,5,12,6,0,0,0,0,0,0,10,4,0,0,0,0,0,6,16,4,0,0,0,0,0,5,13,15,3,0,0,0,1,0,0,7,12,0,0,1,15,8,7,12,12,0,0,0,6,14,16,15,5,0,3 +0,0,3,13,16,16,13,0,0,0,11,14,8,15,9,0,0,0,3,5,2,14,2,0,0,0,0,0,9,11,1,0,0,0,2,15,15,16,9,0,0,0,2,15,14,8,2,0,0,0,0,11,10,0,0,0,0,0,2,15,4,0,0,0,7 +0,1,11,16,16,4,0,0,0,7,16,8,14,11,0,0,0,0,0,10,16,6,0,0,0,0,0,15,16,6,0,0,0,0,0,0,8,16,2,0,0,1,5,0,0,14,9,0,0,4,16,10,11,16,6,0,0,1,13,16,16,10,0,0,3 
+0,1,12,16,16,9,0,0,0,2,14,5,9,14,0,0,0,0,0,1,12,11,0,0,0,0,0,12,16,6,0,0,0,0,0,2,8,15,5,0,0,0,2,0,0,11,9,0,0,4,14,4,4,14,6,0,0,0,15,16,16,11,1,0,3 +0,0,0,10,10,0,0,0,0,0,6,15,3,0,0,0,0,0,15,6,5,1,0,0,0,7,15,2,16,3,5,0,0,11,11,6,16,15,10,0,0,12,16,16,16,8,1,0,0,4,8,13,12,0,0,0,0,0,0,13,9,0,0,0,4 +0,0,2,12,1,0,0,0,0,0,6,13,0,0,0,0,0,0,13,4,0,0,0,0,0,0,16,1,0,0,0,0,0,5,16,16,16,10,1,0,0,2,15,6,1,10,8,0,0,0,10,10,0,9,9,0,0,0,2,12,16,15,1,0,6 +0,0,4,15,1,0,0,0,0,0,9,10,0,0,0,0,0,1,15,2,0,0,0,0,0,4,12,0,0,0,0,0,0,8,14,9,8,6,1,0,0,7,14,7,5,12,8,0,0,2,16,4,1,12,6,0,0,0,5,14,16,11,0,0,6 +0,0,2,16,8,0,0,0,0,0,8,15,5,0,0,0,0,2,16,7,0,0,0,0,0,3,16,7,5,1,0,0,0,7,16,16,16,15,3,0,0,5,16,12,4,10,14,0,0,0,14,13,5,10,15,0,0,0,3,13,16,16,7,0,6 +0,0,0,6,14,0,0,0,0,0,4,15,4,0,0,0,0,1,13,9,4,7,0,0,0,7,13,1,13,10,6,0,0,14,14,8,16,16,10,0,0,7,15,16,16,7,1,0,0,0,0,6,16,0,0,0,0,0,0,10,13,0,0,0,4 +0,1,10,13,9,2,0,0,0,2,12,4,12,10,0,0,0,0,14,5,11,11,0,0,0,0,2,8,8,15,2,0,0,0,0,0,0,12,5,0,0,0,0,0,0,4,12,0,0,0,4,1,0,8,11,0,0,0,11,16,16,14,3,0,9 +0,0,0,1,9,16,7,0,0,0,4,13,16,16,2,0,0,8,16,15,13,16,3,0,0,3,7,0,8,16,2,0,0,0,0,0,8,16,5,0,0,0,0,0,8,16,4,0,0,0,0,0,8,16,8,0,0,0,0,0,12,16,7,0,1 +0,0,7,8,6,8,4,0,0,0,12,13,12,12,5,0,0,0,13,0,0,0,0,0,0,5,15,12,9,2,0,0,0,3,8,7,8,15,2,0,0,0,0,0,0,6,8,0,0,0,6,0,1,13,4,0,0,0,7,15,15,5,0,0,5 +0,0,7,16,15,1,0,0,0,5,16,13,16,8,0,0,0,9,11,0,4,16,4,0,0,12,8,0,0,11,11,0,0,11,8,0,0,8,12,0,0,8,13,0,0,12,10,0,0,2,16,9,12,15,3,0,0,0,8,16,13,5,0,0,0 +0,4,15,14,11,2,0,0,0,7,10,1,11,11,0,0,0,1,13,4,13,16,2,0,0,0,3,11,7,9,6,0,0,0,0,0,0,5,8,0,0,0,0,0,0,5,8,0,0,3,12,0,3,13,5,0,0,2,13,16,16,8,0,0,9 +0,0,11,12,13,14,4,0,0,0,13,8,4,4,2,0,0,0,11,0,0,0,0,0,0,5,12,6,3,0,0,0,0,5,12,12,13,11,0,0,0,0,2,0,0,13,0,0,0,4,11,0,3,15,0,0,0,0,9,16,16,6,0,0,5 +0,2,15,13,1,0,0,0,0,13,12,14,7,0,0,0,0,8,6,6,13,0,0,0,0,0,0,5,15,0,0,0,0,0,0,11,10,0,0,0,0,0,2,16,5,0,0,0,0,0,11,16,13,14,12,0,0,3,16,14,8,8,7,0,2 +0,0,2,9,13,12,2,0,0,1,14,13,7,10,6,0,0,0,13,8,1,7,7,0,0,0,3,13,14,14,0,0,0,0,0,0,15,15,1,0,0,0,0,9,5,8,5,0,0,0,0,13,4,13,2,0,0,0,0,13,15,6,0,0,8 +0,4,16,16,3,0,0,0,0,11,14,11,11,0,0,0,0,11,7,2,16,1,0,0,0,0,0,3,16,0,0,0,0,0,0,10,12,0,0,0,0,0,3,16,8,5,3,0,0,0,12,16,16,16,16,0,0,3,16,15,8,7,4,0,2 +0,0,10,13,5,0,0,0,0,3,16,16,16,10,0,0,0,8,16,1,6,16,5,0,0,8,11,0,0,9,12,0,0,10,8,0,0,8,12,0,0,8,11,0,0,8,11,0,0,3,16,10,8,15,9,0,0,0,9,16,16,10,1,0,0 +0,0,2,11,10,1,0,0,0,0,10,13,14,13,0,0,0,0,13,0,0,14,5,0,0,3,9,0,0,9,6,0,0,5,9,0,0,5,8,0,0,6,12,0,0,8,4,0,0,0,14,11,5,14,1,0,0,0,3,13,14,5,0,0,0 +0,0,0,0,6,16,7,0,0,1,5,11,16,16,8,0,0,11,16,16,13,16,8,0,0,3,7,1,4,16,8,0,0,0,0,0,5,16,8,0,0,0,0,0,5,16,7,0,0,0,0,0,7,16,9,0,0,0,0,0,8,16,8,0,1 +0,0,2,13,16,9,0,0,0,0,12,12,7,16,3,0,0,1,14,3,0,16,4,0,0,0,0,4,10,16,6,0,0,0,0,13,16,16,9,0,0,0,0,5,13,1,0,0,0,0,0,11,9,0,0,0,0,0,1,16,4,0,0,0,7 +0,0,0,14,4,0,0,0,0,0,6,13,1,0,0,0,0,0,15,4,0,0,0,0,0,4,16,7,4,2,0,0,0,8,16,11,9,15,5,0,0,5,14,1,0,10,9,0,0,0,11,12,5,13,5,0,0,0,1,13,16,9,0,0,6 +0,1,9,15,13,4,0,0,0,5,12,4,10,6,0,0,0,0,0,3,15,2,0,0,0,0,0,15,13,1,0,0,0,0,0,2,9,14,2,0,0,0,0,0,0,11,8,0,0,0,10,6,4,11,7,0,0,0,8,15,16,9,1,0,3 +0,5,16,13,1,0,0,0,0,9,14,14,4,0,0,0,0,9,7,12,4,0,0,0,0,0,0,13,4,0,0,0,0,0,2,16,1,0,0,0,0,0,7,13,0,0,0,0,0,1,15,16,16,16,11,0,0,5,16,14,10,8,6,0,2 +0,0,0,3,12,12,2,0,0,0,7,15,16,16,0,0,0,4,15,9,14,16,3,0,0,2,0,0,14,16,0,0,0,0,0,0,14,16,0,0,0,0,0,0,15,13,0,0,0,0,0,0,16,14,1,0,0,0,0,3,16,13,2,0,1 +0,0,2,13,16,14,1,0,0,0,11,12,7,16,3,0,0,0,9,3,2,16,3,0,0,0,0,0,9,11,0,0,0,0,2,11,15,13,3,0,0,0,4,15,16,13,3,0,0,0,0,14,8,0,0,0,0,0,5,15,4,0,0,0,7 
+0,0,0,7,13,0,0,0,0,0,0,14,6,0,0,0,0,0,10,10,2,6,0,0,0,3,16,3,9,13,2,0,0,11,12,6,14,16,10,0,0,11,16,16,16,10,3,0,0,2,8,10,16,1,0,0,0,0,0,9,13,0,0,0,4 +0,0,0,13,3,0,0,0,0,0,6,15,0,0,0,0,0,0,12,7,0,0,0,0,0,0,16,10,7,1,0,0,0,6,16,12,13,16,6,0,0,0,16,4,0,6,12,0,0,0,12,10,2,11,9,0,0,0,1,13,16,15,3,0,6 +0,0,13,16,16,5,0,0,0,5,15,6,11,13,0,0,0,0,2,2,13,8,0,0,0,0,4,16,15,2,0,0,0,0,3,11,15,16,5,0,0,0,0,0,2,15,11,0,0,0,3,4,9,16,6,0,0,0,15,16,16,10,0,0,3 +0,0,0,0,9,16,4,0,0,0,0,5,15,16,2,0,0,3,12,16,16,14,0,0,0,10,16,15,16,15,0,0,0,1,4,0,16,13,0,0,0,0,0,0,15,13,0,0,0,0,0,0,12,16,12,0,0,0,0,0,12,16,6,0,1 +0,0,11,16,12,2,0,0,0,7,16,6,10,13,0,0,0,0,2,0,3,16,0,0,0,0,0,3,12,9,0,0,0,0,0,10,16,12,0,0,0,0,3,0,3,15,7,0,0,3,16,7,6,14,8,0,0,1,9,15,16,12,1,0,3 +0,0,5,13,14,5,0,0,0,2,15,6,11,15,1,0,0,1,16,5,8,16,4,0,0,0,4,12,9,13,8,0,0,0,0,0,0,8,8,0,0,0,0,0,0,11,7,0,0,0,7,7,5,15,2,0,0,0,5,15,16,7,0,0,9 +0,0,0,0,5,15,3,0,0,0,0,3,15,16,4,0,0,3,13,16,14,16,1,0,0,2,7,4,8,16,0,0,0,0,0,0,8,14,0,0,0,0,0,0,7,16,1,0,0,0,0,0,9,16,6,0,0,0,0,0,8,15,2,0,1 +0,0,2,13,16,13,4,0,0,0,9,11,9,16,7,0,0,2,15,2,2,15,2,0,0,3,3,0,8,13,2,0,0,0,1,13,16,16,10,0,0,0,0,11,13,5,1,0,0,0,0,11,6,0,0,0,0,0,3,15,2,0,0,0,7 +0,0,0,12,10,0,0,0,0,0,4,15,7,0,0,0,0,0,15,4,0,0,0,0,0,1,16,7,1,0,0,0,0,4,16,14,16,13,1,0,0,1,16,0,1,10,11,0,0,0,14,9,1,8,12,0,0,0,2,10,16,16,5,0,6 +0,0,3,12,16,9,0,0,0,0,12,9,13,16,9,0,0,3,16,5,0,8,12,0,0,0,9,16,10,13,2,0,0,0,0,4,16,12,0,0,0,0,0,11,9,16,0,0,0,0,1,15,2,12,0,0,0,0,2,16,16,6,0,0,8 +0,0,0,10,6,0,0,0,0,0,4,16,2,3,0,0,0,0,13,8,7,14,0,0,0,7,15,0,13,14,5,0,0,14,15,14,16,16,9,0,0,13,16,15,16,6,0,0,0,0,1,9,14,0,0,0,0,0,0,14,10,0,0,0,4 +0,0,5,12,13,3,0,0,0,4,16,9,8,12,0,0,0,2,3,0,5,11,0,0,0,0,0,3,11,10,0,0,0,0,0,6,14,15,3,0,0,0,0,0,0,7,11,0,0,0,10,6,4,9,11,0,0,0,6,16,16,14,2,0,3 +0,0,0,0,5,15,6,0,0,0,0,4,15,16,4,0,0,3,11,16,10,16,4,0,0,4,11,3,0,16,4,0,0,0,0,0,1,16,4,0,0,0,0,0,3,16,3,0,0,0,0,0,5,16,4,0,0,0,0,0,6,15,4,0,1 +0,0,0,14,8,0,0,0,0,0,3,16,3,0,0,0,0,0,12,9,9,7,0,0,0,6,15,1,14,11,6,0,0,13,14,8,16,16,7,0,0,8,16,16,16,3,0,0,0,0,1,11,14,0,0,0,0,0,0,14,14,0,0,0,4 +0,0,4,12,5,0,0,0,0,0,12,14,15,7,0,0,0,2,14,1,2,16,0,0,0,4,8,0,0,10,4,0,0,7,8,0,0,6,8,0,0,4,11,0,0,5,8,0,0,0,14,11,3,13,5,0,0,0,2,11,16,11,0,0,0 +0,0,9,12,12,13,7,0,0,0,15,5,5,4,2,0,0,4,15,10,4,0,0,0,0,2,11,11,15,11,0,0,0,0,0,0,0,10,5,0,0,0,0,0,0,7,7,0,0,5,11,4,5,14,1,0,0,0,9,16,13,3,0,0,5 +0,0,8,15,12,4,0,0,0,5,14,4,11,7,0,0,0,0,0,1,14,3,0,0,0,0,2,15,14,1,0,0,0,0,0,8,13,11,0,0,0,0,0,0,0,13,5,0,0,0,12,2,3,12,7,0,0,0,13,16,15,8,0,0,3 +0,0,0,10,8,0,0,0,0,0,6,14,3,0,0,0,0,0,11,8,0,0,0,0,0,3,16,8,2,0,0,0,0,6,16,16,16,15,3,0,0,0,16,2,0,8,12,0,0,0,11,12,5,11,12,0,0,0,1,8,16,15,2,0,6 +0,2,11,13,11,2,0,0,0,7,12,4,13,8,0,0,0,6,13,5,14,13,0,0,0,0,3,11,9,11,5,0,0,0,0,0,0,8,8,0,0,0,0,0,0,3,11,0,0,0,2,0,1,9,10,0,0,1,15,16,16,14,2,0,9 +0,0,0,7,15,0,0,0,0,0,6,15,8,0,0,0,0,0,13,9,0,0,0,0,0,2,16,5,4,1,0,0,0,5,16,16,16,12,3,0,0,1,15,4,1,8,12,0,0,0,8,14,5,5,15,0,0,0,0,6,16,16,11,0,6 +0,0,0,1,10,16,8,0,0,1,8,15,16,16,9,0,0,10,16,13,11,16,8,0,0,1,4,0,10,16,4,0,0,0,0,0,12,16,0,0,0,0,0,0,12,16,0,0,0,0,0,0,12,16,3,0,0,0,0,0,10,16,7,0,1 +0,0,3,13,16,15,6,0,0,0,13,13,9,16,10,0,0,0,16,9,0,14,6,0,0,0,14,2,6,15,0,0,0,0,0,5,15,15,6,0,0,0,0,16,15,10,2,0,0,0,0,13,11,0,0,0,0,0,4,16,7,0,0,0,7 +0,0,9,9,12,12,0,0,0,0,14,10,8,3,0,0,0,0,13,6,3,0,0,0,0,0,13,16,15,12,2,0,0,0,0,0,0,5,9,0,0,0,0,0,0,2,12,0,0,0,5,1,2,11,8,0,0,0,10,16,16,11,1,0,5 +0,0,0,9,10,0,0,0,0,0,3,16,4,0,0,0,0,0,12,8,2,7,0,0,0,4,16,3,13,13,3,0,0,14,13,8,16,16,10,0,0,14,16,16,16,9,1,0,0,2,4,9,16,1,0,0,0,0,0,9,15,2,0,0,4 
+0,0,2,15,5,0,0,0,0,0,9,15,0,1,0,0,0,0,14,9,9,9,0,0,0,8,16,4,16,13,12,0,0,13,16,16,16,15,7,0,0,3,8,13,12,2,0,0,0,0,0,14,10,0,0,0,0,0,3,16,7,0,0,0,4 +0,0,1,12,16,16,12,0,0,0,7,16,11,12,9,0,0,0,6,7,0,14,4,0,0,0,0,0,6,16,3,0,0,0,0,10,16,16,9,0,0,0,0,8,15,5,0,0,0,0,0,8,10,0,0,0,0,0,0,16,5,0,0,0,7 +0,3,15,16,8,0,0,0,0,14,13,10,16,2,0,0,0,5,3,2,16,2,0,0,0,0,0,3,16,2,0,0,0,0,0,9,12,0,0,0,0,0,1,16,8,0,2,0,0,0,8,16,14,16,15,0,0,2,16,16,15,12,9,0,2 +0,0,1,10,16,15,5,0,0,0,12,12,9,9,10,0,0,4,16,1,0,7,7,0,0,4,14,13,8,11,0,0,0,0,1,6,16,14,1,0,0,0,0,8,11,14,5,0,0,0,0,12,8,15,2,0,0,0,0,10,15,5,0,0,8 +0,1,13,16,15,5,0,0,0,4,16,7,14,12,0,0,0,3,12,2,11,10,0,0,0,0,0,0,14,8,0,0,0,0,0,3,16,4,0,0,0,0,1,11,13,0,0,0,0,0,9,16,14,16,7,0,0,1,16,16,15,12,5,0,2 +0,2,15,13,2,0,0,0,0,7,16,13,15,0,0,0,0,11,11,5,16,4,0,0,0,1,1,7,16,1,0,0,0,0,0,12,11,0,0,0,0,0,4,16,8,8,2,0,0,0,12,16,16,16,11,0,0,2,15,13,6,4,1,0,2 +0,0,15,14,15,9,0,0,0,0,12,10,7,6,4,0,0,1,14,2,0,0,0,0,0,5,16,12,10,4,0,0,0,4,11,8,11,15,5,0,0,0,0,0,0,13,7,0,0,4,16,10,11,14,1,0,0,1,10,16,15,4,0,0,5 +0,0,2,13,16,16,7,0,0,0,12,15,12,16,10,0,0,0,16,9,0,14,6,0,0,0,3,0,4,16,1,0,0,0,0,10,14,16,6,0,0,0,3,16,16,11,2,0,0,0,0,9,14,0,0,0,0,0,2,15,6,0,0,0,7 +0,0,10,8,8,4,0,0,0,8,15,12,14,14,0,0,0,9,11,0,10,16,4,0,0,1,9,12,10,12,8,0,0,0,0,0,0,8,8,0,0,0,0,0,0,10,7,0,0,0,11,6,4,15,4,0,0,0,13,16,16,7,0,0,9 +0,1,13,14,16,14,3,0,0,4,14,8,7,3,0,0,0,6,11,0,0,0,0,0,0,5,16,15,11,5,0,0,0,2,7,7,10,16,0,0,0,0,0,0,3,16,0,0,0,0,7,5,13,11,0,0,0,0,15,16,10,1,0,0,5 +0,0,0,13,9,0,0,0,0,0,4,15,3,0,0,0,0,0,11,9,6,4,0,0,0,5,16,3,13,8,1,0,0,14,16,9,16,16,9,0,0,12,16,16,16,11,3,0,0,0,4,12,14,0,0,0,0,0,0,12,10,0,0,0,4 +0,0,1,8,15,11,3,0,0,0,11,12,9,14,11,0,0,2,14,0,0,13,6,0,0,7,15,8,12,9,0,0,0,0,6,13,16,8,0,0,0,0,0,13,9,15,4,0,0,0,0,16,8,14,3,0,0,0,0,11,15,8,0,0,8 +0,0,2,11,14,12,3,0,0,0,14,14,9,15,8,0,0,5,12,0,5,15,2,0,0,3,16,11,15,3,0,0,0,0,1,14,16,4,0,0,0,0,0,15,5,15,0,0,0,0,1,16,10,11,0,0,0,0,0,15,13,2,0,0,8 +0,0,0,7,16,2,0,0,0,0,0,15,11,0,0,0,0,0,11,14,6,5,0,0,0,4,16,7,14,13,2,0,0,10,16,13,16,16,10,0,0,8,15,14,16,10,1,0,0,0,0,9,16,1,0,0,0,0,0,9,15,1,0,0,4 +0,0,6,16,16,11,0,0,0,1,16,8,8,16,6,0,0,0,8,16,14,16,11,0,0,0,1,4,3,10,8,0,0,0,0,0,0,9,7,0,0,0,0,0,1,14,1,0,0,0,4,7,11,9,0,0,0,0,9,16,10,1,0,0,9 +0,0,5,12,9,1,0,0,0,0,14,14,13,13,0,0,0,3,12,1,1,13,4,0,0,7,8,0,0,6,8,0,0,8,8,0,0,5,8,0,0,5,10,0,0,11,4,0,0,1,15,9,11,13,0,0,0,0,7,15,12,2,0,0,0 +0,0,2,9,15,12,5,0,0,0,12,11,11,12,11,0,0,1,16,1,0,8,11,0,0,3,15,12,10,15,2,0,0,0,1,11,16,9,0,0,0,0,0,11,15,12,0,0,0,0,0,12,15,8,0,0,0,0,0,10,13,2,0,0,8 +0,0,9,13,15,10,1,0,0,1,15,5,7,16,5,0,0,0,15,7,10,16,6,0,0,0,2,7,7,10,8,0,0,0,0,0,0,8,8,0,0,0,0,0,0,11,5,0,0,0,14,6,10,12,0,0,0,0,9,16,12,1,0,0,9 +0,0,1,10,11,2,0,0,0,0,12,12,8,15,4,0,0,6,11,0,1,12,7,0,0,4,16,7,15,12,0,0,0,0,5,15,15,3,0,0,0,0,0,14,7,14,0,0,0,0,4,12,7,10,0,0,0,0,1,16,16,3,0,0,8 +0,0,4,16,15,2,0,0,0,0,11,16,13,13,0,0,0,1,16,8,1,16,2,0,0,7,16,6,0,11,8,0,0,7,16,4,0,11,8,0,0,4,15,1,1,15,7,0,0,0,13,12,14,15,1,0,0,0,3,14,16,6,0,0,0 +0,0,0,14,10,0,0,0,0,0,3,16,16,3,0,0,0,0,7,16,16,7,0,0,0,0,12,16,16,5,0,0,0,0,4,15,16,6,0,0,0,0,0,13,16,0,0,0,0,0,0,14,12,0,0,0,0,0,0,10,10,0,0,0,1 +0,0,5,13,7,0,0,0,0,0,12,12,14,2,0,0,0,0,12,0,12,6,0,0,0,0,6,1,15,3,0,0,0,0,0,6,11,0,0,0,0,0,5,14,3,0,0,0,0,0,11,16,8,2,6,3,0,0,4,8,10,16,12,1,2 +0,0,4,16,16,9,0,0,0,0,1,9,10,16,1,0,0,0,0,5,15,16,1,0,0,0,0,10,16,5,0,0,0,0,0,2,16,6,0,0,0,0,0,0,10,10,0,0,0,0,11,8,12,14,1,0,0,0,5,13,16,15,0,0,3 
+0,0,1,15,6,0,0,0,0,0,8,16,2,0,0,0,0,0,15,11,0,7,4,0,0,8,16,10,6,16,9,0,0,6,16,16,16,16,4,0,0,0,5,11,16,13,0,0,0,0,0,12,15,3,0,0,0,0,0,15,11,0,0,0,4 +0,0,11,14,16,10,0,0,0,6,15,8,6,4,0,0,0,5,12,2,0,0,0,0,0,12,16,15,1,0,0,0,0,3,11,11,6,0,0,0,0,0,0,12,10,0,0,0,0,0,5,16,9,0,0,0,0,0,13,16,3,0,0,0,5 +0,0,0,0,13,1,0,0,0,0,0,7,15,3,0,0,0,0,0,16,11,2,0,0,0,0,4,16,16,14,1,0,0,0,5,16,8,8,10,0,0,0,9,16,4,0,15,0,0,0,2,9,11,13,14,0,0,0,0,0,10,16,10,0,6 +0,3,13,16,13,0,0,0,0,9,13,8,16,5,1,0,0,0,8,14,16,16,5,0,0,0,8,16,15,2,0,0,0,0,2,16,7,0,0,0,0,0,7,16,4,0,0,0,0,0,15,9,0,0,0,0,0,2,15,3,0,0,0,0,7 +0,0,1,8,13,1,0,0,0,0,6,16,16,9,0,0,0,1,15,16,16,10,0,0,0,6,15,14,6,0,0,0,0,3,7,6,6,0,0,0,0,1,10,2,11,0,0,0,0,0,4,11,13,2,0,0,0,0,0,5,12,5,0,0,8 +0,0,0,1,9,14,7,0,0,0,5,14,9,8,10,0,0,2,14,16,11,13,3,0,0,3,16,16,16,16,5,0,0,0,6,7,1,11,7,0,0,0,0,0,1,13,3,0,0,0,0,0,6,8,0,0,0,0,0,0,10,0,0,0,9 +0,0,2,16,12,1,0,0,0,0,11,15,13,11,0,0,0,2,16,9,0,14,2,0,0,4,16,12,0,11,5,0,0,4,16,6,0,12,7,0,0,0,15,1,1,15,9,0,0,0,10,9,10,15,2,0,0,0,2,13,16,8,0,0,0 +0,0,1,15,14,1,0,0,0,2,13,16,16,3,0,0,0,5,16,16,16,4,0,0,0,4,16,16,16,1,0,0,0,0,7,16,13,0,0,0,0,0,5,16,11,0,0,0,0,0,4,16,10,0,0,0,0,0,2,16,11,0,0,0,1 +0,0,2,10,12,2,0,0,0,0,11,16,13,10,0,0,0,0,9,7,2,15,0,0,0,0,3,2,3,15,0,0,0,0,0,0,10,10,0,0,0,0,2,7,16,7,0,0,0,0,9,16,16,16,16,3,0,0,3,13,9,8,4,0,2 +0,0,3,11,13,15,3,0,0,4,16,14,11,16,8,0,0,2,5,0,14,15,1,0,0,0,0,0,16,11,0,0,0,0,0,0,11,10,0,0,0,0,0,0,8,12,0,0,0,0,8,11,15,8,0,0,0,0,2,12,14,3,0,0,3 +0,0,1,12,7,0,0,0,0,0,8,14,1,0,0,0,0,0,15,7,0,3,5,0,0,5,16,0,4,15,4,0,0,5,16,16,16,15,2,0,0,0,11,12,16,8,0,0,0,0,0,5,16,3,0,0,0,0,0,13,10,0,0,0,4 +0,0,4,13,16,14,0,0,0,2,14,16,12,4,0,0,0,13,16,5,0,0,0,0,0,11,16,10,1,0,0,0,0,5,15,16,5,0,0,0,0,0,2,15,9,0,0,0,0,0,5,15,9,0,0,0,0,0,4,16,5,0,0,0,5 +0,0,0,6,9,0,0,0,0,0,2,15,8,0,0,0,0,0,4,16,5,0,0,0,0,0,9,12,7,12,2,0,0,0,9,16,15,13,6,0,0,0,13,14,2,13,6,0,0,0,14,15,13,16,4,0,0,0,1,10,16,10,0,0,6 +0,0,0,7,16,16,11,0,0,0,6,16,16,16,16,0,0,0,11,16,16,16,9,0,0,0,2,9,11,14,10,0,0,0,0,0,0,10,6,0,0,0,0,0,4,11,1,0,0,0,0,2,14,2,0,0,0,0,0,11,3,0,0,0,9 +0,0,5,16,14,1,0,0,0,0,14,14,14,9,0,0,0,1,15,6,1,12,1,0,0,3,16,1,0,10,4,0,0,5,16,5,0,11,2,0,0,2,16,2,3,16,0,0,0,0,11,13,14,12,0,0,0,0,3,15,15,5,0,0,0 +0,0,2,15,14,1,0,0,0,0,11,16,16,4,0,0,0,4,16,16,16,1,0,0,0,7,16,16,14,0,0,0,0,9,16,16,11,0,0,0,0,1,13,16,9,0,0,0,0,0,5,16,5,0,0,0,0,0,2,14,9,0,0,0,1 +0,0,2,12,10,0,0,0,0,0,11,14,14,9,0,0,0,3,15,1,10,9,0,0,0,2,7,0,10,8,0,0,0,0,0,2,16,2,0,0,0,0,0,10,16,6,0,0,0,0,7,16,13,14,16,4,0,0,4,15,0,0,5,4,2 +0,0,10,16,15,7,0,0,0,6,16,6,10,16,2,0,0,8,6,5,15,12,0,0,0,0,0,13,16,2,0,0,0,0,0,5,16,4,0,0,0,0,0,0,10,16,0,0,0,0,0,1,11,16,1,0,0,0,11,16,16,8,1,0,3 +0,0,3,15,3,0,0,0,0,0,14,16,1,0,0,0,0,8,16,6,0,8,5,0,0,12,14,0,5,16,8,0,0,12,15,14,16,13,0,0,0,1,11,14,16,5,0,0,0,0,4,15,10,0,0,0,0,0,4,16,5,0,0,0,4 +0,0,10,16,16,14,0,0,0,4,16,14,10,8,0,0,0,13,16,8,1,0,0,0,0,12,16,16,13,2,0,0,0,3,8,9,16,5,0,0,0,0,0,3,16,6,0,0,0,0,5,12,15,2,0,0,0,0,12,15,5,0,0,0,5 +0,0,0,1,13,1,0,0,0,0,1,14,15,0,0,0,0,0,3,16,6,0,0,0,0,0,6,12,0,0,0,0,0,0,9,14,10,3,0,0,0,0,8,16,16,16,1,0,0,0,8,16,15,16,5,0,0,0,0,2,11,11,1,0,6 +0,0,10,16,14,2,0,0,0,0,13,13,14,11,0,0,0,0,4,8,15,15,4,0,0,0,10,16,16,13,3,0,0,0,1,12,14,1,0,0,0,0,2,16,8,0,0,0,0,0,13,12,1,0,0,0,0,0,13,7,0,0,0,0,7 +0,0,5,12,16,15,0,0,0,3,16,8,9,16,0,0,0,4,16,14,16,7,0,0,0,5,16,15,5,0,0,0,0,8,15,6,0,0,0,0,0,4,8,14,0,0,0,0,0,3,10,16,1,0,0,0,0,0,6,15,2,0,0,0,8 +0,0,0,7,12,16,16,15,0,0,9,16,10,4,16,10,0,0,14,16,13,14,12,1,0,0,4,16,16,16,12,0,0,0,0,0,0,16,10,0,0,0,0,0,7,15,0,0,0,0,0,1,15,7,0,0,0,0,0,12,6,0,0,0,9 
+0,0,0,10,9,0,0,0,0,0,3,15,13,5,0,0,0,0,13,13,1,13,0,0,0,0,16,7,0,16,0,0,0,0,16,11,3,15,0,0,0,0,14,7,16,12,0,0,0,0,9,13,15,6,0,0,0,0,0,13,10,0,0,0,0 +0,0,0,4,11,15,15,2,0,1,10,16,13,14,14,2,0,5,16,16,16,16,14,0,0,0,9,8,8,15,9,0,0,0,0,0,1,16,2,0,0,0,0,0,10,10,0,0,0,0,0,1,16,6,0,0,0,0,0,3,16,2,0,0,9 +0,0,7,15,16,16,1,0,0,9,16,16,10,5,0,0,0,14,16,16,15,0,0,0,0,11,14,13,16,2,0,0,0,0,0,6,16,1,0,0,0,0,0,12,12,0,0,0,0,0,6,16,7,0,0,0,0,0,10,13,0,0,0,0,5 +0,8,13,15,16,16,8,0,0,9,16,16,13,11,5,0,0,6,16,12,0,0,0,0,0,1,14,14,0,0,0,0,0,0,8,16,2,0,0,0,0,0,8,15,0,0,0,0,0,3,14,11,0,0,0,0,0,9,16,6,0,0,0,0,5 +0,0,0,12,5,0,0,0,0,0,2,15,7,0,0,0,0,0,7,16,8,0,0,0,0,0,15,15,8,4,0,0,0,0,15,16,16,15,3,0,0,1,16,13,4,11,11,0,0,0,11,14,9,15,11,0,0,0,1,14,16,15,6,0,6 +0,0,5,15,14,13,2,0,0,0,12,15,9,7,1,0,0,5,16,8,0,0,0,0,0,9,16,16,7,0,0,0,0,6,12,14,7,0,0,0,0,0,0,11,6,0,0,0,0,0,2,16,3,0,0,0,0,0,7,14,0,0,0,0,5 +0,0,2,14,13,3,0,0,0,0,13,13,9,11,0,0,0,0,16,7,0,12,0,0,0,3,16,5,0,10,5,0,0,5,16,1,0,8,5,0,0,3,16,1,0,10,5,0,0,0,16,8,5,14,3,0,0,0,4,16,16,9,1,0,0 +0,0,0,4,12,16,11,0,0,0,15,12,0,5,9,0,0,4,16,5,6,15,3,0,0,3,15,16,14,1,0,0,0,0,3,10,16,2,0,0,0,0,0,0,14,2,0,0,0,0,0,0,12,0,0,0,0,0,0,4,12,0,0,0,9 +0,0,0,9,16,10,0,0,0,0,1,15,5,16,3,0,0,0,13,4,0,15,5,0,0,0,15,11,14,16,2,0,0,8,16,16,13,5,0,0,0,3,15,15,1,0,0,0,0,0,9,16,10,0,0,0,0,0,0,10,15,0,0,0,8 +0,0,6,14,16,15,1,0,0,9,16,12,9,16,3,0,0,12,16,11,14,13,0,0,0,7,15,16,14,0,0,0,0,0,2,8,16,5,0,0,0,0,0,6,16,4,0,0,0,0,2,13,12,0,0,0,0,0,9,16,1,0,0,0,9 +0,0,0,8,15,4,0,0,0,0,3,16,10,11,0,0,0,0,6,12,11,13,0,0,0,0,10,16,16,9,0,0,0,1,16,12,11,5,0,0,0,2,13,0,2,9,0,0,0,0,8,6,2,12,0,0,0,0,1,9,14,9,0,0,8 +0,0,1,13,8,0,0,0,0,0,7,16,3,0,0,0,0,0,14,9,0,7,7,0,0,3,16,3,2,15,9,0,0,9,16,8,12,15,0,0,0,6,16,16,16,7,0,0,0,0,3,12,15,1,0,0,0,0,0,13,9,0,0,0,4 +0,0,0,13,9,0,0,0,0,0,0,14,15,1,0,0,0,0,0,13,16,0,0,0,0,0,0,11,16,0,0,0,0,0,0,10,16,2,0,0,0,0,0,14,16,1,0,0,0,0,0,15,16,1,0,0,0,0,0,12,15,0,0,0,1 +0,0,8,15,16,10,0,0,0,0,7,10,10,15,0,0,0,0,0,0,7,13,0,0,0,0,0,4,14,12,1,0,0,3,16,16,16,12,4,0,0,1,10,16,7,0,0,0,0,0,8,9,0,0,0,0,0,0,13,1,0,0,0,0,7 +0,2,10,16,12,0,0,0,0,12,14,12,16,5,0,0,0,2,0,4,16,7,1,0,0,0,4,15,16,16,10,0,0,1,16,16,12,5,2,0,0,0,15,12,1,0,0,0,0,1,14,4,0,0,0,0,0,0,16,3,0,0,0,0,7 +0,2,16,16,16,12,0,0,0,1,9,5,12,16,2,0,0,0,0,6,15,14,2,0,0,0,1,16,16,1,0,0,0,0,0,12,16,1,0,0,0,0,0,6,16,6,0,0,0,0,0,12,16,5,0,0,0,1,16,16,13,1,0,0,3 +0,4,16,16,16,15,3,0,0,11,16,14,8,8,1,0,0,12,14,3,0,0,0,0,0,8,16,4,0,0,0,0,0,2,16,9,0,0,0,0,0,0,10,12,0,0,0,0,0,1,14,11,0,0,0,0,0,5,16,3,0,0,0,0,5 +0,0,0,13,14,0,0,0,0,0,0,13,16,3,0,0,0,0,2,16,15,3,0,0,0,0,4,16,14,0,0,0,0,0,3,16,11,0,0,0,0,0,8,16,10,0,0,0,0,0,6,16,7,0,0,0,0,0,1,11,9,0,0,0,1 +0,0,0,16,12,1,0,0,0,0,6,16,14,7,0,0,0,0,14,15,1,11,0,0,0,0,16,15,0,14,1,0,0,1,16,10,0,14,2,0,0,0,15,13,3,15,3,0,0,0,9,16,16,15,0,0,0,0,0,13,16,8,0,0,0 +0,0,9,13,11,1,0,0,0,0,6,4,9,14,1,0,0,0,1,5,0,11,4,0,0,0,13,14,0,7,5,0,0,3,14,1,0,10,4,0,0,3,14,0,2,15,1,0,0,2,13,8,12,11,0,0,0,0,8,14,10,1,0,0,0 +0,0,5,16,15,5,0,0,0,0,8,13,9,15,0,0,0,0,0,4,0,15,5,0,0,0,0,0,0,12,6,0,0,0,0,0,0,15,4,0,0,0,11,10,10,15,0,0,0,0,16,16,16,15,13,4,0,0,7,16,13,10,8,3,2 +0,3,12,16,9,0,0,0,0,13,15,8,15,2,0,0,0,11,6,0,12,4,0,0,0,1,0,0,15,3,0,0,0,0,0,4,16,1,0,0,0,0,0,12,11,0,0,0,0,0,11,16,10,4,6,1,0,2,15,16,16,16,16,3,2 +0,2,15,16,5,0,0,0,0,0,4,11,9,0,0,0,0,0,0,13,7,2,1,0,0,1,8,16,14,16,10,0,0,10,16,15,7,1,0,0,0,0,14,8,0,0,0,0,0,1,16,3,0,0,0,0,0,2,15,1,0,0,0,0,7 
+0,0,7,15,15,6,0,0,0,4,16,16,11,15,0,0,0,7,16,10,10,16,1,0,0,3,11,16,16,6,0,0,0,1,15,16,10,0,0,0,0,5,16,5,14,0,0,0,0,1,13,3,14,1,0,0,0,0,5,15,15,2,0,0,8 +0,0,1,15,13,2,0,0,0,0,0,8,14,10,0,0,0,0,0,0,5,15,1,0,0,0,0,0,2,16,5,0,0,0,0,0,8,15,1,0,0,0,3,10,13,13,0,0,0,0,8,16,16,12,4,0,0,0,1,13,15,14,16,7,2 +0,0,0,12,11,1,0,0,0,0,3,16,13,10,0,0,0,0,8,9,1,12,0,0,0,2,16,9,0,10,5,0,0,4,16,8,0,7,8,0,0,1,16,3,0,10,7,0,0,0,8,13,9,16,6,0,0,0,1,10,16,13,1,0,0 +0,0,0,15,11,0,0,0,0,0,6,16,16,2,0,0,0,0,10,16,16,1,0,0,0,2,16,16,16,3,0,0,0,7,16,16,14,0,0,0,0,0,3,15,10,0,0,0,0,0,0,15,7,0,0,0,0,0,0,14,4,0,0,0,1 +0,2,13,16,12,0,0,0,0,9,15,10,16,3,0,0,0,5,7,5,16,3,0,0,0,0,0,10,14,0,0,0,0,0,5,16,7,0,0,0,0,0,14,16,1,3,7,1,0,3,16,12,10,16,11,1,0,0,13,16,13,7,1,0,2 +0,0,0,8,7,0,0,0,0,0,2,16,6,0,0,0,0,0,5,16,2,0,0,0,0,0,11,15,12,9,0,0,0,0,11,16,13,9,8,0,0,0,11,16,2,8,9,0,0,0,3,16,5,12,10,0,0,0,0,6,16,14,2,0,6 +0,0,11,15,16,10,0,0,0,8,16,8,15,16,0,0,0,5,6,10,16,8,0,0,0,0,4,16,11,1,0,0,0,0,2,15,9,0,0,0,0,0,0,9,16,2,0,0,0,0,0,8,16,5,0,0,0,0,13,16,15,0,0,0,3 +0,2,11,14,14,9,0,0,0,3,10,7,10,16,3,0,0,0,0,4,13,12,0,0,0,0,0,13,15,2,0,0,0,0,0,15,9,0,0,0,0,0,0,9,15,0,0,0,0,0,1,13,9,0,0,0,0,1,15,13,1,0,0,0,3 +0,2,13,16,15,2,0,0,0,15,14,7,16,5,0,0,0,10,1,2,16,4,0,0,0,0,1,11,16,15,8,0,0,0,15,16,13,8,2,0,0,0,10,14,0,0,0,0,0,0,11,10,0,0,0,0,0,0,15,3,0,0,0,0,7 +0,0,9,16,16,10,0,0,0,0,9,9,9,15,0,0,0,0,0,0,6,14,0,0,0,0,0,2,15,7,0,0,0,0,1,14,16,4,0,0,0,0,5,16,16,8,0,0,0,0,0,6,16,4,0,0,0,0,11,16,12,0,0,0,3 +0,0,8,16,16,13,0,0,0,0,10,11,9,16,2,0,0,0,0,4,16,12,0,0,0,0,2,16,15,1,0,0,0,0,2,15,11,0,0,0,0,0,0,4,16,3,0,0,0,3,12,2,14,4,0,0,0,0,9,16,16,5,0,0,3 +0,0,5,16,1,0,0,0,0,0,12,12,0,0,0,0,0,2,15,8,0,6,5,0,0,9,16,6,12,16,9,0,0,7,16,16,16,15,1,0,0,0,3,10,16,6,0,0,0,0,1,14,10,0,0,0,0,0,5,16,2,0,0,0,4 +0,0,0,11,8,0,0,0,0,0,5,16,7,0,0,0,0,0,10,14,0,0,0,0,0,0,12,9,1,3,0,0,0,0,14,14,15,16,7,0,0,0,10,16,15,12,12,0,0,0,6,16,13,14,12,0,0,0,0,9,15,15,3,0,6 +0,0,0,6,12,0,0,0,0,0,0,13,13,0,0,0,0,0,7,16,2,0,0,0,0,0,10,12,0,2,0,0,0,0,13,14,16,14,0,0,0,0,11,16,14,13,6,0,0,0,5,13,9,16,5,0,0,0,0,6,15,12,1,0,6 +0,0,0,9,11,0,0,0,0,0,1,16,11,0,0,0,0,0,6,16,1,0,0,0,0,0,11,11,6,7,1,0,0,0,13,14,15,16,8,0,0,0,12,13,5,5,13,0,0,0,6,14,8,15,12,0,0,0,0,10,16,12,2,0,6 +0,0,10,14,0,0,0,0,0,1,16,7,1,7,0,0,0,0,15,7,12,16,2,0,0,0,10,16,16,4,0,0,0,0,0,9,14,0,0,0,0,0,0,14,10,0,0,0,0,0,5,16,2,0,0,0,0,0,11,12,0,0,0,0,4 +0,0,0,2,9,13,6,0,0,0,11,15,8,9,10,0,0,3,16,10,4,13,5,0,0,1,15,16,15,15,7,0,0,0,0,0,1,14,1,0,0,0,0,0,11,6,0,0,0,0,0,1,13,0,0,0,0,0,0,5,4,0,0,0,9 +0,0,0,14,7,0,0,0,0,0,0,15,15,1,0,0,0,0,2,16,15,1,0,0,0,0,6,16,15,0,0,0,0,0,9,16,13,0,0,0,0,0,8,16,15,1,0,0,0,0,4,16,16,6,0,0,0,0,0,13,12,0,0,0,1 +0,0,12,16,16,15,3,0,0,4,16,16,6,2,1,0,0,14,16,4,0,0,0,0,0,9,16,16,5,0,0,0,0,0,7,15,8,0,0,0,0,0,0,12,11,0,0,0,0,0,5,16,7,0,0,0,0,0,14,15,0,0,0,0,5 +0,0,0,9,8,1,0,0,0,0,5,16,16,8,0,0,0,4,16,15,1,15,0,0,0,6,16,12,0,12,1,0,0,5,16,11,0,11,6,0,0,1,15,8,4,15,6,0,0,0,5,16,16,15,0,0,0,0,0,7,14,9,0,0,0 +0,0,1,13,10,1,0,0,0,0,12,6,7,10,0,0,0,0,10,10,11,15,0,0,0,0,1,14,16,16,5,0,0,0,0,0,0,10,10,0,0,0,0,0,0,5,11,0,0,0,2,4,4,14,11,0,0,0,2,11,15,16,5,0,9 +0,0,5,16,16,3,0,0,0,0,9,16,7,0,0,0,0,0,12,15,2,0,0,0,0,1,15,16,15,4,0,0,0,0,9,13,16,9,0,0,0,0,0,0,14,12,0,0,0,0,5,12,16,8,0,0,0,0,3,15,15,1,0,0,5 +0,0,6,16,12,1,0,0,0,0,5,16,13,10,0,0,0,0,0,5,5,15,0,0,0,0,0,0,8,15,0,0,0,0,0,0,13,13,0,0,0,0,0,6,16,9,4,1,0,0,3,16,16,16,16,10,0,0,5,16,11,9,6,2,2 +0,0,0,10,16,7,0,0,0,0,2,15,4,14,2,0,0,0,0,13,8,16,0,0,0,0,0,10,16,14,1,0,0,0,8,16,16,7,0,0,0,1,15,6,8,12,0,0,0,1,13,5,12,9,0,0,0,0,1,11,15,6,0,0,8 
+0,0,4,15,16,6,0,0,0,0,16,12,8,15,0,0,0,7,16,4,0,11,5,0,0,10,15,0,0,8,9,0,0,10,14,0,0,8,11,0,0,6,16,4,0,11,9,0,0,1,15,7,8,16,5,0,0,0,3,14,16,10,1,0,0 +0,0,0,12,9,0,0,0,0,0,2,16,16,0,0,0,0,0,3,16,16,1,0,0,0,0,4,16,13,0,0,0,0,0,3,16,11,0,0,0,0,0,5,16,10,0,0,0,0,0,2,16,10,0,0,0,0,0,0,11,13,0,0,0,1 +0,0,7,14,16,5,0,0,0,0,16,12,15,12,0,0,0,0,3,0,14,9,0,0,0,0,5,12,16,15,10,0,0,8,16,16,13,6,0,0,0,3,9,16,6,0,0,0,0,0,10,12,1,0,0,0,0,0,12,5,0,0,0,0,7 +0,0,0,8,7,0,0,0,0,0,4,16,11,0,0,0,0,0,9,16,1,0,0,0,0,0,11,14,11,13,2,0,0,0,13,16,14,14,10,0,0,0,10,15,1,5,13,0,0,0,6,16,8,14,12,0,0,0,0,5,14,16,4,0,6 +0,1,11,16,16,12,0,0,0,8,16,13,16,16,3,0,0,1,5,7,16,14,0,0,0,0,0,11,16,4,0,0,0,0,0,2,15,9,0,0,0,0,0,0,11,13,0,0,0,0,3,7,15,14,0,0,0,0,14,16,16,6,0,0,3 +0,2,13,16,7,0,0,0,0,12,13,14,13,0,0,0,0,2,0,8,12,0,0,0,0,0,0,11,9,0,0,0,0,0,0,13,5,0,0,0,0,0,8,15,2,0,0,0,0,0,16,16,16,9,2,0,0,1,16,14,13,16,9,0,2 +0,0,0,11,9,0,0,0,0,0,0,11,14,0,0,0,0,0,0,11,13,0,0,0,0,0,0,15,13,0,0,0,0,0,0,13,13,0,0,0,0,0,0,13,9,0,0,0,0,0,0,10,10,0,0,0,0,0,0,9,11,0,0,0,1 +0,1,11,16,11,0,0,0,0,10,14,11,16,0,0,0,0,14,5,6,15,0,0,0,0,3,1,11,14,3,1,0,0,2,13,16,16,16,9,0,0,2,14,16,5,4,2,0,0,0,11,11,0,0,0,0,0,0,16,3,0,0,0,0,7 +0,0,5,13,0,0,0,0,0,0,12,9,4,13,0,0,0,0,16,5,11,13,0,0,0,0,15,13,15,7,0,0,0,0,4,14,15,0,0,0,0,0,0,14,8,0,0,0,0,0,2,16,4,0,0,0,0,0,6,16,1,0,0,0,4 +0,0,0,8,10,0,0,0,0,0,3,15,5,0,0,0,0,0,7,13,0,0,0,0,0,0,7,14,5,1,0,0,0,0,6,16,16,16,3,0,0,0,6,16,7,13,8,0,0,0,2,15,7,15,7,0,0,0,0,7,15,12,0,0,6 +0,0,6,16,15,5,0,0,0,0,2,13,14,13,0,0,0,0,0,2,14,14,1,0,0,0,0,3,16,10,0,0,0,0,0,0,14,10,0,0,0,0,0,0,10,14,0,0,0,0,8,7,12,16,0,0,0,0,6,16,16,12,1,0,3 +0,0,0,12,7,0,0,0,0,0,0,14,15,0,0,0,0,0,0,14,16,1,0,0,0,0,0,15,16,2,0,0,0,0,0,13,16,1,0,0,0,0,0,14,16,1,0,0,0,0,0,14,16,1,0,0,0,0,0,6,16,2,0,0,1 +0,0,6,15,15,4,0,0,0,6,16,16,16,14,0,0,0,7,16,14,16,13,0,0,0,0,3,7,16,6,0,0,0,0,0,2,16,9,0,0,0,0,0,0,14,15,0,0,0,0,7,10,16,14,0,0,0,0,7,15,15,4,0,0,3 +0,0,0,8,10,14,3,0,0,1,13,13,9,12,8,0,0,6,16,8,8,16,4,0,0,5,16,16,16,9,0,0,0,0,5,8,14,12,0,0,0,0,0,3,16,5,0,0,0,0,0,15,8,0,0,0,0,0,1,12,2,0,0,0,9 +0,0,0,16,11,0,0,0,0,0,2,16,16,2,0,0,0,0,3,16,16,6,0,0,0,0,3,16,15,2,0,0,0,0,2,16,16,2,0,0,0,0,4,16,15,0,0,0,0,0,1,16,15,1,0,0,0,0,0,12,16,2,0,0,1 +0,4,15,16,15,4,0,0,0,11,16,14,15,16,0,0,0,3,3,0,16,14,2,0,0,0,9,16,16,16,8,0,0,0,15,16,11,1,0,0,0,0,11,13,1,0,0,0,0,4,16,5,0,0,0,0,0,4,15,0,0,0,0,0,7 +0,0,0,9,8,0,0,0,0,0,1,16,2,0,0,0,0,0,6,14,0,0,0,0,0,0,9,11,0,3,0,0,0,0,13,8,13,13,10,0,0,0,12,16,8,0,13,1,0,0,6,16,5,9,13,0,0,0,0,8,15,14,4,0,6 +0,0,0,9,15,9,0,0,0,0,8,15,5,12,2,0,0,0,15,15,3,13,3,0,0,0,11,16,16,13,0,0,0,4,16,10,15,0,0,0,0,3,12,0,8,7,0,0,0,0,12,8,10,11,0,0,0,0,0,9,13,4,0,0,8 +0,0,0,14,5,0,0,0,0,0,5,16,5,0,0,0,0,0,13,12,0,1,3,0,0,4,16,5,1,15,11,0,0,10,15,4,13,16,3,0,0,8,16,16,16,10,0,0,0,2,11,12,15,1,0,0,0,0,0,16,9,0,0,0,4 +0,0,7,15,16,10,0,0,0,0,14,9,10,16,1,0,0,0,2,5,15,14,0,0,0,0,0,11,16,5,0,0,0,0,0,2,16,8,0,0,0,0,0,0,10,13,0,0,0,0,11,9,15,16,1,0,0,0,8,16,16,12,0,0,3 +0,0,0,14,9,0,0,0,0,0,0,14,13,0,0,0,0,0,0,11,16,2,0,0,0,0,0,14,16,5,0,0,0,0,0,13,16,4,0,0,0,0,0,16,16,4,0,0,0,0,1,16,16,0,0,0,0,0,0,14,12,0,0,0,1 +0,0,0,15,5,0,0,0,0,0,4,16,7,0,0,0,0,1,13,16,0,9,2,0,0,5,16,11,5,16,9,0,0,7,16,14,16,16,7,0,0,1,11,15,16,10,0,0,0,0,0,13,16,3,0,0,0,0,1,16,11,0,0,0,4 +0,0,0,10,13,3,0,0,0,0,8,16,14,12,0,0,0,3,16,13,0,14,1,0,0,5,16,6,0,14,5,0,0,6,16,0,0,15,4,0,0,2,13,1,5,16,4,0,0,0,10,16,16,14,1,0,0,0,2,11,13,6,0,0,0 
+0,0,7,12,15,6,0,0,0,14,16,15,6,0,0,0,0,16,16,13,0,0,0,0,0,10,16,14,8,0,0,0,0,0,2,8,13,0,0,0,0,0,0,10,15,0,0,0,0,0,4,13,15,0,0,0,0,0,7,16,7,0,0,0,5 +0,0,8,15,16,6,0,0,0,0,13,13,13,13,0,0,0,0,2,6,16,9,0,0,0,0,0,3,16,2,0,0,0,0,0,0,10,10,0,0,0,0,0,0,5,15,0,0,0,0,7,4,11,16,1,0,0,0,7,16,16,8,0,0,3 +0,0,0,5,13,1,0,0,0,0,1,15,12,0,0,0,0,0,4,16,5,0,0,0,0,0,11,14,3,0,0,0,0,0,11,16,16,4,0,0,0,0,11,11,5,13,0,0,0,0,6,13,7,15,0,0,0,0,0,5,14,9,0,0,6 +0,0,7,16,15,4,0,0,0,0,11,16,15,12,0,0,0,0,5,16,16,14,0,0,0,0,0,3,8,15,5,0,0,0,0,0,0,10,10,0,0,0,0,0,0,11,11,0,0,0,1,0,5,15,9,0,0,0,6,15,16,16,2,0,9 +0,0,1,13,16,2,0,0,0,0,8,16,12,1,0,0,0,0,14,16,2,0,0,0,0,0,15,14,9,1,0,0,0,1,16,16,16,10,0,0,0,0,13,15,13,15,0,0,0,0,8,16,14,14,1,0,0,0,1,11,16,8,0,0,6 +0,0,0,12,10,0,0,0,0,0,0,14,16,2,0,0,0,0,0,13,16,0,0,0,0,0,0,11,16,3,0,0,0,0,0,10,16,3,0,0,0,0,0,11,16,2,0,0,0,0,0,14,16,2,0,0,0,0,0,11,14,0,0,0,1 +0,0,3,16,15,5,0,0,0,0,7,16,15,14,0,0,0,0,0,1,7,16,4,0,0,2,6,9,14,16,5,0,0,9,16,16,16,12,1,0,0,0,9,15,16,4,0,0,0,0,6,16,11,0,0,0,0,0,2,15,4,0,0,0,7 +0,0,15,13,13,13,0,0,0,0,16,16,11,3,0,0,0,0,12,13,0,0,0,0,0,0,5,16,3,0,0,0,0,0,0,11,10,0,0,0,0,0,0,10,14,0,0,0,0,0,9,16,10,0,0,0,0,0,11,15,1,0,0,0,5 +0,0,0,13,8,0,0,0,0,0,2,15,1,0,0,0,0,0,11,10,0,8,2,0,0,4,16,5,11,16,8,0,0,7,16,16,16,16,3,0,0,2,13,9,16,12,0,0,0,0,0,7,16,6,0,0,0,0,0,13,15,1,0,0,4 +0,0,4,16,1,0,0,0,0,0,12,13,0,1,1,0,0,3,16,8,5,16,6,0,0,9,16,6,14,16,2,0,0,11,16,16,16,9,0,0,0,0,10,15,15,2,0,0,0,0,3,16,9,0,0,0,0,0,5,16,3,0,0,0,4 +0,0,5,14,14,2,0,0,0,2,16,16,16,7,0,0,0,0,7,4,16,12,0,0,0,0,1,9,16,16,8,0,0,3,15,16,16,10,2,0,0,4,16,16,11,0,0,0,0,0,9,16,5,0,0,0,0,0,9,13,0,0,0,0,7 +0,0,7,16,5,0,0,0,0,0,16,16,11,0,0,0,0,0,10,13,16,1,0,0,0,0,0,13,15,0,0,0,0,0,0,14,13,0,0,0,0,0,12,16,6,4,8,1,0,0,14,16,16,16,16,4,0,0,7,16,15,7,3,0,2 +0,0,8,16,16,12,0,0,0,0,16,13,10,16,3,0,0,0,12,1,2,16,4,0,0,0,0,0,6,15,0,0,0,0,0,1,15,10,0,0,0,0,0,9,15,2,0,0,0,1,11,16,12,8,8,1,0,0,11,16,16,16,12,1,2 +0,0,3,9,14,9,0,0,0,5,16,14,5,0,0,0,0,12,11,3,0,0,0,0,0,13,16,12,1,0,0,0,0,4,11,13,8,0,0,0,0,0,0,7,11,0,0,0,0,0,1,12,12,0,0,0,0,0,2,15,7,0,0,0,5 +0,7,12,14,16,8,0,0,0,8,16,14,15,11,0,0,0,2,11,2,16,6,0,0,0,0,0,9,16,8,5,0,0,8,13,16,16,12,5,0,0,7,16,12,3,0,0,0,0,4,16,4,0,0,0,0,0,9,12,0,0,0,0,0,7 +0,0,7,16,16,8,0,0,0,0,14,12,11,14,0,0,0,0,11,15,16,12,0,0,0,0,5,15,15,4,0,0,0,0,0,3,12,14,0,0,0,1,10,0,7,15,0,0,0,1,14,6,13,12,0,0,0,0,7,16,16,11,0,0,9 +0,0,3,14,16,14,0,0,0,3,12,16,8,1,0,0,0,15,16,12,0,0,0,0,0,10,16,16,8,0,0,0,0,0,7,11,15,1,0,0,0,0,0,7,16,1,0,0,0,0,8,15,16,2,0,0,0,0,4,16,9,0,0,0,5 +0,0,11,7,0,0,0,0,0,5,16,3,2,14,3,0,0,9,15,0,12,15,0,0,0,6,16,15,16,5,0,0,0,0,6,15,11,0,0,0,0,0,1,16,4,0,0,0,0,0,9,12,0,0,0,0,0,0,13,10,0,0,0,0,4 +0,0,0,11,8,0,0,0,0,0,6,15,2,0,0,0,0,0,13,8,0,4,7,0,0,5,16,2,2,13,9,0,0,10,15,12,15,14,1,0,0,6,16,9,16,5,0,0,0,0,0,6,14,1,0,0,0,0,0,14,7,0,0,0,4 +0,0,7,15,16,12,0,0,0,12,16,11,16,13,0,0,0,15,16,16,14,5,0,0,0,8,16,12,0,0,0,0,0,0,2,12,9,0,0,0,0,0,0,9,13,0,0,0,0,0,2,16,8,0,0,0,0,0,10,12,1,0,0,0,9 +0,0,1,13,15,8,0,0,0,0,11,14,8,15,0,0,0,2,16,3,0,13,2,0,0,5,15,0,0,10,5,0,0,3,10,0,0,10,5,0,0,3,13,0,1,15,3,0,0,0,12,10,11,11,0,0,0,0,1,12,11,4,0,0,0 +0,0,2,10,11,1,0,0,0,0,5,14,3,12,0,0,0,0,6,13,3,15,0,0,0,0,8,15,15,9,0,0,0,2,16,11,9,0,0,0,0,2,14,2,10,0,0,0,0,0,7,6,13,0,0,0,0,0,1,15,6,0,0,0,8 +0,0,1,10,13,12,5,0,0,0,13,13,4,4,12,0,0,3,16,7,4,12,6,0,0,2,15,16,15,5,0,0,0,0,1,9,16,0,0,0,0,0,0,2,15,0,0,0,0,0,0,11,6,0,0,0,0,0,0,15,0,0,0,0,9 
+0,0,1,9,15,10,1,0,0,0,2,12,8,12,4,0,0,0,0,11,1,11,3,0,0,0,0,8,8,15,4,0,0,2,15,16,16,7,0,0,0,2,15,5,11,0,0,0,0,0,14,4,12,0,0,0,0,0,3,14,9,0,0,0,8 +0,0,8,12,12,1,0,0,0,3,16,16,14,9,0,0,0,6,15,9,3,12,2,0,0,7,9,0,0,9,7,0,0,7,8,0,0,7,8,0,0,5,10,0,0,7,9,0,0,0,14,13,10,16,6,0,0,0,5,13,11,4,0,0,0 +0,0,4,14,16,14,1,0,0,2,14,16,16,8,0,0,0,4,16,16,14,3,0,0,0,4,16,16,10,0,0,0,0,0,12,13,12,0,0,0,0,1,14,16,15,3,0,0,0,0,10,16,16,12,0,0,0,0,2,9,15,16,8,0,1 +0,1,12,16,5,0,0,0,0,11,16,16,13,0,0,0,0,7,6,5,14,2,0,0,0,0,0,0,12,3,0,0,0,0,0,4,13,0,0,0,0,0,1,13,5,0,0,0,0,0,10,16,10,8,4,0,0,0,13,15,16,12,7,0,2 +0,1,6,12,15,5,0,0,0,7,14,14,16,7,0,0,0,0,2,14,10,0,0,0,0,0,11,16,2,0,0,0,0,0,2,11,16,12,0,0,0,0,0,0,4,14,7,0,0,0,2,4,5,14,7,0,0,0,6,16,14,8,0,0,3 +0,0,0,0,8,10,0,0,0,0,0,0,13,6,0,0,0,0,0,5,13,0,0,0,0,0,2,14,3,10,10,0,0,1,14,15,10,16,6,0,0,14,14,12,15,16,2,0,0,3,0,0,8,14,0,0,0,0,0,0,5,10,0,0,4 +0,0,9,15,16,15,2,0,0,4,16,5,3,1,0,0,0,4,14,0,0,0,0,0,0,5,14,9,14,15,2,0,0,5,13,9,8,15,8,0,0,0,0,0,0,13,5,0,0,0,0,5,11,14,0,0,0,0,11,12,7,1,0,0,5 +0,0,1,13,2,0,0,0,0,0,9,14,2,0,0,0,0,3,16,7,0,0,0,0,0,3,16,7,0,0,0,0,0,5,16,16,8,1,0,0,0,3,15,11,14,13,2,0,0,0,10,16,10,16,15,0,0,0,1,10,14,12,7,0,6 +0,0,7,16,16,15,5,0,0,0,9,12,15,16,7,0,0,0,0,0,9,15,1,0,0,0,7,12,15,15,8,0,0,1,16,16,16,13,5,0,0,0,0,14,10,0,0,0,0,0,5,16,2,0,0,0,0,0,8,14,1,0,0,0,7 +0,1,11,10,8,1,1,0,0,3,15,11,3,12,6,0,0,0,4,16,16,12,0,0,0,0,0,11,16,5,0,0,0,0,5,13,12,12,0,0,0,0,13,7,1,16,4,0,0,1,15,4,7,14,0,0,0,1,14,14,8,1,0,0,8 +0,1,8,13,15,5,0,0,0,8,14,7,16,14,0,0,0,10,12,1,10,16,2,0,0,2,12,14,15,16,4,0,0,0,0,4,4,15,8,0,0,0,0,0,0,9,9,0,0,0,9,7,1,10,12,0,0,0,6,13,16,15,6,0,9 +0,0,4,15,8,0,0,0,0,0,15,14,15,5,0,0,0,8,16,5,3,14,0,0,0,5,11,0,0,10,5,0,0,5,9,0,0,8,8,0,0,0,14,0,0,10,8,0,0,0,14,13,13,16,1,0,0,0,2,14,14,7,0,0,0 +0,0,0,8,12,9,2,0,0,0,5,16,16,16,4,0,0,0,9,16,16,11,0,0,0,3,16,16,16,5,0,0,0,4,16,16,16,4,0,0,0,1,15,16,16,3,0,0,0,0,8,16,16,9,0,0,0,0,0,9,12,6,0,0,1 +0,0,8,16,8,0,0,0,0,0,10,16,16,3,0,0,0,0,2,4,14,4,0,0,0,0,0,8,14,0,0,0,0,0,5,16,8,0,0,0,0,2,15,14,7,6,3,0,0,5,16,15,16,15,3,0,0,0,10,13,8,2,0,0,2 +0,1,8,13,16,13,0,0,0,1,13,12,15,16,0,0,0,0,0,5,15,8,0,0,0,0,5,15,5,0,0,0,0,0,6,16,11,1,0,0,0,0,2,13,16,11,0,0,0,0,0,2,14,16,4,0,0,0,13,16,15,7,0,0,3 +0,0,0,6,12,0,0,0,0,0,2,15,5,0,0,0,0,0,12,8,0,2,6,0,0,4,15,0,1,13,8,0,0,6,16,2,6,14,1,0,0,8,16,16,16,6,0,0,0,1,5,8,16,1,0,0,0,0,0,3,11,0,0,0,4 +0,0,8,14,16,16,0,0,0,0,14,13,8,8,0,0,0,2,16,6,0,0,0,0,0,6,16,13,16,13,0,0,0,3,16,16,12,16,7,0,0,0,4,1,2,14,6,0,0,0,1,6,16,11,0,0,0,0,11,15,8,1,0,0,5 +0,0,2,15,6,0,0,0,0,0,11,16,4,0,0,0,0,3,16,7,0,0,0,0,0,4,16,6,4,1,0,0,0,6,16,16,14,16,3,0,0,2,14,9,0,11,9,0,0,0,10,14,8,15,5,0,0,0,3,13,16,8,0,0,6 +0,0,4,16,16,16,3,0,0,0,7,12,13,16,8,0,0,0,0,0,1,16,5,0,0,1,4,4,7,16,1,0,0,10,16,16,16,16,9,0,0,5,11,13,16,10,2,0,0,0,0,11,13,0,0,0,0,0,5,13,3,0,0,0,7 +0,2,12,14,3,0,0,0,0,7,16,1,0,4,1,0,0,0,13,16,16,15,1,0,0,0,7,16,14,1,0,0,0,0,15,14,16,0,0,0,0,2,16,1,15,7,0,0,0,5,16,6,15,7,0,0,0,1,16,14,9,0,0,0,8 +0,0,8,14,12,3,0,0,0,6,16,6,14,14,0,0,0,6,13,0,8,14,0,0,0,2,14,14,14,16,3,0,0,0,2,4,6,16,5,0,0,0,0,0,0,16,5,0,0,0,0,0,5,16,3,0,0,0,7,16,16,8,0,0,9 +0,0,7,12,1,0,0,0,0,0,15,16,15,4,0,0,0,2,16,9,10,11,0,0,0,6,12,0,0,12,3,0,0,8,12,0,0,6,8,0,0,6,13,0,0,9,8,0,0,1,16,13,15,16,3,0,0,0,6,15,9,3,0,0,0 +0,0,6,12,11,0,0,0,0,0,12,16,15,0,0,0,0,0,13,16,14,2,0,0,0,1,15,16,11,2,0,0,0,0,9,16,10,0,0,0,0,0,9,16,14,5,0,0,0,0,10,16,16,14,0,0,0,0,4,11,12,8,0,0,1 
+0,0,10,13,8,1,0,0,0,0,16,16,16,8,0,0,0,0,6,1,11,9,0,0,0,0,0,0,13,8,0,0,0,0,0,5,15,4,0,0,0,0,8,16,10,0,0,0,0,8,16,16,16,15,4,0,0,2,10,11,7,2,0,0,2 +0,4,13,16,16,7,0,0,0,8,12,16,16,13,0,0,0,0,9,16,16,3,0,0,0,0,15,16,6,0,0,0,0,0,10,11,9,2,0,0,0,0,1,7,15,13,2,0,0,0,3,4,7,16,10,0,0,2,11,15,11,8,2,0,3 +0,0,0,2,14,2,0,0,0,0,0,14,8,0,0,0,0,0,10,9,0,4,4,0,0,4,14,1,1,15,8,0,0,4,16,5,11,16,2,0,0,6,16,16,16,11,0,0,0,0,4,0,12,6,0,0,0,0,0,1,13,1,0,0,4 +0,0,3,10,15,8,0,0,0,0,12,14,8,1,0,0,0,1,16,3,0,0,0,0,0,2,16,9,11,16,3,0,0,4,16,14,9,15,7,0,0,1,4,0,0,15,3,0,0,0,0,3,12,8,0,0,0,0,2,10,8,0,0,0,5 +0,0,1,12,6,0,0,0,0,0,12,15,0,0,0,0,0,4,16,10,0,0,0,0,0,7,16,10,1,0,0,0,0,8,16,16,15,7,0,0,0,6,16,9,9,16,3,0,0,0,8,16,13,15,11,0,0,0,1,10,15,14,4,0,6 +0,0,5,14,16,16,3,0,0,0,7,16,16,16,5,0,0,0,0,0,8,16,0,0,0,0,9,14,16,16,13,0,0,2,16,16,15,7,1,0,0,0,1,14,10,0,0,0,0,0,3,16,5,0,0,0,0,0,7,13,0,0,0,0,7 +0,0,15,13,0,3,3,0,0,0,15,15,8,15,5,0,0,0,8,16,16,7,0,0,0,0,7,16,16,1,0,0,0,0,12,12,15,10,0,0,0,3,16,0,10,15,1,0,0,2,16,5,7,15,3,0,0,1,12,16,15,7,0,0,8 +0,0,4,13,13,4,0,0,0,0,16,10,10,8,0,0,0,0,14,7,6,11,0,0,0,0,6,15,15,16,2,0,0,0,0,0,0,11,5,0,0,0,0,0,0,7,9,0,0,1,4,4,6,12,10,0,0,1,6,11,15,12,1,0,9 +0,0,7,12,13,2,0,0,0,0,14,13,8,13,0,0,0,3,16,1,0,11,2,0,0,4,14,0,0,5,8,0,0,5,8,0,0,5,8,0,0,4,16,0,2,14,7,0,0,2,16,10,14,15,1,0,0,0,6,14,14,4,0,0,0 +0,0,5,14,11,3,0,0,0,1,15,8,13,10,0,0,0,1,15,9,9,15,2,0,0,0,10,16,16,16,3,0,0,0,0,0,1,16,4,0,0,0,0,0,0,15,4,0,0,0,7,5,9,16,0,0,0,0,6,12,13,9,0,0,9 +0,0,15,16,12,5,0,0,0,1,16,15,11,7,0,0,0,4,16,9,0,0,0,0,0,8,16,14,12,7,0,0,0,7,16,14,10,16,3,0,0,0,1,0,10,16,4,0,0,0,1,10,16,10,0,0,0,0,13,15,5,0,0,0,5 +0,0,4,9,12,16,8,0,0,0,15,15,8,8,2,0,0,4,16,11,4,1,0,0,0,8,16,16,16,14,0,0,0,0,11,9,8,16,0,0,0,0,0,0,7,16,0,0,0,0,0,8,16,12,0,0,0,0,3,13,9,1,0,0,5 +0,0,4,14,5,0,0,0,0,0,13,14,0,0,0,0,0,2,16,10,0,0,0,0,0,4,16,7,0,0,0,0,0,6,16,16,15,4,0,0,0,4,16,9,4,16,2,0,0,1,15,13,6,16,11,0,0,0,4,13,16,15,5,0,6 +0,0,7,11,13,8,1,0,0,1,15,9,8,6,0,0,0,10,16,0,0,0,0,0,0,8,16,16,16,9,0,0,0,0,6,5,10,13,0,0,0,0,0,1,14,16,0,0,0,0,6,14,14,4,0,0,0,1,10,14,2,0,0,0,5 +0,0,4,14,11,3,0,0,0,0,10,16,12,14,1,0,0,1,14,12,0,13,3,0,0,5,16,6,0,8,6,0,0,8,16,0,0,9,8,0,0,7,16,3,7,16,5,0,0,3,15,13,16,15,2,0,0,0,4,15,12,2,0,0,0 +0,0,12,16,14,8,0,0,0,7,16,10,14,16,0,0,0,4,16,11,14,16,4,0,0,0,5,14,16,16,8,0,0,0,0,0,0,16,8,0,0,0,0,0,4,16,6,0,0,2,12,9,16,15,1,0,0,1,9,16,14,3,0,0,9 +0,0,7,14,11,0,0,0,0,1,16,13,2,2,1,0,0,3,16,9,4,13,4,0,0,0,7,16,16,14,0,0,0,0,11,16,16,9,0,0,0,0,16,9,10,15,0,0,0,1,16,2,5,16,4,0,0,0,7,15,16,16,3,0,8 +0,0,9,16,14,6,0,0,0,6,16,5,10,16,0,0,0,2,15,7,10,16,3,0,0,0,4,8,12,16,4,0,0,0,0,0,0,16,7,0,0,0,0,0,1,16,8,0,0,0,3,0,8,16,1,0,0,0,10,16,13,4,0,0,9 +0,1,15,14,2,0,0,0,0,6,14,0,0,3,2,0,0,2,16,3,2,13,3,0,0,0,11,14,15,9,0,0,0,0,7,16,11,0,0,0,0,0,15,13,14,0,0,0,0,2,15,4,16,3,0,0,0,1,15,16,12,1,0,0,8 +0,0,0,5,12,0,2,1,0,0,1,14,4,1,14,8,0,0,10,8,0,9,15,1,0,1,15,1,2,15,8,0,0,5,16,6,11,16,2,0,0,5,16,16,16,10,0,0,0,0,1,0,15,2,0,0,0,0,0,5,11,0,0,0,4 +0,0,3,14,15,9,0,0,0,0,10,16,16,13,0,0,0,2,13,16,16,4,0,0,0,0,12,16,16,4,0,0,0,2,13,16,16,4,0,0,0,0,12,16,16,4,0,0,0,0,6,16,16,16,6,0,0,0,2,10,16,16,2,0,1 +0,0,3,12,16,16,15,0,0,0,9,10,7,12,14,0,0,0,0,0,2,15,6,0,0,0,0,0,11,13,0,0,0,1,9,9,16,11,1,0,0,13,16,16,16,16,4,0,0,0,1,16,7,0,0,0,0,0,6,16,2,0,0,0,7 +0,0,1,11,16,16,8,0,0,0,5,11,9,16,11,0,0,0,0,0,2,16,6,0,0,0,6,9,12,16,9,0,0,1,16,16,16,14,3,0,0,0,3,5,16,7,0,0,0,0,0,8,14,0,0,0,0,0,1,16,5,0,0,0,7 
+0,0,8,14,16,16,15,1,0,0,6,6,5,12,12,1,0,0,0,2,11,12,3,0,0,0,5,14,9,0,0,0,0,0,9,16,9,0,0,0,0,0,1,11,16,7,0,0,0,0,2,7,16,7,0,0,0,0,9,13,5,0,0,0,3 +0,0,8,12,15,16,5,0,0,0,10,11,2,3,0,0,0,0,13,5,0,0,0,0,0,2,16,10,12,11,1,0,0,1,16,13,8,14,7,0,0,0,1,0,0,13,3,0,0,0,1,6,12,10,0,0,0,0,10,10,7,0,0,0,5 +0,0,1,7,10,3,0,0,0,0,8,16,16,12,0,0,0,0,8,16,16,12,0,0,0,0,10,16,16,5,0,0,0,0,7,16,16,1,0,0,0,0,14,16,15,1,0,0,0,0,12,16,16,1,0,0,0,0,1,9,12,9,0,0,1 +0,0,10,12,14,1,0,0,0,0,16,16,16,12,0,0,0,5,16,8,3,16,0,0,0,8,13,0,0,8,7,0,0,8,12,0,0,6,8,0,0,8,13,0,4,12,8,0,0,7,16,16,16,13,0,0,0,0,11,14,8,1,0,0,0 +0,0,5,12,1,6,0,0,0,0,11,12,0,16,2,0,0,0,16,5,0,12,4,0,0,3,15,0,0,8,4,0,0,7,12,0,0,4,7,0,0,2,15,1,1,12,5,0,0,0,16,11,12,15,3,0,0,0,4,12,12,3,0,0,0 +0,0,13,9,0,0,0,0,0,8,16,15,0,0,0,0,0,9,9,13,2,0,0,0,0,0,0,11,3,0,0,0,0,0,1,13,0,0,0,0,0,0,4,13,0,0,0,0,0,1,13,15,8,12,11,0,0,0,12,16,16,12,2,0,2 +0,5,15,16,6,0,0,0,0,11,16,16,11,0,0,0,0,6,10,11,14,0,0,0,0,0,0,7,15,0,0,0,0,0,0,11,11,0,0,0,0,0,3,16,6,0,0,0,0,0,13,16,15,12,11,0,0,6,16,16,16,13,3,0,2 +0,0,7,14,16,8,0,0,0,0,14,14,16,14,0,0,0,0,0,0,10,12,0,0,0,0,4,4,14,9,2,0,0,7,16,16,16,16,7,0,0,6,12,16,11,1,0,0,0,0,2,16,3,0,0,0,0,0,6,13,0,0,0,0,7 +0,0,6,14,9,5,2,0,0,7,15,6,2,12,8,0,0,5,15,2,8,15,1,0,0,1,12,14,16,4,0,0,0,0,1,16,11,0,0,0,0,0,4,10,16,3,0,0,0,0,9,2,13,8,0,0,0,0,5,14,11,3,0,0,8 +0,4,15,11,1,0,0,0,0,8,16,16,4,0,0,0,0,2,6,13,8,0,0,0,0,0,0,8,5,0,0,0,0,0,0,11,4,0,0,0,0,0,5,16,0,0,0,0,0,2,13,16,13,12,6,0,0,5,16,15,16,12,3,0,2 +0,0,9,13,8,0,0,0,0,0,13,16,16,12,0,0,0,2,16,7,6,15,3,0,0,8,14,0,0,8,3,0,0,5,14,0,0,8,8,0,0,2,16,13,11,14,4,0,0,3,16,15,16,6,0,0,0,0,6,14,8,0,0,0,0 +0,0,5,11,12,5,0,0,0,0,12,6,2,3,0,0,0,0,9,16,16,4,0,0,0,0,12,16,16,4,0,0,0,0,12,16,16,4,0,0,0,0,12,16,16,4,0,0,0,0,12,16,16,8,0,0,0,0,6,12,11,7,0,0,1 +0,2,16,10,0,0,0,0,0,7,16,16,3,0,0,0,0,3,10,12,8,0,0,0,0,0,0,7,10,0,0,0,0,0,0,10,12,0,0,0,0,0,8,15,15,12,5,0,0,2,16,16,16,16,15,2,0,2,15,14,12,12,7,0,2 +0,0,1,13,9,0,0,0,0,0,8,16,4,0,0,0,0,0,16,11,0,0,0,0,0,2,16,10,2,0,0,0,0,7,16,16,16,10,1,0,0,4,16,6,2,14,7,0,0,0,11,15,12,15,8,0,0,0,2,14,15,6,0,0,6 +0,0,5,13,16,10,1,0,0,7,16,16,16,16,7,0,0,0,5,2,11,14,5,0,0,0,0,10,15,6,0,0,0,0,9,16,13,2,0,0,0,0,4,11,15,14,0,0,0,0,2,2,13,16,1,0,0,0,5,14,15,9,0,0,3 +0,0,6,11,16,13,5,0,0,2,16,16,16,16,12,0,0,0,0,0,5,16,4,0,0,0,0,10,15,5,0,0,0,0,9,16,3,0,0,0,0,0,13,16,13,1,0,0,0,0,0,5,16,14,0,0,0,0,5,14,11,6,0,0,3 +0,0,2,14,16,8,0,0,0,0,4,12,16,11,0,0,0,0,0,0,16,12,0,0,0,0,0,3,16,9,0,0,0,2,5,10,16,12,2,0,0,16,16,16,16,14,3,0,0,4,4,14,12,0,0,0,0,0,2,16,7,0,0,0,7 +0,0,4,12,13,1,0,0,0,0,4,16,16,5,0,0,0,0,9,16,10,0,0,0,0,8,16,16,11,4,0,0,0,0,4,8,16,16,7,0,0,0,0,0,2,14,14,0,0,0,0,4,15,16,11,0,0,0,5,16,14,8,0,0,3 +0,1,9,16,16,12,1,0,0,0,7,8,10,16,9,0,0,0,0,0,9,16,4,0,0,0,5,13,13,3,0,0,0,0,13,16,8,0,0,0,0,0,0,11,16,8,0,0,0,0,3,5,14,15,0,0,0,0,10,16,11,4,0,0,3 +0,0,0,2,14,0,0,0,0,0,0,12,9,0,0,0,0,0,8,12,0,0,13,5,0,0,13,8,0,9,14,0,0,4,16,16,12,16,4,0,0,4,12,12,15,12,0,0,0,0,0,1,15,4,0,0,0,0,0,4,10,0,0,0,4 +0,0,4,16,6,0,0,0,0,0,7,16,5,0,0,0,0,0,12,12,1,0,0,0,0,0,16,8,0,0,0,0,0,4,16,11,2,0,0,0,0,5,16,16,16,13,1,0,0,2,16,14,15,16,5,0,0,0,5,15,14,7,0,0,6 +0,0,5,16,5,0,0,0,0,0,12,14,1,0,0,0,0,0,15,10,0,0,0,0,0,3,16,9,1,0,0,0,0,7,16,16,16,9,0,0,0,1,16,10,8,16,6,0,0,0,12,14,5,9,13,0,0,0,4,15,15,12,3,0,6 +0,0,3,14,1,0,0,0,0,0,12,12,0,0,0,0,0,3,16,6,0,0,0,0,0,5,16,2,0,0,0,0,0,6,16,2,5,2,0,0,0,4,16,2,12,15,2,0,0,1,14,13,2,13,11,0,0,0,3,11,16,13,4,0,6 
+0,0,0,1,15,2,0,0,0,0,0,6,14,0,0,0,0,0,0,11,9,0,6,0,0,0,6,15,1,11,15,0,0,5,16,14,10,16,8,0,1,15,16,16,16,16,3,0,0,3,7,5,13,11,0,0,0,0,0,0,15,3,0,0,4 +0,0,10,16,9,1,0,0,0,7,16,9,14,11,0,0,0,8,14,1,7,14,2,0,0,2,14,14,14,15,3,0,0,0,2,4,4,16,4,0,0,0,3,0,0,13,9,0,0,2,15,8,8,14,8,0,0,0,8,15,13,10,0,0,9 +0,0,11,15,12,1,0,0,0,0,7,16,16,7,0,0,0,0,12,16,16,16,1,0,0,0,13,16,16,13,2,0,0,0,14,16,16,4,0,0,0,2,16,16,14,0,0,0,0,0,14,16,14,0,0,0,0,0,8,16,13,1,0,0,1 +0,0,7,12,16,9,0,0,0,4,16,6,7,3,0,0,0,4,16,2,8,3,0,0,0,7,16,15,13,16,3,0,0,5,11,1,1,16,8,0,0,0,0,0,7,16,0,0,0,0,0,10,16,6,0,0,0,0,10,11,4,0,0,0,5 +0,0,10,11,7,0,0,0,0,4,16,16,16,10,0,0,0,4,16,6,5,15,2,0,0,8,12,0,0,5,8,0,0,8,10,0,0,5,8,0,0,6,13,1,5,14,5,0,0,0,14,13,15,11,1,0,0,0,7,12,8,0,0,0,0 +0,0,8,16,11,0,0,0,0,2,15,8,16,7,0,0,0,3,13,1,14,13,0,0,0,0,10,16,16,16,3,0,0,0,0,2,5,15,4,0,0,0,0,0,0,12,6,0,0,0,5,6,5,15,4,0,0,0,6,15,16,12,1,0,9 +0,0,3,10,16,12,0,0,0,0,13,12,0,2,0,0,0,4,16,2,0,0,0,0,0,0,16,14,16,14,2,0,0,6,16,12,5,16,5,0,0,1,12,1,0,14,7,0,0,0,0,3,13,13,1,0,0,0,1,13,10,1,0,0,5 +0,4,15,15,8,0,0,0,0,8,16,16,16,3,0,0,0,1,0,1,15,5,0,0,0,0,0,0,11,6,0,0,0,0,0,3,15,2,0,0,0,0,4,15,16,14,6,0,0,6,16,16,15,11,3,0,0,7,14,11,0,0,0,0,2 +0,0,7,12,10,0,0,0,0,3,16,16,16,9,1,0,0,0,8,16,16,11,1,0,0,0,10,16,16,0,0,0,0,3,16,14,16,4,0,0,0,4,13,0,7,15,0,0,0,4,14,2,2,16,0,0,0,0,6,11,10,5,0,0,8 +0,2,11,16,13,2,0,0,0,11,15,12,16,7,0,0,0,7,6,0,14,8,0,0,0,0,0,1,16,6,0,0,0,0,0,10,10,0,0,0,0,0,7,16,4,3,3,0,0,3,15,16,15,15,11,0,0,2,13,12,9,0,0,0,2 +0,0,7,14,8,4,0,0,0,0,16,8,15,14,1,0,0,4,16,4,0,8,4,0,0,8,14,0,0,4,4,0,0,8,16,0,0,4,5,0,0,3,16,1,0,11,4,0,0,0,15,16,16,12,0,0,0,0,6,13,7,0,0,0,0 +0,0,9,15,6,0,0,0,0,2,16,16,16,7,0,0,0,4,14,5,11,13,0,0,0,7,12,0,0,12,4,0,0,8,11,0,0,7,5,0,0,4,13,1,1,10,6,0,0,2,16,15,15,14,1,0,0,0,8,15,11,4,0,0,0 +0,0,4,12,12,7,0,0,0,0,16,16,16,5,0,0,0,0,16,16,16,16,0,0,0,4,16,16,16,12,0,0,0,4,16,16,16,12,0,0,0,3,15,16,16,9,0,0,0,0,12,16,16,8,0,0,0,1,7,12,11,5,0,0,1 +0,0,6,15,14,1,0,0,0,0,13,16,16,2,0,0,0,0,3,8,16,2,0,0,0,2,6,12,16,7,2,0,0,13,16,16,16,16,9,0,0,10,13,16,7,1,0,0,0,0,6,14,0,0,0,0,0,0,10,8,0,0,0,0,7 +0,0,0,13,12,0,0,0,0,0,6,16,4,0,0,0,0,2,16,10,0,0,0,0,0,5,16,10,0,0,0,0,0,8,15,15,6,0,0,0,0,3,16,14,13,10,2,0,0,0,12,16,13,16,12,0,0,0,1,10,16,14,4,0,6 +0,0,7,16,16,15,8,0,0,0,12,15,15,16,11,0,0,0,0,3,14,15,2,0,0,0,1,14,12,1,0,0,0,0,1,16,15,5,0,0,0,0,0,3,12,15,0,0,0,0,0,4,12,14,1,0,0,0,10,15,10,4,0,0,3 +0,0,11,16,10,0,0,0,0,6,15,16,16,6,0,0,0,0,0,2,11,12,0,0,0,0,0,0,9,8,0,0,0,0,0,4,15,2,0,0,0,1,9,15,9,3,0,0,0,0,16,16,16,16,7,0,0,0,10,13,8,4,1,0,2 +0,0,4,10,12,7,0,0,0,0,8,16,16,15,0,0,0,0,9,16,16,12,0,0,0,0,7,16,16,9,0,0,0,0,2,14,16,11,1,0,0,0,0,16,16,16,0,0,0,0,2,16,16,12,0,0,0,0,1,9,10,0,0,0,1 +0,0,1,15,15,2,0,0,0,0,3,12,16,6,0,0,0,0,0,4,16,4,0,0,0,0,3,8,16,4,0,0,0,10,16,16,16,16,8,0,0,8,11,14,14,5,1,0,0,0,0,15,6,0,0,0,0,0,1,15,2,0,0,0,7 +0,0,0,0,13,8,0,0,0,0,0,5,16,3,0,0,0,0,0,14,10,2,9,0,0,1,11,13,0,10,15,0,0,12,15,5,7,14,10,0,1,15,16,16,16,16,4,0,0,4,4,3,10,14,0,0,0,0,0,0,15,7,0,0,4 +0,0,0,8,15,3,0,0,0,0,1,15,11,2,0,0,0,0,13,16,1,0,0,0,0,3,16,14,0,0,0,0,0,3,16,15,5,0,0,0,0,3,15,16,11,14,7,0,0,0,11,16,6,6,15,0,0,0,0,10,14,12,8,0,6 +0,0,10,15,15,11,4,0,0,1,10,5,7,16,10,0,0,0,0,1,14,14,0,0,0,0,0,11,13,0,0,0,0,0,0,5,16,5,0,0,0,0,0,1,10,14,0,0,0,0,0,2,7,15,3,0,0,0,6,11,16,8,0,0,3 +0,0,4,16,16,8,0,0,0,0,6,16,16,15,1,0,0,0,4,16,16,12,0,0,0,0,3,16,16,15,0,0,0,0,8,16,16,6,0,0,0,1,13,16,16,4,0,0,0,3,16,16,15,2,0,0,0,0,6,12,12,2,0,0,1 
+0,0,3,13,16,5,0,0,0,6,15,9,15,7,0,0,0,0,0,6,16,10,0,0,0,0,7,14,16,3,0,0,0,0,9,16,16,14,3,0,0,0,0,3,2,15,10,0,0,0,4,5,12,16,14,0,0,0,5,13,14,8,2,0,3 +0,0,7,16,9,8,2,0,0,5,16,14,16,16,4,0,0,8,14,0,6,16,4,0,0,1,16,16,15,16,6,0,0,0,0,4,4,13,8,0,0,0,0,0,0,13,8,0,0,0,12,9,11,16,7,0,0,0,7,15,14,7,0,0,9 +0,0,1,12,10,3,0,0,0,0,7,16,16,7,0,0,0,0,12,16,16,3,0,0,0,0,14,16,16,2,0,0,0,1,15,16,16,5,0,0,0,0,15,16,15,2,0,0,0,0,11,16,16,8,0,0,0,0,1,7,12,10,0,0,1 +0,0,1,10,16,16,1,0,0,0,9,16,13,16,1,0,0,0,0,0,5,11,0,0,0,0,0,3,12,12,5,0,0,0,7,16,16,10,4,0,0,0,3,11,13,0,0,0,0,0,0,11,5,0,0,0,0,0,0,15,0,0,0,0,7 +0,0,2,13,13,1,0,0,0,0,12,16,9,0,0,0,0,0,16,11,0,0,0,0,0,4,16,8,0,0,0,0,0,6,16,9,3,0,0,0,0,3,16,14,12,13,4,0,0,0,14,10,0,10,15,0,0,0,2,12,16,13,7,0,6 +0,0,5,12,13,4,0,0,0,3,16,10,2,5,9,0,0,0,15,14,11,15,3,0,0,0,7,16,15,1,0,0,0,0,3,16,16,4,0,0,0,0,8,14,13,12,0,0,0,0,12,12,13,11,0,0,0,0,7,16,11,2,0,0,8 +0,0,0,3,13,7,0,0,0,0,1,14,11,0,0,0,0,0,12,12,1,2,3,0,0,7,16,4,1,15,10,0,0,10,14,0,7,16,8,0,0,15,16,16,16,16,1,0,0,4,11,11,15,11,0,0,0,0,0,2,15,4,0,0,4 +0,1,7,13,16,13,0,0,0,7,16,16,16,14,0,0,0,1,7,16,10,1,0,0,0,1,16,15,0,0,0,0,0,0,8,15,14,3,0,0,0,0,0,1,14,15,3,0,0,0,1,5,13,16,7,0,0,0,8,15,10,6,0,0,3 +0,0,8,12,11,6,0,0,0,0,8,16,16,13,2,0,0,2,14,16,16,14,2,0,0,2,13,16,16,8,0,0,0,4,16,16,16,8,0,0,0,4,16,16,16,10,0,0,0,1,11,16,16,8,0,0,0,0,4,11,12,7,0,0,1 +0,0,0,2,16,1,0,0,0,0,0,9,12,0,0,0,0,0,4,15,9,0,0,0,0,4,16,12,0,9,12,0,0,9,16,16,16,16,10,0,0,1,6,10,14,16,4,0,0,0,0,0,14,14,0,0,0,0,0,3,16,7,0,0,4 +0,0,5,16,10,0,0,0,0,0,8,16,16,5,0,0,0,0,14,14,1,12,0,0,0,0,15,10,0,7,4,0,0,2,16,7,0,2,9,0,0,2,16,8,0,6,11,0,0,1,12,14,14,16,5,0,0,0,4,15,16,8,1,0,0 +0,0,9,12,14,6,0,0,0,0,16,6,0,0,0,0,0,2,15,0,0,0,0,0,0,8,15,12,16,9,1,0,0,1,8,6,2,12,7,0,0,0,0,0,0,11,7,0,0,0,0,0,8,15,2,0,0,0,12,14,9,2,0,0,5 +0,2,10,12,16,8,0,0,0,4,8,5,13,16,0,0,0,0,0,7,15,7,0,0,0,0,6,16,10,0,0,0,0,0,0,5,16,11,0,0,0,0,0,0,6,16,3,0,0,0,0,0,10,16,5,0,0,2,14,16,12,9,0,0,3 +0,0,0,6,12,6,0,0,0,0,1,15,14,1,0,0,0,0,10,16,8,0,0,0,0,1,13,16,0,0,0,0,0,3,16,16,11,4,0,0,0,0,16,16,16,16,7,0,0,0,9,16,16,16,12,0,0,0,0,6,11,12,5,0,6 +0,0,5,14,11,8,0,0,0,4,15,2,16,16,0,0,0,8,12,0,12,16,0,0,0,2,15,16,16,15,4,0,0,0,0,4,10,16,2,0,0,0,0,0,4,16,2,0,0,3,13,8,14,16,0,0,0,0,7,15,12,5,0,0,9 +0,0,2,13,10,0,0,0,0,0,10,13,0,0,0,0,0,0,16,6,0,0,0,0,0,3,16,8,2,0,0,0,0,7,16,16,16,11,0,0,0,4,16,2,4,11,9,0,0,1,13,11,8,12,12,0,0,0,1,12,16,14,4,0,6 +0,0,6,12,12,6,0,0,0,0,11,16,16,13,0,0,0,0,12,16,16,8,0,0,0,0,8,16,16,12,0,0,0,2,13,16,16,12,0,0,0,1,16,16,16,10,0,0,0,0,16,16,16,8,0,0,0,0,2,11,10,4,0,0,1 +0,0,6,16,16,3,0,0,0,0,8,16,16,12,0,0,0,0,0,4,15,11,0,0,0,0,6,16,16,16,13,0,0,0,11,16,16,5,1,0,0,0,0,14,7,0,0,0,0,0,4,16,1,0,0,0,0,0,11,11,0,0,0,0,7 +0,0,12,16,16,7,0,0,0,3,16,10,2,2,0,0,0,4,16,5,0,0,0,0,0,3,16,12,12,9,1,0,0,1,15,16,12,15,9,0,0,0,0,0,3,14,11,0,0,0,3,9,16,16,7,0,0,0,10,12,12,4,0,0,5 +0,0,0,2,14,0,0,0,0,0,0,4,15,0,0,0,0,0,0,11,10,5,7,0,0,0,11,15,2,13,7,0,0,10,16,8,8,16,6,0,0,8,12,12,13,15,1,0,0,0,0,0,10,10,0,0,0,0,0,1,13,3,0,0,4 +0,0,0,1,13,8,0,0,0,0,0,9,15,3,0,0,0,0,2,16,9,2,1,0,0,2,14,13,1,16,6,0,0,11,16,6,8,16,3,0,1,16,16,16,16,13,0,0,0,7,12,13,16,10,0,0,0,0,0,0,15,7,0,0,4 +0,0,3,10,16,16,4,0,0,0,0,0,1,14,7,0,0,0,0,0,2,15,4,0,0,0,4,4,12,15,5,0,0,1,15,16,16,9,4,0,0,0,2,11,13,0,0,0,0,0,1,16,5,0,0,0,0,0,3,12,0,0,0,0,7 +0,3,14,15,6,0,0,0,0,7,15,14,15,0,0,0,0,2,7,2,14,3,0,0,0,0,0,1,14,4,0,0,0,0,0,7,15,2,0,0,0,0,5,15,14,4,1,0,0,4,15,16,16,16,6,0,0,4,15,13,12,11,1,0,2 
+0,0,10,16,14,5,0,0,0,2,16,16,8,0,0,0,0,0,9,16,16,5,0,0,0,0,7,16,16,3,0,0,0,0,14,14,13,11,0,0,0,5,16,1,6,15,0,0,0,7,14,9,13,15,1,0,0,1,11,16,15,6,0,0,8 +0,1,10,13,2,0,0,0,0,10,16,16,12,0,0,0,0,9,9,8,16,0,0,0,0,0,0,6,16,2,0,0,0,0,1,11,15,0,0,0,0,0,4,16,13,2,0,0,0,0,14,16,16,16,13,0,0,0,9,13,11,10,9,0,2 +0,0,15,13,1,0,0,0,0,0,14,14,4,0,0,0,0,0,1,4,12,0,0,0,0,0,0,6,12,0,0,0,0,0,0,11,10,0,0,0,0,0,1,16,8,6,5,0,0,0,13,16,16,16,14,0,0,0,10,13,10,6,2,0,2 +0,0,1,10,14,13,1,0,0,0,8,12,6,4,0,0,0,0,14,4,0,0,0,0,0,5,16,12,13,12,0,0,0,2,11,11,8,14,4,0,0,0,0,0,0,16,4,0,0,0,0,0,6,15,2,0,0,0,0,12,14,4,0,0,5 +0,1,10,16,15,2,0,0,0,1,12,13,16,4,0,0,0,0,0,0,16,8,0,0,0,0,7,11,16,13,8,0,0,8,16,16,16,16,6,0,0,2,10,16,9,0,0,0,0,0,3,16,4,0,0,0,0,0,10,15,2,0,0,0,7 +0,0,4,14,15,6,0,0,0,5,16,16,16,16,0,0,0,5,16,16,16,16,3,0,0,0,2,8,13,16,5,0,0,0,0,0,8,16,6,0,0,0,0,0,4,16,8,0,0,0,1,6,13,16,6,0,0,0,4,13,15,9,0,0,9 +0,0,10,16,15,1,0,0,0,0,16,12,5,0,0,0,0,2,16,9,4,0,0,0,0,4,16,16,16,14,2,0,0,1,10,4,1,16,4,0,0,0,0,0,2,16,7,0,0,0,7,8,14,16,3,0,0,0,6,13,10,4,0,0,5 +0,0,0,1,12,6,0,0,0,0,0,11,15,2,0,0,0,0,8,16,6,1,2,0,0,4,16,9,1,15,9,0,0,13,15,6,10,16,6,0,0,12,16,16,16,16,1,0,0,1,7,4,14,13,0,0,0,0,0,0,14,9,0,0,4 +0,0,8,16,3,0,1,0,0,0,16,14,5,14,12,0,0,0,8,16,16,9,0,0,0,0,3,16,14,1,0,0,0,0,12,16,16,2,0,0,0,0,16,11,16,4,0,0,0,3,16,16,16,6,0,0,0,0,10,16,10,1,0,0,8 +0,0,5,12,8,0,1,0,0,0,11,16,5,13,6,0,0,0,2,15,16,12,1,0,0,0,0,10,16,6,0,0,0,0,1,15,16,7,0,0,0,0,8,16,16,11,0,0,0,0,11,16,16,9,0,0,0,0,6,12,12,3,0,0,8 +0,0,0,3,15,4,0,0,0,0,4,16,12,0,0,0,0,0,12,15,3,4,3,0,0,7,16,5,3,15,8,0,0,13,16,13,15,16,2,0,0,12,16,16,16,13,0,0,0,0,4,5,16,8,0,0,0,0,0,1,16,4,0,0,4 +0,0,4,10,13,6,0,0,0,1,16,14,12,16,3,0,0,4,16,6,3,16,4,0,0,0,12,16,16,16,5,0,0,0,0,4,4,16,8,0,0,0,0,0,0,15,5,0,0,0,5,7,7,16,4,0,0,0,2,14,15,9,0,0,9 +0,0,6,16,13,11,1,0,0,0,16,15,12,16,1,0,0,3,16,7,0,13,6,0,0,4,16,0,0,10,8,0,0,8,16,0,0,14,6,0,0,5,16,7,9,16,5,0,0,1,15,16,16,16,1,0,0,0,6,16,14,6,0,0,0 +0,0,1,11,15,1,0,0,0,0,13,16,8,2,1,0,0,0,16,15,10,16,5,0,0,0,8,16,16,7,0,0,0,0,9,16,16,4,0,0,0,0,16,14,16,15,0,0,0,0,15,15,15,16,0,0,0,0,2,9,13,6,0,0,8 +0,0,2,10,7,0,0,0,0,0,14,16,16,15,1,0,0,4,16,7,3,16,7,0,0,5,16,10,7,16,4,0,0,0,5,14,14,16,4,0,0,0,0,0,0,16,2,0,0,0,4,7,7,16,2,0,0,0,5,12,16,12,0,0,9 +0,0,10,14,8,1,0,0,0,2,16,14,6,1,0,0,0,0,15,15,8,15,0,0,0,0,5,16,16,10,0,0,0,0,12,15,15,12,0,0,0,4,16,6,4,16,6,0,0,8,16,10,8,16,8,0,0,1,8,12,14,12,1,0,8 diff --git a/reagent/ope/test/data/optdigits.names b/reagent/ope/test/data/optdigits.names new file mode 100644 index 000000000..bc7d49268 --- /dev/null +++ b/reagent/ope/test/data/optdigits.names @@ -0,0 +1,93 @@ + +1. Title of Database: Optical Recognition of Handwritten Digits + +2. Source: + E. Alpaydin, C. Kaynak + Department of Computer Engineering + Bogazici University, 80815 Istanbul Turkey + alpaydin@boun.edu.tr + July 1998 + +3. Past Usage: + C. Kaynak (1995) Methods of Combining Multiple Classifiers and Their + Applications to Handwritten Digit Recognition, + MSc Thesis, Institute of Graduate Studies in Science and + Engineering, Bogazici University. + + E. Alpaydin, C. Kaynak (1998) Cascading Classifiers, Kybernetika, + to appear. ftp://ftp.icsi.berkeley.edu/pub/ai/ethem/kyb.ps.Z + +4. Relevant Information: + We used preprocessing programs made available by NIST to extract + normalized bitmaps of handwritten digits from a preprinted form. From + a total of 43 people, 30 contributed to the training set and different + 13 to the test set. 
32x32 bitmaps are divided into nonoverlapping + blocks of 4x4 and the number of on pixels are counted in each block. + This generates an input matrix of 8x8 where each element is an + integer in the range 0..16. This reduces dimensionality and gives + invariance to small distortions. + + For info on NIST preprocessing routines, see + M. D. Garris, J. L. Blue, G. T. Candela, D. L. Dimmick, J. Geist, + P. J. Grother, S. A. Janet, and C. L. Wilson, NIST Form-Based + Handprint Recognition System, NISTIR 5469, 1994. + +5. Number of Instances + optdigits.tra Training 3823 + optdigits.tes Testing 1797 + + The way we used the dataset was to use half of training for + actual training, one-fourth for validation and one-fourth + for writer-dependent testing. The test set was used for + writer-independent testing and is the actual quality measure. + +6. Number of Attributes + 64 input+1 class attribute + +7. For Each Attribute: + All input attributes are integers in the range 0..16. + The last attribute is the class code 0..9 + +8. Missing Attribute Values + None + +9. Class Distribution + Class: No of examples in training set + 0: 376 + 1: 389 + 2: 380 + 3: 389 + 4: 387 + 5: 376 + 6: 377 + 7: 387 + 8: 380 + 9: 382 + + Class: No of examples in testing set + 0: 178 + 1: 182 + 2: 177 + 3: 183 + 4: 181 + 5: 182 + 6: 181 + 7: 179 + 8: 174 + 9: 180 + +Accuracy on the testing set with k-nn +using Euclidean distance as the metric + + k = 1 : 98.00 + k = 2 : 97.38 + k = 3 : 97.83 + k = 4 : 97.61 + k = 5 : 97.89 + k = 6 : 97.77 + k = 7 : 97.66 + k = 8 : 97.66 + k = 9 : 97.72 + k = 10 : 97.55 + k = 11 : 97.89 + diff --git a/reagent/ope/test/data/satimage.data b/reagent/ope/test/data/satimage.data new file mode 100644 index 000000000..0f09072bb --- /dev/null +++ b/reagent/ope/test/data/satimage.data @@ -0,0 +1,6435 @@ +80 102 102 79 76 102 102 79 76 102 106 83 76 99 108 85 76 103 118 88 80 107 118 88 79 107 109 87 79 107 109 87 79 107 113 87 3 +76 102 102 79 76 102 106 83 76 102 106 87 76 103 118 88 80 107 118 88 80 112 118 88 79 107 109 87 79 107 113 87 79 103 104 83 3 +80 98 106 79 76 94 102 76 76 94 102 76 80 107 113 85 80 95 100 78 80 95 100 78 79 103 104 79 79 95 100 79 79 95 96 75 4 +76 94 102 76 76 94 102 76 76 94 102 76 80 95 100 78 80 95 100 78 80 91 100 78 79 95 100 79 79 95 96 75 79 95 100 75 4 +76 94 102 76 76 94 102 76 76 89 94 76 80 95 100 78 80 91 100 78 80 91 100 74 79 95 96 75 79 95 100 75 75 95 100 79 4 +76 94 102 76 76 89 94 76 76 89 98 76 80 91 100 78 80 91 100 74 80 95 104 74 79 95 100 75 75 95 100 79 75 91 96 75 4 +76 89 94 76 76 89 98 76 76 94 98 76 80 91 100 74 80 95 104 74 76 91 104 74 75 95 100 79 75 91 96 75 75 91 96 71 4 +76 94 90 76 76 89 94 76 72 94 90 72 76 91 100 74 76 87 100 74 76 87 91 74 79 87 93 67 75 87 96 71 75 91 96 71 4 +76 89 94 76 72 94 90 72 72 89 94 76 76 87 100 74 76 87 91 74 76 87 91 67 75 87 96 71 75 91 96 71 75 87 93 67 4 +72 89 98 76 76 94 98 76 72 85 90 72 71 87 87 70 71 83 87 67 68 83 87 67 71 87 89 67 71 79 81 62 71 79 85 62 4 +72 85 90 72 68 85 94 72 68 89 90 68 68 83 87 67 68 83 87 67 68 79 87 63 71 79 85 62 67 75 85 62 71 75 85 62 4 +68 85 94 72 68 89 90 68 68 85 90 72 68 83 87 67 68 79 87 63 68 79 87 67 67 75 85 62 71 75 85 62 67 79 81 62 4 +68 89 90 68 68 85 90 72 68 85 86 68 68 79 87 63 68 79 87 67 71 83 87 67 71 75 85 62 67 79 81 62 71 79 85 62 4 +68 85 90 72 68 85 86 68 68 89 86 72 68 79 87 67 71 83 87 67 68 83 87 67 67 79 81 62 71 79 85 62 71 75 81 67 4 +80 98 106 83 80 94 102 83 80 102 111 87 76 95 104 81 84 103 104 85 84 103 108 85 75 83 96 83 79 99 
104 83 84 99 113 87 3 +80 102 111 87 84 106 115 91 88 106 115 91 84 103 108 85 88 107 118 88 88 107 118 92 84 99 113 87 84 99 109 87 84 103 109 83 3 +84 106 115 91 88 106 115 91 88 106 115 87 88 107 118 88 88 107 118 92 88 107 118 92 84 99 109 87 84 103 109 83 88 107 113 87 3 +88 106 115 91 88 106 115 87 88 111 111 91 88 107 118 92 88 107 118 92 88 112 113 88 84 103 109 83 88 107 113 87 88 107 104 87 3 +88 111 111 91 88 106 115 87 84 98 111 83 88 112 113 88 88 103 113 88 88 103 108 85 88 107 104 87 88 107 109 83 84 99 109 83 3 +88 106 115 87 84 98 111 83 80 89 115 87 88 103 113 88 88 103 108 85 84 99 108 85 88 107 109 83 84 99 109 83 88 103 109 87 3 +92 115 111 91 92 115 115 94 92 111 120 91 88 103 113 88 88 112 118 92 88 112 122 88 84 103 113 87 88 111 113 92 93 107 109 92 3 +84 106 111 87 84 106 111 87 84 106 111 87 92 112 128 92 92 112 118 96 92 112 113 88 93 111 113 92 93 116 118 92 88 111 118 92 3 +84 106 111 87 84 106 111 87 84 98 111 87 92 112 118 96 92 112 113 88 88 103 113 85 93 116 118 92 88 111 118 92 93 107 113 87 3 +84 98 111 87 84 98 106 91 84 102 111 87 88 103 113 85 97 107 113 88 92 112 118 92 93 107 113 87 93 107 113 87 93 107 109 87 3 +88 106 106 87 84 106 111 83 88 98 106 83 88 103 108 85 88 103 113 92 88 107 113 88 93 103 109 87 88 107 109 87 88 111 113 92 3 +88 111 111 87 88 111 106 87 88 106 111 87 84 103 108 85 88 95 104 81 84 99 108 85 88 95 100 79 88 95 100 83 88 103 100 83 3 +84 106 106 87 84 102 111 83 84 98 98 83 84 99 104 85 84 99 104 81 84 99 100 81 88 99 100 79 84 99 104 79 79 95 100 79 3 +68 77 94 79 60 62 78 76 64 73 90 76 60 54 87 74 56 61 87 78 71 79 100 81 75 79 96 79 75 83 96 79 84 99 104 83 5 +84 98 102 79 80 94 102 76 76 94 94 72 80 91 100 78 76 83 91 74 71 79 87 70 75 87 93 71 75 83 85 71 71 75 85 67 7 +76 94 94 72 72 81 82 68 68 73 78 65 71 79 87 70 71 79 79 67 71 79 83 67 71 75 85 67 71 79 77 67 71 75 81 67 7 +64 69 78 65 68 77 86 65 64 66 86 68 71 79 79 63 68 75 79 67 60 68 79 67 67 72 81 67 67 64 81 67 59 61 77 71 7 +53 49 71 65 57 49 74 65 53 49 74 68 53 54 71 63 56 54 71 63 56 51 67 63 55 51 74 67 55 48 70 62 51 48 70 67 5 +101 132 139 103 101 126 133 103 92 112 118 85 102 137 139 108 102 126 134 104 88 121 128 100 90 109 112 89 90 113 117 92 90 113 122 96 3 +76 99 104 81 76 99 108 85 76 103 118 88 84 103 104 79 79 107 109 87 79 107 109 87 82 100 108 81 82 100 104 78 78 100 104 81 3 +80 107 113 85 80 95 100 78 80 95 100 78 79 103 104 79 79 95 100 79 79 95 96 75 82 100 108 85 78 96 96 78 78 91 92 70 4 +80 95 100 78 80 95 100 78 80 91 100 78 79 95 100 79 79 95 96 75 79 95 100 75 78 96 96 78 78 91 92 70 74 91 92 70 4 +80 95 100 78 80 91 100 78 80 91 100 74 79 95 96 75 79 95 100 75 75 95 100 79 78 91 92 70 74 91 92 70 78 91 96 74 4 +80 91 100 74 80 95 104 74 76 91 104 74 75 95 100 79 75 91 96 75 75 91 96 71 78 91 96 74 74 87 92 70 74 87 88 70 4 +76 87 100 74 76 87 91 74 76 87 91 67 75 87 96 71 75 91 96 71 75 87 93 67 74 87 92 70 78 87 88 66 78 87 92 66 4 +76 87 91 67 71 87 87 70 71 83 87 67 75 87 93 67 71 87 89 67 71 79 81 62 78 87 92 66 74 83 92 66 70 83 92 66 4 +71 87 87 70 71 83 87 67 68 83 87 67 71 87 89 67 71 79 81 62 71 79 85 62 74 83 92 66 70 83 92 66 70 83 88 70 4 +68 83 87 67 71 83 87 70 76 91 91 74 71 75 81 62 67 75 85 71 67 75 96 79 59 60 96 81 56 49 104 100 49 40 112 114 4 +84 103 108 85 88 107 118 88 88 107 118 92 84 99 113 87 84 99 109 87 84 103 109 83 63 67 104 85 82 96 104 78 86 100 108 85 3 +88 107 118 88 88 107 118 92 88 107 118 92 84 99 109 87 84 103 109 83 88 107 113 87 82 96 104 78 86 100 108 85 90 104 112 85 3 +88 107 118 92 
88 112 113 88 88 103 113 88 88 107 113 87 88 107 104 87 88 107 109 83 90 104 112 85 86 104 108 85 86 104 108 85 3 +88 103 113 88 88 103 108 85 84 99 108 85 88 107 109 83 84 99 109 83 88 103 109 87 86 104 108 85 86 104 108 85 86 100 108 85 3 +88 103 108 85 84 99 108 85 88 99 104 85 84 99 109 83 88 103 109 87 88 103 109 87 86 104 108 85 86 100 108 85 90 104 112 89 3 +92 112 118 92 92 107 113 92 92 107 118 88 88 107 109 92 88 107 109 87 88 107 109 87 90 104 112 89 86 104 108 89 90 104 108 92 3 +88 107 113 88 88 103 108 81 88 103 108 88 88 111 113 92 88 107 113 87 88 107 113 87 86 104 108 85 90 109 112 92 86 109 108 89 3 +88 103 108 88 84 99 104 85 84 103 108 81 88 107 113 87 88 107 109 83 84 99 104 87 86 109 108 89 86 109 112 89 90 109 112 92 3 +84 103 108 85 88 95 104 81 84 99 108 85 88 95 100 79 88 95 100 83 88 103 100 83 86 104 104 85 82 100 100 85 82 100 104 78 3 +88 99 104 85 84 99 104 85 84 99 104 81 84 103 104 83 88 99 100 79 84 99 104 79 82 96 100 81 82 100 108 81 82 96 104 78 3 +84 99 104 81 84 99 100 81 80 91 96 78 84 99 104 79 79 95 100 79 79 99 100 83 82 96 104 78 82 96 100 81 86 96 104 81 3 +71 75 87 78 60 54 87 74 56 61 87 78 79 91 104 79 75 79 96 79 75 83 96 79 82 100 104 78 82 96 104 81 82 96 104 85 5 +60 54 87 74 56 61 87 78 71 79 100 81 75 79 96 79 75 83 96 79 84 99 104 83 82 96 104 81 82 96 104 85 82 100 104 85 5 +56 61 87 78 71 79 100 81 80 95 100 85 75 83 96 79 84 99 104 83 84 99 104 83 82 96 104 85 82 100 104 85 86 100 108 85 3 +80 95 100 85 80 91 100 81 80 91 100 78 84 99 104 83 79 95 100 75 75 87 93 71 86 100 108 85 86 100 112 85 86 100 112 85 7 +76 83 91 74 71 79 87 70 71 79 79 67 75 83 85 71 71 75 85 67 71 79 77 67 82 96 100 81 78 83 84 70 74 75 88 66 7 +71 79 79 67 71 79 83 67 71 79 79 63 71 79 77 67 71 75 81 67 67 72 81 67 74 75 88 66 70 79 88 66 70 75 76 66 7 +88 121 128 100 84 107 113 87 84 99 104 79 90 113 122 96 95 128 127 103 95 123 127 100 87 103 114 90 92 122 135 109 96 127 130 105 3 +79 107 109 87 79 107 113 87 79 103 104 83 78 100 104 81 82 104 104 85 82 104 108 85 79 99 105 83 83 103 114 86 79 99 105 83 3 +79 107 113 87 79 103 104 83 79 103 104 79 82 104 104 85 82 104 108 85 82 100 108 85 83 103 114 86 79 99 105 83 79 95 101 79 3 +79 95 96 75 79 95 100 75 75 95 100 79 78 91 92 70 74 91 92 70 78 91 96 74 83 91 97 72 83 91 97 72 79 91 93 72 4 +75 91 96 71 79 87 93 71 79 87 93 67 74 87 88 70 78 87 84 70 74 87 88 66 79 88 93 68 79 91 93 72 75 91 93 68 4 +79 87 93 67 75 87 96 71 75 91 96 71 74 87 88 66 74 87 92 70 78 87 88 66 75 91 93 68 79 88 93 68 75 84 90 68 4 +75 91 96 71 75 87 93 67 71 87 89 67 78 87 88 66 78 87 92 66 74 83 92 66 75 84 90 68 75 84 93 72 75 88 90 68 4 +71 79 81 62 71 79 85 62 67 75 85 62 70 83 92 66 70 83 88 70 70 83 84 66 75 91 97 75 75 88 93 72 67 81 86 64 4 +71 79 85 62 67 75 85 62 71 75 85 62 70 83 88 70 70 83 84 66 66 79 84 63 75 88 93 72 67 81 86 64 63 77 86 72 4 +67 75 85 62 71 75 85 62 67 79 81 62 70 83 84 66 66 79 84 63 66 79 88 66 67 81 86 64 63 77 86 72 63 73 97 83 4 +71 75 85 62 67 79 81 62 71 79 85 62 66 79 84 63 66 79 88 66 70 79 88 66 63 77 86 72 63 73 97 83 59 60 110 98 4 +67 79 81 62 71 79 85 62 71 75 81 67 66 79 88 66 70 79 88 66 66 71 88 70 63 73 97 83 59 60 110 98 49 45 119 116 4 +67 75 96 79 75 83 96 83 79 99 104 83 49 40 112 114 46 34 122 125 49 40 117 114 46 34 119 131 42 34 119 131 46 34 119 131 2 +88 107 104 87 88 107 109 83 84 99 109 83 86 104 108 85 86 104 108 85 86 104 108 85 87 95 105 83 83 99 110 83 87 99 105 86 3 +88 103 109 87 93 103 109 87 88 107 109 87 86 104 112 85 86 104 104 81 86 96 104 81 92 103 110 83 
92 103 110 86 87 99 105 83 3 +88 103 100 83 88 103 109 83 88 103 113 83 82 100 104 78 86 100 96 81 82 100 104 81 83 99 101 79 79 95 101 79 79 95 105 79 3 +84 103 104 83 88 99 100 79 84 99 104 79 82 96 100 81 82 100 108 81 82 96 104 78 87 95 97 83 83 99 101 79 83 99 105 79 3 +84 99 104 79 79 95 100 79 79 99 100 83 82 96 104 78 82 96 100 81 86 96 104 81 83 99 105 79 83 95 101 79 79 99 97 79 3 +79 91 104 79 75 79 96 79 75 83 96 79 82 100 104 78 82 96 104 81 82 96 104 85 83 95 105 83 83 95 101 79 83 99 105 83 3 +75 83 96 79 84 99 104 83 84 99 104 83 82 96 104 85 82 100 104 85 86 100 108 85 83 99 105 83 87 99 105 83 83 103 105 86 3 +71 75 85 67 71 79 77 67 71 75 81 67 78 83 84 70 74 75 88 66 70 79 88 66 79 88 97 72 71 81 86 68 71 77 82 64 7 +71 79 77 67 71 75 81 67 67 72 81 67 74 75 88 66 70 79 88 66 70 75 76 66 71 81 86 68 71 77 82 64 71 81 82 68 7 +95 128 127 103 95 123 127 100 82 100 108 85 92 122 135 109 96 127 130 105 92 108 114 86 93 125 135 104 93 130 129 101 89 120 129 97 3 +78 100 104 81 82 104 104 85 82 104 108 85 79 99 105 83 83 103 114 86 79 99 105 83 78 102 110 83 82 102 105 83 82 102 101 80 3 +82 104 104 85 82 104 108 85 82 100 108 85 83 103 114 86 79 99 105 83 79 95 101 79 82 102 105 83 82 102 101 80 78 102 105 80 3 +82 104 108 85 82 100 108 85 78 96 96 78 79 99 105 83 79 95 101 79 83 95 93 75 82 102 101 80 78 102 105 80 78 97 101 80 4 +82 100 108 85 78 96 96 78 78 91 92 70 79 95 101 79 83 95 93 75 83 91 97 72 78 102 105 80 78 97 101 80 82 92 93 76 4 +74 87 88 70 78 87 84 70 74 87 88 66 79 88 93 68 79 91 93 72 75 91 93 68 82 88 97 73 78 92 97 73 78 88 93 73 4 +74 83 92 66 70 83 92 66 70 83 88 70 75 88 90 68 75 91 97 75 75 88 93 72 74 84 89 69 74 88 93 76 67 75 93 80 4 +70 83 88 70 70 83 84 66 66 79 84 63 75 88 93 72 67 81 86 64 63 77 86 72 67 75 93 80 57 63 97 90 53 49 110 108 4 +66 79 84 63 66 79 88 66 70 79 88 66 63 77 86 72 63 73 97 83 59 60 110 98 53 49 110 108 47 40 119 122 42 37 119 129 2 +66 79 88 66 70 79 88 66 66 71 88 70 63 73 97 83 59 60 110 98 49 45 119 116 47 40 119 122 42 37 119 129 44 34 124 136 2 +56 49 104 100 49 40 112 114 46 34 122 125 46 32 119 131 46 34 119 131 42 34 119 131 42 31 124 133 44 34 119 133 44 37 119 136 2 +82 96 104 78 86 100 108 85 90 104 112 85 71 77 97 75 83 99 105 83 87 103 105 86 53 56 105 97 74 92 101 76 82 102 110 83 3 +86 100 108 85 90 104 112 89 90 104 112 85 87 103 105 86 87 108 114 86 92 108 114 90 93 106 114 90 93 115 114 90 93 115 114 90 3 +90 104 112 85 90 109 112 85 90 109 117 89 92 108 114 90 96 108 114 90 96 112 114 90 93 115 114 90 93 111 119 90 89 111 114 87 3 +90 109 117 89 90 109 112 89 90 109 112 89 96 112 114 90 92 108 110 90 87 108 110 90 89 111 114 87 89 106 114 87 89 106 110 87 3 +90 104 112 85 90 104 112 89 86 104 108 89 92 108 114 86 92 108 110 86 92 108 110 86 93 106 114 87 89 111 110 87 85 106 110 87 3 +90 109 108 89 86 104 112 85 86 104 104 81 87 103 105 83 92 103 110 83 92 103 110 86 89 106 114 90 93 106 105 90 89 111 110 83 3 +86 104 112 85 86 104 104 81 86 96 104 81 92 103 110 83 92 103 110 86 87 99 105 83 93 106 105 90 89 111 110 83 89 111 114 87 3 +90 109 112 92 86 109 108 89 86 109 112 89 92 108 110 90 92 108 110 90 87 108 110 86 89 106 110 87 89 106 114 90 89 102 114 90 3 +86 104 108 89 86 104 104 85 82 100 100 85 87 103 110 86 83 103 105 86 83 103 110 83 89 106 114 90 85 102 110 87 85 106 114 87 3 +82 100 100 85 82 100 104 78 86 100 96 81 83 103 110 83 83 99 101 79 79 95 101 79 85 106 114 87 89 97 105 83 85 102 105 87 3 +82 100 104 78 86 100 96 81 82 100 104 81 83 99 101 79 79 95 101 79 79 95 105 79 89 97 
92 97 111 118 92 93 111 113 92 3 +88 106 106 87 92 106 111 87 92 111 115 91 92 107 113 88 92 107 113 92 92 107 113 88 97 111 118 92 93 111 113 92 93 111 109 87 3 +92 106 111 87 92 111 115 91 97 115 115 91 92 107 113 92 92 107 113 88 92 107 118 92 93 111 113 92 93 111 109 87 97 111 109 87 3 +101 115 120 94 97 111 115 87 80 89 98 72 97 112 122 88 101 112 118 92 92 107 113 85 97 111 113 87 93 107 113 92 88 111 118 92 3 +68 73 78 65 64 73 74 54 68 69 78 54 64 71 75 56 64 71 75 59 71 71 75 52 63 72 74 58 67 72 77 58 67 72 77 54 7 +64 73 74 54 68 69 78 54 64 66 64 54 64 71 75 59 71 71 75 52 64 68 71 52 67 72 77 58 67 72 77 54 67 72 77 54 7 +64 66 64 54 64 69 64 54 64 69 71 57 64 68 71 52 60 71 71 56 64 71 71 56 67 72 77 54 63 68 70 54 67 68 70 54 7 +64 69 67 54 64 69 71 54 64 66 67 54 64 71 75 56 68 68 75 56 64 68 71 52 67 72 74 54 67 72 74 54 67 72 77 54 7 +64 66 67 54 64 69 71 54 68 69 78 54 64 68 71 52 64 71 67 59 68 71 75 59 67 72 77 54 67 72 77 54 67 72 74 54 7 +64 69 71 54 68 69 78 54 68 69 71 57 64 71 67 59 68 71 75 59 64 75 75 56 67 72 77 54 67 72 74 54 67 68 74 54 7 +68 73 78 57 68 77 78 61 68 77 74 61 71 75 75 59 68 71 75 56 68 71 67 56 67 72 70 58 67 72 70 54 67 72 70 58 7 +80 89 94 72 76 81 86 72 72 81 90 72 80 87 100 78 80 87 100 74 71 75 87 74 79 95 96 75 79 91 96 75 71 75 93 79 7 +68 77 90 72 68 73 86 72 68 69 86 76 64 64 87 78 64 68 87 78 64 71 87 74 67 68 89 79 63 68 85 79 67 68 89 79 5 +72 77 82 68 68 77 90 72 68 77 90 76 71 75 83 70 68 75 83 67 71 79 87 74 67 72 85 67 67 75 81 67 71 79 89 71 7 +68 77 90 72 68 77 90 76 60 59 86 72 68 75 83 67 71 79 87 74 71 71 87 74 67 75 81 67 71 79 89 71 71 79 93 71 7 +57 52 90 76 57 52 78 72 57 59 78 68 60 61 87 74 56 57 87 74 56 54 83 70 67 68 89 75 63 61 93 79 63 58 85 75 5 +57 52 78 72 50 46 78 76 53 49 82 65 60 61 83 70 56 54 83 74 56 54 83 70 63 64 77 62 67 68 81 67 71 75 85 71 5 +57 55 71 61 57 55 78 65 57 55 82 68 56 57 83 78 53 48 91 85 53 45 96 96 48 37 100 104 48 37 104 104 51 32 100 108 2 +50 46 102 102 44 31 111 120 44 31 115 124 50 42 96 96 50 48 96 96 46 36 104 107 44 32 104 116 51 40 96 96 44 34 100 100 2 +44 31 111 120 44 31 115 124 44 37 115 120 50 48 96 96 46 36 104 107 43 31 104 107 51 40 96 96 44 34 100 100 48 29 100 100 2 +44 31 115 124 44 37 115 120 47 37 106 113 46 36 104 107 43 31 104 107 40 31 104 110 44 34 100 100 48 29 100 100 44 29 100 100 2 +47 37 106 109 41 34 115 113 44 29 115 120 40 31 104 107 43 31 104 114 43 29 113 114 44 32 104 104 44 34 104 104 44 32 109 104 2 +44 29 115 120 47 31 106 105 47 37 94 87 43 29 113 114 43 29 108 114 46 34 104 103 44 32 109 104 41 32 109 112 44 32 109 112 2 +101 116 122 96 101 116 122 96 97 116 122 96 97 116 123 96 93 116 123 96 97 116 118 96 95 118 122 96 95 118 117 92 99 113 117 96 3 +101 116 122 96 97 116 122 96 97 112 118 92 93 116 123 96 97 116 118 96 93 111 118 92 95 118 117 92 99 113 117 96 99 118 122 96 3 +88 107 113 88 92 107 108 85 92 107 113 88 88 107 109 92 97 111 113 92 93 111 118 92 90 109 117 89 90 104 117 89 95 109 112 89 3 +92 107 113 88 92 107 113 88 92 107 113 92 93 111 118 92 97 111 118 92 93 111 113 92 95 109 112 89 95 113 117 89 99 113 117 92 3 +92 107 113 88 92 107 113 92 92 107 113 88 97 111 118 92 93 111 113 92 93 111 109 87 95 113 117 89 99 113 117 92 99 113 122 96 3 +92 107 113 92 92 107 113 88 92 107 118 92 93 111 113 92 93 111 109 87 97 111 109 87 99 113 117 92 99 113 122 96 95 109 117 89 3 +97 112 122 88 101 112 118 92 92 107 113 85 97 111 113 87 93 107 113 92 88 111 118 92 95 109 117 89 90 113 112 92 90 109 108 89 3 +101 112 118 92 92 107 113 85 84 
95 104 74 93 107 113 92 88 111 118 92 84 103 109 83 90 113 112 92 90 109 108 89 86 104 108 85 3 +92 107 113 85 84 95 104 74 71 83 100 78 88 111 118 92 84 103 109 83 71 79 93 71 90 109 108 89 86 104 108 85 74 91 92 74 3 +84 95 104 74 71 83 100 78 68 79 100 81 84 103 109 83 71 79 93 71 63 68 89 71 86 104 108 85 74 91 92 74 70 75 84 63 2 +71 83 100 78 68 79 100 81 68 75 96 78 71 79 93 71 63 68 89 71 67 75 77 62 74 91 92 74 70 75 84 63 63 71 73 55 2 +68 79 100 81 68 75 96 78 64 75 87 78 63 68 89 71 67 75 77 62 67 72 77 58 70 75 84 63 63 71 73 55 63 71 73 55 7 +64 75 87 78 68 75 83 70 71 75 83 63 67 72 77 58 67 68 77 54 67 72 70 54 63 71 73 55 63 67 66 55 63 67 73 55 7 +68 71 79 59 64 71 75 56 64 71 75 59 67 72 70 54 63 72 74 58 67 72 77 58 63 71 69 55 63 71 76 55 63 71 76 59 7 +68 68 75 56 64 68 71 52 64 71 67 59 67 72 74 54 67 72 77 54 67 72 77 54 66 75 73 59 66 75 76 59 66 75 76 59 7 +64 75 75 56 68 71 71 56 64 75 71 56 67 68 74 54 67 72 70 54 67 68 74 54 66 71 73 55 66 71 76 55 66 71 73 55 7 +71 75 75 59 68 71 75 56 68 71 67 56 67 72 70 58 67 72 70 54 67 72 70 58 66 71 73 55 66 71 69 55 66 71 73 55 7 +68 75 71 56 68 71 75 56 68 75 75 59 71 72 74 58 67 75 77 58 71 75 77 67 66 71 73 59 70 75 80 59 70 79 88 66 7 +80 87 100 78 80 87 100 74 71 75 87 74 79 95 96 75 79 91 96 75 71 75 93 79 78 83 84 66 78 83 92 70 78 91 96 78 7 +64 64 87 78 64 68 87 78 64 71 87 74 67 68 89 79 63 68 85 79 67 68 89 79 70 79 96 78 70 79 92 81 70 67 88 78 5 +64 68 87 78 64 71 87 74 64 71 87 78 63 68 85 79 67 68 89 79 67 68 89 75 70 79 92 81 70 67 88 78 66 71 88 78 5 +64 71 87 78 68 71 87 74 68 75 87 74 67 68 89 75 67 72 85 71 67 72 81 67 66 71 88 78 66 71 92 74 66 75 84 70 5 +68 75 83 67 68 71 83 70 71 75 87 88 67 72 81 67 71 72 77 67 67 68 81 67 66 71 84 70 66 71 80 66 66 71 80 66 5 +56 54 83 70 56 57 79 70 64 61 83 70 63 58 85 75 63 54 81 71 63 58 85 67 70 75 88 74 63 67 88 78 66 63 80 70 5 +60 61 83 70 56 54 83 74 56 54 83 70 63 64 77 62 67 68 81 67 71 75 85 71 63 67 80 63 66 71 76 63 66 79 80 63 5 +56 54 83 70 60 54 83 70 56 57 83 78 71 75 85 71 63 54 100 92 48 37 100 104 66 79 80 63 70 79 92 70 74 87 96 78 2 +60 54 83 70 56 57 83 78 53 48 91 85 63 54 100 92 48 37 100 104 48 37 104 104 70 79 92 70 74 87 96 78 63 56 104 100 2 +53 48 91 85 53 45 96 96 46 36 100 107 48 37 104 104 51 32 100 108 48 34 104 108 63 56 104 100 46 32 104 114 46 32 104 111 2 +46 36 100 107 43 31 108 117 40 29 108 121 48 34 104 108 48 37 104 112 44 29 109 121 46 32 104 111 43 32 104 114 46 34 104 118 2 +43 31 104 117 50 42 96 96 50 48 96 96 44 29 104 121 44 32 104 116 51 40 96 96 46 34 104 114 40 29 112 122 43 27 108 125 2 +46 36 104 107 43 31 104 107 40 31 104 110 44 34 100 100 48 29 100 100 44 29 100 100 46 29 108 122 49 40 96 100 49 40 92 92 2 +40 31 104 107 43 31 104 114 43 29 113 114 44 32 104 104 44 34 104 104 44 32 109 104 43 32 104 107 43 29 104 107 43 32 100 107 2 +43 29 113 114 43 29 108 114 46 34 104 103 44 32 109 104 41 32 109 112 44 32 109 112 43 32 100 107 43 32 100 103 40 32 100 107 2 +97 116 118 96 97 116 123 96 93 116 123 96 99 113 117 92 95 118 122 96 95 118 117 92 92 112 110 90 96 112 119 90 96 112 114 94 3 +97 116 123 96 93 116 123 96 97 116 118 96 95 118 122 96 95 118 117 92 99 113 117 96 96 112 119 90 96 112 114 94 96 117 119 94 3 +97 111 118 96 97 116 113 92 93 111 113 92 104 113 127 96 99 118 117 92 95 113 122 92 96 112 114 94 96 112 114 98 92 112 119 90 3 +97 116 113 92 93 111 113 92 88 111 109 87 99 118 117 92 95 113 122 92 95 113 112 89 96 112 114 98 92 112 119 90 92 112 114 90 3 +93 111 113 92 93 111 109 87 97 111 
109 87 99 113 117 92 99 113 122 96 95 109 117 89 96 117 119 94 92 117 114 90 92 108 105 86 3 +88 111 118 92 84 103 109 83 71 79 93 71 90 109 108 89 86 104 108 85 74 91 92 74 75 84 90 68 75 77 82 57 67 73 75 49 3 +63 68 89 71 67 75 77 62 67 72 77 58 70 75 84 63 63 71 73 55 63 71 73 55 63 66 72 53 63 70 75 53 59 66 72 53 7 +67 72 77 58 67 68 77 54 67 72 70 54 63 71 73 55 63 67 66 55 63 67 73 55 59 66 72 53 63 66 75 57 63 70 75 57 7 +67 68 77 54 67 72 70 54 67 72 70 54 63 67 66 55 63 67 73 55 63 71 69 55 63 66 75 57 63 70 75 57 63 70 75 57 7 +67 72 70 54 63 72 74 58 67 72 77 58 63 71 69 55 63 71 76 55 63 71 76 59 63 70 75 57 67 73 79 57 67 73 75 60 7 +67 72 77 54 63 68 70 54 67 68 70 54 63 75 76 59 66 75 80 59 66 75 73 55 67 73 79 60 67 73 82 60 71 77 82 60 7 +67 68 70 54 67 72 74 54 67 72 74 54 63 71 73 55 63 71 73 55 66 75 73 59 67 73 75 57 67 81 82 60 67 81 82 64 7 +67 72 74 54 67 72 77 54 67 72 77 54 66 75 73 59 66 75 76 59 66 75 76 59 67 81 82 64 67 77 82 64 63 77 75 60 7 +67 72 77 54 67 72 77 54 67 72 74 54 66 75 76 59 66 75 76 59 66 79 80 59 67 77 82 64 63 77 75 60 71 84 86 64 7 +67 68 74 54 67 72 70 54 67 68 74 54 66 71 73 55 66 71 76 55 66 71 73 55 71 81 79 68 71 73 82 60 67 73 72 57 7 +67 72 70 54 67 72 70 58 67 72 74 58 66 71 69 55 66 71 73 55 70 71 73 55 63 70 72 57 67 77 72 60 71 77 72 64 7 +71 75 77 67 71 79 81 67 75 83 85 67 70 79 88 66 74 79 88 66 74 83 88 70 71 81 79 64 67 73 79 60 71 77 86 60 7 +79 91 96 75 71 75 93 79 67 68 93 79 78 83 92 70 78 91 96 78 78 83 88 74 75 91 97 75 83 95 105 79 83 99 105 75 7 +67 72 85 71 67 72 81 67 67 72 81 67 66 71 92 74 66 75 84 70 66 71 84 70 67 73 90 75 67 73 90 75 63 70 86 75 5 +67 72 81 67 67 72 81 67 71 72 77 67 66 75 84 70 66 71 84 70 66 71 80 66 67 73 90 75 63 70 86 75 63 70 82 72 5 +71 79 89 71 71 79 93 71 67 68 89 75 66 75 80 70 66 75 88 70 70 79 88 74 67 73 86 72 71 77 90 72 71 81 90 75 7 +67 68 89 75 63 61 93 79 63 58 85 75 70 79 88 74 70 79 88 74 70 75 88 74 71 81 90 75 71 84 93 75 75 88 93 75 4 +63 58 85 75 63 54 81 71 63 58 85 67 70 75 88 74 63 67 88 78 66 63 80 70 75 88 93 75 75 77 86 68 71 73 79 60 4 +63 58 85 67 63 64 77 62 67 68 81 67 66 63 80 70 63 67 80 63 66 71 76 63 71 73 79 60 67 66 75 60 67 66 68 60 5 +67 68 81 67 71 75 85 71 63 54 100 92 66 71 76 63 66 79 80 63 70 79 92 70 67 66 68 60 71 73 75 60 71 77 79 64 4 +48 37 104 104 51 32 100 108 48 34 104 108 63 56 104 100 46 32 104 114 46 32 104 111 71 81 93 83 59 51 101 113 46 32 101 116 2 +44 29 109 121 44 29 104 121 44 32 104 116 46 34 104 118 46 34 104 114 40 29 112 122 42 30 101 120 46 32 105 116 46 32 105 120 2 +51 40 96 96 44 34 100 100 48 29 100 100 43 27 108 125 46 29 108 122 49 40 96 100 42 32 101 127 46 30 110 127 46 32 110 120 2 +44 29 100 100 44 32 104 104 44 34 104 104 49 40 92 92 43 32 104 107 43 29 104 107 49 40 97 101 46 32 110 113 39 30 101 113 2 +44 34 104 104 44 32 109 104 41 32 109 112 43 29 104 107 43 32 100 107 43 32 100 103 39 30 101 113 42 30 105 113 42 30 105 116 2 +41 32 109 112 44 32 109 112 48 37 104 100 43 32 100 103 40 32 100 107 43 29 104 107 42 30 105 116 42 32 105 109 42 30 101 109 2 +95 113 117 96 104 113 127 96 99 118 117 92 96 112 119 94 96 112 114 94 96 112 114 98 97 115 119 97 97 111 119 94 97 115 114 94 3 +95 113 112 89 90 109 117 89 90 104 117 89 92 112 114 90 92 108 114 94 92 108 114 90 97 115 114 90 93 111 114 94 89 111 114 87 3 +95 109 112 89 95 113 117 89 99 113 117 92 96 108 110 90 96 112 114 94 96 117 119 94 93 111 110 87 93 111 114 90 93 111 114 87 3 +95 109 117 89 90 113 112 92 90 109 108 89 87 99 105 83 83 95 97 79 75 84 90 
68 82 88 97 73 78 84 89 69 67 71 74 55 7 +86 104 108 85 74 91 92 74 70 75 84 63 75 77 82 57 67 73 75 49 63 66 72 53 67 67 70 48 63 67 70 51 63 67 74 51 7 +74 91 92 74 70 75 84 63 63 71 73 55 67 73 75 49 63 66 72 53 63 70 75 53 63 67 70 51 63 67 74 51 60 67 78 55 7 +63 67 66 55 63 67 73 55 63 71 69 55 63 66 75 57 63 70 75 57 63 70 75 57 63 67 74 58 63 71 78 55 67 71 78 58 7 +63 71 76 59 63 75 76 59 63 75 76 59 67 73 75 60 67 73 75 60 67 73 79 60 63 71 74 58 67 75 78 58 67 79 82 62 7 +63 75 76 59 63 75 76 59 66 75 80 59 67 73 75 60 67 73 79 60 67 73 82 60 67 75 78 58 67 79 82 62 67 75 82 62 7 +66 75 73 55 63 71 73 55 63 71 73 55 71 77 82 60 67 73 75 57 67 81 82 60 67 75 78 58 70 75 78 58 67 79 82 62 7 +66 75 73 59 66 75 76 59 66 75 76 59 67 81 82 64 67 77 82 64 63 77 75 60 67 75 82 58 63 75 78 55 63 75 78 58 7 +66 71 73 55 70 71 73 55 66 71 73 59 67 77 72 60 71 77 72 64 71 81 82 64 70 79 82 65 70 79 85 65 70 79 85 69 7 +74 79 88 66 74 83 88 70 70 79 88 66 67 73 79 60 71 77 86 60 75 81 82 64 67 79 85 62 67 84 89 69 74 88 93 73 7 +78 83 92 70 78 91 96 78 78 83 88 74 75 91 97 75 83 95 105 79 83 99 105 75 78 92 93 76 78 92 93 76 85 97 101 76 7 +70 79 92 81 70 67 88 78 66 71 88 78 71 81 93 79 71 77 93 79 71 73 93 79 74 84 89 73 70 84 97 80 70 75 93 76 5 +70 67 88 78 66 71 88 78 66 71 92 74 71 77 93 79 71 73 93 79 67 73 90 75 70 84 97 80 70 75 93 76 67 75 89 76 5 +66 71 88 78 66 71 92 74 66 75 84 70 71 73 93 79 67 73 90 75 67 73 90 75 70 75 93 76 67 75 89 76 67 75 89 80 5 +66 71 80 66 63 71 73 66 66 71 80 66 63 66 82 68 63 66 82 68 63 70 82 68 67 79 89 76 70 79 89 80 70 84 89 73 5 +70 79 88 74 70 79 88 74 70 75 88 74 71 81 90 75 71 84 93 75 75 88 93 75 74 84 97 76 74 88 97 76 74 79 89 73 4 +70 79 88 74 70 75 88 74 63 67 88 78 71 84 93 75 75 88 93 75 75 77 86 68 74 88 97 76 74 79 89 73 67 79 85 65 4 +74 87 96 78 63 56 104 100 46 32 104 114 75 81 86 72 71 81 93 83 59 51 101 113 67 75 85 65 70 84 89 76 74 79 97 94 2 +46 32 104 114 46 32 104 111 43 32 104 114 59 51 101 113 46 32 101 116 46 32 101 116 74 79 97 94 53 43 105 115 50 34 105 115 2 +46 32 104 111 43 32 104 114 46 34 104 118 46 32 101 116 46 32 101 116 42 30 101 120 53 43 105 115 50 34 105 115 47 34 101 111 2 +40 29 112 122 43 27 108 125 46 29 108 122 46 32 105 120 42 32 101 127 46 30 110 127 44 31 105 122 44 31 110 129 42 29 110 126 2 +43 29 104 107 43 32 100 107 43 32 100 103 39 30 101 113 42 30 105 113 42 30 105 116 44 29 114 126 44 29 105 119 44 29 101 115 2 +96 112 119 90 96 112 114 94 96 117 119 94 93 115 114 90 93 115 114 90 101 120 119 94 92 111 115 91 97 115 120 94 101 120 120 98 3 +96 117 119 94 96 117 119 94 96 117 119 94 101 120 119 94 97 120 124 97 97 115 119 94 101 120 120 98 101 120 120 94 101 115 120 94 3 +96 112 119 94 96 112 114 94 96 112 114 98 97 115 119 97 97 111 119 94 97 115 114 94 97 115 125 94 92 115 115 94 92 111 111 91 3 +96 112 114 98 92 112 119 90 92 112 114 90 97 115 114 94 93 106 114 90 93 111 114 90 92 111 111 91 92 111 115 91 88 106 111 91 3 +92 112 114 90 92 112 114 90 92 108 114 94 93 111 114 90 97 115 114 90 93 111 114 94 88 106 111 91 88 106 111 87 88 106 111 91 3 +92 108 114 94 92 108 114 90 96 108 110 90 93 111 114 94 89 111 114 87 93 111 110 87 88 106 111 91 84 106 111 83 84 98 102 83 3 +92 108 114 90 96 108 110 90 96 112 114 94 89 111 114 87 93 111 110 87 93 111 114 90 84 106 111 83 84 98 102 83 84 106 111 83 3 +92 117 114 90 92 108 105 86 87 99 105 83 89 106 110 87 85 97 105 80 82 88 97 73 80 106 102 79 80 98 98 76 80 94 94 72 3 +75 77 82 57 67 73 75 49 63 66 72 53 67 67 70 48 63 67 70 51 63 67 74 51 
64 66 71 54 64 69 71 54 64 69 74 54 7 +63 70 75 53 59 66 72 53 63 66 75 57 60 67 78 55 60 67 74 55 63 67 74 58 64 69 74 57 64 73 74 57 68 77 74 57 7 +59 66 72 53 63 66 75 57 63 70 75 57 60 67 74 55 63 67 74 58 63 71 78 55 64 73 74 57 68 77 74 57 64 73 74 57 7 +67 73 79 57 67 73 75 60 67 73 75 60 63 75 78 58 63 71 74 58 67 75 78 58 64 73 82 61 64 73 86 61 64 73 78 57 7 +67 73 75 60 67 73 75 60 67 73 79 60 63 71 74 58 67 75 78 58 67 79 82 62 64 73 86 61 64 73 78 57 64 73 78 61 7 +67 73 79 60 67 73 82 60 71 77 82 60 67 79 82 62 67 75 82 62 67 75 78 58 64 73 78 61 64 73 78 61 68 73 78 57 7 +67 73 82 60 71 77 82 60 67 73 75 57 67 75 82 62 67 75 78 58 70 75 78 58 64 73 78 61 68 73 78 57 72 73 82 61 7 +67 73 75 57 67 81 82 60 67 81 82 64 70 75 78 58 67 79 82 62 67 75 82 58 72 73 82 61 72 77 74 57 68 77 74 57 7 +71 84 86 64 71 81 79 68 71 73 82 60 67 75 82 65 70 84 82 62 70 75 78 65 64 69 74 57 68 73 74 57 64 73 74 57 7 +71 77 72 64 71 81 82 64 71 81 86 68 70 79 85 65 70 79 85 69 74 79 82 65 72 81 82 65 72 81 82 65 76 81 82 65 7 +71 77 86 60 75 81 82 64 75 84 82 68 67 84 89 69 74 88 93 73 78 92 93 73 76 85 90 72 76 89 94 76 76 85 94 76 7 +79 84 93 75 71 81 93 79 71 77 93 79 82 92 97 80 74 84 89 73 70 84 97 80 80 94 98 76 76 85 90 76 72 81 90 76 7 +67 73 90 75 63 70 86 75 63 70 82 72 67 75 89 80 67 79 93 76 70 75 89 76 80 94 102 83 80 94 102 83 80 94 106 83 5 +63 66 82 68 63 66 82 68 63 70 82 68 67 79 89 76 70 79 89 80 70 84 89 73 72 98 106 83 80 98 102 87 76 94 98 83 5 +67 66 68 60 71 73 75 60 71 77 79 64 67 79 82 62 70 75 78 58 67 75 82 69 72 85 86 72 72 77 82 68 68 73 78 61 4 +71 77 79 64 75 81 86 72 71 81 93 83 67 75 82 69 67 75 85 65 70 84 89 76 68 73 78 61 64 73 74 65 72 81 86 72 4 +42 30 101 120 46 32 105 116 46 32 105 120 47 34 101 111 44 31 101 119 44 31 105 122 44 34 102 109 47 34 106 113 47 34 106 116 2 +46 32 110 120 49 40 97 101 46 32 110 113 42 27 110 129 44 34 110 122 50 37 110 119 41 29 111 128 44 31 106 124 47 34 102 113 2 +46 32 110 113 39 30 101 113 42 30 105 113 50 37 110 119 44 29 114 126 44 29 105 119 47 34 102 113 50 34 106 113 47 37 106 116 2 +39 30 101 113 42 30 105 113 42 30 105 116 44 29 114 126 44 29 105 119 44 29 101 115 50 34 106 113 47 37 106 116 53 49 98 94 2 +42 30 105 116 42 32 105 109 42 30 101 109 44 29 101 115 44 34 105 104 47 43 101 97 53 49 98 94 60 66 94 79 68 77 94 72 2 +93 111 114 90 93 115 114 90 93 115 114 90 92 106 115 91 92 111 115 91 97 115 120 94 92 107 113 92 97 112 118 96 101 116 122 96 3 +93 115 114 90 93 115 114 90 101 120 119 94 92 111 115 91 97 115 120 94 101 120 120 98 97 112 118 96 101 116 122 96 101 116 122 96 3 +97 120 124 97 97 115 119 94 97 115 119 97 101 120 120 94 101 115 120 94 97 115 125 94 101 116 122 96 101 112 122 96 97 112 122 92 3 +93 111 114 90 97 115 114 90 93 111 114 94 88 106 111 91 88 106 111 87 88 106 111 91 92 107 118 88 88 103 104 85 84 99 104 81 3 +89 111 114 87 93 111 110 87 93 111 114 90 84 106 111 83 84 98 102 83 84 106 111 83 84 99 104 81 84 99 108 85 84 107 113 85 4 +93 111 114 90 93 111 114 87 89 106 110 87 84 106 111 83 80 106 106 79 80 106 102 79 84 107 113 85 84 107 113 85 88 103 108 85 4 +89 106 110 87 85 97 105 80 82 88 97 73 80 106 102 79 80 98 98 76 80 94 94 72 88 103 108 85 84 99 104 78 76 87 91 74 4 +60 67 78 55 60 67 74 55 63 67 74 58 64 69 74 57 64 73 74 57 68 77 74 57 64 75 79 56 64 75 79 59 64 75 79 59 7 +60 67 74 55 63 67 74 58 63 71 78 55 64 73 74 57 68 77 74 57 64 73 74 57 64 75 79 59 64 75 79 59 64 75 75 63 7 +67 79 82 62 67 75 82 62 67 75 78 58 64 73 78 61 64 73 78 61 68 73 78 57 68 79 79 63 64 75 
79 59 68 75 79 59 7 +67 75 78 58 70 75 78 58 67 79 82 62 68 73 78 57 72 73 82 61 72 77 74 57 68 75 79 59 64 75 79 59 68 75 75 59 7 +70 75 78 58 67 79 82 62 67 75 82 58 72 73 82 61 72 77 74 57 68 77 74 57 64 75 79 59 68 75 75 59 64 75 75 52 7 +67 79 82 62 67 75 82 58 63 75 78 55 72 77 74 57 68 77 74 57 64 73 82 61 68 75 75 59 64 75 75 52 64 68 75 56 7 +63 75 78 58 67 75 82 65 70 84 82 62 64 73 78 57 64 69 74 57 68 73 74 57 64 68 71 56 64 71 71 56 68 71 71 59 7 +67 75 82 65 70 84 82 62 70 75 78 65 64 69 74 57 68 73 74 57 64 73 74 57 64 71 71 56 68 71 71 59 68 71 75 56 7 +70 84 82 62 70 75 78 65 67 79 78 58 68 73 74 57 64 73 74 57 64 69 78 61 68 71 71 59 68 71 75 56 68 71 75 59 7 +67 75 78 62 70 75 82 62 70 79 82 65 68 77 82 61 68 77 74 61 68 77 78 61 68 75 79 63 68 79 79 59 68 75 83 63 7 +74 79 85 62 67 79 85 62 67 84 89 69 72 85 86 68 72 81 86 68 76 85 90 72 71 83 87 63 71 83 83 70 71 83 83 67 7 +67 79 85 62 67 84 89 69 74 88 93 73 72 81 86 68 76 85 90 72 76 89 94 76 71 83 83 70 71 83 83 67 80 87 91 74 7 +74 88 93 73 78 92 93 73 78 92 93 76 76 89 94 76 76 85 94 76 76 98 98 76 80 87 91 74 76 91 96 74 76 91 96 74 7 +85 97 101 76 82 92 97 80 74 84 89 73 80 94 98 76 80 94 98 76 76 85 90 76 80 87 91 74 80 91 100 78 80 91 100 78 7 +74 79 89 73 67 79 85 65 67 75 78 62 68 85 98 87 72 89 94 79 72 85 90 76 76 103 108 92 76 103 108 92 71 95 104 81 4 +67 75 78 65 67 79 82 62 70 75 78 58 72 81 86 72 72 85 86 72 72 77 82 68 76 91 100 81 76 91 96 81 76 83 87 67 4 +47 34 101 111 44 31 101 119 44 31 105 122 44 34 102 109 47 34 106 113 47 34 106 116 46 34 104 110 46 34 100 107 43 36 104 114 2 +44 31 110 129 42 29 110 126 42 27 110 129 44 31 111 124 44 29 111 128 41 29 111 128 46 34 108 121 40 31 104 125 40 29 113 132 2 +42 27 110 129 44 34 110 122 50 37 110 119 41 29 111 128 44 31 106 124 47 34 102 113 40 29 113 132 40 29 113 128 43 31 108 121 2 +50 37 110 119 44 29 114 126 44 29 105 119 47 34 102 113 50 34 106 113 47 37 106 116 43 31 108 121 50 45 100 99 64 68 91 78 2 +92 111 115 91 97 115 120 94 101 120 120 98 97 112 118 96 101 116 122 96 101 116 122 96 93 116 118 92 97 121 123 96 97 116 123 100 3 +97 115 120 94 101 120 120 98 101 120 120 94 101 116 122 96 101 116 122 96 101 116 122 96 97 121 123 96 97 116 123 100 97 116 123 96 3 +97 115 125 94 92 115 115 94 92 111 111 91 97 112 122 92 92 107 118 96 92 107 118 88 97 116 118 96 93 111 118 92 93 107 113 87 3 +92 111 111 91 92 111 115 91 88 106 111 91 92 107 118 88 92 112 113 92 92 107 118 88 93 107 113 87 88 107 109 83 84 99 109 79 3 +88 106 111 91 88 106 111 87 88 106 111 91 92 107 118 88 88 103 104 85 84 99 104 81 84 99 109 79 79 95 100 79 84 103 109 79 3 +80 98 98 76 80 94 94 72 72 85 82 68 84 99 104 78 76 87 91 74 76 79 87 63 84 99 100 79 79 91 93 71 71 79 85 62 7 +72 85 82 68 64 69 71 54 64 66 71 54 76 79 87 63 68 68 75 52 64 68 67 56 71 79 85 62 67 72 70 50 63 68 70 54 7 +64 69 74 54 64 69 74 57 64 73 74 57 68 75 75 56 64 75 79 56 64 75 79 59 67 72 77 54 63 72 77 58 67 75 77 58 7 +64 73 86 61 64 73 78 57 64 73 78 61 68 75 75 59 68 75 75 59 68 79 79 63 71 75 77 58 71 79 81 58 67 79 77 58 7 +64 73 78 61 64 73 78 61 68 73 78 57 68 79 79 63 64 75 79 59 68 75 79 59 67 79 77 58 67 75 81 58 67 72 74 58 7 +64 73 74 57 64 69 78 61 68 73 78 61 68 71 75 56 68 71 75 59 64 75 75 59 63 68 70 54 67 68 74 58 67 72 74 58 7 +64 69 78 61 68 73 78 61 68 77 82 61 68 71 75 59 64 75 75 59 68 75 79 63 67 68 74 58 67 72 74 58 67 72 74 58 7 +68 77 82 61 68 77 74 61 68 77 78 61 68 75 79 63 68 79 79 59 68 75 83 63 67 72 74 58 71 72 85 62 71 79 81 67 7 +68 77 74 61 68 77 78 61 72 
81 82 65 68 79 79 59 68 75 83 63 71 79 87 63 71 72 85 62 71 79 81 67 71 79 85 62 7 +68 77 78 61 72 81 82 65 72 81 82 65 68 75 83 63 71 79 87 63 71 83 83 63 71 79 81 67 71 79 85 62 71 79 85 62 7 +76 81 82 65 72 85 86 68 72 81 86 68 76 79 79 67 71 83 87 63 71 83 83 70 71 79 85 62 71 79 85 67 71 83 85 67 7 +76 85 94 76 76 98 98 76 80 98 98 76 76 91 96 74 76 91 96 74 76 91 100 74 71 87 89 71 75 83 89 71 75 87 93 71 7 +80 94 98 76 76 85 90 76 72 81 90 76 80 91 100 78 80 91 100 78 80 91 96 78 79 95 100 79 79 99 109 83 79 103 109 87 7 +68 73 78 61 64 73 74 65 72 81 86 72 68 83 79 67 68 83 83 70 68 79 83 67 71 79 85 67 63 75 81 67 67 79 85 67 4 +44 34 102 109 47 34 106 113 47 34 106 116 46 34 104 110 46 34 100 107 43 36 104 114 55 48 104 108 44 32 104 112 44 34 109 112 2 +44 31 111 124 44 29 111 128 41 29 111 128 46 34 108 121 40 31 104 125 40 29 113 132 41 37 104 116 41 32 104 121 44 32 109 125 2 +44 31 106 124 47 34 102 113 50 34 106 113 40 29 113 128 43 31 108 121 50 45 100 99 41 29 113 129 44 29 113 129 48 37 109 112 2 +50 34 106 113 47 37 106 116 53 49 98 94 50 45 100 99 64 68 91 78 68 83 87 70 48 37 109 112 63 64 93 75 71 83 85 67 2 +97 112 122 92 92 107 118 96 92 107 118 88 97 116 118 96 93 111 118 92 93 107 113 87 90 109 112 89 90 104 108 85 86 109 104 81 3 +92 107 118 88 88 103 104 85 84 99 104 81 84 99 109 79 79 95 100 79 84 103 109 79 86 104 104 85 86 104 104 81 86 100 108 85 4 +84 99 104 81 84 99 108 85 84 107 113 85 88 107 109 83 88 107 109 87 88 107 113 87 86 104 108 89 86 109 112 89 90 113 122 92 4 +84 99 108 85 84 107 113 85 84 107 113 85 88 107 109 87 88 107 113 87 84 107 113 87 86 109 112 89 90 113 122 92 90 109 112 89 4 +68 75 75 59 68 75 75 59 68 75 75 59 67 83 77 58 71 75 77 58 71 79 81 58 66 75 76 59 66 75 84 63 66 79 80 59 7 +68 75 75 59 68 79 79 63 64 75 79 59 71 79 81 58 67 79 77 58 67 75 81 58 66 79 80 59 66 75 80 59 66 75 80 59 7 +68 79 79 63 64 75 79 59 68 75 79 59 67 79 77 58 67 75 81 58 67 72 74 58 66 75 80 59 66 75 80 59 66 75 76 59 7 +64 75 79 59 68 75 75 59 64 75 75 52 63 72 74 58 67 75 74 58 71 75 77 54 63 71 76 59 63 71 76 59 63 75 80 59 7 +64 75 75 52 64 68 75 56 64 68 71 56 71 75 77 54 67 72 74 54 67 75 70 54 63 75 80 59 66 75 80 59 66 79 76 59 7 +64 68 71 56 64 71 71 56 68 71 71 59 67 75 70 54 67 75 74 58 63 72 74 58 66 79 76 59 66 79 80 63 66 75 76 59 7 +68 71 75 56 68 71 75 59 64 75 75 59 63 68 70 54 67 68 74 58 67 72 74 58 59 71 73 55 63 71 73 59 63 75 73 59 7 +71 83 83 70 71 83 83 67 80 87 91 74 71 83 85 67 75 83 89 67 71 79 89 71 66 79 88 63 70 83 88 66 70 79 92 66 7 +68 83 79 67 68 83 83 70 68 79 83 67 71 79 85 67 63 75 81 67 67 79 85 67 70 87 92 78 70 79 84 70 66 79 80 70 4 +68 79 83 67 71 83 96 74 71 87 96 81 67 79 85 67 75 79 89 71 75 83 93 71 66 79 80 70 70 79 80 66 70 79 80 66 4 +71 87 96 81 60 61 104 103 46 34 104 110 75 83 93 71 75 79 100 83 55 48 104 108 70 79 80 66 70 83 92 74 74 83 100 85 2 +46 34 108 121 40 31 104 125 40 29 113 132 41 37 104 116 41 32 104 121 44 32 109 125 46 32 100 107 46 34 104 107 46 32 104 114 2 +40 31 104 125 40 29 113 132 40 29 113 128 41 32 104 121 44 32 109 125 41 29 113 129 46 34 104 107 46 32 104 114 46 27 108 129 2 +68 83 87 70 71 83 91 70 71 83 87 63 71 83 85 67 67 79 85 67 67 79 85 62 66 67 80 59 70 79 84 63 70 83 88 66 4 +88 111 113 92 93 116 118 92 97 121 123 96 95 118 117 96 99 118 122 96 95 118 122 96 96 112 124 94 96 117 130 98 96 117 114 94 3 +97 121 123 96 97 116 123 100 97 116 123 96 95 118 122 96 99 118 127 100 99 118 117 96 96 117 114 94 96 112 114 90 87 103 105 86 3 +97 116 123 96 97 111 118 96 97 116 
118 96 99 118 117 96 95 113 112 92 90 109 112 89 87 103 105 86 92 108 114 90 92 112 119 90 3 +97 116 118 96 93 111 118 92 93 107 113 87 90 109 112 89 90 104 108 85 86 109 104 81 92 112 119 90 92 108 110 94 92 108 110 90 3 +93 111 118 92 93 107 113 87 88 107 109 83 90 104 108 85 86 109 104 81 86 104 112 85 92 108 110 94 92 108 110 90 83 108 114 86 4 +84 103 109 79 88 107 109 83 88 107 109 87 86 100 108 85 86 104 108 89 86 109 112 89 87 103 105 83 83 103 114 86 87 112 119 90 4 +88 107 109 87 88 107 113 87 84 107 113 87 86 109 112 89 90 113 122 92 90 109 112 89 87 112 119 90 92 112 114 90 87 103 105 83 4 +84 99 100 79 79 91 93 71 71 79 85 62 78 91 96 70 74 83 88 66 74 83 88 66 71 84 82 64 71 77 86 68 71 81 82 60 7 +67 79 77 58 67 75 81 58 67 72 74 58 66 75 80 59 66 75 80 59 66 75 76 59 67 73 75 60 67 73 79 57 67 73 72 60 7 +71 75 77 54 67 72 74 54 67 75 70 54 63 75 80 59 66 75 80 59 66 79 76 59 63 73 79 57 67 81 82 60 67 77 86 60 7 +63 72 74 58 63 68 70 54 67 68 74 58 66 75 76 59 59 71 73 55 63 71 73 59 63 73 75 60 67 73 72 57 63 70 75 57 7 +63 68 70 54 67 68 74 58 67 72 74 58 59 71 73 55 63 71 73 59 63 75 73 59 67 73 72 57 63 70 75 57 67 73 79 60 7 +67 68 74 58 67 72 74 58 67 72 74 58 63 71 73 59 63 75 73 59 63 75 73 55 63 70 75 57 67 73 79 60 67 70 75 60 7 +71 79 85 62 71 79 85 67 71 83 85 67 70 79 80 63 70 79 80 63 66 79 88 63 63 77 79 64 67 77 75 60 67 77 79 64 7 +55 48 104 108 44 32 104 112 44 34 109 112 74 83 100 85 59 49 104 107 46 32 108 114 75 84 97 72 75 70 101 94 56 42 97 113 2 +44 34 109 112 41 37 104 116 41 32 104 121 46 32 108 114 46 32 100 107 46 34 104 107 56 42 97 113 46 34 93 105 49 37 97 98 2 +41 37 104 116 41 32 104 121 44 32 109 125 46 32 100 107 46 34 104 107 46 32 104 114 46 34 93 105 49 37 97 98 52 40 97 101 2 +44 32 109 125 41 29 113 129 44 29 113 129 46 32 104 114 46 27 108 129 43 29 108 129 52 40 97 101 52 40 97 105 52 48 90 98 2 +95 113 112 92 90 109 112 89 90 104 108 85 92 108 114 90 92 112 119 90 92 108 110 94 97 115 124 101 93 120 124 97 93 120 119 97 3 +86 104 104 81 86 100 108 85 86 104 108 89 87 103 105 83 87 103 105 83 83 103 114 86 89 106 105 87 85 106 114 87 85 111 114 90 4 +63 71 69 55 66 75 76 55 66 75 80 59 67 73 75 57 67 73 79 57 67 73 79 60 67 79 82 62 70 79 82 58 63 79 78 58 7 +66 75 80 59 66 75 80 59 66 79 76 59 67 73 79 60 71 77 79 60 71 77 82 60 63 79 78 58 67 75 78 62 67 79 78 62 7 +66 75 80 59 66 75 80 59 66 75 76 59 67 73 75 60 67 73 79 57 67 73 72 60 67 71 82 62 63 75 82 62 63 75 78 62 7 +63 71 76 59 63 71 76 59 63 75 80 59 63 70 72 57 63 73 75 57 63 73 79 57 63 79 85 62 67 79 82 58 67 75 82 62 7 +63 71 76 59 63 75 80 59 66 75 80 59 63 73 75 57 63 73 79 57 67 81 82 60 67 79 82 58 67 75 82 62 67 75 82 62 7 +59 71 73 55 63 71 73 59 63 75 73 59 67 73 72 57 63 70 75 57 67 73 79 60 63 71 70 55 63 71 70 58 63 71 78 58 7 +63 75 73 59 63 75 73 55 66 75 76 59 67 73 79 60 67 70 75 60 67 73 75 57 63 71 78 58 63 67 74 62 63 75 74 62 7 +70 79 80 63 66 79 88 63 70 83 88 66 67 77 75 60 67 77 79 64 67 84 82 64 63 79 85 62 67 79 82 65 63 79 85 65 7 +70 79 80 66 70 79 80 66 70 83 92 74 71 81 82 75 71 84 90 72 71 84 86 72 67 84 89 73 70 84 89 76 74 88 89 73 4 +70 79 80 66 70 83 92 74 74 83 100 85 71 84 90 72 71 84 86 72 75 84 97 72 70 84 89 76 74 88 89 73 74 84 89 73 4 +70 83 92 74 74 83 100 85 59 49 104 107 71 84 86 72 75 84 97 72 75 70 101 94 74 88 89 73 74 84 89 73 74 84 97 76 4 +74 83 100 85 59 49 104 107 46 32 108 114 75 84 97 72 75 70 101 94 56 42 97 113 74 84 89 73 74 84 97 76 70 67 101 94 2 +46 34 104 107 46 32 104 114 46 27 108 129 49 37 97 98 52 40 97 
101 52 40 97 105 53 49 93 90 60 56 85 83 63 71 85 73 2 +96 112 124 94 96 117 130 98 96 117 114 94 97 111 114 90 89 102 101 83 82 88 89 73 72 81 86 65 68 77 74 57 64 73 78 54 3 +92 112 119 90 92 108 110 94 92 108 110 90 93 120 124 97 93 120 119 97 89 115 114 87 97 120 120 102 92 120 120 98 88 120 120 91 3 +83 103 114 86 87 112 119 90 92 112 114 90 85 111 114 90 89 111 114 83 89 106 110 83 88 102 111 87 88 102 102 83 84 98 102 79 4 +71 81 82 60 67 77 75 57 67 73 75 57 70 79 85 62 70 84 82 58 67 79 82 62 68 81 82 65 68 81 82 65 72 77 82 61 7 +67 77 82 60 67 77 75 60 63 73 82 57 63 71 78 62 63 75 78 55 67 75 78 58 64 77 74 57 64 77 74 57 64 77 78 61 7 +63 73 75 57 63 73 79 57 67 81 82 60 67 79 82 58 67 75 82 62 67 75 82 62 68 81 78 61 68 77 78 61 68 77 78 57 7 +63 70 75 57 67 73 79 60 67 70 75 60 63 71 70 58 63 71 78 58 63 67 74 62 68 69 74 57 64 69 74 57 68 69 74 57 7 +67 73 79 60 67 70 75 60 67 73 75 57 63 71 78 58 63 67 74 62 63 75 74 62 64 69 74 57 68 69 74 57 64 73 74 57 7 +67 73 75 57 67 77 75 60 67 77 82 60 63 75 74 62 63 71 74 58 63 71 78 62 64 73 74 57 64 73 74 57 64 73 78 61 7 +63 77 82 60 63 77 79 64 67 77 75 60 67 75 78 62 63 75 85 58 63 79 85 62 64 77 78 65 68 77 86 65 64 77 82 65 7 +63 77 79 64 67 77 75 60 67 77 79 64 63 75 85 58 63 79 85 62 67 79 82 65 68 77 86 65 64 77 82 65 64 77 82 65 7 +67 81 82 68 67 84 86 68 67 84 82 68 63 79 89 65 63 79 82 65 60 79 85 65 60 77 82 65 60 77 82 68 64 81 86 72 7 +75 103 110 86 71 99 105 83 67 91 97 83 74 115 119 101 70 111 114 90 63 97 105 80 72 115 125 98 72 115 120 98 72 106 111 91 1 +71 81 90 72 71 81 82 75 71 84 90 72 67 84 93 76 67 84 89 73 70 84 89 76 64 81 86 72 68 81 86 68 72 85 86 68 4 +71 84 90 72 71 84 86 72 75 84 97 72 70 84 89 76 74 88 89 73 74 84 89 73 72 85 86 68 72 89 90 76 76 85 94 76 4 +71 84 86 72 75 84 97 72 75 70 101 94 74 88 89 73 74 84 89 73 74 84 97 76 72 89 90 76 76 85 94 76 72 89 94 76 4 +52 40 97 101 52 40 97 105 52 48 90 98 60 56 85 83 63 71 85 73 70 84 89 73 72 94 86 72 76 94 98 76 76 98 98 76 2 +70 84 85 65 85 102 105 83 97 115 124 101 64 73 78 61 72 89 94 76 88 115 125 98 64 75 75 59 68 75 79 63 76 99 104 85 3 +93 120 119 97 89 115 114 87 85 111 114 87 92 120 120 98 88 120 120 91 84 111 111 91 92 116 122 96 88 107 118 92 88 107 113 88 3 +89 106 105 87 85 106 114 87 85 111 114 90 88 106 111 87 84 106 111 87 88 102 111 87 84 103 108 85 84 99 108 85 84 99 104 81 4 +85 106 114 87 85 111 114 90 89 111 114 83 84 106 111 87 88 102 111 87 88 102 102 83 84 99 108 85 84 99 104 81 84 95 100 78 4 +85 111 114 90 89 111 114 83 89 106 110 83 88 102 111 87 88 102 102 83 84 98 102 79 84 99 104 81 84 95 100 78 80 91 96 74 4 +89 111 114 83 89 106 110 83 82 97 101 80 88 102 102 83 84 98 102 79 80 98 94 72 84 95 100 78 80 91 96 74 80 87 91 78 4 +89 106 110 83 82 97 101 80 78 88 97 73 84 98 102 79 80 98 94 72 76 85 94 68 80 91 96 74 80 87 91 78 76 87 91 67 4 +67 79 82 65 70 79 82 62 70 79 85 62 76 81 86 65 72 81 86 65 68 81 82 65 71 87 91 63 71 83 87 70 71 83 87 67 7 +70 79 82 58 63 79 78 58 67 75 78 62 68 77 78 61 68 77 78 61 68 73 74 57 68 75 75 56 68 75 75 56 71 75 75 56 7 +63 79 78 58 67 75 78 62 67 79 78 62 68 77 78 61 68 73 74 57 64 73 78 57 68 75 75 56 71 75 75 56 68 75 75 59 7 +63 71 78 62 63 75 78 55 67 75 78 58 64 77 74 57 64 77 74 57 64 77 78 61 60 75 79 59 64 79 79 59 64 79 79 63 7 +67 75 78 58 67 71 78 58 67 71 82 62 64 77 78 61 64 77 78 61 68 77 78 61 64 79 79 63 68 79 83 63 68 79 79 67 7 +63 75 82 62 63 75 78 62 63 79 85 62 68 77 78 65 64 77 74 65 68 77 82 65 64 83 83 67 64 79 79 63 71 83 83 67 7 +63 75 78 62 63 79 85 
62 67 79 82 58 64 77 74 65 68 77 82 65 68 81 78 61 64 79 79 63 71 83 83 67 68 79 83 63 7 +67 75 82 62 67 75 82 62 67 75 82 58 68 77 78 61 68 77 78 57 68 77 74 57 68 79 83 63 68 79 79 59 68 75 79 56 7 +67 75 82 58 70 79 74 58 63 75 74 55 68 77 74 57 68 73 78 54 68 73 74 54 68 75 79 56 64 75 79 59 68 79 79 59 7 +63 75 74 55 63 71 70 55 63 71 70 58 68 73 74 54 64 69 74 57 68 69 74 57 68 79 79 59 68 75 75 56 68 71 75 59 7 +63 75 74 62 63 71 74 58 63 71 78 62 64 73 74 57 64 73 74 57 64 73 78 61 68 75 75 59 64 75 79 59 64 79 83 63 7 +63 71 74 58 63 71 78 62 67 75 78 62 64 73 74 57 64 73 78 61 64 77 78 65 64 75 79 59 64 79 83 63 68 79 83 63 7 +67 75 78 62 63 75 85 58 63 79 85 62 64 77 78 65 68 77 86 65 64 77 82 65 68 79 83 63 64 79 83 67 64 75 79 63 7 +67 79 82 65 63 79 85 65 63 79 89 65 64 77 82 65 60 77 82 65 60 77 82 65 64 75 83 67 68 79 83 67 68 83 87 70 7 +74 102 114 90 74 115 119 97 74 115 119 101 76 115 120 102 72 115 120 102 72 115 125 98 76 112 128 99 80 116 128 103 80 116 128 99 1 +67 84 93 76 67 84 89 73 70 84 89 76 64 81 86 72 68 81 86 68 72 85 86 68 71 83 87 70 76 87 91 78 76 91 96 74 4 +74 84 89 73 74 84 97 76 70 67 101 94 76 85 94 76 72 89 94 76 72 85 90 76 76 83 87 70 68 79 79 63 68 75 75 63 4 +53 43 97 101 53 49 93 90 60 56 85 83 64 73 86 72 68 81 90 68 72 94 86 72 68 83 87 70 76 91 100 81 76 99 104 81 4 +97 120 120 102 92 120 120 98 88 120 120 91 92 116 122 99 92 116 122 96 88 107 118 92 79 103 109 87 88 107 113 92 84 107 109 87 3 +88 120 120 91 84 111 111 91 88 106 111 87 88 107 118 92 88 107 113 88 84 107 108 88 84 107 109 87 84 107 104 83 84 103 104 83 4 +88 102 111 87 88 102 102 83 84 98 102 79 84 99 104 81 84 95 100 78 80 91 96 74 84 95 100 79 79 95 93 75 79 91 96 75 4 +80 98 94 72 76 85 94 68 76 81 86 65 80 87 91 78 76 87 91 67 71 87 91 63 75 91 89 75 75 91 93 75 75 91 100 75 4 +76 81 86 65 72 81 86 65 68 81 82 65 71 87 91 63 71 83 87 70 71 83 87 67 75 91 100 75 79 95 93 71 79 87 85 67 7 +68 81 82 65 72 77 82 61 68 77 78 61 68 79 83 67 68 75 79 63 68 75 75 56 71 79 81 62 67 79 77 58 67 79 77 58 7 +72 77 82 61 68 77 78 61 68 77 78 61 68 75 79 63 68 75 75 56 68 75 75 56 67 79 77 58 67 79 77 58 67 75 77 58 7 +68 77 78 61 68 73 74 57 64 73 78 57 68 75 75 56 71 75 75 56 68 75 75 59 67 75 77 58 67 72 77 58 67 72 81 58 7 +68 73 74 57 64 73 78 57 68 73 78 61 71 75 75 56 68 75 75 59 68 75 79 59 67 72 77 58 67 72 81 58 71 75 77 58 7 +68 77 78 61 64 77 74 57 64 77 74 57 68 75 79 59 60 75 79 59 64 79 79 59 71 75 74 58 67 75 77 58 67 75 81 62 7 +64 77 78 61 64 77 78 61 68 77 78 61 64 79 79 63 68 79 83 63 68 79 79 67 67 79 85 62 71 83 85 62 71 87 85 67 7 +68 77 78 61 68 77 78 65 64 77 74 65 68 79 79 67 64 83 83 67 64 79 79 63 71 87 85 67 71 79 85 67 71 83 85 62 7 +68 77 82 65 68 81 78 61 68 77 78 61 71 83 83 67 68 79 83 63 68 79 83 63 67 83 81 67 67 79 81 62 67 79 77 62 7 +68 81 78 61 68 77 78 61 68 77 78 57 68 79 83 63 68 79 83 63 68 79 79 59 67 79 81 62 67 79 77 62 67 75 81 58 7 +64 69 74 57 68 69 74 57 64 69 74 57 68 75 75 56 68 71 75 59 68 75 75 59 67 75 81 58 67 72 77 58 67 75 77 62 7 +64 73 74 57 64 73 78 61 64 77 78 65 64 75 79 59 64 79 83 63 68 79 83 63 67 79 81 62 67 79 81 67 71 83 81 67 7 +64 77 82 65 60 77 82 65 60 77 82 65 64 75 83 67 68 79 83 67 68 83 87 70 67 87 81 71 67 87 93 75 67 87 93 79 7 +72 115 120 102 72 115 125 98 72 115 120 98 80 116 128 103 80 116 128 99 76 116 122 96 75 116 123 100 75 116 128 100 75 111 128 100 1 +68 94 102 87 64 89 102 79 64 81 86 72 71 87 100 81 71 83 91 74 71 83 87 70 75 91 104 83 71 91 96 75 71 83 93 71 4 +64 81 86 72 68 81 86 68 72 85 86 
68 71 83 87 70 76 87 91 78 76 91 96 74 71 83 93 71 71 79 93 71 71 79 85 67 4 +68 81 86 68 72 85 86 68 72 89 90 76 76 87 91 78 76 91 96 74 76 91 91 70 71 79 93 71 71 79 85 67 71 68 77 62 4 +76 85 94 76 72 89 94 76 72 85 90 76 76 83 87 70 68 79 79 63 68 75 75 63 67 72 74 58 67 72 74 58 67 68 77 58 7 +72 94 86 72 76 94 98 76 76 98 98 76 76 99 104 81 80 99 104 78 76 95 96 78 79 99 100 79 79 95 100 79 75 91 96 75 4 +76 94 98 76 76 98 98 76 76 94 98 76 80 99 104 78 76 95 96 78 71 87 96 74 79 95 100 79 75 91 96 75 75 91 93 71 4 +64 71 75 56 64 75 71 59 64 75 79 59 67 75 74 58 67 75 74 58 63 72 77 58 78 87 88 74 70 79 80 66 66 75 80 59 7 +84 103 108 85 84 99 108 85 84 99 104 81 84 103 104 83 88 99 104 83 84 95 100 79 82 100 104 81 82 100 104 81 86 100 100 81 4 +80 87 91 78 76 87 91 67 71 87 91 63 75 91 89 75 75 91 93 75 75 91 100 75 78 87 92 70 78 91 96 74 78 96 100 74 4 +71 83 87 67 68 79 83 67 68 75 79 63 79 87 85 67 71 79 81 62 67 79 77 58 82 100 104 81 78 91 96 74 66 79 84 66 7 +68 75 79 63 68 75 75 56 68 75 75 56 67 79 77 58 67 79 77 58 67 75 77 58 66 79 84 66 66 79 80 63 70 79 80 63 7 +68 75 75 59 68 75 79 59 68 75 79 59 67 72 81 58 71 75 77 58 71 75 74 58 70 79 80 59 70 75 73 59 70 75 76 59 7 +64 79 79 63 68 79 83 63 68 79 79 67 67 79 85 62 71 83 85 62 71 87 85 67 63 79 84 63 66 79 84 63 66 79 84 63 7 +68 79 79 67 64 83 83 67 64 79 79 63 71 87 85 67 71 79 85 67 71 83 85 62 66 79 84 63 66 79 84 63 66 79 80 63 7 +64 79 79 63 71 83 83 67 68 79 83 63 71 83 85 62 67 83 81 67 67 79 81 62 66 79 80 63 66 79 80 63 66 75 84 63 7 +71 83 83 67 68 79 83 63 68 79 83 63 67 83 81 67 67 79 81 62 67 79 77 62 66 79 80 63 66 75 84 63 66 75 84 63 7 +68 75 75 56 68 71 75 59 68 75 75 59 67 75 81 58 67 72 77 58 67 75 77 62 59 60 100 81 66 71 88 70 70 79 76 59 7 +68 75 75 59 64 75 79 59 64 79 83 63 67 75 81 62 67 79 81 62 67 79 81 67 66 79 80 66 66 75 84 66 66 79 84 66 7 +64 87 100 74 68 91 100 81 71 103 118 96 67 95 100 79 75 99 109 87 79 111 123 100 63 83 96 78 66 91 104 81 74 100 108 92 1 +68 91 100 81 71 103 118 96 76 116 122 99 75 99 109 87 79 111 123 100 75 111 123 100 66 91 104 81 74 100 108 92 78 113 117 96 1 +76 116 122 99 76 112 128 99 80 116 128 103 75 111 123 100 75 116 123 100 75 116 123 100 78 113 117 96 74 113 122 100 70 113 127 96 1 +76 112 128 99 80 116 128 103 80 116 128 99 75 116 123 100 75 116 123 100 75 116 128 100 74 113 122 100 70 113 127 96 66 113 117 100 1 +71 83 91 74 71 83 87 70 76 87 91 78 71 91 96 75 71 83 93 71 71 79 93 71 78 91 96 81 74 83 96 74 66 71 73 59 4 +76 91 96 74 76 91 91 70 76 83 87 70 71 79 85 67 71 68 77 62 67 72 74 58 63 63 66 52 59 63 66 52 59 63 66 55 7 +68 79 79 63 68 75 75 63 68 83 87 70 67 72 74 58 67 68 77 58 67 72 77 62 63 63 69 55 63 67 69 55 59 67 66 55 7 +68 75 75 63 68 83 87 70 76 91 100 81 67 68 77 58 67 72 77 62 75 87 96 79 63 67 69 55 59 67 66 55 63 67 66 55 7 +71 91 87 70 76 83 91 70 71 83 87 67 75 91 89 71 75 91 93 71 71 83 89 67 74 83 96 74 74 87 92 70 74 87 88 70 4 +67 75 74 58 63 72 77 58 67 75 81 58 70 79 80 66 66 75 80 59 66 79 80 59 71 88 93 68 67 77 82 64 67 81 86 64 7 +67 75 81 58 63 75 77 58 67 83 85 67 66 79 80 59 66 75 80 63 66 75 76 59 67 81 86 64 67 77 79 64 67 73 75 60 7 +79 103 109 87 88 107 113 92 84 107 109 87 63 71 73 59 66 79 84 63 78 100 104 85 67 73 79 57 63 77 82 60 71 84 90 72 7 +84 103 104 83 88 99 104 83 84 95 100 79 82 100 104 81 82 100 104 81 86 100 100 81 87 99 105 83 87 99 101 83 87 99 105 79 4 +75 91 93 75 75 91 100 75 79 95 93 71 78 91 96 74 78 96 100 74 82 100 104 81 75 84 93 75 79 91 101 79 83 103 105 83 4 +75 91 100 75 79 
95 93 71 79 87 85 67 78 96 100 74 82 100 104 81 82 100 104 81 79 91 101 79 83 103 105 83 83 99 105 83 4 +67 72 77 58 67 72 81 58 71 75 77 58 66 75 80 63 70 79 80 59 70 75 73 59 75 81 86 64 71 81 82 60 71 77 82 64 7 +67 72 81 58 71 75 77 58 71 75 74 58 70 79 80 59 70 75 73 59 70 75 76 59 71 81 82 60 71 77 82 64 67 77 82 64 7 +71 75 74 58 67 75 77 58 67 75 81 62 70 75 76 59 63 75 80 59 63 75 76 63 67 77 82 64 67 70 90 64 67 73 82 64 7 +71 87 85 67 71 79 85 67 71 83 85 62 66 79 84 63 66 79 84 63 66 79 80 63 67 77 82 64 71 77 82 64 67 77 82 64 7 +67 75 77 62 67 72 77 62 67 75 85 62 63 63 88 74 63 60 88 85 59 56 88 85 59 57 97 86 59 57 97 86 56 57 97 86 5 +67 72 77 62 67 75 85 62 67 75 81 58 63 60 88 85 59 56 88 85 59 60 100 81 59 57 97 86 56 57 97 86 59 57 97 86 5 +67 75 85 62 67 75 81 58 67 72 77 58 59 56 88 85 59 60 100 81 66 71 88 70 56 57 97 86 59 57 97 86 59 63 90 79 5 +67 75 77 62 67 75 81 62 67 75 81 62 70 79 76 59 70 75 76 59 66 79 80 66 63 73 82 64 67 77 79 60 67 77 82 64 7 +75 99 109 87 79 111 123 100 75 111 123 100 66 91 104 81 74 100 108 92 78 113 117 96 67 99 105 86 75 112 119 101 79 112 124 101 1 +75 111 123 100 75 116 123 100 75 116 123 100 78 113 117 96 74 113 122 100 70 113 127 96 79 112 124 101 79 112 124 98 71 108 124 98 1 +71 111 123 100 67 107 118 96 63 103 113 92 66 113 127 100 66 113 122 100 66 113 127 100 63 112 124 98 63 108 124 101 67 108 135 98 1 +63 103 113 92 67 99 109 87 71 99 109 87 66 113 127 100 66 109 122 100 63 109 117 92 67 108 135 98 67 112 130 98 67 112 119 98 1 +71 99 109 87 71 95 104 87 67 95 100 83 63 109 117 92 66 100 108 89 66 96 96 85 67 112 119 98 67 103 114 90 63 91 105 83 1 +67 68 77 58 67 72 77 62 75 87 96 79 63 67 69 55 59 67 66 55 63 67 66 55 63 70 72 60 67 70 75 57 67 66 72 60 7 +67 72 77 62 75 87 96 79 79 99 100 79 59 67 66 55 63 67 66 55 63 67 73 59 67 70 75 57 67 66 72 60 63 66 68 57 7 +75 91 93 71 75 87 93 71 75 91 89 71 74 91 92 78 74 87 96 74 74 83 96 74 75 91 101 75 75 88 90 72 75 88 90 72 4 +66 75 76 59 63 71 73 59 66 79 84 63 67 73 75 60 67 73 79 57 63 77 82 60 67 75 78 62 67 75 78 62 63 75 78 58 7 +78 87 92 70 78 91 96 74 78 96 100 74 75 91 97 72 75 84 93 75 79 91 101 79 78 88 93 76 78 88 97 76 85 102 105 83 4 +78 91 96 74 78 96 100 74 82 100 104 81 75 84 93 75 79 91 101 79 83 103 105 83 78 88 97 76 85 102 105 83 85 102 101 83 4 +78 96 100 74 82 100 104 81 82 100 104 81 79 91 101 79 83 103 105 83 83 99 105 83 85 102 105 83 85 102 101 83 85 102 110 80 4 +82 100 104 81 82 100 104 81 78 91 96 74 83 103 105 83 83 99 105 83 79 91 93 72 85 102 101 83 85 102 110 80 82 88 101 76 4 +66 79 84 66 66 79 80 63 70 79 80 63 71 81 82 64 71 81 90 68 75 88 93 68 67 71 93 65 74 88 97 80 82 97 105 83 7 +66 79 80 63 70 79 80 63 66 75 80 63 71 81 90 68 75 88 93 68 75 81 86 64 74 88 97 80 82 97 105 83 78 88 93 73 7 +66 79 84 63 66 79 84 63 66 79 80 63 67 77 82 64 71 77 82 64 67 77 82 64 67 75 82 62 67 71 82 65 63 71 82 65 7 +66 79 80 63 66 75 84 63 66 75 84 63 63 70 82 68 63 66 93 79 63 63 93 83 60 60 85 76 60 60 93 83 60 60 93 87 5 +66 75 84 63 66 75 84 63 63 71 88 70 63 66 93 79 63 63 93 83 59 60 90 83 60 60 93 83 60 60 93 87 57 56 93 90 5 +59 60 100 81 66 71 88 70 70 79 76 59 59 57 97 86 59 63 90 79 63 73 82 64 57 60 93 80 57 63 89 76 60 67 78 65 5 +63 83 96 78 66 91 104 81 74 100 108 92 63 84 86 79 67 99 105 86 75 112 119 101 63 88 101 76 70 102 114 94 74 115 119 101 1 +74 100 108 92 78 113 117 96 74 113 122 100 75 112 119 101 79 112 124 101 79 112 124 98 74 115 119 101 74 115 119 101 70 111 124 101 1 +63 109 117 92 66 100 108 89 66 96 96 85 67 112 119 
98 67 103 114 90 63 91 105 83 67 115 129 104 63 106 119 94 63 97 105 87 1 +74 83 96 74 66 71 73 59 63 63 66 52 79 91 97 79 71 81 86 64 63 66 62 57 67 75 85 73 70 75 82 69 70 71 78 62 7 +63 67 66 55 63 67 73 59 70 83 88 70 67 66 72 60 63 66 68 57 59 70 75 60 67 67 74 62 67 67 74 58 63 67 70 58 7 +70 83 88 70 78 91 96 78 74 91 92 78 59 70 75 60 71 84 90 72 75 91 101 75 63 67 70 58 63 75 82 65 74 88 89 76 4 +74 91 92 78 74 87 96 74 74 83 96 74 75 91 101 75 75 88 90 72 75 88 90 72 74 88 89 76 74 88 97 73 70 88 85 65 4 +75 91 93 72 71 88 93 68 67 77 82 64 74 88 89 73 78 92 93 73 70 84 85 62 76 89 90 68 76 94 94 72 76 89 90 68 4 +67 77 79 64 67 73 75 60 67 73 79 57 67 79 82 65 67 75 78 62 67 75 78 62 68 77 74 61 68 77 74 61 68 77 78 61 7 +67 73 79 57 63 77 82 60 71 84 90 72 67 75 78 62 63 75 78 58 63 79 78 62 68 77 78 61 64 73 74 61 64 73 78 57 7 +63 77 82 60 71 84 90 72 83 99 105 83 63 75 78 58 63 79 78 62 74 92 93 76 64 73 74 61 64 73 78 57 64 81 82 65 7 +83 99 105 83 83 103 105 83 87 99 105 83 74 92 93 76 82 102 105 83 82 97 105 83 64 81 82 65 76 94 102 79 84 98 102 83 4 +83 103 105 83 87 99 105 83 87 99 101 83 82 102 105 83 82 97 105 83 82 97 101 83 76 94 102 79 84 98 102 83 84 98 102 83 4 +87 99 101 83 87 99 105 79 79 99 101 83 82 97 101 83 85 102 105 83 82 97 105 80 84 98 102 83 84 102 98 83 84 102 102 79 4 +79 99 101 83 79 95 101 75 75 91 97 72 82 97 105 80 82 92 97 76 78 88 93 76 84 102 102 79 84 94 98 79 76 85 90 72 4 +75 91 97 72 75 84 93 75 79 91 101 79 78 88 93 76 78 88 97 76 85 102 105 83 76 85 90 72 76 94 94 76 80 102 102 79 4 +83 103 105 83 83 99 105 83 79 91 93 72 85 102 101 83 85 102 110 80 82 88 101 76 84 102 102 83 84 102 102 79 72 81 90 65 4 +71 81 90 68 75 88 93 68 75 81 86 64 74 88 97 80 82 97 105 83 78 88 93 73 76 89 98 79 80 94 102 76 76 85 90 68 7 +56 57 97 86 59 57 97 86 59 63 90 79 60 56 93 87 57 60 93 80 57 63 89 76 57 55 86 76 57 55 86 72 57 55 82 72 5 +63 84 86 79 67 99 105 86 75 112 119 101 63 88 101 76 70 102 114 94 74 115 119 101 64 94 106 83 68 106 115 98 72 115 120 98 1 +75 112 119 101 79 112 124 101 79 112 124 98 74 115 119 101 74 115 119 101 70 111 124 101 72 115 120 98 68 111 120 98 68 115 125 98 1 +79 112 124 101 79 112 124 98 71 108 124 98 74 115 119 101 70 111 124 101 67 106 124 101 68 111 120 98 68 115 125 98 68 111 125 98 1 +67 112 124 98 67 112 124 98 63 112 124 98 67 111 119 97 63 111 124 97 63 120 124 101 68 115 120 98 64 115 125 98 64 115 125 102 1 +63 108 124 101 67 108 135 98 67 112 130 98 63 115 124 101 67 111 124 101 63 115 124 101 64 115 125 98 60 111 120 98 64 111 115 102 1 +67 103 114 90 63 91 105 83 63 88 90 75 63 106 119 94 63 97 105 87 63 88 97 83 68 115 120 102 64 106 111 91 64 94 102 83 1 +63 70 72 60 67 70 75 57 67 66 72 60 63 71 78 62 67 71 78 62 67 67 74 62 68 73 78 65 68 69 74 57 64 66 71 54 7 +67 66 72 60 63 66 68 57 59 70 75 60 67 67 74 62 67 67 74 58 63 67 70 58 64 66 71 54 64 69 71 57 68 69 74 61 7 +75 88 90 72 75 88 90 68 71 81 90 64 70 88 85 65 67 75 78 62 63 71 74 62 64 73 78 61 64 73 78 61 68 73 78 57 7 +67 79 85 65 67 79 82 65 67 75 78 62 72 81 82 61 68 77 74 61 68 77 74 61 76 87 96 70 71 79 83 59 68 79 79 63 7 +63 75 78 58 63 79 78 62 74 92 93 76 64 73 74 61 64 73 78 57 64 81 82 65 64 75 79 59 64 75 79 63 68 75 79 59 7 +63 79 78 62 74 92 93 76 82 102 105 83 64 73 78 57 64 81 82 65 76 94 102 79 64 75 79 63 68 75 79 59 68 83 87 70 7 +74 92 93 76 82 102 105 83 82 97 105 83 64 81 82 65 76 94 102 79 84 98 102 83 68 75 79 59 68 83 87 70 80 91 91 81 7 +82 102 105 83 82 97 105 83 82 97 101 83 76 94 102 79 84 98 102 83 84 98 102 83 
68 83 87 70 80 91 91 81 84 95 100 78 4 +82 97 105 80 82 92 97 76 78 88 93 76 84 102 102 79 84 94 98 79 76 85 90 72 84 99 104 85 80 99 100 81 76 91 96 74 4 +78 88 97 76 85 102 105 83 85 102 101 83 76 94 94 76 80 102 102 79 84 102 102 83 76 91 96 74 76 91 96 74 76 91 87 70 4 +78 88 93 73 78 84 93 69 78 88 97 80 76 85 90 68 80 94 98 76 80 98 98 83 71 87 87 70 76 91 91 78 76 91 100 78 7 +74 88 101 80 70 88 93 69 67 75 85 62 76 98 102 79 76 89 94 72 72 81 86 65 80 99 104 81 80 99 104 78 76 91 96 74 7 +70 88 93 69 67 75 85 62 67 75 82 62 76 89 94 72 72 81 86 65 72 77 82 61 80 99 104 78 76 91 96 74 71 79 83 63 7 +67 75 85 62 67 75 82 62 67 71 82 65 72 81 86 65 72 77 82 61 68 69 78 65 76 91 96 74 71 79 83 63 64 68 83 67 7 +67 71 82 65 63 71 82 65 60 60 85 76 68 69 78 65 64 62 82 68 60 59 90 76 64 68 83 67 60 61 83 70 56 57 79 70 5 +60 60 85 76 60 60 93 83 60 60 93 87 60 59 90 76 60 59 98 87 57 59 98 87 56 57 79 70 60 51 83 74 56 54 83 70 5 +60 60 93 87 57 56 93 90 57 56 97 94 57 59 98 87 57 55 94 87 57 55 90 83 56 54 83 70 56 57 87 78 60 57 87 78 5 +57 60 93 80 57 63 89 76 60 67 78 65 57 55 86 72 57 55 82 72 57 59 74 68 56 57 87 70 56 57 83 67 56 57 83 70 5 +63 111 124 97 63 120 124 101 63 115 124 101 64 115 125 98 64 115 125 102 64 115 125 98 64 116 128 103 64 112 128 103 64 116 122 99 1 +63 120 124 101 63 115 124 101 67 111 124 101 64 115 125 102 64 115 125 98 60 111 120 98 64 112 128 103 64 116 122 99 64 121 122 96 1 +67 111 124 101 63 115 124 101 67 115 129 104 60 111 120 98 64 111 115 102 68 115 125 102 64 121 122 96 64 116 122 99 64 116 122 96 1 +63 106 119 94 63 97 105 87 63 88 97 83 68 115 120 102 64 106 111 91 64 94 102 83 68 116 128 103 68 112 128 96 64 103 113 88 1 +67 71 78 62 67 67 74 62 67 67 74 58 68 69 74 57 64 66 71 54 64 69 71 57 64 75 75 59 68 71 75 59 68 71 75 59 7 +67 67 74 62 67 67 74 58 63 67 70 58 64 66 71 54 64 69 71 57 68 69 74 61 68 71 75 59 68 71 75 59 68 75 75 59 7 +76 89 90 68 76 94 94 72 76 89 90 68 76 87 91 70 76 87 91 67 76 91 96 74 75 87 89 67 75 87 89 67 75 87 89 67 4 +76 89 90 68 72 81 82 61 68 77 74 61 76 91 96 74 76 87 96 70 71 79 83 59 75 87 89 67 75 83 89 71 71 83 85 67 4 +72 81 82 61 68 77 74 61 68 77 74 61 76 87 96 70 71 79 83 59 68 79 79 63 75 83 89 71 71 83 85 67 67 75 85 62 7 +84 98 102 83 84 98 102 83 84 102 98 83 80 91 91 81 84 95 100 78 80 95 100 81 75 87 89 71 79 91 93 75 79 95 96 75 4 +84 102 102 79 84 94 98 79 76 85 90 72 84 99 104 85 80 99 100 81 76 91 96 74 84 95 100 79 84 95 100 75 79 87 93 75 4 +84 102 102 83 84 102 102 79 72 81 90 65 76 91 87 70 71 79 87 70 68 75 87 67 67 72 85 67 63 58 81 67 63 68 85 67 5 +72 81 90 65 68 69 86 68 76 89 98 79 68 75 87 67 76 83 91 74 80 95 100 78 63 68 85 67 71 91 93 75 75 91 89 71 7 +80 94 98 76 80 98 98 83 84 98 102 83 76 91 91 78 76 91 100 78 80 95 100 78 71 83 81 67 71 87 85 71 75 95 96 79 7 +80 98 98 83 84 98 102 83 80 98 106 83 76 91 100 78 80 95 100 78 80 99 104 81 71 87 85 71 75 95 96 79 79 95 104 79 7 +80 98 106 83 76 98 102 79 76 89 94 72 80 99 104 81 80 99 104 81 80 99 104 78 79 95 104 79 75 99 100 79 79 99 104 83 3 +57 55 86 76 57 55 86 72 57 55 82 72 56 54 87 78 56 57 87 70 56 57 83 67 55 54 85 71 55 54 85 71 55 58 81 71 5 +57 55 86 72 57 55 82 72 57 59 74 68 56 57 87 70 56 57 83 67 56 57 83 70 55 54 85 71 55 58 81 71 55 54 85 71 5 +57 55 82 72 57 59 74 68 60 66 82 65 56 57 83 67 56 57 83 70 56 64 83 67 55 58 81 71 55 54 85 71 55 51 81 71 5 +68 106 115 98 72 115 120 98 68 111 120 98 68 112 118 96 68 116 122 99 71 112 118 99 71 111 118 92 71 111 123 96 71 107 123 96 1 +68 115 125 98 68 111 125 98 
68 115 120 98 68 112 122 96 68 112 128 99 68 116 122 103 67 107 113 96 67 111 118 96 71 116 123 100 1 +64 115 125 98 64 115 125 102 64 115 125 98 64 116 128 103 64 112 128 103 64 116 122 99 67 111 123 100 67 111 123 100 67 116 123 100 1 +64 115 125 98 60 111 120 98 64 111 115 102 64 116 122 99 64 121 122 96 64 116 122 99 67 116 123 100 71 111 128 100 67 111 123 96 1 +60 111 120 98 64 111 115 102 68 115 125 102 64 121 122 96 64 116 122 99 64 116 122 96 71 111 128 100 67 111 123 96 67 111 123 100 1 +64 106 111 91 64 94 102 83 68 94 102 79 68 112 128 96 64 103 113 88 60 91 104 81 71 116 123 100 71 107 118 96 67 99 109 83 1 +68 77 74 65 68 77 74 61 68 73 78 65 68 75 79 63 68 75 75 59 64 75 75 63 71 79 85 67 63 75 81 62 67 72 77 62 7 +64 66 71 54 64 69 71 57 68 69 74 61 68 71 75 59 68 71 75 59 68 75 75 59 63 68 67 58 67 72 70 62 67 75 74 58 7 +68 73 82 65 68 81 86 68 68 77 82 65 68 75 75 59 68 79 79 63 71 79 87 67 67 75 74 62 63 72 74 62 63 75 77 62 7 +76 87 91 70 76 87 91 67 76 91 96 74 75 87 89 67 75 87 89 67 75 87 89 67 74 87 84 70 74 87 92 70 74 87 88 66 4 +76 87 91 67 76 91 96 74 76 87 96 70 75 87 89 67 75 87 89 67 75 83 89 71 74 87 92 70 74 87 88 66 74 87 88 70 4 +71 79 83 59 68 79 79 63 64 79 83 59 71 83 85 67 67 75 85 62 71 79 89 62 78 91 92 74 74 83 92 70 66 79 84 63 7 +68 79 79 63 64 79 83 59 64 75 79 59 67 75 85 62 71 79 89 62 71 79 77 58 74 83 92 70 66 79 84 63 66 75 76 63 7 +84 95 100 78 80 95 100 81 84 99 104 85 79 91 93 75 79 95 96 75 84 95 100 79 74 79 84 66 82 87 96 78 82 96 100 78 4 +76 91 96 74 76 91 96 74 76 91 96 74 79 87 93 75 71 79 89 75 67 75 89 67 82 91 96 78 66 71 88 74 56 53 80 66 5 +76 91 96 74 76 91 96 74 76 91 87 70 71 79 89 75 67 75 89 67 67 72 85 67 66 71 88 74 56 53 80 66 59 53 73 63 5 +76 91 96 74 76 91 87 70 71 79 87 70 67 75 89 67 67 72 85 67 63 58 81 67 56 53 80 66 59 53 73 63 56 49 80 66 5 +68 75 87 67 76 83 91 74 80 95 100 78 63 68 85 67 71 91 93 75 75 91 89 71 56 53 73 66 70 79 84 66 78 83 88 70 7 +80 95 100 78 76 87 91 67 71 87 87 70 75 91 89 71 75 83 81 62 71 79 85 67 78 83 88 70 74 87 84 66 78 87 84 70 7 +71 87 87 70 76 91 91 78 76 91 100 78 71 79 85 67 71 83 81 67 71 87 85 71 78 87 84 70 74 79 84 63 70 83 84 66 7 +76 91 91 78 76 91 100 78 80 95 100 78 71 83 81 67 71 87 85 71 75 95 96 79 74 79 84 63 70 83 84 66 66 87 84 70 7 +76 91 100 78 80 95 100 78 80 99 104 81 71 87 85 71 75 95 96 79 79 95 104 79 70 83 84 66 66 87 84 70 74 91 100 78 7 +80 99 104 81 80 99 104 78 76 91 96 74 75 99 100 79 79 99 104 83 79 99 109 83 78 96 104 81 82 100 104 81 82 100 104 85 3 +71 79 83 63 64 68 83 67 60 61 83 70 79 91 96 75 71 72 77 58 59 54 67 54 82 100 104 85 78 91 92 74 66 67 66 41 3 +56 57 79 70 60 51 83 74 56 54 83 70 55 51 67 50 51 51 70 50 55 51 67 54 52 49 56 33 52 49 66 44 52 56 69 55 5 +60 57 87 78 56 57 83 70 56 54 87 78 59 58 81 71 55 54 85 71 55 54 85 71 59 60 76 66 59 60 80 70 56 60 84 74 5 +56 57 87 70 56 57 83 67 56 57 83 70 55 54 85 71 55 58 81 71 55 54 85 71 56 56 88 74 56 53 84 74 56 53 84 78 5 +60 91 100 78 64 99 104 88 68 112 118 96 63 91 100 75 67 103 113 87 71 111 118 92 63 87 92 81 66 104 112 89 66 104 112 92 1 +68 112 122 96 68 112 128 99 68 116 122 103 67 107 113 96 67 111 118 96 71 116 123 100 66 109 117 96 66 109 112 96 66 109 122 100 1 +64 116 128 103 64 112 128 103 64 116 122 99 67 111 123 100 67 111 123 100 67 116 123 100 66 109 122 100 66 113 122 100 66 113 127 100 1 +64 121 122 96 64 116 122 99 64 116 122 96 71 111 128 100 67 111 123 96 67 111 123 100 66 113 122 100 66 113 127 100 70 118 127 100 1 +68 71 75 59 68 71 75 59 68 75 75 59 63 
+[large block of added rows of integer data not reproduced here]
89 42 30 110 139 42 30 114 135 46 34 110 124 44 31 114 133 44 31 110 133 44 29 114 136 2 +66 91 112 89 70 96 112 92 70 96 117 92 56 73 97 79 63 88 105 83 67 84 105 94 47 37 114 122 50 63 97 90 63 84 97 80 5 +70 96 117 92 74 91 112 96 70 87 112 100 67 84 105 94 67 88 110 98 67 88 119 98 63 84 97 80 70 88 105 87 74 92 114 94 5 +74 91 112 96 70 87 112 100 66 83 117 100 67 88 110 98 67 88 119 98 75 91 110 94 70 88 105 87 74 92 114 94 74 92 110 94 5 +70 87 112 100 66 83 117 100 70 87 112 100 67 88 119 98 75 91 110 94 79 91 119 98 74 92 114 94 74 92 110 94 70 88 114 97 5 +52 53 76 74 56 53 80 74 49 49 76 74 59 60 72 72 59 63 79 72 59 60 75 68 63 75 89 73 60 71 82 65 63 67 78 69 5 +83 99 101 83 83 95 97 79 83 95 101 83 82 97 101 76 82 97 101 80 85 102 110 87 80 98 98 76 80 98 102 79 84 98 106 83 3 +83 95 97 79 83 95 101 83 87 103 110 86 82 97 101 80 85 102 110 87 85 102 110 90 80 98 102 79 84 98 106 83 84 102 106 87 3 +83 95 101 83 87 103 110 86 92 103 105 86 85 102 110 87 85 102 110 90 89 102 110 87 84 98 106 83 84 102 106 87 84 102 106 87 3 +87 103 110 86 92 103 105 86 87 103 110 86 85 102 110 90 89 102 110 87 89 102 114 87 84 102 106 87 84 102 106 87 84 106 111 87 3 +92 103 105 86 87 103 110 86 87 103 114 86 89 102 110 87 89 102 114 87 89 106 114 94 84 102 106 87 84 106 111 87 88 111 115 91 3 +87 103 110 86 87 103 114 86 92 112 119 94 89 102 114 87 89 106 114 94 93 115 124 94 84 106 111 87 88 111 115 91 92 115 115 94 3 +92 103 110 90 83 95 105 79 83 95 101 79 93 106 114 94 89 97 101 80 85 97 105 80 88 102 106 83 88 102 106 83 88 98 106 79 3 +83 95 105 79 83 95 101 79 87 103 105 83 89 97 101 80 85 97 105 80 85 106 105 83 88 102 106 83 88 98 106 79 84 102 106 79 3 +87 103 110 83 83 91 97 79 83 95 101 83 85 102 101 83 82 92 105 76 85 92 101 83 84 94 102 79 84 98 98 79 84 94 102 79 3 +83 91 97 79 83 95 101 83 87 95 101 83 82 92 105 76 85 92 101 83 85 92 105 83 84 98 98 79 84 94 102 79 84 102 111 87 3 +83 95 101 83 87 95 101 83 83 99 101 83 85 92 101 83 85 92 105 83 89 102 110 87 84 94 102 79 84 102 111 87 88 106 102 91 3 +79 99 105 86 75 99 110 90 67 99 114 90 74 97 105 94 67 97 110 94 57 97 110 94 64 98 111 91 57 94 111 91 53 85 102 83 1 +75 99 110 90 67 99 114 90 63 99 114 90 67 97 110 94 57 97 110 94 53 88 101 83 57 94 111 91 53 85 102 83 50 73 90 76 1 +63 99 114 90 59 91 101 90 56 84 93 83 53 88 101 83 50 71 89 76 47 71 89 80 50 73 90 76 50 69 86 72 53 69 86 72 1 +59 91 101 90 56 84 93 83 52 77 93 79 50 71 89 76 47 71 89 80 50 71 85 76 50 69 86 72 53 69 86 72 53 69 82 72 1 +56 84 93 83 52 77 93 79 52 73 90 75 47 71 89 80 50 71 85 76 47 67 85 69 53 69 86 72 53 69 82 72 50 66 82 72 1 +52 77 93 79 52 73 90 75 46 73 90 75 50 71 85 76 47 67 85 69 47 71 85 73 53 69 82 72 50 66 82 72 50 73 90 76 1 +49 73 93 79 52 77 93 75 49 73 86 75 50 79 89 76 50 79 93 76 50 79 89 76 50 73 90 76 50 77 98 79 53 77 94 79 1 +52 77 93 75 49 73 86 75 52 66 82 72 50 79 93 76 50 79 89 76 50 71 82 73 50 77 98 79 53 77 94 79 50 73 90 76 1 +63 99 114 94 67 99 110 94 63 95 110 90 57 88 101 83 60 88 101 83 60 75 93 83 57 81 94 79 60 77 90 79 64 81 90 83 1 +63 95 110 90 63 95 105 90 63 99 110 90 60 75 93 83 63 79 97 83 63 88 105 90 64 81 90 83 64 85 94 83 64 85 98 83 1 +67 99 114 94 63 99 114 94 63 103 114 90 70 106 114 94 67 97 114 87 63 97 114 90 64 98 111 91 68 94 115 91 60 89 102 83 1 +59 95 110 86 56 84 101 83 56 84 105 86 63 106 114 90 60 92 105 87 53 84 110 87 68 106 115 94 64 98 111 91 57 94 111 87 1 +56 84 101 83 56 84 105 86 59 81 105 86 60 92 105 87 53 84 110 87 53 84 105 83 64 98 111 91 57 94 111 87 53 89 106 87 1 
+56 84 105 86 59 81 105 86 59 88 105 86 53 84 110 87 53 84 105 83 57 88 105 87 57 94 111 87 53 89 106 87 57 94 111 87 1 +59 91 110 86 63 99 110 94 63 95 105 90 60 97 105 87 63 92 110 94 63 92 105 87 57 94 106 83 60 85 102 87 60 85 102 87 1 +67 99 110 94 63 103 119 94 67 108 124 98 63 97 114 90 67 102 119 97 74 106 124 104 64 98 111 91 68 106 111 98 72 111 120 102 1 +63 103 119 94 67 108 124 98 75 112 124 101 67 102 119 97 74 106 124 104 78 111 129 101 68 106 111 98 72 111 120 102 80 115 125 102 1 +67 108 124 98 75 112 124 101 71 112 124 101 74 106 124 104 78 111 129 101 67 102 119 97 72 111 120 102 80 115 125 102 68 111 120 98 1 +75 112 124 101 71 112 124 101 71 112 130 101 78 111 129 101 67 102 119 97 67 106 124 97 80 115 125 102 68 111 120 98 64 106 115 94 1 +71 112 130 101 71 112 130 101 71 112 119 98 67 106 124 97 70 111 124 101 67 106 119 97 64 106 115 94 64 106 120 98 68 111 125 98 1 +71 112 119 98 67 108 114 98 56 70 110 98 67 106 119 97 67 111 114 97 60 88 110 97 68 111 125 98 68 102 115 94 60 89 111 94 1 +67 108 114 98 56 70 110 98 52 54 97 105 67 111 114 97 60 88 110 97 47 40 105 122 68 102 115 94 60 89 111 94 53 59 106 113 1 +46 32 119 135 46 30 119 139 42 32 114 135 44 31 110 140 44 31 114 140 44 31 114 133 47 31 111 131 47 34 111 128 44 34 115 128 2 +67 84 105 94 67 88 110 98 67 88 119 98 63 84 97 80 70 88 105 87 74 92 114 94 57 73 86 72 64 85 98 79 64 85 102 91 5 +75 91 110 94 79 91 119 98 79 99 110 86 74 92 110 94 70 88 114 97 74 88 110 94 64 77 106 98 68 69 111 98 68 73 111 91 5 +71 77 86 75 59 60 72 72 59 63 79 72 78 84 93 80 63 75 89 73 60 71 82 65 68 77 98 79 72 77 94 76 76 85 98 76 7 +85 102 105 80 85 97 101 80 82 97 101 76 88 106 102 83 88 102 102 79 80 98 98 76 80 99 104 78 80 91 96 78 80 95 100 78 3 +82 97 101 80 85 102 110 87 85 102 110 90 80 98 102 79 84 98 106 83 84 102 106 87 80 95 100 78 80 91 96 74 80 95 100 81 3 +85 102 110 90 89 102 110 87 89 102 114 87 84 102 106 87 84 102 106 87 84 106 111 87 80 95 100 81 84 103 108 88 88 112 113 88 3 +85 102 114 87 78 92 101 87 74 97 105 94 84 98 102 87 72 94 106 87 64 98 111 91 76 95 104 88 68 99 113 88 60 91 108 88 3 +78 92 101 87 74 97 105 94 67 97 110 94 72 94 106 87 64 98 111 91 57 94 111 91 68 99 113 88 60 91 108 88 53 87 104 85 1 +50 71 89 76 47 71 89 80 50 71 85 76 50 69 86 72 53 69 86 72 53 69 82 72 50 68 87 74 50 71 87 70 50 71 87 74 1 +47 67 85 69 47 71 85 73 50 75 89 76 50 66 82 72 50 73 90 76 53 77 94 76 50 75 91 74 53 75 87 78 53 75 87 78 1 +47 71 85 73 50 75 89 76 50 79 89 76 50 73 90 76 53 77 94 76 50 73 90 76 53 75 87 78 53 75 87 78 50 75 91 81 1 +50 75 89 76 50 79 89 76 50 79 93 76 53 77 94 76 50 73 90 76 50 77 98 79 53 75 87 78 50 75 91 81 50 75 96 78 1 +50 71 82 73 47 67 82 65 50 71 85 73 50 73 90 76 50 69 86 72 53 69 82 72 56 68 83 67 53 68 83 70 53 71 87 74 1 +53 79 89 76 57 79 93 80 57 88 101 83 57 77 90 76 53 77 90 76 53 77 94 79 60 87 104 85 60 83 100 85 56 79 91 78 1 +57 88 101 83 60 88 101 83 60 75 93 83 57 81 94 79 60 77 90 79 64 81 90 83 60 79 96 85 64 91 100 81 68 87 96 81 1 +63 102 114 90 63 106 114 90 60 92 105 87 64 98 115 91 68 106 115 94 64 98 111 91 64 83 96 88 68 99 113 88 68 99 108 85 1 +60 92 105 87 53 84 110 87 53 84 105 83 64 98 111 91 57 94 111 87 53 89 106 87 68 99 108 85 56 91 104 88 56 95 108 92 1 +60 97 105 87 63 92 110 94 63 92 105 87 57 94 106 83 60 85 102 87 60 85 102 87 56 83 100 85 56 83 96 85 60 91 100 85 1 +63 92 110 94 63 92 105 87 63 97 114 90 60 85 102 87 60 85 102 87 64 98 111 91 56 83 96 85 60 91 100 85 60 99 108 92 1 +74 106 124 104 78 111 129 101 67 102 119 97 72 
111 120 102 80 115 125 102 68 111 120 98 68 99 118 99 71 107 122 103 71 112 122 99 1 +67 102 119 97 67 106 124 97 70 111 124 101 68 111 120 98 64 106 115 94 64 106 120 98 71 112 122 99 68 112 122 99 71 112 128 99 1 +67 106 124 97 70 111 124 101 67 106 119 97 64 106 115 94 64 106 120 98 68 111 125 98 68 112 122 99 71 112 128 99 71 103 122 96 1 +67 106 119 97 67 111 114 97 60 88 110 97 68 111 125 98 68 102 115 94 60 89 111 94 71 103 122 96 64 91 104 92 60 91 108 88 1 +60 88 110 97 47 40 105 122 44 31 114 136 60 89 111 94 53 59 106 113 50 31 115 128 60 91 108 88 60 83 108 92 60 64 100 99 2 +47 40 105 122 44 31 114 136 44 31 110 140 53 59 106 113 50 31 115 128 47 31 111 131 60 83 108 92 60 64 100 99 53 51 104 114 2 +44 31 110 140 44 31 114 140 44 31 114 133 47 31 111 131 47 34 111 128 44 34 115 128 53 51 104 114 50 36 113 128 43 36 118 128 2 +44 31 114 140 44 31 114 133 44 31 114 133 47 34 111 128 44 34 115 128 44 31 115 131 50 36 113 128 43 36 118 128 46 39 108 114 2 +44 31 114 133 44 31 114 133 44 31 110 133 44 34 115 128 44 31 115 131 44 31 115 131 43 36 118 128 46 39 108 114 50 48 104 107 2 +44 29 114 133 47 37 114 122 50 63 97 90 47 37 106 124 50 43 98 109 53 55 98 91 56 61 96 88 56 61 91 85 56 64 91 85 2 +50 63 97 90 63 84 97 80 70 88 105 87 53 55 98 91 57 73 86 72 64 85 98 79 56 64 91 85 60 64 91 81 60 75 96 78 5 +70 88 105 87 74 92 114 94 74 92 110 94 64 85 98 79 64 85 102 91 64 77 106 98 60 75 96 78 64 68 104 88 64 64 108 92 5 +74 88 110 94 78 84 93 80 63 75 89 73 68 73 111 91 68 77 98 79 72 77 94 76 64 61 108 99 64 68 108 92 71 83 100 81 7 +88 106 102 83 88 102 102 79 80 98 98 76 80 99 104 78 80 91 96 78 80 95 100 78 88 99 104 83 88 103 104 83 84 95 100 79 3 +88 102 102 79 80 98 98 76 80 98 102 79 80 91 96 78 80 95 100 78 80 95 100 78 88 103 104 83 84 95 100 79 79 99 96 79 3 +84 102 106 87 84 106 111 87 88 111 115 91 84 103 108 88 88 112 113 88 92 112 118 88 84 99 104 83 88 107 113 87 88 107 109 87 3 +84 106 111 87 88 111 115 91 92 115 115 94 88 112 113 88 92 112 118 88 88 99 104 88 88 107 113 87 88 107 109 87 84 99 104 79 3 +92 106 111 87 88 102 106 83 88 102 106 83 80 99 104 81 84 103 104 81 84 103 104 85 84 99 104 79 88 99 109 83 84 103 100 83 3 +88 102 106 83 88 98 106 79 84 102 106 79 84 103 104 85 84 99 104 81 84 99 100 81 84 103 100 83 84 99 104 83 88 99 109 83 3 +84 94 102 79 84 102 111 87 88 106 102 91 84 99 104 85 88 103 108 88 88 99 113 92 88 103 113 87 88 103 109 92 79 95 100 87 3 +50 73 90 76 50 69 86 72 53 69 86 72 50 71 91 78 50 68 87 74 50 71 87 70 48 61 81 67 48 64 85 71 51 72 85 75 1 +53 77 94 76 50 73 90 76 50 77 98 79 53 75 87 78 50 75 91 81 50 75 96 78 55 79 96 79 51 75 93 75 51 75 89 75 1 +50 77 98 79 53 77 94 79 50 73 90 76 50 75 96 78 56 75 91 74 56 68 83 67 51 75 89 75 55 72 89 71 55 68 81 71 1 +53 73 90 76 53 77 94 76 53 77 94 76 53 79 96 70 53 79 96 81 56 83 96 78 55 79 96 79 59 83 96 79 71 99 104 87 1 +53 77 90 76 53 77 94 79 57 81 94 79 60 83 100 85 56 79 91 78 60 79 96 85 59 75 96 79 59 83 96 79 63 91 100 83 1 +64 81 90 83 64 85 94 83 64 85 98 83 68 87 96 81 60 83 96 81 64 87 104 85 75 91 109 92 75 95 104 87 71 95 104 87 1 +68 89 102 87 64 98 111 91 68 94 115 91 68 91 104 88 68 91 104 85 68 87 104 88 75 91 109 92 75 95 104 87 67 83 96 79 1 +64 98 111 91 68 94 115 91 60 89 102 83 68 91 104 85 68 87 104 88 60 75 91 78 75 95 104 87 67 83 96 79 59 72 85 71 1 +60 85 102 83 64 98 115 91 68 106 115 94 56 68 83 74 64 83 96 88 68 99 113 88 55 68 85 75 63 79 96 83 67 99 109 92 1 +68 106 115 94 64 98 111 91 57 94 111 87 68 99 113 88 68 99 108 85 56 91 104 88 67 99 
109 92 67 103 109 92 63 95 109 87 1 +57 94 111 87 53 89 106 87 57 94 111 87 56 91 104 88 56 95 108 92 56 87 108 85 63 95 109 87 59 95 113 92 63 95 109 87 1 +64 98 111 91 68 106 111 98 72 111 120 102 60 99 108 92 64 99 113 92 68 99 118 99 63 99 113 92 63 103 113 96 71 103 113 96 1 +68 106 111 98 72 111 120 102 80 115 125 102 64 99 113 92 68 99 118 99 71 107 122 103 63 103 113 96 71 103 113 96 71 103 113 96 1 +64 106 120 98 68 111 125 98 68 102 115 94 71 112 128 99 71 103 122 96 64 91 104 92 67 99 113 96 67 91 104 92 59 75 100 83 1 +47 31 111 131 47 34 111 128 44 34 115 128 53 51 104 114 50 36 113 128 43 36 118 128 63 79 100 87 59 68 96 92 55 61 100 96 2 +44 31 115 131 47 31 111 124 47 37 106 124 50 48 104 107 50 57 96 96 56 61 96 88 59 64 100 92 55 61 100 87 55 58 96 87 5 +47 31 111 124 47 37 106 124 50 43 98 109 50 57 96 96 56 61 96 88 56 61 91 85 55 61 100 87 55 58 96 87 59 58 93 83 5 +64 85 98 79 64 85 102 91 64 77 106 98 60 75 96 78 64 68 104 88 64 64 108 92 59 75 89 79 59 64 100 92 59 58 104 100 5 +64 85 102 91 64 77 106 98 68 69 111 98 64 68 104 88 64 64 108 92 60 61 108 99 59 64 100 92 59 58 104 100 59 58 104 100 5 +64 77 106 98 68 69 111 98 68 73 111 91 64 64 108 92 60 61 108 99 64 61 108 99 59 58 104 100 59 58 104 100 59 61 109 100 5 +68 69 111 98 68 73 111 91 68 77 98 79 60 61 108 99 64 61 108 99 64 68 108 92 59 58 104 100 59 61 109 100 63 64 104 96 5 +76 85 98 76 72 81 86 72 68 73 78 65 80 99 104 85 80 95 100 81 71 79 91 74 79 95 96 79 79 95 96 79 75 87 93 79 4 +68 73 78 65 64 66 74 65 64 73 82 68 71 79 91 74 68 71 83 67 68 71 83 70 75 87 93 79 71 75 85 71 75 79 89 71 7 +84 99 100 81 88 99 104 85 84 99 100 81 88 99 109 83 84 99 100 79 84 103 104 83 86 100 104 81 82 96 104 81 82 100 104 81 3 +50 71 91 78 50 68 87 74 50 71 87 70 48 61 81 67 48 64 85 71 51 72 85 75 46 75 96 78 46 71 84 74 46 67 84 74 1 +50 68 87 74 50 71 87 70 50 71 87 74 48 64 85 71 51 72 85 75 51 72 85 75 46 71 84 74 46 67 84 74 49 71 92 74 1 +53 75 87 78 50 75 91 81 50 75 96 78 55 79 96 79 51 75 93 75 51 75 89 75 52 79 96 78 52 79 92 81 52 71 84 74 1 +50 75 91 81 50 75 96 78 56 75 91 74 51 75 93 75 51 75 89 75 55 72 89 71 52 79 92 81 52 71 84 74 52 71 84 70 1 +56 68 83 67 53 68 83 70 53 71 87 74 55 68 81 71 51 72 81 71 55 75 85 75 52 71 80 70 52 71 84 70 56 75 92 74 1 +64 91 100 81 68 87 96 81 60 83 96 81 67 91 109 87 75 91 109 92 75 95 104 87 63 83 100 85 63 83 100 85 66 87 100 85 1 +68 87 96 81 60 83 96 81 64 87 104 85 75 91 109 92 75 95 104 87 71 95 104 87 63 83 100 85 66 87 100 85 66 83 100 81 1 +68 91 104 85 68 87 104 88 60 75 91 78 75 95 104 87 67 83 96 79 59 72 85 71 66 87 104 89 70 96 104 89 63 79 88 78 1 +64 83 96 88 68 99 113 88 68 99 108 85 63 79 96 83 67 99 109 92 67 103 109 92 59 67 84 74 59 79 96 81 63 87 108 89 1 +56 95 108 92 56 87 108 85 56 83 100 85 59 95 113 92 63 95 109 87 63 87 100 83 63 96 112 89 63 100 122 92 63 104 117 92 1 +56 87 108 85 56 83 100 85 56 83 96 85 63 95 109 87 63 87 100 83 63 87 100 87 63 100 122 92 63 104 117 92 63 96 108 89 1 +60 99 108 92 64 99 113 92 68 99 118 99 63 99 113 92 63 103 113 96 71 103 113 96 66 100 112 92 70 100 112 92 70 104 112 96 1 +68 112 122 99 71 112 128 99 71 103 122 96 71 111 118 96 67 99 113 96 67 91 104 92 70 100 112 96 66 104 122 96 70 100 117 96 1 +71 112 128 99 71 103 122 96 64 91 104 92 67 99 113 96 67 91 104 92 59 75 100 83 66 104 122 96 70 100 117 96 63 83 104 89 1 +60 91 108 88 60 83 108 92 60 64 100 99 59 87 104 92 67 99 109 92 67 87 100 83 59 79 92 81 63 75 104 85 70 100 112 92 1 +43 36 118 128 46 39 108 114 50 48 104 107 55 61 100 96 55 64 
104 92 59 64 100 92 63 71 104 92 59 67 104 96 59 63 104 96 5 +46 39 108 114 50 48 104 107 50 57 96 96 55 64 104 92 59 64 100 92 55 61 100 87 59 67 104 96 59 63 104 96 59 60 100 92 5 +56 61 96 88 56 61 91 85 56 64 91 85 55 58 96 87 59 58 93 83 59 61 89 79 56 60 100 89 56 60 88 81 56 60 88 78 5 +64 68 104 88 64 64 108 92 60 61 108 99 59 64 100 92 59 58 104 100 59 58 104 100 59 67 88 74 63 71 92 81 59 60 96 92 5 +80 95 100 81 71 79 91 74 68 71 83 67 79 95 96 79 75 87 93 79 71 75 85 71 74 87 92 78 74 91 100 81 78 96 96 81 4 +71 79 91 74 68 71 83 67 68 71 83 70 75 87 93 79 71 75 85 71 75 79 89 71 74 91 100 81 78 96 96 81 78 91 96 78 7 +84 95 100 79 88 99 104 83 88 103 104 83 82 96 100 81 86 96 104 81 86 96 108 81 83 91 97 79 79 95 97 75 83 95 97 79 3 +88 99 104 83 88 103 104 83 84 95 100 79 86 96 104 81 86 96 108 81 86 104 108 81 79 95 97 75 83 95 97 79 83 95 105 83 3 +84 95 100 79 79 99 96 79 79 91 96 79 86 104 108 81 86 96 104 81 82 96 100 78 83 95 105 83 83 95 101 79 79 95 101 79 3 +79 99 96 79 79 91 96 79 84 95 100 79 86 96 104 81 82 96 100 78 82 96 100 81 83 95 101 79 79 95 101 79 83 95 101 79 3 +88 107 109 87 84 99 104 79 84 99 104 79 90 109 112 92 90 104 112 89 90 100 108 85 96 112 110 94 96 108 114 90 92 103 110 86 3 +84 99 104 79 84 99 104 79 88 99 109 83 90 104 112 89 90 100 108 85 86 104 104 81 96 108 114 90 92 103 110 86 87 103 110 83 3 +84 99 104 79 88 99 109 83 84 103 100 83 90 100 108 85 86 104 104 81 86 100 108 85 92 103 110 86 87 103 110 83 87 99 105 86 3 +84 103 100 83 84 99 104 83 88 99 109 83 86 100 108 85 86 104 112 85 86 100 104 81 87 99 105 86 87 99 105 86 83 95 105 83 3 +84 99 104 83 88 99 109 83 84 99 100 79 86 104 112 85 86 100 104 81 82 96 104 81 87 99 105 86 83 95 105 83 83 99 105 83 3 +55 83 100 83 51 75 93 79 51 64 85 75 56 91 108 89 52 83 100 81 49 75 92 78 52 84 97 86 52 81 97 79 52 73 90 79 1 +48 61 81 67 48 64 85 71 51 72 85 75 46 75 96 78 46 71 84 74 46 67 84 74 49 73 97 83 49 77 93 75 46 66 86 72 1 +55 79 89 79 55 79 96 79 59 83 96 79 56 79 88 78 56 83 92 81 56 83 100 78 59 84 97 83 56 88 97 83 52 84 97 83 1 +71 99 104 87 67 103 109 87 63 91 109 87 59 87 96 81 66 100 108 89 66 96 108 92 56 81 97 79 59 84 93 79 59 88 105 86 1 +63 91 100 83 67 91 109 87 75 91 109 92 59 83 96 81 63 83 100 85 63 83 100 85 59 73 93 75 63 81 93 83 63 91 101 86 1 +67 91 109 87 75 91 109 92 75 95 104 87 63 83 100 85 63 83 100 85 66 87 100 85 63 81 93 83 63 91 101 86 59 88 101 83 1 +75 91 109 92 75 95 104 87 71 95 104 87 63 83 100 85 66 87 100 85 66 83 100 81 63 91 101 86 59 88 101 83 67 84 93 83 1 +71 95 104 87 75 91 109 92 75 95 104 87 66 83 100 81 66 83 96 81 66 87 104 89 67 84 93 83 67 84 97 83 59 77 90 75 1 +67 99 109 92 67 103 109 92 63 95 109 87 59 79 96 81 63 87 108 89 63 91 112 89 63 91 101 90 67 103 114 94 63 99 114 90 1 +63 95 109 87 63 87 100 83 63 87 100 87 63 100 122 92 63 104 117 92 63 96 108 89 67 103 114 94 67 103 114 94 67 99 110 94 1 +71 103 113 96 71 103 113 96 71 107 123 100 70 104 112 96 70 104 112 96 70 100 112 92 75 108 114 94 71 108 114 94 75 108 119 98 1 +71 103 113 96 71 107 123 100 71 111 118 96 70 104 112 96 70 100 112 92 70 100 112 96 71 108 114 94 75 108 119 98 75 103 119 98 1 +71 111 118 96 67 99 113 96 67 91 104 92 70 100 112 96 66 104 122 96 70 100 117 96 75 103 119 98 71 99 114 98 75 108 124 98 1 +67 99 113 96 67 91 104 92 59 75 100 83 66 104 122 96 70 100 117 96 63 83 104 89 71 99 114 98 75 108 124 98 71 99 110 94 1 +67 87 100 83 63 79 100 87 59 68 96 92 70 100 112 92 70 100 108 89 66 79 96 85 63 81 101 86 71 95 119 94 67 88 105 86 1 +55 61 100 96 
55 64 104 92 59 64 100 92 63 71 104 92 59 67 104 96 59 63 104 96 63 73 97 86 59 70 105 94 63 66 101 90 5 +59 61 85 75 59 75 89 79 59 64 100 92 56 60 84 78 52 56 80 74 59 67 88 74 52 54 86 83 49 45 86 86 49 51 86 83 5 +59 61 109 100 63 64 104 96 71 79 96 79 56 63 104 96 59 67 104 96 63 67 108 96 59 60 97 90 59 63 93 90 63 66 97 94 5 +63 64 104 96 71 79 96 79 79 95 96 79 59 67 104 96 63 67 108 96 70 75 104 85 59 63 93 90 63 66 97 94 67 77 110 90 5 +86 96 108 81 86 104 108 81 86 96 104 81 83 95 97 79 83 95 105 83 83 95 101 79 78 92 101 76 78 92 97 76 82 97 97 80 3 +86 104 108 81 86 96 104 81 82 96 100 78 83 95 105 83 83 95 101 79 79 95 101 79 78 92 97 76 82 97 97 80 85 97 97 80 3 +86 96 104 81 82 96 100 78 82 96 100 81 83 95 101 79 79 95 101 79 83 95 101 79 82 97 97 80 85 97 97 80 85 106 105 80 3 +82 91 104 78 86 100 108 85 90 109 112 92 83 95 105 83 92 103 110 90 96 112 110 94 93 111 114 90 93 115 114 94 93 111 114 94 3 +90 109 112 92 90 104 112 89 90 100 108 85 96 112 110 94 96 108 114 90 92 103 110 86 93 111 114 94 89 102 110 87 85 97 110 83 3 +90 104 112 89 90 100 108 85 86 104 104 81 96 108 114 90 92 103 110 86 87 103 110 83 89 102 110 87 85 97 110 83 85 102 105 80 3 +86 100 108 85 86 104 112 85 86 100 104 81 87 99 105 86 87 99 105 86 83 95 105 83 85 102 105 83 85 97 101 83 85 97 101 83 3 +86 100 104 81 82 96 104 81 82 100 104 81 83 95 105 83 83 99 105 83 87 103 105 86 85 97 101 83 89 102 105 87 85 102 110 87 3 +82 104 112 85 86 104 108 92 82 100 108 89 87 99 105 86 83 95 105 90 79 99 110 90 85 102 110 94 78 92 110 87 70 88 105 90 3 +56 91 108 89 52 83 100 81 49 75 92 78 52 84 97 86 52 81 97 79 52 73 90 79 50 79 101 83 50 75 93 80 50 71 89 80 1 +52 71 84 70 52 71 80 70 52 71 84 70 52 70 86 72 52 70 86 72 56 73 86 75 53 79 89 76 53 75 93 73 53 71 85 69 1 +56 83 100 78 59 87 96 81 66 100 108 89 52 84 97 83 56 81 97 79 59 84 93 79 57 75 97 76 57 79 93 80 60 84 93 80 1 +66 96 108 92 59 91 100 85 56 79 96 81 59 88 105 86 63 95 110 86 63 84 101 83 60 75 93 83 63 84 97 83 63 84 93 80 1 +59 91 100 85 56 79 96 81 59 83 96 81 63 95 110 86 63 84 101 83 59 73 93 75 63 84 97 83 63 84 93 80 63 79 89 83 1 +59 83 96 81 63 83 100 85 63 83 100 85 59 73 93 75 63 81 93 83 63 91 101 86 63 79 89 83 67 88 105 87 67 92 101 90 1 +63 83 100 85 66 87 100 85 66 83 100 81 63 91 101 86 59 88 101 83 67 84 93 83 67 92 101 90 60 84 97 83 63 75 97 80 1 +66 87 100 85 66 83 100 81 66 83 96 81 59 88 101 83 67 84 93 83 67 84 97 83 60 84 97 83 63 75 97 80 63 79 85 80 1 +66 83 96 81 66 87 104 89 70 96 104 89 67 84 97 83 59 77 90 75 59 73 97 79 63 79 85 80 60 75 89 80 60 84 97 80 1 +66 87 104 89 70 96 104 89 63 79 88 78 59 77 90 75 59 73 97 79 59 73 93 75 60 75 89 80 60 84 97 80 63 92 105 87 1 +70 96 104 89 63 79 88 78 56 63 84 70 59 73 97 79 59 73 93 75 63 73 93 75 60 84 97 80 63 92 105 87 63 92 105 87 1 +56 63 84 70 59 67 84 74 59 79 96 81 63 73 93 75 59 81 93 79 63 91 101 90 63 92 105 87 60 92 110 90 67 102 114 90 1 +59 67 84 74 59 79 96 81 63 87 108 89 59 81 93 79 63 91 101 90 67 103 114 94 60 92 110 90 67 102 114 90 70 106 119 94 1 +63 87 108 89 63 91 112 89 63 96 112 89 67 103 114 94 63 99 114 90 63 103 114 94 70 106 119 94 67 106 110 90 70 111 114 97 1 +63 100 122 92 63 104 117 92 63 96 108 89 67 103 114 94 67 103 114 94 67 99 110 94 70 115 119 97 67 106 124 94 67 106 114 94 1 +66 96 112 89 66 100 112 92 70 100 112 92 67 103 114 94 71 103 114 98 75 112 119 98 70 106 119 94 70 106 119 94 74 111 114 97 1 +66 100 112 92 70 100 112 92 70 104 112 96 71 103 114 98 75 112 119 98 75 108 114 94 70 106 119 94 74 111 114 97 70 111 
124 97 1 +70 104 112 96 70 104 112 96 70 100 112 92 75 108 114 94 71 108 114 94 75 108 119 98 70 111 124 97 70 106 114 94 74 106 114 97 1 +63 83 104 89 59 79 92 81 63 75 104 85 71 99 110 94 67 77 97 79 63 66 90 79 67 97 114 90 67 84 101 87 74 92 105 90 1 +63 75 104 85 70 100 112 92 70 100 108 89 63 66 90 79 63 81 101 86 71 95 119 94 74 92 105 90 78 92 110 94 78 97 114 97 1 +70 100 112 92 70 100 108 89 66 79 96 85 63 81 101 86 71 95 119 94 67 88 105 86 78 92 110 94 78 97 114 97 70 92 110 83 1 +63 71 104 92 59 67 104 96 59 63 104 96 63 73 97 86 59 70 105 94 63 66 101 90 60 75 101 83 60 75 101 83 60 75 97 80 5 +56 60 100 89 56 60 88 81 56 60 88 78 59 63 90 83 59 63 86 83 56 60 86 79 60 71 93 80 57 67 93 83 53 60 93 80 5 +56 60 88 78 56 60 84 78 52 56 80 74 56 60 86 79 52 54 86 83 49 45 86 86 53 60 93 80 47 49 82 83 44 43 82 87 5 +56 60 84 78 52 56 80 74 59 67 88 74 52 54 86 83 49 45 86 86 49 51 86 83 47 49 82 83 44 43 82 87 50 46 82 83 5 +52 56 80 74 59 67 88 74 63 71 92 81 49 45 86 86 49 51 86 83 59 70 90 72 44 43 82 87 50 46 82 83 57 67 85 76 5 +59 67 104 96 63 67 108 96 70 75 104 85 59 63 93 90 63 66 97 94 67 77 110 90 63 71 101 87 63 71 101 90 67 75 105 90 5 +83 95 97 79 83 95 105 83 83 95 101 79 78 92 101 76 78 92 97 76 82 97 97 80 80 94 102 79 80 98 94 76 84 94 98 79 3 +83 95 105 83 83 95 101 79 79 95 101 79 78 92 97 76 82 97 97 80 85 97 97 80 80 98 94 76 84 94 98 79 88 106 106 87 3 +83 95 101 79 79 95 101 79 83 95 101 79 82 97 97 80 85 97 97 80 85 106 105 80 84 94 98 79 88 106 106 87 92 115 115 94 3 +83 95 101 79 83 95 105 83 92 103 110 90 85 106 105 80 93 111 114 90 93 115 114 94 92 115 115 94 92 120 125 98 92 115 115 87 3 +52 81 97 79 52 73 90 79 49 73 97 83 50 75 93 80 50 71 89 80 50 75 101 80 50 69 86 72 50 69 90 76 50 69 90 76 1 +49 73 97 83 49 77 93 75 46 66 86 72 50 75 101 80 47 75 97 80 50 71 89 76 50 69 90 76 50 73 94 76 50 73 90 76 1 +49 70 86 72 52 70 82 75 49 66 86 75 53 75 97 80 53 71 89 73 50 71 89 73 53 77 98 79 53 81 98 79 53 77 94 76 1 +49 66 86 75 52 66 86 72 52 70 86 72 50 71 89 73 50 71 85 73 53 79 89 76 53 77 94 76 53 73 98 76 57 77 98 79 1 +52 66 86 72 52 70 86 72 52 70 86 72 50 71 85 73 53 79 89 76 53 75 93 73 53 73 98 76 57 77 98 79 57 73 90 72 1 +56 88 97 83 52 84 97 83 56 81 97 79 57 79 97 80 57 75 97 76 57 79 93 80 57 73 90 76 53 73 90 76 57 77 94 79 1 +63 84 101 83 59 73 93 75 63 81 93 83 63 84 93 80 63 79 89 83 67 88 105 87 60 81 94 79 64 81 98 83 64 85 98 83 1 +59 73 93 75 63 81 93 83 63 91 101 86 63 79 89 83 67 88 105 87 67 92 101 90 64 81 98 83 64 85 98 83 64 85 102 83 1 +63 81 93 83 63 91 101 86 59 88 101 83 67 88 105 87 67 92 101 90 60 84 97 83 64 85 98 83 64 85 102 83 60 81 90 76 1 +59 77 90 75 59 73 97 79 59 73 93 75 60 75 89 80 60 84 97 80 63 92 105 87 68 98 111 91 64 98 106 91 64 94 111 91 1 +59 73 93 75 63 73 93 75 59 81 93 79 63 92 105 87 63 92 105 87 60 92 110 90 64 94 111 91 60 94 111 91 64 98 111 91 1 +59 81 93 79 63 91 101 90 67 103 114 94 60 92 110 90 67 102 114 90 70 106 119 94 64 98 111 91 68 106 115 94 72 106 115 98 1 +75 108 124 98 71 99 110 94 67 77 97 79 70 106 114 94 67 97 114 90 67 84 101 87 76 111 115 94 76 106 115 94 76 102 111 98 1 +63 66 90 79 63 81 101 86 71 95 119 94 74 92 105 90 78 92 110 94 78 97 114 97 80 111 125 102 88 115 131 102 88 111 120 94 1 +71 95 119 94 67 88 105 86 63 73 97 86 78 97 114 97 70 92 110 83 60 75 101 83 88 111 120 94 76 89 102 76 64 77 94 76 5 +63 73 97 86 59 70 105 94 63 66 101 90 60 75 101 83 60 75 101 83 60 75 97 80 64 77 94 76 60 77 94 76 57 81 90 76 5 +59 70 105 94 63 66 101 90 59 66 97 86 60 75 101 
83 60 75 97 80 57 71 97 80 60 77 94 76 57 81 90 76 60 85 94 79 5 +59 66 97 86 59 63 90 83 59 63 86 83 57 71 97 80 60 71 93 80 57 67 93 83 60 85 94 79 60 81 90 83 60 73 90 83 5 +59 63 90 83 59 63 86 83 56 60 86 79 60 71 93 80 57 67 93 83 53 60 93 80 60 81 90 83 60 73 90 83 53 62 86 83 5 +59 63 86 83 56 60 86 79 52 54 86 83 57 67 93 83 53 60 93 80 47 49 82 83 60 73 90 83 53 62 86 83 50 52 82 83 5 +56 60 86 79 52 54 86 83 49 45 86 86 53 60 93 80 47 49 82 83 44 43 82 87 53 62 86 83 50 52 82 83 50 52 78 83 5 +59 60 97 90 59 63 93 90 63 66 97 94 60 60 97 87 63 71 101 87 63 71 101 90 60 66 102 91 60 62 106 94 60 66 106 94 5 +67 77 110 90 75 91 97 79 79 91 97 83 67 75 105 90 74 88 105 83 74 92 101 80 64 73 102 94 76 89 106 87 76 89 98 79 4 +75 91 97 79 79 91 97 83 79 91 97 79 74 88 105 83 74 92 101 80 74 84 97 76 76 89 106 87 76 89 98 79 72 89 98 79 4 +79 91 97 83 79 91 97 79 75 88 93 75 74 92 101 80 74 84 97 76 74 88 93 76 76 89 98 79 72 89 98 79 76 85 98 79 4 +85 102 110 87 85 102 110 94 78 92 110 87 88 106 111 91 88 106 111 98 76 94 106 91 84 99 108 92 84 107 113 96 84 107 122 96 3 +50 79 101 83 50 75 93 80 50 71 89 80 50 73 86 76 50 69 86 72 50 69 90 76 50 79 100 81 50 75 96 78 46 71 87 74 1 +50 75 101 80 47 75 97 80 50 71 89 76 50 69 90 76 50 73 94 76 50 73 90 76 50 71 87 74 50 75 91 78 50 79 96 78 1 +50 71 89 76 50 67 93 76 50 75 97 80 50 73 90 76 50 73 94 79 53 81 102 83 50 79 96 78 46 79 96 78 50 79 96 81 1 +50 75 97 80 53 75 97 80 53 71 89 73 53 81 102 83 53 77 98 79 53 81 98 79 50 79 96 81 53 79 96 81 53 83 96 78 1 +57 79 97 80 57 79 97 80 57 75 97 76 57 77 94 76 57 73 90 76 53 73 90 76 56 71 79 74 56 75 87 74 56 75 96 74 1 +57 79 97 80 57 75 97 76 57 79 93 80 57 73 90 76 53 73 90 76 57 77 94 79 56 75 87 74 56 75 96 74 60 79 91 81 1 +60 75 93 83 63 84 97 83 63 84 93 80 60 73 90 79 60 73 90 79 60 81 94 79 60 83 96 81 68 83 96 81 64 87 104 85 1 +63 79 89 83 67 88 105 87 67 92 101 90 64 81 98 83 64 85 98 83 64 85 102 83 60 83 100 85 64 83 96 81 60 87 104 85 1 +67 88 105 87 67 92 101 90 60 84 97 83 64 85 98 83 64 85 102 83 60 81 90 76 64 83 96 81 60 87 104 85 60 91 108 85 1 +60 84 97 83 63 75 97 80 63 79 85 80 60 81 90 76 60 81 90 79 68 89 106 87 60 91 108 85 64 91 113 88 64 95 113 88 1 +63 79 85 80 60 75 89 80 60 84 97 80 68 89 106 87 68 98 111 91 64 98 106 91 64 95 113 88 68 103 113 88 68 103 118 92 1 +63 92 105 87 60 92 110 90 67 102 114 90 60 94 111 91 64 98 111 91 68 106 115 94 68 107 118 92 68 103 118 92 71 103 118 92 1 +67 106 110 90 70 111 114 97 70 115 119 97 72 106 115 94 68 106 120 94 72 111 120 94 68 107 122 96 68 103 118 92 64 103 122 92 1 +70 111 114 97 70 115 119 97 67 106 124 94 68 106 120 94 72 111 120 94 64 106 115 94 68 103 118 92 64 103 122 92 71 107 122 96 1 +70 106 114 94 74 106 114 97 70 111 119 97 76 111 115 94 76 111 115 94 72 106 115 91 76 107 122 99 71 116 122 99 76 107 122 103 1 +70 111 119 97 70 102 114 94 70 106 114 94 72 106 115 91 72 106 115 94 76 111 115 94 76 107 122 103 76 112 122 96 76 112 122 99 1 +60 75 101 83 60 75 101 83 60 75 97 80 64 77 94 76 60 77 94 76 57 81 90 76 64 79 96 81 60 83 100 81 60 83 96 85 1 +60 75 101 83 60 75 97 80 57 71 97 80 60 77 94 76 57 81 90 76 60 85 94 79 60 83 100 81 60 83 96 85 64 87 100 88 1 +60 75 97 80 57 71 97 80 60 71 93 80 57 81 90 76 60 85 94 79 60 81 90 83 60 83 96 85 64 87 100 88 64 83 104 88 1 +53 60 93 80 47 49 82 83 44 43 82 87 53 62 86 83 50 52 82 83 50 52 78 83 56 71 96 85 56 68 91 81 56 64 91 81 5 +47 49 82 83 44 43 82 87 50 46 82 83 50 52 82 83 50 52 78 83 50 52 82 79 56 68 91 81 56 64 91 81 53 64 83 78 5 +60 
60 97 87 63 71 101 87 63 71 101 90 60 66 102 91 60 62 106 94 60 66 106 94 60 64 104 99 56 64 108 96 64 71 108 96 5 +80 98 94 76 84 94 98 79 88 106 106 87 84 95 100 85 84 103 108 92 92 107 118 96 93 107 113 92 93 111 123 96 97 111 123 96 3 +84 102 102 79 80 94 94 76 80 94 98 79 84 95 96 74 80 95 96 74 84 95 100 81 79 91 100 75 79 95 100 79 79 95 100 79 3 +84 102 111 91 84 102 106 91 88 106 111 91 84 103 113 96 84 99 113 88 84 99 108 92 88 103 109 92 84 99 109 92 88 103 113 96 3 +68 94 111 91 57 81 102 83 50 77 90 79 68 103 113 92 53 91 104 88 50 79 104 85 67 103 113 96 55 91 109 87 55 87 100 87 1 +57 81 102 83 50 77 90 79 50 73 86 76 53 91 104 88 50 79 104 85 50 79 100 81 55 91 109 87 55 87 100 87 55 83 100 87 1 +50 69 90 76 50 73 94 76 50 73 90 76 50 71 87 74 50 75 91 78 50 79 96 78 51 79 96 79 51 75 96 79 48 72 89 79 1 +50 73 94 79 53 81 102 83 53 77 98 79 46 79 96 78 50 79 96 81 53 79 96 81 48 68 89 75 48 75 89 79 51 75 96 79 1 +53 81 102 83 53 77 98 79 53 81 98 79 50 79 96 81 53 79 96 81 53 83 96 78 48 75 89 79 51 75 96 79 51 72 89 75 1 +57 77 98 79 57 73 90 72 50 62 78 68 53 71 87 74 53 71 83 74 53 71 87 74 55 79 93 75 51 75 89 75 51 68 85 75 1 +57 73 90 72 50 62 78 68 53 69 82 76 53 71 83 74 53 71 87 74 53 68 83 70 51 75 89 75 51 68 85 75 51 68 81 71 1 +60 81 98 79 60 73 90 79 60 73 90 79 64 87 100 85 60 83 96 81 68 83 96 81 63 95 104 83 63 95 104 83 63 95 104 87 1 +60 81 94 79 64 81 98 83 64 85 98 83 64 87 104 85 60 83 100 85 64 83 96 81 63 95 104 87 63 91 104 83 63 91 104 83 1 +60 81 90 79 68 89 106 87 68 98 111 91 64 91 113 88 64 95 113 88 68 103 113 88 67 103 113 92 71 103 109 92 71 103 113 92 1 +68 89 106 87 68 98 111 91 64 98 106 91 64 95 113 88 68 103 113 88 68 103 118 92 71 103 109 92 71 103 113 92 71 107 118 92 1 +64 98 106 91 64 94 111 91 60 94 111 91 68 103 118 92 68 107 113 92 68 107 118 92 71 107 118 92 71 107 113 96 71 103 118 92 1 +72 106 115 91 76 111 115 94 76 111 115 94 71 107 118 96 76 107 122 99 71 116 122 99 71 107 113 96 75 103 118 96 75 103 118 96 1 +76 106 115 94 76 102 111 98 80 111 125 102 80 107 122 96 76 107 118 96 84 116 128 103 84 103 118 96 71 79 109 92 79 103 123 100 1 +76 102 111 98 80 111 125 102 88 115 131 102 76 107 118 96 84 116 128 103 92 116 133 103 71 79 109 92 79 103 123 100 84 111 128 100 1 +88 115 131 102 88 111 120 94 76 89 102 76 92 116 133 103 84 112 122 96 71 83 96 85 84 111 128 100 84 103 118 92 71 79 96 79 1 +64 77 94 76 60 77 94 76 57 81 90 76 64 79 96 81 60 83 100 81 60 83 96 85 63 75 96 83 67 83 104 87 59 83 100 83 1 +60 85 94 79 60 81 90 83 60 73 90 83 64 87 100 88 64 83 104 88 64 79 100 85 63 87 100 87 63 83 104 87 63 79 100 87 1 +50 52 82 83 50 52 78 83 50 52 82 79 56 68 91 81 56 64 91 81 53 64 83 78 59 72 96 83 59 75 96 75 59 75 89 75 5 +50 52 82 79 57 66 82 72 60 77 90 83 53 64 83 78 56 68 87 74 60 71 91 81 59 75 89 75 59 79 89 71 63 79 93 75 5 +60 77 90 83 60 66 102 91 60 62 106 94 60 71 91 81 60 64 104 99 56 64 108 96 63 79 93 75 63 68 109 92 59 75 109 96 5 +60 62 106 94 60 66 106 94 64 73 102 94 56 64 108 96 64 71 108 96 68 75 108 96 59 75 109 96 67 87 113 96 67 95 109 92 5 +84 99 108 81 80 95 100 81 84 95 100 85 88 103 109 87 88 103 109 87 93 107 113 92 78 100 100 81 86 104 108 85 90 109 112 92 3 +84 99 113 88 84 99 108 92 84 107 113 96 84 99 109 92 88 103 113 96 88 103 118 100 90 104 112 92 90 104 112 89 95 109 117 96 3 +84 107 122 96 68 103 113 92 53 91 104 88 79 107 123 100 67 103 113 96 55 91 109 87 86 104 117 100 74 104 122 96 66 104 122 96 1 +68 103 113 92 53 91 104 88 50 79 104 85 67 103 113 96 55 91 109 87 55 87 100 
87 74 104 122 96 66 104 122 96 56 91 112 89 1 +53 91 104 88 50 79 104 85 50 79 100 81 55 91 109 87 55 87 100 87 55 83 100 87 66 104 122 96 56 91 112 89 56 87 112 89 1 +53 79 96 81 53 83 96 78 53 75 96 78 51 75 96 79 51 72 89 75 48 79 93 79 49 67 84 74 49 71 92 78 52 75 92 78 1 +53 83 96 78 53 75 96 78 53 71 87 74 51 72 89 75 48 79 93 79 55 79 93 79 49 71 92 78 52 75 92 78 52 75 92 78 1 +56 75 87 74 56 75 96 74 60 79 91 81 55 72 85 75 59 79 93 79 63 87 100 83 56 75 92 74 56 79 96 78 59 87 100 81 1 +56 75 96 74 60 79 91 81 64 87 100 85 59 79 93 79 63 87 100 83 63 95 104 83 56 79 96 78 59 87 100 81 59 87 100 89 1 +60 91 108 85 64 91 113 88 64 95 113 88 71 103 113 92 67 103 113 92 71 103 109 92 70 104 117 92 70 109 117 96 70 109 112 96 1 +64 95 113 88 68 103 113 88 68 103 118 92 71 103 109 92 71 103 113 92 71 107 118 92 70 109 112 96 66 104 112 92 70 104 112 92 1 +68 103 118 92 68 107 113 92 68 107 118 92 71 107 118 92 71 107 113 96 71 103 118 92 70 104 112 92 70 109 117 96 70 109 117 92 1 +68 103 118 92 71 103 118 92 71 103 118 96 67 103 118 92 71 103 118 96 71 103 109 92 70 104 112 92 70 109 112 92 70 109 117 96 1 +71 103 118 92 71 103 118 96 68 107 122 96 71 103 118 96 71 103 109 92 71 99 113 92 70 109 112 92 70 109 117 96 70 100 108 92 1 +71 103 118 96 68 107 122 96 68 103 118 92 71 103 109 92 71 99 113 92 71 99 118 96 70 109 117 96 70 100 108 92 66 100 112 92 1 +68 107 122 96 68 103 118 92 64 103 122 92 71 99 113 92 71 99 118 96 67 103 118 96 70 100 108 92 66 100 112 92 66 104 117 92 1 +76 112 122 99 80 107 122 96 76 107 118 96 84 111 123 100 84 103 118 96 71 79 109 92 78 104 112 96 78 104 112 96 74 83 108 89 1 +80 107 122 96 76 107 118 96 84 116 128 103 84 103 118 96 71 79 109 92 79 103 123 100 78 104 112 96 74 83 108 89 66 71 100 85 1 +84 116 128 103 92 116 133 103 84 112 122 96 79 103 123 100 84 111 128 100 84 103 118 92 66 71 100 85 74 83 104 92 78 96 112 96 1 +60 83 96 85 64 87 100 88 64 83 104 88 59 83 100 83 63 87 100 87 63 83 104 87 66 91 104 92 66 87 108 89 63 83 104 85 1 +64 79 100 85 56 71 96 85 56 68 91 81 63 79 100 87 59 75 96 87 59 72 96 83 63 83 100 85 66 83 100 85 63 83 100 81 1 +56 68 91 81 56 64 91 81 53 64 83 78 59 72 96 83 59 75 96 75 59 75 89 75 63 83 100 81 59 87 96 81 63 83 92 74 5 +56 68 87 74 60 71 91 81 60 64 104 99 59 79 89 71 63 79 93 75 63 68 109 92 59 83 96 74 59 83 92 74 59 83 92 70 5 +60 71 91 81 60 64 104 99 56 64 108 96 63 79 93 75 63 68 109 92 59 75 109 96 59 83 92 74 59 83 92 70 63 79 108 92 5 +92 115 120 94 84 102 106 79 84 102 102 83 101 126 133 103 92 112 118 85 84 103 104 81 102 126 134 104 88 121 128 100 84 107 113 87 3 +84 102 106 79 84 102 102 83 80 102 102 79 92 112 118 85 84 103 104 81 84 99 104 78 88 121 128 100 84 107 113 87 84 99 104 79 3 +84 102 102 83 80 102 102 79 84 94 102 79 84 103 104 81 84 99 104 78 84 99 104 81 84 107 113 87 84 99 104 79 84 99 104 79 3 +80 102 102 79 84 94 102 79 80 94 98 76 84 99 104 78 84 99 104 81 76 99 104 81 84 99 104 79 84 99 104 79 84 103 104 79 3 +84 94 102 79 80 94 98 76 80 102 102 79 84 99 104 81 76 99 104 81 76 99 108 85 84 99 104 79 84 103 104 79 79 107 109 87 3 +80 94 98 76 80 102 102 79 76 102 102 79 76 99 104 81 76 99 108 85 76 103 118 88 84 103 104 79 79 107 109 87 79 107 109 87 3 +76 102 106 83 76 102 106 87 80 98 106 79 80 107 118 88 80 112 118 88 80 107 113 85 79 107 113 87 79 103 104 83 79 103 104 79 3 +76 102 106 87 80 98 106 79 76 94 102 76 80 112 118 88 80 107 113 85 80 95 100 78 79 103 104 83 79 103 104 79 79 95 100 79 3 +76 89 98 76 76 94 98 76 76 98 102 72 80 95 104 74 76 91 104 74 76 95 100 
78 75 91 96 75 75 91 96 71 79 87 93 71 4 +76 94 98 76 76 98 102 72 76 94 90 76 76 91 104 74 76 95 100 78 76 91 100 74 75 91 96 71 79 87 93 71 79 87 93 67 4 +76 98 102 72 76 94 90 76 76 89 94 76 76 95 100 78 76 91 100 74 76 87 100 74 79 87 93 71 79 87 93 67 75 87 96 71 4 +72 94 90 72 72 89 94 76 72 89 98 76 76 87 91 74 76 87 91 67 71 87 87 70 75 91 96 71 75 87 93 67 71 87 89 67 4 +72 89 94 76 72 89 98 76 76 94 98 76 76 87 91 67 71 87 87 70 71 83 87 67 75 87 93 67 71 87 89 67 71 79 81 62 4 +76 94 98 76 72 85 90 72 68 85 94 72 71 83 87 67 68 83 87 67 68 83 87 67 71 79 81 62 71 79 85 62 67 75 85 62 4 +68 85 86 68 68 89 86 72 68 85 90 76 71 83 87 67 68 83 87 67 68 83 87 67 71 79 85 62 71 75 81 67 71 75 81 62 4 +68 89 86 72 68 85 90 76 68 94 94 79 68 83 87 67 68 83 87 67 71 83 87 70 71 75 81 67 71 75 81 62 67 75 85 71 4 +68 85 90 76 68 94 94 79 76 94 111 79 68 83 87 67 71 83 87 70 76 91 91 74 71 75 81 62 67 75 85 71 67 75 96 79 4 +68 94 94 79 76 94 111 79 80 98 106 83 71 83 87 70 76 91 91 74 76 95 104 81 67 75 85 71 67 75 96 79 75 83 96 83 4 +80 94 102 83 80 102 111 87 84 106 115 91 84 103 104 85 84 103 108 85 88 107 118 88 79 99 104 83 84 99 113 87 84 99 109 87 3 +88 106 115 87 88 111 111 91 88 106 115 87 88 107 118 92 88 112 113 88 88 103 113 88 88 107 113 87 88 107 104 87 88 107 109 83 3 +84 98 111 83 80 89 115 87 88 102 106 87 88 103 108 85 84 99 108 85 88 99 104 85 84 99 109 83 88 103 109 87 88 103 109 87 3 +80 89 115 87 88 102 106 87 92 115 111 91 84 99 108 85 88 99 104 85 88 103 113 88 88 103 109 87 88 103 109 87 84 103 113 87 3 +88 102 106 87 92 115 111 91 92 115 115 94 88 99 104 85 88 103 113 88 88 112 118 92 88 103 109 87 84 103 113 87 88 111 113 92 3 +92 115 115 94 92 111 120 91 84 106 111 87 88 112 118 92 88 112 122 88 92 112 128 92 88 111 113 92 93 107 109 92 93 111 113 92 3 +84 106 111 87 84 98 111 87 84 98 106 91 92 112 113 88 88 103 113 85 97 107 113 88 88 111 118 92 93 107 113 87 93 107 113 87 3 +84 98 106 91 84 102 111 87 84 106 111 87 97 107 113 88 92 112 118 92 92 112 118 92 93 107 113 87 93 107 109 87 88 107 109 92 3 +84 102 111 87 84 106 111 87 88 111 115 91 92 112 118 92 92 112 118 92 92 107 113 92 93 107 109 87 88 107 109 92 88 107 109 87 3 +84 106 111 87 88 111 115 91 88 111 120 87 92 112 118 92 92 107 113 92 92 107 118 88 88 107 109 92 88 107 109 87 88 107 109 87 3 +88 111 115 91 88 111 120 87 88 111 115 87 92 107 113 92 92 107 118 88 88 107 118 88 88 107 109 87 88 107 109 87 88 107 109 87 3 +88 111 115 87 92 106 106 87 88 106 106 87 88 107 118 88 88 107 118 88 88 103 108 85 88 107 109 87 88 103 109 87 93 103 109 87 3 +92 106 106 87 88 106 106 87 84 106 111 83 88 107 118 88 88 103 108 85 88 103 113 92 88 103 109 87 93 103 109 87 88 107 109 87 3 +88 98 106 83 84 98 106 83 88 106 102 83 88 107 113 88 88 103 108 81 88 103 108 88 88 111 113 92 88 107 113 87 88 107 113 87 3 +84 98 106 83 88 106 102 83 88 102 102 83 88 103 108 81 88 103 108 88 84 99 104 85 88 107 113 87 88 107 113 87 88 107 109 83 3 +88 102 102 83 88 98 106 83 84 102 106 83 84 99 104 85 84 103 108 81 88 99 104 85 88 107 109 83 84 99 104 87 79 99 100 79 3 +88 98 106 83 84 102 106 83 88 111 111 87 84 103 108 81 88 99 104 85 84 103 108 85 84 99 104 87 79 99 100 79 88 95 100 79 3 +84 102 106 83 88 111 111 87 88 111 106 87 88 99 104 85 84 103 108 85 88 95 104 81 79 99 100 79 88 95 100 79 88 95 100 83 3 +88 111 106 87 88 106 111 87 84 106 106 87 88 95 104 81 84 99 108 85 88 103 113 85 88 95 100 83 88 103 100 83 88 103 109 83 3 +84 106 106 87 84 106 111 91 84 106 115 87 88 103 113 85 88 107 113 85 88 103 108 85 88 
103 109 83 88 103 113 83 84 103 104 83 3 +84 106 115 87 88 106 111 87 88 106 106 87 88 103 108 85 88 103 113 85 88 99 104 85 84 103 104 83 84 99 109 83 84 103 104 83 3 +88 106 111 87 88 106 106 87 84 106 106 87 88 103 113 85 88 99 104 85 84 99 104 85 84 99 109 83 84 103 104 83 88 99 100 79 3 +88 106 106 87 84 106 106 87 84 102 111 83 88 99 104 85 84 99 104 85 84 99 104 81 84 103 104 83 88 99 100 79 84 99 104 79 3 +84 102 111 83 84 98 98 83 80 98 102 83 84 99 104 81 84 99 100 81 80 91 96 78 84 99 104 79 79 95 100 79 79 99 100 83 3 +84 98 98 83 80 98 102 83 80 94 102 83 84 99 100 81 80 91 96 78 80 87 96 74 79 95 100 79 79 99 100 83 79 95 100 83 3 +80 94 102 83 76 89 98 79 68 77 94 79 80 87 96 74 71 75 87 78 60 54 87 74 79 95 100 83 79 91 104 79 75 79 96 79 5 +76 89 98 79 68 77 94 79 60 62 78 76 71 75 87 78 60 54 87 74 56 61 87 78 79 91 104 79 75 79 96 79 75 83 96 79 5 +60 62 78 76 64 73 90 76 80 94 106 83 56 61 87 78 71 79 100 81 80 95 100 85 75 83 96 79 84 99 104 83 84 99 104 83 5 +64 73 90 76 80 94 106 83 84 98 102 83 71 79 100 81 80 95 100 85 80 91 100 81 84 99 104 83 84 99 104 83 79 95 100 75 7 +80 94 102 76 76 94 94 72 72 81 82 68 76 83 91 74 71 79 87 70 71 79 79 67 75 83 85 71 71 75 85 67 71 79 77 67 7 +72 81 82 68 68 73 78 65 64 69 78 65 71 79 79 67 71 79 83 67 71 79 79 63 71 79 77 67 71 75 81 67 67 72 81 67 7 +68 73 78 65 64 69 78 65 68 77 86 65 71 79 83 67 71 79 79 63 68 75 79 67 71 75 81 67 67 72 81 67 67 64 81 67 7 +68 77 86 65 64 66 86 68 57 55 78 72 68 75 79 67 60 68 79 67 53 54 75 70 67 64 81 67 59 61 77 71 55 54 85 67 5 +64 66 86 68 57 55 78 72 53 49 71 65 60 68 79 67 53 54 75 70 53 54 71 63 59 61 77 71 55 54 85 67 55 51 74 67 5 +57 55 78 72 53 49 71 65 57 49 74 65 53 54 75 70 53 54 71 63 56 54 71 63 55 54 85 67 55 51 74 67 55 48 70 62 5 +57 49 74 65 53 49 74 68 53 52 74 68 56 54 71 63 56 51 67 63 53 51 67 67 55 48 70 62 51 48 70 67 51 48 70 67 5 +101 126 133 103 92 112 118 85 84 103 104 81 102 126 134 104 88 121 128 100 84 107 113 87 90 113 117 92 90 113 122 96 95 128 127 103 3 +92 112 118 85 84 103 104 81 84 99 104 78 88 121 128 100 84 107 113 87 84 99 104 79 90 113 122 96 95 128 127 103 95 123 127 100 3 +84 103 104 81 84 99 104 78 84 99 104 81 84 107 113 87 84 99 104 79 84 99 104 79 95 128 127 103 95 123 127 100 82 100 108 85 3 +84 99 104 78 84 99 104 81 76 99 104 81 84 99 104 79 84 99 104 79 84 103 104 79 95 123 127 100 82 100 108 85 82 100 108 81 3 +84 99 104 81 76 99 104 81 76 99 108 85 84 99 104 79 84 103 104 79 79 107 109 87 82 100 108 85 82 100 108 81 82 100 104 78 3 +76 99 108 85 76 103 118 88 80 107 118 88 79 107 109 87 79 107 109 87 79 107 113 87 82 100 104 78 78 100 104 81 82 104 104 85 3 +76 103 118 88 80 107 118 88 80 112 118 88 79 107 109 87 79 107 113 87 79 103 104 83 78 100 104 81 82 104 104 85 82 104 108 85 3 +80 107 118 88 80 112 118 88 80 107 113 85 79 107 113 87 79 103 104 83 79 103 104 79 82 104 104 85 82 104 108 85 82 100 108 85 3 +80 112 118 88 80 107 113 85 80 95 100 78 79 103 104 83 79 103 104 79 79 95 100 79 82 104 108 85 82 100 108 85 78 96 96 78 3 +80 91 100 78 80 91 100 74 80 95 104 74 79 95 100 75 75 95 100 79 75 91 96 75 74 91 92 70 78 91 96 74 74 87 92 70 4 +80 95 104 74 76 91 104 74 76 95 100 78 75 91 96 75 75 91 96 71 79 87 93 71 74 87 92 70 74 87 88 70 78 87 84 70 4 +76 91 104 74 76 95 100 78 76 91 100 74 75 91 96 71 79 87 93 71 79 87 93 67 74 87 88 70 78 87 84 70 74 87 88 66 4 +76 95 100 78 76 91 100 74 76 87 100 74 79 87 93 71 79 87 93 67 75 87 96 71 78 87 84 70 74 87 88 66 74 87 92 70 4 +76 91 100 74 76 87 100 74 76 87 91 74 79 87 93 67 
75 87 96 71 75 91 96 71 74 87 88 66 74 87 92 70 78 87 88 66 4 +68 83 87 67 68 83 87 67 68 79 87 63 71 79 85 62 67 75 85 62 71 75 85 62 70 83 88 70 70 83 84 66 66 79 84 63 4 +68 83 87 67 68 79 87 63 68 79 87 67 67 75 85 62 71 75 85 62 67 79 81 62 70 83 84 66 66 79 84 63 66 79 88 66 4 +71 83 87 67 68 83 87 67 68 83 87 67 71 79 85 62 71 75 81 67 71 75 81 62 70 79 88 66 66 71 88 70 59 60 96 81 4 +68 83 87 67 68 83 87 67 71 83 87 70 71 75 81 67 71 75 81 62 67 75 85 71 66 71 88 70 59 60 96 81 56 49 104 100 4 +76 91 91 74 76 95 104 81 84 103 104 85 67 75 96 79 75 83 96 83 79 99 104 83 49 40 112 114 46 34 122 125 49 40 117 114 4 +76 95 104 81 84 103 104 85 84 103 108 85 75 83 96 83 79 99 104 83 84 99 113 87 46 34 122 125 49 40 117 114 63 67 104 85 3 +84 103 104 85 84 103 108 85 88 107 118 88 79 99 104 83 84 99 113 87 84 99 109 87 49 40 117 114 63 67 104 85 82 96 104 78 3 +88 107 118 92 88 107 118 92 88 112 113 88 84 103 109 83 88 107 113 87 88 107 104 87 86 100 108 85 90 104 112 85 86 104 108 85 3 +88 112 113 88 88 103 113 88 88 103 108 85 88 107 104 87 88 107 109 83 84 99 109 83 86 104 108 85 86 104 108 85 86 104 108 85 3 +84 99 108 85 88 99 104 85 88 103 113 88 88 103 109 87 88 103 109 87 84 103 113 87 86 100 108 85 90 104 112 89 90 104 112 85 3 +88 99 104 85 88 103 113 88 88 112 118 92 88 103 109 87 84 103 113 87 88 111 113 92 90 104 112 89 90 104 112 85 90 109 112 85 3 +88 103 113 88 88 112 118 92 88 112 122 88 84 103 113 87 88 111 113 92 93 107 109 92 90 104 112 85 90 109 112 85 90 109 117 89 3 +88 112 118 92 88 112 122 88 92 112 128 92 88 111 113 92 93 107 109 92 93 111 113 92 90 109 112 85 90 109 117 89 90 109 112 89 3 +88 112 122 88 92 112 128 92 92 112 118 96 93 107 109 92 93 111 113 92 93 116 118 92 90 109 117 89 90 109 112 89 90 109 112 89 3 +92 112 128 92 92 112 118 96 92 112 113 88 93 111 113 92 93 116 118 92 88 111 118 92 90 109 112 89 90 109 112 89 90 104 117 92 3 +92 112 118 96 92 112 113 88 88 103 113 85 93 116 118 92 88 111 118 92 93 107 113 87 90 109 112 89 90 104 117 92 90 109 112 89 3 +92 112 113 88 88 103 113 85 97 107 113 88 88 111 118 92 93 107 113 87 93 107 113 87 90 104 117 92 90 109 112 89 90 109 112 89 3 +88 103 113 85 97 107 113 88 92 112 118 92 93 107 113 87 93 107 113 87 93 107 109 87 90 109 112 89 90 109 112 89 90 104 112 85 3 +97 107 113 88 92 112 118 92 92 112 118 92 93 107 113 87 93 107 109 87 88 107 109 92 90 109 112 89 90 104 112 85 90 104 112 89 3 +92 112 118 92 92 112 118 92 92 107 113 92 93 107 109 87 88 107 109 92 88 107 109 87 90 104 112 85 90 104 112 89 86 104 108 89 3 +92 107 113 92 92 107 118 88 88 107 118 88 88 107 109 87 88 107 109 87 88 107 109 87 86 104 108 89 90 104 108 92 90 109 108 89 3 +92 107 118 88 88 107 118 88 88 107 118 88 88 107 109 87 88 107 109 87 88 103 109 87 90 104 108 92 90 109 108 89 86 104 112 85 3 +88 107 118 88 88 107 118 88 88 103 108 85 88 107 109 87 88 103 109 87 93 103 109 87 90 109 108 89 86 104 112 85 86 104 104 81 3 +88 107 118 88 88 103 108 85 88 103 113 92 88 103 109 87 93 103 109 87 88 107 109 87 86 104 112 85 86 104 104 81 86 96 104 81 3 +88 103 108 85 88 103 113 92 88 107 113 88 93 103 109 87 88 107 109 87 88 111 113 92 86 104 104 81 86 96 104 81 86 104 108 85 3 +88 103 108 81 88 103 108 88 84 99 104 85 88 107 113 87 88 107 113 87 88 107 109 83 90 109 112 92 86 109 108 89 86 109 112 89 3 +84 99 104 85 84 103 108 81 88 99 104 85 88 107 109 83 84 99 104 87 79 99 100 79 86 109 112 89 90 109 112 92 86 104 108 89 3 +84 103 108 81 88 99 104 85 84 103 108 85 84 99 104 87 79 99 100 79 88 95 100 79 90 109 112 92 86 104 108 89 
86 104 104 85 3 +88 99 104 85 84 103 108 85 88 95 104 81 79 99 100 79 88 95 100 79 88 95 100 83 86 104 108 89 86 104 104 85 82 100 100 85 3 +84 99 108 85 88 103 113 85 88 107 113 85 88 103 100 83 88 103 109 83 88 103 113 83 82 100 104 78 86 100 96 81 82 100 104 81 3 +88 103 113 85 88 107 113 85 88 103 108 85 88 103 109 83 88 103 113 83 84 103 104 83 86 100 96 81 82 100 104 81 82 100 104 81 3 +88 107 113 85 88 103 108 85 88 103 113 85 88 103 113 83 84 103 104 83 84 99 109 83 82 100 104 81 82 100 104 81 86 100 104 81 3 +88 103 108 85 88 103 113 85 88 99 104 85 84 103 104 83 84 99 109 83 84 103 104 83 82 100 104 81 86 100 104 81 82 96 100 81 3 +88 103 113 85 88 99 104 85 84 99 104 85 84 99 109 83 84 103 104 83 88 99 100 79 86 100 104 81 82 96 100 81 82 100 108 81 3 +84 99 100 81 80 91 96 78 80 87 96 74 79 95 100 79 79 99 100 83 79 95 100 83 82 96 100 81 86 96 104 81 82 96 100 81 3 +80 87 96 74 71 75 87 78 60 54 87 74 79 95 100 83 79 91 104 79 75 79 96 79 82 96 100 81 82 100 104 78 82 96 104 81 3 +71 79 100 81 80 95 100 85 80 91 100 81 84 99 104 83 84 99 104 83 79 95 100 75 82 100 104 85 86 100 108 85 86 100 112 85 3 +80 91 100 81 80 91 100 78 76 83 91 74 79 95 100 75 75 87 93 71 75 83 85 71 86 100 112 85 86 100 112 85 82 96 100 81 7 +80 91 100 78 76 83 91 74 71 79 87 70 75 87 93 71 75 83 85 71 71 75 85 67 86 100 112 85 82 96 100 81 78 83 84 70 7 +71 79 87 70 71 79 79 67 71 79 83 67 71 75 85 67 71 79 77 67 71 75 81 67 78 83 84 70 74 75 88 66 70 79 88 66 7 +71 79 83 67 71 79 79 63 68 75 79 67 71 75 81 67 67 72 81 67 67 64 81 67 70 79 88 66 70 75 76 66 66 71 80 66 7 +71 79 79 63 68 75 79 67 60 68 79 67 67 72 81 67 67 64 81 67 59 61 77 71 70 75 76 66 66 71 80 66 66 63 76 66 5 +68 75 79 67 60 68 79 67 53 54 75 70 67 64 81 67 59 61 77 71 55 54 85 67 66 71 80 66 66 63 76 66 59 60 73 63 5 +60 68 79 67 53 54 75 70 53 54 71 63 59 61 77 71 55 54 85 67 55 51 74 67 66 63 76 66 59 60 73 63 59 56 76 66 5 +53 54 75 70 53 54 71 63 56 54 71 63 55 54 85 67 55 51 74 67 55 48 70 62 59 60 73 63 59 56 76 66 59 53 76 70 5 +53 54 71 63 56 54 71 63 56 51 67 63 55 51 74 67 55 48 70 62 51 48 70 67 59 56 76 66 59 53 76 70 56 49 73 70 5 +56 54 71 63 56 51 67 63 53 51 67 67 55 48 70 62 51 48 70 67 51 48 70 67 59 53 76 70 56 49 73 70 49 40 69 66 5 +97 126 128 104 102 137 139 108 102 126 134 104 90 109 112 89 90 109 112 89 90 113 117 92 96 108 119 90 92 103 110 86 87 108 114 86 3 +102 137 139 108 102 126 134 104 88 121 128 100 90 109 112 89 90 113 117 92 90 113 122 96 92 103 110 86 87 108 114 86 87 103 114 90 3 +102 126 134 104 88 121 128 100 84 107 113 87 90 113 117 92 90 113 122 96 95 128 127 103 87 108 114 86 87 103 114 90 92 122 135 109 3 +84 107 113 87 84 99 104 79 84 99 104 79 95 128 127 103 95 123 127 100 82 100 108 85 92 122 135 109 96 127 130 105 92 108 114 86 3 +84 99 104 79 84 99 104 79 84 103 104 79 95 123 127 100 82 100 108 85 82 100 108 81 96 127 130 105 92 108 114 86 83 103 105 83 3 +84 99 104 79 84 103 104 79 79 107 109 87 82 100 108 85 82 100 108 81 82 100 104 78 92 108 114 86 83 103 105 83 79 103 110 83 3 +84 103 104 79 79 107 109 87 79 107 109 87 82 100 108 81 82 100 104 78 78 100 104 81 83 103 105 83 79 103 110 83 79 99 105 83 3 +79 107 109 87 79 107 109 87 79 107 113 87 82 100 104 78 78 100 104 81 82 104 104 85 79 103 110 83 79 99 105 83 83 103 114 86 3 +79 103 104 83 79 103 104 79 79 95 100 79 82 104 108 85 82 100 108 85 78 96 96 78 79 99 105 83 79 95 101 79 83 95 93 75 3 +79 103 104 79 79 95 100 79 79 95 96 75 82 100 108 85 78 96 96 78 78 91 92 70 79 95 101 79 83 95 93 75 83 91 97 72 4 +79 95 100 79 79 
115 119 90 89 115 114 94 93 115 124 97 88 102 111 87 84 106 111 91 88 111 115 91 84 112 113 88 88 107 113 88 92 112 113 88 3 +93 115 119 94 97 111 119 94 93 106 114 90 92 111 115 91 88 111 111 87 92 106 115 91 92 112 118 88 88 107 113 88 88 103 108 85 3 +97 111 119 94 93 106 114 90 89 106 101 80 88 111 111 87 92 106 115 91 88 102 111 83 88 107 113 88 88 103 108 85 84 95 100 85 3 +93 106 114 90 89 106 101 80 74 75 97 83 92 106 115 91 88 102 111 83 76 77 102 83 88 103 108 85 84 95 100 85 80 95 100 74 3 +89 106 101 80 74 75 97 83 53 49 114 108 88 102 111 83 76 77 102 83 53 40 115 116 84 95 100 85 80 95 100 74 64 64 104 96 2 +74 75 97 83 53 49 114 108 47 37 119 126 76 77 102 83 53 40 115 116 44 29 125 135 80 95 100 74 64 64 104 96 46 36 122 139 2 +53 49 114 108 47 37 119 126 44 31 124 133 53 40 115 116 44 29 125 135 47 34 125 135 64 64 104 96 46 36 122 139 46 31 128 135 2 +44 31 124 133 44 31 129 140 44 34 129 143 47 34 125 135 50 31 131 135 47 31 125 135 46 31 128 135 46 31 128 135 46 31 133 143 2 +44 34 129 143 44 31 129 140 44 34 124 133 47 31 125 135 44 31 125 135 47 31 131 135 46 31 133 143 46 31 139 143 43 31 133 143 2 +44 34 124 133 44 34 124 136 44 34 129 140 47 31 131 135 44 34 131 139 47 34 136 139 43 31 133 143 43 29 133 143 46 31 133 150 2 +44 34 129 140 44 31 124 140 44 34 119 136 47 34 136 139 47 31 125 139 47 31 125 135 46 31 133 150 46 31 139 143 50 31 133 135 2 +44 31 124 140 44 34 119 136 44 34 129 136 47 31 125 139 47 31 125 135 44 31 125 135 46 31 139 143 50 31 133 135 50 31 128 132 2 +44 31 124 136 44 37 119 126 50 43 110 115 44 31 125 135 44 31 120 131 50 40 115 113 46 34 128 135 46 36 128 132 46 39 122 121 2 +44 37 119 126 50 43 110 115 44 34 129 143 44 31 120 131 50 40 115 113 50 46 111 116 46 36 128 132 46 39 122 121 53 45 108 103 2 +50 43 110 115 44 34 129 143 42 29 135 150 50 40 115 113 50 46 111 116 44 31 131 142 46 39 122 121 53 45 108 103 50 36 118 128 2 +42 29 135 150 44 29 124 143 44 34 129 143 44 31 131 142 44 29 136 146 44 31 136 142 50 36 118 128 43 31 139 143 46 29 133 139 2 +44 34 129 143 44 34 124 143 44 34 119 136 44 31 136 142 44 31 136 139 44 31 131 135 46 29 133 139 46 31 133 135 46 31 122 132 2 +44 34 124 143 44 34 119 136 42 34 119 129 44 31 136 139 44 31 131 135 44 31 120 131 46 31 133 135 46 31 122 132 46 34 122 128 2 +44 34 119 136 42 34 119 129 44 34 114 129 44 31 131 135 44 31 120 131 44 31 120 128 46 31 122 132 46 34 122 128 46 34 122 125 2 +42 34 119 129 44 34 114 129 44 34 114 126 44 31 120 131 44 31 120 128 44 34 115 124 46 34 122 128 46 34 122 125 46 36 122 121 2 +47 34 119 126 47 34 114 126 47 34 114 122 47 37 120 124 44 34 120 120 47 37 120 124 46 34 118 121 43 36 118 121 46 36 118 128 2 +82 92 101 80 85 102 105 83 85 106 110 90 64 69 102 79 80 98 102 79 84 102 102 87 53 51 113 103 71 83 100 78 84 99 104 85 3 +85 102 105 83 85 106 110 90 89 106 114 90 80 98 102 79 84 102 102 87 88 106 111 87 71 83 100 78 84 99 104 85 84 103 113 88 3 +85 106 110 90 89 106 114 90 89 106 110 90 84 102 102 87 88 106 111 87 88 106 111 87 84 99 104 85 84 103 113 88 88 107 118 88 3 +89 111 110 87 93 106 114 87 93 106 114 90 88 102 106 87 88 102 111 83 88 111 111 91 88 107 108 88 88 103 104 85 88 103 113 85 3 +93 106 114 90 93 111 119 94 93 111 114 90 88 111 111 91 92 115 115 91 88 111 115 91 88 103 113 85 88 107 108 88 92 107 113 92 3 +93 111 119 94 93 111 114 90 93 111 114 90 92 115 115 91 88 111 115 91 92 106 115 87 88 107 108 88 92 107 113 92 92 112 118 88 3 +93 111 114 90 93 111 114 90 89 106 114 83 88 111 115 91 92 106 115 87 88 111 111 91 92 107 113 92 92 
112 118 88 92 112 113 92 3 +89 106 114 83 89 106 114 87 89 106 110 87 88 111 111 91 92 111 115 91 97 111 120 91 92 112 113 92 92 112 118 92 92 112 118 92 3 +89 106 114 87 89 106 110 87 89 106 114 90 92 111 115 91 97 111 120 91 92 111 111 87 92 112 118 92 92 112 118 92 92 107 118 88 3 +89 106 110 87 89 106 114 90 89 106 114 87 97 111 120 91 92 111 111 87 88 111 115 87 92 112 118 92 92 107 118 88 88 112 118 88 3 +89 111 105 90 89 111 110 90 85 106 110 87 92 111 115 87 88 106 111 87 88 106 111 87 92 103 113 88 88 107 108 92 92 107 108 88 3 +89 111 110 90 85 106 110 87 89 111 114 94 88 106 111 87 88 106 111 87 92 106 111 91 88 107 108 92 92 107 108 88 92 112 113 88 3 +85 106 110 87 89 111 114 94 89 111 110 90 88 106 111 87 92 106 111 91 92 111 115 91 92 107 108 88 92 112 113 88 92 112 118 92 3 +89 111 114 94 89 111 110 90 89 106 114 90 92 106 111 91 92 111 115 91 92 111 120 91 92 112 113 88 92 112 118 92 92 112 118 96 3 +89 106 114 90 89 111 114 94 97 120 119 97 92 111 120 91 92 115 120 94 92 120 120 94 92 112 118 96 88 107 122 88 88 103 108 85 3 +89 111 114 94 97 120 119 97 93 115 114 90 92 115 120 94 92 120 120 94 92 106 111 87 88 107 122 88 88 103 108 85 80 87 91 67 3 +97 120 119 97 93 115 114 90 89 111 114 94 92 120 120 94 92 106 111 87 80 98 102 76 88 103 108 85 80 87 91 67 68 71 75 59 3 +89 111 114 94 89 111 110 90 85 97 105 80 80 98 102 76 76 85 90 68 64 77 78 61 68 71 75 59 60 57 60 45 53 54 53 38 5 +89 111 110 90 85 97 105 80 82 92 97 76 76 85 90 68 64 77 78 61 60 69 67 54 60 57 60 45 53 54 53 38 53 54 53 34 5 +82 92 97 76 78 88 89 73 70 84 82 65 60 69 67 54 60 66 67 57 64 69 71 57 53 54 53 34 56 57 56 45 60 57 67 49 5 +78 88 89 73 70 84 82 65 67 79 78 62 60 66 67 57 64 69 71 57 64 66 67 54 56 57 56 45 60 57 67 49 60 57 67 49 5 +70 84 82 65 67 79 78 62 63 71 78 58 64 69 71 57 64 66 67 54 64 62 71 50 60 57 67 49 60 57 67 49 56 54 67 49 5 +67 71 78 58 67 75 82 62 67 71 74 58 60 62 67 50 60 62 67 54 64 69 74 61 56 54 67 52 53 57 67 52 60 64 75 63 7 +67 75 82 62 67 71 74 58 63 67 70 55 60 62 67 54 64 69 74 61 64 66 67 54 53 57 67 52 60 64 75 63 64 68 79 59 7 +63 67 70 55 67 71 70 58 67 75 82 69 64 66 67 54 64 69 74 61 68 81 86 72 64 68 79 59 64 68 71 56 64 71 75 63 7 +67 71 70 58 67 75 82 69 70 84 93 76 64 69 74 61 68 81 86 72 72 81 90 76 64 68 71 56 64 71 75 63 68 79 79 67 7 +70 84 85 69 67 75 78 58 63 63 74 58 72 81 86 68 64 73 74 61 64 69 71 61 71 79 79 63 71 79 79 67 68 83 83 67 7 +67 75 78 58 63 63 74 58 67 71 74 65 64 73 74 61 64 69 71 61 68 73 82 65 71 79 79 67 68 83 83 67 71 79 87 70 7 +67 71 74 65 70 79 82 62 78 84 89 73 68 73 82 65 72 77 82 68 76 81 90 76 71 79 87 70 71 79 87 70 71 83 87 70 7 +70 79 82 62 78 84 89 73 74 88 89 69 72 77 82 68 76 81 90 76 76 85 90 72 71 79 87 70 71 83 87 70 68 75 79 67 7 +78 84 89 73 74 88 89 69 70 79 85 65 76 81 90 76 76 85 90 72 76 77 90 68 71 83 87 70 68 75 79 67 71 75 79 63 7 +74 88 89 69 70 79 85 65 67 79 82 65 76 85 90 72 76 77 90 68 72 77 78 61 68 75 79 67 71 75 79 63 71 79 79 63 7 +67 79 82 65 67 79 78 65 67 75 74 62 72 77 78 61 68 69 71 57 64 69 74 57 71 79 79 63 68 75 75 59 68 68 71 56 7 +67 79 78 65 67 75 74 62 67 67 70 55 68 69 71 57 64 69 74 57 68 69 74 57 68 75 75 59 68 68 71 56 64 71 79 59 7 +67 75 74 62 67 67 70 55 60 63 70 58 64 69 74 57 68 69 74 57 64 66 67 54 68 68 71 56 64 71 79 59 68 71 71 59 7 +67 67 70 55 60 63 70 58 63 67 70 58 68 69 74 57 64 66 67 54 64 66 71 57 64 71 79 59 68 71 71 59 64 68 71 59 7 +97 115 120 94 92 115 115 94 88 111 115 91 88 107 113 88 88 107 113 88 84 107 113 88 88 111 113 92 88 111 113 92 84 111 113 92 
3 +92 115 115 94 88 111 115 91 88 102 111 87 88 107 113 88 84 107 113 88 84 112 113 88 88 111 113 92 84 111 113 92 84 111 118 92 3 +88 111 115 91 88 102 111 87 84 106 111 91 84 107 113 88 84 112 113 88 88 107 113 88 84 111 113 92 84 111 118 92 93 111 113 92 3 +84 106 111 91 88 111 115 91 92 111 115 91 88 107 113 88 92 112 113 88 92 112 118 88 93 111 113 92 93 111 113 92 93 111 118 92 3 +88 111 111 87 92 106 115 91 88 102 111 83 88 107 113 88 88 103 108 85 84 95 100 85 88 107 109 87 88 95 104 83 84 99 100 79 3 +76 77 102 83 53 40 115 116 44 29 125 135 80 95 100 74 64 64 104 96 46 36 122 139 84 95 96 79 71 83 93 79 55 51 113 108 2 +44 29 125 135 47 34 125 135 50 31 131 135 46 36 122 139 46 31 128 135 46 31 128 135 55 51 113 108 44 37 134 137 44 32 139 141 2 +47 34 125 135 50 31 131 135 47 31 125 135 46 31 128 135 46 31 128 135 46 31 133 143 44 37 134 137 44 32 139 141 44 34 139 146 2 +50 31 131 135 47 31 125 135 44 31 125 135 46 31 128 135 46 31 133 143 46 31 139 143 44 32 139 141 44 34 139 146 44 29 134 146 2 +47 31 125 139 47 31 125 135 44 31 125 135 46 31 139 143 50 31 133 135 50 31 128 132 44 32 134 137 44 32 134 137 48 34 128 133 2 +44 31 125 135 44 31 125 135 44 31 120 131 50 31 128 132 46 34 128 135 46 36 128 132 48 34 128 133 48 32 134 133 48 34 123 133 2 +44 31 125 135 44 31 120 131 50 40 115 113 46 34 128 135 46 36 128 132 46 39 122 121 48 32 134 133 48 34 123 133 48 37 118 125 2 +44 31 120 131 50 40 115 113 50 46 111 116 46 36 128 132 46 39 122 121 53 45 108 103 48 34 123 133 48 37 118 125 48 40 118 112 2 +50 40 115 113 50 46 111 116 44 31 131 142 46 39 122 121 53 45 108 103 50 36 118 128 48 37 118 125 48 40 118 112 51 45 104 100 2 +44 31 131 142 44 29 136 146 44 31 136 142 50 36 118 128 43 31 139 143 46 29 133 139 51 45 104 100 48 37 123 129 44 32 128 137 2 +44 31 120 131 44 31 120 128 44 34 115 124 46 34 122 128 46 34 122 125 46 36 122 121 44 32 128 125 48 29 123 125 44 32 113 121 2 +44 34 115 124 47 34 115 120 47 37 120 124 46 36 122 121 46 36 118 125 46 34 118 121 44 32 113 121 48 34 118 112 51 37 118 112 2 +47 34 115 120 47 37 120 124 44 34 120 120 46 36 118 125 46 34 118 121 43 36 118 121 48 34 118 112 51 37 118 112 48 40 113 112 2 +47 37 120 124 44 34 120 120 47 37 120 124 46 34 118 121 43 36 118 121 46 36 118 128 51 37 118 112 48 40 113 112 48 37 113 116 2 +47 37 120 124 44 37 120 124 44 37 115 120 46 36 118 128 46 34 122 125 50 34 118 125 48 37 113 116 48 34 123 125 48 37 118 125 2 +44 37 120 124 44 37 115 120 50 40 111 109 46 34 122 125 50 34 118 125 50 36 118 128 48 34 123 125 48 37 118 125 48 34 123 125 2 +44 37 115 120 50 40 111 109 64 69 102 79 50 34 118 125 50 36 118 128 53 51 113 103 48 37 118 125 48 34 123 125 48 37 118 121 2 +50 40 111 109 64 69 102 79 80 98 102 79 50 36 118 128 53 51 113 103 71 83 100 78 48 34 123 125 48 37 118 121 63 58 109 96 2 +80 98 102 79 84 102 102 87 88 106 111 87 71 83 100 78 84 99 104 85 84 103 113 88 63 58 109 96 79 95 100 79 88 107 109 87 3 +84 102 102 87 88 106 111 87 88 106 111 87 84 99 104 85 84 103 113 88 88 107 118 88 79 95 100 79 88 107 109 87 88 111 109 92 3 +88 106 111 87 88 106 111 87 88 102 106 87 84 103 113 88 88 107 118 88 88 107 108 88 88 107 109 87 88 111 109 92 88 107 113 87 3 +88 102 106 87 88 102 111 83 88 111 111 91 88 107 108 88 88 103 104 85 88 103 113 85 88 107 113 87 88 103 104 83 88 107 109 87 3 +88 102 111 83 88 111 111 91 92 115 115 91 88 103 104 85 88 103 113 85 88 107 108 88 88 103 104 83 88 107 109 87 93 107 113 92 3 +88 111 111 91 92 115 115 91 88 111 115 91 88 103 113 85 88 107 108 88 92 107 113 92 88 107 
109 87 93 107 113 92 93 107 113 87 3 +88 111 115 91 92 106 115 87 88 111 111 91 92 107 113 92 92 112 118 88 92 112 113 92 93 107 113 87 88 111 118 87 88 111 118 96 3 +97 111 120 91 92 111 111 87 88 111 115 87 92 112 118 92 92 107 118 88 88 112 118 88 93 111 118 92 93 111 118 92 93 111 118 92 3 +92 111 111 87 88 111 115 87 88 111 115 87 92 107 118 88 88 112 118 88 88 107 113 85 93 111 118 92 93 111 118 92 88 111 118 92 3 +88 111 115 87 88 111 115 87 88 111 115 87 88 112 118 88 88 107 113 85 88 107 113 88 93 111 118 92 88 111 118 92 88 107 113 92 3 +88 111 115 87 88 111 115 87 92 111 115 87 88 107 113 85 88 107 113 88 92 103 113 88 88 111 118 92 88 107 113 92 93 111 118 87 3 +88 111 115 87 92 111 115 87 88 106 111 87 88 107 113 88 92 103 113 88 88 107 108 92 88 107 113 92 93 111 118 87 88 107 109 87 3 +92 111 115 87 88 106 111 87 88 106 111 87 92 103 113 88 88 107 108 92 92 107 108 88 93 111 118 87 88 107 109 87 88 107 109 87 3 +88 106 111 87 92 106 111 91 92 111 115 91 92 107 108 88 92 112 113 88 92 112 118 92 88 107 109 87 93 111 113 87 88 103 113 83 3 +92 106 111 91 92 111 115 91 92 111 120 91 92 112 113 88 92 112 118 92 92 112 118 96 93 111 113 87 88 103 113 83 84 95 100 83 3 +92 111 120 91 92 115 120 94 92 120 120 94 92 112 118 96 88 107 122 88 88 103 108 85 84 95 100 83 79 87 96 75 75 79 89 67 3 +92 115 120 94 92 120 120 94 92 106 111 87 88 107 122 88 88 103 108 85 80 87 91 67 79 87 96 75 75 79 89 67 75 75 74 58 7 +92 106 111 87 80 98 102 76 76 85 90 68 80 87 91 67 68 71 75 59 60 57 60 45 75 75 74 58 63 61 63 42 55 51 50 29 5 +80 98 102 76 76 85 90 68 64 77 78 61 68 71 75 59 60 57 60 45 53 54 53 38 63 61 63 42 55 51 50 29 55 54 57 37 5 +60 69 67 54 60 66 67 57 64 69 71 57 53 54 53 34 56 57 56 45 60 57 67 49 59 54 63 42 55 54 63 46 59 51 67 46 5 +60 66 67 57 64 69 71 57 64 66 67 54 56 57 56 45 60 57 67 49 60 57 67 49 55 54 63 46 59 51 67 46 59 51 67 50 5 +64 69 71 57 64 66 67 54 64 62 71 50 60 57 67 49 60 57 67 49 56 54 67 49 59 51 67 46 59 51 67 50 55 54 67 50 5 +64 66 67 54 64 62 71 50 60 62 67 50 60 57 67 49 56 54 67 49 56 54 67 52 59 51 67 50 55 54 67 50 55 54 60 46 5 +64 62 71 50 60 62 67 50 60 62 67 54 56 54 67 49 56 54 67 52 53 57 67 52 55 54 67 50 55 54 60 46 55 54 67 50 5 +60 62 67 50 60 62 67 54 64 69 74 61 56 54 67 52 53 57 67 52 60 64 75 63 55 54 60 46 55 54 67 50 55 58 70 54 5 +60 62 67 54 64 69 74 61 64 66 67 54 53 57 67 52 60 64 75 63 64 68 79 59 55 54 67 50 55 58 70 54 63 68 77 62 7 +64 69 74 61 64 66 67 54 64 69 74 61 60 64 75 63 64 68 79 59 64 68 71 56 55 58 70 54 63 68 77 62 67 72 74 58 7 +64 66 67 54 64 69 74 61 68 81 86 72 64 68 79 59 64 68 71 56 64 71 75 63 63 68 77 62 67 72 74 58 63 68 70 58 7 +64 69 74 61 68 81 86 72 72 81 90 76 64 68 71 56 64 71 75 63 68 79 79 67 67 72 74 58 63 68 70 58 67 75 74 62 7 +72 81 90 76 72 81 86 68 64 73 74 61 68 79 79 67 71 79 79 63 71 79 79 67 67 75 74 62 71 79 85 67 71 87 96 75 7 +72 81 86 68 64 73 74 61 64 69 71 61 71 79 79 63 71 79 79 67 68 83 83 67 71 79 85 67 71 87 96 75 75 91 96 79 7 +68 73 82 65 72 77 82 68 76 81 90 76 71 79 87 70 71 79 87 70 71 83 87 70 75 83 89 71 71 79 85 67 71 75 85 67 7 +76 81 90 76 76 85 90 72 76 77 90 68 71 83 87 70 68 75 79 67 71 75 79 63 71 75 85 67 71 75 74 62 67 72 70 58 7 +76 77 90 68 72 77 78 61 68 69 71 57 71 75 79 63 71 79 79 63 68 75 75 59 67 72 70 58 67 72 74 58 63 68 74 58 7 +64 69 74 57 68 69 74 57 64 66 67 54 68 68 71 56 64 71 79 59 68 71 71 59 63 68 74 58 67 72 74 62 71 75 77 67 7 +68 69 74 57 64 66 67 54 64 66 71 57 64 71 79 59 68 71 71 59 64 68 71 59 67 72 74 62 71 75 77 67 71 75 74 62 7 
+92 116 122 92 92 116 118 92 88 107 113 88 93 116 118 96 93 111 118 92 88 111 113 92 90 113 122 96 95 109 112 89 95 109 117 85 3 +88 107 113 88 88 107 113 88 84 107 113 88 88 111 113 92 88 111 113 92 84 111 113 92 95 109 117 85 90 113 117 92 95 113 117 92 3 +84 107 113 88 84 112 113 88 88 107 113 88 84 111 113 92 84 111 118 92 93 111 113 92 95 113 117 92 95 118 117 96 95 118 122 96 3 +88 107 113 88 92 112 113 88 92 112 118 88 93 111 113 92 93 111 113 92 93 111 118 92 95 118 122 96 99 118 117 92 95 113 117 96 3 +92 112 118 88 88 107 113 88 88 103 108 85 93 111 118 92 88 107 109 87 88 95 104 83 95 113 117 96 86 104 108 89 82 96 104 78 3 +88 103 108 85 84 95 100 85 80 95 100 74 88 95 104 83 84 99 100 79 84 95 96 79 82 96 104 78 82 96 104 81 82 96 100 81 4 +84 95 100 85 80 95 100 74 64 64 104 96 84 99 100 79 84 95 96 79 71 83 93 79 82 96 104 81 82 96 100 81 82 91 92 78 4 +46 31 128 135 46 31 128 135 46 31 133 143 44 37 134 137 44 32 139 141 44 34 139 146 63 56 108 103 46 34 127 144 43 32 133 144 2 +46 31 128 135 46 31 133 143 46 31 139 143 44 32 139 141 44 34 139 146 44 29 134 146 46 34 127 144 43 32 133 144 43 32 138 144 2 +43 31 133 143 43 29 133 143 46 31 133 150 44 34 139 146 44 32 134 141 48 32 134 141 46 32 138 144 46 32 138 144 46 32 133 144 2 +46 31 139 143 50 31 133 135 50 31 128 132 44 32 134 137 44 32 134 137 48 34 128 133 46 32 133 136 46 32 133 136 46 32 127 136 2 +50 31 133 135 50 31 128 132 46 34 128 135 44 32 134 137 48 34 128 133 48 32 134 133 46 32 133 136 46 32 127 136 49 32 127 133 2 +46 36 128 132 46 39 122 121 53 45 108 103 48 34 123 133 48 37 118 125 48 40 118 112 46 34 127 129 49 32 127 133 46 32 122 129 2 +46 39 122 121 53 45 108 103 50 36 118 128 48 37 118 125 48 40 118 112 51 45 104 100 49 32 127 133 46 32 122 129 49 37 112 118 2 +53 45 108 103 50 36 118 128 43 31 139 143 48 40 118 112 51 45 104 100 48 37 123 129 46 32 122 129 49 37 112 118 52 43 104 103 2 +43 31 139 143 46 29 133 139 46 31 133 135 48 37 123 129 44 32 128 137 44 32 123 129 52 43 104 103 49 37 117 122 43 29 138 140 2 +46 29 133 139 46 31 133 135 46 31 122 132 44 32 128 137 44 32 123 129 44 34 128 129 49 37 117 122 43 29 138 140 46 29 127 133 2 +46 31 133 135 46 31 122 132 46 34 122 128 44 32 123 129 44 34 128 129 44 32 128 125 43 29 138 140 46 29 127 133 46 29 122 125 2 +46 31 122 132 46 34 122 128 46 34 122 125 44 34 128 129 44 32 128 125 48 29 123 125 46 29 127 133 46 29 122 125 46 32 112 118 2 +46 34 122 128 46 34 122 125 46 36 122 121 44 32 128 125 48 29 123 125 44 32 113 121 46 29 122 125 46 32 112 118 46 34 112 118 2 +46 34 122 125 46 36 122 121 46 36 118 125 48 29 123 125 44 32 113 121 48 34 118 112 46 32 112 118 46 34 112 118 46 34 112 114 2 +46 36 122 121 46 36 118 125 46 34 118 121 44 32 113 121 48 34 118 112 51 37 118 112 46 34 112 118 46 34 112 114 46 34 112 111 2 +46 36 118 125 46 34 118 121 43 36 118 121 48 34 118 112 51 37 118 112 48 40 113 112 46 34 112 114 46 34 112 111 49 34 112 111 2 +46 34 118 121 43 36 118 121 46 36 118 128 51 37 118 112 48 40 113 112 48 37 113 116 46 34 112 111 49 34 112 111 46 37 117 114 2 +46 34 122 125 50 34 118 125 50 36 118 128 48 34 123 125 48 37 118 125 48 34 123 125 49 34 112 118 52 34 117 122 49 34 122 118 2 +50 34 118 125 50 36 118 128 53 51 113 103 48 37 118 125 48 34 123 125 48 37 118 121 52 34 117 122 49 34 122 118 49 34 117 122 2 +84 99 104 85 84 103 113 88 88 107 118 88 79 95 100 79 88 107 109 87 88 111 109 92 52 49 112 107 74 79 100 81 86 100 108 85 3 +84 103 113 88 88 107 118 88 88 107 108 88 88 107 109 87 88 111 109 92 88 107 113 87 74 79 100 81 
86 100 108 85 90 109 108 89 3 +88 103 104 85 88 103 113 85 88 107 108 88 88 103 104 83 88 107 109 87 93 107 113 92 90 104 112 89 86 104 112 85 90 109 117 89 3 +88 107 108 88 92 107 113 92 92 112 118 88 93 107 113 92 93 107 113 87 88 111 118 87 90 109 117 89 90 113 112 92 90 113 112 92 3 +92 112 118 88 92 112 113 92 92 112 118 92 88 111 118 87 88 111 118 96 93 111 118 96 90 113 112 92 90 109 112 89 90 113 117 92 3 +92 112 113 92 92 112 118 92 92 112 118 92 88 111 118 96 93 111 118 96 93 111 118 92 90 109 112 89 90 113 117 92 95 113 117 96 3 +92 112 118 92 92 112 118 92 92 107 118 88 93 111 118 96 93 111 118 92 93 111 118 92 90 113 117 92 95 113 117 96 90 109 117 96 3 +92 112 118 92 92 107 118 88 88 112 118 88 93 111 118 92 93 111 118 92 93 111 118 92 95 113 117 96 90 109 117 96 90 118 122 96 3 +92 107 118 88 88 112 118 88 88 107 113 85 93 111 118 92 93 111 118 92 88 111 118 92 90 109 117 96 90 118 122 96 90 113 117 96 3 +88 112 118 88 88 107 113 85 88 107 113 88 93 111 118 92 88 111 118 92 88 107 113 92 90 118 122 96 90 113 117 96 90 113 122 96 3 +88 107 113 85 88 107 113 88 92 103 113 88 88 111 118 92 88 107 113 92 93 111 118 87 90 113 117 96 90 113 122 96 90 113 112 92 3 +88 107 113 88 92 103 113 88 88 107 108 92 88 107 113 92 93 111 118 87 88 107 109 87 90 113 122 96 90 113 112 92 90 113 112 92 3 +88 107 108 92 92 107 108 88 92 112 113 88 88 107 109 87 88 107 109 87 93 111 113 87 90 113 112 92 90 113 112 89 86 104 104 85 3 +92 112 113 88 92 112 118 92 92 112 118 96 93 111 113 87 88 103 113 83 84 95 100 83 86 104 104 85 78 96 92 81 74 91 96 78 3 +92 112 118 92 92 112 118 96 88 107 122 88 88 103 113 83 84 95 100 83 79 87 96 75 78 96 92 81 74 91 96 78 74 87 92 74 7 +88 107 122 88 88 103 108 85 80 87 91 67 79 87 96 75 75 79 89 67 75 75 74 58 74 87 92 74 74 79 84 66 70 79 80 63 7 +80 87 91 67 68 71 75 59 60 57 60 45 75 75 74 58 63 61 63 42 55 51 50 29 70 79 80 63 63 67 69 52 59 56 62 48 5 +56 57 56 45 60 57 67 49 60 57 67 49 55 54 63 46 59 51 67 46 59 51 67 50 59 56 62 44 59 53 62 44 59 56 66 44 5 +60 57 67 49 60 57 67 49 56 54 67 49 59 51 67 46 59 51 67 50 55 54 67 50 59 53 62 44 59 56 66 44 56 56 73 52 5 +56 54 67 52 53 57 67 52 60 64 75 63 55 54 60 46 55 54 67 50 55 58 70 54 59 56 76 55 59 49 69 48 59 53 66 44 5 +53 57 67 52 60 64 75 63 64 68 79 59 55 54 67 50 55 58 70 54 63 68 77 62 59 49 69 48 59 53 66 44 56 53 66 48 5 +68 79 79 67 71 79 79 63 71 79 79 67 67 75 74 62 71 79 85 67 71 87 96 75 63 71 73 59 63 67 73 59 66 75 80 63 7 +71 79 79 63 71 79 79 67 68 83 83 67 71 79 85 67 71 87 96 75 75 91 96 79 63 67 73 59 66 75 80 63 70 79 84 70 7 +68 83 83 67 71 79 87 70 71 79 87 70 75 91 96 79 75 83 89 71 71 79 85 67 70 79 84 70 70 79 84 66 70 71 73 63 7 +71 79 87 70 71 83 87 70 68 75 79 67 71 79 85 67 71 75 85 67 71 75 74 62 70 71 73 63 63 67 69 59 59 63 66 55 7 +71 83 87 70 68 75 79 67 71 75 79 63 71 75 85 67 71 75 74 62 67 72 70 58 63 67 69 59 59 63 66 55 59 63 73 59 7 +68 75 79 67 71 75 79 63 71 79 79 63 71 75 74 62 67 72 70 58 67 72 74 58 59 63 66 55 59 63 73 59 63 67 73 55 7 +71 75 79 63 71 79 79 63 68 75 75 59 67 72 70 58 67 72 74 58 63 68 74 58 59 63 73 59 63 67 73 55 63 67 69 55 7 +71 79 79 63 68 75 75 59 68 68 71 56 67 72 74 58 63 68 74 58 63 68 74 58 63 67 73 55 63 67 69 55 66 75 76 63 7 +64 71 79 59 68 71 71 59 64 68 71 59 67 72 74 62 71 75 77 67 71 75 74 62 66 71 73 59 63 67 66 55 63 75 80 63 7 +93 116 118 96 93 111 118 92 88 111 113 92 90 113 122 96 95 109 112 89 95 109 117 85 101 112 124 94 96 112 114 90 92 112 114 94 3 +88 111 113 92 88 111 113 92 84 111 113 92 95 109 117 85 90 
113 117 92 95 113 117 92 92 112 114 94 92 117 119 98 96 117 130 94 3 +84 111 113 92 84 111 118 92 93 111 113 92 95 113 117 92 95 118 117 96 95 118 122 96 96 117 130 94 92 112 124 94 92 112 114 98 3 +93 111 118 92 88 107 109 87 88 95 104 83 95 113 117 96 86 104 108 89 82 96 104 78 92 99 105 86 83 99 101 75 79 91 97 75 3 +88 107 109 87 88 95 104 83 84 99 100 79 86 104 108 89 82 96 104 78 82 96 104 81 83 99 101 75 79 91 97 75 83 91 97 79 4 +88 95 104 83 84 99 100 79 84 95 96 79 82 96 104 78 82 96 104 81 82 96 100 81 79 91 97 75 83 91 97 79 83 91 101 79 4 +84 99 100 79 84 95 96 79 71 83 93 79 82 96 104 81 82 96 100 81 82 91 92 78 83 91 97 79 83 91 101 79 79 95 93 75 4 +71 83 93 79 55 51 113 108 44 37 134 137 82 91 92 78 78 83 96 74 63 56 108 103 79 95 93 75 79 95 93 75 71 77 93 79 2 +55 51 113 108 44 37 134 137 44 32 139 141 78 83 96 74 63 56 108 103 46 34 127 144 79 95 93 75 71 77 93 79 56 42 114 120 2 +44 37 134 137 44 32 139 141 44 34 139 146 63 56 108 103 46 34 127 144 43 32 133 144 71 77 93 79 56 42 114 120 42 32 130 146 2 +44 32 139 141 44 34 139 146 44 29 134 146 46 34 127 144 43 32 133 144 43 32 138 144 56 42 114 120 42 32 130 146 42 34 130 142 2 +44 34 139 146 44 29 134 146 44 34 139 146 43 32 133 144 43 32 138 144 46 32 138 144 42 32 130 146 42 34 130 142 46 32 130 142 2 +44 29 134 146 44 34 139 146 44 32 134 141 43 32 138 144 46 32 138 144 46 32 138 144 42 34 130 142 46 32 130 142 46 32 135 142 2 +44 34 139 146 44 32 134 141 48 32 134 141 46 32 138 144 46 32 138 144 46 32 133 144 46 32 130 142 46 32 135 142 46 32 130 142 2 +44 32 134 141 48 32 134 141 44 32 134 137 46 32 138 144 46 32 133 144 46 32 133 136 46 32 135 142 46 32 130 142 46 32 124 139 2 +44 32 134 137 44 32 134 137 48 34 128 133 46 32 133 136 46 32 133 136 46 32 127 136 46 32 124 139 46 32 124 139 42 34 124 135 2 +44 32 134 137 48 34 128 133 48 32 134 133 46 32 133 136 46 32 127 136 49 32 127 133 46 32 124 139 42 34 124 135 42 32 124 135 2 +48 34 128 133 48 32 134 133 48 34 123 133 46 32 127 136 49 32 127 133 46 34 127 129 42 34 124 135 42 32 124 135 46 32 119 135 2 +48 32 134 133 48 34 123 133 48 37 118 125 49 32 127 133 46 34 127 129 49 32 127 133 42 32 124 135 46 32 119 135 46 32 119 131 2 +48 34 123 133 48 37 118 125 48 40 118 112 46 34 127 129 49 32 127 133 46 32 122 129 46 32 119 135 46 32 119 131 46 34 119 127 2 +48 40 118 112 51 45 104 100 48 37 123 129 46 32 122 129 49 37 112 118 52 43 104 103 46 34 119 127 49 34 114 124 49 40 105 116 2 +44 32 128 137 44 32 123 129 44 34 128 129 49 37 117 122 43 29 138 140 46 29 127 133 52 45 105 105 46 32 124 135 42 32 130 139 2 +48 34 118 112 51 37 118 112 48 40 113 112 46 34 112 114 46 34 112 111 49 34 112 111 49 34 110 116 49 37 114 116 52 40 110 113 2 +48 34 123 125 48 37 118 125 48 34 123 125 49 34 112 118 52 34 117 122 49 34 122 118 49 37 110 116 49 37 110 116 46 37 114 116 2 +48 34 123 125 48 37 118 121 63 58 109 96 49 34 122 118 49 34 117 122 49 34 117 125 46 37 114 116 46 37 114 116 46 37 110 113 2 +88 107 109 87 88 111 109 92 88 107 113 87 74 79 100 81 86 100 108 85 90 109 108 89 67 70 101 83 79 95 97 75 92 108 110 90 3 +88 111 109 92 88 107 113 87 88 103 104 83 86 100 108 85 90 109 108 89 90 104 112 89 79 95 97 75 92 108 110 90 92 108 110 90 3 +88 103 104 83 88 107 109 87 93 107 113 92 90 104 112 89 86 104 112 85 90 109 117 89 92 108 110 90 96 108 114 94 96 112 114 90 3 +93 107 113 92 93 107 113 87 88 111 118 87 90 109 117 89 90 113 112 92 90 113 112 92 96 112 114 90 96 112 119 90 92 108 119 90 3 +88 111 118 96 93 111 118 96 93 111 118 92 90 109 112 89 90 113 117 92 
95 113 117 96 96 112 119 90 96 112 119 94 96 117 114 94 3 +93 111 118 96 93 111 118 92 93 111 118 92 90 113 117 92 95 113 117 96 90 109 117 96 96 112 119 94 96 117 114 94 92 108 114 94 3 +93 111 118 92 93 111 118 92 88 111 118 92 90 109 117 96 90 118 122 96 90 113 117 96 92 108 114 94 92 112 114 90 92 108 114 90 3 +93 111 118 92 88 111 118 92 88 107 113 92 90 118 122 96 90 113 117 96 90 113 122 96 92 112 114 90 92 108 114 90 92 108 119 90 3 +88 107 113 92 93 111 118 87 88 107 109 87 90 113 122 96 90 113 112 92 90 113 112 92 92 108 119 90 92 103 105 83 83 99 101 83 3 +93 111 118 87 88 107 109 87 88 107 109 87 90 113 112 92 90 113 112 92 90 113 112 89 92 103 105 83 83 99 101 83 79 95 101 75 3 +88 107 109 87 88 107 109 87 93 111 113 87 90 113 112 92 90 113 112 89 86 104 104 85 83 99 101 83 79 95 101 75 79 84 86 68 3 +88 107 109 87 93 111 113 87 88 103 113 83 90 113 112 89 86 104 104 85 78 96 92 81 79 95 101 75 79 84 86 68 71 73 79 64 3 +93 111 113 87 88 103 113 83 84 95 100 83 86 104 104 85 78 96 92 81 74 91 96 78 79 84 86 68 71 73 79 64 71 77 82 68 7 +84 95 100 83 79 87 96 75 75 79 89 67 74 91 96 78 74 87 92 74 74 79 84 66 71 77 82 68 67 77 86 68 67 77 75 64 7 +79 87 96 75 75 79 89 67 75 75 74 58 74 87 92 74 74 79 84 66 70 79 80 63 67 77 86 68 67 77 75 64 67 70 68 57 7 +75 75 74 58 63 61 63 42 55 51 50 29 70 79 80 63 63 67 69 52 59 56 62 48 67 70 68 57 59 57 55 42 52 51 58 42 5 +63 61 63 42 55 51 50 29 55 54 57 37 63 67 69 52 59 56 62 48 56 53 66 48 59 57 55 42 52 51 58 42 52 51 72 57 5 +55 54 57 37 59 54 63 42 55 54 63 46 56 53 66 48 59 53 66 44 59 56 62 44 52 51 72 57 56 51 68 53 56 54 72 49 5 +59 54 63 42 55 54 63 46 59 51 67 46 59 53 66 44 59 56 62 44 59 53 62 44 56 51 68 53 56 54 72 49 56 51 62 45 5 +59 51 67 46 59 51 67 50 55 54 67 50 59 53 62 44 59 56 66 44 56 56 73 52 56 51 62 45 56 54 65 45 56 51 65 49 5 +55 54 67 50 55 54 60 46 55 54 67 50 56 56 73 52 59 56 76 55 59 49 69 48 56 51 65 49 56 51 72 60 59 54 72 60 5 +55 54 60 46 55 54 67 50 55 58 70 54 59 56 76 55 59 49 69 48 59 53 66 44 56 51 72 60 59 54 72 60 59 51 65 45 5 +55 54 67 50 55 58 70 54 63 68 77 62 59 49 69 48 59 53 66 44 56 53 66 48 59 54 72 60 59 51 65 45 59 54 62 45 5 +55 58 70 54 63 68 77 62 67 72 74 58 59 53 66 44 56 53 66 48 59 56 73 55 59 51 65 45 59 54 62 45 59 57 65 49 5 +67 72 74 58 63 68 70 58 67 75 74 62 59 56 73 55 66 67 80 63 63 71 73 59 59 57 65 49 59 60 72 57 63 66 79 64 7 +67 75 74 62 71 79 85 67 71 87 96 75 63 71 73 59 63 67 73 59 66 75 80 63 63 66 79 64 63 66 72 60 63 70 75 64 7 +71 87 96 75 75 91 96 79 75 83 89 71 66 75 80 63 70 79 84 70 70 79 84 66 63 70 75 64 67 73 79 64 63 66 75 60 7 +75 91 96 79 75 83 89 71 71 79 85 67 70 79 84 70 70 79 84 66 70 71 73 63 67 73 79 64 63 66 75 60 63 63 72 57 7 +71 79 85 67 71 75 85 67 71 75 74 62 70 71 73 63 63 67 69 59 59 63 66 55 63 63 72 57 63 60 72 60 59 63 75 64 7 +71 75 85 67 71 75 74 62 67 72 70 58 63 67 69 59 59 63 66 55 59 63 73 59 63 60 72 60 59 63 75 64 63 63 72 57 7 +71 75 74 62 67 72 70 58 67 72 74 58 59 63 66 55 59 63 73 59 63 67 73 55 59 63 75 64 63 63 72 57 63 63 68 53 7 +63 68 74 58 63 68 74 58 67 72 74 62 63 67 69 55 66 75 76 63 66 71 73 59 59 60 65 53 59 66 72 57 63 73 75 64 7 +95 109 112 89 95 109 117 85 90 113 117 92 96 112 114 90 92 112 114 94 92 117 119 98 97 115 119 94 97 115 124 94 97 115 119 94 3 +95 113 117 92 95 118 117 96 95 118 122 96 96 117 130 94 92 112 124 94 92 112 114 98 93 115 114 90 93 106 114 90 89 102 110 83 3 +95 118 117 96 95 118 122 96 99 118 117 92 92 112 124 94 92 112 114 98 92 108 114 90 93 106 114 90 89 102 110 83 82 92 
101 80 3 +95 118 122 96 99 118 117 92 95 113 117 96 92 112 114 98 92 108 114 90 92 99 105 86 89 102 110 83 82 92 101 80 82 88 89 73 3 +95 113 117 96 86 104 108 89 82 96 104 78 92 99 105 86 83 99 101 75 79 91 97 75 82 88 89 73 78 92 93 80 78 92 101 76 3 +86 104 108 89 82 96 104 78 82 96 104 81 83 99 101 75 79 91 97 75 83 91 97 79 78 92 93 80 78 92 101 76 82 92 101 80 4 +82 96 104 78 82 96 104 81 82 96 100 81 79 91 97 75 83 91 97 79 83 91 101 79 78 92 101 76 82 92 101 80 78 88 93 76 4 +82 96 100 81 82 91 92 78 78 83 96 74 83 91 101 79 79 95 93 75 79 95 93 75 78 88 93 76 78 92 93 76 78 88 97 80 4 +82 91 92 78 78 83 96 74 63 56 108 103 79 95 93 75 79 95 93 75 71 77 93 79 78 92 93 76 78 88 97 80 78 88 93 76 4 +78 83 96 74 63 56 108 103 46 34 127 144 79 95 93 75 71 77 93 79 56 42 114 120 78 88 97 80 78 88 93 76 63 63 101 90 2 +63 56 108 103 46 34 127 144 43 32 133 144 71 77 93 79 56 42 114 120 42 32 130 146 78 88 93 76 63 63 101 90 47 37 119 133 2 +46 34 127 144 43 32 133 144 43 32 138 144 56 42 114 120 42 32 130 146 42 34 130 142 63 63 101 90 47 37 119 133 44 34 124 143 2 +43 32 133 144 43 32 138 144 46 32 138 144 42 32 130 146 42 34 130 142 46 32 130 142 47 37 119 133 44 34 124 143 44 34 129 143 2 +43 32 138 144 46 32 138 144 46 32 138 144 42 34 130 142 46 32 130 142 46 32 135 142 44 34 124 143 44 34 129 143 44 31 124 143 2 +46 32 138 144 46 32 133 144 46 32 133 136 46 32 135 142 46 32 130 142 46 32 124 139 44 31 124 143 44 34 119 140 44 34 124 140 2 +46 32 133 136 46 32 133 136 46 32 127 136 46 32 124 139 46 32 124 139 42 34 124 135 44 34 124 140 44 34 129 140 44 34 124 136 2 +46 32 133 136 46 32 127 136 49 32 127 133 46 32 124 139 42 34 124 135 42 32 124 135 44 34 129 140 44 34 124 136 44 34 124 136 2 +46 34 127 129 49 32 127 133 46 32 122 129 46 32 119 135 46 32 119 131 46 34 119 127 44 34 124 136 44 34 119 133 44 37 114 129 2 +49 32 127 133 46 32 122 129 49 37 112 118 46 32 119 131 46 34 119 127 49 34 114 124 44 34 119 133 44 37 114 129 44 34 114 129 2 +46 32 122 129 49 37 112 118 52 43 104 103 46 34 119 127 49 34 114 124 49 40 105 116 44 37 114 129 44 34 114 129 44 37 119 129 2 +49 37 112 118 52 43 104 103 49 37 117 122 49 34 114 124 49 40 105 116 52 45 105 105 44 34 114 129 44 37 119 129 44 43 105 111 2 +49 37 117 122 43 29 138 140 46 29 127 133 52 45 105 105 46 32 124 135 42 32 130 139 44 43 105 111 50 43 110 111 44 31 124 136 2 +43 29 138 140 46 29 127 133 46 29 122 125 46 32 124 135 42 32 130 139 42 32 119 127 50 43 110 111 44 31 124 136 44 31 124 133 2 +46 29 127 133 46 29 122 125 46 32 112 118 42 32 130 139 42 32 119 127 42 34 110 120 44 31 124 136 44 31 124 133 44 31 114 122 2 +46 29 122 125 46 32 112 118 46 34 112 118 42 32 119 127 42 34 110 120 46 34 110 116 44 31 124 133 44 31 114 122 47 34 114 126 2 +46 32 112 118 46 34 112 118 46 34 112 114 42 34 110 120 46 34 110 116 49 34 110 116 44 31 114 122 47 34 114 126 47 37 114 122 2 +46 34 112 114 46 34 112 111 49 34 112 111 49 34 110 116 49 37 114 116 52 40 110 113 47 37 114 122 47 37 114 119 44 34 110 119 2 +46 34 112 111 49 34 112 111 46 37 117 114 49 37 114 116 52 40 110 113 49 37 105 113 47 37 114 119 44 34 110 119 44 37 105 111 2 +46 37 117 114 49 34 112 118 52 34 117 122 49 37 105 113 49 37 110 116 49 37 110 116 44 37 105 111 47 40 110 111 47 40 110 111 2 +52 34 117 122 49 34 122 118 49 34 117 122 49 37 110 116 46 37 114 116 46 37 114 116 47 40 110 111 50 40 105 115 53 46 110 111 2 +49 34 117 122 49 34 117 125 52 49 112 107 46 37 114 116 46 37 110 113 52 45 110 109 53 46 110 111 60 63 97 94 70 79 97 80 2 +49 34 117 125 52 49 
112 107 74 79 100 81 46 37 110 113 52 45 110 109 67 70 101 83 60 63 97 94 70 79 97 80 78 92 97 76 2 +74 79 100 81 86 100 108 85 90 109 108 89 67 70 101 83 79 95 97 75 92 108 110 90 78 92 97 76 82 102 105 76 85 102 110 83 3 +90 104 112 89 86 104 112 85 90 109 117 89 92 108 110 90 96 108 114 94 96 112 114 90 93 111 114 90 97 115 114 94 97 111 114 94 3 +90 109 117 89 90 113 112 92 90 113 112 92 96 112 114 90 96 112 119 90 92 108 119 90 97 111 114 94 93 115 114 94 93 115 119 94 3 +90 113 112 92 90 109 112 89 90 113 117 92 92 108 119 90 96 112 119 90 96 112 119 94 93 115 119 94 97 115 114 97 97 115 114 94 3 +90 113 117 92 95 113 117 96 90 109 117 96 96 112 119 94 96 117 114 94 92 108 114 94 97 115 114 94 97 115 114 90 93 111 114 87 3 +90 109 117 96 90 118 122 96 90 113 117 96 92 108 114 94 92 112 114 90 92 108 114 90 93 111 114 87 89 106 114 87 85 106 110 83 3 +90 118 122 96 90 113 117 96 90 113 122 96 92 112 114 90 92 108 114 90 92 108 119 90 89 106 114 87 85 106 110 83 85 97 105 76 3 +90 113 117 96 90 113 122 96 90 113 112 92 92 108 114 90 92 108 119 90 92 103 105 83 85 106 110 83 85 97 105 76 82 92 101 76 3 +90 113 122 96 90 113 112 92 90 113 112 92 92 108 119 90 92 103 105 83 83 99 101 83 85 97 105 76 82 92 101 76 78 88 93 73 3 +90 113 112 92 90 113 112 92 90 113 112 89 92 103 105 83 83 99 101 83 79 95 101 75 82 92 101 76 78 88 93 73 78 84 85 65 3 +90 113 112 92 90 113 112 89 86 104 104 85 83 99 101 83 79 95 101 75 79 84 86 68 78 88 93 73 78 84 85 65 70 79 82 65 7 +90 113 112 89 86 104 104 85 78 96 92 81 79 95 101 75 79 84 86 68 71 73 79 64 78 84 85 65 70 79 82 65 70 71 74 58 7 +86 104 104 85 78 96 92 81 74 91 96 78 79 84 86 68 71 73 79 64 71 77 82 68 70 79 82 65 70 71 74 58 63 67 74 58 7 +78 96 92 81 74 91 96 78 74 87 92 74 71 73 79 64 71 77 82 68 67 77 86 68 70 71 74 58 63 67 74 58 60 67 67 55 7 +74 87 92 74 74 79 84 66 70 79 80 63 67 77 86 68 67 77 75 64 67 70 68 57 60 67 67 55 57 63 63 51 53 60 63 48 5 +74 79 84 66 70 79 80 63 63 67 69 52 67 77 75 64 67 70 68 57 59 57 55 42 57 63 63 51 53 60 63 48 57 56 56 44 5 +63 67 69 52 59 56 62 48 56 53 66 48 59 57 55 42 52 51 58 42 52 51 72 57 57 56 56 44 53 53 60 44 57 53 67 55 5 +59 53 66 44 59 56 62 44 59 53 62 44 56 51 68 53 56 54 72 49 56 51 62 45 53 53 74 62 53 53 70 58 53 53 67 48 5 +59 56 62 44 59 53 62 44 59 56 66 44 56 54 72 49 56 51 62 45 56 54 65 45 53 53 70 58 53 53 67 48 57 56 63 51 5 +59 53 62 44 59 56 66 44 56 56 73 52 56 51 62 45 56 54 65 45 56 51 65 49 53 53 67 48 57 56 63 51 53 56 67 48 5 +56 56 73 52 59 56 76 55 59 49 69 48 56 51 65 49 56 51 72 60 59 54 72 60 53 56 67 48 53 49 70 55 57 56 74 62 5 +59 53 66 44 56 53 66 48 59 56 73 55 59 51 65 45 59 54 62 45 59 57 65 49 57 60 74 58 57 53 67 51 57 56 70 48 5 +59 56 73 55 66 67 80 63 63 71 73 59 59 57 65 49 59 60 72 57 63 66 79 64 57 56 70 48 57 56 70 51 57 56 74 58 7 +63 67 73 59 66 75 80 63 70 79 84 70 63 66 72 60 63 70 75 64 67 73 79 64 57 60 74 58 63 63 74 62 63 60 70 62 7 +70 79 84 70 70 79 84 66 70 71 73 63 67 73 79 64 63 66 75 60 63 63 72 57 63 60 70 62 60 60 70 65 53 53 82 83 7 +59 63 73 59 63 67 73 55 63 67 69 55 63 63 72 57 63 63 68 53 59 60 65 53 53 53 85 76 60 60 74 55 57 60 70 58 7 +63 67 73 55 63 67 69 55 66 75 76 63 63 63 68 53 59 60 65 53 59 66 72 57 60 60 74 55 57 60 70 58 63 67 74 58 7 +63 67 69 55 66 75 76 63 66 71 73 59 59 60 65 53 59 66 72 57 63 73 75 64 57 60 70 58 63 67 74 58 67 71 78 62 7 +101 112 124 94 96 112 114 90 92 112 114 94 93 111 114 94 97 115 119 94 97 115 124 94 88 111 111 91 92 115 120 94 92 115 120 94 3 +96 112 114 90 92 112 114 94 92 117 119 98 
97 115 119 94 97 115 124 94 97 115 119 94 92 115 120 94 92 115 120 94 88 106 111 87 3 +92 112 114 94 92 117 119 98 96 117 130 94 97 115 124 94 97 115 119 94 93 115 114 90 92 115 120 94 88 106 111 87 84 94 102 76 3 +92 99 105 86 83 99 101 75 79 91 97 75 82 88 89 73 78 92 93 80 78 92 101 76 76 89 98 76 80 89 94 79 80 89 98 79 4 +83 91 97 79 83 91 101 79 79 95 93 75 82 92 101 80 78 88 93 76 78 92 93 76 84 94 98 76 76 85 90 72 76 85 90 72 4 +79 95 93 75 79 95 93 75 71 77 93 79 78 92 93 76 78 88 97 80 78 88 93 76 76 85 90 72 76 85 90 76 80 89 94 76 4 +79 95 93 75 71 77 93 79 56 42 114 120 78 88 97 80 78 88 93 76 63 63 101 90 76 85 90 76 80 89 94 76 72 81 94 76 2 +71 77 93 79 56 42 114 120 42 32 130 146 78 88 93 76 63 63 101 90 47 37 119 133 80 89 94 76 72 81 94 76 57 49 115 113 2 +56 42 114 120 42 32 130 146 42 34 130 142 63 63 101 90 47 37 119 133 44 34 124 143 72 81 94 76 57 49 115 113 47 31 131 142 2 +42 32 130 146 42 34 130 142 46 32 130 142 47 37 119 133 44 34 124 143 44 34 129 143 57 49 115 113 47 31 131 142 44 31 131 142 2 +42 34 130 142 46 32 130 142 46 32 135 142 44 34 124 143 44 34 129 143 44 31 124 143 47 31 131 142 44 31 131 142 47 31 131 139 2 +46 32 130 142 46 32 135 142 46 32 130 142 44 34 129 143 44 31 124 143 44 34 119 140 44 31 131 142 47 31 131 139 47 31 131 139 2 +46 32 124 139 46 32 124 139 42 34 124 135 44 34 124 140 44 34 129 140 44 34 124 136 47 34 131 139 44 31 136 139 44 31 125 139 2 +42 34 124 135 42 32 124 135 46 32 119 135 44 34 124 136 44 34 124 136 44 34 124 136 44 31 125 139 44 31 125 139 47 34 125 135 2 +42 32 124 135 46 32 119 135 46 32 119 131 44 34 124 136 44 34 124 136 44 34 119 133 44 31 125 139 47 34 125 135 44 31 125 128 2 +46 32 119 135 46 32 119 131 46 34 119 127 44 34 124 136 44 34 119 133 44 37 114 129 47 34 125 135 44 31 125 128 47 34 120 124 2 +46 34 119 127 49 34 114 124 49 40 105 116 44 37 114 129 44 34 114 129 44 37 119 129 47 34 120 124 47 34 115 124 44 34 115 120 2 +46 32 124 135 42 32 130 139 42 32 119 127 50 43 110 111 44 31 124 136 44 31 124 133 50 43 106 102 47 40 115 120 44 31 131 135 2 +42 32 130 139 42 32 119 127 42 34 110 120 44 31 124 136 44 31 124 133 44 31 114 122 47 40 115 120 44 31 131 135 47 31 125 128 2 +42 34 110 120 46 34 110 116 49 34 110 116 44 31 114 122 47 34 114 126 47 37 114 122 47 31 125 128 47 31 120 124 47 34 115 120 2 +46 34 110 116 49 34 110 116 49 37 114 116 47 34 114 126 47 37 114 122 47 37 114 119 47 31 120 124 47 34 115 120 47 37 111 113 2 +49 34 110 116 49 37 114 116 52 40 110 113 47 37 114 122 47 37 114 119 44 34 110 119 47 34 115 120 47 37 111 113 47 37 111 105 2 +49 37 114 116 52 40 110 113 49 37 105 113 47 37 114 119 44 34 110 119 44 37 105 111 47 37 111 113 47 37 111 105 50 40 106 105 2 +52 40 110 113 49 37 105 113 49 37 110 116 44 34 110 119 44 37 105 111 47 40 110 111 47 37 111 105 50 40 106 105 53 43 106 102 2 +49 37 110 116 49 37 110 116 46 37 114 116 47 40 110 111 47 40 110 111 50 40 105 115 53 43 106 102 60 55 102 91 64 69 94 79 2 +46 37 114 116 46 37 110 113 52 45 110 109 53 46 110 111 60 63 97 94 70 79 97 80 68 77 86 65 68 77 78 61 68 77 82 61 2 +46 37 110 113 52 45 110 109 67 70 101 83 60 63 97 94 70 79 97 80 78 92 97 76 68 77 78 61 68 77 82 61 68 81 90 68 2 +52 45 110 109 67 70 101 83 79 95 97 75 70 79 97 80 78 92 97 76 82 102 105 76 68 77 82 61 68 81 90 68 76 85 94 76 7 +92 108 110 90 96 108 114 94 96 112 114 90 93 111 114 90 97 115 114 94 97 111 114 94 88 111 115 94 97 120 120 98 97 120 120 94 3 +96 112 114 90 96 112 119 90 92 108 119 90 97 111 114 94 93 115 114 94 93 115 119 94 97 120 120 94 97 
115 115 94 88 115 120 94 3 +96 112 119 90 92 108 119 90 96 112 119 90 93 115 114 94 93 115 119 94 97 115 114 97 97 115 115 94 88 115 120 94 88 111 115 91 3 +92 108 119 90 96 112 119 90 96 112 119 94 93 115 119 94 97 115 114 97 97 115 114 94 88 115 120 94 88 111 115 91 88 106 111 87 3 +96 112 119 90 96 112 119 94 96 117 114 94 97 115 114 97 97 115 114 94 97 115 114 90 88 111 115 91 88 106 111 87 88 102 106 83 3 +96 112 119 94 96 117 114 94 92 108 114 94 97 115 114 94 97 115 114 90 93 111 114 87 88 106 111 87 88 102 106 83 84 98 106 83 3 +96 117 114 94 92 108 114 94 92 112 114 90 97 115 114 90 93 111 114 87 89 106 114 87 88 102 106 83 84 98 106 83 88 98 106 79 3 +92 108 114 94 92 112 114 90 92 108 114 90 93 111 114 87 89 106 114 87 85 106 110 83 84 98 106 83 88 98 106 79 84 98 98 79 3 +92 112 114 90 92 108 114 90 92 108 119 90 89 106 114 87 85 106 110 83 85 97 105 76 88 98 106 79 84 98 98 79 80 89 94 76 3 +92 103 105 83 83 99 101 83 79 95 101 75 82 92 101 76 78 88 93 73 78 84 85 65 76 81 90 65 72 77 78 65 72 81 78 65 7 +83 99 101 83 79 95 101 75 79 84 86 68 78 88 93 73 78 84 85 65 70 79 82 65 72 77 78 65 72 81 78 65 72 81 90 65 7 +79 95 101 75 79 84 86 68 71 73 79 64 78 84 85 65 70 79 82 65 70 71 74 58 72 81 78 65 72 81 90 65 72 81 94 65 7 +67 77 75 64 67 70 68 57 59 57 55 42 57 63 63 51 53 60 63 48 57 56 56 44 53 55 60 42 57 59 64 50 60 59 67 54 5 +67 70 68 57 59 57 55 42 52 51 58 42 53 60 63 48 57 56 56 44 53 53 60 44 57 59 64 50 60 59 67 54 60 59 71 57 5 +52 51 58 42 52 51 72 57 56 51 68 53 53 53 60 44 57 53 67 55 53 53 74 62 60 59 71 57 57 59 78 65 53 52 78 65 5 +56 54 72 49 56 51 62 45 56 54 65 45 53 53 70 58 53 53 67 48 57 56 63 51 53 49 74 57 53 52 71 50 53 49 71 50 5 +56 51 62 45 56 54 65 45 56 51 65 49 53 53 67 48 57 56 63 51 53 56 67 48 53 52 71 50 53 49 71 50 53 52 71 50 5 +56 51 65 49 56 51 72 60 59 54 72 60 53 56 67 48 53 49 70 55 57 56 74 62 53 52 71 50 53 52 71 50 57 55 74 61 5 +56 51 72 60 59 54 72 60 59 51 65 45 53 49 70 55 57 56 74 62 57 60 74 58 53 52 71 50 57 55 74 61 57 55 78 65 5 +59 54 62 45 59 57 65 49 59 60 72 57 57 53 67 51 57 56 70 48 57 56 70 51 57 55 67 54 53 49 64 50 57 55 67 50 5 +59 60 72 57 63 66 79 64 63 66 72 60 57 56 70 51 57 56 74 58 57 60 74 58 57 55 67 50 57 55 71 54 57 55 74 65 5 +63 66 79 64 63 66 72 60 63 70 75 64 57 56 74 58 57 60 74 58 63 63 74 62 57 55 71 54 57 55 74 65 57 52 82 72 5 +63 66 72 60 63 70 75 64 67 73 79 64 57 60 74 58 63 63 74 62 63 60 70 62 57 55 74 65 57 52 82 72 53 46 94 94 5 +63 66 75 60 63 63 72 57 63 60 72 60 60 60 70 65 53 53 82 83 50 43 97 101 47 34 111 116 44 29 115 124 44 29 106 113 2 +63 63 72 57 63 60 72 60 59 63 75 64 53 53 82 83 50 43 97 101 44 37 101 108 44 29 115 124 44 29 106 113 44 31 106 116 2 +63 60 72 60 59 63 75 64 63 63 72 57 50 43 97 101 44 37 101 108 53 53 85 76 44 29 106 113 44 31 106 116 44 37 98 94 2 +59 63 75 64 63 63 72 57 63 63 68 53 44 37 101 108 53 53 85 76 60 60 74 55 44 31 106 116 44 37 98 94 53 52 78 57 2 +63 63 72 57 63 63 68 53 59 60 65 53 53 53 85 76 60 60 74 55 57 60 70 58 44 37 98 94 53 52 78 57 57 52 71 61 7 +59 60 65 53 59 66 72 57 63 73 75 64 57 60 70 58 63 67 74 58 67 71 78 62 57 52 71 61 57 59 78 61 60 62 82 65 7 +59 66 72 57 63 73 75 64 67 70 72 60 63 67 74 58 67 71 78 62 67 75 78 65 57 59 78 61 60 62 82 65 60 62 78 68 7 +93 111 114 94 97 115 119 94 97 115 124 94 88 111 111 91 92 115 120 94 92 115 120 94 88 95 108 81 80 99 104 81 84 95 96 78 3 +97 115 119 94 97 115 124 94 97 115 119 94 92 115 120 94 92 115 120 94 88 106 111 87 80 99 104 81 84 95 96 78 80 91 96 78 3 +97 115 124 94 97 115 
119 94 93 115 114 90 92 115 120 94 88 106 111 87 84 94 102 76 84 95 96 78 80 91 96 78 76 87 96 74 3 +93 106 114 90 89 102 110 83 82 92 101 80 76 94 98 72 76 89 94 76 80 89 94 76 76 91 96 74 80 95 100 81 80 99 100 81 4 +78 92 93 80 78 92 101 76 82 92 101 80 80 89 94 79 80 89 98 79 84 94 98 76 84 95 100 78 84 95 100 78 80 91 96 81 4 +78 92 93 76 78 88 97 80 78 88 93 76 76 85 90 72 76 85 90 76 80 89 94 76 76 87 87 74 76 87 91 78 80 91 96 78 4 +78 88 93 76 63 63 101 90 47 37 119 133 80 89 94 76 72 81 94 76 57 49 115 113 80 91 96 78 84 91 96 74 68 68 100 85 2 +63 63 101 90 47 37 119 133 44 34 124 143 72 81 94 76 57 49 115 113 47 31 131 142 84 91 96 74 68 68 100 85 50 39 118 132 2 +44 34 124 143 44 34 129 143 44 31 124 143 47 31 131 142 44 31 131 142 47 31 131 139 50 39 118 132 43 29 133 143 46 31 133 139 2 +44 31 124 143 44 34 119 140 44 34 124 140 47 31 131 139 47 31 131 139 47 34 131 139 46 31 133 139 46 31 133 139 46 31 133 143 2 +44 34 119 140 44 34 124 140 44 34 129 140 47 31 131 139 47 34 131 139 44 31 136 139 46 31 133 139 46 31 133 143 46 31 133 139 2 +44 34 124 136 44 34 124 136 44 34 124 136 44 31 125 139 44 31 125 139 47 34 125 135 43 31 128 135 43 29 128 132 46 31 118 125 2 +44 34 124 136 44 34 124 136 44 34 119 133 44 31 125 139 47 34 125 135 44 31 125 128 43 29 128 132 46 31 118 125 46 34 118 121 2 +44 37 114 129 44 34 114 129 44 37 119 129 47 34 120 124 47 34 115 124 44 34 115 120 50 36 118 121 50 36 118 121 50 36 122 121 2 +44 37 119 129 44 43 105 111 50 43 110 111 44 34 115 120 47 40 115 116 50 43 106 102 50 36 122 121 46 36 122 125 50 39 122 117 2 +44 43 105 111 50 43 110 111 44 31 124 136 47 40 115 116 50 43 106 102 47 40 115 120 46 36 122 125 50 39 122 117 50 45 113 107 2 +50 43 110 111 44 31 124 136 44 31 124 133 50 43 106 102 47 40 115 120 44 31 131 135 50 39 122 117 50 45 113 107 50 34 122 135 2 +44 31 124 136 44 31 124 133 44 31 114 122 47 40 115 120 44 31 131 135 47 31 125 128 50 45 113 107 50 34 122 135 43 29 133 135 2 +44 31 114 122 47 34 114 126 47 37 114 122 47 31 125 128 47 31 120 124 47 34 115 120 43 29 133 135 46 31 122 125 50 34 113 114 2 +47 34 114 126 47 37 114 122 47 37 114 119 47 31 120 124 47 34 115 120 47 37 111 113 46 31 122 125 50 34 113 114 50 39 104 103 2 +47 37 114 119 44 34 110 119 44 37 105 111 47 37 111 113 47 37 111 105 50 40 106 105 50 39 104 103 56 51 100 92 64 61 96 81 2 +44 34 110 119 44 37 105 111 47 40 110 111 47 37 111 105 50 40 106 105 53 43 106 102 56 51 100 92 64 61 96 81 68 71 91 70 2 +44 37 105 111 47 40 110 111 47 40 110 111 50 40 106 105 53 43 106 102 60 55 102 91 64 61 96 81 68 71 91 70 71 75 87 63 2 +47 40 110 111 50 40 105 115 53 46 110 111 60 55 102 91 64 69 94 79 68 77 86 65 71 75 87 63 68 75 75 59 64 68 75 56 2 +50 40 105 115 53 46 110 111 60 63 97 94 64 69 94 79 68 77 86 65 68 77 78 61 68 75 75 59 64 68 75 56 64 71 75 56 7 +53 46 110 111 60 63 97 94 70 79 97 80 68 77 86 65 68 77 78 61 68 77 82 61 64 68 75 56 64 71 75 56 68 75 75 59 7 +60 63 97 94 70 79 97 80 78 92 97 76 68 77 78 61 68 77 82 61 68 81 90 68 64 71 75 56 68 75 75 59 68 75 83 59 7 +70 79 97 80 78 92 97 76 82 102 105 76 68 77 82 61 68 81 90 68 76 85 94 76 68 75 75 59 68 75 83 59 71 79 83 67 7 +78 92 97 76 82 102 105 76 85 102 110 83 68 81 90 68 76 85 94 76 84 98 102 79 68 75 83 59 71 79 83 67 71 87 96 74 7 +82 102 105 76 85 102 110 83 93 111 114 90 76 85 94 76 84 98 102 79 88 111 115 94 71 79 83 67 71 87 96 74 84 103 108 85 3 +85 102 110 83 93 111 114 90 97 115 114 94 84 98 102 79 88 111 115 94 97 120 120 98 71 87 96 74 84 103 108 85 92 112 122 92 3 +93 115 119 94 
97 115 114 97 97 115 114 94 88 115 120 94 88 111 115 91 88 106 111 87 84 99 104 81 84 99 104 78 84 95 104 78 3 +97 115 114 97 97 115 114 94 97 115 114 90 88 111 115 91 88 106 111 87 88 102 106 83 84 99 104 78 84 95 104 78 84 95 104 81 3 +97 115 114 94 97 115 114 90 93 111 114 87 88 106 111 87 88 102 106 83 84 98 106 83 84 95 104 78 84 95 104 81 84 103 104 81 3 +97 115 114 90 93 111 114 87 89 106 114 87 88 102 106 83 84 98 106 83 88 98 106 79 84 95 104 81 84 103 104 81 84 95 96 78 3 +93 111 114 87 89 106 114 87 85 106 110 83 84 98 106 83 88 98 106 79 84 98 98 79 84 103 104 81 84 95 96 78 80 87 91 74 3 +89 106 114 87 85 106 110 83 85 97 105 76 88 98 106 79 84 98 98 79 80 89 94 76 84 95 96 78 80 87 91 74 68 83 83 67 3 +85 97 105 76 82 92 101 76 78 88 93 73 80 89 94 76 76 81 90 65 72 77 78 65 68 83 83 67 68 79 83 67 71 75 83 67 7 +82 92 101 76 78 88 93 73 78 84 85 65 76 81 90 65 72 77 78 65 72 81 78 65 68 79 83 67 71 75 83 67 71 79 87 70 7 +78 88 93 73 78 84 85 65 70 79 82 65 72 77 78 65 72 81 78 65 72 81 90 65 71 75 83 67 71 79 87 70 71 83 87 70 7 +78 84 85 65 70 79 82 65 70 71 74 58 72 81 78 65 72 81 90 65 72 81 94 65 71 79 87 70 71 83 87 70 71 79 83 67 7 +70 79 82 65 70 71 74 58 63 67 74 58 72 81 90 65 72 81 94 65 64 69 71 57 71 83 87 70 71 79 83 67 68 75 79 63 7 +70 71 74 58 63 67 74 58 60 67 67 55 72 81 94 65 64 69 71 57 57 55 60 46 71 79 83 67 68 75 79 63 64 64 71 56 5 +57 63 63 51 53 60 63 48 57 56 56 44 53 55 60 42 57 59 64 50 60 59 67 54 64 61 71 59 60 61 71 59 60 61 75 63 5 +53 53 60 44 57 53 67 55 53 53 74 62 60 59 71 57 57 59 78 65 53 52 78 65 60 61 75 67 60 57 75 67 56 54 79 70 5 +57 53 67 55 53 53 74 62 53 53 70 58 57 59 78 65 53 52 78 65 53 49 74 57 60 57 75 67 56 54 79 70 53 48 75 63 5 +53 53 74 62 53 53 70 58 53 53 67 48 53 52 78 65 53 49 74 57 53 52 71 50 56 54 79 70 53 48 75 63 53 45 75 59 5 +53 53 67 48 57 56 63 51 53 56 67 48 53 52 71 50 53 49 71 50 53 52 71 50 53 45 75 59 56 51 71 56 56 51 71 56 5 +53 49 70 55 57 56 74 62 57 60 74 58 53 52 71 50 57 55 74 61 57 55 78 65 56 51 67 56 53 48 67 56 56 54 79 63 5 +57 56 74 62 57 60 74 58 57 53 67 51 57 55 74 61 57 55 78 65 57 55 67 54 53 48 67 56 56 54 79 63 56 54 79 63 5 +57 56 70 48 57 56 70 51 57 56 74 58 53 49 64 50 57 55 67 50 57 55 71 54 56 51 67 52 53 51 67 52 53 54 71 52 5 +57 56 74 58 57 60 74 58 63 63 74 62 57 55 71 54 57 55 74 65 57 52 82 72 53 54 71 52 53 51 75 63 53 48 91 96 5 +63 60 70 62 60 60 70 65 53 53 82 83 53 46 94 94 47 34 111 116 44 29 115 124 46 34 118 128 43 29 122 139 43 29 122 135 2 +60 60 70 65 53 53 82 83 50 43 97 101 47 34 111 116 44 29 115 124 44 29 106 113 43 29 122 139 43 29 122 135 43 29 122 128 2 +53 53 82 83 50 43 97 101 44 37 101 108 44 29 115 124 44 29 106 113 44 31 106 116 43 29 122 135 43 29 122 128 43 29 122 128 2 +50 43 97 101 44 37 101 108 53 53 85 76 44 29 106 113 44 31 106 116 44 37 98 94 43 29 122 128 43 29 122 128 40 31 122 132 2 +44 37 101 108 53 53 85 76 60 60 74 55 44 31 106 116 44 37 98 94 53 52 78 57 43 29 122 128 40 31 122 132 46 42 96 78 2 +57 60 70 58 63 67 74 58 67 71 78 62 57 52 71 61 57 59 78 61 60 62 82 65 53 48 71 59 56 51 71 59 60 54 75 63 5 +63 67 74 58 67 71 78 62 67 75 78 65 57 59 78 61 60 62 82 65 60 62 78 68 56 51 71 59 60 54 75 63 60 57 79 67 5 +88 111 111 91 92 115 120 94 92 115 120 94 88 95 108 81 80 99 104 81 84 95 96 78 84 99 104 79 84 95 96 75 84 91 96 75 3 +92 115 120 94 92 115 120 94 88 106 111 87 80 99 104 81 84 95 96 78 80 91 96 78 84 95 96 75 84 91 96 75 79 95 100 79 4 +88 106 111 87 84 94 102 76 76 94 98 72 80 91 96 78 76 87 96 74 76 91 96 74 79 95 100 
+[large hunk of added numeric data rows (space-separated integers) not shown]
55 67 71 78 58 63 75 78 58 64 73 74 57 64 73 74 61 64 73 82 61 7 +63 70 75 57 67 73 79 57 67 73 75 60 67 71 78 58 63 75 78 58 63 71 74 58 64 73 74 61 64 73 82 61 64 73 86 61 7 +67 73 75 60 67 73 79 60 67 73 82 60 67 75 78 58 67 79 82 62 67 75 82 62 64 73 78 57 64 73 78 61 64 73 78 61 7 +71 77 82 60 67 73 75 57 67 81 82 60 67 75 78 58 70 75 78 58 67 79 82 62 68 73 78 57 72 73 82 61 72 77 74 57 7 +67 81 82 60 67 81 82 64 67 77 82 64 67 79 82 62 67 75 82 58 63 75 78 55 72 77 74 57 68 77 74 57 64 73 82 61 7 +67 77 82 64 63 77 75 60 71 84 86 64 63 75 78 55 63 75 78 58 67 75 82 65 64 73 82 61 64 73 78 57 64 69 74 57 7 +63 77 75 60 71 84 86 64 71 81 79 68 63 75 78 58 67 75 82 65 70 84 82 62 64 73 78 57 64 69 74 57 68 73 74 57 7 +71 81 79 68 71 73 82 60 67 73 72 57 70 84 82 62 70 75 78 65 67 79 78 58 68 73 74 57 64 73 74 57 64 69 78 61 7 +67 73 72 57 63 70 72 57 63 70 68 57 67 79 78 58 67 71 74 58 67 75 78 62 64 69 78 61 68 73 78 61 68 77 82 61 7 +63 70 72 57 63 70 68 57 63 70 72 57 67 71 74 58 67 75 78 62 70 75 82 62 68 73 78 61 68 77 82 61 68 77 74 61 7 +63 70 68 57 63 70 72 57 67 77 72 60 67 75 78 62 70 75 82 62 70 79 82 65 68 77 82 61 68 77 74 61 68 77 78 61 7 +63 70 72 57 67 77 72 60 71 77 72 64 70 75 82 62 70 79 82 65 70 79 85 65 68 77 74 61 68 77 78 61 72 81 82 65 7 +67 77 72 60 71 77 72 64 71 81 82 64 70 79 82 65 70 79 85 65 70 79 85 69 68 77 78 61 72 81 82 65 72 81 82 65 7 +71 81 82 64 71 81 86 68 71 81 79 64 70 79 85 69 74 79 82 65 74 79 85 62 72 81 82 65 76 81 82 65 72 85 86 68 7 +71 81 86 68 71 81 79 64 67 73 79 60 74 79 82 65 74 79 85 62 67 79 85 62 76 81 82 65 72 85 86 68 72 81 86 68 7 +71 81 79 64 67 73 79 60 71 77 86 60 74 79 85 62 67 79 85 62 67 84 89 69 72 85 86 68 72 81 86 68 76 85 90 72 7 +67 73 79 60 71 77 86 60 75 81 82 64 67 79 85 62 67 84 89 69 74 88 93 73 72 81 86 68 76 85 90 72 76 89 94 76 7 +75 81 82 64 75 84 82 68 75 91 97 75 74 88 93 73 78 92 93 73 78 92 93 76 76 89 94 76 76 85 94 76 76 98 98 76 7 +75 84 82 68 75 91 97 75 83 95 105 79 78 92 93 73 78 92 93 76 78 92 93 76 76 85 94 76 76 98 98 76 80 98 98 76 7 +75 91 97 75 83 95 105 79 83 99 105 75 78 92 93 76 78 92 93 76 85 97 101 76 76 98 98 76 80 98 98 76 80 94 98 76 7 +71 81 93 79 71 77 93 79 71 73 93 79 74 84 89 73 70 84 97 80 70 75 93 76 76 85 90 76 72 81 90 76 72 81 98 79 7 +71 77 93 79 71 73 93 79 67 73 90 75 70 84 97 80 70 75 93 76 67 75 89 76 72 81 90 76 72 81 98 79 72 85 94 83 5 +71 73 93 79 67 73 90 75 67 73 90 75 70 75 93 76 67 75 89 76 67 75 89 80 72 81 98 79 72 85 94 83 80 94 102 83 5 +67 73 90 75 67 73 90 75 63 70 86 75 67 75 89 76 67 75 89 80 67 79 93 76 72 85 94 83 80 94 102 83 80 94 102 83 5 +63 70 86 75 63 70 82 72 63 66 82 68 67 79 93 76 70 75 89 76 67 79 89 76 80 94 102 83 80 94 106 83 72 98 106 83 5 +63 70 82 72 63 66 82 68 63 66 82 68 70 75 89 76 67 79 89 76 70 79 89 80 80 94 106 83 72 98 106 83 80 98 102 87 5 +63 66 82 68 63 70 82 68 67 73 86 72 70 79 89 80 70 84 89 73 70 79 85 73 80 98 102 87 76 94 98 83 76 89 98 83 5 +63 70 82 68 67 73 86 72 71 77 90 72 70 84 89 73 70 79 85 73 74 84 89 76 76 94 98 83 76 89 98 83 72 85 94 79 7 +67 73 86 72 71 77 90 72 71 81 90 75 70 79 85 73 74 84 89 76 74 84 97 76 76 89 98 83 72 85 94 79 72 85 98 79 4 +71 77 90 72 71 81 90 75 71 84 93 75 74 84 89 76 74 84 97 76 74 88 97 76 72 85 94 79 72 85 98 79 68 89 94 83 4 +71 81 90 75 71 84 93 75 75 88 93 75 74 84 97 76 74 88 97 76 74 79 89 73 72 85 98 79 68 89 94 83 68 85 98 87 4 +71 84 93 75 75 88 93 75 75 77 86 68 74 88 97 76 74 79 89 73 67 79 85 65 68 89 94 83 68 85 98 87 72 89 94 79 4 +75 88 93 75 75 77 86 68 71 73 79 
60 74 79 89 73 67 79 85 65 67 75 78 62 68 85 98 87 72 89 94 79 72 85 90 76 4 +75 77 86 68 71 73 79 60 67 66 75 60 67 79 85 65 67 75 78 62 67 75 78 65 72 89 94 79 72 85 90 76 72 81 86 72 4 +71 73 79 60 67 66 75 60 67 66 68 60 67 75 78 62 67 75 78 65 67 79 82 62 72 85 90 76 72 81 86 72 72 85 86 72 4 +75 81 86 72 71 81 93 83 59 51 101 113 67 75 85 65 70 84 89 76 74 79 97 94 64 73 74 65 72 81 86 72 76 85 98 79 4 +71 81 93 83 59 51 101 113 46 32 101 116 70 84 89 76 74 79 97 94 53 43 105 115 72 81 86 72 76 85 98 79 68 66 106 98 2 +59 51 101 113 46 32 101 116 46 32 101 116 74 79 97 94 53 43 105 115 50 34 105 115 76 85 98 79 68 66 106 98 50 37 102 113 2 +46 32 101 116 42 30 101 120 46 32 105 116 50 34 105 115 47 34 101 111 44 31 101 119 50 37 102 113 44 34 102 109 47 34 106 113 2 +46 32 105 116 46 32 105 120 42 32 101 127 44 31 101 119 44 31 105 122 44 31 110 129 47 34 106 113 47 34 106 116 44 31 111 124 2 +46 32 105 120 42 32 101 127 46 30 110 127 44 31 105 122 44 31 110 129 42 29 110 126 47 34 106 116 44 31 111 124 44 29 111 128 2 +46 30 110 127 46 32 110 120 49 40 97 101 42 29 110 126 42 27 110 129 44 34 110 122 44 29 111 128 41 29 111 128 44 31 106 124 2 +42 30 105 113 42 30 105 116 42 32 105 109 44 29 105 119 44 29 101 115 44 34 105 104 47 37 106 116 53 49 98 94 60 66 94 79 2 +93 115 114 90 101 120 119 94 97 120 124 97 97 115 120 94 101 120 120 98 101 120 120 94 101 116 122 96 101 116 122 96 101 116 122 96 3 +97 115 119 97 97 111 119 94 97 115 114 94 97 115 125 94 92 115 115 94 92 111 111 91 97 112 122 92 92 107 118 96 92 107 118 88 3 +97 111 119 94 97 115 114 94 93 106 114 90 92 115 115 94 92 111 111 91 92 111 115 91 92 107 118 96 92 107 118 88 92 112 113 92 3 +97 115 114 94 93 106 114 90 93 111 114 90 92 111 111 91 92 111 115 91 88 106 111 91 92 107 118 88 92 112 113 92 92 107 118 88 3 +93 106 114 90 93 111 114 90 97 115 114 90 92 111 115 91 88 106 111 91 88 106 111 87 92 112 113 92 92 107 118 88 88 103 104 85 3 +93 111 114 94 89 111 114 87 93 111 110 87 88 106 111 91 84 106 111 83 84 98 102 83 84 99 104 81 84 99 104 81 84 99 108 85 4 +93 111 110 87 93 111 114 90 93 111 114 87 84 98 102 83 84 106 111 83 80 106 106 79 84 99 108 85 84 107 113 85 84 107 113 85 4 +93 111 114 87 89 106 110 87 85 97 105 80 80 106 106 79 80 106 102 79 80 98 98 76 84 107 113 85 88 103 108 85 84 99 104 78 4 +85 97 105 80 82 88 97 73 78 84 89 69 80 98 98 76 80 94 94 72 72 85 82 68 84 99 104 78 76 87 91 74 76 79 87 63 7 +82 88 97 73 78 84 89 69 67 71 74 55 80 94 94 72 72 85 82 68 64 69 71 54 76 87 91 74 76 79 87 63 68 68 75 52 7 +78 84 89 69 67 71 74 55 67 67 70 48 72 85 82 68 64 69 71 54 64 66 71 54 76 79 87 63 68 68 75 52 64 68 67 56 7 +67 67 70 48 63 67 70 51 63 67 74 51 64 66 71 54 64 69 71 54 64 69 74 54 64 68 67 56 64 75 71 52 68 75 75 56 7 +63 67 70 51 63 67 74 51 60 67 78 55 64 69 71 54 64 69 74 54 64 69 74 57 64 75 71 52 68 75 75 56 64 75 79 56 7 +63 67 74 51 60 67 78 55 60 67 74 55 64 69 74 54 64 69 74 57 64 73 74 57 68 75 75 56 64 75 79 56 64 75 79 59 7 +63 67 74 58 63 71 78 55 67 71 78 58 68 77 74 57 64 73 74 57 64 73 74 61 64 75 79 59 64 75 75 63 68 75 79 56 7 +63 71 78 55 67 71 78 58 63 75 78 58 64 73 74 57 64 73 74 61 64 73 82 61 64 75 75 63 68 75 79 56 68 75 75 59 7 +67 71 78 58 63 75 78 58 63 71 74 58 64 73 74 61 64 73 82 61 64 73 86 61 68 75 79 56 68 75 75 59 68 75 75 59 7 +63 71 74 58 67 75 78 58 67 79 82 62 64 73 86 61 64 73 78 57 64 73 78 61 68 75 75 59 68 75 75 59 68 79 79 63 7 +67 75 78 58 67 79 82 62 67 75 82 62 64 73 78 57 64 73 78 61 64 73 78 61 68 75 75 59 68 79 79 63 64 75 79 59 7 +67 75 82 
62 67 75 78 58 70 75 78 58 64 73 78 61 68 73 78 57 72 73 82 61 64 75 79 59 68 75 79 59 64 75 79 59 7 +67 75 82 58 63 75 78 55 63 75 78 58 68 77 74 57 64 73 82 61 64 73 78 57 64 75 75 52 64 68 75 56 64 68 71 56 7 +63 75 78 55 63 75 78 58 67 75 82 65 64 73 82 61 64 73 78 57 64 69 74 57 64 68 75 56 64 68 71 56 64 71 71 56 7 +70 75 78 65 67 79 78 58 67 71 74 58 64 73 74 57 64 69 78 61 68 73 78 61 68 71 75 56 68 71 75 59 64 75 75 59 7 +67 79 78 58 67 71 74 58 67 75 78 62 64 69 78 61 68 73 78 61 68 77 82 61 68 71 75 59 64 75 75 59 68 75 79 63 7 +67 71 74 58 67 75 78 62 70 75 82 62 68 73 78 61 68 77 82 61 68 77 74 61 64 75 75 59 68 75 79 63 68 79 79 59 7 +70 75 82 62 70 79 82 65 70 79 85 65 68 77 74 61 68 77 78 61 72 81 82 65 68 79 79 59 68 75 83 63 71 79 87 63 7 +70 79 82 65 70 79 85 65 70 79 85 69 68 77 78 61 72 81 82 65 72 81 82 65 68 75 83 63 71 79 87 63 71 83 83 63 7 +74 79 82 65 74 79 85 62 67 79 85 62 76 81 82 65 72 85 86 68 72 81 86 68 76 79 79 67 71 83 87 63 71 83 83 70 7 +67 84 89 69 74 88 93 73 78 92 93 73 76 85 90 72 76 89 94 76 76 85 94 76 71 83 83 67 80 87 91 74 76 91 96 74 7 +78 92 93 73 78 92 93 76 78 92 93 76 76 85 94 76 76 98 98 76 80 98 98 76 76 91 96 74 76 91 96 74 76 91 100 74 7 +78 92 93 76 78 92 93 76 85 97 101 76 76 98 98 76 80 98 98 76 80 94 98 76 76 91 96 74 76 91 100 74 80 87 91 74 7 +78 92 93 76 85 97 101 76 82 92 97 80 80 98 98 76 80 94 98 76 80 94 98 76 76 91 100 74 80 87 91 74 80 91 100 78 7 +82 92 97 80 74 84 89 73 70 84 97 80 80 94 98 76 76 85 90 76 72 81 90 76 80 91 100 78 80 91 100 78 80 91 96 78 7 +74 84 89 73 70 84 97 80 70 75 93 76 76 85 90 76 72 81 90 76 72 81 98 79 80 91 100 78 80 91 96 78 80 99 100 88 7 +70 79 85 73 74 84 89 76 74 84 97 76 76 89 98 83 72 85 94 79 72 85 98 79 76 95 100 85 71 95 100 81 76 99 108 88 4 +74 84 89 76 74 84 97 76 74 88 97 76 72 85 94 79 72 85 98 79 68 89 94 83 71 95 100 81 76 99 108 88 76 95 108 92 4 +74 84 97 76 74 88 97 76 74 79 89 73 72 85 98 79 68 89 94 83 68 85 98 87 76 99 108 88 76 95 108 92 76 103 108 92 4 +67 79 85 65 67 75 78 62 67 75 78 65 72 89 94 79 72 85 90 76 72 81 86 72 76 103 108 92 71 95 104 81 76 91 100 81 4 +67 75 78 62 67 75 78 65 67 79 82 62 72 85 90 76 72 81 86 72 72 85 86 72 71 95 104 81 76 91 100 81 76 91 96 81 4 +70 75 78 58 67 75 82 69 67 75 85 65 72 77 82 68 68 73 78 61 64 73 74 65 76 83 87 67 68 83 79 67 68 83 83 70 4 +67 75 82 69 67 75 85 65 70 84 89 76 68 73 78 61 64 73 74 65 72 81 86 72 68 83 79 67 68 83 83 70 68 79 83 67 4 +67 75 85 65 70 84 89 76 74 79 97 94 64 73 74 65 72 81 86 72 76 85 98 79 68 83 83 70 68 79 83 67 71 83 96 74 4 +70 84 89 76 74 79 97 94 53 43 105 115 72 81 86 72 76 85 98 79 68 66 106 98 68 79 83 67 71 83 96 74 71 87 96 81 4 +74 79 97 94 53 43 105 115 50 34 105 115 76 85 98 79 68 66 106 98 50 37 102 113 71 83 96 74 71 87 96 81 60 61 104 103 2 +53 43 105 115 50 34 105 115 47 34 101 111 68 66 106 98 50 37 102 113 44 34 102 109 71 87 96 81 60 61 104 103 46 34 104 110 2 +44 31 101 119 44 31 105 122 44 31 110 129 47 34 106 113 47 34 106 116 44 31 111 124 46 34 100 107 43 36 104 114 46 34 108 121 2 +44 31 105 122 44 31 110 129 42 29 110 126 47 34 106 116 44 31 111 124 44 29 111 128 43 36 104 114 46 34 108 121 40 31 104 125 2 +42 29 110 126 42 27 110 129 44 34 110 122 44 29 111 128 41 29 111 128 44 31 106 124 40 31 104 125 40 29 113 132 40 29 113 128 2 +44 34 110 122 50 37 110 119 44 29 114 126 44 31 106 124 47 34 102 113 50 34 106 113 40 29 113 128 43 31 108 121 50 45 100 99 2 +44 29 114 126 44 29 105 119 44 29 101 115 50 34 106 113 47 37 106 116 53 49 98 94 50 45 100 99 64 68 91 78 68 
83 87 70 2 +44 29 101 115 44 34 105 104 47 43 101 97 53 49 98 94 60 66 94 79 68 77 94 72 68 83 87 70 71 83 91 70 71 83 87 63 2 +92 106 115 91 92 111 115 91 97 115 120 94 92 107 113 92 97 112 118 96 101 116 122 96 88 111 113 92 93 116 118 92 97 121 123 96 3 +101 120 120 98 101 120 120 94 101 115 120 94 101 116 122 96 101 116 122 96 101 112 122 96 97 116 123 100 97 116 123 96 97 111 118 96 3 +101 120 120 94 101 115 120 94 97 115 125 94 101 116 122 96 101 112 122 96 97 112 122 92 97 116 123 96 97 111 118 96 97 116 118 96 3 +101 115 120 94 97 115 125 94 92 115 115 94 101 112 122 96 97 112 122 92 92 107 118 96 97 111 118 96 97 116 118 96 93 111 118 92 3 +92 115 115 94 92 111 111 91 92 111 115 91 92 107 118 96 92 107 118 88 92 112 113 92 93 111 118 92 93 107 113 87 88 107 109 83 3 +92 111 115 91 88 106 111 91 88 106 111 87 92 112 113 92 92 107 118 88 88 103 104 85 88 107 109 83 84 99 109 79 79 95 100 79 3 +88 106 111 87 88 106 111 91 84 106 111 83 88 103 104 85 84 99 104 81 84 99 104 81 79 95 100 79 84 103 109 79 88 107 109 83 4 +88 106 111 91 84 106 111 83 84 98 102 83 84 99 104 81 84 99 104 81 84 99 108 85 84 103 109 79 88 107 109 83 88 107 109 87 4 +84 106 111 83 84 98 102 83 84 106 111 83 84 99 104 81 84 99 108 85 84 107 113 85 88 107 109 83 88 107 109 87 88 107 113 87 4 +84 98 102 83 84 106 111 83 80 106 106 79 84 99 108 85 84 107 113 85 84 107 113 85 88 107 109 87 88 107 113 87 84 107 113 87 4 +84 106 111 83 80 106 106 79 80 106 102 79 84 107 113 85 84 107 113 85 88 103 108 85 88 107 113 87 84 107 113 87 88 107 109 87 4 +80 106 106 79 80 106 102 79 80 98 98 76 84 107 113 85 88 103 108 85 84 99 104 78 84 107 113 87 88 107 109 87 84 99 100 79 4 +80 106 102 79 80 98 98 76 80 94 94 72 88 103 108 85 84 99 104 78 76 87 91 74 88 107 109 87 84 99 100 79 79 91 93 71 4 +80 94 94 72 72 85 82 68 64 69 71 54 76 87 91 74 76 79 87 63 68 68 75 52 79 91 93 71 71 79 85 62 67 72 70 50 7 +64 66 71 54 64 69 71 54 64 69 74 54 64 68 67 56 64 75 71 52 68 75 75 56 63 68 70 54 67 72 74 54 67 72 77 54 7 +64 69 71 54 64 69 74 54 64 69 74 57 64 75 71 52 68 75 75 56 64 75 79 56 67 72 74 54 67 72 77 54 63 72 77 58 7 +64 69 74 57 64 73 74 57 68 77 74 57 64 75 79 56 64 75 79 59 64 75 79 59 63 72 77 58 67 75 77 58 67 75 77 58 7 +64 73 74 57 68 77 74 57 64 73 74 57 64 75 79 59 64 75 79 59 64 75 75 63 67 75 77 58 67 75 77 58 67 72 77 58 7 +68 77 74 57 64 73 74 57 64 73 74 61 64 75 79 59 64 75 75 63 68 75 79 56 67 75 77 58 67 72 77 58 67 75 74 58 7 +64 73 74 57 64 73 74 61 64 73 82 61 64 75 75 63 68 75 79 56 68 75 75 59 67 72 77 58 67 75 74 58 67 83 77 58 7 +64 73 82 61 64 73 86 61 64 73 78 57 68 75 75 59 68 75 75 59 68 75 75 59 67 83 77 58 71 75 77 58 71 79 81 58 7 +64 73 78 57 64 73 78 61 64 73 78 61 68 75 75 59 68 79 79 63 64 75 79 59 71 79 81 58 67 79 77 58 67 75 81 58 7 +64 73 78 61 68 73 78 57 72 73 82 61 64 75 79 59 68 75 79 59 64 75 79 59 67 75 81 58 67 72 74 58 63 72 74 58 7 +68 73 78 57 72 73 82 61 72 77 74 57 68 75 79 59 64 75 79 59 68 75 75 59 67 72 74 58 63 72 74 58 67 75 74 58 7 +72 77 74 57 68 77 74 57 64 73 82 61 68 75 75 59 64 75 75 52 64 68 75 56 67 75 74 58 71 75 77 54 67 72 74 54 7 +68 77 74 57 64 73 82 61 64 73 78 57 64 75 75 52 64 68 75 56 64 68 71 56 71 75 77 54 67 72 74 54 67 75 70 54 7 +64 73 78 57 64 69 74 57 68 73 74 57 64 68 71 56 64 71 71 56 68 71 71 59 67 75 70 54 67 75 74 58 63 72 74 58 7 +64 69 74 57 68 73 74 57 64 73 74 57 64 71 71 56 68 71 71 59 68 71 75 56 67 75 74 58 63 72 74 58 63 68 70 54 7 +68 73 74 57 64 73 74 57 64 69 78 61 68 71 71 59 68 71 75 56 68 71 75 59 63 72 74 58 63 68 70 54 67 
68 74 58 7 +68 73 78 61 68 77 82 61 68 77 74 61 64 75 75 59 68 75 79 63 68 79 79 59 67 72 74 58 67 72 74 58 71 72 85 62 7 +72 81 82 65 72 81 82 65 76 81 82 65 71 79 87 63 71 83 83 63 76 79 79 67 71 79 85 62 71 79 85 62 71 79 85 62 7 +72 81 82 65 76 81 82 65 72 85 86 68 71 83 83 63 76 79 79 67 71 83 87 63 71 79 85 62 71 79 85 62 71 79 85 67 7 +76 85 90 72 76 89 94 76 76 85 94 76 71 83 83 67 80 87 91 74 76 91 96 74 75 83 89 67 71 79 89 71 71 87 89 71 7 +76 89 94 76 76 85 94 76 76 98 98 76 80 87 91 74 76 91 96 74 76 91 96 74 71 79 89 71 71 87 89 71 75 83 89 71 7 +76 98 98 76 80 98 98 76 80 94 98 76 76 91 96 74 76 91 100 74 80 87 91 74 75 83 89 71 75 87 93 71 75 87 93 75 7 +80 98 98 76 80 94 98 76 80 94 98 76 76 91 100 74 80 87 91 74 80 91 100 78 75 87 93 71 75 87 93 75 79 95 100 79 7 +80 94 98 76 80 94 98 76 76 85 90 76 80 87 91 74 80 91 100 78 80 91 100 78 75 87 93 75 79 95 100 79 79 99 109 83 7 +72 89 94 79 72 85 90 76 72 81 86 72 76 103 108 92 71 95 104 81 76 91 100 81 75 99 104 87 75 99 104 87 75 103 109 87 4 +72 85 90 76 72 81 86 72 72 85 86 72 71 95 104 81 76 91 100 81 76 91 96 81 75 99 104 87 75 103 109 87 75 95 104 83 4 +72 81 86 72 72 85 86 72 72 77 82 68 76 91 100 81 76 91 96 81 76 83 87 67 75 103 109 87 75 95 104 83 75 87 93 75 4 +72 85 86 72 72 77 82 68 68 73 78 61 76 91 96 81 76 83 87 67 68 83 79 67 75 95 104 83 75 87 93 75 71 79 85 67 4 +72 77 82 68 68 73 78 61 64 73 74 65 76 83 87 67 68 83 79 67 68 83 83 70 75 87 93 75 71 79 85 67 63 75 81 67 4 +64 73 74 65 72 81 86 72 76 85 98 79 68 83 83 70 68 79 83 67 71 83 96 74 63 75 81 67 67 79 85 67 75 79 89 71 4 +72 81 86 72 76 85 98 79 68 66 106 98 68 79 83 67 71 83 96 74 71 87 96 81 67 79 85 67 75 79 89 71 75 83 93 71 4 +76 85 98 79 68 66 106 98 50 37 102 113 71 83 96 74 71 87 96 81 60 61 104 103 75 79 89 71 75 83 93 71 75 79 100 83 2 +68 66 106 98 50 37 102 113 44 34 102 109 71 87 96 81 60 61 104 103 46 34 104 110 75 83 93 71 75 79 100 83 55 48 104 108 2 +50 37 102 113 44 34 102 109 47 34 106 113 60 61 104 103 46 34 104 110 46 34 100 107 75 79 100 83 55 48 104 108 44 32 104 112 2 +47 34 106 113 47 34 106 116 44 31 111 124 46 34 100 107 43 36 104 114 46 34 108 121 44 32 104 112 44 34 109 112 41 37 104 116 2 +44 29 111 128 41 29 111 128 44 31 106 124 40 31 104 125 40 29 113 132 40 29 113 128 41 32 104 121 44 32 109 125 41 29 113 129 2 +41 29 111 128 44 31 106 124 47 34 102 113 40 29 113 132 40 29 113 128 43 31 108 121 44 32 109 125 41 29 113 129 44 29 113 129 2 +47 34 102 113 50 34 106 113 47 37 106 116 43 31 108 121 50 45 100 99 64 68 91 78 44 29 113 129 48 37 109 112 63 64 93 75 2 +47 37 106 116 53 49 98 94 60 66 94 79 64 68 91 78 68 83 87 70 71 83 91 70 63 64 93 75 71 83 85 67 67 79 85 67 4 +53 49 98 94 60 66 94 79 68 77 94 72 68 83 87 70 71 83 91 70 71 83 87 63 71 83 85 67 67 79 85 67 67 79 85 62 4 +92 107 113 92 97 112 118 96 101 116 122 96 88 111 113 92 93 116 118 92 97 121 123 96 95 118 117 96 99 118 122 96 95 118 122 96 3 +97 112 118 96 101 116 122 96 101 116 122 96 93 116 118 92 97 121 123 96 97 116 123 100 99 118 122 96 95 118 122 96 99 118 127 100 3 +101 116 122 96 101 116 122 96 101 116 122 96 97 121 123 96 97 116 123 100 97 116 123 96 95 118 122 96 99 118 127 100 99 118 117 96 3 +101 116 122 96 101 116 122 96 101 112 122 96 97 116 123 100 97 116 123 96 97 111 118 96 99 118 127 100 99 118 117 96 95 113 112 92 3 +101 116 122 96 101 112 122 96 97 112 122 92 97 116 123 96 97 111 118 96 97 116 118 96 99 118 117 96 95 113 112 92 90 109 112 89 3 +92 107 118 96 92 107 118 88 92 112 113 92 93 111 118 92 93 107 113 87 88 107 109 83 
90 104 108 85 86 109 104 81 86 104 112 85 3 +92 107 118 88 92 112 113 92 92 107 118 88 93 107 113 87 88 107 109 83 84 99 109 79 86 109 104 81 86 104 112 85 86 104 104 85 4 +92 112 113 92 92 107 118 88 88 103 104 85 88 107 109 83 84 99 109 79 79 95 100 79 86 104 112 85 86 104 104 85 86 104 104 81 4 +88 103 104 85 84 99 104 81 84 99 104 81 79 95 100 79 84 103 109 79 88 107 109 83 86 104 104 81 86 100 108 85 86 104 108 89 4 +84 99 104 81 84 99 104 81 84 99 108 85 84 103 109 79 88 107 109 83 88 107 109 87 86 100 108 85 86 104 108 89 86 109 112 89 4 +84 107 113 85 84 107 113 85 88 103 108 85 88 107 113 87 84 107 113 87 88 107 109 87 90 113 122 92 90 109 112 89 82 100 100 81 4 +84 107 113 85 88 103 108 85 84 99 104 78 84 107 113 87 88 107 109 87 84 99 100 79 90 109 112 89 82 100 100 81 78 91 96 70 4 +88 103 108 85 84 99 104 78 76 87 91 74 88 107 109 87 84 99 100 79 79 91 93 71 82 100 100 81 78 91 96 70 74 83 88 66 4 +76 87 91 74 76 79 87 63 68 68 75 52 79 91 93 71 71 79 85 62 67 72 70 50 74 83 88 66 74 83 88 66 66 75 76 55 7 +76 79 87 63 68 68 75 52 64 68 67 56 71 79 85 62 67 72 70 50 63 68 70 54 74 83 88 66 66 75 76 55 63 71 69 55 7 +68 68 75 52 64 68 67 56 64 75 71 52 67 72 70 50 63 68 70 54 67 72 74 54 66 75 76 55 63 71 69 55 66 75 76 55 7 +64 68 67 56 64 75 71 52 68 75 75 56 63 68 70 54 67 72 74 54 67 72 77 54 63 71 69 55 66 75 76 55 66 75 80 59 7 +64 75 71 52 68 75 75 56 64 75 79 56 67 72 74 54 67 72 77 54 63 72 77 58 66 75 76 55 66 75 80 59 66 75 80 59 7 +68 75 75 56 64 75 79 56 64 75 79 59 67 72 77 54 63 72 77 58 67 75 77 58 66 75 80 59 66 75 80 59 66 79 76 59 7 +64 75 79 56 64 75 79 59 64 75 79 59 63 72 77 58 67 75 77 58 67 75 77 58 66 75 80 59 66 79 76 59 70 79 76 59 7 +64 75 79 59 64 75 79 59 64 75 75 63 67 75 77 58 67 75 77 58 67 72 77 58 66 79 76 59 70 79 76 59 70 79 80 63 7 +64 75 79 59 64 75 75 63 68 75 79 56 67 75 77 58 67 72 77 58 67 75 74 58 70 79 76 59 70 79 80 63 70 75 80 59 7 +64 75 75 63 68 75 79 56 68 75 75 59 67 72 77 58 67 75 74 58 67 83 77 58 70 79 80 63 70 75 80 59 66 75 76 59 7 +68 75 79 56 68 75 75 59 68 75 75 59 67 75 74 58 67 83 77 58 71 75 77 58 70 75 80 59 66 75 76 59 66 75 84 63 7 +68 75 75 59 68 75 75 59 68 79 79 63 71 75 77 58 71 79 81 58 67 79 77 58 66 75 84 63 66 79 80 59 66 75 80 59 7 +68 75 79 59 64 75 79 59 68 75 75 59 67 72 74 58 63 72 74 58 67 75 74 58 66 75 76 59 63 71 76 59 63 71 76 59 7 +68 75 75 59 64 75 75 52 64 68 75 56 67 75 74 58 71 75 77 54 67 72 74 54 63 71 76 59 63 75 80 59 66 75 80 59 7 +64 68 75 56 64 68 71 56 64 71 71 56 67 72 74 54 67 75 70 54 67 75 74 58 66 75 80 59 66 79 76 59 66 79 80 63 7 +64 71 71 56 68 71 71 59 68 71 75 56 67 75 74 58 63 72 74 58 63 68 70 54 66 79 80 63 66 75 76 59 59 71 73 55 7 +68 71 75 59 64 75 75 59 68 75 79 63 67 68 74 58 67 72 74 58 67 72 74 58 63 71 73 59 63 75 73 59 63 75 73 55 7 +64 75 75 59 68 75 79 63 68 79 79 59 67 72 74 58 67 72 74 58 71 72 85 62 63 75 73 59 63 75 73 55 66 75 76 59 7 +68 75 79 63 68 79 79 59 68 75 83 63 67 72 74 58 71 72 85 62 71 79 81 67 63 75 73 55 66 75 76 59 66 75 80 63 7 +68 79 79 59 68 75 83 63 71 79 87 63 71 72 85 62 71 79 81 67 71 79 85 62 66 75 76 59 66 75 80 63 66 79 80 63 7 +68 75 83 63 71 79 87 63 71 83 83 63 71 79 81 67 71 79 85 62 71 79 85 62 66 75 80 63 66 79 80 63 66 79 76 63 7 +71 79 87 63 71 83 83 63 76 79 79 67 71 79 85 62 71 79 85 62 71 79 85 62 66 79 80 63 66 79 76 63 70 79 80 63 7 +71 83 83 63 76 79 79 67 71 83 87 63 71 79 85 62 71 79 85 62 71 79 85 67 66 79 76 63 70 79 80 63 70 79 80 63 7 +76 79 79 67 71 83 87 63 71 83 83 70 71 79 85 62 71 79 85 67 71 83 85 67 70 
79 80 63 70 79 80 63 66 79 88 63 7 +71 83 87 63 71 83 83 70 71 83 83 67 71 79 85 67 71 83 85 67 75 83 89 67 70 79 80 63 66 79 88 63 70 83 88 66 7 +71 83 83 67 80 87 91 74 76 91 96 74 75 83 89 67 71 79 89 71 71 87 89 71 70 83 88 66 70 79 92 66 70 87 88 70 7 +80 87 91 74 76 91 96 74 76 91 96 74 71 79 89 71 71 87 89 71 75 83 89 71 70 79 92 66 70 87 88 70 66 83 88 70 7 +76 91 96 74 76 91 96 74 76 91 100 74 71 87 89 71 75 83 89 71 75 87 93 71 70 87 88 70 66 83 88 70 70 83 92 70 7 +76 91 100 74 80 87 91 74 80 91 100 78 75 87 93 71 75 87 93 75 79 95 100 79 70 83 92 70 70 87 92 74 74 87 96 78 7 +76 91 96 81 76 83 87 67 68 83 79 67 75 95 104 83 75 87 93 75 71 79 85 67 74 96 112 89 74 96 104 89 70 87 92 78 4 +76 83 87 67 68 83 79 67 68 83 83 70 75 87 93 75 71 79 85 67 63 75 81 67 74 96 104 89 70 87 92 78 70 79 84 70 4 +60 61 104 103 46 34 104 110 46 34 100 107 75 79 100 83 55 48 104 108 44 32 104 112 70 83 92 74 74 83 100 85 59 49 104 107 2 +46 34 104 110 46 34 100 107 43 36 104 114 55 48 104 108 44 32 104 112 44 34 109 112 74 83 100 85 59 49 104 107 46 32 108 114 2 +46 34 100 107 43 36 104 114 46 34 108 121 44 32 104 112 44 34 109 112 41 37 104 116 59 49 104 107 46 32 108 114 46 32 100 107 2 +43 36 104 114 46 34 108 121 40 31 104 125 44 34 109 112 41 37 104 116 41 32 104 121 46 32 108 114 46 32 100 107 46 34 104 107 2 +40 29 113 132 40 29 113 128 43 31 108 121 44 32 109 125 41 29 113 129 44 29 113 129 46 32 104 114 46 27 108 129 43 29 108 129 2 +40 29 113 128 43 31 108 121 50 45 100 99 41 29 113 129 44 29 113 129 48 37 109 112 46 27 108 129 43 29 108 129 46 32 108 122 2 +43 31 108 121 50 45 100 99 64 68 91 78 44 29 113 129 48 37 109 112 63 64 93 75 43 29 108 129 46 32 108 122 52 43 92 92 2 +50 45 100 99 64 68 91 78 68 83 87 70 48 37 109 112 63 64 93 75 71 83 85 67 46 32 108 122 52 43 92 92 66 67 80 59 2 +93 116 118 92 97 121 123 96 97 116 123 100 99 118 122 96 95 118 122 96 99 118 127 100 96 117 130 98 96 117 114 94 96 112 114 90 3 +97 116 123 100 97 116 123 96 97 111 118 96 99 118 127 100 99 118 117 96 95 113 112 92 96 112 114 90 87 103 105 86 92 108 114 90 3 +97 111 118 96 97 116 118 96 93 111 118 92 95 113 112 92 90 109 112 89 90 104 108 85 92 108 114 90 92 112 119 90 92 108 110 94 3 +93 107 113 87 88 107 109 83 84 99 109 79 86 109 104 81 86 104 112 85 86 104 104 85 92 108 110 90 83 108 114 86 83 103 105 86 4 +88 107 109 83 84 99 109 79 79 95 100 79 86 104 112 85 86 104 104 85 86 104 104 81 83 108 114 86 83 103 105 86 87 103 105 83 4 +84 99 109 79 79 95 100 79 84 103 109 79 86 104 104 85 86 104 104 81 86 100 108 85 83 103 105 86 87 103 105 83 87 103 105 83 4 +79 95 100 79 84 103 109 79 88 107 109 83 86 104 104 81 86 100 108 85 86 104 108 89 87 103 105 83 87 103 105 83 83 103 114 86 4 +88 107 109 83 88 107 109 87 88 107 113 87 86 104 108 89 86 109 112 89 90 113 122 92 83 103 114 86 87 112 119 90 92 112 114 90 4 +88 107 113 87 84 107 113 87 88 107 109 87 90 113 122 92 90 109 112 89 82 100 100 81 92 112 114 90 87 103 105 83 79 88 93 72 4 +84 107 113 87 88 107 109 87 84 99 100 79 90 109 112 89 82 100 100 81 78 91 96 70 87 103 105 83 79 88 93 72 71 84 82 64 4 +88 107 109 87 84 99 100 79 79 91 93 71 82 100 100 81 78 91 96 70 74 83 88 66 79 88 93 72 71 84 82 64 71 77 86 68 7 +79 91 93 71 71 79 85 62 67 72 70 50 74 83 88 66 74 83 88 66 66 75 76 55 71 77 86 68 71 81 82 60 67 77 75 57 7 +71 79 85 62 67 72 70 50 63 68 70 54 74 83 88 66 66 75 76 55 63 71 69 55 71 81 82 60 67 77 75 57 67 73 75 57 7 +63 68 70 54 67 72 74 54 67 72 77 54 63 71 69 55 66 75 76 55 66 75 80 59 67 73 75 57 67 73 79 57 67 73 79 60 7 
+67 72 74 54 67 72 77 54 63 72 77 58 66 75 76 55 66 75 80 59 66 75 80 59 67 73 79 57 67 73 79 60 71 77 79 60 7 +63 72 77 58 67 75 77 58 67 75 77 58 66 75 80 59 66 79 76 59 70 79 76 59 71 77 79 60 71 77 82 60 71 81 82 60 7 +67 75 77 58 67 75 77 58 67 72 77 58 66 79 76 59 70 79 76 59 70 79 80 63 71 77 82 60 71 81 82 60 67 77 86 64 7 +67 75 77 58 67 72 77 58 67 75 74 58 70 79 76 59 70 79 80 63 70 75 80 59 71 81 82 60 67 77 86 64 67 77 82 60 7 +67 72 77 58 67 75 74 58 67 83 77 58 70 79 80 63 70 75 80 59 66 75 76 59 67 77 86 64 67 77 82 60 67 77 75 60 7 +67 75 74 58 67 83 77 58 71 75 77 58 70 75 80 59 66 75 76 59 66 75 84 63 67 77 82 60 67 77 75 60 63 73 82 57 7 +67 83 77 58 71 75 77 58 71 79 81 58 66 75 76 59 66 75 84 63 66 79 80 59 67 77 75 60 63 73 82 57 63 77 79 60 7 +71 75 77 58 71 79 81 58 67 79 77 58 66 75 84 63 66 79 80 59 66 75 80 59 63 73 82 57 63 77 79 60 67 73 75 60 7 +67 75 81 58 67 72 74 58 63 72 74 58 66 75 80 59 66 75 76 59 63 71 76 59 67 73 79 57 67 73 72 60 63 70 72 57 7 +63 72 74 58 67 75 74 58 71 75 77 54 63 71 76 59 63 71 76 59 63 75 80 59 63 70 72 57 63 73 75 57 63 73 79 57 7 +67 75 74 58 71 75 77 54 67 72 74 54 63 71 76 59 63 75 80 59 66 75 80 59 63 73 75 57 63 73 79 57 67 81 82 60 7 +67 72 74 54 67 75 70 54 67 75 74 58 66 75 80 59 66 79 76 59 66 79 80 63 67 81 82 60 67 77 86 60 67 73 82 60 7 +67 75 70 54 67 75 74 58 63 72 74 58 66 79 76 59 66 79 80 63 66 75 76 59 67 77 86 60 67 73 82 60 63 73 75 60 7 +67 75 74 58 63 72 74 58 63 68 70 54 66 79 80 63 66 75 76 59 59 71 73 55 67 73 82 60 63 73 75 60 67 73 72 57 7 +67 72 74 58 67 72 74 58 71 72 85 62 63 75 73 59 63 75 73 55 66 75 76 59 67 73 79 60 67 70 75 60 67 73 75 57 7 +67 72 74 58 71 72 85 62 71 79 81 67 63 75 73 55 66 75 76 59 66 75 80 63 67 70 75 60 67 73 75 57 67 77 75 60 7 +71 72 85 62 71 79 81 67 71 79 85 62 66 75 76 59 66 75 80 63 66 79 80 63 67 73 75 57 67 77 75 60 67 77 82 60 7 +71 79 81 67 71 79 85 62 71 79 85 62 66 75 80 63 66 79 80 63 66 79 76 63 67 77 75 60 67 77 82 60 63 77 82 60 7 +71 79 85 62 71 79 85 62 71 79 85 62 66 79 80 63 66 79 76 63 70 79 80 63 67 77 82 60 63 77 82 60 63 77 79 64 7 +71 79 85 62 71 79 85 62 71 79 85 67 66 79 76 63 70 79 80 63 70 79 80 63 63 77 82 60 63 77 79 64 67 77 75 60 7 +71 79 85 67 71 83 85 67 75 83 89 67 70 79 80 63 66 79 88 63 70 83 88 66 67 77 75 60 67 77 79 64 67 84 82 64 7 +71 83 85 67 75 83 89 67 71 79 89 71 66 79 88 63 70 83 88 66 70 79 92 66 67 77 79 64 67 84 82 64 67 81 82 68 7 +75 87 93 75 71 79 85 67 63 75 81 67 74 96 104 89 70 87 92 78 70 79 84 70 67 95 105 86 71 88 97 83 67 84 93 72 4 +71 79 85 67 63 75 81 67 67 79 85 67 70 87 92 78 70 79 84 70 66 79 80 70 71 88 97 83 67 84 93 72 71 81 90 72 4 +63 75 81 67 67 79 85 67 75 79 89 71 70 79 84 70 66 79 80 70 70 79 80 66 67 84 93 72 71 81 90 72 71 81 82 75 4 +75 79 89 71 75 83 93 71 75 79 100 83 70 79 80 66 70 79 80 66 70 83 92 74 71 81 82 75 71 84 90 72 71 84 86 72 4 +75 83 93 71 75 79 100 83 55 48 104 108 70 79 80 66 70 83 92 74 74 83 100 85 71 84 90 72 71 84 86 72 75 84 97 72 4 +75 79 100 83 55 48 104 108 44 32 104 112 70 83 92 74 74 83 100 85 59 49 104 107 71 84 86 72 75 84 97 72 75 70 101 94 2 +44 32 104 112 44 34 109 112 41 37 104 116 59 49 104 107 46 32 108 114 46 32 100 107 75 70 101 94 56 42 97 113 46 34 93 105 2 +41 32 104 121 44 32 109 125 41 29 113 129 46 34 104 107 46 32 104 114 46 27 108 129 49 37 97 98 52 40 97 101 52 40 97 105 2 +41 29 113 129 44 29 113 129 48 37 109 112 46 27 108 129 43 29 108 129 46 32 108 122 52 40 97 105 52 48 90 98 59 63 90 75 2 +44 29 113 129 48 37 109 112 63 64 93 75 43 29 108 129 
46 32 108 122 52 43 92 92 52 48 90 98 59 63 90 75 67 70 86 64 2 +48 37 109 112 63 64 93 75 71 83 85 67 46 32 108 122 52 43 92 92 66 67 80 59 59 63 90 75 67 70 86 64 67 77 86 60 2 +63 64 93 75 71 83 85 67 67 79 85 67 52 43 92 92 66 67 80 59 70 79 84 63 67 70 86 64 67 77 86 60 71 81 86 68 4 +71 83 85 67 67 79 85 67 67 79 85 62 66 67 80 59 70 79 84 63 70 83 88 66 67 77 86 60 71 81 86 68 71 81 82 72 4 +95 118 117 96 99 118 122 96 95 118 122 96 96 112 124 94 96 117 130 98 96 117 114 94 97 111 114 90 89 102 101 83 82 88 89 73 3 +99 118 122 96 95 118 122 96 99 118 127 100 96 117 130 98 96 117 114 94 96 112 114 90 89 102 101 83 82 88 89 73 70 84 85 65 3 +99 118 127 100 99 118 117 96 95 113 112 92 96 112 114 90 87 103 105 86 92 108 114 90 70 84 85 65 85 102 105 83 97 115 124 101 3 +99 118 117 96 95 113 112 92 90 109 112 89 87 103 105 86 92 108 114 90 92 112 119 90 85 102 105 83 97 115 124 101 93 120 124 97 3 +90 109 112 89 90 104 108 85 86 109 104 81 92 112 119 90 92 108 110 94 92 108 110 90 93 120 124 97 93 120 119 97 89 115 114 87 4 +90 104 108 85 86 109 104 81 86 104 112 85 92 108 110 94 92 108 110 90 83 108 114 86 93 120 119 97 89 115 114 87 85 111 114 87 4 +86 109 104 81 86 104 112 85 86 104 104 85 92 108 110 90 83 108 114 86 83 103 105 86 89 115 114 87 85 111 114 87 85 106 110 87 4 +86 104 112 85 86 104 104 85 86 104 104 81 83 108 114 86 83 103 105 86 87 103 105 83 85 111 114 87 85 106 110 87 89 106 105 87 4 +86 104 104 85 86 104 104 81 86 100 108 85 83 103 105 86 87 103 105 83 87 103 105 83 85 106 110 87 89 106 105 87 85 106 114 87 4 +86 100 108 85 86 104 108 89 86 109 112 89 87 103 105 83 83 103 114 86 87 112 119 90 85 106 114 87 85 111 114 90 89 111 114 83 4 +86 104 108 89 86 109 112 89 90 113 122 92 83 103 114 86 87 112 119 90 92 112 114 90 85 111 114 90 89 111 114 83 89 106 110 83 4 +86 109 112 89 90 113 122 92 90 109 112 89 87 112 119 90 92 112 114 90 87 103 105 83 89 111 114 83 89 106 110 83 82 97 101 80 4 +90 113 122 92 90 109 112 89 82 100 100 81 92 112 114 90 87 103 105 83 79 88 93 72 89 106 110 83 82 97 101 80 78 88 97 73 4 +90 109 112 89 82 100 100 81 78 91 96 70 87 103 105 83 79 88 93 72 71 84 82 64 82 97 101 80 78 88 97 73 67 79 82 65 7 +82 100 100 81 78 91 96 70 74 83 88 66 79 88 93 72 71 84 82 64 71 77 86 68 78 88 97 73 67 79 82 65 70 79 82 62 7 +78 91 96 70 74 83 88 66 74 83 88 66 71 84 82 64 71 77 86 68 71 81 82 60 67 79 82 65 70 79 82 62 70 79 85 62 7 +74 83 88 66 74 83 88 66 66 75 76 55 71 77 86 68 71 81 82 60 67 77 75 57 70 79 82 62 70 79 85 62 70 84 82 58 7 +74 83 88 66 66 75 76 55 63 71 69 55 71 81 82 60 67 77 75 57 67 73 75 57 70 79 85 62 70 84 82 58 67 79 82 62 7 +66 75 76 55 63 71 69 55 66 75 76 55 67 77 75 57 67 73 75 57 67 73 79 57 70 84 82 58 67 79 82 62 70 79 82 58 7 +66 75 76 55 66 75 80 59 66 75 80 59 67 73 79 57 67 73 79 60 71 77 79 60 70 79 82 58 63 79 78 58 67 75 78 62 7 +70 79 76 59 70 79 80 63 70 75 80 59 71 81 82 60 67 77 86 64 67 77 82 60 67 79 78 62 67 79 82 62 63 71 78 62 7 +70 79 80 63 70 75 80 59 66 75 76 59 67 77 86 64 67 77 82 60 67 77 75 60 67 79 82 62 63 71 78 62 63 75 78 55 7 +70 75 80 59 66 75 76 59 66 75 84 63 67 77 82 60 67 77 75 60 63 73 82 57 63 71 78 62 63 75 78 55 67 75 78 58 7 +66 75 76 59 66 75 84 63 66 79 80 59 67 77 75 60 63 73 82 57 63 77 79 60 63 75 78 55 67 75 78 58 67 71 78 58 7 +66 75 84 63 66 79 80 59 66 75 80 59 63 73 82 57 63 77 79 60 67 73 75 60 67 75 78 58 67 71 78 58 67 71 82 62 7 +66 79 80 59 66 75 80 59 66 75 80 59 63 77 79 60 67 73 75 60 67 73 79 57 67 71 78 58 67 71 82 62 63 75 82 62 7 +66 75 80 59 66 75 76 59 63 71 76 
59 67 73 79 57 67 73 72 60 63 70 72 57 63 75 82 62 63 75 78 62 63 79 85 62 7 +66 75 76 59 63 71 76 59 63 71 76 59 67 73 72 60 63 70 72 57 63 73 75 57 63 75 78 62 63 79 85 62 67 79 82 58 7 +63 75 80 59 66 75 80 59 66 79 76 59 63 73 79 57 67 81 82 60 67 77 86 60 67 75 82 62 67 75 82 62 67 75 82 58 7 +66 75 80 59 66 79 76 59 66 79 80 63 67 81 82 60 67 77 86 60 67 73 82 60 67 75 82 62 67 75 82 58 70 79 74 58 7 +66 79 76 59 66 79 80 63 66 75 76 59 67 77 86 60 67 73 82 60 63 73 75 60 67 75 82 58 70 79 74 58 63 75 74 55 7 +66 79 80 63 66 75 76 59 59 71 73 55 67 73 82 60 63 73 75 60 67 73 72 57 70 79 74 58 63 75 74 55 63 71 70 55 7 +66 75 76 59 59 71 73 55 63 71 73 59 63 73 75 60 67 73 72 57 63 70 75 57 63 75 74 55 63 71 70 55 63 71 70 58 7 +63 71 73 59 63 75 73 59 63 75 73 55 63 70 75 57 67 73 79 60 67 70 75 60 63 71 70 58 63 71 78 58 63 67 74 62 7 +63 75 73 55 66 75 76 59 66 75 80 63 67 70 75 60 67 73 75 57 67 77 75 60 63 67 74 62 63 75 74 62 63 71 74 58 7 +66 75 76 59 66 75 80 63 66 79 80 63 67 73 75 57 67 77 75 60 67 77 82 60 63 75 74 62 63 71 74 58 63 71 78 62 7 +66 75 80 63 66 79 80 63 66 79 76 63 67 77 75 60 67 77 82 60 63 77 82 60 63 71 74 58 63 71 78 62 67 75 78 62 7 +66 79 80 63 66 79 76 63 70 79 80 63 67 77 82 60 63 77 82 60 63 77 79 64 63 71 78 62 67 75 78 62 63 75 85 58 7 +66 79 76 63 70 79 80 63 70 79 80 63 63 77 82 60 63 77 79 64 67 77 75 60 67 75 78 62 63 75 85 58 63 79 85 62 7 +70 79 80 63 70 79 80 63 66 79 88 63 63 77 79 64 67 77 75 60 67 77 79 64 63 75 85 58 63 79 85 62 67 79 82 65 7 +70 87 92 78 70 79 84 70 66 79 80 70 71 88 97 83 67 84 93 72 71 81 90 72 67 92 105 87 67 84 97 80 67 84 93 76 4 +70 79 84 70 66 79 80 70 70 79 80 66 67 84 93 72 71 81 90 72 71 81 82 75 67 84 97 80 67 84 93 76 67 84 89 73 4 +66 79 80 70 70 79 80 66 70 79 80 66 71 81 90 72 71 81 82 75 71 84 90 72 67 84 93 76 67 84 89 73 70 84 89 76 4 +46 32 100 107 46 34 104 107 46 32 104 114 46 34 93 105 49 37 97 98 52 40 97 101 53 43 97 101 53 49 93 90 60 56 85 83 2 +46 27 108 129 43 29 108 129 46 32 108 122 52 40 97 105 52 48 90 98 59 63 90 75 63 71 85 73 70 84 89 73 74 88 85 73 2 +43 29 108 129 46 32 108 122 52 43 92 92 52 48 90 98 59 63 90 75 67 70 86 64 70 84 89 73 74 88 85 73 74 84 85 73 2 +46 32 108 122 52 43 92 92 66 67 80 59 59 63 90 75 67 70 86 64 67 77 86 60 74 88 85 73 74 84 85 73 70 84 93 65 4 +52 43 92 92 66 67 80 59 70 79 84 63 67 70 86 64 67 77 86 60 71 81 86 68 74 84 85 73 70 84 93 65 70 84 85 65 4 +96 117 130 98 96 117 114 94 96 112 114 90 89 102 101 83 82 88 89 73 70 84 85 65 68 77 74 57 64 73 78 54 64 73 78 61 3 +96 117 114 94 96 112 114 90 87 103 105 86 82 88 89 73 70 84 85 65 85 102 105 83 64 73 78 54 64 73 78 61 72 89 94 76 3 +96 112 114 90 87 103 105 86 92 108 114 90 70 84 85 65 85 102 105 83 97 115 124 101 64 73 78 61 72 89 94 76 88 115 125 98 3 +87 103 105 86 92 108 114 90 92 112 119 90 85 102 105 83 97 115 124 101 93 120 124 97 72 89 94 76 88 115 125 98 97 120 120 102 3 +92 108 114 90 92 112 119 90 92 108 110 94 97 115 124 101 93 120 124 97 93 120 119 97 88 115 125 98 97 120 120 102 92 120 120 98 3 +92 108 110 94 92 108 110 90 83 108 114 86 93 120 119 97 89 115 114 87 85 111 114 87 92 120 120 98 88 120 120 91 84 111 111 91 3 +92 108 110 90 83 108 114 86 83 103 105 86 89 115 114 87 85 111 114 87 85 106 110 87 88 120 120 91 84 111 111 91 88 106 111 87 4 +83 108 114 86 83 103 105 86 87 103 105 83 85 111 114 87 85 106 110 87 89 106 105 87 84 111 111 91 88 106 111 87 88 106 111 87 4 +83 103 105 86 87 103 105 83 87 103 105 83 85 106 110 87 89 106 105 87 85 106 114 87 88 106 111 87 88 106 111 87 
84 106 111 87 4 +87 103 105 83 83 103 114 86 87 112 119 90 85 106 114 87 85 111 114 90 89 111 114 83 84 106 111 87 88 102 111 87 88 102 102 83 4 +87 112 119 90 92 112 114 90 87 103 105 83 89 111 114 83 89 106 110 83 82 97 101 80 88 102 102 83 84 98 102 79 80 98 94 72 4 +92 112 114 90 87 103 105 83 79 88 93 72 89 106 110 83 82 97 101 80 78 88 97 73 84 98 102 79 80 98 94 72 76 85 94 68 4 +87 103 105 83 79 88 93 72 71 84 82 64 82 97 101 80 78 88 97 73 67 79 82 65 80 98 94 72 76 85 94 68 76 81 86 65 7 +71 84 82 64 71 77 86 68 71 81 82 60 67 79 82 65 70 79 82 62 70 79 85 62 76 81 86 65 72 81 86 65 68 81 82 65 7 +71 77 86 68 71 81 82 60 67 77 75 57 70 79 82 62 70 79 85 62 70 84 82 58 72 81 86 65 68 81 82 65 68 81 82 65 7 +67 77 75 57 67 73 75 57 67 73 79 57 70 84 82 58 67 79 82 62 70 79 82 58 68 81 82 65 72 77 82 61 68 77 78 61 7 +67 73 75 57 67 73 79 57 67 73 79 60 67 79 82 62 70 79 82 58 63 79 78 58 72 77 82 61 68 77 78 61 68 77 78 61 7 +67 73 79 60 71 77 79 60 71 77 82 60 63 79 78 58 67 75 78 62 67 79 78 62 68 77 78 61 68 73 74 57 64 73 78 57 7 +71 77 79 60 71 77 82 60 71 81 82 60 67 75 78 62 67 79 78 62 67 79 78 62 68 73 74 57 64 73 78 57 68 73 78 61 7 +67 77 86 64 67 77 82 60 67 77 75 60 67 79 82 62 63 71 78 62 63 75 78 55 68 77 78 61 64 77 74 57 64 77 74 57 7 +67 77 75 60 63 73 82 57 63 77 79 60 63 75 78 55 67 75 78 58 67 71 78 58 64 77 74 57 64 77 78 61 64 77 78 61 7 +63 77 79 60 67 73 75 60 67 73 79 57 67 71 78 58 67 71 82 62 63 75 82 62 64 77 78 61 68 77 78 61 68 77 78 65 7 +67 73 75 60 67 73 79 57 67 73 72 60 67 71 82 62 63 75 82 62 63 75 78 62 68 77 78 61 68 77 78 65 64 77 74 65 7 +67 73 79 57 67 73 72 60 63 70 72 57 63 75 82 62 63 75 78 62 63 79 85 62 68 77 78 65 64 77 74 65 68 77 82 65 7 +67 73 72 60 63 70 72 57 63 73 75 57 63 75 78 62 63 79 85 62 67 79 82 58 64 77 74 65 68 77 82 65 68 81 78 61 7 +63 70 72 57 63 73 75 57 63 73 79 57 63 79 85 62 67 79 82 58 67 75 82 62 68 77 82 65 68 81 78 61 68 77 78 61 7 +63 73 79 57 67 81 82 60 67 77 86 60 67 75 82 62 67 75 82 62 67 75 82 58 68 77 78 61 68 77 78 57 68 77 74 57 7 +67 81 82 60 67 77 86 60 67 73 82 60 67 75 82 62 67 75 82 58 70 79 74 58 68 77 78 57 68 77 74 57 68 73 78 54 7 +67 77 86 60 67 73 82 60 63 73 75 60 67 75 82 58 70 79 74 58 63 75 74 55 68 77 74 57 68 73 78 54 68 73 74 54 7 +67 73 82 60 63 73 75 60 67 73 72 57 70 79 74 58 63 75 74 55 63 71 70 55 68 73 78 54 68 73 74 54 64 69 74 57 7 +67 70 75 60 67 73 75 57 67 77 75 60 63 67 74 62 63 75 74 62 63 71 74 58 68 69 74 57 64 73 74 57 64 73 74 57 7 +67 77 75 60 67 77 82 60 63 77 82 60 63 71 74 58 63 71 78 62 67 75 78 62 64 73 74 57 64 73 78 61 64 77 78 65 7 +67 77 82 60 63 77 82 60 63 77 79 64 63 71 78 62 67 75 78 62 63 75 85 58 64 73 78 61 64 77 78 65 68 77 86 65 7 +67 77 75 60 67 77 79 64 67 84 82 64 63 79 85 62 67 79 82 65 63 79 85 65 64 77 82 65 64 77 82 65 60 77 82 65 7 +67 77 79 64 67 84 82 64 67 81 82 68 67 79 82 65 63 79 85 65 63 79 89 65 64 77 82 65 60 77 82 65 60 77 82 65 7 +67 84 82 64 67 81 82 68 67 84 86 68 63 79 85 65 63 79 89 65 63 79 82 65 60 77 82 65 60 77 82 65 60 77 82 68 7 +67 84 86 68 67 84 82 68 63 81 82 68 63 79 82 65 60 79 85 65 60 79 89 65 60 77 82 68 64 81 86 72 64 85 94 76 7 +63 77 86 68 67 84 93 75 71 91 101 83 63 84 89 73 67 97 101 80 74 102 114 90 68 94 106 83 76 111 120 94 76 115 120 102 1 +67 84 93 75 71 91 101 83 75 99 105 86 67 97 101 80 74 102 114 90 74 115 119 97 76 111 120 94 76 115 120 102 72 115 120 102 1 +75 99 105 86 75 103 110 86 71 99 105 83 74 115 119 97 74 115 119 101 70 111 114 90 72 115 120 102 72 115 125 98 72 115 120 98 1 +71 88 97 
83 67 84 93 72 71 81 90 72 67 92 105 87 67 84 97 80 67 84 93 76 68 94 102 87 64 89 102 79 64 81 86 72 4 +67 84 93 72 71 81 90 72 71 81 82 75 67 84 97 80 67 84 93 76 67 84 89 73 64 89 102 79 64 81 86 72 68 81 86 68 4 +71 81 82 75 71 84 90 72 71 84 86 72 67 84 89 73 70 84 89 76 74 88 89 73 68 81 86 68 72 85 86 68 72 89 90 76 4 +75 70 101 94 56 42 97 113 46 34 93 105 74 84 97 76 70 67 101 94 53 43 97 101 72 89 94 76 72 85 90 76 64 73 86 72 2 +56 42 97 113 46 34 93 105 49 37 97 98 70 67 101 94 53 43 97 101 53 49 93 90 72 85 90 76 64 73 86 72 68 81 90 68 2 +46 34 93 105 49 37 97 98 52 40 97 101 53 43 97 101 53 49 93 90 60 56 85 83 64 73 86 72 68 81 90 68 72 94 86 72 2 +49 37 97 98 52 40 97 101 52 40 97 105 53 49 93 90 60 56 85 83 63 71 85 73 68 81 90 68 72 94 86 72 76 94 98 76 2 +52 40 97 105 52 48 90 98 59 63 90 75 63 71 85 73 70 84 89 73 74 88 85 73 76 94 98 76 76 98 98 76 76 94 98 76 4 +52 48 90 98 59 63 90 75 67 70 86 64 70 84 89 73 74 88 85 73 74 84 85 73 76 98 98 76 76 94 98 76 76 89 94 72 4 +59 63 90 75 67 70 86 64 67 77 86 60 74 88 85 73 74 84 85 73 70 84 93 65 76 94 98 76 76 89 94 72 72 85 86 68 4 +67 70 86 64 67 77 86 60 71 81 86 68 74 84 85 73 70 84 93 65 70 84 85 65 76 89 94 72 72 85 86 68 72 85 90 68 4 +89 102 101 83 82 88 89 73 70 84 85 65 68 77 74 57 64 73 78 54 64 73 78 61 64 75 71 59 64 75 79 59 64 75 75 59 7 +82 88 89 73 70 84 85 65 85 102 105 83 64 73 78 54 64 73 78 61 72 89 94 76 64 75 79 59 64 75 75 59 68 75 79 63 7 +85 102 105 83 97 115 124 101 93 120 124 97 72 89 94 76 88 115 125 98 97 120 120 102 68 75 79 63 76 99 104 85 92 116 122 99 3 +97 115 124 101 93 120 124 97 93 120 119 97 88 115 125 98 97 120 120 102 92 120 120 98 76 99 104 85 92 116 122 99 92 116 122 96 3 +93 120 124 97 93 120 119 97 89 115 114 87 97 120 120 102 92 120 120 98 88 120 120 91 92 116 122 99 92 116 122 96 88 107 118 92 3 +89 115 114 87 85 111 114 87 85 106 110 87 88 120 120 91 84 111 111 91 88 106 111 87 88 107 118 92 88 107 113 88 84 107 108 88 4 +85 111 114 87 85 106 110 87 89 106 105 87 84 111 111 91 88 106 111 87 88 106 111 87 88 107 113 88 84 107 108 88 84 103 108 85 4 +85 106 110 87 89 106 105 87 85 106 114 87 88 106 111 87 88 106 111 87 84 106 111 87 84 107 108 88 84 103 108 85 84 99 108 85 4 +82 97 101 80 78 88 97 73 67 79 82 65 80 98 94 72 76 85 94 68 76 81 86 65 80 87 91 78 76 87 91 67 71 87 91 63 7 +78 88 97 73 67 79 82 65 70 79 82 62 76 85 94 68 76 81 86 65 72 81 86 65 76 87 91 67 71 87 91 63 71 83 87 70 7 +70 79 82 62 70 79 85 62 70 84 82 58 72 81 86 65 68 81 82 65 68 81 82 65 71 83 87 70 71 83 87 67 68 79 83 67 7 +70 79 85 62 70 84 82 58 67 79 82 62 68 81 82 65 68 81 82 65 72 77 82 61 71 83 87 67 68 79 83 67 68 75 79 63 7 +70 84 82 58 67 79 82 62 70 79 82 58 68 81 82 65 72 77 82 61 68 77 78 61 68 79 83 67 68 75 79 63 68 75 75 56 7 +67 79 82 62 70 79 82 58 63 79 78 58 72 77 82 61 68 77 78 61 68 77 78 61 68 75 79 63 68 75 75 56 68 75 75 56 7 +67 75 78 62 67 79 78 62 67 79 78 62 68 73 74 57 64 73 78 57 68 73 78 61 71 75 75 56 68 75 75 59 68 75 79 59 7 +67 79 78 62 67 79 78 62 67 79 82 62 64 73 78 57 68 73 78 61 68 77 78 61 68 75 75 59 68 75 79 59 68 75 79 59 7 +67 79 78 62 67 79 82 62 63 71 78 62 68 73 78 61 68 77 78 61 64 77 74 57 68 75 79 59 68 75 79 59 60 75 79 59 7 +63 75 78 55 67 75 78 58 67 71 78 58 64 77 74 57 64 77 78 61 64 77 78 61 64 79 79 59 64 79 79 63 68 79 83 63 7 +67 71 78 58 67 71 82 62 63 75 82 62 64 77 78 61 68 77 78 61 68 77 78 65 68 79 83 63 68 79 79 67 64 83 83 67 7 +67 71 82 62 63 75 82 62 63 75 78 62 68 77 78 61 68 77 78 65 64 77 74 65 68 79 79 67 64 83 83 67 64 79 
79 63 7 +63 79 85 62 67 79 82 58 67 75 82 62 68 77 82 65 68 81 78 61 68 77 78 61 71 83 83 67 68 79 83 63 68 79 83 63 7 +67 79 82 58 67 75 82 62 67 75 82 62 68 81 78 61 68 77 78 61 68 77 78 57 68 79 83 63 68 79 83 63 68 79 79 59 7 +67 75 82 62 67 75 82 58 70 79 74 58 68 77 78 57 68 77 74 57 68 73 78 54 68 79 79 59 68 75 79 56 64 75 79 59 7 +70 79 74 58 63 75 74 55 63 71 70 55 68 73 78 54 68 73 74 54 64 69 74 57 64 75 79 59 68 79 79 59 68 75 75 56 7 +63 71 70 55 63 71 70 58 63 71 78 58 64 69 74 57 68 69 74 57 64 69 74 57 68 75 75 56 68 71 75 59 68 75 75 59 7 +63 71 70 58 63 71 78 58 63 67 74 62 68 69 74 57 64 69 74 57 68 69 74 57 68 71 75 59 68 75 75 59 68 71 75 59 7 +63 71 78 58 63 67 74 62 63 75 74 62 64 69 74 57 68 69 74 57 64 73 74 57 68 75 75 59 68 71 75 59 68 75 75 59 7 +63 67 74 62 63 75 74 62 63 71 74 58 68 69 74 57 64 73 74 57 64 73 74 57 68 71 75 59 68 75 75 59 64 75 79 59 7 +63 71 78 62 67 75 78 62 63 75 85 58 64 73 78 61 64 77 78 65 68 77 86 65 64 79 83 63 68 79 83 63 64 79 83 67 7 +63 75 85 58 63 79 85 62 67 79 82 65 68 77 86 65 64 77 82 65 64 77 82 65 64 79 83 67 64 75 79 63 64 75 83 67 7 +63 79 85 62 67 79 82 65 63 79 85 65 64 77 82 65 64 77 82 65 60 77 82 65 64 75 79 63 64 75 83 67 68 79 83 67 7 +60 79 89 65 63 84 89 73 67 97 101 80 64 85 94 76 68 94 106 83 76 111 120 94 68 91 100 81 71 103 118 96 76 116 122 99 1 +67 97 101 80 74 102 114 90 74 115 119 97 76 111 120 94 76 115 120 102 72 115 120 102 76 116 122 99 76 112 128 99 80 116 128 103 1 +74 115 119 97 74 115 119 101 70 111 114 90 72 115 120 102 72 115 125 98 72 115 120 98 80 116 128 103 80 116 128 99 76 116 122 96 1 +74 115 119 101 70 111 114 90 63 97 105 80 72 115 125 98 72 115 120 98 72 106 111 91 80 116 128 99 76 116 122 96 71 112 122 99 1 +70 111 114 90 63 97 105 80 63 84 97 80 72 115 120 98 72 106 111 91 64 94 102 79 76 116 122 96 71 112 122 99 68 103 118 88 1 +67 92 105 87 67 84 97 80 67 84 93 76 68 94 102 87 64 89 102 79 64 81 86 72 71 87 100 81 71 83 91 74 71 83 87 70 4 +67 84 97 80 67 84 93 76 67 84 89 73 64 89 102 79 64 81 86 72 68 81 86 68 71 83 91 74 71 83 87 70 76 87 91 78 4 +70 84 89 76 74 88 89 73 74 84 89 73 72 85 86 68 72 89 90 76 76 85 94 76 76 91 96 74 76 91 91 70 76 83 87 70 4 +74 84 97 76 70 67 101 94 53 43 97 101 72 89 94 76 72 85 90 76 64 73 86 72 68 79 79 63 68 75 75 63 68 83 87 70 4 +70 67 101 94 53 43 97 101 53 49 93 90 72 85 90 76 64 73 86 72 68 81 90 68 68 75 75 63 68 83 87 70 76 91 100 81 4 +53 49 93 90 60 56 85 83 63 71 85 73 68 81 90 68 72 94 86 72 76 94 98 76 76 91 100 81 76 99 104 81 80 99 104 78 4 +60 56 85 83 63 71 85 73 70 84 89 73 72 94 86 72 76 94 98 76 76 98 98 76 76 99 104 81 80 99 104 78 76 95 96 78 4 +63 71 85 73 70 84 89 73 74 88 85 73 76 94 98 76 76 98 98 76 76 94 98 76 80 99 104 78 76 95 96 78 71 87 96 74 4 +70 84 89 73 74 88 85 73 74 84 85 73 76 98 98 76 76 94 98 76 76 89 94 72 76 95 96 78 71 87 96 74 71 87 91 70 4 +74 88 85 73 74 84 85 73 70 84 93 65 76 94 98 76 76 89 94 72 72 85 86 68 71 87 96 74 71 87 91 70 71 91 87 70 4 +74 84 85 73 70 84 93 65 70 84 85 65 76 89 94 72 72 85 86 68 72 85 90 68 71 87 91 70 71 91 87 70 76 83 91 70 4 +68 77 74 57 64 73 78 54 64 73 78 61 64 75 71 59 64 75 79 59 64 75 75 59 67 75 74 58 63 72 77 58 67 75 81 58 7 +64 73 78 54 64 73 78 61 72 89 94 76 64 75 79 59 64 75 75 59 68 75 79 63 63 72 77 58 67 75 81 58 63 75 77 58 7 +72 89 94 76 88 115 125 98 97 120 120 102 68 75 79 63 76 99 104 85 92 116 122 99 63 75 77 58 67 83 85 67 79 103 109 87 3 +88 115 125 98 97 120 120 102 92 120 120 98 76 99 104 85 92 116 122 99 92 116 122 96 67 83 85 67 79 103 109 
87 88 107 113 92 3 +92 120 120 98 88 120 120 91 84 111 111 91 92 116 122 96 88 107 118 92 88 107 113 88 88 107 113 92 84 107 109 87 84 107 104 83 4 +84 111 111 91 88 106 111 87 88 106 111 87 88 107 113 88 84 107 108 88 84 103 108 85 84 107 104 83 84 103 104 83 84 103 104 83 4 +88 106 111 87 84 106 111 87 88 102 111 87 84 103 108 85 84 99 108 85 84 99 104 81 84 103 104 83 88 99 104 83 84 95 100 79 4 +84 106 111 87 88 102 111 87 88 102 102 83 84 99 108 85 84 99 104 81 84 95 100 78 88 99 104 83 84 95 100 79 79 95 93 75 4 +88 102 102 83 84 98 102 79 80 98 94 72 84 95 100 78 80 91 96 74 80 87 91 78 79 95 93 75 79 91 96 75 75 91 89 75 4 +84 98 102 79 80 98 94 72 76 85 94 68 80 91 96 74 80 87 91 78 76 87 91 67 79 91 96 75 75 91 89 75 75 91 93 75 4 +76 85 94 68 76 81 86 65 72 81 86 65 76 87 91 67 71 87 91 63 71 83 87 70 75 91 93 75 75 91 100 75 79 95 93 71 7 +72 81 86 65 68 81 82 65 68 81 82 65 71 83 87 70 71 83 87 67 68 79 83 67 79 95 93 71 79 87 85 67 71 79 81 62 7 +68 77 78 61 68 77 78 61 68 73 74 57 68 75 75 56 68 75 75 56 71 75 75 56 67 79 77 58 67 75 77 58 67 72 77 58 7 +64 73 78 57 68 73 78 61 68 77 78 61 68 75 75 59 68 75 79 59 68 75 79 59 67 72 81 58 71 75 77 58 71 75 74 58 7 +68 73 78 61 68 77 78 61 64 77 74 57 68 75 79 59 68 75 79 59 60 75 79 59 71 75 77 58 71 75 74 58 67 75 77 58 7 +64 77 74 57 64 77 74 57 64 77 78 61 60 75 79 59 64 79 79 59 64 79 79 63 67 75 77 58 67 75 81 62 67 79 85 62 7 +64 77 74 57 64 77 78 61 64 77 78 61 64 79 79 59 64 79 79 63 68 79 83 63 67 75 81 62 67 79 85 62 71 83 85 62 7 +64 77 78 61 68 77 78 61 68 77 78 65 68 79 83 63 68 79 79 67 64 83 83 67 71 83 85 62 71 87 85 67 71 79 85 67 7 +68 77 78 65 64 77 74 65 68 77 82 65 64 83 83 67 64 79 79 63 71 83 83 67 71 79 85 67 71 83 85 62 67 83 81 67 7 +64 77 74 65 68 77 82 65 68 81 78 61 64 79 79 63 71 83 83 67 68 79 83 63 71 83 85 62 67 83 81 67 67 79 81 62 7 +68 77 78 61 68 77 78 57 68 77 74 57 68 79 83 63 68 79 79 59 68 75 79 56 67 79 77 62 67 75 81 58 67 75 77 62 7 +68 77 78 57 68 77 74 57 68 73 78 54 68 79 79 59 68 75 79 56 64 75 79 59 67 75 81 58 67 75 77 62 67 72 77 62 7 +68 77 74 57 68 73 78 54 68 73 74 54 68 75 79 56 64 75 79 59 68 79 79 59 67 75 77 62 67 72 77 62 67 75 85 62 7 +68 73 78 54 68 73 74 54 64 69 74 57 64 75 79 59 68 79 79 59 68 75 75 56 67 72 77 62 67 75 85 62 67 75 81 58 7 +68 73 74 54 64 69 74 57 68 69 74 57 68 79 79 59 68 75 75 56 68 71 75 59 67 75 85 62 67 75 81 58 67 72 77 58 7 +68 69 74 57 64 69 74 57 68 69 74 57 68 71 75 59 68 75 75 59 68 71 75 59 67 72 77 58 67 75 77 62 67 75 81 62 7 +64 69 74 57 68 69 74 57 64 73 74 57 68 75 75 59 68 71 75 59 68 75 75 59 67 75 77 62 67 75 81 62 67 75 81 62 7 +68 69 74 57 64 73 74 57 64 73 74 57 68 71 75 59 68 75 75 59 64 75 79 59 67 75 81 62 67 75 81 62 67 79 81 62 7 +64 73 74 57 64 73 74 57 64 73 78 61 68 75 75 59 64 75 79 59 64 79 83 63 67 75 81 62 67 79 81 62 67 79 81 67 7 +64 73 78 61 64 77 78 65 68 77 86 65 64 79 83 63 68 79 83 63 64 79 83 67 67 79 81 67 71 83 81 67 67 79 81 67 7 +64 77 78 65 68 77 86 65 64 77 82 65 68 79 83 63 64 79 83 67 64 75 79 63 71 83 81 67 67 79 81 67 71 83 85 67 7 +68 77 86 65 64 77 82 65 64 77 82 65 64 79 83 67 64 75 79 63 64 75 83 67 67 79 81 67 71 83 85 67 67 87 81 71 7 +64 77 82 65 64 77 82 65 60 77 82 65 64 75 79 63 64 75 83 67 68 79 83 67 71 83 85 67 67 87 81 71 67 87 93 75 7 +64 85 94 76 68 94 106 83 76 111 120 94 68 91 100 81 71 103 118 96 76 116 122 99 75 99 109 87 79 111 123 100 75 111 123 100 1 +68 94 106 83 76 111 120 94 76 115 120 102 71 103 118 96 76 116 122 99 76 112 128 99 79 111 123 100 75 111 123 100 75 116 
+[data-file hunk: a long run of added rows, each consisting of 36 space-separated integers (nine groups of four) followed by a single-digit label in {1, 3, 4, 5, 7}; the raw numeric contents are not reproduced here]
113 87 79 107 104 87 84 99 104 83 82 104 112 89 82 104 112 89 82 100 104 89 3 +53 54 75 59 56 57 79 63 60 54 75 59 55 58 70 58 55 54 74 58 55 54 74 62 52 53 69 52 56 56 69 59 52 56 73 59 5 +60 54 75 59 53 54 71 59 56 57 75 59 55 54 74 62 55 58 77 58 51 54 74 58 52 56 73 59 56 56 73 59 52 60 73 59 5 +56 57 75 59 53 57 79 63 53 54 75 67 51 54 74 58 55 54 70 58 55 58 70 58 52 60 73 59 56 56 69 55 56 56 69 59 5 +53 57 79 63 53 54 75 67 53 54 79 67 55 54 70 58 55 58 70 58 55 54 74 58 56 56 69 55 56 56 69 59 52 56 73 59 5 +53 54 75 67 53 54 79 67 56 54 75 63 55 58 70 58 55 54 74 58 55 54 74 58 56 56 69 59 52 56 73 59 52 53 69 59 5 +53 54 79 67 56 54 75 63 53 51 75 59 55 54 74 58 55 54 74 58 55 54 70 58 52 56 73 59 52 53 69 59 56 53 76 59 5 +53 51 75 59 56 51 71 59 53 51 75 59 55 54 70 58 51 54 70 62 55 51 77 67 56 53 76 59 52 53 73 63 52 56 73 66 5 +53 51 75 59 53 51 75 59 53 57 75 63 55 51 77 67 55 54 81 71 51 58 81 75 52 56 73 66 56 56 84 78 56 63 88 78 5 +53 57 75 63 56 68 87 63 64 79 87 67 51 58 81 75 55 68 89 71 63 87 89 71 56 63 88 78 59 71 88 78 63 87 92 78 5 +64 79 87 67 60 83 91 74 60 99 108 88 63 87 89 71 67 91 100 79 71 103 109 87 63 87 92 78 63 87 96 74 63 91 100 78 1 +60 83 91 74 60 99 108 88 68 112 118 96 67 91 100 79 71 103 109 87 71 111 113 96 63 87 96 74 63 91 100 78 66 104 108 89 1 +68 112 118 96 71 107 118 96 71 112 122 96 71 111 113 96 71 111 123 100 71 107 123 100 66 104 108 89 70 113 122 96 70 113 122 96 1 +71 107 118 96 71 112 122 96 68 112 122 99 71 111 123 100 71 107 123 100 71 111 123 100 70 113 122 96 70 113 122 96 70 118 117 100 1 +71 112 122 96 68 112 122 99 64 112 122 99 71 107 123 100 71 111 123 100 67 111 123 100 70 113 122 96 70 118 117 100 66 113 122 100 1 +68 112 122 99 64 112 122 99 64 112 122 99 71 111 123 100 67 111 123 100 67 107 118 96 70 118 117 100 66 113 122 100 66 109 122 96 1 +64 112 122 99 64 112 122 99 64 112 122 99 67 111 123 100 67 107 118 96 67 107 123 100 66 113 122 100 66 109 122 96 63 113 122 96 1 +64 112 122 99 64 112 122 99 64 116 122 99 67 107 118 96 67 107 123 100 71 111 123 100 66 109 122 96 63 113 122 96 63 109 122 96 1 +64 112 122 99 64 116 122 99 64 112 128 96 67 107 123 100 71 111 123 100 71 111 123 96 63 113 122 96 63 109 122 96 63 109 122 96 1 +64 116 122 99 64 112 128 96 64 112 122 96 71 111 123 100 71 111 123 96 71 107 118 96 63 109 122 96 63 109 122 96 63 109 117 100 1 +64 112 128 96 64 112 122 96 60 107 122 96 71 111 123 96 71 107 118 96 67 103 113 96 63 109 122 96 63 109 117 100 63 104 117 96 1 +64 112 122 96 60 107 122 96 64 107 118 99 71 107 118 96 67 103 113 96 67 107 118 96 63 109 117 100 63 104 117 96 63 109 112 92 1 +60 107 122 96 64 107 118 99 64 107 122 96 67 103 113 96 67 107 118 96 71 116 118 100 63 104 117 96 63 109 112 92 66 104 117 96 1 +64 107 118 99 64 107 122 96 68 107 122 99 67 107 118 96 71 116 118 100 71 111 123 104 63 109 112 92 66 104 117 96 70 109 122 100 1 +68 107 122 99 68 116 122 99 68 116 128 99 71 111 123 104 71 111 123 104 67 111 123 100 70 109 122 100 66 113 127 103 66 113 122 103 1 +68 116 128 99 68 116 122 99 64 112 122 99 67 111 123 100 67 111 123 96 71 107 118 96 66 113 122 103 66 109 117 96 66 109 122 96 1 +68 116 122 99 64 112 122 99 68 107 118 96 67 111 123 96 71 107 118 96 71 107 118 96 66 109 117 96 66 109 122 96 66 104 122 96 1 +64 112 122 99 68 107 118 96 68 112 122 103 71 107 118 96 71 107 118 96 71 111 118 100 66 109 122 96 66 104 122 96 66 113 117 100 1 +68 107 118 96 68 112 122 103 71 112 122 99 71 107 118 96 71 111 118 100 71 111 123 100 66 104 122 96 66 113 117 100 70 113 122 103 1 
+68 112 122 103 71 112 122 99 68 99 108 85 71 111 118 100 71 111 123 100 71 103 118 96 66 113 117 100 70 113 122 103 70 113 122 103 1 +71 112 122 99 68 99 108 85 64 83 91 74 71 111 123 100 71 103 118 96 67 87 100 79 70 113 122 103 70 113 122 103 66 109 122 96 1 +60 68 67 52 60 61 67 56 64 64 71 56 63 68 70 54 63 64 67 54 63 68 70 54 66 75 80 66 70 75 73 59 63 67 66 55 7 +60 68 67 56 64 68 67 56 60 68 67 56 63 64 70 58 59 64 67 54 63 68 70 58 63 67 66 55 63 67 73 55 63 67 69 59 7 +64 68 67 56 60 68 67 56 64 68 67 52 59 64 67 54 63 68 70 58 63 64 70 58 63 67 73 55 63 67 69 59 63 67 69 55 7 +60 68 67 56 64 68 67 52 64 61 63 52 63 68 70 58 63 64 70 58 63 61 63 54 63 67 69 59 63 67 69 55 59 63 69 55 7 +79 91 96 71 75 91 93 71 75 83 89 71 78 87 92 74 78 87 88 70 78 87 88 70 75 88 90 72 75 91 97 72 79 88 97 72 4 +75 91 93 71 75 83 89 71 75 87 93 71 78 87 88 70 78 87 88 70 78 87 92 74 75 91 97 72 79 88 97 72 79 88 93 72 4 +75 87 93 71 75 87 93 67 79 91 93 71 78 87 92 74 74 87 96 74 74 87 88 74 79 88 93 72 75 91 97 72 75 88 90 72 4 +79 91 93 71 79 91 96 71 75 91 93 71 74 87 88 74 78 87 96 70 78 91 88 70 75 88 90 72 79 88 93 68 79 95 93 72 4 +79 91 96 71 75 91 93 71 75 87 96 71 78 87 96 70 78 91 88 70 78 87 88 70 79 88 93 68 79 95 93 72 79 91 90 68 4 +75 87 96 71 71 83 93 67 67 79 85 62 78 87 88 70 78 96 92 74 74 87 88 70 79 91 90 68 79 88 90 72 79 88 93 72 4 +71 83 93 67 67 79 85 62 63 75 85 62 78 96 92 74 74 87 88 70 66 79 80 66 79 88 90 72 79 88 93 72 71 84 86 68 4 +67 79 85 67 71 79 85 67 71 79 85 62 63 83 80 63 66 83 84 66 66 79 80 63 67 81 86 64 67 81 86 64 67 81 82 64 7 +71 79 85 67 71 79 85 62 71 83 89 67 66 83 84 66 66 79 80 63 66 83 84 63 67 81 86 64 67 81 82 64 67 77 86 64 7 +71 79 85 62 71 83 89 67 75 87 96 75 66 79 80 63 66 83 84 63 70 83 84 66 67 81 82 64 67 77 86 64 67 81 82 64 7 +71 83 89 67 75 87 96 75 79 91 96 79 66 83 84 63 70 83 84 66 74 91 96 70 67 77 86 64 67 81 82 64 67 84 82 68 7 +75 87 96 75 79 91 96 79 79 91 96 75 70 83 84 66 74 91 96 70 82 91 96 81 67 81 82 64 67 84 82 68 75 91 97 79 4 +79 91 96 79 79 91 96 75 75 79 89 75 74 91 96 70 82 91 96 81 82 91 100 78 67 84 82 68 75 91 97 79 79 95 101 79 4 +79 91 96 75 75 79 89 75 59 64 77 71 82 91 96 81 82 91 100 78 74 83 92 74 75 91 97 79 79 95 101 79 75 88 97 79 4 +75 79 89 75 59 64 77 71 55 64 81 67 82 91 100 78 74 83 92 74 63 67 80 70 79 95 101 79 75 88 97 79 75 81 86 75 4 +59 64 77 71 55 64 81 67 67 64 85 67 74 83 92 74 63 67 80 70 59 63 73 66 75 88 97 79 75 81 86 75 63 66 79 68 5 +71 79 89 71 75 83 89 71 75 87 89 75 66 63 84 66 70 75 88 70 74 79 88 74 63 57 75 68 67 73 82 72 71 84 86 75 7 +79 91 96 75 79 95 100 79 84 103 104 87 74 87 96 70 78 91 100 78 86 91 96 81 75 81 90 68 75 81 93 68 75 84 90 72 7 +79 95 100 79 84 103 104 87 79 107 109 92 78 91 100 78 86 91 96 81 86 100 108 81 75 81 93 68 75 84 90 72 75 84 90 75 7 +84 103 104 87 79 107 109 92 79 107 109 87 86 91 96 81 86 100 108 81 82 104 112 89 75 84 90 72 75 84 90 75 79 95 105 83 3 +79 107 109 92 79 107 109 87 84 107 113 87 86 100 108 81 82 104 112 89 82 104 112 89 75 84 90 75 79 95 105 83 83 103 110 86 3 +71 83 81 62 55 61 63 46 51 54 67 50 78 96 104 81 66 79 76 59 59 56 66 44 79 95 105 83 75 84 90 68 63 66 68 49 5 +55 61 63 46 51 54 67 50 55 58 70 58 66 79 76 59 59 56 66 44 52 53 69 52 75 84 90 68 63 66 68 49 56 54 65 49 5 +55 58 70 58 55 54 74 58 55 54 74 62 52 53 69 52 56 56 69 59 52 56 73 59 56 54 65 49 56 54 68 53 56 57 72 57 5 +55 54 74 58 55 54 74 62 55 58 77 58 56 56 69 59 52 56 73 59 56 56 73 59 56 54 68 53 56 57 72 57 56 57 72 57 5 +55 54 74 62 55 58 77 58 51 
54 74 58 52 56 73 59 56 56 73 59 52 60 73 59 56 57 72 57 56 57 72 57 56 57 75 57 5 +55 58 77 58 51 54 74 58 55 54 70 58 56 56 73 59 52 60 73 59 56 56 69 55 56 57 72 57 56 57 75 57 56 54 72 57 5 +51 54 74 58 55 54 70 58 55 58 70 58 52 60 73 59 56 56 69 55 56 56 69 59 56 57 75 57 56 54 72 57 59 54 79 60 5 +55 58 70 58 55 54 74 58 55 54 74 58 56 56 69 59 52 56 73 59 52 53 69 59 59 54 79 60 56 54 79 64 59 57 82 68 5 +55 54 74 58 55 54 74 58 55 54 70 58 52 56 73 59 52 53 69 59 56 53 76 59 56 54 79 64 59 57 82 68 59 60 86 75 5 +51 54 70 62 55 51 77 67 55 54 81 71 52 53 73 63 52 56 73 66 56 56 84 78 59 60 93 79 63 70 97 83 67 77 97 83 5 +55 51 77 67 55 54 81 71 51 58 81 75 52 56 73 66 56 56 84 78 56 63 88 78 63 70 97 83 67 77 97 83 75 91 105 86 5 +55 54 81 71 51 58 81 75 55 68 89 71 56 56 84 78 56 63 88 78 59 71 88 78 67 77 97 83 75 91 105 86 79 103 110 90 5 +63 87 89 71 67 91 100 79 71 103 109 87 63 87 92 78 63 87 96 74 63 91 100 78 71 103 110 86 67 99 101 83 75 99 101 79 1 +67 91 100 79 71 103 109 87 71 111 113 96 63 87 96 74 63 91 100 78 66 104 108 89 67 99 101 83 75 99 101 79 67 99 110 86 1 +71 103 109 87 71 111 113 96 71 111 123 100 63 91 100 78 66 104 108 89 70 113 122 96 75 99 101 79 67 99 110 86 71 112 119 98 1 +71 111 123 100 71 107 123 100 71 111 123 100 70 113 122 96 70 113 122 96 70 118 117 100 71 112 119 98 71 108 119 98 67 108 119 98 1 +67 107 118 96 67 107 123 100 71 111 123 100 66 109 122 96 63 113 122 96 63 109 122 96 63 108 119 98 63 112 119 94 63 108 114 94 1 +67 107 123 100 71 111 123 100 71 111 123 96 63 113 122 96 63 109 122 96 63 109 122 96 63 112 119 94 63 108 114 94 63 103 114 94 1 +71 111 123 100 71 111 123 96 71 107 118 96 63 109 122 96 63 109 122 96 63 109 117 100 63 108 114 94 63 103 114 94 63 103 119 90 1 +71 111 123 96 71 107 118 96 67 103 113 96 63 109 122 96 63 109 117 100 63 104 117 96 63 103 114 94 63 103 119 90 63 103 119 94 1 +71 107 118 96 67 103 113 96 67 107 118 96 63 109 117 100 63 104 117 96 63 109 112 92 63 103 119 90 63 103 119 94 67 103 119 94 1 +67 103 113 96 67 107 118 96 71 116 118 100 63 104 117 96 63 109 112 92 66 104 117 96 63 103 119 94 67 103 119 94 63 103 114 94 1 +67 107 118 96 71 116 118 100 71 111 123 104 63 109 112 92 66 104 117 96 70 109 122 100 67 103 119 94 63 103 114 94 67 108 119 98 1 +71 116 118 100 71 111 123 104 71 111 123 104 66 104 117 96 70 109 122 100 66 113 127 103 63 103 114 94 67 108 119 98 67 108 124 98 1 +71 111 123 104 71 111 123 104 67 111 123 100 70 109 122 100 66 113 127 103 66 113 122 103 67 108 119 98 67 108 124 98 63 108 124 98 1 +67 111 123 100 67 111 123 96 71 107 118 96 66 113 122 103 66 109 117 96 66 109 122 96 63 108 124 98 67 108 119 98 63 108 119 98 1 +71 107 118 96 71 107 118 96 71 111 118 100 66 109 122 96 66 104 122 96 66 113 117 100 63 108 119 98 63 108 119 98 67 112 124 101 1 +71 107 118 96 71 111 118 100 71 111 123 100 66 104 122 96 66 113 117 100 70 113 122 103 63 108 119 98 67 112 124 101 67 112 130 98 1 +71 111 118 100 71 111 123 100 71 103 118 96 66 113 117 100 70 113 122 103 70 113 122 103 67 112 124 101 67 112 130 98 63 112 124 98 1 +71 111 123 100 71 103 118 96 67 87 100 79 70 113 122 103 70 113 122 103 66 109 122 96 67 112 130 98 63 112 124 98 67 108 119 98 1 +63 64 67 54 63 68 70 54 63 64 70 58 70 75 73 59 63 67 66 55 63 67 66 55 67 73 82 64 67 70 72 57 59 66 65 60 7 +63 68 70 54 63 64 70 58 59 64 67 54 63 67 66 55 63 67 66 55 63 67 73 55 67 70 72 57 59 66 65 60 67 70 75 60 7 +59 64 67 54 63 68 70 58 63 64 70 58 63 67 73 55 63 67 69 59 63 67 69 55 67 70 75 60 67 66 72 57 63 66 68 57 7 +78 87 88 
70 78 87 88 70 78 87 92 74 75 91 97 72 79 88 97 72 79 88 93 72 78 88 97 69 78 92 97 73 78 92 93 73 4 +78 87 88 70 78 87 92 74 74 87 96 74 79 88 97 72 79 88 93 72 75 91 97 72 78 92 97 73 78 92 93 73 82 88 97 69 4 +78 87 92 74 74 87 96 74 74 87 88 74 79 88 93 72 75 91 97 72 75 88 90 72 78 92 93 73 82 88 97 69 74 88 93 73 4 +74 87 96 74 74 87 88 74 78 87 96 70 75 91 97 72 75 88 90 72 79 88 93 68 82 88 97 69 74 88 93 73 74 84 97 69 4 +74 87 88 74 78 87 96 70 78 91 88 70 75 88 90 72 79 88 93 68 79 95 93 72 74 88 93 73 74 84 97 69 82 84 89 73 4 +78 96 92 74 74 87 88 70 66 79 80 66 79 88 90 72 79 88 93 72 71 84 86 68 78 88 89 69 78 88 89 73 78 88 93 73 4 +66 79 80 66 63 83 80 63 66 83 84 66 71 84 86 68 67 81 86 64 67 81 86 64 78 88 93 73 70 79 93 65 70 79 85 62 7 +66 83 84 66 66 79 80 63 66 83 84 63 67 81 86 64 67 81 82 64 67 77 86 64 70 79 85 62 67 84 85 62 67 79 82 65 7 +74 91 96 70 82 91 96 81 82 91 100 78 67 84 82 68 75 91 97 79 79 95 101 79 67 75 82 62 70 84 85 69 78 88 93 76 4 +82 91 96 81 82 91 100 78 74 83 92 74 75 91 97 79 79 95 101 79 75 88 97 79 70 84 85 69 78 88 93 76 74 79 89 73 4 +59 63 73 66 66 63 84 66 70 75 88 70 63 66 79 68 63 57 75 68 67 73 82 72 60 67 78 62 53 49 78 58 60 60 78 65 5 +70 75 88 70 74 79 88 74 74 87 96 70 67 73 82 72 71 84 86 75 75 81 90 68 60 60 78 65 67 75 85 73 70 79 85 73 7 +74 79 88 74 74 87 96 70 78 91 100 78 71 84 86 75 75 81 90 68 75 81 93 68 67 75 85 73 70 79 85 73 70 79 85 65 7 +86 100 108 81 82 104 112 89 82 104 112 89 75 84 90 75 79 95 105 83 83 103 110 86 70 84 89 69 78 92 97 80 82 106 114 87 7 +82 104 112 89 82 100 104 89 78 96 104 81 83 99 110 86 79 95 105 86 79 95 105 83 85 111 114 90 85 106 114 94 82 102 114 90 3 +78 96 104 81 66 79 76 59 59 56 66 44 79 95 105 83 75 84 90 68 63 66 68 49 82 102 114 90 74 92 97 80 70 79 82 65 3 +66 79 76 59 59 56 66 44 52 53 69 52 75 84 90 68 63 66 68 49 56 54 65 49 74 92 97 80 70 79 82 65 60 63 74 55 5 +52 60 73 59 56 56 69 55 56 56 69 59 56 57 75 57 56 54 72 57 59 54 79 60 60 63 82 69 60 56 78 69 60 60 93 80 5 +56 56 69 59 52 56 73 59 52 53 69 59 59 54 79 60 56 54 79 64 59 57 82 68 60 60 93 80 63 63 97 90 67 75 101 87 5 +52 56 73 59 52 53 69 59 56 53 76 59 56 54 79 64 59 57 82 68 59 60 86 75 63 63 97 90 67 75 101 87 70 84 101 87 5 +52 53 69 59 56 53 76 59 52 53 73 63 59 57 82 68 59 60 86 75 59 60 93 79 67 75 101 87 70 84 101 87 82 92 105 90 5 +56 53 76 59 52 53 73 63 52 56 73 66 59 60 86 75 59 60 93 79 63 70 97 83 70 84 101 87 82 92 105 90 89 106 114 94 5 +52 53 73 63 52 56 73 66 56 56 84 78 59 60 93 79 63 70 97 83 67 77 97 83 82 92 105 90 89 106 114 94 93 115 124 97 5 +56 56 84 78 56 63 88 78 59 71 88 78 67 77 97 83 75 91 105 86 79 103 110 90 93 115 124 97 93 120 124 104 82 120 124 101 3 +56 63 88 78 59 71 88 78 63 87 92 78 75 91 105 86 79 103 110 90 71 103 110 86 93 120 124 104 82 120 124 101 70 111 119 94 3 +63 87 92 78 63 87 96 74 63 91 100 78 71 103 110 86 67 99 101 83 75 99 101 79 70 111 119 94 67 106 114 90 63 92 105 80 1 +63 87 96 74 63 91 100 78 66 104 108 89 67 99 101 83 75 99 101 79 67 99 110 86 67 106 114 90 63 92 105 80 63 88 105 83 1 +66 104 108 89 70 113 122 96 70 113 122 96 67 99 110 86 71 112 119 98 71 108 119 98 63 88 105 83 67 97 110 87 67 111 114 94 1 +70 113 122 96 70 113 122 96 70 118 117 100 71 112 119 98 71 108 119 98 67 108 119 98 67 97 110 87 67 111 114 94 67 106 119 97 1 +70 113 122 96 70 118 117 100 66 113 122 100 71 108 119 98 67 108 119 98 63 112 114 98 67 111 114 94 67 106 119 97 67 106 114 94 1 +70 118 117 100 66 113 122 100 66 109 122 96 67 108 119 98 63 112 114 98 63 108 119 98 
67 106 119 97 67 106 114 94 67 111 124 94 1 +66 113 122 100 66 109 122 96 63 113 122 96 63 112 114 98 63 108 119 98 63 112 119 94 67 106 114 94 67 111 124 94 63 106 114 94 1 +66 109 122 96 63 113 122 96 63 109 122 96 63 108 119 98 63 112 119 94 63 108 114 94 67 111 124 94 63 106 114 94 63 102 114 90 1 +63 113 122 96 63 109 122 96 63 109 122 96 63 112 119 94 63 108 114 94 63 103 114 94 63 106 114 94 63 102 114 90 63 102 119 94 1 +63 109 122 96 63 109 122 96 63 109 117 100 63 108 114 94 63 103 114 94 63 103 119 90 63 102 114 90 63 102 119 94 63 102 119 94 1 +63 109 117 100 63 104 117 96 63 109 112 92 63 103 119 90 63 103 119 94 67 103 119 94 63 102 119 94 63 102 114 94 67 106 114 97 1 +66 113 127 103 66 113 122 103 66 109 117 96 67 108 124 98 63 108 124 98 67 108 119 98 63 106 119 97 63 111 124 97 63 111 119 101 1 +66 109 117 96 66 109 122 96 66 104 122 96 67 108 119 98 63 108 119 98 63 108 119 98 63 111 119 101 63 106 119 101 63 111 119 97 1 +66 109 122 96 66 104 122 96 66 113 117 100 63 108 119 98 63 108 119 98 67 112 124 101 63 106 119 101 63 111 119 97 63 111 124 104 1 +66 104 122 96 66 113 117 100 70 113 122 103 63 108 119 98 67 112 124 101 67 112 130 98 63 111 119 97 63 111 124 104 63 111 119 97 1 +66 113 117 100 70 113 122 103 70 113 122 103 67 112 124 101 67 112 130 98 63 112 124 98 63 111 124 104 63 111 119 97 67 111 124 97 1 +70 113 122 103 70 113 122 103 66 109 122 96 67 112 130 98 63 112 124 98 67 108 119 98 63 111 119 97 67 111 124 97 67 106 124 94 1 +70 113 122 103 66 109 122 96 63 96 104 89 63 112 124 98 67 108 119 98 63 99 110 94 67 111 124 97 67 106 124 94 67 111 114 101 1 +66 109 122 96 63 96 104 89 63 83 88 78 67 108 119 98 63 99 110 94 63 88 101 79 67 106 124 94 67 111 114 101 67 106 114 90 1 +70 75 73 59 63 67 66 55 63 67 66 55 67 73 82 64 67 70 72 57 59 66 65 60 63 71 82 65 67 75 82 69 60 71 74 58 7 +63 67 66 55 63 67 73 55 63 67 69 59 59 66 65 60 67 70 75 60 67 66 72 57 60 71 74 58 63 71 74 58 67 71 74 62 7 +75 88 90 72 75 91 97 72 79 88 97 72 74 88 93 73 78 88 97 69 78 92 97 73 72 89 94 72 76 89 94 72 80 94 94 72 4 +75 91 97 72 79 88 97 72 79 88 93 72 78 88 97 69 78 92 97 73 78 92 93 73 76 89 94 72 80 94 94 72 80 94 94 76 4 +79 88 97 72 79 88 93 72 75 91 97 72 78 92 97 73 78 92 93 73 82 88 97 69 80 94 94 72 80 94 94 76 80 94 94 72 4 +79 88 93 72 75 91 97 72 75 88 90 72 78 92 93 73 82 88 97 69 74 88 93 73 80 94 94 76 80 94 94 72 80 89 94 72 4 +75 91 97 72 75 88 90 72 79 88 93 68 82 88 97 69 74 88 93 73 74 84 97 69 80 94 94 72 80 89 94 72 76 85 86 68 4 +75 88 90 72 79 88 93 68 79 95 93 72 74 88 93 73 74 84 97 69 82 84 89 73 80 89 94 72 76 85 86 68 76 85 90 68 4 +79 88 90 72 79 88 93 72 71 84 86 68 78 88 89 69 78 88 89 73 78 88 93 73 80 85 86 68 76 85 90 68 80 89 94 72 4 +79 88 93 72 71 84 86 68 67 81 86 64 78 88 89 73 78 88 93 73 70 79 93 65 76 85 90 68 80 89 94 72 76 85 94 68 4 +71 84 86 68 67 81 86 64 67 81 86 64 78 88 93 73 70 79 93 65 70 79 85 62 80 89 94 72 76 85 94 68 68 77 82 65 7 +67 81 86 64 67 81 82 64 67 77 86 64 70 79 85 62 67 84 85 62 67 79 82 65 68 77 82 65 68 77 86 65 72 81 86 68 7 +67 81 82 64 67 84 82 68 75 91 97 79 67 84 89 65 67 75 82 62 70 84 85 69 72 81 86 65 68 77 82 65 64 73 78 57 7 +67 84 82 68 75 91 97 79 79 95 101 79 67 75 82 62 70 84 85 69 78 88 93 76 68 77 82 65 64 73 78 57 68 81 78 68 4 +75 91 97 79 79 95 101 79 75 88 97 79 70 84 85 69 78 88 93 76 74 79 89 73 64 73 78 57 68 81 78 68 72 81 90 76 4 +79 95 101 79 75 88 97 79 75 81 86 75 78 88 93 76 74 79 89 73 67 75 89 73 68 81 78 68 72 81 90 76 68 77 86 68 7 +75 81 86 75 63 66 79 68 63 57 75 
68 67 75 89 73 60 67 78 62 53 49 78 58 68 77 86 68 60 62 74 57 53 49 74 57 5 +63 66 79 68 63 57 75 68 67 73 82 72 60 67 78 62 53 49 78 58 60 60 78 65 60 62 74 57 53 49 74 57 64 69 86 72 5 +63 57 75 68 67 73 82 72 71 84 86 75 53 49 78 58 60 60 78 65 67 75 85 73 53 49 74 57 64 69 86 72 76 85 94 76 5 +71 84 86 75 75 81 90 68 75 81 93 68 67 75 85 73 70 79 85 73 70 79 85 65 76 85 94 76 72 89 94 72 76 85 86 68 7 +75 84 90 72 75 84 90 75 79 95 105 83 70 79 85 69 70 84 89 69 78 92 97 80 72 85 86 72 72 94 98 76 80 98 106 83 7 +75 84 90 75 79 95 105 83 83 103 110 86 70 84 89 69 78 92 97 80 82 106 114 87 72 94 98 76 80 98 106 83 80 102 111 87 7 +79 95 105 83 83 103 110 86 83 99 110 86 78 92 97 80 82 106 114 87 85 111 114 90 80 98 106 83 80 102 111 87 80 106 115 94 3 +83 99 110 86 79 95 105 86 79 95 105 83 85 111 114 90 85 106 114 94 82 102 114 90 80 106 115 94 84 111 115 94 84 106 115 91 3 +79 95 105 86 79 95 105 83 75 84 90 68 85 106 114 94 82 102 114 90 74 92 97 80 84 111 115 94 84 106 115 91 84 102 111 87 3 +79 95 105 83 75 84 90 68 63 66 68 49 82 102 114 90 74 92 97 80 70 79 82 65 84 106 115 91 84 102 111 87 80 94 102 83 3 +63 66 68 49 56 54 65 49 56 54 68 53 70 79 82 65 60 63 74 55 57 60 70 55 80 94 102 83 76 89 90 68 64 73 71 54 5 +56 57 72 57 56 57 75 57 56 54 72 57 60 71 85 69 60 63 82 69 60 56 78 69 60 69 86 76 60 66 98 83 64 69 98 87 5 +56 54 72 57 59 54 79 60 56 54 79 64 60 56 78 69 60 60 93 80 63 63 97 90 64 69 98 87 72 81 102 87 80 94 111 91 5 +59 54 79 60 56 54 79 64 59 57 82 68 60 60 93 80 63 63 97 90 67 75 101 87 72 81 102 87 80 94 111 91 84 106 111 91 5 +56 54 79 64 59 57 82 68 59 60 86 75 63 63 97 90 67 75 101 87 70 84 101 87 80 94 111 91 84 106 111 91 92 115 120 102 5 +59 57 82 68 59 60 86 75 59 60 93 79 67 75 101 87 70 84 101 87 82 92 105 90 84 106 111 91 92 115 120 102 97 115 125 102 5 +59 60 93 79 63 70 97 83 67 77 97 83 82 92 105 90 89 106 114 94 93 115 124 97 97 115 125 102 92 106 115 91 80 106 106 91 3 +67 77 97 83 75 91 105 86 79 103 110 90 93 115 124 97 93 120 124 104 82 120 124 101 80 106 106 91 80 111 120 98 76 111 115 94 1 +75 91 105 86 79 103 110 90 71 103 110 86 93 120 124 104 82 120 124 101 70 111 119 94 80 111 120 98 76 111 115 94 68 106 115 91 1 +71 103 110 86 67 99 101 83 75 99 101 79 70 111 119 94 67 106 114 90 63 92 105 80 68 106 115 91 68 102 115 91 64 89 102 79 1 +67 99 101 83 75 99 101 79 67 99 110 86 67 106 114 90 63 92 105 80 63 88 105 83 68 102 115 91 64 89 102 79 60 85 94 79 1 +75 99 101 79 67 99 110 86 71 112 119 98 63 92 105 80 63 88 105 83 67 97 110 87 64 89 102 79 60 85 94 79 64 89 98 83 1 +67 99 110 86 71 112 119 98 71 108 119 98 63 88 105 83 67 97 110 87 67 111 114 94 60 85 94 79 64 89 98 83 64 98 106 91 1 +71 112 119 98 71 108 119 98 67 108 119 98 67 97 110 87 67 111 114 94 67 106 119 97 64 89 98 83 64 98 106 91 64 106 115 94 1 +67 108 119 98 63 112 114 98 63 108 119 98 67 106 119 97 67 106 114 94 67 111 124 94 64 106 115 94 64 106 115 94 64 106 115 98 1 +63 112 114 98 63 108 119 98 63 112 119 94 67 106 114 94 67 111 124 94 63 106 114 94 64 106 115 94 64 106 115 98 64 106 120 94 1 +63 108 119 98 63 112 119 94 63 108 114 94 67 111 124 94 63 106 114 94 63 102 114 90 64 106 115 98 64 106 120 94 64 102 115 94 1 +63 112 119 94 63 108 114 94 63 103 114 94 63 106 114 94 63 102 114 90 63 102 119 94 64 106 120 94 64 102 115 94 64 102 115 94 1 +63 108 114 94 63 103 114 94 63 103 119 90 63 102 114 90 63 102 119 94 63 102 119 94 64 102 115 94 64 102 115 94 64 106 120 94 1 +63 103 119 94 67 103 119 94 63 103 114 94 63 102 114 94 67 106 114 97 63 102 114 90 68 
106 115 94 64 102 115 94 64 102 115 94 1 +67 103 119 94 63 103 114 94 67 108 119 98 67 106 114 97 63 102 114 90 63 106 119 94 64 102 115 94 64 102 115 94 64 106 120 94 1 +67 108 119 98 63 108 119 98 63 108 119 98 63 111 119 101 63 106 119 101 63 111 119 97 68 106 120 98 64 111 125 98 64 102 115 98 1 +63 108 119 98 67 112 124 101 67 112 130 98 63 111 119 97 63 111 124 104 63 111 119 97 64 102 115 98 64 111 120 98 68 111 125 98 1 +67 112 124 101 67 112 130 98 63 112 124 98 63 111 124 104 63 111 119 97 67 111 124 97 64 111 120 98 68 111 125 98 68 111 120 98 1 +67 112 130 98 63 112 124 98 67 108 119 98 63 111 119 97 67 111 124 97 67 106 124 94 68 111 125 98 68 111 120 98 68 111 131 102 1 +63 112 124 98 67 108 119 98 63 99 110 94 67 111 124 97 67 106 124 94 67 111 114 101 68 111 120 98 68 111 131 102 72 111 120 98 1 +67 108 119 98 63 99 110 94 63 88 101 79 67 106 124 94 67 111 114 101 67 106 114 90 68 111 131 102 72 111 120 98 72 111 111 98 1 +63 99 110 94 63 88 101 79 59 77 79 68 67 111 114 101 67 106 114 90 63 97 97 83 72 111 120 98 72 111 111 98 68 102 106 87 1 +67 70 72 57 59 66 65 60 67 70 75 60 67 75 82 69 60 71 74 58 63 71 74 58 68 73 78 68 68 77 78 65 64 73 71 57 7 +59 66 65 60 67 70 75 60 67 66 72 57 60 71 74 58 63 71 74 58 67 71 74 62 68 77 78 65 64 73 71 57 64 77 74 61 7 +67 70 75 60 67 66 72 57 63 66 68 57 63 71 74 58 67 71 74 62 63 71 74 58 64 73 71 57 64 77 74 61 64 73 74 61 7 +67 66 72 57 63 66 68 57 63 63 68 53 67 71 74 62 63 71 74 58 63 67 67 51 64 77 74 61 64 73 74 61 64 66 71 57 7 +78 88 97 69 78 92 97 73 78 92 93 73 76 89 94 72 80 94 94 72 80 94 94 76 80 91 96 70 80 91 96 74 76 95 91 74 4 +78 92 93 73 82 88 97 69 74 88 93 73 80 94 94 76 80 94 94 72 80 89 94 72 76 95 91 74 80 91 96 70 76 91 91 70 4 +82 84 89 73 78 84 89 69 78 88 89 69 76 85 90 68 76 89 86 68 80 85 86 68 71 87 87 70 76 87 91 70 76 87 87 70 4 +78 88 89 69 78 88 89 73 78 88 93 73 80 85 86 68 76 85 90 68 80 89 94 72 76 87 87 70 76 87 91 63 80 91 91 67 4 +78 88 93 73 70 79 93 65 70 79 85 62 80 89 94 72 76 85 94 68 68 77 82 65 80 91 91 67 76 87 91 70 71 83 87 67 4 +67 84 85 62 67 79 82 65 67 84 89 65 68 77 86 65 72 81 86 68 72 81 86 65 68 83 83 63 68 79 87 63 68 79 83 63 7 +67 79 82 65 67 84 89 65 67 75 82 62 72 81 86 68 72 81 86 65 68 77 82 65 68 79 87 63 68 79 83 63 68 79 83 67 7 +53 49 78 58 60 60 78 65 67 75 85 73 53 49 74 57 64 69 86 72 76 85 94 76 60 54 75 59 71 79 91 78 80 99 104 78 5 +67 75 85 73 70 79 85 73 70 79 85 65 76 85 94 76 72 89 94 72 76 85 86 68 80 99 104 78 84 95 100 78 76 87 91 70 7 +70 79 85 65 70 79 85 69 70 84 89 69 76 85 86 68 72 85 86 72 72 94 98 76 76 87 91 70 76 91 96 74 76 99 104 85 7 +70 79 85 69 70 84 89 69 78 92 97 80 72 85 86 72 72 94 98 76 80 98 106 83 76 91 96 74 76 99 104 85 80 103 113 88 7 +70 84 89 69 78 92 97 80 82 106 114 87 72 94 98 76 80 98 106 83 80 102 111 87 76 99 104 85 80 103 113 88 80 103 113 88 3 +78 92 97 80 82 106 114 87 85 111 114 90 80 98 106 83 80 102 111 87 80 106 115 94 80 103 113 88 80 103 113 88 84 103 113 88 3 +85 111 114 90 85 106 114 94 82 102 114 90 80 106 115 94 84 111 115 94 84 106 115 91 84 103 113 88 84 103 113 92 88 103 113 96 3 +82 102 114 90 74 92 97 80 70 79 82 65 84 106 115 91 84 102 111 87 80 94 102 83 88 103 113 96 88 107 113 92 88 107 118 92 3 +74 92 97 80 70 79 82 65 60 63 74 55 84 102 111 87 80 94 102 83 76 89 90 68 88 107 113 92 88 107 118 92 84 103 108 88 3 +60 71 85 69 60 63 82 69 60 56 78 69 60 69 86 76 60 66 98 83 64 69 98 87 64 75 91 78 71 87 100 81 80 99 108 88 5 +60 63 82 69 60 56 78 69 60 60 93 80 60 66 98 83 64 69 98 87 72 81 102 
87 71 87 100 81 80 99 108 88 84 107 118 96 5 +60 56 78 69 60 60 93 80 63 63 97 90 64 69 98 87 72 81 102 87 80 94 111 91 80 99 108 88 84 107 118 96 84 112 118 96 5 +60 60 93 80 63 63 97 90 67 75 101 87 72 81 102 87 80 94 111 91 84 106 111 91 84 107 118 96 84 112 118 96 92 116 128 103 3 +63 63 97 90 67 75 101 87 70 84 101 87 80 94 111 91 84 106 111 91 92 115 120 102 84 112 118 96 92 116 128 103 97 121 128 103 3 +67 75 101 87 70 84 101 87 82 92 105 90 84 106 111 91 92 115 120 102 97 115 125 102 92 116 128 103 97 121 128 103 88 116 122 96 3 +70 84 101 87 82 92 105 90 89 106 114 94 92 115 120 102 97 115 125 102 92 106 115 91 97 121 128 103 88 116 122 96 92 103 108 81 1 +89 106 114 94 93 115 124 97 93 120 124 104 92 106 115 91 80 106 106 91 80 111 120 98 92 103 108 81 80 87 96 81 68 83 100 85 1 +93 115 124 97 93 120 124 104 82 120 124 101 80 106 106 91 80 111 120 98 76 111 115 94 80 87 96 81 68 83 100 85 71 95 108 88 1 +93 120 124 104 82 120 124 101 70 111 119 94 80 111 120 98 76 111 115 94 68 106 115 91 68 83 100 85 71 95 108 88 71 103 113 92 1 +82 120 124 101 70 111 119 94 67 106 114 90 76 111 115 94 68 106 115 91 68 102 115 91 71 95 108 88 71 103 113 92 68 107 118 92 1 +70 111 119 94 67 106 114 90 63 92 105 80 68 106 115 91 68 102 115 91 64 89 102 79 71 103 113 92 68 107 118 92 64 99 104 85 1 +67 106 114 90 63 92 105 80 63 88 105 83 68 102 115 91 64 89 102 79 60 85 94 79 68 107 118 92 64 99 104 85 56 91 104 81 1 +63 88 105 83 67 97 110 87 67 111 114 94 60 85 94 79 64 89 98 83 64 98 106 91 56 91 104 81 60 95 113 88 64 95 104 88 1 +67 111 114 94 67 106 119 97 67 106 114 94 64 98 106 91 64 106 115 94 64 106 115 94 64 95 104 88 64 103 113 92 60 103 118 92 1 +67 106 119 97 67 106 114 94 67 111 124 94 64 106 115 94 64 106 115 94 64 106 115 98 64 103 113 92 60 103 118 92 60 99 113 92 1 +67 106 114 94 67 111 124 94 63 106 114 94 64 106 115 94 64 106 115 98 64 106 120 94 60 103 118 92 60 99 113 92 64 103 118 92 1 +63 106 114 94 63 102 114 90 63 102 119 94 64 106 120 94 64 102 115 94 64 102 115 94 64 103 118 92 64 107 113 96 64 107 122 92 1 +63 102 119 94 63 102 119 94 63 102 114 94 64 102 115 94 64 106 120 94 68 106 115 94 64 107 122 92 64 107 113 92 64 103 113 92 1 +63 102 114 94 67 106 114 97 63 102 114 90 68 106 115 94 64 102 115 94 64 102 115 94 64 103 113 92 64 103 118 96 64 103 118 99 1 +67 106 114 97 63 102 114 90 63 106 119 94 64 102 115 94 64 102 115 94 64 106 120 94 64 103 118 96 64 103 118 99 64 107 118 96 1 +63 102 114 90 63 106 119 94 63 106 119 97 64 102 115 94 64 106 120 94 64 111 125 102 64 103 118 99 64 107 118 96 68 112 122 96 1 +63 106 119 97 63 111 124 97 63 111 119 101 64 111 125 102 68 111 125 102 68 106 120 98 68 112 122 96 68 112 122 99 64 103 118 96 1 +63 111 124 97 63 111 119 101 63 106 119 101 68 111 125 102 68 106 120 98 64 111 125 98 68 112 122 99 64 103 118 96 64 107 122 99 1 +63 106 119 101 63 111 119 97 63 111 124 104 64 111 125 98 64 102 115 98 64 111 120 98 64 107 122 99 64 107 118 96 64 107 118 99 1 +63 111 119 97 63 111 124 104 63 111 119 97 64 102 115 98 64 111 120 98 68 111 125 98 64 107 118 96 64 107 118 99 68 107 122 96 1 +63 111 124 104 63 111 119 97 67 111 124 97 64 111 120 98 68 111 125 98 68 111 120 98 64 107 118 99 68 107 122 96 68 112 122 99 1 +67 111 124 97 67 106 124 94 67 111 114 101 68 111 120 98 68 111 131 102 72 111 120 98 68 112 122 99 68 107 128 96 71 112 128 99 1 +67 106 124 94 67 111 114 101 67 106 114 90 68 111 131 102 72 111 120 98 72 111 111 98 68 107 128 96 71 112 128 99 71 112 122 96 1 +63 97 97 83 60 84 89 73 63 79 89 73 68 102 106 87 68 89 
102 79 64 85 90 72 76 112 118 96 68 99 113 85 68 91 96 78 1 +67 71 74 62 63 71 74 58 63 67 67 51 64 77 74 61 64 73 74 61 64 66 71 57 64 71 75 63 71 75 79 63 68 71 71 56 7 +72 89 94 72 76 89 94 72 80 94 94 72 80 87 96 70 80 91 96 70 80 91 96 74 79 91 96 71 79 91 96 75 79 87 93 71 4 +80 94 94 72 80 94 94 76 80 94 94 72 80 91 96 74 76 95 91 74 80 91 96 70 79 87 93 71 75 91 96 75 79 87 96 71 4 +80 94 94 76 80 94 94 72 80 89 94 72 76 95 91 74 80 91 96 70 76 91 91 70 75 91 96 75 79 87 96 71 75 87 93 71 4 +76 85 86 68 76 85 90 68 76 89 86 68 71 87 91 70 71 87 87 70 76 87 91 70 75 87 89 67 71 87 89 67 75 83 89 67 4 +76 85 90 68 76 89 86 68 80 85 86 68 71 87 87 70 76 87 91 70 76 87 87 70 71 87 89 67 75 83 89 67 75 87 89 67 4 +76 89 86 68 80 85 86 68 76 85 90 68 76 87 91 70 76 87 87 70 76 87 91 63 75 83 89 67 75 87 89 67 75 87 89 67 4 +80 89 94 72 76 85 94 68 68 77 82 65 80 91 91 67 76 87 91 70 71 83 87 67 75 87 85 67 75 87 89 67 71 87 89 67 4 +72 81 86 68 72 81 86 65 68 77 82 65 68 79 87 63 68 79 83 63 68 79 83 67 67 79 81 62 67 79 81 67 71 83 81 67 7 +72 81 86 65 68 77 82 65 64 73 78 57 68 79 83 63 68 79 83 67 68 75 83 59 67 79 81 67 71 83 81 67 67 75 77 62 7 +68 77 82 65 64 73 78 57 68 81 78 68 68 79 83 67 68 75 83 59 64 71 79 63 71 83 81 67 67 75 77 62 67 68 74 54 7 +64 73 78 57 68 81 78 68 72 81 90 76 68 75 83 59 64 71 79 63 71 79 87 70 67 75 77 62 67 68 74 54 67 72 77 62 7 +68 81 78 68 72 81 90 76 68 77 86 68 64 71 79 63 71 79 87 70 71 75 87 70 67 68 74 54 67 72 77 62 71 75 81 71 7 +72 81 90 76 68 77 86 68 60 62 74 57 71 79 87 70 71 75 87 70 64 61 75 52 67 72 77 62 71 75 81 71 63 61 74 54 7 +60 62 74 57 53 49 74 57 64 69 86 72 64 61 75 52 60 54 75 59 71 79 91 78 63 61 74 54 59 54 77 54 71 79 93 75 5 +64 69 86 72 76 85 94 76 72 89 94 72 71 79 91 78 80 99 104 78 84 95 100 78 71 79 93 75 84 99 109 83 79 91 104 75 7 +76 85 86 68 72 85 86 72 72 94 98 76 76 87 91 70 76 91 96 74 76 99 104 85 75 87 89 75 79 91 96 75 84 103 109 83 7 +80 94 102 83 76 89 90 68 64 73 71 54 88 107 118 92 84 103 108 88 71 75 83 59 88 111 118 100 88 116 123 100 84 99 104 79 3 +76 89 90 68 64 73 71 54 60 66 74 61 84 103 108 88 71 75 83 59 60 68 71 59 88 116 123 100 84 99 104 79 71 91 93 71 5 +64 73 71 54 60 66 74 61 60 69 86 76 71 75 83 59 60 68 71 59 64 75 91 78 84 99 104 79 71 91 93 71 75 99 109 83 5 +60 66 74 61 60 69 86 76 60 66 98 83 60 68 71 59 64 75 91 78 71 87 100 81 71 91 93 71 75 99 109 83 75 107 113 92 5 +60 66 98 83 64 69 98 87 72 81 102 87 71 87 100 81 80 99 108 88 84 107 118 96 75 107 113 92 75 103 113 96 75 99 109 96 3 +64 69 98 87 72 81 102 87 80 94 111 91 80 99 108 88 84 107 118 96 84 112 118 96 75 103 113 96 75 99 109 96 75 99 113 92 3 +72 81 102 87 80 94 111 91 84 106 111 91 84 107 118 96 84 112 118 96 92 116 128 103 75 99 109 96 75 99 113 92 75 107 113 92 3 +84 106 111 91 92 115 120 102 97 115 125 102 92 116 128 103 97 121 128 103 88 116 122 96 75 107 113 92 79 111 123 100 79 107 118 92 1 +92 115 120 102 97 115 125 102 92 106 115 91 97 121 128 103 88 116 122 96 92 103 108 81 79 111 123 100 79 107 118 92 75 107 113 92 1 +97 115 125 102 92 106 115 91 80 106 106 91 88 116 122 96 92 103 108 81 80 87 96 81 79 107 118 92 75 107 113 92 71 103 113 96 1 +92 106 115 91 80 106 106 91 80 111 120 98 92 103 108 81 80 87 96 81 68 83 100 85 75 107 113 92 71 103 113 96 71 107 113 92 1 +80 106 106 91 80 111 120 98 76 111 115 94 80 87 96 81 68 83 100 85 71 95 108 88 71 103 113 96 71 107 113 92 71 103 118 92 1 +68 106 115 91 68 102 115 91 64 89 102 79 71 103 113 92 68 107 118 92 64 99 104 85 71 107 118 96 71 107 118 96 63 107 
113 92 1 +64 89 102 79 60 85 94 79 64 89 98 83 64 99 104 85 56 91 104 81 60 95 113 88 63 107 113 92 63 99 113 87 63 103 113 92 1 +60 85 94 79 64 89 98 83 64 98 106 91 56 91 104 81 60 95 113 88 64 95 104 88 63 99 113 87 63 103 113 92 63 103 113 92 1 +64 98 106 91 64 106 115 94 64 106 115 94 64 95 104 88 64 103 113 92 60 103 118 92 63 103 113 92 63 103 113 87 63 107 113 92 1 +64 106 115 94 64 106 115 94 64 106 115 98 64 103 113 92 60 103 118 92 60 99 113 92 63 103 113 87 63 107 113 92 63 99 113 92 1 +64 106 115 94 64 106 115 98 64 106 120 94 60 103 118 92 60 99 113 92 64 103 118 92 63 107 113 92 63 99 113 92 59 99 113 92 1 +64 106 115 98 64 106 120 94 64 102 115 94 60 99 113 92 64 103 118 92 64 107 113 96 63 99 113 92 59 99 113 92 59 103 118 92 1 +64 106 120 94 64 102 115 94 64 102 115 94 64 103 118 92 64 107 113 96 64 107 122 92 59 99 113 92 59 103 118 92 63 103 118 96 1 +64 102 115 94 64 102 115 94 64 106 120 94 64 107 113 96 64 107 122 92 64 107 113 92 59 103 118 92 63 103 118 96 67 103 118 96 1 +64 106 120 94 68 106 115 94 64 102 115 94 64 107 113 92 64 103 113 92 64 103 118 96 67 103 118 96 67 99 109 92 67 99 118 92 1 +64 102 115 94 64 102 115 94 64 106 120 94 64 103 118 96 64 103 118 99 64 107 118 96 67 99 118 92 71 111 118 96 67 107 118 96 1 +64 106 120 94 64 111 125 102 68 111 125 102 64 107 118 96 68 112 122 96 68 112 122 99 67 107 118 96 63 107 123 100 63 107 118 100 1 +64 102 115 98 64 111 120 98 68 111 125 98 64 107 118 96 64 107 118 99 68 107 122 96 67 111 118 96 67 107 118 96 71 107 118 96 1 +64 111 120 98 68 111 125 98 68 111 120 98 64 107 118 99 68 107 122 96 68 112 122 99 67 107 118 96 71 107 118 96 67 111 113 100 1 +68 111 131 102 72 111 120 98 72 111 111 98 68 107 128 96 71 112 128 99 71 112 122 96 67 111 118 96 71 111 118 96 71 111 118 100 1 +72 111 120 98 72 111 111 98 68 102 106 87 71 112 128 99 71 112 122 96 76 112 118 96 71 111 118 96 71 111 118 100 75 111 118 100 1 +72 111 111 98 68 102 106 87 68 89 102 79 71 112 122 96 76 112 118 96 68 99 113 85 71 111 118 100 75 111 118 100 71 107 118 96 1 +68 89 102 79 64 85 90 72 64 81 90 76 68 99 113 85 68 91 96 78 64 79 91 74 71 107 118 96 67 99 109 83 63 87 89 75 1 +80 87 96 70 80 91 96 70 80 91 96 74 79 91 96 71 79 91 96 75 79 87 93 71 74 87 92 70 78 87 96 70 78 87 96 70 4 +80 91 96 70 80 91 96 74 76 95 91 74 79 91 96 75 79 87 93 71 75 91 96 75 78 87 96 70 78 87 96 70 74 87 92 70 4 +76 91 91 70 71 87 91 70 71 87 87 70 75 87 93 71 75 87 89 67 71 87 89 67 74 87 92 66 74 87 92 66 74 83 88 66 4 +71 87 91 70 71 87 87 70 76 87 91 70 75 87 89 67 71 87 89 67 75 83 89 67 74 87 92 66 74 83 88 66 70 83 84 70 4 +71 87 87 70 76 87 91 70 76 87 87 70 71 87 89 67 75 83 89 67 75 87 89 67 74 83 88 66 70 83 84 70 74 83 84 66 4 +76 87 91 70 76 87 87 70 76 87 91 63 75 83 89 67 75 87 89 67 75 87 89 67 70 83 84 70 74 83 84 66 74 83 88 66 4 +80 91 91 67 76 87 91 70 71 83 87 67 75 87 85 67 75 87 89 67 71 87 89 67 66 79 80 63 66 79 76 59 70 79 88 63 4 +76 87 91 70 71 83 87 67 68 83 83 63 75 87 89 67 71 87 89 67 67 79 85 67 66 79 76 59 70 79 88 63 74 87 88 70 4 +68 83 83 63 68 79 87 63 68 79 83 63 67 79 85 67 67 79 81 62 67 79 81 67 74 87 88 70 70 83 84 66 66 75 80 63 7 +68 79 87 63 68 79 83 63 68 79 83 67 67 79 81 62 67 79 81 67 71 83 81 67 70 83 84 66 66 75 80 63 70 79 76 63 7 +68 79 83 63 68 79 83 67 68 75 83 59 67 79 81 67 71 83 81 67 67 75 77 62 66 75 80 63 70 79 76 63 70 79 84 66 7 +68 75 83 59 64 71 79 63 71 79 87 70 67 75 77 62 67 68 74 54 67 72 77 62 70 79 84 66 70 75 76 59 66 71 73 55 7 +71 79 87 70 71 75 87 70 64 61 75 52 67 72 77 62 71 75 
81 71 63 61 74 54 66 71 73 55 63 75 80 59 70 75 84 66 7 +71 75 87 70 64 61 75 52 60 54 75 59 71 75 81 71 63 61 74 54 59 54 77 54 63 75 80 59 70 75 84 66 63 56 76 55 5 +64 61 75 52 60 54 75 59 71 79 91 78 63 61 74 54 59 54 77 54 71 79 93 75 70 75 84 66 63 56 76 55 63 60 80 59 5 +71 79 91 78 80 99 104 78 84 95 100 78 71 79 93 75 84 99 109 83 79 91 104 75 63 60 80 59 78 83 100 78 82 96 104 85 7 +80 99 104 78 84 95 100 78 76 87 91 70 84 99 109 83 79 91 104 75 75 87 89 75 78 83 100 78 82 96 104 85 82 91 96 78 7 +84 95 100 78 76 87 91 70 76 91 96 74 79 91 104 75 75 87 89 75 79 91 96 75 82 96 104 85 82 91 96 78 78 91 96 78 7 +76 87 91 70 76 91 96 74 76 99 104 85 75 87 89 75 79 91 96 75 84 103 109 83 82 91 96 78 78 91 96 78 82 104 112 85 7 +76 91 96 74 76 99 104 85 80 103 113 88 79 91 96 75 84 103 109 83 88 107 113 92 78 91 96 78 82 104 112 85 86 113 127 96 3 +80 103 113 88 80 103 113 88 84 103 113 88 88 107 113 92 88 107 113 92 88 107 113 92 86 113 127 96 90 113 127 96 90 109 117 96 3 +84 103 113 92 88 103 113 96 88 107 113 92 88 107 118 96 88 107 113 92 88 107 118 92 95 109 117 96 90 109 117 92 86 104 112 89 3 +84 103 108 88 71 75 83 59 60 68 71 59 88 116 123 100 84 99 104 79 71 91 93 71 86 113 122 100 86 118 122 100 82 109 112 92 3 +60 68 71 59 64 75 91 78 71 87 100 81 71 91 93 71 75 99 109 83 75 107 113 92 82 109 112 92 78 109 112 92 74 100 112 92 3 +64 75 91 78 71 87 100 81 80 99 108 88 75 99 109 83 75 107 113 92 75 103 113 96 78 109 112 92 74 100 112 92 70 100 112 92 3 +71 87 100 81 80 99 108 88 84 107 118 96 75 107 113 92 75 103 113 96 75 99 109 96 74 100 112 92 70 100 112 92 66 96 108 92 1 +92 116 128 103 97 121 128 103 88 116 122 96 75 107 113 92 79 111 123 100 79 107 118 92 63 87 104 81 63 96 104 89 66 100 108 92 1 +97 121 128 103 88 116 122 96 92 103 108 81 79 111 123 100 79 107 118 92 75 107 113 92 63 96 104 89 66 100 108 92 63 100 117 96 1 +80 87 96 81 68 83 100 85 71 95 108 88 71 103 113 96 71 107 113 92 71 103 118 92 66 104 117 96 66 104 112 92 66 109 117 92 1 +68 83 100 85 71 95 108 88 71 103 113 92 71 107 113 92 71 103 118 92 71 107 118 96 66 104 112 92 66 109 117 92 70 104 117 92 1 +71 95 108 88 71 103 113 92 68 107 118 92 71 103 118 92 71 107 118 96 71 107 118 96 66 109 117 92 70 104 117 92 66 104 122 92 1 +71 103 113 92 68 107 118 92 64 99 104 85 71 107 118 96 71 107 118 96 63 107 113 92 70 104 117 92 66 104 122 92 63 104 117 92 1 +68 107 118 92 64 99 104 85 56 91 104 81 71 107 118 96 63 107 113 92 63 99 113 87 66 104 122 92 63 104 117 92 63 100 112 92 1 +56 91 104 81 60 95 113 88 64 95 104 88 63 99 113 87 63 103 113 92 63 103 113 92 63 100 112 92 63 104 112 92 63 104 112 92 1 +60 95 113 88 64 95 104 88 64 103 113 92 63 103 113 92 63 103 113 92 63 103 113 87 63 104 112 92 63 104 112 92 59 104 112 92 1 +64 103 113 92 60 103 118 92 60 99 113 92 63 103 113 87 63 107 113 92 63 99 113 92 59 104 112 92 59 100 104 81 59 96 104 81 1 +60 103 118 92 60 99 113 92 64 103 118 92 63 107 113 92 63 99 113 92 59 99 113 92 59 100 104 81 59 96 104 81 63 91 108 89 1 +60 99 113 92 64 103 118 92 64 107 113 96 63 99 113 92 59 99 113 92 59 103 118 92 59 96 104 81 63 91 108 89 63 100 104 89 1 +64 103 118 92 64 107 113 96 64 107 122 92 59 99 113 92 59 103 118 92 63 103 118 96 63 91 108 89 63 100 104 89 66 100 112 92 1 +64 107 113 96 64 107 122 92 64 107 113 92 59 103 118 92 63 103 118 96 67 103 118 96 63 100 104 89 66 100 112 92 66 104 108 96 1 +64 107 122 92 64 107 113 92 64 103 113 92 63 103 118 96 67 103 118 96 67 99 109 92 66 100 112 92 66 104 108 96 66 104 117 92 1 +64 103 113 92 64 103 118 96 64 
103 118 99 67 99 109 92 67 99 118 92 71 111 118 96 66 104 117 92 66 100 108 89 63 100 112 92 1 +64 103 118 96 64 103 118 99 64 107 118 96 67 99 118 92 71 111 118 96 67 107 118 96 66 100 108 89 63 100 112 92 63 109 122 96 1 +64 103 118 99 64 107 118 96 68 112 122 96 71 111 118 96 67 107 118 96 63 107 123 100 63 100 112 92 63 109 122 96 63 100 117 96 1 +68 112 122 96 68 112 122 99 64 103 118 96 63 107 123 100 63 107 118 100 67 111 118 100 63 100 117 96 66 109 122 100 66 109 122 100 1 +64 107 122 99 64 107 118 96 64 107 118 99 67 111 123 100 67 111 118 96 67 107 118 96 66 109 117 96 66 113 117 96 66 113 122 96 1 +64 107 118 96 64 107 118 99 68 107 122 96 67 111 118 96 67 107 118 96 71 107 118 96 66 113 117 96 66 113 122 96 66 113 117 96 1 +64 107 118 99 68 107 122 96 68 112 122 99 67 107 118 96 71 107 118 96 67 111 113 100 66 113 122 96 66 113 117 96 70 109 122 100 1 +71 112 122 96 76 112 118 96 68 99 113 85 71 111 118 100 75 111 118 100 71 107 118 96 70 113 117 96 74 113 117 96 74 113 122 100 1 +76 112 118 96 68 99 113 85 68 91 96 78 75 111 118 100 71 107 118 96 67 99 109 83 74 113 117 96 74 113 122 100 70 109 112 96 1 +68 99 113 85 68 91 96 78 64 79 91 74 71 107 118 96 67 99 109 83 63 87 89 75 74 113 122 100 70 109 112 96 66 100 108 85 1 +75 91 96 75 79 87 96 71 75 87 93 71 74 87 92 70 74 91 92 70 74 87 92 66 75 84 90 68 75 84 82 68 71 81 82 64 4 +79 87 96 71 75 87 93 71 75 87 89 67 74 91 92 70 74 87 92 66 74 87 92 66 75 84 82 68 71 81 82 64 67 73 82 60 4 +71 87 89 67 75 83 89 67 75 87 89 67 74 83 88 66 70 83 84 70 74 83 84 66 67 73 79 57 63 73 72 57 67 73 79 60 4 +75 87 89 67 75 87 85 67 75 87 89 67 74 83 88 66 66 79 80 63 66 79 76 59 71 81 86 64 71 81 82 64 67 73 75 57 7 +75 87 85 67 75 87 89 67 71 87 89 67 66 79 80 63 66 79 76 59 70 79 88 63 71 81 82 64 67 73 75 57 63 73 75 57 7 +75 87 89 67 71 87 89 67 67 79 85 67 66 79 76 59 70 79 88 63 74 87 88 70 67 73 75 57 63 73 75 57 67 84 79 68 7 +67 79 85 67 67 79 81 62 67 79 81 67 74 87 88 70 70 83 84 66 66 75 80 63 67 84 79 68 71 91 90 72 67 84 90 64 7 +67 79 81 62 67 79 81 67 71 83 81 67 70 83 84 66 66 75 80 63 70 79 76 63 71 91 90 72 67 84 90 64 67 81 82 64 7 +67 79 81 67 71 83 81 67 67 75 77 62 66 75 80 63 70 79 76 63 70 79 84 66 67 84 90 64 67 81 82 64 67 81 82 64 7 +67 75 77 62 67 68 74 54 67 72 77 62 70 79 84 66 70 75 76 59 66 71 73 55 67 81 82 64 71 77 86 64 71 77 86 64 7 +71 75 81 71 63 61 74 54 59 54 77 54 63 75 80 59 70 75 84 66 63 56 76 55 71 81 86 68 75 81 86 68 63 63 79 57 7 +63 61 74 54 59 54 77 54 71 79 93 75 70 75 84 66 63 56 76 55 63 60 80 59 75 81 86 68 63 63 79 57 63 70 86 72 5 +59 54 77 54 71 79 93 75 84 99 109 83 63 56 76 55 63 60 80 59 78 83 100 78 63 63 79 57 63 70 86 72 79 91 101 83 5 +71 79 93 75 84 99 109 83 79 91 104 75 63 60 80 59 78 83 100 78 82 96 104 85 63 70 86 72 79 91 101 83 83 91 101 83 7 +84 99 109 83 79 91 104 75 75 87 89 75 78 83 100 78 82 96 104 85 82 91 96 78 79 91 101 83 83 91 101 83 87 95 97 79 7 +79 91 96 75 84 103 109 83 88 107 113 92 78 91 96 78 82 104 112 85 86 113 127 96 83 99 105 86 87 112 114 94 92 117 124 101 3 +84 103 109 83 88 107 113 92 88 107 113 92 82 104 112 85 86 113 127 96 90 113 127 96 87 112 114 94 92 117 124 101 92 117 130 101 3 +88 107 113 92 88 107 113 92 88 107 113 92 86 113 127 96 90 113 127 96 90 109 117 96 92 117 124 101 92 117 130 101 96 112 124 98 3 +88 107 113 92 88 107 118 96 88 107 113 92 90 109 117 96 95 109 117 96 90 109 117 92 96 112 124 98 92 108 114 94 87 99 105 90 3 +88 107 113 92 88 107 118 92 88 111 118 100 90 109 117 92 86 104 112 89 86 104 112 92 87 99 105 90 83 
103 114 90 83 112 124 94 3 +88 116 123 100 84 99 104 79 71 91 93 71 86 113 122 100 86 118 122 100 82 109 112 92 87 112 119 98 79 103 114 90 71 95 110 90 1 +84 99 104 79 71 91 93 71 75 99 109 83 86 118 122 100 82 109 112 92 78 109 112 92 79 103 114 90 71 95 110 90 67 99 114 94 1 +75 99 109 83 75 107 113 92 75 103 113 96 78 109 112 92 74 100 112 92 70 100 112 92 67 99 114 94 63 95 110 90 63 91 105 90 1 +75 103 113 96 75 99 109 96 75 99 113 92 70 100 112 92 66 96 108 92 63 87 100 81 63 91 105 90 59 91 105 86 59 91 101 86 1 +75 107 113 92 79 111 123 100 79 107 118 92 63 87 104 81 63 96 104 89 66 100 108 92 59 95 110 90 59 99 114 90 59 99 114 90 1 +79 107 118 92 75 107 113 92 71 103 113 96 66 100 108 92 63 100 117 96 66 104 117 96 59 99 114 90 59 95 119 90 59 103 119 94 1 +71 103 118 92 71 107 118 96 71 107 118 96 66 109 117 92 70 104 117 92 66 104 122 92 63 103 110 90 59 99 110 90 59 95 110 90 1 +71 107 118 96 71 107 118 96 63 107 113 92 70 104 117 92 66 104 122 92 63 104 117 92 59 99 110 90 59 95 110 90 59 91 105 86 1 +63 107 113 92 63 99 113 87 63 103 113 92 63 104 117 92 63 100 112 92 63 104 112 92 59 91 105 86 59 88 110 86 59 88 110 90 1 +63 103 113 92 63 103 113 92 63 103 113 87 63 104 112 92 63 104 112 92 59 104 112 92 59 88 110 90 59 99 114 90 63 99 114 90 1 +63 103 113 92 63 103 113 87 63 107 113 92 63 104 112 92 59 104 112 92 59 100 104 81 59 99 114 90 63 99 114 90 63 99 110 86 1 +63 103 113 87 63 107 113 92 63 99 113 92 59 104 112 92 59 100 104 81 59 96 104 81 63 99 114 90 63 99 110 86 59 95 105 86 1 +63 107 113 92 63 99 113 92 59 99 113 92 59 100 104 81 59 96 104 81 63 91 108 89 63 99 110 86 59 95 105 86 63 99 101 86 1 +63 99 113 92 59 99 113 92 59 103 118 92 59 96 104 81 63 91 108 89 63 100 104 89 59 95 105 86 63 99 101 86 67 95 101 83 1 +59 99 113 92 59 103 118 92 63 103 118 96 63 91 108 89 63 100 104 89 66 100 112 92 63 99 101 86 67 95 101 83 67 95 105 79 1 +59 103 118 92 63 103 118 96 67 103 118 96 63 100 104 89 66 100 112 92 66 104 108 96 67 95 101 83 67 95 105 79 63 91 101 79 1 +63 103 118 96 67 103 118 96 67 99 109 92 66 100 112 92 66 104 108 96 66 104 117 92 67 95 105 79 63 91 101 79 63 95 105 83 1 +71 111 118 96 67 107 118 96 63 107 123 100 63 100 112 92 63 109 122 96 63 100 117 96 67 95 101 86 67 99 114 86 67 103 110 94 1 +67 107 118 96 63 107 123 100 63 107 118 100 63 109 122 96 63 100 117 96 66 109 122 100 67 99 114 86 67 103 110 94 67 108 119 98 1 +63 107 123 100 63 107 118 100 67 111 118 100 63 100 117 96 66 109 122 100 66 109 122 100 67 103 110 94 67 108 119 98 67 108 119 94 1 +63 107 118 100 67 111 118 100 67 111 123 100 66 109 122 100 66 109 122 100 66 109 117 96 67 108 119 98 67 108 119 94 63 103 119 94 1 +67 111 118 100 67 111 123 100 67 111 118 96 66 109 122 100 66 109 117 96 66 113 117 96 67 108 119 94 63 103 119 94 67 103 114 94 1 +67 111 123 100 67 111 118 96 67 107 118 96 66 109 117 96 66 113 117 96 66 113 122 96 63 103 119 94 67 103 114 94 63 108 119 94 1 +67 107 118 96 71 107 118 96 67 111 113 100 66 113 122 96 66 113 117 96 70 109 122 100 63 108 119 94 63 112 114 94 67 108 119 101 1 +71 107 118 96 67 111 113 100 67 111 118 96 66 113 117 96 70 109 122 100 66 109 122 96 63 112 114 94 67 108 119 101 67 108 119 98 1 +67 111 113 100 67 111 118 96 71 111 118 96 70 109 122 100 66 109 122 96 70 113 127 96 67 108 119 101 67 108 119 98 67 112 119 98 1 +67 111 118 96 71 111 118 96 71 111 118 100 66 109 122 96 70 113 127 96 70 113 117 96 67 108 119 98 67 112 119 98 67 108 119 98 1 +71 111 118 100 75 111 118 100 71 107 118 96 70 113 117 96 74 113 117 96 74 113 122 100 
67 108 119 98 71 108 114 98 71 112 119 98 1 +75 111 118 100 71 107 118 96 67 99 109 83 74 113 117 96 74 113 122 100 70 109 112 96 71 108 114 98 71 112 119 98 67 112 119 98 1 +74 87 92 70 74 91 92 70 74 87 92 66 75 84 90 68 75 84 82 68 71 81 82 64 67 75 78 58 63 75 78 55 63 71 74 55 4 +74 87 92 66 74 83 88 66 70 83 84 70 67 73 82 60 67 73 79 57 63 73 72 57 63 67 82 58 63 71 74 58 63 71 74 58 7 +74 83 84 66 74 83 88 66 66 79 80 63 67 73 79 60 71 81 86 64 71 81 82 64 63 71 74 58 67 75 78 58 70 79 82 65 7 +74 83 88 66 66 79 80 63 66 79 76 59 71 81 86 64 71 81 82 64 67 73 75 57 67 75 78 58 70 79 82 65 67 75 78 65 7 +66 79 80 63 66 79 76 59 70 79 88 63 71 81 82 64 67 73 75 57 63 73 75 57 70 79 82 65 67 75 78 65 60 71 70 58 7 +66 79 76 59 70 79 88 63 74 87 88 70 67 73 75 57 63 73 75 57 67 84 79 68 67 75 78 65 60 71 70 58 63 75 74 62 7 +74 87 88 70 70 83 84 66 66 75 80 63 67 84 79 68 71 91 90 72 67 84 90 64 63 75 74 62 67 84 85 69 70 88 93 73 7 +70 79 84 66 70 75 76 59 66 71 73 55 67 81 82 64 71 77 86 64 71 77 86 64 78 92 97 80 82 97 97 80 82 92 93 83 7 +70 75 76 59 66 71 73 55 63 75 80 59 71 77 86 64 71 77 86 64 71 81 86 68 82 97 97 80 82 92 93 83 78 92 101 80 7 +66 71 73 55 63 75 80 59 70 75 84 66 71 77 86 64 71 81 86 68 75 81 86 68 82 92 93 83 78 92 101 80 78 92 97 76 7 +63 75 80 59 70 75 84 66 63 56 76 55 71 81 86 68 75 81 86 68 63 63 79 57 78 92 101 80 78 92 97 76 67 71 78 62 7 +63 56 76 55 63 60 80 59 78 83 100 78 63 63 79 57 63 70 86 72 79 91 101 83 67 71 78 62 74 79 89 73 78 92 97 87 5 +78 83 100 78 82 96 104 85 82 91 96 78 79 91 101 83 83 91 101 83 87 95 97 79 78 92 97 87 78 97 101 83 82 102 105 87 7 +82 96 104 85 82 91 96 78 78 91 96 78 83 91 101 83 87 95 97 79 83 99 105 86 78 97 101 83 82 102 105 87 85 106 114 90 7 +82 91 96 78 78 91 96 78 82 104 112 85 87 95 97 79 83 99 105 86 87 112 114 94 82 102 105 87 85 106 114 90 93 120 119 97 3 +78 91 96 78 82 104 112 85 86 113 127 96 83 99 105 86 87 112 114 94 92 117 124 101 85 106 114 90 93 120 119 97 93 115 124 97 3 +82 104 112 85 86 113 127 96 90 113 127 96 87 112 114 94 92 117 124 101 92 117 130 101 93 120 119 97 93 115 124 97 93 120 129 101 3 +86 113 127 96 90 113 127 96 90 109 117 96 92 117 124 101 92 117 130 101 96 112 124 98 93 115 124 97 93 120 129 101 93 115 124 101 3 +90 113 127 96 90 109 117 96 95 109 117 96 92 117 130 101 96 112 124 98 92 108 114 94 93 120 129 101 93 115 124 101 89 106 114 94 3 +95 109 117 96 90 109 117 92 86 104 112 89 92 108 114 94 87 99 105 90 83 103 114 90 89 106 114 94 85 106 114 94 78 115 114 97 3 +86 104 112 89 86 104 112 92 86 113 122 100 83 103 114 90 83 112 124 94 87 112 119 98 78 115 114 97 78 111 119 94 70 106 114 90 3 +86 104 112 92 86 113 122 100 86 118 122 100 83 112 124 94 87 112 119 98 79 103 114 90 78 111 119 94 70 106 114 90 67 102 114 94 1 +86 113 122 100 86 118 122 100 82 109 112 92 87 112 119 98 79 103 114 90 71 95 110 90 70 106 114 90 67 102 114 94 63 97 105 87 1 +70 100 112 92 66 96 108 92 63 87 100 81 63 91 105 90 59 91 105 86 59 91 101 86 57 92 110 87 57 88 101 87 57 88 101 83 1 +66 96 108 92 63 87 100 81 63 87 104 81 59 91 105 86 59 91 101 86 59 95 110 90 57 88 101 87 57 88 101 83 57 88 105 83 1 +63 87 100 81 63 87 104 81 63 96 104 89 59 91 101 86 59 95 110 90 59 99 114 90 57 88 101 83 57 88 105 83 60 88 110 83 1 +63 87 104 81 63 96 104 89 66 100 108 92 59 95 110 90 59 99 114 90 59 99 114 90 57 88 105 83 60 88 110 83 57 92 110 87 1 +63 100 117 96 66 104 117 96 66 104 112 92 59 95 119 90 59 103 119 94 63 103 114 94 57 97 110 87 63 97 110 87 60 97 114 87 1 +66 104 117 96 66 104 112 92 66 109 
[... diff hunk continues with several hundred added lines of space-separated integer data (rows of feature values, each ending in a small integer label); the raw values are not reproduced here ...]
96 78 49 71 88 78 49 71 88 74 46 73 90 75 49 73 86 79 49 73 93 79 1 +51 68 85 71 51 68 77 71 51 64 77 71 49 71 88 74 49 67 88 70 52 67 84 70 49 73 93 79 52 77 93 75 49 73 86 75 1 +51 68 77 71 51 64 77 71 48 61 74 67 49 67 88 70 52 67 84 70 52 63 80 70 52 77 93 75 49 73 86 75 52 66 82 72 1 +51 61 77 71 51 68 81 71 59 72 85 75 49 63 76 66 49 67 80 70 52 71 80 74 52 70 82 72 49 70 82 72 52 73 82 75 1 +51 68 81 71 59 72 85 75 59 79 89 79 49 67 80 70 52 71 80 74 52 71 84 70 49 70 82 72 52 73 82 75 56 77 93 79 1 +59 72 85 75 59 79 89 79 59 79 96 79 52 71 80 74 52 71 84 70 56 75 88 74 52 73 82 75 56 77 93 79 56 81 97 83 1 +59 79 89 79 59 79 96 79 63 83 96 83 52 71 84 70 56 75 88 74 56 75 92 78 56 77 93 79 56 81 97 83 59 84 93 83 1 +63 83 96 83 63 79 96 83 63 87 96 83 56 75 92 78 59 79 96 81 56 79 88 81 59 84 93 83 59 81 101 83 56 81 93 79 1 +63 79 96 83 63 87 96 83 63 87 96 83 59 79 96 81 56 79 88 81 59 83 100 81 59 81 101 83 56 81 93 79 56 81 93 79 1 +63 87 96 83 63 87 96 83 59 83 89 79 56 79 88 81 59 83 100 81 59 83 100 81 56 81 93 79 56 81 93 79 56 84 105 86 1 +63 87 96 83 59 83 89 79 59 95 109 87 59 83 100 81 59 83 100 81 59 87 104 85 56 81 93 79 56 84 105 86 63 99 114 94 1 +59 83 89 79 59 95 109 87 63 99 113 92 59 83 100 81 59 87 104 85 63 100 112 92 56 84 105 86 63 99 114 94 67 99 110 94 1 +59 95 109 87 63 99 113 92 67 99 109 87 59 87 104 85 63 100 112 92 70 104 117 92 63 99 114 94 67 99 110 94 63 95 110 90 1 +63 99 113 92 67 99 109 87 63 95 104 87 63 100 112 92 70 104 117 92 63 96 112 89 67 99 110 94 63 95 110 90 63 95 105 90 1 +67 99 109 87 63 95 104 87 63 95 109 87 70 104 117 92 63 96 112 89 63 96 112 89 63 95 110 90 63 95 105 90 63 99 110 90 1 +63 95 104 87 63 95 109 87 67 95 100 87 63 96 112 89 63 96 112 89 66 100 112 89 63 95 105 90 63 99 110 90 63 103 119 90 1 +63 95 109 87 67 95 100 87 67 95 104 87 63 96 112 89 66 100 112 89 63 100 112 92 63 99 110 90 63 103 119 90 67 99 114 94 1 +67 95 100 87 67 95 104 87 67 95 109 87 66 100 112 89 63 100 112 92 63 100 117 92 63 103 119 90 67 99 114 94 63 99 114 94 1 +67 95 104 87 67 95 109 87 63 95 104 83 63 100 112 92 63 100 117 92 63 96 112 89 67 99 114 94 63 99 114 94 63 103 114 90 1 +63 95 104 83 63 95 113 87 59 95 113 92 63 96 112 89 63 96 108 89 63 96 108 89 63 103 114 90 63 103 119 90 59 99 114 90 1 +59 91 104 87 55 87 104 87 55 91 104 87 59 96 112 89 52 87 108 85 56 87 100 85 59 95 110 86 56 84 101 83 56 84 105 86 1 +55 91 104 87 63 95 109 87 67 99 109 92 56 87 100 85 63 87 108 85 63 96 112 89 56 84 105 86 59 81 105 86 59 88 105 86 1 +63 95 109 87 67 99 109 92 67 103 113 92 63 87 108 85 63 96 112 89 63 100 112 89 59 81 105 86 59 88 105 86 59 91 110 86 1 +67 103 113 92 67 107 118 96 67 107 123 96 63 100 112 89 63 104 108 92 63 100 108 96 59 91 110 86 63 99 110 94 63 95 105 90 1 +71 111 128 100 71 111 128 96 71 107 123 96 66 104 122 103 74 113 122 100 70 113 122 96 63 103 119 94 67 108 124 98 75 112 124 101 1 +71 111 128 96 71 107 123 96 71 103 118 96 74 113 122 100 70 113 122 96 70 109 122 100 67 108 124 98 75 112 124 101 71 112 124 101 1 +71 107 123 96 71 103 118 96 67 103 118 92 70 113 122 96 70 109 122 100 66 113 117 100 75 112 124 101 71 112 124 101 71 112 130 101 1 +63 107 118 92 63 87 109 96 51 45 113 125 66 109 117 100 66 113 122 100 59 79 117 107 71 112 130 101 71 112 119 98 67 108 114 98 1 +51 45 113 125 44 29 123 133 44 37 118 133 59 79 117 107 46 43 112 122 49 49 112 118 67 108 114 98 56 70 110 98 52 54 97 105 2 +44 37 118 133 44 37 118 129 44 32 113 125 49 49 112 118 52 53 108 114 49 40 112 125 52 54 97 105 49 45 110 124 46 32 
119 135 2 +44 37 118 129 44 32 113 125 44 32 118 129 52 53 108 114 49 40 112 125 46 34 112 133 49 45 110 124 46 32 119 135 46 30 119 139 2 +44 32 113 125 44 32 118 129 48 34 113 125 49 40 112 125 46 34 112 133 46 32 112 133 46 32 119 135 46 30 119 139 42 32 114 135 2 +44 32 118 129 48 34 113 125 51 58 113 104 46 34 112 133 46 32 112 133 46 32 112 133 46 30 119 139 42 32 114 135 42 30 110 139 2 +51 58 113 104 59 87 104 83 63 95 100 83 46 32 112 133 46 46 112 114 56 71 104 89 42 30 110 139 42 30 114 135 46 34 110 124 2 +59 87 104 83 63 95 100 83 63 95 104 83 46 46 112 114 56 71 104 89 59 87 100 81 42 30 114 135 46 34 110 124 49 51 101 101 2 +63 95 100 83 63 95 104 83 63 95 109 92 56 71 104 89 59 87 100 81 66 91 112 89 46 34 110 124 49 51 101 101 56 73 97 79 5 +63 95 104 83 63 95 109 92 75 99 118 96 59 87 100 81 66 91 112 89 70 96 112 92 49 51 101 101 56 73 97 79 63 88 105 83 5 +63 95 109 92 75 99 118 96 75 99 118 96 66 91 112 89 70 96 112 92 70 96 117 92 56 73 97 79 63 88 105 83 67 84 105 94 5 +75 99 118 96 75 99 118 96 75 99 118 96 70 96 112 92 70 96 117 92 74 91 112 96 63 88 105 83 67 84 105 94 67 88 110 98 5 +79 99 109 83 71 75 93 79 51 51 81 79 70 87 112 100 82 91 108 85 63 63 88 78 79 91 119 98 79 99 110 86 71 77 86 75 5 +71 75 93 79 51 51 81 79 51 54 81 75 82 91 108 85 63 63 88 78 52 53 76 74 79 99 110 86 71 77 86 75 59 60 72 72 5 +51 51 81 79 51 54 81 75 51 48 81 79 63 63 88 78 52 53 76 74 56 53 80 74 71 77 86 75 59 60 72 72 59 63 79 72 5 +51 54 81 75 51 48 81 79 48 42 74 75 52 53 76 74 56 53 80 74 49 49 76 74 59 60 72 72 59 63 79 72 59 60 75 68 5 +51 48 81 79 48 42 74 75 48 48 67 71 56 53 80 74 49 49 76 74 49 46 69 66 59 63 79 72 59 60 75 68 52 54 75 68 5 +48 48 67 71 51 54 67 62 59 61 70 62 49 46 69 66 52 53 73 66 59 60 73 63 52 54 75 68 52 60 72 64 59 63 68 68 5 +90 100 104 85 90 100 108 81 90 104 108 85 83 95 101 79 87 99 101 83 87 99 105 83 82 102 110 83 85 102 105 80 85 97 101 80 3 +90 100 108 81 90 104 108 85 90 100 100 81 87 99 101 83 87 99 105 83 83 99 101 83 85 102 105 80 85 97 101 80 82 97 101 76 3 +90 104 108 85 90 100 100 81 86 100 108 81 87 99 105 83 83 99 101 83 83 95 97 79 85 97 101 80 82 97 101 76 82 97 101 80 3 +90 100 100 81 86 100 108 81 86 100 104 81 83 99 101 83 83 95 97 79 83 95 101 83 82 97 101 76 82 97 101 80 85 102 110 87 3 +86 100 108 81 86 100 104 81 86 104 108 85 83 95 97 79 83 95 101 83 87 103 110 86 82 97 101 80 85 102 110 87 85 102 110 90 3 +86 100 104 81 86 104 108 85 90 104 108 89 83 95 101 83 87 103 110 86 92 103 105 86 85 102 110 87 85 102 110 90 89 102 110 87 3 +86 104 108 85 90 104 108 89 90 104 112 89 87 103 110 86 92 103 105 86 87 103 110 86 85 102 110 90 89 102 110 87 89 102 114 87 3 +90 104 108 89 90 104 112 89 86 100 108 89 92 103 105 86 87 103 110 86 87 103 114 86 89 102 110 87 89 102 114 87 89 106 114 94 3 +90 104 112 89 86 100 108 89 86 104 108 89 87 103 110 86 87 103 114 86 92 112 119 94 89 102 114 87 89 106 114 94 93 115 124 94 3 +86 100 108 89 86 104 108 89 86 104 104 85 87 103 114 86 92 112 119 94 96 112 119 98 89 106 114 94 93 115 124 94 97 115 124 97 3 +86 104 108 89 86 104 104 85 82 96 96 81 92 112 119 94 96 112 119 98 92 103 110 90 93 115 124 94 97 115 124 97 93 106 114 94 3 +86 104 104 85 82 96 96 81 82 96 100 78 96 112 119 98 92 103 110 90 83 95 105 79 97 115 124 97 93 106 114 94 89 97 101 80 3 +82 96 96 81 82 96 100 78 82 96 100 78 92 103 110 90 83 95 105 79 83 95 101 79 93 106 114 94 89 97 101 80 85 97 105 80 3 +82 96 100 78 82 96 100 78 90 104 112 85 83 95 105 79 83 95 101 79 87 103 105 83 89 97 101 80 85 97 105 80 85 106 105 
83 3 +82 96 100 78 90 104 112 85 90 109 112 89 83 95 101 79 87 103 105 83 87 103 110 83 85 97 105 80 85 106 105 83 85 102 101 83 3 +82 91 92 81 78 87 100 81 86 100 108 89 83 99 101 83 87 103 110 90 87 99 105 86 89 102 110 87 85 102 114 87 78 92 101 87 3 +86 100 108 89 86 100 108 89 78 100 112 92 87 99 105 86 79 99 105 86 75 99 110 90 78 92 101 87 74 97 105 94 67 97 110 94 3 +86 100 108 89 78 100 112 92 78 104 122 96 79 99 105 86 75 99 110 90 67 99 114 90 74 97 105 94 67 97 110 94 57 97 110 94 1 +78 100 112 92 78 104 122 96 74 109 112 96 75 99 110 90 67 99 114 90 63 99 114 90 67 97 110 94 57 97 110 94 53 88 101 83 1 +66 104 112 92 59 91 100 85 52 79 92 78 59 91 101 90 56 84 93 83 52 77 93 79 50 71 89 76 47 71 89 80 50 71 85 76 1 +59 91 100 85 52 79 92 78 49 83 96 81 56 84 93 83 52 77 93 79 52 73 90 75 47 71 89 80 50 71 85 76 47 67 85 69 1 +49 83 96 81 49 79 96 78 49 71 88 78 52 73 90 75 46 73 90 75 49 73 86 79 47 67 85 69 47 71 85 73 50 75 89 76 1 +49 79 96 78 49 71 88 78 49 71 88 74 46 73 90 75 49 73 86 79 49 73 93 79 47 71 85 73 50 75 89 76 50 79 89 76 1 +49 71 88 78 49 71 88 74 49 67 88 70 49 73 86 79 49 73 93 79 52 77 93 75 50 75 89 76 50 79 89 76 50 79 93 76 1 +49 67 88 70 52 67 84 70 52 63 80 70 52 77 93 75 49 73 86 75 52 66 82 72 50 79 93 76 50 79 89 76 50 71 82 73 1 +52 67 84 70 52 63 80 70 49 63 76 66 49 73 86 75 52 66 82 72 52 70 82 72 50 79 89 76 50 71 82 73 47 67 82 65 1 +52 63 80 70 49 63 76 66 49 67 80 70 52 66 82 72 52 70 82 72 49 70 82 72 50 71 82 73 47 67 82 65 50 71 85 73 1 +49 63 76 66 49 67 80 70 52 71 80 74 52 70 82 72 49 70 82 72 52 73 82 75 47 67 82 65 50 71 85 73 53 75 89 73 1 +56 75 88 74 56 75 92 78 59 79 96 81 56 81 97 83 59 84 93 83 59 81 101 83 57 84 101 83 53 84 101 87 50 79 93 80 1 +59 79 96 81 56 79 88 81 59 83 100 81 59 81 101 83 56 81 93 79 56 81 93 79 50 79 93 80 53 79 89 76 57 79 93 80 1 +56 79 88 81 59 83 100 81 59 83 100 81 56 81 93 79 56 81 93 79 56 84 105 86 53 79 89 76 57 79 93 80 57 88 101 83 1 +59 83 100 81 59 83 100 81 59 87 104 85 56 81 93 79 56 84 105 86 63 99 114 94 57 79 93 80 57 88 101 83 57 88 101 83 1 +59 83 100 81 59 87 104 85 63 100 112 92 56 84 105 86 63 99 114 94 67 99 110 94 57 88 101 83 57 88 101 83 60 88 101 83 1 +59 87 104 85 63 100 112 92 70 104 117 92 63 99 114 94 67 99 110 94 63 95 110 90 57 88 101 83 60 88 101 83 60 75 93 83 1 +70 104 117 92 63 96 112 89 63 96 112 89 63 95 110 90 63 95 105 90 63 99 110 90 60 75 93 83 63 79 97 83 63 88 105 90 1 +63 96 112 89 63 96 112 89 66 100 112 89 63 95 105 90 63 99 110 90 63 103 119 90 63 79 97 83 63 88 105 90 67 97 114 90 1 +66 100 112 89 63 100 112 92 63 100 117 92 63 103 119 90 67 99 114 94 63 99 114 94 67 97 114 90 70 106 114 94 67 97 114 87 1 +63 100 112 92 63 100 117 92 63 96 112 89 67 99 114 94 63 99 114 94 63 103 114 90 70 106 114 94 67 97 114 87 63 97 114 90 1 +63 100 117 92 63 96 112 89 63 96 108 89 63 99 114 94 63 103 114 90 63 103 119 90 67 97 114 87 63 97 114 90 67 102 114 90 1 +63 96 112 89 63 96 108 89 63 96 108 89 63 103 114 90 63 103 119 90 59 99 114 90 63 97 114 90 67 102 114 90 63 102 114 90 1 +63 96 108 89 63 96 108 89 59 96 112 89 63 103 119 90 59 99 114 90 59 95 110 86 67 102 114 90 63 102 114 90 63 106 114 90 1 +63 96 108 89 59 96 112 89 52 87 108 85 59 99 114 90 59 95 110 86 56 84 101 83 63 102 114 90 63 106 114 90 60 92 105 87 1 +59 96 112 89 52 87 108 85 56 87 100 85 59 95 110 86 56 84 101 83 56 84 105 86 63 106 114 90 60 92 105 87 53 84 110 87 1 +52 87 108 85 56 87 100 85 63 87 108 85 56 84 101 83 56 84 105 86 59 81 105 86 60 92 105 87 53 84 110 87 53 84 105 83 1 
+56 87 100 85 63 87 108 85 63 96 112 89 56 84 105 86 59 81 105 86 59 88 105 86 53 84 110 87 53 84 105 83 57 88 105 87 1 +63 87 108 85 63 96 112 89 63 100 112 89 59 81 105 86 59 88 105 86 59 91 110 86 53 84 105 83 57 88 105 87 60 97 105 87 1 +63 100 108 96 66 100 117 92 66 104 122 103 63 95 105 90 67 99 110 94 63 103 119 94 63 92 105 87 63 97 114 90 67 102 119 97 1 +66 100 117 92 66 104 122 103 74 113 122 100 67 99 110 94 63 103 119 94 67 108 124 98 63 97 114 90 67 102 119 97 74 106 124 104 1 +66 104 122 103 74 113 122 100 70 113 122 96 63 103 119 94 67 108 124 98 75 112 124 101 67 102 119 97 74 106 124 104 78 111 129 101 1 +70 113 122 96 70 109 122 100 66 113 117 100 75 112 124 101 71 112 124 101 71 112 130 101 78 111 129 101 67 102 119 97 67 106 124 97 1 +70 109 122 100 66 113 117 100 66 109 117 100 71 112 124 101 71 112 130 101 71 112 130 101 67 102 119 97 67 106 124 97 70 111 124 101 1 +66 113 117 100 66 109 117 100 66 113 122 100 71 112 130 101 71 112 130 101 71 112 119 98 67 106 124 97 70 111 124 101 67 106 119 97 1 +66 113 122 100 59 79 117 107 46 43 112 122 71 112 119 98 67 108 114 98 56 70 110 98 67 106 119 97 67 111 114 97 60 88 110 97 1 +59 79 117 107 46 43 112 122 49 49 112 118 67 108 114 98 56 70 110 98 52 54 97 105 67 111 114 97 60 88 110 97 47 40 105 122 1 +46 43 112 122 49 49 112 118 52 53 108 114 56 70 110 98 52 54 97 105 49 45 110 124 60 88 110 97 47 40 105 122 44 31 114 136 2 +52 53 108 114 49 40 112 125 46 34 112 133 49 45 110 124 46 32 119 135 46 30 119 139 44 31 114 136 44 31 110 140 44 31 114 140 2 +49 40 112 125 46 34 112 133 46 32 112 133 46 32 119 135 46 30 119 139 42 32 114 135 44 31 110 140 44 31 114 140 44 31 114 133 2 +46 46 112 114 56 71 104 89 59 87 100 81 42 30 114 135 46 34 110 124 49 51 101 101 44 31 110 133 44 29 114 136 44 29 114 133 2 +56 71 104 89 59 87 100 81 66 91 112 89 46 34 110 124 49 51 101 101 56 73 97 79 44 29 114 136 44 29 114 133 47 37 114 122 2 +59 87 100 81 66 91 112 89 70 96 112 92 49 51 101 101 56 73 97 79 63 88 105 83 44 29 114 133 47 37 114 122 50 63 97 90 5 +70 96 112 92 70 96 117 92 74 91 112 96 63 88 105 83 67 84 105 94 67 88 110 98 50 63 97 90 63 84 97 80 70 88 105 87 5 +66 83 117 100 70 87 112 100 82 91 108 85 75 91 110 94 79 91 119 98 79 99 110 86 74 92 110 94 70 88 114 97 74 88 110 94 5 +70 87 112 100 82 91 108 85 63 63 88 78 79 91 119 98 79 99 110 86 71 77 86 75 70 88 114 97 74 88 110 94 78 84 93 80 5 +82 91 108 85 63 63 88 78 52 53 76 74 79 99 110 86 71 77 86 75 59 60 72 72 74 88 110 94 78 84 93 80 63 75 89 73 5 +63 63 88 78 52 53 76 74 56 53 80 74 71 77 86 75 59 60 72 72 59 63 79 72 78 84 93 80 63 75 89 73 60 71 82 65 5 +56 53 80 74 49 49 76 74 49 46 69 66 59 63 79 72 59 60 75 68 52 54 75 68 60 71 82 65 63 67 78 69 60 63 74 69 5 +49 49 76 74 49 46 69 66 52 53 73 66 59 60 75 68 52 54 75 68 52 60 72 64 63 67 78 69 60 63 74 69 60 63 78 65 5 +49 46 69 66 52 53 73 66 59 60 73 63 52 54 75 68 52 60 72 64 59 63 68 68 60 63 74 69 60 63 78 65 63 71 70 62 7 +83 95 101 79 87 99 101 83 87 99 105 83 82 102 110 83 85 102 105 80 85 97 101 80 88 102 106 83 88 106 102 83 88 102 102 79 3 +87 99 101 83 87 99 105 83 83 99 101 83 85 102 105 80 85 97 101 80 82 97 101 76 88 106 102 83 88 102 102 79 80 98 98 76 3 +87 99 105 83 83 99 101 83 83 95 97 79 85 97 101 80 82 97 101 76 82 97 101 80 88 102 102 79 80 98 98 76 80 98 102 79 3 +87 103 114 86 92 112 119 94 96 112 119 98 89 106 114 94 93 115 124 94 97 115 124 97 88 111 115 91 92 115 115 94 92 106 111 87 3 +92 112 119 94 96 112 119 98 92 103 110 90 93 115 124 94 97 115 124 97 93 106 114 94 92 115 115 
94 92 106 111 87 88 102 106 83 3 +96 112 119 98 92 103 110 90 83 95 105 79 97 115 124 97 93 106 114 94 89 97 101 80 92 106 111 87 88 102 106 83 88 102 106 83 3 +83 95 101 79 87 103 105 83 87 103 110 83 85 97 105 80 85 106 105 83 85 102 101 83 88 98 106 79 84 102 106 79 84 94 102 79 3 +87 103 105 83 87 103 110 83 83 91 97 79 85 106 105 83 85 102 101 83 82 92 105 76 84 102 106 79 84 94 102 79 84 98 98 79 3 +87 95 101 83 83 99 101 83 87 103 110 90 85 92 105 83 89 102 110 87 85 102 114 87 84 102 111 87 88 106 102 91 84 98 102 87 3 +83 99 101 83 87 103 110 90 87 99 105 86 89 102 110 87 85 102 114 87 78 92 101 87 88 106 102 91 84 98 102 87 72 94 106 87 3 +87 103 110 90 87 99 105 86 79 99 105 86 85 102 114 87 78 92 101 87 74 97 105 94 84 98 102 87 72 94 106 87 64 98 111 91 3 +87 99 105 86 79 99 105 86 75 99 110 90 78 92 101 87 74 97 105 94 67 97 110 94 72 94 106 87 64 98 111 91 57 94 111 91 1 +67 99 114 90 63 99 114 90 59 91 101 90 57 97 110 94 53 88 101 83 50 71 89 76 53 85 102 83 50 73 90 76 50 69 86 72 1 +52 73 90 75 46 73 90 75 49 73 86 79 47 67 85 69 47 71 85 73 50 75 89 76 50 66 82 72 50 73 90 76 53 77 94 76 1 +49 73 86 79 49 73 93 79 52 77 93 75 50 75 89 76 50 79 89 76 50 79 93 76 53 77 94 76 50 73 90 76 50 77 98 79 1 +49 73 86 75 52 66 82 72 52 70 82 72 50 79 89 76 50 71 82 73 47 67 82 65 53 77 94 79 50 73 90 76 50 69 86 72 1 +52 66 82 72 52 70 82 72 49 70 82 72 50 71 82 73 47 67 82 65 50 71 85 73 50 73 90 76 50 69 86 72 53 69 82 72 1 +52 70 82 72 49 70 82 72 52 73 82 75 47 67 82 65 50 71 85 73 53 75 89 73 50 69 86 72 53 69 82 72 53 73 94 76 1 +49 70 82 72 52 73 82 75 56 77 93 79 50 71 85 73 53 75 89 73 53 84 97 80 53 69 82 72 53 73 94 76 53 73 90 76 1 +52 73 82 75 56 77 93 79 56 81 97 83 53 75 89 73 53 84 97 80 57 84 101 83 53 73 94 76 53 73 90 76 53 77 94 76 1 +56 81 97 83 59 84 93 83 59 81 101 83 57 84 101 83 53 84 101 87 50 79 93 80 53 77 94 76 53 77 94 76 53 77 90 76 1 +59 84 93 83 59 81 101 83 56 81 93 79 53 84 101 87 50 79 93 80 53 79 89 76 53 77 94 76 53 77 90 76 57 77 90 76 1 +59 81 101 83 56 81 93 79 56 81 93 79 50 79 93 80 53 79 89 76 57 79 93 80 53 77 90 76 57 77 90 76 53 77 90 76 1 +56 81 93 79 56 81 93 79 56 84 105 86 53 79 89 76 57 79 93 80 57 88 101 83 57 77 90 76 53 77 90 76 53 77 94 79 1 +56 81 93 79 56 84 105 86 63 99 114 94 57 79 93 80 57 88 101 83 57 88 101 83 53 77 90 76 53 77 94 79 57 81 94 79 1 +56 84 105 86 63 99 114 94 67 99 110 94 57 88 101 83 57 88 101 83 60 88 101 83 53 77 94 79 57 81 94 79 60 77 90 79 1 +67 99 110 94 63 95 110 90 63 95 105 90 60 88 101 83 60 75 93 83 63 79 97 83 60 77 90 79 64 81 90 83 64 85 94 83 1 +63 95 105 90 63 99 110 90 63 103 119 90 63 79 97 83 63 88 105 90 67 97 114 90 64 85 94 83 64 85 98 83 68 89 102 87 1 +63 99 110 90 63 103 119 90 67 99 114 94 63 88 105 90 67 97 114 90 70 106 114 94 64 85 98 83 68 89 102 87 64 98 111 91 1 +63 103 119 90 67 99 114 94 63 99 114 94 67 97 114 90 70 106 114 94 67 97 114 87 68 89 102 87 64 98 111 91 68 94 115 91 1 +63 99 114 94 63 103 114 90 63 103 119 90 67 97 114 87 63 97 114 90 67 102 114 90 68 94 115 91 60 89 102 83 60 85 102 83 1 +63 103 114 90 63 103 119 90 59 99 114 90 63 97 114 90 67 102 114 90 63 102 114 90 60 89 102 83 60 85 102 83 64 98 115 91 1 +63 103 119 90 59 99 114 90 59 95 110 86 67 102 114 90 63 102 114 90 63 106 114 90 60 85 102 83 64 98 115 91 68 106 115 94 1 +59 99 114 90 59 95 110 86 56 84 101 83 63 102 114 90 63 106 114 90 60 92 105 87 64 98 115 91 68 106 115 94 64 98 111 91 1 +59 81 105 86 59 88 105 86 59 91 110 86 53 84 105 83 57 88 105 87 60 97 105 87 53 89 106 87 57 94 111 87 
57 94 106 83 1 +59 88 105 86 59 91 110 86 63 99 110 94 57 88 105 87 60 97 105 87 63 92 110 94 57 94 111 87 57 94 106 83 60 85 102 87 1 +63 95 105 90 67 99 110 94 63 103 119 94 63 92 105 87 63 97 114 90 67 102 119 97 60 85 102 87 64 98 111 91 68 106 111 98 1 +71 112 130 101 71 112 119 98 67 108 114 98 70 111 124 101 67 106 119 97 67 111 114 97 64 106 120 98 68 111 125 98 68 102 115 94 1 +56 70 110 98 52 54 97 105 49 45 110 124 60 88 110 97 47 40 105 122 44 31 114 136 60 89 111 94 53 59 106 113 50 31 115 128 2 +52 54 97 105 49 45 110 124 46 32 119 135 47 40 105 122 44 31 114 136 44 31 110 140 53 59 106 113 50 31 115 128 47 31 111 131 2 +46 30 119 139 42 32 114 135 42 30 110 139 44 31 114 140 44 31 114 133 44 31 114 133 47 34 111 128 44 34 115 128 44 31 115 131 2 +42 32 114 135 42 30 110 139 42 30 114 135 44 31 114 133 44 31 114 133 44 31 110 133 44 34 115 128 44 31 115 131 44 31 115 131 2 +42 30 110 139 42 30 114 135 46 34 110 124 44 31 114 133 44 31 110 133 44 29 114 136 44 31 115 131 44 31 115 131 47 31 111 124 2 +42 30 114 135 46 34 110 124 49 51 101 101 44 31 110 133 44 29 114 136 44 29 114 133 44 31 115 131 47 31 111 124 47 37 106 124 2 +46 34 110 124 49 51 101 101 56 73 97 79 44 29 114 136 44 29 114 133 47 37 114 122 47 31 111 124 47 37 106 124 50 43 98 109 2 +49 51 101 101 56 73 97 79 63 88 105 83 44 29 114 133 47 37 114 122 50 63 97 90 47 37 106 124 50 43 98 109 53 55 98 91 2 +63 88 105 83 67 84 105 94 67 88 110 98 50 63 97 90 63 84 97 80 70 88 105 87 53 55 98 91 57 73 86 72 64 85 98 79 5 +67 88 119 98 75 91 110 94 79 91 119 98 74 92 114 94 74 92 110 94 70 88 114 97 64 85 102 91 64 77 106 98 68 69 111 98 5 +79 99 110 86 71 77 86 75 59 60 72 72 74 88 110 94 78 84 93 80 63 75 89 73 68 73 111 91 68 77 98 79 72 77 94 76 7 +59 60 72 72 59 63 79 72 59 60 75 68 63 75 89 73 60 71 82 65 63 67 78 69 72 77 94 76 76 85 98 76 72 81 86 72 7 +59 63 79 72 59 60 75 68 52 54 75 68 60 71 82 65 63 67 78 69 60 63 74 69 76 85 98 76 72 81 86 72 68 73 78 65 7 +59 60 75 68 52 54 75 68 52 60 72 64 63 67 78 69 60 63 74 69 60 63 78 65 72 81 86 72 68 73 78 65 64 66 74 65 7 +52 54 75 68 52 60 72 64 59 63 68 68 60 63 74 69 60 63 78 65 63 71 70 62 68 73 78 65 64 66 74 65 64 73 82 68 7 +82 102 110 83 85 102 105 80 85 97 101 80 88 102 106 83 88 106 102 83 88 102 102 79 84 99 100 81 80 99 104 78 80 91 96 78 3 +85 97 101 80 82 97 101 76 82 97 101 80 88 102 102 79 80 98 98 76 80 98 102 79 80 91 96 78 80 95 100 78 80 95 100 78 3 +89 102 110 87 89 102 114 87 89 106 114 94 84 102 106 87 84 106 111 87 88 111 115 91 84 103 108 88 88 112 113 88 92 112 118 88 3 +89 102 114 87 89 106 114 94 93 115 124 94 84 106 111 87 88 111 115 91 92 115 115 94 88 112 113 88 92 112 118 88 88 99 104 88 3 +93 115 124 94 97 115 124 97 93 106 114 94 92 115 115 94 92 106 111 87 88 102 106 83 88 99 104 88 80 99 104 81 84 103 104 81 3 +97 115 124 97 93 106 114 94 89 97 101 80 92 106 111 87 88 102 106 83 88 102 106 83 80 99 104 81 84 103 104 81 84 103 104 85 3 +93 106 114 94 89 97 101 80 85 97 105 80 88 102 106 83 88 102 106 83 88 98 106 79 84 103 104 81 84 103 104 85 84 99 104 81 3 +85 97 105 80 85 106 105 83 85 102 101 83 88 98 106 79 84 102 106 79 84 94 102 79 84 99 104 81 84 99 100 81 88 99 104 85 3 +85 106 105 83 85 102 101 83 82 92 105 76 84 102 106 79 84 94 102 79 84 98 98 79 84 99 100 81 88 99 104 85 84 99 100 81 3 +82 92 105 76 85 92 101 83 85 92 105 83 84 98 98 79 84 94 102 79 84 102 111 87 84 99 100 81 84 99 104 85 88 103 108 88 3 +85 92 101 83 85 92 105 83 89 102 110 87 84 94 102 79 84 102 111 87 88 106 102 91 84 99 104 85 88 103 108 88 88 
99 113 92 3 +85 92 105 83 89 102 110 87 85 102 114 87 84 102 111 87 88 106 102 91 84 98 102 87 88 103 108 88 88 99 113 92 76 95 104 88 3 +89 102 110 87 85 102 114 87 78 92 101 87 88 106 102 91 84 98 102 87 72 94 106 87 88 99 113 92 76 95 104 88 68 99 113 88 3 +74 97 105 94 67 97 110 94 57 97 110 94 64 98 111 91 57 94 111 91 53 85 102 83 60 91 108 88 53 87 104 85 50 75 96 78 1 +67 97 110 94 57 97 110 94 53 88 101 83 57 94 111 91 53 85 102 83 50 73 90 76 53 87 104 85 50 75 96 78 50 71 91 78 1 +57 97 110 94 53 88 101 83 50 71 89 76 53 85 102 83 50 73 90 76 50 69 86 72 50 75 96 78 50 71 91 78 50 68 87 74 1 +53 88 101 83 50 71 89 76 47 71 89 80 50 73 90 76 50 69 86 72 53 69 86 72 50 71 91 78 50 68 87 74 50 71 87 70 1 +47 71 89 80 50 71 85 76 47 67 85 69 53 69 86 72 53 69 82 72 50 66 82 72 50 71 87 70 50 71 87 74 50 75 91 74 1 +50 71 85 76 47 67 85 69 47 71 85 73 53 69 82 72 50 66 82 72 50 73 90 76 50 71 87 74 50 75 91 74 53 75 87 78 1 +50 79 89 76 50 79 93 76 50 79 89 76 50 73 90 76 50 77 98 79 53 77 94 79 50 75 91 81 50 75 96 78 56 75 91 74 1 +50 79 93 76 50 79 89 76 50 71 82 73 50 77 98 79 53 77 94 79 50 73 90 76 50 75 96 78 56 75 91 74 56 68 83 67 1 +50 79 89 76 50 71 82 73 47 67 82 65 53 77 94 79 50 73 90 76 50 69 86 72 56 75 91 74 56 68 83 67 53 68 83 70 1 +47 67 82 65 50 71 85 73 53 75 89 73 50 69 86 72 53 69 82 72 53 73 94 76 53 68 83 70 53 71 87 74 53 75 91 78 1 +50 71 85 73 53 75 89 73 53 84 97 80 53 69 82 72 53 73 94 76 53 73 90 76 53 71 87 74 53 75 91 78 53 79 96 70 1 +53 75 89 73 53 84 97 80 57 84 101 83 53 73 94 76 53 73 90 76 53 77 94 76 53 75 91 78 53 79 96 70 53 79 96 81 1 +53 84 97 80 57 84 101 83 53 84 101 87 53 73 90 76 53 77 94 76 53 77 94 76 53 79 96 70 53 79 96 81 56 83 96 78 1 +57 84 101 83 53 84 101 87 50 79 93 80 53 77 94 76 53 77 94 76 53 77 90 76 53 79 96 81 56 83 96 78 56 83 100 81 1 +50 79 93 80 53 79 89 76 57 79 93 80 53 77 90 76 57 77 90 76 53 77 90 76 56 83 100 81 60 87 104 85 60 83 100 85 1 +57 79 93 80 57 88 101 83 57 88 101 83 53 77 90 76 53 77 94 79 57 81 94 79 60 83 100 85 56 79 91 78 60 79 96 85 1 +60 88 101 83 60 75 93 83 63 79 97 83 60 77 90 79 64 81 90 83 64 85 94 83 64 91 100 81 68 87 96 81 60 83 96 81 1 +63 88 105 90 67 97 114 90 70 106 114 94 64 85 98 83 68 89 102 87 64 98 111 91 64 87 104 85 68 91 104 88 68 91 104 85 1 +70 106 114 94 67 97 114 87 63 97 114 90 64 98 111 91 68 94 115 91 60 89 102 83 68 91 104 85 68 87 104 88 60 75 91 78 1 +67 97 114 87 63 97 114 90 67 102 114 90 68 94 115 91 60 89 102 83 60 85 102 83 68 87 104 88 60 75 91 78 56 68 83 74 1 +63 97 114 90 67 102 114 90 63 102 114 90 60 89 102 83 60 85 102 83 64 98 115 91 60 75 91 78 56 68 83 74 64 83 96 88 1 +67 102 114 90 63 102 114 90 63 106 114 90 60 85 102 83 64 98 115 91 68 106 115 94 56 68 83 74 64 83 96 88 68 99 113 88 1 +63 106 114 90 60 92 105 87 53 84 110 87 68 106 115 94 64 98 111 91 57 94 111 87 68 99 113 88 68 99 108 85 56 91 104 88 1 +53 84 110 87 53 84 105 83 57 88 105 87 57 94 111 87 53 89 106 87 57 94 111 87 56 91 104 88 56 95 108 92 56 87 108 85 1 +53 84 105 83 57 88 105 87 60 97 105 87 53 89 106 87 57 94 111 87 57 94 106 83 56 95 108 92 56 87 108 85 56 83 100 85 1 +57 88 105 87 60 97 105 87 63 92 110 94 57 94 111 87 57 94 106 83 60 85 102 87 56 87 108 85 56 83 100 85 56 83 96 85 1 +63 92 105 87 63 97 114 90 67 102 119 97 60 85 102 87 64 98 111 91 68 106 111 98 60 91 100 85 60 99 108 92 64 99 113 92 1 +63 97 114 90 67 102 119 97 74 106 124 104 64 98 111 91 68 106 111 98 72 111 120 102 60 99 108 92 64 99 113 92 68 99 118 99 1 +67 102 119 97 74 106 124 104 78 111 129 101 
68 106 111 98 72 111 120 102 80 115 125 102 64 99 113 92 68 99 118 99 71 107 122 103 1 +78 111 129 101 67 102 119 97 67 106 124 97 80 115 125 102 68 111 120 98 64 106 115 94 71 107 122 103 71 112 122 99 68 112 122 99 1 +70 111 124 101 67 106 119 97 67 111 114 97 64 106 120 98 68 111 125 98 68 102 115 94 71 112 128 99 71 103 122 96 64 91 104 92 1 +67 111 114 97 60 88 110 97 47 40 105 122 68 102 115 94 60 89 111 94 53 59 106 113 64 91 104 92 60 91 108 88 60 83 108 92 1 +44 31 114 136 44 31 110 140 44 31 114 140 50 31 115 128 47 31 111 131 47 34 111 128 60 64 100 99 53 51 104 114 50 36 113 128 2 +44 31 114 133 44 31 110 133 44 29 114 136 44 31 115 131 44 31 115 131 47 31 111 124 46 39 108 114 50 48 104 107 50 57 96 96 2 +44 31 110 133 44 29 114 136 44 29 114 133 44 31 115 131 47 31 111 124 47 37 106 124 50 48 104 107 50 57 96 96 56 61 96 88 2 +44 29 114 136 44 29 114 133 47 37 114 122 47 31 111 124 47 37 106 124 50 43 98 109 50 57 96 96 56 61 96 88 56 61 91 85 2 +47 37 114 122 50 63 97 90 63 84 97 80 50 43 98 109 53 55 98 91 57 73 86 72 56 61 91 85 56 64 91 85 60 64 91 81 2 +63 84 97 80 70 88 105 87 74 92 114 94 57 73 86 72 64 85 98 79 64 85 102 91 60 64 91 81 60 75 96 78 64 68 104 88 5 +74 92 114 94 74 92 110 94 70 88 114 97 64 85 102 91 64 77 106 98 68 69 111 98 64 68 104 88 64 64 108 92 60 61 108 99 5 +74 92 110 94 70 88 114 97 74 88 110 94 64 77 106 98 68 69 111 98 68 73 111 91 64 64 108 92 60 61 108 99 64 61 108 99 5 +70 88 114 97 74 88 110 94 78 84 93 80 68 69 111 98 68 73 111 91 68 77 98 79 60 61 108 99 64 61 108 99 64 68 108 92 5 +78 84 93 80 63 75 89 73 60 71 82 65 68 77 98 79 72 77 94 76 76 85 98 76 64 68 108 92 71 83 100 81 80 99 104 85 7 +63 75 89 73 60 71 82 65 63 67 78 69 72 77 94 76 76 85 98 76 72 81 86 72 71 83 100 81 80 99 104 85 80 95 100 81 4 +60 71 82 65 63 67 78 69 60 63 74 69 76 85 98 76 72 81 86 72 68 73 78 65 80 99 104 85 80 95 100 81 71 79 91 74 4 +63 67 78 69 60 63 74 69 60 63 78 65 72 81 86 72 68 73 78 65 64 66 74 65 80 95 100 81 71 79 91 74 68 71 83 67 7 +60 63 74 69 60 63 78 65 63 71 70 62 68 73 78 65 64 66 74 65 64 73 82 68 71 79 91 74 68 71 83 67 68 71 83 70 7 +88 102 106 83 88 106 102 83 88 102 102 79 84 99 100 81 80 99 104 78 80 91 96 78 84 95 100 79 88 99 104 83 88 103 104 83 3 +80 98 98 76 80 98 102 79 84 98 106 83 80 95 100 78 80 95 100 78 80 91 96 74 84 95 100 79 79 99 96 79 79 91 96 79 3 +84 98 106 83 84 102 106 87 84 102 106 87 80 91 96 74 80 95 100 81 84 103 108 88 79 91 96 79 84 95 100 79 84 99 104 83 3 +84 102 106 87 84 102 106 87 84 106 111 87 80 95 100 81 84 103 108 88 88 112 113 88 84 95 100 79 84 99 104 83 88 107 113 87 3 +88 111 115 91 92 115 115 94 92 106 111 87 92 112 118 88 88 99 104 88 80 99 104 81 88 107 109 87 84 99 104 79 84 99 104 79 3 +92 115 115 94 92 106 111 87 88 102 106 83 88 99 104 88 80 99 104 81 84 103 104 81 84 99 104 79 84 99 104 79 88 99 109 83 3 +88 102 106 83 88 102 106 83 88 98 106 79 84 103 104 81 84 103 104 85 84 99 104 81 88 99 109 83 84 103 100 83 84 99 104 83 3 +88 98 106 79 84 102 106 79 84 94 102 79 84 99 104 81 84 99 100 81 88 99 104 85 84 99 104 83 88 99 109 83 84 99 100 79 3 +84 102 106 79 84 94 102 79 84 98 98 79 84 99 100 81 88 99 104 85 84 99 100 81 88 99 109 83 84 99 100 79 84 103 104 83 3 +84 94 102 79 84 98 98 79 84 94 102 79 88 99 104 85 84 99 100 81 84 99 104 85 84 99 100 79 84 103 104 83 88 103 113 87 3 +84 98 98 79 84 94 102 79 84 102 111 87 84 99 100 81 84 99 104 85 88 103 108 88 84 103 104 83 88 103 113 87 88 103 109 92 3 +84 102 111 87 88 106 102 91 84 98 102 87 88 103 108 88 88 99 113 92 76 95 104 88 
88 103 109 92 79 95 100 87 67 95 109 92 3 +88 106 102 91 84 98 102 87 72 94 106 87 88 99 113 92 76 95 104 88 68 99 113 88 79 95 100 87 67 95 109 92 63 95 113 87 3 +64 98 111 91 57 94 111 91 53 85 102 83 60 91 108 88 53 87 104 85 50 75 96 78 55 83 100 83 51 75 93 79 51 64 85 75 1 +57 94 111 91 53 85 102 83 50 73 90 76 53 87 104 85 50 75 96 78 50 71 91 78 51 75 93 79 51 64 85 75 48 61 81 67 1 +53 85 102 83 50 73 90 76 50 69 86 72 50 75 96 78 50 71 91 78 50 68 87 74 51 64 85 75 48 61 81 67 48 64 85 71 1 +53 69 86 72 53 69 82 72 50 66 82 72 50 71 87 70 50 71 87 74 50 75 91 74 51 72 85 75 51 72 85 75 48 72 89 75 1 +53 69 82 72 50 66 82 72 50 73 90 76 50 71 87 74 50 75 91 74 53 75 87 78 51 72 85 75 48 72 89 75 51 83 93 75 1 +50 66 82 72 50 73 90 76 53 77 94 76 50 75 91 74 53 75 87 78 53 75 87 78 48 72 89 75 51 83 93 75 55 79 96 79 1 +50 73 90 76 53 77 94 76 50 73 90 76 53 75 87 78 53 75 87 78 50 75 91 81 51 83 93 75 55 79 96 79 51 75 93 75 1 +50 73 90 76 50 77 98 79 53 77 94 79 50 75 91 81 50 75 96 78 56 75 91 74 51 75 93 75 51 75 89 75 55 72 89 71 1 +53 77 94 79 50 73 90 76 50 69 86 72 56 75 91 74 56 68 83 67 53 68 83 70 55 72 89 71 55 68 81 71 51 72 81 71 1 +50 73 90 76 50 69 86 72 53 69 82 72 56 68 83 67 53 68 83 70 53 71 87 74 55 68 81 71 51 72 81 71 55 75 85 75 1 +50 69 86 72 53 69 82 72 53 73 94 76 53 68 83 70 53 71 87 74 53 75 91 78 51 72 81 71 55 75 85 75 55 79 89 79 1 +53 69 82 72 53 73 94 76 53 73 90 76 53 71 87 74 53 75 91 78 53 79 96 70 55 75 85 75 55 79 89 79 55 79 96 79 1 +53 73 94 76 53 73 90 76 53 77 94 76 53 75 91 78 53 79 96 70 53 79 96 81 55 79 89 79 55 79 96 79 59 83 96 79 1 +53 77 94 76 53 77 94 76 53 77 90 76 53 79 96 81 56 83 96 78 56 83 100 81 59 83 96 79 71 99 104 87 67 103 109 87 1 +53 77 90 76 57 77 90 76 53 77 90 76 56 83 100 81 60 87 104 85 60 83 100 85 67 103 109 87 63 91 109 87 59 75 96 79 1 +57 77 90 76 53 77 90 76 53 77 94 79 60 87 104 85 60 83 100 85 56 79 91 78 63 91 109 87 59 75 96 79 59 83 96 79 1 +53 77 94 79 57 81 94 79 60 77 90 79 56 79 91 78 60 79 96 85 64 91 100 81 59 83 96 79 63 91 100 83 67 91 109 87 1 +57 81 94 79 60 77 90 79 64 81 90 83 60 79 96 85 64 91 100 81 68 87 96 81 63 91 100 83 67 91 109 87 75 91 109 92 1 +60 77 90 79 64 81 90 83 64 85 94 83 64 91 100 81 68 87 96 81 60 83 96 81 67 91 109 87 75 91 109 92 75 95 104 87 1 +64 85 94 83 64 85 98 83 68 89 102 87 60 83 96 81 64 87 104 85 68 91 104 88 75 95 104 87 71 95 104 87 75 91 109 92 1 +64 85 98 83 68 89 102 87 64 98 111 91 64 87 104 85 68 91 104 88 68 91 104 85 71 95 104 87 75 91 109 92 75 95 104 87 1 +68 94 115 91 60 89 102 83 60 85 102 83 68 87 104 88 60 75 91 78 56 68 83 74 67 83 96 79 59 72 85 71 55 68 85 75 1 +60 89 102 83 60 85 102 83 64 98 115 91 60 75 91 78 56 68 83 74 64 83 96 88 59 72 85 71 55 68 85 75 63 79 96 83 1 +64 98 111 91 57 94 111 87 53 89 106 87 68 99 108 85 56 91 104 88 56 95 108 92 67 103 109 92 63 95 109 87 59 95 113 92 1 +53 89 106 87 57 94 111 87 57 94 106 83 56 95 108 92 56 87 108 85 56 83 100 85 59 95 113 92 63 95 109 87 63 87 100 83 1 +57 94 111 87 57 94 106 83 60 85 102 87 56 87 108 85 56 83 100 85 56 83 96 85 63 95 109 87 63 87 100 83 63 87 100 87 1 +57 94 106 83 60 85 102 87 60 85 102 87 56 83 100 85 56 83 96 85 60 91 100 85 63 87 100 83 63 87 100 87 63 95 104 92 1 +60 85 102 87 60 85 102 87 64 98 111 91 56 83 96 85 60 91 100 85 60 99 108 92 63 87 100 87 63 95 104 92 63 99 113 92 1 +60 85 102 87 64 98 111 91 68 106 111 98 60 91 100 85 60 99 108 92 64 99 113 92 63 95 104 92 63 99 113 92 63 103 113 96 1 +72 111 120 102 80 115 125 102 68 111 120 98 68 99 118 99 71 
107 122 103 71 112 122 99 71 103 113 96 71 103 113 96 71 107 123 100 1 +80 115 125 102 68 111 120 98 64 106 115 94 71 107 122 103 71 112 122 99 68 112 122 99 71 103 113 96 71 107 123 100 71 111 118 96 1 +68 111 120 98 64 106 115 94 64 106 120 98 71 112 122 99 68 112 122 99 71 112 128 99 71 107 123 100 71 111 118 96 67 99 113 96 1 +64 106 115 94 64 106 120 98 68 111 125 98 68 112 122 99 71 112 128 99 71 103 122 96 71 111 118 96 67 99 113 96 67 91 104 92 1 +68 111 125 98 68 102 115 94 60 89 111 94 71 103 122 96 64 91 104 92 60 91 108 88 67 91 104 92 59 75 100 83 59 87 104 92 1 +60 89 111 94 53 59 106 113 50 31 115 128 60 91 108 88 60 83 108 92 60 64 100 99 59 87 104 92 67 99 109 92 67 87 100 83 1 +53 59 106 113 50 31 115 128 47 31 111 131 60 83 108 92 60 64 100 99 53 51 104 114 67 99 109 92 67 87 100 83 63 79 100 87 2 +50 31 115 128 47 31 111 131 47 34 111 128 60 64 100 99 53 51 104 114 50 36 113 128 67 87 100 83 63 79 100 87 59 68 96 92 2 +47 34 111 128 44 34 115 128 44 31 115 131 50 36 113 128 43 36 118 128 46 39 108 114 59 68 96 92 55 61 100 96 55 64 104 92 2 +44 31 115 131 44 31 115 131 47 31 111 124 46 39 108 114 50 48 104 107 50 57 96 96 55 64 104 92 59 64 100 92 55 61 100 87 2 +47 37 106 124 50 43 98 109 53 55 98 91 56 61 96 88 56 61 91 85 56 64 91 85 55 58 96 87 59 58 93 83 59 61 89 79 5 +53 55 98 91 57 73 86 72 64 85 98 79 56 64 91 85 60 64 91 81 60 75 96 78 59 61 89 79 59 61 85 75 59 75 89 79 5 +57 73 86 72 64 85 98 79 64 85 102 91 60 64 91 81 60 75 96 78 64 68 104 88 59 61 85 75 59 75 89 79 59 64 100 92 5 +68 73 111 91 68 77 98 79 72 77 94 76 64 61 108 99 64 68 108 92 71 83 100 81 59 61 109 100 63 64 104 96 71 79 96 79 5 +72 77 94 76 76 85 98 76 72 81 86 72 71 83 100 81 80 99 104 85 80 95 100 81 71 79 96 79 79 95 96 79 79 95 96 79 4 +72 81 86 72 68 73 78 65 64 66 74 65 80 95 100 81 71 79 91 74 68 71 83 67 79 95 96 79 75 87 93 79 71 75 85 71 4 +84 99 100 81 80 99 104 78 80 91 96 78 84 95 100 79 88 99 104 83 88 103 104 83 82 96 100 81 86 96 104 81 86 96 108 81 3 +80 99 104 78 80 91 96 78 80 95 100 78 88 99 104 83 88 103 104 83 84 95 100 79 86 96 104 81 86 96 108 81 86 104 108 81 3 +80 91 96 78 80 95 100 78 80 95 100 78 88 103 104 83 84 95 100 79 79 99 96 79 86 96 108 81 86 104 108 81 86 96 104 81 3 +80 95 100 78 80 91 96 74 80 95 100 81 79 99 96 79 79 91 96 79 84 95 100 79 86 96 104 81 82 96 100 78 82 96 100 81 3 +80 91 96 74 80 95 100 81 84 103 108 88 79 91 96 79 84 95 100 79 84 99 104 83 82 96 100 78 82 96 100 81 82 91 104 78 3 +80 95 100 81 84 103 108 88 88 112 113 88 84 95 100 79 84 99 104 83 88 107 113 87 82 96 100 81 82 91 104 78 86 100 108 85 3 +84 103 108 88 88 112 113 88 92 112 118 88 84 99 104 83 88 107 113 87 88 107 109 87 82 91 104 78 86 100 108 85 90 109 112 92 3 +88 112 113 88 92 112 118 88 88 99 104 88 88 107 113 87 88 107 109 87 84 99 104 79 86 100 108 85 90 109 112 92 90 104 112 89 3 +92 112 118 88 88 99 104 88 80 99 104 81 88 107 109 87 84 99 104 79 84 99 104 79 90 109 112 92 90 104 112 89 90 100 108 85 3 +88 99 104 88 80 99 104 81 84 103 104 81 84 99 104 79 84 99 104 79 88 99 109 83 90 104 112 89 90 100 108 85 86 104 104 81 3 +80 99 104 81 84 103 104 81 84 103 104 85 84 99 104 79 88 99 109 83 84 103 100 83 90 100 108 85 86 104 104 81 86 100 108 85 3 +84 103 104 81 84 103 104 85 84 99 104 81 88 99 109 83 84 103 100 83 84 99 104 83 86 104 104 81 86 100 108 85 86 104 112 85 3 +84 103 104 85 84 99 104 81 84 99 100 81 84 103 100 83 84 99 104 83 88 99 109 83 86 100 108 85 86 104 112 85 86 100 104 81 3 +88 99 104 85 84 99 100 81 84 99 104 85 84 99 100 79 84 103 104 83 88 
103 113 87 82 96 104 81 82 100 104 81 82 104 112 85 3 +84 99 100 81 84 99 104 85 88 103 108 88 84 103 104 83 88 103 113 87 88 103 109 92 82 100 104 81 82 104 112 85 86 104 108 92 3 +84 99 104 85 88 103 108 88 88 99 113 92 88 103 113 87 88 103 109 92 79 95 100 87 82 104 112 85 86 104 108 92 82 100 108 89 3 +88 103 108 88 88 99 113 92 76 95 104 88 88 103 109 92 79 95 100 87 67 95 109 92 86 104 108 92 82 100 108 89 74 96 104 89 3 +88 99 113 92 76 95 104 88 68 99 113 88 79 95 100 87 67 95 109 92 63 95 113 87 82 100 108 89 74 96 104 89 63 96 100 92 1 +76 95 104 88 68 99 113 88 60 91 108 88 67 95 109 92 63 95 113 87 55 83 100 83 74 96 104 89 63 96 100 92 56 91 108 89 1 +68 99 113 88 60 91 108 88 53 87 104 85 63 95 113 87 55 83 100 83 51 75 93 79 63 96 100 92 56 91 108 89 52 83 100 81 1 +60 91 108 88 53 87 104 85 50 75 96 78 55 83 100 83 51 75 93 79 51 64 85 75 56 91 108 89 52 83 100 81 49 75 92 78 1 +53 87 104 85 50 75 96 78 50 71 91 78 51 75 93 79 51 64 85 75 48 61 81 67 52 83 100 81 49 75 92 78 46 75 96 78 1 +50 71 87 70 50 71 87 74 50 75 91 74 51 72 85 75 51 72 85 75 48 72 89 75 46 67 84 74 49 71 92 74 49 71 84 78 1 +50 71 87 74 50 75 91 74 53 75 87 78 51 72 85 75 48 72 89 75 51 83 93 75 49 71 92 74 49 71 84 78 49 71 88 74 1 +50 75 91 74 53 75 87 78 53 75 87 78 48 72 89 75 51 83 93 75 55 79 96 79 49 71 84 78 49 71 88 74 52 79 96 78 1 +53 75 87 78 53 75 87 78 50 75 91 81 51 83 93 75 55 79 96 79 51 75 93 75 49 71 88 74 52 79 96 78 52 79 92 81 1 +50 75 96 78 56 75 91 74 56 68 83 67 51 75 89 75 55 72 89 71 55 68 81 71 52 71 84 74 52 71 84 70 52 71 80 70 1 +53 71 87 74 53 75 91 78 53 79 96 70 55 75 85 75 55 79 89 79 55 79 96 79 56 75 92 74 56 79 88 78 56 83 92 81 1 +53 75 91 78 53 79 96 70 53 79 96 81 55 79 89 79 55 79 96 79 59 83 96 79 56 79 88 78 56 83 92 81 56 83 100 78 1 +53 79 96 70 53 79 96 81 56 83 96 78 55 79 96 79 59 83 96 79 71 99 104 87 56 83 92 81 56 83 100 78 59 87 96 81 1 +53 79 96 81 56 83 96 78 56 83 100 81 59 83 96 79 71 99 104 87 67 103 109 87 56 83 100 78 59 87 96 81 66 100 108 89 1 +56 83 96 78 56 83 100 81 60 87 104 85 71 99 104 87 67 103 109 87 63 91 109 87 59 87 96 81 66 100 108 89 66 96 108 92 1 +60 87 104 85 60 83 100 85 56 79 91 78 63 91 109 87 59 75 96 79 59 83 96 79 66 96 108 92 59 91 100 85 56 79 96 81 1 +60 83 100 85 56 79 91 78 60 79 96 85 59 75 96 79 59 83 96 79 63 91 100 83 59 91 100 85 56 79 96 81 59 83 96 81 1 +56 79 91 78 60 79 96 85 64 91 100 81 59 83 96 79 63 91 100 83 67 91 109 87 56 79 96 81 59 83 96 81 63 83 100 85 1 +64 87 104 85 68 91 104 88 68 91 104 85 71 95 104 87 75 91 109 92 75 95 104 87 66 83 100 81 66 83 96 81 66 87 104 89 1 +68 91 104 88 68 91 104 85 68 87 104 88 75 91 109 92 75 95 104 87 67 83 96 79 66 83 96 81 66 87 104 89 70 96 104 89 1 +68 87 104 88 60 75 91 78 56 68 83 74 67 83 96 79 59 72 85 71 55 68 85 75 70 96 104 89 63 79 88 78 56 63 84 70 1 +68 99 113 88 68 99 108 85 56 91 104 88 67 99 109 92 67 103 109 92 63 95 109 87 59 79 96 81 63 87 108 89 63 91 112 89 1 +56 91 104 88 56 95 108 92 56 87 108 85 63 95 109 87 59 95 113 92 63 95 109 87 63 91 112 89 63 96 112 89 63 100 122 92 1 +56 83 100 85 56 83 96 85 60 91 100 85 63 87 100 83 63 87 100 87 63 95 104 92 63 104 117 92 63 96 108 89 66 96 112 89 1 +56 83 96 85 60 91 100 85 60 99 108 92 63 87 100 87 63 95 104 92 63 99 113 92 63 96 108 89 66 96 112 89 66 100 112 92 1 +64 99 113 92 68 99 118 99 71 107 122 103 63 103 113 96 71 103 113 96 71 103 113 96 70 100 112 92 70 104 112 96 70 104 112 96 1 +68 99 118 99 71 107 122 103 71 112 122 99 71 103 113 96 71 103 113 96 71 107 123 100 70 104 112 96 
70 104 112 96 70 100 112 92 1 +71 112 122 99 68 112 122 99 71 112 128 99 71 107 123 100 71 111 118 96 67 99 113 96 70 100 112 92 70 100 112 96 66 104 122 96 1 +71 103 122 96 64 91 104 92 60 91 108 88 67 91 104 92 59 75 100 83 59 87 104 92 70 100 117 96 63 83 104 89 59 79 92 81 1 +64 91 104 92 60 91 108 88 60 83 108 92 59 75 100 83 59 87 104 92 67 99 109 92 63 83 104 89 59 79 92 81 63 75 104 85 1 +60 83 108 92 60 64 100 99 53 51 104 114 67 99 109 92 67 87 100 83 63 79 100 87 63 75 104 85 70 100 112 92 70 100 108 89 1 +60 64 100 99 53 51 104 114 50 36 113 128 67 87 100 83 63 79 100 87 59 68 96 92 70 100 112 92 70 100 108 89 66 79 96 85 1 +53 51 104 114 50 36 113 128 43 36 118 128 63 79 100 87 59 68 96 92 55 61 100 96 70 100 108 89 66 79 96 85 63 71 104 92 5 +50 36 113 128 43 36 118 128 46 39 108 114 59 68 96 92 55 61 100 96 55 64 104 92 66 79 96 85 63 71 104 92 59 67 104 96 2 +50 48 104 107 50 57 96 96 56 61 96 88 59 64 100 92 55 61 100 87 55 58 96 87 59 63 104 96 59 60 100 92 56 60 100 89 5 +50 57 96 96 56 61 96 88 56 61 91 85 55 61 100 87 55 58 96 87 59 58 93 83 59 60 100 92 56 60 100 89 56 60 88 81 5 +56 61 91 85 56 64 91 85 60 64 91 81 59 58 93 83 59 61 89 79 59 61 85 75 56 60 88 81 56 60 88 78 56 60 84 78 5 +56 64 91 85 60 64 91 81 60 75 96 78 59 61 89 79 59 61 85 75 59 75 89 79 56 60 88 78 56 60 84 78 52 56 80 74 5 +60 64 91 81 60 75 96 78 64 68 104 88 59 61 85 75 59 75 89 79 59 64 100 92 56 60 84 78 52 56 80 74 59 67 88 74 5 +60 75 96 78 64 68 104 88 64 64 108 92 59 75 89 79 59 64 100 92 59 58 104 100 52 56 80 74 59 67 88 74 63 71 92 81 5 +64 64 108 92 60 61 108 99 64 61 108 99 59 58 104 100 59 58 104 100 59 61 109 100 63 71 92 81 59 60 96 92 56 63 104 96 5 +64 61 108 99 64 68 108 92 71 83 100 81 59 61 109 100 63 64 104 96 71 79 96 79 56 63 104 96 59 67 104 96 63 67 108 96 5 +64 68 108 92 71 83 100 81 80 99 104 85 63 64 104 96 71 79 96 79 79 95 96 79 59 67 104 96 63 67 108 96 70 75 104 85 4 +71 83 100 81 80 99 104 85 80 95 100 81 71 79 96 79 79 95 96 79 79 95 96 79 63 67 108 96 70 75 104 85 74 87 92 78 4 +88 103 104 83 84 95 100 79 79 99 96 79 86 96 108 81 86 104 108 81 86 96 104 81 83 95 97 79 83 95 105 83 83 95 101 79 3 +84 95 100 79 84 99 104 83 88 107 113 87 82 96 100 81 82 91 104 78 86 100 108 85 83 95 101 79 83 95 105 83 92 103 110 90 3 +84 99 104 83 88 107 113 87 88 107 109 87 82 91 104 78 86 100 108 85 90 109 112 92 83 95 105 83 92 103 110 90 96 112 110 94 3 +88 107 113 87 88 107 109 87 84 99 104 79 86 100 108 85 90 109 112 92 90 104 112 89 92 103 110 90 96 112 110 94 96 108 114 90 3 +88 99 109 83 84 103 100 83 84 99 104 83 86 104 104 81 86 100 108 85 86 104 112 85 87 103 110 83 87 99 105 86 87 99 105 86 3 +88 99 109 83 84 99 100 79 84 103 104 83 86 100 104 81 82 96 104 81 82 100 104 81 83 95 105 83 83 99 105 83 87 103 105 86 3 +84 99 100 79 84 103 104 83 88 103 113 87 82 96 104 81 82 100 104 81 82 104 112 85 83 99 105 83 87 103 105 86 87 99 105 86 3 +88 103 113 87 88 103 109 92 79 95 100 87 82 104 112 85 86 104 108 92 82 100 108 89 87 99 105 86 83 95 105 90 79 99 110 90 3 +88 103 109 92 79 95 100 87 67 95 109 92 86 104 108 92 82 100 108 89 74 96 104 89 83 95 105 90 79 99 110 90 71 103 119 94 3 +79 95 100 87 67 95 109 92 63 95 113 87 82 100 108 89 74 96 104 89 63 96 100 92 79 99 110 90 71 103 119 94 59 95 110 90 1 +67 95 109 92 63 95 113 87 55 83 100 83 74 96 104 89 63 96 100 92 56 91 108 89 71 103 119 94 59 95 110 90 52 84 97 86 1 +63 95 113 87 55 83 100 83 51 75 93 79 63 96 100 92 56 91 108 89 52 83 100 81 59 95 110 90 52 84 97 86 52 81 97 79 1 +51 75 93 79 51 64 85 75 48 61 
81 67 52 83 100 81 49 75 92 78 46 75 96 78 52 81 97 79 52 73 90 79 49 73 97 83 1 +51 64 85 75 48 61 81 67 48 64 85 71 49 75 92 78 46 75 96 78 46 71 84 74 52 73 90 79 49 73 97 83 49 77 93 75 1 +48 64 85 71 51 72 85 75 51 72 85 75 46 71 84 74 46 67 84 74 49 71 92 74 49 77 93 75 46 66 86 72 49 70 86 75 1 +51 72 85 75 51 72 85 75 48 72 89 75 46 67 84 74 49 71 92 74 49 71 84 78 46 66 86 72 49 70 86 75 49 73 90 75 1 +51 72 85 75 48 72 89 75 51 83 93 75 49 71 92 74 49 71 84 78 49 71 88 74 49 70 86 75 49 73 90 75 49 70 86 72 1 +48 72 89 75 51 83 93 75 55 79 96 79 49 71 84 78 49 71 88 74 52 79 96 78 49 73 90 75 49 70 86 72 52 70 82 75 1 +51 83 93 75 55 79 96 79 51 75 93 75 49 71 88 74 52 79 96 78 52 79 92 81 49 70 86 72 52 70 82 75 49 66 86 75 1 +55 79 96 79 51 75 93 75 51 75 89 75 52 79 96 78 52 79 92 81 52 71 84 74 52 70 82 75 49 66 86 75 52 66 86 72 1 +51 75 93 75 51 75 89 75 55 72 89 71 52 79 92 81 52 71 84 74 52 71 84 70 49 66 86 75 52 66 86 72 52 70 86 72 1 +51 75 89 75 55 72 89 71 55 68 81 71 52 71 84 74 52 71 84 70 52 71 80 70 52 66 86 72 52 70 86 72 52 70 86 72 1 +55 72 89 71 55 68 81 71 51 72 81 71 52 71 84 70 52 71 80 70 52 71 84 70 52 70 86 72 52 70 86 72 56 73 86 75 1 +55 68 81 71 51 72 81 71 55 75 85 75 52 71 80 70 52 71 84 70 56 75 92 74 52 70 86 72 56 73 86 75 59 77 90 79 1 +51 72 81 71 55 75 85 75 55 79 89 79 52 71 84 70 56 75 92 74 56 79 88 78 56 73 86 75 59 77 90 79 59 84 97 83 1 +55 75 85 75 55 79 89 79 55 79 96 79 56 75 92 74 56 79 88 78 56 83 92 81 59 77 90 79 59 84 97 83 56 88 97 83 1 +59 83 96 79 71 99 104 87 67 103 109 87 56 83 100 78 59 87 96 81 66 100 108 89 52 84 97 83 56 81 97 79 59 84 93 79 1 +67 103 109 87 63 91 109 87 59 75 96 79 66 100 108 89 66 96 108 92 59 91 100 85 59 84 93 79 59 88 105 86 63 95 110 86 1 +63 91 109 87 59 75 96 79 59 83 96 79 66 96 108 92 59 91 100 85 56 79 96 81 59 88 105 86 63 95 110 86 63 84 101 83 1 +59 75 96 79 59 83 96 79 63 91 100 83 59 91 100 85 56 79 96 81 59 83 96 81 63 95 110 86 63 84 101 83 59 73 93 75 1 +59 83 96 79 63 91 100 83 67 91 109 87 56 79 96 81 59 83 96 81 63 83 100 85 63 84 101 83 59 73 93 75 63 81 93 83 1 +75 95 104 87 71 95 104 87 75 91 109 92 66 87 100 85 66 83 100 81 66 83 96 81 59 88 101 83 67 84 93 83 67 84 97 83 1 +75 91 109 92 75 95 104 87 67 83 96 79 66 83 96 81 66 87 104 89 70 96 104 89 67 84 97 83 59 77 90 75 59 73 97 79 1 +75 95 104 87 67 83 96 79 59 72 85 71 66 87 104 89 70 96 104 89 63 79 88 78 59 77 90 75 59 73 97 79 59 73 93 75 1 +67 83 96 79 59 72 85 71 55 68 85 75 70 96 104 89 63 79 88 78 56 63 84 70 59 73 97 79 59 73 93 75 63 73 93 75 1 +59 72 85 71 55 68 85 75 63 79 96 83 63 79 88 78 56 63 84 70 59 67 84 74 59 73 93 75 63 73 93 75 59 81 93 79 1 +63 79 96 83 67 99 109 92 67 103 109 92 59 67 84 74 59 79 96 81 63 87 108 89 59 81 93 79 63 91 101 90 67 103 114 94 1 +67 103 109 92 63 95 109 87 59 95 113 92 63 87 108 89 63 91 112 89 63 96 112 89 67 103 114 94 63 99 114 90 63 103 114 94 1 +63 95 109 87 59 95 113 92 63 95 109 87 63 91 112 89 63 96 112 89 63 100 122 92 63 99 114 90 63 103 114 94 67 103 114 94 1 +59 95 113 92 63 95 109 87 63 87 100 83 63 96 112 89 63 100 122 92 63 104 117 92 63 103 114 94 67 103 114 94 67 103 114 94 1 +63 87 100 83 63 87 100 87 63 95 104 92 63 104 117 92 63 96 108 89 66 96 112 89 67 103 114 94 67 99 110 94 67 103 114 94 1 +63 87 100 87 63 95 104 92 63 99 113 92 63 96 108 89 66 96 112 89 66 100 112 92 67 99 110 94 67 103 114 94 71 103 114 98 1 +63 99 113 92 63 103 113 96 71 103 113 96 66 100 112 92 70 100 112 92 70 104 112 96 71 103 114 98 75 112 119 98 75 108 114 94 1 +63 103 113 96 
71 103 113 96 71 103 113 96 70 100 112 92 70 104 112 96 70 104 112 96 75 112 119 98 75 108 114 94 71 108 114 94 1 +71 107 123 100 71 111 118 96 67 99 113 96 70 100 112 92 70 100 112 96 66 104 122 96 75 108 119 98 75 103 119 98 71 99 114 98 1 +67 91 104 92 59 75 100 83 59 87 104 92 70 100 117 96 63 83 104 89 59 79 92 81 75 108 124 98 71 99 110 94 67 77 97 79 1 +59 75 100 83 59 87 104 92 67 99 109 92 63 83 104 89 59 79 92 81 63 75 104 85 71 99 110 94 67 77 97 79 63 66 90 79 1 +59 87 104 92 67 99 109 92 67 87 100 83 59 79 92 81 63 75 104 85 70 100 112 92 67 77 97 79 63 66 90 79 63 81 101 86 1 +67 99 109 92 67 87 100 83 63 79 100 87 63 75 104 85 70 100 112 92 70 100 108 89 63 66 90 79 63 81 101 86 71 95 119 94 1 +63 79 100 87 59 68 96 92 55 61 100 96 70 100 108 89 66 79 96 85 63 71 104 92 71 95 119 94 67 88 105 86 63 73 97 86 5 +59 68 96 92 55 61 100 96 55 64 104 92 66 79 96 85 63 71 104 92 59 67 104 96 67 88 105 86 63 73 97 86 59 70 105 94 5 +55 64 104 92 59 64 100 92 55 61 100 87 59 67 104 96 59 63 104 96 59 60 100 92 59 70 105 94 63 66 101 90 59 66 97 86 5 +59 64 100 92 55 61 100 87 55 58 96 87 59 63 104 96 59 60 100 92 56 60 100 89 63 66 101 90 59 66 97 86 59 63 90 83 5 +55 61 100 87 55 58 96 87 59 58 93 83 59 60 100 92 56 60 100 89 56 60 88 81 59 66 97 86 59 63 90 83 59 63 86 83 5 +55 58 96 87 59 58 93 83 59 61 89 79 56 60 100 89 56 60 88 81 56 60 88 78 59 63 90 83 59 63 86 83 56 60 86 79 5 +59 58 93 83 59 61 89 79 59 61 85 75 56 60 88 81 56 60 88 78 56 60 84 78 59 63 86 83 56 60 86 79 52 54 86 83 5 +59 61 89 79 59 61 85 75 59 75 89 79 56 60 88 78 56 60 84 78 52 56 80 74 56 60 86 79 52 54 86 83 49 45 86 86 5 +59 75 89 79 59 64 100 92 59 58 104 100 52 56 80 74 59 67 88 74 63 71 92 81 49 45 86 86 49 51 86 83 59 70 90 72 5 +59 64 100 92 59 58 104 100 59 58 104 100 59 67 88 74 63 71 92 81 59 60 96 92 49 51 86 83 59 70 90 72 59 63 97 90 5 +59 58 104 100 59 58 104 100 59 61 109 100 63 71 92 81 59 60 96 92 56 63 104 96 59 70 90 72 59 63 97 90 59 60 97 90 5 +59 58 104 100 59 61 109 100 63 64 104 96 59 60 96 92 56 63 104 96 59 67 104 96 59 63 97 90 59 60 97 90 59 63 93 90 5 +71 79 96 79 79 95 96 79 79 95 96 79 63 67 108 96 70 75 104 85 74 87 92 78 63 66 97 94 67 77 110 90 75 91 97 79 5 +79 95 96 79 79 95 96 79 75 87 93 79 70 75 104 85 74 87 92 78 74 91 100 81 67 77 110 90 75 91 97 79 79 91 97 83 4 +79 95 96 79 75 87 93 79 71 75 85 71 74 87 92 78 74 91 100 81 78 96 96 81 75 91 97 79 79 91 97 83 79 91 97 79 4 +75 87 93 79 71 75 85 71 75 79 89 71 74 91 100 81 78 96 96 81 78 91 96 78 79 91 97 83 79 91 97 79 75 88 93 75 4 +82 96 100 81 86 96 104 81 86 96 108 81 83 91 97 79 79 95 97 75 83 95 97 79 78 92 101 80 78 92 97 76 78 92 101 76 3 +86 96 104 81 86 96 108 81 86 104 108 81 79 95 97 75 83 95 97 79 83 95 105 83 78 92 97 76 78 92 101 76 78 92 97 76 3 +82 96 100 78 82 96 100 81 82 91 104 78 79 95 101 79 83 95 101 79 83 95 105 83 85 97 97 80 85 106 105 80 93 111 114 90 3 +82 96 100 81 82 91 104 78 86 100 108 85 83 95 101 79 83 95 105 83 92 103 110 90 85 106 105 80 93 111 114 90 93 115 114 94 3 +86 100 108 85 90 109 112 92 90 104 112 89 92 103 110 90 96 112 110 94 96 108 114 90 93 115 114 94 93 111 114 94 89 102 110 87 3 +90 100 108 85 86 104 104 81 86 100 108 85 92 103 110 86 87 103 110 83 87 99 105 86 85 97 110 83 85 102 105 80 85 102 105 83 3 +86 104 112 85 86 100 104 81 82 96 104 81 87 99 105 86 83 95 105 83 83 99 105 83 85 97 101 83 85 97 101 83 89 102 105 87 3 +82 96 104 81 82 100 104 81 82 104 112 85 83 99 105 83 87 103 105 86 87 99 105 86 89 102 105 87 85 102 110 87 85 102 110 94 3 +86 104 108 92 
82 100 108 89 74 96 104 89 83 95 105 90 79 99 110 90 71 103 119 94 78 92 110 87 70 88 105 90 60 92 105 87 3 +82 100 108 89 74 96 104 89 63 96 100 92 79 99 110 90 71 103 119 94 59 95 110 90 70 88 105 90 60 92 105 87 53 84 97 83 1 +74 96 104 89 63 96 100 92 56 91 108 89 71 103 119 94 59 95 110 90 52 84 97 86 60 92 105 87 53 84 97 83 50 79 101 83 1 +63 96 100 92 56 91 108 89 52 83 100 81 59 95 110 90 52 84 97 86 52 81 97 79 53 84 97 83 50 79 101 83 50 75 93 80 1 +52 83 100 81 49 75 92 78 46 75 96 78 52 81 97 79 52 73 90 79 49 73 97 83 50 75 93 80 50 71 89 80 50 75 101 80 1 +49 75 92 78 46 75 96 78 46 71 84 74 52 73 90 79 49 73 97 83 49 77 93 75 50 71 89 80 50 75 101 80 47 75 97 80 1 +46 75 96 78 46 71 84 74 46 67 84 74 49 73 97 83 49 77 93 75 46 66 86 72 50 75 101 80 47 75 97 80 50 71 89 76 1 +46 71 84 74 46 67 84 74 49 71 92 74 49 77 93 75 46 66 86 72 49 70 86 75 47 75 97 80 50 71 89 76 50 67 93 76 1 +46 67 84 74 49 71 92 74 49 71 84 78 46 66 86 72 49 70 86 75 49 73 90 75 50 71 89 76 50 67 93 76 50 75 97 80 1 +49 71 84 78 49 71 88 74 52 79 96 78 49 73 90 75 49 70 86 72 52 70 82 75 50 75 97 80 53 75 97 80 53 71 89 73 1 +49 71 88 74 52 79 96 78 52 79 92 81 49 70 86 72 52 70 82 75 49 66 86 75 53 75 97 80 53 71 89 73 50 71 89 73 1 +52 79 96 78 52 79 92 81 52 71 84 74 52 70 82 75 49 66 86 75 52 66 86 72 53 71 89 73 50 71 89 73 50 71 85 73 1 +52 71 84 74 52 71 84 70 52 71 80 70 52 66 86 72 52 70 86 72 52 70 86 72 50 71 85 73 53 79 89 76 53 75 93 73 1 +52 71 80 70 52 71 84 70 56 75 92 74 52 70 86 72 56 73 86 75 59 77 90 79 53 75 93 73 53 71 85 69 53 75 93 76 1 +52 71 84 70 56 75 92 74 56 79 88 78 56 73 86 75 59 77 90 79 59 84 97 83 53 71 85 69 53 75 93 76 57 79 97 80 1 +56 83 92 81 56 83 100 78 59 87 96 81 56 88 97 83 52 84 97 83 56 81 97 79 57 79 97 80 57 75 97 76 57 79 93 80 1 +59 87 96 81 66 100 108 89 66 96 108 92 56 81 97 79 59 84 93 79 59 88 105 86 57 79 93 80 60 84 93 80 60 75 93 83 1 +66 100 108 89 66 96 108 92 59 91 100 85 59 84 93 79 59 88 105 86 63 95 110 86 60 84 93 80 60 75 93 83 63 84 97 83 1 +56 79 96 81 59 83 96 81 63 83 100 85 63 84 101 83 59 73 93 75 63 81 93 83 63 84 93 80 63 79 89 83 67 88 105 87 1 +66 83 100 81 66 83 96 81 66 87 104 89 67 84 93 83 67 84 97 83 59 77 90 75 63 75 97 80 63 79 85 80 60 75 89 80 1 +63 79 88 78 56 63 84 70 59 67 84 74 59 73 93 75 63 73 93 75 59 81 93 79 63 92 105 87 63 92 105 87 60 92 110 90 1 +59 79 96 81 63 87 108 89 63 91 112 89 63 91 101 90 67 103 114 94 63 99 114 90 67 102 114 90 70 106 119 94 67 106 110 90 1 +63 96 112 89 63 100 122 92 63 104 117 92 63 103 114 94 67 103 114 94 67 103 114 94 70 111 114 97 70 115 119 97 67 106 124 94 1 +63 96 108 89 66 96 112 89 66 100 112 92 67 99 110 94 67 103 114 94 71 103 114 98 67 106 114 94 70 106 119 94 70 106 119 94 1 +70 100 112 92 70 104 112 96 70 104 112 96 75 112 119 98 75 108 114 94 71 108 114 94 74 111 114 97 70 111 124 97 70 106 114 94 1 +70 104 112 96 70 100 112 92 70 100 112 96 71 108 114 94 75 108 119 98 75 103 119 98 70 106 114 94 74 106 114 97 70 111 119 97 1 +70 100 112 92 70 100 112 96 66 104 122 96 75 108 119 98 75 103 119 98 71 99 114 98 74 106 114 97 70 111 119 97 70 102 114 94 1 +70 100 112 96 66 104 122 96 70 100 117 96 75 103 119 98 71 99 114 98 75 108 124 98 70 111 119 97 70 102 114 94 70 106 114 94 1 +66 104 122 96 70 100 117 96 63 83 104 89 71 99 114 98 75 108 124 98 71 99 110 94 70 102 114 94 70 106 114 94 67 97 114 90 1 +70 100 117 96 63 83 104 89 59 79 92 81 75 108 124 98 71 99 110 94 67 77 97 79 70 106 114 94 67 97 114 90 67 84 101 87 1 +59 79 92 81 63 75 104 85 70 100 112 92 67 
77 97 79 63 66 90 79 63 81 101 86 67 84 101 87 74 92 105 90 78 92 110 94 1 +70 100 108 89 66 79 96 85 63 71 104 92 71 95 119 94 67 88 105 86 63 73 97 86 78 97 114 97 70 92 110 83 60 75 101 83 5 +66 79 96 85 63 71 104 92 59 67 104 96 67 88 105 86 63 73 97 86 59 70 105 94 70 92 110 83 60 75 101 83 60 75 101 83 5 +59 67 104 96 59 63 104 96 59 60 100 92 59 70 105 94 63 66 101 90 59 66 97 86 60 75 101 83 60 75 97 80 57 71 97 80 5 +59 63 104 96 59 60 100 92 56 60 100 89 63 66 101 90 59 66 97 86 59 63 90 83 60 75 97 80 57 71 97 80 60 71 93 80 5 +59 60 100 92 56 60 100 89 56 60 88 81 59 66 97 86 59 63 90 83 59 63 86 83 57 71 97 80 60 71 93 80 57 67 93 83 5 +56 60 88 81 56 60 88 78 56 60 84 78 59 63 86 83 56 60 86 79 52 54 86 83 57 67 93 83 53 60 93 80 47 49 82 83 5 +59 67 88 74 63 71 92 81 59 60 96 92 49 51 86 83 59 70 90 72 59 63 97 90 50 46 82 83 57 67 85 76 60 71 97 83 5 +63 71 92 81 59 60 96 92 56 63 104 96 59 70 90 72 59 63 97 90 59 60 97 90 57 67 85 76 60 71 97 83 60 60 97 87 5 +59 60 96 92 56 63 104 96 59 67 104 96 59 63 97 90 59 60 97 90 59 63 93 90 60 71 97 83 60 60 97 87 63 71 101 87 5 +56 63 104 96 59 67 104 96 63 67 108 96 59 60 97 90 59 63 93 90 63 66 97 94 60 60 97 87 63 71 101 87 63 71 101 90 5 +63 67 108 96 70 75 104 85 74 87 92 78 63 66 97 94 67 77 110 90 75 91 97 79 63 71 101 90 67 75 105 90 74 88 105 83 5 +70 75 104 85 74 87 92 78 74 91 100 81 67 77 110 90 75 91 97 79 79 91 97 83 67 75 105 90 74 88 105 83 74 92 101 80 4 +74 87 92 78 74 91 100 81 78 96 96 81 75 91 97 79 79 91 97 83 79 91 97 79 74 88 105 83 74 92 101 80 74 84 97 76 4 +74 91 100 81 78 96 96 81 78 91 96 78 79 91 97 83 79 91 97 79 75 88 93 75 74 92 101 80 74 84 97 76 74 88 93 76 4 +83 91 97 79 79 95 97 75 83 95 97 79 78 92 101 80 78 92 97 76 78 92 101 76 80 98 98 76 80 94 98 76 80 94 102 79 3 +79 95 97 75 83 95 97 79 83 95 105 83 78 92 97 76 78 92 101 76 78 92 97 76 80 94 98 76 80 94 102 79 80 98 94 76 3 +79 95 101 79 83 95 101 79 83 95 105 83 85 97 97 80 85 106 105 80 93 111 114 90 88 106 106 87 92 115 115 94 92 120 125 98 3 +83 95 105 83 92 103 110 90 96 112 110 94 93 111 114 90 93 115 114 94 93 111 114 94 92 120 125 98 92 115 115 87 84 102 102 79 3 +92 103 110 90 96 112 110 94 96 108 114 90 93 115 114 94 93 111 114 94 89 102 110 87 92 115 115 87 84 102 102 79 80 94 94 76 3 +96 112 110 94 96 108 114 90 92 103 110 86 93 111 114 94 89 102 110 87 85 97 110 83 84 102 102 79 80 94 94 76 80 94 98 79 3 +96 108 114 90 92 103 110 86 87 103 110 83 89 102 110 87 85 97 110 83 85 102 105 80 80 94 94 76 80 94 98 79 84 98 102 83 3 +92 103 110 86 87 103 110 83 87 99 105 86 85 97 110 83 85 102 105 80 85 102 105 83 80 94 98 79 84 98 102 83 84 98 102 79 3 +87 99 105 86 87 99 105 86 83 95 105 83 85 102 105 83 85 97 101 83 85 97 101 83 84 98 102 79 76 94 102 79 84 102 111 91 3 +87 99 105 86 83 95 105 83 83 99 105 83 85 97 101 83 85 97 101 83 89 102 105 87 76 94 102 79 84 102 111 91 84 102 106 91 3 +83 95 105 83 83 99 105 83 87 103 105 86 85 97 101 83 89 102 105 87 85 102 110 87 84 102 111 91 84 102 106 91 88 106 111 91 3 +83 99 105 83 87 103 105 86 87 99 105 86 89 102 105 87 85 102 110 87 85 102 110 94 84 102 106 91 88 106 111 91 88 106 111 98 3 +87 103 105 86 87 99 105 86 83 95 105 90 85 102 110 87 85 102 110 94 78 92 110 87 88 106 111 91 88 106 111 98 76 94 106 91 3 +87 99 105 86 83 95 105 90 79 99 110 90 85 102 110 94 78 92 110 87 70 88 105 90 88 106 111 98 76 94 106 91 68 94 111 91 3 +83 95 105 90 79 99 110 90 71 103 119 94 78 92 110 87 70 88 105 90 60 92 105 87 76 94 106 91 68 94 111 91 57 81 102 83 3 +59 95 110 90 52 84 97 86 52 
81 97 79 53 84 97 83 50 79 101 83 50 75 93 80 50 77 90 79 50 73 86 76 50 69 86 72 1 +52 84 97 86 52 81 97 79 52 73 90 79 50 79 101 83 50 75 93 80 50 71 89 80 50 73 86 76 50 69 86 72 50 69 90 76 1 +52 73 90 79 49 73 97 83 49 77 93 75 50 71 89 80 50 75 101 80 47 75 97 80 50 69 90 76 50 69 90 76 50 73 94 76 1 +49 77 93 75 46 66 86 72 49 70 86 75 47 75 97 80 50 71 89 76 50 67 93 76 50 73 94 76 50 73 90 76 50 73 94 79 1 +49 70 86 75 49 73 90 75 49 70 86 72 50 67 93 76 50 75 97 80 53 75 97 80 50 73 94 79 53 81 102 83 53 77 98 79 1 +49 73 90 75 49 70 86 72 52 70 82 75 50 75 97 80 53 75 97 80 53 71 89 73 53 81 102 83 53 77 98 79 53 81 98 79 1 +52 70 82 75 49 66 86 75 52 66 86 72 53 71 89 73 50 71 89 73 50 71 85 73 53 81 98 79 53 77 94 76 53 73 98 76 1 +52 70 86 72 52 70 86 72 56 73 86 75 53 79 89 76 53 75 93 73 53 71 85 69 57 77 98 79 57 73 90 72 50 62 78 68 1 +52 70 86 72 56 73 86 75 59 77 90 79 53 75 93 73 53 71 85 69 53 75 93 76 57 73 90 72 50 62 78 68 53 69 82 76 1 +59 84 97 83 56 88 97 83 52 84 97 83 57 79 97 80 57 79 97 80 57 75 97 76 57 77 94 76 57 73 90 76 53 73 90 76 1 +56 81 97 79 59 84 93 79 59 88 105 86 57 79 93 80 60 84 93 80 60 75 93 83 57 77 94 79 60 81 98 79 60 73 90 79 1 +59 84 93 79 59 88 105 86 63 95 110 86 60 84 93 80 60 75 93 83 63 84 97 83 60 81 98 79 60 73 90 79 60 73 90 79 1 +59 88 105 86 63 95 110 86 63 84 101 83 60 75 93 83 63 84 97 83 63 84 93 80 60 73 90 79 60 73 90 79 60 81 94 79 1 +63 95 110 86 63 84 101 83 59 73 93 75 63 84 97 83 63 84 93 80 63 79 89 83 60 73 90 79 60 81 94 79 64 81 98 83 1 +63 91 101 86 59 88 101 83 67 84 93 83 67 92 101 90 60 84 97 83 63 75 97 80 64 85 102 83 60 81 90 76 60 81 90 79 1 +59 88 101 83 67 84 93 83 67 84 97 83 60 84 97 83 63 75 97 80 63 79 85 80 60 81 90 76 60 81 90 79 68 89 106 87 1 +67 84 93 83 67 84 97 83 59 77 90 75 63 75 97 80 63 79 85 80 60 75 89 80 60 81 90 79 68 89 106 87 68 98 111 91 1 +67 84 97 83 59 77 90 75 59 73 97 79 63 79 85 80 60 75 89 80 60 84 97 80 68 89 106 87 68 98 111 91 64 98 106 91 1 +59 73 97 79 59 73 93 75 63 73 93 75 60 84 97 80 63 92 105 87 63 92 105 87 64 98 106 91 64 94 111 91 60 94 111 91 1 +63 73 93 75 59 81 93 79 63 91 101 90 63 92 105 87 60 92 110 90 67 102 114 90 60 94 111 91 64 98 111 91 68 106 115 94 1 +63 91 101 90 67 103 114 94 63 99 114 90 67 102 114 90 70 106 119 94 67 106 110 90 68 106 115 94 72 106 115 98 72 106 115 94 1 +67 103 114 94 63 99 114 90 63 103 114 94 70 106 119 94 67 106 110 90 70 111 114 97 72 106 115 98 72 106 115 94 68 106 120 94 1 +63 99 114 90 63 103 114 94 67 103 114 94 67 106 110 90 70 111 114 97 70 115 119 97 72 106 115 94 68 106 120 94 72 111 120 94 1 +63 103 114 94 67 103 114 94 67 103 114 94 70 111 114 97 70 115 119 97 67 106 124 94 68 106 120 94 72 111 120 94 64 106 115 94 1 +67 103 114 94 67 103 114 94 67 99 110 94 70 115 119 97 67 106 124 94 67 106 114 94 72 111 120 94 64 106 115 94 64 102 115 94 1 +67 103 114 94 67 99 110 94 67 103 114 94 67 106 124 94 67 106 114 94 70 106 119 94 64 106 115 94 64 102 115 94 68 106 115 94 1 +67 99 110 94 67 103 114 94 71 103 114 98 67 106 114 94 70 106 119 94 70 106 119 94 64 102 115 94 68 106 115 94 68 102 115 94 1 +67 103 114 94 71 103 114 98 75 112 119 98 70 106 119 94 70 106 119 94 74 111 114 97 68 106 115 94 68 102 115 94 72 106 115 94 1 +71 103 114 98 75 112 119 98 75 108 114 94 70 106 119 94 74 111 114 97 70 111 124 97 68 102 115 94 72 106 115 94 72 106 115 91 1 +75 112 119 98 75 108 114 94 71 108 114 94 74 111 114 97 70 111 124 97 70 106 114 94 72 106 115 94 72 106 115 91 76 111 115 94 1 +75 108 114 94 71 108 114 94 75 108 119 
98 70 111 124 97 70 106 114 94 74 106 114 97 72 106 115 91 76 111 115 94 76 111 115 94 1 +71 108 114 94 75 108 119 98 75 103 119 98 70 106 114 94 74 106 114 97 70 111 119 97 76 111 115 94 76 111 115 94 72 106 115 91 1 +75 103 119 98 71 99 114 98 75 108 124 98 70 111 119 97 70 102 114 94 70 106 114 94 72 106 115 91 72 106 115 94 76 111 115 94 1 +71 99 114 98 75 108 124 98 71 99 110 94 70 102 114 94 70 106 114 94 67 97 114 90 72 106 115 94 76 111 115 94 76 106 115 94 1 +71 99 110 94 67 77 97 79 63 66 90 79 67 97 114 90 67 84 101 87 74 92 105 90 76 106 115 94 76 102 111 98 80 111 125 102 1 +67 77 97 79 63 66 90 79 63 81 101 86 67 84 101 87 74 92 105 90 78 92 110 94 76 102 111 98 80 111 125 102 88 115 131 102 1 +63 81 101 86 71 95 119 94 67 88 105 86 78 92 110 94 78 97 114 97 70 92 110 83 88 115 131 102 88 111 120 94 76 89 102 76 1 +67 88 105 86 63 73 97 86 59 70 105 94 70 92 110 83 60 75 101 83 60 75 101 83 76 89 102 76 64 77 94 76 60 77 94 76 5 +52 54 86 83 49 45 86 86 49 51 86 83 47 49 82 83 44 43 82 87 50 46 82 83 50 52 82 83 50 52 78 83 50 52 82 79 5 +49 45 86 86 49 51 86 83 59 70 90 72 44 43 82 87 50 46 82 83 57 67 85 76 50 52 78 83 50 52 82 79 57 66 82 72 5 +49 51 86 83 59 70 90 72 59 63 97 90 50 46 82 83 57 67 85 76 60 71 97 83 50 52 82 79 57 66 82 72 60 77 90 83 5 +59 70 90 72 59 63 97 90 59 60 97 90 57 67 85 76 60 71 97 83 60 60 97 87 57 66 82 72 60 77 90 83 60 66 102 91 5 +59 63 97 90 59 60 97 90 59 63 93 90 60 71 97 83 60 60 97 87 63 71 101 87 60 77 90 83 60 66 102 91 60 62 106 94 5 +59 63 93 90 63 66 97 94 67 77 110 90 63 71 101 87 63 71 101 90 67 75 105 90 60 62 106 94 60 66 106 94 64 73 102 94 5 +78 92 101 80 78 92 97 76 78 92 101 76 80 98 98 76 80 94 98 76 80 94 102 79 84 99 108 81 84 99 108 81 80 95 100 81 3 +78 92 97 76 78 92 101 76 78 92 97 76 80 94 98 76 80 94 102 79 80 98 94 76 84 99 108 81 80 95 100 81 84 95 100 85 3 +78 92 101 76 78 92 97 76 82 97 97 80 80 94 102 79 80 98 94 76 84 94 98 79 80 95 100 81 84 95 100 85 84 103 108 92 3 +78 92 97 76 82 97 97 80 85 97 97 80 80 98 94 76 84 94 98 79 88 106 106 87 84 95 100 85 84 103 108 92 92 107 118 96 3 +85 97 97 80 85 106 105 80 93 111 114 90 88 106 106 87 92 115 115 94 92 120 125 98 92 107 118 96 97 112 122 92 97 116 122 96 3 +85 106 105 80 93 111 114 90 93 115 114 94 92 115 115 94 92 120 125 98 92 115 115 87 97 112 122 92 97 116 122 96 92 103 113 88 3 +93 111 114 90 93 115 114 94 93 111 114 94 92 120 125 98 92 115 115 87 84 102 102 79 97 116 122 96 92 103 113 88 84 95 96 74 3 +93 115 114 94 93 111 114 94 89 102 110 87 92 115 115 87 84 102 102 79 80 94 94 76 92 103 113 88 84 95 96 74 80 95 96 74 3 +93 111 114 94 89 102 110 87 85 97 110 83 84 102 102 79 80 94 94 76 80 94 98 79 84 95 96 74 80 95 96 74 84 95 100 81 3 +85 97 110 83 85 102 105 80 85 102 105 83 80 94 98 79 84 98 102 83 84 98 102 79 84 95 100 81 88 99 104 81 80 95 104 81 3 +85 102 105 80 85 102 105 83 85 97 101 83 84 98 102 83 84 98 102 79 76 94 102 79 88 99 104 81 80 95 104 81 84 99 108 88 3 +85 102 105 83 85 97 101 83 85 97 101 83 84 98 102 79 76 94 102 79 84 102 111 91 80 95 104 81 84 99 108 88 84 103 113 96 3 +85 97 101 83 85 97 101 83 89 102 105 87 76 94 102 79 84 102 111 91 84 102 106 91 84 99 108 88 84 103 113 96 84 99 113 88 3 +85 97 101 83 89 102 105 87 85 102 110 87 84 102 111 91 84 102 106 91 88 106 111 91 84 103 113 96 84 99 113 88 84 99 108 92 3 +89 102 105 87 85 102 110 87 85 102 110 94 84 102 106 91 88 106 111 91 88 106 111 98 84 99 113 88 84 99 108 92 84 107 113 96 3 +85 102 110 94 78 92 110 87 70 88 105 90 88 106 111 98 76 94 106 91 68 94 111 91 84 
107 113 96 84 107 122 96 68 103 113 92 1 +78 92 110 87 70 88 105 90 60 92 105 87 76 94 106 91 68 94 111 91 57 81 102 83 84 107 122 96 68 103 113 92 53 91 104 88 1 +70 88 105 90 60 92 105 87 53 84 97 83 68 94 111 91 57 81 102 83 50 77 90 79 68 103 113 92 53 91 104 88 50 79 104 85 1 +60 92 105 87 53 84 97 83 50 79 101 83 57 81 102 83 50 77 90 79 50 73 86 76 53 91 104 88 50 79 104 85 50 79 100 81 1 +53 84 97 83 50 79 101 83 50 75 93 80 50 77 90 79 50 73 86 76 50 69 86 72 50 79 104 85 50 79 100 81 50 75 96 78 1 +50 75 93 80 50 71 89 80 50 75 101 80 50 69 86 72 50 69 90 76 50 69 90 76 50 75 96 78 46 71 87 74 50 71 87 74 1 +50 71 89 80 50 75 101 80 47 75 97 80 50 69 90 76 50 69 90 76 50 73 94 76 46 71 87 74 50 71 87 74 50 75 91 78 1 +47 75 97 80 50 71 89 76 50 67 93 76 50 73 94 76 50 73 90 76 50 73 94 79 50 75 91 78 50 79 96 78 46 79 96 78 1 +50 67 93 76 50 75 97 80 53 75 97 80 50 73 94 79 53 81 102 83 53 77 98 79 46 79 96 78 50 79 96 81 53 79 96 81 1 +53 75 97 80 53 71 89 73 50 71 89 73 53 77 98 79 53 81 98 79 53 77 94 76 53 79 96 81 53 83 96 78 53 75 96 78 1 +53 71 89 73 50 71 89 73 50 71 85 73 53 81 98 79 53 77 94 76 53 73 98 76 53 83 96 78 53 75 96 78 53 71 87 74 1 +50 71 89 73 50 71 85 73 53 79 89 76 53 77 94 76 53 73 98 76 57 77 98 79 53 75 96 78 53 71 87 74 53 71 87 74 1 +50 71 85 73 53 79 89 76 53 75 93 73 53 73 98 76 57 77 98 79 57 73 90 72 53 71 87 74 53 71 87 74 53 71 83 74 1 +53 79 89 76 53 75 93 73 53 71 85 69 57 77 98 79 57 73 90 72 50 62 78 68 53 71 87 74 53 71 83 74 53 71 87 74 1 +53 71 85 69 53 75 93 76 57 79 97 80 50 62 78 68 53 69 82 76 57 77 94 76 53 71 87 74 53 68 83 70 56 71 79 74 1 +57 75 97 76 57 79 93 80 60 84 93 80 53 73 90 76 57 77 94 79 60 81 98 79 56 75 96 74 60 79 91 81 64 87 100 85 1 +57 79 93 80 60 84 93 80 60 75 93 83 57 77 94 79 60 81 98 79 60 73 90 79 60 79 91 81 64 87 100 85 60 83 96 81 1 +60 84 93 80 60 75 93 83 63 84 97 83 60 81 98 79 60 73 90 79 60 73 90 79 64 87 100 85 60 83 96 81 68 83 96 81 1 +63 84 97 83 63 84 93 80 63 79 89 83 60 73 90 79 60 81 94 79 64 81 98 83 68 83 96 81 64 87 104 85 60 83 100 85 1 +63 84 93 80 63 79 89 83 67 88 105 87 60 81 94 79 64 81 98 83 64 85 98 83 64 87 104 85 60 83 100 85 64 83 96 81 1 +67 92 101 90 60 84 97 83 63 75 97 80 64 85 102 83 60 81 90 76 60 81 90 79 60 87 104 85 60 91 108 85 64 91 113 88 1 +63 75 97 80 63 79 85 80 60 75 89 80 60 81 90 79 68 89 106 87 68 98 111 91 64 91 113 88 64 95 113 88 68 103 113 88 1 +60 75 89 80 60 84 97 80 63 92 105 87 68 98 111 91 64 98 106 91 64 94 111 91 68 103 113 88 68 103 118 92 68 107 113 92 1 +60 84 97 80 63 92 105 87 63 92 105 87 64 98 106 91 64 94 111 91 60 94 111 91 68 103 118 92 68 107 113 92 68 107 118 92 1 +63 92 105 87 63 92 105 87 60 92 110 90 64 94 111 91 60 94 111 91 64 98 111 91 68 107 113 92 68 107 118 92 68 103 118 92 1 +60 92 110 90 67 102 114 90 70 106 119 94 64 98 111 91 68 106 115 94 72 106 115 98 68 103 118 92 71 103 118 92 71 103 118 96 1 +67 102 114 90 70 106 119 94 67 106 110 90 68 106 115 94 72 106 115 98 72 106 115 94 71 103 118 92 71 103 118 96 68 107 122 96 1 +70 106 119 94 67 106 110 90 70 111 114 97 72 106 115 98 72 106 115 94 68 106 120 94 71 103 118 96 68 107 122 96 68 103 118 92 1 +70 115 119 97 67 106 124 94 67 106 114 94 72 111 120 94 64 106 115 94 64 102 115 94 64 103 122 92 71 107 122 96 71 107 122 96 1 +67 106 114 94 70 106 119 94 70 106 119 94 64 102 115 94 68 106 115 94 68 102 115 94 71 107 122 96 71 103 113 92 71 103 118 92 1 +70 106 119 94 70 106 119 94 74 111 114 97 68 106 115 94 68 102 115 94 72 106 115 94 71 103 113 92 71 103 118 92 71 107 118 
96 1 +70 111 124 97 70 106 114 94 74 106 114 97 72 106 115 91 76 111 115 94 76 111 115 94 71 107 118 96 76 107 122 99 71 116 122 99 1 +70 102 114 94 70 106 114 94 67 97 114 90 72 106 115 94 76 111 115 94 76 106 115 94 76 112 122 96 76 112 122 99 80 107 122 96 1 +70 106 114 94 67 97 114 90 67 84 101 87 76 111 115 94 76 106 115 94 76 102 111 98 76 112 122 99 80 107 122 96 76 107 118 96 1 +67 97 114 90 67 84 101 87 74 92 105 90 76 106 115 94 76 102 111 98 80 111 125 102 80 107 122 96 76 107 118 96 84 116 128 103 1 +74 92 105 90 78 92 110 94 78 97 114 97 80 111 125 102 88 115 131 102 88 111 120 94 84 116 128 103 92 116 133 103 84 112 122 96 1 +78 92 110 94 78 97 114 97 70 92 110 83 88 115 131 102 88 111 120 94 76 89 102 76 92 116 133 103 84 112 122 96 71 83 96 85 1 +78 97 114 97 70 92 110 83 60 75 101 83 88 111 120 94 76 89 102 76 64 77 94 76 84 112 122 96 71 83 96 85 64 79 96 81 5 +60 71 93 80 57 67 93 83 53 60 93 80 60 81 90 83 60 73 90 83 53 62 86 83 64 83 104 88 64 79 100 85 56 71 96 85 1 +57 67 93 83 53 60 93 80 47 49 82 83 60 73 90 83 53 62 86 83 50 52 82 83 64 79 100 85 56 71 96 85 56 68 91 81 5 +57 67 85 76 60 71 97 83 60 60 97 87 57 66 82 72 60 77 90 83 60 66 102 91 56 68 87 74 60 71 91 81 60 64 104 99 5 +60 71 97 83 60 60 97 87 63 71 101 87 60 77 90 83 60 66 102 91 60 62 106 94 60 71 91 81 60 64 104 99 56 64 108 96 5 +63 71 101 87 63 71 101 90 67 75 105 90 60 62 106 94 60 66 106 94 64 73 102 94 56 64 108 96 64 71 108 96 68 75 108 96 5 +63 71 101 90 67 75 105 90 74 88 105 83 60 66 106 94 64 73 102 94 76 89 106 87 64 71 108 96 68 75 108 96 71 87 108 88 5 +67 75 105 90 74 88 105 83 74 92 101 80 64 73 102 94 76 89 106 87 76 89 98 79 68 75 108 96 71 87 108 88 71 91 100 81 4 +74 92 101 80 74 84 97 76 74 88 93 76 76 89 98 79 72 89 98 79 76 85 98 79 71 91 100 81 76 95 108 88 80 95 104 85 4 +80 98 98 76 80 94 98 76 80 94 102 79 84 99 108 81 84 99 108 81 80 95 100 81 88 99 109 83 88 103 109 87 88 103 109 87 3 +80 94 102 79 80 98 94 76 84 94 98 79 80 95 100 81 84 95 100 85 84 103 108 92 88 103 109 87 93 107 113 92 93 111 123 96 3 +84 94 98 79 88 106 106 87 92 115 115 94 84 103 108 92 92 107 118 96 97 112 122 92 93 111 123 96 97 111 123 96 93 111 118 96 3 +88 106 106 87 92 115 115 94 92 120 125 98 92 107 118 96 97 112 122 92 97 116 122 96 97 111 123 96 93 111 118 96 93 111 118 96 3 +92 115 115 94 92 120 125 98 92 115 115 87 97 112 122 92 97 116 122 96 92 103 113 88 93 111 118 96 93 111 118 96 84 99 109 83 3 +92 120 125 98 92 115 115 87 84 102 102 79 97 116 122 96 92 103 113 88 84 95 96 74 93 111 118 96 84 99 109 83 79 91 100 75 3 +80 94 94 76 80 94 98 79 84 98 102 83 80 95 96 74 84 95 100 81 88 99 104 81 79 95 100 79 79 95 100 79 84 95 96 79 3 +80 94 98 79 84 98 102 83 84 98 102 79 84 95 100 81 88 99 104 81 80 95 104 81 79 95 100 79 84 95 96 79 84 99 104 83 3 +84 98 102 83 84 98 102 79 76 94 102 79 88 99 104 81 80 95 104 81 84 99 108 88 84 95 96 79 84 99 104 83 88 103 113 92 3 +84 98 102 79 76 94 102 79 84 102 111 91 80 95 104 81 84 99 108 88 84 103 113 96 84 99 104 83 88 103 113 92 88 103 109 92 3 +76 94 102 79 84 102 111 91 84 102 106 91 84 99 108 88 84 103 113 96 84 99 113 88 88 103 113 92 88 103 109 92 84 99 109 92 3 +84 102 106 91 88 106 111 91 88 106 111 98 84 99 113 88 84 99 108 92 84 107 113 96 84 99 109 92 88 103 113 96 88 103 118 100 3 +88 106 111 91 88 106 111 98 76 94 106 91 84 99 108 92 84 107 113 96 84 107 122 96 88 103 113 96 88 103 118 100 79 107 123 100 3 +76 94 106 91 68 94 111 91 57 81 102 83 84 107 122 96 68 103 113 92 53 91 104 88 79 107 123 100 67 103 113 96 55 91 109 87 1 
+50 73 86 76 50 69 86 72 50 69 90 76 50 79 100 81 50 75 96 78 46 71 87 74 55 83 100 87 51 79 104 83 51 83 100 83 1 +50 69 86 72 50 69 90 76 50 69 90 76 50 75 96 78 46 71 87 74 50 71 87 74 51 79 104 83 51 83 100 83 51 79 96 79 1 +50 69 90 76 50 69 90 76 50 73 94 76 46 71 87 74 50 71 87 74 50 75 91 78 51 83 100 83 51 79 96 79 51 75 96 79 1 +50 73 94 76 50 73 90 76 50 73 94 79 50 75 91 78 50 79 96 78 46 79 96 78 51 75 96 79 48 72 89 79 48 68 89 75 1 +50 73 90 76 50 73 94 79 53 81 102 83 50 79 96 78 46 79 96 78 50 79 96 81 48 72 89 79 48 68 89 75 48 75 89 79 1 +53 77 98 79 53 81 98 79 53 77 94 76 53 79 96 81 53 83 96 78 53 75 96 78 51 75 96 79 51 72 89 75 48 79 93 79 1 +53 81 98 79 53 77 94 76 53 73 98 76 53 83 96 78 53 75 96 78 53 71 87 74 51 72 89 75 48 79 93 79 55 79 93 79 1 +53 77 94 76 53 73 98 76 57 77 98 79 53 75 96 78 53 71 87 74 53 71 87 74 48 79 93 79 55 79 93 79 55 79 93 75 1 +53 73 98 76 57 77 98 79 57 73 90 72 53 71 87 74 53 71 87 74 53 71 83 74 55 79 93 79 55 79 93 75 51 75 89 75 1 +50 62 78 68 53 69 82 76 57 77 94 76 53 71 87 74 53 68 83 70 56 71 79 74 51 68 85 75 51 68 81 71 55 72 81 71 1 +53 69 82 76 57 77 94 76 57 73 90 76 53 68 83 70 56 71 79 74 56 75 87 74 51 68 81 71 55 72 81 71 55 72 85 75 1 +57 77 94 76 57 73 90 76 53 73 90 76 56 71 79 74 56 75 87 74 56 75 96 74 55 72 81 71 55 72 85 75 59 79 93 79 1 +57 73 90 76 53 73 90 76 57 77 94 79 56 75 87 74 56 75 96 74 60 79 91 81 55 72 85 75 59 79 93 79 63 87 100 83 1 +53 73 90 76 57 77 94 79 60 81 98 79 56 75 96 74 60 79 91 81 64 87 100 85 59 79 93 79 63 87 100 83 63 95 104 83 1 +57 77 94 79 60 81 98 79 60 73 90 79 60 79 91 81 64 87 100 85 60 83 96 81 63 87 100 83 63 95 104 83 63 95 104 83 1 +64 85 98 83 64 85 102 83 60 81 90 76 64 83 96 81 60 87 104 85 60 91 108 85 63 91 104 83 67 95 109 92 71 103 113 92 1 +64 85 102 83 60 81 90 76 60 81 90 79 60 87 104 85 60 91 108 85 64 91 113 88 67 95 109 92 71 103 113 92 67 103 113 92 1 +60 81 90 76 60 81 90 79 68 89 106 87 60 91 108 85 64 91 113 88 64 95 113 88 71 103 113 92 67 103 113 92 71 103 109 92 1 +68 98 111 91 64 98 106 91 64 94 111 91 68 103 113 88 68 103 118 92 68 107 113 92 71 103 113 92 71 107 118 92 71 107 113 96 1 +64 94 111 91 60 94 111 91 64 98 111 91 68 107 113 92 68 107 118 92 68 103 118 92 71 107 113 96 71 103 118 92 67 103 118 92 1 +60 94 111 91 64 98 111 91 68 106 115 94 68 107 118 92 68 103 118 92 71 103 118 92 71 103 118 92 67 103 118 92 71 103 118 96 1 +64 98 111 91 68 106 115 94 72 106 115 98 68 103 118 92 71 103 118 92 71 103 118 96 67 103 118 92 71 103 118 96 71 103 109 92 1 +68 106 115 94 72 106 115 98 72 106 115 94 71 103 118 92 71 103 118 96 68 107 122 96 71 103 118 96 71 103 109 92 71 99 113 92 1 +72 106 115 98 72 106 115 94 68 106 120 94 71 103 118 96 68 107 122 96 68 103 118 92 71 103 109 92 71 99 113 92 71 99 118 96 1 +72 106 115 94 68 106 120 94 72 111 120 94 68 107 122 96 68 103 118 92 64 103 122 92 71 99 113 92 71 99 118 96 67 103 118 96 1 +68 106 120 94 72 111 120 94 64 106 115 94 68 103 118 92 64 103 122 92 71 107 122 96 71 99 118 96 67 103 118 96 67 107 113 96 1 +72 111 120 94 64 106 115 94 64 102 115 94 64 103 122 92 71 107 122 96 71 107 122 96 67 103 118 96 67 107 113 96 67 107 123 96 1 +64 106 115 94 64 102 115 94 68 106 115 94 71 107 122 96 71 107 122 96 71 103 113 92 67 107 113 96 67 107 123 96 71 111 123 96 1 +64 102 115 94 68 106 115 94 68 102 115 94 71 107 122 96 71 103 113 92 71 103 118 92 67 107 123 96 71 111 123 96 71 103 118 96 1 +68 106 115 94 68 102 115 94 72 106 115 94 71 103 113 92 71 103 118 92 71 107 118 96 71 111 123 96 71 103 118 
96 71 107 113 92 1 +68 102 115 94 72 106 115 94 72 106 115 91 71 103 118 92 71 107 118 96 71 107 118 96 71 103 118 96 71 107 113 92 71 107 113 96 1 +72 106 115 94 72 106 115 91 76 111 115 94 71 107 118 96 71 107 118 96 76 107 122 99 71 107 113 92 71 107 113 96 75 103 118 96 1 +76 111 115 94 76 111 115 94 72 106 115 91 76 107 122 99 71 116 122 99 76 107 122 103 75 103 118 96 75 103 118 96 75 107 118 96 1 +76 111 115 94 72 106 115 91 72 106 115 94 71 116 122 99 76 107 122 103 76 112 122 96 75 103 118 96 75 107 118 96 79 103 118 100 1 +72 106 115 91 72 106 115 94 76 111 115 94 76 107 122 103 76 112 122 96 76 112 122 99 75 107 118 96 79 103 118 100 84 111 123 100 1 +72 106 115 94 76 111 115 94 76 106 115 94 76 112 122 96 76 112 122 99 80 107 122 96 79 103 118 100 84 111 123 100 84 103 118 96 1 +76 111 115 94 76 106 115 94 76 102 111 98 76 112 122 99 80 107 122 96 76 107 118 96 84 111 123 100 84 103 118 96 71 79 109 92 1 +80 111 125 102 88 115 131 102 88 111 120 94 84 116 128 103 92 116 133 103 84 112 122 96 79 103 123 100 84 111 128 100 84 103 118 92 1 +88 111 120 94 76 89 102 76 64 77 94 76 84 112 122 96 71 83 96 85 64 79 96 81 84 103 118 92 71 79 96 79 63 75 96 83 1 +60 77 94 76 57 81 90 76 60 85 94 79 60 83 100 81 60 83 96 85 64 87 100 88 67 83 104 87 59 83 100 83 63 87 100 87 1 +57 81 90 76 60 85 94 79 60 81 90 83 60 83 96 85 64 87 100 88 64 83 104 88 59 83 100 83 63 87 100 87 63 83 104 87 1 +60 81 90 83 60 73 90 83 53 62 86 83 64 83 104 88 64 79 100 85 56 71 96 85 63 83 104 87 63 79 100 87 59 75 96 87 1 +60 73 90 83 53 62 86 83 50 52 82 83 64 79 100 85 56 71 96 85 56 68 91 81 63 79 100 87 59 75 96 87 59 72 96 83 5 +53 62 86 83 50 52 82 83 50 52 78 83 56 71 96 85 56 68 91 81 56 64 91 81 59 75 96 87 59 72 96 83 59 75 96 75 5 +50 52 78 83 50 52 82 79 57 66 82 72 56 64 91 81 53 64 83 78 56 68 87 74 59 75 96 75 59 75 89 75 59 79 89 71 5 +57 66 82 72 60 77 90 83 60 66 102 91 56 68 87 74 60 71 91 81 60 64 104 99 59 79 89 71 63 79 93 75 63 68 109 92 5 +60 66 106 94 64 73 102 94 76 89 106 87 64 71 108 96 68 75 108 96 71 87 108 88 67 87 113 96 67 95 109 92 75 99 104 83 5 +64 73 102 94 76 89 106 87 76 89 98 79 68 75 108 96 71 87 108 88 71 91 100 81 67 95 109 92 75 99 104 83 75 95 100 79 4 +76 89 106 87 76 89 98 79 72 89 98 79 71 87 108 88 71 91 100 81 76 95 108 88 75 99 104 83 75 95 100 79 71 91 100 83 4 +76 89 98 79 72 89 98 79 76 85 98 79 71 91 100 81 76 95 108 88 80 95 104 85 75 95 100 79 71 91 100 83 71 95 104 87 4 +84 99 108 81 84 99 108 81 80 95 100 81 88 99 109 83 88 103 109 87 88 103 109 87 86 104 104 81 78 100 100 81 86 104 108 85 3 +80 95 100 81 84 95 100 85 84 103 108 92 88 103 109 87 93 107 113 92 93 111 123 96 86 104 108 85 90 109 112 92 90 118 117 96 3 +84 95 100 85 84 103 108 92 92 107 118 96 93 107 113 92 93 111 123 96 97 111 123 96 90 109 112 92 90 118 117 96 95 118 122 96 3 +84 103 108 92 92 107 118 96 97 112 122 92 93 111 123 96 97 111 123 96 93 111 118 96 90 118 117 96 95 118 122 96 90 104 112 92 3 +92 107 118 96 97 112 122 92 97 116 122 96 97 111 123 96 93 111 118 96 93 111 118 96 95 118 122 96 90 104 112 92 90 104 108 89 3 +97 112 122 92 97 116 122 96 92 103 113 88 93 111 118 96 93 111 118 96 84 99 109 83 90 104 112 92 90 104 108 89 86 100 104 89 3 +80 95 96 74 84 95 100 81 88 99 104 81 79 95 100 79 79 95 100 79 84 95 96 79 82 91 100 74 82 96 100 78 82 91 92 78 3 +84 95 100 81 88 99 104 81 80 95 104 81 79 95 100 79 84 95 96 79 84 99 104 83 82 96 100 78 82 91 92 78 82 96 100 81 3 +88 99 104 81 80 95 104 81 84 99 108 88 84 95 96 79 84 99 104 83 88 103 113 92 82 91 92 78 82 96 100 
81 90 100 108 89 3 +80 95 104 81 84 99 108 88 84 103 113 96 84 99 104 83 88 103 113 92 88 103 109 92 82 96 100 81 90 100 108 89 90 109 112 92 3 +84 103 113 96 84 99 113 88 84 99 108 92 88 103 109 92 84 99 109 92 88 103 113 96 90 109 112 92 90 104 112 92 90 104 112 89 3 +84 99 108 92 84 107 113 96 84 107 122 96 88 103 113 96 88 103 118 100 79 107 123 100 90 104 112 89 95 109 117 96 86 104 117 100 3 +84 107 113 96 84 107 122 96 68 103 113 92 88 103 118 100 79 107 123 100 67 103 113 96 95 109 117 96 86 104 117 100 74 104 122 96 1 +50 79 104 85 50 79 100 81 50 75 96 78 55 87 100 87 55 83 100 87 51 79 104 83 56 91 112 89 56 87 112 89 52 87 112 89 1 +50 79 100 81 50 75 96 78 46 71 87 74 55 83 100 87 51 79 104 83 51 83 100 83 56 87 112 89 52 87 112 89 52 87 104 85 1 +50 75 96 78 46 71 87 74 50 71 87 74 51 79 104 83 51 83 100 83 51 79 96 79 52 87 112 89 52 87 104 85 52 83 100 85 1 +46 71 87 74 50 71 87 74 50 75 91 78 51 83 100 83 51 79 96 79 51 75 96 79 52 87 104 85 52 83 100 85 49 75 96 78 1 +50 71 87 74 50 75 91 78 50 79 96 78 51 79 96 79 51 75 96 79 48 72 89 79 52 83 100 85 49 75 96 78 49 71 92 78 1 +50 75 91 78 50 79 96 78 46 79 96 78 51 75 96 79 48 72 89 79 48 68 89 75 49 75 96 78 49 71 92 78 49 71 88 74 1 +50 79 96 78 46 79 96 78 50 79 96 81 48 72 89 79 48 68 89 75 48 75 89 79 49 71 92 78 49 71 88 74 49 67 88 70 1 +46 79 96 78 50 79 96 81 53 79 96 81 48 68 89 75 48 75 89 79 51 75 96 79 49 71 88 74 49 67 88 70 49 67 84 74 1 +50 79 96 81 53 79 96 81 53 83 96 78 48 75 89 79 51 75 96 79 51 72 89 75 49 67 88 70 49 67 84 74 49 71 92 78 1 +53 75 96 78 53 71 87 74 53 71 87 74 48 79 93 79 55 79 93 79 55 79 93 75 52 75 92 78 52 75 92 78 52 75 88 78 1 +53 71 87 74 53 71 87 74 53 71 83 74 55 79 93 79 55 79 93 75 51 75 89 75 52 75 92 78 52 75 88 78 52 75 88 78 1 +53 71 87 74 53 71 83 74 53 71 87 74 55 79 93 75 51 75 89 75 51 68 85 75 52 75 88 78 52 75 88 78 52 71 84 74 1 +53 71 83 74 53 71 87 74 53 68 83 70 51 75 89 75 51 68 85 75 51 68 81 71 52 75 88 78 52 71 84 74 56 71 88 74 1 +53 71 87 74 53 68 83 70 56 71 79 74 51 68 85 75 51 68 81 71 55 72 81 71 52 71 84 74 56 71 88 74 52 79 92 74 1 +53 68 83 70 56 71 79 74 56 75 87 74 51 68 81 71 55 72 81 71 55 72 85 75 56 71 88 74 52 79 92 74 56 75 92 74 1 +56 71 79 74 56 75 87 74 56 75 96 74 55 72 81 71 55 72 85 75 59 79 93 79 52 79 92 74 56 75 92 74 56 79 96 78 1 +64 87 100 85 60 83 96 81 68 83 96 81 63 95 104 83 63 95 104 83 63 95 104 87 59 87 100 89 63 96 104 89 66 100 108 92 1 +68 83 96 81 64 87 104 85 60 83 100 85 63 95 104 87 63 95 104 87 63 91 104 83 66 100 108 92 63 91 100 89 63 87 100 85 1 +64 87 104 85 60 83 100 85 64 83 96 81 63 95 104 87 63 91 104 83 63 91 104 83 63 91 100 89 63 87 100 85 59 87 96 81 1 +60 83 100 85 64 83 96 81 60 87 104 85 63 91 104 83 63 91 104 83 67 95 109 92 63 87 100 85 59 87 96 81 66 96 104 89 1 +64 83 96 81 60 87 104 85 60 91 108 85 63 91 104 83 67 95 109 92 71 103 113 92 59 87 96 81 66 96 104 89 70 104 117 92 1 +60 87 104 85 60 91 108 85 64 91 113 88 67 95 109 92 71 103 113 92 67 103 113 92 66 96 104 89 70 104 117 92 70 109 117 96 1 +64 91 113 88 64 95 113 88 68 103 113 88 67 103 113 92 71 103 109 92 71 103 113 92 70 109 117 96 70 109 112 96 66 104 112 92 1 +68 103 113 88 68 103 118 92 68 107 113 92 71 103 113 92 71 107 118 92 71 107 113 96 66 104 112 92 70 104 112 92 70 109 117 96 1 +68 107 113 92 68 107 118 92 68 103 118 92 71 107 113 96 71 103 118 92 67 103 118 92 70 109 117 96 70 109 117 92 70 104 112 92 1 +68 107 118 92 68 103 118 92 71 103 118 92 71 103 118 92 67 103 118 92 71 103 118 96 70 109 117 92 70 104 112 92 70 
109 112 92 1 +68 103 118 92 64 103 122 92 71 107 122 96 71 99 118 96 67 103 118 96 67 107 113 96 66 100 112 92 66 104 117 92 63 104 112 92 1 +64 103 122 92 71 107 122 96 71 107 122 96 67 103 118 96 67 107 113 96 67 107 123 96 66 104 117 92 63 104 112 92 66 100 112 92 1 +71 107 122 96 71 107 122 96 71 103 113 92 67 107 113 96 67 107 123 96 71 111 123 96 63 104 112 92 66 100 112 92 66 104 117 92 1 +71 107 122 96 71 103 113 92 71 103 118 92 67 107 123 96 71 111 123 96 71 103 118 96 66 100 112 92 66 104 117 92 70 109 122 96 1 +71 103 113 92 71 103 118 92 71 107 118 96 71 111 123 96 71 103 118 96 71 107 113 92 66 104 117 92 70 109 122 96 74 109 117 96 1 +71 103 118 92 71 107 118 96 71 107 118 96 71 103 118 96 71 107 113 92 71 107 113 96 70 109 122 96 74 109 117 96 74 109 112 96 1 +71 107 118 96 71 107 118 96 76 107 122 99 71 107 113 92 71 107 113 96 75 103 118 96 74 109 117 96 74 109 112 96 74 109 112 96 1 +71 107 118 96 76 107 122 99 71 116 122 99 71 107 113 96 75 103 118 96 75 103 118 96 74 109 112 96 74 109 112 96 74 104 117 92 1 +71 116 122 99 76 107 122 103 76 112 122 96 75 103 118 96 75 107 118 96 79 103 118 100 74 104 117 92 74 109 117 96 78 104 112 96 1 +76 112 122 96 76 112 122 99 80 107 122 96 79 103 118 100 84 111 123 100 84 103 118 96 78 104 112 96 78 104 112 96 78 104 112 96 1 +76 107 118 96 84 116 128 103 92 116 133 103 71 79 109 92 79 103 123 100 84 111 128 100 74 83 108 89 66 71 100 85 74 83 104 92 1 +92 116 133 103 84 112 122 96 71 83 96 85 84 111 128 100 84 103 118 92 71 79 96 79 74 83 104 92 78 96 112 96 82 91 100 89 1 +84 112 122 96 71 83 96 85 64 79 96 81 84 103 118 92 71 79 96 79 63 75 96 83 78 96 112 96 82 91 100 89 66 71 84 78 1 +71 83 96 85 64 79 96 81 60 83 100 81 71 79 96 79 63 75 96 83 67 83 104 87 82 91 100 89 66 71 84 78 63 79 96 85 1 +64 79 96 81 60 83 100 81 60 83 96 85 63 75 96 83 67 83 104 87 59 83 100 83 66 71 84 78 63 79 96 85 66 91 104 92 1 +60 83 100 81 60 83 96 85 64 87 100 88 67 83 104 87 59 83 100 83 63 87 100 87 63 79 96 85 66 91 104 92 66 87 108 89 1 +64 83 104 88 64 79 100 85 56 71 96 85 63 83 104 87 63 79 100 87 59 75 96 87 63 83 104 85 63 83 100 85 66 83 100 85 1 +56 71 96 85 56 68 91 81 56 64 91 81 59 75 96 87 59 72 96 83 59 75 96 75 66 83 100 85 63 83 100 81 59 87 96 81 5 +56 64 91 81 53 64 83 78 56 68 87 74 59 75 96 75 59 75 89 75 59 79 89 71 59 87 96 81 63 83 92 74 59 83 96 74 5 +53 64 83 78 56 68 87 74 60 71 91 81 59 75 89 75 59 79 89 71 63 79 93 75 63 83 92 74 59 83 96 74 59 83 92 74 5 +60 64 104 99 56 64 108 96 64 71 108 96 63 68 109 92 59 75 109 96 67 87 113 96 59 83 92 70 63 79 108 92 66 83 108 96 5 +56 64 108 96 64 71 108 96 68 75 108 96 59 75 109 96 67 87 113 96 67 95 109 92 63 79 108 92 66 83 108 96 66 87 104 89 5 +64 71 108 96 68 75 108 96 71 87 108 88 67 87 113 96 67 95 109 92 75 99 104 83 66 83 108 96 66 87 104 89 63 87 104 89 5 +68 75 108 96 71 87 108 88 71 91 100 81 67 95 109 92 75 99 104 83 75 95 100 79 66 87 104 89 63 87 104 89 70 100 104 85 4 +71 87 108 88 71 91 100 81 76 95 108 88 75 99 104 83 75 95 100 79 71 91 100 83 63 87 104 89 70 100 104 85 70 91 104 85 4 +71 91 100 81 76 95 108 88 80 95 104 85 75 95 100 79 71 91 100 83 71 95 104 87 70 100 104 85 70 91 104 85 63 91 100 81 4 diff --git a/reagent/ope/test/data/satimage.names b/reagent/ope/test/data/satimage.names new file mode 100644 index 000000000..5ef49ffaf --- /dev/null +++ b/reagent/ope/test/data/satimage.names @@ -0,0 +1,139 @@ +FILE NAMES + sat.trn - training set + sat.tst - test set + + !!! NB. DO NOT USE CROSS-VALIDATION WITH THIS DATASET !!! 
+ Just train and test only once with the above + training and test sets. + +PURPOSE + The database consists of the multi-spectral values + of pixels in 3x3 neighbourhoods in a satellite image, + and the classification associated with the central pixel + in each neighbourhood. The aim is to predict this + classification, given the multi-spectral values. In + the sample database, the class of a pixel is coded as + a number. + +PROBLEM TYPE + Classification + +AVAILABLE + This database was generated from Landsat Multi-Spectral + Scanner image data. These and other forms of remotely + sensed imagery can be purchased at a price from relevant + governmental authorities. The data is usually in binary + form, and distributed on magnetic tape(s). + +SOURCE + The small sample database was provided by: + Ashwin Srinivasan + Department of Statistics and Modelling Science + University of Strathclyde + Glasgow + Scotland + UK + +ORIGIN + The original Landsat data for this database was generated + from data purchased from NASA by the Australian Centre + for Remote Sensing, and used for research at: + The Centre for Remote Sensing + University of New South Wales + Kensington, PO Box 1 + NSW 2033 + Australia. + + The sample database was generated taking a small section (82 + rows and 100 columns) from the original data. The binary values + were converted to their present ASCII form by Ashwin Srinivasan. + The classification for each pixel was performed on the basis of + an actual site visit by Ms. Karen Hall, when working for Professor + John A. Richards, at the Centre for Remote Sensing at the University + of New South Wales, Australia. Conversion to 3x3 neighbourhoods and + splitting into test and training sets was done by Alistair Sutherland. + +HISTORY + The Landsat satellite data is one of the many sources of information + available for a scene. The interpretation of a scene by integrating + spatial data of diverse types and resolutions including multispectral + and radar data, maps indicating topography, land use etc. is expected + to assume significant importance with the onset of an era characterised + by integrative approaches to remote sensing (for example, NASA's Earth + Observing System commencing this decade). Existing statistical methods + are ill-equipped for handling such diverse data types. Note that this + is not true for Landsat MSS data considered in isolation (as in + this sample database). This data satisfies the important requirements + of being numerical and at a single resolution, and standard maximum- + likelihood classification performs very well. Consequently, + for this data, it should be interesting to compare the performance + of other methods against the statistical approach. + +DESCRIPTION + One frame of Landsat MSS imagery consists of four digital images + of the same scene in different spectral bands. Two of these are + in the visible region (corresponding approximately to green and + red regions of the visible spectrum) and two are in the (near) + infra-red. Each pixel is a 8-bit binary word, with 0 corresponding + to black and 255 to white. The spatial resolution of a pixel is about + 80m x 80m. Each image contains 2340 x 3380 such pixels. + + The database is a (tiny) sub-area of a scene, consisting of 82 x 100 + pixels. Each line of data corresponds to a 3x3 square neighbourhood + of pixels completely contained within the 82x100 sub-area. 
Each line + contains the pixel values in the four spectral bands + (converted to ASCII) of each of the 9 pixels in the 3x3 neighbourhood + and a number indicating the classification label of the central pixel. + The number is a code for the following classes: + + Number Class + + 1 red soil + 2 cotton crop + 3 grey soil + 4 damp grey soil + 5 soil with vegetation stubble + 6 mixture class (all types present) + 7 very damp grey soil + + NB. There are no examples with class 6 in this dataset. + + The data is given in random order and certain lines of data + have been removed so you cannot reconstruct the original image + from this dataset. + + In each line of data the four spectral values for the top-left + pixel are given first followed by the four spectral values for + the top-middle pixel and then those for the top-right pixel, + and so on with the pixels read out in sequence left-to-right and + top-to-bottom. Thus, the four spectral values for the central + pixel are given by attributes 17,18,19 and 20. If you like you + can use only these four attributes, while ignoring the others. + This avoids the problem which arises when a 3x3 neighbourhood + straddles a boundary. + +NUMBER OF EXAMPLES + training set 4435 + test set 2000 + +NUMBER OF ATTRIBUTES + 36 (= 4 spectral bands x 9 pixels in neighbourhood ) + +ATTRIBUTES + The attributes are numerical, in the range 0 to 255. + +CLASS + There are 6 decision classes: 1,2,3,4,5 and 7. + + NB. There are no examples with class 6 in this dataset- + they have all been removed because of doubts about the + validity of this class. + +AUTHOR + Ashwin Srinivasan + Department of Statistics and Data Modeling + University of Strathclyde + Glasgow + Scotland + UK + ross@uk.ac.turing diff --git a/reagent/ope/test/envs.py b/reagent/ope/test/envs.py index b29934dd6..4064dff81 100644 --- a/reagent/ope/test/envs.py +++ b/reagent/ope/test/envs.py @@ -1,10 +1,11 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
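The layout described above (36 attributes per row = 4 spectral bands x 9 pixels, the class label last, and the central pixel occupying attributes 17-20) is straightforward to parse. Below is a minimal sketch under those assumptions; the helper names and the whitespace-separated integer format are illustrative and not part of this diff:

    def parse_satimage_line(line):
        """Split one sat.trn/sat.tst row into its 36 band values and the class label."""
        values = [int(v) for v in line.split()]
        features, label = values[:36], values[36]  # label is one of 1-5 or 7 (class 6 was removed)
        return features, label

    def central_pixel_bands(features):
        """Attributes 17-20 (1-based) are the four band values of the central pixel."""
        return features[16:20]
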
import random from abc import abstractmethod from typing import Optional -from reagent.ope.estimators.estimator import ( +from reagent.ope.estimators.sequential_estimators import ( Mdp, Model, RLPolicy, @@ -19,7 +20,7 @@ class Environment(Model): Environment for RL """ - def __init__(self, max_horizon: int = -1): + def __init__(self, max_horizon: int = -1) -> None: self._current_state: Optional[State] = None self._steps_taken: int = 0 self._max_horizon = max_horizon @@ -35,7 +36,7 @@ def close(self): def step(self, policy: RLPolicy): a_dist = policy(self.current_state) - a = a_dist.sample() + a = a_dist.sample()[0] s_dist = self(self.current_state, a) srs = [] probs = [] @@ -79,19 +80,21 @@ def current_state(self): return self._current_state @current_state.setter - def current_state(self, state: Optional[None]): + def current_state(self, state: Optional[State]): self._current_state = state class PolicyLogGenerator(object): - def __init__(self, env: Environment, policy: RLPolicy): + def __init__(self, env: Environment, policy: RLPolicy) -> None: self._env = env self._policy = policy - def generate_log(self, init_state: State) -> Mdp: + def generate_log(self, init_state: State, max_horizon: int = -1) -> Mdp: transition = Transition(state=self._env.reset(state=init_state)) mpd = [] while transition.status != Transition.Status.TERMINATED: + if max_horizon > 0 and len(mpd) > max_horizon: + break transition = self._env.step(self._policy) mpd.append(transition) return mpd diff --git a/reagent/ope/test/gridworld.py b/reagent/ope/test/gridworld.py index 82374bcab..82709afb6 100644 --- a/reagent/ope/test/gridworld.py +++ b/reagent/ope/test/gridworld.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
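The generate_log change above adds a max_horizon argument so that a rolled-out episode is truncated even when the environment never reports a terminal transition. A minimal usage sketch, assuming an Environment subclass, an RLPolicy, and a start State built as in the gridworld test below; env, policy, and start are placeholders, not objects defined by this diff:

    from reagent.ope.test.envs import PolicyLogGenerator

    # env: Environment, policy: RLPolicy, start: State -- placeholders for objects
    # constructed elsewhere (e.g. a GridWorld and an EpsilonGreedyRLPolicy).
    log_generator = PolicyLogGenerator(env, policy)
    # Stop after roughly 250 transitions instead of waiting for a TERMINATED status.
    mdp = log_generator.generate_log(start, max_horizon=250)
    print(f"episode length: {len(mdp)}")
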
import logging import random @@ -8,11 +9,11 @@ import torch from reagent.ope.estimators.sequential_estimators import ( DMEstimator, - DREstimator, + DoublyRobustEstimator, EpsilonGreedyRLPolicy, IPSEstimator, MAGICEstimator, - RandomRLPolicy, + NeuralDualDICE, RewardProbability, RLEstimatorInput, State, @@ -25,6 +26,7 @@ from reagent.ope.trainers.rl_tabular_trainers import ( DPTrainer, DPValueFunction, + EstimatedStateValueFunction, TabularPolicy, ) @@ -37,19 +39,26 @@ def __init__( goal: Tuple[int, int], max_horizon: int = -1, walls: Iterable[Tuple[int, int]] = (), + use_taxicab_reward: bool = False, ): super().__init__(max_horizon) self.size = size self.start = start self.goal = goal self.walls = set(walls) + self.use_taxicab_reward = use_taxicab_reward self.reset() @classmethod - def from_grid(cls, grid: Sequence[Sequence[str]], max_horizon: int = -1): + def from_grid( + cls, + grid: Sequence[Sequence[str]], + max_horizon: int = -1, + use_taxicab_reward: bool = False, + ): size = (len(grid), len(grid[0])) - start = () - goal = () + start = (0, 0) + goal = (0, 0) walls = [] for x, r in enumerate(grid): for y, c in enumerate(r): @@ -60,7 +69,32 @@ def from_grid(cls, grid: Sequence[Sequence[str]], max_horizon: int = -1): goal = (x, y) elif g == "w": walls += ((x, y),) - return cls(size, start, goal, max_horizon, walls) + return cls(size, start, goal, max_horizon, walls, use_taxicab_reward) + + @classmethod + def random_grid( + cls, + length: int, + max_horizon: int = -1, + wall_prob: float = 0.1, + use_taxicab_reward: bool = False, + ): + """ + Generates a random grid of size length x length with start = (0, 0) and + goal = (length-1, length-1) + """ + size = (length, length) + start = (0, 0) + goal = (length - 1, length - 1) + walls = [] + for r in range(length): + for c in range(length): + if (r, c) == start or (r, c) == goal: + continue + else: + if random.uniform(0, 1) < wall_prob: + walls.append((r, c)) + return cls(size, start, goal, max_horizon, walls, use_taxicab_reward) def reset(self, state: Optional[State] = None): super().reset(state) @@ -86,10 +120,27 @@ def _transit( elif to_pos == self.goal: return to_pos, 1.0, True else: - return to_pos, 0.0, False + return ( + to_pos, + 0.0 + if not self.use_taxicab_reward + else np.exp(-2 * self._taxi_distance(to_pos, self.goal) / self.size[0]), + False, + ) + + def _taxi_distance( + self, from_pos: Tuple[int, int], to_pos: Tuple[int, int] + ) -> float: + return abs(from_pos[0] - to_pos[0]) + abs(from_pos[1] - to_pos[1]) def _next_state_reward(self, state: State, action: Action) -> StateReward: - x, y = state.value + value = state.value + assert isinstance(value, tuple), f"got type {type(value)} instead of tuple" + # pyre-fixme[23]: Unable to unpack single value, 2 were expected. 
+ (x, y) = value + assert isinstance(x, int) and isinstance( + y, int + ), "Gridworld expects states to be Tuple[int, int]" if state.value in self.walls or state.value == self.goal: return StateReward(State((x, y), state.is_terminal), 0.0) if action.value == 0: @@ -104,6 +155,7 @@ def _next_state_reward(self, state: State, action: Action) -> StateReward: def next_state_reward_dist(self, state: State, action: Action) -> StateDistribution: sr = self._next_state_reward(state, action) + assert sr.state is not None return {sr.state: RewardProbability(sr.reward, 1.0)} @property @@ -166,7 +218,7 @@ def dump_policy(self, policy) -> str: elif pos in self.walls: dump += "\u2588" else: - action = policy(State(pos)).greedy() + action = policy(State(pos)).greedy()[0] if action.value == 0: dump += "\u21e9" elif action.value == 1: @@ -226,6 +278,10 @@ def close(self): def next_state_reward_dist(self, state: State, action: Action) -> StateDistribution: probs = [self.noise_prob] * len(self.action_space) + assert isinstance( + action.value, int + ), f"got type {type(action.value)} instead of int" + # pyre-fixme[16]: `int` has no attribute `__setitem__`. probs[action.value] = 1 - self.epsilon states = {} for a in self.action_space: @@ -233,7 +289,9 @@ def next_state_reward_dist(self, state: State, action: Action) -> StateDistribut if sr.state in states: rp = states[sr.state] states[sr.state] = RewardProbability( - rp.reward + sr.reward, rp.prob + probs[a.value] + rp.reward + sr.reward, + # pyre-fixme[16]: `int` has no attribute `__getitem__`. + rp.prob + probs[a.value], ) else: states[sr.state] = RewardProbability(sr.reward, probs[a.value]) @@ -257,6 +315,7 @@ def current_state(self, state: Optional[None]): GAMMA = 0.9 +USE_DP_VALUE_FUNC = True if __name__ == "__main__": logging.basicConfig(level=logging.INFO) @@ -268,23 +327,7 @@ def current_state(self, state: Optional[None]): device = torch.device("cuda") if torch.cuda.is_available() else None print(f"device - {device}") - gridworld = GridWorld.from_grid( - [ - ["s", "0", "0", "0", "0"], - ["0", "0", "0", "W", "0"], - ["0", "0", "0", "0", "0"], - ["0", "W", "0", "0", "0"], - ["0", "0", "0", "0", "g"], - ], - # [ - # ["s", "0", "0", "0"], - # ["0", "0", "0", "0"], - # ["0", "0", "0", "0"], - # ["0", "0", "0", "g"], - # ], - max_horizon=1000, - ) - # gridworld = ThomasGridWorld() + gridworld = GridWorld.random_grid(10, max_horizon=250, use_taxicab_reward=True) logging.info(f"GridWorld:\n{gridworld}") action_space = ActionSpace(4) @@ -295,26 +338,35 @@ def current_state(self, state: Optional[None]): logging.info(f"Opt Policy:\n{gridworld.dump_policy(opt_policy)}") logging.info(f"Opt state values:\n{gridworld.dump_value_func(value_func)}") - behavivor_policy = RandomRLPolicy(action_space) + # behavivor_policy = RandomRLPolicy(action_space) + behavivor_policy = EpsilonGreedyRLPolicy(opt_policy, 0.7) target_policy = EpsilonGreedyRLPolicy(opt_policy, 0.3) - model = NoiseGridWorldModel(gridworld, action_space, epsilon=0.3, max_horizon=1000) + + model = NoiseGridWorldModel(gridworld, action_space, epsilon=0.1, max_horizon=1000) value_func = DPValueFunction(target_policy, model, GAMMA) - ground_truth = DPValueFunction(target_policy, gridworld, GAMMA) + ground_truth: Optional[ValueFunction] = None + if USE_DP_VALUE_FUNC: + ground_truth = DPValueFunction(target_policy, gridworld, GAMMA) + else: + ground_truth = EstimatedStateValueFunction(target_policy, gridworld, GAMMA) logging.info( f"Target Policy ground truth values:\n" f"{gridworld.dump_value_func(ground_truth)}" 
) - log = {} + logging.info( + f"Logging Policy values:\n" + f"{gridworld.dump_value_func(DPValueFunction(behavivor_policy, model, GAMMA))}" + ) + + log = [] log_generator = PolicyLogGenerator(gridworld, behavivor_policy) - num_episodes = 200 + num_episodes = 50 for state in gridworld.states: - mdps = [] for _ in range(num_episodes): - mdps.append(log_generator.generate_log(state)) - log[state] = mdps - logging.info(f"Generated {len(mdps)} logs for {state}") + log.append(log_generator.generate_log(state)) + logging.info(f"Generated {num_episodes} logs for {state}") estimator_input = RLEstimatorInput( gamma=GAMMA, @@ -324,6 +376,17 @@ def current_state(self, state: Optional[None]): ground_truth=ground_truth, ) + NeuralDualDICE( + device=device, + state_dim=2, + action_dim=4, + deterministic_env=True, + average_next_v=False, + value_lr=0.001, + zeta_lr=0.0001, + batch_size=512, + ).evaluate(estimator_input) + DMEstimator(device=device).evaluate(estimator_input) IPSEstimator(weight_clamper=None, weighted=False, device=device).evaluate( @@ -333,10 +396,10 @@ def current_state(self, state: Optional[None]): estimator_input ) - DREstimator(weight_clamper=None, weighted=False, device=device).evaluate( + DoublyRobustEstimator(weight_clamper=None, weighted=False, device=device).evaluate( estimator_input ) - DREstimator(weight_clamper=None, weighted=True, device=device).evaluate( + DoublyRobustEstimator(weight_clamper=None, weighted=True, device=device).evaluate( estimator_input ) diff --git a/reagent/ope/test/mslr_slate.py b/reagent/ope/test/mslr_slate.py index ba3ff1b34..382b4280c 100644 --- a/reagent/ope/test/mslr_slate.py +++ b/reagent/ope/test/mslr_slate.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
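Two of the GridWorld additions above deserve a restatement: random_grid builds a length x length maze with start (0, 0), goal (length-1, length-1), and walls dropped in with probability wall_prob, while use_taxicab_reward replaces the sparse 0/1 goal reward with exp(-2 * manhattan_distance(pos, goal) / length) on non-goal steps. A small sketch of that shaping term as defined in _transit; the standalone helper and the example positions are illustrative only:

    import numpy as np

    def taxicab_reward(pos, goal, grid_length):
        # exp(-2 * Manhattan distance / grid side length), mirroring GridWorld._transit
        dist = abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])
        return np.exp(-2.0 * dist / grid_length)

    # On the 10x10 random grid used in __main__, goal = (9, 9):
    print(taxicab_reward((0, 0), (9, 9), 10))  # far from the goal, ~0.03
    print(taxicab_reward((8, 9), (9, 9), 10))  # one step away, ~0.82
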
import argparse +import itertools import json import logging import os @@ -9,36 +11,37 @@ import sys import time from collections import OrderedDict -from typing import List, Optional, Tuple +from typing import Iterable, List, Optional, Tuple import numpy as np import torch -from reagent.ope.estimators.estimator import Estimator, EstimatorResults +import torch.multiprocessing as mp +from reagent.ope.estimators.estimator import Evaluator from reagent.ope.estimators.slate_estimators import ( + DCGSlateMetric, DMEstimator, - LogEpisode, + DoublyRobustEstimator, + ERRSlateMetric, + IPSEstimator, LogSample, NDCGSlateMetric, + PassThruDistribution, + PBMEstimator, + PseudoInverseEstimator, + RankingDistribution, + RewardDistribution, SlateContext, + SlateEstimator, SlateEstimatorInput, - SlateItem, - SlateItemProbabilities, - SlateItems, + SlateItemFeatures, SlateItemValues, SlateModel, - SlatePolicy, SlateQuery, SlateSlots, ) -from reagent.ope.trainers.linear_trainers import ( - DecisionTreeClassifierTrainer, - DecisionTreeTrainer, - LassoTrainer, - LogisticRegressionTrainer, - SGDClassifierTrainer, - Trainer, - TrainingData, -) +from reagent.ope.estimators.types import Trainer, TrainingData +from reagent.ope.trainers.linear_trainers import DecisionTreeTrainer, LassoTrainer +from reagent.ope.utils import Clamper from torch import Tensor @@ -53,6 +56,7 @@ def __init__( num_columns: int, anchor_url_features: List[int], body_features: List[int], + dataset_name: str = "", device=None, ): if "folder" not in params: @@ -78,6 +82,12 @@ def __init__( self._validation_data = None self._test_data = None + self._name = dataset_name + + @property + def name(self) -> str: + return self._name + def _add(self, qid: Optional[int], feature_list: List[Tuple[float, Tensor]]): if qid is None or len(feature_list) == 0: return @@ -93,6 +103,7 @@ def load(self): with open(pickle_file, "rb") as f: self._queries, self._features, self._relevances = pickle.load(f) self._cache_file = "" + del f else: self._dict = OrderedDict() text_file = os.path.join(self._folder, self._source_file) @@ -168,6 +179,10 @@ def features(self) -> Tensor: self._load_features() return self._features[:, 1:] + @property + def all_features(self) -> Tensor: + return self.features + @property def anchor_url_features(self) -> Tensor: self._load_features() @@ -190,7 +205,8 @@ def body_features(self) -> Tensor: def relevances(self) -> Tensor: if self._relevances is None: self._relevances = torch.tensor( - [r[0] for v in self._dict.values() for r in v], device=self._device + [r[0] for r in itertools.chain(self._dict.values())], + device=self._device, ) return self._relevances @@ -216,28 +232,35 @@ def cache_file(self) -> str: return self._cache_file -def train(trainer: Trainer, train_dataset: MSLRDatasets, vali_dataset: MSLRDatasets): +def train( + trainer: Trainer, + train_dataset: MSLRDatasets, + vali_dataset: MSLRDatasets, + prefix: str = "", +): logging.info("training all features...") st = time.process_time() training_data = TrainingData( - train_dataset.features, + train_dataset.all_features, train_dataset.relevances, train_dataset.sample_weights, - vali_dataset.features, + vali_dataset.all_features, vali_dataset.relevances, vali_dataset.sample_weights, ) trainer.train(training_data) logging.info(f" training time: {time.process_time() - st}") trainer.save_model( - os.path.join(train_dataset.folder, trainer.name + "_all_features.pickle") + os.path.join( + train_dataset.folder, trainer.name + "_" + prefix + "_all_features.pickle" + ) ) - # 
logging.info("scoring...") - # score = trainer.score( - # vali_dataset.features, vali_dataset.relevances, vali_dataset.sample_weights - # ) - # logging.info(f" score: {score}") + logging.info("scoring...") + score = trainer.score( + vali_dataset.all_features, vali_dataset.relevances, vali_dataset.sample_weights + ) + logging.info(f" score: {score}") logging.info("training anchor_url features...") st = time.process_time() @@ -253,16 +276,19 @@ def train(trainer: Trainer, train_dataset: MSLRDatasets, vali_dataset: MSLRDatas ) logging.info(f" training time: {time.process_time() - st}") trainer.save_model( - os.path.join(train_dataset.folder, trainer.name + "_anchor_url_features.pickle") + os.path.join( + train_dataset.folder, + trainer.name + "_" + prefix + "_anchor_url_features.pickle", + ) ) - # logging.info("scoring...") - # score = trainer.score( - # vali_dataset.anchor_url_features, - # vali_dataset.relevances, - # vali_dataset.sample_weights, - # ) - # logging.info(f" score: {score}") + logging.info("scoring...") + score = trainer.score( + vali_dataset.anchor_url_features, + vali_dataset.relevances, + vali_dataset.sample_weights, + ) + logging.info(f" score: {score}") logging.info("training body features...") st = time.process_time() @@ -278,21 +304,25 @@ def train(trainer: Trainer, train_dataset: MSLRDatasets, vali_dataset: MSLRDatas ) logging.info(f" training time: {time.process_time() - st}") trainer.save_model( - os.path.join(train_dataset.folder, trainer.name + "_body_features.pickle") + os.path.join( + train_dataset.folder, trainer.name + "_" + prefix + "_body_features.pickle" + ) ) - # logging.info("scoring...") - # score = trainer.score( - # vali_dataset.body_features, vali_dataset.relevances, vali_dataset.sample_weights - # ) - # logging.info(f" score: {score}") + logging.info("scoring...") + score = trainer.score( + vali_dataset.body_features, vali_dataset.relevances, vali_dataset.sample_weights + ) + logging.info(f" score: {score}") def load_dataset( - params, num_columns, anchor_url_features, body_features + params, num_columns, anchor_url_features, body_features, dataset_name="" ) -> MSLRDatasets: logging.info(f"loading {params['source_file']}") - dataset = MSLRDatasets(params, num_columns, anchor_url_features, body_features) + dataset = MSLRDatasets( + params, num_columns, anchor_url_features, body_features, dataset_name + ) st = time.process_time() dataset.load() logging.info(f" load time: {time.process_time() - st}") @@ -311,155 +341,193 @@ def load_dataset( return dataset -def train_all(train_dataset, vali_dataset): - train(DecisionTreeClassifierTrainer(), train_dataset, vali_dataset) - train(DecisionTreeTrainer(), train_dataset, vali_dataset) - train(LassoTrainer(), train_dataset, vali_dataset) - train(LogisticRegressionTrainer(), train_dataset, vali_dataset) - train(SGDClassifierTrainer(), train_dataset, vali_dataset) +def train_all(train_dataset, vali_dataset, prefix: str = ""): + # train(DecisionTreeClassifierTrainer(), train_dataset, vali_dataset) + train(DecisionTreeTrainer(), train_dataset, vali_dataset, prefix) + train(LassoTrainer(), train_dataset, vali_dataset, prefix) + # train(LogisticRegressionTrainer(), train_dataset, vali_dataset) + # train(SGDClassifierTrainer(), train_dataset, vali_dataset) -class TrainedModel(SlateModel): - def __init__(self, relevances: Tensor, device=None): - self._relevances = relevances - self._device = device - - def item_rewards(self, context: SlateContext) -> SlateItemValues: - qv = context.query.value - item_rewards = 
self._relevances[qv[1] : (qv[1] + qv[2])].detach().clone() - return SlateItemValues(item_rewards) - - # def item_rewards(self, context: SlateContext) -> SlateItemValues: - # qv = context.query.value - # item_rewards = self._relevances[qv[1] : (qv[1] + qv[2])] - # return SlateItemValues(item_rewards) +def train_models(params): + all_dataset = load_dataset( + params["all_set"], num_columns, anchor_url_features, body_features + ) + half_dataset = load_dataset( + params["first_set"], num_columns, anchor_url_features, body_features + ) + vali_dataset = load_dataset( + params["vali_set"], num_columns, anchor_url_features, body_features + ) + train_all(all_dataset, vali_dataset, "all") + train_all(half_dataset, vali_dataset, "half") -class GroundTruthModel(SlateModel): +class MSLRModel(SlateModel): def __init__(self, relevances: Tensor, device=None): self._relevances = relevances self._device = device - def item_rewards(self, context: SlateContext) -> SlateItemValues: + def item_relevances(self, context: SlateContext) -> Tensor: qv = context.query.value - doc_rewards = self._relevances[qv[1] : (qv[1] + qv[2])] - return SlateItemValues(doc_rewards) - - -class MSLRPolicy(SlatePolicy): - def __init__( - self, relevances: Tensor, deterministic: bool, alpha: float = -1.0, device=None - ): - super().__init__(device) - self._relevances = relevances - self._deterministic = deterministic - self._alpha = alpha - - def _item_rewards(self, context: SlateContext) -> Tensor: - qv = context.query.value - item_rewards = self._relevances[qv[1] : (qv[1] + qv[2])].detach().clone() - if self._alpha >= 0: - _, ids = torch.sort(item_rewards, descending=True) - rank = torch.arange(1, ids.shape[0] + 1, dtype=torch.double) - item_rewards[ids] = torch.pow(2, -1.0 * self._alpha * torch.log2(rank)) - return item_rewards + if context.params is None: + relevances = self._relevances[qv[1] : (qv[1] + qv[2])].detach().clone() + else: + relevances = ( + # pyre-fixme[6]: For 1st param expected `Union[None, + # List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, + # ...]]` but got `object`. + self._relevances[qv[1] : (qv[1] + qv[2])][context.params] + .detach() + .clone() + ) + return relevances - def _query(self, context: SlateContext) -> SlateItemProbabilities: - return SlateItemProbabilities(self._item_rewards(context), self._deterministic) + def item_rewards(self, context: SlateContext) -> SlateItemValues: + return SlateItemValues(self.item_relevances(context)) def evaluate( - estimator: Estimator, input: SlateEstimatorInput, folder: str = "." 
-) -> EstimatorResults: - logging.info(f"Evaluating {estimator}...") - st = time.process_time() - rs = estimator.evaluate(input) - dt = time.process_time() - st - print(f"Evaluating {estimator} done: {rs} in {dt}s", flush=True) - file = os.path.join(folder, estimator.__class__.__name__ + "_results.pickle") - try: - with open(file, "wb") as f: - pickle.dump(rs, f, protocol=pickle.HIGHEST_PROTOCOL) - except Exception: - logging.error(f"{file} cannot be accessed.") - return rs - - -def evalute_all( + experiments: Iterable[Tuple[Iterable[SlateEstimator], int]], dataset: MSLRDatasets, slate_size: int, + item_size: int, + metric_func: str, log_trainer: Trainer, + log_distribution: RewardDistribution, + log_features: str, tgt_trainer: Trainer, - tgt_deterministic: bool, - num_episodes: int, - num_samples: int, + tgt_distribution: RewardDistribution, + tgt_features: str, + dm_features: str, + max_num_workers: int, + device=None, ): + assert slate_size < item_size print( - f"Run: {log_trainer.name}, {tgt_trainer.name}" - f"[{'deterministic' if tgt_deterministic else 'stochastic'}]", + f"Evaluate All:" + f" slate_size={slate_size}, item_size={item_size}, metric={metric_func}" + f", Log=[{log_trainer.name}, {log_distribution}, {log_features}]" + f", Target=[{tgt_trainer.name}, {tgt_distribution}, {tgt_features}]" + f", DM=[{dm_features}]" + f", Workers={max_num_workers}, device={device}", flush=True, ) logging.info("Preparing models and policies...") - st = time.process_time() + st = time.perf_counter() log_trainer.load_model( - os.path.join(dataset.folder, log_trainer.name + "_anchor_url_features.pickle") + os.path.join( + dataset.folder, log_trainer.name + "_all_" + log_features + ".pickle" + ) ) - log_pred = log_trainer.predict(dataset.anchor_url_features) - log_model = TrainedModel(log_pred.scores) - log_policy = MSLRPolicy(log_pred.scores, False, 1.0) + # calculate behavior model scores + log_pred = log_trainer.predict(getattr(dataset, log_features)) tgt_trainer.load_model( - os.path.join(dataset.folder, tgt_trainer.name + "_body_features.pickle") + os.path.join( + dataset.folder, tgt_trainer.name + "_all_" + tgt_features + ".pickle" + ) ) - tgt_pred = tgt_trainer.predict(dataset.body_features) - tgt_model = TrainedModel(tgt_pred.scores) - tgt_policy = MSLRPolicy(tgt_pred.scores, tgt_deterministic, 1.0) + # calculate target model scores + tgt_pred = tgt_trainer.predict(getattr(dataset, tgt_features)) - dt = time.process_time() - st - logging.info(f"Preparing models and policies done: {dt}s") + dm_train_features = getattr(dataset, dm_features) - logging.info("Generating log...") - st = time.process_time() slots = SlateSlots(slate_size) - queries = dataset.queries - episodes = [] - for q in queries: - query = SlateQuery(q) - items = SlateItems([SlateItem(i) for i in range(q[2].item())]) - if len(items) < slate_size: - logging.warning( - f"Number of items ({len(items)}) less than " - f"number of slots ({slate_size})" - ) - continue - context = SlateContext(query, slots, items) - log_item_probs = log_policy(context) - log_item_rewards = log_model.item_rewards(context) - tgt_item_probs = tgt_policy(context) - metric = NDCGSlateMetric(log_item_rewards) + + dt = time.perf_counter() - st + logging.info(f"Preparing models and policies done: {dt}s") + + total_samples = 0 + for _, num_samples in experiments: + total_samples += num_samples + logging.info(f"Generating log: total_samples={total_samples}") + st = time.perf_counter() + tasks = [] + samples_generated = 0 + total_queries = 
dataset.queries.shape[0] + for estimators, num_samples in experiments: samples = [] for _ in range(num_samples): - slate = log_item_probs.sample_slate(slots) - samples.append(LogSample(slate, slate.slot_values(log_item_rewards))) - episodes.append( - LogEpisode( - context, metric, samples, None, log_item_probs, None, tgt_item_probs + # randomly sample a query + q = dataset.queries[random.randrange(total_queries)] + doc_size = int(q[2]) + if doc_size < item_size: + # skip if number of docs is less than item_size + continue + si = int(q[1]) + ei = si + doc_size + # using top item_size docs for logging + log_scores, item_choices = log_pred.scores[si:ei].sort( + dim=0, descending=True ) - ) - if len(episodes) >= num_episodes: - break - dt = time.process_time() - st - logging.info(f"Generating log done: {len(episodes)} samples in {dt}s") - - input = SlateEstimatorInput(episodes, tgt_model, log_model) + log_scores = log_scores[:item_size] + item_choices = item_choices[:item_size] + log_item_probs = log_distribution(SlateItemValues(log_scores)) + tgt_scores = tgt_pred.scores[si:ei][item_choices].detach().clone() + tgt_item_probs = tgt_distribution(SlateItemValues(tgt_scores)) + tgt_slot_expectation = tgt_item_probs.slot_item_expectations(slots) + gt_item_rewards = SlateItemValues(dataset.relevances[si:ei][item_choices]) + gt_rewards = tgt_slot_expectation.expected_rewards(gt_item_rewards) + if metric_func == "dcg": + metric = DCGSlateMetric(device=device) + elif metric_func == "err": + metric = ERRSlateMetric(4.0, device=device) + else: + metric = NDCGSlateMetric(gt_item_rewards, device=device) + query = SlateQuery((si, ei)) + context = SlateContext(query, slots, item_choices) + slot_weights = metric.slot_weights(slots) + gt_reward = metric.calculate_reward(slots, gt_rewards, None, slot_weights) + if tgt_item_probs.is_deterministic: + tgt_slate_prob = 1.0 + log_slate = tgt_item_probs.sample_slate(slots) + log_reward = gt_reward + else: + tgt_slate_prob = float("nan") + log_slate = log_item_probs.sample_slate(slots) + log_rewards = log_slate.slot_values(gt_item_rewards) + log_reward = metric.calculate_reward( + slots, log_rewards, None, slot_weights + ) + log_slate_prob = log_item_probs.slate_probability(log_slate) + item_features = SlateItemFeatures(dm_train_features[si:ei][item_choices]) + sample = LogSample( + context, + metric, + log_slate, + log_reward, + log_slate_prob, + None, + log_item_probs, + tgt_slate_prob, + None, + tgt_item_probs, + gt_reward, + slot_weights, + None, + item_features, + ) + samples.append(sample) + samples_generated += 1 + if samples_generated % 1000 == 0: + logging.info( + f" samples generated: {samples_generated}, {100 * samples_generated / total_samples:.1f}%" + ) + tasks.append((estimators, SlateEstimatorInput(samples))) + dt = time.perf_counter() - st + logging.info(f"Generating log done: {total_samples} samples in {dt}s") - evaluate(DMEstimator(device=device), input) - # evaluate(IPSEstimator(device=device), input) - # evaluate(PseudoInverseEstimator(device=device), input) - # evaluate(PBMEstimator(device=device), input) + logging.info("start evaluating...") + st = time.perf_counter() + evaluator = Evaluator(tasks, max_num_workers) + Evaluator.report_results(evaluator.evaluate()) + logging.info(f"evaluating done in {time.perf_counter() - st}s") if __name__ == "__main__": + mp.set_start_method("spawn") + logging.basicConfig( format="%(asctime)-15s_%(levelname)s: %(message)s", level=logging.INFO ) @@ -498,20 +566,49 @@ def evalute_all( ) body_features = 
params["body_features"] if "body_features" in params else None - train_dataset = load_dataset( - params["train_set"], num_columns, anchor_url_features, body_features - ) - vali_dataset = load_dataset( - params["vali_set"], num_columns, anchor_url_features, body_features - ) - train_all(train_dataset, vali_dataset) - - exit(0) + # uncomment to train behavior and target models + # train_models(params) test_dataset = load_dataset( - params["test_set"], num_columns, anchor_url_features, body_features - ) - - evalute_all( - test_dataset, 5, DecisionTreeTrainer(), DecisionTreeTrainer(), True, 100, 100 + params["second_set"], + num_columns, + anchor_url_features, + body_features, + "second_set", ) + weight_clamper = Clamper(min_v=0.0) + estimators = [ + DMEstimator(DecisionTreeTrainer(), 0.5, device=device), + IPSEstimator(weight_clamper=weight_clamper, device=device), + DoublyRobustEstimator( + DecisionTreeTrainer(), 0.5, weight_clamper, False, device + ), + DoublyRobustEstimator(DecisionTreeTrainer(), 0.5, weight_clamper, True, device), + PseudoInverseEstimator(weight_clamper=weight_clamper, device=device), + PBMEstimator(weight_clamper=weight_clamper, device=device), + ] + + metrics = ["ndcg", "err"] + alphas = [0.0, 1.0, 2.0] + trainers = [ + (DecisionTreeTrainer(), LassoTrainer()), + (LassoTrainer(), DecisionTreeTrainer()), + ] + for log_trainer, tgt_trainers in trainers: + for metric in metrics: + for alpha in alphas: + evaluate( + [(estimators, 200)] * 4, + test_dataset, + 5, + 20, + metric, + log_trainer, + RankingDistribution(alpha), + "anchor_url_features", + tgt_trainers, + PassThruDistribution(), + "body_features", + "all_features", + 4, + ) diff --git a/reagent/ope/test/multiclass_bandits.py b/reagent/ope/test/multiclass_bandits.py index da84937c5..90b6ae161 100644 --- a/reagent/ope/test/multiclass_bandits.py +++ b/reagent/ope/test/multiclass_bandits.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import argparse import json @@ -7,7 +8,8 @@ import random import sys from dataclasses import dataclass -from typing import Tuple +from pathlib import PurePath +from typing import Iterable, Tuple import numpy as np import pandas as pd @@ -21,14 +23,14 @@ DMEstimator, DoublyRobustEstimator, IPSEstimator, - Log, LogSample, ) -from reagent.ope.estimators.types import ActionSpace, Policy +from reagent.ope.estimators.estimator import Estimator, Evaluator +from reagent.ope.estimators.types import ActionSpace, Policy, Trainer, TrainingData from reagent.ope.trainers.linear_trainers import ( + DecisionTreeTrainer, LogisticRegressionTrainer, SGDClassifierTrainer, - TrainingData, ) from torch import Tensor @@ -57,8 +59,9 @@ def __init__(self, params, device=None): index_col = params["index_col"] if "index_col" in params else None label_col = params["label_col"] sep = params["sep"] if "sep" in params else "," + self._config_file = params["file"] self._data_frame = pd.read_csv( - params["file"], + self._config_file, sep=sep, header=None, index_col=index_col if index_col is not None else False, @@ -103,6 +106,10 @@ def __getitem__(self, idx) -> MultiClassDataRow: self._features[idx], self._class_indices[idx], self._one_hots[idx] ) + @property + def config_file(self) -> str: + return self._config_file + @property def num_features(self) -> int: return self._features.shape[1] @@ -153,6 +160,7 @@ def train_val_test_split( torch.as_tensor(test_x, dtype=torch.float, device=device), torch.as_tensor(test_y, dtype=torch.float, device=device), torch.as_tensor(test_r, dtype=torch.float, device=device), + train_choices, ) @@ -179,17 +187,107 @@ def __init__( device=None, ): super().__init__(action_space, device) - self._action_ditributions = action_distributions + self._action_distributions = action_distributions self._exploitation_prob = 1.0 - epsilon self._exploration_prob = epsilon / len(self.action_space) - def _query(self, context: MultiClassContext) -> Tuple[Action, ActionDistribution]: - dist = self._action_ditributions[context.query_id] + def _query(self, context: int) -> Tuple[Action, ActionDistribution]: + dist = self._action_distributions[context] + if len(dist.shape) > 1 and dist.shape[0] == 1: + dist = dist[0] + if dist.shape[0] < len(self.action_space): + dist = torch.cat( + (dist, torch.zeros([len(self.action_space) - dist.shape[0]])) + ) dist = dist * self._exploitation_prob + self._exploration_prob action = torch.multinomial(dist, 1).item() return Action(action), ActionDistribution(dist) +def evaluate_all( + experiments: Iterable[Tuple[Iterable[Estimator], int]], + dataset: UCIMultiClassDataset, + log_trainer: Trainer, + log_epsilon: float, + tgt_trainer: Trainer, + tgt_epsilon: float, + max_num_workers: int, + random_reward_prob: float = 0.0, + device=None, +): + action_space = ActionSpace(dataset.num_actions) + config_path = PurePath(dataset.config_file) + data_name = config_path.stem + log_model_name = data_name + "_" + log_trainer.__class__.__name__ + ".pickle" + log_model_file = str(config_path.with_name(log_model_name)) + tgt_model_name = data_name + "_" + tgt_trainer.__class__.__name__ + ".pickle" + tgt_model_file = str(config_path.with_name(tgt_model_name)) + + log_trainer.load_model(log_model_file) + tgt_trainer.load_model(tgt_model_file) + if not log_trainer.is_trained or not tgt_trainer.is_trained: + ( + train_x, + train_y, + train_r, + val_x, + val_y, + val_r, + test_x, + test_y, + test_r, + train_choices, + ) = dataset.train_val_test_split((0.2, 0.8)) + trainer_data = 
TrainingData(train_x, train_y, None, val_x, val_y, None) + if not log_trainer.is_trained: + log_trainer.train(trainer_data) + log_trainer.save_model(log_model_file) + if not tgt_trainer.is_trained: + tgt_trainer.train(trainer_data) + tgt_trainer.save_model(tgt_model_file) + + log_results = log_trainer.predict(dataset.features) + assert log_results.probabilities is not None + log_policy = MultiClassPolicy(action_space, log_results.probabilities, log_epsilon) + + tgt_results = tgt_trainer.predict(dataset.features) + assert tgt_results.probabilities is not None + tgt_policy = MultiClassPolicy(action_space, tgt_results.probabilities, tgt_epsilon) + + tasks = [] + # pyre-fixme[61]: `train_choices` may not be initialized here. + test_queries = list(set(range(len(dataset))) - set(train_choices)) + for estimators, num_samples in experiments: + samples = [] + for _ in range(num_samples): + qid = random.sample(test_queries, 1) + label = int(dataset.labels[qid].item()) + log_action, log_action_probabilities = log_policy(qid) + log_reward = 1.0 if log_action.value == label else 0.0 + tgt_action, tgt_action_probabilities = tgt_policy(qid) + ground_truth_reward = 1.0 if tgt_action.value == label else 0.0 + item_feature = dataset.features[qid] + random_reward = random.random() < random_reward_prob + samples.append( + LogSample( + context=qid, + log_action=log_action, + log_reward=random.randint(0, 1) if random_reward else log_reward, + log_action_probabilities=log_action_probabilities, + tgt_action_probabilities=tgt_action_probabilities, + tgt_action=tgt_action, + ground_truth_reward=ground_truth_reward, + item_feature=item_feature, + ) + ) + tasks.append((estimators, BanditsEstimatorInput(action_space, samples, False))) + + evaluator = Evaluator(tasks, max_num_workers) + results = evaluator.evaluate() + Evaluator.report_results(results) + return results + + DEFAULT_ITERATIONS = 500 if __name__ == "__main__": @@ -212,71 +310,22 @@ def _query(self, context: MultiClassContext) -> Tuple[Action, ActionDistribution torch.random.manual_seed(1234) dataset = UCIMultiClassDataset(params["dataset"]) - - episodes = DEFAULT_ITERATIONS - if "iterations" in params: - episodes = params["iterations"] - - training_iterations = 10 - training_test_split_ratio = 0.5 - train_x, train_y, train_r, val_x, val_y, val_r, test_x, test_y, test_r = dataset.train_val_test_split( - (0.8, 0.8) - ) - - trainer_data = TrainingData(train_x, train_y, None, val_x, val_y, None) - - action_space = ActionSpace(dataset.num_actions) - gt_model = MultiClassModel(test_x, test_r) - log_trainer = LogisticRegressionTrainer() - log_trainer.train(trainer_data) - log_results = log_trainer.predict(test_x) - score = log_trainer.score(test_y, log_results.predictions) - logging.info(f"Model trainer score: {score}") - log_model = MultiClassModel(test_x, log_results.probabilities) - log_policy = MultiClassPolicy(action_space, log_results.probabilities, 1.0) - - target_trainer = SGDClassifierTrainer() - # target_trainer = SGDClassifierTrainer(500, 'modified_huber') - target_trainer.train(trainer_data) - target_results = target_trainer.predict(test_x) - score = target_trainer.score(test_y, target_results.predictions) - logging.info(f"Target trainer score: {score}") - target_model = MultiClassModel(test_x, target_results.probabilities) - target_policy = MultiClassPolicy(action_space, target_results.probabilities, 0.1) - - num_epsidoes = 10 - num_total_samples = test_x.shape[0] - num_sample = num_total_samples // 5 - - logs = [] - for i in range(num_epsidoes): - 
train_choices = random.sample(range(num_total_samples), num_sample) - samples = [] - for i in train_choices: - context = MultiClassContext(i) - logged_action, logged_dist = log_policy(context) - logged_reward = log_model(context)[logged_action] - target_action, target_dist = target_policy(context) - samples.append( - LogSample( - context, - logged_action, - logged_dist, - logged_reward, - target_action, - target_dist, - ) - ) - logs.append(Log(samples)) - - input = BanditsEstimatorInput(action_space, logs, target_model, gt_model) - - result = DMEstimator().evaluate(input) - logging.info(f"DM result: {result}") - - result = IPSEstimator().evaluate(input) - logging.info(f"IPS result: {result}") - - result = DoublyRobustEstimator().evaluate(input) - logging.info(f"DR result: {result}") + log_epsilon = 0.1 + tgt_trainer = SGDClassifierTrainer() + tgt_epsilon = 0.1 + dm_trainer = DecisionTreeTrainer() + experiments = [ + ( + ( + DMEstimator(DecisionTreeTrainer()), + IPSEstimator(), + DoublyRobustEstimator(DecisionTreeTrainer()), + ), + 1000, + ) + for _ in range(100) + ] + evaluate_all( + experiments, dataset, log_trainer, log_epsilon, tgt_trainer, tgt_epsilon, 0 + ) diff --git a/reagent/ope/test/notebooks/CartpoleExperiments.ipynb b/reagent/ope/test/notebooks/CartpoleExperiments.ipynb new file mode 100644 index 000000000..fa51e25cb --- /dev/null +++ b/reagent/ope/test/notebooks/CartpoleExperiments.ipynb @@ -0,0 +1,1218 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:reagent.core.dataclasses:USE_VANILLA_DATACLASS: False\n", + "INFO:reagent.core.dataclasses:ARBITRARY_TYPES_ALLOWED: True\n", + "INFO:reagent.core.registry_meta:Adding REGISTRY to type LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Not Registering LearningRateSchedulerConfig to LearningRateSchedulerConfig. Abstract method [] are not implemented.\n", + "INFO:reagent.core.registry_meta:Registering LambdaLR to LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Registering MultiplicativeLR to LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Registering StepLR to LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Registering MultiStepLR to LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Registering ExponentialLR to LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Registering CosineAnnealingLR to LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Registering CyclicLR to LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Registering OneCycleLR to LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Registering CosineAnnealingWarmRestarts to LearningRateSchedulerConfig\n", + "INFO:reagent.core.registry_meta:Adding REGISTRY to type OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Not Registering OptimizerConfig to OptimizerConfig. 
Abstract method [] are not implemented.\n", + "INFO:reagent.core.registry_meta:Registering Adam to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering AdamW to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering SparseAdam to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering Adamax to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering LBFGS to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering Rprop to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering ASGD to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering Adadelta to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering Adagrad to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering RMSprop to OptimizerConfig\n", + "INFO:reagent.core.registry_meta:Registering SGD to OptimizerConfig\n" + ] + } + ], + "source": [ + "import gym\n", + "import numpy as np\n", + "import torch\n", + "from typing import Iterable, Mapping, Optional, Sequence, Set, Tuple, Union\n", + "from reagent.ope.estimators.sequential_estimators import (\n", + " Mdp,\n", + " Model,\n", + " RLPolicy,\n", + " State,\n", + " StateReward,\n", + " Transition,\n", + " ActionSpace,\n", + " ActionDistribution,\n", + " Action,\n", + " RandomRLPolicy,\n", + " RLEstimatorInput,\n", + " IPSEstimator,\n", + " NeuralDualDICE,\n", + ")\n", + "from reagent.models.dqn import FullyConnectedDQN\n", + "from reagent.ope.utils import Clamper, RunningAverage\n", + "from gym import wrappers\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt\n", + "\n", + "NUM_EPISODES = 200\n", + "MAX_HORIZON = 250\n", + "GAMMA = 0.99\n", + "ALPHA = 0.66\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else None\n", + "print(f\"Device - {device}\")\n", + "\n", + "model = torch.jit.load(\"/mnt/vol/gfsfblearner-nebraska/flow/data/2020-07-24/18eeebdf-b0ed-4f93-b079-95f7c58656ff/207187922_207187922_0.pt\")\n", + "model = model.dqn_with_preprocessor.model\n", + "model.to(device)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Define the policy classes" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "class ComboPolicy(RLPolicy):\n", + " # Weighted combination between two given policies\n", + " def __init__(self, action_space: ActionSpace, weights: Sequence[float], policies: Sequence[RLPolicy]):\n", + " assert len(weights) == len(policies)\n", + " self._weights = weights\n", + " self._policies = policies\n", + " self._action_space = action_space\n", + " self._softmax = torch.nn.Softmax()\n", + " \n", + " def action_dist(self, state: State) -> ActionDistribution:\n", + " weighted_policies = [w * p(state).values for w,p in zip(self._weights, self._policies)]\n", + " weighted = torch.stack(weighted_policies).sum(0)\n", + " dist = self._softmax(weighted)\n", + " return self._action_space.distribution(dist)\n", + " \n", + "class PyTorchPolicy(RLPolicy):\n", + " def __init__(self, action_space: ActionSpace, model):\n", + " self._action_space = action_space\n", + " self._model = model\n", + " self._softmax = torch.nn.Softmax()\n", + " \n", + " def action_dist(self, state: State) -> ActionDistribution:\n", + " dist = self._model(torch.tensor(state.value, dtype=torch.float).reshape(1, -1))[0]\n", + " return self._action_space.distribution(self._softmax(dist))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Utility Functions" + ] + 
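ComboPolicy above mixes two policies' action distributions with fixed weights and renormalizes with a softmax; later cells use weights (0.55 + 0.15 * ALPHA, 0.45 - 0.15 * ALPHA) to blend the trained model with a uniform random policy. A small self-contained sketch of that mixture, outside the notebook and with made-up action probabilities:

import torch

ALPHA = 0.66  # same interpolation constant the notebook defines
weights = [0.55 + 0.15 * ALPHA, 0.45 - 0.15 * ALPHA]  # model policy vs. random policy

model_dist = torch.tensor([0.9, 0.1])   # hypothetical model action probabilities (2 actions)
random_dist = torch.tensor([0.5, 0.5])  # uniform random policy over the same 2 actions

# Weighted sum of the two distributions, then softmax, as in ComboPolicy.action_dist
weighted = weights[0] * model_dist + weights[1] * random_dist
mixture = torch.softmax(weighted, dim=0)
print(mixture.tolist())

Because of the softmax, the result is not a simple convex combination of the two probability vectors; the weighted scores are re-warped rather than averaged directly.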
}, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def generate_logs(episodes: int, max_horizon: int, policy: RLPolicy) -> Sequence[Mdp]:\n", + " \"\"\"\n", + " Args:\n", + " episodes: number of episodes to generate\n", + " max_horizon: max horizon of each episode\n", + " policy: RLPolicy which uses real-valued states\n", + " \"\"\"\n", + " log = []\n", + " env = gym.make('CartPole-v0')\n", + " for _ in range(episodes):\n", + " init_state = env.reset()\n", + " cur_state = init_state\n", + " mdp = []\n", + " for _ in range(max_horizon):\n", + " action_dist = policy(State(cur_state))\n", + " action = action_dist.greedy().value\n", + " action_prob = action_dist.probability(Action(action))\n", + " next_state, reward, done, _ = env.step(action)\n", + " mdp.append(Transition(last_state=State(cur_state),\n", + " action=Action(action),\n", + " action_prob=action_prob,\n", + " state=State(next_state),\n", + " reward=reward,\n", + " status=2 if done else 1))\n", + " if done:\n", + " break\n", + " cur_state = next_state\n", + " log.append(mdp)\n", + " return log\n", + "\n", + "def zeta_nu_loss_callback(losses: Sequence[Tuple[float, float]], \n", + " estimated_values: Sequence, \n", + " input: RLEstimatorInput):\n", + " def callback_fn(zeta_loss, nu_loss, estimator):\n", + " losses.append((zeta_loss, nu_loss))\n", + " estimated_values.append(estimator._compute_estimates(input))\n", + " return callback_fn" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create the trained policy, target policy, and behavior policy" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "random_policy = RandomRLPolicy(ActionSpace(2))\n", + "model_policy = PyTorchPolicy(ActionSpace(2), model)\n", + "target_policy = ComboPolicy(ActionSpace(2), [1.0, 0.0], [model_policy, random_policy])\n", + "behavior_policy = ComboPolicy(ActionSpace(2), [0.55 + 0.15 * ALPHA, 0.45 - 0.15 * ALPHA], [model_policy, random_policy])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Generate the logged dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/ipykernel_launcher.py:24: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/ipykernel_launcher.py:13: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n", + " del sys.path[0]\n" + ] + } + ], + "source": [ + "log = generate_logs(NUM_EPISODES, MAX_HORIZON, behavior_policy)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Estimate the value of the target policy\n", + "\n", + "Since the states are real-valued, instead of estimating v^pi(s), we take the average sum of the discounted rewards over numerous trials, getting E[v^pi(s)]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/ipykernel_launcher.py:24: UserWarning: Implicit dimension choice for softmax has been deprecated. 
Change the call to include dim=X as an argument.\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/ipykernel_launcher.py:13: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n", + " del sys.path[0]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Target Policy Ground Truth value: 70.20302794198436\n" + ] + } + ], + "source": [ + "def estimate_value(episodes: int, max_horizon: int, policy: RLPolicy, gamma: float):\n", + " avg = RunningAverage()\n", + " env = gym.make('CartPole-v0')\n", + " for _ in range(episodes):\n", + " init_state = env.reset()\n", + " cur_state = init_state\n", + " r = 0.0\n", + " discount = 1.0\n", + " for _ in range(max_horizon):\n", + " action_dist = policy(State(cur_state))\n", + " action = action_dist.greedy().value\n", + " action_prob = action_dist.probability(Action(action))\n", + " next_state, reward, done, _ = env.step(action)\n", + " r += reward * discount\n", + " discount *= gamma\n", + " if done:\n", + " break\n", + " cur_state = next_state\n", + " avg.add(r)\n", + " return avg.average\n", + "\n", + "ground_truth = estimate_value(NUM_EPISODES, MAX_HORIZON, target_policy, GAMMA)\n", + "print(f\"Target Policy Ground Truth value: {ground_truth}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "inp = RLEstimatorInput(\n", + " gamma=GAMMA,\n", + " log=log,\n", + " target_policy=target_policy,\n", + " discrete_states=False\n", + ")\n", + "ips = IPSEstimator()\n", + "dualdice_losses = []\n", + "dualdice_values = []\n", + "dualdice = NeuralDualDICE(4, 2, deterministic_env=True, \n", + " value_lr=0.003, zeta_lr=0.003, \n", + " batch_size=2048, \n", + " loss_callback_fn=zeta_nu_loss_callback(dualdice_losses, dualdice_values, inp),\n", + " device=device)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:IPSEstimator(device(None),weighted[True]}: start evaluating\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/ipykernel_launcher.py:24: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/ipykernel_launcher.py:13: UserWarning: Implicit dimension choice for softmax has been deprecated. 
Change the call to include dim=X as an argument.\n", + " del sys.path[0]\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=74.5090560913086, ground_truth=0.0\n", + "INFO:root:IPSEstimator(device(None),weighted[True]}: finishing evaluating[process_time=13.853707919000001]\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=12.197612311945937, ground_truth=0.0\n", + "INFO:root:Samples 100 Avg Zeta Loss 0.013515950131695717, Avg Value Loss -0.011872679508778674\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=21.359412562633842, ground_truth=0.0\n", + "INFO:root:Samples 200 Avg Zeta Loss 0.032867668516701073, Avg Value Loss -0.03195237421035925\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=31.3605478482464, ground_truth=0.0\n", + "INFO:root:Samples 300 Avg Zeta Loss 0.06170809593284501, Avg Value Loss -0.060989961180688\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=39.15435264085474, ground_truth=0.0\n", + "INFO:root:Samples 400 Avg Zeta Loss 0.09260961384687108, Avg Value Loss -0.09186012931436383\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=45.733648655608356, ground_truth=0.0\n", + "INFO:root:Samples 500 Avg Zeta Loss 0.1208919502585195, Avg Value Loss -0.12021297005033559\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=50.04632369489927, ground_truth=0.0\n", + "INFO:root:Samples 600 Avg Zeta Loss 0.14566879029812604, Avg Value Loss -0.14500885449528747\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.906834703138784, ground_truth=0.0\n", + "INFO:root:Samples 700 Avg Zeta Loss 0.16704220785193952, Avg Value Loss -0.16637350306068183\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.61997176190003, ground_truth=0.0\n", + "INFO:root:Samples 800 Avg Zeta Loss 0.18543581553356497, Avg Value Loss -0.18476388545841008\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=59.48113522645389, ground_truth=0.0\n", + "INFO:root:Samples 900 Avg Zeta Loss 0.20143835243743122, Avg Value Loss -0.20076695910877684\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=60.22765881056284, ground_truth=0.0\n", + "INFO:root:Samples 1000 Avg Zeta Loss 0.21566343501652593, Avg Value Loss -0.21490484686303174\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=58.97033364185004, ground_truth=0.0\n", + "INFO:root:Samples 1100 Avg Zeta Loss 0.22887356189566418, Avg Value Loss -0.22799524105505548\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.226421100993456, ground_truth=0.0\n", + "INFO:root:Samples 1200 Avg Zeta Loss 0.2419627257225026, Avg Value Loss -0.24107862580657052\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.761702546247356, ground_truth=0.0\n", + "INFO:root:Samples 1300 Avg Zeta Loss 0.25531347974328894, Avg Value Loss -0.25432005140726405\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.73503067061738, ground_truth=0.0\n", + "INFO:root:Samples 1400 Avg Zeta Loss 0.269031892017561, Avg Value Loss -0.26795893059321824\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.137122485773936, ground_truth=0.0\n", + "INFO:root:Samples 1500 Avg Zeta Loss 0.28323989188966014, Avg Value Loss -0.2821323165006636\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=52.98327834499681, ground_truth=0.0\n", + "INFO:root:Samples 
1600 Avg Zeta Loss 0.29787298655413924, Avg Value Loss -0.2967181593279537\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.43967350268721, ground_truth=0.0\n", + "INFO:root:Samples 1700 Avg Zeta Loss 0.31294792548958755, Avg Value Loss -0.31169310430456576\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=52.67908842175486, ground_truth=0.0\n", + "INFO:root:Samples 1800 Avg Zeta Loss 0.32836873602781735, Avg Value Loss -0.3270895791804788\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.315209816077974, ground_truth=0.0\n", + "INFO:root:Samples 1900 Avg Zeta Loss 0.34415020323346207, Avg Value Loss -0.3427792751559455\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.24424501362402, ground_truth=0.0\n", + "INFO:root:Samples 2000 Avg Zeta Loss 0.3601947005562248, Avg Value Loss -0.3587538495441672\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.68376994087084, ground_truth=0.0\n", + "INFO:root:Samples 2100 Avg Zeta Loss 0.37643962734012987, Avg Value Loss -0.3749142044007209\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.96217747478104, ground_truth=0.0\n", + "INFO:root:Samples 2200 Avg Zeta Loss 0.39291921264947005, Avg Value Loss -0.391323937455849\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.75942171227504, ground_truth=0.0\n", + "INFO:root:Samples 2300 Avg Zeta Loss 0.4095673371546738, Avg Value Loss -0.40792873051550044\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.67920215574946, ground_truth=0.0\n", + "INFO:root:Samples 2400 Avg Zeta Loss 0.4264367160840384, Avg Value Loss -0.424742956217825\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.3068904221308, ground_truth=0.0\n", + "INFO:root:Samples 2500 Avg Zeta Loss 0.44355532330451514, Avg Value Loss -0.4417917512242315\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.40253030984578, ground_truth=0.0\n", + "INFO:root:Samples 2600 Avg Zeta Loss 0.46088527779336663, Avg Value Loss -0.45900153291381285\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.589157551630656, ground_truth=0.0\n", + "INFO:root:Samples 2700 Avg Zeta Loss 0.47835447261894665, Avg Value Loss -0.4763657887835853\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.81791241682344, ground_truth=0.0\n", + "INFO:root:Samples 2800 Avg Zeta Loss 0.49603770180630297, Avg Value Loss -0.4939257093469584\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.64493523722547, ground_truth=0.0\n", + "INFO:root:Samples 2900 Avg Zeta Loss 0.513904861388005, Avg Value Loss -0.511616965743344\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.820140496441205, ground_truth=0.0\n", + "INFO:root:Samples 3000 Avg Zeta Loss 0.5319195063228248, Avg Value Loss -0.5295093464904623\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.66295844959422, ground_truth=0.0\n", + "INFO:root:Samples 3100 Avg Zeta Loss 0.5500221946272967, Avg Value Loss -0.5475205865265477\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.61842681610751, ground_truth=0.0\n", + "INFO:root:Samples 3200 Avg Zeta Loss 0.5682244423883818, Avg Value Loss -0.5656256978636238\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.44779874188362, ground_truth=0.0\n", + "INFO:root:Samples 3300 Avg Zeta Loss 
0.5865773189171554, Avg Value Loss -0.5838913073010155\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.71785597261784, ground_truth=0.0\n", + "INFO:root:Samples 3400 Avg Zeta Loss 0.6050301781923317, Avg Value Loss -0.6021948606524044\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.10404156323258, ground_truth=0.0\n", + "INFO:root:Samples 3500 Avg Zeta Loss 0.6237087326147589, Avg Value Loss -0.6207745987733435\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.0615611733165, ground_truth=0.0\n", + "INFO:root:Samples 3600 Avg Zeta Loss 0.6424753768340448, Avg Value Loss -0.6394050202480294\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.56301885083942, ground_truth=0.0\n", + "INFO:root:Samples 3700 Avg Zeta Loss 0.6613085941780935, Avg Value Loss -0.6580782256298456\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.461626653172125, ground_truth=0.0\n", + "INFO:root:Samples 3800 Avg Zeta Loss 0.680294641233968, Avg Value Loss -0.6769630713253901\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.62284399474573, ground_truth=0.0\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root:Samples 3900 Avg Zeta Loss 0.6993606929072077, Avg Value Loss -0.695912910153744\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.62992315297156, ground_truth=0.0\n", + "INFO:root:Samples 4000 Avg Zeta Loss 0.7185064421679705, Avg Value Loss -0.7149408297876721\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.02106215485004, ground_truth=0.0\n", + "INFO:root:Samples 4100 Avg Zeta Loss 0.7377599143193148, Avg Value Loss -0.7340898682761199\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.03011925073041, ground_truth=0.0\n", + "INFO:root:Samples 4200 Avg Zeta Loss 0.7571012823719949, Avg Value Loss -0.753317346207699\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.094718872419435, ground_truth=0.0\n", + "INFO:root:Samples 4300 Avg Zeta Loss 0.7765199167863462, Avg Value Loss -0.7726287578741903\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.51668158622166, ground_truth=0.0\n", + "INFO:root:Samples 4400 Avg Zeta Loss 0.7960622947944043, Avg Value Loss -0.7920336581938382\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.84934625641421, ground_truth=0.0\n", + "INFO:root:Samples 4500 Avg Zeta Loss 0.8156998060090562, Avg Value Loss -0.8115298611358543\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.706103118718225, ground_truth=0.0\n", + "INFO:root:Samples 4600 Avg Zeta Loss 0.8354063662313138, Avg Value Loss -0.8310887727305692\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.5358478329621, ground_truth=0.0\n", + "INFO:root:Samples 4700 Avg Zeta Loss 0.855160370210056, Avg Value Loss -0.850693606090902\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.25701109597017, ground_truth=0.0\n", + "INFO:root:Samples 4800 Avg Zeta Loss 0.8750115939797729, Avg Value Loss -0.8704083379639694\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.925828428717544, ground_truth=0.0\n", + "INFO:root:Samples 4900 Avg Zeta Loss 0.894942492326598, Avg Value Loss -0.8901813413204962\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.024406984756354, ground_truth=0.0\n", + 
"INFO:root:Samples 5000 Avg Zeta Loss 0.9149067743608257, Avg Value Loss -0.9099908599408633\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.93371202416903, ground_truth=0.0\n", + "INFO:root:Samples 5100 Avg Zeta Loss 0.9349380812992556, Avg Value Loss -0.9298498113513938\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.30840965109014, ground_truth=0.0\n", + "INFO:root:Samples 5200 Avg Zeta Loss 0.9549997544125205, Avg Value Loss -0.9497817114255531\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.19056909330295, ground_truth=0.0\n", + "INFO:root:Samples 5300 Avg Zeta Loss 0.9751475657446674, Avg Value Loss -0.9697622633010495\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.813249193403706, ground_truth=0.0\n", + "INFO:root:Samples 5400 Avg Zeta Loss 0.995287306513766, Avg Value Loss -0.9897496287057586\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.55337643228885, ground_truth=0.0\n", + "INFO:root:Samples 5500 Avg Zeta Loss 1.0155125500351097, Avg Value Loss -1.0098306881326768\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.26023634006089, ground_truth=0.0\n", + "INFO:root:Samples 5600 Avg Zeta Loss 1.0357304478391378, Avg Value Loss -1.0299038212668479\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.10335804849414, ground_truth=0.0\n", + "INFO:root:Samples 5700 Avg Zeta Loss 1.0560117344121571, Avg Value Loss -1.0500272734569682\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.38627906854628, ground_truth=0.0\n", + "INFO:root:Samples 5800 Avg Zeta Loss 1.076342331814089, Avg Value Loss -1.0702184758953823\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.03696954781097, ground_truth=0.0\n", + "INFO:root:Samples 5900 Avg Zeta Loss 1.0967435623687622, Avg Value Loss -1.0904606007344462\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=57.60146346309086, ground_truth=0.0\n", + "INFO:root:Samples 6000 Avg Zeta Loss 1.1171735915201308, Avg Value Loss -1.1107215389437302\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.94978322402479, ground_truth=0.0\n", + "INFO:root:Samples 6100 Avg Zeta Loss 1.1375977161924764, Avg Value Loss -1.1309886681864325\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.78985760192937, ground_truth=0.0\n", + "INFO:root:Samples 6200 Avg Zeta Loss 1.1580165586026823, Avg Value Loss -1.1512448866931497\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=57.523813958342345, ground_truth=0.0\n", + "INFO:root:Samples 6300 Avg Zeta Loss 1.1785015234736593, Avg Value Loss -1.1715751750910883\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.923248480393625, ground_truth=0.0\n", + "INFO:root:Samples 6400 Avg Zeta Loss 1.1990728631110297, Avg Value Loss -1.192018625030931\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.68037103944842, ground_truth=0.0\n", + "INFO:root:Samples 6500 Avg Zeta Loss 1.219645822805385, Avg Value Loss -1.2124421261665295\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.83107751839043, ground_truth=0.0\n", + "INFO:root:Samples 6600 Avg Zeta Loss 1.24031771586315, Avg Value Loss -1.2329654932768797\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.62769874782074, ground_truth=0.0\n", + "INFO:root:Samples 6700 Avg Zeta Loss 
1.2610225198391107, Avg Value Loss -1.253528250938638\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=57.194054035223736, ground_truth=0.0\n", + "INFO:root:Samples 6800 Avg Zeta Loss 1.2817257959928043, Avg Value Loss -1.2740662471359987\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.4442386074851, ground_truth=0.0\n", + "INFO:root:Samples 6900 Avg Zeta Loss 1.3024059008737734, Avg Value Loss -1.2946103018977357\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=57.05572367869492, ground_truth=0.0\n", + "INFO:root:Samples 7000 Avg Zeta Loss 1.323255518833005, Avg Value Loss -1.3152916751271346\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.17737047306442, ground_truth=0.0\n", + "INFO:root:Samples 7100 Avg Zeta Loss 1.3439967530762127, Avg Value Loss -1.3358887156656782\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.86202231368185, ground_truth=0.0\n", + "INFO:root:Samples 7200 Avg Zeta Loss 1.3648675836339068, Avg Value Loss -1.3566078131088628\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.824145860791454, ground_truth=0.0\n", + "INFO:root:Samples 7300 Avg Zeta Loss 1.3858161796492559, Avg Value Loss -1.3773822339288837\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.64011201569596, ground_truth=0.0\n", + "INFO:root:Samples 7400 Avg Zeta Loss 1.406836647879221, Avg Value Loss -1.3982267929756007\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.174167135809085, ground_truth=0.0\n", + "INFO:root:Samples 7500 Avg Zeta Loss 1.4279600191638755, Avg Value Loss -1.4191768804889373\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.340669941792065, ground_truth=0.0\n", + "INFO:root:Samples 7600 Avg Zeta Loss 1.4491375523066332, Avg Value Loss -1.4401853529737827\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.65631359543148, ground_truth=0.0\n", + "INFO:root:Samples 7700 Avg Zeta Loss 1.470387361317281, Avg Value Loss -1.4612665123155217\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.51997617510082, ground_truth=0.0\n", + "INFO:root:Samples 7800 Avg Zeta Loss 1.4916453967535614, Avg Value Loss -1.4823671318343667\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.927061369078736, ground_truth=0.0\n", + "INFO:root:Samples 7900 Avg Zeta Loss 1.512928324097119, Avg Value Loss -1.503478870212791\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.33130466190415, ground_truth=0.0\n", + "INFO:root:Samples 8000 Avg Zeta Loss 1.534261463571887, Avg Value Loss -1.5246660846789784\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.90632180516317, ground_truth=0.0\n", + "INFO:root:Samples 8100 Avg Zeta Loss 1.5555292827301084, Avg Value Loss -1.5457715277338329\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.42537439635173, ground_truth=0.0\n", + "INFO:root:Samples 8200 Avg Zeta Loss 1.5770045495220326, Avg Value Loss -1.567101864874857\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.78077885487664, ground_truth=0.0\n", + "INFO:root:Samples 8300 Avg Zeta Loss 1.5984987575646874, Avg Value Loss -1.5884598440166549\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.39514056925266, ground_truth=0.0\n", + 
"INFO:root:Samples 8400 Avg Zeta Loss 1.6199902530693813, Avg Value Loss -1.609815690150345\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.91772938974874, ground_truth=0.0\n", + "INFO:root:Samples 8500 Avg Zeta Loss 1.641498719962718, Avg Value Loss -1.631196654041149\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.53888944436647, ground_truth=0.0\n", + "INFO:root:Samples 8600 Avg Zeta Loss 1.6630436982566066, Avg Value Loss -1.6525983797136272\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.49912049630797, ground_truth=0.0\n", + "INFO:root:Samples 8700 Avg Zeta Loss 1.6847023614149887, Avg Value Loss -1.6741258043274638\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.775506265025506, ground_truth=0.0\n", + "INFO:root:Samples 8800 Avg Zeta Loss 1.7063623882598042, Avg Value Loss -1.6956090021964343\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.55627074920731, ground_truth=0.0\n", + "INFO:root:Samples 8900 Avg Zeta Loss 1.7280763823435092, Avg Value Loss -1.7171947545701707\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.53857283452133, ground_truth=0.0\n", + "INFO:root:Samples 9000 Avg Zeta Loss 1.7498520991707718, Avg Value Loss -1.7388152835916664\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.1205724318122, ground_truth=0.0\n", + "INFO:root:Samples 9100 Avg Zeta Loss 1.7716309919892634, Avg Value Loss -1.7604377903798636\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.29847961604317, ground_truth=0.0\n", + "INFO:root:Samples 9200 Avg Zeta Loss 1.7934526063474174, Avg Value Loss -1.7821237636666438\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.01053425758253, ground_truth=0.0\n", + "INFO:root:Samples 9300 Avg Zeta Loss 1.8153925966099587, Avg Value Loss -1.803915132349972\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.323856269334804, ground_truth=0.0\n", + "INFO:root:Samples 9400 Avg Zeta Loss 1.8372958051814245, Avg Value Loss -1.8256669715127267\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.89877337882972, ground_truth=0.0\n", + "INFO:root:Samples 9500 Avg Zeta Loss 1.8592074841861537, Avg Value Loss -1.8474217456784228\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.46949341701875, ground_truth=0.0\n", + "INFO:root:Samples 9600 Avg Zeta Loss 1.8811504985800196, Avg Value Loss -1.8692204961694006\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.57444706511321, ground_truth=0.0\n", + "INFO:root:Samples 9700 Avg Zeta Loss 1.9031585154494857, Avg Value Loss -1.891076875004986\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.273690022130985, ground_truth=0.0\n", + "INFO:root:Samples 9800 Avg Zeta Loss 1.925169490202185, Avg Value Loss -1.9129266145050976\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.80483501585438, ground_truth=0.0\n", + "INFO:root:Samples 9900 Avg Zeta Loss 1.947259148964815, Avg Value Loss -1.934858884066447\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.596225443413765, ground_truth=0.0\n", + "INFO:root:Samples 10000 Avg Zeta Loss 1.969276748171883, Avg Value Loss -1.9567217556254104\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.6325078334776, ground_truth=0.0\n", + "INFO:root:Samples 10100 Avg Zeta Loss 
1.9913769940490353, Avg Value Loss -1.9786638925823088\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.20474849252979, ground_truth=0.0\n", + "INFO:root:Samples 10200 Avg Zeta Loss 2.013464879373551, Avg Value Loss -2.0005790259105747\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.23128859178559, ground_truth=0.0\n", + "INFO:root:Samples 10300 Avg Zeta Loss 2.035588840133778, Avg Value Loss -2.022552036615641\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.18663145646945, ground_truth=0.0\n", + "INFO:root:Samples 10400 Avg Zeta Loss 2.057739439002438, Avg Value Loss -2.0445409935498886\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.34472111380706, ground_truth=0.0\n", + "INFO:root:Samples 10500 Avg Zeta Loss 2.0798630563109968, Avg Value Loss -2.0664977288715445\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.131590267705796, ground_truth=0.0\n", + "INFO:root:Samples 10600 Avg Zeta Loss 2.1020648568450984, Avg Value Loss -2.0885381590885217\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.45164719160603, ground_truth=0.0\n", + "INFO:root:Samples 10700 Avg Zeta Loss 2.12427447861919, Avg Value Loss -2.1105830154924043\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.227782397805875, ground_truth=0.0\n", + "INFO:root:Samples 10800 Avg Zeta Loss 2.1463753838813577, Avg Value Loss -2.1325204626513163\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.60800959444225, ground_truth=0.0\n", + "INFO:root:Samples 10900 Avg Zeta Loss 2.1685980860605847, Avg Value Loss -2.154578750397458\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.18214639253701, ground_truth=0.0\n", + "INFO:root:Samples 11000 Avg Zeta Loss 2.190870250954263, Avg Value Loss -2.1766755630551016\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.78909045143968, ground_truth=0.0\n", + "INFO:root:Samples 11100 Avg Zeta Loss 2.21313095117782, Avg Value Loss -2.198764442346478\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=53.27303408189436, ground_truth=0.0\n", + "INFO:root:Samples 11200 Avg Zeta Loss 2.2355091949369896, Avg Value Loss -2.2209785151282944\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.64451808282459, ground_truth=0.0\n", + "INFO:root:Samples 11300 Avg Zeta Loss 2.2577667256085885, Avg Value Loss -2.243076297478689\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.52821590453144, ground_truth=0.0\n", + "INFO:root:Samples 11400 Avg Zeta Loss 2.2801078869510785, Avg Value Loss -2.265250505001222\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.573872849015714, ground_truth=0.0\n", + "INFO:root:Samples 11500 Avg Zeta Loss 2.3025044834643493, Avg Value Loss -2.287469621846292\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.694167727876874, ground_truth=0.0\n", + "INFO:root:Samples 11600 Avg Zeta Loss 2.3248621157457743, Avg Value Loss -2.3096711442607534\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.7993472675059, ground_truth=0.0\n", + "INFO:root:Samples 11700 Avg Zeta Loss 2.3472290368292734, Avg Value Loss -2.3318843096355395\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.33451096776019, ground_truth=0.0\n", + "INFO:root:Samples 11800 Avg Zeta Loss 2.369720948866733, Avg Value 
Loss -2.3542356505043722\n",
+    "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.43373134288183, ground_truth=0.0\n",
+    "[... approximately 550 similar NeuralDualDICE training-log lines (Samples 11900 through 39000, spread over several stderr output blocks) elided: Avg Zeta Loss rises steadily from about 2.39 to 8.61, Avg Value Loss falls from about -2.38 to -8.54, and the per-sample estimates stay between roughly 52 and 60 against log=69.98554447789161 and ground_truth=0.0 ...]\n",
+    "INFO:root:Samples 39100 Avg Zeta Loss 8.628258125700562, Avg Value Loss -8.567736691276494\n",
+    "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=57.591364826384336, 
ground_truth=0.0\n", + "INFO:root:Samples 39200 Avg Zeta Loss 8.651171187999553, Avg Value Loss -8.590469166788887\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=56.1244341209203, ground_truth=0.0\n", + "INFO:root:Samples 39300 Avg Zeta Loss 8.674129023064438, Avg Value Loss -8.613240305334319\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=59.04860027365358, ground_truth=0.0\n", + "INFO:root:Samples 39400 Avg Zeta Loss 8.696943684578201, Avg Value Loss -8.635852237395602\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=51.99888665022283, ground_truth=0.0\n", + "INFO:root:Samples 39500 Avg Zeta Loss 8.719767868510633, Avg Value Loss -8.658488916077562\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=57.19883706866834, ground_truth=0.0\n", + "INFO:root:Samples 39600 Avg Zeta Loss 8.742598788507316, Avg Value Loss -8.681142301505618\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=57.93752814321723, ground_truth=0.0\n", + "INFO:root:Samples 39700 Avg Zeta Loss 8.76545707398545, Avg Value Loss -8.703816565692929\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.48645115728984, ground_truth=0.0\n", + "INFO:root:Samples 39800 Avg Zeta Loss 8.788312698330989, Avg Value Loss -8.726484091192392\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=55.680084187781965, ground_truth=0.0\n", + "INFO:root:Samples 39900 Avg Zeta Loss 8.811176592208069, Avg Value Loss -8.749166835071804\n", + "INFO:root: Append estimate [1]: log=69.98554447789161, estimated=54.79208640050983, ground_truth=0.0\n", + "INFO:root:Samples 40000 Avg Zeta Loss 8.833944010196014, Avg Value Loss -8.771746719354748\n" + ] + } + ], + "source": [ + "ips_result = ips.evaluate(inp)\n", + "dd_result = dualdice.evaluate(inp)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def plot_dualdice_losses(losses):\n", + " zeta_losses = [x[0] for x in losses]\n", + " nu_losses = [x[1] for x in losses]\n", + " plt.plot(zeta_losses, label=\"Zeta Loss\")\n", + " plt.plot(nu_losses, label=\"Nu Loss\")\n", + " plt.ylabel(\"Loss\")\n", + " plt.xlabel(\"Epochs\")\n", + " plt.show()\n", + "\n", + "plot_dualdice_losses(dualdice_losses)\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.7" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/reagent/ope/test/notebooks/GridWorldExperiments.ipynb b/reagent/ope/test/notebooks/GridWorldExperiments.ipynb new file mode 100644 index 000000000..22f4b8c86 --- /dev/null +++ b/reagent/ope/test/notebooks/GridWorldExperiments.ipynb @@ -0,0 +1,348 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# Imports\n", + "import logging\n", + "import random\n", + "from typing import Iterable, Optional, Sequence, Tuple\n", + "\n", + "import math\n", + "import numpy as np\n", + "import torch\n", + "from reagent.ope.estimators.sequential_estimators import (\n", + " DMEstimator,\n", + " 
DoublyRobustEstimator,\n",
+    "    EpsilonGreedyRLPolicy,\n",
+    "    IPSEstimator,\n",
+    "    MAGICEstimator,\n",
+    "    NeuralDualDICE,\n",
+    "    RandomRLPolicy,\n",
+    "    RewardProbability,\n",
+    "    RLEstimatorInput,\n",
+    "    State,\n",
+    "    StateDistribution,\n",
+    "    StateReward,\n",
+    "    ValueFunction,\n",
+    ")\n",
+    "from reagent.ope.estimators.types import Action, ActionSpace\n",
+    "from reagent.ope.test.envs import Environment, PolicyLogGenerator\n",
+    "from reagent.ope.trainers.rl_tabular_trainers import (\n",
+    "    DPTrainer,\n",
+    "    DPValueFunction,\n",
+    "    TabularPolicy,\n",
+    ")\n",
+    "from reagent.ope.test.gridworld import *\n",
+    "\n",
+    "import matplotlib\n",
+    "import matplotlib.pyplot as plt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Configurations\n",
+    "\n",
+    "Alter GAMMA to change the discount factor applied to future rewards. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "GAMMA = 0.9"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Evaluate Estimators on a Policy\n",
+    "\n",
+    "Given a dataset of trajectories (episodes) generated by some logging policy, we evaluate the given target policy using seven popular offline policy estimators for the sequential setting: the direct method (DM), IPS, weighted IPS, doubly robust (DR), weighted DR, MAGIC, and neural DualDICE. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def evaluate_estimators(log, target_policy, value_func, ground_truth):\n",
+    "    estimator_input = RLEstimatorInput(\n",
+    "        gamma=GAMMA,\n",
+    "        log=log,\n",
+    "        target_policy=target_policy,\n",
+    "        value_function=value_func,\n",
+    "        ground_truth=ground_truth,\n",
+    "    )\n",
+    "\n",
+    "    dice_results = NeuralDualDICE(state_dim=2,\n",
+    "                                  action_dim=4,\n",
+    "                                  deterministic_env=True,\n",
+    "                                  batch_size=512,\n",
+    "                                  training_samples=10000,\n",
+    "                                  value_lr=0.001,\n",
+    "                                  zeta_lr=0.0001,\n",
+    "                                  device=device).evaluate(estimator_input)\n",
+    "\n",
+    "    dm_results = DMEstimator(device=device).evaluate(estimator_input)\n",
+    "\n",
+    "    ips_results = IPSEstimator(weight_clamper=None, weighted=False, device=device).evaluate(\n",
+    "        estimator_input\n",
+    "    )\n",
+    "    ips_results_weighted = IPSEstimator(weight_clamper=None, weighted=True, device=device).evaluate(\n",
+    "        estimator_input\n",
+    "    )\n",
+    "    dr_results = DoublyRobustEstimator(weight_clamper=None, weighted=False, device=device).evaluate(\n",
+    "        estimator_input\n",
+    "    )\n",
+    "    dr_results_weighted = DoublyRobustEstimator(weight_clamper=None, weighted=True, device=device).evaluate(\n",
+    "        estimator_input\n",
+    "    )\n",
+    "\n",
+    "    magic_results = MAGICEstimator(device=device).evaluate(\n",
+    "        estimator_input, num_resamples=10, loss_threhold=0.0000001, lr=0.00001\n",
+    "    )\n",
+    "\n",
+    "    return {\"dm\": dm_results,\n",
+    "            \"ips\": ips_results,\n",
+    "            \"ips_weighted\": ips_results_weighted,\n",
+    "            \"dr\": dr_results,\n",
+    "            \"dr_weighted\": dr_results_weighted,\n",
+    "            \"magic\": magic_results,\n",
+    "            \"dice\": dice_results}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Generate Trajectories, Policies, and Evaluate Estimators\n",
+    "\n",
+    "We can see that the IPS estimators perform well on short episodes, but as the episode length grows their accuracy worsens, which makes sense because the variance of the cumulative importance weights grows with the horizon. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "device - None\n", + "GridWorld:\n", + "⭕⬜⬜⬜\n", + "⬜⬜⬜⬜\n", + "⬜⬜⬜⬜\n", + "⬜⬜⬜⭐\n", + "\n", + "Opt Policy:\n", + "⬨⇨⇨⇩\n", + "⇩⇩⇨⇩\n", + "⇩⇩⇩⇩\n", + "⇨⇨⇨⬧\n", + "\n", + "Opt state values:\n", + " 3.27 4.74 6.38 8.2\n", + " 3.12 4.58 8.2 8.0\n", + " 4.58 6.2 8.0 10.0\n", + " 6.2 8.0 10.0 0.0\n", + "\n", + "Target Policy ground truth values:\n", + " 0.299 1.52 3.17 5.86\n", + " 0.71 1.62 5.43 5.96\n", + " 2.48 3.98 5.93 9.17\n", + " 4.35 6.37 9.04 0.0\n", + "\n" + ] + } + ], + "source": [ + "random.seed(1234)\n", + "np.random.seed(1234)\n", + "torch.random.manual_seed(1234)\n", + "\n", + "logging.basicConfig(level=logging.WARNING)\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else None\n", + "print(f\"device - {device}\")\n", + "\n", + "gridworld = GridWorld.from_grid(\n", + " [\n", + " [\"s\", \"0\", \"0\", \"0\", \"0\"],\n", + " [\"0\", \"0\", \"0\", \"W\", \"0\"],\n", + " [\"0\", \"0\", \"0\", \"0\", \"0\"],\n", + " [\"0\", \"W\", \"0\", \"0\", \"0\"],\n", + " [\"0\", \"0\", \"0\", \"0\", \"g\"],\n", + " ],\n", + " max_horizon=1000,\n", + ")\n", + "print(f\"GridWorld:\\n{gridworld}\")\n", + "\n", + "action_space = ActionSpace(4)\n", + "opt_policy = TabularPolicy(action_space)\n", + "trainer = DPTrainer(gridworld, opt_policy)\n", + "value_func = trainer.train(gamma=GAMMA)\n", + "\n", + "print(f\"Opt Policy:\\n{gridworld.dump_policy(opt_policy)}\")\n", + "print(f\"Opt state values:\\n{gridworld.dump_value_func(value_func)}\")\n", + "\n", + "behavivor_policy = RandomRLPolicy(action_space)\n", + "target_policy = EpsilonGreedyRLPolicy(opt_policy, 0.3)\n", + "model = NoiseGridWorldModel(gridworld, action_space, epsilon=0.3, max_horizon=1000)\n", + "value_func = DPValueFunction(target_policy, model, GAMMA)\n", + "ground_truth = DPValueFunction(target_policy, gridworld, GAMMA)\n", + "\n", + "print(\n", + " f\"Target Policy ground truth values:\\n\"\n", + " f\"{gridworld.dump_value_func(ground_truth)}\"\n", + ")\n", + "\n", + "log_generator = PolicyLogGenerator(gridworld, behavivor_policy)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Evaluating estimators on 5-length episodes\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "I0812 115834.496 sequential_estimators.py:742] Data loading time: 12.767301744000001\n", + "I0812 115835.276 sequential_estimators.py:786] Samples 0, Avg Zeta Loss -0.0024972213432192802, Avg Value Loss 0.014194303192198277,\n", + "Time per 1000 samples: 16.181496048\n", + "I0812 120436.815 sequential_estimators.py:786] Samples 1000, Avg Zeta Loss 1.762339136944153, Avg Value Loss -1.3786156352562846,\n", + "Time per 1000 samples: 7850.563727696001\n", + "I0812 121012.476 sequential_estimators.py:786] Samples 2000, Avg Zeta Loss 3.8674179503917667, Avg Value Loss -2.7155063413381604,\n", + "Time per 1000 samples: 7573.734335957\n", + "I0812 121733.886 sequential_estimators.py:786] Samples 3000, Avg Zeta Loss 5.210317928791042, Avg Value Loss -3.3756288778781895,\n", + "Time per 1000 samples: 8524.390936679\n" + ] + } + ], + "source": [ + "result_maps = {}\n", + "xs = []\n", + "lengths = [5, 10, 100, 400]\n", + "# Now evaluate the estimators as the number of episodes increases\n", + "try:\n", + " for length in lengths:\n", + " small_log = []\n", + 
" for state in random.sample(list(gridworld.states), 5):\n", + " small_log.extend([log_generator.generate_log(state, length) for _ in range(50)])\n", + " print(f\"Evaluating estimators on {length}-length episodes\")\n", + " results = evaluate_estimators(small_log, target_policy, value_func, ground_truth)\n", + " for name, result in results.items():\n", + " if not name in result_maps:\n", + " result_maps[name] = []\n", + " res = result.report()[3].rmse.cpu().numpy()\n", + " result_maps[name].append(res)\n", + " xs.append(ep)\n", + "except KeyboardInterrupt:\n", + " pass\n", + " \n", + "fig, ax = plt.subplots()\n", + "for name, results in result_maps.items():\n", + " ax.plot(xs, results, label=name)\n", + "\n", + "# Log scale vastly improves visualization\n", + "plt.yscale(\"log\")\n", + "plt.xscale(\"log\")\n", + "plt.legend(loc='best')\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAD3CAYAAAANMK+RAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nO3deXwU9f348dfslZsAciOCJ4rIIXJVxChtPdpqtfjRqvVotbUePez1/fVbKVq/bf221vartba14K18PLHawzYaPDlF5VBAacIRCEcg5N7dmfn9sZOwCdnsJuxmd3bfz8fDR3ZnZmfeE+J7Zt+fz3w+hm3bCCGEcD9PugMQQgiRHJLQhRAiS0hCF0KILCEJXQghsoQkdCGEyBK+dB24vLxcutcIIUQvzJkzx+hqedoSOpGg4m5TUVFBWVlZn8STKm4/B4k//dx+DhJ/8pSXl8dcJyUXIYTIEpLQhRAiS0hCF0KILJHWGroQQiSDbdvU1tZiWVZK9j9o0CB2796dkn3H4vF4GDhwIIbRZftnlyShCyFcr7a2lqKiIvLz81Oy//z8fEpKSlKy71haWlqora3liCOOSPgzUnIRQrieZVkpS+bpkp+f3+NvHJLQhRAiS2RFQm8JmdQ2BtMdhhBCpFVWJPTyDbv4vyUfpzsMIYSgoaGBMWPGcNlll9Hc3Nynx06oUVQpNR5YDNyjtb5PKfU0MNhZPRBYCvwcWAOscpbv1lpfkrrQD2oOmTS0hvviUEIIkZCnnnqqz48ZN6ErpYqAe4H2502jE7VS6i/AAuftBq11nz8fG7JsGoNmXx9WCCEAOHDgAHPnzqW5uZkzzjgDgDFjxrB27Vpqa2u57rrraGlpYfTo0SxcuJBdu3Zx/fXX09LSgs/n48EHH2TUqFGHHUciJZdW4HyguvMKpdQJwCCt9dLDjuQwhE2L5pAkdCFEejz22GNMmDCBN954g8mTJ3dYN3/+fG655RZef/11hg8fzsqVK5k3bx633nor5eXlfOtb3+LOO+9MShxx79C11mEgrJTqavV3gN9FvR+mlHoBGAL8Xmv9eHf7rqioiBtgQ0ND3O02VNvs3mcntL90SOQcMpnEn35uP4dUxz9o0KAO3RbPfmBF0o/x6g1TY6577733mDVrFvX19UyZMoW2uZrr6+tZvnw58+bNo76+nttuuw2AZcuWsW7dOubPn49pmgwePJj6+vpD9ltTU8O6desSjrHXDxYppQqBTwM3O4v2Aj8FHgOKgOVKqQqt9fZY+0hk9LJERjn76M3NvHdgB2Vlp/f8RPpAJo3U1hsSf/q5/RxSHf/u3bs7PPiz4gdnJ3X/9fX13T5Y5Pf7KSwspKSkhMbGxvanO0tKSvB4PO3r2ng8Hp555hlGjhzZ7XGHDh3K+PHjOyxL1WiLpwNvaq0tInfy9Vrrv2itW7XWtU7j6AmHsf+EhUyLJqmhCyHSZOzYsaxaFekP8tprr3VYd9ppp7FkyRIA5s2bx7/+9S+mT5/O4sWLAXj11Vd58sknkxLH4ST06cDatjdKqdlKqT87rwuACcCGpEQZR8i0aQqa7V9zhBCiL1111VUsXbqUOXPmsGHDhg5PeM6fP58//vGPzJ49m02bNnHWWWcxf/58XnjhBWbPns3tt9/OjBkzkhJHIr1cpgB3A2OAkFJqLnAxMBx4I2rTt4GvKKWWATZwl9b6kIbUVAiZFqZtEzQt8nzevjikEEK069+/f4c78/nz57e/Li4u5t///neH7UeMGMErr7yS9DgSaRRdBXRV/Lqp03Zh4PqkRpegkBm5GjYFTUnoQoiclRVPiobMSKlF+qILIXJZdiR0p17VLAldCJHDsiOhh9tKLvL4vxAid2VHQrciJZcmeVpUCJHDsiOhmxZFAa/0RRdC5LSsSeilBX4apeQihEiztuFz0yFLErpNab5fBugSQmScVE1c3ZWsmCQ6aFoMLAxIyUUIkRZdDZ973HHH8YUvfIEBAwYwb968Pokje+7QC/zSD10IkRZdDZ8bDoc599xz+yyZky136GHTipRcJKELIYD//mZyBruK9j9/+HLMdevXr+fMM88EaP8JMHVq7CF3UyErEnrQtCgt8FFd15LuUIQQGaC75NsbXY1VHs227fYhc6Nr5oFAIKlxxJNVJRdpFBVCpEN3w+f2paxI6GErUnKRRlEhRDp0N3xuX8qikov0QxdCpEd3w+f2pay4Q28vucgduhAih7k+oVu2jWnZ9MvzyVguQoic5vqEHjIt/F6DwoBPauhCiJyWBQndJuD1UCiDcwmRszweDy0t2dVtuaWlBY+nZyna9Y2iIdPC5/WQ5/NgWjZh570QIncMHDiQ2trauP3Fe6umpoahQ4emZN+xeDweBg4c2KPPJJTQlVLjgcXAPVrr+5RSDwFTgL3OJr/SWr+slLoI+CGQD9yrtV7QmxPpicgduoFhGBQEvDSFTPpJQhcipxiGwRFHHJGy/a9bt47x48enbP/JEjehK6WKgHuB8k6r/p/W+qWo7UqAXwOnAiHgXaWU1lo3pCRyR6S
GHkngbWWXfvn+VB5SCCEyUiK3sq3A+UB1nO2mAiu01nVa6ybgLeCMJMUZU8i08Dl1pkK/V/qiCyFyVtw7dK11GAgrpTqvukUp9QNgJ3ATMBzYHbV+FzCsu31XVFTEDbChoaHb7aqbbIItNhUVFZitFm8tXcHWYiPufvtSvHPIdBJ/+rn9HCT+vtHbRtFHgTqt9Uql1PeBO4DOAxgYgN3dTsrKyuIeqKKiotvt1u88wD/2bKCsbCqLalYzdvxopo/pWUNCqsU7h0wn8aef289B4k+e8vLO1e+DepXQtdbRe3wZeAB4AhgStXwY8Gpv9t8TobCFzxu5Iy8MeGWALiFEzupVQldKaeBOrfUHwCxgLbAcmKCUKgVMYDrwzeSH3FHIivRDx6mhN0kNXQiRoxLp5TIFuBsYA4SUUnOBecCDSqkmoB74qtY6qJSaB7wOWMAdWuvmVJ9AdC+XooBPZi0SQuSsRBpFVwFdFY+mdbHt08DTSYsuAUHzYMmlIOCVAbqEEDnL9U/ghM2okkvAS6PU0IUQOcr1CT0Y/WCR1NCFEDnM9Qm9bbRF2nq5SMlFCJGjXJ/Qw6aN3yONokII4fqEHl1yKZB+6EKIHOb6hB4y7Q4lF6mhCyFylesTetiSfuhCCEE2JPRg+GCjaIFfGkWFELnL9Qk9ZNlRd+heuUMXQuQs9yf0To2iTSGpoQshclOWJPSDJZfWkIVpdTtqrxBCZKUsSOgHSy4ew4jU0aXrohAiB2VBQj9YckH6ogshclhWJPSA9+CUc5GGUamjCyFyTxYk9IOP/gMUBnw0SU8XIUQOyoKEbuGLLrlIX3QhRI7KioR+aMlFEroQIve4P6FHPViEjOcihMhh7k/o4U4ll4CXJunlIoTIQXHnFCUyUfR4YDFwj9b6PqXUSGAhkAeYwJVa62qlVAh4K+qjc7TWKc2uIcvuWHLxS6OoECI3xU3oSqki4F6gPGrxz4AHtdZaKXUjcCvwfaBOa93VhNIp02U/dEnoQogclEjJpRU4H6iOWvYt4Dnn9R6gX4riiyvSy0X6oQshhGHbiY17opSaD+zRWt8XtcwLvArM11q/ppRqAP4GjASe01rfHWt/5eXlttfrjXvchoYGiouLY67/+fsWN5xoMDAvktTf2WWzvclm7pjMaR6Idw6ZTuJPP7efg8SfPKZpMmfOHKOrdQnV0LviJPNHgQqt9WvO4u8DTwAhYIlS6g2t9fJY+ygri1+dqaio6Ha7u9a/yRmnT2VwcR4ATet20vifvZSVndyb00qJeOeQ6ST+9HP7OUj8yVNeXh5zXa8TutMoWqm1/mnbAq31A22vlVKvAScDMRN6MoRMC7+nY8lFGkWFELmoVwldKXUFYGmtfxy17DjgbuBiZ9FM4JmkRRpDyLTx+zr2Q2+WMdGFEDkokV4uU5xEPQYIKaXmAkOAFqVUhbPZeq31jUqpNcAyIAy8qLVekeoTCJrWIWO5yJOiQohcFDeha61XAQkVj7TWPwF+kpTIEmDbNmHLbp/ggvYnRSWhCyFyT+Z0BemFsGXj8xgYxsGELoNzCSFylasTerDTQ0VIP3QhRA5zdUIPmR0f+ydqxqJE+9cLIUS2cHVCD3caCx3A5/Hg93poDVtpi0sIIdLB1Qk9UnI59IGpQhkTXQiRg1yd0CMll0NPodDvpVnq6EKIHOPyhH5oyQXpiy6EyFGuT+jRj/23KZRJLoQQOcjlCb3rkkuBXx4uEkLkHpcn9K5LLkUyr6gQIge5PqF37oeOU0OXO3QhRK5xd0K37EOeFEVq6EKIHOXuhB6O1ctFSi5CiNzj7oRuxSq5SKOoECL3uDuhmzFKLn6poQshco+rE3rQtPDJHboQQoDbE3q4m0f/pVFUCJFrXJ3QQ52mn2sjjaJCiFzk6oQeu+QiNXQhRO6JO6cokYmixwOLgXu01vcppYYAjwD9gW3AFVrrVqXURcAPgXzgXq31glQGH7PkIv3QhRA5KO4dulKqCLgXKI9a/CtgodZ6BlAJXKGUKgF+DZwLnA78UClVnMrgu5qCDim5CCFyVCIll1bgfKA6alkZ8KLzejFwDjAVWKG1rtNaNwFvAWekKG5oq6HHGD5XJooWQuSauCUXrXUYCCuloheXaK2bnde7gGHAcGB31DZty2OqqKiIG2BDQ0PM7Sq3WDTkG1Q0be6wPGzZNLTaCe2/L3R3Dm4g8aef289B4u8bCdXQuxCMem0Adqdl0ctjKisri3ugioqKmNu9/c+PGDu0hLJJIw9Zd9vq1/jUrNkEfOlv9+3uHNxA4k8/t5+DxJ885eXlMdf1NtvVK6UKndfDnHLMDmBI1DbDOpVpki5kdT2nKNIwKoTIQb29Q/8HcCHwJHAx8DKwHJiglCoFTGA68M0kx9tBrEf/iWoY7V/gT2UIQgiRMeImdKXUFOBuYAwQUkrNBa4AHldK3QpsABZprcNKqXnA64AF3BFVZ0+JWI2iSF90IUQOSqRRdJXTq6WzQ5ZprZ8Gnk5adHFE7tBjlFxkGjohRI5Jf4vhYej+Dl36ogshcov7E7onVqOoTxpFhRA5xeUJvetH/5GSixAiB7k8oXc9BR0yJroQIge5PqF33w9dauhCiNzh7oRudVNykTt0IUSOcXdC77bkIv3QhRC5xfUJXRpFhRAiwuUJvZsHi6QfuhAix7g8ocd59F/6oQshcojLE3q8O3RJ6EKI3OHahG7bdqRR1CM1dCGEwM0J3bRsPIaBN+aj/1JDF0LkFtcm9JBp4/d1ncyRGroQIge5N6FbFv4Y5RaAIqmhCyFyjGsTejAc+7F/gDyfh5BpEbasPo1LCCHSxbUJPWzFnn4OwDAMCvxemuUuXQiRI1yb0IPd9EFvUyR1dCFEDnFtQu/uoaI2BVJHF0LkENcm9HA3DxW1kYeLhBC5JO4k0V1RSn0N+ErUotOAlUAR0Ogs+54zwXRKJFZykb7oQojc0auErrX+C/AXIsn9DOBy4CTgWq312qRH2YXISIvd36EX+GUIXSFE7khGyWU+cGcS9tMjIdOO+dh/m8isRZLQhRC5wbBtu9cfVkpNA27WWl+llKoA9gNHAB8C39ZaN8f6bHl5ue31euMeo6GhgeLi4kOWf7jf5u1dNl87IXZSf6bSYmShwcwh3d/Jp1qsc3ALiT/93H4OEn/ymKbJnDlzukxqvSq5RLkeWOS8/h2wTmu9USl1L3AL8L/dfbisrCzuASoqKrrczt64m832DsrKJsT87HuvbeKIogBl00Ynci4pE+sc3ELiTz+3n4PEnzzl5eUx1x1uQj/TSdxorZ+PWv434LLD3He3Qlbs6efaFMk0dEKIHNLrhK6UOhJo0Vq3KKUM4FXgy1rrncAsIKWNo6E4j/7j9EPfXd+ayjCEECJjHE6j6HCgmsjduQ3cB7yslFoCHO28T5mQZcecT7SNNIoKITLN4bRbxtPrO3St9Qrg3Kj3zwLPJi2yOILhREou0g9dCJEZQqbF39bt5JHlW/jdlyZw5IDCpB/jcGvoaRO2Eii5SD90IUSatYRMXvigmsdWbGHMwEJ+fM5YRv
YvSMmxXJvQQ2b8kkuRlFyEEGnS0Brmmfe28eTKbZwyoh93XXgKJw/vl9JjujahB834JRcZnEsI0df2N4d4atVWnnlvOzPGDOT3ahLHDe6bPuyuTehh0yLgS6BRVGroQog+sKehlcdWbOGva3dw1vGDWXjFFEaloE7eHdcm9JBpUxSQfuhCiPSqrmvmkeVb+NdHNZw3bhiPXz2NYf3y0xKLaxN6QiUXv5RchBCpUVnbyENLq3jzkz18ceJInv7qDAYWBdIak2sTeti04462WBjw0hI2sWwbj5He8VyEENlhQ009Dy2rYtXWfajJR/Lc9TPpl+9Pd1jg5oQesuKPh+4xDPJ8XlpCJoUB156qECIDfLC9joVLK/mopp4rph7FbeeemHF5JbOi6YFgAo/+E1V2ybRfvBAi89m2zfKqfTzwkUXThnVcNe0ofnnhePJ88UeKTQfXZrmwZce9Q0f6ogshesG2bV7/ZA8Ll1bR0BpmxiCD71w0I267Xbq5NqEnMgUd0hddCNEDpmVTvmEXC5dW4vEYXDtjDGcdP5g3Xl+S8ckcNyf0kJlYyaUo4KVR+qILIboRMi3+vn4nDy+ron9BgJvPPJZPHX0Ehss6U7g4oSdWcikM+GiWO3QhRBdaQiYvrtnBoyuqOGpAIT/+7ImcOqq/6xJ5Gxcn9ARLLtIXXQjRSWMwzLOrt/PEqq2cPLwfv/jCeMaPKE13WIfN1Qk93uBcSKOoECJKXXOIRe9u5enV25k2egD3zp3E8UMyY67QZHBxQrfxeeJ/LSoM+GQ8FyFy3J6GVp5YuZUX11Rz5vGD+csVUziqj8dZ6QsuTuiJ93JplJKLEDlphzPOyisf1XDuScN4LI3jrPQFVyf0eI/+AxT5vexvDvVJTEKIzFBZ28jDy6p44+M9XDhhBPqrMzgizeOs9AX3JnTLTqhfaEHAS3VdS5/EJIRIr4276lm4tIqVW/ahTs2scVb6Qq8SulKqDHgaWOcsWgP8DHgE6A9sA67QWrcmN9yDEi25SD90IbLfmuo6Fiyt5KOd9Vx+2lH85NwTKcrB4T4O54yXaK3ntr1RSj0MLNRaL1JK/Rq4AliQnDAPFUpgtEXa+qFLLxchso5t26zcso8FS6vYvr+Zr0w7il9ekLnjrPSFZF7CyoAbnNeLgZtTm9AT74cujaJCZA/btnlz814WvlNJXUuYa2aM5ryThrri0fxUM2zb7vGHnJLL/UAVUALcDizSWg901o8F/qS1PjPWPsrLy22vN/6VtKGhgeLiQ/uJ/nCFxS9OM/DGeaJrS4PNC1tsvjUuff/Ysc7BLST+9HP7OSQjfsu2WbMPyqsjOWvOCINTBtAncx1k0u/fNE3mzJnT5Un39g59E3An8BQwGqgAog9gAHGvFGVlZXEPVFFRcch2pmVjrHyNOWedFffzm/c08tKuNZSVzYi7bap0dQ5uIvGnn9vP4XDiD5sWf1u/k4eXbaE038cPzhvDrGP6dpyVTPr9l5eXx1zXq4Sutd4OPOG8/Y9SaicwQilVqLVuAoYB1b0NOJ5Eyy04sxZJyUUI94keZ+XI/oX812dO4LSjBrh2nJW+0NteLpcBJ2qt5yulBgFDgQeBC4EngYuBl5MfbkRPE7oMziWEezQGwzz33nYeX7mVccP68fMvjOeULBhnpS/0tuTyEnCpUuotwAPcCKwGnlRK3QpsABYlOdZ2kZEWE7tKFzqDc9m2LVd2ITJYXXMI/e429OptTM3CcVb6Qm9LLg3ARV2s6pMiU9C08HsSu0P3eT14PQZB08rp7kxCZKq9jUGeWLmFxR9UM/u4wTx4+RRGD8y+cVb6git73ocTnNyiTaEza5EkdCEyx84DLTy6fAv/+HAn55w0lEevmsrw0oJ0h+VqrkzooQTnE23T1jCahYOrCeE6VbVNPLysiiUf7+bCU0aw6NrpDCrOS3dYWcGVCT0YTrxRFKeOLg2jQqTXpl0NLFxWyYqqfVwyeSTPXTeT0oLcGWelL7gyoYetxBtFkTHRhUirtdV1LNxkUbP+PS6fMor/Pic3x1npC678rQZ70G2RtpKLjOciRJ+xbZtVW/ez4J1KtuxrYuYAgz9ePJN8v7RjpZIrE3qoF42iUnIRIvVs2+atzXtZsLSSuuYQV08fzXnjhvHWG69LMu8DLk3oPWsUjQzQJSUXIVLFtGxe3biLhUurALhmxmjmnDAEbwLTRIrkcWlC71nJpUiG0BUiJcKmxd8/rOHhZVWU5Pm4YdYxnHFs346zIg5yb0LvwZW/rR96Mr2/bT/rdh7gy1NGyR+vyDmtYWecleVbGNk/nx9++gSmyjgraefShN67fujJYtk2v/zXBvY3h9jXFOLGM46RP2SRE5qCYZ59r5onVm7hxKEl3Pn5k5kwUsZZyRQuTeg97+WypyGYtOP/88MaCvxe/nDpZG7U72HZNjfPPlaSushaB1pCLHp3G0+v3saUUQP47ZcmMnZoSbrDEp24NqEHepTQfUlrFA2ZFg+8uZl5551E/8IA9186mZv1asKWzXfKjpOkLrJK9DgrZxw3iD99+VTGDCxKd1giBlfO2RSybHw96bboT14N/fn3qxk9sJApowYA0L/Az/2XTmb1tv385rVN9GYGKCEyzc4DLfy6fCNqwVKagiaPXDWVn543TpJ5hnNnQu/po/8Bb1J6uTQFwyxYWslNZxzbYXm/fD+/v2QSa6oP8KvyjZLUhWtt3dfEnf/4kCseXo7f62HRtdP50WfGMkIGzXIFdyZ0yyLQwzv0ZJRcnlq1jSmj+ndZOyzJ93PfJZP4aGc9d/1rI5YkdeEiH+9u4CcvreOrj69icHEez143k2+XHSeDZrmMOxO6afdohu/CgO+wnxTd3xziiVVbuWHWMTG3Kc7z8X+XTOLjPQ384pUNktRFxlu34wDfe/4DbtLvcfzgYp6/fibfmHUM/WXQLFdyaULvxXjoh1lyeXhZFZ8+YQij4ozBW5zn43dzJ1JV28id//gI05KkLjKLbdus2rKPm/RqfrR4DdNGD2Dx12dy9fTRFOe5sp+EcLjyXy9k2j3s5XJ4/dBr6lv465pqnrx2ekLbFwV8/O5Lk/juc+/zs398yOwCSeoi/Wzb5u3/7GXBO1XsawpyzYzIOCs9aY8Smc2VCT1oWvgSnIKOJAzO9ee3/sMXJ45kcA/qiQUBL7/90kRufe4DntphM/vMnsUsRLJYts1rG3ezcGklpmVz7YwxzBkr46xko14ndKXUz4GzAD9wF/A5YAqw19nkV1rrl5MX6kE9nYIu4PVg2jZh0+pR7R2gcm8jSz7ew7PXzehxnPl+L7+5eAJfXbCEn768nts/N06SuugzYdPinx/W8NCyKoryfFx/+tGccewgPPKsRNbqVUJXSs0GJmmtZyqlBgIfAP8G/p/W+qXkh9lRT0suhmE4PV1MSgt6llD/8OZmrpx6FP3ye9dIlO/3cu3xBn+tDXPbS+v52efG9fiiIkRPtIZNXlq7k0eWVzG8Xz7fn3MC00bLOCu5oLeZ5W1AOa/3A4G+bGDt6QQX9LIv+rodB1hTX
celpx7Zwwg78nsMfvXFU2gJmfz4r+sImdZh7U+IrjQHTR5fsYWL/vwOb3yyhzs+dzIPXHYq08cMlGSeI4zDfQhGKfV14FPO2+FAPrATuElrvSfW58rLy22vN/6A9w0NDRQXF3dY9tAmiymDDE4ZkPgf6a/XWFx5nMGwgsQ/88cNFhMGGMwccnj/M7SdQ9iyefQTGwO48lgDn0tqmF39G7iJ2+Mnzjk0h23e2gVv1tgcWwJnDzcYWZRZf1tu/zfIpPhN02TOnDld/gMfVqOoUupC4HrgM079vE5rvVIp9X3gDuDG7j5fVlYW9xgVFRWHbPfC3veZPGEks44dlHCsD21byfiJxzN+RGIjwy2rrKV10wZ+8KXph10iiT6HM8+0+PGLa3l5v81dF5xCwJf55Zeu/g3cxO3xE+McahuDPLFqKy98uJ1Zxw5i4TmjOfqIzHw03+3/BpkUf3l5ecx1h9Moeg4wD/is1no/EH2Ul4EHervveII9fPSf9qdFEyu52LbN71//hBvOOCbp9W6/18MvLhjPf7+0jh8uXsNdF44nzydTc4nE1dS38OjyLfx9/U4+c+JQHrlqqjyaL6C3dW+lVCnwG+B8rfVeZ5lWSk1wNpkFrE1qpFHCVs96udDDSS5e3bgby7b59NghvYywez6vh//5/MkU+L18//k1tMhsSiIBW/c1cec/P+Tyh5bj8xgsunY6/yXjrIgovb1DvxQYACxSqq1tlHnAg0qpJqAe+GrywuyopxNc0N4oGn88l7Blcf8bm/n+nONT2r3L5/Xws8+PY/7fPuR7z3/A3RdNkEl0RZc+3t3AE59YbF67irmTRvLsdTPl0XzRpV4ldK31n4A/dbFq2uGHFF/vern4Eiq5vLR2J0NKAswYM/AwIkyMz+Ph9vPHcfvf1/Pd5z7gNxdNoCAgSV1ErN95gIXvVPJB9QGmDzT4zZUz5dF80a3Mb5HrQuQOvYclF3/8p0VbQiZ/fvs/3HRG380+5PUY/PS8cQwtyeM7z71PU5Im4hDu9e7Wfdzy9Hv84IU1TDkqMs7K2cMNSeYiLlf+hfR0CjraxnOJU6t+evV2xg3rl3BPmGTxegzmnXcSP//nR3z7mff57dyJFAVc+U8jeikyzkotDy2tZE9jkKunj+Y3J8s4K6JnXJk1QqaFv4d9uAsDPnYcaI65vr4lxKMrqnjg0lOTEGHPeQyDH59zIr98ZQPfevp9fjd3otyR5QDLtqnYuJuFy6oImhbXTh/Np08cIkNEiF5xZcbodaNoNyWXx1ZsYdYxgzhmUPr68XoMg//67Fj+998bueXp97j3kkmS1LNU2LJ4xRlnJd/v5WszxzD7OBlnRRweV2aLXpdcYiT0PQ2tPPvedh67uk/adLvlMQx+9OkTuPvVTdykV3PfJZMo6eU4MiLzBMMWL63dwcPLqxjWL5/vnS3jrFco0kkAAA24SURBVIjkcWdCt6weDc5FnImiFyyt5HMnD2dYv/wkRXh4DMPge2cfzz2vfcxNOnKnXird1FytOWjy/AfbeXzFVo4bXMwd549j4pH90x2WyDKuLNT1qpdLjH7o2/Y388pHu7hmxugkRnj4DMPgu2cdx6mj+nOjXs3+5lC6QxK9UN8SYsE7lXzxz2/z/vY67r54Ar+bO1GSuUgJ1yV0y7axLLvHg/MXxeiH/sc3N3PpqUcyoDCQxCiTwzAMvl12HDPGDOTGRavZ1xRMd0giQfuagvz+9U+46M/vUFXbxAOXncpdF57CiV1MMC5Esriu5NJWP+9pzbGgi0bRjbvqWV61j//67NgkR5k8hmFw8+xj8XoMvrloNferyQwsyryLj4ioqW/hsRVb+Nu6nXx67BAe+spUjuwvj+aLvuHChN7zcgsxBuf6wxubuXbG6Izv820YBt+cdQxew+CGRau5X01iUIzp8BobWtm9ow7DY1BYlEdBYYCCogBe6c+cUtv2NfHw8i28unEXnz95OE9eM50hJYlPWShEMmR2JutCqBfTyOGUXJqiauirt+3nkz2N3HXhKUmOMDUMw+Abs47B49yp//aC8YQPNFNTXceuHXXtP0NBkyHD+2EYBk2NrTQ1BmlpCuIP+CgsClBQlEdhUSDyujCPwuIABYWBSPIvivxs266g0I9H+kN365M9DTy0tIp3Kmv50sQRPPu1GfTPwPKdyA0uTOg2gV7coef7PQTDFqZl4zHg969/wjdOPzrjxyNvbQmxa+cBdlXXUVO9H/+OOk6qrOW+dysZPrI/I4/sz9ARpYwdP4Ihw0vp17/gkHKUZdm0toRobgzS1NhKc1OwPdk3NwbZt7eR6i37IusagzQ561ubQwTyfBgei/VL/+lcCCKJv6Aw6sLQfpGIrMsvCOBxyeQdvfXhzgMsWFrFB9v3c9mUUfzoM2PlmQGRdq77C+zNwFw4d7gF/sg0dKu37ae+Ncy544alJMbeCAXD7K5pS9x11OyoY1d1HQ0HWhg0tIShI/ozZEQpM8YO5YLhpbz48R5eXLOD+784KW53S4/HiJReCgMMHJz4rCuWZdPSHKTi1Tc4ZfykyAWgKUhTQ+SisHd3PdsqO14cmhpbCbaGySvwU9jpzj9yIQhQWJwXWVec1+HbQV6+P+MvBKu37WfBO5Vs3tPIldOO4mefGyejZIqM4bqEHjatXj8WXeD30tAa5vevf8KNZxzT454yyRAOm+ytqW9P2m2lkrp9TRwxuJghw0sZOqKUKZ86hqEj+jNwUFGXZY9rBhXj83q44al3eeCyU1PSh97j1OELS3yMOjrx2aEsy6K5KUSzk+jb7/ydbwe7dxygualtXbB9u1AwTH5hoFOyd8o/beWgwoBTJspr/5mX70vpgzm2bfNOZS0L36lkd0MrV08fzd0XTcj4b3ci97guoe+qrmPMR9UsXbKJCVOOojBG42BXCgM+nv+gmsKAl9k9mL6uN0zTonZ3AzXVdXyyrp4dm95k1446anc30P+IIoYOL2XIiP5MnDqaoSNKOWJISY8bLq+cehQew+AbT73LHy6dnDETHXg8HoqK8yjqwb8Nzu+sue1bQPtd/8GLQs32/e3fEJqaDq4Lh8yO9f+otoGCojy2bW9kTckW5+JwsFwUyOv+QmDZNhWbdvPQ0ipawhbXzhjNZ2ScFZHBXJfQCwYUUj9qIFUf7+KVF97nmLFDmDTtaE4YP5xAnN4qRQEvjy3fwr2XTEzaHZ1l2ezb29BeKmlroNy7q56S0gKGjCjFMm3GTTySsvNOZtDQfviT+BX98tNG4fMY3PDUau6/dLKru8h5vR6K++VT3MNvG+GQSXNTsEOyj/5WUL8vzNp3t0RdCCLLLctu/xZQWJRHIM+Hx+vB8BjsbgyyeW8TXq/B9KElHDmgEHPNdl5ZX43H68Hr8eDxGpHXXgOPx4PX68HjiVrmvO9qeYdlzs+O+4r8jCw3sCwb27ZliADRLdcl9BOGlXD7NdMZXlpAS3OQtau3snTJRvTCtwnk+ehXWkBJaQH9+rf9LIws619AkWUxdUgxo/N91GzfTyhsEg6ahMMmoaBJOGwRCoY7/Yys
DwfNQ7avP9DCnpoDFBQGGDqilCHDSznupGF8as5YhgwrJeA0klVUVDBx2piU/U7UqUfiMeAG50591IDClB0rE/n8Xkqcf/euVFTsp6xs1iHLQyGT5vbEH6SxKcjyyr1UbNhFab6P2TPHcPSAQizLxjItLMvGjP5p2oRaw7RaFqZpY7X9NC0ss9Myy3KW25iW89PZl9Vpn6bz+Q7HC5uUP/1U+0Ug1oXi0OXOhccTvU2nC4dzMYn+bPvnoi440fv1eDtt6+m4rPP2DXUhdu880OV+OxxPvv0cFtcl9Dyfl+HO/7j5BQFO+9SxnPapY7Esm+bGVg7UNXNgfzP1dc0cqGumpno/H3+4gwP7mxm6pwG/x+ChDTvw+T34/D78fg8+vxef34s/6mfba5/fS2FhAF+psy7gxeeL/CwsymPI8H7kF6S/m9rcyUfi8bT1U5/M6IHpSeqWbWNazn/OU72R12BaNpZtE3Z+tm93yLa2sy0dtrUsm3CM7Tq8j3ptWjabt1msWfJx19tGbffu1v0cO7iI714xlckZ9mh+RUUFZ555ZvsFIPoiYbZdFMzOF5wuLhSHXECi99V2ATp4YTJDFiFnWfQFpsvjtcfm7Csqtvr6Bja++/ohx+78WZySXeeL0CEXL+fbi7eLC8whF6EuLnbRF6F437K8Xg8125r58IPtHS4+CX3Lir6AOtun8luW6xJ6LB6PQVFJPkUl+Qw/ckC6w0mLiyeOxOcxuHHRak4/5ohDkmfnBNn2X/v7GAmyodHino1vRz4blXzDXWyLM2GHz2PgMQy8HgOvEVnm7bDMeR/1OrKeDp/1GJF9dfis8xmPh4776mLfPsPA7zEoyffhNTx4nc8csq1hcOXUoxibwY/mG4aB10kkbhuqraKigrKysrjbWV18e4l5wbKiLhydLl5W54uQmdi3rI4XrIMXu5qaZloPfNztt6yOrw89Xtt7j8fgW7edz+Bh/ZL+e056QldK3QHMAfKBb2itVyb7GCK2C04ZwbB++Wzd14zH6JjwDkmkHRIaeD2e9uQbnTxXrljOzBmTD02sUdv6nH1m4njeFaEqyqanruQlksfj8eDxRMpomSRyQTrzsPdjOzdDqbpLT2pCV0qdBUzVWp+ulBoP3A/MTuYxRHzTRg9kWhIHj6zKN1zd2CpEpmj7lpUqyW6BOAtYDKC1XguMUErlVgudEEKkSbIT+nBgd9T73cDQJB9DCCFEF5JdQ+88YLcB2LE2rqioiLvDhoaGhLbLZG4/B4k//dx+DhJ/30h2Qt8BDIl6PxioibVxIq3eibaOZzK3n4PEn35uPweJP3nKy8tjrkt2yeXvwIVEGkhPBTZrrZuTfAwhhBBdSGpC11qvAt5XSr0LPADcmsz9CyGEiC3p/dC11j8CfpTs/QohhOieDJwghBBZwrDtmJ1QUqq8vDw9BxZCCJebM2dOl08npS2hCyGESC4puQghRJaQhC6EEFlCEroQQmQJSehCCJElJKELIUSWyOgZi9w6WYYzFvxi4B6t9X1KqSHAI0B/YBtwhda6Nd1xxqKU+rkzFLIfuAtY4pb4neGaH3JG+SwC7gCWuiX+NkqpAmCdE//f3BS/UqoMeNqJH2AN8DOXncPlwPecAQZvA1a4If6MvUOPniwDuBr4TbpjSoRSqgi4F4geQedXwEKt9QygErgijSF2Syk1G5iktZ4JfBa4x03xAxcAK7XWZwJfAn7tsvjb/ATY67x2Y/xLtNZlzn+3uOkclFLFTjI/Hfg88EW3xJ+xCd3Fk2W0AucD1VHLyoAXndeLgXPSFFsi3gaU83o/EADOdkv8WuuntNb/67w90rmbctPvH6XUicBJwMvOIlfFH4ObzuEc4GWtdYvWulprfb1b4s/kkstw4P2o922TZfwnjTHFpbUOA2GlVPTikqhRJ3cBw9ITXXxO/A3O2+ucr/sXuCX+NkqpZU6c5wNvuCz+XwM3A9c4713z9xNlnFLq70AJcLvLzmEUMNiJvxj4qVviz+Q79B5NlpHhos/FFeehlLoQuB74jhvj11pPBy4CngLCUasyOn6l1FXA61rryqjFbvv9bwLuBD4HfAV40Im7TaafQ55z8/h54KtOm4wr/oYyOaH3aLKMDFcfVS4a1qkck3GUUucA84Bztdb73RS/Uuo0pdRRRJL6u87feKNb4neS4Fyl1FLnG9JtQLOL4kdrvV1r/YTW2tJa/wfYCRS76Bx2Au9orU2t9SbggFv+hjI5oWfTZBn/aDsX4OKo2mjGUUqVOg3Q52ut2xrlXBM/8CnnWwVKqaHOV/6X3BK/1vpSrfU0p/HtQad3iGviJ/J7v0wpNd95Pci5233QRefwb+BspZTh9FBzzd9QRg/OpZS6C/iM83Xna1rrNemOKR6l1BTgbmAMEAK2Oy3ijzvd6DYA1zi16oyjlPo6MB/YGLX4auBhl8SfByx06qB5Tre/VcCTbog/mpMUK4F/uil+p5fIo843bI9zUVrtsnP4OnB5VBvACjfEn9EJXQghROIyueQihBCiByShCyFElpCELoQQWUISuhBCZAlJ6EIIkSUkoQshRJaQhC6EEFni/wMP+PTk2IAC0AAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "bento_obj_id": "139882812866128", + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "fig, ax = plt.subplots()\n", + "for name, results in result_maps.items():\n", + " ax.plot(xs, results, label=name)\n", + "\n", + "# Log scale vastly improves visualization\n", + "#plt.yscale(\"log\")\n", + "#plt.xscale(\"log\")\n", + "plt.legend(loc='best')\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "bento_stylesheets": { + "bento/extensions/flow/main.css": true, + "bento/extensions/kernel_selector/main.css": true, + "bento/extensions/kernel_ui/main.css": true, + "bento/extensions/new_kernel/main.css": true, + "bento/extensions/system_usage/main.css": true, + "bento/extensions/theme/main.css": true + }, + "disseminate_notebook_info": {}, + "kernelspec": { + "display_name": "reagent (local)", + "language": "python", + "name": "reinforcement_learning_local" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5+" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/reagent/ope/test/notebooks/contextual_bandit_experiments.ipynb b/reagent/ope/test/notebooks/contextual_bandit_experiments.ipynb new file mode 100644 index 000000000..584cc0ce5 --- /dev/null +++ b/reagent/ope/test/notebooks/contextual_bandit_experiments.ipynb @@ -0,0 +1,326 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 
= np.dtype([(\"qint32\", np.int32, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" + ] + } + ], + "source": [ + "# Imports\n", + "\n", + "import argparse\n", + "import json\n", + "import logging\n", + "import os\n", + "import random\n", + "import sys\n", + "from dataclasses import dataclass\n", + "from typing import Tuple\n", + "from multiprocessing import Pool\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import torch\n", + "from torch import Tensor\n", + "\n", + "from reagent.ope.estimators.estimator import Estimator, EstimatorResult\n", + "from reagent.ope.estimators.contextual_bandits_estimators import (\n", + " Action,\n", + " ActionDistribution,\n", + " ActionRewards,\n", + " BanditsEstimatorInput,\n", + " BanditsModel,\n", + " DMEstimator,\n", + " DoublyRobustEstimator,\n", + " IPSEstimator,\n", + " LogSample,\n", + " SwitchEstimator,\n", + " SwitchDREstimator\n", + ")\n", + "from reagent.ope.estimators.types import ActionSpace, Policy, Trainer\n", + "from reagent.ope.trainers.linear_trainers import (\n", + " LogisticRegressionTrainer,\n", + " SGDClassifierTrainer,\n", + " TrainingData,\n", + " DecisionTreeTrainer,\n", + " LinearTrainer,\n", + " NNTrainer\n", + ")\n", + "from reagent.ope.test.multiclass_bandits import (\n", + " MultiClassDataRow,\n", + " UCIMultiClassDataset,\n", + " MultiClassContext,\n", + " MultiClassModel,\n", + " MultiClassPolicy,\n", + " evaluate_all\n", + ")\n", + "from reagent.ope.utils import RunningAverage, Clamper\n", + "\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configuration Settings\n", + "\n", + "Edit the experiments list with the names of UCI datasets given in reagent/test/data to produce results for each dataset. " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# Configuration\n", + "\n", + "DEFAULT_ITERATIONS = 500\n", + "TEST_ROOT_PATH = '..'\n", + "UCI_DATASET_CONFIGS = os.path.join(TEST_ROOT_PATH, 'configs')\n", + "MAX_METRIC_NAME_LENGTH = 20\n", + "experiments = [\"ecoli\", \"letter_recog\", \"pendigits\", \"optdigits\", \"satimage\"]\n", + "\n", + "experiment_params = []\n", + "for exp in experiments:\n", + " with open(os.path.join(UCI_DATASET_CONFIGS, exp + '_config.json'), \"r\") as f:\n", + " params = json.load(f)\n", + " if \"dataset\" in params:\n", + " if \"file\" in params[\"dataset\"]:\n", + " params[\"dataset\"][\"file\"] = os.path.join(TEST_ROOT_PATH, params[\"dataset\"][\"file\"])\n", + " experiment_params.append({\"name\": exp, \"params\": params}) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run an experiment\n", + "\n", + "We load the given dataset, and create trainers (which will be used for generating the policies for the logger and target). To try different trainers, modify the `log_trainer` and `tgt_trainer` variables with different `LinearTrainer`s. \n", + "\n", + "Note that DM's performance is highly dependent on the reward model. To try different reward models, modify the trainer passed into `DMEstimator` and `DoublyRobustEstimator` with different `LinearTrainer`s. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def load_dataset(params):\n", + " return UCIMultiClassDataset(params[\"dataset\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# Experiment(s)\n", + "def run_experiment(dataset): \n", + " random.seed(1234)\n", + " np.random.seed(1234)\n", + " torch.random.manual_seed(1234)\n", + "\n", + " log_trainer = LogisticRegressionTrainer()\n", + " log_epsilon = 0.1\n", + " tgt_trainer = SGDClassifierTrainer()\n", + " tgt_epsilon = 0.1\n", + " experiments = [\n", + " (\n", + " (\n", + " SwitchEstimator(LogisticRegressionTrainer(), rmax=1.0),\n", + " SwitchDREstimator(LogisticRegressionTrainer(), rmax=1.0),\n", + " DMEstimator(LogisticRegressionTrainer()),\n", + " IPSEstimator(),\n", + " DoublyRobustEstimator(LogisticRegressionTrainer()),\n", + " ),\n", + " 1000,\n", + " )\n", + " for _ in range(100)\n", + " ]\n", + " results = evaluate_all(\n", + " experiments, dataset, log_trainer, log_epsilon, tgt_trainer, tgt_epsilon, 0\n", + " )\n", + " return results\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Result Generation\n", + "\n", + "For each UCI dataset, we generate a logging and target policy, create a simulated dataset using the logging policy, and evaluate the target policy using DM, IPS, and DR. The bias, rmse, and variance against the ground truth is plotted for each dataset. \n", + "\n", + "\n", + "For the settings with the logging policy trained with a `LogisticRegressionTrainer`, the target policy with a `SGDClassifierTrainer`, and the reward model for DM and DR trained with a `LogisticRegressionTrainer`, a sample result gives:\n", + "\n", + "\n", + "![alt text](img/bias.png \"Bias\")![alt text](img/variance.png \"Bias\")![alt text](img/rmse.png \"Bias\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "datasets = []\n", + "for params in experiment_params:\n", + " datasets.append(load_dataset(params['params']))\n", + " \n", + "labels = []\n", + "\n", + "bias_result_mapping = {}\n", + "var_result_mapping = {}\n", + "rmse_result_mapping = {}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running experiment ecoli\n", + "SwitchEstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.6409800000000001 tgt_reward[0.681124829351902] gt_reward[0.68099], diffs: tgt-gt[samples=100, rmse=0.018109501162651483, bias=0.00013482935190194833, variance=0.000331248336773678] tgt-log[samples=100, rmse=0.04037231561536808, bias=0.04014482935190198, variance=1.850156005410315e-05]\n", + "SwitchDREstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.6409800000000001 tgt_reward[0.6827095168828964] gt_reward[0.68099], diffs: tgt-gt[samples=100, rmse=0.017383191095996103, bias=0.0017195168828963636, variance=0.0003022410044134877] tgt-log[samples=100, rmse=0.0420135565438498, bias=0.04172951688289641, variance=2.402662038720069e-05]\n", + "DMEstimator(trainer(logistic_regression,device(None)) rewards: log_reward0.6409800000000001 tgt_reward[0.5767845714374259] gt_reward[0.68099], diffs: tgt-gt[samples=100, rmse=0.11443995653971288, bias=-0.10420542856257413, variance=0.002260335667577406] 
tgt-log[samples=100, rmse=0.07716353279482184, bias=-0.06419542856257408, variance=0.001851674489944438]\n", + "IPSEstimator(weight_clamper(Clamper(-inf,inf)),weighted(False),device(None)) rewards: log_reward0.6409800000000001 tgt_reward[0.6823636658191] gt_reward[0.68099], diffs: tgt-gt[samples=100, rmse=0.01882527740452088, bias=0.0013736658190999795, variance=0.0003560647591662634] tgt-log[samples=100, rmse=0.04206649112797752, bias=0.04138366581910002, variance=5.755745373057047e-05]\n", + "DoublyRobustEstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.6409800000000001 tgt_reward[0.6817410593008515] gt_reward[0.68099], diffs: tgt-gt[samples=100, rmse=0.017341787591402112, bias=0.0007510593008514166, variance=0.00030320556241607353] tgt-log[samples=100, rmse=0.04114618984290583, bias=0.04076105930085145, variance=3.186361945547262e-05]\n", + "Running experiment letter_recog\n", + "SwitchEstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.57266 tgt_reward[0.37569143682718276] gt_reward[0.37457999999999997], diffs: tgt-gt[samples=100, rmse=0.017854429347174034, bias=0.0011114368271827103, variance=0.0003207528843357692] tgt-log[samples=100, rmse=0.1970799629435354, bias=-0.19696856317281733, variance=4.434031864764246e-05]\n", + "SwitchDREstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.57266 tgt_reward[0.39171935588121415] gt_reward[0.37457999999999997], diffs: tgt-gt[samples=100, rmse=0.03417343925054195, bias=0.017139355881214083, variance=0.0008828953840278517] tgt-log[samples=100, rmse=0.18214352227137348, bias=-0.18094064411878594, variance=0.00044115758717292716]\n", + "DMEstimator(trainer(logistic_regression,device(None)) rewards: log_reward0.57266 tgt_reward[0.5635916976264305] gt_reward[0.37457999999999997], diffs: tgt-gt[samples=100, rmse=0.1904276330665789, bias=0.1890116976264305, variance=0.0005426884805196267] tgt-log[samples=100, rmse=0.015867718348869037, bias=-0.009068302373569526, variance=0.0001712630077379278]\n", + "IPSEstimator(weight_clamper(Clamper(-inf,inf)),weighted(False),device(None)) rewards: log_reward0.57266 tgt_reward[0.375998918900146] gt_reward[0.37457999999999997], diffs: tgt-gt[samples=100, rmse=0.01824032850522253, bias=0.0014189189001458874, variance=0.00033403661932650735] tgt-log[samples=100, rmse=0.19681556707994985, bias=-0.1966610810998541, variance=6.140063195842609e-05]\n", + "DoublyRobustEstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.57266 tgt_reward[0.37928051044302813] gt_reward[0.37457999999999997], diffs: tgt-gt[samples=100, rmse=0.03347790030442483, bias=0.0047005104430280905, variance=0.001109772737745446] tgt-log[samples=100, rmse=0.19478690644998073, bias=-0.1933794895569719, variance=0.0005518302454934725]\n", + "Running experiment pendigits\n", + "SwitchEstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.8269399999999999 tgt_reward[0.7546563205122948] gt_reward[0.75397], diffs: tgt-gt[samples=100, rmse=0.015537334667333605, bias=0.0006863205122947092, variance=0.00024337144719104064] tgt-log[samples=100, rmse=0.07248482839355978, bias=-0.07228367948770521, variance=2.941416864900794e-05]\n", + "SwitchDREstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.8269399999999999 
tgt_reward[0.778800046145916] gt_reward[0.75397], diffs: tgt-gt[samples=100, rmse=0.03441568664250387, bias=0.024830046145915927, variance=0.0005736447428956617] tgt-log[samples=100, rmse=0.05062061183046669, bias=-0.048139953854084, variance=0.00024746584345196695]\n", + "DMEstimator(trainer(logistic_regression,device(None)) rewards: log_reward0.8269399999999999 tgt_reward[0.8175922172337771] gt_reward[0.75397], diffs: tgt-gt[samples=100, rmse=0.0656767363156423, bias=0.06362221723377698, variance=0.0002683304720530327] tgt-log[samples=100, rmse=0.01131064331883703, bias=-0.009347782766222957, variance=4.095920165803824e-05]\n", + "IPSEstimator(weight_clamper(Clamper(-inf,inf)),weighted(False),device(None)) rewards: log_reward0.8269399999999999 tgt_reward[0.7546501099407741] gt_reward[0.75397], diffs: tgt-gt[samples=100, rmse=0.015550695689640714, bias=0.0006801099407739219, variance=0.00024379958272754629] tgt-log[samples=100, rmse=0.07249068782194369, bias=-0.072289890059226, variance=2.9365268811629092e-05]\n", + "DoublyRobustEstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.8269399999999999 tgt_reward[0.7560407283385557] gt_reward[0.75397], diffs: tgt-gt[samples=100, rmse=0.027855609742539835, bias=0.0020707283385555174, variance=0.0007794414932086698] tgt-log[samples=100, rmse=0.0741953563333444, bias=-0.0708992716614444, variance=0.0004830749285946023]\n", + "Running experiment optdigits\n" + ] + } + ], + "source": [ + "for dataset, params in zip(datasets, experiment_params):\n", + " print(\"Running experiment \" + params[\"name\"])\n", + " if params[\"name\"] in labels:\n", + " continue\n", + " exp_results = run_experiment(dataset)\n", + " labels.append(params[\"name\"])\n", + "\n", + " for estimator_name, result in exp_results.items():\n", + " _, _, _, tgt_gt, _, _ = result.report()\n", + " if not estimator_name in bias_result_mapping:\n", + " bias_result_mapping[estimator_name] = []\n", + " if not estimator_name in var_result_mapping:\n", + " var_result_mapping[estimator_name] = []\n", + " if not estimator_name in rmse_result_mapping:\n", + " rmse_result_mapping[estimator_name] = []\n", + "\n", + " bias_result_mapping[estimator_name].append(tgt_gt.bias.cpu().numpy())\n", + " var_result_mapping[estimator_name].append(tgt_gt.variance.cpu().numpy())\n", + " rmse_result_mapping[estimator_name].append(tgt_gt.rmse.cpu().numpy())\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Generate Bar Charts, a la https://arxiv.org/pdf/1511.03722.pdf\n", + "\n", + "def create_and_show_chart(labels, results, title):\n", + " # Width of each bar\n", + " width = 0.1\n", + "\n", + " metrics = list(results.keys())\n", + " \n", + " # Set position of bar on X axis\n", + " barpos = [np.arange(len(results[metrics[0]]))]\n", + " for m in range(len(metrics)-1):\n", + " barpos.append([x + width for x in barpos[-1]])\n", + " \n", + " fig, ax = plt.subplots()\n", + " for metric, barpositions in zip(metrics, barpos):\n", + " ax.bar(barpositions, results[metric], width, label=metric[:MAX_METRIC_NAME_LENGTH])\n", + "\n", + " ax.set_ylabel(title)\n", + " plt.xticks([r + width for r in range(len(labels))], labels)\n", + " ax.set_xticklabels(labels)\n", + " ax.legend()\n", + "\n", + " fig.tight_layout()\n", + "\n", + " plt.show()\n", + "\n", + "create_and_show_chart(labels, bias_result_mapping, 'Bias')\n", + "create_and_show_chart(labels, rmse_result_mapping, 'RMSE')\n", + 
"create_and_show_chart(labels, var_result_mapping, 'Variance')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "celltoolbar": "Attachments", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.7" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/reagent/ope/test/notebooks/contextual_bandit_randomized_experiments.ipynb b/reagent/ope/test/notebooks/contextual_bandit_randomized_experiments.ipynb new file mode 100644 index 000000000..adc814867 --- /dev/null +++ b/reagent/ope/test/notebooks/contextual_bandit_randomized_experiments.ipynb @@ -0,0 +1,409 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/home/alexschneidman/anaconda3/envs/ope/lib/python3.7/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" + ] + } + ], + "source": [ + "# Imports\n", + "\n", + "import argparse\n", + "import json\n", + "import logging\n", + "import os\n", + "import random\n", + "import sys\n", + "from pathlib import PurePath\n", + "from dataclasses import dataclass\n", + "from typing import Tuple, Iterable\n", + "from multiprocessing import Pool\n", + "\n", + "import numpy as 
np\n", + "import pandas as pd\n", + "import torch\n", + "from torch import Tensor\n", + "\n", + "from reagent.ope.estimators.estimator import Estimator, EstimatorResult, Evaluator\n", + "from reagent.ope.estimators.contextual_bandits_estimators import (\n", + " Action,\n", + " ActionDistribution,\n", + " ActionRewards,\n", + " BanditsEstimatorInput,\n", + " BanditsModel,\n", + " DMEstimator,\n", + " DoublyRobustEstimator,\n", + " IPSEstimator,\n", + " LogSample,\n", + " SwitchEstimator,\n", + " SwitchDREstimator\n", + ")\n", + "from reagent.ope.estimators.types import ActionSpace, Policy, Trainer\n", + "from reagent.ope.trainers.linear_trainers import (\n", + " LogisticRegressionTrainer,\n", + " SGDClassifierTrainer,\n", + " TrainingData,\n", + " DecisionTreeTrainer,\n", + " LinearTrainer,\n", + " NNTrainer\n", + ")\n", + "from reagent.ope.test.multiclass_bandits import (\n", + " MultiClassDataRow,\n", + " UCIMultiClassDataset,\n", + " MultiClassContext,\n", + " MultiClassModel,\n", + " MultiClassPolicy,\n", + " evaluate_all\n", + ")\n", + "from reagent.ope.utils import RunningAverage, Clamper\n", + "\n", + "import matplotlib\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configuration Settings\n", + "\n", + "Edit the experiments list with the names of UCI datasets given in reagent/test/data to produce results for each dataset. " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# Configuration\n", + "\n", + "DEFAULT_ITERATIONS = 500\n", + "TEST_ROOT_PATH = '..'\n", + "UCI_DATASET_CONFIGS = os.path.join(TEST_ROOT_PATH, 'configs')\n", + "MAX_METRIC_NAME_LENGTH = 20\n", + "experiments = [\"ecoli\", \"letter_recog\", \"pendigits\", \"optdigits\", \"satimage\"]\n", + "#experiments = [\"ecoli\"]\n", + "\n", + "experiment_params = []\n", + "for exp in experiments:\n", + " with open(os.path.join(UCI_DATASET_CONFIGS, exp + '_config.json'), \"r\") as f:\n", + " params = json.load(f)\n", + " if \"dataset\" in params:\n", + " if \"file\" in params[\"dataset\"]:\n", + " params[\"dataset\"][\"file\"] = os.path.join(TEST_ROOT_PATH, params[\"dataset\"][\"file\"])\n", + " experiment_params.append({\"name\": exp, \"params\": params}) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run an experiment\n", + "\n", + "We load the given dataset, and create trainers (which will be used for generating the policies for the logger and target). To try different trainers, modify the `log_trainer` and `tgt_trainer` variables with different `LinearTrainer`s. \n", + "\n", + "Note that DM's performance is highly dependent on the reward model. To try different reward models, modify the trainer passed into `DMEstimator` and `DoublyRobustEstimator` with different `LinearTrainer`s. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def evaluate_all_noisy(\n", + " experiments: Iterable[Tuple[Iterable[Estimator], int]],\n", + " dataset: UCIMultiClassDataset,\n", + " log_trainer: Trainer,\n", + " log_epsilon: float,\n", + " tgt_trainer: Trainer,\n", + " tgt_epsilon: float,\n", + " max_num_workers: int,\n", + " random_reward_prob: float = 0.0,\n", + " device=None,\n", + "):\n", + " action_space = ActionSpace(dataset.num_actions)\n", + " config_path = PurePath(dataset.config_file)\n", + " data_name = config_path.stem\n", + " log_model_name = data_name + \"_\" + log_trainer.__class__.__name__ + \".pickle\"\n", + " log_model_file = str(config_path.with_name(log_model_name))\n", + " tgt_model_name = data_name + \"_\" + tgt_trainer.__class__.__name__ + \".pickle\"\n", + " tgt_model_file = str(config_path.with_name(tgt_model_name))\n", + "\n", + " #log_trainer.load_model(log_model_file)\n", + " #tgt_trainer.load_model(tgt_model_file)\n", + " if not log_trainer.is_trained or not tgt_trainer.is_trained:\n", + " (\n", + " train_x,\n", + " train_y,\n", + " train_r,\n", + " val_x,\n", + " val_y,\n", + " val_r,\n", + " test_x,\n", + " test_y,\n", + " test_r,\n", + " train_choices,\n", + " ) = dataset.train_val_test_split((0.5, 0.8))\n", + " trainer_data = TrainingData(train_x, train_y, None, val_x, val_y, None)\n", + " #if not log_trainer.is_trained:\n", + " # log_trainer.train(trainer_data)\n", + " # log_trainer.save_model(log_model_file)\n", + " if not tgt_trainer.is_trained:\n", + " tgt_trainer.train(trainer_data)\n", + " tgt_trainer.save_model(tgt_model_file)\n", + " \n", + " \n", + " tgt_results = tgt_trainer.predict(dataset.features)\n", + " assert tgt_results.probabilities is not None\n", + " tgt_policy = MultiClassPolicy(action_space, tgt_results.probabilities, tgt_epsilon)\n", + " \n", + " #log_results = log_trainer.predict(dataset.features)\n", + " #assert log_results.probabilities is not None\n", + " uniform = torch.full(tgt_results.probabilities.shape, 1.0 / len(action_space))\n", + " #log_policy = MultiClassPolicy(action_space, log_results.probabilities, log_epsilon)\n", + " log_policy = MultiClassPolicy(action_space, uniform, log_epsilon)\n", + "\n", + " tasks = []\n", + " test_queries = list(set(range(len(dataset))) - set(train_choices))\n", + " for estimators, num_samples in experiments:\n", + " samples = []\n", + " for _ in range(num_samples):\n", + " qid = random.sample(test_queries, 1)\n", + " label = int(dataset.labels[qid].item())\n", + " log_action, log_action_probabilities = log_policy(qid)\n", + " log_reward = 1.0 if log_action.value == label else 0.0\n", + " tgt_action, tgt_action_probabilities = tgt_policy(qid)\n", + " ground_truth_reward = 1.0 if tgt_action.value == label else 0.0\n", + " item_feature = dataset.features[qid]\n", + " random_reward = random.random() < random_reward_prob\n", + " samples.append(\n", + " LogSample(\n", + " context=qid,\n", + " log_action=log_action,\n", + " log_reward=random.randint(0, 1) if random_reward else log_reward,\n", + " log_action_probabilities=log_action_probabilities,\n", + " tgt_action_probabilities=tgt_action_probabilities,\n", + " tgt_action=tgt_action,\n", + " ground_truth_reward=ground_truth_reward,\n", + " item_feature=item_feature,\n", + " )\n", + " )\n", + " tasks.append((estimators, BanditsEstimatorInput(action_space, samples, False)))\n", + "\n", + " evaluator = Evaluator(tasks, max_num_workers)\n", + " results = evaluator.evaluate()\n", + " 
Evaluator.report_results(results)\n", + " return results" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def load_dataset(params):\n", + " return UCIMultiClassDataset(params[\"dataset\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Experiment(s)\n", + "def run_experiment(dataset): \n", + " random.seed(1234)\n", + " np.random.seed(1234)\n", + " torch.random.manual_seed(1234)\n", + "\n", + " log_trainer = LogisticRegressionTrainer()\n", + " log_epsilon = 0.1\n", + " tgt_trainer = SGDClassifierTrainer()\n", + " tgt_epsilon = 0.1\n", + " experiments = [\n", + " (\n", + " (\n", + " SwitchEstimator(LogisticRegressionTrainer(), rmax=1.0),\n", + " SwitchDREstimator(LogisticRegressionTrainer(), rmax=1.0),\n", + " DMEstimator(LogisticRegressionTrainer()),\n", + " IPSEstimator(),\n", + " DoublyRobustEstimator(LogisticRegressionTrainer()),\n", + " ),\n", + " 1000,\n", + " )\n", + " for _ in range(100)\n", + " ]\n", + " results = evaluate_all_noisy(\n", + " experiments, dataset, log_trainer, log_epsilon, tgt_trainer, tgt_epsilon, 0, 0.5\n", + " )\n", + " return results\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Result Generation\n", + "\n", + "For each UCI dataset, we generate a logging and target policy, create a simulated dataset using the logging policy, and evaluate the target policy using DM, IPS, and DR. The bias, rmse, and variance against the ground truth is plotted for each dataset. \n", + "\n", + "\n", + "For the settings with the logging policy trained with a `LogisticRegressionTrainer`, the target policy with a `SGDClassifierTrainer`, and the reward model for DM and DR trained with a `LogisticRegressionTrainer`, a sample result gives:\n", + "\n", + "\n", + "![alt text](img/bias.png \"Bias\")![alt text](img/variance.png \"Bias\")![alt text](img/rmse.png \"Bias\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "datasets = []\n", + "for params in experiment_params:\n", + " datasets.append(load_dataset(params['params']))\n", + "labels = []\n", + "\n", + "bias_result_mapping = {}\n", + "var_result_mapping = {}\n", + "rmse_result_mapping = {}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running experiment ecoli\n", + "SwitchEstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.3145700000000001 tgt_reward[0.5792702929675579] gt_reward[0.6479199999999998], diffs: tgt-gt[samples=100, rmse=0.08826843444395464, bias=-0.06864970703244203, variance=0.003109630549036968] tgt-log[samples=100, rmse=0.269050868616176, bias=0.2647002929675578, variance=0.0023455806121291606]\n", + "SwitchDREstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.3145700000000001 tgt_reward[0.5768058840930462] gt_reward[0.6479199999999998], diffs: tgt-gt[samples=100, rmse=0.08041803618813323, bias=-0.07111411590695375, variance=0.001424083902149753] tgt-log[samples=100, rmse=0.26409560671470594, bias=0.2622358840930461, variance=0.000988717757522366]\n", + "DMEstimator(trainer(logistic_regression,device(None)) rewards: log_reward0.3145700000000001 tgt_reward[0.4787884335635231] gt_reward[0.6479199999999998], diffs: tgt-gt[samples=100, rmse=0.17374743909282628, 
bias=-0.16913156643647692, variance=0.0015986725515747208] tgt-log[samples=100, rmse=0.16893882673376942, bias=0.16421843356352292, variance=0.0015885184405306798]\n", + "IPSEstimator(weight_clamper(Clamper(-inf,inf)),weighted(False),device(None)) rewards: log_reward0.3145700000000001 tgt_reward[0.579270295387581] gt_reward[0.6479199999999998], diffs: tgt-gt[samples=100, rmse=0.08826843277572166, bias=-0.06864970461241898, variance=0.003109630587181532] tgt-log[samples=100, rmse=0.2690508713082726, bias=0.2647002953875809, variance=0.002345580781280917]\n", + "DoublyRobustEstimator(trainer(logistic_regression),weight_clamper(Clamper(-inf,inf)),device(None)) rewards: log_reward0.3145700000000001 tgt_reward[0.5776792460019063] gt_reward[0.6479199999999998], diffs: tgt-gt[samples=100, rmse=0.08004570592961396, bias=-0.07024075399809349, variance=0.0014884358742924763] tgt-log[samples=100, rmse=0.2650540728811515, bias=0.2631092460019064, variance=0.0010375618375708183]\n", + "Running experiment letter_recog\n" + ] + } + ], + "source": [ + "for dataset, params in zip(datasets, experiment_params):\n", + " print(\"Running experiment \" + params[\"name\"])\n", + " if params[\"name\"] in labels:\n", + " continue\n", + " exp_results = run_experiment(dataset)\n", + " labels.append(params[\"name\"])\n", + "\n", + " for estimator_name, result in exp_results.items():\n", + " _, _, _, tgt_gt, _, _ = result.report()\n", + " result_var = torch.tensor(\n", + " [res.estimated_reward for res in result.results],\n", + " dtype=torch.double,\n", + " ).var().item()\n", + " if not estimator_name in bias_result_mapping:\n", + " bias_result_mapping[estimator_name] = []\n", + " if not estimator_name in var_result_mapping:\n", + " var_result_mapping[estimator_name] = []\n", + " if not estimator_name in rmse_result_mapping:\n", + " rmse_result_mapping[estimator_name] = []\n", + "\n", + " bias_result_mapping[estimator_name].append(tgt_gt.bias.cpu().numpy())\n", + " var_result_mapping[estimator_name].append(result_var)\n", + " rmse_result_mapping[estimator_name].append(tgt_gt.rmse.cpu().numpy())\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Generate Bar Charts, a la https://arxiv.org/pdf/1511.03722.pdf\n", + "print(labels)\n", + "def create_and_show_chart(labels, results, title):\n", + " # Width of each bar\n", + " width = 0.1\n", + "\n", + " metrics = list(results.keys())\n", + " \n", + " # Set position of bar on X axis\n", + " barpos = [np.arange(len(results[metrics[0]]))]\n", + " for m in range(len(metrics)-1):\n", + " barpos.append([x + width for x in barpos[-1]])\n", + " \n", + " fig, ax = plt.subplots()\n", + " for metric, barpositions in zip(metrics, barpos):\n", + " ax.bar(barpositions, results[metric], width, label=metric[:MAX_METRIC_NAME_LENGTH])\n", + "\n", + " ax.set_ylabel(title)\n", + " plt.xticks([r + width for r in range(len(labels))], labels)\n", + " ax.set_xticklabels(labels)\n", + " ax.legend()\n", + "\n", + " fig.tight_layout()\n", + "\n", + " plt.show()\n", + "\n", + "create_and_show_chart(labels, bias_result_mapping, 'Bias')\n", + "create_and_show_chart(labels, rmse_result_mapping, 'RMSE')\n", + "create_and_show_chart(labels, var_result_mapping, 'Variance')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "celltoolbar": "Attachments", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + 
}, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.7" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/reagent/ope/test/notebooks/img/bias.png b/reagent/ope/test/notebooks/img/bias.png new file mode 100644 index 000000000..716b47f46 Binary files /dev/null and b/reagent/ope/test/notebooks/img/bias.png differ diff --git a/reagent/ope/test/notebooks/img/rmse.png b/reagent/ope/test/notebooks/img/rmse.png new file mode 100644 index 000000000..818a0713b Binary files /dev/null and b/reagent/ope/test/notebooks/img/rmse.png differ diff --git a/reagent/ope/test/notebooks/img/variance.png b/reagent/ope/test/notebooks/img/variance.png new file mode 100644 index 000000000..fce2b67a8 Binary files /dev/null and b/reagent/ope/test/notebooks/img/variance.png differ diff --git a/reagent/ope/test/unit_tests/test_contextual_bandit_estimators.py b/reagent/ope/test/unit_tests/test_contextual_bandit_estimators.py new file mode 100644 index 000000000..875b313fb --- /dev/null +++ b/reagent/ope/test/unit_tests/test_contextual_bandit_estimators.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import random +import unittest + +import numpy as np +import torch +from reagent.ope.estimators.contextual_bandits_estimators import ( + Action, + ActionDistribution, + ActionSpace, + BanditsEstimatorInput, + DMEstimator, + DoublyRobustEstimator, + IPSEstimator, + LogSample, + ModelOutputs, + SwitchDREstimator, + SwitchEstimator, +) + + +class TestSwitchEstimators(unittest.TestCase): + """ + These unit tests verify basic properties of the Switch estimators, in that + when the threshold is low, the model-based DM estimator is used and when the + threshold is high, the propensity score estimator is used. 
+ """ + + NUM_ACTIONS = 2 + DR_EPSILON = 0.05 + + def setUp(self) -> None: + random.seed(0) + torch.random.manual_seed(0) + np.random.seed(0) + self.action_space = ActionSpace(TestSwitchEstimators.NUM_ACTIONS) + self.sample1 = LogSample( + context=0, + log_action=Action(0), + log_reward=1.0, + log_action_probabilities=ActionDistribution(torch.tensor([0.7, 0.3])), + tgt_action_probabilities=ActionDistribution([0.6, 0.4]), + tgt_action=Action(1), + model_outputs=ModelOutputs(0.5, [0.4, 0.5]), + ) + self.sample2 = LogSample( + context=0, + log_action=Action(1), + log_reward=0.0, + log_action_probabilities=ActionDistribution([0.5, 0.5]), + tgt_action_probabilities=ActionDistribution([0.7, 0.3]), + tgt_action=Action(0), + model_outputs=ModelOutputs(0.0, [0.0, 0.0]), + ) + self.bandit_input = BanditsEstimatorInput( + self.action_space, [self.sample1, self.sample2], True + ) + SwitchEstimator.EXP_BASE = 1.5 + SwitchEstimator.CANDIDATES = 21 + + def test_switch_equal_to_ips(self): + """ + Switch with tau set at the max value should be equal to IPS + """ + # Setting the base to 1 will cause all candidates to be the maximum threshold + SwitchEstimator.EXP_BASE = 1 + switch = SwitchEstimator(rmax=1.0).evaluate(self.bandit_input) + ips = IPSEstimator().evaluate(self.bandit_input) + self.assertAlmostEqual(ips.estimated_reward, switch.estimated_reward) + + def test_switch_dr_equal_to_dr(self): + """ + Switch-DR with tau set at the max value should be equal to DR + """ + # Setting the base to 1 will cause all candidates to be the maximum threshold + SwitchEstimator.EXP_BASE = 1 + switch = SwitchDREstimator(rmax=1.0).evaluate(self.bandit_input) + dr = DoublyRobustEstimator().evaluate(self.bandit_input) + self.assertAlmostEqual( + dr.estimated_reward, + switch.estimated_reward, + delta=TestSwitchEstimators.DR_EPSILON, + ) + + def test_switch_equal_to_dm(self): + """ + Switch with tau set at the min value should be equal to DM + """ + # Setting candidates to 0 will default to tau being the minimum threshold + SwitchEstimator.CANDIDATES = 0 + switch = SwitchEstimator(rmax=1.0).evaluate(self.bandit_input) + dm = DMEstimator().evaluate(self.bandit_input) + self.assertAlmostEqual(dm.estimated_reward, switch.estimated_reward) + + def test_switch_dr_equal_to_dm(self): + """ + Switch-DR with tau set at the min value should be equal to DM + """ + # Setting candidates to 0 will default to tau being the minimum threshold + SwitchEstimator.CANDIDATES = 0 + switch = SwitchDREstimator(rmax=1.0).evaluate(self.bandit_input) + dm = DMEstimator().evaluate(self.bandit_input) + self.assertAlmostEqual(dm.estimated_reward, switch.estimated_reward) diff --git a/reagent/ope/test/unit_tests/test_slate_estimators.py b/reagent/ope/test/unit_tests/test_slate_estimators.py index 015b87ed5..33d32c051 100644 --- a/reagent/ope/test/unit_tests/test_slate_estimators.py +++ b/reagent/ope/test/unit_tests/test_slate_estimators.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import random import unittest -from functools import reduce import torch from reagent.ope.estimators.slate_estimators import ( @@ -36,7 +36,7 @@ def test_slate_item_probabilities(self): probs = SlateItemProbabilities(self._item_relevances) slate = probs.sample_slate(self._slots) slate_prob = probs.slate_probability(slate) - self.assertAlmostEqual(slate_prob, 0.017825312) + self.assertAlmostEqual(slate_prob, 0.017825312, places=2) slot_item_expectations = probs.slot_item_expectations(self._slots) slot_rewards = slot_item_expectations.expected_rewards( SlateItemValues(self._item_rewards) @@ -50,16 +50,13 @@ def test_slate_slot_item_probabilities(self): ) slate = probs.sample_slate(self._slots) slate_prob = probs.slate_probability(slate) - self.assertAlmostEqual(slate_prob, 0.02139037) + self.assertAlmostEqual(slate_prob, 0.02139037, places=2) slot_item_expectations = probs.slot_item_expectations() slot_rewards = slot_item_expectations.expected_rewards( SlateItemValues(self._item_rewards) ) - diff = slot_rewards.values - torch.tensor([1.818, 2.449, 4.353]) + diff = slot_rewards.values - torch.tensor([1.81818, 2.51352, 7.36929]) self.assertAlmostEqual(diff.sum().item(), 0, places=5) - for d in slot_item_expectations.items: - sum = reduce(lambda a, b: a + b, d.values) - self.assertAlmostEqual(sum.item(), 1.0) def test_metrics(self): dcg = DCGSlateMetric() diff --git a/reagent/ope/test/unit_tests/test_types.py b/reagent/ope/test/unit_tests/test_types.py index ff8c55354..96bff6cff 100644 --- a/reagent/ope/test/unit_tests/test_types.py +++ b/reagent/ope/test/unit_tests/test_types.py @@ -1,16 +1,20 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import unittest from typing import Tuple, Union import numpy as np import torch -from reagent.ope.estimators.types import Distribution, Items, TypeWrapper, Values -from torch import Tensor +from reagent.ope.estimators.types import ( + ActionDistribution as Distribution, + TypeWrapper, + Values, +) class TestTypes(unittest.TestCase): - TestType = Union[int, Tuple[int], float, Tuple[float], np.ndarray, Tensor] + TestType = Union[int, Tuple[int], float, Tuple[float], np.ndarray, torch.Tensor] TestClass = TypeWrapper[TestType] def setUp(self) -> None: @@ -276,7 +280,7 @@ def test_conversion(self): class TestDistribution(unittest.TestCase): - class TestIntKeyDistribution(Distribution[int]): + class TestIntKeyDistribution(Distribution): def _new_key(self, k: int): return k @@ -312,7 +316,7 @@ def _test_sample(self, distribution: Distribution): counts = [0] * 4 total = 100000 for _ in range(total): - counts[distribution.sample()] += 1 + counts[distribution.sample()[0]] += 1 self.assertAlmostEqual(counts[0] / total, 0.1, places=2) self.assertAlmostEqual(counts[1] / total, 0.2, places=2) self.assertAlmostEqual(counts[2] / total, 0.3, places=2) diff --git a/reagent/ope/test/unit_tests/test_utils.py b/reagent/ope/test/unit_tests/test_utils.py index e9f1f1f3e..b20e4fbcd 100644 --- a/reagent/ope/test/unit_tests/test_utils.py +++ b/reagent/ope/test/unit_tests/test_utils.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import unittest diff --git a/reagent/ope/test/yandex_web_search.py b/reagent/ope/test/yandex_web_search.py index 1f41709b6..3b24f5486 100644 --- a/reagent/ope/test/yandex_web_search.py +++ b/reagent/ope/test/yandex_web_search.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
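One behavioural change worth noting in the `test_types.py` hunk above: `sample()` on the distribution type is now indexed with `[0]`, i.e. it returns a sequence of draws rather than a bare key. A hedged usage sketch follows; the exact return shape is inferred from the test, not from documentation.

```python
import torch
from reagent.ope.estimators.types import ActionDistribution

dist = ActionDistribution(torch.tensor([0.1, 0.2, 0.3, 0.4]))
draw = dist.sample()   # assumed: a sequence whose first element is the sampled key
action = draw[0]       # the unit test tallies frequencies of exactly this element
```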
import argparse import json @@ -8,26 +9,31 @@ import random import sys import time -from typing import List, MutableMapping, Optional, Sequence, Tuple, Union +from typing import Iterable, List, Mapping, MutableMapping, Optional, Sequence, Tuple import numpy as np import torch +import torch.multiprocessing as mp +from reagent.ope.estimators.estimator import Evaluator from reagent.ope.estimators.slate_estimators import ( - DMEstimator, - LogEpisode, + DCGSlateMetric, + ERRSlateMetric, + FrechetDistribution, + IPSEstimator, LogSample, NDCGSlateMetric, + PBMEstimator, + PseudoInverseEstimator, + RankingDistribution, + RewardDistribution, SlateContext, + SlateEstimator, SlateEstimatorInput, - SlateItemProbabilities, - SlateItems, SlateItemValues, SlateModel, SlateQuery, - SlateSlotItemExpectations, SlateSlots, SlateSlotValues, - make_slate, ) from reagent.ope.utils import RunningAverage @@ -37,10 +43,26 @@ RELEVANT_THRESHOLD = 49 HIGHLY_RELEVANT_THRESHOLD = 399 -MAX_POSITION = 10 +MAX_SLATE_SIZE = 10 MIN_QUERY_COUNT = 10 +def click_to_relevances( + clicks: Iterable[Tuple[int, int]], urls: Sequence[Tuple[int, int]] +) -> Tuple[List[float], Mapping[Tuple[int, int], float]]: + position_relevances = [0.0] * max(len(urls), MAX_SLATE_SIZE) + url_relevances = {url: 0.0 for url in urls} + for i, dt in clicks: + r = 0.0 + if dt > HIGHLY_RELEVANT_THRESHOLD: + r = 2.0 + elif dt > RELEVANT_THRESHOLD: + r = 1.0 + position_relevances[i] = r + url_relevances[urls[i]] = r + return position_relevances, url_relevances + + class LoggedQuery: def __init__( self, @@ -88,8 +110,8 @@ def clicks(self): return self._clicks def _click_to_relevances(self): - self._position_relevances = [0.0] * max(len(self._list), MAX_POSITION) - self._url_relevances = {} + self._position_relevances = [0.0] * max(len(self._list), MAX_SLATE_SIZE) + self._url_relevances = {url: 0.0 for url in self._list} for i, dt in self.clicks: r = 0.0 if dt > HIGHLY_RELEVANT_THRESHOLD: @@ -112,52 +134,46 @@ def url_relevances(self): return self._url_relevances -class ProcessedQuery: +class TrainingQuery: def __init__(self, query_id: int, query_terms: Tuple[int]): self._query_id = query_id self._query_terms = query_terms self._count = 0 - self._url_relevances: Union[ - Sequence[Tuple[Tuple[int, int], float]], - MutableMapping[Tuple[int, int], float], - ] = {} - self._position_relevances = [0.0] * MAX_POSITION + self._url_relevances: MutableMapping[Tuple[int, int], RunningAverage] = {} + self._position_relevances = [RunningAverage() for _ in range(MAX_SLATE_SIZE)] def add(self, query: LoggedQuery): - if len(query.clicks) == 0: - return self._count += 1 urs = query.url_relevances for item_id, r in urs.items(): if item_id not in self._url_relevances: - self._url_relevances[item_id] = 0.0 + self._url_relevances[item_id] = RunningAverage(r) else: - self._url_relevances[item_id] += r + self._url_relevances[item_id].add(r) prs = query.position_relevances - for i in range(MAX_POSITION): - self._position_relevances[i] += prs[i] + for i in range(MAX_SLATE_SIZE): + self._position_relevances[i].add(prs[i]) - def merge(self, other: "ProcessedQuery"): - self._count += 1 + def merge(self, other: "TrainingQuery"): for i, r in other.url_relevances.items(): if i not in self._url_relevances: - self._url_relevances[i] = r + self._url_relevances[i] = RunningAverage(r) else: - self._url_relevances[i] += r - for i in range(MAX_POSITION): - self._position_relevances[i] += other.position_relevances[i] + self._url_relevances[i].add(r) + for i in range(MAX_SLATE_SIZE): + 
self._position_relevances[i].add(other.position_relevances[i]) def finalize(self): - self._url_relevances = { - k: v / self._count for k, v in self._url_relevances.items() - } - self._position_relevances = [v / self._count for v in self._position_relevances] + self._url_relevances = {k: v.average for k, v in self._url_relevances.items()} + self._position_relevances = [v.average for v in self._position_relevances] def pack(self): - self._url_relevances = list(self._url_relevances.items()) + if isinstance(self._url_relevances, Mapping): + self._url_relevances = list(self._url_relevances.items()) - def unpack(self): - self._url_relevances = {v[0]: v[1] for v in self._url_relevances} + def _unpack(self): + if isinstance(self._url_relevances, Sequence): + self._url_relevances = {v[0]: v[1] for v in self._url_relevances} @property def count(self): @@ -173,6 +189,7 @@ def query_terms(self): @property def url_relevances(self): + self._unpack() return self._url_relevances @property @@ -267,25 +284,33 @@ def create_cache(params): logging.info(f" saving time: {time.process_time() - st}") -def load_logged_queries(params): +def load_logged_queries(params) -> Sequence[TrainingQuery]: logging.info("loading logged queries...") if "folder" not in params: raise Exception('Please define "folder" in "raw_data"') folder = params["folder"] if "folder" in params else "" if len(folder) == 0: folder = os.getcwd() - cache_folder = params["cache_folder"] if "cache_folder" in params else folder - if len(cache_folder) == 0: - cache_folder = folder + cache_file_name = params["cache_file_name"] if "cache_file_name" in params else "" + cache_file = os.path.join(folder, f"{cache_file_name}.pickle") + if len(cache_file_name) > 0 and os.access(cache_file, os.R_OK): + logging.info(f" loading {cache_file}") + try: + st = time.perf_counter() + with open(cache_file, "rb") as f: + logged_queries = pickle.load(f) + logging.info(f" loading time {time.perf_counter() - st}") + return logged_queries + except Exception as err: + logging.warning(f" loading error {err}") base_file_name = params["base_file_name"] if "base_file_name" in params else "" if len(base_file_name) == 0: raise Exception('"base_file_name" not defined!') days = params["days"] if "days" in params else [] all_queries = {} - st = time.process_time() + st = time.perf_counter() for day in days: - cache_file = f"{base_file_name}_{day:02}.pickle" - pickle_file = os.path.join(cache_folder, cache_file) + pickle_file = os.path.join(folder, f"{base_file_name}_{day:02}.pickle") if os.access(pickle_file, os.R_OK): logging.info(f" loading {pickle_file}") with open(pickle_file, "rb") as f: @@ -296,13 +321,27 @@ def load_logged_queries(params): logging.info(f" loaded queries: {len(queries)}") for q in queries: if q.query_id in all_queries: - all_queries[q.q.query_id].append(q) + tq = all_queries[q.query_id] else: - all_queries[q.q.query_id] = [q] + tq = TrainingQuery(q.query_id, q.query_terms) + all_queries[q.query_id] = tq + tq.add(q) else: logging.warning(f" {pickle_file} not accessible!") - logging.info(f"loading time {time.process_time() - st}") - return all_queries + logging.info(f" loading time {time.perf_counter() - st}") + logged_queries = tuple(all_queries.values()) + for v in logged_queries: + v.finalize() + if len(cache_file_name) > 0: + logging.info(f" saving logged queries to {cache_file}") + try: + st = time.perf_counter() + with open(cache_file, "wb") as f: + pickle.dump(logged_queries, f, protocol=pickle.HIGHEST_PROTOCOL) + logging.info(f" saving time 
{time.perf_counter() - st}") + except Exception: + logging.warning(f" {cache_file} not accessible!") + return logged_queries class TrainingDataset: @@ -335,12 +374,22 @@ def load_queries(self, reload=False): logging.info(f" loading {pickle_file}") st = time.process_time() with open(pickle_file, "rb") as f: - min_query_count, days, queries = pickle.load(f) + ( + min_query_count, + days, + queries, + query_ids, + query_terms, + position_relevances, + ) = pickle.load(f) if min_query_count != self._min_query_count or days != self._days: logging.info(" updated config from last cache, reload") self.load_queries(True) else: self._queries = queries + self._query_ids = query_ids + self._query_terms = query_terms + self._position_relevances = position_relevances logging.info( f" loaded {len(self._queries)}, " f" time {time.process_time() - st}" @@ -364,7 +413,7 @@ def load_queries(self, reload=False): st = time.process_time() for q in queries: if q.query_id not in all_queries: - qr = ProcessedQuery(q.query_id, q.query_terms) + qr = TrainingQuery(q.query_id, q.query_terms) all_queries[q.query_id] = qr else: qr = all_queries[q.query_id] @@ -378,22 +427,33 @@ def load_queries(self, reload=False): v.finalize() v.pack() self._queries.append(v) + self._query_ids = None + self._query_terms = None + self._position_relevances = None if len(self._cache_file) > 0: logging.info(f"saving training queries to {pickle_file}") try: st = time.process_time() with open(pickle_file, "wb") as f: + self._process_training_queries() pickle.dump( - (self._min_query_count, self._days, self._queries), + ( + self._min_query_count, + self._days, + self._queries, + self._query_ids, + self._query_terms, + self._position_relevances, + ), f, protocol=pickle.HIGHEST_PROTOCOL, ) logging.info(f" saving time {time.process_time() - st}") except Exception: logging.warning(f" {pickle_file} not accessible!") - self._query_ids = None - self._query_terms = None - self._position_relevances = None + # self._query_ids = None + # self._query_terms = None + # self._position_relevances = None logging.info(f"loaded training queries: {len(self._queries)}") def _process_training_queries(self): @@ -407,15 +467,14 @@ def _process_training_queries(self): st = time.process_time() self._query_ids = {} self._query_terms = {} - self._position_relevances = [RunningAverage() for _ in range(MAX_POSITION)] + self._position_relevances = [RunningAverage() for _ in range(MAX_SLATE_SIZE)] for q in self._queries: - q.unpack() self._query_ids[q.query_id] = q for t in q.query_terms: if t in self._query_terms: self._query_terms[t].merge(q) else: - mq = ProcessedQuery(0, (t,)) + mq = TrainingQuery(0, (t,)) mq.merge(q) self._query_terms[t] = mq for ra, r in zip(self._position_relevances, q.position_relevances): @@ -429,25 +488,36 @@ def _process_training_queries(self): def training_queries(self): return self._queries - def predict_item(self, query_id: int, query_terms: Tuple[int]) -> SlateItemValues: + def item_relevances( + self, query_id: int, query_terms: Tuple[int], items: Iterable[Tuple[int, int]] + ) -> SlateItemValues: self._process_training_queries() if query_id in self._query_ids: q = self._query_ids[query_id] - return SlateItemValues(dict(q.url_relevances.items())) + rels = q.url_relevances else: - rels = {} + ras = {} for t in query_terms: - q = self._query_terms[t] - for i, r in q.url_relevances: - if i in rels: - ra = rels[i] - else: - ra = RunningAverage() - ra.add(r) - return SlateItemValues({i: r.average for i, r in rels.items()}) + if t in 
self._query_terms: + q = self._query_terms[t] + for i, r in q.url_relevances: + if i in ras: + ra = ras[i] + else: + ra = RunningAverage() + ras[i] = ra + ra.add(r) + rels = {i: r.average for i, r in ras.items()} + item_rels = {} + for i in items: + if i in rels: + item_rels[i] = rels[i] + else: + item_rels[i] = 0.0 + return SlateItemValues(item_rels) - def predict_slot(self, slots: SlateSlots) -> SlateSlotItemExpectations: - return SlateSlotItemExpectations(self._position_relevances[: len(slots)]) + def slot_relevances(self, slots: SlateSlots) -> SlateSlotValues: + return SlateSlotValues(self._position_relevances[: len(slots)]) class YandexSlateModel(SlateModel): @@ -456,13 +526,113 @@ def __init__(self, dataset: TrainingDataset): def item_rewards(self, context: SlateContext) -> SlateItemValues: query = context.query.value - return self._dataset.predict_item(query[0], query[1:]) + # pyre-fixme[20]: Call `TrainingDataset.item_relevances` expects argument `items`. + return self._dataset.item_relevances(query[0], query[1:]) + + def slot_probabilities(self, context: SlateContext) -> SlateSlotValues: + return self._dataset.slot_relevances(context.slots) + + +def evaluate( + experiments: Iterable[Tuple[Iterable[SlateEstimator], int]], + log_dataset: TrainingDataset, + log_distribution: RewardDistribution, + tgt_dataset: TrainingDataset, + tgt_distribution: RewardDistribution, + log_queries: Sequence[TrainingQuery], + slate_size: int, + item_size: int, + metric_func: str, + max_num_workers: int, + device=None, +): + log_length = len(log_queries) + slots = SlateSlots(slate_size) + + logging.info("Generating log...") + st = time.perf_counter() + tasks = [] + total_samples = 0 + for estimators, num_samples in experiments: + samples = [] + if num_samples * 10 > log_length: + logging.warning(f"not enough log data, needs {num_samples * 10}") + continue + query_choices = np.random.choice(log_length, num_samples, replace=False) + for i in query_choices: + q = log_queries[i] + # pyre-fixme[60]: Expected to unpack an iterable, but got `unknown`. 
+ context = SlateContext(SlateQuery((q.query_id, *(q.query_terms))), slots) + url_relevances = q.url_relevances + if len(url_relevances) > item_size: + url_relevances = { + k: v + for k, v in sorted( + url_relevances.items(), key=lambda item: item[1] + )[:item_size] + } + items = url_relevances.keys() + log_item_rewards = log_dataset.item_relevances( + q.query_id, q.query_terms, items + ) + log_item_probs = log_distribution(log_item_rewards) + tgt_item_rewards = tgt_dataset.item_relevances( + q.query_id, q.query_terms, items + ) + tgt_item_probs = tgt_distribution(tgt_item_rewards) + tgt_slot_expectation = tgt_item_probs.slot_item_expectations(slots) + gt_item_rewards = SlateItemValues(url_relevances) + if metric_func == "dcg": + metric = DCGSlateMetric(device=device) + elif metric_func == "err": + metric = ERRSlateMetric(4.0, device=device) + else: + metric = NDCGSlateMetric(gt_item_rewards, device=device) + slot_weights = metric.slot_weights(slots) + if tgt_item_probs.is_deterministic: + tgt_slate_prob = 1.0 + log_slate = tgt_item_probs.sample_slate(slots) + else: + tgt_slate_prob = float("nan") + log_slate = log_item_probs.sample_slate(slots) + log_slate_prob = log_item_probs.slate_probability(log_slate) + log_rewards = log_slate.slot_values(gt_item_rewards) + log_reward = metric.calculate_reward(slots, log_rewards, None, slot_weights) + gt_slot_rewards = tgt_slot_expectation.expected_rewards(gt_item_rewards) + gt_reward = metric.calculate_reward( + slots, gt_slot_rewards, None, slot_weights + ) + samples.append( + LogSample( + context, + metric, + log_slate, + log_reward, + log_slate_prob, + None, + log_item_probs, + tgt_slate_prob, + None, + tgt_item_probs, + gt_reward, + slot_weights, + ) + ) + total_samples += 1 + tasks.append((estimators, SlateEstimatorInput(samples))) + dt = time.perf_counter() - st + logging.info(f"Generating log done: {total_samples} samples in {dt}s") - def slot_probabilities(self, context: SlateContext) -> SlateSlotItemExpectations: - return self._dataset.predict_slot(context.slots) + logging.info("start evaluating...") + st = time.perf_counter() + evaluator = Evaluator(tasks, max_num_workers) + Evaluator.report_results(evaluator.evaluate()) + logging.info(f"evaluating done in {time.perf_counter() - st}s") if __name__ == "__main__": + mp.set_start_method("spawn") + logging.basicConfig( format="%(asctime)-15s_%(levelname)s: %(message)s", level=logging.INFO ) @@ -480,71 +650,40 @@ def slot_probabilities(self, context: SlateContext) -> SlateSlotItemExpectations with open(args.parameters, "r") as f: params = json.load(f) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + # uncomment to create cache for faster data loading + # create_cache(params["raw_data"]) - logging.info('loading "ground_truth_training_data"') - ground_truth_training_dataset = TrainingDataset( - params["ground_truth_training_data"] - ) - st = time.process_time() - ground_truth_training_dataset.load_queries() - logging.info(f"load time: {time.process_time() - st}") - gt_model = YandexSlateModel(ground_truth_training_dataset) - - logging.info('loading "log_training_data"') - log_training_dataset = TrainingDataset(params["log_training_data"]) - st = time.process_time() - log_training_dataset.load_queries() - logging.info(f"load time: {time.process_time() - st}") - - logging.info('loading "target_training_data"') - tgt_training_dataset = TrainingDataset(params["target_training_data"]) - st = time.process_time() - tgt_training_dataset.load_queries() - 
logging.info(f"load time: {time.process_time() - st}") - tgt_model = YandexSlateModel(tgt_training_dataset) + # device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + device = None + + logging.info('loading "log_data"') + log_dataset = TrainingDataset(params["log_data"]) + st = time.perf_counter() + log_dataset.load_queries() + logging.info(f"load time: {time.perf_counter() - st}") + logging.info('loading "target_data"') + tgt_dataset = TrainingDataset(params["target_data"]) + st = time.perf_counter() + tgt_dataset.load_queries() + logging.info(f"load time: {time.perf_counter() - st}") + + logging.info('loading "test_data"') + st = time.perf_counter() log_queries = load_logged_queries(params["test_data"]) - slots = SlateSlots(MAX_POSITION) - episodes = [] - for qid, qs in sorted(log_queries.items(), key=lambda i: len(i[1]), reverse=True): - log_query = qs[0] - context = SlateContext(SlateQuery((qid, *(log_query.query_terms))), slots) - log_item_rewards = log_training_dataset.predict_item( - log_query.query_id, log_query.query_terms - ) - log_item_probs = SlateItemProbabilities(log_item_rewards.values) - tgt_item_rewards = tgt_model.item_rewards(context) - tgt_item_probs = SlateItemProbabilities(tgt_item_rewards.values) - gt_item_rewards = gt_model.item_rewards(context) - metric = NDCGSlateMetric(gt_item_rewards) - samples = [] - for q in qs: - slate = make_slate(slots, q.list) - samples.append( - LogSample( - slate, - slate.slot_values(gt_item_rewards), - SlateSlotValues(q.position_relevances), - ) - ) - episodes.append( - LogEpisode( - context, - metric, - samples, - None, - log_item_probs, - None, - tgt_item_probs, - gt_item_rewards, - ) - ) - input = SlateEstimatorInput(episodes) - - estimator = DMEstimator() - logging.info("Evaluating...") - st = time.process_time() - rs = estimator.evaluate(input) - dt = time.process_time() - st - logging.info(f"Evaluating DMEstimator done: {rs} in {dt}s") + logging.info(f"load time: {time.perf_counter() - st}") + + estimators = [IPSEstimator(), PseudoInverseEstimator(), PBMEstimator()] + + evaluate( + [(estimators, 200)] * 4, + log_dataset, + RankingDistribution(1.0), + tgt_dataset, + FrechetDistribution(2.0, True), + log_queries, + 5, + 10, + "ndcg", + 2, + ) diff --git a/reagent/ope/trainers/__init__.py b/reagent/ope/trainers/__init__.py index e69de29bb..5be5087fd 100644 --- a/reagent/ope/trainers/__init__.py +++ b/reagent/ope/trainers/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/ope/trainers/linear_trainers.py b/reagent/ope/trainers/linear_trainers.py index 108809d66..853d20c70 100644 --- a/reagent/ope/trainers/linear_trainers.py +++ b/reagent/ope/trainers/linear_trainers.py @@ -1,101 +1,21 @@ #!/usr/bin/env python3 - +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import logging -import pickle +import math import time -from abc import ABC, abstractmethod -from dataclasses import dataclass -from typing import Optional, Tuple +from typing import Optional import numpy as np import torch +from reagent.ope.estimators.types import PredictResults, Trainer, TrainingData from sklearn.linear_model import Lasso, LogisticRegression, SGDClassifier -from sklearn.metrics import accuracy_score +from sklearn.metrics import accuracy_score, mean_squared_error from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor from torch import Tensor -@dataclass(frozen=True) -class TrainingData: - train_x: Tensor - train_y: Tensor - train_weight: Optional[Tensor] - validation_x: Tensor - validation_y: Tensor - validation_weight: Optional[Tensor] - - -@dataclass(frozen=True) -class PredictResults: - predictions: Optional[Tensor] # shape = [num_samples] - scores: Tensor # shape = [num_samples] - probabilities: Optional[Tensor] = None - - -class Trainer(ABC): - def __init__(self): - self._model = None - - @staticmethod - def _sample( - x: Tensor, - y: Tensor, - weight: Optional[Tensor] = None, - num_samples: int = 0, - fortran_order: bool = False, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - assert x.shape[0] == y.shape[0] - x_na = x.numpy() - if fortran_order: - x_na = x_na.reshape(x.shape, order="F") - y_na = y.numpy() - w_na = weight.numpy() if weight is not None else None - if num_samples > 0 and num_samples < x.shape[0]: - cs = np.random.choice(x.shape[0], num_samples, replace=False) - x_na = x_na[cs, :] - y_na = y_na[cs] - w_na = w_na[cs] if w_na is not None else None - return x_na, y_na, w_na - - @property - @abstractmethod - def name(self) -> str: - pass - - @abstractmethod - def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): - pass - - @abstractmethod - def predict(self, x: Tensor, device=None) -> PredictResults: - pass - - @abstractmethod - def score( - self, y: Tensor, y_pred: Tensor, weight: Optional[Tensor] = None - ) -> float: - pass - - def save_model(self, file: str): - if self._model is None: - logging.error(f"{self.__class__.__name__}.save_model: _model is None ") - return - try: - with open(file, "wb") as f: - pickle.dump(self._model, f, protocol=pickle.HIGHEST_PROTOCOL) - except Exception: - logging.error(f"{file} cannot be accessed.") - - def load_model(self, file: str): - try: - with open(file, "rb") as f: - self._model = pickle.load(f) - except Exception: - logging.error(f"{file} cannot be read.") - - class LinearTrainer(Trainer): - def __init__(self, is_classifier: bool = False): + def __init__(self, is_classifier: bool = False) -> None: super().__init__() self._is_classifier = is_classifier @@ -103,7 +23,7 @@ def predict(self, x: Tensor, device=None) -> PredictResults: if self._model is not None: if hasattr(self._model, "predict_proba"): proba = torch.as_tensor( - self._model.predict_proba(x), dtype=torch.double, device=device + self._model.predict_proba(x), dtype=torch.float, device=device ) score = (proba * torch.arange(proba.shape[1])).sum(dim=1) return PredictResults(torch.argmax(proba, 1), score, proba) @@ -111,7 +31,7 @@ def predict(self, x: Tensor, device=None) -> PredictResults: return PredictResults( None, torch.as_tensor( - self._model.predict(x), dtype=torch.double, device=device + self._model.predict(x), dtype=torch.float, device=device ), None, ) @@ -120,11 +40,19 @@ def predict(self, x: Tensor, device=None) -> PredictResults: else: raise Exception("model not trained") - def score( - self, y: 
Tensor, y_pred: Tensor, weight: Optional[Tensor] = None - ) -> float: + def _score(self, y_true: np.ndarray, y_pred: np.ndarray, weight=None) -> float: + if self._is_classifier: + return accuracy_score(y_true, y_pred, sample_weight=weight) + else: + return 1.0 / math.pow( + 2, + mean_squared_error(y_true, y_pred, sample_weight=weight), + ) + + def score(self, x: Tensor, y: Tensor, weight: Optional[Tensor] = None) -> float: + y_pred = self._model.predict(x) w = weight.numpy() if weight is not None else None - return accuracy_score(y.numpy(), y_pred.numpy(), sample_weight=w) + return self._score(y.numpy(), y_pred, weight=w) class LassoTrainer(LinearTrainer): @@ -132,7 +60,9 @@ class LassoTrainer(LinearTrainer): def name(self) -> str: return "lasso" - def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): + def train( + self, data: TrainingData, iterations: int = 1, num_samples: int = 0 + ) -> None: logging.info("LassoTrainer.train...") self._model = None best_score = float("-inf") @@ -143,17 +73,19 @@ def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): sx, sy, ssw = super()._sample( data.validation_x, data.validation_y, data.validation_weight ) - for alpha in np.logspace(-8, -1, num=8, base=10): + for alpha in np.logspace(-4, 2, num=7, base=10): model = Lasso( alpha=alpha, fit_intercept=False, copy_X=True, - max_iter=1000, + max_iter=10000, warm_start=False, selection="random", ) model.fit(x, y) - score = model.score(sx, sy, ssw) + y_pred = model.predict(sx) + score = self._score(sy, y_pred, weight=ssw) + # score = model.score(sx, sy, ssw) logging.info(f" alpha: {alpha}, score: {score}") if score > best_score: best_score = score @@ -165,7 +97,9 @@ class DecisionTreeTrainer(LinearTrainer): def name(self) -> str: return "decision_tree" - def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): + def train( + self, data: TrainingData, iterations: int = 1, num_samples: int = 0 + ) -> None: logging.info("DecisionTreeTrainer.train...") self._model = None best_score = float("-inf") @@ -176,6 +110,18 @@ def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): sx, sy, ssw = super()._sample( data.validation_x, data.validation_y, data.validation_weight ) + if self._model is None: + self._model = DecisionTreeRegressor( + criterion="mse", + splitter="random", + max_depth=None, + min_samples_split=4, + min_samples_leaf=4, + ) + self._model.fit(x, y, sw) + y_pred = self._model.predict(sx) + best_score = self._score(sy, y_pred, weight=ssw) + logging.info(f" max_depth: None, score: {best_score}") for depth in range(3, 21, 3): model = DecisionTreeRegressor( criterion="mse", @@ -185,7 +131,9 @@ def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): min_samples_leaf=4, ) model.fit(x, y, sw) - score = model.score(sx, sy, ssw) + y_pred = model.predict(sx) + score = self._score(sy, y_pred, weight=ssw) + # score = model.score(sx, sy, ssw) logging.info(f" max_depth: {depth}, score: {score}") if score > best_score: best_score = score @@ -193,14 +141,16 @@ def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): class DecisionTreeClassifierTrainer(LinearTrainer): - def __init__(self): + def __init__(self) -> None: super().__init__(True) @property def name(self) -> str: return "decision_tree_classifier" - def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): + def train( + self, data: TrainingData, iterations: int = 1, num_samples: int = 0 + ) -> 
None: logging.info("DecisionTreeClassifierTrainer.train...") self._model = None best_score = float("-inf") @@ -228,7 +178,7 @@ def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): class LogisticRegressionTrainer(LinearTrainer): - def __init__(self, solver: str = "lbfgs"): + def __init__(self, solver: str = "lbfgs") -> None: super().__init__(True) self._solver = solver @@ -236,7 +186,9 @@ def __init__(self, solver: str = "lbfgs"): def name(self) -> str: return "logistic_regression" - def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): + def train( + self, data: TrainingData, iterations: int = 1, num_samples: int = 0 + ) -> None: logging.info("LogisticRegressionTrainer.train...") self._model = None best_score = float("-inf") @@ -265,7 +217,7 @@ def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): class SGDClassifierTrainer(LinearTrainer): - def __init__(self, loss: str = "log", max_iter: int = 1000): + def __init__(self, loss: str = "log", max_iter: int = 1000) -> None: super().__init__(True) self._loss = loss self._max_iter = max_iter @@ -274,7 +226,9 @@ def __init__(self, loss: str = "log", max_iter: int = 1000): def name(self) -> str: return "sgd_classifier" - def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): + def train( + self, data: TrainingData, iterations: int = 1, num_samples: int = 0 + ) -> None: logging.info("SGDClassifierTrainer.train...") self._model = None best_score = float("-inf") @@ -301,32 +255,55 @@ def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): class LinearNet(torch.nn.Module): - def __init__(self, D_in, H, D_out): - super().__init__() - self.linear1 = torch.nn.Linear(D_in, H) - self.nonlinear = torch.nn.ReLU() - self.linear2 = torch.nn.Linear(H, D_out) + def __init__( + self, + D_in: int, + H: int, + D_out: int, + hidden_layers: int = 2, + activation=torch.nn.ReLU, + ) -> None: + super(LinearNet, self).__init__() + self._hidden_dim = H + self._hidden_layers = hidden_layers + self._activation = activation + self._out_dim = D_out + + self.layers = [] + dim = D_in + for _ in range(self._hidden_layers): + self.layers.append(torch.nn.Linear(dim, self._hidden_dim)) + self.layers.append(self._activation()) + dim = self._hidden_dim + self.layers.append(torch.nn.Linear(dim, self._out_dim)) + self.model = torch.nn.Sequential(*self.layers) def forward(self, x: torch.Tensor): x = x.requires_grad_(True) - x = torch.nn.functional.normalize(x) - x = self.linear1(x) - x = self.nonlinear(x) - x = self.linear2(x) - return x + return self.model(x) class NNTrainer(Trainer): - def __init__(self, device=None): + def __init__(self, device=None) -> None: super().__init__() self._device = device + self._loss_fn: Optional[torch.nn.MSELoss] = None @property def name(self) -> str: return "linear_net" - def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): - d_in, d_out = data.train_x.shape[1], data.train_y.shape[1] + def train( + self, + data: TrainingData, + iterations: int = 100, + epochs: int = 1, + num_samples: int = 0, + ) -> None: + d_in, d_out = ( + data.train_x.shape[1], + data.train_y.shape[1] if len(data.train_y.shape) > 1 else 1, + ) if d_in == 0 or d_out == 0: return None h = 500 @@ -345,38 +322,38 @@ def train(self, data: TrainingData, iterations: int = 1, num_samples: int = 0): scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( optimizer, "min", patience=5, verbose=True, threshold=1e-5 ) - for t in 
range(iterations): - x, y, _ = super()._sample( - data.train_x, data.train_y, data.train_weight, num_samples, True - ) - x = torch.as_tensor(x, device=self._device) - y = torch.as_tensor(y, device=self._device) - y_pred = self._model(x) - loss = self._loss_fn(y_pred, y) - if (t + 1) % 10 == 0: - scheduler.step(loss.item()) - logging.info(f" step [{t + 1}]: loss={loss.item()}") - - optimizer.zero_grad() - loss.backward() - optimizer.step() + for _ in range(epochs): + for t in range(iterations): + x, y, _ = super()._sample( + data.train_x, data.train_y, data.train_weight, num_samples, True + ) + x = torch.as_tensor(x, device=self._device) + y = torch.as_tensor(y, device=self._device) + if len(y.shape) == 1: + y = y.reshape(-1, 1) + y_pred = self._model(x) + # pyre-fixme[29]: `Optional[torch.nn.MSELoss]` is not a function. + loss = self._loss_fn(y_pred, y) + if (t + 1) % 10 == 0: + scheduler.step(loss.item()) + logging.info(f" step [{t + 1}]: loss={loss.item()}") + + optimizer.zero_grad() + loss.backward() + optimizer.step() logging.info(f" training time {time.process_time() - st}") - def predict(self, features: Tensor, device=None) -> PredictResults: + def predict(self, x: Tensor, device=None) -> PredictResults: if self._model is not None: self._model.eval() - proba = torch.as_tensor( - self._model(features), dtype=torch.double, device=device - ) + proba = torch.as_tensor(self._model(x), dtype=torch.float, device=device) return PredictResults(torch.argmax(proba, 1), proba) else: raise Exception("mode not trained") - def score( - self, y: Tensor, y_pred: Tensor, weight: Optional[Tensor] = None - ) -> float: + def score(self, x: Tensor, y: Tensor, weight: Optional[Tensor] = None) -> float: if self._loss_fn is not None: - return self._loss_fn(y_pred, y).item() + return self._loss_fn(y, x).item() else: raise Exception("mode not trained") diff --git a/reagent/ope/trainers/rl_tabular_trainers.py b/reagent/ope/trainers/rl_tabular_trainers.py index 7bc708ae4..f622e2a77 100644 --- a/reagent/ope/trainers/rl_tabular_trainers.py +++ b/reagent/ope/trainers/rl_tabular_trainers.py @@ -1,30 +1,32 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
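The refactored LinearNet above assembles its MLP from a configurable number of hidden layers via nn.Sequential instead of the old fixed two-layer shape, and the updated NNTrainer.train reshapes 1-D targets before the MSE loss. A minimal, self-contained equivalent (the class name and sizes here are assumptions for illustration) with a single regression step in the same spirit:

import torch

class TinyMLP(torch.nn.Module):
    """Local stand-in mirroring LinearNet(D_in, H, D_out, hidden_layers, activation)."""
    def __init__(self, d_in, h, d_out, hidden_layers=2, activation=torch.nn.ReLU):
        super().__init__()
        layers, dim = [], d_in
        for _ in range(hidden_layers):
            layers += [torch.nn.Linear(dim, h), activation()]
            dim = h
        layers.append(torch.nn.Linear(dim, d_out))
        self.model = torch.nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)

model = TinyMLP(d_in=4, h=500, d_out=1)
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

x, y = torch.randn(32, 4), torch.randn(32)
# reshape 1-D targets to (N, 1), as the updated train() does before the loss
loss = loss_fn(model(x), y.reshape(-1, 1))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(loss.item())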
import pickle from functools import reduce -from typing import Mapping, Sequence +from typing import List, Mapping, Sequence import torch -from reagent.ope.estimators.estimator import ( - Action, - ActionDistribution, - ActionSpace, +from reagent.ope.estimators.sequential_estimators import ( Model, RLPolicy, State, ValueFunction, ) +from reagent.ope.estimators.types import Action, ActionDistribution, ActionSpace from reagent.ope.test.envs import Environment, PolicyLogGenerator +from reagent.ope.utils import RunningAverage class TabularPolicy(RLPolicy): - def __init__(self, action_space: ActionSpace, epsilon: float = 0.0, device=None): + def __init__( + self, action_space: ActionSpace, epsilon: float = 0.0, device=None + ) -> None: super().__init__(action_space, device) self._epsilon = epsilon as_size = len(action_space) self._exploitation_prob = 1.0 - epsilon self._exploration_prob = epsilon / len(action_space) - self._uniform_probs = as_size * [1.0 / as_size] + self._uniform_probs: List[float] = as_size * [1.0 / as_size] self._state_space = {} def update(self, state: State, actions: Sequence[float]) -> float: @@ -73,7 +75,7 @@ def load(self, path) -> bool: class TabularValueFunction(ValueFunction): - def __init__(self, policy: RLPolicy, model: Model, gamma=0.99): + def __init__(self, policy: RLPolicy, model: Model, gamma: float = 0.99) -> None: self._policy = policy self._model = model self._gamma = gamma @@ -96,10 +98,51 @@ def state_action_value(self, state: State, action: Action) -> float: def state_value(self, state: State) -> float: pass - def reset(self, clear_state_values: bool = False): + def reset(self, clear_state_values: bool = False) -> None: pass +class EstimatedStateValueFunction(ValueFunction): + def __init__( + self, policy: RLPolicy, env: Environment, gamma: float, num_episodes: int = 100 + ) -> None: + self._policy = policy + self._env = env + self._gamma = gamma + self._num_episodes = num_episodes + self._state_values = {} + self._estimate_value() + + def _estimate_value(self) -> None: + tgt_generator = PolicyLogGenerator(self._env, self._policy) + log = {} + for state in self._env.states: + mdps = [] + for _ in range(self._num_episodes): + mdps.append(tgt_generator.generate_log(state)) + log[state] = mdps + + for state, mdps in log.items(): + avg = RunningAverage() + for mdp in mdps: + discount = 1.0 + r = 0.0 + for t in mdp: + r += discount * t.reward + discount *= self._gamma + avg.add(r) + self._state_values[state] = avg.average + + def state_action_value(self, state: State, action: Action) -> float: + return 0.0 + + def state_value(self, state: State) -> float: + return self._state_values[state] + + def reset(self) -> None: + self._state_values = {} + + class DPValueFunction(TabularValueFunction): def __init__( self, @@ -107,7 +150,7 @@ def __init__( env: Environment, gamma: float = 0.99, threshold: float = 0.0001, - ): + ) -> None: super().__init__(policy, env, gamma) self._env = env self._threshold = threshold @@ -118,12 +161,12 @@ def state_value(self, state: State, horizon: int = -1) -> float: self._evaluate() return self._state_value(state) - def reset(self, clear_state_values: bool = False): + def reset(self, clear_state_values: bool = False) -> None: self._evaluated = False if clear_state_values: self._state_values.clear() - def _evaluate(self): + def _evaluate(self) -> None: delta = float("inf") while delta >= self._threshold: delta = 0.0 @@ -145,7 +188,7 @@ def _evaluate(self): class DPTrainer(object): - def __init__(self, env: Environment, policy: 
TabularPolicy): + def __init__(self, env: Environment, policy: TabularPolicy) -> None: self._env = env self._policy = policy @@ -153,7 +196,7 @@ def __init__(self, env: Environment, policy: TabularPolicy): def _state_value(state: State, state_values: Mapping[State, float]) -> float: return 0.0 if state not in state_values else state_values[state] - def train(self, gamma: float = 0.9, threshold: float = 0.0001): + def train(self, gamma: float = 0.9, threshold: float = 0.0001) -> DPValueFunction: stable = False valfunc = DPValueFunction(self._policy, self._env, gamma, threshold) while not stable: @@ -192,7 +235,7 @@ def __init__( first_visit: bool = True, count_threshold: int = 100, max_iteration: int = 200, - ): + ) -> None: super().__init__(policy, env, gamma) self._env = env self._first_visit = first_visit @@ -201,7 +244,7 @@ def __init__( self._log_generator = PolicyLogGenerator(env, policy) self._state_counts = {} - def _state_value(self, state: State): + def _state_value(self, state: State) -> float: i = 0 state_count = self._state_counts[state] if state in self._state_counts else 0 while state_count < self._count_threshold and i < self._max_iteration: @@ -241,7 +284,7 @@ def _state_value(self, state: State): ) return super()._state_value(state) - def _update_state_value(self, state: State, g: float): + def _update_state_value(self, state: State, g: float) -> None: sv = super()._state_value(state) sc = self._state_counts[state] if state in self._state_counts else 0 sc += 1 @@ -252,14 +295,14 @@ def _update_state_value(self, state: State, g: float): def state_value(self, state: State) -> float: return self._state_value(state) - def reset(self, clear_state_values: bool = False): + def reset(self, clear_state_values: bool = False) -> None: if clear_state_values: self._state_values.clear() self._state_counts.clear() class MonteCarloTrainer(object): - def __init__(self, env: Environment, policy: TabularPolicy): + def __init__(self, env: Environment, policy: TabularPolicy) -> None: self._env = env self._policy = policy self._log_generator = PolicyLogGenerator(env, policy) @@ -270,7 +313,7 @@ def train( gamma: float = 0.9, first_visit: bool = True, update_interval: int = 20, - ): + ) -> None: i = 0 value_counts = {} while i < iterations: @@ -315,7 +358,7 @@ def train( if i % update_interval == 0 and self._update_policy(value_counts): break - def _update_state_value(self, value_counts, state, action, g: float): + def _update_state_value(self, value_counts, state, action, g: float) -> None: key = (state, action) sv, sc = value_counts[key] if key in value_counts else (0.0, 0) sc += 1 diff --git a/reagent/ope/utils.py b/reagent/ope/utils.py index f602814dc..f3fe07425 100644 --- a/reagent/ope/utils.py +++ b/reagent/ope/utils.py @@ -1,5 +1,7 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
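EstimatedStateValueFunction above (like the Monte Carlo value function it complements) scores a start state by rolling out episodes under the policy, computing each episode's discounted return, and keeping a running average of those returns. Stripped of the environment and policy machinery, that computation reduces to the sketch below; the reward sequences are made up for illustration:

def discounted_return(rewards, gamma):
    # sum of gamma**t * r_t, the inner loop of _estimate_value above
    total, discount = 0.0, 1.0
    for r in rewards:
        total += discount * r
        discount *= gamma
    return total

episodes = [[1.0, 0.0, 1.0], [0.0, 1.0], [1.0]]  # illustrative reward sequences
gamma = 0.99

avg, count = 0.0, 0
for rewards in episodes:
    count += 1
    avg += (discounted_return(rewards, gamma) - avg) / count  # running average

print(avg)  # Monte Carlo estimate of the start state's value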
+import math from collections import OrderedDict from typing import Sequence, Union @@ -7,6 +9,10 @@ import torch +DEFAULT_MIN = float("-inf") +DEFAULT_MAX = float("inf") + + def convert_to_one_hots(a, num_classes: int, dtype=torch.int, device=None): """ Convert class index array (num_sample,) to an one hots array @@ -26,7 +32,7 @@ def convert_to_one_hots(a, num_classes: int, dtype=torch.int, device=None): class LRUCache(OrderedDict): - def __init__(self, maxsize=2 ** 10, *args, **kwds): + def __init__(self, maxsize=2**10, *args, **kwds): self.maxsize = maxsize super().__init__(*args, **kwds) @@ -42,13 +48,16 @@ def __setitem__(self, key, value): class RunningAverage: - def __init__(self): - self._average = 0.0 - self._count = 0 + def __init__(self, init_val: float = float("nan")): + self._average = init_val + self._count = 0 if math.isnan(init_val) else 1 def add(self, value) -> "RunningAverage": - self._count += 1 - self._average = self._average + (float(value) - self._average) / self._count + if not math.isnan(value) and not math.isinf(value): + if self._count == 0: + self._average = 0.0 + self._count += 1 + self._average = self._average + (float(value) - self._average) / self._count return self @property @@ -63,11 +72,14 @@ def count(self): def total(self): return self._average * self._count + def __float__(self): + return self._average + class Clamper: - def __init__(self, min: float = None, max: float = None): - self._min = min if min is not None else float("-inf") - self._max = max if max is not None else float("inf") + def __init__(self, min_v: float = DEFAULT_MIN, max_v: float = DEFAULT_MAX): + self._min = min_v + self._max = max_v if self._min >= self._max: raise ValueError(f"min[{min}] greater than max[{max}]") @@ -82,3 +94,6 @@ def __call__( return [max(self._min, min(self._max, float(i))) for i in v] else: return max(self._min, min(self._max, float(v))) + + def __repr__(self): + return f"Clamper({self._min},{self._max})" diff --git a/reagent/optimizer/__init__.py b/reagent/optimizer/__init__.py index e5a0d9b48..34a8c7f47 100644 --- a/reagent/optimizer/__init__.py +++ b/reagent/optimizer/__init__.py @@ -1 +1,8 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from .soft_update import SoftUpdate +from .union import Optimizer__Union + + +__all__ = ["Optimizer__Union", "SoftUpdate"] diff --git a/reagent/optimizer/optimizer.py b/reagent/optimizer/optimizer.py index be74d63e8..d07b4855b 100644 --- a/reagent/optimizer/optimizer.py +++ b/reagent/optimizer/optimizer.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. """ For each Torch optimizer, we create a wrapper pydantic dataclass around it. @@ -33,7 +34,7 @@ class Parameters: class Trainer: def __init__(self, network, params): - self.optimizer = params.optimizer.make_optimizer(network.parameters()) + self.optimizer = params.optimizer.make_optimizer_scheduler(network.parameters())["optimizer"] def train(self, data): ... 
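The reworked RunningAverage above can be seeded with an initial value, silently skips NaN and infinite samples, and is convertible with float(). The class below is a self-contained copy of that logic (so the example runs without ReAgent installed), exercised on the new behaviors:

import math

class RunningAvg:
    """Local sketch of the updated RunningAverage semantics."""
    def __init__(self, init_val=float("nan")):
        self._average = init_val
        self._count = 0 if math.isnan(init_val) else 1

    def add(self, value):
        if not math.isnan(value) and not math.isinf(value):
            if self._count == 0:
                self._average = 0.0
            self._count += 1
            self._average += (float(value) - self._average) / self._count
        return self

    @property
    def average(self):
        return self._average

ra = RunningAvg()                         # starts empty: average is NaN, count 0
ra.add(float("inf")).add(2.0).add(4.0)    # the inf sample is ignored
print(ra.average)                         # 3.0
print(RunningAvg(5.0).add(1.0).average)   # seeded with one sample: (5 + 1) / 2 = 3.0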
@@ -42,37 +43,27 @@ def train(self, data): self.optimizer.step() """ import inspect -from typing import List +from typing import Dict, List, Union import torch from reagent.core.dataclasses import dataclass, field from reagent.core.registry_meta import RegistryMeta -from .scheduler_union import LearningRateScheduler__Union +from .scheduler import LearningRateSchedulerConfig from .utils import is_torch_optimizer -@dataclass(frozen=True) -class Optimizer: - # This is the wrapper for optimizer + scheduler - optimizer: torch.optim.Optimizer - lr_schedulers: List[torch.optim.lr_scheduler._LRScheduler] - - def step(self): - self.optimizer.step() - for lr_scheduler in self.lr_schedulers: - lr_scheduler.step() - - def __getattr__(self, attr): - return getattr(self.optimizer, attr) - - @dataclass(frozen=True) class OptimizerConfig(metaclass=RegistryMeta): # optional config if you want to use (potentially chained) lr scheduler - lr_schedulers: List[LearningRateScheduler__Union] = field(default_factory=list) - - def make_optimizer(self, params) -> Optimizer: + lr_schedulers: List[LearningRateSchedulerConfig] = field(default_factory=list) + + def make_optimizer_scheduler( + self, params + ) -> Dict[str, Union[torch.optim.Optimizer, torch.optim.lr_scheduler._LRScheduler]]: + assert ( + len(self.lr_schedulers) <= 1 + ), "Multiple schedulers for one optimizer is no longer supported" # Assuming the classname is the same as the torch class name torch_optimizer_class = getattr(torch.optim, type(self).__name__) assert is_torch_optimizer( @@ -81,11 +72,11 @@ def make_optimizer(self, params) -> Optimizer: filtered_args = { k: getattr(self, k) for k in inspect.signature(torch_optimizer_class).parameters - if k != "params" + if k != "params" and hasattr(self, k) } optimizer = torch_optimizer_class(params=params, **filtered_args) - lr_schedulers = [ - lr_scheduler.make_from_optimizer(optimizer) - for lr_scheduler in self.lr_schedulers - ] - return Optimizer(optimizer=optimizer, lr_schedulers=lr_schedulers) + if len(self.lr_schedulers) == 0: + return {"optimizer": optimizer} + else: + lr_scheduler = self.lr_schedulers[0].make_from_optimizer(optimizer) + return {"optimizer": optimizer, "lr_scheduler": lr_scheduler} diff --git a/reagent/optimizer/scheduler.py b/reagent/optimizer/scheduler.py index db7b1e4d9..5056ae9bc 100644 --- a/reagent/optimizer/scheduler.py +++ b/reagent/optimizer/scheduler.py @@ -1,4 +1,8 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import inspect +from typing import Any, Dict import torch from reagent.core.dataclasses import dataclass @@ -18,4 +22,16 @@ def make_from_optimizer( assert is_torch_lr_scheduler( torch_lr_scheduler_class ), f"{torch_lr_scheduler_class} is not a scheduler." - return torch_lr_scheduler_class(optimizer=optimizer, **vars(self)) + + filtered_args = { + k: getattr(self, k) + for k in inspect.signature(torch_lr_scheduler_class).parameters + if k != "optimizer" + } + + self.decode_lambdas(filtered_args) + + return torch_lr_scheduler_class(optimizer=optimizer, **filtered_args) + + def decode_lambdas(self, args: Dict[str, Any]) -> None: + pass diff --git a/reagent/optimizer/scheduler_union.py b/reagent/optimizer/scheduler_union.py index 48bdf5f51..c82919cdb 100644 --- a/reagent/optimizer/scheduler_union.py +++ b/reagent/optimizer/scheduler_union.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
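Following the change above, make_optimizer_scheduler() hands back a dict with an "optimizer" entry and, when a scheduler is configured, a single "lr_scheduler" entry, and the caller is expected to step both. The loop below sketches that contract with plain torch objects standing in for the ReAgent config classes:

import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
# shape of the dict make_optimizer_scheduler() is expected to return
bundle = {"optimizer": opt, "lr_scheduler": torch.optim.lr_scheduler.StepLR(opt, step_size=10)}

optimizer = bundle["optimizer"]
scheduler = bundle.get("lr_scheduler")  # absent when no scheduler is configured

for _ in range(3):
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if scheduler is not None:
        scheduler.step()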
import logging from typing import List @@ -6,6 +7,7 @@ import reagent.optimizer.uninferrable_schedulers as cannot_be_inferred import torch from reagent.core.configuration import make_config_class, param_hash +from reagent.core.fb_checker import IS_FB_ENVIRONMENT from reagent.core.tagged_union import TaggedUnion from .scheduler import LearningRateSchedulerConfig @@ -13,7 +15,13 @@ logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) + + +cannot_be_inferred_modules = [cannot_be_inferred] +if IS_FB_ENVIRONMENT: + import reagent.optimizer.fb.uninferrable_schedulers as fb_cannot_be_inferred + + cannot_be_inferred_modules.append(fb_cannot_be_inferred) def get_torch_lr_schedulers() -> List[str]: @@ -27,18 +35,24 @@ def get_torch_lr_schedulers() -> List[str]: classes = {} for name in get_torch_lr_schedulers(): - if hasattr(cannot_be_inferred, name): + cannot_be_inferred_module = None + for module in cannot_be_inferred_modules: + if hasattr(module, name): + cannot_be_inferred_module = module + break + + if cannot_be_inferred_module is not None: # these were manually filled in. - subclass = getattr(cannot_be_inferred, name) + subclass = getattr(cannot_be_inferred_module, name) else: torch_lr_scheduler_class = getattr(torch.optim.lr_scheduler, name) subclass = type( name, - # must subclass Optimizer to be added to the Registry + # must subclass LearningRateSchedulerConfig to be added to the Registry (LearningRateSchedulerConfig,), {"__module__": __name__}, ) - make_config_class(torch_lr_scheduler_class, blacklist=["optimizer"])(subclass) + make_config_class(torch_lr_scheduler_class, blocklist=["optimizer"])(subclass) subclass.__hash__ = param_hash classes[name] = subclass @@ -46,7 +60,4 @@ def get_torch_lr_schedulers() -> List[str]: @LearningRateSchedulerConfig.fill_union() class LearningRateScheduler__Union(TaggedUnion): - def make_from_optimizer( - self, optimizer: torch.optim.Optimizer - ) -> torch.optim.lr_scheduler._LRScheduler: - return self.value.make_from_optimizer(optimizer) + pass diff --git a/reagent/optimizer/soft_update.py b/reagent/optimizer/soft_update.py new file mode 100644 index 000000000..e5cb604bd --- /dev/null +++ b/reagent/optimizer/soft_update.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import torch + + +class SoftUpdate(torch.optim.Optimizer): + def __init__(self, target_params, source_params, tau: float = 0.1) -> None: + """ + Perform soft-update on target_params. 
Soft-update gradually blends + source_params into target_params with this update equation: + + target_param = tau * source_param + (1 - tau) * target_param + """ + target_params = list(target_params) + source_params = list(source_params) + + if len(target_params) != len(source_params): + raise ValueError( + "target and source must have the same number of parameters" + ) + + for t_param, s_param in zip(target_params, source_params): + if t_param.shape != s_param.shape: + raise ValueError( + "The shape of target parameter doesn't match that of the source" + ) + + params = target_params + source_params + defaults = dict( + tau=tau, lr=1.0 + ) # set a dummy learning rate because optimizers are expected to have one + super().__init__(params, defaults) + + for group in self.param_groups: + tau = group["tau"] + if tau > 1.0 or tau < 0.0: + raise ValueError(f"tau should be in [0.0, 1.0]; got {tau}") + + @classmethod + def make_optimizer_scheduler(cls, target_params, source_params, tau): + su = cls(target_params, source_params, tau) + return {"optimizer": su} + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params = group["params"] + n = len(params) + tau = group["tau"] + for target_param, source_param in zip(params[: n // 2], params[n // 2 :]): + if target_param is source_param: + # skip soft-updating when the target network share s the parameter with + # the network being train. + continue + new_param = tau * source_param.data + (1.0 - tau) * target_param.data + target_param.data.copy_(new_param) + return loss diff --git a/reagent/optimizer/uninferrable_optimizers.py b/reagent/optimizer/uninferrable_optimizers.py index 3e20f9d11..f4fbb5714 100644 --- a/reagent/optimizer/uninferrable_optimizers.py +++ b/reagent/optimizer/uninferrable_optimizers.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
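SoftUpdate above stores the target parameters followed by the source parameters in one param group and, on every step(), moves each target parameter a fraction tau of the way toward its source counterpart. A small usage sketch, assuming ReAgent is installed so the module added above is importable:

import torch
from reagent.optimizer.soft_update import SoftUpdate

target_net = torch.nn.Linear(4, 2)
source_net = torch.nn.Linear(4, 2)

# tau = 0.1: each step moves the target weights 10% of the way to the source weights
soft_update = SoftUpdate(target_net.parameters(), source_net.parameters(), tau=0.1)

before = target_net.weight.detach().clone()
soft_update.step()
after = target_net.weight.detach()

expected = 0.1 * source_net.weight.detach() + 0.9 * before
print(torch.allclose(after, expected))  # True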
""" This file contains configs that could not be inferred from the default values @@ -23,6 +24,43 @@ class Adam(OptimizerConfig): eps: float = 1e-08 weight_decay: float = 0 amsgrad: bool = False + maximize: bool = False + foreach: Optional[bool] = None + capturable: bool = False + differentiable: bool = False + + +@dataclass(frozen=True) +class NAdam(OptimizerConfig): + lr: float = 0.001 + betas: Tuple[float, float] = (0.9, 0.999) + eps: float = 1e-08 + weight_decay: float = 0 + momentum_decay: float = 4e-3 + maximize: bool = False + foreach: Optional[bool] = None + + +@dataclass(frozen=True) +class RAdam(OptimizerConfig): + lr: float = 0.001 + betas: Tuple[float, float] = (0.9, 0.999) + eps: float = 1e-08 + weight_decay: float = 0 + maximize: bool = False + foreach: Optional[bool] = None + + +@dataclass(frozen=True) +class SGD(OptimizerConfig): + lr: float = 0.001 + momentum: float = 0.0 + weight_decay: float = 0.0 + dampening: float = 0.0 + nesterov: bool = False + maximize: bool = False + foreach: Optional[bool] = None + differentiable: bool = False @dataclass(frozen=True) @@ -32,6 +70,9 @@ class AdamW(OptimizerConfig): eps: float = 1e-08 weight_decay: float = 0.01 amsgrad: bool = False + maximize: bool = False + foreach: Optional[bool] = None + capturable: bool = False @dataclass(frozen=True) @@ -39,6 +80,7 @@ class SparseAdam(OptimizerConfig): lr: float = 0.001 betas: Tuple[float, float] = (0.9, 0.999) eps: float = 1e-08 + maximize: bool = False @dataclass(frozen=True) @@ -47,6 +89,8 @@ class Adamax(OptimizerConfig): betas: Tuple[float, float] = (0.9, 0.999) eps: float = 1e-08 weight_decay: float = 0 + maximize: bool = False + foreach: Optional[bool] = None @dataclass(frozen=True) @@ -58,6 +102,7 @@ class LBFGS(OptimizerConfig): tolerance_change: float = 1e-09 history_size: int = 100 line_search_fn: Optional[str] = None + maximize: bool = False @dataclass(frozen=True) @@ -65,3 +110,5 @@ class Rprop(OptimizerConfig): lr: float = 0.01 etas: Tuple[float, float] = (0.5, 1.2) step_sizes: Tuple[float, float] = (1e-06, 50) + maximize: bool = False + foreach: Optional[bool] = None diff --git a/reagent/optimizer/uninferrable_schedulers.py b/reagent/optimizer/uninferrable_schedulers.py index 2af26d52b..462a893d2 100644 --- a/reagent/optimizer/uninferrable_schedulers.py +++ b/reagent/optimizer/uninferrable_schedulers.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. """ This file contains configs that could not be inferred from the default values @@ -8,32 +9,92 @@ - tuple - None - required parameters (no default value) + +Sometimes there are no defaults to infer from, so we got to include those here. TODO: remove this file once we can infer everything. 
""" -from typing import List, Optional, Union +from typing import Any, Callable, Dict, List, Optional, Union from reagent.core.dataclasses import dataclass +from reagent.core.fb_checker import IS_FB_ENVIRONMENT from .scheduler import LearningRateSchedulerConfig +# Inside FB, we have more sophisticated classes to serialize Callables +if not IS_FB_ENVIRONMENT: + + # To allow string-based configuration, we need these Mixins to convert + # from strings to Callables + class _LRLambdaMixin(object): + def decode_lambdas(self, args: Dict[str, Any]) -> None: + lr_lambda = args.get("lr_lambda") + if type(lr_lambda) is str: + args["lr_lambda"] = eval(lr_lambda) # noqa + + class _ScaleFnLambdaMixin(object): + def decode_lambdas(self, args: Dict[str, Any]) -> None: + scale_fn = args.get("scale_fn") + if type(scale_fn) is str: + args["scale_fn"] = eval(scale_fn) # noqa + + @dataclass(frozen=True) + class LambdaLR(_LRLambdaMixin, LearningRateSchedulerConfig): + lr_lambda: Union[str, Callable[[int], float], List[Callable[[int], float]]] + last_epoch: int = -1 + verbose: bool = False + + @dataclass(frozen=True) + class MultiplicativeLR(_LRLambdaMixin, LearningRateSchedulerConfig): + lr_lambda: Union[str, Callable[[int], float], List[Callable[[int], float]]] + last_epoch: int = -1 + verbose: bool = False + + @dataclass(frozen=True) + class CyclicLR(_ScaleFnLambdaMixin, LearningRateSchedulerConfig): + base_lr: Union[float, List[float]] + max_lr: Union[float, List[float]] + step_size_up: int = 2000 + step_size_down: Optional[int] = None + mode: str = "triangular" + gamma: float = 1.0 + scale_fn: Optional[Union[str, Callable[[int], float]]] = None + scale_mode: str = "cycle" + cycle_momentum: bool = True + base_momentum: float = 0.8 + max_momentum: float = 0.9 + last_epoch: int = -1 + verbose: bool = False + + +@dataclass(frozen=True) +class StepLR(LearningRateSchedulerConfig): + step_size: int + gamma: float = 0.1 + last_epoch: int = -1 + verbose: bool = False + @dataclass(frozen=True) -class CyclicLR(LearningRateSchedulerConfig): - # scale_fn is Callable, which FBL doesn't support. 
- # TODO(T67530507) Add a scale function factory (FBL doesn't allow callables) - pass - # base_lr: Union[float, List[float]] - # max_lr: Union[float, List[float]] - # step_size_up: int = 2000 - # step_size_down: Optional[int] = None - # mode: str = "triangular" - # gamma: float = 1.0 - # scale_fn: Optional[Callable[[int], float]] = None - # scale_mode: str = "cycle" - # cycle_momentum: bool = True - # base_momentum: float = 0.8 - # max_momentum: float = 0.9 - # last_epoch: int = -1 +class MultiStepLR(LearningRateSchedulerConfig): + milestones: List[int] + gamma: float = 0.1 + last_epoch: int = -1 + verbose: bool = False + + +@dataclass(frozen=True) +class ExponentialLR(LearningRateSchedulerConfig): + gamma: float + last_epoch: int = -1 + verbose: bool = False + + +@dataclass(frozen=True) +class CosineAnnealingLR(LearningRateSchedulerConfig): + T_max: int + eta_min: float = 0 + last_epoch: int = -1 + verbose: bool = False @dataclass(frozen=True) @@ -50,6 +111,8 @@ class OneCycleLR(LearningRateSchedulerConfig): div_factor: float = 25.0 final_div_factor: float = 10000.0 last_epoch: int = -1 + three_phase: bool = False + verbose: bool = False @dataclass(frozen=True) @@ -58,3 +121,4 @@ class CosineAnnealingWarmRestarts(LearningRateSchedulerConfig): T_mult: int = 1 eta_min: float = 0 last_epoch: int = -1 + verbose: bool = False diff --git a/reagent/optimizer/union.py b/reagent/optimizer/union.py index 9f880b214..a2e047dc9 100644 --- a/reagent/optimizer/union.py +++ b/reagent/optimizer/union.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging from typing import List @@ -13,7 +14,6 @@ logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) def get_torch_optimizers() -> List[str]: @@ -41,7 +41,7 @@ def get_torch_optimizers() -> List[str]: {}, ) # fill in optimizer parameters (except params) - make_config_class(torch_optimizer_class, blacklist=["params"])(subclass) + make_config_class(torch_optimizer_class, blocklist=["params"])(subclass) subclass.__hash__ = param_hash classes[name] = subclass @@ -51,12 +51,12 @@ def get_torch_optimizers() -> List[str]: class Optimizer__Union(TaggedUnion): @classmethod def default(cls, **kwargs): - """ Return default factory for Optimizer (defaulting to Adam). """ + """Return default factory for Optimizer (defaulting to Adam).""" return ( cls(Adam=classes["Adam"]()) if kwargs == {} - else lambda: cls(Adam=classes["Adam"](**kwargs)) + else cls(Adam=classes["Adam"](**kwargs)) ) - def make_optimizer(self, params): - return self.value.make_optimizer(params) + def make_optimizer_scheduler(self, params): + return self.value.make_optimizer_scheduler(params) diff --git a/reagent/optimizer/utils.py b/reagent/optimizer/utils.py index cc632eb0c..2a918eaeb 100644 --- a/reagent/optimizer/utils.py +++ b/reagent/optimizer/utils.py @@ -1,11 +1,12 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
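The _LRLambdaMixin above lets a config carry lr_lambda as a string, which decode_lambdas() eval's into a callable before the scheduler is constructed. The snippet below sketches that decoding with plain torch objects rather than the ReAgent config classes; the decay factor is an arbitrary example:

import torch

model = torch.nn.Linear(2, 2)
config_args = {"lr_lambda": "lambda epoch: 0.95 ** epoch", "last_epoch": -1}

# decode_lambdas: a string-valued lr_lambda is eval'ed into a callable so it can
# be handed to torch.optim.lr_scheduler.LambdaLR
if isinstance(config_args["lr_lambda"], str):
    config_args["lr_lambda"] = eval(config_args["lr_lambda"])  # noqa

optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, **config_args)
for _ in range(3):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # [0.1 * 0.95 ** 3]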
import inspect import torch -def is_strict_subclass(a, b): +def is_strict_subclass(a: object, b: object): if not inspect.isclass(a) or not inspect.isclass(b): return False return issubclass(a, b) and a != b @@ -16,4 +17,6 @@ def is_torch_optimizer(cls): def is_torch_lr_scheduler(cls): - return is_strict_subclass(cls, torch.optim.lr_scheduler._LRScheduler) + return is_strict_subclass( + cls, torch.optim.lr_scheduler._LRScheduler + ) or is_strict_subclass(cls, torch.optim.lr_scheduler.LRScheduler) diff --git a/reagent/parameters_seq2slate.py b/reagent/parameters_seq2slate.py deleted file mode 100644 index dab4abea0..000000000 --- a/reagent/parameters_seq2slate.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -from enum import Enum -from typing import Optional - -from reagent.core.dataclasses import dataclass - - -class LearningMethod(Enum): - TEACHER_FORCING = "teacher_forcing" - REINFORCEMENT_LEARNING = "reinforcement_learning" - DIFFERENTIABLE_REWARD = "differentiable_reward" - PAIRWISE_ATTENTION = "pairwise_attention" - SIMULATION = "simulation" - - @property - def expect_slate_wise_reward(self): - return self in ( - LearningMethod.REINFORCEMENT_LEARNING, - LearningMethod.SIMULATION, - ) - - -@dataclass(frozen=True) -class RewardClamp: - clamp_min: Optional[float] = None - clamp_max: Optional[float] = None diff --git a/reagent/prediction/cfeval/__init__.py b/reagent/prediction/cfeval/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/prediction/cfeval/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/prediction/cfeval/predictor_wrapper.py b/reagent/prediction/cfeval/predictor_wrapper.py new file mode 100644 index 000000000..2d49915c2 --- /dev/null +++ b/reagent/prediction/cfeval/predictor_wrapper.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
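Back to the reagent/optimizer/utils.py hunk above: the widened is_torch_lr_scheduler check accepts schedulers derived from either the legacy private base class or the public LRScheduler name used by newer torch releases. A small sanity check, assuming a torch version that exposes both names:

import torch
from reagent.optimizer.utils import is_torch_lr_scheduler, is_torch_optimizer

assert is_torch_optimizer(torch.optim.Adam)
assert is_torch_lr_scheduler(torch.optim.lr_scheduler.StepLR)
assert not is_torch_lr_scheduler(torch.optim.Adam)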
+ +import logging +from typing import List, Tuple + +import torch +from reagent.core import types as rlt +from reagent.prediction.predictor_wrapper import DiscreteDqnWithPreprocessor + +logger = logging.getLogger(__name__) + + +class BanditRewardNetPredictorWrapper(torch.jit.ScriptModule): + def __init__( + self, + reward_model_with_preprocessor: DiscreteDqnWithPreprocessor, + action_names: List[str], + state_feature_config: rlt.ModelFeatureConfig, + ) -> None: + super().__init__() + self.reward_model_with_preprocessor = torch.jit.trace( + reward_model_with_preprocessor, + reward_model_with_preprocessor.input_prototype(), + ) + self.action_names = torch.jit.Attribute(action_names, List[str]) + + @torch.jit.script_method + def forward( + self, state: rlt.ServingFeatureData + ) -> Tuple[torch.Tensor, torch.Tensor]: + reward_predictions = self.reward_model_with_preprocessor(state) + num_examples = reward_predictions.size()[0] + num_actions = len(self.action_names) + assert reward_predictions.shape == ( + num_examples, + num_actions, + ), f"Invalid shape {reward_predictions.shape} != ({num_examples}, {num_actions})" + mask = torch.ones_like(reward_predictions, dtype=torch.uint8) + return (reward_predictions, mask) diff --git a/reagent/prediction/dqn_torch_predictor.py b/reagent/prediction/dqn_torch_predictor.py deleted file mode 100644 index 7aaa77b7a..000000000 --- a/reagent/prediction/dqn_torch_predictor.py +++ /dev/null @@ -1,243 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -import logging -from typing import Dict, List, Optional, Tuple - -import numpy as np -import torch -from reagent.preprocessing.sparse_to_dense import PythonSparseToDenseProcessor -from reagent.torch_utils import masked_softmax -from reagent.types import DqnPolicyActionSet, SacPolicyActionSet - - -logger = logging.getLogger(__name__) - - -class DiscreteDqnTorchPredictor: - def __init__(self, model) -> None: - self.model = model - self.internal_sparse_to_dense = PythonSparseToDenseProcessor( - self.model.state_sorted_features() - ) - self.softmax_temperature: Optional[float] = None - - def predict(self, state_features: List[Dict[int, float]]) -> List[Dict[str, float]]: - ( - dense_state_features, - dense_state_feature_exist_mask, - ) = self.internal_sparse_to_dense(state_features) - action_names, values = self.model( - (dense_state_features, dense_state_feature_exist_mask) - ) - retval = [] - for i in range(values.size()[0]): - retval_item: Dict[str, float] = {} - for j, action in enumerate(action_names): - retval_item[action] = values[i][j] - retval.append(retval_item) - return retval - - def policy( - self, - state: torch.Tensor, - state_feature_presence: Optional[torch.Tensor] = None, - possible_actions_presence: Optional[torch.Tensor] = None, - ) -> DqnPolicyActionSet: - assert state.size()[0] == 1, "Only pass in one state when getting a policy" - assert ( - self.softmax_temperature is not None - ), "Please set the softmax temperature before calling policy()" - - if state_feature_presence is None: - state_feature_presence = torch.ones_like(state) - action_names, q_scores = self.model((state, state_feature_presence)) - - return self.policy_given_q_values( - q_scores, - action_names, - # pyre-fixme[6]: Expected `float` for 3rd param but got `Optional[float]`. 
- self.softmax_temperature, - possible_actions_presence, - ) - - @staticmethod - def policy_given_q_values( - q_scores: torch.Tensor, - action_names: List[str], - softmax_temperature: float, - possible_actions_presence: Optional[torch.Tensor] = None, - ) -> DqnPolicyActionSet: - assert q_scores.shape[0] == 1 and len(q_scores.shape) == 2 - - if possible_actions_presence is None: - possible_actions_presence = torch.ones_like(q_scores) - possible_actions_presence = possible_actions_presence.reshape(1, -1) - assert possible_actions_presence.shape == q_scores.shape - - # set impossible actions so low that they can't be picked - q_scores -= (1.0 - possible_actions_presence) * 1e10 - - q_scores_softmax = ( - masked_softmax(q_scores, possible_actions_presence, softmax_temperature) - .detach() - .numpy()[0] - ) - if np.isnan(q_scores_softmax).any() or np.max(q_scores_softmax) < 1e-3: - q_scores_softmax[:] = 1.0 / q_scores_softmax.shape[0] - greedy_act_idx = int(torch.argmax(q_scores)) - softmax_act_idx = int(np.random.choice(q_scores.size()[1], p=q_scores_softmax)) - - return DqnPolicyActionSet( - greedy=greedy_act_idx, - softmax=softmax_act_idx, - greedy_act_name=action_names[greedy_act_idx], - softmax_act_name=action_names[softmax_act_idx], - softmax_act_prob=q_scores_softmax[softmax_act_idx], - ) - - def policy_net(self) -> bool: - return False - - def discrete_action(self) -> bool: - return True - - -class ParametricDqnTorchPredictor: - def __init__(self, model) -> None: - self.model = model - self.state_internal_sparse_to_dense = PythonSparseToDenseProcessor( - self.model.state_sorted_features() - ) - self.action_internal_sparse_to_dense = PythonSparseToDenseProcessor( - self.model.action_sorted_features() - ) - self.softmax_temperature: Optional[float] = None - - def predict( - self, - state_features: List[Dict[int, float]], - action_features: List[Dict[int, float]], - ) -> List[Dict[str, float]]: - ( - dense_state_features, - dense_state_feature_exist_mask, - ) = self.state_internal_sparse_to_dense(state_features) - ( - dense_action_features, - dense_action_feature_exist_mask, - ) = self.action_internal_sparse_to_dense(action_features) - action_names, values = self.model( - (dense_state_features, dense_state_feature_exist_mask), - (dense_action_features, dense_action_feature_exist_mask), - ) - retval = [] - for i in range(values.size()[0]): - retval_item: Dict[str, float] = {} - for j, action in enumerate(action_names): - retval_item[action] = values[i][j] - retval.append(retval_item) - return retval - - def policy( - self, - tiled_states: torch.Tensor, - possible_actions_with_presence: Tuple[torch.Tensor, torch.Tensor], - ): - possible_actions, possible_actions_presence = possible_actions_with_presence - assert tiled_states.size()[0] == possible_actions.size()[0] - assert possible_actions.size()[0] == possible_actions_presence.size()[0] - assert ( - self.softmax_temperature is not None - ), "Please set the softmax temperature before calling policy()" - - state_feature_presence = torch.ones_like(tiled_states) - _, q_scores = self.model( - (tiled_states, state_feature_presence), possible_actions_with_presence - ) - q_scores = q_scores.reshape(1, -1) - - return self.policy_given_q_values( - q_scores, - # pyre-fixme[6]: Expected `float` for 2nd param but got `Optional[float]`. 
- self.softmax_temperature, - torch.ones_like(q_scores), - ) - - @staticmethod - def policy_given_q_values( - q_scores: torch.Tensor, - softmax_temperature: float, - possible_actions_presence: torch.Tensor, - ) -> DqnPolicyActionSet: - assert q_scores.shape[0] == 1 and len(q_scores.shape) == 2 - possible_actions_presence = possible_actions_presence.reshape(1, -1) - assert possible_actions_presence.shape == q_scores.shape - - # set impossible actions so low that they can't be picked - q_scores -= (1.0 - possible_actions_presence) * 1e10 - - q_scores_softmax_numpy = ( - masked_softmax( - q_scores.reshape(1, -1), possible_actions_presence, softmax_temperature - ) - .detach() - .numpy()[0] - ) - if ( - np.isnan(q_scores_softmax_numpy).any() - or np.max(q_scores_softmax_numpy) < 1e-3 - ): - q_scores_softmax_numpy[:] = 1.0 / q_scores_softmax_numpy.shape[0] - - greedy_act_idx = int(torch.argmax(q_scores)) - softmax_act_idx = int( - np.random.choice(q_scores.size()[1], p=q_scores_softmax_numpy) - ) - return DqnPolicyActionSet( - greedy=greedy_act_idx, - softmax=softmax_act_idx, - softmax_act_prob=float(q_scores_softmax_numpy[softmax_act_idx]), - ) - - def policy_net(self) -> bool: - return False - - def discrete_action(self) -> bool: - return False - - -class ActorTorchPredictor: - def __init__(self, model, action_feature_ids: List[int]) -> None: - self.model = model - self.internal_sparse_to_dense = PythonSparseToDenseProcessor( - self.model.state_sorted_features() - ) - self.action_feature_ids = action_feature_ids - - def predict(self, state_features: List[Dict[int, float]]) -> List[Dict[str, float]]: - ( - dense_state_features, - dense_state_feature_exist_mask, - ) = self.internal_sparse_to_dense(state_features) - actions = self.model((dense_state_features, dense_state_feature_exist_mask)) - assert actions.shape[1:] == (len(self.action_feature_ids),) - retval = [ - {str(fid): val.item() for fid, val in zip(self.action_feature_ids, action)} - for action in actions - ] - return retval - - def actor_prediction( - self, float_state_features: List[Dict[int, float]] - ) -> List[Dict[str, float]]: - return self.predict(float_state_features) - - def policy_net(self) -> bool: - return True - - def policy(self, states: torch.Tensor) -> SacPolicyActionSet: - state_masks = torch.ones_like(states, dtype=torch.bool) - actions = self.model((states, state_masks)).detach() - assert actions.shape[1:] == (len(self.action_feature_ids),) - return SacPolicyActionSet(greedy=actions.cpu(), greedy_propensity=1.0) diff --git a/reagent/prediction/predictor_wrapper.py b/reagent/prediction/predictor_wrapper.py index ce28d43e2..7e9ab98d7 100644 --- a/reagent/prediction/predictor_wrapper.py +++ b/reagent/prediction/predictor_wrapper.py @@ -4,19 +4,89 @@ import logging from typing import Dict, List, Optional, Tuple -import reagent.types as rlt +import reagent.core.types as rlt import torch +import torch.nn.functional as F +from reagent.core.torch_utils import gather +from reagent.model_utils.seq2slate_utils import Seq2SlateMode, Seq2SlateOutputArch from reagent.models.base import ModelBase -from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet +from reagent.models.seq2slate import Seq2SlateTransformerNet +from reagent.models.seq2slate_reward import Seq2SlateRewardNetBase from reagent.preprocessing.postprocessor import Postprocessor from reagent.preprocessing.preprocessor import Preprocessor +from reagent.preprocessing.sparse_preprocessor import ( + make_sparse_preprocessor, + SparsePreprocessor, +) +from 
reagent.training.utils import gen_permutations +from reagent.training.world_model.seq2reward_trainer import get_Q from torch import nn logger = logging.getLogger(__name__) +_DEFAULT_FEATURE_IDS = [] + +FAKE_STATE_FEATURE_ID = 1111111 +FAKE_STATE_ID_LIST_FEATURES = { + FAKE_STATE_FEATURE_ID: ( + torch.zeros(1, dtype=torch.long), + torch.tensor([], dtype=torch.long), + ) +} +FAKE_STATE_ID_SCORE_LIST_FEATURES = { + FAKE_STATE_FEATURE_ID: ( + torch.zeros(1, dtype=torch.long), + torch.tensor([], dtype=torch.long), + torch.tensor([], dtype=torch.float), + ) +} + + +def serving_to_feature_data( + serving: rlt.ServingFeatureData, + dense_preprocessor: Preprocessor, + sparse_preprocessor: SparsePreprocessor, +) -> rlt.FeatureData: + float_features_with_presence, id_list_features, id_score_list_features = serving + return rlt.FeatureData( + float_features=dense_preprocessor(*float_features_with_presence), + id_list_features_raw=sparse_preprocessor.preprocess_id_list(id_list_features), + id_score_list_features_raw=sparse_preprocessor.preprocess_id_score_list( + id_score_list_features + ), + ) + + +def sparse_input_prototype( + model: ModelBase, + state_preprocessor: Preprocessor, + state_feature_config: rlt.ModelFeatureConfig, +): + name2id = state_feature_config.name2id + model_prototype = model.input_prototype() + state_id_list_features = FAKE_STATE_ID_LIST_FEATURES + state_id_score_list_features = FAKE_STATE_ID_SCORE_LIST_FEATURES + + if isinstance(model_prototype, rlt.FeatureData): + if state_feature_config.id_list_feature_configs: + assert model_prototype.id_list_features_raw + state_id_list_features = { + name2id[k]: v for k, v in model_prototype.id_list_features_raw.items() + } + if state_feature_config.id_score_list_feature_configs: + assert model_prototype.id_score_list_features_raw + state_id_score_list_features = { + name2id[k]: v + for k, v in model_prototype.id_score_list_features_raw.items() + } -# TODO: The feature definition should be ModelFeatureConfig + input = rlt.ServingFeatureData( + float_features_with_presence=state_preprocessor.input_prototype(), + id_list_features=state_id_list_features, + id_score_list_features=state_id_score_list_features, + ) + return (input,) class DiscreteDqnWithPreprocessor(ModelBase): @@ -27,29 +97,79 @@ class DiscreteDqnWithPreprocessor(ModelBase): any custom Python type. 
""" - def __init__(self, model: ModelBase, state_preprocessor: Preprocessor): + def __init__( + self, + model: ModelBase, + state_preprocessor: Preprocessor, + state_feature_config: rlt.ModelFeatureConfig, + ): super().__init__() self.model = model self.state_preprocessor = state_preprocessor + self.state_feature_config = state_feature_config + self.sparse_preprocessor = make_sparse_preprocessor( + self.state_feature_config, device=torch.device("cpu") + ) - def forward(self, state_with_presence: Tuple[torch.Tensor, torch.Tensor]): - preprocessed_state = self.state_preprocessor( - state_with_presence[0], state_with_presence[1] + def forward(self, state: rlt.ServingFeatureData): + state_feature_data = serving_to_feature_data( + state, self.state_preprocessor, self.sparse_preprocessor ) - state_feature_vector = rlt.FeatureData(preprocessed_state) - q_values = self.model(state_feature_vector) + q_values = self.model(state_feature_data) return q_values def input_prototype(self): - return (self.state_preprocessor.input_prototype(),) + return sparse_input_prototype( + model=self.model, + state_preprocessor=self.state_preprocessor, + state_feature_config=self.state_feature_config, + ) - @property - def sorted_features(self): - # TODO: the interface here should be ModelFeatureConfig - return self.state_preprocessor.sorted_features + +class DiscreteDqnPredictorWrapper(torch.jit.ScriptModule): + def __init__( + self, + dqn_with_preprocessor: DiscreteDqnWithPreprocessor, + action_names: List[str], + # here to keep interface consistent with FB internal + state_feature_config: rlt.ModelFeatureConfig, + ) -> None: + super().__init__() + self.dqn_with_preprocessor = torch.jit.trace( + dqn_with_preprocessor, dqn_with_preprocessor.input_prototype() + ) + self.action_names = torch.jit.Attribute(action_names, List[str]) + + @torch.jit.script_method + def forward(self, state: rlt.ServingFeatureData) -> Tuple[List[str], torch.Tensor]: + q_values = self.dqn_with_preprocessor(state) + return (self.action_names, q_values) + + +class OSSSparsePredictorUnwrapper(nn.Module): + # Wrap input in serving feature data + def __init__(self, model: nn.Module) -> None: + super().__init__() + self.model = model + + def forward( + self, + state_with_presence: Tuple[torch.Tensor, torch.Tensor], + state_id_list_features: Dict[int, Tuple[torch.Tensor, torch.Tensor]], + state_id_score_list_features: Dict[ + int, Tuple[torch.Tensor, torch.Tensor, torch.Tensor] + ], + ) -> Tuple[List[str], torch.Tensor]: + return self.model( + rlt.ServingFeatureData( + float_features_with_presence=state_with_presence, + id_list_features=state_id_list_features, + id_score_list_features=state_id_score_list_features, + ) + ) -class DiscreteDqnWithPreprocessorWithIdList(ModelBase): +class BinaryDifferenceScorerWithPreprocessor(ModelBase): """ This is separated from DiscreteDqnPredictorWrapper so that we can pass typed inputs into the model. This is possible because JIT only traces tensor operation. 
@@ -61,100 +181,49 @@ def __init__( self, model: ModelBase, state_preprocessor: Preprocessor, - state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + state_feature_config: rlt.ModelFeatureConfig, ): super().__init__() self.model = model self.state_preprocessor = state_preprocessor self.state_feature_config = state_feature_config - - def forward( - self, - state_with_presence: Tuple[torch.Tensor, torch.Tensor], - state_id_list_features: Dict[int, Tuple[torch.Tensor, torch.Tensor]], - ): - preprocessed_state = self.state_preprocessor( - state_with_presence[0], state_with_presence[1] - ) - id_list_features = { - id_list_feature_config.name: state_id_list_features[ - id_list_feature_config.feature_id - ] - for id_list_feature_config in self.id_list_feature_configs - } - state_feature_vector = rlt.FeatureData( - float_features=preprocessed_state, id_list_features=id_list_features + self.sparse_preprocessor = make_sparse_preprocessor( + self.state_feature_config, device=torch.device("cpu") ) - q_values = self.model(state_feature_vector) - return q_values - @property - def id_list_feature_configs(self) -> List[rlt.IdListFeatureConfig]: - if self.state_feature_config: - # pyre-fixme[16]: `Optional` has no attribute `id_list_feature_configs`. - return self.state_feature_config.id_list_feature_configs - return [] + def forward(self, state: rlt.ServingFeatureData): + state_feature_data = serving_to_feature_data( + state, self.state_preprocessor, self.sparse_preprocessor + ) + q_values = self.model(state_feature_data) + assert q_values.shape[1] == 2, f"{q_values.shape}" + softmax_vals = F.softmax(q_values, dim=1) + # TODO for future cleanup: kind of a misnomer now, since not really "difference" + return softmax_vals[:, 1] def input_prototype(self): - feature_name_to_id = { - config.name: config.feature_id for config in self.id_list_feature_configs - } - state_id_list_features = { - feature_name_to_id[k]: v - for k, v in self.model.input_prototype().id_list_features.items() - } - # Terrible hack to make JIT tracing works. Python dict doesn't have type - # so we need to insert something so JIT tracer can infer the type. 
- if not state_id_list_features: - state_id_list_features = { - -1: ( - torch.zeros(1, dtype=torch.long), - torch.tensor([], dtype=torch.long), - ) - } - return (self.state_preprocessor.input_prototype(), state_id_list_features) - - @property - def sorted_features(self): - # TODO: the interface here should be ModelFeatureConfig - return self.state_preprocessor.sorted_features - + return sparse_input_prototype( + model=self.model, + state_preprocessor=self.state_preprocessor, + state_feature_config=self.state_feature_config, + ) -class DiscreteDqnPredictorWrapper(torch.jit.ScriptModule): - __constants__ = ["state_sorted_features_t"] +class BinaryDifferenceScorerPredictorWrapper(torch.jit.ScriptModule): def __init__( self, - dqn_with_preprocessor: DiscreteDqnWithPreprocessor, - action_names: List[str], - state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + binary_difference_scorer_with_preprocessor: BinaryDifferenceScorerWithPreprocessor, + state_feature_config: rlt.ModelFeatureConfig, ) -> None: - """ - state_feature_config is here to keep the interface consistent with FB internal - version - """ super().__init__() - - self.state_sorted_features_t = dqn_with_preprocessor.sorted_features - - self.dqn_with_preprocessor = torch.jit.trace( - dqn_with_preprocessor, dqn_with_preprocessor.input_prototype() + self.binary_difference_scorer_with_preprocessor = torch.jit.trace( + binary_difference_scorer_with_preprocessor, + binary_difference_scorer_with_preprocessor.input_prototype(), ) - self.action_names = torch.jit.Attribute(action_names, List[str]) @torch.jit.script_method - def state_sorted_features(self) -> List[int]: - """ - This interface is used by DiscreteDqnTorchPredictor - """ - return self.state_sorted_features_t - - @torch.jit.script_method - def forward( - self, state_with_presence: Tuple[torch.Tensor, torch.Tensor] - ) -> Tuple[List[str], torch.Tensor]: - q_values = self.dqn_with_preprocessor(state_with_presence) - return (self.action_names, q_values) + def forward(self, state: rlt.ServingFeatureData) -> torch.Tensor: + return self.binary_difference_scorer_with_preprocessor(state) # Pass through serving module's output @@ -167,52 +236,11 @@ def forward(self, *args, **kwargs) -> Tuple[List[str], torch.Tensor]: return self.model(*args, **kwargs) -DiscreteDqnPredictorUnwrapper = OSSPredictorUnwrapper -ActorPredictorUnwrapper = OSSPredictorUnwrapper +DiscreteDqnPredictorUnwrapper = OSSSparsePredictorUnwrapper +ActorPredictorUnwrapper = OSSSparsePredictorUnwrapper ParametricDqnPredictorUnwrapper = OSSPredictorUnwrapper -class DiscreteDqnPredictorWrapperWithIdList(torch.jit.ScriptModule): - __constants__ = ["state_sorted_features_t"] - - def __init__( - self, - dqn_with_preprocessor: DiscreteDqnWithPreprocessorWithIdList, - action_names: List[str], - state_feature_config: Optional[rlt.ModelFeatureConfig] = None, - ) -> None: - """ - state_feature_config is here to keep the interface consistent with FB internal - version - """ - super().__init__() - - self.state_sorted_features_t = dqn_with_preprocessor.sorted_features - - self.dqn_with_preprocessor = torch.jit.trace( - dqn_with_preprocessor, dqn_with_preprocessor.input_prototype() - ) - self.action_names = torch.jit.Attribute(action_names, List[str]) - - @torch.jit.script_method - def state_sorted_features(self) -> List[int]: - """ - This interface is used by DiscreteDqnTorchPredictor - """ - return self.state_sorted_features_t - - @torch.jit.script_method - def forward( - self, - state_with_presence: Tuple[torch.Tensor, 
torch.Tensor], - state_id_list_features: Dict[int, Tuple[torch.Tensor, torch.Tensor]], - ) -> Tuple[List[str], torch.Tensor]: - q_values = self.dqn_with_preprocessor( - state_with_presence, state_id_list_features - ) - return (self.action_names, q_values) - - class ParametricDqnWithPreprocessor(ModelBase): def __init__( self, @@ -225,14 +253,6 @@ def __init__( self.state_preprocessor = state_preprocessor self.action_preprocessor = action_preprocessor - @property - def state_sorted_features(self) -> List[int]: - return self.state_preprocessor.sorted_features - - @property - def action_sorted_features(self) -> List[int]: - return self.action_preprocessor.sorted_features - def forward( self, state_with_presence: Tuple[torch.Tensor, torch.Tensor], @@ -257,31 +277,13 @@ def input_prototype(self): class ParametricDqnPredictorWrapper(torch.jit.ScriptModule): - __constants__ = ["state_sorted_features_t", "action_sorted_features_t"] - def __init__(self, dqn_with_preprocessor: ParametricDqnWithPreprocessor) -> None: super().__init__() - self.state_sorted_features_t = dqn_with_preprocessor.state_sorted_features - self.action_sorted_features_t = dqn_with_preprocessor.action_sorted_features self.dqn_with_preprocessor = torch.jit.trace( dqn_with_preprocessor, dqn_with_preprocessor.input_prototype() ) - @torch.jit.script_method - def state_sorted_features(self) -> List[int]: - """ - This interface is used by ParametricDqnTorchPredictor - """ - return self.state_sorted_features_t - - @torch.jit.script_method - def action_sorted_features(self) -> List[int]: - """ - This interface is used by ParametricDqnTorchPredictor - """ - return self.action_sorted_features_t - @torch.jit.script_method def forward( self, @@ -304,43 +306,50 @@ def __init__( self, model: ModelBase, state_preprocessor: Preprocessor, + state_feature_config: rlt.ModelFeatureConfig, action_postprocessor: Optional[Postprocessor] = None, + serve_mean_policy: bool = False, ): super().__init__() self.model = model self.state_preprocessor = state_preprocessor + self.state_feature_config = state_feature_config + self.sparse_preprocessor = make_sparse_preprocessor( + self.state_feature_config, device=torch.device("cpu") + ) self.action_postprocessor = action_postprocessor + self.serve_mean_policy = serve_mean_policy - def forward(self, state_with_presence: Tuple[torch.Tensor, torch.Tensor]): - preprocessed_state = self.state_preprocessor( - state_with_presence[0], state_with_presence[1] + def forward(self, state: rlt.ServingFeatureData): + state_feature_data = serving_to_feature_data( + state, self.state_preprocessor, self.sparse_preprocessor ) - state_feature_vector = rlt.FeatureData(preprocessed_state) - # TODO: include log_prob in the output - action = self.model(state_feature_vector).action + model_output = self.model(state_feature_data) + if self.serve_mean_policy: + assert ( + model_output.squashed_mean is not None + ), "action mean is None and serve_mean_policy=True" + action = model_output.squashed_mean + else: + action = model_output.action + if self.action_postprocessor: - # pyre-fixme[29]: `Optional[Postprocessor]` is not a function. 
action = self.action_postprocessor(action) - return action + return (action, model_output.log_prob) def input_prototype(self): - return (self.state_preprocessor.input_prototype(),) - - @property - def sorted_features(self): - # TODO: the interface here should be ModelFeatureConfig - return self.state_preprocessor.sorted_features - - -_DEFAULT_FEATURE_IDS = [] + return sparse_input_prototype( + model=self.model, + state_preprocessor=self.state_preprocessor, + state_feature_config=self.state_feature_config, + ) class ActorPredictorWrapper(torch.jit.ScriptModule): - __constants__ = ["state_sorted_features_t"] - def __init__( self, actor_with_preprocessor: ActorWithPreprocessor, + state_feature_config: rlt.ModelFeatureConfig, action_feature_ids: List[int] = _DEFAULT_FEATURE_IDS, ) -> None: """ @@ -349,43 +358,397 @@ def __init__( """ super().__init__() - self.state_sorted_features_t = actor_with_preprocessor.sorted_features - self.actor_with_preprocessor = torch.jit.trace( actor_with_preprocessor, actor_with_preprocessor.input_prototype() ) @torch.jit.script_method - def state_sorted_features(self) -> List[int]: - """ - This interface is used by ActorTorchPredictor - """ - return self.state_sorted_features_t + def forward( + self, state: rlt.ServingFeatureData + ) -> Tuple[torch.Tensor, torch.Tensor]: + return self.actor_with_preprocessor(state) + + +class RankingActorWithPreprocessor(ModelBase): + def __init__( + self, + model: ModelBase, + state_preprocessor: Preprocessor, + candidate_preprocessor: Preprocessor, + num_candidates: int, + action_postprocessor: Optional[Postprocessor] = None, + ): + super().__init__() + self.model = model + self.state_preprocessor = state_preprocessor + self.candidate_preprocessor = candidate_preprocessor + self.num_candidates = num_candidates + self.action_postprocessor = action_postprocessor + + def forward( + self, + state_with_presence: Tuple[torch.Tensor, torch.Tensor], + candidate_with_presence_list: List[Tuple[torch.Tensor, torch.Tensor]], + ): + assert ( + len(candidate_with_presence_list) == self.num_candidates + ), f"{len(candidate_with_presence_list)} != {self.num_candidates}" + preprocessed_state = self.state_preprocessor(*state_with_presence) + # each is batch_size x candidate_dim, result is batch_size x num_candidates x candidate_dim + preprocessed_candidates = torch.stack( + [self.candidate_preprocessor(*x) for x in candidate_with_presence_list], + dim=1, + ) + input = rlt.FeatureData( + float_features=preprocessed_state, + candidate_docs=rlt.DocList( + float_features=preprocessed_candidates, + mask=torch.tensor(-1), + value=torch.tensor(-1), + ), + ) + input = rlt._embed_states(input) + action = self.model(input).action + if self.action_postprocessor is not None: + action = self.action_postprocessor(action) + return action + + def input_prototype(self): + return ( + self.state_preprocessor.input_prototype(), + [self.candidate_preprocessor.input_prototype()] * self.num_candidates, + ) + + +class RankingActorPredictorWrapper(torch.jit.ScriptModule): + def __init__( + self, + actor_with_preprocessor: RankingActorWithPreprocessor, + action_feature_ids: List[int], + ) -> None: + super().__init__() + self.actor_with_preprocessor = torch.jit.trace( + actor_with_preprocessor, + actor_with_preprocessor.input_prototype(), + check_trace=False, + ) @torch.jit.script_method def forward( - self, state_with_presence: Tuple[torch.Tensor, torch.Tensor] + self, + state_with_presence: Tuple[torch.Tensor, torch.Tensor], + candidate_with_presence_list: 
List[Tuple[torch.Tensor, torch.Tensor]], ) -> torch.Tensor: - action = self.actor_with_preprocessor(state_with_presence) + action = self.actor_with_preprocessor( + state_with_presence, candidate_with_presence_list + ) return action -class Seq2SlateWithPreprocessor(ModelBase): +class LearnVMSlateWithPreprocessor(ModelBase): + def __init__( + self, + mlp: torch.nn.Module, + state_preprocessor: Preprocessor, + candidate_preprocessor: Preprocessor, + ): + super().__init__() + self.mlp = mlp + self.state_preprocessor = state_preprocessor + self.candidate_preprocessor = candidate_preprocessor + + def input_prototype(self): + candidate_input_prototype = self.candidate_preprocessor.input_prototype() + return ( + self.state_preprocessor.input_prototype(), + ( + candidate_input_prototype[0].repeat((1, 5, 1)), + candidate_input_prototype[1].repeat((1, 5, 1)), + ), + ) + + def forward(self, state_vp, candidate_vp): + batch_size, num_candidates, candidate_dim = candidate_vp[0].shape + state_feats = self.state_preprocessor(*state_vp) + candidate_feats = self.candidate_preprocessor( + candidate_vp[0].view( + batch_size * num_candidates, + len(self.candidate_preprocessor.sorted_features), + ), + candidate_vp[1].view( + batch_size * num_candidates, + len(self.candidate_preprocessor.sorted_features), + ), + ).view(batch_size, num_candidates, -1) + input = rlt.FeatureData( + float_features=state_feats, candidate_docs=rlt.DocList(candidate_feats) + ) + scores = self.mlp(input).view(batch_size, num_candidates) + return scores + + +class SlateRankingPreprocessor(ModelBase): + def __init__( + self, + state_preprocessor: Preprocessor, + candidate_preprocessor: Preprocessor, + candidate_size: int, + ): + super().__init__() + self.state_preprocessor = state_preprocessor + self.candidate_preprocessor = candidate_preprocessor + self.candidate_size = candidate_size + + def input_prototype(self): + candidate_input_prototype = self.candidate_preprocessor.input_prototype() + return ( + self.state_preprocessor.input_prototype(), + ( + candidate_input_prototype[0].repeat((1, self.candidate_size, 1)), + candidate_input_prototype[1].repeat((1, self.candidate_size, 1)), + ), + ) + + def forward( + self, + state_with_presence: Tuple[torch.Tensor, torch.Tensor], + candidate_with_presence: Tuple[torch.Tensor, torch.Tensor], + ): + # state_value.shape == state_presence.shape == batch_size x state_feat_num + # candidate_value.shape == candidate_presence.shape == + # batch_size x max_src_seq_len x candidate_feat_num + batch_size, max_src_seq_len, candidate_feat_num = candidate_with_presence[ + 0 + ].shape + + preprocessed_state = self.state_preprocessor( + state_with_presence[0], state_with_presence[1] + ) + preprocessed_candidates = self.candidate_preprocessor( + candidate_with_presence[0].view( + batch_size * max_src_seq_len, + candidate_feat_num, + ), + candidate_with_presence[1].view( + batch_size * max_src_seq_len, + candidate_feat_num, + ), + # the last dimension is preprocessed candidate feature dim, + # not necessarily = candidate_feat_num + ).view(batch_size, max_src_seq_len, -1) + + return preprocessed_state, preprocessed_candidates + + +class Seq2SlateWithPreprocessor(nn.Module): def __init__( self, model: Seq2SlateTransformerNet, state_preprocessor: Preprocessor, candidate_preprocessor: Preprocessor, greedy: bool, + ): + super().__init__() + self.model = model.seq2slate + self.greedy = greedy + preprocessor = SlateRankingPreprocessor( + state_preprocessor, candidate_preprocessor, model.max_src_seq_len + ) + 
self.input_prototype_data = preprocessor.input_prototype() + # if the module has to be serialized via jit.script, preprocessor has to be traced first + # because preprocessor has operations beyond what jit.script can support + if not self.can_be_traced(): + preprocessor = torch.jit.trace(preprocessor, preprocessor.input_prototype()) + self.preprocessor = preprocessor + self.state_sorted_features = state_preprocessor.sorted_features + self.candidate_sorted_features = candidate_preprocessor.sorted_features + self.state_feature_id_to_index = state_preprocessor.feature_id_to_index + self.candidate_feature_id_to_index = candidate_preprocessor.feature_id_to_index + + def input_prototype(self): + return self.input_prototype_data + + def forward( + self, + state_with_presence: Tuple[torch.Tensor, torch.Tensor], + candidate_with_presence: Tuple[torch.Tensor, torch.Tensor], + ): + preprocessed_state, preprocessed_candidates = self.preprocessor( + state_with_presence, candidate_with_presence + ) + max_src_seq_len = preprocessed_candidates.shape[1] + res = self.model( + mode=Seq2SlateMode.RANK_MODE.value, + state=preprocessed_state, + src_seq=preprocessed_candidates, + tgt_seq_len=max_src_seq_len, + greedy=self.greedy, + ) + return ( + res.ranked_per_symbol_probs, + res.ranked_per_seq_probs, + res.ranked_tgt_out_idx, + ) + + def can_be_traced(self): + """ + Whether this module can be serialized by jit.trace. + In production, we find jit.trace may have faster performance than jit.script. + The models that can be traced are those don't have for-loop in inference, + since we want to deal with inputs of variable lengths. The models that can't + be traced are those with iterative decoder, i.e., autoregressive or non-greedy + frechet-sort. + """ + output_arch = self.model.output_arch + return output_arch == Seq2SlateOutputArch.ENCODER_SCORE or ( + output_arch == Seq2SlateOutputArch.FRECHET_SORT and self.greedy + ) + + +class Seq2SlatePredictorWrapper(torch.jit.ScriptModule): + def __init__(self, seq2slate_with_preprocessor: Seq2SlateWithPreprocessor) -> None: + super().__init__() + if seq2slate_with_preprocessor.can_be_traced(): + self.seq2slate_with_preprocessor = torch.jit.trace( + seq2slate_with_preprocessor, + seq2slate_with_preprocessor.input_prototype(), + ) + else: + self.seq2slate_with_preprocessor = torch.jit.script( + seq2slate_with_preprocessor + ) + + @torch.jit.script_method + def forward( + self, + state_with_presence: Tuple[torch.Tensor, torch.Tensor], + candidate_with_presence: Tuple[torch.Tensor, torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + # ranked_per_seq_probs shape: batch_size, 1 + # ranked_tgt_out_idx shape: batch_size, tgt_seq_len + _, ranked_per_seq_probs, ranked_tgt_out_idx = self.seq2slate_with_preprocessor( + state_with_presence, candidate_with_presence + ) + assert ranked_tgt_out_idx is not None + assert ranked_per_seq_probs is not None + # -2 to offset padding symbol and decoder start symbol + ranked_tgt_out_idx -= 2 + return ranked_per_seq_probs, ranked_tgt_out_idx + + +class Seq2RewardWithPreprocessor(DiscreteDqnWithPreprocessor): + def __init__( + self, + model: ModelBase, # acc_reward prediction model + state_preprocessor: Preprocessor, + seq_len: int, + num_action: int, + ): + """ + Since TorchScript unable to trace control-flow, we + have to generate the action enumerations as constants + here so that trace can use them directly. 
+ """ + super().__init__(model, state_preprocessor, rlt.ModelFeatureConfig()) + self.seq_len = seq_len + self.num_action = num_action + self.all_permut = gen_permutations(seq_len, num_action) + + def forward(self, state: rlt.ServingFeatureData): + """ + This serving module only takes in current state. + We need to simulate all multi-step length action seq's + then predict accumulated reward on all those seq's. + After that, we categorize all action seq's by their + first actions. Then take the maximum reward as the + predicted categorical reward for that category. + Return: categorical reward for the first action + """ + state_with_presence, _, _ = state + batch_size, state_dim = state_with_presence[0].size() + state_first_step = self.state_preprocessor( + state_with_presence[0], state_with_presence[1] + ).reshape(batch_size, -1) + # shape: batch_size, num_action + max_acc_reward = get_Q( + # pyre-fixme[6]: Expected `Seq2RewardNetwork` for 1st param but got + # `ModelBase`. + self.model, + state_first_step, + self.all_permut, + ) + return max_acc_reward + + +class Seq2RewardPlanShortSeqWithPreprocessor(DiscreteDqnWithPreprocessor): + def __init__( + self, + model: ModelBase, # acc_reward prediction model + step_model: ModelBase, # step prediction model + state_preprocessor: Preprocessor, + seq_len: int, + num_action: int, + ): + """ + The difference with Seq2RewardWithPreprocessor: + This wrapper will plan for different look_ahead steps (between 1 and seq_len), + and merge results according to look_ahead step prediction probabilities. + """ + super().__init__(model, state_preprocessor, rlt.ModelFeatureConfig()) + self.step_model = step_model + self.seq_len = seq_len + self.num_action = num_action + # key: seq_len, value: all possible action sequences of length seq_len + self.all_permut = { + s + 1: gen_permutations(s + 1, num_action) for s in range(seq_len) + } + + def forward(self, state: rlt.ServingFeatureData): + state_with_presence, _, _ = state + batch_size, state_dim = state_with_presence[0].size() + + state_first_step = self.state_preprocessor( + state_with_presence[0], state_with_presence[1] + ).reshape(batch_size, -1) + + # shape: batch_size, seq_len + step_probability = F.softmax(self.step_model(state_first_step), dim=1) + # shape: batch_size, seq_len, num_action + max_acc_reward = torch.cat( + [ + get_Q( + # pyre-fixme[6]: Expected `Seq2RewardNetwork` for 1st param but + # got `ModelBase`. 
+ self.model, + state_first_step, + self.all_permut[i + 1], + ).unsqueeze(1) + for i in range(self.seq_len) + ], + dim=1, + ) + # shape: batch_size, num_action + max_acc_reward_weighted = torch.sum( + max_acc_reward * step_probability.unsqueeze(2), dim=1 + ) + return max_acc_reward_weighted + + +class Seq2SlateRewardWithPreprocessor(ModelBase): + def __init__( + self, + model: Seq2SlateRewardNetBase, + state_preprocessor: Preprocessor, + candidate_preprocessor: Preprocessor, ): super().__init__() self.model = model self.state_preprocessor = state_preprocessor self.candidate_preprocessor = candidate_preprocessor - self.greedy = greedy def input_prototype(self): candidate_input_prototype = self.candidate_preprocessor.input_prototype() + return ( self.state_preprocessor.input_prototype(), ( @@ -411,86 +774,99 @@ def forward( # candidate_value.shape == candidate_presence.shape == # batch_size x max_src_seq_len x candidate_feat_num batch_size = state_with_presence[0].shape[0] + max_tgt_seq_len = self.model.max_tgt_seq_len + max_src_seq_len = self.model.max_src_seq_len + + # we use a fake slate_idx_with_presence to retrieve the first + # max_tgt_seq_len candidates from + # len(slate_idx_with presence) == batch_size + # component: 1d tensor with length max_tgt_seq_len + slate_idx_with_presence = [ + (torch.arange(max_tgt_seq_len), torch.ones(max_tgt_seq_len)) + ] * batch_size preprocessed_state = self.state_preprocessor( state_with_presence[0], state_with_presence[1] ) + preprocessed_candidates = self.candidate_preprocessor( candidate_with_presence[0].view( - batch_size * self.model.max_src_seq_len, - len(self.candidate_sorted_features), + batch_size * max_src_seq_len, len(self.candidate_sorted_features) ), candidate_with_presence[1].view( - batch_size * self.model.max_src_seq_len, - len(self.candidate_sorted_features), + batch_size * max_src_seq_len, len(self.candidate_sorted_features) ), - ).view(batch_size, self.model.max_src_seq_len, -1) + ).view(batch_size, max_src_seq_len, -1) + + src_src_mask = torch.ones(batch_size, max_src_seq_len, max_src_seq_len) + + tgt_out_idx = torch.cat( + [slate_idx[0] for slate_idx in slate_idx_with_presence] + ).view(batch_size, max_tgt_seq_len) + + tgt_out_seq = gather(preprocessed_candidates, tgt_out_idx) - # TODO: consider different numbers of candidates in the same batch_ - src_src_mask = torch.ones( - batch_size, self.model.max_src_seq_len, self.model.max_src_seq_len - ) ranking_input = rlt.PreprocessedRankingInput.from_tensors( state=preprocessed_state, src_seq=preprocessed_candidates, src_src_mask=src_src_mask, + tgt_out_seq=tgt_out_seq, + # +2 is needed to avoid two preserved symbols: + # PADDING_SYMBOL = 0 + # DECODER_START_SYMBOL = 1 + tgt_out_idx=tgt_out_idx + 2, ) - ranking_output = self.model( - ranking_input, - mode=Seq2SlateMode.RANK_MODE, - tgt_seq_len=self.model.max_tgt_seq_len, - greedy=self.greedy, - ) - return ranking_output.ranked_tgt_out_probs, ranking_output.ranked_tgt_out_idx + output = self.model(ranking_input) + return output.predicted_reward -class Seq2SlatePredictorWrapper(torch.jit.ScriptModule): - __constants__ = ["state_sorted_features_t", "candidate_sorted_features_t"] - def __init__(self, seq2slate_with_preprocessor: Seq2SlateWithPreprocessor) -> None: +class MDNRNNWithPreprocessor(ModelBase): + def __init__( + self, + model: ModelBase, + state_preprocessor: Preprocessor, + seq_len: int, + num_action: int, + state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + ): super().__init__() - - self.state_sorted_features_t = 
seq2slate_with_preprocessor.state_sorted_features - self.candidate_sorted_features_t = ( - seq2slate_with_preprocessor.candidate_sorted_features - ) - self.seq2slate_with_preprocessor = torch.jit.trace( - seq2slate_with_preprocessor, seq2slate_with_preprocessor.input_prototype() + self.model = model + self.state_preprocessor = state_preprocessor + self.state_feature_config = state_feature_config or rlt.ModelFeatureConfig() + self.sparse_preprocessor = make_sparse_preprocessor( + self.state_feature_config, device=torch.device("cpu") ) + self.seq_len = seq_len + self.num_action = num_action - @torch.jit.script_method - def state_sorted_features(self) -> List[int]: - """ - This interface is used by Seq2SlateTorchPredictor - """ - return self.state_sorted_features_t - - @torch.jit.script_method - def candidate_sorted_features(self) -> List[int]: - """ - This interface is used by Seq2SlateTorchPredictor - """ - return self.candidate_sorted_features_t - - @torch.jit.script_method def forward( self, state_with_presence: Tuple[torch.Tensor, torch.Tensor], - candidate_with_presence: Tuple[torch.Tensor, torch.Tensor], - ) -> Tuple[torch.Tensor, torch.Tensor]: - # ranked_tgt_out_probs shape: batch_size, tgt_seq_len, candidate_size - # ranked_tgt_out_idx shape: batch_size, tgt_seq_len - ranked_tgt_out_probs, ranked_tgt_out_idx = self.seq2slate_with_preprocessor( - state_with_presence, candidate_with_presence + action: torch.Tensor, + ): + + batch_size, state_dim = state_with_presence[0].size() + preprocessed_state = ( + self.state_preprocessor(state_with_presence[0], state_with_presence[1]) + .reshape(batch_size, self.seq_len, -1) + .transpose(0, 1) ) - # convert to slate-wise probabilities - # ranked_tgt_out_probs shape: batch_size - ranked_tgt_out_probs = torch.prod( - torch.gather( - ranked_tgt_out_probs, 2, ranked_tgt_out_idx.unsqueeze(-1) - ).squeeze(), - -1, + result = self.model(action, preprocessed_state) + + return result + + def input_prototype(self): + return ( + self.state_preprocessor.input_prototype(), + torch.randn(1, 1, self.num_action, device=self.state_preprocessor.device), ) - # -2 to offset padding symbol and decoder start symbol - ranked_tgt_out_idx -= 2 - return ranked_tgt_out_probs, ranked_tgt_out_idx + + +class CompressModelWithPreprocessor(DiscreteDqnWithPreprocessor): + def forward(self, state: rlt.ServingFeatureData): + state_feature_data = serving_to_feature_data( + state, self.state_preprocessor, self.sparse_preprocessor + ) + q_values = self.model(state_feature_data) + return q_values diff --git a/reagent/prediction/ranking/__init__.py b/reagent/prediction/ranking/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/prediction/ranking/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/prediction/ranking/predictor_wrapper.py b/reagent/prediction/ranking/predictor_wrapper.py new file mode 100644 index 000000000..123343f41 --- /dev/null +++ b/reagent/prediction/ranking/predictor_wrapper.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +from enum import Enum +from typing import List, Optional, Tuple + +import torch +import torch.nn.functional as F + + +class Kernel(Enum): + # = dot_product(x, y) + Linear = "linear" + + # = exp(-||x-y||^2 / (2 * sigma^2)) + RBF = "rbf" + + +class DeterminantalPointProcessPredictorWrapper(torch.jit.ScriptModule): + """http://jgillenw.com/cikm2018.pdf Algorithm 1""" + + def __init__( + self, + alpha: float, + kernel: Kernel = Kernel.Linear, + sigma: float = 1.0, + rerank_topk: Optional[int] = None, + ) -> None: + super().__init__() + # control the strength of encouragement for diversity + self.alpha = alpha + + # distance function + self.kernel = kernel + + # sigma parameter used in the RBF kernel + self.sigma = sigma + + # hard code this value so jit.script can work + self.MIN_VALUE = -3.4e38 + + # if None, will rerank the full slate + self.rerank_topk = rerank_topk + if self.rerank_topk is not None: + assert self.rerank_topk > 0 + + def unchosen_dets(self, L, chosen: List[int]): + slate_size = L.shape[0] + dets = torch.full((slate_size,), self.MIN_VALUE, device=L.device) + for i in range(slate_size): + if i not in chosen: + dets[i] = torch.det(L[:, chosen + [i]][chosen + [i]]) + return dets + + def greedy_select(self, L): + slate_size = L.shape[0] + dets = torch.zeros(slate_size, slate_size, device=L.device) + chosen: List[int] = [] + unchosen = torch.ones(slate_size) + + if self.rerank_topk is not None: + rerank_topk = min(self.rerank_topk, slate_size) + else: + rerank_topk = slate_size + + for i in range(rerank_topk): + unchosen_dets = self.unchosen_dets(L, chosen) + dets[i, :] = unchosen_dets + chosen_idx = torch.argmax(unchosen_dets) + chosen.append(chosen_idx.item()) + unchosen[chosen_idx] = 0 + + final_order = torch.tensor(chosen) + if rerank_topk != slate_size: + final_order = torch.cat((final_order, torch.nonzero(unchosen).flatten())) + + return final_order, dets + + @torch.jit.script_method + def forward( + self, + quality_scores: torch.Tensor, + feature_vectors: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Args: + quality_scores: (num_items, 1) + feature_vectors (num_items, num_feat) + + Return: + chosen indices: (num_items, ) + determinants computed at each selection: (num_items, num_items) + the kernel matrix: (num_items, num_items) + """ + + quality_scores = quality_scores.float() + feature_vectors = F.normalize(feature_vectors.float(), p=2.0, dim=1) + + num_items = quality_scores.shape[0] + if self.kernel == Kernel.Linear: + B = (self.alpha**0.5) * quality_scores * feature_vectors + L = torch.mm(B, B.t()) + L[torch.arange(num_items), torch.arange(num_items)] = ( + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` + # and `int`. + quality_scores.squeeze(1) + ** 2 + ) + elif self.kernel == Kernel.RBF: + L = ( + self.alpha + * torch.mm(quality_scores, quality_scores.t()) + * torch.exp( + # pyre-fixme[58]: `**` is not supported for operand types + # `Tensor` and `int`. + -(torch.cdist(feature_vectors, feature_vectors, p=2.0) ** 2) + / (2 * self.sigma**2) + ) + ) + else: + raise NotImplementedError() + + chosen, dets = self.greedy_select(L) + + return chosen, dets, L diff --git a/reagent/prediction/synthetic_reward/__init__.py b/reagent/prediction/synthetic_reward/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/prediction/synthetic_reward/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
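A short usage sketch for the determinantal point process re-ranker defined above. The scores and embeddings are made up; the wrapper and Kernel enum are the ones introduced in this file:

import torch
from reagent.prediction.ranking.predictor_wrapper import (
    DeterminantalPointProcessPredictorWrapper,
    Kernel,
)

# Three candidates: per-item quality scores (num_items, 1) and 2-d embeddings.
quality_scores = torch.tensor([[0.9], [0.8], [0.7]])
feature_vectors = torch.tensor(
    [[1.0, 0.0], [0.99, 0.1], [0.0, 1.0]]  # items 0 and 1 are near-duplicates
)
dpp = DeterminantalPointProcessPredictorWrapper(alpha=1.0, kernel=Kernel.Linear)
chosen, dets, L = dpp(quality_scores, feature_vectors)
# chosen holds the greedy re-ranked order; here item 2 is picked before item 1
# despite its lower quality score, because items 0 and 1 are nearly identical.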
diff --git a/reagent/prediction/synthetic_reward/synthetic_reward_predictor_wrapper.py b/reagent/prediction/synthetic_reward/synthetic_reward_predictor_wrapper.py new file mode 100644 index 000000000..7bed72210 --- /dev/null +++ b/reagent/prediction/synthetic_reward/synthetic_reward_predictor_wrapper.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from typing import Tuple + +import torch +import torch.nn as nn +from reagent.models.base import ModelBase +from reagent.preprocessing.preprocessor import Preprocessor + + +def split_features( + state_and_action_with_presence: Tuple[torch.Tensor, torch.Tensor], + state_feat_num: int, + action_feat_num: int, +): + state_value = state_and_action_with_presence[0].narrow(1, 0, state_feat_num) + state_presence = state_and_action_with_presence[1].narrow(1, 0, state_feat_num) + action_value = state_and_action_with_presence[0].narrow( + 1, state_feat_num, action_feat_num + ) + action_presence = state_and_action_with_presence[1].narrow( + 1, state_feat_num, action_feat_num + ) + return (state_value, state_presence), (action_value, action_presence) + + +class SyntheticRewardPredictorWrapper(nn.Module): + def __init__( + self, + seq_len: int, + state_preprocessor: Preprocessor, + action_preprocessor: Preprocessor, + net: ModelBase, + ) -> None: + super().__init__() + self.seq_len = seq_len + self.state_preprocessor = state_preprocessor + self.action_preprocessor = action_preprocessor + self.net = net + self.state_feat_num = len(state_preprocessor.sorted_features) + self.action_feat_num = len(action_preprocessor.sorted_features) + + def forward( + self, + state_and_action_with_presence: Tuple[torch.Tensor, torch.Tensor], + ) -> torch.Tensor: + assert self.seq_len == state_and_action_with_presence[0].shape[0] + state_with_presence, action_with_presence = split_features( + state_and_action_with_presence, + self.state_feat_num, + self.action_feat_num, + ) + # shape: seq_len, 1, state_feat_dim + preprocessed_state = self.state_preprocessor( + state_with_presence[0], state_with_presence[1] + ).unsqueeze(1) + # shape: seq_len, 1, action_feat_dim + preprocessed_action = self.action_preprocessor( + action_with_presence[0], action_with_presence[1] + ).unsqueeze(1) + # shape: (seq_len, ) + reward = self.net(preprocessed_state, preprocessed_action).flatten() + return reward diff --git a/reagent/preprocessing/batch_preprocessor.py b/reagent/preprocessing/batch_preprocessor.py index aa3cfcc12..cbf841c99 100644 --- a/reagent/preprocessing/batch_preprocessor.py +++ b/reagent/preprocessing/batch_preprocessor.py @@ -4,50 +4,14 @@ from typing import Dict import torch +import torch.nn as nn import torch.nn.functional as F -from reagent import types as rlt +from reagent.core import types as rlt from reagent.preprocessing.preprocessor import Preprocessor -class InputColumn(object): - STATE_FEATURES = "state_features" - STATE_SEQUENCE_FEATURES = "state_sequence_features" - STATE_ID_LIST_FEATURES = "state_id_list_features" - STATE_ID_SCORE_LIST_FEATURES = "state_id_score_list_features" - NEXT_STATE_FEATURES = "next_state_features" - NEXT_STATE_SEQUENCE_FEATURES = "next_state_sequence_features" - NEXT_STATE_ID_LIST_FEATURES = "next_state_id_list_features" - NEXT_STATE_ID_SCORE_LIST_FEATURES = "next_state_id_score_list_features" - ACTION = "action" - NEXT_ACTION = "next_action" - POSSIBLE_ACTIONS = "possible_actions" - POSSIBLE_ACTIONS_MASK = "possible_actions_mask" - POSSIBLE_NEXT_ACTIONS = 
"possible_next_actions" - POSSIBLE_NEXT_ACTIONS_MASK = "possible_next_actions_mask" - NOT_TERMINAL = "not_terminal" - STEP = "step" - TIME_DIFF = "time_diff" - TIME_SINCE_FIRST = "time_since_first" - MDP_ID = "mdp_id" - SEQUENCE_NUMBER = "sequence_number" - METRICS = "metrics" - REWARD = "reward" - ACTION_PROBABILITY = "action_probability" - SLATE_REWARD = "slate_reward" - POSITION_REWARD = "position_reward" - CANDIDATE_FEATURES = "candidate_features" - NEXT_CANDIDATE_FEATURES = "next_candidate_features" - REWARD_MASK = "reward_mask" - ITEM_MASK = "item_mask" - NEXT_ITEM_MASK = "next_item_mask" - ITEM_PROBABILITY = "item_probability" - NEXT_ITEM_PROBABILITY = "next_item_probability" - EXTRAS = "extras" - - -class BatchPreprocessor: - def __call__(self, batch: Dict[str, torch.Tensor]) -> rlt.TensorDataClass: - raise NotImplementedError() +class BatchPreprocessor(nn.Module): + pass def batch_to_device(batch: Dict[str, torch.Tensor], device: torch.device): @@ -59,13 +23,14 @@ def batch_to_device(batch: Dict[str, torch.Tensor], device: torch.device): class DiscreteDqnBatchPreprocessor(BatchPreprocessor): def __init__( - self, num_actions: int, state_preprocessor: Preprocessor, use_gpu: bool - ): + self, num_actions: int, state_preprocessor: Preprocessor, use_gpu: bool = False + ) -> None: + super().__init__() self.num_actions = num_actions self.state_preprocessor = state_preprocessor self.device = torch.device("cuda") if use_gpu else torch.device("cpu") - def __call__(self, batch: Dict[str, torch.Tensor]) -> rlt.DiscreteDqnInput: + def forward(self, batch: Dict[str, torch.Tensor]) -> rlt.DiscreteDqnInput: batch = batch_to_device(batch, self.device) preprocessed_state = self.state_preprocessor( batch["state_features"], batch["state_features_presence"] @@ -105,12 +70,13 @@ def __init__( state_preprocessor: Preprocessor, action_preprocessor: Preprocessor, use_gpu: bool, - ): + ) -> None: + super().__init__() self.state_preprocessor = state_preprocessor self.action_preprocessor = action_preprocessor self.device = torch.device("cuda") if use_gpu else torch.device("cpu") - def __call__(self, batch: Dict[str, torch.Tensor]) -> rlt.ParametricDqnInput: + def forward(self, batch: Dict[str, torch.Tensor]) -> rlt.ParametricDqnInput: batch = batch_to_device(batch, self.device) # first preprocess state and action preprocessed_state = self.state_preprocessor( @@ -151,13 +117,14 @@ def __init__( self, state_preprocessor: Preprocessor, action_preprocessor: Preprocessor, - use_gpu: bool, - ): + use_gpu: bool = False, + ) -> None: + super().__init__() self.state_preprocessor = state_preprocessor self.action_preprocessor = action_preprocessor self.device = torch.device("cuda") if use_gpu else torch.device("cpu") - def __call__(self, batch: Dict[str, torch.Tensor]) -> rlt.PolicyNetworkInput: + def forward(self, batch: Dict[str, torch.Tensor]) -> rlt.PolicyNetworkInput: batch = batch_to_device(batch, self.device) preprocessed_state = self.state_preprocessor( batch["state_features"], batch["state_features_presence"] diff --git a/reagent/preprocessing/identify_types.py b/reagent/preprocessing/identify_types.py index f47934b26..3ea846d0e 100644 --- a/reagent/preprocessing/identify_types.py +++ b/reagent/preprocessing/identify_types.py @@ -11,7 +11,9 @@ ENUM = "ENUM" QUANTILE = "QUANTILE" CONTINUOUS_ACTION = "CONTINUOUS_ACTION" +DISCRETE_ACTION = "DISCRETE_ACTION" DO_NOT_PREPROCESS = "DO_NOT_PREPROCESS" +CLIP_LOG = "CLIP_LOG" FEATURE_TYPES = ( BINARY, PROBABILITY, @@ -20,13 +22,15 @@ ENUM, QUANTILE, CONTINUOUS_ACTION, 
+ DISCRETE_ACTION, DO_NOT_PREPROCESS, + CLIP_LOG, ) ROW_DELIM = "\n" COLUMN_DELIM = ";" -DEFAULT_MAX_UNIQUE_ENUM = 100 +DEFAULT_MAX_UNIQUE_ENUM = 10 def _is_probability(feature_values): diff --git a/reagent/preprocessing/normalization.py b/reagent/preprocessing/normalization.py index d36009266..66b70c8e4 100644 --- a/reagent/preprocessing/normalization.py +++ b/reagent/preprocessing/normalization.py @@ -7,14 +7,14 @@ from typing import Dict, List, Optional, Tuple import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import six import torch -from reagent.parameters import NormalizationData, NormalizationParameters +from reagent.core.parameters import NormalizationParameters from reagent.preprocessing import identify_types from reagent.preprocessing.identify_types import DEFAULT_MAX_UNIQUE_ENUM, FEATURE_TYPES -from scipy import stats -from scipy.stats.mstats import mquantiles +from scipy import stats # @manual=third-party//scipy:scipy-py +from scipy.stats.mstats import mquantiles # @manual=third-party//scipy:scipy-py logger = logging.getLogger(__name__) @@ -27,7 +27,8 @@ MINIMUM_SAMPLES_TO_IDENTIFY = 20 DEFAULT_MAX_QUANTILE_SIZE = 20 DEFAULT_NUM_SAMPLES = 100000 -MAX_FEATURE_VALUE = 6.0 +# Achieved by probability feature transformation on clamped limits (1e-5, 1-1e-5) +MAX_FEATURE_VALUE = 11.513 MIN_FEATURE_VALUE = MAX_FEATURE_VALUE * -1 EPS = 1e-6 @@ -48,6 +49,9 @@ def identify_parameter( skip_quantiles=False, feature_type=None, ): + force_boxcox = feature_type == identify_types.BOXCOX + force_continuous = feature_type == identify_types.CONTINUOUS + force_quantile = feature_type == identify_types.QUANTILE if feature_type is None: feature_type = identify_types.identify_type(values, max_unique_enum_values) @@ -57,14 +61,9 @@ def identify_parameter( stddev = 1.0 possible_values = None quantiles = None - assert feature_type in [ - identify_types.CONTINUOUS, - identify_types.PROBABILITY, - identify_types.BINARY, - identify_types.ENUM, - identify_types.CONTINUOUS_ACTION, - identify_types.DO_NOT_PREPROCESS, - ], "unknown type {}".format(feature_type) + assert feature_type in identify_types.FEATURE_TYPES, "unknown type {}".format( + feature_type + ) assert ( len(values) >= MINIMUM_SAMPLES_TO_IDENTIFY ), "insufficient information to identify parameter" @@ -76,8 +75,9 @@ def identify_parameter( mean = float(np.mean(values)) values = values - mean stddev = max(float(np.std(values, ddof=1)), 1.0) - if feature_type == identify_types.CONTINUOUS: - if min_value == max_value: + + if feature_type == identify_types.CONTINUOUS or force_boxcox or force_quantile: + if min_value == max_value and not (force_boxcox or force_quantile): return no_op_feature() k2_original, p_original = stats.normaltest(values) @@ -92,9 +92,13 @@ def identify_parameter( k2_original, p_original, k2_boxcox, p_boxcox ) ) - if lambda_ < 0.9 or lambda_ > 1.1: + if (lambda_ < 0.9 or lambda_ > 1.1 or force_boxcox) and not ( + force_continuous or force_quantile + ): # Lambda is far enough from 1.0 to be worth doing boxcox - if k2_original > k2_boxcox * 10 and k2_boxcox <= quantile_k2_threshold: + if ( + k2_original > k2_boxcox * 10 and k2_boxcox <= quantile_k2_threshold + ) or force_boxcox: # The boxcox output is significantly more normally distributed # than the original data and is normal enough to apply # effectively. 
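A quick check of the new MAX_FEATURE_VALUE above. It assumes the probability transform is the logit -ln(1/p - 1) applied after clamping to (1e-5, 1 - 1e-5), which is consistent with the PROBABILITY preprocessing changed later in this diff:

import math

p_max = 1.0 - 1e-5
logit_max = math.log(p_max / (1.0 - p_max))  # = ln(99999)
print(round(logit_max, 3))  # 11.513, matching MAX_FEATURE_VALUE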
@@ -105,7 +109,7 @@ def identify_parameter( np.isfinite(stddev) and stddev < BOX_COX_MAX_STDDEV and not np.isclose(stddev, 0) - ): + ) or force_boxcox: values = candidate_values boxcox_lambda = float(lambda_) if boxcox_lambda is None or skip_box_cox: @@ -117,7 +121,8 @@ def identify_parameter( boxcox_lambda is None and k2_original > quantile_k2_threshold and (not skip_quantiles) - ): + and not force_continuous + ) or force_quantile: feature_type = identify_types.QUANTILE quantiles = ( np.unique( @@ -192,17 +197,16 @@ def get_feature_start_indices( sorted_features: List[int], normalization_parameters: Dict[int, NormalizationParameters], ): - """ Returns the starting index for each feature in the output feature vector """ + """Returns the starting index for each feature in the output feature vector""" start_indices = [] cur_idx = 0 for feature in sorted_features: np = normalization_parameters[feature] start_indices.append(cur_idx) if np.feature_type == identify_types.ENUM: - assert np.possible_values is not None - # pyre-fixme[6]: Expected `Sized` for 1st param but got - # `Optional[List[int]]`. - cur_idx += len(np.possible_values) + possible_values = np.possible_values + assert possible_values is not None + cur_idx += len(possible_values) else: cur_idx += 1 return start_indices @@ -261,6 +265,9 @@ def get_feature_norm_metadata(feature_name, feature_value_list, norm_params): feature_override = None if norm_params["feature_overrides"] is not None: feature_override = norm_params["feature_overrides"].get(feature_name, None) + feature_override = feature_override or norm_params.get( + "default_feature_override", None + ) feature_values = np.array(feature_value_list, dtype=np.float32) assert not (np.any(np.isinf(feature_values))), "Feature values contain infinity" diff --git a/reagent/preprocessing/postprocessor.py b/reagent/preprocessing/postprocessor.py index 88a168b48..d3476ba69 100644 --- a/reagent/preprocessing/postprocessor.py +++ b/reagent/preprocessing/postprocessor.py @@ -5,8 +5,12 @@ import torch import torch.nn as nn -from reagent.parameters import NormalizationParameters -from reagent.preprocessing.identify_types import CONTINUOUS_ACTION, DO_NOT_PREPROCESS +from reagent.core.parameters import NormalizationParameters +from reagent.preprocessing.identify_types import ( + CONTINUOUS_ACTION, + DISCRETE_ACTION, + DO_NOT_PREPROCESS, +) from reagent.preprocessing.normalization import EPS, get_num_output_features @@ -30,9 +34,10 @@ def __init__( ), "All dimensions of actions should have the same preprocessing" self.feature_type = list(feature_types)[0] assert self.feature_type in { + DISCRETE_ACTION, CONTINUOUS_ACTION, DO_NOT_PREPROCESS, - }, f"{self.feature_type} is not CONTINUOUS_ACTION & DO_NOT_PREPROCESS" + }, f"{self.feature_type} is not DISCRETE_ACTION, CONTINUOUS_ACTION or DO_NOT_PREPROCESS" self.device = torch.device("cuda" if use_gpu else "cpu") @@ -41,11 +46,12 @@ def __init__( self.min_serving_value = torch.tensor( [normalization_parameters[f].min_value for f in sorted_features], device=self.device, - ) + ).float() self.scaling_factor = torch.tensor( [ ( - # pyre-fixme[16]: Optional type has no attribute `__sub__`. + # pyre-fixme[58]: `-` is not supported for operand types + # `Optional[float]` and `Optional[float]`. 
normalization_parameters[f].max_value - normalization_parameters[f].min_value ) @@ -53,14 +59,17 @@ def __init__( for f in sorted_features ], device=self.device, - ) + ).float() + self.almost_one = torch.tensor(1.0 - EPS, device=self.device).float() def input_prototype(self) -> Tuple[torch.Tensor]: return (torch.randn(1, self.num_output_features),) def forward(self, input: torch.Tensor) -> torch.Tensor: if self.feature_type == CONTINUOUS_ACTION: + # Please don't re-order; ONNX messed up tensor type when torch.clamp is + # the first operand. return ( - torch.clamp(input, -1 + EPS, 1 - EPS) + 1 - EPS + self.almost_one + torch.clamp(input, -self.almost_one, self.almost_one) ) * self.scaling_factor + self.min_serving_value return input diff --git a/reagent/preprocessing/preprocessor.py b/reagent/preprocessing/preprocessor.py index b587c6655..174f53c12 100644 --- a/reagent/preprocessing/preprocessor.py +++ b/reagent/preprocessing/preprocessor.py @@ -2,17 +2,17 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging -from typing import Dict, List, Optional, Tuple, cast +from typing import cast, Dict, List, Optional, Tuple import torch -from reagent.parameters import NormalizationParameters -from reagent.preprocessing.identify_types import ENUM, FEATURE_TYPES +from reagent.core.parameters import NormalizationParameters +from reagent.preprocessing.identify_types import DO_NOT_PREPROCESS, ENUM, FEATURE_TYPES from reagent.preprocessing.normalization import ( EPS, MAX_FEATURE_VALUE, MIN_FEATURE_VALUE, ) -from torch.nn import Module, Parameter +from torch.nn import Module, Parameter # @manual="//caffe2:torch" logger = logging.getLogger(__name__) @@ -113,9 +113,12 @@ def input_prototype(self) -> Tuple[torch.Tensor, torch.Tensor]: def forward( self, input: torch.Tensor, input_presence_byte: torch.Tensor ) -> torch.Tensor: - """ Preprocess the input matrix + """Preprocess the input matrix :param input tensor """ + assert ( + input.shape == input_presence_byte.shape + ), f"{input.shape} != {input_presence_byte.shape}" outputs = [] split_input = torch.split(input, self.split_sections, dim=1) # NB: converting to float prevent ASAN heap-buffer-overflow @@ -156,20 +159,13 @@ def forward( ) ptr += 1 self._check_preprocessing_output(new_output, norm_params_list) + if feature_type != DO_NOT_PREPROCESS: + new_output = torch.clamp( + new_output, MIN_FEATURE_VALUE, MAX_FEATURE_VALUE + ) outputs.append(new_output) - if len(outputs) == 1: - return cast( - torch.Tensor, - torch.clamp(outputs[0], MIN_FEATURE_VALUE, MAX_FEATURE_VALUE), - ) - - return cast( - torch.Tensor, - torch.clamp( - torch.cat(outputs, dim=1), MIN_FEATURE_VALUE, MAX_FEATURE_VALUE - ), - ) + return torch.cat(outputs, dim=1) def _preprocess_feature_single_column( self, @@ -218,6 +214,19 @@ def _preprocess_BINARY( # ONNX doesn't support != yet return self.one_tensor - (input == self.zero_tensor).float() + def _create_parameters_CLIP_LOG( + self, begin_index: int, norm_params: List[NormalizationParameters] + ): + pass + + def _preprocess_CLIP_LOG( + self, + begin_index: int, + input: torch.Tensor, + norm_params: List[NormalizationParameters], + ) -> torch.Tensor: + return input.clip(EPS).log() + def _create_parameters_PROBABILITY( self, begin_index: int, norm_params: List[NormalizationParameters] ): @@ -229,7 +238,7 @@ def _preprocess_PROBABILITY( input: torch.Tensor, norm_params: List[NormalizationParameters], ) -> torch.Tensor: - clamped_input = torch.clamp(input, 0.01, 0.99) + clamped_input = torch.clamp(input, 
1e-5, 1 - 1e-5) return self.negative_one_tensor * ( ((self.one_tensor / clamped_input) - self.one_tensor).log() ) @@ -253,7 +262,8 @@ def _create_parameters_CONTINUOUS_ACTION( (torch.ones(len(norm_params), device=self.device) - EPS) * 2 / torch.tensor( - # pyre-fixme[16]: `Optional` has no attribute `__sub__`. + # pyre-fixme[58]: `-` is not supported for operand types + # `Optional[float]` and `Optional[float]`. [p.max_value - p.min_value for p in norm_params], device=self.device, ), @@ -273,6 +283,19 @@ def _preprocess_CONTINUOUS_ACTION( ) * scaling_factor + min_training_value return torch.clamp(continuous_action, -1 + EPS, 1 - EPS) + def _create_parameters_DISCRETE_ACTION( + self, begin_index: int, norm_params: List[NormalizationParameters] + ): + pass + + def _preprocess_DISCRETE_ACTION( + self, + begin_index: int, + input: torch.Tensor, + norm_params: List[NormalizationParameters], + ): + return input + def _create_parameters_CONTINUOUS( self, begin_index: int, norm_params: List[NormalizationParameters] ): @@ -486,7 +509,9 @@ def _create_parameters_ENUM( self._create_parameter( begin_index, "enum_values", - torch.tensor(norm_params.possible_values, device=self.device).unsqueeze(0), + torch.tensor( + norm_params.possible_values, device=self.device, dtype=torch.float + ).unsqueeze(0), ) def _preprocess_ENUM( @@ -558,7 +583,7 @@ def _check_preprocessing_output(self, batch, norm_params): feature_type = norm_params[0].feature_type min_value, max_value = batch.min(), batch.max() - if feature_type in ("BOXCOX", "CONTINUOUS"): + if feature_type in ("BOXCOX", "CONTINUOUS", "DO_NOT_PREPROCESS", "CLIP_LOG"): # Continuous features may be in range (-inf, inf) pass elif max_value.item() > MAX_FEATURE_VALUE: diff --git a/reagent/preprocessing/sparse_preprocessor.py b/reagent/preprocessing/sparse_preprocessor.py new file mode 100644 index 000000000..849fded1c --- /dev/null +++ b/reagent/preprocessing/sparse_preprocessor.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
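# Illustrative usage sketch for this new module (hypothetical variable names;
# assumes a valid rlt.ModelFeatureConfig and raw serving sparse features):
#
#     sparse_prep = make_sparse_preprocessor(feature_config, torch.device("cpu"))
#     id_list_out = sparse_prep.preprocess_id_list(raw_id_list)            # Dict[str, (offsets, ids)]
#     id_score_out = sparse_prep.preprocess_id_score_list(raw_id_score)    # Dict[str, (offsets, ids, scores)]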
+ +import abc +import logging +from typing import Dict, Tuple + +import reagent.core.types as rlt +import torch + + +logger = logging.getLogger(__name__) + + +class MapIDList(torch.nn.Module): + @abc.abstractmethod + def forward(self, raw_ids: torch.Tensor) -> torch.Tensor: + pass + + +class MapIDScoreList(torch.nn.Module): + @abc.abstractmethod + def forward( + self, raw_ids: torch.Tensor, raw_values: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + pass + + +class ExactMapIDList(MapIDList): + def __init__(self): + super().__init__() + + def forward(self, raw_ids: torch.Tensor) -> torch.Tensor: + return raw_ids + + +class ExactMapIDScoreList(MapIDScoreList): + def __init__(self): + super().__init__() + + def forward( + self, raw_ids: torch.Tensor, raw_values: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + return ( + raw_ids, + raw_values, + ) + + +class HashingMapIDList(MapIDList): + def __init__(self, embedding_table_size): + super().__init__() + self.embedding_table_size = embedding_table_size + + def forward(self, raw_ids: torch.Tensor) -> torch.Tensor: + hashed_ids = torch.ops.fb.sigrid_hash( + raw_ids, + salt=0, + maxValue=self.embedding_table_size, + hashIntoInt32=False, + ) + return hashed_ids + + +class HashingMapIDScoreList(MapIDScoreList): + def __init__(self, embedding_table_size): + super().__init__() + self.embedding_table_size = embedding_table_size + + def forward( + self, raw_ids: torch.Tensor, raw_values: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + hashed_ids = torch.ops.fb.sigrid_hash( + raw_ids, + salt=0, + maxValue=self.embedding_table_size, + hashIntoInt32=False, + ) + return ( + hashed_ids, + raw_values, + ) + + +def make_sparse_preprocessor( + feature_config: rlt.ModelFeatureConfig, device: torch.device +): + """Helper to initialize, for scripting SparsePreprocessor""" + # TODO: Add option for simple modulo and other hash functions + id2name: Dict[int, str] = feature_config.id2name + name2id: Dict[str, int] = feature_config.name2id + + def _make_id_list_mapper(config: rlt.IdListFeatureConfig) -> MapIDList: + mapping_config = feature_config.id_mapping_config[config.id_mapping_name] + if mapping_config.hashing: + return HashingMapIDList(mapping_config.embedding_table_size) + else: + return ExactMapIDList() + + id_list_mappers = { + config.feature_id: _make_id_list_mapper(config) + for config in feature_config.id_list_feature_configs + } + + def _make_id_score_list_mapper( + config: rlt.IdScoreListFeatureConfig, + ) -> MapIDScoreList: + mapping_config = feature_config.id_mapping_config[config.id_mapping_name] + if mapping_config.hashing: + return HashingMapIDScoreList(mapping_config.embedding_table_size) + else: + return ExactMapIDScoreList() + + id_score_list_mappers = { + config.feature_id: _make_id_score_list_mapper(config) + for config in feature_config.id_score_list_feature_configs + } + sparse_preprocessor = SparsePreprocessor( + id2name, name2id, id_list_mappers, id_score_list_mappers, device + ) + return torch.jit.script(sparse_preprocessor) + + +class SparsePreprocessor(torch.nn.Module): + """Performs preprocessing for sparse features (i.e. 
id_list, id_score_list) + + Functionality includes: + (1) changes keys from feature_id to feature_name, for better debuggability + (2) maps sparse ids to embedding table indices based on id_mapping + (3) filters out ids which aren't in the id2name + """ + + def __init__( + self, + id2name: Dict[int, str], + name2id: Dict[str, int], + id_list_mappers: Dict[int, MapIDList], + id_score_list_mappers: Dict[int, MapIDScoreList], + device: torch.device, + ) -> None: + super().__init__() + assert set(id2name.keys()) == set(id_list_mappers.keys()) | set( + id_score_list_mappers.keys() + ) + self.id2name: Dict[int, str] = torch.jit.Attribute(id2name, Dict[int, str]) + self.name2id: Dict[str, int] = torch.jit.Attribute(name2id, Dict[str, int]) + self.id_list_mappers = torch.nn.ModuleDict( + {id2name[k]: v for k, v in id_list_mappers.items()} + ) + self.id_score_list_mappers = torch.nn.ModuleDict( + {id2name[k]: v for k, v in id_score_list_mappers.items()} + ) + self.device = device + + @torch.jit.export + def preprocess_id_list( + self, id_list: Dict[int, Tuple[torch.Tensor, torch.Tensor]] + ) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]: + """ + Input: rlt.ServingIdListFeature + Output: rlt.IdListFeature + """ + ret: Dict[str, Tuple[torch.Tensor, torch.Tensor]] = {} + for name, mapper in self.id_list_mappers.items(): + fid = self.name2id[name] + if fid in id_list: + offsets, values = id_list[fid] + idx_values = mapper(values) + ret[name] = ( + offsets.to(self.device), + idx_values.to(self.device), + ) + return ret + + @torch.jit.export + def preprocess_id_score_list( + self, id_score_list: Dict[int, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]] + ) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: + """ + Input: rlt.ServingIdScoreListFeature + Output: rlt.IdScoreListFeature + """ + ret: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]] = {} + for name, mapper in self.id_score_list_mappers.items(): + fid = self.name2id[name] + if fid in id_score_list: + offsets, keys, values = id_score_list[fid] + idx_keys, weights = mapper(keys, values) + ret[name] = ( + offsets.to(self.device), + idx_keys.to(self.device), + weights.to(self.device).float(), + ) + return ret diff --git a/reagent/preprocessing/sparse_to_dense.py b/reagent/preprocessing/sparse_to_dense.py index bebcb153f..7acd318b2 100644 --- a/reagent/preprocessing/sparse_to_dense.py +++ b/reagent/preprocessing/sparse_to_dense.py @@ -4,8 +4,6 @@ from typing import Dict, List, Tuple -# @manual=third-party//pandas:pandas-py -import pandas as pd import torch from reagent.preprocessing import normalization @@ -13,7 +11,7 @@ class SparseToDenseProcessor: def __init__( self, sorted_features: List[int], set_missing_value_to_zero: bool = False - ): + ) -> None: self.sorted_features = sorted_features self.set_missing_value_to_zero = set_missing_value_to_zero @@ -28,13 +26,15 @@ class StringKeySparseToDenseProcessor(SparseToDenseProcessor): def __init__( self, sorted_features: List[int], set_missing_value_to_zero: bool = False - ): + ) -> None: super().__init__(sorted_features, set_missing_value_to_zero) self._sparse_to_dense = PythonSparseToDenseProcessor( sorted_features, set_missing_value_to_zero ) - def process(self, sparse_data) -> Tuple[torch.Tensor, torch.Tensor]: + def process( + self, sparse_data: List[Dict[str, float]] + ) -> Tuple[torch.Tensor, torch.Tensor]: # Convert all keys to integers sparse_data_int = [] for sd in sparse_data: @@ -48,7 +48,7 @@ def process(self, sparse_data) -> Tuple[torch.Tensor, torch.Tensor]: 
class PythonSparseToDenseProcessor(SparseToDenseProcessor): def __init__( self, sorted_features: List[int], set_missing_value_to_zero: bool = False - ): + ) -> None: super().__init__(sorted_features, set_missing_value_to_zero) self.feature_to_index: Dict[int, int] = { f: i for i, f in enumerate(sorted_features) @@ -60,17 +60,18 @@ def process( missing_value = normalization.MISSING_VALUE if self.set_missing_value_to_zero: missing_value = 0.0 - state_features_df = pd.DataFrame(sparse_data).fillna(missing_value) - # Add columns identified by normalization, but not present in batch - for col in self.sorted_features: - # pyre-fixme[16]: Optional type has no attribute `columns`. - if col not in state_features_df.columns: - # pyre-fixme[16]: Optional type has no attribute `__setitem__`. - state_features_df[col] = missing_value - values = torch.from_numpy( - # pyre-fixme[16]: Optional type has no attribute `__getitem__`. - state_features_df[self.sorted_features].to_numpy() - ).float() + values = torch.nan_to_num( + torch.FloatTensor( + [ + [ + row[col] if col in row else missing_value + for col in self.sorted_features + ] + for row in sparse_data + ] + ), + nan=missing_value, + ) if self.set_missing_value_to_zero: # When we set missing values to 0, we don't know what is and isn't missing presence = torch.ones_like(values, dtype=torch.bool) diff --git a/reagent/preprocessing/transforms.py b/reagent/preprocessing/transforms.py index b21f08431..9e1f17987 100644 --- a/reagent/preprocessing/transforms.py +++ b/reagent/preprocessing/transforms.py @@ -2,18 +2,26 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging -from typing import Callable, Dict, List, Optional, Tuple +from typing import Callable, Dict, List, Optional import numpy as np +import reagent.core.types as rlt import torch -from reagent.parameters import NormalizationData +import torch.nn.functional as F +from reagent.core.parameters import NormalizationData from reagent.preprocessing.preprocessor import Preprocessor +from reagent.preprocessing.sparse_preprocessor import make_sparse_preprocessor +from torchrec.sparse.jagged_tensor import KeyedJaggedTensor logger = logging.getLogger(__name__) class Compose: + """ + Applies an iterable collection of transform functions + """ + def __init__(self, *transforms): self.transforms = transforms @@ -47,7 +55,7 @@ def __call__(self, data): class Lambda: - """ For simple transforms """ + """Applies an arbitrary callable transform""" def __init__(self, keys: List[str], fn: Callable): self.keys = keys @@ -59,6 +67,22 @@ def __call__(self, data): return data +class SelectValuePresenceColumns: + """ + Select columns from value-presence source key + """ + + def __init__(self, source: str, dest: str, indices: List[int]): + self.source = source + self.dest = dest + self.indices = indices + + def __call__(self, data): + value, presence = data[self.source] + data[self.dest] = (value[:, self.indices], presence[:, self.indices]) + return data + + class DenseNormalization: """ Normalize the `keys` using `normalization_data`. 
@@ -94,13 +118,283 @@ def __call__(self, data): for k in self.keys: value, presence = data[k] - data[k] = self._preprocessor( - value.to(self.device), presence.to(self.device) + value, presence = value.to(self.device), presence.to(self.device) + presence[torch.isnan(value)] = 0 + value[torch.isnan(value)] = 0 + data[k] = self._preprocessor(value, presence).float() + + return data + + +def _build_id_2_embedding_size( + keys: List[str], + feature_configs: List[List[rlt.BaseDataClass]], + id_mapping_configs: List[Dict[str, rlt.IdMappingConfig]], +): + """Sparse feature id -> embedding_table_size in corresponding id_mapping_config""" + id_2_embedding_size = {} + for key, feature_config, id_mapping_config in zip( + keys, feature_configs, id_mapping_configs + ): + id_2_embedding_size[key] = { + config.feature_id: id_mapping_config[ + config.id_mapping_name + ].embedding_table_size + for config in feature_config + } + return id_2_embedding_size + + +def _build_id_2_hashing( + keys: List[str], + feature_configs: List[List[rlt.BaseDataClass]], + id_mapping_configs: List[Dict[str, rlt.IdMappingConfig]], +): + """Sparse feature id -> hashing boolean in corresponding id_mapping_config""" + id_2_hashing = {} + for key, feature_config, id_mapping_config in zip( + keys, feature_configs, id_mapping_configs + ): + id_2_hashing[key] = { + config.feature_id: id_mapping_config[config.id_mapping_name].hashing + for config in feature_config + } + return id_2_hashing + + +def _build_id_2_name( + keys: List[str], + feature_configs: List[List[rlt.BaseDataClass]], +): + """Sparse feature id -> sparse feature name""" + id_2_name = {} + for key, feature_config in zip(keys, feature_configs): + id_2_name[key] = {config.feature_id: config.name for config in feature_config} + return id_2_name + + +class IDListFeatures: + """ + Process data read by SparseFeatureMetadata(sparse_feature_type=MULTI_CATEGORY) to KeyedJaggedTensor + + For source data format {key: (offsets, ids)}, see examples in fbcode/caffe2/caffe2/fb/proto/io_metadata.thrift: + https://fburl.com/code/ndbg93s0 + + For target data format, see examples in fbcode/torchrec/sparse/jagged_tensor.py: + https://fburl.com/code/iad11zzc + """ + + def __init__( + self, + keys: List[str], + feature_configs: List[List[rlt.IdListFeatureConfig]], + id_mapping_configs: List[Dict[str, rlt.IdMappingConfig]], + ): + """ + Args: + keys (List[str]): a list of columns to apply this transform + feature_configs: a list of feature configs, corresponding to each column in keys + id_mapping_configs: a list of id mapping configs, corresponding to each column in keys + """ + self.keys = keys + self.feature_configs = feature_configs + self.id_mapping_configs = id_mapping_configs + assert len(self.feature_configs) > 0, "No id list feature config provided" + self._id_2_embed_size = _build_id_2_embedding_size( + keys, + # pyre-fixme[6]: For 2nd param expected `List[List[BaseDataClass]]` but + # got `List[List[IdListFeatureConfig]]`. + feature_configs, + id_mapping_configs, + ) + self._id_2_hashing = _build_id_2_hashing( + keys, + # pyre-fixme[6]: For 2nd param expected `List[List[BaseDataClass]]` but + # got `List[List[IdListFeatureConfig]]`. + feature_configs, + id_mapping_configs, + ) + # pyre-fixme[6]: For 2nd param expected `List[List[BaseDataClass]]` but got + # `List[List[IdListFeatureConfig]]`. 
+ self._id_2_name = _build_id_2_name(keys, feature_configs) + + def __call__(self, data): + for k in self.keys: + jagged_tensor_keys: List[str] = [] + values: List[torch.Tensor] = [] + lengths: List[torch.Tensor] = [] + + for feature_id in data[k].keys(): + feature_name = self._id_2_name[k][feature_id] + jagged_tensor_keys.append(feature_name) + offset, ids = data[k][feature_id] + offset = torch.cat([offset, torch.tensor([len(ids)])]) + lengths.append(offset[1:] - offset[:-1]) + hashing = self._id_2_hashing[k][feature_id] + if hashing: + embed_size = self._id_2_embed_size[k][feature_id] + hashed_ids = torch.ops.fb.sigrid_hash( + ids, + salt=0, + maxValue=embed_size, + hashIntoInt32=False, + ) + values.append(hashed_ids) + else: + values.append(ids) + + data[k] = KeyedJaggedTensor( + keys=jagged_tensor_keys, + values=torch.cat(values), + lengths=torch.cat(lengths), + ) + + return data + + +class IDScoreListFeatures: + """ + Process data read by SparseFeatureMetadata(sparse_feature_type=WEIGHTED_MULTI_CATEGORY) to KeyedJaggedTensor + + For source data format {key: (offsets, ids, weights)}, see examples in fbcode/caffe2/caffe2/fb/proto/io_metadata.thrift: + https://fburl.com/code/ndbg93s0 + + For target data format, see examples in fbcode/torchrec/sparse/jagged_tensor.py: + https://fburl.com/code/iad11zzc + """ + + def __init__( + self, + keys: List[str], + feature_configs: List[List[rlt.IdScoreListFeatureConfig]], + id_mapping_configs: List[Dict[str, rlt.IdMappingConfig]], + ): + """ + Args: + keys (List[str]): a list of columns to apply this transform + feature_configs: a list of feature configs, corresponding to each column in keys + id_mapping_configs: a list of id mapping configs, corresponding to each column in keys + """ + self.keys = keys + self.feature_configs = feature_configs + self.id_mapping_configs = id_mapping_configs + assert len(self.keys) == len( + self.feature_configs + ), "There should be as many keys as feature_configs" + self._id_2_embed_size = _build_id_2_embedding_size( + keys, + # pyre-fixme[6]: For 2nd param expected `List[List[BaseDataClass]]` but + # got `List[List[IdScoreListFeatureConfig]]`. + feature_configs, + id_mapping_configs, + ) + self._id_2_hashing = _build_id_2_hashing( + keys, + # pyre-fixme[6]: For 2nd param expected `List[List[BaseDataClass]]` but + # got `List[List[IdScoreListFeatureConfig]]`. + feature_configs, + id_mapping_configs, + ) + # pyre-fixme[6]: For 2nd param expected `List[List[BaseDataClass]]` but got + # `List[List[IdScoreListFeatureConfig]]`. 
+ self._id_2_name = _build_id_2_name(keys, feature_configs) + + def __call__(self, data): + for k in self.keys: + jagged_tensor_keys: List[str] = [] + values: List[torch.Tensor] = [] + lengths: List[torch.Tensor] = [] + weights: List[torch.Tensor] = [] + + for feature_id in data[k].keys(): + feature_name = self._id_2_name[k][feature_id] + jagged_tensor_keys.append(feature_name) + offset, ids, weight = data[k][feature_id] + offset = torch.cat([offset, torch.tensor([len(ids)])]) + lengths.append(offset[1:] - offset[:-1]) + weights.append(weight) + hashing = self._id_2_hashing[k][feature_id] + if hashing: + embed_size = self._id_2_embed_size[k][feature_id] + hashed_ids = torch.ops.fb.sigrid_hash( + ids, + salt=0, + maxValue=embed_size, + hashIntoInt32=False, + ) + values.append(hashed_ids) + else: + values.append(ids) + + data[k] = KeyedJaggedTensor( + keys=jagged_tensor_keys, + values=torch.cat(values), + lengths=torch.cat(lengths), + weights=torch.cat(weights), ) return data +class MapIDListFeatures: + """ + Deprecated: Applies a SparsePreprocessor (see sparse_preprocessor.SparsePreprocessor) + + This class should be deprecated in favor of IDListFeatures and IDScoreListFeatures + """ + + def __init__( + self, + id_list_keys: List[str], + id_score_list_keys: List[str], + feature_config: rlt.ModelFeatureConfig, + device: torch.device, + ): + self.id_list_keys = id_list_keys + self.id_score_list_keys = id_score_list_keys + assert ( + set(id_list_keys).intersection(set(id_score_list_keys)) == set() + ), f"id_list_keys: {id_list_keys}; id_score_list_keys: {id_score_list_keys}" + self.feature_config = feature_config + self.sparse_preprocessor = make_sparse_preprocessor( + feature_config=feature_config, device=device + ) + + def __call__(self, data): + for k in self.id_list_keys + self.id_score_list_keys: + # if no ids, it means we're not using sparse features. + if not self.feature_config.id2name or k not in data: + data[k] = None + continue + + assert isinstance(data[k], dict), f"{k} has type {type(data[k])}. {data[k]}" + if k in self.id_list_keys: + data[k] = self.sparse_preprocessor.preprocess_id_list(data[k]) + else: + data[k] = self.sparse_preprocessor.preprocess_id_score_list(data[k]) + return data + + +class OneHotActions: + """ + Keys should be in the set {0,1,2,...,num_actions}, where + a value equal to num_actions denotes that it's not valid. + """ + + def __init__(self, keys: List[str], num_actions: int): + self.keys = keys + self.num_actions = num_actions + + def __call__(self, data): + for k in self.keys: + # we do + 1 and then index up to n because value could be num_actions, + # in which case the result is a zero-vector + data[k] = F.one_hot(data[k], self.num_actions + 1).index_select( + -1, torch.arange(self.num_actions) + ) + return data + + class ColumnVector: """ Ensure that the keys are column vectors @@ -114,11 +408,17 @@ def __call__(self, data): raw_value = data[k] if isinstance(raw_value, tuple): value, _presence = raw_value - - if isinstance(raw_value, list): + elif isinstance(raw_value, list): # TODO(T67265031): make mdp_id a tensor, which we will be able to # when column type changes to int value = np.array(raw_value) + elif isinstance(raw_value, torch.Tensor): + # TODO(T67265031): this is an identity mapping, which is only necessary + # when mdp_id in traced batch preprocessors becomes a tensor (mdp_id + # is a list of strings in normal batch preprocessors). 
+ value = raw_value + else: + raise NotImplementedError(f"value of type {type(raw_value)}.") assert value.ndim == 1 or ( value.ndim == 2 and value.shape[1] == 1 @@ -128,9 +428,36 @@ def __call__(self, data): return data +class ExtractValue: + """ + Input is of format list(tuple(tensor, tensor)) - list(tuple(value, presence)). + Output is of format list(tensor) with only the value extracted for the batch. + + Note that this transform works on array data type only. + """ + + def __init__(self, keys: List[str]): + self.keys = keys + + def __call__(self, data): + extra_val_list = [] + for k in self.keys: + raw_list = data[k] + assert isinstance( + raw_list, list + ), f"Extra key - {k} must be of format list(tuple(tensor, tensor))" + assert len(raw_list) != 0, f"Extra key - {k} cannot be an empty list" + for raw_value in raw_list: + value, presence = raw_value + extra_val_list.append(value) + data[k] = extra_val_list + return data + + class MaskByPresence: """ Expect data to be (value, presence) and return value * presence. + This zeros out values that aren't present. """ def __init__(self, keys: List[str]): @@ -154,8 +481,9 @@ def __call__(self, data): class StackDenseFixedSizeArray: """ - Expect data to be List of (Value, Presence), and output a tensor of shape - (batch_size, feature_dim). + If data is a tensor, ensures it has the correct shape. If data is a list of + (value, presence) discards the presence tensors and concatenates the values + to output a tensor of shape (batch_size, feature_dim). """ def __init__(self, keys: List[str], size: int, dtype=torch.float): @@ -183,40 +511,63 @@ def __call__(self, data): class FixedLengthSequences: """ - Expects the key to be `Dict[Int, Tuple[Tensor, T]]`. - The sequence_id is the key of the dict. The first element of the tuple - is the offset for each example, which is expected to be in fixed interval. - If `to_key` is set, extract `T` to that key. Otherwise, put `T` back to `key` + Does two things: + 1. makes sure each sequence in the list of keys has the expected fixed length + 2. if to_keys is provided, copies the relevant sequence_id to the new key, + otherwise overwrites the old key + + Expects each data[key] to be `Dict[Int, Tuple[Tensor, T]]`. Where: + - key is the feature id + - sequence_id is the key of the dict data[key] + - The first element of the tuple is the offset for each example, which is expected to be in fixed interval. 
+ - The second element is the data at each step in the sequence This is mainly for FB internal use, see fbcode/caffe2/caffe2/fb/proto/io_metadata.thrift for the data format extracted from SequenceFeatureMetadata + + NOTE: this is not product between two lists (keys and to_keys); + it's setting keys[sequence_id] to to_keys in a parallel way """ def __init__( self, - key: str, + keys: List[str], sequence_id: int, - expected_length: int, + expected_length: Optional[int] = None, *, - to_key: Optional[str] = None, + to_keys: Optional[List[str]] = None, ): - self.key = key + self.keys = keys self.sequence_id = sequence_id - self.to_key = to_key or key + self.to_keys = to_keys or keys + assert len(self.to_keys) == len(keys) self.expected_length = expected_length def __call__(self, data): - offsets, value = data[self.key][self.sequence_id] - - expected_offsets = torch.arange( - 0, offsets.shape[0] * self.expected_length, self.expected_length - ) - assert all( - expected_offsets == offsets - ), f"Unexpected offsets for {self.key} {self.sequence_id}: {offsets}" + for key, to_key in zip(self.keys, self.to_keys): + offsets, value = data[key][self.sequence_id] + # TODO assert regarding offsets length compared to value + expected_length = self.expected_length + if expected_length is None: + if len(offsets) > 1: + # If batch size is larger than 1, just use the offsets + expected_length = (offsets[1] - offsets[0]).item() + else: + # If batch size is 1 + expected_length = value[0].size(0) + self.expected_length = expected_length + + # some check that all arrays have the same length + last_len = (value[0].size(0) - offsets[-1]).view(1) + lengths = torch.cat((torch.diff(offsets), last_len)) + length = torch.unique(lengths) + if not (len(length) == 1 and length == torch.tensor(self.expected_length)): + raise ValueError( + f"Expected all batches for {key} to have {expected_length} items, but got sizes {lengths}" + ) - data[self.to_key] = value + data[to_key] = value return data @@ -239,3 +590,376 @@ def __call__(self, data): data[k] = value.view(-1, self.slate_size, dim) return data + + +class VarLengthSequences: + """ + Like FixedLengthSequences, but doesn't require the sequence-lengths to be the same. Instead, + the largest slate size from the batch is used. For batches with smaller + slate sizes, the values are padded with zeros. + Additionally a presence tensor is produced to indicate which elements are present + vs padded. 
+ The item presense tensor is a float boolean tensor of shape `[B, max_slate_size]` + """ + + def __init__( + self, + keys: List[str], + sequence_id: int, + *, + to_keys: Optional[List[str]] = None, + to_keys_item_presence: Optional[List[str]] = None, + ): + self.keys = keys + self.sequence_id = sequence_id + self.to_keys = to_keys or keys + self.to_keys_item_presence = to_keys_item_presence or [ + k + "_item_presence" for k in self.to_keys + ] + assert len(self.to_keys) == len(keys) + + def __call__(self, data): + for key, to_key, to_key_item_presence in zip( + self.keys, self.to_keys, self.to_keys_item_presence + ): + # ignore the feature presence + offsets, (value, presence) = data[key][self.sequence_id] + + # compute the length of each observation + lengths = torch.diff( + torch.cat( + ( + offsets, + torch.tensor( + [value.shape[0]], dtype=offsets.dtype, device=offsets.device + ), + ) + ) + ) + + num_obs = len(lengths) + max_len = lengths.max().item() + self.max_len = max_len + feature_dim = value.shape[1] + + # create an empty 2d tensor to store the amended tensor + # the new shape should be the maximum length of the observations times the number of observations, and the number of features + new_shape = (num_obs * max_len, feature_dim) + padded_value = torch.zeros( + *new_shape, dtype=value.dtype, device=value.device + ) + padded_presence = torch.zeros( + *new_shape, dtype=presence.dtype, device=presence.device + ) + + # create a tensor of indices to scatter the values to + indices = torch.cat( + [ + torch.arange(lengths[i], device=value.device) + i * max_len + for i in range(num_obs) + ] + ) + + # insert the values into the padded tensor + padded_value[indices] = value + padded_presence[indices] = presence + + # get the item presence tensor + item_presence = torch.cat( + [ + (torch.arange(max_len, device=value.device) < lengths[i]).float() + for i in range(num_obs) + ] + ) + + item_presence = item_presence.view(-1, max_len) + + data[to_key] = (padded_value, padded_presence) + data[to_key_item_presence] = item_presence + + return data + + +class FixedLengthSequenceDenseNormalization: + """ + Combines the FixedLengthSequences, DenseNormalization, and SlateView transforms + """ + + def __init__( + self, + keys: List[str], + sequence_id: int, + normalization_data: NormalizationData, + expected_length: Optional[int] = None, + device: Optional[torch.device] = None, + to_keys: Optional[List[str]] = None, + ): + to_keys = to_keys or [f"{k}:{sequence_id}" for k in keys] + self.fixed_length_sequences = FixedLengthSequences( + keys, sequence_id, to_keys=to_keys, expected_length=expected_length + ) + self.dense_normalization = DenseNormalization( + to_keys, normalization_data, device=device + ) + # We will override this in __call__() + self.slate_view = SlateView(to_keys, slate_size=-1) + + def __call__(self, data): + data = self.fixed_length_sequences(data) + data = self.dense_normalization(data) + self.slate_view.slate_size = self.fixed_length_sequences.expected_length + return self.slate_view(data) + + +class VarLengthSequenceDenseNormalization: + """ + Combines the VarLengthSequences, DenseNormalization, and SlateView transforms. + For SlateView we infer the slate size at runtime and patch the transform. 
+ """ + + def __init__( + self, + keys: List[str], + sequence_id: int, + normalization_data: NormalizationData, + to_keys_item_presence: Optional[List[str]] = None, + device: Optional[torch.device] = None, + to_keys: Optional[List[str]] = None, + ): + to_keys = to_keys or [f"{k}:{sequence_id}" for k in keys] + self.var_length_sequences = VarLengthSequences( + keys, + sequence_id, + to_keys=to_keys, + to_keys_item_presence=to_keys_item_presence, + ) + self.dense_normalization = DenseNormalization( + to_keys, normalization_data, device=device + ) + # We will override slate_size in __call__() + self.slate_view = SlateView(to_keys, slate_size=-1) + + def __call__(self, data): + data = self.var_length_sequences(data) + data = self.dense_normalization(data) + self.slate_view.slate_size = ( + self.var_length_sequences.max_len + ) # this assumes that max_len is the same for all all keys + return self.slate_view(data) + + +class AppendConstant: + """ + Append a column of constant value at the beginning of the specified dimension + Can be used to add a column of "1" to the Linear Regression input data to capture intercept/bias + """ + + def __init__(self, keys: List[str], dim: int = -1, const: float = 1.0): + self.keys = keys + self.dim = dim + self.const = const + + def __call__(self, data): + for k in self.keys: + value = data[k] + extra_col = self.const * torch.ones( + value.shape[:-1], device=value.device + ).unsqueeze(-1) + data[k] = torch.cat((extra_col, value), dim=self.dim) + return data + + +class UnsqueezeRepeat: + """ + This transform adds an extra dimension to the tensor and repeats + the tensor along that dimension + """ + + def __init__(self, keys: List[str], dim: int, num_repeat: int = 1): + self.keys = keys + self.dim = dim + self.num_repeat = num_repeat + + def __call__(self, data): + for k in self.keys: + data[k] = data[k].unsqueeze(self.dim) + if self.num_repeat != 1: + repeat_counters = [1 for _ in range(data[k].ndim)] + repeat_counters[self.dim] = self.num_repeat + data[k] = data[k].repeat(*repeat_counters) + return data + + +def _get_product_features(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + """ + Get outer product of 2 tensors along the last dimension. + All dimensions except last are preserved. The last dimension is replaced + with flattened outer products of last-dimension-vectors from input tensors + + This is a vectorized implementation of (for 2D case): + for i in range(x.shape[0]): + out[i, :] = torch.outer(x[i, :], y[i, :]).flatten() + + For 2D inputs: + Input shapes: + x: (batch, feature_dim_x) + y: (batch, feature_dim_y) + Output shape: + (batch, feature_dim_x*feature_dim_y) + """ + return torch.einsum("...i,...j->...ij", (x, y)).flatten(start_dim=-2) + + +class OuterProduct: + """ + This transform creates a tensor with an outer product of elements of 2 tensors. + The outer product is stored under the new key. 
+ The 2 input tensors might be dropped, depending on input arguments + """ + + def __init__( + self, + key1: str, + key2: str, + output_key: str, + drop_inputs: bool = False, + ): + self.key1 = key1 + self.key2 = key2 + self.output_key = output_key + self.drop_inputs = drop_inputs + + def __call__(self, data): + x = data[self.key1] + y = data[self.key2] + prod = _get_product_features(x, y) + data[self.output_key] = prod + if self.drop_inputs: + del data[self.key1], data[self.key2] + return data + + +class GetEye: + """ + Place a diagonal tensor into the data dictionary + """ + + def __init__(self, key: str, size: int): + self.key = key + self.size = size + + def __call__(self, data): + x = torch.eye(self.size) + data[self.key] = x + return data + + +def _broadcast_tensors_for_cat( + tensors: List[torch.Tensor], dim: int +) -> List[torch.Tensor]: + """ + Broadcast all tensors so that they could be concatenated along the specific dim. + The tensor shapes have to be broadcastable (after the concatenation dim is taken out) + + Example: + Input tensors of shapes [(10,3,5), (1,3,3)] (dim=2) would get broadcasted to [(10,3,5), (10,3,3)], + so that they could be concatenated along the last dim. + """ + if dim >= 0: + dims = [dim] * len(tensors) + else: + dims = [t.ndim + dim for t in tensors] + shapes = [list(t.shape) for t in tensors] + for s, d in zip(shapes, dims): + s.pop(d) + shapes_except_cat_dim = [tuple(s) for s in shapes] + broadcast_shape = torch.broadcast_shapes(*shapes_except_cat_dim) + final_shapes = [list(broadcast_shape) for t in tensors] + for s, t, d in zip(final_shapes, tensors, dims): + s.insert(d, t.shape[dim]) + final_shapes = [tuple(s) for s in final_shapes] + return [t.expand(s) for t, s in zip(tensors, final_shapes)] + + +class Cat: + """ + This transform concatenates the tensors along a specified dim + """ + + def __init__( + self, input_keys: List[str], output_key: str, dim: int, broadcast: bool = True + ): + self.input_keys = input_keys + self.output_key = output_key + self.dim = dim + self.broadcast = broadcast + + def __call__(self, data): + tensors = [] + for k in self.input_keys: + tensors.append(data[k]) + if self.broadcast: + tensors = _broadcast_tensors_for_cat(tensors, self.dim) + data[self.output_key] = torch.cat(tensors, dim=self.dim) + return data + + +class Rename: + """ + Change key names + """ + + def __init__(self, old_names: List[str], new_names: List[str]): + self.old_names = old_names + self.new_names = new_names + + def __call__(self, data): + new_data = dict(data) + for o, n in zip(self.old_names, self.new_names): + new_data[n] = new_data.pop(o) + return new_data + + +class Filter: + """ + Remove some keys from the dict. 
+ Can specify keep_keys (they will be kept) or remove_keys (they will be removed) + """ + + def __init__( + self, + *, + keep_keys: Optional[List[str]] = None, + remove_keys: Optional[List[str]] = None, + ): + assert (keep_keys is None) != (remove_keys is None) + self.keep_keys = keep_keys + self.remove_keys = remove_keys + + def __call__(self, data): + if self.keep_keys: + new_data = {} + for k in self.keep_keys: + if k in data: + new_data[k] = data[k] + else: + new_data = dict(data) + for k in self.remove_keys: + if k in new_data: + del new_data[k] + return new_data + + +class ToDtype: + """ + Convert tensors to a specific dtype + """ + + def __init__(self, dtypes: Dict[str, torch.dtype]): + self.dtypes = dtypes + + def __call__(self, data): + new_data = dict(data) + for key, dtype in self.dtypes.items(): + new_data[key] = data[key].to(dtype=dtype) + return new_data diff --git a/reagent/preprocessing/types.py b/reagent/preprocessing/types.py new file mode 100644 index 000000000..0fc395814 --- /dev/null +++ b/reagent/preprocessing/types.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + + +class InputColumn(object): + STATE_FEATURES = "state_features" + STATE_SEQUENCE_FEATURES = "state_sequence_features" + STATE_ID_LIST_FEATURES = "state_id_list_features" + STATE_ID_SCORE_LIST_FEATURES = "state_id_score_list_features" + NEXT_STATE_FEATURES = "next_state_features" + NEXT_STATE_SEQUENCE_FEATURES = "next_state_sequence_features" + NEXT_STATE_ID_LIST_FEATURES = "next_state_id_list_features" + NEXT_STATE_ID_SCORE_LIST_FEATURES = "next_state_id_score_list_features" + ACTION = "action" + NEXT_ACTION = "next_action" + ACTION_ID_LIST_FEATURES = "action_id_list_features" + ACTION_ID_SCORE_LIST_FEATURES = "action_id_score_list_features" + NEXT_ACTION_ID_LIST_FEATURES = "next_action_id_list_features" + NEXT_ACTION_ID_SCORE_LIST_FEATURES = "next_action_id_score_list_features" + POSSIBLE_ACTIONS = "possible_actions" + POSSIBLE_ACTIONS_MASK = "possible_actions_mask" + POSSIBLE_NEXT_ACTIONS = "possible_next_actions" + POSSIBLE_NEXT_ACTIONS_MASK = "possible_next_actions_mask" + NOT_TERMINAL = "not_terminal" + STEP = "step" + TIME_DIFF = "time_diff" + TIME_SINCE_FIRST = "time_since_first" + MDP_ID = "mdp_id" + SEQUENCE_NUMBER = "sequence_number" + METRICS = "metrics" + REWARD = "reward" + ACTION_PROBABILITY = "action_probability" + SLATE_REWARD = "slate_reward" + POSITION_REWARD = "position_reward" + CANDIDATE_FEATURES = "candidate_features" + NEXT_CANDIDATE_FEATURES = "next_candidate_features" + REWARD_MASK = "reward_mask" + ITEM_MASK = "item_mask" + NEXT_ITEM_MASK = "next_item_mask" + ITEM_PROBABILITY = "item_probability" + NEXT_ITEM_PROBABILITY = "next_item_probability" + EXTRAS = "extras" + SCORES = "scores" + VALID_STEP = "valid_step" + WEIGHT = "weight" + CONTEXT_FEATURES = "context_features" + ARM_FEATURES = "arm_features" + CONTEXT_ARM_FEATURES = "context_arm_features" + ARMS = "arms" + ARM_PRESENCE = "arm_presence" diff --git a/reagent/publishers/__init__.py b/reagent/publishers/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/publishers/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
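# Illustrative sketch of composing the dict-to-dict transforms added in
# reagent/preprocessing/transforms.py above (toy tensors; assumes Compose
# applies the transforms in order and returns the batch dict):
import torch

from reagent.preprocessing.transforms import (
    AppendConstant,
    Compose,
    Filter,
    OneHotActions,
    Rename,
    ToDtype,
)

batch = {
    "state": torch.randn(4, 5),
    "action": torch.tensor([0, 2, 1, 3]),  # 3 == num_actions, i.e. "invalid"
    "reward": torch.randn(4, 1),
}

pipeline = Compose(
    AppendConstant(["state"], const=1.0),      # prepend a bias column: (4, 5) -> (4, 6)
    OneHotActions(["action"], num_actions=3),  # action 3 becomes an all-zero row
    ToDtype({"action": torch.float}),
    Rename(["action"], ["action_one_hot"]),
    Filter(keep_keys=["state", "action_one_hot"]),
)

out = pipeline(batch)
print(out["state"].shape, out["action_one_hot"].shape)  # (4, 6) (4, 3)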
diff --git a/reagent/workflow/publishers/file_system_publisher.py b/reagent/publishers/file_system_publisher.py similarity index 52% rename from reagent/workflow/publishers/file_system_publisher.py rename to reagent/publishers/file_system_publisher.py index 328a65624..f12948950 100644 --- a/reagent/workflow/publishers/file_system_publisher.py +++ b/reagent/publishers/file_system_publisher.py @@ -1,19 +1,22 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging import os -from typing import Optional +from typing import Dict, Optional from reagent.core.dataclasses import dataclass -from reagent.workflow.model_managers.model_manager import ModelManager -from reagent.workflow.publishers.model_publisher import ModelPublisher -from reagent.workflow.result_types import NoPublishingResults -from reagent.workflow.types import RecurringPeriod, RLTrainingOutput +from reagent.core.result_types import NoPublishingResults +from reagent.model_managers.model_manager import ModelManager +from reagent.publishers.model_publisher import ModelPublisher +from reagent.workflow.types import ( + ModuleNameToEntityId, + RecurringPeriod, + RLTrainingOutput, +) try: - # pyre-fixme[21]: Could not find `tinydb`. - # pyre-fixme[21]: Could not find `tinydb`. from tinydb import Query, TinyDB HAS_TINYDB = True @@ -34,7 +37,7 @@ class FileSystemPublisher: @dataclass class FileSystemPublisher(ModelPublisher): - """ Uses a file to serve as a key-value store. + """Uses a file to serve as a key-value store. The key is the str/repr representation of the ModelManager. The value is the path to the torchscipt model. @@ -48,9 +51,12 @@ def __post_init_post_parse__(self): self.db: TinyDB = TinyDB(self.publishing_file) logger.info(f"Using TinyDB at {self.publishing_file}.") - def get_latest_published_model(self, model_manager: ModelManager) -> str: + def get_latest_published_model( + self, model_manager: ModelManager, module_name: str + ) -> str: Model = Query() - key = str(model_manager) + # TODO: make this take in a + key = f"{module_name}_{str(model_manager)}" # pyre-fixme[16]: `FileSystemPublisher` has no attribute `db`. results = self.db.search(Model[KEY_FIELD] == key) if len(results) != 1: @@ -68,26 +74,26 @@ def do_publish( self, model_manager: ModelManager, training_output: RLTrainingOutput, - recurring_workflow_id: int, + setup_data: Optional[Dict[str, bytes]], + recurring_workflow_ids: ModuleNameToEntityId, child_workflow_id: int, recurring_period: Optional[RecurringPeriod], ) -> NoPublishingResults: - path = training_output.output_path - assert path is not None, f"Given path is None." - assert os.path.exists(path), f"Given path {path} doesn't exist." - Model = Query() - # find if there's already been something stored - key = str(model_manager) - # pyre-fixme[16]: `FileSystemPublisher` has no attribute `db`. - results = self.db.search(Model[KEY_FIELD] == key) - if len(results) == 0: - # this is a first - self.db.insert({KEY_FIELD: key, VALUE_FIELD: path}) - else: - # replace it - if len(results) > 1: - raise RuntimeError( - f"Got {len(results)} results for model_manager. {results}" - ) - self.db.update({VALUE_FIELD: path}, Model[KEY_FIELD] == key) + for module_name, path in training_output.output_paths.items(): + assert os.path.exists(path), f"Given path {path} doesn't exist." + Model = Query() + # find if there's already been something stored + key = f"{module_name}_{str(model_manager)}" + # pyre-fixme[16]: `FileSystemPublisher` has no attribute `db`. 
+ results = self.db.search(Model[KEY_FIELD] == key) + if len(results) == 0: + # this is a first + self.db.insert({KEY_FIELD: key, VALUE_FIELD: path}) + else: + # replace it + if len(results) > 1: + raise RuntimeError( + f"Got {len(results)} results for model_manager. {results}" + ) + self.db.update({VALUE_FIELD: path}, Model[KEY_FIELD] == key) return NoPublishingResults(success=True) diff --git a/reagent/workflow/publishers/model_publisher.py b/reagent/publishers/model_publisher.py similarity index 74% rename from reagent/workflow/publishers/model_publisher.py rename to reagent/publishers/model_publisher.py index 5462155c1..5ffd6f791 100644 --- a/reagent/workflow/publishers/model_publisher.py +++ b/reagent/publishers/model_publisher.py @@ -1,13 +1,18 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import abc import inspect -from typing import Optional +from typing import Dict, Optional from reagent.core.registry_meta import RegistryMeta -from reagent.workflow.model_managers.model_manager import ModelManager -from reagent.workflow.result_registries import PublishingResult -from reagent.workflow.types import RecurringPeriod, RLTrainingOutput +from reagent.core.result_registries import PublishingResult +from reagent.model_managers.model_manager import ModelManager +from reagent.workflow.types import ( + ModuleNameToEntityId, + RecurringPeriod, + RLTrainingOutput, +) class ModelPublisher(metaclass=RegistryMeta): @@ -20,7 +25,9 @@ def publish( self, model_manager: ModelManager, training_output: RLTrainingOutput, - recurring_workflow_id: int, + setup_data: Optional[Dict[str, bytes]], + # Mapping from serving_module name -> recurring_workflow_id + recurring_workflow_ids: ModuleNameToEntityId, child_workflow_id: int, recurring_period: Optional[RecurringPeriod], ): @@ -33,7 +40,8 @@ def publish( result = self.do_publish( model_manager, training_output, - recurring_workflow_id, + setup_data, + recurring_workflow_ids, child_workflow_id, recurring_period, ) @@ -55,7 +63,8 @@ def do_publish( self, model_manager: ModelManager, training_output: RLTrainingOutput, - recurring_workflow_id: int, + setup_data: Optional[Dict[str, bytes]], + recurring_workflow_ids: ModuleNameToEntityId, child_workflow_id: int, recurring_period: Optional[RecurringPeriod], ) -> PublishingResult: diff --git a/reagent/workflow/publishers/no_publishing.py b/reagent/publishers/no_publishing.py similarity index 54% rename from reagent/workflow/publishers/no_publishing.py rename to reagent/publishers/no_publishing.py index 639474ad4..387c77569 100644 --- a/reagent/workflow/publishers/no_publishing.py +++ b/reagent/publishers/no_publishing.py @@ -1,12 +1,17 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
-from typing import Optional +from typing import Dict, Optional from reagent.core.dataclasses import dataclass -from reagent.workflow.model_managers.model_manager import ModelManager -from reagent.workflow.publishers.model_publisher import ModelPublisher -from reagent.workflow.result_types import NoPublishingResults -from reagent.workflow.types import RecurringPeriod, RLTrainingOutput +from reagent.core.result_types import NoPublishingResults +from reagent.model_managers.model_manager import ModelManager +from reagent.publishers.model_publisher import ModelPublisher +from reagent.workflow.types import ( + ModuleNameToEntityId, + RecurringPeriod, + RLTrainingOutput, +) @dataclass @@ -21,7 +26,8 @@ def do_publish( self, model_manager: ModelManager, training_output: RLTrainingOutput, - recurring_workflow_id: int, + setup_data: Optional[Dict[str, bytes]], + recurring_workflow_ids: ModuleNameToEntityId, child_workflow_id: int, recurring_period: Optional[RecurringPeriod], ) -> NoPublishingResults: diff --git a/reagent/publishers/union.py b/reagent/publishers/union.py new file mode 100644 index 000000000..d36238c99 --- /dev/null +++ b/reagent/publishers/union.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.tagged_union import TaggedUnion + +from .file_system_publisher import FileSystemPublisher # noqa +from .model_publisher import ModelPublisher +from .no_publishing import NoPublishing # noqa + + +if IS_FB_ENVIRONMENT: + import fblearner.flow.projects.rl.publishing.clients # noqa + import fblearner.flow.projects.rl.publishing.common # noqa + + +@ModelPublisher.fill_union() +class ModelPublisher__Union(TaggedUnion): + pass diff --git a/reagent/replay_memory/circular_replay_buffer.py b/reagent/replay_memory/circular_replay_buffer.py index 227a73868..faf897988 100644 --- a/reagent/replay_memory/circular_replay_buffer.py +++ b/reagent/replay_memory/circular_replay_buffer.py @@ -26,12 +26,14 @@ off-policy corrections. """ +import abc import collections import gzip import logging import os import pickle -from typing import Dict, List, Optional, Tuple +from dataclasses import dataclass +from typing import Dict, List, NamedTuple, Optional, Tuple import numpy as np import torch @@ -39,32 +41,260 @@ logger = logging.getLogger(__name__) -try: - import gym - from gym import spaces - HAS_GYM = True -except ImportError: - HAS_GYM = False - logger.warning( - f"ReplayBuffer.create_from_env() will not work because gym is not installed" - ) +@dataclass +class ElementMetadata: + @classmethod + @abc.abstractmethod + def create_from_example(cls, name: str, example): + """Constructor of the Metadata. + Given an input example, construct an ElementMetadata for this key `name`. + Good practice to call self.validate here after initializing metadata. + """ + raise NotImplementedError() + + @abc.abstractmethod + def zero_example(self): + """What would an empty `input` example look like?""" + raise NotImplementedError() + + @abc.abstractmethod + def validate(self, name: str, input): + """Does the input look correct?""" + raise NotImplementedError() + + @abc.abstractmethod + def create_storage(self, capacity: int): + """Initialize the replay buffer with given `capacity`, for this data type. + I.e. what is the "internal representation" of this data type in the replay buffer? 
+ """ + raise NotImplementedError() + + @abc.abstractmethod + def input_to_storage(self, input): + """Convert `input` to the "internal representation" of the replay buffer.""" + raise NotImplementedError() + + @abc.abstractmethod + def sample_to_output(self, sample): + """Convert "internal representation" of replay buffer to `output`. + Concretely, when we call replay_buffer.sample(...), what do we want the output to look like? + """ + raise NotImplementedError() + + +@dataclass +class DenseMetadata(ElementMetadata): + """ + Internal representation is a torch tensor. + Batched output is tensor of shape (batch_size, obs_shape, stack_size) + """ + + shape: Tuple[int, ...] + dtype: np.dtype + + @classmethod + def create_from_example(cls, name: str, example): + arr = np.array(example) + dtype = arr.dtype + if dtype == np.dtype("float64"): + dtype = np.dtype("float32") + res = cls(arr.shape, dtype) + res.validate(name, example) + return res + + def zero_example(self): + return np.zeros(self.shape, dtype=self.dtype) + + def validate(self, name: str, input): + assert not isinstance( + input, (dict, torch.Tensor) + ), f"{name}: {type(input)} is dict or torch.Tensor" + arr = np.array(input) + dtype = arr.dtype + if dtype == np.dtype("float64"): + dtype = np.dtype("float32") + assert ( + arr.shape == self.shape and dtype == self.dtype + ), f"{name}: Expected {self.shape} {self.dtype}, got {arr.shape} {dtype}" + + def create_storage(self, capacity: int): + array_shape = [capacity, *self.shape] + # not all bit representations are valid for bool + if self.dtype == bool: + return torch.zeros(array_shape, dtype=torch.bool) + return torch.from_numpy(np.empty(array_shape, dtype=self.dtype)) + + def input_to_storage(self, input): + return torch.from_numpy(np.array(input, dtype=self.dtype)) + + def sample_to_output(self, sample): + # sample has shape (batch_size, stack_size, obs_shape) right now, so + # reshape to (batch_size, obs_shape, stack_size) + perm = [0] + list(range(2, len(self.shape) + 2)) + [1] + output = sample.permute(*perm) + # squeeze the stack dim if it is 1 + if output.shape[-1] == 1: + output = output.squeeze(-1) + return output + + +@dataclass +class IDListMetadata(ElementMetadata): + """ + Internal representation is a np.array of Dict[str, np.array of type int64] + Output is Dict[str, Tuple[np.array of type int32, np.array of type int64]], same as id_list in FeatureStore. + The tuple is (offset, ids). 
+ TODO: implement for stack size > 1 + """ + + keys: List[str] + + @classmethod + def create_from_example(cls, name: str, example): + res = cls(list(example.keys())) + res.validate(name, example) + return res + + def zero_example(self): + return {k: [] for k in self.keys} + + def validate(self, name: str, input): + assert isinstance(input, dict), f"{name}: {type(input)} isn't dict" + for k, v in input.items(): + assert isinstance(k, str), f"{name}: {k} ({type(k)}) is not str" + assert k in self.keys, f"{name}: {k} not in {self.keys}" + arr = np.array(v) + if len(arr) > 0: + assert ( + arr.dtype == np.int64 + ), f"{name}: {v} arr has dtype {arr.dtype}, not np.int64" + + def create_storage(self, capacity: int): + array_shape = (capacity,) + return np.empty(array_shape, dtype=np.object) + + def input_to_storage(self, input): + return input + + def sample_to_output(self, sample): + sample = sample.squeeze(1) + result: Dict[str, Tuple[torch.Tensor, torch.Tensor]] = {} + for k in self.keys: + offsets = [] + ids = [] + for elem in sample: + # uninitialized case (when sampling next) + if elem is None: + cur_ids = [] + else: + cur_ids = elem[k] + offsets.append(len(ids)) + ids.extend(cur_ids) + result[k] = ( + torch.tensor(offsets, dtype=torch.int32), + torch.tensor(ids, dtype=torch.int64), + ) + return result -try: - from recsim.simulator.recsim_gym import RecSimGymEnv - HAS_RECSIM = True -except ImportError: - HAS_RECSIM = False - logger.warning(f"ReplayBuffer.create_from_env() will not recognize RecSim env") +@dataclass +class IDScoreListMetadata(ElementMetadata): + """ + Internal representation is a np.array of Dict[str, np.array of type int64] + Output is Dict[str, Tuple[np.array of type int32, np.array of type int64, np.array of type np.float32]], same as id_list in FeatureStore. + The tuple is (offset, ids, scores). 
+ TODO: implement for stack size > 1 + """ + + keys: List[str] + + @classmethod + def create_from_example(cls, name: str, example): + res = cls(list(example.keys())) + res.validate(name, example) + return res + + def zero_example(self): + return {k: ([], []) for k in self.keys} + + def validate(self, name: str, input): + assert isinstance(input, dict), f"{name}: {type(input)} isn't dict" + for k, v in input.items(): + assert isinstance(k, str), f"{name}: {k} ({type(k)}) is not str" + assert k in self.keys, f"{name}: {k} not in {self.keys}" + assert ( + isinstance(v, tuple) and len(v) == 2 + ), f"{name}: {v} ({type(v)}) is not len 2 tuple" + ids = np.array(v[0]) + scores = np.array(v[1]) + assert len(ids) == len(scores), f"{name}: {len(ids)} != {len(scores)}" + if len(ids) > 0: + assert ( + ids.dtype == np.int64 + ), f"{name}: ids dtype {ids.dtype} isn't np.int64" + assert scores.dtype in ( + np.float32, + np.float64, + ), f"{name}: scores dtype {scores.dtype} isn't np.float32/64" + + def create_storage(self, capacity: int): + array_shape = (capacity,) + return np.empty(array_shape, dtype=np.object) + + def input_to_storage(self, input): + return input + + def sample_to_output(self, sample): + sample = sample.squeeze(1) + result: Dict[str, Tuple[torch.Tensor, torch.Tensor]] = {} + for k in self.keys: + offsets = [] + ids = [] + scores = [] + for elem in sample: + # uninitialized case (when sampling next) + if elem is None: + cur_ids, cur_scores = [], [] + else: + cur_ids, cur_scores = elem[k] + assert len(cur_ids) == len( + cur_scores + ), f"{len(cur_ids)} != {len(cur_scores)}" + offsets.append(len(ids)) + ids.extend(cur_ids) + scores.extend(cur_scores) + result[k] = ( + torch.tensor(offsets, dtype=torch.int32), + torch.tensor(ids, dtype=torch.int64), + torch.tensor(scores, dtype=torch.float32), + ) + return result -# Defines a type describing part of the tuple returned by the replay -# memory. Each element of the tuple is a tensor of shape [batch, ...] where -# ... is defined the 'shape' field of ReplayElement. The tensor type is -# given by the 'type' field. The 'name' field is for convenience and ease of -# debugging. -ReplayElement = collections.namedtuple("shape_type", ["name", "shape", "type"]) +class ReplayElement(NamedTuple): + # Describing contents of each field of replay memory. + name: str + metadata: ElementMetadata + + +def make_replay_element(name, example): + assert not isinstance(example, torch.Tensor), "Input shouldn't be tensor" + metadata = None + for metadata_cls in [DenseMetadata, IDListMetadata, IDScoreListMetadata]: + try: + metadata = metadata_cls.create_from_example(name, example) + break + except Exception as e: + logger.info( + f"Failed attempt to create {metadata_cls} from ({name}) {example}: {e}" + ) + + if metadata is None: + raise ValueError(f"Unable to deduce type for {name}: {example}") + + return ReplayElement(name, metadata) + # A prefix that can not collide with variable names for checkpoint files. STORE_FILENAME_PREFIX = "$store$_" @@ -72,6 +302,8 @@ # This constant determines how many iterations a checkpoint is kept for. CHECKPOINT_DURATION = 4 +REQUIRED_KEYS = ["observation", "action", "reward", "terminal"] + class ReplayBuffer(object): """A simple Replay Buffer. 
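For illustration only (not part of the patch): make_replay_element tries DenseMetadata, IDListMetadata, and IDScoreListMetadata in order and keeps the first class whose create_from_example/validate accepts the example value. A minimal sketch of how the three metadata types are deduced, assuming the patched reagent.replay_memory.circular_replay_buffer module is importable:

import numpy as np

from reagent.replay_memory.circular_replay_buffer import (
    DenseMetadata,
    IDListMetadata,
    IDScoreListMetadata,
    make_replay_element,
)

# Dense value: shape and dtype are captured (float64 inputs are downcast to float32).
dense = make_replay_element("observation", np.zeros((4,), dtype=np.float32))
assert isinstance(dense.metadata, DenseMetadata)

# Dict of int64 id lists -> IDListMetadata (DenseMetadata rejects dict inputs).
id_list = make_replay_element("page_ids", {"page": np.array([1, 2, 3], dtype=np.int64)})
assert isinstance(id_list.metadata, IDListMetadata)

# Dict of (ids, scores) tuples -> IDScoreListMetadata.
id_score = make_replay_element(
    "watched_ids",
    {"video": (np.array([7, 8], dtype=np.int64), np.array([0.5, 0.9], dtype=np.float32))},
)
assert isinstance(id_score.metadata, IDScoreListMetadata)

The names "observation", "page_ids", and "watched_ids" above are arbitrary example keys, not identifiers required by the patch.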
@@ -88,25 +320,16 @@ class ReplayBuffer(object): def __init__( self, - observation_shape: Tuple[int, ...], - stack_size: int, - replay_capacity: int, - batch_size: int, + stack_size: int = 1, + replay_capacity: int = 10000, + batch_size: int = 1, return_everything_as_stack: bool = False, return_as_timeline_format: bool = False, update_horizon: int = 1, gamma: float = 0.99, - max_sample_attempts: int = 1000, - extra_storage_types: Optional[List[ReplayElement]] = None, - observation_dtype=np.uint8, - action_shape: Tuple[int, ...] = (), - action_dtype=np.int32, - reward_shape: Tuple[int, ...] = (), - reward_dtype=np.float32, ) -> None: """Initializes ReplayBuffer. Args: - observation_shape: tuple of ints. stack_size: int, number of frames to use in state stack. replay_capacity: int, number of transitions to keep in memory. batch_size: int. @@ -116,23 +339,10 @@ def __init__( is returned list format, like the output of TimelineOperator update_horizon: int, length of update ('n' in n-step update). gamma: int, the discount factor. - max_sample_attempts: int, the maximum number of attempts allowed to - get a sample. - extra_storage_types: list of ReplayElements defining the type of the extra - contents that will be stored and returned by sample_transition_batch. - observation_dtype: np.dtype, type of the observations. Defaults to - np.uint8 for Atari 2600. - action_shape: tuple of ints, the shape for the action vector. Empty tuple - means the action is a scalar. - action_dtype: np.dtype, type of elements in the action. - reward_shape: tuple of ints, the shape of the reward vector. Empty tuple - means the reward is a scalar. - reward_dtype: np.dtype, type of elements in the reward. Raises: ValueError: If replay_capacity is too small to hold at least one transition. """ - assert isinstance(observation_shape, tuple) if replay_capacity < update_horizon + stack_size: raise ValueError( "There is not enough capacity to cover " @@ -147,40 +357,15 @@ def __init__( "But we'll support it anyways..." ) - logger.info( - "Creating a %s replay memory with the following parameters:", - self.__class__.__name__, - ) - logger.info("\t observation_shape: %s", str(observation_shape)) - logger.info("\t observation_dtype: %s", str(observation_dtype)) - logger.info("\t stack_size: %d", stack_size) - logger.info("\t replay_capacity: %d", replay_capacity) - logger.info("\t batch_size: %d", batch_size) - logger.info("\t update_horizon: %d", update_horizon) - logger.info("\t gamma: %f", gamma) - - self._action_shape = action_shape - self._action_dtype = action_dtype - self._reward_shape = reward_shape - self._reward_dtype = reward_dtype - self._observation_shape = observation_shape + self._initialized_buffer = False self._stack_size = stack_size self._return_everything_as_stack = return_everything_as_stack self._return_as_timeline_format = return_as_timeline_format - self._state_shape = self._observation_shape + (self._stack_size,) self._replay_capacity = replay_capacity self._batch_size = batch_size self._update_horizon = update_horizon self._gamma = gamma - self._observation_dtype = observation_dtype - # FIXME: np.bool causes UBSAN error - self._terminal_dtype = np.uint8 - self._max_sample_attempts = max_sample_attempts - if extra_storage_types: - self._extra_storage_types = extra_storage_types - else: - self._extra_storage_types = [] - self._create_storage() + self.add_count = np.array(0) # When the horizon is > 1, we compute the sum of discounted rewards as a dot # product using the precomputed vector . 
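For illustration only (not part of the patch): since observation/action/reward shapes and dtypes are no longer passed to __init__, the buffer is configured lazily from the first add() call (see initialize_buffer in the next hunk). A minimal usage sketch under that assumption:

import numpy as np

from reagent.replay_memory.circular_replay_buffer import ReplayBuffer

rb = ReplayBuffer(replay_capacity=100, batch_size=8)

# The first add() deduces storage types from its keyword arguments.
# observation, action, reward, and terminal are required; any other keyword
# (log_prob here is just an arbitrary example name) becomes an extra storage element.
rb.add(
    observation=np.zeros((4,), dtype=np.float32),
    action=1,
    reward=0.5,
    terminal=False,
    log_prob=-0.3,
)

# Later add() calls must pass the same set of keys. Sampled batches then expose the
# extra key and its "next_" counterpart (here log_prob and next_log_prob) alongside
# the standard fields (state, action, reward, next_state, next_action, next_reward,
# terminal, indices, step).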
@@ -191,161 +376,56 @@ def __init__( self._is_index_valid = torch.zeros(self._replay_capacity, dtype=torch.bool) self._num_valid_indices = 0 self._num_transitions_in_current_episode = 0 + + # to be initialized on first add (put here to please pyre) + self._store: Dict[str, torch.Tensor] = {} + self._storage_types: List[ReplayElement] = [] + self._batch_type = collections.namedtuple("filler", []) + # have these for ease + self._extra_keys: List[str] = [] + self._key_to_replay_elem: Dict[str, ReplayElement] = {} + self._zero_transition = {} + self._transition_elements = {} + + def initialize_buffer(self, **kwargs): + """Initialize replay buffer based on first input""" + kwarg_keys = set(kwargs.keys()) + assert set(REQUIRED_KEYS).issubset( + kwarg_keys + ), f"{kwarg_keys} doesn't contain all of {REQUIRED_KEYS}" + + # arbitrary order for extra keys + self._extra_keys = list(kwarg_keys - set(REQUIRED_KEYS)) + + self._storage_types: List[ReplayElement] = [ + make_replay_element(k, kwargs[k]) for k in REQUIRED_KEYS + self._extra_keys + ] + self._key_to_replay_elem = { + elem.name: elem for elem in self.get_storage_signature() + } + self._create_storage() + self._transition_elements = self.get_transition_elements() self._batch_type = collections.namedtuple( - "batch_type", [e.name for e in self.get_transition_elements()] + "batch_type", self._transition_elements ) - self._key_to_shape_map = {k.name: k.shape for k in self.get_storage_signature()} + self._zero_transition = { + elem.name: elem.metadata.zero_example() for elem in self._storage_types + } + self._initialized_buffer = True + + logger.info(f"Initializing {self.__class__.__name__}...") + logger.info(f"\t stack_size: {self._stack_size}") + logger.info(f"\t replay_capacity: {self._replay_capacity}") + logger.info(f"\t update_horizon: {self._update_horizon}") + logger.info(f"\t gamma: {self._gamma}") + logger.info("\t storage_types: ") + for elem in self._storage_types: + logger.info(f"\t\t {elem}") @property def size(self) -> int: return self._num_valid_indices - @classmethod - def create_from_env( - cls, - env: "gym.Env", - *, - replay_memory_size: int, - batch_size: int, - stack_size: int = 1, - store_log_prob: bool = True, - **kwargs, - ): - extra_storage_types: List[ReplayElement] = [] - obs_space = env.observation_space - - if HAS_RECSIM and isinstance(env.unwrapped, RecSimGymEnv): - assert isinstance(obs_space, spaces.Dict) - user_obs_space = obs_space["user"] - if not isinstance(user_obs_space, spaces.Box): - raise NotImplementedError( - f"User observation space {type(user_obs_space)} is not supported" - ) - # Put user into observation part of replay buffer - observation_shape = user_obs_space.shape - observation_dtype = user_obs_space.dtype - - # Create an element for doc & response - extra_storage_types.extend(cls._get_replay_elements_for_recsim(obs_space)) - elif isinstance(obs_space, spaces.Box): - observation_shape = obs_space.shape - observation_dtype = obs_space.dtype - else: - raise NotImplementedError( - f"Observation type {type(env.observation_space)} is not supported" - ) - - action_space = env.action_space - if isinstance( - action_space, (spaces.Box, spaces.MultiDiscrete, spaces.Discrete) - ): - action_dtype = action_space.dtype - action_shape = action_space.shape - else: - raise NotImplementedError( - f"env.action_space {type(env.action_space)} not supported." 
- ) - - extra_storage_types.append(ReplayElement("mdp_id", (), np.int64)) - extra_storage_types.append(ReplayElement("sequence_number", (), np.int64)) - if store_log_prob: - extra_storage_types.append(ReplayElement("log_prob", (), np.float32)) - - return cls( - stack_size=stack_size, - replay_capacity=replay_memory_size, - batch_size=batch_size, - observation_shape=observation_shape, - observation_dtype=observation_dtype, - action_shape=action_shape, - action_dtype=action_dtype, - reward_shape=(), - reward_dtype=np.float32, - extra_storage_types=extra_storage_types, - **kwargs, - ) - - @staticmethod - def _get_replay_elements_for_recsim(obs_space) -> List[ReplayElement]: - """ - obs_space["doc"] is a dict with as many keys as number of candidates. - All the values should be identical. They should be dict with keys - corresponding to document features. - - obs_space["response"] is a tuple. Its length is the slate size presented - to the user. Each element should be identical. They should be dict with - keys corresponding to the type of response. - """ - logger.info(obs_space) - doc_obs_space = obs_space["doc"] - if not isinstance(doc_obs_space, spaces.Dict): - raise NotImplementedError( - f"Doc space {type(doc_obs_space)} is not supported" - ) - - num_docs = len(doc_obs_space.spaces) - - # Assume that all docs are in the same space - - replay_elements: List[ReplayElement] = [] - - doc_0_space = doc_obs_space["0"] - if isinstance(doc_0_space, spaces.Dict): - for k, v in doc_0_space.spaces.items(): - if isinstance(v, spaces.Discrete): - shape = (num_docs,) - elif isinstance(v, spaces.Box): - shape = (num_docs, *v.shape) - else: - raise NotImplementedError( - f"Doc feature {k} with the observation space of {type(v)}" - " is not supported" - ) - replay_elements.append(ReplayElement(f"doc_{k}", shape, v.dtype)) - elif isinstance(doc_0_space, spaces.Box): - shape = (num_docs, *doc_0_space.shape) - replay_elements.append(ReplayElement("doc", shape, doc_0_space.dtype)) - else: - raise NotImplementedError(f"Unknown space: {doc_0_space}") - - augmentation = obs_space.spaces.get("augmentation", None) - if augmentation is not None: - aug_0_space = list(augmentation.spaces.values())[0] - for k, v in aug_0_space.spaces.items(): - if isinstance(v, spaces.Discrete): - shape = (num_docs,) - elif isinstance(v, spaces.Box): - shape = (num_docs, *v.shape) - else: - raise NotImplementedError( - f"Augmentation feature {k} with the observation space " - f"of {type(v)} is not supported" - ) - replay_elements.append( - ReplayElement(f"augmentation_{k}", shape, v.dtype) - ) - - response_space = obs_space["response"] - assert isinstance(response_space, spaces.Tuple) - - slate_size = len(response_space) - - response_space_0 = response_space[0] - assert isinstance(response_space_0, spaces.Dict) - for k, v in response_space_0.spaces.items(): - if isinstance(v, spaces.Discrete): - shape = (slate_size,) - elif isinstance(v, spaces.Box): - shape = (slate_size, *v.shape) - else: - raise NotImplementedError( - f"Response {k} with the observation space of {type(v)} " - "is not supported" - ) - replay_elements.append(ReplayElement(f"response_{k}", shape, v.dtype)) - - return replay_elements - def set_index_valid_status(self, idx: int, is_valid: bool): old_valid = self._is_index_valid[idx] if not old_valid and is_valid: @@ -357,13 +437,10 @@ def set_index_valid_status(self, idx: int, is_valid: bool): self._is_index_valid[idx] = is_valid def _create_storage(self) -> None: - """Creates the numpy arrays used to store transitions. 
- """ - self._store: Dict[str, torch.Tensor] = {} + """Creates the numpy arrays used to store transitions.""" for storage_element in self.get_storage_signature(): - array_shape = [self._replay_capacity] + list(storage_element.shape) - self._store[storage_element.name] = torch.from_numpy( - np.empty(array_shape, dtype=storage_element.type) + self._store[storage_element.name] = storage_element.metadata.create_storage( + self._replay_capacity ) def get_add_args_signature(self) -> List[ReplayElement]: @@ -381,46 +458,27 @@ def get_storage_signature(self) -> List[ReplayElement]: Returns: list of ReplayElements defining the type of the contents stored. """ - storage_elements = [ - ReplayElement( - "observation", self._observation_shape, self._observation_dtype - ), - ReplayElement("action", self._action_shape, self._action_dtype), - ReplayElement("reward", self._reward_shape, self._reward_dtype), - ReplayElement("terminal", (), self._terminal_dtype), - ] - - for extra_replay_element in self._extra_storage_types: - storage_elements.append(extra_replay_element) - return storage_elements + return self._storage_types def _add_zero_transition(self) -> None: - """Adds a padding transition filled with zeros (Used in episode beginnings). - """ - zero_transition = [] - for element_type in self.get_add_args_signature(): - zero_transition.append( - np.zeros(element_type.shape, dtype=element_type.type) - ) - self._add(*zero_transition) + """Adds a padding transition filled with zeros (Used in episode beginnings).""" + self._add(**self._zero_transition) - def add(self, observation, action, reward, terminal, *args, **kwargs): + def add(self, **kwargs): """Adds a transition to the replay memory. This function checks the types and handles the padding at the beginning of an episode. Then it calls the _add function. Since the next_observation in the transition will be the observation added next there is no need to pass it. If the replay memory is at capacity the oldest transition will be discarded. - Args: - observation: np.array with shape observation_shape. - action: int, the action in the transition. - reward: float, the reward received in the transition. - terminal: np.dtype, acts as a boolean indicating whether the transition - was terminal (1) or not (0). - *args: extra contents with shapes and dtypes according to - extra_storage_types. + + Only accept kwargs, which must contain observation, action, reward, terminal + as keys. 
""" - self._check_add_types(observation, action, reward, terminal, *args, **kwargs) + if not self._initialized_buffer: + self.initialize_buffer(**kwargs) + + self._check_add_types(**kwargs) last_idx = (self.cursor() - 1) % self._replay_capacity if self.is_empty() or self._store["terminal"][last_idx]: self._num_transitions_in_current_episode = 0 @@ -435,7 +493,7 @@ def add(self, observation, action, reward, terminal, *args, **kwargs): if self._num_transitions_in_current_episode >= self._update_horizon: idx = (cur_idx - self._update_horizon) % self._replay_capacity self.set_index_valid_status(idx=idx, is_valid=True) - self._add(observation, action, reward, terminal, *args, **kwargs) + self._add(**kwargs) self._num_transitions_in_current_episode += 1 # mark the next stack_size-1 as invalid (note cursor has advanced by 1) @@ -443,7 +501,7 @@ def add(self, observation, action, reward, terminal, *args, **kwargs): idx = (self.cursor() + i) % self._replay_capacity self.set_index_valid_status(idx=idx, is_valid=False) - if terminal: + if kwargs["terminal"]: # Since the frame (cur_idx) we just inserted was terminal, we now mark # the last "num_back" transitions as valid for sampling (including cur_idx). # This is because next_state is not relevant for those terminal (multi-step) @@ -462,26 +520,17 @@ def add(self, observation, action, reward, terminal, *args, **kwargs): idx = (cur_idx - i) % self._replay_capacity self.set_index_valid_status(idx=idx, is_valid=True) - def _add(self, *args, **kwargs): + def _add(self, **kwargs): """Internal add method to add to the storage arrays. Args: *args: All the elements in a transition. """ - self._check_args_length(*args, **kwargs) + self._check_args_length(**kwargs) elements = self.get_add_args_signature() - # convert kwarg np.arrays to torch.tensors - for element in elements[len(args) :]: - if element.name in kwargs: - kwargs[element.name] = torch.from_numpy( - np.array(kwargs[element.name], dtype=element.type) - ) - # convert arg np.arrays to torch.tensors - kwargs.update( - { - e.name: torch.from_numpy(np.array(arg, dtype=e.type)) - for arg, e in zip(args, elements[: len(args)]) - } - ) + for element in elements: + kwargs[element.name] = element.metadata.input_to_storage( + kwargs[element.name] + ) self._add_transition(kwargs) def _add_transition(self, transition: Dict[str, torch.Tensor]) -> None: @@ -496,52 +545,30 @@ def _add_transition(self, transition: Dict[str, torch.Tensor]) -> None: self.add_count += 1 - def _check_args_length(self, *args, **kwargs): + def _check_args_length(self, **kwargs): """Check if args passed to the add method have the same length as storage. Args: *args: Args for elements used in storage. Raises: ValueError: If args have wrong length. """ - if len(args) + len(kwargs) != len(self.get_add_args_signature()): + if len(kwargs) != len(self.get_add_args_signature()): raise ValueError( - f"Add expects: {self.get_add_args_signature()}; " - f" received {args} {kwargs}" + f"Add expects: {self.get_add_args_signature()}; received {kwargs}" ) - def _check_add_types(self, *args, **kwargs): + def _check_add_types(self, **kwargs): """Checks if args passed to the add method match those of the storage. Args: *args: Args whose types need to be validated. Raises: ValueError: If args have wrong shape or dtype. 
""" - self._check_args_length(*args, **kwargs) - add_arg_signature = self.get_add_args_signature() - - def _check(arg_element, store_element): - if isinstance(arg_element, np.ndarray): - arg_shape = arg_element.shape - elif isinstance(arg_element, tuple) or isinstance(arg_element, list): - # TODO(b/80536437). This is not efficient when arg_element is a list. - arg_shape = np.array(arg_element).shape - else: - # Assume it is scalar. - arg_shape = () - store_element_shape = tuple(store_element.shape) - if arg_shape != store_element_shape: - raise ValueError( - "arg {} has shape {}, expected {}".format( - store_element.name, arg_shape, store_element_shape - ) - ) + self._check_args_length(**kwargs) - for arg_element, store_element in zip(args, add_arg_signature): - _check(arg_element, store_element) - - for store_element in add_arg_signature[len(args) :]: + for store_element in self.get_add_args_signature(): arg_element = kwargs[store_element.name] - _check(arg_element, store_element) + store_element.metadata.validate(store_element.name, arg_element) def is_empty(self) -> bool: """Is the Replay Buffer empty?""" @@ -586,7 +613,7 @@ def sample_all_valid_transitions(self): def sample_transition_batch(self, batch_size=None, indices=None): """Returns a batch of transitions (including any extra contents). If get_transition_elements has been overridden and defines elements not - stored in self._store, an empty array will be returned and it will be + stored in self._store, None will be returned and it will be left to the child class to fill it. For example, for the child class PrioritizedReplayBuffer, the contents of the sampling_probabilities are stored separately in a sum tree. @@ -619,8 +646,6 @@ def sample_transition_batch(self, batch_size=None, indices=None): indices = indices.type(dtype=torch.int64) assert len(indices) == batch_size - transition_elements = self.get_transition_elements(batch_size) - # calculate 2d array of indices with size (batch_size, update_horizon) # ith row contain the multistep indices starting at indices[i] multistep_indices = indices.unsqueeze(1) + torch.arange(self._update_horizon) @@ -638,31 +663,31 @@ def sample_transition_batch(self, batch_size=None, indices=None): steps_for_timeline_format = None batch_arrays = [] - for element in transition_elements: - if element.name == "state": + for element_name in self._transition_elements: + if element_name == "state": batch = self._get_batch_for_indices("observation", indices) - elif element.name == "next_state": + elif element_name == "next_state": batch = self._get_batch_for_indices( "observation", next_indices, steps_for_timeline_format ) - elif element.name == "indices": + elif element_name == "indices": batch = indices - elif element.name == "terminal": + elif element_name == "terminal": terminal_indices = (indices + steps - 1) % self._replay_capacity batch = self._store["terminal"][terminal_indices].to(torch.bool) - elif element.name == "reward": + elif element_name == "reward": if self._return_as_timeline_format or self._return_everything_as_stack: batch = self._get_batch_for_indices( "reward", indices, steps_for_timeline_format ) else: batch = self._reduce_multi_step_reward(multistep_indices, steps) - elif element.name == "step": + elif element_name == "step": batch = steps - elif element.name in self._store: - batch = self._get_batch_for_indices(element.name, indices) - elif element.name.startswith("next_"): - store_name = element.name[len("next_") :] + elif element_name in self._store: + batch = 
self._get_batch_for_indices(element_name, indices) + elif element_name.startswith("next_"): + store_name = element_name[len("next_") :] assert ( store_name in self._store ), f"{store_name} is not in {self._store.keys()}" @@ -671,35 +696,33 @@ def sample_transition_batch(self, batch_size=None, indices=None): ) else: # We assume the other elements are filled in by the subclass. - batch = torch.from_numpy(np.empty(element.shape, dtype=element.type)) + batch = None # always enables the batch_size dim if isinstance(batch, torch.Tensor) and batch.ndim == 1: batch = batch.unsqueeze(1) batch_arrays.append(batch) - - batch_arrays = self._batch_type(*batch_arrays) - return batch_arrays + return self._batch_type(*batch_arrays) def _get_batch_for_indices( self, key: str, indices: torch.Tensor, steps: Optional[torch.Tensor] = None ): - """ Get batch for given key. - There are two orthogonal special cases. - - returning a stack of features: - View this case as adding an extra "stack" dimension to feature, - causing the shape to be (*feature.shape, stack_size) - - returning next_features as a list (same as timeline output): - This should only be on if update_horizon is > 1. - If this is the case then we don't return a torch.Tensor, - but instead return List[List[features]] where the ith - element is torch.tensor([feat_{t+1}, ..., feat_{t+k}]); - where k <= multi_steps could be strictly less if there's a - terminal state. - NOTE: this option is activated by using the optional steps parameter. - - Otherwise, we just return the indexed features in the replay buffer. - In all of the cases, we assume indices is 1-dimensional. + """Get batch for given key. + There are two orthogonal special cases. + - returning a stack of features: + View this case as adding an extra "stack" dimension to feature, + causing the shape to be (*feature.shape, stack_size) + - returning next_features as a list (same as timeline output): + This should only be on if update_horizon is > 1. + If this is the case then we don't return a torch.Tensor, + but instead return List[List[features]] where the ith + element is torch.tensor([feat_{t+1}, ..., feat_{t+k}]); + where k <= multi_steps could be strictly less if there's a + terminal state. + NOTE: this option is activated by using the optional steps parameter. + + Otherwise, we just return the indexed features in the replay buffer. + In all of the cases, we assume indices is 1-dimensional. """ assert len(indices.shape) == 1, f"{indices.shape} isn't 1-dimensional." if steps is not None: @@ -723,26 +746,17 @@ def _reduce_multi_step_reward( return rewards.sum(dim=1) def _get_stack_for_indices(self, key: str, indices: torch.Tensor) -> torch.Tensor: - """ Get stack of transition data. """ + """Get stack of transition data.""" assert len(indices.shape) == 1, f"{indices.shape} not 1-dimensional" - feature_shape = self._key_to_shape_map[key] # calculate 2d array of indices of shape (batch_size, stack_size) # ith row contain indices in the stack of obs at indices[i] stack_indices = indices.unsqueeze(1) + torch.arange(-self._stack_size + 1, 1) - # pyre-fixme[16]: `Tensor` has no attribute `__imod__`. 
stack_indices %= self._replay_capacity retval = self._store[key][stack_indices] - # retval has shape (batch_size, stack_size, obs_shape) right now, so - # reshape to (batch_size, obs_shape, stack_size) - perm = [0] + list(range(2, len(feature_shape) + 2)) + [1] - retval = retval.permute(*perm) - # squeeze the stack dim if it is 1 - if self._stack_size == 1: - retval = retval.squeeze(len(perm) - 1) - return retval + return self._key_to_replay_elem[key].metadata.sample_to_output(retval) def _get_steps(self, multistep_indices: torch.Tensor) -> torch.Tensor: - """ Calculate trajectory length, defined to be the number of states + """Calculate trajectory length, defined to be the number of states in this multi_step transition until terminal state or until end of multi_step (a.k.a. update_horizon). """ @@ -758,49 +772,24 @@ def _get_steps(self, multistep_indices: torch.Tensor) -> torch.Tensor: terminals = torch.einsum("ab,b->ab", (terminals, unique_mask)) return torch.argmax(terminals, dim=1) + 1 - def get_transition_elements(self, batch_size=None): - """Returns a 'type signature' for sample_transition_batch. - Args: - batch_size: int, number of transitions returned. If None, the default - batch_size will be used. - Returns: - signature: A namedtuple describing the method's return type signature. - """ - batch_size = self._batch_size if batch_size is None else batch_size - - transition_elements = [ - ReplayElement( - "state", (batch_size,) + self._state_shape, self._observation_dtype - ), - ReplayElement( - "action", (batch_size,) + self._action_shape, self._action_dtype - ), - ReplayElement( - "reward", (batch_size,) + self._reward_shape, self._reward_dtype - ), - ReplayElement( - "next_state", (batch_size,) + self._state_shape, self._observation_dtype - ), - ReplayElement( - "next_action", (batch_size,) + self._action_shape, self._action_dtype - ), - ReplayElement( - "next_reward", (batch_size,) + self._reward_shape, self._reward_dtype - ), - ReplayElement("terminal", (batch_size,), self._terminal_dtype), - ReplayElement("indices", (batch_size,), np.int32), - ReplayElement("step", (batch_size,), np.int32), - ] - for element in self._extra_storage_types: + def get_transition_elements(self): + """Returns element names for sample_transition_batch.""" + extra_names = [] + for name in self._extra_keys: for prefix in ["", "next_"]: - transition_elements.append( - ReplayElement( - f"{prefix}{element.name}", - (batch_size,) + tuple(element.shape), - element.type, - ) - ) - return transition_elements + extra_names.append(f"{prefix}{name}") + return [ + "state", + "action", + "reward", + "next_state", + "next_action", + "next_reward", + "terminal", + "indices", + "step", + *extra_names, + ] def _generate_filename(self, checkpoint_dir, name, suffix): return os.path.join(checkpoint_dir, "{}_ckpt.{}.gz".format(name, suffix)) diff --git a/reagent/replay_memory/prioritized_replay_buffer.py b/reagent/replay_memory/prioritized_replay_buffer.py index ed5b4fda3..bcbd747ff 100644 --- a/reagent/replay_memory/prioritized_replay_buffer.py +++ b/reagent/replay_memory/prioritized_replay_buffer.py @@ -23,7 +23,6 @@ import numpy as np import torch from reagent.replay_memory import circular_replay_buffer, sum_tree -from reagent.replay_memory.circular_replay_buffer import ReplayElement class PrioritizedReplayBuffer(circular_replay_buffer.ReplayBuffer): @@ -33,96 +32,52 @@ class PrioritizedReplayBuffer(circular_replay_buffer.ReplayBuffer): def __init__( self, - observation_shape, - stack_size, - replay_capacity, - 
batch_size, - update_horizon=1, - gamma=0.99, - max_sample_attempts=1000, - extra_storage_types=None, - observation_dtype=np.uint8, - action_shape=(), - action_dtype=np.int32, - reward_shape=(), - reward_dtype=np.float32, - ): + stack_size: int, + replay_capacity: int, + batch_size: int, + update_horizon: int = 1, + gamma: float = 0.99, + max_sample_attempts: int = 1000, + ) -> None: """Initializes PrioritizedReplayBuffer. Args: - observation_shape: tuple of ints. stack_size: int, number of frames to use in state stack. replay_capacity: int, number of transitions to keep in memory. batch_size: int. update_horizon: int, length of update ('n' in n-step update). gamma: int, the discount factor. - max_sample_attempts: int, the maximum number of attempts allowed to - get a sample. - extra_storage_types: list of ReplayElements defining the type of the extra - contents that will be stored and returned by sample_transition_batch. - observation_dtype: np.dtype, type of the observations. Defaults to - np.uint8 for Atari 2600. - action_shape: tuple of ints, the shape for the action vector. Empty tuple - means the action is a scalar. - action_dtype: np.dtype, type of elements in the action. - reward_shape: tuple of ints, the shape of the reward vector. Empty tuple - means the reward is a scalar. - reward_dtype: np.dtype, type of elements in the reward. """ super(PrioritizedReplayBuffer, self).__init__( - observation_shape=observation_shape, stack_size=stack_size, replay_capacity=replay_capacity, batch_size=batch_size, update_horizon=update_horizon, gamma=gamma, - max_sample_attempts=max_sample_attempts, - extra_storage_types=extra_storage_types, - observation_dtype=observation_dtype, - action_shape=action_shape, - action_dtype=action_dtype, - reward_shape=reward_shape, - reward_dtype=reward_dtype, ) - + self._max_sample_attempts = max_sample_attempts self.sum_tree = sum_tree.SumTree(replay_capacity) - def get_add_args_signature(self): - """The signature of the add function. - The signature is the same as the one for ReplayBuffer, with an - added priority. - Returns: - list of ReplayElements defining the type of the argument signature needed - by the add function. - """ - parent_add_signature = super( - PrioritizedReplayBuffer, self - ).get_add_args_signature() - add_signature = parent_add_signature + [ - ReplayElement("priority", (), np.float32) - ] - return add_signature - - def _add(self, *args): + def _add(self, **kwargs) -> None: """Internal add method to add to the underlying memory arrays. The arguments need to match add_arg_signature. If priority is none, it is set to the maximum priority ever seen. Args: - *args: All the elements in a transition. """ - self._check_args_length(*args) + self._check_args_length(**kwargs) # Use Schaul et al.'s (2015) scheme of setting the priority of new elements # to the maximum priority so far. # Picks out 'priority' from arguments and adds it to the sum_tree. transition = {} - for i, element in enumerate(self.get_add_args_signature()): + for element in self.get_add_args_signature(): if element.name == "priority": - priority = args[i] + priority = kwargs[element.name] else: - transition[element.name] = torch.from_numpy( - np.array(args[i], dtype=element.type) + transition[element.name] = element.metadata.input_to_storage( + kwargs[element.name] ) + # pyre-fixme[61]: `priority` is undefined, or not always defined. 
self.sum_tree.set(self.cursor(), priority) super(PrioritizedReplayBuffer, self)._add_transition(transition) @@ -176,12 +131,20 @@ def sample_transition_batch(self, batch_size=None, indices=None): ) # The parent returned an empty array for the probabilities. Fill it with the # contents of the sum tree. Note scalar values are returned as (batch_size, 1). - transition.sampling_probabilities[:, 0] = torch.from_numpy( - self.get_priority(transition.indices.numpy().astype(np.int32)) - ) - return transition - def set_priority(self, indices, priorities): + batch_arrays = [] + for element_name in self._transition_elements: + if element_name == "sampling_probabilities": + batch = torch.from_numpy( + self.get_priority(transition.indices.numpy().astype(np.int32)) + ).view(batch_size, 1) + else: + batch = getattr(transition, element_name) + batch_arrays.append(batch) + + return self._batch_type(*batch_arrays) + + def set_priority(self, indices, priorities) -> None: """Sets the priority of the given elements according to Schaul et al. Args: indices: np.array with dtype int32, of indices in range @@ -213,18 +176,8 @@ def get_priority(self, indices): priority_batch[i] = self.sum_tree.get(memory_index) return priority_batch - def get_transition_elements(self, batch_size=None): - """Returns a 'type signature' for sample_transition_batch. - Args: - batch_size: int, number of transitions returned. If None, the default - batch_size will be used. - Returns: - signature: A namedtuple describing the method's return type signature. - """ - parent_transition_type = super( + def get_transition_elements(self): + parent_transition_elements = super( PrioritizedReplayBuffer, self - ).get_transition_elements(batch_size) - probablilities_type = [ - ReplayElement("sampling_probabilities", (batch_size,), np.float32) - ] - return parent_transition_type + probablilities_type + ).get_transition_elements() + return parent_transition_elements + ["sampling_probabilities"] diff --git a/reagent/replay_memory/sum_tree.py b/reagent/replay_memory/sum_tree.py index f50afd08b..8d5c6487d 100644 --- a/reagent/replay_memory/sum_tree.py +++ b/reagent/replay_memory/sum_tree.py @@ -53,7 +53,7 @@ class SumTree(object): tree, but is a little more user-friendly. """ - def __init__(self, capacity: int): + def __init__(self, capacity: int) -> None: """Creates the sum tree data structure for the given replay capacity. Args: capacity: int, the maximum number of elements that can be stored in this diff --git a/reagent/replay_memory/utils.py b/reagent/replay_memory/utils.py index dce70a385..237db1f55 100644 --- a/reagent/replay_memory/utils.py +++ b/reagent/replay_memory/utils.py @@ -15,7 +15,7 @@ def _dense_to_sparse(dense: np.ndarray) -> List[Dict[str, float]]: - """ Convert dense array to sparse representation """ + """Convert dense array to sparse representation""" assert len(dense.shape) == 2, f"dense shape is {dense.shape}" # pyre-fixme[7]: Expected `List[Dict[str, float]]` but got `List[Dict[int, # typing.Any]]`. @@ -25,7 +25,7 @@ def _dense_to_sparse(dense: np.ndarray) -> List[Dict[str, float]]: def replay_buffer_to_pre_timeline_df( is_discrete_action: bool, replay_buffer: ReplayBuffer ) -> pd.DataFrame: - """ Format needed for uploading dataset to Hive, and then run timeline. 
""" + """Format needed for uploading dataset to Hive, and then run timeline.""" n = replay_buffer.size batch = replay_buffer.sample_transition_batch(batch_size=n) @@ -64,7 +64,7 @@ def replay_buffer_to_pre_timeline_df( "ds": [DEFAULT_DS for _ in range(n)], "state_features": _dense_to_sparse(batch.state), "action": action, - "mdp_id": batch.mdp_id.tolist(), + "mdp_id": list(map(str, batch.mdp_id.flatten().tolist())), "sequence_number": sequence_number, "action_probability": action_probability, "reward": reward, diff --git a/reagent/reporting/__init__.py b/reagent/reporting/__init__.py new file mode 100644 index 000000000..6470e8064 --- /dev/null +++ b/reagent/reporting/__init__.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from .compound_reporter import CompoundReporter +from .reporter_base import ReporterBase + +__all__ = [ + "CompoundReporter", + "ReporterBase", +] diff --git a/reagent/reporting/actor_critic_reporter.py b/reagent/reporting/actor_critic_reporter.py new file mode 100644 index 000000000..a9f25845a --- /dev/null +++ b/reagent/reporting/actor_critic_reporter.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import itertools +import logging + +from reagent.core import aggregators as agg +from reagent.core.observers import IntervalAggregatingObserver +from reagent.reporting.reporter_base import ReporterBase +from reagent.workflow.training_reports import ActorCriticTrainingReport + + +logger = logging.getLogger(__name__) + + +class ActorCriticReporter(ReporterBase): + def __init__(self, report_interval: int = 100): + self.report_interval = report_interval + super().__init__(self.value_list_observers, self.aggregating_observers) + + @property + def value_list_observers(self): + return {} + + @property + def aggregating_observers(self): + return { + **{ + "cpe_results": IntervalAggregatingObserver( + 1, agg.ListAggregator("cpe_details") + ), + }, + **{ + name: IntervalAggregatingObserver(self.report_interval, aggregator) + for name, aggregator in itertools.chain( + [ + ("td_loss", agg.MeanAggregator("td_loss")), + ( + "recent_rewards", + agg.RecentValuesAggregator("logged_rewards"), + ), + ( + "logged_action_q_value", + agg.MeanAggregator("model_values_on_logged_actions"), + ), + ], + [ + ( + f"{key}_tb", + agg.TensorBoardHistogramAndMeanAggregator(key, log_key), + ) + for key, log_key in [ + ("td_loss", "td_loss"), + ("reward_loss", "reward_loss"), + ("logged_propensities", "propensities/logged"), + ("logged_rewards", "reward/logged"), + ] + ], + ) + }, + } + + # TODO: write this for OSS + def generate_training_report(self) -> ActorCriticTrainingReport: + return ActorCriticTrainingReport() diff --git a/reagent/reporting/compound_reporter.py b/reagent/reporting/compound_reporter.py new file mode 100644 index 000000000..a807b06f2 --- /dev/null +++ b/reagent/reporting/compound_reporter.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +from typing import Callable, List + +from reagent.core.result_registries import TrainingReport + +from .reporter_base import ReporterBase + + +class CompoundReporter(ReporterBase): + def __init__( + self, + reporters: List[ReporterBase], + merge_function: Callable[[List[ReporterBase]], TrainingReport], + ) -> None: + super().__init__({}, {}) + self._reporters = reporters + self._merge_function = merge_function + self._flush_function = None + + def set_flush_function(self, flush_function) -> None: + self._flush_function = flush_function + + def log(self, **kwargs) -> None: + raise RuntimeError("You should call log() on this reporter") + + def flush(self, epoch: int) -> None: + if self._flush_function: + self._flush_function(self, epoch) + else: + for reporter in self._reporters: + reporter.flush(epoch) + + def generate_training_report(self) -> TrainingReport: + return self._merge_function(self._reporters) diff --git a/reagent/reporting/discrete_crr_reporter.py b/reagent/reporting/discrete_crr_reporter.py new file mode 100644 index 000000000..0cefcd06b --- /dev/null +++ b/reagent/reporting/discrete_crr_reporter.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import itertools +import logging +from typing import List, Optional + +import torch +from reagent.core import aggregators as agg +from reagent.core.observers import IntervalAggregatingObserver +from reagent.reporting.reporter_base import ReporterBase +from reagent.workflow.training_reports import DQNTrainingReport + + +logger = logging.getLogger(__name__) + + +class DiscreteCRRReporter(ReporterBase): + def __init__( + self, + actions: List[str], + report_interval: int = 100, + target_action_distribution: Optional[List[float]] = None, + recent_window_size: int = 100, + ): + self.value_list_observers = {} + self.aggregating_observers = { + **{ + "cpe_results": IntervalAggregatingObserver( + 1, agg.ListAggregator("cpe_details") + ), + }, + **{ + name: IntervalAggregatingObserver(report_interval, aggregator) + for name, aggregator in itertools.chain( + [ + ("td_loss", agg.MeanAggregator("td_loss")), + ("reward_loss", agg.MeanAggregator("reward_loss")), + ("actor_loss", agg.MeanAggregator("actor_loss")), + ( + "model_values", + agg.FunctionsByActionAggregator( + "model_values", + actions, + {"mean": torch.mean, "std": torch.std}, + ), + ), + ( + "logged_action", + agg.ActionCountAggregator("logged_actions", actions), + ), + ( + "model_action", + agg.ActionCountAggregator("model_action_idxs", actions), + ), + ( + "recent_rewards", + agg.RecentValuesAggregator("logged_rewards"), + ), + ], + [ + ( + f"{key}_tb", + agg.TensorBoardActionCountAggregator(key, title, actions), + ) + for key, title in [ + ("logged_actions", "logged"), + ("model_action_idxs", "model"), + ] + ], + [ + ( + f"{key}_tb", + agg.TensorBoardHistogramAndMeanAggregator(key, log_key), + ) + for key, log_key in [ + ("reward_loss", "reward_loss"), + ("logged_propensities", "propensities/logged"), + ("logged_rewards", "reward/logged"), + ] + ], + [ + ( + f"{key}_tb", + agg.TensorBoardActionHistogramAndMeanAggregator( + key, category, title, actions + ), + ) + for key, category, title in [ + ("model_propensities", "propensities", "model"), + ("model_rewards", "reward", "model"), + ("model_values", "value", "model"), + ] + ], + ) + }, + } + super().__init__(self.value_list_observers, self.aggregating_observers) + self.target_action_distribution = target_action_distribution + self.recent_window_size = 
recent_window_size + + # TODO: write this for OSS + def generate_training_report(self) -> DQNTrainingReport: + return DQNTrainingReport() diff --git a/reagent/reporting/discrete_dqn_reporter.py b/reagent/reporting/discrete_dqn_reporter.py new file mode 100644 index 000000000..25e080c7a --- /dev/null +++ b/reagent/reporting/discrete_dqn_reporter.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import itertools +import logging +from typing import List, Optional + +import torch +from reagent.core import aggregators as agg +from reagent.core.observers import IntervalAggregatingObserver +from reagent.reporting.reporter_base import ReporterBase +from reagent.workflow.training_reports import DQNTrainingReport + + +logger = logging.getLogger(__name__) + + +class DiscreteDQNReporter(ReporterBase): + def __init__( + self, + actions: List[str], + report_interval: int = 100, + target_action_distribution: Optional[List[float]] = None, + recent_window_size: int = 100, + ): + self.value_list_observers = {} + self.aggregating_observers = { + **{ + "cpe_results": IntervalAggregatingObserver( + 1, agg.ListAggregator("cpe_details") + ), + }, + **{ + name: IntervalAggregatingObserver(report_interval, aggregator) + for name, aggregator in itertools.chain( + [ + ("td_loss", agg.MeanAggregator("td_loss")), + ("reward_loss", agg.MeanAggregator("reward_loss")), + ( + "model_values", + agg.FunctionsByActionAggregator( + "model_values", + actions, + {"mean": torch.mean, "std": torch.std}, + ), + ), + ( + "logged_action", + agg.ActionCountAggregator("logged_actions", actions), + ), + ( + "model_action", + agg.ActionCountAggregator("model_action_idxs", actions), + ), + ( + "recent_rewards", + agg.RecentValuesAggregator("logged_rewards"), + ), + ], + [ + ( + f"{key}_tb", + agg.TensorBoardActionCountAggregator(key, title, actions), + ) + for key, title in [ + ("logged_actions", "logged"), + ("model_action_idxs", "model"), + ] + ], + [ + ( + f"{key}_tb", + agg.TensorBoardHistogramAndMeanAggregator(key, log_key), + ) + for key, log_key in [ + ("td_loss", "td_loss"), + ("reward_loss", "reward_loss"), + ("logged_propensities", "propensities/logged"), + ("logged_rewards", "reward/logged"), + ] + ], + [ + ( + f"{key}_tb", + agg.TensorBoardActionHistogramAndMeanAggregator( + key, category, title, actions + ), + ) + for key, category, title in [ + ("model_propensities", "propensities", "model"), + ("model_rewards", "reward", "model"), + ("model_values", "value", "model"), + ] + ], + ) + }, + } + super().__init__(self.value_list_observers, self.aggregating_observers) + self.target_action_distribution = target_action_distribution + self.recent_window_size = recent_window_size + + # TODO: write this for OSS + def generate_training_report(self) -> DQNTrainingReport: + return DQNTrainingReport() diff --git a/reagent/reporting/parametric_dqn_reporter.py b/reagent/reporting/parametric_dqn_reporter.py new file mode 100644 index 000000000..4a46d7831 --- /dev/null +++ b/reagent/reporting/parametric_dqn_reporter.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +import itertools +import logging + +from reagent.core import aggregators as agg +from reagent.core.observers import IntervalAggregatingObserver +from reagent.reporting.reporter_base import ReporterBase +from reagent.workflow.training_reports import ParametricDQNTrainingReport + + +logger = logging.getLogger(__name__) + + +class ParametricDQNReporter(ReporterBase): + def __init__(self, report_interval: int = 100): + self.value_list_observers = {} + self.aggregating_observers = { + **{ + "cpe_results": IntervalAggregatingObserver( + 1, agg.ListAggregator("cpe_details") + ), + }, + **{ + name: IntervalAggregatingObserver(report_interval, aggregator) + for name, aggregator in itertools.chain( + [ + ("td_loss", agg.MeanAggregator("td_loss")), + ("reward_loss", agg.MeanAggregator("reward_loss")), + ( + "recent_rewards", + agg.RecentValuesAggregator("logged_rewards"), + ), + ( + "model_values_on_logged_actions", + agg.MeanAggregator("model_values_on_logged_actions"), + ), + ], + [ + ( + f"{key}_tb", + agg.TensorBoardHistogramAndMeanAggregator(key, log_key), + ) + for key, log_key in [ + ("td_loss", "td_loss"), + ("reward_loss", "reward_loss"), + ("logged_propensities", "propensities/logged"), + ("logged_rewards", "reward/logged"), + ] + ], + ) + }, + } + super().__init__(self.value_list_observers, self.aggregating_observers) + + # TODO: write this for OSS + def generate_training_report(self) -> ParametricDQNTrainingReport: + return ParametricDQNTrainingReport() diff --git a/reagent/reporting/reporter_base.py b/reagent/reporting/reporter_base.py new file mode 100644 index 000000000..e191d05e1 --- /dev/null +++ b/reagent/reporting/reporter_base.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +import abc +import logging +from typing import Dict + +import torch +from reagent.core.observers import ( + CompositeObserver, + EpochEndObserver, + IntervalAggregatingObserver, + ValueListObserver, +) +from reagent.core.result_registries import TrainingReport +from reagent.core.tracker import ObservableMixin +from reagent.core.utils import lazy_property + + +logger = logging.getLogger(__name__) + + +class ReporterBase(CompositeObserver): + def __init__( + self, + value_list_observers: Dict[str, ValueListObserver], + aggregating_observers: Dict[str, IntervalAggregatingObserver], + ): + epoch_end_observer = EpochEndObserver(self.flush) + self._value_list_observers = value_list_observers + self._aggregating_observers = aggregating_observers + super().__init__( + list(value_list_observers.values()) + + list(aggregating_observers.values()) + + [epoch_end_observer] + ) + self._reporter_observable = _ReporterObservable(self) + + def log(self, **kwargs) -> None: + self._reporter_observable.notify_observers(**kwargs) + + def flush(self, epoch: int): + logger.info(f"Epoch {epoch} ended") + + for observer in self._aggregating_observers.values(): + observer.flush() + + def __getattr__(self, key: str): + val = self._value_list_observers.get(key, None) + if val is not None: + return val + val = self._aggregating_observers.get(key, None) + if val is not None: + return val.aggregator + raise AttributeError + + # TODO: write this for OSS + @abc.abstractmethod + def generate_training_report(self) -> TrainingReport: + pass + + +class _ReporterObservable(ObservableMixin): + def __init__(self, reporter) -> None: + self._reporter = reporter + super().__init__() + self.add_observer(reporter) + + @lazy_property + def _observable_value_types(self): + return {k: torch.Tensor for k in self._reporter.get_observing_keys()} diff --git a/reagent/reporting/reward_network_reporter.py b/reagent/reporting/reward_network_reporter.py new file mode 100644 index 000000000..528cbf517 --- /dev/null +++ b/reagent/reporting/reward_network_reporter.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +import copy +import logging + +from reagent.core import aggregators as agg +from reagent.core.observers import IntervalAggregatingObserver +from reagent.models.base import ModelBase +from reagent.reporting.reporter_base import ReporterBase +from reagent.training.reward_network_trainer import LossFunction + + +logger = logging.getLogger(__name__) + + +class RewardNetworkReporter(ReporterBase): + def __init__( + self, + loss_type: LossFunction, + model_description: str, + report_interval: int = 100, + ): + self.loss_type = loss_type + self.model_description = model_description + self.report_interval = report_interval + self.best_model = None + self.best_model_loss = float("inf") + super().__init__(self.value_list_observers, self.aggregating_observers) + + @property + def value_list_observers(self): + return {} + + @property + def aggregating_observers(self): + return { + name: IntervalAggregatingObserver( + self.report_interval if "loss" in name else 1, aggregator + ) + for name, aggregator in [ + ("loss", agg.MeanAggregator("loss")), + ("unweighted_loss", agg.MeanAggregator("unweighted_loss")), + ("eval_loss", agg.MeanAggregator("eval_loss")), + ("eval_unweighted_loss", agg.MeanAggregator("eval_unweighted_loss")), + ("eval_rewards", agg.LastEpochListAggregator("eval_rewards")), + ("eval_pred_rewards", agg.LastEpochListAggregator("eval_pred_rewards")), + ] + } + + def update_best_model(self, loss: float, model: ModelBase): + if loss < self.best_model_loss: + self.best_model_loss = loss + self.best_model = copy.deepcopy(model) diff --git a/reagent/reporting/seq2reward_reporter.py b/reagent/reporting/seq2reward_reporter.py new file mode 100644 index 000000000..b179b40ce --- /dev/null +++ b/reagent/reporting/seq2reward_reporter.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +import itertools +import logging +from typing import List + +import torch +from reagent.core import aggregators as agg +from reagent.core.observers import IntervalAggregatingObserver +from reagent.reporting.reporter_base import ReporterBase +from reagent.workflow.training_reports import Seq2RewardTrainingReport + + +logger = logging.getLogger(__name__) + + +class Seq2RewardReporter(ReporterBase): + def __init__(self, action_names: List[str], report_interval: int = 100): + self.action_names = action_names + self.report_interval = report_interval + super().__init__(self.value_list_observers, self.aggregating_observers) + + @property + def value_list_observers(self): + return {} + + @property + def aggregating_observers(self): + return { + name: IntervalAggregatingObserver(self.report_interval, aggregator) + for name, aggregator in itertools.chain( + [ + ("mse_loss_per_batch", agg.MeanAggregator("mse_loss")), + ( + "step_entropy_loss_per_batch", + agg.MeanAggregator("step_entropy_loss"), + ), + ( + "q_values_per_batch", + agg.FunctionsByActionAggregator( + "q_values", self.action_names, {"mean": torch.mean} + ), + ), + ("eval_mse_loss_per_batch", agg.MeanAggregator("eval_mse_loss")), + ( + "eval_step_entropy_loss_per_batch", + agg.MeanAggregator("eval_step_entropy_loss"), + ), + ( + "eval_q_values_per_batch", + agg.FunctionsByActionAggregator( + "eval_q_values", self.action_names, {"mean": torch.mean} + ), + ), + ( + "eval_action_distribution_per_batch", + agg.FunctionsByActionAggregator( + "eval_action_distribution", + self.action_names, + {"mean": torch.mean}, + ), + ), + ], + [ + ( + f"{key}_tb", + agg.TensorBoardHistogramAndMeanAggregator(key, log_key), + ) + for key, log_key in [ + ("mse_loss", "mse_loss"), + ("step_entropy_loss", "step_entropy_loss"), + ("eval_mse_loss", "eval_mse_loss"), + ("eval_step_entropy_loss", "eval_step_entropy_loss"), + ] + ], + [ + ( + f"{key}_tb", + agg.TensorBoardActionHistogramAndMeanAggregator( + key, category, title, self.action_names + ), + ) + for key, category, title in [ + ("q_values", "q_values", "training"), + ("eval_q_values", "q_values", "eval"), + ("eval_action_distribution", "action_distribution", "eval"), + ] + ], + ) + } + + # TODO: write this for OSS + def generate_training_report(self) -> Seq2RewardTrainingReport: + return Seq2RewardTrainingReport() + + +class Seq2RewardCompressReporter(Seq2RewardReporter): + @property + def aggregating_observers(self): + return { + name: IntervalAggregatingObserver(self.report_interval, aggregator) + for name, aggregator in itertools.chain( + [ + ("mse_loss_per_batch", agg.MeanAggregator("mse_loss")), + ("accuracy_per_batch", agg.MeanAggregator("accuracy")), + ("eval_mse_loss_per_batch", agg.MeanAggregator("eval_mse_loss")), + ("eval_accuracy_per_batch", agg.MeanAggregator("eval_accuracy")), + ( + "eval_q_values_per_batch", + agg.FunctionsByActionAggregator( + "eval_q_values", self.action_names, {"mean": torch.mean} + ), + ), + ( + "eval_action_distribution_per_batch", + agg.FunctionsByActionAggregator( + "eval_action_distribution", + self.action_names, + {"mean": torch.mean}, + ), + ), + ], + [ + ( + f"{key}_tb", + agg.TensorBoardHistogramAndMeanAggregator(key, log_key), + ) + for key, log_key in [ + ("mse_loss", "compress_mse_loss"), + ("accuracy", "compress_accuracy"), + ("eval_mse_loss", "compress_eval_mse_loss"), + ("eval_accuracy", "compress_eval_accuracy"), + ] + ], + [ + ( + f"{key}_tb", + agg.TensorBoardActionHistogramAndMeanAggregator( + key, category, title, self.action_names + ), + ) + for 
key, category, title in [ + ("eval_q_values", "q_values", "compress_eval"), + ( + "eval_action_distribution", + "action_distribution", + "compress_eval", + ), + ] + ], + ) + } diff --git a/reagent/workflow/reporters/parametric_dqn_reporter.py b/reagent/reporting/slate_q_reporter.py similarity index 55% rename from reagent/workflow/reporters/parametric_dqn_reporter.py rename to reagent/reporting/slate_q_reporter.py index bd0c9d821..4a6708493 100644 --- a/reagent/workflow/reporters/parametric_dqn_reporter.py +++ b/reagent/reporting/slate_q_reporter.py @@ -1,28 +1,39 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import itertools import logging -from collections import OrderedDict from reagent.core import aggregators as agg from reagent.core.observers import IntervalAggregatingObserver, ValueListObserver -from reagent.workflow.reporters.reporter_base import ReporterBase -from reagent.workflow.training_reports import ParametricDQNTrainingReport +from reagent.reporting.reporter_base import ReporterBase +from reagent.workflow.training_reports import SlateQTrainingReport logger = logging.getLogger(__name__) -class ParametricDQNReporter(ReporterBase): +class SlateQReporter(ReporterBase): def __init__(self, report_interval: int = 100): - self.value_list_observers = {"cpe_results": ValueListObserver("cpe_details")} - self.aggregating_observers = OrderedDict( - (name, IntervalAggregatingObserver(report_interval, aggregator)) + self.report_interval = report_interval + super().__init__(self.value_list_observers, self.aggregating_observers) + + @property + def value_list_observers(self): + return {"cpe_results": ValueListObserver("cpe_details")} + + @property + def aggregating_observers(self): + return { + name: IntervalAggregatingObserver(self.report_interval, aggregator) for name, aggregator in itertools.chain( [ ("td_loss", agg.MeanAggregator("td_loss")), - ("reward_loss", agg.MeanAggregator("reward_loss")), ("recent_rewards", agg.RecentValuesAggregator("logged_rewards")), + ( + "logged_action_q_value", + agg.MeanAggregator("model_values_on_logged_actions"), + ), ], [ ( @@ -32,14 +43,11 @@ def __init__(self, report_interval: int = 100): for key, log_key in [ ("td_loss", "td_loss"), ("reward_loss", "reward_loss"), - ("logged_propensities", "propensities/logged"), ("logged_rewards", "reward/logged"), ] ], ) - ) - super().__init__(self.value_list_observers, self.aggregating_observers) + } - # TODO: write this for OSS - def generate_training_report(self) -> ParametricDQNTrainingReport: - return ParametricDQNTrainingReport() + def generate_training_report(self) -> SlateQTrainingReport: + return SlateQTrainingReport() diff --git a/reagent/reporting/td3_reporter.py b/reagent/reporting/td3_reporter.py new file mode 100644 index 000000000..2ec482ac8 --- /dev/null +++ b/reagent/reporting/td3_reporter.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
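The Seq2Reward reporters and the SlateQ reporter above all assemble aggregating_observers the same way: a dict comprehension over itertools.chain of per-metric lists, with TensorBoard variants registered under a "_tb" suffix. A stripped-down sketch of just that composition pattern (the string values stand in for the IntervalAggregatingObserver instances built in the real code):

import itertools

scalar_metrics = ["mse_loss", "accuracy", "eval_mse_loss", "eval_accuracy"]

observers = {
    name: make
    for name, make in itertools.chain(
        # per-batch mean aggregators, keyed by the metric name itself
        [(m, f"MeanAggregator({m!r})") for m in scalar_metrics],
        # TensorBoard histogram/mean aggregators, keyed by "<metric>_tb"
        [(f"{m}_tb", f"TensorBoardHistogramAndMeanAggregator({m!r})") for m in scalar_metrics],
    )
}

assert set(observers) == {
    "mse_loss", "accuracy", "eval_mse_loss", "eval_accuracy",
    "mse_loss_tb", "accuracy_tb", "eval_mse_loss_tb", "eval_accuracy_tb",
}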
+ +import logging + +from reagent.core import aggregators as agg +from reagent.core.observers import IntervalAggregatingObserver +from reagent.reporting.actor_critic_reporter import ActorCriticReporter + + +logger = logging.getLogger(__name__) + + +class TD3Reporter(ActorCriticReporter): + @property + def aggregating_observers(self): + ret = super().aggregating_observers + ret.update( + { + name: IntervalAggregatingObserver(1, aggregator) + for name, aggregator in [ + ( + f"{key}_tb", + agg.TensorBoardHistogramAndMeanAggregator(key, log_key), + ) + for key, log_key in [ + ("q1_loss", "loss/q1_loss"), + ("actor_loss", "loss/actor_loss"), + ("q1_value", "q_value/q1_value"), + ("next_q_value", "q_value/next_q_value"), + ("target_q_value", "q_value/target_q_value"), + ("actor_q1_value", "q_value/actor_q1_value"), + ("q2_loss", "loss/q2_loss"), + ("q2_value", "q_value/q2_value"), + ] + ] + } + ) + return ret diff --git a/reagent/reporting/world_model_reporter.py b/reagent/reporting/world_model_reporter.py new file mode 100644 index 000000000..71f1d7ee6 --- /dev/null +++ b/reagent/reporting/world_model_reporter.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging + +from reagent.core import aggregators as agg +from reagent.core.observers import IntervalAggregatingObserver +from reagent.reporting.reporter_base import ReporterBase +from reagent.workflow.training_reports import WorldModelTrainingReport + + +logger = logging.getLogger(__name__) + + +class WorldModelReporter(ReporterBase): + def __init__(self, report_interval: int = 100): + self.report_interval = report_interval + super().__init__(self.value_list_observers, self.aggregating_observers) + + @property + def value_list_observers(self): + return {} + + @property + def aggregating_observers(self): + agg_obs = {} + for name in [ + "loss", + "gmm", + "bce", + "mse", + "eval_loss", + "eval_gmm", + "eval_bce", + "eval_mse", + "test_loss", + "test_gmm", + "test_bce", + "test_mse", + ]: + # Mean Aggegators - average losses over every report_interval minibatches + mean_agg = agg.MeanAggregator(name) + agg_obs[name] = IntervalAggregatingObserver(self.report_interval, mean_agg) + # Tensorboard aggregators + tb_obs_name = f"{name}_tb" + tb_agg = agg.TensorBoardHistogramAndMeanAggregator(name, name) + agg_obs[tb_obs_name] = IntervalAggregatingObserver( + self.report_interval, tb_agg + ) + # Epoch Aggregators - average losses per epoch + ep_obs_name = f"{name}_epoch" + ep_mean_agg = agg.MeanAggregator(name) + agg_obs[ep_obs_name] = IntervalAggregatingObserver( + 999999999999999, # a huge report interval to prevent from aggregating before epoch ends + ep_mean_agg, + ) + return agg_obs + + # TODO: write this for OSS + def generate_training_report(self) -> WorldModelTrainingReport: + return WorldModelTrainingReport() diff --git a/reagent/samplers/__init__.py b/reagent/samplers/__init__.py new file mode 100644 index 000000000..ff3ab48a4 --- /dev/null +++ b/reagent/samplers/__init__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from .frechet import FrechetSort + + +__all__ = ["FrechetSort"] diff --git a/reagent/samplers/frechet.py b/reagent/samplers/frechet.py new file mode 100644 index 000000000..4c34bc93b --- /dev/null +++ b/reagent/samplers/frechet.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
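reagent/samplers/__init__.py above re-exports FrechetSort, which is implemented in the file that follows. A hedged usage sketch based on that implementation's constructor arguments and on sample_action/log_prob (the score values are illustrative; the action/log_prob field names are read off the ActorOutput construction inside sample_action):

import torch
from reagent.samplers import FrechetSort

# shape: higher values track a plain descending argsort more closely
# topk: truncate the returned ranking; equiv_len: how many leading ranks
# count when computing log_prob
sampler = FrechetSort(shape=3.0, topk=5, equiv_len=3)

scores = torch.rand(2, 8)               # batch of 2 slates, 8 candidate items each
out = sampler.sample_action(scores)     # returns rlt.ActorOutput(action, log_prob)
ranking, log_prob = out.action, out.log_prob

# log-probability of an explicit ranking under the same scores
lp = sampler.log_prob(scores, torch.argsort(scores, dim=1, descending=True))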
+import logging +import math +from typing import Optional + +import reagent.core.types as rlt +import torch +import torch.nn.functional as F +from reagent.core.configuration import resolve_defaults +from reagent.gym.types import Sampler +from torch.distributions import Gumbel + +logger = logging.getLogger(__name__) + + +class FrechetSort(Sampler): + EPS = 1e-12 + + @resolve_defaults + def __init__( + self, + shape: float = 1.0, + topk: Optional[int] = None, + equiv_len: Optional[int] = None, + log_scores: bool = False, + ): + """FréchetSort is a softer version of descending sort which samples all possible + orderings of items favoring orderings which resemble descending sort. This can + be used to convert descending sort by rank score into a differentiable, + stochastic policy amenable to policy gradient algorithms. + + :param shape: parameter of Frechet Distribution. Lower values correspond to + aggressive deviations from descending sort. + :param topk: If specified, only the first topk actions are specified. + :param equiv_len: Orders are considered equivalent if the top equiv_len match. Used + in probability computations. + Essentially specifies the action space. + :param log_scores Scores passed in are already log-transformed. In this case, we would + simply add Gumbel noise. + For LearnVM, we set this to be True because we expect input and output scores + to be in the log space. + + Example: + + Consider the sampler: + + sampler = FrechetSort(shape=3, topk=5, equiv_len=3) + + Given a set of scores, this sampler will produce indices of items roughly + resembling a argsort by scores in descending order. The higher the shape, + the more it would resemble a descending argsort. `topk=5` means only the top + 5 ranks will be output. The `equiv_len` determines what orders are considered + equivalent for probability computation. In this example, the sampler will + produce probability for the top 3 items appearing in a given order for the + `log_prob` call. + """ + self.shape = shape + self.topk = topk + self.upto = equiv_len + if topk is not None: + if equiv_len is None: + self.upto = topk + # pyre-fixme[58]: `>` is not supported for operand types `Optional[int]` + # and `Optional[int]`. + if self.upto > self.topk: + raise ValueError(f"Equiv length {equiv_len} cannot exceed topk={topk}.") + self.gumbel_noise = Gumbel(0, 1.0 / shape) + self.log_scores = log_scores + + def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput: + """Sample a ranking according to Frechet sort. Note that possible_actions_mask + is ignored as the list of rankings scales exponentially with slate size and + number of items and it can be difficult to enumerate them.""" + assert scores.dim() == 2, "sample_action only accepts batches" + log_scores = scores if self.log_scores else torch.log(scores) + perturbed = log_scores + self.gumbel_noise.sample(scores.shape) + action = torch.argsort(perturbed.detach(), descending=True) + log_prob = self.log_prob(scores, action) + # Only truncate the action before returning + if self.topk is not None: + action = action[: self.topk] + return rlt.ActorOutput(action, log_prob) + + def log_prob( + self, + scores: torch.Tensor, + action: torch.Tensor, + equiv_len_override: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """ + What is the probability of a given set of scores producing the given + list of permutations only considering the top `equiv_len` ranks? + + We may want to override the default equiv_len here when we know the having larger + action space doesn't matter. 
i.e. in Reels + """ + upto = self.upto + if equiv_len_override is not None: + assert equiv_len_override.shape == ( + scores.shape[0], + ), f"Invalid shape {equiv_len_override.shape}, compared to scores {scores.shape}. equiv_len_override {equiv_len_override}" + upto = equiv_len_override.long() + if self.topk is not None and torch.any(equiv_len_override > self.topk): + raise ValueError( + f"Override {equiv_len_override} cannot exceed topk={self.topk}." + ) + + squeeze = False + if len(scores.shape) == 1: + squeeze = True + scores = scores.unsqueeze(0) + action = action.unsqueeze(0) + + assert len(action.shape) == len(scores.shape) == 2, "scores should be batch" + if action.shape[1] > scores.shape[1]: + raise ValueError( + f"action cardinality ({action.shape[1]}) is larger than the number of scores ({scores.shape[1]})" + ) + elif action.shape[1] < scores.shape[1]: + raise NotImplementedError( + f"This semantic is ambiguous. If you have shorter slate, pad it with scores.shape[1] ({scores.shape[1]})" + ) + + log_scores = scores if self.log_scores else torch.log(scores) + n = log_scores.shape[-1] + # Add scores for the padding value + log_scores = torch.cat( + [ + log_scores, + torch.full( + (log_scores.shape[0], 1), -math.inf, device=log_scores.device + ), + ], + dim=1, + ) + log_scores = torch.gather(log_scores, 1, action) * self.shape + + p = upto if upto is not None else n + # We should unsqueeze here + if isinstance(p, int): + log_prob = sum( + torch.nan_to_num( + F.log_softmax(log_scores[:, i:], dim=1)[:, 0], neginf=0.0 + ) + for i in range(p) + ) + elif isinstance(p, torch.Tensor): + # do masked sum + log_prob = sum( + torch.nan_to_num( + F.log_softmax(log_scores[:, i:], dim=1)[:, 0], neginf=0.0 + ) + * (i < p).float() + for i in range(n) + ) + else: + raise RuntimeError(f"p is {p}") + + # pyre-fixme[16]: Item `int` of `Union[typing_extensions.Literal[0], + # Tensor]` has no attribute `isnan`. + assert not torch.any(log_prob.isnan()), f"Nan in {log_prob}" + # pyre-fixme[7]: Expected `Tensor` but got `Union[int, Tensor]`. + return log_prob diff --git a/reagent/scripts/__init__.py b/reagent/scripts/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/scripts/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/scripts/hparam_tuning.py b/reagent/scripts/hparam_tuning.py new file mode 100644 index 000000000..17b1fe548 --- /dev/null +++ b/reagent/scripts/hparam_tuning.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 +# (c) Facebook, Inc. and its affiliates. Confidential and proprietary. + +import logging # isort:skip + +logging.disable() # isort:skip + +import copy +import json +import os +from typing import Any, Callable, Dict, List, Optional, Tuple + +import numpy as np +import torch.multiprocessing as mp +from ax.service.ax_client import AxClient + + +def ax_evaluate_params( + params_list: List[Dict], + fixed_params: Dict, + eval_fn: Callable, + parse_params_fn: Optional[Callable] = None, + num_seeds: int = 10, + num_proc: int = 20, +) -> List[Dict[str, Tuple[float, float]]]: + """ + Evaluate a single set of hyperparameters for Ax search. + + Args: + params_list: A list of hyperparameter configs to evaluate. + fixed_params: A dictionary of hyperparameters that are held fixed between evaluations. + eval_fn: Evaluation function that returns a dictionary of metric values. 
+ parse_params_fn: A optional function applied to the hyperparameter dictionary to parse some elements. Can be useful + if the best representation for Ax doesn't match the format accepted by the eval_fn. + num_seeds: Number of random seeds among which the metrics are averaged. + num_proc: Number of processes to run in parallel. + Returns: + A list of average evaluation metrics (one per config) + """ + # create a list of full hyperparameter configurations to be evaluated + params_with_seed_list = [] + for params in params_list: + for s in range(num_seeds): + params_s = copy.deepcopy(params) + params_s.update(fixed_params) + params_s["seed"] = s + if parse_params_fn is not None: + params_s = parse_params_fn(params_s) + params_with_seed_list.append(params_s) + + # evaluate metrics in parallel using multiprocessing + if num_proc > 1: + with mp.get_context("spawn").Pool( + min(len(params_with_seed_list), num_proc) + ) as p: + metrics = p.map(eval_fn, params_with_seed_list) + else: + metrics = list(map(eval_fn, params_with_seed_list)) + + # calculate the average metrics across different seeds + avg_metrics = [] + num_params = len(params_list) + for i in range(num_params): + avg_metrics.append( + { + k: ( + np.mean( + [m[k] for m in metrics[i * num_seeds : (i + 1) * num_seeds]] + ), + np.std( + [m[k] for m in metrics[i * num_seeds : (i + 1) * num_seeds]] + ), + ) + for k in metrics[0].keys() + } + ) + return avg_metrics + + +def run_ax_search( + fixed_params: Dict, + ax_params: List[Dict[str, Any]], + eval_fn: Callable, + obj_name: str, + minimize: bool, + id_: str, + parse_params_fn: Optional[Callable] = None, + ax_param_constraints: Optional[List[str]] = None, + num_ax_steps: int = 50, + num_concur_samples: int = 2, + num_seeds: int = 10, + num_proc: int = 20, + folder_name: Optional[str] = None, + verbose: bool = False, +) -> Tuple[Dict[str, Any], AxClient]: + """ + Run a search for best hyperparameter values using Ax. + Note that this requires the Ax package (https://ax.dev/) to be installed. + + Args: + fixed_params: Fixed values of hyperparameters. + ax_params: Ax configuration for hyperparameters that are searched over. See docs for ax_client.create_experiment() + eval_fn: Evaluation function that returns a dictionary of metric values. + obj_name: Objective name (key of the dict returned by eval_fn) + minimize: If True, objective is minimized, if False it's maximized. + id_: An arbitrary string identifier of the search (used as part of filename where results are saved) + parse_params_fn: A function applied to the parameter dictionary to parse it. Can be used + if the best representation for Ax doesn't match the format accepted by the eval_fn. + ax_param_constraints: Constraints for the parameters that are searched over. + num_ax_steps: The number of ax steps to take. + num_concur_samples: Number of configurations to sample per ax step (in parallel) + num_seeds: Number of seeds to average over + num_proc: Number of processes to run in parallel. 
+ folder_name: Folder where to save best found parameters + verbose: If True, some details are printed out + Returns: + A dict of best hyperparameters found by Ax + """ + for p in ax_params: + assert ( + p["name"] not in fixed_params + ), f'Parameter {p["name"]} appers in both fixed and search parameters' + if ax_param_constraints is None: + ax_param_constraints = [] + ax_client = AxClient() + ax_client.create_experiment( + name=f"hparams_search_{id_}", + parameters=ax_params, + objective_name=obj_name, + minimize=minimize, + parameter_constraints=ax_param_constraints, + choose_generation_strategy_kwargs={ + "max_parallelism_override": num_concur_samples, + "num_initialization_trials": max(num_concur_samples, 5, len(ax_params)), + }, + ) + best_params = None + all_considered_params = [] + all_considered_metrics = [] + + try: + for i in range(1, num_ax_steps + 1): + if verbose: + print(f"ax step {i}/{num_ax_steps}") + params_list = [] + trial_indices_list = [] + for _ in range(num_concur_samples): + # sample several values (to be evaluated in parallel) + parameters, trial_index = ax_client.get_next_trial() + params_list.append(parameters) + trial_indices_list.append(trial_index) + res = ax_evaluate_params( + params_list, + fixed_params=fixed_params, + eval_fn=eval_fn, + parse_params_fn=parse_params_fn, + num_seeds=num_seeds, + num_proc=num_proc, + ) + all_considered_params.extend(params_list) + all_considered_metrics.extend(res) + for t_i, v in zip(trial_indices_list, res): + ax_client.complete_trial(trial_index=t_i, raw_data=v) + best_params, predicted_metrics = ax_client.get_best_parameters() + predicted_metrics = predicted_metrics[0] # choose expected metric values + if verbose: + print(best_params, predicted_metrics) + # save at every iteration in case search is interrupted + if folder_name is not None: + with open( + os.path.join( + os.path.expanduser(folder_name), + f"ax_results_{id_}.json", + ), + "w", + ) as f: + json.dump( + { + "best_params": best_params, + "predicted_metrics": predicted_metrics, + "fixed_params": fixed_params, + "ax_params": ax_params, + "num_ax_steps": i, + "num_concur_samples": num_concur_samples, + "num_seeds": num_seeds, + "num_proc": num_proc, + "all_considered_params": all_considered_params, + "all_considered_metrics": all_considered_metrics, + }, + f, + indent=4, + ) + except KeyboardInterrupt: + # handle keyboard interruption to enable returning intermediate results if interrupted + pass + return best_params, ax_client diff --git a/reagent/test/base/horizon_test_base.py b/reagent/test/base/horizon_test_base.py index 0feef7da9..466d0bce7 100644 --- a/reagent/test/base/horizon_test_base.py +++ b/reagent/test/base/horizon_test_base.py @@ -9,7 +9,9 @@ import numpy as np import torch from reagent.core.configuration import make_config_class -from reagent.tensorboardX import SummaryWriterContext +from reagent.core.tensorboardX import SummaryWriterContext + +# pyre-fixme[21]: Could not find name `YAML` in `ruamel.yaml`. from ruamel.yaml import YAML @@ -17,18 +19,19 @@ class HorizonTestBase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: SummaryWriterContext._reset_globals() logging.basicConfig(level=logging.INFO) np.random.seed(SEED) torch.manual_seed(SEED) random.seed(SEED) - def tearDown(self): + def tearDown(self) -> None: SummaryWriterContext._reset_globals() @classmethod def run_from_config(cls, run_test: Callable, config_path: str, use_gpu: bool): + # pyre-fixme[16]: Module `yaml` has no attribute `YAML`. 
yaml = YAML(typ="safe") with open(config_path, "r") as f: config_dict = yaml.load(f.read()) diff --git a/reagent/test/base/test_json_serialize.py b/reagent/test/base/test_json_serialize.py deleted file mode 100644 index 70a0a808f..000000000 --- a/reagent/test/base/test_json_serialize.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -import dataclasses -import typing - -from reagent import parameters as rlp -from reagent.json_serialize import json_to_object, object_to_json -from reagent.test.base.horizon_test_base import HorizonTestBase - - -class TestJsonSerialize(HorizonTestBase): - def test_json_serialize_basic(self): - typed_param = rlp.NormalizationData( - dense_normalization_parameters={ - 0: rlp.NormalizationParameters(feature_type="CONTINUOUS") - } - ) - self.assertEqual( - typed_param, - json_to_object(object_to_json(typed_param), rlp.NormalizationData), - ) - - def test_json_serialize_nested(self): - @dataclasses.dataclass - class Test1: - x: int - - @dataclasses.dataclass - class Test2: - x: typing.List[Test1] - y: typing.Dict[str, Test1] - - t = Test2(x=[Test1(x=3), Test1(x=4)], y={"1": Test1(x=5), "2": Test1(x=6)}) - self.assertEqual(t, json_to_object(object_to_json(t), Test2)) diff --git a/reagent/test/base/test_tensorboardX.py b/reagent/test/base/test_tensorboardX.py index 7dd540d1b..3faff5a93 100644 --- a/reagent/test/base/test_tensorboardX.py +++ b/reagent/test/base/test_tensorboardX.py @@ -3,23 +3,23 @@ import unittest from tempfile import TemporaryDirectory -from unittest.mock import MagicMock, call +from unittest.mock import call, MagicMock import torch -from reagent.tensorboardX import SummaryWriterContext, summary_writer_context +from reagent.core.tensorboardX import summary_writer_context, SummaryWriterContext from reagent.test.base.horizon_test_base import HorizonTestBase from torch.utils.tensorboard import SummaryWriter class TestSummaryWriterContext(HorizonTestBase): - def test_noop(self): + def test_noop(self) -> None: self.assertIsNone(SummaryWriterContext.add_scalar("test", torch.ones(1))) - def test_with_none(self): + def test_with_none(self) -> None: with summary_writer_context(None): self.assertIsNone(SummaryWriterContext.add_scalar("test", torch.ones(1))) - def test_writing(self): + def test_writing(self) -> None: with TemporaryDirectory() as tmp_dir: writer = SummaryWriter(tmp_dir) writer.add_scalar = MagicMock() @@ -29,7 +29,7 @@ def test_writing(self): "test", torch.ones(1), global_step=0 ) - def test_writing_stack(self): + def test_writing_stack(self) -> None: with TemporaryDirectory() as tmp_dir1, TemporaryDirectory() as tmp_dir2: writer1 = SummaryWriter(tmp_dir1) writer1.add_scalar = MagicMock() @@ -46,15 +46,16 @@ def test_writing_stack(self): "test2", torch.ones(1), global_step=0 ) - def test_swallowing_exception(self): + def test_swallowing_exception(self) -> None: with TemporaryDirectory() as tmp_dir: writer = SummaryWriter(tmp_dir) writer.add_scalar = MagicMock(side_effect=NotImplementedError("test")) + # pyre-fixme[16]: `SummaryWriter` has no attribute `exceptions_to_ignore`. 
writer.exceptions_to_ignore = (NotImplementedError, KeyError) with summary_writer_context(writer): SummaryWriterContext.add_scalar("test", torch.ones(1)) - def test_not_swallowing_exception(self): + def test_not_swallowing_exception(self) -> None: with TemporaryDirectory() as tmp_dir: writer = SummaryWriter(tmp_dir) writer.add_scalar = MagicMock(side_effect=NotImplementedError("test")) @@ -63,13 +64,13 @@ def test_not_swallowing_exception(self): ), summary_writer_context(writer): SummaryWriterContext.add_scalar("test", torch.ones(1)) - def test_swallowing_histogram_value_error(self): + def test_swallowing_histogram_value_error(self) -> None: with TemporaryDirectory() as tmp_dir: writer = SummaryWriter(tmp_dir) with summary_writer_context(writer): SummaryWriterContext.add_histogram("bad_histogram", torch.ones(100, 1)) - def test_global_step(self): + def test_global_step(self) -> None: with TemporaryDirectory() as tmp_dir: writer = SummaryWriter(tmp_dir) writer.add_scalar = MagicMock() @@ -85,7 +86,7 @@ def test_global_step(self): ) self.assertEqual(2, len(writer.add_scalar.mock_calls)) - def test_add_custom_scalars(self): + def test_add_custom_scalars(self) -> None: with TemporaryDirectory() as tmp_dir: writer = SummaryWriter(tmp_dir) writer.add_custom_scalars = MagicMock() diff --git a/reagent/test/base/test_types.py b/reagent/test/base/test_types.py new file mode 100644 index 000000000..f41e4565e --- /dev/null +++ b/reagent/test/base/test_types.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import unittest + +import reagent.core.types as rlt +import torch +from torchrec.sparse.jagged_tensor import KeyedJaggedTensor + + +class TestTypes(unittest.TestCase): + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_tensor_data_class_to_cuda(self): + # Test if TensorDataClass.to(...) 
can move to designated device + batch_size = 4 + dim = 5 + float_features = torch.randn(batch_size, dim) + keys = ["Key0", "Key1", "Key2"] + values = torch.arange(10).float() + lengths = torch.tensor([2, 0, 1, 1, 1, 1, 3, 0, 0, 1, 0, 0]) + id_list_features = KeyedJaggedTensor(keys=keys, values=values, lengths=lengths) + id_list_features_raw = { + "key0": (torch.randn(batch_size, dim), torch.randn(batch_size, dim)) + } + data = rlt.FeatureData( + float_features=float_features, + id_list_features=id_list_features, + id_list_features_raw=id_list_features_raw, + ) + data_cuda = data.to(torch.device("cuda")) + assert data_cuda.float_features.device.type == "cuda" + assert data_cuda.id_list_features.values().device.type == "cuda" + assert data_cuda.id_list_features.lengths().device.type == "cuda" + assert data_cuda.id_list_features_raw["key0"][0].device.type == "cuda" + assert data_cuda.id_list_features_raw["key0"][1].device.type == "cuda" diff --git a/reagent/test/base/test_utils.py b/reagent/test/base/test_utils.py index 09be26bf1..a3d52d268 100644 --- a/reagent/test/base/test_utils.py +++ b/reagent/test/base/test_utils.py @@ -3,14 +3,20 @@ import unittest -import numpy as np import numpy.testing as npt import torch -from reagent.torch_utils import masked_softmax, rescale_torch_tensor +from reagent.core.torch_utils import ( + masked_softmax, + reorder_data_kjt, + rescale_torch_tensor, + shift_kjt_by_one, + split_sequence_keyed_jagged_tensor, +) +from torchrec.sparse.jagged_tensor import KeyedJaggedTensor class TestUtils(unittest.TestCase): - def test_rescale_torch_tensor(self): + def test_rescale_torch_tensor(self) -> None: rows, cols = 3, 5 original_tensor = torch.randint(low=10, high=40, size=(rows, cols)).float() prev_max_tensor = torch.ones(1, 5) * 40.0 @@ -39,11 +45,12 @@ def test_rescale_torch_tensor(self): comparison_tensor = torch.eq(original_tensor, reconstructed_original_tensor) self.assertTrue(torch.sum(comparison_tensor), rows * cols) - def test_masked_softmax(self): + def test_masked_softmax(self) -> None: # Postive value case x = torch.tensor([[15.0, 6.0, 9.0], [3.0, 2.0, 1.0]]) temperature = 1 mask = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]]) + # pyre-fixme[6]: For 2nd param expected `float` but got `Tensor`. out = masked_softmax(x, mask, temperature) expected_out = torch.tensor([[0.9975, 0.0000, 0.0025], [0, 0.7311, 0.2689]]) npt.assert_array_almost_equal(out, expected_out, 4) @@ -52,6 +59,7 @@ def test_masked_softmax(self): x = torch.tensor([[150.0, 2.0]]) temperature = 0.01 mask = torch.tensor([[0.0, 1.0]]) + # pyre-fixme[6]: For 2nd param expected `float` but got `Tensor`. out = masked_softmax(x, mask, temperature) expected_out = torch.tensor([[0.0, 1.0]]) npt.assert_array_almost_equal(out, expected_out, 4) @@ -60,6 +68,7 @@ def test_masked_softmax(self): x = torch.tensor([[-10.0, -1.0, -5.0]]) temperature = 0.01 mask = torch.tensor([[1.0, 1.0, 0.0]]) + # pyre-fixme[6]: For 2nd param expected `float` but got `Tensor`. out = masked_softmax(x, mask, temperature) expected_out = torch.tensor([[0.0, 1.0, 0.0]]) npt.assert_array_almost_equal(out, expected_out, 4) @@ -68,6 +77,98 @@ def test_masked_softmax(self): x = torch.tensor([[-5.0, 4.0, 3.0], [2.0, 1.0, 2.0]]) temperature = 1 mask = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]) + # pyre-fixme[6]: For 2nd param expected `float` but got `Tensor`. 
out = masked_softmax(x, mask, temperature) expected_out = torch.tensor([[0.0, 0.0, 0.0], [0.4223, 0.1554, 0.4223]]) npt.assert_array_almost_equal(out, expected_out, 4) + + def test_split_sequence_keyed_jagged_tensor(self) -> None: + """Test the example in the docstring of split_sequence_keyed_jagged_tensor""" + keys = ["Key0", "Key1", "Key2"] + values = torch.arange(10).float() + weights = values / 10.0 + lengths = torch.tensor([2, 0, 1, 1, 1, 1, 3, 0, 0, 1, 0, 0]) + num_steps = 2 + + def verify_output(out): + self.assertEqual(out[0].keys(), keys) + assert torch.allclose( + out[0].values(), torch.tensor([0.0, 1.0, 2.0, 4.0, 6.0, 7.0, 8.0]) + ) + assert torch.allclose(out[0].lengths(), torch.tensor([2, 1, 1, 3, 0, 0])) + if out[0]._weights is not None: + assert torch.allclose( + out[0].weights(), torch.tensor([0.0, 0.1, 0.2, 0.4, 0.6, 0.7, 0.8]) + ) + assert torch.allclose(out[1].values(), torch.tensor([3.0, 5.0, 9.0])) + assert torch.allclose(out[1].lengths(), torch.tensor([0, 1, 1, 0, 1, 0])) + if out[1]._weights is not None: + assert torch.allclose(out[1].weights(), torch.tensor([0.3, 0.5, 0.9])) + + # Test id list data + x0 = KeyedJaggedTensor(keys=keys, values=values, lengths=lengths) + y0 = split_sequence_keyed_jagged_tensor(x0, num_steps) + verify_output(y0) + + # Test id score list data + x1 = KeyedJaggedTensor( + keys=keys, values=values, lengths=lengths, weights=weights + ) + y1 = split_sequence_keyed_jagged_tensor(x1, num_steps) + verify_output(y1) + + def test_reorder_data_kjt(self) -> None: + """Test the example in the docstring of reorder_data_kjt""" + keys = ["Key0", "Key1"] + values = torch.arange(7).float() + weights = values / 10.0 + lengths = torch.tensor([2, 0, 1, 1, 1, 2]) + + # With weights + x = KeyedJaggedTensor( + keys=keys, values=values, lengths=lengths, weights=weights + ) + y = reorder_data_kjt(x, torch.tensor([2, 1, 0])) + self.assertEqual(y.keys(), keys) + assert torch.allclose( + y.values(), torch.tensor([2.0, 0.0, 1.0, 5.0, 6.0, 4.0, 3.0]) + ) + assert torch.allclose(y.lengths(), torch.tensor([1, 0, 2, 2, 1, 1])) + assert torch.allclose(y.weights(), y.values() / 10.0) + + # Without weights + x = KeyedJaggedTensor(keys=keys, values=values, lengths=lengths) + y = reorder_data_kjt(x, torch.tensor([2, 1, 0])) + self.assertEqual(y.keys(), keys) + assert torch.allclose( + y.values(), torch.tensor([2.0, 0.0, 1.0, 5.0, 6.0, 4.0, 3.0]) + ) + assert torch.allclose(y.lengths(), torch.tensor([1, 0, 2, 2, 1, 1])) + + def test_shift_kjt_by_one(self) -> None: + """Test the example in the docstring of shift_kjt_by_one""" + keys = ["Key0", "Key1"] + values = torch.arange(7).float() + weights = values / 10.0 + lengths = torch.tensor([2, 0, 1, 1, 1, 2]) + + # With weights + x = KeyedJaggedTensor( + keys=keys, values=values, lengths=lengths, weights=weights + ) + y = shift_kjt_by_one(x) + self.assertEqual(y.keys(), keys) + assert torch.allclose(y.values(), torch.tensor([2.0, 4.0, 5.0, 6.0])) + assert torch.allclose(y.lengths(), torch.tensor([0, 1, 0, 1, 2, 0])) + assert torch.allclose(y.weights(), y.values() / 10.0) + + # Without weights + x = KeyedJaggedTensor( + keys=keys, + values=values, + lengths=lengths, + ) + y = shift_kjt_by_one(x) + self.assertEqual(y.keys(), keys) + assert torch.allclose(y.values(), torch.tensor([2.0, 4.0, 5.0, 6.0])) + assert torch.allclose(y.lengths(), torch.tensor([0, 1, 0, 1, 2, 0])) diff --git a/reagent/test/base/utils.py b/reagent/test/base/utils.py index ced252f5e..fb5b1f980 100644 --- a/reagent/test/base/utils.py +++ 
b/reagent/test/base/utils.py @@ -95,56 +95,7 @@ def default_normalizer(feats, min_value=None, max_value=None): return normalization -def only_continuous_normalizer_helper( - feats, feature_type, min_value=None, max_value=None -): - assert feature_type in ( - "CONTINUOUS", - "CONTINUOUS_ACTION", - ), f"invalid feature type: {feature_type}." - assert type(min_value) == type(max_value) and type(min_value) in ( - int, - float, - list, - np.ndarray, - type(None), - ) - if type(min_value) in [int, float, type(None)]: - min_value = [min_value] * len(feats) - max_value = [max_value] * len(feats) - normalization = collections.OrderedDict( - [ - ( - feats[i], - NormalizationParameters( - feature_type=feature_type, - boxcox_lambda=None, - boxcox_shift=None, - mean=0, - stddev=1, - possible_values=None, - quantiles=None, - min_value=float(min_value[i]) if min_value[i] is not None else None, - max_value=float(max_value[i]) if max_value[i] is not None else None, - ), - ) - for i in range(len(feats)) - ] - ) - return normalization - - -def only_continuous_normalizer(feats, min_value=None, max_value=None): - return only_continuous_normalizer_helper(feats, "CONTINUOUS", min_value, max_value) - - -def only_continuous_action_normalizer(feats, min_value=None, max_value=None): - return only_continuous_normalizer_helper( - feats, "CONTINUOUS_ACTION", min_value, max_value - ) - - -def write_lists_to_csv(path, *args): +def write_lists_to_csv(path, *args) -> None: rows = zip(*args) with open(path, "w") as f: writer = csv.writer(f) diff --git a/reagent/test/configs/cem_cartpole_v0.json b/reagent/test/configs/cem_cartpole_v0.json deleted file mode 100644 index 53af77f39..000000000 --- a/reagent/test/configs/cem_cartpole_v0.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "cross_entropy_method", - "max_replay_memory_size": 20480, - "use_gpu": false, - "rl": { - "gamma": 1.0, - "softmax_policy": 0 - }, - "cem": { - "mdnrnn": { - "hidden_size": 100, - "num_hidden_layers": 2, - "minibatch_size": 1024, - "learning_rate": 0.001, - "not_terminal_loss_weight": 200.0, - "next_state_loss_weight": 1.0, - "reward_loss_weight": 1.0, - "num_gaussians": 1 - }, - "plan_horizon_length": 10, - "num_world_models": 1, - "cem_population_size": 100, - "cem_num_iterations": 10, - "ensemble_population_size": 1, - "num_elites": 15 - }, - "run_details": { - "num_episodes": 200, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 500, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 1, - "offline_num_batches_per_epoch": 5000, - "offline_train_epochs": 1 - } -} diff --git a/reagent/test/configs/cem_linear_dynamics_v0.json b/reagent/test/configs/cem_linear_dynamics_v0.json deleted file mode 100644 index 9552c9e3e..000000000 --- a/reagent/test/configs/cem_linear_dynamics_v0.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "env": "LinearDynamics-v0", - "model_type": "cross_entropy_method", - "max_replay_memory_size": 20480, - "use_gpu": false, - "rl": { - "gamma": 1.0, - "softmax_policy": 0 - }, - "cem": { - "mdnrnn": { - "hidden_size": 100, - "num_hidden_layers": 2, - "minibatch_size": 1024, - "learning_rate": 0.001, - "not_terminal_loss_weight": 0.0, - "next_state_loss_weight": 1.0, - "reward_loss_weight": 1.0, - "num_gaussians": 1 - }, - "plan_horizon_length": 4, - "num_world_models": 1, - "cem_population_size": 100, - "cem_num_iterations": 10, - "ensemble_population_size": 1, - "num_elites": 15 - }, - "run_details": { - "num_episodes": 200, - "max_steps": 200, - 
"train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 500, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 3, - "offline_num_batches_per_epoch": 1000, - "offline_train_epochs": 1 - } -} diff --git a/reagent/test/configs/discrete_dqn_pocman_v0.json b/reagent/test/configs/discrete_dqn_pocman_v0.json deleted file mode 100644 index 37c8da3ae..000000000 --- a/reagent/test/configs/discrete_dqn_pocman_v0.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "env": "Pocman-v0", - "model_type": "pytorch_discrete_dqn", - "max_replay_memory_size": 100000, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.1, - "maxq_learning": true, - "epsilon": 0.05, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [-1, 128, 64, -1], - "activations": ["relu", "relu", "linear"], - "minibatch_size": 1024, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 200, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 500, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 10, - "offline_train_epochs": 50 - } -} diff --git a/reagent/test/configs/discrete_dqn_string_game_v0.json b/reagent/test/configs/discrete_dqn_string_game_v0.json deleted file mode 100644 index 65917d0c2..000000000 --- a/reagent/test/configs/discrete_dqn_string_game_v0.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "env": "StringGame-v0", - "model_type": "pytorch_discrete_dqn", - "max_replay_memory_size": 20480, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.1, - "maxq_learning": true, - "epsilon": 0.05, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [-1, 128, 64, -1], - "activations": ["relu", "relu", "linear"], - "minibatch_size": 1024, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 200, - "max_steps": 6, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 500, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 10, - "offline_train_epochs": 25 - } -} diff --git a/reagent/test/configs/mdnrnn_cartpole_v0.json b/reagent/test/configs/mdnrnn_cartpole_v0.json deleted file mode 100644 index 1ff3b7b62..000000000 --- a/reagent/test/configs/mdnrnn_cartpole_v0.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "env": "CartPole-v0", - "mdnrnn": { - "hidden_size": 50, - "num_hidden_layers": 2, - "minibatch_size": 1024, - "learning_rate": 0.005, - "not_terminal_loss_weight": 1, - "next_state_loss_weight": 1, - "reward_loss_weight": 1, - "num_gaussians": 1 - }, - "run_details": { - "seq_len": 1, - "num_train_episodes": 300, - "num_test_episodes": 30, - "num_state_embed_episodes": 200, - "max_steps": 200, - "train_epochs": 3, - "early_stopping_patience": 2 - } -} diff --git a/reagent/test/configs/mdnrnn_lunarlander_v2.json b/reagent/test/configs/mdnrnn_lunarlander_v2.json deleted file mode 100644 index 314fe978c..000000000 --- a/reagent/test/configs/mdnrnn_lunarlander_v2.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "env": "LunarLander-v2", - "mdnrnn": { - "hidden_size": 8, - "num_hidden_layers": 2, - "minibatch_size": 1024, - "learning_rate": 0.001, - "num_gaussians": 5 - }, - "run_details": { - "seq_len": 50, - "num_train_episodes": 120, - "num_test_episodes": 30, - "max_steps": 2000, 
- "train_epochs": 100, - "early_stopping_patience": 2 - } -} diff --git a/reagent/test/configs/mdnrnn_pocman_v0.json b/reagent/test/configs/mdnrnn_pocman_v0.json deleted file mode 100644 index 2307c36dd..000000000 --- a/reagent/test/configs/mdnrnn_pocman_v0.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "env": "Pocman-v0", - "mdnrnn": { - "hidden_size": 50, - "num_hidden_layers": 2, - "minibatch_size": 1024, - "learning_rate": 0.001, - "num_gaussians": 5, - "reward_loss_weight": 1.0, - "next_state_loss_weight": 1.0, - "not_terminal_loss_weight": 0.0, - "fit_only_one_next_step": true - }, - "run_details": { - "seq_len": 3, - "num_train_episodes": 1000, - "num_test_episodes": 100, - "num_state_embed_episodes": 2500, - "max_steps": 200, - "train_epochs": 10, - "early_stopping_patience": 10 - } -} diff --git a/reagent/test/configs/mdnrnn_string_game_v0.json b/reagent/test/configs/mdnrnn_string_game_v0.json deleted file mode 100644 index 301dd21bc..000000000 --- a/reagent/test/configs/mdnrnn_string_game_v0.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "env": "StringGame-v0", - "mdnrnn": { - "hidden_size": 20, - "num_hidden_layers": 2, - "minibatch_size": 1024, - "learning_rate": 0.001, - "num_gaussians": 1, - "reward_loss_weight": 1.0, - "next_state_loss_weight": 1.0, - "not_terminal_loss_weight": 0.0, - "fit_only_one_next_step": true - }, - "run_details": { - "seq_len": 3, - "num_train_episodes": 4000, - "num_test_episodes": 100, - "num_state_embed_episodes": 1800, - "max_steps": 6, - "train_epochs": 6, - "early_stopping_patience": 6 - } -} diff --git a/reagent/test/core/aggregators_test.py b/reagent/test/core/aggregators_test.py index fb8a3f68f..95b1fcac1 100644 --- a/reagent/test/core/aggregators_test.py +++ b/reagent/test/core/aggregators_test.py @@ -9,7 +9,7 @@ class ActionCountAggregatorTest(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.actions = ["A", "B", "C"] key = "logged_action" self.aggregator = ActionCountAggregator(key, self.actions) @@ -26,14 +26,14 @@ def setUp(self): for x in logged_actions: self.aggregator(key, x) - def test_get_distributions(self): + def test_get_distributions(self) -> None: distr = self.aggregator.get_distributions() self.assertEqual(len(distr), 3) self.assertEqual(distr["A"], [0.3, 0.4]) self.assertEqual(distr["B"], [0.3, 0.4]) self.assertEqual(distr["C"], [0.4, 0.2]) - def test_get_cumulative_distributions(self): + def test_get_cumulative_distributions(self) -> None: distr = self.aggregator.get_cumulative_distributions() self.assertEqual(len(distr), 3) self.assertEqual(distr["A"], 0.35) diff --git a/reagent/test/core/test_config_parsing.py b/reagent/test/core/test_config_parsing.py index a08104653..908233eb6 100644 --- a/reagent/test/core/test_config_parsing.py +++ b/reagent/test/core/test_config_parsing.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import abc +import os import unittest from reagent.core.configuration import make_config_class, resolve_defaults @@ -17,7 +19,7 @@ def __init__( self.a = a self.b = b - def __call__(self): + def __call__(self) -> int: return self.a * self.b @@ -43,7 +45,7 @@ def foo(self): @dataclass class Bar(FooRegistry): - def foo(self): + def foo(self) -> int: return 10 @@ -61,17 +63,47 @@ class Config: class TestConfigParsing(unittest.TestCase): - def test_parse_foo_default(self): + def test_parse_foo_default(self) -> None: raw_config = {} config = Config(**raw_config) self.assertEqual(config.union.value.foo(), 2) - def test_parse_foo(self): + def test_parse_foo(self) -> None: raw_config = {"union": {"Foo": {"a_param": {"a": 6}}}} + # pyre-fixme[6]: For 1st param expected `FooUnion` but got `Dict[str, + # Dict[str, Dict[str, int]]]`. config = Config(**raw_config) self.assertEqual(config.union.value.foo(), 12) - def test_parse_bar(self): + def test_parse_bar(self) -> None: raw_config = {"union": {"Bar": {}}} + # pyre-fixme[6]: For 1st param expected `FooUnion` but got `Dict[str, + # Dict[typing.Any, typing.Any]]`. config = Config(**raw_config) self.assertEqual(config.union.value.foo(), 10) + + def test_frozen_registry(self) -> None: + with self.assertRaises(RuntimeError): + + @dataclass + class Baz(FooRegistry): + def foo(self): + return 20 + + self.assertListEqual(sorted(FooRegistry.REGISTRY.keys()), ["Bar", "Foo"]) + + def test_frozen_registry_skip(self) -> None: + _environ = dict(os.environ) + os.environ.update({"SKIP_FROZEN_REGISTRY_CHECK": "1"}) + try: + + @dataclass + class Baz(FooRegistry): + def foo(self): + return 20 + + finally: + os.environ.clear() + os.environ.update(_environ) + + self.assertListEqual(sorted(FooRegistry.REGISTRY.keys()), ["Bar", "Foo"]) diff --git a/reagent/test/core/test_utils.py b/reagent/test/core/test_utils.py new file mode 100644 index 000000000..8c8647ba5 --- /dev/null +++ b/reagent/test/core/test_utils.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
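test_frozen_registry_skip above snapshots os.environ by hand and restores it in a finally block; the standard library's unittest.mock.patch.dict expresses the same scoped override and restore. A sketch of that equivalent pattern (the test itself does not use it):

import os
from unittest.mock import patch

# patch.dict saves the current mapping, applies the override, and restores the
# original environment on exit, replacing the manual snapshot/clear/update.
with patch.dict(os.environ, {"SKIP_FROZEN_REGISTRY_CHECK": "1"}):
    assert os.environ["SKIP_FROZEN_REGISTRY_CHECK"] == "1"
    # ...define the registry subclass here, as in test_frozen_registry_skip...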
+import unittest + +import reagent.core.types as rlt +from reagent.core.utils import embedding_bag_configs_from_feature_configs + + +class TestUtils(unittest.TestCase): + def test_embedding_bag_configs_from_feature_configs(self) -> None: + TABLE_1_EMBED_SIZE = 100 + TABLE_1_EMBED_DIM = 64 + TABLE_2_EMBED_SIZE = 200 + TABLE_2_EMBED_DIM = 32 + + feature_config_1 = rlt.ModelFeatureConfig( + float_feature_infos=[rlt.FloatFeatureInfo(name="dummy0", feature_id=0)], + id_list_feature_configs=[ + rlt.IdListFeatureConfig( + name="id_list_feature_111", + feature_id=111, + id_mapping_name="table_1", + ) + ], + id_score_list_feature_configs=[ + rlt.IdScoreListFeatureConfig( + name="id_score_list_feature_112", + feature_id=112, + id_mapping_name="table_2", + ) + ], + id_mapping_config={ + "table_1": rlt.IdMappingConfig( + embedding_table_size=TABLE_1_EMBED_SIZE, + embedding_dim=TABLE_1_EMBED_DIM, + ), + "table_2": rlt.IdMappingConfig( + embedding_table_size=TABLE_2_EMBED_SIZE, + embedding_dim=TABLE_2_EMBED_DIM, + ), + }, + ) + feature_config_2 = rlt.ModelFeatureConfig( + float_feature_infos=[rlt.FloatFeatureInfo(name="dummy1", feature_id=1)], + id_list_feature_configs=[ + rlt.IdListFeatureConfig( + name="id_list_feature_211", + feature_id=211, + id_mapping_name="table_1", + ) + ], + id_score_list_feature_configs=[ + rlt.IdScoreListFeatureConfig( + name="id_score_list_feature_212", + feature_id=212, + id_mapping_name="table_1", + ) + ], + id_mapping_config={ + "table_1": rlt.IdMappingConfig( + embedding_table_size=TABLE_1_EMBED_SIZE, + embedding_dim=TABLE_1_EMBED_DIM, + ), + }, + ) + embedding_bag_configs = embedding_bag_configs_from_feature_configs( + [feature_config_1, feature_config_2] + ) + assert len(embedding_bag_configs) == 2 + + assert embedding_bag_configs[0].name == "table_1" + assert embedding_bag_configs[0].num_embeddings == TABLE_1_EMBED_SIZE + assert embedding_bag_configs[0].embedding_dim == TABLE_1_EMBED_DIM + assert embedding_bag_configs[0].feature_names == [ + "id_list_feature_111", + "id_list_feature_211", + "id_score_list_feature_212", + ] + + assert embedding_bag_configs[1].name == "table_2" + assert embedding_bag_configs[1].num_embeddings == TABLE_2_EMBED_SIZE + assert embedding_bag_configs[1].embedding_dim == TABLE_2_EMBED_DIM + assert embedding_bag_configs[1].feature_names == ["id_score_list_feature_112"] + + # feature_config_3 specifies inconsistent id_mapping_config as those in feature_config_1 + # we expect to see exception + feature_config_3 = rlt.ModelFeatureConfig( + float_feature_infos=[rlt.FloatFeatureInfo(name="dummy1", feature_id=1)], + id_list_feature_configs=[ + rlt.IdListFeatureConfig( + name="id_list_feature_211", + feature_id=211, + id_mapping_name="table_1", + ) + ], + id_score_list_feature_configs=[ + rlt.IdScoreListFeatureConfig( + name="id_score_list_feature_212", + feature_id=212, + id_mapping_name="table_1", + ) + ], + id_mapping_config={ + "table_1": rlt.IdMappingConfig( + embedding_table_size=TABLE_1_EMBED_SIZE + 1, + embedding_dim=TABLE_1_EMBED_DIM + 1, + ), + }, + ) + self.assertRaises( + AssertionError, + embedding_bag_configs_from_feature_configs, + [feature_config_1, feature_config_3], + ) diff --git a/reagent/test/core/tracker_test.py b/reagent/test/core/tracker_test.py index 514844987..afb1071b5 100644 --- a/reagent/test/core/tracker_test.py +++ b/reagent/test/core/tracker_test.py @@ -9,7 +9,7 @@ class TestObservable(unittest.TestCase): - def test_observable(self): + def test_observable(self) -> None: @observable(td_loss=float, str_val=str) class 
DummyClass: def __init__(self, a, b, c=10): @@ -28,8 +28,10 @@ def do_something(self, i): self.assertEqual(instance.c, 10) observers = [ValueListObserver("td_loss") for _i in range(3)] + # pyre-fixme[16]: `DummyClass` has no attribute `add_observers`. instance.add_observers(observers) # Adding twice should not result in double update + # pyre-fixme[16]: `DummyClass` has no attribute `add_observer`. instance.add_observer(observers[0]) for i in range(10): @@ -38,7 +40,7 @@ def do_something(self, i): for observer in observers: self.assertEqual(observer.values, [float(i) for i in range(10)]) - def test_no_observable_values(self): + def test_no_observable_values(self) -> None: try: @observable() diff --git a/reagent/test/environment/environment.py b/reagent/test/environment/environment.py deleted file mode 100644 index 93a506cd1..000000000 --- a/reagent/test/environment/environment.py +++ /dev/null @@ -1,343 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -import collections -import random -from functools import partial -from typing import Deque, Dict, List, NamedTuple, Optional, Union - - -FEATURES = Dict[int, float] -ACTION = Union[str, FEATURES] - - -class Samples(NamedTuple): - mdp_ids: List[str] - sequence_numbers: List[int] - sequence_number_ordinals: List[int] - states: List[FEATURES] - actions: List[ACTION] - action_probabilities: List[float] - rewards: List[float] - possible_actions: List[List[ACTION]] - next_states: List[FEATURES] - next_actions: List[ACTION] - terminals: List[bool] - possible_next_actions: List[List[ACTION]] - - -class MultiStepSamples(NamedTuple): - mdp_ids: List[str] - sequence_numbers: List[int] - sequence_number_ordinals: List[int] - states: List[FEATURES] - actions: List[ACTION] - action_probabilities: List[float] - rewards: List[List[float]] - possible_actions: List[List[ACTION]] - next_states: List[List[FEATURES]] - next_actions: List[List[ACTION]] - terminals: List[List[bool]] - possible_next_actions: List[List[List[ACTION]]] - - def to_single_step(self) -> Samples: - return Samples( - mdp_ids=self.mdp_ids, - sequence_numbers=self.sequence_numbers, - sequence_number_ordinals=self.sequence_number_ordinals, - states=self.states, - actions=self.actions, - action_probabilities=self.action_probabilities, - rewards=[r[0] for r in self.rewards], - possible_actions=self.possible_actions, - next_states=[ns[0] for ns in self.next_states], - next_actions=[na[0] for na in self.next_actions], - terminals=[t[0] for t in self.terminals], - possible_next_actions=[pna[0] for pna in self.possible_next_actions], - ) - - -class Environment: - def reset(self): - """ Reset the environment and return the initial state """ - pass - - def step(self, action): - """ - Proceed one step ahead using the action. 
- Return next state, reward, terminal, and info - """ - return None, None, None, None - - def _process_state(self, state): - """ - Transform the state to the format that can be uploaded to Hive - """ - pass - - def sample_policy(self, state, use_continuous_action, epsilon): - """ - Sample an action following epsilon-greedy - Return the raw action which can be fed into env.step(), the processed - action which can be uploaded to Hive, and action probability - """ - return None, None, None - - def action_to_features(self, action) -> FEATURES: - """ - Transform an action into a feature vector (as a dictionary) - Call this function when discrete actions need to be transformed into - continuous formats - """ - raise NotImplementedError - - def possible_actions( - self, - state, - terminal=False, - ignore_terminal=False, - use_continuous_action: bool = False, - **kwargs, - ) -> List[ACTION]: - """ - Get possible actions at the current state. If ignore_terminal is False, - then this function always returns an empty list at a terminal state. - """ - pass - - @staticmethod - def set_if_in_range(index, limit, container, value): - if index >= limit: - return - container[index] = value - - def generate_random_samples( - self, - num_transitions: int, - use_continuous_action: bool, - epsilon: float = 1.0, - multi_steps: Optional[int] = None, - max_step: Optional[int] = None, - include_shorter_samples_at_start: bool = False, - include_shorter_samples_at_end: bool = True, - ) -> Union[Samples, MultiStepSamples]: - """ Generate samples: - [ - s_t, - (a_t, a_{t+1}, ..., a_{t+steps}), - (r_t, r_{t+1}, ..., r_{t+steps}), - (s_{t+1}, s_{t+2}, ..., s_{t+steps+1}) - ] - - :param num_transitions: How many transitions to collect - :param use_continuous_action: True if a discrete action needs to be - represented as a vector using a dictionary; otherwise the action is - represented as string. - :param epsilon: (1-epsilon) determines the chance of taking optimal actions. - Only valid when the environment (e.g., gridworld) records optimal actions. - :param multi_steps: An integer decides how many steps of transitions - contained in each sample. Only used if you want to train multi-step RL. - :param max_step: An episode terminates after max_step number of steps - :param include_shorter_samples_at_start: Whether to return samples of shorter - steps at the beginning of an episode. - :param include_shorter_samples_at_end: Whether to return samples of shorter - steps at the end of an episode. - """ - return_single_step_samples = False - if multi_steps is None: - return_single_step_samples = True - multi_steps = 1 - - states: List[FEATURES] = [{} for _ in range(num_transitions)] - action_probabilities: List[float] = [0.0] * num_transitions - rewards: List[List[float]] = [[] for _ in range(num_transitions)] - next_states: List[List[FEATURES]] = [[{}] for _ in range(num_transitions)] - terminals: List[List[bool]] = [[] for _ in range(num_transitions)] - mdp_ids = [""] * num_transitions - sequence_numbers = [0] * num_transitions - possible_actions: List[List[ACTION]] = [[] for _ in range(num_transitions)] - possible_next_actions: List[List[List[ACTION]]] = [ - [[]] for _ in range(num_transitions) - ] - next_actions: List[List[ACTION]] = [[] for _ in range(num_transitions)] - actions: List[ACTION] = [] - if use_continuous_action: - actions = [{} for _ in range(num_transitions)] - else: - # pyre-fixme[9]: actions has type `List[Union[Dict[int, float], str]]`; - # used as `List[str]`. 
- actions = [""] * num_transitions - - state = None - terminal = True - raw_action = None - processed_action = None - next_raw_action = None - next_processed_action = None - next_action_probability = 1.0 - transition = 0 - mdp_id = -1 - sequence_number = 0 - - state_deque: Deque[FEATURES] = collections.deque(maxlen=multi_steps) - action_deque: Deque[ACTION] = collections.deque(maxlen=multi_steps) - action_probability_deque: Deque[float] = collections.deque(maxlen=multi_steps) - reward_deque: Deque[float] = collections.deque(maxlen=multi_steps) - next_state_deque: Deque[FEATURES] = collections.deque(maxlen=multi_steps) - next_action_deque: Deque[ACTION] = collections.deque(maxlen=multi_steps) - terminal_deque: Deque[bool] = collections.deque(maxlen=multi_steps) - sequence_number_deque: Deque[int] = collections.deque(maxlen=multi_steps) - possible_action_deque: Deque[List[ACTION]] = collections.deque( - maxlen=multi_steps - ) - possible_next_action_deque: Deque[List[ACTION]] = collections.deque( - maxlen=multi_steps - ) - - # We run until we finish the episode that completes N transitions, but - # we may have to go beyond N to reach the end of that episode - while not terminal or transition < num_transitions: - if terminal: - state = self.reset() - terminal = False - mdp_id += 1 - sequence_number = 0 - state_deque.clear() - action_deque.clear() - action_probability_deque.clear() - reward_deque.clear() - next_state_deque.clear() - next_action_deque.clear() - terminal_deque.clear() - sequence_number_deque.clear() - possible_action_deque.clear() - possible_next_action_deque.clear() - raw_action, processed_action, action_probability = self.sample_policy( - state, use_continuous_action, epsilon - ) - else: - raw_action = next_raw_action - processed_action = next_processed_action - action_probability = next_action_probability - sequence_number += 1 - - possible_action = self.possible_actions( - state, - terminal=terminal, - ignore_terminal=False, - use_continuous_action=use_continuous_action, - ) - next_state, reward, terminal, _ = self.step(raw_action) - if max_step is not None and sequence_number >= max_step: - terminal = True - ( - next_raw_action, - next_processed_action, - next_action_probability, - ) = self.sample_policy(next_state, use_continuous_action, epsilon) - possible_next_action = self.possible_actions( - next_state, - terminal=terminal, - ignore_terminal=False, - use_continuous_action=use_continuous_action, - ) - - state_deque.append(self._process_state(state)) - action_deque.append(processed_action) - action_probability_deque.append(action_probability) - reward_deque.append(reward) - terminal_deque.append(terminal) - sequence_number_deque.append(sequence_number) - possible_action_deque.append(possible_action) - possible_next_action_deque.append(possible_next_action) - - next_processed_state: FEATURES = self._process_state(next_state) - next_state_deque.append(next_processed_state) - - # Format terminals in same way we ask clients to log terminals (in RL dex) - # i.e., setting next action empty if the episode terminates - if terminal: - # We need to keep next state even at the terminal state - # first, fblearner/flow/projects/rl/core/data_fetcher.py decides - # terminal signals by looking at next action, not next state - # second, next state will be used for world model building - if type(next_processed_action) is str: - next_processed_action = "" - else: - next_processed_action = {} - next_action_deque.append(next_processed_action) - - # We want exactly N data points, but we need 
to wait until the - # episode is over so we can get the episode values. `set_if_in_range` - # will set episode values if they are in the range [0,N) and ignore - # otherwise. - if not terminal and ( - include_shorter_samples_at_start or len(terminal_deque) == multi_steps - ): - set_if_in_range = partial( - self.set_if_in_range, transition, num_transitions - ) - set_if_in_range(states, state_deque[0]) - set_if_in_range(actions, action_deque[0]) - set_if_in_range(action_probabilities, action_probability_deque[0]) - set_if_in_range(rewards, list(reward_deque)) - set_if_in_range(next_states, list(next_state_deque)) - set_if_in_range(next_actions, list(next_action_deque)) - set_if_in_range(terminals, list(terminal_deque)) - set_if_in_range(mdp_ids, str(mdp_id)) - set_if_in_range(sequence_numbers, sequence_number_deque[0]) - set_if_in_range(possible_actions, possible_action_deque[0]) - set_if_in_range(possible_next_actions, list(possible_next_action_deque)) - transition += 1 - # collect samples at the end of the episode. - if terminal: - num_samples_at_end = 0 - if include_shorter_samples_at_end: - num_samples_at_end = len(state_deque) - elif len(terminal_deque) == multi_steps: - num_samples_at_end = 1 - for _ in range(num_samples_at_end): - set_if_in_range = partial( - self.set_if_in_range, transition, num_transitions - ) - set_if_in_range(states, state_deque.popleft()) - set_if_in_range(actions, action_deque.popleft()) - set_if_in_range( - action_probabilities, action_probability_deque.popleft() - ) - set_if_in_range(rewards, list(reward_deque)) - set_if_in_range(next_states, list(next_state_deque)) - set_if_in_range(next_actions, list(next_action_deque)) - set_if_in_range(terminals, list(terminal_deque)) - set_if_in_range(mdp_ids, str(mdp_id)) - set_if_in_range(sequence_numbers, sequence_number_deque.popleft()) - set_if_in_range(possible_actions, possible_action_deque.popleft()) - set_if_in_range( - possible_next_actions, list(possible_next_action_deque) - ) - reward_deque.popleft() - next_state_deque.popleft() - next_action_deque.popleft() - terminal_deque.popleft() - possible_next_action_deque.popleft() - transition += 1 - - state = next_state - - samples = MultiStepSamples( - mdp_ids=mdp_ids, - sequence_numbers=sequence_numbers, - sequence_number_ordinals=sequence_numbers, - states=states, - actions=actions, - action_probabilities=action_probabilities, - rewards=rewards, - possible_actions=possible_actions, - next_states=next_states, - next_actions=next_actions, - terminals=terminals, - possible_next_actions=possible_next_actions, - ) - if return_single_step_samples: - return samples.to_single_step() - return samples diff --git a/reagent/test/evaluation/cb/__init__.py b/reagent/test/evaluation/cb/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/reagent/test/evaluation/cb/test_integration.py b/reagent/test/evaluation/cb/test_integration.py new file mode 100644 index 000000000..1fb53732e --- /dev/null +++ b/reagent/test/evaluation/cb/test_integration.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
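For reference, the "after batch_1, the model has state" comments inside the integration test below can be reproduced with a few lines of NumPy. This is a sketch under the assumption that the LinUCB coefficients use a unit ridge term (coefs = inv(I + A) @ b), which is consistent with the printed inv_A and coefs values; it is not the ReAgent implementation, which tracks averaged statistics (avg_A, avg_b) scaled by sum_weight, as the assertions show.

```python
import numpy as np

# Only the 2nd observation of batch_1 (logged action 1) is used by the model.
x = np.array([1.0, 5.0])          # its arm features
r = 2.3                           # its logged reward

A = np.outer(x, x)                # [[1, 5], [5, 25]]
b = r * x                         # [2.3, 11.5]
inv_A = np.linalg.inv(np.eye(2) + A)   # assumed unit ridge term
coefs = inv_A @ b                 # ~[0.0852, 0.4259], as quoted in the test comments
```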
+ +import unittest +from unittest.mock import MagicMock + +import numpy as np +import numpy.testing as npt + +import torch +from reagent.core.types import CBInput +from reagent.evaluation.cb.policy_evaluator import PolicyEvaluator +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.samplers.discrete_sampler import GreedyActionSampler +from reagent.models.linear_regression import LinearRegressionUCB +from reagent.training.cb.linucb_trainer import LinUCBTrainer +from torch.utils.tensorboard import SummaryWriter + + +class TestEvalDuringTraining(unittest.TestCase): + def setUp(self): + x_dim = 2 + self.policy_network = LinearRegressionUCB(x_dim) + policy = Policy(scorer=self.policy_network, sampler=GreedyActionSampler()) + + self.trainer = LinUCBTrainer(policy) + self.eval_module = PolicyEvaluator(self.policy_network) + sw = SummaryWriter("/tmp/tb") + sw.add_scalars = MagicMock() + self.eval_module.attach_summary_writer(sw) + self.trainer.attach_eval_module(self.eval_module) + + def test_eval_during_training(self): + """ + Test integration of evaluation into the training loop. + + run simulated training-evaluation sequence with 3 batches. + the model and features are set up so that the model always selects action 1, so all observations + with logged action 0 will be skipped + the evaluated model is updated only after consuming batch_1 + """ + + # step 1: consume batch_1. just the 2nd data point will be used + batch_1 = CBInput( + context_arm_features=torch.tensor( + [ + [ + [1, 2], + [1, 3], + ], + [ + [1, 4], + [1, 5], + ], + ], + dtype=torch.float, + ), + action=torch.tensor([[0], [1]], dtype=torch.long), + reward=torch.tensor([[1.5], [2.3]], dtype=torch.float), + ) + self.trainer.training_step(batch_1, 0) + + # flush the buffers inside the model + self.trainer.scorer._calculate_coefs() + self.eval_module.eval_model._calculate_coefs() + + # update the evaluated model. this model will be used for evaluation (action selection) in both batch_2 and batch_3 bcs we won't update it after batch_2 + self.eval_module.update_eval_model(self.trainer.scorer) + + """ + after batch_1, the model has state: + A: tensor([[ 1., 5.], + [ 5., 25.]]) + inv_A: tensor([[ 0.9630, -0.1852], + [-0.1852, 0.0741]]) + b: tensor([ 2.3000, 11.5000]) + num_obs: tensor([1.]) + coefs: tensor([0.0852, 0.4259]) + """ + + # check if trained model state is correct + batch_1_used_features = batch_1.context_arm_features[1, 1].numpy() + npt.assert_allclose( + (self.trainer.scorer.avg_A * self.trainer.scorer.sum_weight).numpy(), + np.outer(batch_1_used_features, batch_1_used_features), + ) + npt.assert_allclose( + (self.trainer.scorer.avg_b * self.trainer.scorer.sum_weight).numpy(), + batch_1_used_features * batch_1.reward[1, 0].item(), + ) + + # check if evaluated model state is correct (should be same as trained model) + npt.assert_allclose( + ( + self.eval_module.eval_model.avg_A + * self.eval_module.eval_model.sum_weight + ).numpy(), + np.outer(batch_1_used_features, batch_1_used_features), + ) + npt.assert_allclose( + ( + self.eval_module.eval_model.avg_b + * self.eval_module.eval_model.sum_weight + ).numpy(), + batch_1_used_features * batch_1.reward[1, 0].item(), + ) + + # step 2: consume batch_2. 
just the 1st data point will be used + batch_2 = CBInput( + context_arm_features=torch.tensor( + [ + [ + [1, 7], + [1, 8], + [ + 1, + 9, + ], # this arm would have been chosen by the model if it was present + ], + [ + [1, 9], + [1, 10], + [1, 11], + ], + ], + dtype=torch.float, + ), + action=torch.tensor([[1], [0]], dtype=torch.long), + reward=torch.tensor([[1.2], [2.9]], dtype=torch.float), + arm_presence=torch.tensor([[1, 1, 0], [1, 1, 1]], dtype=torch.bool), + ) + self.trainer.training_step(batch_2, 0) + + # flush the buffers inside the model + self.trainer.scorer._calculate_coefs() + self.eval_module.eval_model._calculate_coefs() + + # check that trained model state is correct + batch_2_used_features = batch_2.context_arm_features[0, 1].numpy() + npt.assert_allclose( + (self.trainer.scorer.avg_A * self.trainer.scorer.sum_weight).numpy(), + np.outer(batch_1_used_features, batch_1_used_features) + + np.outer(batch_2_used_features, batch_2_used_features), + ) + npt.assert_allclose( + (self.trainer.scorer.avg_b * self.trainer.scorer.sum_weight).numpy(), + batch_1_used_features * batch_1.reward[1, 0].item() + + batch_2_used_features * batch_2.reward[0, 0].item(), + ) + + # check that evaluated model state is correct (same as it was after batch_1) + npt.assert_allclose( + ( + self.eval_module.eval_model.avg_A + * self.eval_module.eval_model.sum_weight + ).numpy(), + np.outer(batch_1_used_features, batch_1_used_features), + ) + npt.assert_allclose( + ( + self.eval_module.eval_model.avg_b + * self.eval_module.eval_model.sum_weight + ).numpy(), + batch_1_used_features * batch_1.reward[1, 0].item(), + ) + + """ + after batch_2, the model has state: + A: tensor([[ 2., 13.], + [13., 89.]]) + inv_A: tensor([[ 0.8911, -0.1287], + [-0.1287, 0.0297]]) + b: tensor([ 3.5000, 21.1000]) + num_obs: tensor([2.]) + coefs: tensor([0.4030, 0.1762]) + """ + + # step 3: consume batch_3. 
just the 2nd data point will be used + batch_3 = CBInput( + context_arm_features=torch.tensor( + [ + [ + [1, 7], + [1, 8], + ], + [ + [1, 9], + [1, 10], + ], + ], + dtype=torch.float, + ), + action=torch.tensor([[0], [1]], dtype=torch.long), + reward=torch.tensor([[3.5], [7.1]], dtype=torch.float), + ) + self.trainer.training_step(batch_3, 0) + + # flush the buffers inside the model + self.trainer.scorer._calculate_coefs() + self.eval_module.eval_model._calculate_coefs() + + # finish training epoch + self.trainer.on_train_epoch_end() + + """ + after batch_3, the model has state: + A: tensor([[ 3., 23.], + [ 23., 189.]]) + inv_A: tensor([[ 0.8225, -0.0996], + [-0.0996, 0.0173]]) + b: tensor([10.8000, 94.1000]) + num_obs: tensor([3.]) + coefs: tensor([-0.4861, 0.5541]) + """ + + # check that trained model state is correct + batch_3_used_features = batch_3.context_arm_features[1, 1].numpy() + npt.assert_allclose( + (self.trainer.scorer.avg_A * self.trainer.scorer.sum_weight).numpy(), + np.outer(batch_1_used_features, batch_1_used_features) + + np.outer(batch_2_used_features, batch_2_used_features) + + np.outer(batch_3_used_features, batch_3_used_features), + ) + npt.assert_allclose( + (self.trainer.scorer.avg_b * self.trainer.scorer.sum_weight).numpy(), + batch_1_used_features * batch_1.reward[1, 0].item() + + batch_2_used_features * batch_2.reward[0, 0].item() + + batch_3_used_features * batch_3.reward[1, 0].item(), + ) + + # check that evaluated model state is correct (same as it was after batch_1) + npt.assert_allclose( + ( + self.eval_module.eval_model.avg_A + * self.eval_module.eval_model.sum_weight + ).numpy(), + np.outer(batch_1_used_features, batch_1_used_features), + ) + npt.assert_allclose( + ( + self.eval_module.eval_model.avg_b + * self.eval_module.eval_model.sum_weight + ).numpy(), + batch_1_used_features * batch_1.reward[1, 0].item(), + ) + + # check average reward. should be AVG([2.3, 1.2, 7.1]) = 4.2 + self.assertAlmostEqual( + self.eval_module.get_avg_reward(), + np.mean( + [ + batch_1.reward[1, 0].item(), + batch_2.reward[0, 0].item(), + batch_3.reward[1, 0].item(), + ] + ), + places=4, + ) + + # check total weight (number of observations). Should be 3 + self.assertAlmostEqual( + (self.eval_module.sum_weight + self.eval_module.sum_weight_local).item(), + 3.0, + places=4, + ) + + # metrics should have been logged once, at the end of epoch + # TODO: test logging logic triggered by eval_model_update_critical_weight + self.eval_module.summary_writer.add_scalars.assert_called_once() diff --git a/reagent/test/evaluation/cb/test_policy_evaluator.py b/reagent/test/evaluation/cb/test_policy_evaluator.py new file mode 100644 index 000000000..9d351ad09 --- /dev/null +++ b/reagent/test/evaluation/cb/test_policy_evaluator.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
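The PolicyEvaluator tests below repeatedly distinguish the `*_local` buffers from their aggregated counterparts. A toy mirror of that bookkeeping, inferred from the assertions (the real class lives in reagent/evaluation/cb/policy_evaluator.py and presumably all-reduces the local buffers across trainer instances), looks roughly like this:

```python
import torch

class EvaluatorSketch:
    """Toy stand-in that only mimics the counters the tests below assert on."""

    def __init__(self):
        self.sum_weight = torch.zeros(1)
        self.sum_reward_weighted = torch.zeros(1)
        self.sum_weight_local = torch.zeros(1)
        self.sum_reward_weighted_local = torch.zeros(1)

    def process_used_data(self, reward, weight):
        # Per-batch updates only touch the local buffers.
        self.sum_weight_local += weight.sum()
        self.sum_reward_weighted_local += (weight * reward).sum()

    def aggregate_across_instances(self):
        # Fold local buffers into the global counters and reset them,
        # as test_process_used_data_accept_some expects.
        self.sum_weight += self.sum_weight_local
        self.sum_reward_weighted += self.sum_reward_weighted_local
        self.sum_weight_local = torch.zeros(1)
        self.sum_reward_weighted_local = torch.zeros(1)

    def get_avg_reward(self):
        return (self.sum_reward_weighted / self.sum_weight).item()

ev = EvaluatorSketch()
ev.process_used_data(torch.tensor([[1.5], [2.3]]), torch.tensor([[0.0], [2.0]]))
ev.aggregate_across_instances()
assert abs(ev.get_avg_reward() - 2.3) < 1e-6
```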
+ +import copy +import unittest +from dataclasses import replace +from unittest.mock import MagicMock + +import torch +from reagent.core.types import CBInput +from reagent.evaluation.cb.policy_evaluator import PolicyEvaluator +from reagent.models.linear_regression import LinearRegressionUCB +from torch.utils.tensorboard import SummaryWriter + + +def _compare_state_dicts(state_dict_1, state_dict_2): + if len(state_dict_1) != len(state_dict_2): + return False + + for ((k_1, v_1), (k_2, v_2)) in zip( + sorted(state_dict_1.items()), sorted(state_dict_2.items()) + ): + if k_1 != k_2: + return False + if not torch.allclose(v_1, v_2): + return False + return True + + +class TestPolicyEvaluator(unittest.TestCase): + def setUp(self): + self.policy_network = LinearRegressionUCB(2) + self.eval_module = PolicyEvaluator(self.policy_network) + self.batch = CBInput( + context_arm_features=torch.tensor( + [ + [ + [1, 2], + [1, 3], + ], + [ + [1, 4], + [1, 5], + ], + ], + dtype=torch.float, + ), + action=torch.tensor([[0], [1]], dtype=torch.long), + reward=torch.tensor([[1.5], [2.3]], dtype=torch.float), + ) + + def test_process_all_data(self): + + state_dict_before = copy.deepcopy(self.eval_module.state_dict()) + self.eval_module._process_all_data(self.batch) + state_dict_after = copy.deepcopy(self.eval_module.state_dict()) + + # all_data_sum_weight_local got updated properly + self.assertAlmostEqual( + state_dict_after["all_data_sum_weight_local"].item() + - state_dict_before["all_data_sum_weight_local"].item(), + len(self.batch), + ) + # all_data_sum_weight didn't change (bcs we haven't aggregated across instances yet) + self.assertAlmostEqual( + state_dict_after["all_data_sum_weight"].item(), + state_dict_before["all_data_sum_weight"].item(), + ) + + # sum_weight and sum_reward_weighted didn't change (as well as local values) + self.assertAlmostEqual( + state_dict_after["sum_weight"].item(), + state_dict_before["sum_weight"].item(), + ) + self.assertAlmostEqual( + state_dict_after["sum_weight_local"].item(), + state_dict_before["sum_weight_local"].item(), + ) + self.assertAlmostEqual( + state_dict_after["sum_reward_weighted"].item(), + state_dict_before["sum_reward_weighted"].item(), + ) + self.assertAlmostEqual( + state_dict_after["sum_reward_weighted_local"].item(), + state_dict_before["sum_reward_weighted_local"].item(), + ) + + def test_process_used_data_reject_all(self): + # make sure calling _process_used_data() doesn't change internal state if all weights are 0 + state_dict_before = copy.deepcopy(self.eval_module.state_dict()) + batch = replace( + self.batch, + weight=torch.zeros_like(self.batch.action, dtype=torch.float), + ) + self.eval_module._process_used_data(batch) + state_dict_after = copy.deepcopy(self.eval_module.state_dict()) + self.assertTrue(_compare_state_dicts(state_dict_before, state_dict_after)) + + def test_process_used_data_accept_some(self): + # calling _process_used_data with non-zero weights should change the state and lead to correct reward value + policy_network = LinearRegressionUCB(2) + eval_module = PolicyEvaluator(policy_network) + state_dict_before = copy.deepcopy(eval_module.state_dict()) + weight_value = 2.0 + batch = replace( + self.batch, + weight=torch.tensor([[0.0], [weight_value]]), + ) + eval_module._process_used_data(batch) + eval_module._aggregate_across_instances() + state_dict_after = copy.deepcopy(eval_module.state_dict()) + self.assertFalse(_compare_state_dicts(state_dict_before, state_dict_after)) + self.assertEqual(eval_module.sum_weight_local.item(), 
0.0) + self.assertEqual(eval_module.sum_weight.item(), weight_value) + self.assertEqual( + eval_module.sum_reward_weighted.item(), + weight_value * self.batch.reward[1, 0].item(), + ) + self.assertEqual(eval_module.sum_reward_weighted_local.item(), 0.0) + self.assertEqual(eval_module.get_avg_reward(), self.batch.reward[1, 0].item()) + + def test_update_eval_model(self): + policy_network_1 = LinearRegressionUCB(2) + policy_network_1.avg_A += 0.3 + policy_network_2 = LinearRegressionUCB(2) + policy_network_2.avg_A += 0.1 + eval_module = PolicyEvaluator(policy_network_1) + self.assertTrue( + _compare_state_dicts( + eval_module.eval_model.state_dict(), policy_network_1.state_dict() + ) + ) + + eval_module.update_eval_model(policy_network_2) + self.assertTrue( + _compare_state_dicts( + eval_module.eval_model.state_dict(), policy_network_2.state_dict() + ) + ) + + # change to the source model shouldn't affect the model in the eval module + original_state_dict_2 = copy.deepcopy(policy_network_2.state_dict()) + policy_network_2.avg_A += 0.4 + self.assertTrue( + _compare_state_dicts( + eval_module.eval_model.state_dict(), original_state_dict_2 + ) + ) + + def test_ingest_batch(self): + model_actions = torch.tensor([[1], [1]], dtype=torch.long) + _ = self.eval_module.ingest_batch(self.batch, model_actions) + self.eval_module._aggregate_across_instances() + # correct average reward + self.assertEqual( + self.eval_module.get_avg_reward(), self.batch.reward[1, 0].item() + ) + + def test_formatted_output(self): + model_actions = torch.tensor([[1], [1]], dtype=torch.long) + _ = self.eval_module.ingest_batch(self.batch, model_actions) + self.eval_module._aggregate_across_instances() + output = self.eval_module.get_formatted_result_string() + self.assertIsInstance(output, str) + + def test_summary_writer(self): + sw = SummaryWriter("/tmp/tb") + sw.add_scalars = MagicMock() + self.eval_module.attach_summary_writer(sw) + self.eval_module.log_metrics(global_step=5) + + expected_metric_dict = { + "avg_reward": 0.0, + "sum_weight": 0.0, + "all_data_sum_weight": 0.0, + "num_eval_model_updates": 0, + } + sw.add_scalars.assert_called_once_with( + "Offline_Eval", expected_metric_dict, global_step=5 + ) diff --git a/reagent/test/evaluation/cb/test_utils.py b/reagent/test/evaluation/cb/test_utils.py new file mode 100644 index 000000000..ed8eccc4d --- /dev/null +++ b/reagent/test/evaluation/cb/test_utils.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
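The utility exercised in the next test file keeps every field of the batch intact except the weights, which are zeroed wherever the logged action disagrees with the model's action. A hypothetical stand-in capturing just the behaviour the test checks (the real helper is reagent.evaluation.cb.utils.zero_out_skipped_obs_weights, and this sketch assumes the batch carries explicit weights, as in the test):

```python
import dataclasses
import torch

def zero_out_skipped_obs_weights_sketch(batch, model_actions: torch.Tensor):
    # Keep the batch unchanged, but zero the weight of every observation whose
    # logged action differs from the action the evaluated model would pick.
    keep = (batch.action == model_actions).float()
    return dataclasses.replace(batch, weight=batch.weight * keep)
```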
+ +import unittest + +import numpy.testing as npt +import torch +from reagent.core.types import CBInput +from reagent.evaluation.cb.utils import zero_out_skipped_obs_weights + + +class TestCBEvalUtils(unittest.TestCase): + def setUp(self): + self.batch = CBInput( + context_arm_features=torch.tensor( + [ + [ + [1, 2], + [1, 3], + ], + [ + [1, 4], + [1, 5], + ], + ], + dtype=torch.float, + ), + action=torch.tensor([[0], [1]], dtype=torch.long), + reward=torch.tensor([[1.5], [2.3]], dtype=torch.float), + weight=torch.tensor([[7], [5]], dtype=torch.float), + ) + + def test_zero_out_skipped_obs_weights(self): + model_actions = torch.tensor([[1], [1]], dtype=torch.long) + new_batch = zero_out_skipped_obs_weights(self.batch, model_actions) + # everything except weights should remain the same in the new batch + for name in ["context_arm_features", "action", "reward"]: + npt.assert_allclose( + getattr(self.batch, name).numpy(), getattr(new_batch, name).numpy() + ) + + # weights should be zero-ed out where action!= model_action + self.assertEqual(new_batch.weight[0, 0].item(), 0.0) + self.assertEqual(new_batch.weight[1, 0].item(), self.batch.weight[1, 0].item()) diff --git a/reagent/test/evaluation/test_evaluation_data_page.py b/reagent/test/evaluation/test_evaluation_data_page.py index 3c69bc470..f31e6709b 100644 --- a/reagent/test/evaluation/test_evaluation_data_page.py +++ b/reagent/test/evaluation/test_evaluation_data_page.py @@ -8,10 +8,15 @@ import numpy as np import torch import torch.nn as nn -from reagent import types as rlt +from reagent.core import types as rlt from reagent.evaluation.doubly_robust_estimator import DoublyRobustEstimator from reagent.evaluation.evaluation_data_page import EvaluationDataPage -from reagent.models.seq2slate import Seq2SlateMode +from reagent.evaluation.ope_adapter import OPEstimatorAdapter +from reagent.model_utils.seq2slate_utils import Seq2SlateMode +from reagent.ope.estimators.contextual_bandits_estimators import ( + SwitchDREstimator, + SwitchEstimator, +) logger = logging.getLogger(__name__) @@ -135,16 +140,14 @@ def test_seq2slate_eval_data_page(self): tgt_out_idx.flatten() - 2, ].reshape(batch_size, tgt_seq_len, candidate_dim) - ptb = rlt.PreprocessedTrainingBatch( - training_input=rlt.PreprocessedRankingInput( - state=rlt.FeatureData(float_features=torch.eye(state_dim)), - src_seq=rlt.FeatureData(float_features=src_seq), - tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq), - src_src_mask=torch.ones(batch_size, src_seq_len, src_seq_len), - tgt_out_idx=tgt_out_idx, - tgt_out_probs=torch.tensor([0.2, 0.5, 0.4]), - slate_reward=torch.tensor([4.0, 5.0, 7.0]), - ), + ptb = rlt.PreprocessedRankingInput( + state=rlt.FeatureData(float_features=torch.eye(state_dim)), + src_seq=rlt.FeatureData(float_features=src_seq), + tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq), + src_src_mask=torch.ones(batch_size, src_seq_len, src_seq_len), + tgt_out_idx=tgt_out_idx, + tgt_out_probs=torch.tensor([0.2, 0.5, 0.4]), + slate_reward=torch.tensor([4.0, 5.0, 7.0]), extras=rlt.ExtraData( sequence_number=torch.tensor([0, 0, 0]), mdp_id=np.array(["0", "1", "2"]), @@ -152,7 +155,7 @@ def test_seq2slate_eval_data_page(self): ) edp = EvaluationDataPage.create_from_tensors_seq2slate( - seq2slate_net, reward_net, ptb.training_input, eval_greedy=True + seq2slate_net, reward_net, ptb, eval_greedy=True ) logger.info("---------- Start evaluating eval_greedy=True -----------------") doubly_robust_estimator = DoublyRobustEstimator() @@ -161,6 +164,20 @@ def 
test_seq2slate_eval_data_page(self): inverse_propensity, doubly_robust, ) = doubly_robust_estimator.estimate(edp) + switch_estimator, switch_dr_estimator = ( + OPEstimatorAdapter(SwitchEstimator()), + OPEstimatorAdapter(SwitchDREstimator()), + ) + + # Verify that Switch with low exponent is equivalent to IPS + switch_ips = switch_estimator.estimate(edp, exp_base=1) + # Verify that Switch with no candidates is equivalent to DM + switch_dm = switch_estimator.estimate(edp, candidates=0) + # Verify that SwitchDR with low exponent is equivalent to DR + switch_dr_dr = switch_dr_estimator.estimate(edp, exp_base=1) + # Verify that SwitchDR with no candidates is equivalent to DM + switch_dr_dm = switch_dr_estimator.estimate(edp, candidates=0) + logger.info(f"{direct_method}, {inverse_propensity}, {doubly_robust}") avg_logged_reward = (4 + 5 + 7) / 3 @@ -180,11 +197,15 @@ def test_seq2slate_eval_data_page(self): self.assertAlmostEqual( doubly_robust.normalized, doubly_robust.raw / avg_logged_reward, delta=1e-6 ) + self.assertAlmostEqual(switch_ips.raw, inverse_propensity.raw, delta=1e-6) + self.assertAlmostEqual(switch_dm.raw, direct_method.raw, delta=1e-6) + self.assertAlmostEqual(switch_dr_dr.raw, doubly_robust.raw, delta=1e-6) + self.assertAlmostEqual(switch_dr_dm.raw, direct_method.raw, delta=1e-6) logger.info("---------- Finish evaluating eval_greedy=True -----------------") logger.info("---------- Start evaluating eval_greedy=False -----------------") edp = EvaluationDataPage.create_from_tensors_seq2slate( - seq2slate_net, reward_net, ptb.training_input, eval_greedy=False + seq2slate_net, reward_net, ptb, eval_greedy=False ) doubly_robust_estimator = DoublyRobustEstimator() _, inverse_propensity, _ = doubly_robust_estimator.estimate(edp) diff --git a/reagent/test/evaluation/test_ope_integration.py b/reagent/test/evaluation/test_ope_integration.py new file mode 100644 index 000000000..678a7fa72 --- /dev/null +++ b/reagent/test/evaluation/test_ope_integration.py @@ -0,0 +1,336 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
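The seq2slate docstring further down in this file walks through the expected Direct Method, IPS, and Doubly Robust values for the greedy case. The same numbers can be checked with a few lines of NumPy; the arrays below simply restate the quantities listed in that docstring.

```python
import numpy as np

logged_reward  = np.array([4.0, 5.0, 7.0])   # logged slate rewards
logged_prop    = np.array([0.2, 0.5, 0.4])   # logged propensities
pred_on_model  = np.array([1.0, 4.0, 5.0])   # predicted rewards on model-output slates
pred_on_logged = np.array([2.0, 4.0, 6.0])   # predicted rewards on logged slates
match          = np.array([0.0, 1.0, 0.0])   # model slate == logged slate (2nd log only)

dm  = pred_on_model.mean()                                   # (1 + 4 + 5) / 3
ips = (match / logged_prop * logged_reward).mean()           # 5 / 0.5 / 3
dr  = dm + (match / logged_prop * (logged_reward - pred_on_logged)).mean()
# dr == (1 + 4 + 5) / 3 + 1 / 0.5 * (5 - 4) / 3, matching the assertions below
```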
+ +import logging +import random +import unittest + +import numpy as np +import torch +from reagent.core import types as rlt +from reagent.evaluation.evaluation_data_page import EvaluationDataPage +from reagent.evaluation.ope_adapter import ( + OPEstimatorAdapter, + SequentialOPEstimatorAdapter, +) +from reagent.ope.estimators.contextual_bandits_estimators import ( + DMEstimator, + DoublyRobustEstimator, + IPSEstimator, + SwitchDREstimator, + SwitchEstimator, +) +from reagent.ope.estimators.sequential_estimators import ( + DoublyRobustEstimator as SeqDREstimator, + EpsilonGreedyRLPolicy, + RandomRLPolicy, + RLEstimatorInput, +) +from reagent.ope.estimators.types import Action, ActionSpace +from reagent.ope.test.envs import PolicyLogGenerator +from reagent.ope.test.gridworld import GridWorld, NoiseGridWorldModel +from reagent.ope.trainers.rl_tabular_trainers import ( + DPTrainer, + DPValueFunction, + TabularPolicy, +) +from reagent.test.evaluation.test_evaluation_data_page import ( + FakeSeq2SlateRewardNetwork, + FakeSeq2SlateTransformerNet, +) + + +logger = logging.getLogger(__name__) + + +def rlestimator_input_to_edp( + input: RLEstimatorInput, num_actions: int +) -> EvaluationDataPage: + mdp_ids = [] + logged_propensities = [] + logged_rewards = [] + action_mask = [] + model_propensities = [] + model_values = [] + + for mdp in input.log: + mdp_id = len(mdp_ids) + for t in mdp: + mdp_ids.append(mdp_id) + logged_propensities.append(t.action_prob) + logged_rewards.append(t.reward) + assert t.action is not None + action_mask.append( + [1 if x == t.action.value else 0 for x in range(num_actions)] + ) + assert t.last_state is not None + model_propensities.append( + [ + input.target_policy(t.last_state)[Action(x)] + for x in range(num_actions) + ] + ) + assert input.value_function is not None + model_values.append( + [ + input.value_function(t.last_state, Action(x)) + for x in range(num_actions) + ] + ) + + return EvaluationDataPage( + mdp_id=torch.tensor(mdp_ids).reshape(len(mdp_ids), 1), + logged_propensities=torch.tensor(logged_propensities).reshape( + (len(logged_propensities), 1) + ), + logged_rewards=torch.tensor(logged_rewards).reshape((len(logged_rewards), 1)), + action_mask=torch.tensor(action_mask), + model_propensities=torch.tensor(model_propensities), + model_values=torch.tensor(model_values), + sequence_number=torch.tensor([]), + model_rewards=torch.tensor([]), + model_rewards_for_logged_action=torch.tensor([]), + ) + + +class TestOPEModuleAlgs(unittest.TestCase): + GAMMA = 0.9 + CPE_PASS_BAR = 1.0 + CPE_MAX_VALUE = 2.0 + MAX_HORIZON = 1000 + NOISE_EPSILON = 0.3 + EPISODES = 2 + + def test_gridworld_sequential_adapter(self): + """ + Create a gridworld environment, logging policy, and target policy + Evaluates target policy using the direct OPE sequential doubly robust estimator, + then transforms the log into an evaluation data page which is passed to the ope adapter. + + This test is meant to verify the adaptation of EDPs into RLEstimatorInputs as employed + by ReAgent since ReAgent provides EDPs to Evaluators. Going from EDP -> RLEstimatorInput + is more involved than RLEstimatorInput -> EDP since the EDP does not store the state + at each timestep in each MDP, only the corresponding logged outputs & model outputs. + Thus, the adapter must do some tricks to represent these timesteps as states so the + ope module can extract the correct outputs. + + Note that there is some randomness in the model outputs since the model is purposefully + noisy. 
However, the same target policy is being evaluated on the same logged walks through + the gridworld, so the two results should be close in value (within 1). + + """ + random.seed(0) + np.random.seed(0) + torch.random.manual_seed(0) + + device = torch.device("cuda") if torch.cuda.is_available() else None + + gridworld = GridWorld.from_grid( + [ + ["s", "0", "0", "0", "0"], + ["0", "0", "0", "W", "0"], + ["0", "0", "0", "0", "0"], + ["0", "W", "0", "0", "0"], + ["0", "0", "0", "0", "g"], + ], + max_horizon=TestOPEModuleAlgs.MAX_HORIZON, + ) + + action_space = ActionSpace(4) + opt_policy = TabularPolicy(action_space) + trainer = DPTrainer(gridworld, opt_policy) + value_func = trainer.train(gamma=TestOPEModuleAlgs.GAMMA) + + behavivor_policy = RandomRLPolicy(action_space) + target_policy = EpsilonGreedyRLPolicy( + opt_policy, TestOPEModuleAlgs.NOISE_EPSILON + ) + model = NoiseGridWorldModel( + gridworld, + action_space, + epsilon=TestOPEModuleAlgs.NOISE_EPSILON, + max_horizon=TestOPEModuleAlgs.MAX_HORIZON, + ) + value_func = DPValueFunction(target_policy, model, TestOPEModuleAlgs.GAMMA) + ground_truth = DPValueFunction( + target_policy, gridworld, TestOPEModuleAlgs.GAMMA + ) + + log = [] + log_generator = PolicyLogGenerator(gridworld, behavivor_policy) + num_episodes = TestOPEModuleAlgs.EPISODES + for state in gridworld.states: + for _ in range(num_episodes): + log.append(log_generator.generate_log(state)) + + estimator_input = RLEstimatorInput( + gamma=TestOPEModuleAlgs.GAMMA, + log=log, + target_policy=target_policy, + value_function=value_func, + ground_truth=ground_truth, + ) + + edp = rlestimator_input_to_edp(estimator_input, len(model.action_space)) + + dr_estimator = SeqDREstimator( + weight_clamper=None, weighted=False, device=device + ) + + module_results = SequentialOPEstimatorAdapter.estimator_results_to_cpe_estimate( + dr_estimator.evaluate(estimator_input) + ) + adapter_results = SequentialOPEstimatorAdapter( + dr_estimator, TestOPEModuleAlgs.GAMMA, device=device + ).estimate(edp) + + self.assertAlmostEqual( + adapter_results.raw, + module_results.raw, + delta=TestOPEModuleAlgs.CPE_PASS_BAR, + ), f"OPE adapter results differed too much from underlying module (Diff: {abs(adapter_results.raw - module_results.raw)} > {TestOPEModuleAlgs.CPE_PASS_BAR})" + self.assertLess( + adapter_results.raw, TestOPEModuleAlgs.CPE_MAX_VALUE + ), f"OPE adapter results are too large ({adapter_results.raw} > {TestOPEModuleAlgs.CPE_MAX_VALUE})" + + def test_seq2slate_eval_data_page(self): + """ + Create 3 slate ranking logs and evaluate using Direct Method, Inverse + Propensity Scores, and Doubly Robust. + + The logs are as follows: + state: [1, 0, 0], [0, 1, 0], [0, 0, 1] + indices in logged slates: [3, 2], [3, 2], [3, 2] + model output indices: [2, 3], [3, 2], [2, 3] + logged reward: 4, 5, 7 + logged propensities: 0.2, 0.5, 0.4 + predicted rewards on logged slates: 2, 4, 6 + predicted rewards on model outputted slates: 1, 4, 5 + predicted propensities: 0.4, 0.3, 0.7 + + When eval_greedy=True: + + Direct Method uses the predicted rewards on model outputted slates. + Thus the result is expected to be (1 + 4 + 5) / 3 + + Inverse Propensity Scores would scale the reward by 1.0 / logged propensities + whenever the model output slate matches with the logged slate. 
+ Since only the second log matches with the model output, the IPS result + is expected to be 5 / 0.5 / 3 + + Doubly Robust is the sum of the direct method result and propensity-scaled + reward difference; the latter is defined as: + 1.0 / logged_propensities * (logged reward - predicted reward on logged slate) + * Indicator(model slate == logged slate) + Since only the second logged slate matches with the model outputted slate, + the DR result is expected to be (1 + 4 + 5) / 3 + 1.0 / 0.5 * (5 - 4) / 3 + + + When eval_greedy=False: + + Only Inverse Propensity Scores would be accurate. Because it would be too + expensive to compute all possible slates' propensities and predicted rewards + for Direct Method. + + The expected IPS = (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3 + """ + batch_size = 3 + state_dim = 3 + src_seq_len = 2 + tgt_seq_len = 2 + candidate_dim = 2 + + reward_net = FakeSeq2SlateRewardNetwork() + seq2slate_net = FakeSeq2SlateTransformerNet() + + src_seq = torch.eye(candidate_dim).repeat(batch_size, 1, 1) + tgt_out_idx = torch.LongTensor([[3, 2], [3, 2], [3, 2]]) + tgt_out_seq = src_seq[ + torch.arange(batch_size).repeat_interleave(tgt_seq_len), + tgt_out_idx.flatten() - 2, + ].reshape(batch_size, tgt_seq_len, candidate_dim) + + ptb = rlt.PreprocessedRankingInput( + state=rlt.FeatureData(float_features=torch.eye(state_dim)), + src_seq=rlt.FeatureData(float_features=src_seq), + tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq), + src_src_mask=torch.ones(batch_size, src_seq_len, src_seq_len), + tgt_out_idx=tgt_out_idx, + tgt_out_probs=torch.tensor([0.2, 0.5, 0.4]), + slate_reward=torch.tensor([4.0, 5.0, 7.0]), + extras=rlt.ExtraData( + sequence_number=torch.tensor([0, 0, 0]), + mdp_id=np.array(["0", "1", "2"]), + ), + ) + + edp = EvaluationDataPage.create_from_tensors_seq2slate( + seq2slate_net, reward_net, ptb, eval_greedy=True + ) + logger.info("---------- Start evaluating eval_greedy=True -----------------") + doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator()) + dm_estimator = OPEstimatorAdapter(DMEstimator()) + ips_estimator = OPEstimatorAdapter(IPSEstimator()) + switch_estimator = OPEstimatorAdapter(SwitchEstimator()) + switch_dr_estimator = OPEstimatorAdapter(SwitchDREstimator()) + + doubly_robust = doubly_robust_estimator.estimate(edp) + inverse_propensity = ips_estimator.estimate(edp) + direct_method = dm_estimator.estimate(edp) + + # Verify that Switch with low exponent is equivalent to IPS + switch_ips = switch_estimator.estimate(edp, exp_base=1) + # Verify that Switch with no candidates is equivalent to DM + switch_dm = switch_estimator.estimate(edp, candidates=0) + # Verify that SwitchDR with low exponent is equivalent to DR + switch_dr_dr = switch_dr_estimator.estimate(edp, exp_base=1) + # Verify that SwitchDR with no candidates is equivalent to DM + switch_dr_dm = switch_dr_estimator.estimate(edp, candidates=0) + + logger.info(f"{direct_method}, {inverse_propensity}, {doubly_robust}") + + avg_logged_reward = (4 + 5 + 7) / 3 + self.assertAlmostEqual(direct_method.raw, (1 + 4 + 5) / 3, delta=1e-6) + self.assertAlmostEqual( + direct_method.normalized, direct_method.raw / avg_logged_reward, delta=1e-6 + ) + self.assertAlmostEqual(inverse_propensity.raw, 5 / 0.5 / 3, delta=1e-6) + self.assertAlmostEqual( + inverse_propensity.normalized, + inverse_propensity.raw / avg_logged_reward, + delta=1e-6, + ) + self.assertAlmostEqual( + doubly_robust.raw, direct_method.raw + 1 / 0.5 * (5 - 4) / 3, delta=1e-6 + ) + self.assertAlmostEqual( + 
doubly_robust.normalized, doubly_robust.raw / avg_logged_reward, delta=1e-6 + ) + self.assertAlmostEqual(switch_ips.raw, inverse_propensity.raw, delta=1e-6) + self.assertAlmostEqual(switch_dm.raw, direct_method.raw, delta=1e-6) + self.assertAlmostEqual(switch_dr_dr.raw, doubly_robust.raw, delta=1e-6) + self.assertAlmostEqual(switch_dr_dm.raw, direct_method.raw, delta=1e-6) + logger.info("---------- Finish evaluating eval_greedy=True -----------------") + + logger.info("---------- Start evaluating eval_greedy=False -----------------") + edp = EvaluationDataPage.create_from_tensors_seq2slate( + seq2slate_net, reward_net, ptb, eval_greedy=False + ) + doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator()) + dm_estimator = OPEstimatorAdapter(DMEstimator()) + ips_estimator = OPEstimatorAdapter(IPSEstimator()) + + doubly_robust = doubly_robust_estimator.estimate(edp) + inverse_propensity = ips_estimator.estimate(edp) + direct_method = dm_estimator.estimate(edp) + self.assertAlmostEqual( + inverse_propensity.raw, + (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3, + delta=1e-6, + ) + self.assertAlmostEqual( + inverse_propensity.normalized, + inverse_propensity.raw / avg_logged_reward, + delta=1e-6, + ) + logger.info("---------- Finish evaluating eval_greedy=False -----------------") diff --git a/reagent/test/gym/c51_cartpole_v0.json b/reagent/test/gym/c51_cartpole_v0.json deleted file mode 100644 index 9fd116c84..000000000 --- a/reagent/test/gym/c51_cartpole_v0.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "pytorch_discrete_dqn", - "max_replay_memory_size": 10000, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.1, - "maxq_learning": true, - "epsilon": 0.05, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false, - "categorical": true, - "num_atoms": 51, - "qmin": 0, - "qmax": 30, - "c51_l2_decay": 0 - }, - "training": { - "layers": [-1, 128, 64, -1], - "activations": ["relu", "relu", "linear"], - "minibatch_size": 1024, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 200, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 500, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100, - "offline_train_epochs": 20 - } -} diff --git a/reagent/test/gym/discrete_dqn_cartpole_small_v0.json b/reagent/test/gym/discrete_dqn_cartpole_small_v0.json deleted file mode 100644 index 561843ff9..000000000 --- a/reagent/test/gym/discrete_dqn_cartpole_small_v0.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "pytorch_discrete_dqn", - "max_replay_memory_size": 20000, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.1, - "maxq_learning": true, - "epsilon": 0.05, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [-1, 128, 64, -1], - "activations": ["relu", "relu", "linear"], - "minibatch_size": 512, - "learning_rate": 0.01, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 100, - "max_steps": 200, - "train_every_ts": 3, - "train_after_ts": 1, - "test_every_ts": 400, - "test_after_ts": 1000, - "num_train_batches": 1, - "avg_over_num_episodes": 25, - "offline_train_epochs": 7 - } -} diff --git a/reagent/test/gym/discrete_dqn_cartpole_v0.json 
b/reagent/test/gym/discrete_dqn_cartpole_v0.json deleted file mode 100644 index cd9432685..000000000 --- a/reagent/test/gym/discrete_dqn_cartpole_v0.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "pytorch_discrete_dqn", - "max_replay_memory_size": 20000, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.1, - "maxq_learning": true, - "epsilon": 0.05, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [-1, 128, 64, -1], - "activations": ["relu", "relu", "linear"], - "minibatch_size": 512, - "learning_rate": 0.01, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 200, - "max_steps": 200, - "train_every_ts": 3, - "train_after_ts": 1, - "test_every_ts": 400, - "test_after_ts": 1000, - "num_train_batches": 1, - "avg_over_num_episodes": 25, - "offline_train_epochs": 7 - } -} diff --git a/reagent/test/gym/discrete_dqn_lunarlander_v2.json b/reagent/test/gym/discrete_dqn_lunarlander_v2.json deleted file mode 100644 index 6105a3769..000000000 --- a/reagent/test/gym/discrete_dqn_lunarlander_v2.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "env": "LunarLander-v2", - "model_type": "pytorch_discrete_dqn", - "max_replay_memory_size": 100000, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.001, - - "maxq_learning": true, - "epsilon": 1, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false, - "bcq": false, - "bcq_drop_threshold": 0.99 - }, - "training": { - "layers": [-1, 128, 64, -1], - "activations": ["relu", "relu", "linear"], - "minibatch_size": 128, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "evaluation": { - "calc_cpe_in_training": false - }, - "run_details": { - "num_episodes": 1000, - "max_steps": 1000, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 10000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100, - "offline_train_epochs": 300, - "solved_reward_threshold": 200, - "max_episodes_to_run_after_solved": 200, - "stop_training_after_solved": true, - "epsilon_decay": 0.995, - "minimum_epsilon": 0.0, - "bcq_imitator_hyperparams": { - "gbdt_trees": 100, - "max_depth": 8 - } - } -} diff --git a/reagent/test/gym/discrete_dqn_maxq_asteroids_v0.json b/reagent/test/gym/discrete_dqn_maxq_asteroids_v0.json deleted file mode 100644 index 4984f3f80..000000000 --- a/reagent/test/gym/discrete_dqn_maxq_asteroids_v0.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "env": "Asteroids-v0", - "model_type": "pytorch_discrete_dqn", - "max_replay_memory_size": 100000, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.2, - - "maxq_learning": true, - "epsilon": 0.2, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [ - -1, - 128, - 64, - -1 - ], - "activations": [ - "relu", - "relu", - "linear" - ], - "minibatch_size": 64, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999, - "cnn_parameters": { - "conv_dims": [ - 3, - 32, - 16 - ], - "conv_height_kernels": [ - 8, - 4 - ], - "conv_width_kernels": [ - 8, - 4 - ], - "pool_kernels_strides": [ - 2, - 2 - ], - "pool_types": [ - "max", - "max" - ] - } - }, - "run_details": { - "num_episodes": 5001, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - 
"test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100 - } -} diff --git a/reagent/test/gym/discrete_qlearn_maxq_cartpole_v0.json b/reagent/test/gym/discrete_qlearn_maxq_cartpole_v0.json deleted file mode 100644 index a1e1aa655..000000000 --- a/reagent/test/gym/discrete_qlearn_maxq_cartpole_v0.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "discrete", - "max_replay_memory_size": 10000, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.2, - - "maxq_learning": true, - "epsilon": 0.2, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [ - -1, - 128, - 64, - -1 - ], - "activations": [ - "relu", - "relu", - "linear" - ], - "minibatch_size": 64, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 5001, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100 - } -} diff --git a/reagent/test/gym/discrete_qlearn_softmax_cartpole_v0.json b/reagent/test/gym/discrete_qlearn_softmax_cartpole_v0.json deleted file mode 100644 index 02fa22a22..000000000 --- a/reagent/test/gym/discrete_qlearn_softmax_cartpole_v0.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "discrete", - "max_replay_memory_size": 10000, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.2, - - "maxq_learning": true, - "epsilon": 0, - "temperature": 0.35, - "softmax_policy": 1 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [ - -1, - 128, - 64, - -1 - ], - "activations": [ - "relu", - "relu", - "linear" - ], - "minibatch_size": 64, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 5001, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100 - } -} diff --git a/reagent/test/gym/discrete_rainbow_dqn_cartpole_v0.json b/reagent/test/gym/discrete_rainbow_dqn_cartpole_v0.json deleted file mode 100644 index 94ba0e9c3..000000000 --- a/reagent/test/gym/discrete_rainbow_dqn_cartpole_v0.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "pytorch_discrete_dqn", - "max_replay_memory_size": 10000, - "use_gpu": true, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.2, - - "maxq_learning": true, - "epsilon": 0.2, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": true, - "dueling_architecture": true - }, - "training": { - "layers": [ - -1, - 128, - -1 - ], - "activations": [ - "relu", - "linear" - ], - "minibatch_size": 64, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 5001, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100 - } -} diff --git a/reagent/test/gym/discrete_sarsa_softmax_cartpole_v0.json b/reagent/test/gym/discrete_sarsa_softmax_cartpole_v0.json deleted file mode 100644 index 03d915979..000000000 --- a/reagent/test/gym/discrete_sarsa_softmax_cartpole_v0.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "discrete", - "max_replay_memory_size": 10000, - "rl": { - 
"gamma": 0.99, - "target_update_rate": 0.2, - - "maxq_learning": false, - "epsilon": 0, - "temperature": 0.35, - "softmax_policy": 1 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [ - -1, - 128, - 64, - -1 - ], - "activations": [ - "relu", - "relu", - "linear" - ], - "minibatch_size": 128, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 5001, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100 - } -} diff --git a/reagent/test/gym/maxq_asteroids_v0.json b/reagent/test/gym/maxq_asteroids_v0.json deleted file mode 100644 index 15bcfc422..000000000 --- a/reagent/test/gym/maxq_asteroids_v0.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "env": "Asteroids-v0", - "model_type": "discrete", - "max_replay_memory_size": 100000, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.2, - - "maxq_learning": true, - "epsilon": 0.2, - "temperature": 0.35, - "softmax_policy": 0 - }, - "training": { - "layers": [ - -1, - 128, - 64, - -1 - ], - "activations": [ - "relu", - "relu", - "linear" - ], - "minibatch_size": 64, - "learning_rate": 0.001, - "optimizer": "ADAM", - "gamma": 0.999, - "cnn_parameters": { - "conv_dims": [ - 3, - 32, - 16 - ], - "conv_height_kernels": [ - 8, - 4 - ], - "conv_width_kernels": [ - 8, - 4 - ], - "pool_kernels_strides": [ - 2, - 2 - ], - "pool_types": [ - "max", - "max" - ] - } - }, - "run_details": { - "num_episodes": 5001, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100 - } -} diff --git a/reagent/test/gym/maxq_lunarlander_v2.json b/reagent/test/gym/maxq_lunarlander_v2.json deleted file mode 100644 index 46710e052..000000000 --- a/reagent/test/gym/maxq_lunarlander_v2.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "env": "LunarLander-v2", - "model_type": "discrete", - "max_replay_memory_size": 10000, - "rl": { - "reward_discount_factor": 0.99, - "target_update_rate": 0.01, - - "maxq_learning": true, - "epsilon": 0.5 - }, - "training": { - "layers": [ - -1, - 256, - 128, - 64, - -1 - ], - "activations": [ - "relu", - "relu", - "relu", - "linear" - ], - "minibatch_size": 1024, - "learning_rate": 0.001, - "optimizer": "ADAM", - "learning_rate_decay": 0.999 - }, - "run_details": { - "num_episodes": 901, - "train_every": 10, - "train_after": 10, - "test_every": 100, - "test_after": 10, - "num_train_batches": 100, - "avg_over_num_episodes": 100, - "render": 0, - "render_every": 100 - } -} diff --git a/reagent/test/gym/parametric_dqn_cartpole_v0.json b/reagent/test/gym/parametric_dqn_cartpole_v0.json deleted file mode 100644 index af55030c2..000000000 --- a/reagent/test/gym/parametric_dqn_cartpole_v0.json +++ /dev/null @@ -1,37 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "pytorch_parametric_dqn", - "max_replay_memory_size": 20000, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.1, - "maxq_learning": true, - "epsilon": 0.05, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [-1, 128, 64, -1], - "activations": ["relu", "relu", "linear"], - "minibatch_size": 512, - "learning_rate": 0.01, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 200, - "max_steps": 200, - 
"train_every_ts": 3, - "train_after_ts": 1, - "test_every_ts": 400, - "test_after_ts": 1000, - "num_train_batches": 1, - "avg_over_num_episodes": 25, - "offline_train_epochs": 7 - } -} diff --git a/reagent/test/gym/parametric_qlearn_softmax_cartpole_v0.json b/reagent/test/gym/parametric_qlearn_softmax_cartpole_v0.json deleted file mode 100644 index a11ee362c..000000000 --- a/reagent/test/gym/parametric_qlearn_softmax_cartpole_v0.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "parametric", - "max_replay_memory_size": 10000, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.2, - - "maxq_learning": true, - "epsilon": 0, - "temperature": 0.35, - "softmax_policy": 1 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [ - -1, - 128, - 64, - -1 - ], - "activations": [ - "relu", - "relu", - "linear" - ], - "minibatch_size": 64, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 5001, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100 - } -} diff --git a/reagent/test/gym/parametric_rainbow_dqn_cartpole_v0.json b/reagent/test/gym/parametric_rainbow_dqn_cartpole_v0.json deleted file mode 100644 index 4d4bbda40..000000000 --- a/reagent/test/gym/parametric_rainbow_dqn_cartpole_v0.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "pytorch_parametric_dqn", - "max_replay_memory_size": 10000, - "use_gpu": true, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.2, - - "maxq_learning": true, - "epsilon": 0, - "temperature": 0.35, - "softmax_policy": 1 - }, - "rainbow": { - "double_q_learning": true, - "dueling_architecture": true - }, - "training": { - "layers": [ - -1, - 128, - 64, - -1 - ], - "activations": [ - "relu", - "relu", - "linear" - ], - "minibatch_size": 64, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 5001, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100 - } -} diff --git a/reagent/test/gym/parametric_sarsa_softmax_cartpole_v0.json b/reagent/test/gym/parametric_sarsa_softmax_cartpole_v0.json deleted file mode 100644 index d05b566d5..000000000 --- a/reagent/test/gym/parametric_sarsa_softmax_cartpole_v0.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "env": "CartPole-v0", - "model_type": "parametric", - "max_replay_memory_size": 10000, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.2, - - "maxq_learning": false, - "epsilon": 0, - "temperature": 0.35, - "softmax_policy": 1 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false - }, - "training": { - "layers": [ - -1, - 128, - 64, - -1 - ], - "activations": [ - "relu", - "relu", - "linear" - ], - "minibatch_size": 64, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 0.999 - }, - "run_details": { - "num_episodes": 5001, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100 - } -} diff --git a/reagent/test/gym/qrdqn_cartpole_v0.json b/reagent/test/gym/qrdqn_cartpole_v0.json deleted file mode 100644 index 9dc343b0c..000000000 --- a/reagent/test/gym/qrdqn_cartpole_v0.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - 
"env": "CartPole-v0", - "model_type": "pytorch_discrete_dqn", - "max_replay_memory_size": 20000, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.005, - "maxq_learning": true, - "epsilon": 0.2, - "temperature": 0.35, - "softmax_policy": 0 - }, - "rainbow": { - "double_q_learning": false, - "dueling_architecture": false, - "categorical": false, - "quantile": true, - "num_atoms": 50, - "c51_l2_decay": 0 - }, - "training": { - "layers": [-1, 128, 64, -1], - "activations": ["relu", "relu", "linear"], - "minibatch_size": 32, - "learning_rate": 0.001, - "optimizer": "ADAM", - "lr_decay": 1 - }, - "run_details": { - "num_episodes": 200, - "max_steps": 200, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 500, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100, - "offline_train_epochs": 20 - } -} diff --git a/reagent/test/gym/sac_pendulum_v0.json b/reagent/test/gym/sac_pendulum_v0.json deleted file mode 100644 index ce0e9e278..000000000 --- a/reagent/test/gym/sac_pendulum_v0.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "env": "Pendulum-v0", - "model_type": "soft_actor_critic", - "max_replay_memory_size": 100000, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.005, - "epsilon": 0, - "softmax_policy": 1 - }, - "sac_training": { - "rl": { - "gamma": 0.99, - "target_update_rate": 0.005, - "epsilon": 0, - "softmax_policy": 1 - }, - "minibatch_size": 256, - "q_network_optimizer": { - "learning_rate": 0.001 - }, - "value_network_optimizer": { - "learning_rate": 0.001 - }, - "actor_network_optimizer": { - "learning_rate": 0.001 - }, - "alpha_optimizer": { - "learning_rate": 0.001 - }, - "entropy_temperature": 0.1 - }, - "critic_training": { - "layers": [128, 64], - "activations": ["relu", "relu"] - }, - "sac_value_training": { - "layers": [128, 64], - "activations": ["relu", "relu"] - }, - "actor_training": { - "layers": [128, 64], - "activations": ["relu", "relu"] - }, - "run_details": { - "num_episodes": 1000, - "max_steps": 1000, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100, - "offline_train_epochs": 200 - } -} diff --git a/reagent/test/gym/td3_pendulum_v0.json b/reagent/test/gym/td3_pendulum_v0.json deleted file mode 100644 index dc8069a78..000000000 --- a/reagent/test/gym/td3_pendulum_v0.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "env": "Pendulum-v0", - "model_type": "td3", - "max_replay_memory_size": 100000, - "use_gpu": false, - "rl": { - "gamma": 0.99, - "target_update_rate": 0.005, - "epsilon": 0 - }, - "td3_training": { - "minibatch_size": 256, - "use_2_q_functions": true, - "noise_variance": 0.2, - "noise_clip": 0.5, - "delayed_policy_update": 2, - "q_network_optimizer": { - "learning_rate": 0.001 - }, - "actor_network_optimizer": { - "learning_rate": 0.001 - } - }, - "critic_training": { - "layers": [128, 64], - "activations": ["relu", "relu"] - }, - "actor_training": { - "layers": [128, 64], - "activations": ["relu", "relu"] - }, - "run_details": { - "num_episodes": 1000, - "max_steps": 1000, - "train_every_ts": 1, - "train_after_ts": 1, - "test_every_ts": 2000, - "test_after_ts": 1, - "num_train_batches": 1, - "avg_over_num_episodes": 100, - "offline_train_epochs": 200 - } -} diff --git a/reagent/test/lite/__init__.py b/reagent/test/lite/__init__.py new file mode 100644 index 000000000..ae7b7a8f9 --- /dev/null +++ b/reagent/test/lite/__init__.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python3 +# Copyright (c) 
Facebook, Inc. and its affiliates. All rights reserved. + +# light APIs for solving optimization problems. diff --git a/reagent/test/lite/test_combo_optimizer.py b/reagent/test/lite/test_combo_optimizer.py new file mode 100644 index 000000000..3fbd27b55 --- /dev/null +++ b/reagent/test/lite/test_combo_optimizer.py @@ -0,0 +1,692 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import random +import unittest +from collections import defaultdict +from typing import Dict + +import nevergrad as ng +import numpy as np +import torch +import torch.nn as nn +from reagent.lite.optimizer import ( + BayesianByBackpropOptimizer, + BayesianMLPEnsemblerOptimizer, + BayesianOptimizerBase, + GREEDY_TEMP, + GumbelSoftmaxOptimizer, + NeverGradOptimizer, + PolicyGradientOptimizer, + QLearningOptimizer, + RandomSearchOptimizer, + sol_to_tensors, +) + +# nevergrad performs a little worse in the test environment +NEVERGRAD_TEST_THRES = 6.0 +POLICY_GRADIENT_TEST_THRES = 3.0 +GUMBEL_SOFTMAX_TEST_THRES = 3.0 +Q_LEARNING_TEST_THRES = 3.0 +BAYESSIAN_MLP_TEST_THRES = 3.0 +BAYESSIAN_MLP_CONV_THRES = 6.0 + + +class GroundTruthNet(nn.Module): + def __init__(self, dim_input, dim_model): + super().__init__() + self.net = nn.Sequential( + torch.nn.Linear(dim_input, dim_model), + torch.nn.ReLU(), + torch.nn.Linear(dim_model, 1), + ) + for p in self.parameters(): + if p.dim() > 1: + nn.init.uniform_(p, -3, 3) + + def forward(self, x): + return self.net(x) + + +def random_sample(input_param, obj_func, n_generations=100): + """Return the best result from random sampling""" + rs_optimizer = RandomSearchOptimizer( + input_param, + obj_func, + batch_size=512, + ) + min_reward_rs_optimizer = torch.tensor(9999.0) + print("Random Sampling") + for i in range(n_generations): + ( + sampled_solutions, + reward, + ) = rs_optimizer.optimize_step() + min_reward_rs_optimizer = torch.min( + min_reward_rs_optimizer, torch.min(reward.data) + ) + print(f"Generation={i}, min_reward={min_reward_rs_optimizer}") + print() + + return min_reward_rs_optimizer + + +def discrete_input_param(): + # Some random discrete choice space + ng_param = ng.p.Dict( + choice1=ng.p.Choice(["128", "256", "512", "768"]), + choice2=ng.p.Choice(["128", "256", "512", "768"]), + choice3=ng.p.Choice(["True", "False"]), + choice4=ng.p.Choice(["Red", "Blue", "Green", "Yellow", "Purple"]), + choice5=ng.p.Choice(["Red", "Blue", "Green", "Yellow", "Purple"]), + ) + return ng_param + + +def create_ground_truth_net(ng_param): + dim_input = sum([len(ng_param[k].choices) for k in ng_param]) + dim_model = 256 + gt_net = GroundTruthNet(dim_input, dim_model) + print(f"Ground-Truth Net DIM_INPUT={dim_input}, DIM_MODEL={dim_model}") + return gt_net + + +def create_discrete_choice_obj_func(ng_param, gt_net): + def obj_func(sampled_sol: Dict[str, torch.Tensor]) -> torch.Tensor: + # sampled_sol format: + # key = choice_name + # val = choice_idx (a tensor of length `batch_size`) + assert list(sampled_sol.values())[0].dim() == 1 + batch_size = list(sampled_sol.values())[0].shape[0] + batch_tensors = [] + for i in range(batch_size): + tensors = [] + for k in sorted(sampled_sol.keys()): + num_choices = len(ng_param[k].choices) + one_hot = torch.zeros(num_choices) + one_hot[sampled_sol[k][i]] = 1 + tensors.append(one_hot) + batch_tensors.append(torch.cat(tensors, dim=-1)) + batch_tensors = torch.stack(batch_tensors) + return gt_net(batch_tensors) + + return obj_func + + +def create_discrete_choice_gumbel_softmax_obj_func(ng_param, gt_net): + 
def obj_func(sampled_sol: Dict[str, torch.Tensor]) -> torch.Tensor: + # sampled_sol format: + # key = choice_name + # val = sampled softmax distribution, a tensor of shape (batch_size, num_choices) + assert list(sampled_sol.values())[0].dim() == 2 + batch_size = list(sampled_sol.values())[0].shape[0] + batch_tensors = [] + for i in range(batch_size): + tensors = [] + for k in sorted(sampled_sol.keys()): + tensors.append(sampled_sol[k][i]) + batch_tensors.append(torch.cat(tensors, dim=-1)) + batch_tensors = torch.stack(batch_tensors) + return gt_net(batch_tensors) + + return obj_func + + +class TestComboOptimizer(unittest.TestCase): + def setUp(self): + seed = 123 + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + + def test_random_sample_with_raw_choices_using_uncommon_key(self): + batch_size = 200 + input_param = ng.p.Dict( + **{ + "#1": ng.p.Choice([32, 64, 128]), + "choice2[3]": ng.p.Choice([True, False]), + "choice3.attr": ng.p.Choice( + ["Red", "Blue", "Green", "Yellow", "Purple"] + ), + } + ) + obj_func = None + + sampling_weights = { + "#1": [0.5, 0.5, 0.0], + "choice2[3]": [0.25, 0.75], + "choice3.attr": [0.1, 0.9, 0.0, 0.0, 0.0], + } + + optimizer = RandomSearchOptimizer( + input_param, + obj_func, + batch_size=batch_size, + sampling_weights=sampling_weights, + ) + sampled_sol = optimizer.sample(batch_size) + sampled_sol = optimizer.indices_to_raw_choices(sampled_sol) + self.assertEqual(len(sampled_sol), batch_size) + self.assertIsInstance(sampled_sol, list) + + counts = {key: defaultdict(int) for key in sampling_weights} + for sample in sampled_sol: + self.assertSetEqual(set(sample.keys()), set(input_param.keys())) + self.assertIn(sample["#1"], [32, 64]) + self.assertIn(sample["choice2[3]"], [True, False]) + self.assertIn(sample["choice3.attr"], ["Red", "Blue"]) + for key in sample: + counts[key][sample[key]] += 1 + + self.assertAlmostEqual(counts["#1"][32] / float(batch_size), 0.5, places=1) + self.assertAlmostEqual(counts["#1"][64] / float(batch_size), 0.5, places=1) + self.assertEqual(counts["#1"][128], 0) + + self.assertAlmostEqual( + counts["choice2[3]"][True] / float(batch_size), 0.25, places=1 + ) + self.assertAlmostEqual( + counts["choice2[3]"][False] / float(batch_size), 0.75, places=1 + ) + + self.assertAlmostEqual( + counts["choice3.attr"]["Red"] / float(batch_size), 0.1, places=1 + ) + self.assertAlmostEqual( + counts["choice3.attr"]["Blue"] / float(batch_size), 0.9, places=1 + ) + self.assertEqual(counts["choice3.attr"]["Green"], 0) + self.assertEqual(counts["choice3.attr"]["Yellow"], 0) + self.assertEqual(counts["choice3.attr"]["Purple"], 0) + + def test_random_sample_with_raw_choices_1(self): + batch_size = 1 + input_param = ng.p.Dict( + choice1=ng.p.Choice([32, 64, 128]), + choice2=ng.p.Choice([True, False]), + choice3=ng.p.Choice(["Red", "Blue", "Green", "Yellow", "Purple"]), + ) + obj_func = None + optimizer = RandomSearchOptimizer( + input_param, obj_func, batch_size=batch_size, sampling_weights=None + ) + sampled_sol = optimizer.sample(batch_size) + sampled_sol = optimizer.indices_to_raw_choices(sampled_sol) + self.assertEqual(len(sampled_sol), batch_size) + self.assertIsInstance(sampled_sol, list) + for sample in sampled_sol: + self.assertSetEqual(set(sample.keys()), set(input_param.keys())) + for key in sample: + self.assertIn(sample[key], input_param[key].choices.value) + + def test_random_sample_with_raw_choices_2(self): + batch_size = 200 + input_param = ng.p.Dict( + choice1=ng.p.Choice([32, 64, 128]), + choice2=ng.p.Choice([True, 
False]), + choice3=ng.p.Choice(["Red", "Blue", "Green", "Yellow", "Purple"]), + ) + obj_func = None + + sampling_weights = { + "choice1": [0.5, 0.5, 0.0], + "choice2": [0.25, 0.75], + "choice3": [0.1, 0.9, 0.0, 0.0, 0.0], + } + + optimizer = RandomSearchOptimizer( + input_param, + obj_func, + batch_size=batch_size, + sampling_weights=sampling_weights, + ) + sampled_sol = optimizer.sample(batch_size) + sampled_sol = optimizer.indices_to_raw_choices(sampled_sol) + self.assertEqual(len(sampled_sol), batch_size) + self.assertIsInstance(sampled_sol, list) + + counts = {key: defaultdict(int) for key in sampling_weights} + for sample in sampled_sol: + self.assertSetEqual(set(sample.keys()), set(input_param.keys())) + self.assertIn(sample["choice1"], [32, 64]) + self.assertIn(sample["choice2"], [True, False]) + self.assertIn(sample["choice3"], ["Red", "Blue"]) + for key in sample: + counts[key][sample[key]] += 1 + + self.assertAlmostEqual(counts["choice1"][32] / float(batch_size), 0.5, places=1) + self.assertAlmostEqual(counts["choice1"][64] / float(batch_size), 0.5, places=1) + self.assertEqual(counts["choice1"][128], 0) + + self.assertAlmostEqual( + counts["choice2"][True] / float(batch_size), 0.25, places=1 + ) + self.assertAlmostEqual( + counts["choice2"][False] / float(batch_size), 0.75, places=1 + ) + + self.assertAlmostEqual( + counts["choice3"]["Red"] / float(batch_size), 0.1, places=1 + ) + self.assertAlmostEqual( + counts["choice3"]["Blue"] / float(batch_size), 0.9, places=1 + ) + self.assertEqual(counts["choice3"]["Green"], 0) + self.assertEqual(counts["choice3"]["Yellow"], 0) + self.assertEqual(counts["choice3"]["Purple"], 0) + + def test_nevergrad_optimizer_discrete(self): + batch_size = 32 + n_generations = 40 + input_param = discrete_input_param() + gt_net = create_ground_truth_net(input_param) + obj_func = create_discrete_choice_obj_func(input_param, gt_net) + optimizer = NeverGradOptimizer( + input_param, + batch_size * n_generations, # estimated_budgets + obj_func=obj_func, + batch_size=batch_size, + optimizer_name="DoubleFastGADiscreteOnePlusOne", + ) + best_rs_result = random_sample(input_param, obj_func, n_generations=20) + history_min_reward = torch.tensor(9999.0) + for i in range(n_generations): + ( + sampled_solutions, + reward, + ) = optimizer.optimize_step() + history_min_reward = torch.min(history_min_reward, torch.min(reward.data)) + print( + f"Generation={i}, min_reward={torch.min(reward.data)}, " + f"history_min_reward={history_min_reward}" + ) + assert ( + abs(best_rs_result - history_min_reward) < NEVERGRAD_TEST_THRES + ), f"Learning not converged. 
best random search={best_rs_result}, optimizer best result={history_min_reward}" + assert ( + optimizer.best_solutions(1)[0][0] == history_min_reward + ), "Best solutions (n=1) inconsistent with the best reward" + # just test sampling() can run + optimizer.sample(10) + + def test_policy_gradient_optimizer_discrete(self): + batch_size = 32 + learning_rate = 0.1 + input_param = discrete_input_param() + gt_net = create_ground_truth_net(input_param) + obj_func = create_discrete_choice_obj_func(input_param, gt_net) + optimizer = PolicyGradientOptimizer( + input_param, obj_func, batch_size=batch_size, learning_rate=learning_rate + ) + best_rs_result = random_sample(input_param, obj_func, n_generations=20) + n_generations = 100 + for i in range(n_generations): + ( + sampled_solutions, + reward, + sampled_log_probs, + ) = optimizer.optimize_step() + mean_reward = torch.mean(reward.data) + print( + f"Generation={i}, mean_reward={mean_reward}, " + f"min_reward={torch.min(reward.data)}, " + f"mean_sample_prob={torch.mean(torch.exp(sampled_log_probs))}, " + f"temperature={optimizer.temp}" + ) + assert ( + abs(best_rs_result - mean_reward) < POLICY_GRADIENT_TEST_THRES + ), f"Learning not converged. best random search={best_rs_result}, optimizer mean result={mean_reward}" + # just test sampling() can run + optimizer.sample(10) + + def test_q_learning_optimizer_discrete(self): + batch_size = 256 + input_param = discrete_input_param() + gt_net = create_ground_truth_net(input_param) + obj_func = create_discrete_choice_obj_func(input_param, gt_net) + optimizer = QLearningOptimizer(input_param, obj_func, batch_size=batch_size) + best_rs_result = random_sample(input_param, obj_func, n_generations=20) + n_generations = 100 + for i in range(n_generations): + ( + sampled_solutions, + reward, + ) = optimizer.optimize_step() + mean_reward = torch.mean(reward.data) + print( + f"Generation={i}, mean_reward={mean_reward}, " + f"min_reward={torch.min(reward.data)}, " + f"temperature={optimizer.temp}" + ) + + eval_result = obj_func(optimizer.sample(1)) + assert ( + abs(best_rs_result - eval_result) < Q_LEARNING_TEST_THRES + ), f"Learning not converged. best random search={best_rs_result}, eval result={eval_result}" + + def test_gumbel_softmax_optimizer_discrete(self): + batch_size = 32 + anneal_rate = 0.97 + learning_rate = 0.1 + input_param = discrete_input_param() + gt_net = create_ground_truth_net(input_param) + obj_func = create_discrete_choice_gumbel_softmax_obj_func(input_param, gt_net) + optimizer = GumbelSoftmaxOptimizer( + input_param, + obj_func, + anneal_rate=anneal_rate, + batch_size=batch_size, + learning_rate=learning_rate, + ) + + obj_func_rs = create_discrete_choice_obj_func(input_param, gt_net) + best_rs_result = random_sample(input_param, obj_func_rs, n_generations=20) + + n_generations = 100 + for i in range(n_generations): + (sampled_softmax_vals, reward, logits) = optimizer.optimize_step() + mean_reward = torch.mean(reward.data) + print( + f"Generation={i}, mean_reward={mean_reward}, " + f"min_reward={torch.min(reward.data)}, " + f"temperature={optimizer.temp}" + ) + assert ( + optimizer.temp == optimizer.min_temp + ), "Towards the end of learning, GumbelSoftmax Optimizer should have a low temperature" + assert ( + abs(best_rs_result - mean_reward) < GUMBEL_SOFTMAX_TEST_THRES + ), f"Learning not converged. 
best random search={best_rs_result}, optimizer mean result={mean_reward}" + eval_obj_func = create_discrete_choice_obj_func(input_param, gt_net) + eval_result = eval_obj_func(optimizer.sample(1)) + assert ( + abs(best_rs_result - eval_result) < GUMBEL_SOFTMAX_TEST_THRES + ), f"Learning not converged. best random search={best_rs_result}, eval result={eval_result}" + + def run_policy_gradient_optimizer( + self, + input_param, + obj_func, + batch_size, + n_generations, + repeats, + ): + results = [] + for r in range(repeats): + print(f"\n\n**** Policy Gradient Optimizer, Repeat={r} ****") + pg_optimizer = PolicyGradientOptimizer( + input_param, + obj_func, + batch_size=batch_size, + ) + for i in range(n_generations): + # non-exploration at the last generation + if i == n_generations - 1: + pg_optimizer.temp = GREEDY_TEMP + temp = pg_optimizer.temp + ( + sampled_solutions, + reward, + sampled_log_probs, + ) = pg_optimizer.optimize_step() + mean_reward_pg_optimizer = torch.mean(reward.data) + min_reward_pg_optimizer = torch.min(reward.data) + print( + f"Generation={i}, mean_reward={mean_reward_pg_optimizer}, " + f"min_reward={min_reward_pg_optimizer}, " + f"mean_sample_prob={torch.mean(torch.exp(sampled_log_probs))}, " + f"temperature={temp}" + ) + results.append(mean_reward_pg_optimizer) + + return results + + def run_q_learning_optimizer( + self, + input_param, + obj_func, + batch_size, + n_generations, + repeats, + ): + results = [] + for r in range(repeats): + print(f"\n\n**** QLearning Optimizer, Repeat={r} ****") + ql_optimizer = QLearningOptimizer( + input_param, + obj_func, + batch_size=batch_size, + anneal_rate=0.997, + ) + for i in range(n_generations): + # non-exploration at the last generation + if i == n_generations - 1: + ql_optimizer.temp = GREEDY_TEMP + + temp = ql_optimizer.temp + ( + sampled_solutions, + reward, + ) = ql_optimizer.optimize_step() + mean_reward_ql_optimizer = torch.mean(reward.data) + min_reward_ql_optimizer = torch.min(reward.data) + print( + f"Generation={i}, mean_reward={mean_reward_ql_optimizer}, " + f"min_reward={min_reward_ql_optimizer}, " + f"temp={temp}" + ) + results.append(mean_reward_ql_optimizer) + + return results + + def test_policy_gradient_vs_q_learning_discrete(self): + """ + Comparison between policy gradient and Q-learning-based optimizer + The input param has two axes, choice1 and choice2. + + The value achieved by different combinations of the two choices: + a b c + 1 0.43 0.9 0.45 + + 2 0.9 0.4 0.9 + + 3 0.45 0.9 0.45 + + In summary, the global minimum is at (choice1=2, choice2=b), but there are local minima + and maxima which easily hurdle an optimizer from finding the global minimum. 
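+        For example, starting from (choice1=1, choice2=a) with value 0.43, changing either + choice alone raises the value to 0.9, so a purely local/greedy move cannot reach the + 0.4 optimum at (choice1=2, choice2=b).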
+ + In this setting, Q-learning performs better than policy gradient + """ + input_param = ng.p.Dict( + choice1=ng.p.Choice(["1", "2", "3"]), + choice2=ng.p.Choice(["a", "b", "c"]), + ) + + def obj_func(sampled_sol: Dict[str, torch.Tensor]) -> torch.Tensor: + # sampled_sol format: + # key = choice_name + # val = choice_idx (a tensor of length `batch_size`) + assert list(sampled_sol.values())[0].dim() == 1 + batch_size = list(sampled_sol.values())[0].shape[0] + result = torch.zeros(batch_size, 1) + choice1 = sampled_sol["choice1"] + choice2 = sampled_sol["choice2"] + for i in range(batch_size): + if choice1[i] == 1 and choice2[i] == 1: + result[i] = 0.4 + elif choice1[i] == 0 and choice2[i] == 0: + result[i] = 0.43 + elif choice1[i] == 1 or choice2[i] == 1: + result[i] = 0.9 + else: + result[i] = 0.45 + return result + + batch_size = 32 + n_generations = 100 + repeat = 10 + + qlearning_res = self.run_q_learning_optimizer( + input_param, obj_func, batch_size, n_generations, repeat + ) + pg_res = self.run_policy_gradient_optimizer( + input_param, obj_func, batch_size, n_generations, repeat + ) + print(f"QLearning results over {repeat} repeats: {qlearning_res}") + print(f"PG results over {repeat} repeats: {pg_res}") + + assert ( + np.mean(qlearning_res) < 0.42 + ), "QLearning should end up better than local minimum (0.43)" + assert np.mean(qlearning_res) < np.mean( + pg_res + ), f"In this setting. qlearning should be better than policy gradient over {repeat} repeats" + + def test_sol_to_tensors(self): + input_param = discrete_input_param() + sampled_sol = { + "choice1": torch.tensor([0, 1, 2]), + "choice2": torch.tensor([1, 2, 0]), + "choice3": torch.tensor([0, 1, 0]), + "choice4": torch.tensor([4, 3, 2]), + "choice5": torch.tensor([1, 2, 3]), + } + tensor = torch.FloatTensor( + [ + [1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0], + ] + ) + sampled_tensor = sol_to_tensors(sampled_sol, input_param) + self.assertTrue(torch.all(tensor == sampled_tensor)) + + def test_bayesian_optimizer_its_random_mutation_discrete(self): + acq_type = "its" + mutation_type = "random" + input_param = discrete_input_param() + gt_net = create_ground_truth_net(input_param) + obj_func = create_discrete_choice_obj_func(input_param, gt_net) + optimizer = BayesianOptimizerBase( + param=input_param, + obj_func=obj_func, + start_temp=1.0, + min_temp=0.0, + acq_type=acq_type, + mutation_type=mutation_type, + ) + sampled_solution = { + "choice1": torch.tensor([0]), + "choice2": torch.tensor([1]), + "choice3": torch.tensor([0]), + "choice4": torch.tensor([1]), + "choice5": torch.tensor([0]), + } + optimizer._maintain_best_solutions(sampled_solution, torch.tensor([0.0])) + # no mutation + mutated_solution = optimizer.sample(1, 0.0) + self.assertEqual(sampled_solution, mutated_solution) + # mutation in one idx (at most) + mutated_solution = optimizer.sample(1, 1 / len(input_param)) + difference = 0 + for k in sorted(input_param.keys()): + if sampled_solution[k] != mutated_solution[k]: + difference += 1 + self.assertTrue(difference <= 1) + # mutation in two idxs (at most) + mutated_solution = optimizer.sample(1, 2 / len(input_param)) + difference = 0 + for k in sorted(input_param.keys()): + if sampled_solution[k] != mutated_solution[k]: + difference += 1 + self.assertTrue(difference <= 2) + + def test_bayessian_optimizer_its_random_mutation_ensembler_discrete(self): + batch_size = 8 + 
num_mutations = 10 + input_param = discrete_input_param() + gt_net = create_ground_truth_net(input_param) + obj_func = create_discrete_choice_obj_func(input_param, gt_net) + optimizer = BayesianMLPEnsemblerOptimizer( + param=input_param, + obj_func=obj_func, + batch_size=batch_size, + num_mutations=num_mutations, + anneal_rate=0.95, + ) + best_rs_result = random_sample(input_param, obj_func, n_generations=20) + n_generations = 200 + all_sampled_solutions = [] + for i in range(n_generations): + (sampled_solutions, reward, loss) = optimizer.optimize_step() + all_sampled_solutions.append(sampled_solutions) + mean_reward = torch.mean(reward.data) + print( + f"Generation={i}, mean_reward={mean_reward}, " + f"min_reward={torch.min(reward.data)}, " + f"Avg. loss={loss}," + ) + best_sol = optimizer.sample(1, 0.0) + eval_result = obj_func(best_sol) + + assert ( + abs(best_rs_result - eval_result) < BAYESSIAN_MLP_TEST_THRES + ), f"Learning not converged. best random search={best_rs_result}, eval result={eval_result}" + + sampled_solutions = {} + for k in sorted(input_param.keys()): + sampled_solutions[k] = torch.cat([sol[k] for sol in all_sampled_solutions]) + + acq_reward = optimizer.acquisition(sampled_sol=sampled_solutions) + min_acq_reward = torch.min(acq_reward).item() + best_sol_acq_reward = optimizer.acquisition(sampled_sol=best_sol).item() + + assert ( + abs(best_sol_acq_reward - min_acq_reward) < BAYESSIAN_MLP_CONV_THRES + ), f"Learning not converged. min acquisition reward={min_acq_reward}, best solution's acquisition reward={best_sol_acq_reward}" + + def test_bayessian_optimizer_its_random_mutation_backprop_discrete(self): + batch_size = 8 + num_mutations = 10 + input_param = discrete_input_param() + gt_net = create_ground_truth_net(input_param) + obj_func = create_discrete_choice_obj_func(input_param, gt_net) + optimizer = BayesianByBackpropOptimizer( + param=input_param, + obj_func=obj_func, + batch_size=batch_size, + num_mutations=num_mutations, + anneal_rate=0.95, + ) + best_rs_result = random_sample(input_param, obj_func, n_generations=20) + n_generations = 200 + all_sampled_solutions = [] + for i in range(n_generations): + (sampled_solutions, reward, loss) = optimizer.optimize_step() + all_sampled_solutions.append(sampled_solutions) + mean_reward = torch.mean(reward.data) + print( + f"Generation={i}, mean_reward={mean_reward}, " + f"min_reward={torch.min(reward.data)}, " + f"Avg. Elbo loss={loss}," + ) + best_sol = optimizer.sample(1, 0.0) + eval_result = obj_func(best_sol) + + assert ( + abs(best_rs_result - eval_result) < BAYESSIAN_MLP_TEST_THRES + ), f"Learning not converged. Best Random Search={best_rs_result}, Eval result={eval_result}" + + sampled_solutions = {} + for k in sorted(input_param.keys()): + sampled_solutions[k] = torch.cat([sol[k] for sol in all_sampled_solutions]) + + acq_reward = optimizer.acquisition(sampled_sol=sampled_solutions) + min_acq_reward = torch.min(acq_reward).item() + best_sol_acq_reward = optimizer.acquisition( + sampled_sol=best_sol, + ).item() + + assert ( + abs(best_sol_acq_reward - min_acq_reward) < BAYESSIAN_MLP_CONV_THRES + ), f"Learning not converged. Min acquisition reward={min_acq_reward}, best solution's acquisition reward={best_sol_acq_reward}" diff --git a/reagent/test/mab/__init__.py b/reagent/test/mab/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/test/mab/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
diff --git a/reagent/test/mab/test_mab.py b/reagent/test/mab/test_mab.py new file mode 100644 index 000000000..e81e17d7f --- /dev/null +++ b/reagent/test/mab/test_mab.py @@ -0,0 +1,410 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import unittest +from io import BytesIO +from itertools import cycle +from unittest import mock + +import numpy as np +import numpy.testing as npt +import torch +from parameterized.parameterized import parameterized +from pytorch_lightning import seed_everything +from reagent.mab.mab_algorithm import ( + get_arm_indices, + place_values_at_indices, + randomized_argmax, + reindex_multiple_tensors, +) +from reagent.mab.simulation import ( + BernoilliMAB, + compare_bandit_algos, + multiple_evaluations_bandit_algo, + single_evaluation_bandit_algo, +) +from reagent.mab.thompson_sampling import ( + BaseThompsonSampling, + BernoulliBetaThompson, + NormalGammaThompson, +) +from reagent.mab.ucb import BaseUCB, MetricUCB, UCB1, UCBTuned + +ALL_UCB_ALGOS = [ + ["MetricUCB", MetricUCB], + ["UCB1", UCB1], + ["UCBTuned", UCBTuned], +] + +ALL_THOMPSON_ALGOS = [ + ["NormalGammaThompson", NormalGammaThompson], + ["BernoulliBetaThompson", BernoulliBetaThompson], +] + +ALL_MAB_ALGOS = ALL_UCB_ALGOS + ALL_THOMPSON_ALGOS + + +class TestMAButils(unittest.TestCase): + def setUp(self): + seed_everything(1) + + def test_get_arm_indices_happy_case(self): + ids_of_all_arms = ["a", "b", "c", "z", "4"] + ids_of_arms_in_batch = ["z", "4", "b"] + idxs = get_arm_indices(ids_of_all_arms, ids_of_arms_in_batch) + self.assertListEqual(idxs, [3, 4, 1]) + + def test_get_arm_indices_fail(self): + ids_of_all_arms = ["a", "b", "c", "z", "4"] + ids_of_arms_in_batch = ["z", "4", "b", "o"] + with self.assertRaises(ValueError): + get_arm_indices(ids_of_all_arms, ids_of_arms_in_batch) + + def test_place_values_at_indices(self): + values = torch.tensor([3, 7, 11], dtype=torch.float) + idxs = [2, 3, 5] + len_ = 7 + result = place_values_at_indices(values, idxs, len_) + expected_result = torch.Tensor([0, 0, 3, 7, 0, 11, 0]) + npt.assert_array_equal(result.numpy(), expected_result.numpy()) + + def test_reindex_multiple_tensors(self): + values = ( + torch.tensor([3, 7, 11], dtype=torch.float), + torch.tensor([4, 2, 89], dtype=torch.float), + ) + all_ids = ["a", "b", "c", "z", "4"] + batch_ids = ["z", "4", "b"] + reindexed_values = reindex_multiple_tensors(all_ids, batch_ids, values) + npt.assert_equal( + reindexed_values[0].numpy(), np.array([0.0, 11.0, 0.0, 3.0, 7.0]) + ) + npt.assert_equal( + reindexed_values[1].numpy(), np.array([0.0, 89.0, 0.0, 4.0, 2.0]) + ) + + def _test_randomized_argmax(self, x, expected_idxs): + best_idxs = set() + for _ in range(1000): + best_idxs.add(randomized_argmax(x)) + self.assertSetEqual(best_idxs, expected_idxs) + + def test_randomized_argmax(self): + self._test_randomized_argmax(torch.tensor([1, 2, 3, 2, 3, 1, 3]), {2, 4, 6}) + self._test_randomized_argmax( + torch.tensor( + [1, torch.tensor(float("inf")), 3, 2, 3, torch.tensor(float("inf")), 3] + ), + {1, 5}, + ) + self._test_randomized_argmax( + torch.tensor( + [ + torch.tensor(float("inf")), + torch.tensor(float("inf")), + torch.tensor(float("inf")), + ] + ), + {0, 1, 2}, + ) + self._test_randomized_argmax(torch.tensor([1, 2, 3, 2, 3, 1, 5]), {6}) + + +class TestMAB(unittest.TestCase): + def setUp(self): + seed_everything(1) + + @parameterized.expand(ALL_MAB_ALGOS) + def test_batch_training(self, name, cls): + n_arms = 5 + b = cls(n_arms=n_arms) + total_obs_per_arm = 
torch.zeros(n_arms) + total_success_per_arm = torch.zeros(n_arms) + for _ in range(10): + n_obs_per_arm = torch.randint(0, 50, size=(n_arms,)).float() + n_success_per_arm = torch.rand(size=(n_arms,)) * n_obs_per_arm + total_obs_per_arm += n_obs_per_arm + total_success_per_arm += n_success_per_arm + + b.add_batch_observations( + n_obs_per_arm, + n_success_per_arm, + n_success_per_arm, # squared rewards are same as rewards + ) + + npt.assert_array_equal( + b.total_n_obs_per_arm.numpy(), total_obs_per_arm.numpy() + ) # observation counters are correct + npt.assert_array_equal( + b.total_sum_reward_per_arm.numpy(), total_success_per_arm.numpy() + ) # total reward counters are corect + npt.assert_array_equal( + b.total_sum_reward_squared_per_arm.numpy(), + total_success_per_arm.numpy(), + ) # squared rewards equal to rewards for Bernoulli bandit + + self.assertEqual( + b.total_n_obs_all_arms, total_obs_per_arm.sum().item() + ) # total observation counter correct + + avg_rewards = total_success_per_arm / total_obs_per_arm + npt.assert_allclose( + b.get_avg_reward_values().numpy(), avg_rewards.numpy() + ) # avg rewards computed correctly + + scores = b.get_scores() + forward_scores = b() + + # scores shape and type are correct + self.assertEqual(scores.shape, (n_arms,)) + self.assertIsInstance(scores, torch.Tensor) + self.assertEqual(forward_scores.shape, (n_arms,)) + self.assertIsInstance(forward_scores, torch.Tensor) + + if isinstance(b, BaseUCB): + npt.assert_array_less( + avg_rewards, + scores.numpy(), + ) # UCB scores greater than avg rewards + + valid_indices = b.total_n_obs_per_arm.numpy() >= b.min_num_obs_per_arm + npt.assert_array_equal( + scores[valid_indices], forward_scores[valid_indices] + ) + + @parameterized.expand(ALL_MAB_ALGOS) + def test_class_method(self, name, cls): + n_arms = 5 + n_obs_per_arm = torch.randint(0, 50, size=(n_arms,)).float() + n_success_per_arm = torch.rand(size=(n_arms,)) * n_obs_per_arm + scores = cls.get_scores_from_batch( + n_obs_per_arm, n_success_per_arm, n_success_per_arm + ) + + # UCB scores shape and type are correct + self.assertEqual(scores.shape, (n_arms,)) + self.assertIsInstance(scores, torch.Tensor) + + if issubclass(cls, BaseUCB): + avg_rewards = n_success_per_arm / n_obs_per_arm + + npt.assert_array_less( + avg_rewards.numpy(), + np.where( + n_obs_per_arm.numpy() >= 1, + scores.numpy(), + np.nan, + ), + ) # UCB scores greater than avg rewards + + @parameterized.expand(ALL_MAB_ALGOS) + def test_online_training(self, name, cls): + n_arms = 5 + total_n_obs = 100 + min_num_obs_per_arm = 15 + b = cls(n_arms=n_arms, min_num_obs_per_arm=min_num_obs_per_arm) + total_obs_per_arm = torch.zeros(n_arms) + total_success_per_arm = torch.zeros(n_arms) + true_ctrs = torch.rand(size=(n_arms,)) + for _ in range(total_n_obs): + chosen_arm = b.get_action() + reward = torch.bernoulli(true_ctrs[int(chosen_arm)]) + b.add_single_observation(chosen_arm, reward) + total_obs_per_arm[int(chosen_arm)] += 1 + total_success_per_arm[int(chosen_arm)] += reward + # each arm has at least the required number of observations + self.assertLessEqual(min_num_obs_per_arm, b.total_n_obs_per_arm.min().item()) + online_scores = b() + offline_scores = cls.get_scores_from_batch( + total_obs_per_arm, total_success_per_arm, total_success_per_arm + ) + if isinstance(b, BaseUCB): + npt.assert_array_equal( + online_scores.numpy(), offline_scores.numpy() + ) # UCB scores computed by online and offline algorithms match + elif isinstance(b, NormalGammaThompson): + b_batch = cls(n_arms=n_arms) + 
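+            # retrain a fresh bandit on the same totals as a single batch; its posterior + # parameters (mus, gamma_rates) should match those obtained from the online updates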
b_batch.add_batch_observations( + total_obs_per_arm, + total_success_per_arm, + total_success_per_arm, # squared rewards are same as rewards + ) + + # make sure that posterior parameters are the same + npt.assert_allclose( + b_batch.gamma_rates.numpy(), b.gamma_rates.numpy(), rtol=1e-5 + ) + npt.assert_allclose(b_batch.mus.numpy(), b.mus.numpy(), rtol=1e-5) + npt.assert_array_equal( + b_batch.total_n_obs_per_arm.numpy(), b.total_n_obs_per_arm.numpy() + ) + npt.assert_array_equal( + b_batch.total_sum_reward_per_arm.numpy(), + b.total_sum_reward_per_arm.numpy(), + ) + npt.assert_array_equal( + b_batch.total_sum_reward_squared_per_arm.numpy(), + b.total_sum_reward_squared_per_arm.numpy(), + ) + + elif isinstance(b, BaseThompsonSampling): + npt.assert_raises( + AssertionError, + npt.assert_array_equal, + online_scores.numpy(), + offline_scores.numpy(), + ) + # Thompson sampling scores are stochastic, so shouldn't be equal + + @parameterized.expand(ALL_MAB_ALGOS) + def test_save_load(self, name, cls): + n_arms = 5 + b = cls(n_arms=n_arms) + n_obs_per_arm = torch.randint(0, 100, size=(n_arms,)).float() + n_success_per_arm = torch.rand(size=(n_arms,)) * n_obs_per_arm + b.add_batch_observations(n_obs_per_arm, n_success_per_arm, n_success_per_arm) + + avg_rewards_before_save = b.get_avg_reward_values() + + if isinstance(b, BaseUCB): + ucb_scores_before_save = b.get_scores() + + f_write = BytesIO() + torch.save(b, f_write) + f_write.seek(0) + f_read = BytesIO(f_write.read()) + f_write.close() + b_loaded = torch.load(f_read) + f_read.close() + + if isinstance(b, BaseUCB): + ucb_scores_after_load = b_loaded.get_scores() + npt.assert_array_equal( + ucb_scores_before_save.numpy(), ucb_scores_after_load.numpy() + ) # UCB scores are same before saving and after loading + + avg_rewards_after_load = b_loaded.get_avg_reward_values() + npt.assert_array_equal( + avg_rewards_before_save.numpy(), avg_rewards_after_load.numpy() + ) # avg rewards are same before saving and after loading + + self.assertListEqual(b.arm_ids, b_loaded.arm_ids) + + @parameterized.expand(ALL_MAB_ALGOS) + def test_custom_arm_ids(self, name, cls): + # arm 0 earns no rewards, so we specify arm_ids 1,...,N explicitly + n_arms = 5 + b = cls(n_arms=n_arms) + n_obs_per_arm = torch.randint(0, 100, size=(n_arms - 1,)).float() + n_success_per_arm = torch.rand(size=(n_arms - 1,)) * n_obs_per_arm + b.add_batch_observations( + n_obs_per_arm, + n_success_per_arm, + n_success_per_arm, + arm_ids=list(map(str, range(1, n_arms))), + ) + + self.assertEqual(b.total_n_obs_per_arm[0], 0) + npt.assert_array_equal(n_obs_per_arm.numpy(), b.total_n_obs_per_arm[1:].numpy()) + npt.assert_array_equal( + n_success_per_arm.numpy(), b.total_sum_reward_per_arm[1:].numpy() + ) + npt.assert_array_equal( + n_success_per_arm.numpy(), + b.total_sum_reward_squared_per_arm[1:].numpy(), + ) + + +class TestSimulation(unittest.TestCase): + def setUp(self): + seed_everything(1) + + def test_single_evaluation(self): + bandit = BernoilliMAB(100, torch.tensor([0.3, 0.5])) + algo = UCB1(n_arms=2) + regret_trajectory = single_evaluation_bandit_algo(bandit, algo) + + self.assertIsInstance(regret_trajectory, np.ndarray) + self.assertEqual(regret_trajectory.shape, (bandit.max_steps,)) + + # make sure regret is non-decreasing + self.assertGreaterEqual(np.diff(regret_trajectory, prepend=0).min(), 0) + + def test_single_evaluation_update_every(self): + num_steps = 100 + update_every = 10 + + bandit = BernoilliMAB(num_steps, torch.tensor([0.3, 0.5])) + algo = UCB1(n_arms=2) + 
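+        # mock out the update and action-selection methods so the test can count how often + # each is called under the given update_every / freeze_scores_btw_updates settings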
algo.add_batch_observations = mock.Mock() + algo.get_action = mock.Mock(side_effect=cycle(["0", "1"])) + regret_trajectory = single_evaluation_bandit_algo( + bandit, algo, update_every=update_every, freeze_scores_btw_updates=False + ) + self.assertEqual(len(regret_trajectory), num_steps) + self.assertEqual( + algo.add_batch_observations.call_count, num_steps / update_every + ) + self.assertEqual(algo.get_action.call_count, num_steps) + + bandit = BernoilliMAB(num_steps, torch.tensor([0.3, 0.5])) + algo = UCB1(n_arms=2) + algo.add_batch_observations = mock.Mock() + algo.get_action = mock.Mock(side_effect=cycle(["0", "1"])) + regret_trajectory = single_evaluation_bandit_algo( + bandit, algo, update_every=update_every, freeze_scores_btw_updates=True + ) + self.assertEqual(len(regret_trajectory), num_steps) + self.assertEqual( + algo.add_batch_observations.call_count, num_steps / update_every + ) + self.assertEqual(algo.get_action.call_count, num_steps / update_every) + + def test_multiple_evaluations_bandit_algo(self): + max_steps = 20 + regret_trajectory = multiple_evaluations_bandit_algo( + algo_cls=UCB1, + bandit_cls=BernoilliMAB, + n_bandits=3, + max_steps=max_steps, + algo_kwargs={"n_arms": 2}, + bandit_kwargs={"probs": torch.Tensor([0.3, 0.5])}, + ) + + self.assertIsInstance(regret_trajectory, np.ndarray) + self.assertEqual(regret_trajectory.shape, (max_steps,)) + + # make sure regret is non-decreasing + self.assertGreaterEqual(np.diff(regret_trajectory, prepend=0).min(), 0) + + def test_compare_bandit_algos(self): + max_steps = 1000 + algo_clss = [UCB1, MetricUCB, BernoulliBetaThompson] + algo_names, regret_trajectories = compare_bandit_algos( + algo_clss=algo_clss, + bandit_cls=BernoilliMAB, + n_bandits=5, + max_steps=max_steps, + algo_kwargs=[ + {"n_arms": 2, "estimate_variance": False}, + {"n_arms": 2}, + {"n_arms": 2}, + ], + bandit_kwargs={"probs": torch.Tensor([0.1, 0.2])}, + ) + + self.assertEqual(len(algo_names), len(algo_clss)) + self.assertEqual(len(regret_trajectories), len(algo_clss)) + + self.assertListEqual(algo_names, ["UCB1", "MetricUCB", "BernoulliBetaThompson"]) + + for traj in regret_trajectories: + self.assertIsInstance(traj, np.ndarray) + self.assertEqual(traj.shape, (max_steps,)) + + # make sure regret is non-decreasing + self.assertGreaterEqual(np.diff(traj, prepend=0).min(), 0) + + # UCB1 should be much worse than MetricUCB in this setting because UCB1 assumes + # variance is equal to 1, while MetricUCB estimates it + self.assertGreater(regret_trajectories[0][-1], regret_trajectories[1][-1]) diff --git a/reagent/test/models/__init__.py b/reagent/test/models/__init__.py index e69de29bb..5be5087fd 100644 --- a/reagent/test/models/__init__.py +++ b/reagent/test/models/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
diff --git a/reagent/test/models/test_actor.py b/reagent/test/models/test_actor.py index 5bfa8f622..df0d32330 100644 --- a/reagent/test/models/test_actor.py +++ b/reagent/test/models/test_actor.py @@ -4,20 +4,64 @@ import logging import unittest +from typing import Union + import numpy.testing as npt import torch +import torch.nn as nn +from reagent.core import types as rlt from reagent.models.actor import ( DirichletFullyConnectedActor, FullyConnectedActor, GaussianFullyConnectedActor, ) -from reagent.test.models.test_utils import check_save_load - +from reagent.test.models.test_utils import run_model_jit_trace logger = logging.getLogger(__name__) -class TestFullyConnectedActor(unittest.TestCase): +class ActorTorchScriptWrapper(nn.Module): + def __init__( + self, + model: Union[ + FullyConnectedActor, + GaussianFullyConnectedActor, + DirichletFullyConnectedActor, + ], + ): + super().__init__() + self.model = model + + def forward(self, state_float_features: torch.Tensor): + actor_output = self.model(rlt.FeatureData(float_features=state_float_features)) + return actor_output.action, actor_output.log_prob + + +class TestActorBase(unittest.TestCase): + def check_save_load( + self, + model: Union[ + FullyConnectedActor, + GaussianFullyConnectedActor, + DirichletFullyConnectedActor, + ], + stochastic: bool, + ): + # jit.trace can't trace models with stochastic output + if stochastic: + return + + script_model = ActorTorchScriptWrapper(model) + + def compare_func(model_output, script_model_output): + action, log_prob = script_model_output + assert torch.all(action == model_output.action) + assert torch.all(log_prob == model_output.log_prob) + + run_model_jit_trace(model, script_model, compare_func) + + +class TestFullyConnectedActor(TestActorBase): def test_basic(self): state_dim = 8 action_dim = 4 @@ -34,6 +78,7 @@ def test_basic(self): model.eval() action = model(input) self.assertEqual((1, action_dim), action.action.shape) + self.assertEqual((1, 1), action.log_prob.shape) def test_save_load(self): state_dim = 8 @@ -45,10 +90,7 @@ def test_save_load(self): activations=["relu", "relu"], use_batch_norm=False, ) - expected_num_params, expected_num_inputs, expected_num_outputs = 6, 1, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model, stochastic=False) def test_save_load_batch_norm(self): state_dim = 8 @@ -62,13 +104,10 @@ def test_save_load_batch_norm(self): ) # Freezing batch_norm model.eval() - expected_num_params, expected_num_inputs, expected_num_outputs = 21, 1, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model, stochastic=False) -class TestGaussianFullyConnectedActor(unittest.TestCase): +class TestGaussianFullyConnectedActor(TestActorBase): def test_basic(self): state_dim = 8 action_dim = 4 @@ -85,6 +124,7 @@ def test_basic(self): model.eval() action = model(input) self.assertEqual((1, action_dim), action.action.shape) + self.assertEqual((1, 1), action.log_prob.shape) def test_save_load(self): state_dim = 8 @@ -96,16 +136,21 @@ def test_save_load(self): activations=["relu", "relu"], use_batch_norm=False, ) - expected_num_params, expected_num_inputs, expected_num_outputs = 6, 1, 1 - # Actor output is stochastic and won't match between PyTorch & Caffe2 - check_save_load( - self, - model, - expected_num_params, - expected_num_inputs, - expected_num_outputs, - check_equality=False, + self.check_save_load(model, stochastic=True) + + 
def test_save_load_batch_norm(self): + state_dim = 8 + action_dim = 4 + model = GaussianFullyConnectedActor( + state_dim, + action_dim, + sizes=[7, 6], + activations=["relu", "relu"], + use_batch_norm=True, ) + # Freezing batch_norm + model.eval() + self.check_save_load(model, stochastic=True) def test_get_log_prob(self): torch.manual_seed(0) @@ -122,11 +167,11 @@ def test_get_log_prob(self): self.assertEqual((1, state_dim), input.float_features.shape) action = model(input) squashed_action = action.action.detach() - action_log_prob = model.get_log_prob(input, squashed_action) + action_log_prob = model.get_log_prob(input, squashed_action).detach() npt.assert_allclose(action.log_prob.detach(), action_log_prob, rtol=1e-4) -class TestDirichletFullyConnectedActor(unittest.TestCase): +class TestDirichletFullyConnectedActor(TestActorBase): def test_basic(self): state_dim = 8 action_dim = 4 @@ -143,6 +188,7 @@ def test_basic(self): model.eval() action = model(input) self.assertEqual((1, action_dim), action.action.shape) + self.assertEqual((1, 1), action.log_prob.shape) def test_save_load(self): state_dim = 8 @@ -154,12 +200,18 @@ def test_save_load(self): activations=["relu", "relu"], use_batch_norm=False, ) - expected_num_params, expected_num_inputs, expected_num_outputs = 7, 1, 1 - check_save_load( - self, - model, - expected_num_params, - expected_num_inputs, - expected_num_outputs, - check_equality=False, + self.check_save_load(model, stochastic=True) + + def test_save_load_batch_norm(self): + state_dim = 8 + action_dim = 4 + model = DirichletFullyConnectedActor( + state_dim, + action_dim, + sizes=[7, 6], + activations=["relu", "relu"], + use_batch_norm=True, ) + # Freezing batch_norm + model.eval() + self.check_save_load(model, stochastic=True) diff --git a/reagent/test/models/test_base.py b/reagent/test/models/test_base.py deleted file mode 100644 index d162a587c..000000000 --- a/reagent/test/models/test_base.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
- -import dataclasses -import logging -import unittest -from typing import Any - -import torch -import torch.nn as nn -from reagent import types as rlt -from reagent.models.base import ModelBase -from reagent.test.models.test_utils import check_save_load - - -logger = logging.getLogger(__name__) - - -@dataclasses.dataclass -class ModelOutput: - # These should be torch.Tensor but the type checking failed when I used it - sum: Any - mul: Any - plus_one: Any - linear: Any - - -class Model(ModelBase): - def __init__(self): - super().__init__() - self.linear = nn.Linear(4, 1) - - def input_prototype(self): - return ( - rlt.FeatureData(torch.randn([1, 4])), - rlt.FeatureData(torch.randn([1, 4])), - ) - - def forward(self, state, action): - state = state.float_features - action = action.float_features - - return ModelOutput( - state + action, state * action, state + 1, self.linear(state) - ) - - -class TestBase(unittest.TestCase): - def test_get_predictor_export_meta_and_workspace(self): - model = Model() - - # 2 params + 1 const - expected_num_params, expected_num_inputs, expected_num_outputs = 3, 2, 4 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) diff --git a/reagent/test/models/test_bcq.py b/reagent/test/models/test_bcq.py index 088763449..cd746b8e8 100644 --- a/reagent/test/models/test_bcq.py +++ b/reagent/test/models/test_bcq.py @@ -6,18 +6,38 @@ import numpy.testing as npt import torch + +import torch.nn as nn import torch.nn.init as init -from reagent import types as rlt +from reagent.core import types as rlt from reagent.models.bcq import BatchConstrainedDQN from reagent.models.dqn import FullyConnectedDQN from reagent.models.fully_connected_network import FullyConnectedNetwork -from reagent.test.models.test_utils import check_save_load +from reagent.test.models.test_utils import run_model_jit_trace logger = logging.getLogger(__name__) +class BatchConstrainedDQNTorchScriptWrapper(nn.Module): + def __init__(self, model: BatchConstrainedDQN): + super().__init__() + self.model = model + + def forward(self, state_float_features: torch.Tensor): + return self.model( + rlt.FeatureData(float_features=state_float_features), + ) + + class TestBCQ(unittest.TestCase): + def check_save_load(self, model: BatchConstrainedDQN): + """ + Test if a model is torch.jit.tracable + """ + script_model = BatchConstrainedDQNTorchScriptWrapper(model) + run_model_jit_trace(model, script_model) + def test_basic(self): state_dim = 8 action_dim = 4 @@ -54,11 +74,7 @@ def test_save_load(self): imitator_network=imitator_network, bcq_drop_threshold=0.05, ) - # 6 for DQN + 6 for Imitator Network + 2 for BCQ constants - expected_num_params, expected_num_inputs, expected_num_outputs = 14, 1, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model) def test_forward_pass(self): torch.manual_seed(123) diff --git a/reagent/test/models/test_critic.py b/reagent/test/models/test_critic.py index 718274c71..ffee7a969 100644 --- a/reagent/test/models/test_critic.py +++ b/reagent/test/models/test_critic.py @@ -4,14 +4,39 @@ import logging import unittest +import torch + +import torch.nn as nn +from reagent.core import types as rlt from reagent.models.critic import FullyConnectedCritic -from reagent.test.models.test_utils import check_save_load +from reagent.test.models.test_utils import run_model_jit_trace logger = logging.getLogger(__name__) +class FullyConnectedCriticTorchScriptWrapper(nn.Module): + def 
__init__(self, model: FullyConnectedCritic): + super().__init__() + self.model = model + + def forward( + self, state_float_features: torch.Tensor, action_float_features: torch.Tensor + ): + return self.model( + rlt.FeatureData(float_features=state_float_features), + rlt.FeatureData(float_features=action_float_features), + ) + + class TestFullyConnectedCritic(unittest.TestCase): + def check_save_load(self, model: FullyConnectedCritic): + """ + Test if a model is torch.jit.tracable + """ + script_model = FullyConnectedCriticTorchScriptWrapper(model) + run_model_jit_trace(model, script_model) + def test_basic(self): state_dim = 8 action_dim = 4 @@ -40,10 +65,7 @@ def test_save_load(self): activations=["relu", "relu"], use_batch_norm=False, ) - expected_num_params, expected_num_inputs, expected_num_outputs = 6, 2, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model) def test_save_load_batch_norm(self): state_dim = 8 @@ -57,7 +79,4 @@ def test_save_load_batch_norm(self): ) # Freezing batch_norm model.eval() - expected_num_params, expected_num_inputs, expected_num_outputs = 21, 2, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model) diff --git a/reagent/test/models/test_deep_represent_linucb_model.py b/reagent/test/models/test_deep_represent_linucb_model.py new file mode 100644 index 000000000..0cfb29617 --- /dev/null +++ b/reagent/test/models/test_deep_represent_linucb_model.py @@ -0,0 +1,28 @@ +import unittest + +from reagent.models.deep_represent_linucb import DeepRepresentLinearRegressionUCB + + +class TestDeepRepresentLinearRegressionUCB(unittest.TestCase): + def test_basic(self): + raw_input_dim = 9 + sizes = [6] + linucb_inp_dim = 3 + activations = ["relu"] + + model = DeepRepresentLinearRegressionUCB( + raw_input_dim=raw_input_dim, + sizes=sizes, + linucb_inp_dim=linucb_inp_dim, + activations=activations, + ) + + raw_input = model.input_prototype() + self.assertEqual((1, raw_input_dim), raw_input.shape) # check input size + self.assertEqual( + (1, linucb_inp_dim), # check deep_represent output size + model.deep_represent_layers(raw_input).shape, + ) + model.eval() + output = model(raw_input) # check final output size + self.assertEqual((1,), output.shape) # ucb is 0-d tensor diff --git a/reagent/test/models/test_disjoint_linucb_predictor.py b/reagent/test/models/test_disjoint_linucb_predictor.py new file mode 100644 index 000000000..67dc59cc7 --- /dev/null +++ b/reagent/test/models/test_disjoint_linucb_predictor.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
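+# Tests for the disjoint LinUCB predictor: the batched quadratic-form helper and the + # UCB scores, checked against hand-computed loop / closed-form results.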
+ +import unittest + +import numpy as np +import numpy.testing as npt +import torch +from reagent.core.types import CBInput +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.samplers.discrete_sampler import GreedyActionSampler +from reagent.models.disjoint_linucb_predictor import ( + batch_quadratic_form_multi_arms, + DisjointLinearRegressionUCB, +) +from reagent.training.cb.disjoint_linucb_trainer import DisjointLinUCBTrainer + + +class TestDisjointLinearRegressionUCBUtils(unittest.TestCase): + def test_batch_quadratic_form(self) -> None: + def check_correctness(x, A): + batch_result = batch_quadratic_form_multi_arms(x, A) + batch_size = x.size()[0] + num_arms = A.size()[0] + loop_result = torch.zeros((batch_size, num_arms)) # batch_size * num_arms + + for i in range(num_arms): + a = A[i] + for j in range(batch_size): + loop_result[j][i] = x[j].t() @ a @ x[j] + npt.assert_allclose(batch_result.numpy(), loop_result.numpy(), rtol=1e-3) + + x1 = torch.tensor([[1.0, 4.3], [3.2, 9.8]]) + A1 = torch.tensor( + [[[2.0, 1.0], [2.4, 0.5]], [[2.0, 0], [0, 3.0]], [[4.6, 8.0], [3.6, 0.7]]] + ) + check_correctness(x1, A1) + + torch.manual_seed(0) + # x (B,N); A (num_arms, N, N) + + # equal num_arms and batch size + x2 = torch.randn((8, 10)) + A2 = torch.randn((8, 10, 10)) + check_correctness(x2, A2) + + # equal batch size and N + x3 = torch.randn((8, 8)) + A3 = torch.randn((3, 8, 8)) + check_correctness(x3, A3) + + # equal num_arms and N + x4 = torch.randn((10, 3)) + A4 = torch.randn((3, 3, 3)) + check_correctness(x4, A4) + + # batch size != N != num_arms + x5 = torch.randn((10, 8)) + A5 = torch.randn((4, 8, 8)) + check_correctness(x5, A5) + + # batch size = N = num_arms + x6 = torch.randn((10, 10)) + A6 = torch.randn((10, 10, 10)) + check_correctness(x6, A6) + + +class TestDisjointLinearRegressionUCB(unittest.TestCase): + def test_call_ucb(self) -> None: + inputs = [] + # y = x1+x2 + inputs.append( + CBInput( + context_arm_features=torch.tensor([[1.0, 2.0], [1.0, 3.0]]), + reward=torch.tensor([[3.0], [4.0]]), + ) + ) + # y = 2x1 + x2 + inputs.append( + CBInput( + context_arm_features=torch.tensor([[2.0, 3.0], [1.0, 5.0]]), + reward=torch.tensor([[7.0], [7.0]]), + ) + ) + # y = 2x1 + 2x2 + inputs.append( + CBInput( + context_arm_features=torch.tensor([[0.5, 3.0], [1.8, 5.1]]), + reward=torch.tensor([[7.0], [13.8]]), + ) + ) + model = DisjointLinearRegressionUCB(num_arms=3, input_dim=2, l2_reg_lambda=0.0) + trainer = DisjointLinUCBTrainer( + Policy(scorer=model, sampler=GreedyActionSampler()) + ) + trainer.training_step(inputs, batch_idx=0) + trainer.on_train_epoch_end() + + inp = torch.tensor([[1.0, 5.0], [1.0, 6.0]]) + alpha = 1.5 + out = model(inp, ucb_alpha=alpha) + + def calculate_mean(x, model_type): + if model_type == 1: + return x[0] + x[1] + elif model_type == 2: + return 2 * x[0] + x[1] + elif model_type == 3: + return 2 * x[0] + 2 * x[1] + + expected_out = np.zeros((2, 3)) + for i in range(2): + x = inp[i] + for j in range(3): + expected_out[i, j] = calculate_mean(x, j + 1) + alpha * np.sqrt( + x.numpy() @ model.inv_A[j].numpy() @ x.numpy() + ) + self.assertIsInstance(out, torch.Tensor) + self.assertEqual(tuple(out.shape), (2, 3)) + npt.assert_allclose(out.numpy(), expected_out, rtol=1e-4) diff --git a/reagent/test/models/test_dqn.py b/reagent/test/models/test_dqn.py index 7b5a34acb..190607967 100644 --- a/reagent/test/models/test_dqn.py +++ b/reagent/test/models/test_dqn.py @@ -4,14 +4,35 @@ import logging import unittest -from reagent.models.dqn import 
FullyConnectedDQN -from reagent.test.models.test_utils import check_save_load +import torch +import torch.nn as nn +from reagent.core import types as rlt +from reagent.models.dqn import FullyConnectedDQN +from reagent.test.models.test_utils import run_model_jit_trace logger = logging.getLogger(__name__) +class FullyConnectedDQNTorchScriptWrapper(nn.Module): + def __init__(self, model: FullyConnectedDQN): + super().__init__() + self.model = model + + def forward(self, state_float_features: torch.Tensor): + return self.model( + rlt.FeatureData(float_features=state_float_features), + ) + + class TestFullyConnectedDQN(unittest.TestCase): + def check_save_load(self, model: FullyConnectedDQN): + """ + Test if a model is torch.jit.tracable + """ + script_model = FullyConnectedDQNTorchScriptWrapper(model) + run_model_jit_trace(model, script_model) + def test_basic(self): state_dim = 8 action_dim = 4 @@ -39,10 +60,7 @@ def test_save_load(self): activations=["relu", "relu"], use_batch_norm=False, ) - expected_num_params, expected_num_inputs, expected_num_outputs = 6, 1, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model) def test_save_load_batch_norm(self): state_dim = 8 @@ -56,7 +74,4 @@ def test_save_load_batch_norm(self): ) # Freezing batch_norm model.eval() - expected_num_params, expected_num_inputs, expected_num_outputs = 21, 1, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model) diff --git a/reagent/test/models/test_dueling_q_network.py b/reagent/test/models/test_dueling_q_network.py index 6ce91968e..2fa9f36b3 100644 --- a/reagent/test/models/test_dueling_q_network.py +++ b/reagent/test/models/test_dueling_q_network.py @@ -3,15 +3,61 @@ import logging import unittest +from typing import Union -from reagent.models.dueling_q_network import DuelingQNetwork, ParametricDuelingQNetwork -from reagent.test.models.test_utils import check_save_load +import torch +import torch.nn as nn +from reagent.core import types as rlt +from reagent.models.dueling_q_network import DuelingQNetwork, ParametricDuelingQNetwork +from reagent.test.models.test_utils import run_model_jit_trace logger = logging.getLogger(__name__) +class DuelingQNetworkTorchScriptWrapper(nn.Module): + def __init__( + self, + model: Union[ + DuelingQNetwork, + ParametricDuelingQNetwork, + ], + ): + super().__init__() + self.model = model + + def forward(self, state_float_features: torch.Tensor): + return self.model(rlt.FeatureData(float_features=state_float_features)) + + +class ParametricDuelingQNetworkTorchScriptWrapper(nn.Module): + def __init__( + self, + model: Union[ + DuelingQNetwork, + ParametricDuelingQNetwork, + ], + ): + super().__init__() + self.model = model + + def forward( + self, state_float_features: torch.Tensor, action_float_features: torch.Tensor + ): + return self.model( + rlt.FeatureData(float_features=state_float_features), + rlt.FeatureData(float_features=action_float_features), + ) + + class TestDuelingQNetwork(unittest.TestCase): + def check_save_load(self, model: Union[DuelingQNetwork, ParametricDuelingQNetwork]): + if isinstance(model, ParametricDuelingQNetwork): + script_model = ParametricDuelingQNetworkTorchScriptWrapper(model) + else: + script_model = DuelingQNetworkTorchScriptWrapper(model) + run_model_jit_trace(model, script_model) + def test_discrete_action(self): state_dim = 8 action_dim = 4 @@ -49,10 +95,7 @@ def 
test_save_load_discrete_action(self): model = DuelingQNetwork.make_fully_connected( state_dim, action_dim, layers=[8, 4], activations=["relu", "relu"] ) - expected_num_params, expected_num_inputs, expected_num_outputs = 22, 1, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model) def test_save_load_parametric_action(self): state_dim = 8 @@ -60,37 +103,28 @@ def test_save_load_parametric_action(self): model = ParametricDuelingQNetwork.make_fully_connected( state_dim, action_dim, [8, 4], ["relu", "relu"] ) - expected_num_params, expected_num_inputs, expected_num_outputs = 22, 2, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model) def test_save_load_discrete_action_batch_norm(self): state_dim = 8 action_dim = 4 model = DuelingQNetwork.make_fully_connected( - state_dim, action_dim, layers=[8, 4], activations=["relu", "relu"] + state_dim, + action_dim, + layers=[8, 4], + activations=["relu", "relu"], + use_batch_norm=True, ) # Freezing batch_norm model.eval() - # Number of expected params is the same because DuelingQNetwork always - # initialize batch norm layer even if it doesn't use it. - expected_num_params, expected_num_inputs, expected_num_outputs = 22, 1, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model) def test_save_load_parametric_action_batch_norm(self): state_dim = 8 action_dim = 4 model = ParametricDuelingQNetwork.make_fully_connected( - state_dim, action_dim, [8, 4], ["relu", "relu"] + state_dim, action_dim, [8, 4], ["relu", "relu"], use_batch_norm=True ) # Freezing batch_norm model.eval() - # Number of expected params is the same because DuelingQNetwork always - # initialize batch norm layer even if it doesn't use it. - expected_num_params, expected_num_inputs, expected_num_outputs = 22, 2, 1 - check_save_load( - self, model, expected_num_params, expected_num_inputs, expected_num_outputs - ) + self.check_save_load(model) diff --git a/reagent/test/models/test_linear_regression_ucb.py b/reagent/test/models/test_linear_regression_ucb.py new file mode 100644 index 000000000..21f9ef32f --- /dev/null +++ b/reagent/test/models/test_linear_regression_ucb.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
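+# Tests for LinearRegressionUCB: the batch_quadratic_form helper (2-D and 3-D inputs) and + # predictions with and without the UCB bonus, checked against closed-form values.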
+ +import unittest + +import numpy as np +import numpy.testing as npt +import torch +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.samplers.discrete_sampler import GreedyActionSampler +from reagent.models.linear_regression import batch_quadratic_form, LinearRegressionUCB +from reagent.training.cb.linucb_trainer import LinUCBTrainer + + +class TestLinearRegressionUCBUtils(unittest.TestCase): + def test_batch_quadratic_form(self) -> None: + x = torch.tensor([[1.0, 4.3], [3.2, 9.8]]) + A = torch.tensor([[2.0, 1.0], [2.4, 0.5]]) + batch_result = batch_quadratic_form(x, A) + loop_result = torch.zeros(2) + for i in range(2): + loop_result[i] = x[i].t() @ A @ x[i] + npt.assert_allclose(batch_result.numpy(), loop_result.numpy()) + + def test_batch_quadratic_form_3d(self) -> None: + x = torch.tensor([[[1.0, 4.3], [3.2, 9.8]], [[1.2, 4.1], [3.0, 7.8]]]) + A = torch.tensor([[2.0, 1.0], [2.4, 0.5]]) + batch_result = batch_quadratic_form(x, A) + loop_result = torch.zeros(2, 2) + for i in range(2): + for j in range(2): + loop_result[i][j] = x[i, j].t() @ A @ x[i, j] + npt.assert_allclose(batch_result.numpy(), loop_result.numpy()) + + +class TestLinearRegressionUCB(unittest.TestCase): + def test_call_no_ucb(self) -> None: + x = torch.tensor([[1.0, 2.0], [1.0, 3.0]]) # y=x+1 + y = torch.tensor([3.0, 4.0]) + model = LinearRegressionUCB(2, ucb_alpha=0, l2_reg_lambda=0.0) + trainer = LinUCBTrainer(Policy(scorer=model, sampler=GreedyActionSampler())) + trainer.update_params(x, y) + + inp = torch.tensor([[1.0, 5.0], [1.0, 6.0]]) + out = model(inp) + + self.assertIsInstance(out, torch.Tensor) + self.assertEqual(tuple(out.shape), (2,)) + npt.assert_allclose(out.numpy(), np.array([6.0, 7.0]), rtol=1e-4) + + def test_call_ucb(self) -> None: + x = torch.tensor([[1.0, 2.0], [1.0, 3.0]]) # y=x+1 + y = torch.tensor([3.0, 4.0]) + model = LinearRegressionUCB(2, l2_reg_lambda=0.0) + trainer = LinUCBTrainer(Policy(scorer=model, sampler=GreedyActionSampler())) + trainer.update_params(x, y) + + inp = torch.tensor([[1.0, 5.0], [1.0, 6.0]]) + alpha = 1.5 + out = model(inp, ucb_alpha=alpha) + + expected_out = np.zeros(2) + expected_out[0] = 6.0 + alpha * np.sqrt( + inp[0].numpy() + @ (model.inv_avg_A / model.sum_weight).numpy() + @ inp[0].numpy() + ) + expected_out[1] = 7.0 + alpha * np.sqrt( + inp[1].numpy() + @ (model.inv_avg_A / model.sum_weight).numpy() + @ inp[1].numpy() + ) + + self.assertIsInstance(out, torch.Tensor) + self.assertEqual(tuple(out.shape), (2,)) + npt.assert_allclose(out.numpy(), expected_out, rtol=1e-4) diff --git a/reagent/test/models/test_no_soft_update_embedding.py b/reagent/test/models/test_no_soft_update_embedding.py index 0dd191439..17cf09f8c 100644 --- a/reagent/test/models/test_no_soft_update_embedding.py +++ b/reagent/test/models/test_no_soft_update_embedding.py @@ -8,12 +8,10 @@ import torch import torch.nn as nn from reagent.models.no_soft_update_embedding import NoSoftUpdateEmbedding -from reagent.parameters import RLParameters -from reagent.training.rl_trainer_pytorch import RLTrainer class Model(nn.Module): - def __init__(self): + def __init__(self) -> None: super().__init__() self.embedding = NoSoftUpdateEmbedding(10, 3) @@ -22,7 +20,7 @@ def forward(self, input): class TestNoSoftUpdteEmbedding(unittest.TestCase): - def test_no_soft_update(self): + def test_no_soft_update(self) -> None: model = Model() target_model = copy.deepcopy(model) @@ -43,11 +41,26 @@ def test_no_soft_update(self): self.assertEqual(1, len(params)) param = params[0].detach().numpy() - 
trainer = RLTrainer(rl_parameters=RLParameters(), use_gpu=False) - trainer._soft_update(model, target_model, 0.1) + self._soft_update(model, target_model, 0.1) target_params = list(target_model.parameters()) self.assertEqual(1, len(target_params)) target_param = target_params[0].detach().numpy() npt.assert_array_equal(target_param, param) + + @torch.no_grad() + def _soft_update(self, network, target_network, tau) -> None: + """Target network update logic as defined in DDPG paper + updated_params = tau * network_params + (1 - tau) * target_network_params + :param network: network with parameters to include in soft update + :param target_network: target network with params to soft update + :param tau: hyperparameter to control target tracking speed + """ + for t_param, param in zip(target_network.parameters(), network.parameters()): + if t_param is param: + # Skip soft-updating when the target network shares the parameter with + # the network being trained. + continue + new_param = tau * param.data + (1.0 - tau) * t_param.data + t_param.data.copy_(new_param)
diff --git a/reagent/test/models/test_sparse_dqn_net.py b/reagent/test/models/test_sparse_dqn_net.py new file mode 100644 index 000000000..896cf4b1d --- /dev/null +++ b/reagent/test/models/test_sparse_dqn_net.py @@ -0,0 +1,52 @@ +import unittest + +import torch +from reagent.models.sparse_dqn import SparseDQN +from torchrec import EmbeddingBagCollection, EmbeddingBagConfig + + +class TestSparseDQN(unittest.TestCase): + def test_single_step_sparse_dqn(self): + state_dense_dim = 10 + action_dense_dim = 2 + dense_sizes = [256, 32] + activation = "relu" + final_activation = "relu" + # Fake embedding bag configs + embedding_table_size = 1000 + embedding_dim = 32 + num_sparse_features = 2 # refer to watched_ids and liked_ids below + embedding_bag_configs = [ + EmbeddingBagConfig( + name="video_id", + feature_names=["watched_ids", "liked_ids"], + num_embeddings=embedding_table_size, + embedding_dim=embedding_dim, + ) + ] + embedding_bag_col = EmbeddingBagCollection( + device=torch.device("cpu"), tables=embedding_bag_configs + ) + + net = SparseDQN( + state_dense_dim=state_dense_dim, + embedding_bag_collection=embedding_bag_col, + action_dense_dim=action_dense_dim, + overarch_dims=dense_sizes, + activation=activation, + final_activation=final_activation, + output_dim=action_dense_dim, + ).q_network.dnn + + # the dim of the input to overall arch is dimension of dense features plus + # number of sparse features times embedding dimension for sparse features + assert ( + net[1].in_features + == state_dense_dim + action_dense_dim + num_sparse_features * embedding_dim + ) + assert net[1].out_features == dense_sizes[0] + assert net[4].in_features == dense_sizes[0] + assert net[4].out_features == dense_sizes[1] + assert net[7].in_features == dense_sizes[1] + + assert net[7].out_features == action_dense_dim
diff --git a/reagent/test/models/test_synthetic_reward_net.py b/reagent/test/models/test_synthetic_reward_net.py new file mode 100644 index 000000000..a53897df7 --- /dev/null +++ b/reagent/test/models/test_synthetic_reward_net.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
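+# Reading guide for the assertions below (a sketch; the tests themselves are
+# the source of truth): SyntheticRewardNet wraps a core network and
+# export_mlp() returns that core, so the tests index into the exported
+# nn.Sequential to check layer sizes and activation types.
+# _gen_mask(valid_step, batch_size, seq_len) is expected to flag the last
+# `valid_step` positions of each length `seq_len` sequence with 1.0,
+# e.g. valid_step=2, seq_len=4 gives [0., 0., 1., 1.].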
+ +import logging +import unittest + +import torch +from reagent.core import parameters as rlp +from reagent.models.synthetic_reward import ( + _gen_mask, + NGramConvolutionalNetwork, + NGramFullyConnectedNetwork, + SequenceSyntheticRewardNet, + SingleStepSyntheticRewardNet, + SyntheticRewardNet, + TransformerSyntheticRewardNet, +) +from reagent.models.synthetic_reward_sparse_arch import ( + SingleStepSyntheticSparseArchRewardNet, + SyntheticRewardSparseArchNet, +) +from torchrec import EmbeddingBagCollection, EmbeddingBagConfig + + +logger = logging.getLogger(__name__) + + +class TestSyntheticReward(unittest.TestCase): + def test_single_step_synthetic_reward(self): + state_dim = 10 + action_dim = 2 + sizes = [256, 128] + activations = ["sigmoid", "relu"] + last_layer_activation = "leaky_relu" + reward_net = SyntheticRewardNet( + SingleStepSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + sizes=sizes, + activations=activations, + last_layer_activation=last_layer_activation, + ) + ) + dnn = reward_net.export_mlp().dnn + # dnn[0] is a concat layer + assert dnn[1].in_features == state_dim + action_dim + assert dnn[1].out_features == 256 + assert dnn[2]._get_name() == "Sigmoid" + assert dnn[3].in_features == 256 + assert dnn[3].out_features == 128 + assert dnn[4]._get_name() == "ReLU" + assert dnn[5].in_features == 128 + assert dnn[5].out_features == 1 + assert dnn[6]._get_name() == "LeakyReLU" + + valid_step = torch.tensor([[1], [2], [3]]) + batch_size = 3 + seq_len = 4 + mask = _gen_mask(valid_step, batch_size, seq_len) + assert torch.all( + mask + == torch.tensor( + [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0]] + ) + ) + + def test_ngram_fc_synthetic_reward(self): + state_dim = 10 + action_dim = 2 + sizes = [256, 128] + activations = ["sigmoid", "relu"] + last_layer_activation = "leaky_relu" + context_size = 3 + + net = NGramFullyConnectedNetwork( + state_dim=state_dim, + action_dim=action_dim, + sizes=sizes, + activations=activations, + last_layer_activation=last_layer_activation, + context_size=context_size, + ) + reward_net = SyntheticRewardNet(net) + + dnn = reward_net.export_mlp().fc.dnn + assert dnn[0].in_features == (state_dim + action_dim) * context_size + assert dnn[0].out_features == 256 + assert dnn[1]._get_name() == "Sigmoid" + assert dnn[2].in_features == 256 + assert dnn[2].out_features == 128 + assert dnn[3]._get_name() == "ReLU" + assert dnn[4].in_features == 128 + assert dnn[4].out_features == 1 + assert dnn[5]._get_name() == "LeakyReLU" + + valid_step = torch.tensor([[1], [2], [3]]) + batch_size = 3 + seq_len = 4 + mask = _gen_mask(valid_step, batch_size, seq_len) + assert torch.all( + mask + == torch.tensor( + [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0]] + ) + ) + + def test_ngram_conv_net_synthetic_reward(self): + state_dim = 10 + action_dim = 2 + sizes = [256, 128] + activations = ["sigmoid", "relu"] + last_layer_activation = "leaky_relu" + context_size = 3 + + conv_net_params = rlp.ConvNetParameters( + conv_dims=[256, 128], + conv_height_kernels=[1, 1], + pool_types=["max", "max"], + pool_kernel_sizes=[1, 1], + ) + net = NGramConvolutionalNetwork( + state_dim=state_dim, + action_dim=action_dim, + sizes=sizes, + activations=activations, + last_layer_activation=last_layer_activation, + context_size=context_size, + conv_net_params=conv_net_params, + ) + + reward_net = SyntheticRewardNet(net) + conv_net = reward_net.export_mlp().conv_net + + assert conv_net.conv_dims == [1, 256, 128] + assert 
conv_net.conv_height_kernels == [1, 1] + assert conv_net.conv_width_kernels == [12, 1] + + assert conv_net.conv_layers[0].in_channels == 1 + assert conv_net.conv_layers[0].out_channels == 256 + assert conv_net.conv_layers[0].kernel_size == (1, 12) + assert conv_net.conv_layers[0].stride == (1, 1) + assert conv_net.conv_layers[1].in_channels == 256 + assert conv_net.conv_layers[1].out_channels == 128 + assert conv_net.conv_layers[1].kernel_size == (1, 1) + assert conv_net.conv_layers[1].stride == (1, 1) + + dnn = reward_net.export_mlp().conv_net.feed_forward.dnn + assert dnn[0].in_features == 384 + assert dnn[0].out_features == 256 + assert dnn[1]._get_name() == "Sigmoid" + assert dnn[2].in_features == 256 + assert dnn[2].out_features == 128 + assert dnn[3]._get_name() == "ReLU" + assert dnn[4].in_features == 128 + assert dnn[4].out_features == 1 + assert dnn[5]._get_name() == "LeakyReLU" + + def test_lstm_synthetic_reward(self): + state_dim = 10 + action_dim = 2 + last_layer_activation = "leaky_relu" + net = SequenceSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + lstm_hidden_size=128, + lstm_num_layers=2, + lstm_bidirectional=True, + last_layer_activation=last_layer_activation, + ) + reward_net = SyntheticRewardNet(net) + lstm = reward_net.export_mlp().lstm + assert lstm.bidirectional + assert lstm.input_size == 12 + assert lstm.hidden_size == 128 + assert lstm.num_layers == 2 + + dnn = reward_net.export_mlp().fc_out + assert dnn.in_features == 128 * 2 + assert dnn.out_features == 1 + + output_activation = reward_net.export_mlp().output_activation + assert output_activation._get_name() == "LeakyReLU" + + def test_transformer_synthetic_reward(self): + state_dim = 10 + action_dim = 2 + d_model = 64 + nhead = 8 + num_encoder_layers = 2 + dim_feedforward = 64 + dropout = 0.0 + activation = "relu" + last_layer_activation = "leaky_relu" + layer_norm_eps = 1e-5 + max_len = 10 + + net = TransformerSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + d_model=d_model, + nhead=nhead, + num_encoder_layers=num_encoder_layers, + dim_feedforward=dim_feedforward, + dropout=dropout, + activation=activation, + last_layer_activation=last_layer_activation, + layer_norm_eps=layer_norm_eps, + max_len=max_len, + ) + + reward_net = SyntheticRewardNet(net) + export_net = reward_net.export_mlp() + transformer = export_net.transformer + assert export_net.state_dim == state_dim + assert export_net.action_dim == action_dim + assert export_net.d_model == d_model + assert export_net.nhead == nhead + assert export_net.dim_feedforward == dim_feedforward + assert export_net.dropout == dropout + assert export_net.activation == activation + assert export_net.layer_norm_eps == layer_norm_eps + + assert transformer.num_layers == num_encoder_layers + dnn_out = export_net.fc_out + assert dnn_out.in_features == d_model + assert dnn_out.out_features == 1 + + output_activation = export_net.output_activation + assert output_activation._get_name() == "LeakyReLU" + + def test_single_step_sparse_arch_synthetic_reward(self): + state_dense_dim = 10 + action_dense_dim = 2 + dense_sizes = [256, 32] + dense_activations = ["sigmoid", "relu"] + overall_sizes = [128, 1] + overall_activations = ["sigmoid", "relu"] + # Fake embedding bag configs + embedding_table_size = 1000 + embedding_dim = 32 + num_sparse_features = 2 # refer to watched_ids and liked_ids below + embedding_bag_configs = [ + EmbeddingBagConfig( + name="video_id", + feature_names=["watched_ids", "liked_ids"], + 
num_embeddings=embedding_table_size, + embedding_dim=embedding_dim, + ) + ] + embedding_bag_col = EmbeddingBagCollection( + device=torch.device("meta"), tables=embedding_bag_configs + ) + reward_net = SyntheticRewardSparseArchNet( + SingleStepSyntheticSparseArchRewardNet( + state_dense_dim=state_dense_dim, + action_dense_dim=action_dense_dim, + dense_sizes=dense_sizes, + dense_activations=dense_activations, + overall_sizes=overall_sizes, + overall_activations=overall_activations, + embedding_bag_collection=embedding_bag_col, + ) + ) + net = reward_net.export_mlp() + assert net.state_dense_arch[0].in_features == state_dense_dim + assert net.state_dense_arch[0].out_features == dense_sizes[0] + assert net.state_dense_arch[2].in_features == dense_sizes[0] + assert net.state_dense_arch[2].out_features == dense_sizes[1] + assert net.action_dense_arch[0].in_features == action_dense_dim + assert net.action_dense_arch[0].out_features == dense_sizes[0] + assert net.action_dense_arch[2].in_features == dense_sizes[0] + assert net.action_dense_arch[2].out_features == dense_sizes[1] + assert net.sparse_arch.embedding_bag_collection == embedding_bag_col + # the dim of the input to overall arch is 2D + 2F + F choose 2 + # See the explanation in SingleStepSyntheticSparseArchRewardNet + assert ( + net.overall_arch[0].in_features + == 2 * dense_sizes[1] + + 2 * num_sparse_features + + num_sparse_features * (num_sparse_features - 1) / 2 + ) + assert net.overall_arch[0].out_features == overall_sizes[0] + assert net.overall_arch[2].in_features == overall_sizes[0] + assert net.overall_arch[2].out_features == overall_sizes[1] diff --git a/reagent/test/models/test_utils.py b/reagent/test/models/test_utils.py index e32e2304b..24ea71def 100644 --- a/reagent/test/models/test_utils.py +++ b/reagent/test/models/test_utils.py @@ -1,32 +1,38 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
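+# run_model_jit_trace (below) is a small comparison helper: it traces
+# `script_model` with the tensor inputs taken from model.input_prototype(),
+# runs both the eager `model` and the traced module on those inputs, and
+# checks the outputs match (via `compare_func` when one is supplied).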
- import logging +from typing import Callable, Optional import torch -import numpy.testing as npt +from reagent.models.base import ModelBase logger = logging.getLogger(__name__) -def check_save_load( - self, - model, - expected_num_params, - expected_num_inputs, - expected_num_outputs, - check_equality=True, +def run_model_jit_trace( + model: ModelBase, + script_model, + compare_func: Optional[Callable] = None, ): - # TODO: remove the expected_num* from call sites - - # TODO: revive this test or kill it - # input_prototype = model.input_prototype() - # traced_model = torch.jit.trace(model, input_prototype) - - # if check_equality: - # x = model(*input_prototype) - # y = traced_model(*input_prototype) - # self.assertEqual(x, y) - - pass + input_prototype = model.input_prototype() + if not isinstance(input_prototype, (list, tuple)): + input_prototype = (input_prototype,) + tensor_input_prototype = tuple(x.float_features for x in input_prototype) + traced_model = torch.jit.trace(script_model, tensor_input_prototype) + + x = model(*input_prototype) + y = traced_model(*tensor_input_prototype) + + if compare_func: + compare_func(x, y) + elif isinstance(x, (list, tuple)): + assert isinstance(y, (list, tuple)) + for xx, yy in zip(x, y): + assert isinstance(xx, torch.Tensor) + assert isinstance(yy, torch.Tensor) + assert torch.all(xx == yy) + else: + assert isinstance(x, torch.Tensor) + assert isinstance(y, torch.Tensor) + assert torch.all(x == y)
diff --git a/reagent/test/net_builder/test_continuous_actor_net_builder.py b/reagent/test/net_builder/test_continuous_actor_net_builder.py index aa4cdda24..03cf18126 100644 --- a/reagent/test/net_builder/test_continuous_actor_net_builder.py +++ b/reagent/test/net_builder/test_continuous_actor_net_builder.py @@ -3,17 +3,19 @@ import unittest +from reagent.core import types as rlt +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData, NormalizationParameters from reagent.net_builder import continuous_actor from reagent.net_builder.unions import ContinuousActorNetBuilder__Union -from reagent.parameters import NormalizationData, NormalizationParameters from reagent.preprocessing.identify_types import CONTINUOUS -try: +if IS_FB_ENVIRONMENT: from reagent.fb.prediction.fb_predictor_wrapper import ( FbActorPredictorWrapper as ActorPredictorWrapper, ) -except ImportError: +else: from reagent.prediction.predictor_wrapper import ActorPredictorWrapper @@ -42,8 +44,9 @@ def _test_actor_net_builder( for i in range(action_dim) } ) + state_feature_config = rlt.ModelFeatureConfig() actor_network = builder.build_actor( - state_normalization_data, action_normalization_data + state_feature_config, state_normalization_data, action_normalization_data ) x = actor_network.input_prototype() y = actor_network(x) @@ -51,20 +54,26 @@ def _test_actor_net_builder( log_prob = y.log_prob self.assertEqual(action.shape, (1, action_dim)) self.assertEqual(log_prob.shape, (1, 1)) + state_feature_config = rlt.ModelFeatureConfig() serving_module = builder.build_serving_module( - actor_network, state_normalization_data, action_normalization_data + actor_network, + state_feature_config, + state_normalization_data, + action_normalization_data, ) self.assertIsInstance(serving_module, ActorPredictorWrapper) - def test_gaussian_fully_connected(self): + def test_gaussian_fully_connected(self) -> None: # Intentionally used this long path to make sure we included it in __init__.py + # pyre-fixme[28]: Unexpected keyword argument
`GaussianFullyConnected`. chooser = ContinuousActorNetBuilder__Union( GaussianFullyConnected=continuous_actor.gaussian_fully_connected.GaussianFullyConnected() ) self._test_actor_net_builder(chooser) - def test_dirichlet_fully_connected(self): + def test_dirichlet_fully_connected(self) -> None: # Intentionally used this long path to make sure we included it in __init__.py + # pyre-fixme[28]: Unexpected keyword argument `DirichletFullyConnected`. chooser = ContinuousActorNetBuilder__Union( DirichletFullyConnected=continuous_actor.dirichlet_fully_connected.DirichletFullyConnected() ) diff --git a/reagent/test/net_builder/test_discrete_dqn_net_builder.py b/reagent/test/net_builder/test_discrete_dqn_net_builder.py index 9cff3ff8d..a006daafe 100644 --- a/reagent/test/net_builder/test_discrete_dqn_net_builder.py +++ b/reagent/test/net_builder/test_discrete_dqn_net_builder.py @@ -2,44 +2,32 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import unittest -from typing import Optional -from reagent import types as rlt +from reagent.core import types as rlt +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData, NormalizationParameters from reagent.net_builder import discrete_dqn from reagent.net_builder.unions import DiscreteDQNNetBuilder__Union -from reagent.parameters import NormalizationData, NormalizationParameters from reagent.preprocessing.identify_types import CONTINUOUS +from torchrec import PoolingType -try: +if IS_FB_ENVIRONMENT: from reagent.fb.prediction.fb_predictor_wrapper import ( FbDiscreteDqnPredictorWrapper as DiscreteDqnPredictorWrapper, - FbDiscreteDqnPredictorWrapperWithIdList as DiscreteDqnPredictorWrapperWithIdList, - ) -except ImportError: - from reagent.prediction.predictor_wrapper import ( - DiscreteDqnPredictorWrapper, - DiscreteDqnPredictorWrapperWithIdList, ) +else: + from reagent.prediction.predictor_wrapper import DiscreteDqnPredictorWrapper class TestDiscreteDQNNetBuilder(unittest.TestCase): def _test_discrete_dqn_net_builder( self, chooser: DiscreteDQNNetBuilder__Union, - state_feature_config: Optional[rlt.ModelFeatureConfig] = None, + state_feature_config: rlt.ModelFeatureConfig, serving_module_class=DiscreteDqnPredictorWrapper, ) -> None: builder = chooser.value - state_dim = 3 - state_feature_config = state_feature_config or rlt.ModelFeatureConfig( - float_feature_infos=[ - rlt.FloatFeatureInfo(name=f"f{i}", feature_id=i) - for i in range(state_dim) - ] - ) - state_dim = len(state_feature_config.float_feature_infos) - state_normalization_data = NormalizationData( dense_normalization_parameters={ fi.feature_id: NormalizationParameters( @@ -48,7 +36,6 @@ def _test_discrete_dqn_net_builder( for fi in state_feature_config.float_feature_infos } ) - action_names = ["L", "R"] q_network = builder.build_q_network( state_feature_config, state_normalization_data, len(action_names) @@ -61,32 +48,86 @@ def _test_discrete_dqn_net_builder( ) self.assertIsInstance(serving_module, serving_module_class) - def test_fully_connected(self): + def test_fully_connected(self) -> None: # Intentionally used this long path to make sure we included it in __init__.py + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. 
chooser = DiscreteDQNNetBuilder__Union( FullyConnected=discrete_dqn.fully_connected.FullyConnected() ) - self._test_discrete_dqn_net_builder(chooser) + state_feature_config = rlt.ModelFeatureConfig( + float_feature_infos=[ + rlt.FloatFeatureInfo(name=f"f{i}", feature_id=i) for i in range(3) + ] + ) + self._test_discrete_dqn_net_builder(chooser, state_feature_config) - def test_dueling(self): + def test_dueling(self) -> None: # Intentionally used this long path to make sure we included it in __init__.py + # pyre-fixme[28]: Unexpected keyword argument `Dueling`. chooser = DiscreteDQNNetBuilder__Union(Dueling=discrete_dqn.dueling.Dueling()) - self._test_discrete_dqn_net_builder(chooser) + state_feature_config = rlt.ModelFeatureConfig( + float_feature_infos=[ + rlt.FloatFeatureInfo(name=f"f{i}", feature_id=i) for i in range(3) + ] + ) + self._test_discrete_dqn_net_builder(chooser, state_feature_config) - def test_fully_connected_with_id_list_none(self): + def test_fully_connected_with_embedding(self) -> None: # Intentionally used this long path to make sure we included it in __init__.py + # pyre-fixme[28]: Unexpected keyword argument `FullyConnectedWithEmbedding`. chooser = DiscreteDQNNetBuilder__Union( FullyConnectedWithEmbedding=discrete_dqn.fully_connected_with_embedding.FullyConnectedWithEmbedding() ) + + EMBEDDING_TABLE_SIZE = 10 + EMBEDDING_DIM = 32 + # only id_list + state_feature_config = rlt.ModelFeatureConfig( + float_feature_infos=[ + rlt.FloatFeatureInfo(name=str(i), feature_id=i) for i in range(1, 5) + ], + id_list_feature_configs=[ + rlt.IdListFeatureConfig( + name="A", feature_id=10, id_mapping_name="A_mapping" + ) + ], + id_mapping_config={ + "A_mapping": rlt.IdMappingConfig( + embedding_table_size=EMBEDDING_TABLE_SIZE, + embedding_dim=EMBEDDING_DIM, + hashing=False, + pooling_type=PoolingType.SUM, + ) + }, + ) self._test_discrete_dqn_net_builder( - chooser, serving_module_class=DiscreteDqnPredictorWrapperWithIdList + chooser, state_feature_config=state_feature_config ) - def test_fully_connected_with_id_list(self): - # Intentionally used this long path to make sure we included it in __init__.py - chooser = DiscreteDQNNetBuilder__Union( - FullyConnectedWithEmbedding=discrete_dqn.fully_connected_with_embedding.FullyConnectedWithEmbedding() + # only id_score_list + state_feature_config = rlt.ModelFeatureConfig( + float_feature_infos=[ + rlt.FloatFeatureInfo(name=str(i), feature_id=i) for i in range(1, 5) + ], + id_score_list_feature_configs=[ + rlt.IdScoreListFeatureConfig( + name="A", feature_id=10, id_mapping_name="A_mapping" + ) + ], + id_mapping_config={ + "A_mapping": rlt.IdMappingConfig( + embedding_table_size=EMBEDDING_TABLE_SIZE, + embedding_dim=EMBEDDING_DIM, + hashing=False, + pooling_type=PoolingType.SUM, + ) + }, + ) + self._test_discrete_dqn_net_builder( + chooser, state_feature_config=state_feature_config ) + + # id_list + id_score_list state_feature_config = rlt.ModelFeatureConfig( float_feature_infos=[ rlt.FloatFeatureInfo(name=str(i), feature_id=i) for i in range(1, 5) @@ -96,10 +137,20 @@ def test_fully_connected_with_id_list(self): name="A", feature_id=10, id_mapping_name="A_mapping" ) ], - id_mapping_config={"A_mapping": rlt.IdMapping(ids=[0, 1, 2])}, + id_score_list_feature_configs=[ + rlt.IdScoreListFeatureConfig( + name="B", feature_id=100, id_mapping_name="A_mapping" + ) + ], + id_mapping_config={ + "A_mapping": rlt.IdMappingConfig( + embedding_table_size=EMBEDDING_TABLE_SIZE, + embedding_dim=EMBEDDING_DIM, + hashing=False, + pooling_type=PoolingType.SUM, 
+ ) + }, ) self._test_discrete_dqn_net_builder( - chooser, - state_feature_config=state_feature_config, - serving_module_class=DiscreteDqnPredictorWrapperWithIdList, + chooser, state_feature_config=state_feature_config ) diff --git a/reagent/test/net_builder/test_parametric_dqn_net_builder.py b/reagent/test/net_builder/test_parametric_dqn_net_builder.py index d68f0b9dd..b2bce9655 100644 --- a/reagent/test/net_builder/test_parametric_dqn_net_builder.py +++ b/reagent/test/net_builder/test_parametric_dqn_net_builder.py @@ -3,17 +3,18 @@ import unittest +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData, NormalizationParameters from reagent.net_builder import parametric_dqn from reagent.net_builder.unions import ParametricDQNNetBuilder__Union -from reagent.parameters import NormalizationData, NormalizationParameters from reagent.preprocessing.identify_types import CONTINUOUS -try: +if IS_FB_ENVIRONMENT: from reagent.fb.prediction.fb_predictor_wrapper import ( FbParametricDqnPredictorWrapper as ParametricDqnPredictorWrapper, ) -except ImportError: +else: from reagent.prediction.predictor_wrapper import ParametricDqnPredictorWrapper @@ -51,8 +52,9 @@ def _test_parametric_dqn_net_builder( ) self.assertIsInstance(serving_module, ParametricDqnPredictorWrapper) - def test_fully_connected(self): + def test_fully_connected(self) -> None: # Intentionally used this long path to make sure we included it in __init__.py + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. chooser = ParametricDQNNetBuilder__Union( FullyConnected=parametric_dqn.fully_connected.FullyConnected() ) diff --git a/reagent/test/net_builder/test_synthetic_reward_net_builder.py b/reagent/test/net_builder/test_synthetic_reward_net_builder.py new file mode 100644 index 000000000..eeede4a0b --- /dev/null +++ b/reagent/test/net_builder/test_synthetic_reward_net_builder.py @@ -0,0 +1,299 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +import unittest + +import numpy.testing as npt +import torch +from reagent.core import parameters as rlp, types as rlt +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.parameters import NormalizationData, NormalizationParameters +from reagent.net_builder.synthetic_reward.ngram_synthetic_reward import ( + NGramConvNetSyntheticReward, + NGramSyntheticReward, +) +from reagent.net_builder.synthetic_reward.sequence_synthetic_reward import ( + SequenceSyntheticReward, +) +from reagent.net_builder.synthetic_reward.single_step_synthetic_reward import ( + SingleStepSyntheticReward, +) +from reagent.net_builder.synthetic_reward.transformer_synthetic_reward import ( + TransformerSyntheticReward, +) +from reagent.net_builder.synthetic_reward_net_builder import SyntheticRewardNetBuilder +from reagent.net_builder.unions import SyntheticRewardNetBuilder__Union +from reagent.preprocessing.identify_types import CONTINUOUS +from reagent.preprocessing.preprocessor import Preprocessor + + +if IS_FB_ENVIRONMENT: + from reagent.fb.prediction.synthetic_reward.synthetic_reward_predictor_wrapper import ( + FbSyntheticRewardPredictorWrapper as SyntheticRewardPredictorWrapper, + ) +else: + from reagent.prediction.synthetic_reward.synthetic_reward_predictor_wrapper import ( + SyntheticRewardPredictorWrapper, + ) + +STATE_DIM = 3 +ACTION_DIM = 2 +BATCH_SIZE = 2 +SEQ_LEN = 4 + + +def _create_norm(dim, offset: int = 0): + normalization_data = NormalizationData( + dense_normalization_parameters={ + i: NormalizationParameters(feature_type=CONTINUOUS, mean=0.0, stddev=1.0) + for i in range(offset, dim + offset) + } + ) + return normalization_data + + +def _create_input(): + state = torch.randn(SEQ_LEN, BATCH_SIZE, STATE_DIM) + # generate valid_step with shape (BATCH_SIZE, 1), values ranging from [1, SEQ_LEN] (inclusive) + valid_step = torch.randint(1, SEQ_LEN + 1, size=(BATCH_SIZE, 1)) + # create one-hot action value + action_label = torch.LongTensor(SEQ_LEN * BATCH_SIZE, 1) % ACTION_DIM + action = torch.FloatTensor(SEQ_LEN * BATCH_SIZE, ACTION_DIM) + action.zero_() + action.scatter_(1, action_label, 1) + action = action.reshape(SEQ_LEN, BATCH_SIZE, ACTION_DIM) + + input = rlt.MemoryNetworkInput( + state=rlt.FeatureData(state), + action=rlt.FeatureData(action), + valid_step=valid_step, + # the rest fields will not be used + next_state=torch.tensor([]), + reward=torch.tensor([]), + step=torch.tensor([]), + not_terminal=torch.tensor([]), + time_diff=torch.tensor([]), + ) + return input + + +def _create_preprocessed_input( + input: rlt.MemoryNetworkInput, + state_preprocessor: Preprocessor, + action_preprocessor: Preprocessor, +): + preprocessed_state = state_preprocessor( + input.state.float_features.reshape(SEQ_LEN * BATCH_SIZE, STATE_DIM), + torch.ones(SEQ_LEN * BATCH_SIZE, STATE_DIM), + ).reshape(SEQ_LEN, BATCH_SIZE, STATE_DIM) + preprocessed_action = action_preprocessor( + input.action.float_features.reshape(SEQ_LEN * BATCH_SIZE, ACTION_DIM), + torch.ones(SEQ_LEN * BATCH_SIZE, ACTION_DIM), + ).reshape(SEQ_LEN, BATCH_SIZE, ACTION_DIM) + return rlt.MemoryNetworkInput( + state=rlt.FeatureData(preprocessed_state), + action=rlt.FeatureData(preprocessed_action), + valid_step=input.valid_step, + next_state=input.next_state, + reward=input.reward, + step=input.step, + not_terminal=input.not_terminal, + time_diff=input.time_diff, + ) + + +class TestSyntheticRewardNetBuilder(unittest.TestCase): + def test_single_step_synthetic_reward_net_builder_discrete_actions( + self, + ) -> None: + # pyre-fixme[28]: 
Unexpected keyword argument `SingleStepSyntheticReward`. + builder = SyntheticRewardNetBuilder__Union( + SingleStepSyntheticReward=SingleStepSyntheticReward() + ).value + self._test_synthetic_reward_net_builder_discrete_actions(builder) + + def test_ngram_fc_synthetic_reward_net_builder_discrete_actions( + self, + ) -> None: + # pyre-fixme[28]: Unexpected keyword argument `NGramSyntheticReward`. + builder = SyntheticRewardNetBuilder__Union( + NGramSyntheticReward=NGramSyntheticReward() + ).value + self._test_synthetic_reward_net_builder_discrete_actions(builder) + + def test_ngram_conv_net_synthetic_reward_net_builder_discrete_actions( + self, + ) -> None: + conv_net_params = rlp.ConvNetParameters( + conv_dims=[256, 128], + conv_height_kernels=[1, 1], + pool_types=["max", "max"], + pool_kernel_sizes=[1, 1], + ) + # pyre-fixme[28]: Unexpected keyword argument `NGramConvNetSyntheticReward`. + builder = SyntheticRewardNetBuilder__Union( + NGramConvNetSyntheticReward=NGramConvNetSyntheticReward( + conv_net_params=conv_net_params + ) + ).value + self._test_synthetic_reward_net_builder_discrete_actions(builder) + + def test_lstm_synthetic_reward_net_builder_discrete_actions( + self, + ) -> None: + # pyre-fixme[28]: Unexpected keyword argument `SequenceSyntheticReward`. + builder = SyntheticRewardNetBuilder__Union( + SequenceSyntheticReward=SequenceSyntheticReward() + ).value + self._test_synthetic_reward_net_builder_discrete_actions(builder) + + def test_transformer_synthetic_reward_net_builder_discrete_actions( + self, + ) -> None: + # pyre-fixme[28]: Unexpected keyword argument `TransformerSyntheticReward`. + builder = SyntheticRewardNetBuilder__Union( + TransformerSyntheticReward=TransformerSyntheticReward() + ).value + self._test_synthetic_reward_net_builder_discrete_actions(builder) + + def _test_synthetic_reward_net_builder_discrete_actions( + self, builder: SyntheticRewardNetBuilder + ) -> None: + state_normalization_data = _create_norm(STATE_DIM) + discrete_action_names = ["1", "2"] + reward_net = builder.build_synthetic_reward_network( + state_normalization_data, discrete_action_names=discrete_action_names + ) + input = _create_input() + output = reward_net(input).predicted_reward + assert output.shape == (BATCH_SIZE, 1) + + # TO IMPLEMENT + # predictor_wrapper = builder.build_serving_module( + # reward_net, + # state_normalization_data, + # discrete_action_names=discrete_action_names, + # ) + # self.assertIsInstance( + # predictor_wrapper, DiscreteSingleStepSyntheticRewardPredictorWrapper + # ) + + def test_single_step_synthetic_reward_net_builder_continuous_actions(self) -> None: + # pyre-fixme[28]: Unexpected keyword argument `SingleStepSyntheticReward`. + builder = SyntheticRewardNetBuilder__Union( + SingleStepSyntheticReward=SingleStepSyntheticReward() + ).value + self._test_synthetic_reward_net_builder_continuous_actions(builder) + + def test_ngram_fc_synthetic_reward_net_builder_continuous_actions( + self, + ) -> None: + # pyre-fixme[28]: Unexpected keyword argument `NGramSyntheticReward`. 
+ builder = SyntheticRewardNetBuilder__Union( + NGramSyntheticReward=NGramSyntheticReward() + ).value + self._test_synthetic_reward_net_builder_continuous_actions(builder) + + def test_ngram_conv_net_synthetic_reward_net_builder_continuous_actions( + self, + ) -> None: + conv_net_params = rlp.ConvNetParameters( + conv_dims=[256, 128], + conv_height_kernels=[1, 1], + pool_types=["max", "max"], + pool_kernel_sizes=[1, 1], + ) + # pyre-fixme[28]: Unexpected keyword argument `NGramConvNetSyntheticReward`. + builder = SyntheticRewardNetBuilder__Union( + NGramConvNetSyntheticReward=NGramConvNetSyntheticReward( + conv_net_params=conv_net_params + ) + ).value + self._test_synthetic_reward_net_builder_continuous_actions(builder) + + def test_lstm_synthetic_reward_net_builder_continuous_actions( + self, + ) -> None: + # pyre-fixme[28]: Unexpected keyword argument `SequenceSyntheticReward`. + builder = SyntheticRewardNetBuilder__Union( + SequenceSyntheticReward=SequenceSyntheticReward() + ).value + self._test_synthetic_reward_net_builder_continuous_actions(builder) + + def test_transformer_synthetic_reward_net_builder_continuous_actions( + self, + ) -> None: + # pyre-fixme[28]: Unexpected keyword argument `TransformerSyntheticReward`. + builder = SyntheticRewardNetBuilder__Union( + TransformerSyntheticReward=TransformerSyntheticReward() + ).value + self._test_synthetic_reward_net_builder_continuous_actions(builder) + + @torch.no_grad() + def _test_synthetic_reward_net_builder_continuous_actions( + self, builder: SyntheticRewardNetBuilder + ) -> None: + """ + This test does the following steps: + 1. create a net builder + 2. use the net builder to create a synthetic reward network + 3. export the synthetic reward network + 4. use the exported network to create a predictor wrapper + 5. create raw input and preprocessed inputs + 6. compare if the results between the following matches: + a. synthetic reward network on preprocessed input + b. export network on preprocessed input + c. predictor wrapper on raw input + """ + state_normalization_data = _create_norm(STATE_DIM) + action_normalization_data = _create_norm(ACTION_DIM, offset=STATE_DIM) + state_preprocessor = Preprocessor( + state_normalization_data.dense_normalization_parameters + ) + action_preprocessor = Preprocessor( + action_normalization_data.dense_normalization_parameters + ) + reward_net = builder.build_synthetic_reward_network( + state_normalization_data, + action_normalization_data=action_normalization_data, + ).eval() + input = _create_input() + preprocessed_input = _create_preprocessed_input( + input, state_preprocessor, action_preprocessor + ) + output = reward_net(preprocessed_input).predicted_reward + assert output.shape == (BATCH_SIZE, 1) + + # pyre-fixme[29]: `Union[torch.Tensor, torch.nn.Module]` is not a function. 
+ export_net = reward_net.export_mlp().cpu().eval() + export_output = export_net( + preprocessed_input.state.float_features, + preprocessed_input.action.float_features, + ) + predictor_wrapper = builder.build_serving_module( + SEQ_LEN, + reward_net, + state_normalization_data, + action_normalization_data=action_normalization_data, + ) + self.assertIsInstance(predictor_wrapper, SyntheticRewardPredictorWrapper) + for i in range(BATCH_SIZE): + input_to_predictor = torch.cat( + ( + input.state.float_features[:, i, :], + input.action.float_features[:, i, :], + ), + dim=1, + ) + input_to_predictor_presence = torch.ones(SEQ_LEN, STATE_DIM + ACTION_DIM) + predictor_output = predictor_wrapper( + (input_to_predictor, input_to_predictor_presence) + ) + if IS_FB_ENVIRONMENT: + predictor_output = predictor_output[1][2] + npt.assert_array_almost_equal(predictor_output, export_output[i], decimal=4) + npt.assert_almost_equal( + torch.sum(predictor_output[-input.valid_step[i] :]), + output[i], + decimal=4, + ) diff --git a/reagent/test/net_builder/test_value_net_builder.py b/reagent/test/net_builder/test_value_net_builder.py index b48a7a493..9359d2239 100644 --- a/reagent/test/net_builder/test_value_net_builder.py +++ b/reagent/test/net_builder/test_value_net_builder.py @@ -4,14 +4,16 @@ import unittest import torch +from reagent.core.parameters import NormalizationData, NormalizationParameters +from reagent.core.types import FeatureData from reagent.net_builder import value from reagent.net_builder.unions import ValueNetBuilder__Union -from reagent.parameters import NormalizationData, NormalizationParameters from reagent.preprocessing.identify_types import CONTINUOUS class TestValueNetBuilder(unittest.TestCase): - def test_fully_connected(self): + def test_fully_connected(self) -> None: + # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. chooser = ValueNetBuilder__Union( FullyConnected=value.fully_connected.FullyConnected() ) @@ -25,6 +27,6 @@ def test_fully_connected(self): ) value_network = builder.build_value_network(normalization_data) batch_size = 5 - x = torch.randn(batch_size, state_dim) + x = FeatureData(float_features=torch.randn(batch_size, state_dim)) y = value_network(x) self.assertEqual(y.shape, (batch_size, 1)) diff --git a/reagent/test/optimizer/__init__.py b/reagent/test/optimizer/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/test/optimizer/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/test/optimizer/test_make_optimizer.py b/reagent/test/optimizer/test_make_optimizer.py new file mode 100644 index 000000000..ea478ca3a --- /dev/null +++ b/reagent/test/optimizer/test_make_optimizer.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
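+# Sketch of the pattern exercised below: an optimizer config such as
+# Adam(lr=..., lr_schedulers=[StepLR(...)]) exposes
+# make_optimizer_scheduler(params), which is expected to return a dict with an
+# "optimizer" entry (a torch optimizer) and an "lr_scheduler" entry (a torch
+# LR scheduler); _verify_optimizer checks exactly that.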
+ +import unittest + +import torch +from reagent.optimizer.uninferrable_optimizers import Adam +from reagent.optimizer.uninferrable_schedulers import ( + CosineAnnealingLR, + CosineAnnealingWarmRestarts, + ExponentialLR, + MultiStepLR, + OneCycleLR, + StepLR, +) +from reagent.optimizer.utils import is_torch_lr_scheduler, is_torch_optimizer + + +class TestMakeOptimizer(unittest.TestCase): + def setUp(self): + self.model = torch.nn.Linear(3, 4) + + def _verify_optimizer(self, optimizer_scheduler_pair): + self.assertTrue(is_torch_optimizer(type(optimizer_scheduler_pair["optimizer"]))) + self.assertTrue( + is_torch_lr_scheduler(type(optimizer_scheduler_pair["lr_scheduler"])) + ) + + def test_make_optimizer_with_step_lr_scheduler(self): + self._verify_optimizer( + Adam( + lr=0.001, lr_schedulers=[StepLR(gamma=0.1, step_size=0.01)] + ).make_optimizer_scheduler(self.model.parameters()) + ) + + def test_make_optimizer_with_multistep_lr_scheduler(self): + self._verify_optimizer( + Adam( + lr=0.001, + lr_schedulers=[MultiStepLR(gamma=0.2, milestones=[1000, 2000])], + ).make_optimizer_scheduler(self.model.parameters()) + ) + + def test_make_optimizer_with_exponential_lr_scheduler(self): + self._verify_optimizer( + Adam( + lr=0.001, lr_schedulers=[ExponentialLR(gamma=0.9)] + ).make_optimizer_scheduler(self.model.parameters()) + ) + + def test_make_optimizer_with_cosine_annealing_lr_scheduler(self): + self._verify_optimizer( + Adam( + lr=0.001, lr_schedulers=[CosineAnnealingLR(T_max=1)] + ).make_optimizer_scheduler(self.model.parameters()) + ) + + def test_make_optimizer_with_one_cycle_lr_scheduler(self): + self._verify_optimizer( + Adam( + lr=0.001, + lr_schedulers=[ + OneCycleLR(max_lr=0.1, base_momentum=0.8, total_steps=1000) + ], + ).make_optimizer_scheduler(self.model.parameters()) + ) + + def test_make_optimizer_with_cosine_annealing_warm_restarts_lr_scheduler(self): + self._verify_optimizer( + Adam( + lr=0.001, lr_schedulers=[CosineAnnealingWarmRestarts(T_0=1)] + ).make_optimizer_scheduler(self.model.parameters()) + ) diff --git a/reagent/test/prediction/test_model_with_preprocessor.py b/reagent/test/prediction/test_model_with_preprocessor.py new file mode 100644 index 000000000..ecbaeaef3 --- /dev/null +++ b/reagent/test/prediction/test_model_with_preprocessor.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +import unittest + +import numpy.testing as npt +import torch +from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch +from reagent.models.seq2slate import Seq2SlateTransformerNet +from reagent.prediction.predictor_wrapper import Seq2SlateWithPreprocessor +from reagent.preprocessing.preprocessor import Preprocessor +from reagent.test.prediction.test_prediction_utils import ( + _cont_norm, + change_cand_size_slate_ranking, +) + + +class TestModelWithPreprocessor(unittest.TestCase): + def verify_results(self, expected_output, scripted_output) -> None: + for i, j in zip(expected_output, scripted_output): + npt.assert_array_equal(i.detach(), j.detach()) + + def test_seq2slate_transformer_frechet_sort_model_with_preprocessor(self) -> None: + self._test_seq2slate_model_with_preprocessor( + model="transformer", output_arch=Seq2SlateOutputArch.FRECHET_SORT + ) + + def test_seq2slate_transformer_autoregressive_model_with_preprocessor(self) -> None: + self._test_seq2slate_model_with_preprocessor( + model="transformer", output_arch=Seq2SlateOutputArch.AUTOREGRESSIVE + ) + + def _test_seq2slate_model_with_preprocessor( + self, model: str, output_arch: Seq2SlateOutputArch + ) -> None: + state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)} + candidate_normalization_parameters = {i: _cont_norm() for i in range(101, 106)} + state_preprocessor = Preprocessor(state_normalization_parameters, False) + candidate_preprocessor = Preprocessor(candidate_normalization_parameters, False) + candidate_size = 10 + slate_size = 4 + + seq2slate = None + if model == "transformer": + seq2slate = Seq2SlateTransformerNet( + state_dim=len(state_normalization_parameters), + candidate_dim=len(candidate_normalization_parameters), + num_stacked_layers=2, + num_heads=2, + dim_model=10, + dim_feedforward=10, + max_src_seq_len=candidate_size, + max_tgt_seq_len=slate_size, + output_arch=output_arch, + temperature=0.5, + ) + else: + raise NotImplementedError(f"model type {model} is unknown") + + seq2slate_with_preprocessor = Seq2SlateWithPreprocessor( + seq2slate, state_preprocessor, candidate_preprocessor, greedy=True + ) + input_prototype = seq2slate_with_preprocessor.input_prototype() + + if seq2slate_with_preprocessor.can_be_traced(): + seq2slate_with_preprocessor_jit = torch.jit.trace( + seq2slate_with_preprocessor, + seq2slate_with_preprocessor.input_prototype(), + ) + else: + seq2slate_with_preprocessor_jit = torch.jit.script( + seq2slate_with_preprocessor + ) + expected_output = seq2slate_with_preprocessor(*input_prototype) + jit_output = seq2slate_with_preprocessor_jit(*input_prototype) + self.verify_results(expected_output, jit_output) + + # Test if scripted model can handle variable lengths of input + input_prototype = change_cand_size_slate_ranking(input_prototype, 20) + expected_output = seq2slate_with_preprocessor(*input_prototype) + jit_output = seq2slate_with_preprocessor_jit(*input_prototype) + self.verify_results(expected_output, jit_output) diff --git a/reagent/test/prediction/test_prediction_utils.py b/reagent/test/prediction/test_prediction_utils.py new file mode 100644 index 000000000..01234738d --- /dev/null +++ b/reagent/test/prediction/test_prediction_utils.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
+ +import torch +from reagent.preprocessing.identify_types import CONTINUOUS, CONTINUOUS_ACTION +from reagent.preprocessing.normalization import NormalizationParameters + + +def _cont_norm(): + return NormalizationParameters(feature_type=CONTINUOUS, mean=0.0, stddev=1.0) + + +def _cont_action_norm(): + return NormalizationParameters( + feature_type=CONTINUOUS_ACTION, min_value=-3.0, max_value=3.0 + ) + + +def change_cand_size_slate_ranking(input_prototype, candidate_size_override): + state_prototype, candidate_prototype = input_prototype + candidate_prototype = ( + candidate_prototype[0][:, :1, :].repeat(1, candidate_size_override, 1), + candidate_prototype[1][:, :1, :].repeat(1, candidate_size_override, 1), + ) + return ( + (torch.randn_like(state_prototype[0]), torch.ones_like(state_prototype[1])), + ( + torch.randn_like(candidate_prototype[0]), + torch.ones_like(candidate_prototype[1]), + ), + ) diff --git a/reagent/test/prediction/test_predictor_wrapper.py b/reagent/test/prediction/test_predictor_wrapper.py index 2e58968b4..024364bd2 100644 --- a/reagent/test/prediction/test_predictor_wrapper.py +++ b/reagent/test/prediction/test_predictor_wrapper.py @@ -1,43 +1,64 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import random import unittest +import numpy.testing as npt +import reagent.core.types as rlt import reagent.models as models -import reagent.types as rlt import torch -from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet +from reagent.model_utils.seq2slate_utils import Seq2SlateMode, Seq2SlateOutputArch +from reagent.models.seq2slate import Seq2SlateTransformerNet +from reagent.prediction.cfeval.predictor_wrapper import BanditRewardNetPredictorWrapper from reagent.prediction.predictor_wrapper import ( ActorPredictorWrapper, ActorWithPreprocessor, DiscreteDqnPredictorWrapper, - DiscreteDqnPredictorWrapperWithIdList, DiscreteDqnWithPreprocessor, - DiscreteDqnWithPreprocessorWithIdList, + FAKE_STATE_FEATURE_ID, ParametricDqnPredictorWrapper, ParametricDqnWithPreprocessor, Seq2SlatePredictorWrapper, Seq2SlateWithPreprocessor, ) -from reagent.preprocessing.identify_types import CONTINUOUS, CONTINUOUS_ACTION -from reagent.preprocessing.normalization import NormalizationParameters +from reagent.prediction.ranking.predictor_wrapper import ( + DeterminantalPointProcessPredictorWrapper, + Kernel, +) from reagent.preprocessing.postprocessor import Postprocessor from reagent.preprocessing.preprocessor import Preprocessor +from reagent.test.prediction.test_prediction_utils import ( + _cont_action_norm, + _cont_norm, + change_cand_size_slate_ranking, +) -def _cont_norm(): - return NormalizationParameters(feature_type=CONTINUOUS, mean=0.0, stddev=1.0) - - -def _cont_action_norm(): - return NormalizationParameters( - feature_type=CONTINUOUS_ACTION, min_value=-3.0, max_value=3.0 +def seq2slate_input_prototype_to_ranking_input( + state_input_prototype, + candidate_input_prototype, + state_preprocessor, + candidate_preprocessor, +): + batch_size, candidate_size, candidate_dim = candidate_input_prototype[0].shape + preprocessed_state = state_preprocessor( + state_input_prototype[0], state_input_prototype[1] + ) + preprocessed_candidates = candidate_preprocessor( + candidate_input_prototype[0].view(batch_size * candidate_size, candidate_dim), + candidate_input_prototype[1].view(batch_size * candidate_size, candidate_dim), + ).view(batch_size, candidate_size, -1) + return rlt.PreprocessedRankingInput.from_tensors( + 
state=preprocessed_state, + src_seq=preprocessed_candidates, ) class TestPredictorWrapper(unittest.TestCase): - def test_discrete_wrapper(self): - state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)} + def test_discrete_wrapper(self) -> None: + ids = range(1, 5) + state_normalization_parameters = {i: _cont_norm() for i in ids} state_preprocessor = Preprocessor(state_normalization_parameters, False) action_dim = 2 dqn = models.FullyConnectedDQN( @@ -46,43 +67,28 @@ def test_discrete_wrapper(self): sizes=[16], activations=["relu"], ) - dqn_with_preprocessor = DiscreteDqnWithPreprocessor(dqn, state_preprocessor) - action_names = ["L", "R"] - wrapper = DiscreteDqnPredictorWrapper(dqn_with_preprocessor, action_names) - input_prototype = dqn_with_preprocessor.input_prototype() - output_action_names, q_values = wrapper(*input_prototype) - self.assertEqual(action_names, output_action_names) - self.assertEqual(q_values.shape, (1, 2)) - - expected_output = dqn(rlt.FeatureData(state_preprocessor(*input_prototype[0]))) - self.assertTrue((expected_output == q_values).all()) - - def test_discrete_wrapper_with_id_list_none(self): - state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)} - state_preprocessor = Preprocessor(state_normalization_parameters, False) - action_dim = 2 - dqn = models.FullyConnectedDQN( - state_dim=len(state_normalization_parameters), - action_dim=action_dim, - sizes=[16], - activations=["relu"], + state_feature_config = rlt.ModelFeatureConfig( + float_feature_infos=[ + rlt.FloatFeatureInfo(feature_id=i, name=f"feat_{i}") for i in ids + ] ) - dqn_with_preprocessor = DiscreteDqnWithPreprocessorWithIdList( - dqn, state_preprocessor + dqn_with_preprocessor = DiscreteDqnWithPreprocessor( + dqn, state_preprocessor, state_feature_config ) action_names = ["L", "R"] - wrapper = DiscreteDqnPredictorWrapperWithIdList( - dqn_with_preprocessor, action_names + wrapper = DiscreteDqnPredictorWrapper( + dqn_with_preprocessor, action_names, state_feature_config ) - input_prototype = dqn_with_preprocessor.input_prototype() - output_action_names, q_values = wrapper(*input_prototype) + input_prototype = dqn_with_preprocessor.input_prototype()[0] + output_action_names, q_values = wrapper(input_prototype) self.assertEqual(action_names, output_action_names) self.assertEqual(q_values.shape, (1, 2)) - expected_output = dqn(rlt.FeatureData(state_preprocessor(*input_prototype[0]))) + state_with_presence = input_prototype.float_features_with_presence + expected_output = dqn(rlt.FeatureData(state_preprocessor(*state_with_presence))) self.assertTrue((expected_output == q_values).all()) - def test_discrete_wrapper_with_id_list(self): + def test_discrete_wrapper_with_id_list(self) -> None: state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)} state_preprocessor = Preprocessor(state_normalization_parameters, False) action_dim = 2 @@ -92,15 +98,20 @@ def test_discrete_wrapper_with_id_list(self): ], id_list_feature_configs=[ rlt.IdListFeatureConfig( - name="A", feature_id=10, id_mapping_name="A_mapping" + name="id_list_feature_A", + feature_id=FAKE_STATE_FEATURE_ID, + id_mapping_name="Table_A", ) ], - id_mapping_config={"A_mapping": rlt.IdMapping(ids=[0, 1, 2])}, + id_mapping_config={ + "Table_A": rlt.IdMappingConfig( + embedding_table_size=100, embedding_dim=32, hashing=False + ) + }, ) embedding_concat = models.EmbeddingBagConcat( - state_dim=len(state_normalization_parameters), + state_dense_dim=len(state_normalization_parameters), 
model_feature_config=state_feature_config, - embedding_dim=8, ) dqn = models.Sequential( embedding_concat, @@ -113,15 +124,15 @@ def test_discrete_wrapper_with_id_list(self): ), ) - dqn_with_preprocessor = DiscreteDqnWithPreprocessorWithIdList( + dqn_with_preprocessor = DiscreteDqnWithPreprocessor( dqn, state_preprocessor, state_feature_config ) action_names = ["L", "R"] - wrapper = DiscreteDqnPredictorWrapperWithIdList( + wrapper = DiscreteDqnPredictorWrapper( dqn_with_preprocessor, action_names, state_feature_config ) - input_prototype = dqn_with_preprocessor.input_prototype() - output_action_names, q_values = wrapper(*input_prototype) + input_prototype = dqn_with_preprocessor.input_prototype()[0] + output_action_names, q_values = wrapper(input_prototype) self.assertEqual(action_names, output_action_names) self.assertEqual(q_values.shape, (1, 2)) @@ -130,17 +141,19 @@ def test_discrete_wrapper_with_id_list(self): for config in state_feature_config.id_list_feature_configs } state_id_list_features = { - feature_id_to_name[k]: v for k, v in input_prototype[1].items() + feature_id_to_name[k]: v + for k, v in input_prototype.id_list_features.items() } + state_with_presence = input_prototype.float_features_with_presence expected_output = dqn( rlt.FeatureData( - float_features=state_preprocessor(*input_prototype[0]), - id_list_features=state_id_list_features, + float_features=state_preprocessor(*state_with_presence), + id_list_features_raw=state_id_list_features, ) ) self.assertTrue((expected_output == q_values).all()) - def test_parametric_wrapper(self): + def test_parametric_wrapper(self) -> None: state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)} action_normalization_parameters = {i: _cont_norm() for i in range(5, 9)} state_preprocessor = Preprocessor(state_normalization_parameters, False) @@ -169,7 +182,7 @@ def test_parametric_wrapper(self): ) self.assertTrue((expected_output == q_value).all()) - def test_actor_wrapper(self): + def test_actor_wrapper(self) -> None: state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)} action_normalization_parameters = { i: _cont_action_norm() for i in range(101, 105) @@ -184,12 +197,13 @@ def test_actor_wrapper(self): sizes=[16], activations=["relu"], ) + state_feature_config = rlt.ModelFeatureConfig() actor_with_preprocessor = ActorWithPreprocessor( - actor, state_preprocessor, postprocessor + actor, state_preprocessor, state_feature_config, postprocessor ) - wrapper = ActorPredictorWrapper(actor_with_preprocessor) - input_prototype = actor_with_preprocessor.input_prototype() - action = wrapper(*input_prototype) + wrapper = ActorPredictorWrapper(actor_with_preprocessor, state_feature_config) + input_prototype = actor_with_preprocessor.input_prototype()[0] + action, _log_prob = wrapper(input_prototype) self.assertEqual(action.shape, (1, len(action_normalization_parameters))) expected_output = postprocessor( @@ -197,22 +211,54 @@ def test_actor_wrapper(self): ) self.assertTrue((expected_output == action).all()) - def test_seq2slate_wrapper(self): + def validate_seq2slate_output(self, expected_output, wrapper_output) -> None: + ranked_per_seq_probs, ranked_tgt_out_idx = ( + expected_output.ranked_per_seq_probs, + expected_output.ranked_tgt_out_idx, + ) + # -2 to offset padding symbol and decoder start symbol + ranked_tgt_out_idx -= 2 + + self.assertTrue(ranked_per_seq_probs == wrapper_output[0]) + self.assertTrue(torch.all(torch.eq(ranked_tgt_out_idx, wrapper_output[1]))) + + def 
test_seq2slate_transformer_frechet_sort_wrapper(self) -> None: + self._test_seq2slate_wrapper( + model="transformer", output_arch=Seq2SlateOutputArch.FRECHET_SORT + ) + + def test_seq2slate_transformer_autoregressive_wrapper(self) -> None: + self._test_seq2slate_wrapper( + model="transformer", output_arch=Seq2SlateOutputArch.AUTOREGRESSIVE + ) + + def _test_seq2slate_wrapper( + self, model: str, output_arch: Seq2SlateOutputArch + ) -> None: state_normalization_parameters = {i: _cont_norm() for i in range(1, 5)} candidate_normalization_parameters = {i: _cont_norm() for i in range(101, 106)} state_preprocessor = Preprocessor(state_normalization_parameters, False) candidate_preprocessor = Preprocessor(candidate_normalization_parameters, False) - seq2slate = Seq2SlateTransformerNet( - state_dim=len(state_normalization_parameters), - candidate_dim=len(candidate_normalization_parameters), - num_stacked_layers=2, - num_heads=2, - dim_model=10, - dim_feedforward=10, - max_src_seq_len=10, - max_tgt_seq_len=4, - encoder_only=False, - ) + candidate_size = 10 + slate_size = 4 + + seq2slate = None + if model == "transformer": + seq2slate = Seq2SlateTransformerNet( + state_dim=len(state_normalization_parameters), + candidate_dim=len(candidate_normalization_parameters), + num_stacked_layers=2, + num_heads=2, + dim_model=10, + dim_feedforward=10, + max_src_seq_len=candidate_size, + max_tgt_seq_len=slate_size, + output_arch=output_arch, + temperature=0.5, + ) + else: + raise NotImplementedError(f"model type {model} is unknown") + seq2slate_with_preprocessor = Seq2SlateWithPreprocessor( seq2slate, state_preprocessor, candidate_preprocessor, greedy=True ) @@ -222,45 +268,174 @@ def test_seq2slate_wrapper(self): state_input_prototype, candidate_input_prototype, ) = seq2slate_with_preprocessor.input_prototype() - ret_val = wrapper(state_input_prototype, candidate_input_prototype) + wrapper_output = wrapper(state_input_prototype, candidate_input_prototype) - preprocessed_state = state_preprocessor( - state_input_prototype[0], state_input_prototype[1] + ranking_input = seq2slate_input_prototype_to_ranking_input( + state_input_prototype, + candidate_input_prototype, + state_preprocessor, + candidate_preprocessor, ) - preprocessed_candidates = candidate_preprocessor( - candidate_input_prototype[0].view( - 1 * seq2slate.max_src_seq_len, len(candidate_normalization_parameters) - ), - candidate_input_prototype[1].view( - 1 * seq2slate.max_src_seq_len, len(candidate_normalization_parameters) - ), - ).view(1, seq2slate.max_src_seq_len, -1) - src_src_mask = torch.ones( - 1, seq2slate.max_src_seq_len, seq2slate.max_src_seq_len + expected_output = seq2slate( + ranking_input, + mode=Seq2SlateMode.RANK_MODE, + tgt_seq_len=candidate_size, + greedy=True, + ) + self.validate_seq2slate_output(expected_output, wrapper_output) + + # Test Seq2SlatePredictorWrapper can handle variable lengths of inputs + random_length = random.randint(candidate_size + 1, candidate_size * 2) + ( + state_input_prototype, + candidate_input_prototype, + ) = change_cand_size_slate_ranking( + seq2slate_with_preprocessor.input_prototype(), random_length ) - ranking_input = rlt.PreprocessedRankingInput.from_tensors( - state=preprocessed_state, - src_seq=preprocessed_candidates, - src_src_mask=src_src_mask, + wrapper_output = wrapper(state_input_prototype, candidate_input_prototype) + + ranking_input = seq2slate_input_prototype_to_ranking_input( + state_input_prototype, + candidate_input_prototype, + state_preprocessor, + candidate_preprocessor, ) 
expected_output = seq2slate( ranking_input, mode=Seq2SlateMode.RANK_MODE, - tgt_seq_len=seq2slate.max_tgt_seq_len, + tgt_seq_len=random_length, greedy=True, ) - ranked_tgt_out_probs, ranked_tgt_out_idx = ( - expected_output.ranked_tgt_out_probs, - expected_output.ranked_tgt_out_idx, + self.validate_seq2slate_output(expected_output, wrapper_output) + + def test_determinantal_point_process_wrapper_linear_kernel(self) -> None: + # The second and third items are identical (similarity=1) + # So the second and third items have strong repulsion + # The expected ranked indices should be 2, 0, 1 + quality_scores = torch.tensor( + [ + [4], + [5], + [8], + ] ) - ranked_tgt_out_probs = torch.prod( - torch.gather( - ranked_tgt_out_probs, 2, ranked_tgt_out_idx.unsqueeze(-1) - ).squeeze(), - -1, + + feature_vectors = torch.tensor([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]]) + + wrapper = DeterminantalPointProcessPredictorWrapper( + alpha=1.0, kernel=Kernel.Linear + ) + ranked_idx, determinants, L = wrapper(quality_scores, feature_vectors) + npt.assert_array_almost_equal(ranked_idx, [2, 0, 1]) + npt.assert_array_almost_equal( + determinants, + torch.tensor( + [ + [16, 25, 64], + [1024, 0, wrapper.MIN_VALUE], + [wrapper.MIN_VALUE, 0, wrapper.MIN_VALUE], + ] + ), + ) + npt.assert_array_almost_equal(L, [[16, 0, 0], [0, 25, 40], [0, 40, 64]]) + + # Test shorter rerank positions + # All three items have different categories, so the final order is 1, 2, 0 if + # rerank the full slate. If rerank_topk=1, then the expected order is 1, 0, 2 + quality_scores = torch.tensor( + [ + [4], + [6], + [5], + ] + ) + feature_vectors = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]) + wrapper = DeterminantalPointProcessPredictorWrapper( + alpha=1.0, kernel=Kernel.Linear, rerank_topk=1 + ) + ranked_idx, _, _ = wrapper(quality_scores, feature_vectors) + npt.assert_array_almost_equal(ranked_idx, [1, 0, 2]) + + def test_determinantal_point_process_wrapper_rbf_kernel(self) -> None: + # The second and third items are identical (similarity=1) + # So the second and third items have strong repulsion + # The expected ranked indices should be 2, 0, 1 + quality_scores = torch.tensor( + [ + [4], + [5], + [8], + ] ) - # -2 to offset padding symbol and decoder start symbol - ranked_tgt_out_idx -= 2 - self.assertTrue(ranked_tgt_out_probs == ret_val[0]) - self.assertTrue(torch.all(torch.eq(ret_val[1], ranked_tgt_out_idx))) + feature_vectors = torch.tensor([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1]]) + + wrapper = DeterminantalPointProcessPredictorWrapper( + alpha=1.0, kernel=Kernel.RBF + ) + ranked_idx, determinants, L = wrapper(quality_scores, feature_vectors) + npt.assert_array_almost_equal(ranked_idx, [2, 0, 1]) + npt.assert_array_almost_equal( + determinants, + torch.tensor( + [ + [16, 25, 64], + [885.41766159, 0, wrapper.MIN_VALUE], + [wrapper.MIN_VALUE, 0, wrapper.MIN_VALUE], + ] + ), + decimal=3, + ) + npt.assert_array_almost_equal( + L, [[16, 7.3576, 11.7721], [7.3576, 25, 40], [11.7721, 40, 64]], decimal=3 + ) + + # Test shorter rerank positions + # All three items have different categories, so the final order is 1, 2, 0 if + # rerank the full slate. 
If rerank_topk=1, then the expected order is 1, 0, 2 + quality_scores = torch.tensor( + [ + [4], + [6], + [5], + ] + ) + feature_vectors = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]) + wrapper = DeterminantalPointProcessPredictorWrapper( + alpha=1.0, kernel=Kernel.RBF, rerank_topk=1 + ) + ranked_idx, _, _ = wrapper(quality_scores, feature_vectors) + npt.assert_array_almost_equal(ranked_idx, [1, 0, 2]) + + def test_reward_model_wrapper(self) -> None: + ids = range(1, 5) + state_normalization_parameters = {i: _cont_norm() for i in ids} + state_preprocessor = Preprocessor(state_normalization_parameters, False) + action_dim = 2 + model = models.FullyConnectedDQN( + state_dim=len(state_normalization_parameters), + action_dim=action_dim, + sizes=[16], + activations=["relu"], + ) + state_feature_config = rlt.ModelFeatureConfig( + float_feature_infos=[ + rlt.FloatFeatureInfo(feature_id=i, name=f"feat_{i}") for i in ids + ] + ) + model_with_preprocessor = DiscreteDqnWithPreprocessor( + model, state_preprocessor, state_feature_config + ) + action_names = ["L", "R"] + wrapper = BanditRewardNetPredictorWrapper( + model_with_preprocessor, action_names, state_feature_config + ) + input_prototype = model_with_preprocessor.input_prototype()[0] + reward_predictions, mask = wrapper(input_prototype) + self.assertEqual(reward_predictions.shape, (1, 2)) + + state_with_presence = input_prototype.float_features_with_presence + expected_output = model( + rlt.FeatureData(state_preprocessor(*state_with_presence)) + ) + self.assertTrue((expected_output == reward_predictions).all()) diff --git a/reagent/test/preprocessing/preprocessing_util.py b/reagent/test/preprocessing/preprocessing_util.py index 98c842805..751371b78 100644 --- a/reagent/test/preprocessing/preprocessing_util.py +++ b/reagent/test/preprocessing/preprocessing_util.py @@ -17,7 +17,7 @@ CONTINUOUS_ACTION_FEATURE_ID_2 = 10 -def id_to_type(id): +def id_to_type(id) -> str: if id == BINARY_FEATURE_ID or id == BINARY_FEATURE_ID_2: return "BINARY" if id == BOXCOX_FEATURE_ID: diff --git a/reagent/test/preprocessing/test_postprocessing.py b/reagent/test/preprocessing/test_postprocessing.py index b853993ad..eb44d696d 100644 --- a/reagent/test/preprocessing/test_postprocessing.py +++ b/reagent/test/preprocessing/test_postprocessing.py @@ -6,16 +6,13 @@ import numpy.testing as npt import torch from reagent.preprocessing.identify_types import CONTINUOUS_ACTION, DO_NOT_PREPROCESS -from reagent.preprocessing.normalization import ( - NormalizationData, - NormalizationParameters, -) +from reagent.preprocessing.normalization import NormalizationParameters from reagent.preprocessing.postprocessor import Postprocessor from reagent.preprocessing.preprocessor import Preprocessor class TestPostprocessing(unittest.TestCase): - def test_continuous_action(self): + def test_continuous_action(self) -> None: normalization_params = { i: NormalizationParameters( feature_type=CONTINUOUS_ACTION, min_value=-5.0 * i, max_value=10.0 * i @@ -32,7 +29,7 @@ def test_continuous_action(self): y = postprocessor(preprocessor(x, presence)) npt.assert_allclose(x, y, rtol=1e-4) - def test_do_not_preprocess(self): + def test_do_not_preprocess(self) -> None: normalization_params = { i: NormalizationParameters(feature_type=DO_NOT_PREPROCESS) for i in range(1, 5) diff --git a/reagent/test/preprocessing/test_preprocessing.py b/reagent/test/preprocessing/test_preprocessing.py index 4b80e0671..36829b6e4 100644 --- a/reagent/test/preprocessing/test_preprocessing.py +++ 
b/reagent/test/preprocessing/test_preprocessing.py @@ -7,7 +7,7 @@ import numpy.testing as npt import six import torch -from reagent.preprocessing import identify_types, normalization +from reagent.preprocessing import identify_types, normalization, transforms from reagent.preprocessing.identify_types import BOXCOX, CONTINUOUS, ENUM from reagent.preprocessing.normalization import ( MISSING_VALUE, @@ -18,11 +18,10 @@ from reagent.test.base.utils import NumpyFeatureProcessor from reagent.test.preprocessing.preprocessing_util import ( BOXCOX_FEATURE_ID, - CONTINUOUS_ACTION_FEATURE_ID, - CONTINUOUS_ACTION_FEATURE_ID_2, + CONTINUOUS_FEATURE_ID, ENUM_FEATURE_ID, - PROBABILITY_FEATURE_ID, id_to_type, + PROBABILITY_FEATURE_ID, read_data, ) from scipy import special @@ -37,7 +36,7 @@ def _feature_type_override(self, feature_id): return identify_types.CONTINUOUS_ACTION return None - def test_prepare_normalization_and_normalize(self): + def test_prepare_normalization_and_normalize(self) -> None: feature_value_map = read_data() normalization_parameters = {} @@ -151,7 +150,7 @@ def test_prepare_normalization_and_normalize(self): else: raise NotImplementedError() - def test_normalize_dense_matrix_enum(self): + def test_normalize_dense_matrix_enum(self) -> None: normalization_parameters = { 1: NormalizationParameters( identify_types.ENUM, @@ -193,7 +192,7 @@ def test_normalize_dense_matrix_enum(self): normalized_feature_matrix, ) - def test_persistency(self): + def test_persistency(self) -> None: feature_value_map = read_data() normalization_parameters = {} for name, values in feature_value_map.items(): @@ -236,7 +235,7 @@ def test_persistency(self): getattr(normalization_parameters[k], field), ) - def test_quantile_boundary_logic(self): + def test_quantile_boundary_logic(self) -> None: """Test quantile logic when feaure value == quantile boundary.""" input = torch.tensor([[0.0], [80.0], [100.0]]) norm_params = NormalizationParameters( @@ -257,7 +256,7 @@ def test_quantile_boundary_logic(self): self.assertTrue(np.all(np.isclose(output, expected_output))) - def test_preprocessing_network(self): + def test_preprocessing_network(self) -> None: feature_value_map = read_data() normalization_parameters = {} @@ -321,7 +320,7 @@ def test_preprocessing_network(self): ), ) - def test_type_override(self): + def test_type_override_binary(self) -> None: # Take a feature that should be identified as probability feature_value_map = read_data() probability_values = feature_value_map[PROBABILITY_FEATURE_ID] @@ -331,3 +330,79 @@ def test_type_override(self): "_", probability_values, feature_type=identify_types.BINARY ) self.assertEqual(parameter.feature_type, "BINARY") + + def test_type_override_continuous(self) -> None: + # Take a feature that should be identified as BOXCOX + feature_value_map = read_data() + probability_values = feature_value_map[BOXCOX_FEATURE_ID] + + # And ask for a CONTINUOUS anyways + parameter = normalization.identify_parameter( + "_", probability_values, feature_type=identify_types.CONTINUOUS + ) + self.assertEqual(parameter.feature_type, "CONTINUOUS") + + def test_type_override_boxcox(self) -> None: + # Take a feature that should be identified as CONTINUOUS + feature_value_map = read_data() + probability_values = feature_value_map[CONTINUOUS_FEATURE_ID] + + # And ask for a BOXCOX anyways + parameter = normalization.identify_parameter( + "_", probability_values, feature_type=identify_types.BOXCOX + ) + self.assertEqual(parameter.feature_type, "BOXCOX") + + def test_type_override_quantile(self) -> 
None: + # Take a feature that should be identified as CONTINUOUS + feature_value_map = read_data() + probability_values = feature_value_map[BOXCOX_FEATURE_ID] + + # And ask for a QUANTILE anyways + parameter = normalization.identify_parameter( + "_", probability_values, feature_type=identify_types.QUANTILE + ) + self.assertEqual(parameter.feature_type, "QUANTILE") + + def test_columnvector(self) -> None: + def format_input2output(test_keys, inp_form): + test_data = {} + for ky in test_keys: + test_data[ky] = inp_form + test_instance = transforms.ColumnVector(test_keys) + output_data = test_instance(test_data) + return output_data + + test_values = range(0, 5) + test_keys = [] + for k in test_values: + test_keys.append(str(k)) + + # Possible input formats: tuple, list, torch.Tensor + for n_len in [1, 3]: + test_input_forms = [ + (np.ones((n_len, 1)), 0), + n_len * [1], + torch.tensor(np.ones((n_len, 1))), + ] + for inp_form in test_input_forms: + output_data = format_input2output(test_keys, inp_form) + for ky in test_keys: + self.assertEqual(output_data[ky].shape[0], n_len) + self.assertEqual(output_data[ky].shape[1], 1) + + # Input as in row format + test_data = {} + for ky in test_keys: + test_data[ky] = (np.ones((1, 3)), 0) + test_instance = transforms.ColumnVector(test_keys) + with self.assertRaisesRegex(AssertionError, "Invalid shape for key"): + output_data = test_instance(test_data) + + # Input as unimplemented type (number) + test_data = {} + for ky in test_keys: + test_data[ky] = 1 + test_instance = transforms.ColumnVector(test_keys) + with self.assertRaisesRegex(NotImplementedError, "value of type"): + output_data = test_instance(test_data) diff --git a/reagent/test/preprocessing/test_sparse_to_dense.py b/reagent/test/preprocessing/test_sparse_to_dense.py index 8bd1ce873..702e090dc 100644 --- a/reagent/test/preprocessing/test_sparse_to_dense.py +++ b/reagent/test/preprocessing/test_sparse_to_dense.py @@ -12,7 +12,7 @@ class TestSparseToDense(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.sorted_features = [1, 2, 5, 4] self.str_keyed_sparse_data = [ {}, @@ -45,7 +45,7 @@ def setUp(self): ] ) - def test_int_key_sparse_to_dense(self): + def test_int_key_sparse_to_dense(self) -> None: # int keys, set_missing_value_to_zero=False processor = PythonSparseToDenseProcessor( self.sorted_features, set_missing_value_to_zero=False @@ -54,7 +54,7 @@ def test_int_key_sparse_to_dense(self): assert torch.allclose(value, self.expected_value_missing) assert torch.all(presence == self.expected_presence_missing) - def test_str_key_sparse_to_dense(self): + def test_str_key_sparse_to_dense(self) -> None: # string keys, set_missing_value_to_zero=True processor = StringKeySparseToDenseProcessor( self.sorted_features, set_missing_value_to_zero=True diff --git a/reagent/test/preprocessing/test_transforms.py b/reagent/test/preprocessing/test_transforms.py new file mode 100644 index 000000000..f3d779c34 --- /dev/null +++ b/reagent/test/preprocessing/test_transforms.py @@ -0,0 +1,875 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
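# --- Editor's aside (not part of the patch): a minimal sketch of the coercion that
# test_columnvector above exercises. The helper below is hypothetical and only illustrates
# the behaviour expected of transforms.ColumnVector: (value, presence) tuples, plain lists,
# and tensors all become an (n, 1) column tensor, while row-shaped input is rejected.
import numpy as np
import torch

def to_column_vector(value):
    if isinstance(value, tuple):  # (value, presence) pair: keep only the value part
        value = value[0]
    if isinstance(value, list):
        value = torch.tensor(value, dtype=torch.float).reshape(-1, 1)
    elif isinstance(value, np.ndarray):
        value = torch.from_numpy(value).float()
    if value.ndim != 2 or value.shape[1] != 1:
        raise AssertionError(f"Invalid shape for key: {tuple(value.shape)}")
    return value

assert to_column_vector([1, 1, 1]).shape == (3, 1)
assert to_column_vector((np.ones((3, 1)), 0)).shape == (3, 1)
assert to_column_vector(torch.ones(3, 1)).shape == (3, 1)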
+import os +import unittest +from copy import deepcopy +from typing import List +from unittest.mock import Mock, patch + +import numpy as np +import reagent.core.types as rlt +import torch +from reagent.preprocessing import transforms +from reagent.preprocessing.types import InputColumn + + +class TestTransforms(unittest.TestCase): + def setUp(self) -> None: + # add custom compare function for torch.Tensor + self.addTypeEqualityFunc(torch.Tensor, TestTransforms.are_torch_tensor_equal) + + @staticmethod + def are_torch_tensor_equal(tensor_0, tensor_1, msg=None) -> bool: + if torch.all(tensor_0 == tensor_1): + return True + raise TestTransforms.failureException("non-equal pytorch tensors found", msg) + + def assertTorchTensorEqual(self, tensor_0, tensor_1, msg=None) -> None: + self.assertIsInstance( + tensor_0, torch.Tensor, "first argument is not a torch.Tensor" + ) + self.assertIsInstance( + tensor_1, torch.Tensor, "second argument is not a torch.Tensor" + ) + self.assertEqual(tensor_0, tensor_1, msg=msg) + + def assertDictComparatorEqual(self, a, b, cmp) -> None: + """ + assertDictEqual() compares args with ==. This allows caller to override + comparator via cmp argument. + """ + self.assertIsInstance(a, dict, "First argument is not a dictionary") + self.assertIsInstance(b, dict, "Second argument is not a dictionary") + self.assertSequenceEqual(a.keys(), b.keys()) + + for key in a.keys(): + self.assertTrue(cmp(a[key], b[key]), msg=f"Different at key {key}") + + def assertDictOfTensorEqual(self, a, b) -> None: + """ + Helper method to compare dicts with values of type Tensor. + + Cannot use assertDictEqual when values are of type Tensor since + tensor1 == tensor2 results in a tensor of bools. Use this instead. + """ + + def _tensor_cmp(a, b): + return torch.all(a == b) + + self.assertDictComparatorEqual(a, b, _tensor_cmp) + + def test_Compose(self) -> None: + t1, t2 = Mock(return_value=2), Mock(return_value=3) + compose = transforms.Compose(t1, t2) + data = 1 + out = compose(data) + t1.assert_called_with(1) + t2.assert_called_with(2) + self.assertEqual(out, 3) + + def test_ValuePresence(self) -> None: + vp = transforms.ValuePresence() + d1 = {"a": 1, "a_presence": 0, "b": 2} + d2 = {"a_presence": 0, "b": 2} + o1 = vp(d1) + o2 = vp(d2) + self.assertEqual(o1, {"a": (1, 0), "b": 2}) + self.assertEqual(o2, {"a_presence": 0, "b": 2}) + + def test_AppendExtraValues(self) -> None: + keys = ["a"] + av = transforms.ExtractValue(keys) + data = { + "a": [ + (torch.tensor([1, 2]), torch.tensor([True, True])), + (torch.tensor([3, 4]), torch.BoolTensor([False, False])), + ] + } + out = av(data) + expected = {"a": [torch.tensor([1, 2]), torch.tensor([3, 4])]} + self.assertEqual(out["a"][0], expected["a"][0]) + self.assertEqual(out["a"][1], expected["a"][1]) + with self.assertRaisesRegex(Exception, "Extra key - a cannot be an empty list"): + empty_list = {"a": []} + out = av(empty_list) + + def test_MaskByPresence(self) -> None: + keys = ["a", "b"] + mbp = transforms.MaskByPresence(keys) + data = { + "a": (torch.tensor(1), torch.tensor(0)), + "b": (torch.tensor(3), torch.tensor(1)), + } + expected = {"a": torch.tensor(0), "b": torch.tensor(3)} + out = mbp(data) + self.assertEqual(out["a"], expected["a"]) + self.assertEqual(out["b"], expected["b"]) + with self.assertRaisesRegex(Exception, "Not valid value"): + data2 = { + "a": torch.tensor(1), + "b": (torch.tensor(3), torch.tensor(1)), + } + out = mbp(data2) + with self.assertRaisesRegex(Exception, "Unmatching value shape"): + data3 = { + "a": 
(torch.tensor(1), torch.tensor([0, 2])), + "b": (torch.tensor(3), torch.tensor(1)), + } + out = mbp(data3) + + def test_StackDenseFixedSizeArray(self) -> None: + # happy path: value is type Tensor; check cast to float + value = torch.eye(4).to(dtype=torch.int) # start as int + data = {"a": value} + # pyre-fixme[6]: For 1st param expected `List[str]` but got `_dict_keys[str, + # typing.Any]`. + out = transforms.StackDenseFixedSizeArray(data.keys(), size=4)(data) + expected = {"a": value.to(dtype=torch.float)} + self.assertDictOfTensorEqual(out, expected) + self.assertTrue(out["a"].dtype == torch.float, msg="dtype != float") + + # happy path: value is list w/ elements type Tuple[Tensor, Tensor] + presence = torch.tensor([[1, 1, 1], [1, 1, 1]]) + data = { + "a": [ + (torch.tensor([[0, 0, 0], [1, 1, 1]]), presence), + (torch.tensor([[2, 2, 2], [3, 3, 3]]), presence), + ], + "b": [ + (torch.tensor([[3, 3, 3], [2, 2, 2]]), presence), + (torch.tensor([[1, 1, 1], [0, 0, 0]]), presence), + ], + } + # pyre-fixme[6]: For 1st param expected `List[str]` but got `_dict_keys[str, + # List[Tuple[typing.Any, typing.Any]]]`. + out = transforms.StackDenseFixedSizeArray(data.keys(), size=3)(data) + expected = { + "a": torch.tile(torch.arange(4).view(-1, 1).to(dtype=torch.float), (1, 3)), + "b": torch.tile( + torch.arange(4).flip(dims=(0,)).view(-1, 1).to(dtype=torch.float), + (1, 3), + ), + } + self.assertDictOfTensorEqual(out, expected) + + # raise for tensor wrong shape + with self.assertRaisesRegex(ValueError, "Wrong shape"): + sdf = transforms.StackDenseFixedSizeArray(["a"], size=3) + sdf({"a": torch.ones(2)}) + + # raise for tensor wrong ndim + with self.assertRaisesRegex(ValueError, "Wrong shape"): + sdf = transforms.StackDenseFixedSizeArray(["a"], size=2) + sdf({"a": torch.zeros(2, 2, 2)}) + + def test_Lambda(self) -> None: + lam = transforms.Lambda(keys=["a", "b", "c"], fn=lambda x: x + 1) + data = {"a": 1, "b": 2, "c": 3, "d": 4} + out = lam(data) + self.assertEqual(out, {"a": 2, "b": 3, "c": 4, "d": 4}) + + def test_SelectValuePresenceColumns(self) -> None: + block = np.reshape(np.arange(16), (4, 4)) + data = {"a": (block, block + 16), "c": 1} + svp = transforms.SelectValuePresenceColumns( + source="a", dest="b", indices=[1, 2] + ) + out = svp(data) + expected = { + "a": (block, block + 16), + "b": (block[:, [1, 2]], block[:, [1, 2]] + 16), + "c": 1, + } + for key in ["a", "b"]: + # pyre-fixme[16]: Item `int` of `Union[int, Tuple[typing.Any, + # typing.Any]]` has no attribute `__getitem__`. + self.assertTrue(np.all(out[key][0] == expected[key][0])) + # pyre-fixme[16]: Item `int` of `Union[int, Tuple[typing.Any, + # typing.Any]]` has no attribute `__getitem__`. 
+ self.assertTrue(np.all(out[key][1] == expected[key][1])) + self.assertEqual(out["c"], expected["c"]) + + @patch("reagent.preprocessing.transforms.Preprocessor") + def test_DenseNormalization(self, Preprocessor) -> None: + a_out = torch.tensor(1) + b_out = torch.tensor(2) + c_out = torch.tensor(3.0) + preprocessor = Mock(side_effect=[a_out, b_out]) + Preprocessor.return_value = preprocessor + # of form (value, presence) + a_in = (torch.tensor([1, torch.nan, 2]), torch.tensor([1, 1, 1])) + b_in = (torch.tensor([1, 2, torch.nan]), torch.tensor([0, 1, 1])) + data = {"a": a_in, "b": b_in, "c": c_out} + normalization_data = Mock() + dn = transforms.DenseNormalization( + keys=["a", "b"], normalization_data=normalization_data + ) + out = dn(data) + self.assertEqual(out["a"], a_out.float()) + self.assertEqual(out["b"], b_out.float()) + # ensure unnamed variables not changed + self.assertEqual(out["c"], c_out) + in_1, in_2 = [call_args.args for call_args in preprocessor.call_args_list] + + self.assertEqual(torch.stack(in_1), torch.stack(a_in)) + self.assertEqual(torch.stack(in_2), torch.stack(b_in)) + + @patch("reagent.preprocessing.transforms.Preprocessor") + def test_FixedLengthSequenceDenseNormalization(self, Preprocessor) -> None: + # test key mapping + rand_gen = torch.Generator().manual_seed(0) + + a_batch_size = 2 + b_batch_size = 3 + + a_dim = 13 + b_dim = 11 + + expected_length = 7 + + a_T = ( + torch.rand( + a_batch_size * expected_length, a_dim, generator=rand_gen + ), # value + torch.rand(a_batch_size * expected_length, a_dim, generator=rand_gen) + > 0.5, # presence + ) + b_T = ( + torch.rand( + b_batch_size * expected_length, b_dim, generator=rand_gen + ), # value + torch.rand(b_batch_size * expected_length, b_dim, generator=rand_gen) + > 0.5, # presence + ) + + # expected values after preprocessing + a_TN = a_T[0] + 1 + b_TN = b_T[0] + 1 + + # copy used for checking inplace modifications + a_TN_copy = deepcopy(a_TN) + b_TN_copy = deepcopy(b_TN) + + a_offsets = torch.arange(0, a_batch_size * expected_length, expected_length) + b_offsets = torch.arange(0, b_batch_size * expected_length, expected_length) + + a_in = {1: (a_offsets, a_T), 2: 0} + b_in = {1: (b_offsets, b_T), 2: 1} + + c_out = 2 + + # input data + data = {"a": a_in, "b": b_in, "c": c_out} + + # copy used for checking inplace modifications + data_copy = deepcopy(data) + + Preprocessor.return_value = Mock(side_effect=[a_TN, b_TN]) + + flsdn = transforms.FixedLengthSequenceDenseNormalization( + keys=["a", "b"], + sequence_id=1, + normalization_data=Mock(), + ) + + out = flsdn(data) + + # data is modified inplace and returned + self.assertEqual(data, out) + + # check preprocessor number of calls + self.assertEqual(Preprocessor.call_count, 1) + self.assertEqual(Preprocessor.return_value.call_count, 2) + + # result contains original keys and new processed keys + self.assertSetEqual(set(out.keys()), {"a", "b", "c", "a:1", "b:1"}) + + def assertKeySeqIdItem(item_0, item_1): + self.assertTorchTensorEqual(item_0[0], item_1[0]) + self.assertTorchTensorEqual(item_0[1][0], item_1[1][0]) + self.assertTorchTensorEqual(item_0[1][1], item_1[1][1]) + + # original keys should keep their value + for key in ("a", "b"): + # no change in the output + # pyre-fixme[16]: Item `int` of `Union[Dict[int, + # typing.Union[typing.Tuple[torch.Tensor, typing.Tuple[typing.Any, + # typing.Any]], int]], int]` has no attribute `__getitem__`. 
+ assertKeySeqIdItem(out[key][1], data_copy[key][1]) + + # no change in untouched seq id + # pyre-fixme[16]: Item `int` of `Union[Dict[int, + # typing.Union[typing.Tuple[torch.Tensor, typing.Tuple[typing.Any, + # typing.Any]], int]], int]` has no attribute `__getitem__`. + self.assertEqual(out[key][2], data_copy[key][2]) + + # no change in the non-processed key + self.assertEqual(out["c"], data_copy["c"]) + + # check output shapes + self.assertListEqual( + [*out["a:1"].shape], [a_batch_size, expected_length, a_dim] + ) + self.assertListEqual( + [*out["b:1"].shape], [b_batch_size, expected_length, b_dim] + ) + + # no inplace change in normalized tensors + self.assertTorchTensorEqual(a_TN, a_TN_copy) + self.assertTorchTensorEqual(b_TN, b_TN_copy) + + # check if output has been properly slated + self.assertTorchTensorEqual( + out["a:1"], a_TN.view(a_batch_size, expected_length, a_dim) + ) + self.assertTorchTensorEqual( + out["b:1"], b_TN.view(b_batch_size, expected_length, b_dim) + ) + + def test_IDListFeatures_and_IDScoreListFeatures(self) -> None: + ID_LIST_FEATURE_ID = 0 + ID_SCORE_LIST_FEATURE_ID = 1 + EMBEDDING_TABLE_SIZE = 100 + EMBEDDING_DIM = 128 + data = { + InputColumn.STATE_ID_LIST_FEATURES: { + ID_LIST_FEATURE_ID: [ + torch.tensor([0, 3]), + torch.tensor([0, 1, 2, 3, 4]), + ] + }, + InputColumn.NEXT_STATE_ID_LIST_FEATURES: { + ID_LIST_FEATURE_ID: [ + torch.tensor([0, 1]), + torch.tensor([0, 1]), + ] + }, + InputColumn.STATE_ID_SCORE_LIST_FEATURES: { + ID_SCORE_LIST_FEATURE_ID: [ + torch.tensor([0, 3]), + torch.tensor([0, 1, 2, 3, 4]), + torch.tensor([0.0, 0.1, 0.2, 0.3, 0.4]), + ] + }, + InputColumn.NEXT_STATE_ID_SCORE_LIST_FEATURES: { + ID_SCORE_LIST_FEATURE_ID: [ + torch.tensor([0, 2]), + torch.tensor([0, 1, 2]), + torch.tensor([0.0, 0.1, 0.2]), + ] + }, + } + state_feature_config = rlt.ModelFeatureConfig( + float_feature_infos=[], + id_list_feature_configs=[ + rlt.IdListFeatureConfig( + name=f"id_list_feature_{ID_LIST_FEATURE_ID}", + feature_id=ID_LIST_FEATURE_ID, + id_mapping_name=f"id_list_feature_table_{ID_LIST_FEATURE_ID}", + ) + ], + id_score_list_feature_configs=[ + rlt.IdScoreListFeatureConfig( + name=f"id_score_list_feature_{ID_SCORE_LIST_FEATURE_ID}", + feature_id=ID_SCORE_LIST_FEATURE_ID, + id_mapping_name=f"id_score_list_feature_table_{ID_SCORE_LIST_FEATURE_ID}", + ) + ], + id_mapping_config={ + f"id_list_feature_table_{ID_LIST_FEATURE_ID}": rlt.IdMappingConfig( + embedding_table_size=EMBEDDING_TABLE_SIZE, + embedding_dim=EMBEDDING_DIM, + hashing=False, + ), + f"id_score_list_feature_table_{ID_SCORE_LIST_FEATURE_ID}": rlt.IdMappingConfig( + embedding_table_size=EMBEDDING_TABLE_SIZE, + embedding_dim=EMBEDDING_DIM, + hashing=False, + ), + }, + ) + + state_id_list_columns: List[str] = [ + InputColumn.STATE_ID_LIST_FEATURES, + InputColumn.NEXT_STATE_ID_LIST_FEATURES, + ] + state_id_score_list_columns: List[str] = [ + InputColumn.STATE_ID_SCORE_LIST_FEATURES, + InputColumn.NEXT_STATE_ID_SCORE_LIST_FEATURES, + ] + + transform_id_list_features = transforms.IDListFeatures( + keys=state_id_list_columns, + # both columns share the same feature configs + feature_configs=[ + state_feature_config.id_list_feature_configs, + state_feature_config.id_list_feature_configs, + ], + id_mapping_configs=[ + state_feature_config.id_mapping_config, + state_feature_config.id_mapping_config, + ], + ) + + transform_id_score_list_features = transforms.IDScoreListFeatures( + keys=state_id_score_list_columns, + feature_configs=[ + state_feature_config.id_score_list_feature_configs, + 
state_feature_config.id_score_list_feature_configs, + ], + id_mapping_configs=[ + state_feature_config.id_mapping_config, + state_feature_config.id_mapping_config, + ], + ) + out = transform_id_score_list_features( + transform_id_list_features(deepcopy(data)) + ) + + for column in [ + InputColumn.STATE_ID_SCORE_LIST_FEATURES, + InputColumn.NEXT_STATE_ID_SCORE_LIST_FEATURES, + ]: + self.assertEqual( + out[column].keys(), + [x.name for x in state_feature_config.id_score_list_feature_configs], + ) + assert torch.allclose( + out[column].values(), + data[column][ID_SCORE_LIST_FEATURE_ID][1], + ) + assert torch.allclose( + out[column].weights(), + data[column][ID_SCORE_LIST_FEATURE_ID][2], + ) + assert torch.allclose( + # KeyedJaggedTensor's offset has one more element at the end + out[column].offsets()[:-1], + data[column][ID_SCORE_LIST_FEATURE_ID][0], + ) + + for column in [ + InputColumn.STATE_ID_LIST_FEATURES, + InputColumn.NEXT_STATE_ID_LIST_FEATURES, + ]: + self.assertEqual( + out[column].keys(), + [x.name for x in state_feature_config.id_list_feature_configs], + ) + assert torch.allclose( + out[column].values(), + data[column][ID_LIST_FEATURE_ID][1], + ) + assert torch.allclose( + # KeyedJaggedTensor's offset has one more element at the end + out[column].offsets()[:-1], + data[column][ID_LIST_FEATURE_ID][0], + ) + + def test_OneHotActions(self) -> None: + keys = ["0", "1", "2"] + num_actions = 2 + oha = transforms.OneHotActions(keys, num_actions) + data_in = {"0": torch.tensor(0), "1": torch.tensor(1), "2": torch.tensor(2)} + data_out = oha(data_in) + expected = { + "0": torch.tensor([1, 0]), + "1": torch.tensor([0, 1]), + "2": torch.tensor([0, 0]), + } + self.assertDictOfTensorEqual(data_out, expected) + + def test_FixedLengthSequences(self) -> None: + # of form {sequence_id: (offsets, Tuple(Tensor, Tensor))} + a_T = (torch.tensor([0, 1]), torch.tensor([1, 0])) + b_T = (torch.tensor([1, 1]), torch.tensor([1, 0])) + a_in = {1: (torch.tensor([0]), a_T)} + b_in = {1: (torch.tensor([0]), b_T)} + fls1 = transforms.FixedLengthSequences(keys=["a", "b"], sequence_id=1) + fls2 = transforms.FixedLengthSequences( + keys=["a", "b"], sequence_id=1, expected_length=2 + ) + fls3 = transforms.FixedLengthSequences( + keys=["a", "b"], sequence_id=1, expected_length=2, to_keys=["to_a", "to_b"] + ) + o1 = fls1({"a": a_in, "b": b_in}) + o2 = fls2({"a": a_in, "b": b_in}) + o3 = fls3({"a": a_in, "b": b_in}) + # o1, o2 should contain only keys + self.assertEqual(len(o1), 2) + self.assertEqual(len(o2), 2) + # o3 should contain keys & to_keys + self.assertEqual(len(o3), 4) + # ensure `T` is set back to key + self.assertTrue( + torch.all(o1["a"][0] == a_T[0]) and torch.all(o1["a"][1] == a_T[1]) + ) + self.assertTrue( + torch.all(o1["b"][0] == b_T[0]) and torch.all(o1["b"][1] == b_T[1]) + ) + self.assertTrue( + torch.all(o2["a"][0] == a_T[0]) and torch.all(o2["a"][1] == a_T[1]) + ) + self.assertTrue( + torch.all(o2["b"][0] == b_T[0]) and torch.all(o2["b"][1] == b_T[1]) + ) + # ensure keys not changed + self.assertEqual(o3["a"], a_in) + self.assertEqual(o3["b"], b_in) + # # ensure `T` is set to_key + self.assertTrue( + torch.all(o3["to_a"][0] == a_T[0]) and torch.all(o3["to_a"][1] == a_T[1]) + ) + self.assertTrue( + torch.all(o3["to_b"][0] == b_T[0]) and torch.all(o3["to_b"][1] == b_T[1]) + ) + # Testing assertions in the call method + # TODO testing assert regarding offsets length compared to value + c_T = (torch.tensor([0, 1]), torch.tensor([1, 1])) + with self.assertRaisesRegex(ValueError, "Expected all 
batches"): + # wrong expected length + fls = transforms.FixedLengthSequences( + keys=["a", "b"], sequence_id=1, expected_length=1 + ) + fls({"a": a_in, "b": b_in}) + with self.assertRaisesRegex(ValueError, "Expected all batches"): + # wrong offsets + c_in = {1: (torch.tensor([0, 1]), c_T)} + fls = transforms.FixedLengthSequences(keys=["a", "b", "c"], sequence_id=1) + fls({"a": a_in, "b": b_in, "c": c_in}) + # Testing assertion in the constructor + with self.assertRaises(AssertionError): + transforms.FixedLengthSequences( + keys=["a", "b"], sequence_id=1, to_keys=["to_a"] + ) + + def test_SlateView(self) -> None: + # Unit tests for the SlateView class + sv = transforms.SlateView(keys=["a"], slate_size=-1) + + # GIVEN a SlateView with keys = ["a"] + # WHEN data is passed in under a key "b" + # THEN the value for "b" should not be unflattened since the key "b" is not in SlateView.keys! + sv.slate_size = 1 + sv.keys = ["a"] + a_in = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]]) + b_in = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]]) + data = {"a": a_in, "b": b_in} + out = sv(data) + self.assertEqual(out["b"].shape, torch.Size([4, 2])) + self.assertTorchTensorEqual(out["b"], b_in) + + # GIVEN slate.size = 1 and keys = ["a", "b"] + # WHEN input shape is [4, 2] + # THEN output shape should be [4, 1, 2] for all keys + sv.slate_size = 1 + sv.keys = ["a", "b"] + a_in = torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]]) + b_in = torch.tensor([[10, 20], [30, 40], [50, 60], [70, 80]]) + data = {"a": a_in, "b": b_in} + out = sv(data) + a_out_412 = torch.tensor([[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]]) + b_out_412 = torch.tensor([[[10, 20]], [[30, 40]], [[50, 60]], [[70, 80]]]) + self.assertEqual(out["a"].shape, torch.Size([4, 1, 2])) + self.assertEqual(out["b"].shape, torch.Size([4, 1, 2])) + self.assertDictOfTensorEqual({"a": a_out_412, "b": b_out_412}, out) + + # GIVEN a SlateView with keys = ["a", "b"] + # WHEN data is passed in missing one or more of those keys + # THEN a KeyError should be raised + sv.keys = ["a", "b"] + a_in = torch.tensor([[1, 2], [3, 4]]) + c_in = torch.tensor([[1, 2], [3, 4]]) + data = {"a": a_in, "c": c_in} + with self.assertRaises(KeyError): + out = sv(data) + + # GIVEN a SlateView with keys = ["a"] + # WHEN data is passed in that is of an invalid shape + # THEN a RuntimeError should be raised + sv.slate_size = 2 + sv.keys = ["a"] + a_in = torch.tensor([[1, 2]]) + data = {"a": a_in} + with self.assertRaises(RuntimeError): + out = sv(data) + + # GIVEN slate.size = 2 and keys = ["a"] + # WHEN input shape is [4, 3] + # THEN output shape should be [2, 2, 3] + sv.slate_size = 2 + sv.keys = ["a"] + a_in = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + data = {"a": a_in} + out = sv(data) + a_out_223 = torch.tensor([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) + self.assertEqual(out["a"].shape, torch.Size([2, 2, 3])) + self.assertDictOfTensorEqual({"a": a_out_223}, out) + + def _check_same_keys(self, dict_a, dict_b) -> None: + self.assertSetEqual(set(dict_a.keys()), set(dict_b.keys())) + + def test_AppendConstant(self) -> None: + data = { + "a": torch.tensor([[9.0, 4.5], [3.4, 3.9]]), + "b": torch.tensor([[9.2, 2.5], [4.4, 1.9]]), + } + t = transforms.AppendConstant(["a"], const=1.5) + t_data = t(data) + self._check_same_keys(data, t_data) + self.assertTorchTensorEqual(data["b"], t_data["b"]) + self.assertTorchTensorEqual( + t_data["a"], torch.tensor([[1.5, 9.0, 4.5], [1.5, 3.4, 3.9]]) + ) + + def test_UnsqueezeRepeat(self) -> None: + data = { + "a": 
torch.tensor([[9.0, 4.5], [3.4, 3.9]]), + "b": torch.tensor([[9.2, 2.5], [4.4, 1.9]]), + } + t = transforms.UnsqueezeRepeat(["a"], dim=1, num_repeat=3) + t_data = t(data) + self._check_same_keys(data, t_data) + self.assertTorchTensorEqual(data["b"], t_data["b"]) + self.assertTorchTensorEqual( + t_data["a"], + torch.tensor( + [ + [[9.0, 4.5], [9.0, 4.5], [9.0, 4.5]], + [[3.4, 3.9], [3.4, 3.9], [3.4, 3.9]], + ] + ), + ) + + def test_OuterProduct(self) -> None: + data = { + "a": torch.tensor([[9.0, 4.5], [3.4, 3.9]]), + "b": torch.tensor([[9.2, 2.5], [4.4, 1.9]]), + } + t = transforms.OuterProduct("a", "b", "ab") + t_data = t(data) + # make sure original data was left unmodified + self.assertTorchTensorEqual(data["a"], t_data["a"]) + self.assertTorchTensorEqual(data["b"], t_data["b"]) + + expected_out = torch.empty(2, 4) + for i in range(2): + expected_out[i, :] = torch.outer( + data["a"][i, :].flatten(), data["b"][i, :].flatten() + ).flatten() + self.assertTorchTensorEqual(t_data["ab"], expected_out) + + def test_GetEye(self) -> None: + data = { + "a": torch.tensor([[9.0, 4.5], [3.4, 3.9]]), + "b": torch.tensor([[9.2, 2.5], [4.4, 1.9]]), + } + t = transforms.GetEye("c", 4) + t_data = t(data) + # make sure original data was left unmodified + self.assertTorchTensorEqual(data["a"], t_data["a"]) + self.assertTorchTensorEqual(data["b"], t_data["b"]) + + self.assertTorchTensorEqual(t_data["c"], torch.eye(4)) + + def test_Cat(self) -> None: + data = { + "a": torch.tensor([[9.0, 4.5], [3.4, 3.9]]), + "b": torch.tensor([[9.2, 2.5], [4.4, 1.9]]), + } + t = transforms.Cat(["a", "b"], "c", 0) + t_data = t(data) + # make sure original data was left unmodified + self.assertTorchTensorEqual(data["a"], t_data["a"]) + self.assertTorchTensorEqual(data["b"], t_data["b"]) + + self.assertTorchTensorEqual(t_data["c"], torch.cat([data["a"], data["b"]], 0)) + + def test_Rename(self) -> None: + data = { + "a": torch.tensor([[9.0, 4.5], [3.4, 3.9]]), + "b": torch.tensor([[9.2, 2.5], [4.4, 1.9]]), + } + t = transforms.Rename(["a"], ["aa"]) + t_data = t(data) + # make sure original data was left unmodified + self.assertTorchTensorEqual(data["b"], t_data["b"]) + + self.assertTorchTensorEqual(t_data["aa"], data["a"]) + + def test_Filter(self) -> None: + data = { + "a": torch.tensor([[9.0, 4.5], [3.4, 3.9]]), + "b": torch.tensor([[9.2, 2.5], [4.4, 1.9]]), + } + t = transforms.Filter(keep_keys=["a"]) + t_data = t(data) + # make sure original data was left unmodified + self.assertTorchTensorEqual(data["a"], t_data["a"]) + self.assertListEqual(sorted(t_data.keys()), ["a"]) + + t = transforms.Filter(remove_keys=["b"]) + t_data = t(data) + # make sure original data was left unmodified + self.assertTorchTensorEqual(data["a"], t_data["a"]) + self.assertListEqual(sorted(t_data.keys()), ["a"]) + + def test_broadcast_tensors_for_cat(self) -> None: + tensors = [ + torch.tensor([[3.0, 4.0, 5.0], [4.5, 4.3, 5.9]]), + torch.tensor([[2.0, 9.0, 8.0]]), + ] + broadcasted_tensors = transforms._broadcast_tensors_for_cat(tensors, 1) + self.assertTorchTensorEqual(broadcasted_tensors[0], tensors[0]) + self.assertTorchTensorEqual(broadcasted_tensors[1], tensors[1].repeat(2, 1)) + + tensors = [ + torch.empty(10, 2, 5), + torch.empty(1, 2, 3), + ] + broadcasted_tensors = transforms._broadcast_tensors_for_cat(tensors, -1) + self.assertEqual(tuple(broadcasted_tensors[0].shape), (10, 2, 5)) + self.assertEqual(tuple(broadcasted_tensors[1].shape), (10, 2, 3)) + + tensors = [ + torch.empty(1, 1, 5), + torch.empty(10, 3, 1), + ] + broadcasted_tensors = 
transforms._broadcast_tensors_for_cat(tensors, 1) + self.assertEqual(tuple(broadcasted_tensors[0].shape), (10, 1, 5)) + self.assertEqual(tuple(broadcasted_tensors[1].shape), (10, 3, 5)) + + tensors = [ + torch.empty(1, 3, 5, 1), + torch.empty(10, 3, 1, 4), + ] + broadcasted_tensors = transforms._broadcast_tensors_for_cat(tensors, 0) + self.assertEqual(tuple(broadcasted_tensors[0].shape), (1, 3, 5, 4)) + self.assertEqual(tuple(broadcasted_tensors[1].shape), (10, 3, 5, 4)) + + def test_ToDtype(self) -> None: + data = { + "a": torch.tensor([[9.0, 4.5], [3.4, 3.9]]).float(), + "b": torch.tensor([[9.2, 2.5], [4.4, 1.9]]).double(), + "c": torch.tensor([[9.1, 2.3], [4.2, 1.4]]).double(), + } + t = transforms.ToDtype({"b": torch.float}) + t_data = t(data) + + # make sure all values was left unmodified + self.assertTorchTensorEqual(data["a"], t_data["a"]) + self.assertTorchTensorEqual(data["b"], t_data["b"]) + self.assertTorchTensorEqual(data["c"], t_data["c"]) + + # mase sure the data types are correct + self.assertEqual(t_data["a"].dtype, torch.float) # was float, didn't change + self.assertEqual(t_data["b"].dtype, torch.float) # changed from double to float + self.assertEqual(t_data["c"].dtype, torch.double) # was double, didn't change + + def test_VarLengthSequences(self) -> None: + seq_id = 1 + + # of form {sequence_id: (offsets, Tuple(Tensor, Tensor))} + a_T = ( + torch.tensor([[0, 1, 3], [2, 3, 7], [4, 5, 8], [2, 3, 1]]).float(), + torch.ones(4, 3), + ) + b_T = ( + torch.tensor( + [[1, 1, 3], [2, 2, 5], [3, 3, 1], [9, 10, 4], [5, 1, 7]] + ).float(), + torch.ones(5, 3), + ) + a_in = {seq_id: (torch.tensor([0, 1]), a_T)} + b_in = {seq_id: (torch.tensor([0, 4]), b_T)} + vls1 = transforms.VarLengthSequences(keys=["a", "b"], sequence_id=seq_id) + vls2 = transforms.VarLengthSequences( + keys=["a", "b"], sequence_id=seq_id, to_keys=["a_to_key", "b_to_key"] + ) + vls3 = transforms.VarLengthSequences( + keys=["a", "b"], + sequence_id=seq_id, + to_keys=["a_to_key", "b_to_key"], + to_keys_item_presence=["a_to_key_item_presence", "b_to_key_item_presence"], + ) + o1 = vls1({"a": a_in, "b": b_in}) + o2 = vls2({"a": a_in, "b": b_in}) + o3 = vls3({"a": a_in, "b": b_in}) + + self.assertSetEqual( + set(o1.keys()), {"a", "b", "a_item_presence", "b_item_presence"} + ) + self.assertSetEqual( + set(o2.keys()), + { + "a", + "b", + "a_to_key", + "b_to_key", + "a_to_key_item_presence", + "b_to_key_item_presence", + }, + ) + self.assertSetEqual( + set(o3.keys()), + { + "a", + "b", + "a_to_key", + "b_to_key", + "a_to_key_item_presence", + "b_to_key_item_presence", + }, + ) + + # ensure input values are not changed if output keys are different + self.assertEqual(o2["a"], a_in) + self.assertEqual(o2["b"], b_in) + self.assertEqual(o3["a"], a_in) + self.assertEqual(o3["b"], b_in) + + # Testing assertion in the constructor + with self.assertRaises(AssertionError): + transforms.VarLengthSequences( + keys=["a", "b"], sequence_id=1, to_keys=["to_a"] + ) + + # output shapes are correct + self.assertTupleEqual(tuple(o1["a"][0].shape), (6, 3)) + self.assertTupleEqual(tuple(o1["b"][0].shape), (8, 3)) + + # output values are correct + expected_a = torch.tensor( + [ + [ + [0, 1, 3], + [0, 0, 0], + [0, 0, 0], + [2, 3, 7], + [4, 5, 8], + [2, 3, 1], + ], + ] + ).float() + expected_b = torch.tensor( + [ + [ + [1, 1, 3], + [2, 2, 5], + [3, 3, 1], + [9, 10, 4], + [5, 1, 7], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + ], + ] + ).float() + self.assertEqual(o1["a"][0], expected_a) + self.assertEqual(o1["b"][0], expected_b) + + # item presence 
tensors are correct + extected_a_item_presence = torch.tensor([[1, 0, 0], [1, 1, 1]]).float() + extected_b_item_presence = torch.tensor([[1, 1, 1, 1], [1, 0, 0, 0]]).float() + self.assertEqual(o1["a_item_presence"], extected_a_item_presence) + self.assertEqual(o1["b_item_presence"], extected_b_item_presence) diff --git a/reagent/test/preprocessing/test_type_identification.py b/reagent/test/preprocessing/test_type_identification.py index a0eeed0bb..08efa020a 100644 --- a/reagent/test/preprocessing/test_type_identification.py +++ b/reagent/test/preprocessing/test_type_identification.py @@ -16,7 +16,7 @@ class TestTypeIdentification(unittest.TestCase): - def test_identification(self): + def test_identification(self) -> None: feature_value_map = read_data() types = {} diff --git a/reagent/test/ranking/__init__.py b/reagent/test/ranking/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/test/ranking/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/test/ranking/seq2slate_utils.py b/reagent/test/ranking/seq2slate_utils.py new file mode 100644 index 000000000..ba9caf83b --- /dev/null +++ b/reagent/test/ranking/seq2slate_utils.py @@ -0,0 +1,370 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +import math +import tempfile +from itertools import permutations + +import pytorch_lightning as pl +import reagent.core.types as rlt +import torch +import torch.nn as nn +from reagent.core.parameters import Seq2SlateParameters +from reagent.core.parameters_seq2slate import LearningMethod, SimulationParameters +from reagent.core.torch_utils import gather +from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch +from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet +from reagent.optimizer.union import Optimizer__Union +from reagent.training.ranking.seq2slate_sim_trainer import Seq2SlateSimulationTrainer +from reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer +from torch.utils.data import DataLoader + + +logger = logging.getLogger(__name__) + + +MODEL_TRANSFORMER = "transformer" +ON_POLICY = "on_policy" +OFF_POLICY = "off_policy" +SIMULATION = "simulation" + + +class TSPRewardModel(nn.Module): + def forward(self, state, candidates, ranked_cities, src_src_mask, tgt_out_idx): + reward = compute_reward(ranked_cities) + # negate because we want to minimize + return -reward + + +def post_preprocess_batch(seq2slate_net, candidate_num, batch, device, epoch): + model_propensity, model_action, reward = rank_on_policy_and_eval( + seq2slate_net, batch, candidate_num, greedy=False + ) + batch = rlt.PreprocessedRankingInput.from_input( + state=batch.state.float_features, + candidates=batch.src_seq.float_features, + device=device, + action=model_action, + logged_propensities=model_propensity, + # negate because we want to minimize + slate_reward=-reward, + ) + logger.info(f"Epoch {epoch} mean on_policy reward: {torch.mean(reward)}") + logger.info(f"Epoch {epoch} mean model_propensity: {torch.mean(model_propensity)}") + return batch + + +class Seq2SlateOnPolicyTrainer(Seq2SlateTrainer): + def on_train_batch_start(self, batch, batch_idx, dataloader_idx): + new_batch = post_preprocess_batch( + self.seq2slate_net, + self.seq2slate_net.max_src_seq_len, + batch, + batch.state.float_features.device, + self.current_epoch, + ) + for attr in dir(new_batch): + if not 
callable(getattr(new_batch, attr)) and not attr.startswith("__"): + setattr(batch, attr, getattr(new_batch, attr)) + super().on_train_batch_start(batch, batch_idx, dataloader_idx) + + +def create_trainer( + seq2slate_net, + learning_method, + batch_size, + learning_rate, + policy_gradient_interval, + device, +): + if learning_method == ON_POLICY: + seq2slate_params = Seq2SlateParameters( + on_policy=True, learning_method=LearningMethod.REINFORCEMENT_LEARNING + ) + trainer_cls = Seq2SlateOnPolicyTrainer + elif learning_method == OFF_POLICY: + seq2slate_params = Seq2SlateParameters( + on_policy=False, + learning_method=LearningMethod.REINFORCEMENT_LEARNING, + ) + trainer_cls = Seq2SlateTrainer + elif learning_method == SIMULATION: + temp_reward_model_path = tempfile.mkstemp(suffix=".pt")[1] + reward_model = torch.jit.script(TSPRewardModel()) + torch.jit.save(reward_model, temp_reward_model_path) + seq2slate_params = Seq2SlateParameters( + on_policy=True, + learning_method=LearningMethod.SIMULATION, + simulation=SimulationParameters( + reward_name_weight={"tour_length": 1.0}, + reward_name_power={"tour_length": 1.0}, + reward_name_path={"tour_length": temp_reward_model_path}, + ), + ) + trainer_cls = Seq2SlateSimulationTrainer + + param_dict = { + "seq2slate_net": seq2slate_net, + "params": seq2slate_params, + "policy_optimizer": Optimizer__Union.default(lr=learning_rate), + "print_interval": 1, + "policy_gradient_interval": policy_gradient_interval, + } + return trainer_cls(**param_dict) + + +def create_seq2slate_net( + model_str, + candidate_num, + candidate_dim, + hidden_size, + output_arch, + temperature, + device, +): + if model_str == MODEL_TRANSFORMER: + return Seq2SlateTransformerNet( + state_dim=1, + candidate_dim=candidate_dim, + num_stacked_layers=2, + num_heads=2, + dim_model=hidden_size, + dim_feedforward=hidden_size, + max_src_seq_len=candidate_num, + max_tgt_seq_len=candidate_num, + output_arch=output_arch, + temperature=temperature, + state_embed_dim=1, + ).to(device) + else: + raise NotImplementedError(f"unknown model type {model_str}") + + +FIX_CANDIDATES = None + + +@torch.no_grad() +def create_batch( + batch_size, + candidate_num, + candidate_dim, + device, + learning_method, + diverse_input=False, +): + # fake state, we only use candidates + state = torch.zeros(batch_size, 1) + if diverse_input: + # city coordinates are spread in [0, 4] + candidates = torch.randint( + 5, (batch_size, candidate_num, candidate_dim) + ).float() + else: + # every training data has the same nodes as the input cities + global FIX_CANDIDATES + if FIX_CANDIDATES is None or FIX_CANDIDATES.shape != ( + batch_size, + candidate_num, + candidate_dim, + ): + candidates = torch.randint( + 5, (batch_size, candidate_num, candidate_dim) + ).float() + candidates[1:] = candidates[0] + FIX_CANDIDATES = candidates + else: + candidates = FIX_CANDIDATES + + batch_dict = { + "state": state, + "candidates": candidates, + "device": device, + } + if learning_method == OFF_POLICY: + # using data from a uniform sampling policy + action = torch.stack([torch.randperm(candidate_num) for _ in range(batch_size)]) + propensity = torch.full((batch_size, 1), 1.0 / math.factorial(candidate_num)) + ranked_cities = gather(candidates, action) + reward = compute_reward(ranked_cities) + batch_dict["action"] = action + batch_dict["logged_propensities"] = propensity + batch_dict["slate_reward"] = -reward + + batch = rlt.PreprocessedRankingInput.from_input(**batch_dict) + logger.info("Generate one batch") + return batch + + +def 
create_train_and_test_batches( + batch_size, + candidate_num, + candidate_dim, + device, + num_train_batches, + learning_method, + diverse_input, +): + train_batches = [ + create_batch( + batch_size, + candidate_num, + candidate_dim, + device, + learning_method, + diverse_input=diverse_input, + ) + for _ in range(num_train_batches) + ] + + if diverse_input: + test_batch = create_batch( + batch_size, + candidate_num, + candidate_dim, + device, + learning_method, + diverse_input=diverse_input, + ) + else: + test_batch = train_batches[0] + + return train_batches, test_batch + + +def compute_reward(ranked_cities): + assert len(ranked_cities.shape) == 3 + ranked_cities_offset = torch.roll(ranked_cities, shifts=1, dims=1) + return ( + torch.sqrt(((ranked_cities_offset - ranked_cities) ** 2).sum(-1)) + .sum(-1) + .unsqueeze(1) + ) + + +def compute_best_reward(input_cities): + batch_size, candidate_num, _ = input_cities.shape + all_perm = torch.tensor( + list(permutations(torch.arange(candidate_num), candidate_num)) + ) + res = [ + compute_reward(gather(input_cities, perm.repeat(batch_size, 1))) + for perm in all_perm + ] + # res shape: batch_size, num_perm + res = torch.cat(res, dim=1) + best_possible_reward = torch.min(res, dim=1).values + best_possible_reward_mean = torch.mean(best_possible_reward) + return best_possible_reward_mean + + +@torch.no_grad() +def rank_on_policy( + model, batch: rlt.PreprocessedRankingInput, tgt_seq_len: int, greedy: bool +): + model.eval() + rank_output = model( + batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=tgt_seq_len, greedy=greedy + ) + ranked_slate_prob = rank_output.ranked_per_seq_probs + ranked_order = rank_output.ranked_tgt_out_idx - 2 + model.train() + return ranked_slate_prob, ranked_order + + +@torch.no_grad() +def rank_on_policy_and_eval( + seq2slate_net, batch: rlt.PreprocessedRankingInput, tgt_seq_len: int, greedy: bool +): + model_propensity, model_action = rank_on_policy( + seq2slate_net, batch, tgt_seq_len, greedy=greedy + ) + ranked_cities = gather(batch.src_seq.float_features, model_action) + reward = compute_reward(ranked_cities) + return model_propensity, model_action, reward + + +def run_seq2slate_tsp( + model_str, + batch_size, + epochs, + candidate_num, + num_batches, + hidden_size, + diverse_input, + learning_rate, + expect_reward_threshold, + learning_method, + policy_gradient_interval, + device, +): + pl.seed_everything(0) + + candidate_dim = 2 + eval_sample_size = 1 + + train_batches, test_batch = create_train_and_test_batches( + batch_size, + candidate_num, + candidate_dim, + device, + num_batches, + learning_method, + diverse_input, + ) + best_test_possible_reward = compute_best_reward(test_batch.src_seq.float_features) + + seq2slate_net = create_seq2slate_net( + model_str, + candidate_num, + candidate_dim, + hidden_size, + Seq2SlateOutputArch.AUTOREGRESSIVE, + 1.0, + device, + ) + + trainer = create_trainer( + seq2slate_net, + learning_method, + batch_size, + learning_rate, + policy_gradient_interval, + device, + ) + + def evaluate(): + best_test_reward = torch.full((batch_size,), 1e9).to(device) + for _ in range(eval_sample_size): + model_propensities, _, reward = rank_on_policy_and_eval( + seq2slate_net.to(device), test_batch, candidate_num, greedy=True + ) + best_test_reward = torch.where( + reward < best_test_reward, reward, best_test_reward + ) + logger.info( + f"Test mean model_propensities {torch.mean(model_propensities)}, " + f"Test mean reward: {torch.mean(best_test_reward)}, " + f"best possible reward 
{best_test_possible_reward}" + ) + if torch.any(torch.isnan(model_propensities)): + raise Exception("Model propensities contain NaNs") + ratio = torch.mean(best_test_reward) / best_test_possible_reward + return ratio < expect_reward_threshold, ratio + + evaluate() + + training_data = DataLoader(train_batches, collate_fn=lambda x: x[0]) + pl_trainer = pl.Trainer( + max_epochs=epochs, + gpus=None if device == torch.device("cpu") else 1, + logger=False, + ) + pl_trainer.fit(trainer, training_data) + + result, ratio = evaluate() + + assert result, ( + f"Test failed because it did not reach expected test reward, " + f"{ratio} > {expect_reward_threshold}." + ) diff --git a/reagent/test/ranking/test_seq2slate_inference.py b/reagent/test/ranking/test_seq2slate_inference.py new file mode 100644 index 000000000..474c83f84 --- /dev/null +++ b/reagent/test/ranking/test_seq2slate_inference.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging +import random +import unittest + +import numpy as np +import torch +from reagent.core.parameters import NormalizationData, NormalizationParameters +from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch +from reagent.models.seq2slate import Seq2SlateTransformerModel, Seq2SlateTransformerNet +from reagent.prediction.predictor_wrapper import Seq2SlateWithPreprocessor +from reagent.preprocessing.identify_types import DO_NOT_PREPROCESS +from reagent.preprocessing.preprocessor import Preprocessor + + +logger = logging.getLogger(__name__) + + +class TestSeq2SlateInference(unittest.TestCase): + def setUp(self): + np.random.seed(0) + random.seed(0) + torch.manual_seed(0) + + def test_seq2slate_scriptable(self): + state_dim = 2 + candidate_dim = 3 + num_stacked_layers = 2 + num_heads = 2 + dim_model = 128 + dim_feedforward = 128 + candidate_size = 8 + slate_size = 8 + output_arch = Seq2SlateOutputArch.AUTOREGRESSIVE + temperature = 1.0 + greedy_serving = True + + # test the raw Seq2Slate model is script-able + seq2slate = Seq2SlateTransformerModel( + state_dim=state_dim, + candidate_dim=candidate_dim, + num_stacked_layers=num_stacked_layers, + num_heads=num_heads, + dim_model=dim_model, + dim_feedforward=dim_feedforward, + max_src_seq_len=candidate_size, + max_tgt_seq_len=slate_size, + output_arch=output_arch, + temperature=temperature, + ) + seq2slate_scripted = torch.jit.script(seq2slate) + + seq2slate_net = Seq2SlateTransformerNet( + state_dim=state_dim, + candidate_dim=candidate_dim, + num_stacked_layers=num_stacked_layers, + num_heads=num_heads, + dim_model=dim_model, + dim_feedforward=dim_feedforward, + max_src_seq_len=candidate_size, + max_tgt_seq_len=slate_size, + output_arch=output_arch, + temperature=temperature, + ) + + state_normalization_data = NormalizationData( + dense_normalization_parameters={ + 0: NormalizationParameters(feature_type=DO_NOT_PREPROCESS), + 1: NormalizationParameters(feature_type=DO_NOT_PREPROCESS), + } + ) + + candidate_normalization_data = NormalizationData( + dense_normalization_parameters={ + 5: NormalizationParameters(feature_type=DO_NOT_PREPROCESS), + 6: NormalizationParameters(feature_type=DO_NOT_PREPROCESS), + 7: NormalizationParameters(feature_type=DO_NOT_PREPROCESS), + } + ) + state_preprocessor = Preprocessor( + state_normalization_data.dense_normalization_parameters, False + ) + candidate_preprocessor = Preprocessor( + candidate_normalization_data.dense_normalization_parameters, False + ) + + # test seq2slate with preprocessor is 
scriptable + seq2slate_with_preprocessor = Seq2SlateWithPreprocessor( + seq2slate_net.eval(), + state_preprocessor, + candidate_preprocessor, + greedy_serving, + ) + torch.jit.script(seq2slate_with_preprocessor) diff --git a/reagent/test/ranking/test_seq2slate_off_policy.py b/reagent/test/ranking/test_seq2slate_off_policy.py new file mode 100644 index 000000000..6de394905 --- /dev/null +++ b/reagent/test/ranking/test_seq2slate_off_policy.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging +import random +import unittest + +import numpy as np +import pytest +import torch +from reagent.test.ranking.seq2slate_utils import ( + MODEL_TRANSFORMER, + OFF_POLICY, + run_seq2slate_tsp, +) + + +logger = logging.getLogger(__name__) + + +class TestSeq2SlateOffPolicy(unittest.TestCase): + def setUp(self): + np.random.seed(0) + random.seed(0) + torch.manual_seed(0) + + def test_seq2slate_transformer_off_policy_simple_tsp(self): + """ + Solve Traveling Salesman Problem. Data comes from one set of nodes (cities). + """ + device = torch.device("cpu") + batch_size = 4096 + epochs = 1 + num_batches = 100 + expect_reward_threshold = 1.02 + hidden_size = 32 + num_candidates = 6 + diverse_input = False + learning_rate = 0.001 + learning_method = OFF_POLICY + policy_gradient_interval = 1 + run_seq2slate_tsp( + MODEL_TRANSFORMER, + batch_size, + epochs, + num_candidates, + num_batches, + hidden_size, + diverse_input, + learning_rate, + expect_reward_threshold, + learning_method, + policy_gradient_interval, + device, + ) + + @pytest.mark.seq2slate_long + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_seq2slate_transformer_off_policy_hard_tsp(self): + """ + Solve Traveling Salesman Problem. Data comes from multiple sets of cities. + """ + device = torch.device("cuda") + batch_size = 4096 + epochs = 3 + num_batches = 300 + expect_reward_threshold = 1.02 + hidden_size = 32 + num_candidates = 4 + diverse_input = True + learning_rate = 0.001 + learning_method = OFF_POLICY + policy_gradient_interval = 20 + run_seq2slate_tsp( + MODEL_TRANSFORMER, + batch_size, + epochs, + num_candidates, + num_batches, + hidden_size, + diverse_input, + learning_rate, + expect_reward_threshold, + learning_method, + policy_gradient_interval, + device, + ) diff --git a/reagent/test/ranking/test_seq2slate_on_policy.py b/reagent/test/ranking/test_seq2slate_on_policy.py new file mode 100644 index 000000000..190dca734 --- /dev/null +++ b/reagent/test/ranking/test_seq2slate_on_policy.py @@ -0,0 +1,373 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
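# --- Editor's aside (not part of the patch): a worked instance of the tour-length "reward"
# that compute_reward in seq2slate_utils.py produces for these TSP tests. The city
# coordinates below are assumed numbers chosen for a clean result: rolling the ranked cities
# by one position and summing pairwise Euclidean distances closes the tour back to its start.
import torch

ranked_cities = torch.tensor([[[0.0, 0.0], [0.0, 3.0], [4.0, 3.0]]])  # one tour of 3 cities
rolled = torch.roll(ranked_cities, shifts=1, dims=1)
tour_length = torch.sqrt(((rolled - ranked_cities) ** 2).sum(-1)).sum(-1).unsqueeze(1)
# legs: (4,3)->(0,0) = 5, (0,0)->(0,3) = 3, (0,3)->(4,3) = 4, so the tour length is 12
assert torch.allclose(tour_length, torch.tensor([[12.0]]))
# The batch-creation code negates this value (slate_reward=-reward), so shorter tours score higher.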
+import itertools +import logging +import random +import unittest +from collections import defaultdict +from itertools import permutations + +import numpy as np +import pytest +import reagent.core.types as rlt +import torch +import torch.nn.functional as F +from parameterized import parameterized +from reagent.model_utils.seq2slate_utils import ( + DECODER_START_SYMBOL, + mask_logits_by_idx, + per_symbol_to_per_seq_log_probs, + per_symbol_to_per_seq_probs, + pytorch_decoder_mask, + Seq2SlateMode, + Seq2SlateOutputArch, + subsequent_mask, +) +from reagent.test.ranking.seq2slate_utils import ( + create_batch, + create_seq2slate_net, + MODEL_TRANSFORMER, + ON_POLICY, + rank_on_policy, + run_seq2slate_tsp, +) + + +logger = logging.getLogger(__name__) + + +output_arch_list = [ + Seq2SlateOutputArch.FRECHET_SORT, + Seq2SlateOutputArch.AUTOREGRESSIVE, +] +temperature_list = [1.0, 2.0] + + +class TestSeq2SlateOnPolicy(unittest.TestCase): + def setUp(self): + np.random.seed(0) + random.seed(0) + torch.manual_seed(0) + + def test_pytorch_decoder_mask(self): + batch_size = 3 + src_seq_len = 4 + num_heads = 2 + + memory = torch.randn(batch_size, src_seq_len, num_heads) + tgt_in_idx = torch.tensor([[1, 2, 3], [1, 4, 2], [1, 5, 4]]).long() + tgt_tgt_mask, tgt_src_mask = pytorch_decoder_mask(memory, tgt_in_idx, num_heads) + + expected_tgt_tgt_mask = ( + torch.tensor( + [ + [False, True, True], + [False, False, True], + [False, False, False], + ], + ) + .unsqueeze(0) + .repeat(batch_size * num_heads, 1, 1) + ) + expected_tgt_src_mask = torch.tensor( + [ + [ + [False, False, False, False], + [True, False, False, False], + [True, True, False, False], + ], + [ + [False, False, False, False], + [False, False, True, False], + [True, False, True, False], + ], + [ + [False, False, False, False], + [False, False, False, True], + [False, False, True, True], + ], + ] + ).repeat_interleave(num_heads, dim=0) + assert torch.all(tgt_tgt_mask == expected_tgt_tgt_mask) + assert torch.all(tgt_src_mask == expected_tgt_src_mask) + + def test_per_symbol_to_per_seq_log_probs(self): + """ + Test per_symbol_to_per_seq_log_probs method + """ + batch_size = 1 + seq_len = 3 + candidate_size = seq_len + 2 + + tgt_out_idx = torch.tensor([[0, 2, 1]]) + 2 + per_symbol_log_probs = torch.randn(batch_size, seq_len, candidate_size) + per_symbol_log_probs[0, :, :2] = float("-inf") + per_symbol_log_probs[0, 1, 2] = float("-inf") + per_symbol_log_probs[0, 2, 2] = float("-inf") + per_symbol_log_probs[0, 2, 4] = float("-inf") + per_symbol_log_probs = F.log_softmax(per_symbol_log_probs, dim=2) + + expect_per_seq_log_probs = ( + per_symbol_log_probs[0, 0, 2] + + per_symbol_log_probs[0, 1, 4] + + per_symbol_log_probs[0, 2, 3] + ) + computed_per_seq_log_probs = per_symbol_to_per_seq_log_probs( + per_symbol_log_probs, tgt_out_idx + ) + np.testing.assert_allclose( + expect_per_seq_log_probs, computed_per_seq_log_probs, atol=0.001, rtol=0.0 + ) + + def test_per_symbol_to_per_seq_probs(self): + batch_size = 1 + seq_len = 3 + candidate_size = seq_len + 2 + + tgt_out_idx = torch.tensor([[0, 2, 1]]) + 2 + per_symbol_log_probs = torch.randn(batch_size, seq_len, candidate_size) + per_symbol_log_probs[0, :, :2] = float("-inf") + per_symbol_log_probs[0, 1, 2] = float("-inf") + per_symbol_log_probs[0, 2, 2] = float("-inf") + per_symbol_log_probs[0, 2, 4] = float("-inf") + per_symbol_log_probs = F.log_softmax(per_symbol_log_probs, dim=2) + per_symbol_probs = torch.exp(per_symbol_log_probs) + + expect_per_seq_probs = ( + per_symbol_probs[0, 0, 2] + * 
per_symbol_probs[0, 1, 4] + * per_symbol_probs[0, 2, 3] + ) + computed_per_seq_probs = per_symbol_to_per_seq_probs( + per_symbol_probs, tgt_out_idx + ) + np.testing.assert_allclose( + expect_per_seq_probs, computed_per_seq_probs, atol=0.001, rtol=0.0 + ) + + def test_subsequent_mask(self): + expect_mask = torch.tensor([[1, 0, 0], [1, 1, 0], [1, 1, 1]]) + mask = subsequent_mask(3, torch.device("cpu")) + assert torch.all(torch.eq(mask, expect_mask)) + + def test_mask_logits_by_idx(self): + logits = torch.tensor( + [ + [ + [1.0, 2.0, 3.0, 4.0, 5.0], + [2.0, 3.0, 4.0, 5.0, 6.0], + [3.0, 4.0, 5.0, 6.0, 7.0], + ], + [ + [5.0, 4.0, 3.0, 2.0, 1.0], + [6.0, 5.0, 4.0, 3.0, 2.0], + [7.0, 6.0, 5.0, 4.0, 3.0], + ], + ] + ) + tgt_in_idx = torch.tensor( + [[DECODER_START_SYMBOL, 2, 3], [DECODER_START_SYMBOL, 4, 3]] + ) + masked_logits = mask_logits_by_idx(logits, tgt_in_idx) + expected_logits = torch.tensor( + [ + [ + [float("-inf"), float("-inf"), 3.0, 4.0, 5.0], + [float("-inf"), float("-inf"), float("-inf"), 5.0, 6.0], + [float("-inf"), float("-inf"), float("-inf"), float("-inf"), 7.0], + ], + [ + [float("-inf"), float("-inf"), 3.0, 2.0, 1.0], + [float("-inf"), float("-inf"), 4.0, 3.0, float("-inf")], + [float("-inf"), float("-inf"), 5.0, float("-inf"), float("-inf")], + ], + ] + ) + assert torch.all(torch.eq(masked_logits, expected_logits)) + + @parameterized.expand(itertools.product(output_arch_list, temperature_list)) + @torch.no_grad() + def test_seq2slate_transformer_propensity_computation( + self, output_arch, temperature + ): + """ + Test propensity computation of seq2slate net + """ + candidate_num = 4 + candidate_dim = 2 + hidden_size = 32 + all_perm = torch.tensor( + list(permutations(torch.arange(candidate_num), candidate_num)) + ) + batch_size = len(all_perm) + device = torch.device("cpu") + + seq2slate_net = create_seq2slate_net( + MODEL_TRANSFORMER, + candidate_num, + candidate_dim, + hidden_size, + output_arch, + temperature, + device, + ) + batch = create_batch( + batch_size, + candidate_num, + candidate_dim, + device, + ON_POLICY, + diverse_input=False, + ) + batch = rlt.PreprocessedRankingInput.from_input( + state=batch.state.float_features, + candidates=batch.src_seq.float_features, + device=device, + action=all_perm, + ) + per_symbol_log_prob = seq2slate_net( + batch, mode=Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE + ).log_probs + per_seq_log_prob = seq2slate_net( + batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE + ).log_probs + per_seq_log_prob_computed = per_symbol_to_per_seq_log_probs( + per_symbol_log_prob, all_perm + 2 + ) + # probabilities of two modes should match + np.testing.assert_allclose( + per_seq_log_prob, per_seq_log_prob_computed, atol=0.00001 + ) + # probabilities of all possible permutations should sum up to 1 + np.testing.assert_allclose( + torch.sum(torch.exp(per_seq_log_prob)), 1.0, atol=0.00001 + ) + + @parameterized.expand(itertools.product(output_arch_list, temperature_list)) + def test_seq2slate_transformer_onpolicy_basic_logic(self, output_arch, temperature): + """ + Test basic logic of seq2slate on policy sampling + """ + device = torch.device("cpu") + candidate_num = 4 + candidate_dim = 2 + batch_size = 4096 + hidden_size = 32 + seq2slate_net = create_seq2slate_net( + MODEL_TRANSFORMER, + candidate_num, + candidate_dim, + hidden_size, + output_arch, + temperature, + device, + ) + batch = create_batch( + batch_size, + candidate_num, + candidate_dim, + device, + ON_POLICY, + diverse_input=False, + ) + + action_to_propensity_map = {} + action_count = 
defaultdict(int) + total_count = 0 + for i in range(50): + model_propensity, model_action = rank_on_policy( + seq2slate_net, batch, candidate_num, greedy=False + ) + for propensity, action in zip(model_propensity, model_action): + action_str = ",".join(map(str, action.numpy().tolist())) + + # Same action always leads to same propensity + if action_to_propensity_map.get(action_str) is None: + action_to_propensity_map[action_str] = float(propensity) + else: + np.testing.assert_allclose( + action_to_propensity_map[action_str], + float(propensity), + atol=0.001, + rtol=0.0, + ) + + action_count[action_str] += 1 + total_count += 1 + + logger.info(f"Finish {i} round, {total_count} data counts") + + # Check action distribution + for action_str, count in action_count.items(): + empirical_propensity = count / total_count + computed_propensity = action_to_propensity_map[action_str] + logger.info( + f"action={action_str}, empirical propensity={empirical_propensity}, " + f"computed propensity={computed_propensity}" + ) + np.testing.assert_allclose( + computed_propensity, empirical_propensity, atol=0.01, rtol=0.0 + ) + + def test_seq2slate_transformer_on_policy_simple_tsp(self): + """ + Solve Traveling Salesman Problem. Cities comes from a fixed set of nodes (cities). + Easily hit reward threshold after one batch training + """ + device = torch.device("cpu") + batch_size = 4096 + epochs = 1 + num_batches = 50 + expect_reward_threshold = 1.12 + hidden_size = 32 + num_candidates = 6 + diverse_input = False + learning_rate = 0.001 + learning_method = ON_POLICY + policy_gradient_interval = 1 + run_seq2slate_tsp( + MODEL_TRANSFORMER, + batch_size, + epochs, + num_candidates, + num_batches, + hidden_size, + diverse_input, + learning_rate, + expect_reward_threshold, + learning_method, + policy_gradient_interval, + device, + ) + + @pytest.mark.seq2slate_long + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_seq2slate_transformer_on_policy_hard_tsp(self): + """ + Solve Traveling Salesman Problem. Data comes from different sets of cities. + """ + device = torch.device("cuda") + batch_size = 4096 + epochs = 3 + num_batches = 300 + expect_reward_threshold = 1.05 + hidden_size = 32 + num_candidates = 6 + diverse_input = True + learning_rate = 0.001 + learning_method = ON_POLICY + policy_gradient_interval = 1 + run_seq2slate_tsp( + MODEL_TRANSFORMER, + batch_size, + epochs, + num_candidates, + num_batches, + hidden_size, + diverse_input, + learning_rate, + expect_reward_threshold, + learning_method, + policy_gradient_interval, + device, + ) diff --git a/reagent/test/ranking/test_seq2slate_simulation.py b/reagent/test/ranking/test_seq2slate_simulation.py new file mode 100644 index 000000000..c547b7e8d --- /dev/null +++ b/reagent/test/ranking/test_seq2slate_simulation.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import random +import unittest + +import numpy as np +import pytest +import torch +from reagent.test.ranking.seq2slate_utils import ( + MODEL_TRANSFORMER, + run_seq2slate_tsp, + SIMULATION, +) + + +class TestSeq2SlateSimulation(unittest.TestCase): + def setUp(self): + np.random.seed(0) + random.seed(0) + torch.manual_seed(0) + + def test_seq2slate_transformer_simulation_simple_tsp(self): + """ + Solve Traveling Salesman Problem. Data comes from one set of nodes (cities). 
+ """ + device = torch.device("cpu") + batch_size = 4096 + epochs = 1 + num_batches = 50 + expect_reward_threshold = 1.12 + hidden_size = 32 + num_candidates = 6 + diverse_input = False + learning_rate = 0.001 + learning_method = SIMULATION + policy_gradient_interval = 1 + run_seq2slate_tsp( + MODEL_TRANSFORMER, + batch_size, + epochs, + num_candidates, + num_batches, + hidden_size, + diverse_input, + learning_rate, + expect_reward_threshold, + learning_method, + policy_gradient_interval, + device, + ) + + @pytest.mark.seq2slate_long + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_seq2slate_transformer_simulation_hard_tsp(self): + """ + Solve Traveling Salesman Problem. Data comes from multiple sets of cities. + """ + device = torch.device("cuda") + batch_size = 4096 + epochs = 8 + num_batches = 300 + expect_reward_threshold = 1.02 + hidden_size = 32 + num_candidates = 6 + diverse_input = True + learning_rate = 0.001 + learning_method = SIMULATION + policy_gradient_interval = 1 + run_seq2slate_tsp( + MODEL_TRANSFORMER, + batch_size, + epochs, + num_candidates, + num_batches, + hidden_size, + diverse_input, + learning_rate, + expect_reward_threshold, + learning_method, + policy_gradient_interval, + device, + ) diff --git a/reagent/test/ranking/test_seq2slate_trainer.py b/reagent/test/ranking/test_seq2slate_trainer.py new file mode 100644 index 000000000..f50a96f5b --- /dev/null +++ b/reagent/test/ranking/test_seq2slate_trainer.py @@ -0,0 +1,559 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import copy +import itertools +import logging +import random +import unittest +from itertools import permutations + +import numpy as np +import numpy.testing as npt +import pytorch_lightning as pl +import reagent.core.types as rlt +import torch +from parameterized import parameterized +from reagent.core.parameters import Seq2SlateParameters +from reagent.core.parameters_seq2slate import IPSClamp, IPSClampMethod +from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch +from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet +from reagent.optimizer.union import classes, Optimizer__Union +from reagent.samplers.frechet import FrechetSort +from reagent.training.ranking.helper import ips_clamp +from reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer +from torch.utils.data import DataLoader + + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) +logger.setLevel(level=logging.INFO) + + +output_arch_list = [ + Seq2SlateOutputArch.FRECHET_SORT, + Seq2SlateOutputArch.AUTOREGRESSIVE, +] +policy_gradient_interval_list = [1, 5] +clamp_method_list = [IPSClampMethod.UNIVERSAL, IPSClampMethod.AGGRESSIVE] +clamp_max_list = [1.0, 10.0] +frechet_sort_shape_list = [0.1, 0.5, 1.0] + + +def create_trainer( + seq2slate_net, + learning_rate, + seq2slate_params, + policy_gradient_interval, +): + return Seq2SlateTrainer( + seq2slate_net=seq2slate_net, + params=seq2slate_params, + policy_optimizer=Optimizer__Union(SGD=classes["SGD"](lr=learning_rate)), + policy_gradient_interval=policy_gradient_interval, + print_interval=1, + ) + + +def create_seq2slate_transformer( + state_dim, candidate_num, candidate_dim, hidden_size, output_arch +): + return Seq2SlateTransformerNet( + state_dim=state_dim, + candidate_dim=candidate_dim, + num_stacked_layers=2, + num_heads=2, + dim_model=hidden_size, + dim_feedforward=hidden_size, + max_src_seq_len=candidate_num, + 
max_tgt_seq_len=candidate_num, + output_arch=output_arch, + temperature=0.5, + ) + + +def create_on_policy_batch( + seq2slate, batch_size, state_dim, candidate_num, candidate_dim, rank_seed, device +): + state = torch.randn(batch_size, state_dim).to(device) + candidates = torch.randn(batch_size, candidate_num, candidate_dim).to(device) + reward = torch.rand(batch_size, 1).to(device) + batch = rlt.PreprocessedRankingInput.from_input( + state=state, candidates=candidates, device=device + ) + # Reset seed here so that gradients can be replicated. + torch.manual_seed(rank_seed) + rank_output = seq2slate( + batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=candidate_num, greedy=False + ) + ranked_order = rank_output.ranked_tgt_out_idx - 2 + ranked_slate_prob = rank_output.ranked_per_seq_probs + on_policy_batch = rlt.PreprocessedRankingInput.from_input( + state=state, + candidates=candidates, + device=device, + action=ranked_order, + logged_propensities=ranked_slate_prob.detach(), + slate_reward=reward, + ) + return on_policy_batch + + +def create_off_policy_batch( + seq2slate, batch_size, state_dim, candidate_num, candidate_dim, device +): + state = torch.randn(batch_size, state_dim) + candidates = torch.randn(batch_size, candidate_num, candidate_dim) + reward = torch.rand(batch_size, 1) + action = torch.stack([torch.randperm(candidate_num) for _ in range(batch_size)]) + logged_slate_prob = torch.rand(batch_size, 1) / 1e12 + off_policy_batch = rlt.PreprocessedRankingInput.from_input( + state=state, + candidates=candidates, + device=device, + action=action, + logged_propensities=logged_slate_prob, + slate_reward=reward, + ) + return off_policy_batch + + +class TestSeq2SlateTrainer(unittest.TestCase): + def setUp(self): + np.random.seed(0) + random.seed(0) + torch.manual_seed(0) + + def assert_correct_gradient( + self, + net_with_gradient, + net_after_gradient, + policy_gradient_interval, + learning_rate, + ): + for (n_c, w_c), (n, w) in zip( + net_with_gradient.named_parameters(), net_after_gradient.named_parameters() + ): + assert n_c == n + if w_c.grad is not None: + assert torch.allclose( + w_c - policy_gradient_interval * learning_rate * w_c.grad, + w, + rtol=1e-4, + atol=2e-6, + ) + + def test_ips_clamp(self): + importance_sampling = torch.tensor([0.5, 0.3, 3.0, 10.0, 40.0]) + assert torch.all(ips_clamp(importance_sampling, None) == importance_sampling) + assert torch.all( + ips_clamp(importance_sampling, IPSClamp(IPSClampMethod.AGGRESSIVE, 3.0)) + == torch.tensor([0.5, 0.3, 3.0, 0.0, 0.0]) + ) + assert torch.all( + ips_clamp(importance_sampling, IPSClamp(IPSClampMethod.UNIVERSAL, 3.0)) + == torch.tensor([0.5, 0.3, 3.0, 3.0, 3.0]) + ) + + @parameterized.expand( + itertools.product(policy_gradient_interval_list, output_arch_list) + ) + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_seq2slate_trainer_on_policy_gpu( + self, policy_gradient_interval, output_arch + ): + self._test_seq2slate_trainer_on_policy( + policy_gradient_interval, output_arch, device=torch.device("cuda") + ) + + @parameterized.expand( + itertools.product(policy_gradient_interval_list, output_arch_list) + ) + def test_seq2slate_trainer_on_policy_cpu( + self, policy_gradient_interval, output_arch + ): + self._test_seq2slate_trainer_on_policy( + policy_gradient_interval, output_arch, device=torch.device("cpu") + ) + + def _test_seq2slate_trainer_on_policy( + self, policy_gradient_interval, output_arch, device + ): + batch_size = 32 + state_dim = 2 + candidate_num = 15 + candidate_dim = 4 + 
hidden_size = 16 + learning_rate = 1.0 + on_policy = True + rank_seed = 111 + seq2slate_params = Seq2SlateParameters(on_policy=on_policy) + + seq2slate_net = create_seq2slate_transformer( + state_dim, candidate_num, candidate_dim, hidden_size, output_arch + ).to(device) + seq2slate_net_copy = copy.deepcopy(seq2slate_net).to(device) + seq2slate_net_copy_copy = copy.deepcopy(seq2slate_net).to(device) + trainer = create_trainer( + seq2slate_net, + learning_rate, + seq2slate_params, + policy_gradient_interval, + ) + batch = create_on_policy_batch( + seq2slate_net, + batch_size, + state_dim, + candidate_num, + candidate_dim, + rank_seed, + device, + ) + training_data = DataLoader([batch], collate_fn=lambda x: x[0]) + pl_trainer = pl.Trainer( + max_epochs=policy_gradient_interval, + gpus=None if device == torch.device("cpu") else 1, + logger=False, + ) + pl_trainer.fit(trainer, training_data) + seq2slate_net = trainer.seq2slate_net.to(device) + + # manual compute gradient + torch.manual_seed(rank_seed) + rank_output = seq2slate_net_copy( + batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=candidate_num, greedy=False + ) + loss = -( + torch.mean(torch.log(rank_output.ranked_per_seq_probs) * batch.slate_reward) + ) + loss.backward() + self.assert_correct_gradient( + seq2slate_net_copy, seq2slate_net, policy_gradient_interval, learning_rate + ) + + # another way to compute gradient manually + torch.manual_seed(rank_seed) + ranked_per_seq_probs = seq2slate_net_copy_copy( + batch, mode=Seq2SlateMode.RANK_MODE, tgt_seq_len=candidate_num, greedy=False + ).ranked_per_seq_probs + loss = -( + torch.mean( + ranked_per_seq_probs + / ranked_per_seq_probs.detach() + * batch.slate_reward + ) + ) + loss.backward() + self.assert_correct_gradient( + seq2slate_net_copy_copy, + seq2slate_net, + policy_gradient_interval, + learning_rate, + ) + + @parameterized.expand( + itertools.product(policy_gradient_interval_list, output_arch_list) + ) + @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") + def test_seq2slate_trainer_off_policy_gpu( + self, policy_gradient_interval, output_arch + ): + self._test_seq2slate_trainer_off_policy( + policy_gradient_interval, output_arch, device=torch.device("cuda") + ) + + @parameterized.expand( + itertools.product(policy_gradient_interval_list, output_arch_list) + ) + def test_seq2slate_trainer_off_policy_cpu( + self, policy_gradient_interval, output_arch + ): + self._test_seq2slate_trainer_off_policy( + policy_gradient_interval, output_arch, device=torch.device("cpu") + ) + + def _test_seq2slate_trainer_off_policy( + self, policy_gradient_interval, output_arch, device + ): + batch_size = 32 + state_dim = 2 + candidate_num = 15 + candidate_dim = 4 + hidden_size = 16 + learning_rate = 1.0 + on_policy = False + seq2slate_params = Seq2SlateParameters(on_policy=on_policy) + + seq2slate_net = create_seq2slate_transformer( + state_dim, candidate_num, candidate_dim, hidden_size, output_arch + ).to(device) + seq2slate_net_copy = copy.deepcopy(seq2slate_net).to(device) + seq2slate_net_copy_copy = copy.deepcopy(seq2slate_net).to(device) + trainer = create_trainer( + seq2slate_net, + learning_rate, + seq2slate_params, + policy_gradient_interval, + ) + batch = create_off_policy_batch( + seq2slate_net, batch_size, state_dim, candidate_num, candidate_dim, device + ) + + training_data = DataLoader([batch], collate_fn=lambda x: x[0]) + pl_trainer = pl.Trainer( + max_epochs=policy_gradient_interval, + gpus=None if device == torch.device("cpu") else 1, + logger=False, + ) + 
pl_trainer.fit(trainer, training_data) + seq2slate_net = trainer.seq2slate_net.to(device) + + # manual compute gradient + ranked_per_seq_log_probs = seq2slate_net_copy( + batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE + ).log_probs + + loss = -( + torch.mean( + ranked_per_seq_log_probs + * torch.exp(ranked_per_seq_log_probs).detach() + / batch.tgt_out_probs + * batch.slate_reward + ) + ) + loss.backward() + self.assert_correct_gradient( + seq2slate_net_copy, seq2slate_net, policy_gradient_interval, learning_rate + ) + + # another way to compute gradient manually + ranked_per_seq_probs = torch.exp( + seq2slate_net_copy_copy( + batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE + ).log_probs + ) + + loss = -( + torch.mean(ranked_per_seq_probs / batch.tgt_out_probs * batch.slate_reward) + ) + loss.backward() + self.assert_correct_gradient( + seq2slate_net_copy_copy, + seq2slate_net, + policy_gradient_interval, + learning_rate, + ) + + @parameterized.expand(itertools.product(clamp_method_list, output_arch_list)) + def test_seq2slate_trainer_off_policy_with_clamp(self, clamp_method, output_arch): + batch_size = 32 + state_dim = 2 + candidate_num = 15 + candidate_dim = 4 + hidden_size = 16 + learning_rate = 1.0 + device = torch.device("cpu") + policy_gradient_interval = 1 + seq2slate_params = Seq2SlateParameters( + on_policy=False, + ips_clamp=IPSClamp(clamp_method=clamp_method, clamp_max=0.3), + ) + + seq2slate_net = create_seq2slate_transformer( + state_dim, candidate_num, candidate_dim, hidden_size, output_arch + ) + seq2slate_net_copy = copy.deepcopy(seq2slate_net) + trainer = create_trainer( + seq2slate_net, + learning_rate, + seq2slate_params, + policy_gradient_interval, + ) + batch = create_off_policy_batch( + seq2slate_net, batch_size, state_dim, candidate_num, candidate_dim, device + ) + + training_data = DataLoader([batch], collate_fn=lambda x: x[0]) + pl_trainer = pl.Trainer(max_epochs=policy_gradient_interval, logger=False) + pl_trainer.fit(trainer, training_data) + + # manual compute gradient + ranked_per_seq_probs = torch.exp( + seq2slate_net_copy( + batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE + ).log_probs + ) + logger.info(f"ips ratio={ranked_per_seq_probs / batch.tgt_out_probs}") + loss = -( + torch.mean( + ips_clamp( + ranked_per_seq_probs / batch.tgt_out_probs, + seq2slate_params.ips_clamp, + ) + * batch.slate_reward + ) + ) + loss.backward() + self.assert_correct_gradient( + seq2slate_net_copy, seq2slate_net, policy_gradient_interval, learning_rate + ) + + @parameterized.expand( + itertools.product( + output_arch_list, clamp_method_list, clamp_max_list, frechet_sort_shape_list + ) + ) + def test_compute_impt_smpl(self, output_arch, clamp_method, clamp_max, shape): + logger.info(f"output arch: {output_arch}") + logger.info(f"clamp method: {clamp_method}") + logger.info(f"clamp max: {clamp_max}") + logger.info(f"frechet shape: {shape}") + + candidate_num = 5 + candidate_dim = 2 + state_dim = 1 + hidden_size = 32 + device = torch.device("cpu") + learning_rate = 0.001 + policy_gradient_interval = 1 + + candidates = torch.randint(5, (candidate_num, candidate_dim)).float() + candidate_scores = torch.sum(candidates, dim=1) + + seq2slate_params = Seq2SlateParameters( + on_policy=False, + ips_clamp=IPSClamp(clamp_method=clamp_method, clamp_max=clamp_max), + ) + seq2slate_net = create_seq2slate_transformer( + state_dim, candidate_num, candidate_dim, hidden_size, output_arch + ) + trainer = create_trainer( + seq2slate_net, + learning_rate, + seq2slate_params, + 
policy_gradient_interval, + ) + + all_permt = torch.tensor( + list(permutations(range(candidate_num), candidate_num)) + ) + sampler = FrechetSort(shape=shape, topk=candidate_num) + sum_of_logged_propensity = 0 + sum_of_model_propensity = 0 + sum_of_ips_ratio = 0 + + for i in range(len(all_permt)): + sample_action = all_permt[i] + logged_propensity = torch.exp( + sampler.log_prob(candidate_scores, sample_action) + ) + batch = rlt.PreprocessedRankingInput.from_input( + state=torch.zeros(1, state_dim), + candidates=candidates.unsqueeze(0), + device=device, + action=sample_action.unsqueeze(0), + logged_propensities=logged_propensity.reshape(1, 1), + ) + model_propensities = torch.exp( + seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE).log_probs + ) + impt_smpl, clamped_impt_smpl = trainer._compute_impt_smpl( + model_propensities, logged_propensity + ) + if impt_smpl > clamp_max: + if clamp_method == IPSClampMethod.AGGRESSIVE: + npt.assert_allclose(clamped_impt_smpl.detach().numpy(), 0, rtol=1e-5) + else: + npt.assert_allclose( + clamped_impt_smpl.detach().numpy(), clamp_max, rtol=1e-5 + ) + + sum_of_model_propensity += model_propensities + sum_of_logged_propensity += logged_propensity + sum_of_ips_ratio += model_propensities / logged_propensity + logger.info( + f"shape={shape}, sample_action={sample_action}, logged_propensity={logged_propensity}," + f" model_propensity={model_propensities}" + ) + + logger.info( + f"shape {shape}, sum_of_logged_propensity={sum_of_logged_propensity}, " + f"sum_of_model_propensity={sum_of_model_propensity}, " + f"mean sum_of_ips_ratio={sum_of_ips_ratio / len(all_permt)}" + ) + npt.assert_allclose(sum_of_logged_propensity.detach().numpy(), 1, rtol=1e-5) + npt.assert_allclose(sum_of_model_propensity.detach().numpy(), 1, rtol=1e-5) + + @parameterized.expand(itertools.product(output_arch_list, frechet_sort_shape_list)) + def test_ips_ratio_mean(self, output_arch, shape): + output_arch = Seq2SlateOutputArch.FRECHET_SORT + shape = 0.1 + logger.info(f"output arch: {output_arch}") + logger.info(f"frechet shape: {shape}") + + candidate_num = 5 + candidate_dim = 2 + state_dim = 1 + hidden_size = 8 + device = torch.device("cpu") + batch_size = 1024 + num_batches = 400 + learning_rate = 0.001 + policy_gradient_interval = 1 + + state = torch.zeros(batch_size, state_dim) + # all data have same candidates + candidates = torch.randint( + 5, (batch_size, candidate_num, candidate_dim) + ).float() + candidates[1:] = candidates[0] + candidate_scores = torch.sum(candidates, dim=-1) + + seq2slate_params = Seq2SlateParameters( + on_policy=False, + ) + seq2slate_net = create_seq2slate_transformer( + state_dim, candidate_num, candidate_dim, hidden_size, output_arch + ) + trainer = create_trainer( + seq2slate_net, + learning_rate, + seq2slate_params, + policy_gradient_interval, + ) + + sampler = FrechetSort(shape=shape, topk=candidate_num) + sum_of_ips_ratio = 0 + + for i in range(num_batches): + sample_outputs = [ + sampler.sample_action(candidate_scores[j : j + 1]) + for j in range(batch_size) + ] + action = torch.stack( + list(map(lambda x: x.action.squeeze(0), sample_outputs)) + ) + logged_propensity = torch.stack( + list(map(lambda x: torch.exp(x.log_prob), sample_outputs)) + ) + batch = rlt.PreprocessedRankingInput.from_input( + state=state, + candidates=candidates, + device=device, + action=action, + logged_propensities=logged_propensity, + ) + model_propensities = torch.exp( + seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE).log_probs + ) + impt_smpl, _ 
= trainer._compute_impt_smpl( + model_propensities, logged_propensity + ) + sum_of_ips_ratio += torch.mean(impt_smpl).detach().numpy() + mean_of_ips_ratio = sum_of_ips_ratio / (i + 1) + logger.info(f"{i}-th batch, mean ips ratio={mean_of_ips_ratio}") + + if i > 100 and np.allclose(mean_of_ips_ratio, 1, atol=0.03): + return + + raise Exception(f"Mean ips ratio {mean_of_ips_ratio} is not close to 1") diff --git a/reagent/test/replay_memory/circular_replay_buffer_test.py b/reagent/test/replay_memory/circular_replay_buffer_test.py index 0ee07311f..23f47a73c 100644 --- a/reagent/test/replay_memory/circular_replay_buffer_test.py +++ b/reagent/test/replay_memory/circular_replay_buffer_test.py @@ -16,8 +16,7 @@ # limitations under the License. """Tests for circular_replay_buffer.py.""" -import gzip -import os + import tempfile import unittest @@ -53,88 +52,40 @@ def setUp(self): def tearDown(self): self.tmp_dir.cleanup() - def testWithNontupleObservationShape(self): - with self.assertRaises(AssertionError): - _ = circular_replay_buffer.ReplayBuffer( - observation_shape=84, - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, - ) - def testConstructor(self): memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, + stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE ) - self.assertEqual(memory._observation_shape, OBSERVATION_SHAPE) - # Test with non square observation shape - memory = circular_replay_buffer.ReplayBuffer( - observation_shape=(4, 20), - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, - ) - self.assertEqual(memory._observation_shape, (4, 20)) self.assertEqual(memory.add_count, 0) def testAdd(self): memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, + stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE ) self.assertEqual(memory.cursor(), 0) zeros = np.zeros(OBSERVATION_SHAPE) - memory.add(zeros, 0, 0, 0) + memory.add(observation=zeros, action=0, reward=0, terminal=0) # Check if the cursor moved STACK_SIZE -1 padding adds + 1, (the one above). self.assertEqual(memory.cursor(), STACK_SIZE) def testExtraAdd(self): memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, - extra_storage_types=[ - circular_replay_buffer.ReplayElement("extra1", [], np.float32), - circular_replay_buffer.ReplayElement("extra2", [2], np.int8), - ], + stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE ) self.assertEqual(memory.cursor(), 0) zeros = np.zeros(OBSERVATION_SHAPE) - memory.add(zeros, 0, 0, 0, 0, [0, 0]) + memory.add( + observation=zeros, action=0, reward=0, terminal=0, extra1=0, extra2=[0, 0] + ) with self.assertRaisesRegex(ValueError, "Add expects"): - memory.add(zeros, 0, 0, 0) + memory.add(observation=zeros, action=0, reward=0, terminal=0) # Check if the cursor moved STACK_SIZE -1 zeros adds + 1, (the one above). 
self.assertEqual(memory.cursor(), STACK_SIZE) - def testCheckAddTypes(self): - memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, - extra_storage_types=[ - circular_replay_buffer.ReplayElement("extra1", [], np.float32), - circular_replay_buffer.ReplayElement("extra2", [2], np.int8), - ], - ) - zeros = np.zeros(OBSERVATION_SHAPE) - - memory._check_add_types(zeros, 0, 0, 0, 0, [0, 0]) - - with self.assertRaisesRegex(ValueError, "Add expects"): - memory._check_add_types(zeros, 0, 0, 0) - def testLowCapacity(self): with self.assertRaisesRegex(ValueError, "There is not enough capacity"): circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, stack_size=10, replay_capacity=10, batch_size=BATCH_SIZE, @@ -144,7 +95,6 @@ def testLowCapacity(self): with self.assertRaisesRegex(ValueError, "There is not enough capacity"): circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, stack_size=5, replay_capacity=10, batch_size=BATCH_SIZE, @@ -155,7 +105,6 @@ def testLowCapacity(self): # We should be able to create a buffer that contains just enough for a # transition. circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, stack_size=5, replay_capacity=10, batch_size=BATCH_SIZE, @@ -165,7 +114,6 @@ def testLowCapacity(self): def testNSteprewardum(self): memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, stack_size=STACK_SIZE, replay_capacity=10, batch_size=BATCH_SIZE, @@ -174,7 +122,12 @@ def testNSteprewardum(self): ) for i in range(50): - memory.add(np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE), 0, 2.0, 0) + memory.add( + observation=np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE), + action=0, + reward=2.0, + terminal=0, + ) for _i in range(100): batch = memory.sample_transition_batch() @@ -184,15 +137,15 @@ def testNSteprewardum(self): def testSampleTransitionBatch(self): replay_capacity = 10 memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=1, - replay_capacity=replay_capacity, - batch_size=2, + stack_size=1, replay_capacity=replay_capacity, batch_size=2 ) num_adds = 50 # The number of transitions to add to the memory. for i in range(num_adds): memory.add( - np.full(OBSERVATION_SHAPE, i, OBS_DTYPE), 0, 0, i % 4 + observation=np.full(OBSERVATION_SHAPE, i, OBS_DTYPE), + action=0, + reward=0, + terminal=i % 4, ) # Every 4 transitions is terminal. # Test sampling with default batch size. for _i in range(1000): @@ -221,7 +174,7 @@ def testSampleTransitionBatch(self): # transitions are terminal when adding observation (i % 4). expected_terminal = np.expand_dims( np.array([min((x + num_adds - replay_capacity) % 4, 1) for x in indices]), 1 - ).astype(np.bool) + ).astype(bool) batch = memory.sample_transition_batch( batch_size=len(indices), indices=torch.tensor(indices) ) @@ -237,24 +190,17 @@ def testSampleTransitionBatch(self): def testSampleTransitionBatchExtra(self): replay_capacity = 10 memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=1, - replay_capacity=replay_capacity, - batch_size=2, - extra_storage_types=[ - circular_replay_buffer.ReplayElement("extra1", [], np.float32), - circular_replay_buffer.ReplayElement("extra2", [2], np.int8), - ], + stack_size=1, replay_capacity=replay_capacity, batch_size=2 ) num_adds = 50 # The number of transitions to add to the memory. 
for i in range(num_adds): memory.add( - np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE), - 0, - 0, - i % 4, - i % 2, - [i % 2, 0], + observation=np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE), + action=0, + reward=0, + terminal=i % 4, + extra1=i % 2, + extra2=[i % 2, 0], ) # Every 4 transitions is terminal. # Test sampling with default batch size. for _i in range(1000): @@ -283,7 +229,7 @@ def testSampleTransitionBatchExtra(self): # transitions are terminal when adding observation (i % 4). expected_terminal = np.expand_dims( np.array([min((x + num_adds - replay_capacity) % 4, 1) for x in indices]), 1 - ).astype(np.bool) + ).astype(bool) expected_extra1 = np.expand_dims( np.array([(x + num_adds - replay_capacity) % 2 for x in indices]), 1 ) @@ -324,7 +270,6 @@ def testSamplingWithterminalInTrajectory(self): replay_capacity = 10 update_horizon = 3 memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, stack_size=1, replay_capacity=replay_capacity, batch_size=2, @@ -333,11 +278,11 @@ def testSamplingWithterminalInTrajectory(self): ) for i in range(replay_capacity): memory.add( - np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE), - i * 2, # action - i, # reward - 1 if i == 3 else 0, - ) # terminal + observation=np.full(OBSERVATION_SHAPE, i, dtype=OBS_DTYPE), + action=i * 2, + reward=i, + terminal=1 if i == 3 else 0, + ) indices = [2, 3, 4] batch = memory.sample_transition_batch( batch_size=len(indices), indices=torch.tensor(indices) @@ -355,7 +300,7 @@ def testSamplingWithterminalInTrajectory(self): # Since indices = [2, 3, 4], our expected reward are [5, 3, 15]. expected_reward = np.array([[5], [3], [15]]) # Because update_horizon = 3, both indices 2 and 3 include terminal. - expected_terminal = np.array([[1], [1], [0]]).astype(np.bool) + expected_terminal = np.array([[1], [1], [0]]).astype(bool) npt.assert_array_equal(batch.state, expected_states) npt.assert_array_equal( batch.action, np.expand_dims(np.array(indices) * 2, axis=1) @@ -366,15 +311,27 @@ def testSamplingWithterminalInTrajectory(self): def testIsTransitionValid(self): memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=STACK_SIZE, - replay_capacity=10, - batch_size=2, + stack_size=STACK_SIZE, replay_capacity=10, batch_size=2 ) - memory.add(np.full(OBSERVATION_SHAPE, 0, dtype=OBS_DTYPE), 0, 0, 0) - memory.add(np.full(OBSERVATION_SHAPE, 0, dtype=OBS_DTYPE), 0, 0, 0) - memory.add(np.full(OBSERVATION_SHAPE, 0, dtype=OBS_DTYPE), 0, 0, 1) + memory.add( + observation=np.full(OBSERVATION_SHAPE, 0, dtype=OBS_DTYPE), + action=0, + reward=0, + terminal=0, + ) + memory.add( + observation=np.full(OBSERVATION_SHAPE, 0, dtype=OBS_DTYPE), + action=0, + reward=0, + terminal=0, + ) + memory.add( + observation=np.full(OBSERVATION_SHAPE, 0, dtype=OBS_DTYPE), + action=0, + reward=0, + terminal=1, + ) # These valids account for the automatically applied padding (3 blanks each # episode. 
@@ -393,12 +350,12 @@ def testIsTransitionValid(self): "Index %i should be %s" % (i, bool(correct_valids[i])), ) + +""" +Since we don't use saving, not maintaining for now def testSave(self): memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, + stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE ) memory.observation = self._test_observation memory.action = self._test_action @@ -427,12 +384,9 @@ def testSave(self): self.assertFalse(os.path.exists(stale_filename)) def testSaveNonNDArrayAttributes(self): - """Tests checkpointing an attribute which is not a numpy array.""" + # Tests checkpointing an attribute which is not a numpy array. memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, + stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE ) # Add some non-numpy data: an int, a string, an object. @@ -464,10 +418,7 @@ def testSaveNonNDArrayAttributes(self): def testLoadFromNonexistentDirectory(self): memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, + stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE ) # We are trying to load from a non-existent directory, so a NotFoundError # will be raised. @@ -481,10 +432,7 @@ def testLoadFromNonexistentDirectory(self): def testPartialLoadFails(self): memory = circular_replay_buffer.ReplayBuffer( - observation_shape=OBSERVATION_SHAPE, - stack_size=STACK_SIZE, - replay_capacity=5, - batch_size=BATCH_SIZE, + stack_size=STACK_SIZE, replay_capacity=5, batch_size=BATCH_SIZE ) self.assertNotEqual(memory._store["observation"], self._test_observation) self.assertNotEqual(memory._store["action"], self._test_action) @@ -544,3 +492,4 @@ def testLoad(self): npt.assert_allclose(memory._store["reward"], self._test_reward) npt.assert_allclose(memory._store["terminal"], self._test_terminal) self.assertEqual(memory.add_count, self._test_add_count) +""" diff --git a/reagent/test/replay_memory/create_from_env_test.py b/reagent/test/replay_memory/create_from_env_test.py index 43e9b3386..0490ad177 100644 --- a/reagent/test/replay_memory/create_from_env_test.py +++ b/reagent/test/replay_memory/create_from_env_test.py @@ -1,32 +1,33 @@ #!/usr/bin/env python3 +import logging import unittest import numpy as np from reagent.replay_memory.circular_replay_buffer import ReplayBuffer +logger = logging.getLogger(__name__) + try: - from recsim.environments import interest_exploration, interest_evolution + from reagent.gym.envs import RecSim HAS_RECSIM = True -except ModuleNotFoundError: +except ImportError as e: + logger.info(f"Exception {e}") HAS_RECSIM = False class CreateFromEnvTest(unittest.TestCase): @unittest.skipIf(not HAS_RECSIM, "recsim is not installed") def test_create_from_recsim_interest_exploration(self): - env_config = { - "num_candidates": 20, - "slate_size": 3, - "resample_documents": False, - "seed": 1, - } - env = interest_exploration.create_environment(env_config) - replay_buffer = ReplayBuffer.create_from_env( - env, replay_memory_size=100, batch_size=10, store_log_prob=True + env = RecSim( + num_candidates=20, + slate_size=3, + resample_documents=False, + is_interest_exploration=True, ) + replay_buffer = ReplayBuffer(replay_capacity=100, batch_size=10) obs = env.reset() observation = obs["user"] action = 
env.action_space.sample() @@ -41,10 +42,10 @@ def test_create_from_recsim_interest_exploration(self): response_quality = np.stack([r["quality"] for r in response], axis=0) repsonse_cluster_id = np.array([r["cluster_id"] for r in response]) replay_buffer.add( - observation, - action, - reward, - terminal, + observation=observation, + action=action, + reward=reward, + terminal=terminal, mdp_id=0, sequence_number=0, doc_quality=quality, @@ -57,16 +58,8 @@ def test_create_from_recsim_interest_exploration(self): @unittest.skipIf(not HAS_RECSIM, "recsim is not installed") def test_create_from_recsim_interest_evolution(self): - env_config = { - "num_candidates": 20, - "slate_size": 3, - "resample_documents": False, - "seed": 1, - } - env = interest_evolution.create_environment(env_config) - replay_buffer = ReplayBuffer.create_from_env( - env, replay_memory_size=100, batch_size=10, store_log_prob=True - ) + env = RecSim(num_candidates=20, slate_size=3, resample_documents=False) + replay_buffer = ReplayBuffer(replay_capacity=100, batch_size=10) obs = env.reset() observation = obs["user"] action = env.action_space.sample() @@ -82,10 +75,10 @@ def test_create_from_recsim_interest_evolution(self): response_watch_time = np.stack([r["watch_time"] for r in response], axis=0) response_liked = np.array([r["liked"] for r in response]) replay_buffer.add( - observation, - action, - reward, - terminal, + observation=observation, + action=action, + reward=reward, + terminal=terminal, mdp_id=0, sequence_number=0, doc=doc_features, diff --git a/reagent/test/replay_memory/extra_replay_buffer_test.py b/reagent/test/replay_memory/extra_replay_buffer_test.py index 8c939bb8c..0aefd7b16 100644 --- a/reagent/test/replay_memory/extra_replay_buffer_test.py +++ b/reagent/test/replay_memory/extra_replay_buffer_test.py @@ -6,7 +6,7 @@ import numpy as np import numpy.testing as npt import torch -from reagent.replay_memory.circular_replay_buffer import ReplayBuffer, ReplayElement +from reagent.replay_memory.circular_replay_buffer import ReplayBuffer from reagent.test.base.horizon_test_base import HorizonTestBase @@ -23,7 +23,7 @@ def get_add_transition(i): - """ For adding into RB """ + """For adding into RB""" return { "state": np.ones(OBS_SHAPE) * i, "action": int(i), @@ -41,7 +41,7 @@ def get_add_transition(i): def get_stacked_transition(i, stack_size, traj_start_idx): - """ For getting expected stacked state of i """ + """For getting expected stacked state of i""" res = {k: [] for k in ["state", "action", "reward", "extra1"]} # must pad with some zero states for idx in range(i - stack_size + 1, i + 1): @@ -53,19 +53,16 @@ def get_stacked_transition(i, stack_size, traj_start_idx): def setup_buffer(buffer_size, trajectory_lengths, stack_size=None, multi_steps=None): - """ We will insert one trajectory into the RB. 
""" + """We will insert one trajectory into the RB.""" stack_size = stack_size if stack_size is not None else 1 update_horizon = multi_steps if multi_steps is not None else 1 memory = ReplayBuffer( - observation_shape=OBS_SHAPE, - observation_dtype=OBS_TYPE, stack_size=stack_size, replay_capacity=buffer_size, batch_size=1, update_horizon=update_horizon, return_everything_as_stack=stack_size is not None, return_as_timeline_format=multi_steps is not None, - extra_storage_types=[ReplayElement("extra1", (), np.float32)], ) i = 0 @@ -74,17 +71,17 @@ def setup_buffer(buffer_size, trajectory_lengths, stack_size=None, multi_steps=N trans = get_add_transition(i) terminal = bool(j == traj_len - 1) memory.add( - trans["state"], - trans["action"], - trans["reward"], - terminal, - trans["extra1"], + observation=trans["state"], + action=trans["action"], + reward=trans["reward"], + terminal=terminal, + extra1=trans["extra1"], ) i += 1 return memory.sample_all_valid_transitions() -def test_stack_generic(buffer_size, trajectory_lengths, stack_size): +def generic_stack_test_helper(buffer_size, trajectory_lengths, stack_size): batch = setup_buffer(buffer_size, trajectory_lengths, stack_size=stack_size) expected = {k: [] for k in ["state", "action", "reward", "extra1"]} @@ -118,7 +115,7 @@ def test_stack_generic(buffer_size, trajectory_lengths, stack_size): ) -def test_stack_multi_steps_generic( +def generic_stack_multi_steps_test_helper( buffer_size, trajectory_lengths, stack_size, multi_steps ): batch = setup_buffer( @@ -226,7 +223,7 @@ def test_stack_multi_steps_generic( class ExtraReplayBufferTest(HorizonTestBase): - """ Stress tests for the replay buffer, especially for new flags. """ + """Stress tests for the replay buffer, especially for new flags.""" def test_stack_slaughter(self): stack_size = 7 @@ -239,7 +236,7 @@ def test_stack_slaughter(self): f"traj_lengths:{traj_lengths}, " f"stack_size:{stack_size}" ) - test_stack_generic(buffer_size, traj_lengths.tolist(), stack_size) + generic_stack_test_helper(buffer_size, traj_lengths.tolist(), stack_size) logger.info(f"Inserting {i} trajectories passed...") def test_stack_multistep_flags_slaughter(self): @@ -258,7 +255,7 @@ def test_stack_multistep_flags_slaughter(self): f"stack_size:{stack_size}, " f"multi_steps:{multi_steps}" ) - test_stack_multi_steps_generic( + generic_stack_multi_steps_test_helper( buffer_size, traj_lengths.tolist(), stack_size, multi_steps ) logger.info(f"Inserting {i} trajectories passed...") @@ -273,8 +270,6 @@ def test_replay_overflow(self): multi_steps = 2 stack_size = 2 memory = ReplayBuffer( - observation_shape=OBS_SHAPE, - observation_dtype=OBS_TYPE, stack_size=stack_size, replay_capacity=6, batch_size=1, @@ -284,7 +279,11 @@ def test_replay_overflow(self): ) def trans(i): - return np.ones(OBS_SHAPE, dtype=OBS_TYPE), int(2 * i), float(3 * i) + return { + "observation": np.ones(OBS_SHAPE, dtype=OBS_TYPE), + "action": int(2 * i), + "reward": float(3 * i), + } # Contents of RB # start: [X, X, X, X, X, X] @@ -293,20 +292,20 @@ def trans(i): ) # t0: [X, s0, X, X, X, X] - memory.add(*trans(0), False) + memory.add(**trans(0), terminal=False) npt.assert_array_equal( memory._is_index_valid, [False, False, False, False, False, False] ) # t1: [X, s0, s1, X, X, X] - memory.add(*trans(1), False) + memory.add(**trans(1), terminal=False) npt.assert_array_equal( memory._is_index_valid, [False, False, False, False, False, False] ) # t2: [X, s0, s1, s2, X, X] # s0 finally becomes valid as its next state was added - memory.add(*trans(2), False) 
+ memory.add(**trans(2), terminal=False) npt.assert_array_equal( memory._is_index_valid, [False, True, False, False, False, False] ) @@ -316,7 +315,7 @@ def trans(i): # t3: [X, s0, s1, s2, s3, X] # episode termination validates whole episode - memory.add(*trans(3), True) + memory.add(**trans(3), terminal=True) npt.assert_array_equal( memory._is_index_valid, [False, True, True, True, True, False] ) @@ -330,7 +329,7 @@ def trans(i): # t4: [s4, s0, s1, s2, s3, X] # s0 invalidated as its previous frame is corrupted - memory.add(*trans(4), False) + memory.add(**trans(4), terminal=False) npt.assert_array_equal( memory._is_index_valid, [False, False, True, True, True, False] ) @@ -340,7 +339,7 @@ def trans(i): npt.assert_array_equal(batch.next_action[1][0], [4, 6]) # t5: [s4, s5, s1, s2, s3, X] - memory.add(*trans(5), False) + memory.add(**trans(5), terminal=False) npt.assert_array_equal( memory._is_index_valid, [False, False, False, True, True, False] ) @@ -349,7 +348,7 @@ def trans(i): npt.assert_array_equal(batch.next_action[0][0], [4, 6]) # t6: [s4, s5, s6, s2, s3, X] - memory.add(*trans(6), True) + memory.add(**trans(6), terminal=True) npt.assert_array_equal( memory._is_index_valid, [True, True, True, False, True, False] ) @@ -361,3 +360,90 @@ def trans(i): # batch.next_action[3] is [garbage] logger.info("Overflow test passes!") + + def test_sparse_input(self): + replay_capacity = 100 + num_transitions = replay_capacity // 2 + memory = ReplayBuffer( + stack_size=1, replay_capacity=replay_capacity, update_horizon=1 + ) + + def trans(i): + sparse_feat1 = list(range(0, i % 4)) + sparse_feat2 = list(range(i % 4, 4)) + id_list = {"sparse_feat1": sparse_feat1, "sparse_feat2": sparse_feat2} + sparse_feat3 = (list(range(0, i % 7)), [k + 0.5 for k in range(0, i % 7)]) + sparse_feat4 = (list(range(i % 7, 7)), [k + 0.5 for k in range(i % 7, 7)]) + id_score_list = {"sparse_feat3": sparse_feat3, "sparse_feat4": sparse_feat4} + return { + "observation": np.ones(OBS_SHAPE, dtype=OBS_TYPE), + "action": int(2 * i), + "reward": float(3 * i), + "terminal": i % 4, + "id_list": id_list, + "id_score_list": id_score_list, + } + + for i in range(num_transitions): + memory.add(**trans(i)) + + indices = list(range(num_transitions - 1)) + batch = memory.sample_transition_batch(len(indices), torch.tensor(indices)) + + # calculate expected + res = { + "id_list": {"sparse_feat1": ([], []), "sparse_feat2": ([], [])}, + "id_score_list": { + "sparse_feat3": ([], [], []), + "sparse_feat4": ([], [], []), + }, + "next_id_list": {"sparse_feat1": ([], []), "sparse_feat2": ([], [])}, + "next_id_score_list": { + "sparse_feat3": ([], [], []), + "sparse_feat4": ([], [], []), + }, + } + for i in range(num_transitions - 1): + feats_i = trans(i) + feats_next = trans(i + 1) + for k in ["id_list", "id_score_list"]: + for feat_id in res[k]: + res[k][feat_id][0].append(len(res[k][feat_id][1])) + if k == "id_list": + res[k][feat_id][1].extend(feats_i[k][feat_id]) + else: + res[k][feat_id][1].extend(feats_i[k][feat_id][0]) + res[k][feat_id][2].extend(feats_i[k][feat_id][1]) + + for k in ["next_id_list", "next_id_score_list"]: + for feat_id in res[k]: + res[k][feat_id][0].append(len(res[k][feat_id][1])) + orig_k = k[len("next_") :] + if k == "next_id_list": + res[k][feat_id][1].extend(feats_next[orig_k][feat_id]) + else: + res[k][feat_id][1].extend(feats_next[orig_k][feat_id][0]) + res[k][feat_id][2].extend(feats_next[orig_k][feat_id][1]) + + for k in ["id_list", "id_score_list", "next_id_list", "next_id_score_list"]: + for feat_id in res[k]: 
+ if k in ["id_list", "next_id_list"]: + npt.assert_array_equal( + res[k][feat_id][0], getattr(batch, k)[feat_id][0] + ) + npt.assert_array_equal( + res[k][feat_id][1], getattr(batch, k)[feat_id][1] + ) + else: + npt.assert_array_equal( + res[k][feat_id][0], getattr(batch, k)[feat_id][0] + ) + npt.assert_array_equal( + res[k][feat_id][1], getattr(batch, k)[feat_id][1] + ) + npt.assert_array_equal( + res[k][feat_id][2], getattr(batch, k)[feat_id][2] + ) + + # sample random + _ = memory.sample_transition_batch(10) diff --git a/reagent/test/replay_memory/prioritized_replay_buffer_test.py b/reagent/test/replay_memory/prioritized_replay_buffer_test.py index 2ae04ad02..ec5fb879c 100644 --- a/reagent/test/replay_memory/prioritized_replay_buffer_test.py +++ b/reagent/test/replay_memory/prioritized_replay_buffer_test.py @@ -32,7 +32,7 @@ class PrioritizedReplayBufferTest(unittest.TestCase): def create_default_memory(self): return prioritized_replay_buffer.PrioritizedReplayBuffer( - SCREEN_SIZE, STACK_SIZE, REPLAY_CAPACITY, BATCH_SIZE, max_sample_attempts=10 + STACK_SIZE, REPLAY_CAPACITY, BATCH_SIZE, max_sample_attempts=10 ) # For faster tests. def add_blank(self, memory, action=0, reward=0.0, terminal=0, priority=1.0): @@ -48,7 +48,13 @@ def add_blank(self, memory, action=0, reward=0.0, terminal=0, priority=1.0): Index of the transition just added. """ dummy = np.zeros(SCREEN_SIZE) - memory.add(dummy, action, reward, terminal, priority) + memory.add( + observation=dummy, + action=action, + reward=reward, + terminal=terminal, + priority=priority, + ) index = (memory.cursor() - 1) % REPLAY_CAPACITY return index @@ -64,7 +70,7 @@ def testAddWithAndWithoutPriority(self): # Check that the prioritized replay buffer expects an additional argument # for priority. with self.assertRaisesRegex(ValueError, "Add expects"): - memory.add(zeros, 0, 0, 0) + memory.add(observation=zeros, action=0, reward=0, terminal=0) def testDummyScreensAddedToNewMemory(self): memory = self.create_default_memory() @@ -130,11 +136,7 @@ def testSampleIndexBatchTooManyFailedRetries(self): def testSampleIndexBatch(self): memory = prioritized_replay_buffer.PrioritizedReplayBuffer( - SCREEN_SIZE, - STACK_SIZE, - REPLAY_CAPACITY, - BATCH_SIZE, - max_sample_attempts=REPLAY_CAPACITY, + STACK_SIZE, REPLAY_CAPACITY, BATCH_SIZE, max_sample_attempts=10 ) # This will ensure we end up with cursor == 1. for _ in range(REPLAY_CAPACITY - STACK_SIZE + 2): diff --git a/reagent/test/samplers/__init__.py b/reagent/test/samplers/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/test/samplers/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/test/samplers/test_frechet_sort.py b/reagent/test/samplers/test_frechet_sort.py new file mode 100644 index 000000000..6e69ffa03 --- /dev/null +++ b/reagent/test/samplers/test_frechet_sort.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
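A minimal sketch (assuming only torch; not part of the patch) of the hand computation the tests below use to cross-check FrechetSort.log_prob: a Plackett-Luce style product where, at each of the first topk positions, the chosen item competes against every item not yet placed, with the (log-)scores scaled by shape. The function name is illustrative; the real sampler lives in reagent.samplers.frechet.

import torch

def manual_topk_log_prob(scores: torch.Tensor, ranking: torch.Tensor, shape: float, topk: int) -> torch.Tensor:
    # scores: 1-D tensor of per-item (log-)scores
    # ranking: 1-D long tensor, ranking[p] is the item placed at position p
    s = scores[ranking]
    log_prob = torch.tensor(0.0)
    for p in range(topk):
        # log softmax probability of the item at position p among the items still remaining
        log_prob = log_prob - torch.exp((s[p:] - s[p]) * shape).sum().log()
    return log_prob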
+import torch +from reagent.samplers.frechet import FrechetSort +from reagent.test.base.horizon_test_base import HorizonTestBase + + +class FrechetSortTest(HorizonTestBase): + def test_log_prob(self): + scores = torch.tensor( + [ + [1.0, 2.0, 3.0, 4.0, 5.0], + [5.0, 1.0, 2.0, 3.0, 4.0], + ] + ) + shape = 2.0 + frechet_sort = FrechetSort(topk=3, shape=shape, log_scores=True) + + # The log-prob should be the same; the last 2 positions don't matter + action = torch.tensor( + [ + [0, 1, 2, 3, 4], + [1, 2, 3, 0, 4], + ], + dtype=torch.long, + ) + log_probs = frechet_sort.log_prob(scores, action) + self.assertEqual(log_probs[0], log_probs[1]) + + action = torch.tensor( + [ + [0, 1, 2, 3, 4], + [3, 2, 1, 0, 4], + ], + dtype=torch.long, + ) + log_probs = frechet_sort.log_prob(scores, action) + self.assertLess(log_probs[0], log_probs[1]) + + # manually calculating the log prob for the second case + s = scores[1][action[1]] + log_prob = 0.0 + for p in range(3): + log_prob -= torch.exp((s[p:] - s[p]) * shape).sum().log() + + self.assertAlmostEqual(log_prob, log_probs[1]) + + def test_log_prob_padding(self): + scores = torch.tensor( + [ + [1.0, 2.0, 3.0, 4.0, 5.0], + [1.0, 2.0, 3.0, 4.0, 5.0], + ], + requires_grad=True, + ) + shape = 2.0 + frechet_sort = FrechetSort(topk=3, shape=shape, log_scores=True) + + # A shorter sequence should have a higher prob + action = torch.tensor( + [ + [0, 1, 2, 3, 4], + [0, 1, 5, 5, 5], + ], + dtype=torch.long, + ) + log_probs = frechet_sort.log_prob(scores, action) + self.assertLess(log_probs[0], log_probs[1]) + + log_probs.sum().backward() + self.assertGreater(scores.grad.sum(), 0) + + # manually calculating the log prob for the second case + # 5 is padding, so we remove it here + s = scores[1][action[1][:2]] + log_prob = 0.0 + for p in range(2): + log_prob -= torch.exp((s[p:] - s[p]) * shape).sum().log() + + self.assertAlmostEqual(log_prob, log_probs[1]) diff --git a/reagent/test/test_data/ex_mdps.py b/reagent/test/test_data/ex_mdps.py new file mode 100644 index 000000000..4c5cab9ca --- /dev/null +++ b/reagent/test/test_data/ex_mdps.py @@ -0,0 +1,155 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
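An illustrative usage (not part of the patch) of the generator defined below, in its single-step variant; the asserted values simply restate constants hard-coded in the function body.

from reagent.test.test_data.ex_mdps import generate_discrete_mdp_pandas_df

df, ds = generate_discrete_mdp_pandas_df(
    multi_steps=False, use_seq_num_diff_as_time_diff=True
)
assert ds == "2019-07-17"
assert list(df["action"]) == ["L", "R", "U", "D"]
assert list(df["reward"]) == [0, 1, 4, 5]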
+ +from typing import Tuple + +import pandas + + +def generate_discrete_mdp_pandas_df( + multi_steps: bool, use_seq_num_diff_as_time_diff: bool +) -> Tuple[pandas.DataFrame, str]: + # Simulate the following MDP: + # state: 0, action: 7 ('L'), reward: 0, + # state: 1, action: 8 ('R'), reward: 1, + # state: 4, action: 9 ('U'), reward: 4, + # state: 5, action: 10 ('D'), reward: 5, + # state: 6 (terminal) + actions = ["L", "R", "U", "D"] + possible_actions = [["L", "R"], ["R", "U"], ["U", "D"], ["D"]] + + # assume multi_steps=2 + if multi_steps: + rewards = [[0, 1], [1, 4], [4, 5], [5]] + metrics = [ + [{"reward": 0}, {"reward": 1}], + [{"reward": 1}, {"reward": 4}], + [{"reward": 4}, {"reward": 5}], + [{"reward": 5}], + ] + next_states = [[{1: 1}, {4: 1}], [{4: 1}, {5: 1}], [{5: 1}, {6: 1}], [{6: 1}]] + next_actions = [["R", "U"], ["U", "D"], ["D", ""], [""]] + possible_next_actions = [ + [["R", "U"], ["U", "D"]], + [["U", "D"], ["D"]], + [["D"], [""]], + [[""]], + ] + # terminals = [[0, 0], [0, 0], [0, 1], [1]] + time_diffs = [[1, 1], [1, 1], [1, 1], [1]] + else: + rewards = [0, 1, 4, 5] + metrics = [{"reward": 0}, {"reward": 1}, {"reward": 4}, {"reward": 5}] # noqa + next_states = [{1: 1}, {4: 1}, {5: 1}, {6: 1}] + next_actions = ["R", "U", "D", ""] + possible_next_actions = [["R", "U"], ["U", "D"], ["D"], [""]] + # terminals = [0, 0, 0, 1] + if use_seq_num_diff_as_time_diff: + time_diffs = [1, 1, 1, 1] # noqa + else: + time_diffs = [1, 3, 1, 1] # noqa + + n = 4 + mdp_ids = ["0", "0", "0", "0"] + sequence_numbers = [0, 1, 4, 5] + sequence_number_ordinals = [1, 2, 3, 4] + states = [{0: 1}, {1: 1}, {4: 1}, {5: 1}] + action_probabilities = [0.3, 0.4, 0.5, 0.6] + + ds = "2019-07-17" + df = pandas.DataFrame( + { + "mdp_id": mdp_ids, + "sequence_number": sequence_numbers, + "sequence_number_ordinal": sequence_number_ordinals, + "state_features": states, + "action": actions, + "action_probability": action_probabilities, + "reward": rewards, + "next_state_features": next_states, + "next_action": next_actions, + "time_diff": time_diffs, + "possible_actions": possible_actions, + "possible_next_actions": possible_next_actions, + "metrics": metrics, + "ds": [ds] * n, + } + ) + return df, ds + + +def generate_parametric_mdp_pandas_df( + multi_steps: bool, use_seq_num_diff_as_time_diff: bool +): + # Simulate the following MDP: + # state: 0, action: 7 ('L'), reward: 0, + # state: 1, action: 8 ('R'), reward: 1, + # state: 4, action: 9 ('U'), reward: 4, + # state: 5, action: 10 ('D'), reward: 5, + # state: 6 (terminal) + actions = [{7: 1}, {8: 1}, {9: 1}, {10: 1}] + possible_actions = [ + [{7: 1}, {8: 1}], + [{8: 1}, {9: 1}], + [{9: 1}, {10: 1}], + [{10: 1}], + ] + + # assume multi_step=2 + if multi_steps: + rewards = [[0, 1], [1, 4], [4, 5], [5]] + metrics = [ + [{"reward": 0}, {"reward": 1}], + [{"reward": 1}, {"reward": 4}], + [{"reward": 4}, {"reward": 5}], + [{"reward": 5}], + ] + next_states = [[{1: 1}, {4: 1}], [{4: 1}, {5: 1}], [{5: 1}, {6: 1}], [{6: 1}]] + next_actions = [[{8: 1}, {9: 1}], [{9: 1}, {10: 1}], [{10: 1}, {}], [{}]] + possible_next_actions = [ + [[{8: 1}, {9: 1}], [{9: 1}, {10: 1}]], + [[{9: 1}, {10: 1}], [{10: 1}]], + [[{10: 1}], [{}]], + [[{}]], + ] + # terminals = [[0, 0], [0, 0], [0, 1], [1]] + time_diffs = [[1, 1], [1, 1], [1, 1], [1]] + else: + rewards = [0, 1, 4, 5] + metrics = [{"reward": 0}, {"reward": 1}, {"reward": 4}, {"reward": 5}] # noqa + next_states = [{1: 1}, {4: 1}, {5: 1}, {6: 1}] + next_actions = [{8: 1}, {9: 1}, {10: 1}, {}] + possible_next_actions = [[{8: 1}, {9: 
1}], [{9: 1}, {10: 1}], [{10: 1}], [{}]] + # terminals = [0, 0, 0, 1] + if use_seq_num_diff_as_time_diff: + time_diffs = [1, 1, 1, 1] # noqa + else: + time_diffs = [1, 3, 1, 1] # noqa + + n = 4 + mdp_ids = ["0", "0", "0", "0"] + sequence_numbers = [0, 1, 4, 5] + sequence_number_ordinals = [1, 2, 3, 4] + states = [{0: 1}, {1: 1}, {4: 1}, {5: 1}] + action_probabilities = [0.3, 0.4, 0.5, 0.6] + + ds = "2019-07-17" + df = pandas.DataFrame( + { + "mdp_id": mdp_ids, + "sequence_number": sequence_numbers, + "sequence_number_ordinal": sequence_number_ordinals, + "state_features": states, + "action": actions, + "action_probability": action_probabilities, + "reward": rewards, + "next_state_features": next_states, + "next_action": next_actions, + "time_diff": time_diffs, + "possible_actions": possible_actions, + "possible_next_actions": possible_next_actions, + "metrics": metrics, + "ds": [ds] * n, + } + ) + return df, ds diff --git a/reagent/test/training/__init__.py b/reagent/test/training/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/test/training/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/test/training/cb/__init__.py b/reagent/test/training/cb/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/test/training/cb/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/test/training/cb/test_deep_represent_linucb.py b/reagent/test/training/cb/test_deep_represent_linucb.py new file mode 100644 index 000000000..f115d82c1 --- /dev/null +++ b/reagent/test/training/cb/test_deep_represent_linucb.py @@ -0,0 +1,71 @@ +""" +How to use: + buck test reagent:training_tests -- TestDeepRepresentLinUCB +""" + +import unittest + +import numpy as np +import numpy.testing as npt +import torch +from reagent.core.types import CBInput +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.samplers.discrete_sampler import GreedyActionSampler + +from reagent.models.deep_represent_linucb import DeepRepresentLinearRegressionUCB +from reagent.models.fully_connected_network import FullyConnectedNetwork +from reagent.training.cb.deep_represent_linucb_trainer import DeepRepresentLinUCBTrainer +from reagent.training.parameters import DeepRepresentLinUCBTrainerParameters + + +class TestDeepRepresentLinUCB(unittest.TestCase): + def setUp(self): + + self.params = DeepRepresentLinUCBTrainerParameters() + + raw_input_dim = 100 + sizes = [100] + linucb_inp_dim = 100 + activations = ["relu"] + output_activation = "linear" + + customized_layers = FullyConnectedNetwork( + [raw_input_dim] + sizes + [linucb_inp_dim], + activations + [output_activation], + use_batch_norm=False, + dropout_ratio=0.0, + normalize_output=False, + use_layer_norm=False, + ) + policy_network = DeepRepresentLinearRegressionUCB( + raw_input_dim=raw_input_dim, + sizes=sizes, + linucb_inp_dim=linucb_inp_dim, + activations=activations, + mlp_layers=customized_layers, + ) + + self.policy = Policy(scorer=policy_network, sampler=GreedyActionSampler()) + self.trainer = DeepRepresentLinUCBTrainer(self.policy, **self.params.asdict()) + self.batch = CBInput( + context_arm_features=torch.rand(2, 2, 100), + action=torch.tensor([[0], [1]], dtype=torch.long), + reward=torch.tensor([[1.5], [-2.3]]), + ) # random Gaussian features where feature_dim=100 + + def test_linucb_training_step(self): + 
self.trainer.training_step(self.batch, 0) + assert len(self.batch.action) == len(self.batch.reward) + assert len(self.batch.action) == self.batch.context_arm_features.shape[0] + + loss_iterations = [] + for _ in range(100): + # Linucb parameters are updated within training_step manually + loss = self.trainer.training_step(batch=self.batch, batch_idx=0).item() + loss_iterations.append(loss) + + npt.assert_allclose( + np.asarray(loss_iterations[90:]), + np.zeros(10), + atol=1e-2, + ) diff --git a/reagent/test/training/cb/test_disjoint_linucb.py b/reagent/test/training/cb/test_disjoint_linucb.py new file mode 100644 index 000000000..e84df06ce --- /dev/null +++ b/reagent/test/training/cb/test_disjoint_linucb.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import copy +import unittest + +import numpy as np +import numpy.testing as npt +import torch +from reagent.core.types import CBInput +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.samplers.discrete_sampler import GreedyActionSampler +from reagent.models.disjoint_linucb_predictor import DisjointLinearRegressionUCB +from reagent.training.cb.disjoint_linucb_trainer import DisjointLinUCBTrainer +from reagent.training.parameters import DisjointLinUCBTrainerParameters + + +class TestDisjointLinUCB(unittest.TestCase): + def setUp(self): + self.batch_size = 2 + self.num_arms = 2 + self.params = DisjointLinUCBTrainerParameters() + + self.x_dim = 9 + policy_network = DisjointLinearRegressionUCB(self.num_arms, self.x_dim) + self.policy = Policy(scorer=policy_network, sampler=GreedyActionSampler()) + + self.trainer = DisjointLinUCBTrainer(self.policy, **self.params.asdict()) + self.batch = [ + CBInput( + context_arm_features=torch.tensor( + [ + [1, 2, 3, 6, 7, 2 * 6, 2 * 7, 3 * 6, 3 * 7], + [1, 2, 3, 10, 11, 2 * 10, 2 * 11, 3 * 10, 3 * 11], + ], + dtype=torch.float, + ), + reward=torch.tensor([[1.5], [2.3]], dtype=torch.float), + ), + CBInput( + context_arm_features=torch.tensor( + [ + [1, 4, 5, 8, 9, 4 * 8, 4 * 9, 5 * 8, 5 * 9], + [1, 4, 5, 12, 13, 4 * 12, 4 * 13, 5 * 12, 5 * 13], + ], + dtype=torch.float, + ), + reward=torch.tensor([[1.9], [2.8]], dtype=torch.float), + ), + ] + + def test_linucb_training_step(self): + self.trainer.training_step(self.batch, 0) + + def test_linucb_training_batch_vs_online(self): + # make sure that feeding in a batch gives same result as feeding in examples one-by-one + obss = [[], []] + for i in range(self.batch_size): + obss[i].append( + CBInput( + context_arm_features=self.batch[0].context_arm_features[ + i : i + 1, : + ], + reward=self.batch[0].reward[[i]], + ) + ) + obss[i].append( + CBInput( + context_arm_features=self.batch[1].context_arm_features[ + i : i + 1, : + ], + reward=self.batch[1].reward[[i]], + ) + ) + + scorer_1 = DisjointLinearRegressionUCB(self.num_arms, self.x_dim) + scorer_2 = DisjointLinearRegressionUCB(self.num_arms, self.x_dim) + policy_1 = Policy(scorer=scorer_1, sampler=GreedyActionSampler()) + policy_2 = Policy(scorer=scorer_2, sampler=GreedyActionSampler()) + trainer_1 = DisjointLinUCBTrainer(policy_1) + trainer_2 = DisjointLinUCBTrainer(policy_2) + + trainer_1.training_step(obss[0], 0) + trainer_1.training_step(obss[1], 1) + trainer_1.on_train_epoch_end() + trainer_2.training_step(self.batch, 0) + trainer_2.on_train_epoch_end() + + for arm in range(self.num_arms): + npt.assert_array_less( + np.zeros(scorer_1.A[arm].shape), scorer_1.A[arm].numpy() + ) # make sure A got updated + 
npt.assert_allclose( + scorer_1.A[arm].numpy(), scorer_2.A[arm].numpy(), rtol=1e-4 + ) + npt.assert_allclose( + scorer_1.inv_A[arm].numpy(), scorer_2.inv_A[arm].numpy(), rtol=1e-4 + ) + npt.assert_allclose( + scorer_1.b[arm].numpy(), scorer_2.b[arm].numpy(), rtol=1e-4 + ) + + def test_linucb_model_update_equations(self): + # make sure that the model parameters match hand-computed values + scorer = DisjointLinearRegressionUCB(self.num_arms, self.x_dim) + policy = Policy(scorer=scorer, sampler=GreedyActionSampler()) + trainer = DisjointLinUCBTrainer(policy) + trainer.training_step(self.batch, 0) + trainer.on_train_epoch_end() + # the feature matrix (computed by hand) + for arm in range(self.num_arms): + x = self.batch[arm].context_arm_features.numpy() + npt.assert_allclose(scorer.A[arm].numpy(), x.T @ x, rtol=1e-5) + npt.assert_allclose( + scorer.b[arm].numpy(), + x.T @ self.batch[arm].reward.squeeze().numpy(), + rtol=1e-5, + ) + + for arm in range(self.num_arms): + npt.assert_allclose( + (np.eye(self.x_dim) + scorer.A[arm].numpy()) + @ scorer.inv_A[arm].numpy(), + np.eye(self.x_dim), + atol=1e-2, + rtol=1e-3, + ) + npt.assert_equal( + scorer.A[arm].numpy(), scorer.coefs_valid_for_A[arm].numpy() + ) + + def test_linucb_weights(self): + # make sure that using a weight is same as processing an example several times + batch_with_weight = copy.deepcopy(self.batch) + for arm in range(self.num_arms): + batch_with_weight[arm].weight = 3 * torch.ones((self.batch_size, 1)) + + scorer_1 = DisjointLinearRegressionUCB(self.num_arms, self.x_dim) + scorer_2 = DisjointLinearRegressionUCB(self.num_arms, self.x_dim) + policy_1 = Policy(scorer=scorer_1, sampler=GreedyActionSampler()) + policy_2 = Policy(scorer=scorer_2, sampler=GreedyActionSampler()) + trainer_1 = DisjointLinUCBTrainer(policy_1) + trainer_2 = DisjointLinUCBTrainer(policy_2) + + trainer_1.training_step(batch_with_weight, 0) + trainer_1.on_train_epoch_end() + for i in range(3): + trainer_2.training_step(self.batch, i) + trainer_2.on_train_epoch_end() + + for arm in range(self.num_arms): + npt.assert_array_less( + np.zeros(scorer_1.A[arm].shape), scorer_1.A[arm].numpy() + ) # make sure A got updated + npt.assert_allclose(scorer_1.A.numpy(), scorer_2.A.numpy(), rtol=1e-6) + npt.assert_allclose(scorer_1.b.numpy(), scorer_2.b.numpy(), rtol=1e-6) + + def test_linucb_discount_factors(self) -> None: + # change the precision to double + torch.set_default_dtype(torch.float64) + + scorer = DisjointLinearRegressionUCB(self.num_arms, self.x_dim, gamma=0.9) + policy = Policy(scorer=scorer, sampler=GreedyActionSampler()) + trainer = DisjointLinUCBTrainer(policy) + + # 1st round training + trainer.training_step(self.batch, 0) + trainer.on_train_epoch_end() + + # 2nd round training + torch.manual_seed(0) + self.batch_2nd_round = [ + CBInput( + context_arm_features=torch.randn((10, self.x_dim)), + reward=torch.randn((10, 1)), + ), + CBInput( + context_arm_features=torch.randn((3, self.x_dim)), + reward=torch.randn((3, 1)), + ), + ] + self.second_batch_2nd_round = [ + CBInput( + context_arm_features=torch.randn((10, self.x_dim)), + reward=torch.randn((10, 1)), + ), + CBInput( + context_arm_features=torch.randn((3, self.x_dim)), + reward=torch.randn((3, 1)), + ), + ] + trainer.training_step(self.batch_2nd_round, 0) + # check if there are several training steps in a round + # discount factor is only applied one time + trainer.training_step(self.second_batch_2nd_round, 1) + trainer.on_train_epoch_end() + + # eval dataset + inp1 = torch.randn((5, self.x_dim)) + out1 
= scorer(inp1) + # check it won't do redundant coefficent update after 2nd eval + inp2 = torch.randn((5, self.x_dim)) + out2 = scorer(inp2) + + # the feature matrix and model parameter and eval output (computed by hand) + for arm in range(self.num_arms): + x1 = self.batch[arm].context_arm_features.numpy() + x2 = self.batch_2nd_round[arm].context_arm_features.numpy() + x3 = self.second_batch_2nd_round[arm].context_arm_features.numpy() + reward1 = self.batch[arm].reward.squeeze().numpy() + reward2 = self.batch_2nd_round[arm].reward.squeeze().numpy() + reward3 = self.second_batch_2nd_round[arm].reward.squeeze().numpy() + + # all matrix and vectors are the same + A = scorer.gamma * x1.T @ x1 + x2.T @ x2 + x3.T @ x3 + b = scorer.gamma * x1.T @ reward1 + x2.T @ reward2 + x3.T @ reward3 + npt.assert_allclose( + scorer.A[arm].numpy(), scorer.gamma * A, atol=1e-5, rtol=1e-5 + ) + npt.assert_allclose( + scorer.b[arm].numpy(), scorer.gamma * b, atol=1e-5, rtol=1e-5 + ) + + inv_A = np.linalg.inv(A + np.identity(self.x_dim) * scorer.l2_reg_lambda) + npt.assert_allclose(scorer.inv_A[arm].numpy(), inv_A, atol=1e-4, rtol=1e-4) + + # model parameters are the same + theta = inv_A @ b + npt.assert_allclose(scorer.coefs[arm].numpy(), theta, atol=1e-4, rtol=1e-4) + + # ucb scores are the same + def calculated_expected_ucb_scores(inp): + expected_out = np.zeros(inp.size()[0]) + for i in range(inp.size()[0]): + x = inp[i].numpy() + expected_out[i] = x @ theta.T + scorer.ucb_alpha * np.sqrt( + x @ inv_A @ x.T + ) + return expected_out + + expected_out1 = calculated_expected_ucb_scores(inp1) + npt.assert_allclose( + out1[:, arm].numpy(), expected_out1, atol=1e-4, rtol=1e-4 + ) + + expected_out2 = calculated_expected_ucb_scores(inp2) + npt.assert_allclose( + out2[:, arm].numpy(), expected_out2, atol=1e-4, rtol=1e-4 + ) diff --git a/reagent/test/training/cb/test_linucb.py b/reagent/test/training/cb/test_linucb.py new file mode 100644 index 000000000..9632c243c --- /dev/null +++ b/reagent/test/training/cb/test_linucb.py @@ -0,0 +1,288 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
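+
+# The tests below check LinUCBTrainer / LinearRegressionUCB against values
+# computed by hand. As a rough reference (assuming the standard
+# ridge-regularized LinUCB update, with X the matrix of chosen-arm features,
+# r the rewards and d the feature dimension):
+#
+#   A      = X.T @ X                                  # stored as avg_A * sum_weight
+#   b      = X.T @ r                                  # stored as avg_b * sum_weight
+#   inv_A  = np.linalg.inv(A + l2_reg_lambda * np.eye(d))
+#   theta  = inv_A @ b                                # scorer.coefs
+#   ucb(x) = x @ theta + ucb_alpha * np.sqrt(x @ inv_A @ x)
+#
+# With a discount factor gamma < 1, the accumulated A and b are discounted by
+# gamma once per training epoch before new data is added (see
+# test_linucb_discount_factors below).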
+ +import copy +import unittest + +import numpy as np +import numpy.testing as npt +import torch +from reagent.core.types import CBInput +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.samplers.discrete_sampler import GreedyActionSampler +from reagent.models.linear_regression import LinearRegressionUCB +from reagent.training.cb.linucb_trainer import _get_chosen_arm_features, LinUCBTrainer +from reagent.training.parameters import LinUCBTrainerParameters + + +class TestLinUCButils(unittest.TestCase): + def test_get_chosen_arm_features(self): + all_arms_features = torch.tensor( + [[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], dtype=torch.float + ) + actions = torch.tensor([[1], [0]], dtype=torch.long) + chosen_arm_features = _get_chosen_arm_features(all_arms_features, actions) + npt.assert_equal( + chosen_arm_features.numpy(), np.array([[3.0, 4.0], [5.0, 6.0]]) + ) + + +class TestLinUCB(unittest.TestCase): + def setUp(self): + self.batch_size = 2 + + self.num_arms = 2 + self.params = LinUCBTrainerParameters() + + self.x_dim = 5 + policy_network = LinearRegressionUCB(self.x_dim) + self.policy = Policy(scorer=policy_network, sampler=GreedyActionSampler()) + + self.trainer = LinUCBTrainer(self.policy, **self.params.asdict()) + self.batch = CBInput( + context_arm_features=torch.tensor( + [ + [ + [1, 2, 3, 6, 7], + [1, 2, 3, 10, 11], + ], + [ + [1, 4, 5, 8, 9], + [1, 4, 5, 12, 13], + ], + ], + dtype=torch.float, + ), + action=torch.tensor([[0], [1]], dtype=torch.long), + reward=torch.tensor([[1.5], [2.3]], dtype=torch.float), + ) + + def test_linucb_training_step(self): + self.trainer.training_step(self.batch, 0) + self.trainer.on_train_epoch_end() + + def test_linucb_training_batch_vs_online(self): + # make sure that feeding in a batch gives same result as feeding in examples one-by-one + obss = [] + for i in range(self.batch_size): + obss.append( + CBInput( + context_arm_features=self.batch.context_arm_features[ + i : i + 1, :, : + ], + action=self.batch.action[[i]], + reward=self.batch.reward[[i]], + ) + ) + + scorer_1 = LinearRegressionUCB(self.x_dim) + scorer_2 = LinearRegressionUCB(self.x_dim) + policy_1 = Policy(scorer=scorer_1, sampler=GreedyActionSampler()) + policy_2 = Policy(scorer=scorer_2, sampler=GreedyActionSampler()) + trainer_1 = LinUCBTrainer(policy_1) + trainer_2 = LinUCBTrainer(policy_2) + + trainer_1.training_step(obss[0], 0) + trainer_1.training_step(obss[1], 1) + trainer_1.on_train_epoch_end() + trainer_2.training_step(self.batch, 0) + trainer_2.on_train_epoch_end() + + npt.assert_array_less( + np.zeros(scorer_1.avg_A.shape), scorer_1.avg_A.numpy() + ) # make sure A got updated + npt.assert_allclose(scorer_1.avg_A.numpy(), scorer_2.avg_A.numpy(), rtol=1e-4) + npt.assert_allclose(scorer_1.avg_b.numpy(), scorer_2.avg_b.numpy(), rtol=1e-4) + + def test_linucb_training_multiple_epochs(self): + # make sure that splitting the data across multiple epochs is same as learning from all data in one epoch + # this is only true when there is no discounting (gamma=1) + obss = [] + for i in range(self.batch_size): + obss.append( + CBInput( + context_arm_features=self.batch.context_arm_features[ + i : i + 1, :, : + ], + action=self.batch.action[[i]], + reward=self.batch.reward[[i]], + ) + ) + + scorer_1 = LinearRegressionUCB(self.x_dim) + scorer_2 = LinearRegressionUCB(self.x_dim) + policy_1 = Policy(scorer=scorer_1, sampler=GreedyActionSampler()) + policy_2 = Policy(scorer=scorer_2, sampler=GreedyActionSampler()) + trainer_1 = LinUCBTrainer(policy_1) + 
trainer_2 = LinUCBTrainer(policy_2) + + trainer_1.training_step(obss[0], 0) + trainer_1.on_train_epoch_end() + trainer_1.training_step(obss[1], 1) + trainer_1.on_train_epoch_end() + + trainer_2.training_step(self.batch, 0) + trainer_2.on_train_epoch_end() + + npt.assert_array_less( + np.zeros(scorer_1.avg_A.shape), scorer_1.avg_A.numpy() + ) # make sure A got updated + npt.assert_allclose(scorer_1.avg_A.numpy(), scorer_2.avg_A.numpy(), rtol=1e-4) + npt.assert_allclose(scorer_1.avg_b.numpy(), scorer_2.avg_b.numpy(), rtol=1e-4) + npt.assert_allclose( + scorer_1.inv_avg_A.numpy(), scorer_2.inv_avg_A.numpy(), rtol=1e-4 + ) + npt.assert_allclose(scorer_1.coefs.numpy(), scorer_2.coefs.numpy(), rtol=1e-3) + + def test_linucb_model_update_equations(self): + # make sure that the model parameters match hand-computed values + scorer = LinearRegressionUCB(self.x_dim) + policy = Policy(scorer=scorer, sampler=GreedyActionSampler()) + trainer = LinUCBTrainer(policy) + trainer.training_step(self.batch, 0) + trainer.on_train_epoch_end() + # the feature matrix (computed by hand) + x = _get_chosen_arm_features( + self.batch.context_arm_features, self.batch.action + ).numpy() + + npt.assert_allclose(scorer.avg_A.numpy(), x.T @ x / len(self.batch), rtol=1e-4) + npt.assert_allclose( + scorer.avg_b.numpy(), + x.T @ self.batch.reward.squeeze().numpy() / len(self.batch), + rtol=1e-4, + ) + + scorer._calculate_coefs() + npt.assert_equal(scorer.avg_A.numpy(), scorer.coefs_valid_for_avg_A.numpy()) + + npt.assert_allclose( + ( + np.eye(self.x_dim) * scorer.l2_reg_lambda + + (scorer.avg_A * scorer.sum_weight).numpy() + ) + @ (scorer.inv_avg_A / scorer.sum_weight).numpy(), + np.eye(self.x_dim), + atol=1e-3, + ) + + def test_linucb_weights(self): + # make sure that using a weight is same as processing an example several times + batch_with_weight = copy.deepcopy(self.batch) + batch_with_weight.weight = 3 * torch.ones((self.batch_size, 1)) + + scorer_1 = LinearRegressionUCB(self.x_dim) + scorer_2 = LinearRegressionUCB(self.x_dim) + policy_1 = Policy(scorer=scorer_1, sampler=GreedyActionSampler()) + policy_2 = Policy(scorer=scorer_2, sampler=GreedyActionSampler()) + trainer_1 = LinUCBTrainer(policy_1) + trainer_2 = LinUCBTrainer(policy_2) + + trainer_1.training_step(batch_with_weight, 0) + trainer_1.on_train_epoch_end() + for i in range(3): + trainer_2.training_step(self.batch, i) + trainer_2.on_train_epoch_end() + + npt.assert_array_less( + np.zeros(scorer_1.avg_A.shape), scorer_1.avg_A.numpy() + ) # make sure A got updated + npt.assert_allclose(scorer_1.avg_A.numpy(), scorer_2.avg_A.numpy(), rtol=1e-4) + npt.assert_allclose(scorer_1.avg_b.numpy(), scorer_2.avg_b.numpy(), rtol=1e-4) + + def test_linucb_discount_factors(self) -> None: + # change the precision to double + torch.set_default_dtype(torch.float64) + + gamma = 0.8 + scorer = LinearRegressionUCB(self.x_dim, gamma=gamma) + policy = Policy(scorer=scorer, sampler=GreedyActionSampler()) + trainer = LinUCBTrainer(policy) + + # 1st round training + trainer.training_step(self.batch, 0) + trainer.on_train_epoch_end() + + # 2nd round training + torch.manual_seed(0) + self.batch_2nd_round = CBInput( + context_arm_features=torch.randn((10, self.num_arms, self.x_dim)), + reward=torch.randn((10, 1)), + action=torch.tensor([[0], [1]], dtype=torch.long).repeat(5, 1), + ) + self.second_batch_2nd_round = CBInput( + context_arm_features=torch.randn((6, self.num_arms, self.x_dim)), + reward=torch.randn((6, 1)), + action=torch.tensor([[0], [1]], dtype=torch.long).repeat(3, 1), + ) + 
trainer.training_step(self.batch_2nd_round, 0) + # check if there are several training steps in a round + # discount factor is only applied one time + trainer.training_step(self.second_batch_2nd_round, 1) + trainer.on_train_epoch_end() + + # eval dataset + inp1 = torch.randn((5, self.x_dim)) + out1 = scorer(inp1) + # check it won't do redundant coefficent update after 2nd eval + inp2 = torch.randn((5, self.x_dim)) + out2 = scorer(inp2) + + # the feature matrix and model parameter and eval output (computed by hand) + x1 = _get_chosen_arm_features( + self.batch.context_arm_features, self.batch.action + ).numpy() + x2 = _get_chosen_arm_features( + self.batch_2nd_round.context_arm_features, self.batch_2nd_round.action + ).numpy() + x3 = _get_chosen_arm_features( + self.second_batch_2nd_round.context_arm_features, + self.second_batch_2nd_round.action, + ).numpy() + reward1 = self.batch.reward.squeeze().numpy() + reward2 = self.batch_2nd_round.reward.squeeze().numpy() + reward3 = self.second_batch_2nd_round.reward.squeeze().numpy() + + # all matrix and vectors are the same + A = scorer.gamma * x1.T @ x1 + x2.T @ x2 + x3.T @ x3 + b = scorer.gamma * x1.T @ reward1 + x2.T @ reward2 + x3.T @ reward3 + npt.assert_allclose( + (scorer.avg_A * scorer.sum_weight).numpy(), + A * scorer.gamma, + atol=1e-5, + rtol=1e-5, + ) + npt.assert_allclose( + (scorer.avg_b * scorer.sum_weight).numpy(), + b * scorer.gamma, + atol=1e-5, + rtol=1e-5, + ) + + inv_A = np.linalg.inv(A + np.identity(self.x_dim) * scorer.l2_reg_lambda) + npt.assert_allclose( + (scorer.inv_avg_A / scorer.sum_weight * scorer.gamma).numpy(), + inv_A, + atol=1e-4, + rtol=1e-4, + ) + + # model parameters are the same + theta = inv_A @ b + npt.assert_allclose(scorer.coefs.numpy(), theta, atol=1e-4, rtol=1e-4) + + # ucb scores are the same + def calculated_expected_ucb_scores(inp): + expected_out = np.zeros(inp.size()[0]) + for i in range(inp.size()[0]): + x = inp[i].numpy() + expected_out[i] = x @ theta.T + scorer.ucb_alpha * np.sqrt( + x @ inv_A @ x.T / scorer.gamma + ) + return expected_out + + expected_out1 = calculated_expected_ucb_scores(inp1) + npt.assert_allclose(out1.numpy(), expected_out1, atol=1e-4, rtol=1e-4) + + expected_out2 = calculated_expected_ucb_scores(inp2) + npt.assert_allclose(out2.numpy(), expected_out2, atol=1e-4, rtol=1e-4) diff --git a/reagent/test/training/test_ars_optimizer.py b/reagent/test/training/test_ars_optimizer.py new file mode 100644 index 000000000..3202e19b0 --- /dev/null +++ b/reagent/test/training/test_ars_optimizer.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
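+
+# The objective minimized below is the Ackley function, a standard multimodal
+# test function for optimizers:
+#
+#   f(x) = -20 * exp(-0.2 * sqrt(mean(x_i ** 2)))
+#          - exp(mean(cos(2 * pi * x_i))) + 20 + e
+#
+# Its global minimum is f(0) = 0. ARSOptimizer performs gradient-free ascent,
+# so the test feeds it rewards of -f(.) and checks that the final objective
+# value is driven numerically to that minimum.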
+import unittest + +import numpy as np +import torch +from reagent.training.gradient_free.ars_util import ARSOptimizer + + +class TestARSOptimizer(unittest.TestCase): + def metric(self, x): + # Ackley Function + # https://www.sfu.ca/~ssurjano/ackley.html + + x *= 100 + return ( + -20 * np.exp(-0.2 * np.sqrt(np.inner(x, x) / x.size)) + - np.exp(np.cos(2 * np.pi * x).sum() / x.size) + + 20 + + np.e + ) + + def test_ars_optimizer(self): + dim = 10 + n_generations = 30 + X = torch.Tensor([[i] for i in range(dim)]) + y = torch.ones(dim) + n_pert = 100 + feature_dim = 2 + np.random.seed(seed=123456) + ars_opt = ARSOptimizer(feature_dim, n_pert, rand_ars_params=True) + for i in range(n_generations): + perturbed_params = ars_opt.sample_perturbed_params() + rewards = [] + for idx in range(0, len(perturbed_params)): + pos_param, neg_param = perturbed_params[idx] + pos_weight = torch.sigmoid( + torch.matmul(torch.column_stack((X, y)), pos_param) + ) + # ARSOptimizer works in an ascent manner, + # thus a neg sign for minimizing objectives. + r_pos = -self.metric(pos_weight.numpy()) + rewards.append(r_pos) + neg_weight = torch.sigmoid( + torch.matmul(torch.column_stack((X, y)), neg_param) + ) + r_neg = -self.metric(neg_weight.numpy()) + rewards.append(r_neg) + ars_opt.update_ars_params(torch.Tensor(rewards)) + new_weight = torch.sigmoid( + torch.matmul( + torch.column_stack((X, y)), + torch.from_numpy(ars_opt.ars_params).float(), + ) + ) + perf = self.metric(new_weight.numpy()) + print(f"gen {i}: perf {perf}") + self.assertLessEqual(perf, 1e-15) diff --git a/reagent/test/training/test_behavioral_cloning.py b/reagent/test/training/test_behavioral_cloning.py new file mode 100644 index 000000000..2431bcee8 --- /dev/null +++ b/reagent/test/training/test_behavioral_cloning.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +import unittest +from enum import Enum + +import pytorch_lightning as pl +import torch +from pytorch_lightning import seed_everything +from reagent.core import types as rlt +from reagent.models.dqn import FullyConnectedDQN +from reagent.optimizer.union import classes, Optimizer__Union +from reagent.training.behavioral_cloning_trainer import BehavioralCloningTrainer +from torch.utils.data import DataLoader + +logger = logging.getLogger(__name__) + +SEED = 0 + + +class SyntheticType(Enum): + ACTION_TYPE = "one-hot" # support 'one-hot' + + +def get_dummy_batch(action_type, num_batches): + if action_type == "one-hot": + action = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) + else: + raise TypeError("the actions (labels) should be one-hot") + + possible_actions_mask = torch.tensor( + [ + [1, 1, 0, 0], + [0, 1, 1, 0], + [0, 0, 1, 1], + [1, 0, 0, 1] + # 1 means no mask. 
This mask keeps the label position (diagonal position) and some other position + ] + ) + + batches = [None for _ in range(num_batches)] + for i in range(num_batches): + state = torch.tensor( + [ + [+0.1, +0.2, +0.3, +0.4, +0.5, +0.6, +0.7, +0.8], + [+0.1, +0.2, +0.3, +0.4, -0.5, -0.6, -0.7, -0.8], + [-0.1, -0.2, -0.3, -0.4, +0.5, +0.6, +0.7, +0.8], + [-0.1, -0.2, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8], + ] + ) + # 8*1 float embedding + # -------- means label=0 + # ----++++ means label=1 + # ++++---- means label=2 + # ++++++++ means label=3 + state = state + (1e-8**0.5) * torch.rand_like(state) # add rand noise + i_th_training_batch = rlt.BehavioralCloningModelInput( + state=rlt.FeatureData(float_features=state), + action=action, + possible_actions_mask=possible_actions_mask, + ) + batches[i] = i_th_training_batch + return batches + + +def create_synthetic_data( + num_batches_train: int, num_batches_eval: int +) -> rlt.BehavioralCloningModelInput: + train_batches = get_dummy_batch( + action_type=SyntheticType.ACTION_TYPE.value, num_batches=num_batches_train + ) + train_dataloader = DataLoader(train_batches, collate_fn=lambda x: x[0]) + + eval_batches = get_dummy_batch( + action_type=SyntheticType.ACTION_TYPE.value, num_batches=num_batches_eval + ) + eval_dataloader = DataLoader(eval_batches, collate_fn=lambda x: x[0]) + + return train_dataloader, eval_dataloader # list of BehavioralCloningModelInput + + +def train_bc_model(train_dataloader, num_epochs) -> pl.LightningModule: + bc_net = FullyConnectedDQN( + state_dim=8, # input + action_dim=4, # output + sizes=[7, 6, 5], # hidden layers + activations=["relu", "relu", "relu"], + ) + + optimizer = Optimizer__Union(Adam=classes["Adam"]()) + bc_trainer = BehavioralCloningTrainer(bc_net=bc_net, optimizer=optimizer) + pl_trainer = pl.Trainer(max_epochs=num_epochs, deterministic=True) + pl_trainer.fit(bc_trainer, train_dataloader) + return bc_trainer + + +def validation_prob_vs_label( + bc_trainer: pl.LightningModule, + batch: rlt.BehavioralCloningModelInput, + batch_idx: int, +): + masked_logits = bc_trainer.bc_net( + batch.state, + batch.possible_actions_mask, + ) + labels = batch.action + probs = torch.nn.functional.softmax(masked_logits) + assert torch.allclose(labels.double(), probs.double(), atol=1e-1) + return + + +def eval_bc_model(eval_dataloader, bc_trainer) -> torch.Tensor: + total_xentropy_loss = 0 + for batch_idx, batch in enumerate(eval_dataloader): + xentropy_loss = bc_trainer.validation_step(batch, batch_idx) + total_xentropy_loss += xentropy_loss + N_eval = len(eval_dataloader) + eval_xentropy_loss = total_xentropy_loss / N_eval + + # at the last batch, check whether probs matches labels + validation_prob_vs_label(bc_trainer, batch, batch_idx) + return eval_xentropy_loss + + +class TestBehavioralCloning(unittest.TestCase): + def setUp(self): + seed_everything(1) + + def test_behavioral_cloning_v0(self): + NUM_TRAIN_BATCH, NUM_EVAL_BATCH = 200, 200 + train_dataloader, eval_dataloader = create_synthetic_data( + num_batches_train=NUM_TRAIN_BATCH, num_batches_eval=NUM_EVAL_BATCH + ) + bc_trainer = train_bc_model(train_dataloader=train_dataloader, num_epochs=4) + eval_loss = eval_bc_model( + eval_dataloader=eval_dataloader, bc_trainer=bc_trainer + ) + logger.info(f"eval_loss={eval_loss}") + assert abs(eval_loss) < 0.1 diff --git a/reagent/test/training/test_crr.py b/reagent/test/training/test_crr.py new file mode 100644 index 000000000..e12d79592 --- /dev/null +++ b/reagent/test/training/test_crr.py @@ -0,0 +1,243 @@ +#!/usr/bin/env 
python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import unittest + +import torch +from reagent.core.parameters import EvaluationParameters, RLParameters +from reagent.core.types import DiscreteDqnInput, ExtraData, FeatureData +from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.models.actor import FullyConnectedActor +from reagent.models.dqn import FullyConnectedDQN +from reagent.training.discrete_crr_trainer import DiscreteCRRTrainer +from reagent.training.parameters import CRRTrainerParameters +from reagent.workflow.types import RewardOptions + + +class TestCRR(unittest.TestCase): + def setUp(self): + # preparing various components for qr-dqn trainer initialization + self.batch_size = 3 + self.state_dim = 10 + self.action_dim = 2 + self.num_layers = 2 + self.sizes = [20 for _ in range(self.num_layers)] + self.num_atoms = 11 + self.activations = ["relu" for _ in range(self.num_layers)] + self.dropout_ratio = 0 + self.exploration_variance = 1e-10 + + self.actions = [str(i) for i in range(self.action_dim)] + self.params = CRRTrainerParameters(actions=self.actions) + self.reward_options = RewardOptions() + self.metrics_to_score = get_metrics_to_score( + self.reward_options.metric_reward_values + ) + + self.actor_network = FullyConnectedActor( + state_dim=self.state_dim, + action_dim=self.action_dim, + sizes=self.sizes, + activations=self.activations, + exploration_variance=self.exploration_variance, + ) + self.actor_network_target = self.actor_network.get_target_network() + + self.q1_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.action_dim, + sizes=self.sizes, + activations=self.activations, + dropout_ratio=self.dropout_ratio, + ) + self.q1_network_target = self.q1_network.get_target_network() + + self.q2_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.action_dim, + sizes=self.sizes, + activations=self.activations, + dropout_ratio=self.dropout_ratio, + ) + self.q2_network_target = self.q2_network.get_target_network() + + self.num_output_nodes = (len(self.metrics_to_score) + 1) * len( + self.params.actions + ) + self.eval_parameters = EvaluationParameters(calc_cpe_in_training=True) + self.reward_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_cpe = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_cpe_target = self.q_network_cpe.get_target_network() + self.inp = DiscreteDqnInput( + state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + next_state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + reward=torch.ones(self.batch_size, 1), + time_diff=torch.ones(self.batch_size, 1) * 2, + step=torch.ones(self.batch_size, 1) * 2, + not_terminal=torch.ones( + self.batch_size, 1 + ), # todo: check terminal behavior + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(self.batch_size, self.action_dim), + possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim), + extras=ExtraData(action_probability=torch.ones(self.batch_size, 1)), + ) + + @staticmethod + def dummy_log(*args, **kwargs): + # replaces calls to self.log() which otherwise require the pytorch lighting trainer to be intialized + return None + + def 
_construct_trainer(self, new_params=None, no_cpe=False, no_q2=False): + trainer = DiscreteCRRTrainer( + actor_network=self.actor_network, + actor_network_target=self.actor_network_target, + q1_network=self.q1_network, + q1_network_target=self.q1_network_target, + q2_network=(None if no_q2 else self.q2_network), + q2_network_target=(None if no_q2 else self.q2_network_target), + reward_network=(None if no_cpe else self.reward_network), + q_network_cpe=(None if no_cpe else self.q_network_cpe), + q_network_cpe_target=(None if no_cpe else self.q_network_cpe_target), + metrics_to_score=self.metrics_to_score, + evaluation=EvaluationParameters( + calc_cpe_in_training=(False if no_cpe else True) + ), + # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `asdict`. + **(new_params if new_params is not None else self.params).asdict(), + ) + trainer.log = self.dummy_log + return trainer + + def test_init(self): + trainer = self._construct_trainer() + self.assertTrue((torch.isclose(trainer.reward_boosts, torch.zeros(2))).all()) + param_copy = CRRTrainerParameters( + actions=self.actions, + rl=RLParameters(reward_boost={i: int(i) + 1 for i in self.actions}), + ) + reward_boost_trainer = self._construct_trainer(new_params=param_copy) + self.assertTrue( + ( + torch.isclose( + reward_boost_trainer.reward_boosts, torch.tensor([1.0, 2.0]) + ) + ).all() + ) + + def test_train_step_gen(self): + mse_backward_type = type( + torch.nn.functional.mse_loss( + torch.tensor([1.0], requires_grad=True), torch.zeros(1) + ).grad_fn + ) + add_backward_type = type( + ( + torch.tensor([1.0], requires_grad=True) + + torch.tensor([1.0], requires_grad=True) + ).grad_fn + ) + # vanilla + trainer = self._construct_trainer() + loss_gen = trainer.train_step_gen(self.inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 6) + self.assertEqual(type(losses[0].grad_fn), mse_backward_type) + self.assertEqual(type(losses[1].grad_fn), mse_backward_type) + self.assertEqual(type(losses[2].grad_fn), add_backward_type) + self.assertEqual(type(losses[3].grad_fn), mse_backward_type) + self.assertEqual(type(losses[4].grad_fn), mse_backward_type) + self.assertEqual(type(losses[5].grad_fn), add_backward_type) + + # no CPE + trainer = self._construct_trainer(no_cpe=True) + loss_gen = trainer.train_step_gen(self.inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 4) + + # no q2 net + trainer = self._construct_trainer(no_q2=True) + loss_gen = trainer.train_step_gen(self.inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 5) + + # use_target_actor + params_copy = CRRTrainerParameters(actions=self.actions, use_target_actor=True) + trainer = self._construct_trainer(new_params=params_copy) + loss_gen = trainer.train_step_gen(self.inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 6) + + # delayed policy update + params_copy = CRRTrainerParameters( + actions=self.actions, delayed_policy_update=2 + ) + trainer = self._construct_trainer(new_params=params_copy) + loss_gen = trainer.train_step_gen(self.inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 6) + self.assertEqual(losses[2], None) + + # entropy + params_copy = CRRTrainerParameters(actions=self.actions, entropy_coeff=1.0) + trainer = self._construct_trainer(new_params=params_copy) + loss_gen = trainer.train_step_gen(self.inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 6) + + def test_q_network_property(self): + trainer = self._construct_trainer() + 
self.assertEqual(trainer.q_network, trainer.q1_network) + + def test_configure_optimizers(self): + trainer = self._construct_trainer() + optimizers = trainer.configure_optimizers() + self.assertEqual(len(optimizers), 6) + train_step_yield_order = [ + trainer.q1_network, + trainer.q2_network, + trainer.actor_network, + trainer.reward_network, + trainer.q_network_cpe, + trainer.q1_network, + ] + for i in range(len(train_step_yield_order)): + opt_param = optimizers[i]["optimizer"].param_groups[0]["params"][0] + loss_param = list(train_step_yield_order[i].parameters())[0] + self.assertTrue(torch.all(torch.isclose(opt_param, loss_param))) + trainer = self._construct_trainer(no_cpe=True) + optimizers = trainer.configure_optimizers() + self.assertEqual(len(optimizers), 4) + trainer = self._construct_trainer(no_q2=True) + optimizers = trainer.configure_optimizers() + self.assertEqual(len(optimizers), 5) + + def test_get_detached_model_outputs(self): + trainer = self._construct_trainer() + action_scores, _ = trainer.get_detached_model_outputs( + FeatureData(float_features=torch.rand(self.batch_size, self.state_dim)) + ) + self.assertEqual(action_scores.shape[0], self.batch_size) + self.assertEqual(action_scores.shape[1], self.action_dim) + + def test_validation_step(self): + trainer = self._construct_trainer() + edp = trainer.validation_step(self.inp, batch_idx=1) + out = trainer.actor_network(self.inp.state) + # Note: in current code EDP assumes policy induced by q-net instead of actor + self.assertTrue(torch.all(torch.isclose(edp.optimal_q_values, out.action))) diff --git a/reagent/test/training/test_dqn.py b/reagent/test/training/test_dqn.py new file mode 100644 index 000000000..0b3ca104a --- /dev/null +++ b/reagent/test/training/test_dqn.py @@ -0,0 +1,346 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
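+
+# Unit tests for DQNTrainer. At a high level they verify that:
+#   * train_step_gen yields [td_loss, reward_loss, metric_q_value_loss,
+#     soft_update_result] when CPE is enabled, and only [td_loss,
+#     soft_update_result] without CPE;
+#   * configure_optimizers returns one optimizer per yielded loss, matched to
+#     the corresponding network's parameters;
+#   * compute_discount_tensor produces gamma, gamma ** time_diff (when
+#     use_seq_num_diff_as_time_diff is set) or gamma ** multi_steps;
+#   * compute_td_loss switches between MSE and Huber (smooth L1) objectives
+#     according to RLParameters.q_network_loss.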
+ +import unittest + +import torch +from reagent.core.parameters import EvaluationParameters, RLParameters +from reagent.core.types import DiscreteDqnInput, ExtraData, FeatureData +from reagent.evaluation.evaluation_data_page import EvaluationDataPage +from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.models.dqn import FullyConnectedDQN +from reagent.training.dqn_trainer import DQNTrainer +from reagent.training.parameters import DQNTrainerParameters +from reagent.workflow.types import RewardOptions + + +class TestDQN(unittest.TestCase): + def setUp(self): + self.params = DQNTrainerParameters(actions=["1", "2"]) + self.reward_options = RewardOptions() + self.metrics_to_score = get_metrics_to_score( + self.reward_options.metric_reward_values + ) + self.state_dim = 10 + self.action_dim = 2 + self.batch_size = 3 + self.sizes = [20, 20] + self.activations = ["relu", "relu"] + self.q_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.action_dim, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_target = self.q_network.get_target_network() + self.x = FeatureData(float_features=torch.rand(5, self.state_dim)) + self.eval_parameters = EvaluationParameters(calc_cpe_in_training=True) + self.num_output_nodes = (len(self.metrics_to_score) + 1) * len( + self.params.actions + ) + self.reward_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_cpe = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_cpe_target = self.q_network_cpe.get_target_network() + + def _construct_trainer(self, new_params=None, no_cpe=False): + reward_network = self.reward_network + q_network_cpe = self.q_network_cpe + q_network_cpe_target = self.q_network_cpe_target + evaluation = self.eval_parameters + params = self.params + + if new_params is not None: + params = new_params + if no_cpe: + reward_network = q_network_cpe = q_network_cpe_target = None + evaluation = EvaluationParameters(calc_cpe_in_training=False) + + return DQNTrainer( + q_network=self.q_network, + q_network_target=self.q_network_target, + reward_network=reward_network, + q_network_cpe=q_network_cpe, + q_network_cpe_target=q_network_cpe_target, + metrics_to_score=self.metrics_to_score, + evaluation=evaluation, + **params.asdict(), + ) + + def test_init(self): + trainer = self._construct_trainer() + self.assertTrue((torch.isclose(trainer.reward_boosts, torch.zeros(2))).all()) + param_copy = DQNTrainerParameters( + actions=["1", "2"], rl=RLParameters(reward_boost={"1": 1, "2": 2}) + ) + reward_boost_trainer = self._construct_trainer(new_params=param_copy) + self.assertTrue( + ( + torch.isclose( + reward_boost_trainer.reward_boosts, torch.tensor([1.0, 2.0]) + ) + ).all() + ) + + def test_train_step_gen(self): + # mock training batch + inp = DiscreteDqnInput( + state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + next_state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + reward=torch.ones(self.batch_size, 1), + time_diff=torch.ones(self.batch_size, 1) * 2, + step=torch.ones(self.batch_size, 1) * 2, + not_terminal=torch.ones(self.batch_size, 1), + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(self.batch_size, self.action_dim), + 
possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim), + extras=ExtraData(), + ) + mse_backward_type = type( + torch.nn.functional.mse_loss( + torch.tensor([1.0, 0.0], requires_grad=True), torch.zeros(1) + ).grad_fn + ) + add_backward_type = type( + ( + torch.tensor([1.0], requires_grad=True) + + torch.tensor([1.0], requires_grad=True) + ).grad_fn + ) + + # vanilla DQN with CPE + trainer = self._construct_trainer() + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + # four outputs of the train_step_gen method call: + # td_loss, two CPE losses (reward_loss and metric_q_value_loss), + # and soft_update_result + self.assertEqual(len(losses), 4) + self.assertEqual(type(losses[0].grad_fn), mse_backward_type) + self.assertEqual(type(losses[1].grad_fn), mse_backward_type) + self.assertEqual(type(losses[2].grad_fn), mse_backward_type) + self.assertEqual(type(losses[3].grad_fn), add_backward_type) + + # no CPE + trainer = self._construct_trainer(no_cpe=True) + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + # two outputs of the train_step_gen method with no CPE + self.assertEqual(len(losses), 2) + + # seq_num + param_copy = DQNTrainerParameters( + actions=["1", "2"], rl=RLParameters(use_seq_num_diff_as_time_diff=True) + ) + trainer = self._construct_trainer(new_params=param_copy) + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 4) + + # multi_steps + param_copy = DQNTrainerParameters( + actions=["1", "2"], rl=RLParameters(multi_steps=2) + ) + trainer = self._construct_trainer(new_params=param_copy) + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 4) + + # non_max_q + param_copy = DQNTrainerParameters( + actions=["1", "2"], rl=RLParameters(maxq_learning=False) + ) + trainer = self._construct_trainer(new_params=param_copy) + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 4) + + def test_configure_optimizers(self): + trainer = self._construct_trainer() + optimizers = trainer.configure_optimizers() + # expecting a list of [ + # q_network optimizer, + # reward_network optimizer, + # q_network_cpe optimizer, + # soft_update optimizer] + self.assertEqual(len(optimizers), 4) + train_step_yield_order = [ + trainer.q_network, + trainer.reward_network, + trainer.q_network_cpe, + trainer.q_network, + ] + for i in range(len(train_step_yield_order)): + opt_param = optimizers[i]["optimizer"].param_groups[0]["params"][0] + loss_param = list(train_step_yield_order[i].parameters())[0] + self.assertTrue(torch.all(torch.isclose(opt_param, loss_param))) + + trainer = self._construct_trainer(no_cpe=True) + optimizers = trainer.configure_optimizers() + # expecting a [q_network optimizer, soft_update optimizer] list + self.assertEqual(len(optimizers), 2) + + def test_get_detached_model_outputs(self): + trainer = self._construct_trainer() + q_out, q_target = trainer.get_detached_model_outputs(self.x) + self.assertEqual(q_out.shape[0], q_target.shape[0], self.batch_size) + self.assertEqual(q_out.shape[1], q_target.shape[1], self.action_dim) + + def test_compute_discount_tensor(self): + time_diff = 4 + steps = 3 + inp = DiscreteDqnInput( + state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + next_state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + reward=torch.ones(self.batch_size, 1), + 
time_diff=torch.ones(self.batch_size, 1) * time_diff, + step=torch.ones(self.batch_size, 1) * steps, + not_terminal=torch.ones(self.batch_size, 1), + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(self.batch_size, self.action_dim), + possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim), + extras=ExtraData(), + ) + + # vanilla + trainer = self._construct_trainer() + discount_tensor = trainer.compute_discount_tensor( + batch=inp, boosted_rewards=inp.reward + ) + self.assertEqual(discount_tensor.shape[0], self.batch_size) + self.assertEqual(discount_tensor.shape[1], 1) + self.assertTrue( + torch.isclose(discount_tensor, torch.tensor(trainer.gamma)).all() + ) + + # seq_num + param_copy = DQNTrainerParameters( + actions=["1", "2"], rl=RLParameters(use_seq_num_diff_as_time_diff=True) + ) + trainer = self._construct_trainer(new_params=param_copy) + discount_tensor = trainer.compute_discount_tensor( + batch=inp, boosted_rewards=inp.reward + ) + self.assertEqual(discount_tensor.shape[0], self.batch_size) + self.assertEqual(discount_tensor.shape[1], 1) + self.assertTrue( + torch.isclose( + discount_tensor, torch.tensor(trainer.gamma**time_diff) + ).all() + ) + + # multi_steps + param_copy = DQNTrainerParameters( + actions=["1", "2"], rl=RLParameters(multi_steps=steps) + ) + trainer = self._construct_trainer(new_params=param_copy) + discount_tensor = trainer.compute_discount_tensor( + batch=inp, boosted_rewards=inp.reward + ) + self.assertEqual(discount_tensor.shape[0], self.batch_size) + self.assertEqual(discount_tensor.shape[1], 1) + self.assertTrue( + torch.isclose(discount_tensor, torch.tensor(trainer.gamma**steps)).all() + ) + + def test_compute_td_loss(self): + inp = DiscreteDqnInput( + state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + next_state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + reward=torch.ones(self.batch_size, 1), + time_diff=torch.ones(self.batch_size, 1) * 2, + step=torch.ones(self.batch_size, 1) * 2, + not_terminal=torch.ones(self.batch_size, 1), + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(self.batch_size, self.action_dim), + possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim), + extras=ExtraData(), + ) + + # vanilla mse loss + trainer = self._construct_trainer() + discount_tensor = trainer.compute_discount_tensor( + batch=inp, boosted_rewards=inp.reward + ) + mse_backward_type = type( + torch.nn.functional.mse_loss( + torch.tensor([1.0, 0.0], requires_grad=True), torch.zeros(1) + ).grad_fn + ) + loss = trainer.compute_td_loss( + batch=inp, boosted_rewards=inp.reward, discount_tensor=discount_tensor + ) + self.assertEqual(type(loss.grad_fn), mse_backward_type) + + # huber loss + param_copy = DQNTrainerParameters( + actions=["1", "2"], rl=RLParameters(q_network_loss="huber") + ) + trainer = self._construct_trainer(new_params=param_copy) + discount_tensor = trainer.compute_discount_tensor( + batch=inp, boosted_rewards=inp.reward + ) + smooth_l1_backward_type = type( + torch.nn.functional.smooth_l1_loss( + torch.tensor([1.0], requires_grad=True), torch.zeros(1) + ).grad_fn + ) + loss = trainer.compute_td_loss( + batch=inp, boosted_rewards=inp.reward, discount_tensor=discount_tensor + ) + self.assertEqual(type(loss.grad_fn), smooth_l1_backward_type) + + def test_validation_step(self): + inp 
= DiscreteDqnInput( + state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + next_state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + reward=torch.ones(self.batch_size, 1), + time_diff=torch.ones(self.batch_size, 1) * 2, + step=torch.ones(self.batch_size, 1) * 2, + not_terminal=torch.ones(self.batch_size, 1), + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(self.batch_size, self.action_dim), + possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim), + extras=ExtraData(), + ) + trainer = self._construct_trainer() + data_page = trainer.validation_step(batch=inp, batch_idx=1) + self.assertTrue(isinstance(data_page, EvaluationDataPage)) + + def test__dense_to_action_dict(self): + trainer = self._construct_trainer() + dense = torch.rand(trainer.num_actions) + retval = trainer._dense_to_action_dict(dense) + self.assertEqual(len(retval), trainer.num_actions) + for i, a in enumerate(self.params.actions): + self.assertTrue(a in retval) + self.assertTrue(torch.isclose(retval[a], dense[i])) diff --git a/reagent/test/training/test_dqn_base.py b/reagent/test/training/test_dqn_base.py new file mode 100644 index 000000000..064260c78 --- /dev/null +++ b/reagent/test/training/test_dqn_base.py @@ -0,0 +1,543 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import unittest +from typing import List, Optional, Tuple + +import torch +from reagent.core.parameters import EvaluationParameters, RLParameters +from reagent.core.torch_utils import masked_softmax +from reagent.core.types import DiscreteDqnInput, ExtraData, FeatureData +from reagent.evaluation.evaluation_data_page import EvaluationDataPage +from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.models.dqn import FullyConnectedDQN +from reagent.training.dqn_trainer_base import DQNTrainerBaseLightning +from reagent.training.parameters import DQNTrainerParameters +from reagent.workflow.types import RewardOptions + + +class MockDQNTrainer(DQNTrainerBaseLightning): + """A minimal child class to test the methods in the DQNTrainerBase class.""" + + def __init__( + self, + rl_parameters: RLParameters, + metrics_to_score=None, + actions: Optional[List[str]] = None, + evaluation_parameters: Optional[EvaluationParameters] = None, + double_q_learning: bool = True, + ): + super().__init__( + rl_parameters, + metrics_to_score=metrics_to_score, + actions=actions, + evaluation_parameters=evaluation_parameters, + ) + self.double_q_learning = double_q_learning + + @torch.no_grad() + def get_detached_model_outputs( + self, state + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + """Gets the q values from the model and target networks""" + q_values = self.q_network(state) + q_values_target = self.q_network_target(state) + return q_values, q_values_target + + +class TestDQNTrainerBaseLightning(unittest.TestCase): + def setUp(self): + self.params = DQNTrainerParameters(actions=["1", "2"]) + self.reward_options = RewardOptions() + self.metrics_to_score = get_metrics_to_score( + self.reward_options.metric_reward_values + ) + self.state_dim = 10 + self.action_dim = 2 + self.batch_size = 3 + self.sizes = [20, 20] + self.activations = ["relu", "relu"] + self.q_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.action_dim, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_target = 
self.q_network.get_target_network() + self.eval_parameters = EvaluationParameters(calc_cpe_in_training=True) + self.num_output_nodes = (len(self.metrics_to_score) + 1) * len( + self.params.actions + ) + self.reward_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_cpe = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_cpe_target = self.q_network_cpe.get_target_network() + + def _construct_trainer(self, new_params=None, no_cpe=False): + evaluation = self.eval_parameters + params = self.params + + if new_params is not None: + params = new_params + if no_cpe: + evaluation = EvaluationParameters(calc_cpe_in_training=False) + + return MockDQNTrainer( + actions=params.actions, + rl_parameters=params.rl, + metrics_to_score=self.metrics_to_score, + evaluation_parameters=evaluation, + ) + + def test_get_max_q_values_with_target(self): + q_values = torch.tensor([[3.0, 4.0]]) + q_values_target = torch.tensor([[2.0, 1.0]]) + trainer = self._construct_trainer() + + # double q-learning (default) + possible_actions_mask = torch.ones(q_values.shape) + max_q_values_target, max_indicies = trainer.get_max_q_values_with_target( + q_values, q_values_target, possible_actions_mask + ) + self.assertEqual(max_indicies, torch.tensor([[1]])) + self.assertEqual(max_q_values_target, torch.tensor([[1.0]])) + + # mask out max-value action + possible_actions_mask = torch.tensor([[1, 0]]) + max_q_values_target, max_indicies = trainer.get_max_q_values_with_target( + q_values, q_values_target, possible_actions_mask + ) + self.assertEqual(max_indicies, torch.tensor([[0]])) + self.assertEqual(max_q_values_target, torch.tensor([[2.0]])) + + # simple q-learning + trainer.double_q_learning = False + possible_actions_mask = torch.ones(q_values.shape) + max_q_values_target, max_indicies = trainer.get_max_q_values_with_target( + q_values, q_values_target, possible_actions_mask + ) + self.assertEqual(max_indicies, torch.tensor([[0]])) + self.assertEqual(max_q_values_target, torch.tensor([[2.0]])) + + # mask out max-value action + possible_actions_mask = torch.tensor([[0, 1]]) + max_q_values_target, max_indicies = trainer.get_max_q_values_with_target( + q_values, q_values_target, possible_actions_mask + ) + self.assertEqual(max_indicies, torch.tensor([[1]])) + self.assertEqual(max_q_values_target, torch.tensor([[1.0]])) + + def test_boost_rewards(self): + rewards = torch.ones(3, 1) + actions = torch.tensor([[0, 1], [1, 0], [0, 1]]) + param_copy = DQNTrainerParameters( + actions=["1", "2"], rl=RLParameters(reward_boost={"1": 1.0, "2": 2.0}) + ) + trainer = self._construct_trainer(new_params=param_copy) + boosted_reward = trainer.boost_rewards(rewards, actions) + self.assertTrue( + torch.equal(boosted_reward, torch.tensor([[3.0], [2.0], [3.0]])) + ) + + def test__initialize_cpe(self): + reward_network = self.reward_network + q_network_cpe = self.q_network_cpe + q_network_cpe_target = self.q_network_cpe_target + optimizer = self.params.optimizer + # CPE + trainer = self._construct_trainer() + trainer._initialize_cpe( + reward_network, q_network_cpe, q_network_cpe_target, optimizer + ) + self.assertTrue(torch.equal(trainer.reward_idx_offsets, torch.tensor([0]))) + self.assertIsNotNone(trainer.reward_network) + self.assertIsNotNone(trainer.q_network_cpe) + self.assertIsNotNone(trainer.q_network_cpe_target) + 
self.assertIsNotNone(trainer.reward_network_optimizer) + self.assertIsNotNone(trainer.q_network_cpe_optimizer) + self.assertIsNotNone(trainer.evaluator) + # no CPE + trainer = self._construct_trainer(no_cpe=True) + trainer._initialize_cpe( + reward_network, q_network_cpe, q_network_cpe_target, optimizer + ) + self.assertIsNone(trainer.reward_network) + + def test__initialize_cpe_extra_metrics(self): + reward_options = RewardOptions( + metric_reward_values={"metric_a": 2, "metric_b": -2} + ) + self.metrics_to_score = get_metrics_to_score( + reward_options.metric_reward_values + ) + num_output_nodes = (len(self.metrics_to_score) + 1) * len(self.params.actions) + reward_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + q_network_cpe = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + q_network_cpe_target = q_network_cpe.get_target_network() + reward_network = self.reward_network + q_network_cpe = self.q_network_cpe + q_network_cpe_target = self.q_network_cpe_target + optimizer = self.params.optimizer + # CPE + trainer = self._construct_trainer() + trainer._initialize_cpe( + reward_network, q_network_cpe, q_network_cpe_target, optimizer + ) + self.assertTrue( + torch.equal(trainer.reward_idx_offsets, torch.tensor([0, 2, 4])) + ) + self.assertIsNotNone(trainer.reward_network) + self.assertIsNotNone(trainer.q_network_cpe) + self.assertIsNotNone(trainer.q_network_cpe_target) + self.assertIsNotNone(trainer.reward_network_optimizer) + self.assertIsNotNone(trainer.q_network_cpe_optimizer) + self.assertIsNotNone(trainer.evaluator) + # no CPE + trainer = self._construct_trainer(no_cpe=True) + trainer._initialize_cpe( + reward_network, q_network_cpe, q_network_cpe_target, optimizer + ) + self.assertIsNone(trainer.reward_network) + + def test__configure_cpe_optimizers(self): + reward_network = self.reward_network + q_network_cpe = self.q_network_cpe + q_network_cpe_target = self.q_network_cpe_target + trainer = self._construct_trainer() + trainer._initialize_cpe( + reward_network, q_network_cpe, q_network_cpe_target, self.params.optimizer + ) + _, _, optimizers = trainer._configure_cpe_optimizers() + # expecting a [reward_network_optimizer, q_network_cpe_optimizer] list + self.assertEqual(len(optimizers), 2) + + def test__calculate_cpes(self): + inp = DiscreteDqnInput( + state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + next_state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + reward=torch.ones(self.batch_size, 1), + time_diff=torch.ones(self.batch_size, 1) * 2, + step=torch.ones(self.batch_size, 1) * 2, + not_terminal=torch.ones(self.batch_size, 1), + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(self.batch_size, self.action_dim), + possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim), + extras=ExtraData(), + ) + + mse_backward_type = type( + torch.nn.functional.mse_loss( + torch.tensor([1.0, 0.0], requires_grad=True), torch.zeros(1) + ).grad_fn + ) + trainer = self._construct_trainer() + trainer._initialize_cpe( + self.reward_network, + self.q_network_cpe, + self.q_network_cpe_target, + self.params.optimizer, + ) + trainer.reward_network = self.reward_network + not_done_mask = inp.not_terminal.float() + discount_tensor = 
torch.ones(inp.reward.shape) + all_q_values = self.q_network(inp.state) + all_action_scores = all_q_values.detach() + all_next_action_scores = self.q_network(inp.next_state).detach() + logged_action_idxs = torch.tensor([[1], [0], [1]]) + + cpes_gen = trainer._calculate_cpes( + inp, + inp.state, + inp.next_state, + all_action_scores, + all_next_action_scores, + logged_action_idxs, + discount_tensor, + not_done_mask, + ) + cpes = list(cpes_gen) + + ## expected reward_loss + + reward_target = inp.reward + reward_estimate = trainer.reward_network(inp.state) + # rewards at offset + logged action idx + reward_estimate = reward_estimate.gather(1, torch.tensor([[1], [0], [1]])) + mse_reward_loss = torch.nn.functional.mse_loss(reward_estimate, reward_target) + + ## expected metric_q_value_loss + + # assuming masked_softmax is tested elsewhere, + # we can treat this as expected ground truth value + model_propensities_next_states = masked_softmax( + all_next_action_scores, + inp.possible_next_actions_mask + if trainer.maxq_learning + else inp.next_action, + trainer.rl_temperature, + ) + metric_q_values = trainer.q_network_cpe(inp.state).gather( + 1, torch.tensor([[1], [0], [1]]) + ) + metrics_target_q_values = trainer.q_network_cpe_target(inp.next_state) + per_metric_next_q_values = torch.sum( + metrics_target_q_values * model_propensities_next_states, + 1, + keepdim=True, + ) + per_metric_next_q_values *= not_done_mask + metrics_target_q_values = ( + reward_target + discount_tensor * per_metric_next_q_values + ) + metric_q_value_loss = trainer.q_network_loss( + metric_q_values, metrics_target_q_values + ) + self.assertEqual(len(cpes), 2) + self.assertEqual(type(cpes[0].grad_fn), mse_backward_type) + self.assertEqual(type(cpes[1].grad_fn), mse_backward_type) + self.assertEqual(cpes[0], mse_reward_loss) + self.assertEqual(cpes[1], metric_q_value_loss) + + def test__calculate_cpes_extra_metrics(self): + reward_options = RewardOptions( + metric_reward_values={"metric_a": 2, "metric_b": -2} + ) + self.metrics_to_score = get_metrics_to_score( + reward_options.metric_reward_values + ) + num_output_nodes = (len(self.metrics_to_score) + 1) * len(self.params.actions) + # re-initialize networks with larger output layers + reward_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + q_network_cpe = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + q_network_cpe_target = q_network_cpe.get_target_network() + # mock data for two extra metrcis: a and b + extra_metrics = torch.concat( + (2 * torch.ones(self.batch_size, 1), -2 * torch.ones(self.batch_size, 1)), + dim=1, + ) + # initialize batch with extra metrics data + inp = DiscreteDqnInput( + state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + next_state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + reward=torch.ones(self.batch_size, 1), + time_diff=torch.ones(self.batch_size, 1) * 2, + step=torch.ones(self.batch_size, 1) * 2, + not_terminal=torch.ones(self.batch_size, 1), + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(self.batch_size, self.action_dim), + possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim), + extras=ExtraData(metrics=extra_metrics), + ) + + mse_backward_type = type( + 
torch.nn.functional.mse_loss( + torch.tensor([1.0, 0.0], requires_grad=True), torch.zeros(1) + ).grad_fn + ) + trainer = self._construct_trainer() + trainer._initialize_cpe( + reward_network, + q_network_cpe, + q_network_cpe_target, + self.params.optimizer, + ) + trainer.reward_network = reward_network + not_done_mask = inp.not_terminal.float() + discount_tensor = torch.ones(inp.reward.shape) + all_q_values = self.q_network(inp.state) + all_action_scores = all_q_values.detach() + all_next_action_scores = self.q_network(inp.next_state).detach() + logged_action_idxs = torch.tensor([[1], [0], [1]]) + + cpes_gen = trainer._calculate_cpes( + inp, + inp.state, + inp.next_state, + all_action_scores, + all_next_action_scores, + logged_action_idxs, + discount_tensor, + not_done_mask, + ) + cpes = list(cpes_gen) + # offset + logged action idx tensor + offset_tensor = torch.tensor([[1, 3, 5], [0, 2, 4], [1, 3, 5]]) + ## expected reward_loss + + reward_target = torch.cat((inp.reward, inp.extras.metrics), dim=1) + reward_estimate = trainer.reward_network(inp.state) + reward_estimate = reward_estimate.gather(1, offset_tensor) + mse_reward_loss = torch.nn.functional.mse_loss(reward_estimate, reward_target) + + ## expected metric_q_value_loss + + model_propensities_next_states = masked_softmax( + all_next_action_scores, + inp.possible_next_actions_mask + if trainer.maxq_learning + else inp.next_action, + trainer.rl_temperature, + ) + # q_values at offset + logged action idx + metric_q_values = trainer.q_network_cpe(inp.state).gather(1, offset_tensor) + metrics_target_q_values = torch.chunk( + trainer.q_network_cpe_target(inp.next_state), + 3, + dim=1, + ) + target_metric_q_values = [] + for i, per_metric_target_q_values in enumerate(metrics_target_q_values): + per_metric_next_q_values = torch.sum( + per_metric_target_q_values * model_propensities_next_states, + 1, + keepdim=True, + ) + per_metric_next_q_values = per_metric_next_q_values * not_done_mask + per_metric_target_q_values = reward_target[:, i : i + 1] + ( + discount_tensor * per_metric_next_q_values + ) + target_metric_q_values.append(per_metric_target_q_values) + + target_metric_q_values = torch.cat(target_metric_q_values, dim=1) + metric_q_value_loss = trainer.q_network_loss( + metric_q_values, target_metric_q_values + ) + + self.assertEqual(len(cpes), 2) + self.assertEqual(type(cpes[0].grad_fn), mse_backward_type) + self.assertEqual(type(cpes[1].grad_fn), mse_backward_type) + self.assertEqual(cpes[0], mse_reward_loss) + self.assertEqual(cpes[1], metric_q_value_loss) + + def test_gather_eval_data(self): + batch_num = 2 + batches = [] + # generate several data batches + for _ in range(batch_num): + inp = DiscreteDqnInput( + state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + next_state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + reward=torch.ones(self.batch_size, 1), + time_diff=torch.ones(self.batch_size, 1) * 2, + step=torch.ones(self.batch_size, 1) * 2, + not_terminal=torch.ones(self.batch_size, 1), + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(self.batch_size, self.action_dim), + possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim), + extras=ExtraData(), + ) + batches.append(inp) + trainer = self._construct_trainer() + trainer.q_network = self.q_network + trainer.q_network_target = self.q_network_target + trainer.q_network_cpe = self.q_network_cpe + 
trainer.reward_network = self.reward_network + # generate evaluation datapages for each batch + data_pages = [ + trainer.validation_step(batch=inp, batch_idx=idx + 1) + for idx, inp in enumerate(batches) + ] + # aggregate datapages + eval_data = trainer.gather_eval_data(data_pages) + self.assertEqual( + eval_data.model_rewards.shape[0], + batch_num * data_pages[0].model_rewards.shape[0], + ) + self.assertEqual( + eval_data.logged_rewards.shape[0], + batch_num * data_pages[0].logged_rewards.shape[0], + ) + self.assertEqual( + eval_data.action_mask.shape[0], + batch_num * data_pages[0].action_mask.shape[0], + ) + self.assertEqual( + eval_data.model_propensities.shape[0], + batch_num * data_pages[0].model_propensities.shape[0], + ) + self.assertEqual( + eval_data.model_values.shape[0], + batch_num * data_pages[0].model_values.shape[0], + ) + self.assertEqual( + eval_data.possible_actions_mask.shape[0], + batch_num * data_pages[0].possible_actions_mask.shape[0], + ) + self.assertEqual( + eval_data.optimal_q_values.shape[0], + batch_num * data_pages[0].optimal_q_values.shape[0], + ) + self.assertEqual( + eval_data.eval_action_idxs.shape[0], + batch_num * data_pages[0].eval_action_idxs.shape[0], + ) + + def test_validation_step(self): + inp = DiscreteDqnInput( + state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + next_state=FeatureData( + float_features=torch.rand(self.batch_size, self.state_dim) + ), + reward=torch.ones(self.batch_size, 1), + time_diff=torch.ones(self.batch_size, 1) * 2, + step=torch.ones(self.batch_size, 1) * 2, + not_terminal=torch.ones(self.batch_size, 1), + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(self.batch_size, self.action_dim), + possible_next_actions_mask=torch.ones(self.batch_size, self.action_dim), + extras=ExtraData(), + ) + trainer = self._construct_trainer() + trainer.q_network = self.q_network + trainer.q_network_target = self.q_network_target + trainer.q_network_cpe = self.q_network_cpe + trainer.reward_network = self.reward_network + data_page = trainer.validation_step(batch=inp, batch_idx=1) + self.assertTrue(isinstance(data_page, EvaluationDataPage)) diff --git a/reagent/test/training/test_multi_stage_trainer.py b/reagent/test/training/test_multi_stage_trainer.py new file mode 100644 index 000000000..e6484e1d0 --- /dev/null +++ b/reagent/test/training/test_multi_stage_trainer.py @@ -0,0 +1,182 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
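MultiStageTrainer splits a single Lightning run's epoch budget across its stages: with epochs=[3, 3], stage1 owns global epochs 0-2 and stage2 owns epochs 3-5, which is exactly what the expected_epochs assertions in the test below encode. A minimal sketch of that epoch-to-stage bookkeeping, assuming only a list of per-stage epoch budgets (illustrative, not the actual MultiStageTrainer implementation):

import itertools
from typing import List

def stage_for_epoch(epoch: int, epochs_per_stage: List[int]) -> int:
    """Map a global epoch index to the stage that should train during it."""
    boundaries = list(itertools.accumulate(epochs_per_stage))  # e.g. [3, 6]
    for stage_idx, upper in enumerate(boundaries):
        if epoch < upper:
            return stage_idx
    raise ValueError(f"epoch {epoch} exceeds the total budget {boundaries[-1]}")

# With epochs_per_stage=[3, 3]: epochs 0-2 -> stage 0 and epochs 3-5 -> stage 1,
# matching expected_epochs=[0, 1, 2] and [3, 4, 5] in the DummyTrainer stages below.
assert [stage_for_epoch(e, [3, 3]) for e in range(6)] == [0, 0, 0, 1, 1, 1]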
+import unittest +from typing import List + +import pytorch_lightning as pl +import torch +import torch.nn as nn +import torch.optim as optim +from reagent.reporting import CompoundReporter, ReporterBase +from reagent.training import MultiStageTrainer, ReAgentLightningModule +from torch.utils.data import DataLoader, TensorDataset + + +class DummyReporter(ReporterBase): + def __init__(self, name: str, expected_epochs: List[int]): + super().__init__({}, {}) + self.name = name + self.expected_epochs = expected_epochs + self._log_count = 0 + self._flush_count = 0 + self._testing = False + + def log(self, **kwargs) -> None: + self._log_count += 1 + + def flush(self, epoch: int): + if not self._testing: + assert epoch in self.expected_epochs, f"{epoch} {self.expected_epochs}" + self._flush_count += 1 + + +class DummyTrainer(ReAgentLightningModule): + def __init__( + self, + name: str, + input_dim: int, + expected_epochs: List[int], + validation_keys: List[str], + test_keys: List[str], + ): + super().__init__() + self.name = name + self.linear1 = nn.Linear(input_dim, 1) + self.linear2 = nn.Linear(input_dim, 1) + self.loss_fn = nn.BCEWithLogitsLoss() + + self._call_count = { + "train": 0, + "validation": 0, + "test": 0, + } + self.expected_epochs = expected_epochs + self.validation_keys = validation_keys + self.test_keys = test_keys + + def configure_optimizers(self): + return [ + optim.SGD(self.linear1.parameters(), lr=1e2), + optim.SGD(self.linear2.parameters(), lr=1e2), + ] + + def on_test_start(self): + self.reporter._testing = True + + def on_test_end(self): + self.reporter._testing = False + + def train_step_gen(self, training_batch, batch_idx: int): + print(f"train_step_gen {self.name}") + assert ( + self.current_epoch in self.expected_epochs + ), f"{self.current_epoch} {self.expected_epochs}" + self._call_count["train"] += 1 + x, label = training_batch + + self.reporter.log() + + y = self.linear1(x) + yield self.loss_fn(y, label) + y = self.linear2(x) + yield self.loss_fn(y, label) + + def validation_step(self, batch, batch_idx: int): + print(f"validation_step {self.name}") + self._call_count["validation"] += 1 + assert self.current_epoch in self.expected_epochs + return {k: torch.ones(2, 3) for k in self.validation_keys} + + def validation_epoch_end(self, outputs): + print(f"validation_step_end {self.name}") + print(outputs) + for output in outputs: + assert set(output.keys()) == set(self.validation_keys) + + def test_step(self, batch, batch_idx: int): + print(f"test_step {self.name}") + self._call_count["test"] += 1 + return {k: torch.ones(2, 3) for k in self.test_keys} + + def test_epoch_end(self, outputs): + print(f"test_epoch_end {self.name}") + print(outputs) + for output in outputs: + assert set(output.keys()) == set(self.test_keys) + + +def make_dataset(input_dim, size): + return TensorDataset( + torch.randn(size, input_dim), + torch.randint(0, 2, (size, 1), dtype=torch.float32), + ) + + +def _merge_report(reporters): + pass + + +class TestMultiStageTrainer(unittest.TestCase): + def test_multi_stage_trainer(self): + input_dim = 5 + stage1 = DummyTrainer( + "stage1", + input_dim, + expected_epochs=[0, 1, 2], + validation_keys=["a", "b", "c"], + test_keys=["d", "e"], + ) + stage2 = DummyTrainer( + "stage2", + input_dim, + expected_epochs=[3, 4, 5], + validation_keys=["x", "y", "z"], + test_keys=["u", "v"], + ) + multi_stage_trainer = MultiStageTrainer( + [stage1, stage2], + epochs=[3, 3], + ) + + reporters = [ + DummyReporter("stage1", expected_epochs=[0, 1, 2]), + 
DummyReporter("stage2", expected_epochs=[3, 4, 5]), + ] + compound_reporter = CompoundReporter(reporters, _merge_report) + multi_stage_trainer.set_reporter(compound_reporter) + + training_size = 100 + validation_size = 20 + train_dataloader = DataLoader( + make_dataset(input_dim, training_size), batch_size=5 + ) + validation_dataloader = DataLoader( + make_dataset(input_dim, validation_size), + batch_size=5, + ) + + trainer = pl.Trainer(max_epochs=6, min_epochs=6) + trainer.fit(multi_stage_trainer, train_dataloader, validation_dataloader) + + test_size = 20 + test_dataloader = DataLoader( + make_dataset(input_dim, test_size), + batch_size=5, + ) + trainer.test(dataloaders=test_dataloader) + print(f"stage1 {stage1._call_count}") + print(f"stage2 {stage2._call_count}") + self.assertEqual(stage1._call_count["train"], 60) + # It seems that lightning call validation 2 times at the beginning + self.assertEqual(stage1._call_count["validation"], 14) + self.assertEqual(stage1._call_count["test"], 4) + self.assertEqual(stage2._call_count["train"], 60) + self.assertEqual(stage2._call_count["validation"], 12) + self.assertEqual(stage2._call_count["test"], 4) + + for reporter, t in zip(reporters, [stage1, stage2]): + print(f"{reporter.name} {reporter._log_count} {reporter._flush_count}") + self.assertEqual(reporter._log_count, t._call_count["train"]) + # flush got called in train & validation 3 times each. + # In stage1, there is an additional call to validation at the beginning + self.assertEqual(reporter._flush_count, 8 if t == stage1 else 7) diff --git a/reagent/test/training/test_ppo.py b/reagent/test/training/test_ppo.py new file mode 100644 index 000000000..e326a1001 --- /dev/null +++ b/reagent/test/training/test_ppo.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
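The PPO test that follows checks two behaviors worth spelling out: PPOTrainer is expected to reject ppo_epsilon values outside [0, 1] at construction time, and adding an entropy bonus (entropy_weight) should lower ppo_loss. For reference, a self-contained sketch of the standard clipped PPO surrogate those checks target; this is the textbook objective under assumed tensor shapes, not necessarily the exact reduction implemented inside PPOTrainer:

import torch

def clipped_ppo_loss(
    log_probs: torch.Tensor,      # log pi_new(a|s) for the taken actions
    old_log_probs: torch.Tensor,  # log pi_old(a|s) recorded when the data was collected
    advantages: torch.Tensor,
    ppo_epsilon: float = 0.2,
) -> torch.Tensor:
    assert 0.0 <= ppo_epsilon <= 1.0, "ppo_epsilon must lie in [0, 1]"
    ratio = torch.exp(log_probs - old_log_probs)
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - ppo_epsilon, 1.0 + ppo_epsilon) * advantages
    # Maximizing the clipped surrogate is the same as minimizing its negation.
    return -torch.min(unclipped, clipped).mean()

Subtracting an entropy term scaled by entropy_weight from this loss encourages exploration, which is why test__trajectory_to_losses expects the run with entropy_weight = 1.0 to report a smaller ppo_loss.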
+ +import unittest +from collections import defaultdict +from unittest import mock + +import torch +from reagent.core.types import PolicyGradientInput +from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.gym.policies.policy import Policy +from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler +from reagent.models.dueling_q_network import DuelingQNetwork +from reagent.models.fully_connected_network import FloatFeatureFullyConnected +from reagent.training.parameters import PPOTrainerParameters +from reagent.training.ppo_trainer import PPOTrainer +from reagent.workflow.types import RewardOptions + + +class TestPPO(unittest.TestCase): + def setUp(self): + # preparing various components for qr-dqn trainer initialization + self.batch_size = 3 + self.state_dim = 10 + self.action_dim = 2 + self.num_layers = 2 + self.sizes = [20 for _ in range(self.num_layers)] + self.activations = ["relu" for _ in range(self.num_layers)] + self.use_layer_norm = False + self.softmax_temperature = 1 + + self.actions = [str(i) for i in range(self.action_dim)] + self.params = PPOTrainerParameters(actions=self.actions, normalize=False) + self.reward_options = RewardOptions() + self.metrics_to_score = get_metrics_to_score( + self.reward_options.metric_reward_values + ) + + self.policy_network = DuelingQNetwork.make_fully_connected( + state_dim=self.state_dim, + action_dim=self.action_dim, + layers=self.sizes, + activations=self.activations, + ) + self.sampler = SoftmaxActionSampler(temperature=self.softmax_temperature) + self.policy = Policy(scorer=self.policy_network, sampler=self.sampler) + + self.value_network = FloatFeatureFullyConnected( + state_dim=self.state_dim, + output_dim=1, + sizes=self.sizes, + activations=self.activations, + use_layer_norm=self.use_layer_norm, + ) + + def _construct_trainer(self, new_params=None, use_value_net=True): + value_network = self.value_network if use_value_net else None + params = new_params if new_params else self.params + + trainer = PPOTrainer( + policy=self.policy, value_net=value_network, **params.asdict() + ) + trainer.optimizers = mock.Mock(return_value=[0, 0]) + return trainer + + def test_init(self): + trainer = self._construct_trainer() + + self.assertEqual( + type(trainer.value_loss_fn), type(torch.nn.MSELoss(reduction="mean")) + ) + + with self.assertRaises(AssertionError): + new_params = PPOTrainerParameters(ppo_epsilon=-1) + self._construct_trainer(new_params) + + with self.assertRaises(AssertionError): + new_params = PPOTrainerParameters(ppo_epsilon=2) + self._construct_trainer(new_params) + + with self.assertRaises(AssertionError): + params = PPOTrainerParameters(actions=["1", "2"], normalize=True) + trainer = self._construct_trainer(new_params=params) + + def test__trajectory_to_losses(self): + inp = PolicyGradientInput.input_prototype( + batch_size=self.batch_size, + action_dim=self.action_dim, + state_dim=self.state_dim, + ) + # Normalize + offset clamp min + params = PPOTrainerParameters( + actions=["1", "2"], normalize=True, offset_clamp_min=True + ) + trainer = self._construct_trainer(new_params=params, use_value_net=False) + losses = trainer._trajectory_to_losses(inp) + self.assertEqual(len(losses), 1) + self.assertTrue("ppo_loss" in losses) + + trainer = self._construct_trainer() + losses = trainer._trajectory_to_losses(inp) + self.assertEqual(len(losses), 2) + self.assertTrue("ppo_loss" in losses and "value_net_loss" in losses) + # entropy weight should always lower ppo_loss + trainer.entropy_weight = 1.0 + 
entropy_losses = trainer._trajectory_to_losses(inp) + self.assertTrue(entropy_losses["ppo_loss"] < losses["ppo_loss"]) + + def test_configure_optimizers(self): + # Ordering is value then policy + trainer = self._construct_trainer() + optimizers = trainer.configure_optimizers() + self.assertTrue( + torch.all( + torch.isclose( + optimizers[0]["optimizer"].param_groups[0]["params"][0], + list(trainer.value_net.fc.dnn[0].parameters())[0], + ) + ) + ) + self.assertTrue( + torch.all( + torch.isclose( + optimizers[1]["optimizer"].param_groups[0]["params"][0], + list(trainer.scorer.shared_network.fc.dnn[0].parameters())[0], + ) + ) + ) + + def test_get_optimizers(self): + # ordering covered in test_configure_optimizers + trainer = self._construct_trainer() + optimizers = trainer.get_optimizers() + self.assertIsNotNone(optimizers[0]) + trainer = self._construct_trainer(use_value_net=False) + optimizers = trainer.get_optimizers() + self.assertIsNone(optimizers[0]) + + def test_training_step(self): + trainer = self._construct_trainer() + inp = defaultdict(lambda: torch.ones(1, 5)) + trainer.update_model = mock.Mock() + trainer.training_step(inp, batch_idx=1) + trainer.update_model.assert_called_with() + trainer.update_freq = 10 + trainer.update_model = mock.Mock() + trainer.training_step(inp, batch_idx=1) + trainer.update_model.assert_not_called() + + def test_update_model(self): + trainer = self._construct_trainer() + # can't update empty model + with self.assertRaises(AssertionError): + trainer.update_model() + # _update_model called with permutation of traj_buffer contents update_epoch # times + trainer = self._construct_trainer( + new_params=PPOTrainerParameters( + ppo_batch_size=1, + update_epochs=2, + update_freq=2, + normalize=False, + ) + ) + trainer.traj_buffer = [1, 2] + trainer._update_model = mock.Mock() + trainer.update_model() + calls = [mock.call([1]), mock.call([2]), mock.call([1]), mock.call([2])] + trainer._update_model.assert_has_calls(calls, any_order=True) + # trainer empties buffer + self.assertEqual(trainer.traj_buffer, []) + + # _update_model + trainer = self._construct_trainer() + value_net_opt_mock = mock.Mock() + ppo_opt_mock = mock.Mock() + trainer.get_optimizers = mock.Mock( + return_value=[value_net_opt_mock, ppo_opt_mock] + ) + trainer._trajectory_to_losses = mock.Mock( + side_effect=[ + {"ppo_loss": torch.tensor(1), "value_net_loss": torch.tensor(2)}, + {"ppo_loss": torch.tensor(3), "value_net_loss": torch.tensor(4)}, + ] + ) + trainer.manual_backward = mock.Mock() + inp1 = PolicyGradientInput.input_prototype( + batch_size=1, action_dim=1, state_dim=1 + ) + inp2 = PolicyGradientInput.input_prototype( + batch_size=1, action_dim=1, state_dim=1 + ) + + trainer._update_model([inp1, inp2]) + + trainer._trajectory_to_losses.assert_has_calls( + [mock.call(inp1), mock.call(inp2)] + ) + value_net_opt_mock.zero_grad.assert_called() + value_net_opt_mock.step.assert_called() + + ppo_opt_mock.zero_grad.assert_called() + ppo_opt_mock.step.assert_called() + + trainer.manual_backward.assert_has_calls( + [mock.call(torch.tensor(6)), mock.call(torch.tensor(4))] + ) diff --git a/reagent/test/training/test_probabilistic.py b/reagent/test/training/test_probabilistic.py new file mode 100644 index 000000000..f1ef5c756 --- /dev/null +++ b/reagent/test/training/test_probabilistic.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
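test_probabilistic.py below trains a FullyConnectedProbabilisticNetwork with BayesByBackpropTrainer and then checks that predictive variance grows as inputs move away from the training domain. As background, a compact sketch of the Bayes-by-backprop recipe (Blundell et al.): weights are drawn through a reparameterized Gaussian posterior and the loss trades a KL term against the data likelihood. This is the standard formulation under assumed inputs, not a copy of the ReAgent trainer's internals:

import torch
import torch.nn.functional as F

def sample_weight(mu: torch.Tensor, rho: torch.Tensor) -> torch.Tensor:
    """Reparameterized draw w = mu + softplus(rho) * eps with eps ~ N(0, 1)."""
    sigma = F.softplus(rho)
    return mu + sigma * torch.randn_like(mu)

def bayes_by_backprop_loss(
    log_variational_posterior: torch.Tensor,  # log q(w) of the sampled weights
    log_prior: torch.Tensor,                  # log p(w), e.g. a N(0, prior_var) prior
    log_likelihood: torch.Tensor,             # log p(D | w) on the minibatch
    kl_weight: float = 1.0,
) -> torch.Tensor:
    # Minimizing this quantity maximizes the evidence lower bound (ELBO).
    return kl_weight * (log_variational_posterior - log_prior) - log_likelihood

The variance check at the end of the test is the payoff of this scheme: repeated stochastic forward passes through sampled weights disagree more the further the query is from the training data.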
+ +import logging +import unittest + +import numpy as np +import pytorch_lightning as pl +import reagent.core.types as rlt +import torch +import torch.optim as optim +from reagent.models.probabilistic_fully_connected_network import ( + FullyConnectedProbabilisticNetwork, +) +from reagent.training.cfeval.bayes_by_backprop_trainer import BayesByBackpropTrainer + +logger = logging.getLogger(__name__) + + +def toy_function(x): + return -(x**4) + 3 * x**2 - 5 * np.sin(x) + 1 + + +class TestBayesByBackpropTraining(unittest.TestCase): + def setUp(self): + pl.seed_everything(123) + + def test_probabilistic_network(self): + + net = FullyConnectedProbabilisticNetwork( + [2, 16, 16, 1], ["relu", "relu", "linear"], prior_var=1 + ) + net = net + trainer = BayesByBackpropTrainer(net) + + epochs = 1000 + optimizer = optim.Adam(net.parameters(), lr=0.1) + batch_size = 6 + action = torch.ones(batch_size, 1) + loss_ema = -1 + prev_loss_ema = -1 + + for epoch in range(epochs): # loop over the dataset multiple times + x = torch.rand((batch_size, 1)) * 4 - 2 + y = toy_function(x) + batch = rlt.BanditRewardModelInput( + action=action, + reward=y, + state=rlt.FeatureData(float_features=x), + # BanditRewardModelInput for simple fully supervised regression task + ) + loss = next(trainer.train_step_gen(batch, epoch)) + optimizer.zero_grad() + loss.backward() + optimizer.step() + if loss_ema == -1: + loss_ema = loss + else: + loss_ema = loss_ema * 0.99 + 0.01 * loss + if epoch % 100 == 1: + print("Loss EMA", loss_ema) + if epoch > 250: # give some time for training to stabilize + assert loss_ema < prev_loss_ema + prev_loss_ema = loss_ema + print("Finished Training") + + # test model confidence + num_samples_per_point = 100 + test_domain = 25 + num_points = 100 + x_tmp = torch.cat( + [ + torch.linspace(-1 * test_domain, test_domain, num_points).reshape( + -1, 1 + ), + torch.ones((num_points, 1)), + ], + 1, + ) + y_samp = np.zeros((num_samples_per_point, num_points)) + for s in range(num_samples_per_point): + y_tmp = net(x_tmp).cpu().detach().numpy() + y_samp[s] = y_tmp.reshape(-1) + mean = np.mean(y_samp, 0, keepdims=True) + var = np.mean((y_samp - mean) ** 2, 0) + + # make several assertions that the further you get from the training domain, the more + # unconfident the network becomes, eventually becoming extremely unconfident when x=-25 + assert var[60] > var[50] + assert var[0] > var[60] + assert var[0] > var[50] * 10 diff --git a/reagent/test/training/test_qrdqn.py b/reagent/test/training/test_qrdqn.py new file mode 100644 index 000000000..708ef85ef --- /dev/null +++ b/reagent/test/training/test_qrdqn.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
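test_qrdqn.py below asserts that QRDQNTrainer places its quantile midpoints at (i + 0.5) / num_atoms. For context, a small sketch of that atom grid together with the pinball (quantile-regression) loss such atoms are trained against; QR-DQN implementations usually smooth this with a Huber term, so treat it as the unsmoothed reference form rather than the trainer's exact loss:

import torch

num_atoms = 11
# Quantile midpoints tau_i = (i + 0.5) / N; this is exactly what test_init checks.
quantiles = (0.5 + torch.arange(num_atoms).float()) / float(num_atoms)

def pinball_loss(pred: torch.Tensor, target: torch.Tensor, tau: torch.Tensor) -> torch.Tensor:
    """Quantile loss rho_tau(u) = max(tau * u, (tau - 1) * u) with u = target - pred."""
    diff = target - pred
    return torch.mean(torch.max(tau * diff, (tau - 1.0) * diff))

# Example: predicted atoms for a small batch against scalar targets broadcast over atoms.
pred = torch.zeros(4, num_atoms)
target = torch.ones(4, 1)
loss = pinball_loss(pred, target, quantiles)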
+ +import unittest + +import torch +from reagent.core.parameters import EvaluationParameters, RLParameters +from reagent.core.types import DiscreteDqnInput, ExtraData, FeatureData +from reagent.evaluation.evaluator import get_metrics_to_score +from reagent.models.dqn import FullyConnectedDQN +from reagent.training.parameters import QRDQNTrainerParameters +from reagent.training.qrdqn_trainer import QRDQNTrainer +from reagent.workflow.types import RewardOptions + + +class TestQRDQN(unittest.TestCase): + def setUp(self): + # preparing various components for qr-dqn trainer initialization + self.params = QRDQNTrainerParameters(actions=["1", "2"], num_atoms=11) + self.reward_options = RewardOptions() + self.metrics_to_score = get_metrics_to_score( + self.reward_options.metric_reward_values + ) + self.state_dim = 10 + self.action_dim = 2 + self.sizes = [20, 20] + self.num_atoms = 11 + self.activations = ["relu", "relu"] + self.dropout_ratio = 0 + self.q_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.action_dim, + sizes=self.sizes, + num_atoms=self.num_atoms, + activations=self.activations, + dropout_ratio=self.dropout_ratio, + ) + self.q_network_target = self.q_network.get_target_network() + self.x = FeatureData(float_features=torch.rand(5, 10)) + self.eval_parameters = EvaluationParameters(calc_cpe_in_training=True) + self.num_output_nodes = (len(self.metrics_to_score) + 1) * len( + # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `actions`. + self.params.actions + ) + self.reward_network = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_cpe = FullyConnectedDQN( + state_dim=self.state_dim, + action_dim=self.num_output_nodes, + sizes=self.sizes, + activations=self.activations, + ) + self.q_network_cpe_target = self.q_network_cpe.get_target_network() + + def _construct_trainer(self, new_params=None, no_cpe=False): + reward_network = self.reward_network + q_network_cpe = self.q_network_cpe + q_network_cpe_target = self.q_network_cpe_target + evaluation = self.eval_parameters + params = self.params + + if new_params is not None: + params = new_params + if no_cpe: + reward_network = q_network_cpe = q_network_cpe_target = None + evaluation = EvaluationParameters(calc_cpe_in_training=False) + + return QRDQNTrainer( + q_network=self.q_network, + q_network_target=self.q_network_target, + reward_network=reward_network, + q_network_cpe=q_network_cpe, + q_network_cpe_target=q_network_cpe_target, + metrics_to_score=self.metrics_to_score, + evaluation=evaluation, + # pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `asdict`. 
+ **params.asdict(), + ) + + def test_init(self): + trainer = self._construct_trainer() + quantiles = (0.5 + torch.arange(self.num_atoms).float()) / float(self.num_atoms) + self.assertTrue((torch.isclose(trainer.quantiles, quantiles)).all()) + self.assertTrue((torch.isclose(trainer.reward_boosts, torch.zeros(2))).all()) + param_copy = QRDQNTrainerParameters( + actions=["1", "2"], + num_atoms=11, + rl=RLParameters(reward_boost={"1": 1, "2": 2}), + ) + reward_boost_trainer = self._construct_trainer(new_params=param_copy) + self.assertTrue( + ( + torch.isclose( + reward_boost_trainer.reward_boosts, torch.tensor([1.0, 2.0]) + ) + ).all() + ) + + def test_train_step_gen(self): + inp = DiscreteDqnInput( + state=FeatureData(float_features=torch.rand(3, 10)), + next_state=FeatureData(float_features=torch.rand(3, 10)), + reward=torch.ones(3, 1), + time_diff=torch.ones(3, 1) * 2, + step=torch.ones(3, 1) * 2, + not_terminal=torch.ones(3, 1), # todo: check terminal behavior + action=torch.tensor([[0, 1], [1, 0], [0, 1]]), + next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]), + possible_actions_mask=torch.ones(3, 2), + possible_next_actions_mask=torch.ones(3, 2), + extras=ExtraData(), + ) + mse_backward_type = type( + torch.nn.functional.mse_loss( + torch.tensor([1.0], requires_grad=True), torch.zeros(1) + ).grad_fn + ) + add_backward_type = type( + ( + torch.tensor([1.0], requires_grad=True) + + torch.tensor([1.0], requires_grad=True) + ).grad_fn + ) + mean_backward_type = type( + torch.tensor([1.0, 2.0], requires_grad=True).mean().grad_fn + ) + + # vanilla + trainer = self._construct_trainer() + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 4) + self.assertEqual(type(losses[0].grad_fn), mean_backward_type) + self.assertEqual(type(losses[1].grad_fn), mse_backward_type) + self.assertEqual(type(losses[2].grad_fn), mse_backward_type) + self.assertEqual(type(losses[3].grad_fn), add_backward_type) + + # no CPE + trainer = self._construct_trainer(no_cpe=True) + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 2) + + # seq_num + param_copy = QRDQNTrainerParameters( + actions=["1", "2"], + num_atoms=11, + rl=RLParameters(use_seq_num_diff_as_time_diff=True), + ) + trainer = self._construct_trainer(new_params=param_copy) + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 4) + + # multi_steps + param_copy = QRDQNTrainerParameters( + actions=["1", "2"], num_atoms=11, rl=RLParameters(multi_steps=2) + ) + trainer = self._construct_trainer(new_params=param_copy) + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 4) + + # non_max_q + param_copy = QRDQNTrainerParameters( + actions=["1", "2"], num_atoms=11, rl=RLParameters(maxq_learning=False) + ) + trainer = self._construct_trainer(new_params=param_copy) + loss_gen = trainer.train_step_gen(inp, batch_idx=1) + losses = list(loss_gen) + self.assertEqual(len(losses), 4) + + def test_configure_optimizers(self): + trainer = self._construct_trainer() + optimizers = trainer.configure_optimizers() + self.assertEqual(len(optimizers), 4) + train_step_yield_order = [ + trainer.q_network, + trainer.reward_network, + trainer.q_network_cpe, + trainer.q_network, + ] + for i in range(len(train_step_yield_order)): + opt_param = optimizers[i]["optimizer"].param_groups[0]["params"][0] + loss_param = 
list(train_step_yield_order[i].parameters())[0] + self.assertTrue(torch.all(torch.isclose(opt_param, loss_param))) + + trainer = self._construct_trainer(no_cpe=True) + optimizers = trainer.configure_optimizers() + self.assertEqual(len(optimizers), 2) + + def test_get_detached_model_outputs(self): + trainer = self._construct_trainer() + q_out, q_target = trainer.get_detached_model_outputs(self.x) + self.assertEqual(q_out.shape[0], q_target.shape[0], 3) + self.assertEqual(q_out.shape[1], q_target.shape[1], 2) diff --git a/reagent/test/training/test_synthetic_reward_training.py b/reagent/test/training/test_synthetic_reward_training.py new file mode 100644 index 000000000..502d6a9eb --- /dev/null +++ b/reagent/test/training/test_synthetic_reward_training.py @@ -0,0 +1,422 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +import unittest + +import pytorch_lightning as pl +import torch +from reagent.core import parameters as rlp, types as rlt +from reagent.models.synthetic_reward import ( + NGramConvolutionalNetwork, + NGramFullyConnectedNetwork, + SequenceSyntheticRewardNet, + SingleStepSyntheticRewardNet, + SyntheticRewardNet, + TransformerSyntheticRewardNet, +) +from reagent.optimizer.union import classes, Optimizer__Union +from reagent.reporting.reward_network_reporter import RewardNetworkReporter +from reagent.training import RewardNetTrainer +from reagent.training.reward_network_trainer import LossFunction +from torch.utils.data import DataLoader + + +logger = logging.getLogger(__name__) + + +def create_data( + state_dim, action_dim, seq_len, batch_size, num_batches, binary_reward=False +): + SCALE = 2 + # reward is a linear function of (state, action) + weight = SCALE * torch.randn(state_dim + action_dim) + data = [None for _ in range(num_batches)] + for i in range(num_batches): + state = SCALE * torch.randn(seq_len, batch_size, state_dim) + action = SCALE * torch.randn(seq_len, batch_size, action_dim) + # random valid step + valid_step = torch.randint(1, seq_len + 1, (batch_size, 1)) + + # reward_matrix shape: batch_size x seq_len + reward_matrix = torch.matmul( + torch.cat((state, action), dim=2), weight + ).transpose(0, 1) + if binary_reward: + reward_matrix = torch.sigmoid(reward_matrix) + mask = torch.arange(seq_len).repeat(batch_size, 1) + mask = (mask >= (seq_len - valid_step)).float() + reward = (reward_matrix * mask).sum(dim=1).reshape(-1, 1) + data[i] = rlt.MemoryNetworkInput( + state=rlt.FeatureData(state), + action=rlt.FeatureData(action), + valid_step=valid_step, + reward=reward, + # the rest fields will not be used + next_state=torch.tensor([]), + step=torch.tensor([]), + not_terminal=torch.tensor([]), + time_diff=torch.tensor([]), + ) + return weight, data + + +def create_sequence_data(state_dim, action_dim, seq_len, batch_size, num_batches): + SCALE = 2 + weight = SCALE * torch.randn(state_dim + action_dim) + + data = [None for _ in range(num_batches)] + + for i in range(num_batches): + state = SCALE * torch.randn(seq_len, batch_size, state_dim) + action = SCALE * torch.randn(seq_len, batch_size, action_dim) + # random valid step + valid_step = torch.randint(1, seq_len + 1, (batch_size, 1)) + + feature_mask = torch.arange(seq_len).repeat(batch_size, 1) + feature_mask = (feature_mask >= (seq_len - valid_step)).float() + assert feature_mask.shape == (batch_size, seq_len), feature_mask.shape + feature_mask = feature_mask.transpose(0, 1).unsqueeze(-1) + assert feature_mask.shape == (seq_len, batch_size, 1), 
feature_mask.shape + + feature = torch.cat((state, action), dim=2) + masked_feature = feature * feature_mask + + # seq_len, batch_size, state_dim + action_dim + left_shifted = torch.cat( + ( + masked_feature.narrow(0, 1, seq_len - 1), + torch.zeros(1, batch_size, state_dim + action_dim), + ), + dim=0, + ) + # seq_len, batch_size, state_dim + action_dim + right_shifted = torch.cat( + ( + torch.zeros(1, batch_size, state_dim + action_dim), + masked_feature.narrow(0, 0, seq_len - 1), + ), + dim=0, + ) + # reward_matrix shape: batch_size x seq_len + reward_matrix = torch.matmul(left_shifted + right_shifted, weight).transpose( + 0, 1 + ) + + mask = torch.arange(seq_len).repeat(batch_size, 1) + mask = (mask >= (seq_len - valid_step)).float() + reward = (reward_matrix * mask).sum(dim=1).reshape(-1, 1) + + data[i] = rlt.MemoryNetworkInput( + state=rlt.FeatureData(state), + action=rlt.FeatureData(action), + valid_step=valid_step, + reward=reward, + # the rest fields will not be used + next_state=torch.tensor([]), + step=torch.tensor([]), + not_terminal=torch.tensor([]), + time_diff=torch.tensor([]), + ) + + return weight, data + + +def train_and_eval(trainer, data, num_eval_batches=100, max_epochs=1): + train_dataloader = DataLoader(data[:-num_eval_batches], collate_fn=lambda x: x[0]) + eval_data = data[-num_eval_batches:] + + # disable logging in tests + pl_trainer = pl.Trainer(max_epochs=max_epochs, logger=False) + pl_trainer.fit(trainer, train_dataloader) + + total_loss = 0 + for i, batch in enumerate(eval_data): + loss = trainer.validation_step(batch, batch_idx=i) + total_loss += loss + return total_loss / num_eval_batches + + +class TestSyntheticRewardTraining(unittest.TestCase): + def setUp(self): + pl.seed_everything(123) + + def test_linear_reward_parametric_reward_success(self): + avg_eval_loss = self._test_linear_reward_parametric_reward( + ground_truth_reward_from_multiple_steps=False + ) + threshold = 0.1 + assert avg_eval_loss < threshold + + def test_linear_reward_parametric_reward_fail(self): + avg_eval_loss = self._test_linear_reward_parametric_reward( + ground_truth_reward_from_multiple_steps=True + ) + # fail to learn + threshold = 100.0 + assert avg_eval_loss > threshold + + def _test_linear_reward_parametric_reward( + self, ground_truth_reward_from_multiple_steps=False + ): + """ + Reward at each step is a linear function of present state and action. + However, we can only observe aggregated reward at the last step + + This model will fail to learn when ground-truth reward is a function of + multiple steps' states and actions. 
+ """ + state_dim = 10 + action_dim = 2 + seq_len = 5 + batch_size = 512 + num_batches = 5000 + sizes = [256, 128] + activations = ["relu", "relu"] + last_layer_activation = "linear" + reward_net = SyntheticRewardNet( + SingleStepSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + sizes=sizes, + activations=activations, + last_layer_activation=last_layer_activation, + ) + ) + optimizer = Optimizer__Union(Adam=classes["Adam"]()) + trainer = RewardNetTrainer(reward_net, optimizer) + trainer.set_reporter( + RewardNetworkReporter( + trainer.loss_type, + str(reward_net), + ) + ) + if ground_truth_reward_from_multiple_steps: + weight, data = create_sequence_data( + state_dim, action_dim, seq_len, batch_size, num_batches + ) + else: + weight, data = create_data( + state_dim, action_dim, seq_len, batch_size, num_batches + ) + avg_eval_loss = train_and_eval(trainer, data) + return avg_eval_loss + + def test_single_step_parametric_binary_reward(self): + """ + Reward at each step is a linear function of present state and action. + However, we can only observe aggregated reward at the last step + + This model will fail to learn when ground-truth reward is a function of + multiple steps' states and actions. + """ + state_dim = 10 + action_dim = 2 + seq_len = 5 + batch_size = 512 + num_batches = 5000 + sizes = [256, 128] + activations = ["relu", "relu"] + last_layer_activation = "sigmoid" + reward_net = SyntheticRewardNet( + SingleStepSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + sizes=sizes, + activations=activations, + last_layer_activation=last_layer_activation, + ) + ) + optimizer = Optimizer__Union(Adam=classes["Adam"]()) + trainer = RewardNetTrainer( + reward_net, optimizer, loss_type=LossFunction.BCELoss + ) + trainer.set_reporter( + RewardNetworkReporter( + trainer.loss_type, + str(reward_net), + ) + ) + weight, data = create_data( + state_dim, action_dim, seq_len, batch_size, num_batches, binary_reward=True + ) + avg_eval_loss = train_and_eval(trainer, data) + return avg_eval_loss + + def test_ngram_fc_parametric_reward(self): + """ + Reward at each step is a linear function of states and actions in a + context window around the step. + + However, we can only observe aggregated reward at the last step + """ + state_dim = 10 + action_dim = 2 + seq_len = 5 + batch_size = 512 + num_batches = 10000 + sizes = [256, 128] + activations = ["relu", "relu"] + last_layer_activation = "linear" + reward_net = SyntheticRewardNet( + NGramFullyConnectedNetwork( + state_dim=state_dim, + action_dim=action_dim, + sizes=sizes, + activations=activations, + last_layer_activation=last_layer_activation, + context_size=3, + ) + ) + optimizer = Optimizer__Union(Adam=classes["Adam"]()) + trainer = RewardNetTrainer(reward_net, optimizer) + trainer.set_reporter( + RewardNetworkReporter( + trainer.loss_type, + str(reward_net), + ) + ) + weight, data = create_sequence_data( + state_dim, action_dim, seq_len, batch_size, num_batches + ) + threshold = 0.2 + avg_eval_loss = train_and_eval(trainer, data) + assert avg_eval_loss < threshold + + def test_ngram_conv_net_parametric_reward(self): + """ + Reward at each step is a linear function of states and actions in a + context window around the step. 
+ + However, we can only observe aggregated reward at the last step + """ + state_dim = 10 + action_dim = 2 + seq_len = 5 + batch_size = 512 + num_batches = 5000 + sizes = [128, 64] + activations = ["relu", "relu"] + last_layer_activation = "linear" + conv_net_params = rlp.ConvNetParameters( + conv_dims=[128], + conv_height_kernels=[1], + pool_types=["max"], + pool_kernel_sizes=[1], + ) + reward_net = SyntheticRewardNet( + NGramConvolutionalNetwork( + state_dim=state_dim, + action_dim=action_dim, + sizes=sizes, + activations=activations, + last_layer_activation=last_layer_activation, + context_size=3, + conv_net_params=conv_net_params, + ) + ) + optimizer = Optimizer__Union(Adam=classes["Adam"]()) + trainer = RewardNetTrainer(reward_net, optimizer) + trainer.set_reporter( + RewardNetworkReporter( + trainer.loss_type, + str(reward_net), + ) + ) + weight, data = create_sequence_data( + state_dim, action_dim, seq_len, batch_size, num_batches + ) + threshold = 0.2 + avg_eval_loss = train_and_eval(trainer, data) + assert avg_eval_loss < threshold, "loss = {} larger than threshold {}".format( + avg_eval_loss, threshold + ) + + def test_lstm_parametric_reward(self): + """ + Reward at each step is a linear function of states and actions in a + context window around the step. + + However, we can only observe aggregated reward at the last step + """ + state_dim = 10 + action_dim = 2 + seq_len = 5 + batch_size = 512 + num_batches = 5000 + last_layer_activation = "linear" + reward_net = SyntheticRewardNet( + SequenceSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + lstm_hidden_size=128, + lstm_num_layers=2, + lstm_bidirectional=True, + last_layer_activation=last_layer_activation, + ) + ) + optimizer = Optimizer__Union(Adam=classes["Adam"]()) + trainer = RewardNetTrainer(reward_net, optimizer) + trainer.set_reporter( + RewardNetworkReporter( + trainer.loss_type, + str(reward_net), + ) + ) + weight, data = create_sequence_data( + state_dim, action_dim, seq_len, batch_size, num_batches + ) + threshold = 0.2 + avg_eval_loss = train_and_eval(trainer, data) + assert avg_eval_loss < threshold + + def test_transformer_parametric_reward(self): + """ + Reward at each step is a linear function of states and actions in a + context window around the step. 
+ + However, we can only observe aggregated reward at the last step + """ + state_dim = 10 + action_dim = 2 + seq_len = 5 + batch_size = 512 + num_batches = 10000 + d_model = 64 + nhead = 8 + num_encoder_layers = 1 + dim_feedforward = 64 + last_layer_activation = "linear" + max_len = seq_len + 1 + reward_net = SyntheticRewardNet( + TransformerSyntheticRewardNet( + state_dim=state_dim, + action_dim=action_dim, + d_model=d_model, + nhead=nhead, + num_encoder_layers=num_encoder_layers, + dim_feedforward=dim_feedforward, + dropout=0.0, + activation="relu", + last_layer_activation=last_layer_activation, + layer_norm_eps=1e-5, + max_len=max_len, + ) + ) + optimizer = Optimizer__Union(Adam=classes["Adam"]()) + trainer = RewardNetTrainer(reward_net, optimizer) + trainer.set_reporter( + RewardNetworkReporter( + trainer.loss_type, + str(reward_net), + ) + ) + weight, data = create_sequence_data( + state_dim, action_dim, seq_len, batch_size, num_batches + ) + + threshold = 0.25 + avg_eval_loss = train_and_eval(trainer, data) + assert ( + avg_eval_loss < threshold + ), "loss = {:.4f} larger than threshold {}".format(avg_eval_loss, threshold) diff --git a/reagent/test/workflow/reagent_sql_test_base.py b/reagent/test/workflow/reagent_sql_test_base.py index 5efae84f6..d09ea4926 100644 --- a/reagent/test/workflow/reagent_sql_test_base.py +++ b/reagent/test/workflow/reagent_sql_test_base.py @@ -9,7 +9,9 @@ import numpy as np import torch from pyspark import SparkConf -from reagent.workflow.spark_utils import DEFAULT_SPARK_CONFIG +from reagent.data.spark_utils import DEFAULT_SPARK_CONFIG + +# pyre-fixme[21]: Could not find `sparktestingbase`. from sparktestingbase.sqltestcase import SQLTestCase @@ -24,6 +26,7 @@ GLOBAL_TEST_CLASS_COUNTER = 0 +# pyre-fixme[11]: Annotation `SQLTestCase` is not defined as a type. class ReagentSQLTestBase(SQLTestCase): def getConf(self): conf = SparkConf() @@ -54,17 +57,17 @@ def setUp(self): logging.basicConfig() def assertEq(self, series_a, arr_b): - """ Assert panda series is equal to np array """ + """Assert panda series is equal to np array""" arr_a = np.array(series_a.tolist()) np.testing.assert_equal(arr_a, arr_b) def assertAllClose(self, series_a, arr_b): - """ Assert panda series is allclose to np array """ + """Assert panda series is allclose to np array""" arr_a = np.array(series_a.tolist()) np.testing.assert_allclose(arr_a, arr_b) def assertEqWithPresence(self, series_a, presence, arr_b): - """ Assert panda series given presence array is equal to np array """ + """Assert panda series given presence array is equal to np array""" arr_a = np.array(series_a.tolist()) present_a = arr_a[presence] present_b = arr_b[presence] diff --git a/reagent/test/workflow/test_data/__init__.py b/reagent/test/workflow/test_data/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/reagent/test/workflow/test_oss_workflows.py b/reagent/test/workflow/test_oss_workflows.py index 3abd8001c..d29876d62 100644 --- a/reagent/test/workflow/test_oss_workflows.py +++ b/reagent/test/workflow/test_oss_workflows.py @@ -9,10 +9,12 @@ from unittest.mock import patch import reagent + +# pyre-fixme[21]: Could not find module `reagent.workflow.cli`. 
import reagent.workflow.cli as cli import torch from click.testing import CliRunner -from reagent.parameters import NormalizationParameters +from reagent.core.parameters import NormalizationParameters from reagent.test.base.horizon_test_base import HorizonTestBase from reagent.workflow.types import Dataset from ruamel.yaml import YAML @@ -36,11 +38,11 @@ NEW_CONFIG_NAME = "config.yaml" # module to patch -DISCRETE_DQN_BASE = "reagent.workflow.model_managers.discrete_dqn_base" +DISCRETE_DQN_BASE = "reagent.model_managers.discrete_dqn_base" def get_test_workflow_config(path_to_config: str, use_gpu: bool): - """ Loads and modifies config to fun fast. """ + """Loads and modifies config to fun fast.""" yaml = YAML(typ="safe") with open(path_to_config, "r") as f: config = yaml.load(f) @@ -56,7 +58,7 @@ def get_test_workflow_config(path_to_config: str, use_gpu: bool): def mock_cartpole_normalization() -> Dict[int, NormalizationParameters]: - """ Get mock normalization from our local file. """ + """Get mock normalization from our local file.""" with open(CARTPOLE_NORMALIZATION_JSON, "r") as f: norm = json.load(f) @@ -67,7 +69,7 @@ def mock_cartpole_normalization() -> Dict[int, NormalizationParameters]: class TestOSSWorkflows(HorizonTestBase): - """ Run workflow to ensure no crashes, correctness/performance not tested. """ + """Run workflow to ensure no crashes, correctness/performance not tested.""" def _test_dqn_workflow(self, use_gpu=False, use_all_avail_gpus=False): runner = CliRunner() @@ -93,7 +95,8 @@ def _test_dqn_workflow(self, use_gpu=False, use_all_avail_gpus=False): ) mock_normalization = mock_cartpole_normalization() with patch( - f"{DISCRETE_DQN_BASE}.query_data", return_value=mock_dataset + "reagent.data.oss_data_fetcher.OssDataFetcher.query_data", + return_value=mock_dataset, ), patch( f"{DISCRETE_DQN_BASE}.identify_normalization_parameters", return_value=mock_normalization, diff --git a/reagent/test/workflow/test_preprocessing.py b/reagent/test/workflow/test_preprocessing.py index 3eff45325..fdcaab95d 100644 --- a/reagent/test/workflow/test_preprocessing.py +++ b/reagent/test/workflow/test_preprocessing.py @@ -7,6 +7,8 @@ import numpy as np import pytest from reagent.preprocessing.identify_types import CONTINUOUS + +# pyre-fixme[21]: Could not find `workflow`. from reagent.test.workflow.reagent_sql_test_base import ReagentSQLTestBase from reagent.workflow.identify_types_flow import identify_normalization_parameters from reagent.workflow.types import PreprocessingOptions, TableSpec @@ -19,6 +21,7 @@ TABLE_NAME = "test_table" +# pyre-fixme[11]: Annotation `ReagentSQLTestBase` is not defined as a type. 
class TestPreprocessing(ReagentSQLTestBase): def setUp(self): super().setUp() diff --git a/reagent/test/workflow/test_query_data.py b/reagent/test/workflow/test_query_data.py index 81aac4bdc..9fd506f72 100644 --- a/reagent/test/workflow/test_query_data.py +++ b/reagent/test/workflow/test_query_data.py @@ -5,12 +5,15 @@ import unittest import numpy as np -import pandas import pytest -from pyspark.sql.functions import asc -from reagent.test.environment.environment import MultiStepSamples + +# pyre-ignore +from pyspark.sql.functions import asc # @manual=//python/wheel/pyspark:pyspark +from reagent.data.oss_data_fetcher import OssDataFetcher +from reagent.test.test_data.ex_mdps import generate_discrete_mdp_pandas_df + +# pyre-ignore from reagent.test.workflow.reagent_sql_test_base import ReagentSQLTestBase -from reagent.workflow.data_fetcher import query_data from reagent.workflow.types import Dataset, TableSpec @@ -18,80 +21,8 @@ def generate_data_discrete(sqlCtx, multi_steps: bool, table_name: str): - # Simulate the following MDP: - # state: 0, action: 7 ('L'), reward: 0, - # state: 1, action: 8 ('R'), reward: 1, - # state: 4, action: 9 ('U'), reward: 4, - # state: 5, action: 10 ('D'), reward: 5, - # state: 6 (terminal) - actions = ["L", "R", "U", "D"] - possible_actions = [["L", "R"], ["R", "U"], ["U", "D"], ["D"]] - - # assume multi_steps=2 - if multi_steps: - rewards = [[0, 1], [1, 4], [4, 5], [5]] - metrics = [ - [{"reward": 0}, {"reward": 1}], - [{"reward": 1}, {"reward": 4}], - [{"reward": 4}, {"reward": 5}], - [{"reward": 5}], - ] - next_states = [[{1: 1}, {4: 1}], [{4: 1}, {5: 1}], [{5: 1}, {6: 1}], [{6: 1}]] - next_actions = [["R", "U"], ["U", "D"], ["D", ""], [""]] - possible_next_actions = [ - [["R", "U"], ["U", "D"]], - [["U", "D"], ["D"]], - [["D"], [""]], - [[""]], - ] - terminals = [[0, 0], [0, 0], [0, 1], [1]] - time_diffs = [[1, 1], [1, 1], [1, 1], [1]] - else: - rewards = [[0], [1], [4], [5]] - metrics = [{"reward": 0}, {"reward": 1}, {"reward": 4}, {"reward": 5}] # noqa - next_states = [[{1: 1}], [{4: 1}], [{5: 1}], [{6: 1}]] - next_actions = [["R"], ["U"], ["D"], [""]] - possible_next_actions = [[["R", "U"]], [["U", "D"]], [["D"]], [[""]]] - terminals = [[0], [0], [0], [1]] - time_diffs = [1, 3, 1, 1] # noqa - - samples = MultiStepSamples( - mdp_ids=["0", "0", "0", "0"], - sequence_numbers=[0, 1, 4, 5], - sequence_number_ordinals=[1, 2, 3, 4], - states=[{0: 1}, {1: 1}, {4: 1}, {5: 1}], - actions=actions, - action_probabilities=[0.3, 0.4, 0.5, 0.6], - rewards=rewards, - possible_actions=possible_actions, - next_states=next_states, - next_actions=next_actions, - terminals=terminals, - possible_next_actions=possible_next_actions, - ) - if not multi_steps: - samples = samples.to_single_step() - - next_state_features = samples.next_states - possible_next_actions = samples.possible_next_actions - next_actions = samples.next_actions - - df = pandas.DataFrame( - { - "mdp_id": samples.mdp_ids, - "sequence_number": samples.sequence_numbers, - "sequence_number_ordinal": samples.sequence_number_ordinals, - "state_features": samples.states, - "action": samples.actions, - "action_probability": samples.action_probabilities, - "reward": samples.rewards, - "next_state_features": next_state_features, - "next_action": next_actions, - "time_diff": time_diffs, - "possible_actions": samples.possible_actions, - "possible_next_actions": possible_next_actions, - "metrics": metrics, - } + df, _ = generate_discrete_mdp_pandas_df( + multi_steps=multi_steps, use_seq_num_diff_as_time_diff=False ) df = 
sqlCtx.createDataFrame(df) logger.info("Created dataframe") @@ -99,6 +30,7 @@ def generate_data_discrete(sqlCtx, multi_steps: bool, table_name: str): df.createOrReplaceTempView(table_name) +# pyre-fixme[11]: Annotation `ReagentSQLTestBase` is not defined as a type. class TestQueryData(ReagentSQLTestBase): def setUp(self): super().setUp() @@ -115,7 +47,8 @@ def _discrete_read_data( self, custom_reward_expression=None, gamma=None, multi_steps=None ): ts = TableSpec(table_name=self.table_name) - dataset: Dataset = query_data( + df = OssDataFetcher() + dataset: Dataset = df.query_data( input_table_spec=ts, discrete_action=True, actions=["L", "R", "U", "D"], @@ -164,7 +97,7 @@ def test_query_data(self): logger.info("discrete multi-step seems fine.") def verify_discrete_single_step_except_rewards(self, df): - """ expects a pandas dataframe """ + """expects a pandas dataframe""" self.assertEq(df["sequence_number"], np.array([1, 2, 3, 4], dtype="int32")) state_features_presence = np.array( diff --git a/reagent/test/workflow/test_query_data_parametric.py b/reagent/test/workflow/test_query_data_parametric.py index c98ac5b29..6d7a6259e 100644 --- a/reagent/test/workflow/test_query_data_parametric.py +++ b/reagent/test/workflow/test_query_data_parametric.py @@ -5,100 +5,23 @@ import unittest import numpy as np -import pandas import pytest + +# pyre-fixme[21]: Could not find `pyspark`. from pyspark.sql.functions import asc -from reagent.test.environment.environment import MultiStepSamples +from reagent.data.oss_data_fetcher import OssDataFetcher +from reagent.test.test_data.ex_mdps import generate_parametric_mdp_pandas_df + +# pyre-fixme[21]: Could not find `workflow`. from reagent.test.workflow.reagent_sql_test_base import ReagentSQLTestBase -from reagent.workflow.data_fetcher import query_data from reagent.workflow.types import Dataset, TableSpec - logger = logging.getLogger(__name__) def generate_data_parametric(sqlCtx, multi_steps: bool, table_name: str): - # Simulate the following MDP: - # state: 0, action: 7 ('L'), reward: 0, - # state: 1, action: 8 ('R'), reward: 1, - # state: 4, action: 9 ('U'), reward: 4, - # state: 5, action: 10 ('D'), reward: 5, - # state: 6 (terminal) - actions = [{7: 1}, {8: 1}, {9: 1}, {10: 1}] - possible_actions = [ - [{7: 1}, {8: 1}], - [{8: 1}, {9: 1}], - [{9: 1}, {10: 1}], - [{10: 1}], - ] - - # assume multi_step=2 - if multi_steps: - rewards = [[0, 1], [1, 4], [4, 5], [5]] - metrics = [ - [{"reward": 0}, {"reward": 1}], - [{"reward": 1}, {"reward": 4}], - [{"reward": 4}, {"reward": 5}], - [{"reward": 5}], - ] - next_states = [[{1: 1}, {4: 1}], [{4: 1}, {5: 1}], [{5: 1}, {6: 1}], [{6: 1}]] - next_actions = [[{8: 1}, {9: 1}], [{9: 1}, {10: 1}], [{10: 1}, {}], [{}]] - possible_next_actions = [ - [[{8: 1}, {9: 1}], [{9: 1}, {10: 1}]], - [[{9: 1}, {10: 1}], [{10: 1}]], - [[{10: 1}], [{}]], - [[{}]], - ] - terminals = [[0, 0], [0, 0], [0, 1], [1]] - time_diffs = [[1, 1], [1, 1], [1, 1], [1]] - else: - rewards = [[0], [1], [4], [5]] - metrics = [{"reward": 0}, {"reward": 1}, {"reward": 4}, {"reward": 5}] # noqa - next_states = [[{1: 1}], [{4: 1}], [{5: 1}], [{6: 1}]] - next_actions = [[{8: 1}], [{9: 1}], [{10: 1}], [{}]] - possible_next_actions = [ - [[{8: 1}, {9: 1}]], - [[{9: 1}, {10: 1}]], - [[{10: 1}]], - [[{}]], - ] - terminals = [[0], [0], [0], [1]] - time_diffs = [1, 3, 1, 1] # noqa - - samples = MultiStepSamples( - mdp_ids=["0", "0", "0", "0"], - sequence_numbers=[0, 1, 4, 5], - sequence_number_ordinals=[1, 2, 3, 4], - states=[{0: 1}, {1: 1}, {4: 1}, {5: 1}], - 
actions=actions, - action_probabilities=[0.3, 0.4, 0.5, 0.6], - rewards=rewards, - possible_actions=possible_actions, - next_states=next_states, - next_actions=next_actions, - terminals=terminals, - possible_next_actions=possible_next_actions, - ) - if not multi_steps: - samples = samples.to_single_step() - - next_state_features = samples.next_states - next_actions = samples.next_actions - - df = pandas.DataFrame( - { - "mdp_id": samples.mdp_ids, - "sequence_number": samples.sequence_numbers, - "sequence_number_ordinal": samples.sequence_number_ordinals, - "state_features": samples.states, - "action": samples.actions, - "action_probability": samples.action_probabilities, - "reward": samples.rewards, - "next_state_features": next_state_features, - "next_action": next_actions, - "time_diff": time_diffs, - "metrics": metrics, - } + df, _ = generate_parametric_mdp_pandas_df( + multi_steps=multi_steps, use_seq_num_diff_as_time_diff=False ) df = sqlCtx.createDataFrame(df) logger.info("Created dataframe") @@ -106,6 +29,7 @@ def generate_data_parametric(sqlCtx, multi_steps: bool, table_name: str): df.createOrReplaceTempView(table_name) +# pyre-fixme[11]: Annotation `ReagentSQLTestBase` is not defined as a type. class TestQueryDataParametric(ReagentSQLTestBase): def setUp(self): super().setUp() @@ -122,7 +46,8 @@ def _parametric_read_data( self, custom_reward_expression=None, gamma=None, multi_steps=None ): ts = TableSpec(table_name=self.table_name) - dataset: Dataset = query_data( + df = OssDataFetcher() + dataset: Dataset = df.query_data( input_table_spec=ts, discrete_action=False, include_possible_actions=False, @@ -173,7 +98,7 @@ def test_query_data_parametric(self): logger.info("parametric multi-step seems fine.") def verify_parametric_single_step_except_rewards(self, df): - """ expects a pandas dataframe """ + """expects a pandas dataframe""" self.assertEq(df["sequence_number"], np.array([1, 2, 3, 4], dtype="int32")) state_features_presence = np.array( diff --git a/reagent/test/world_model/__init__.py b/reagent/test/world_model/__init__.py index e69de29bb..5be5087fd 100644 --- a/reagent/test/world_model/__init__.py +++ b/reagent/test/world_model/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
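The query-data tests above now delegate data generation to the shared ex_mdps helpers instead of the inline MultiStepSamples blocks, so the toy trajectory they encode is no longer spelled out next to the assertions. Restated for readability (single-step view, taken from the removed comments; generate_discrete_mdp_pandas_df and generate_parametric_mdp_pandas_df are assumed to reproduce this same episode):

# state 0 --action 7 ('L')--> reward 0 --> state 1
# state 1 --action 8 ('R')--> reward 1 --> state 4
# state 4 --action 9 ('U')--> reward 4 --> state 5
# state 5 --action 10 ('D')--> reward 5 --> state 6 (terminal)
toy_mdp = [
    {"state": {0: 1}, "action": "L", "reward": 0, "next_state": {1: 1}, "terminal": False},
    {"state": {1: 1}, "action": "R", "reward": 1, "next_state": {4: 1}, "terminal": False},
    {"state": {4: 1}, "action": "U", "reward": 4, "next_state": {5: 1}, "terminal": False},
    {"state": {5: 1}, "action": "D", "reward": 5, "next_state": {6: 1}, "terminal": True},
]
# The parametric variant uses feature-map actions ({7: 1}, {8: 1}, {9: 1}, {10: 1})
# in place of the string action names.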
diff --git a/reagent/test/world_model/simulated_world_model.py b/reagent/test/world_model/simulated_world_model.py index 3eaa4f344..33634a197 100644 --- a/reagent/test/world_model/simulated_world_model.py +++ b/reagent/test/world_model/simulated_world_model.py @@ -18,7 +18,7 @@ def __init__( num_gaussians, lstm_num_hidden_layers, lstm_num_hiddens, - ): + ) -> None: super().__init__() self.action_dim = action_dim self.state_dim = state_dim @@ -30,7 +30,7 @@ def __init__( self.init_hidden() self.eval() - def init_lstm(self): + def init_lstm(self) -> None: self.lstm = nn.LSTM( input_size=self.action_dim + self.state_dim, hidden_size=self.lstm_num_hiddens, @@ -41,14 +41,14 @@ def init_lstm(self): self.lstm_num_hiddens, self.state_dim * self.num_gaussians + 1 ) - def init_hidden(self, batch_size=1): + def init_hidden(self, batch_size: int = 1) -> None: # (num_layers * num_directions, batch, hidden_size) self.hidden = ( torch.zeros(self.lstm_num_hidden_layers, batch_size, self.lstm_num_hiddens), torch.zeros(self.lstm_num_hidden_layers, batch_size, self.lstm_num_hiddens), ) - def init_weight(self): + def init_weight(self) -> None: torch.manual_seed(3212) for _, p in self.lstm.named_parameters(): nn.init.normal_(p, 0, 1) diff --git a/reagent/test/world_model/test_mdnrnn.py b/reagent/test/world_model/test_mdnrnn.py index 4705dc872..be137b7a8 100644 --- a/reagent/test/world_model/test_mdnrnn.py +++ b/reagent/test/world_model/test_mdnrnn.py @@ -6,9 +6,10 @@ import numpy as np import torch -from reagent.models.mdn_rnn import MDNRNNMemoryPool, gmm_loss +from reagent.core.parameters import MDNRNNTrainerParameters +from reagent.models.mdn_rnn import gmm_loss, MDNRNNMemoryPool from reagent.models.world_model import MemoryNetwork -from reagent.parameters import MDNRNNTrainerParameters +from reagent.reporting.world_model_reporter import WorldModelReporter from reagent.test.world_model.simulated_world_model import SimulatedWorldModel from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer from torch.distributions.categorical import Categorical @@ -131,7 +132,6 @@ def _test_mdnrnn_simulate_world(self, use_gpu=False): mdnrnn_params = MDNRNNTrainerParameters( hidden_size=mdnrnn_num_hiddens, num_hidden_layers=mdnrnn_num_hidden_layers, - minibatch_size=batch_size, learning_rate=adam_lr, num_gaussians=mdrnn_num_gaussians, ) @@ -147,35 +147,42 @@ def _test_mdnrnn_simulate_world(self, use_gpu=False): trainer = MDNRNNTrainer( memory_network=mdnrnn_net, params=mdnrnn_params, cum_loss_hist=num_batch ) + reporter = WorldModelReporter(report_interval=1) + trainer.set_reporter(reporter) + optimizer = trainer.configure_optimizers()[0] for e in range(num_epochs): for i in range(num_batch): training_batch = replay_buffer.sample_memories( batch_size, use_gpu=use_gpu ) - losses = trainer.train(training_batch) + optimizer.zero_grad() + loss = next(trainer.train_step_gen(training_batch, i)) + loss.backward() + optimizer.step() + logger.info( "{}-th epoch, {}-th minibatch: \n" "loss={}, bce={}, gmm={}, mse={} \n" "cum loss={}, cum bce={}, cum gmm={}, cum mse={}\n".format( e, i, - losses["loss"], - losses["bce"], - losses["gmm"], - losses["mse"], - np.mean(trainer.cum_loss), - np.mean(trainer.cum_bce), - np.mean(trainer.cum_gmm), - np.mean(trainer.cum_mse), + reporter.loss.values[-1], + reporter.bce.values[-1], + reporter.gmm.values[-1], + reporter.mse.values[-1], + np.mean(reporter.loss.values[-100:]), + np.mean(reporter.bce.values[-100:]), + np.mean(reporter.gmm.values[-100:]), + 
np.mean(reporter.mse.values[-100:]), ) ) if ( - np.mean(trainer.cum_loss) < 0 - and np.mean(trainer.cum_gmm) < -3.0 - and np.mean(trainer.cum_bce) < 0.6 - and np.mean(trainer.cum_mse) < 0.2 + np.mean(reporter.loss.values[-100:]) < 0 + and np.mean(reporter.gmm.values[-100:]) < -3.0 + and np.mean(reporter.bce.values[-100:]) < 0.6 + and np.mean(reporter.mse.values[-100:]) < 0.2 ): return diff --git a/reagent/test/world_model/test_seq2reward.py b/reagent/test/world_model/test_seq2reward.py new file mode 100644 index 000000000..7df83d047 --- /dev/null +++ b/reagent/test/world_model/test_seq2reward.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +import os +import random +import unittest +from typing import Optional + +import numpy as np +import pytorch_lightning as pl +import torch +import torch.nn as nn +from parameterized import parameterized +from reagent.core import types as rlt +from reagent.core.parameters import ( + NormalizationData, + NormalizationParameters, + ProblemDomain, + Seq2RewardTrainerParameters, +) +from reagent.gym.envs import Gym +from reagent.gym.utils import create_df_from_replay_buffer +from reagent.models.seq2reward_model import Seq2RewardNetwork +from reagent.net_builder.value.fully_connected import FullyConnected +from reagent.prediction.predictor_wrapper import ( + FAKE_STATE_ID_LIST_FEATURES, + FAKE_STATE_ID_SCORE_LIST_FEATURES, + Seq2RewardPlanShortSeqWithPreprocessor, + Seq2RewardWithPreprocessor, +) +from reagent.preprocessing.identify_types import DO_NOT_PREPROCESS +from reagent.preprocessing.preprocessor import Preprocessor +from reagent.training.utils import gen_permutations +from reagent.training.world_model.compress_model_trainer import CompressModelTrainer +from reagent.training.world_model.seq2reward_trainer import get_Q, Seq2RewardTrainer +from torch.utils.data import DataLoader + +logger = logging.getLogger(__name__) + +SEED = 0 +STRING_GAME_TESTS = [(False,), (True,)] + + +class FakeStepPredictionNetwork(nn.Module): + def __init__(self, look_ahead_steps): + super().__init__() + self.look_ahead_steps = look_ahead_steps + + def forward(self, state: torch.Tensor): + """ + Given the current state, predict the probability of + experiencing next n steps (1 <=n <= look_ahead_steps) + + For the test purpose, it outputs fixed fake numbers + """ + batch_size, _ = state.shape + return torch.ones(batch_size, self.look_ahead_steps).float() + + +class FakeSeq2RewardNetwork(nn.Module): + def forward( + self, + state: rlt.FeatureData, + action: rlt.FeatureData, + valid_reward_len: Optional[torch.Tensor] = None, + ): + """ + Mimic I/O of Seq2RewardNetwork but return fake reward + Reward is the concatenation of action indices, independent + of state. + + For example, when seq_len = 3, batch_size = 1, action_num = 2, + acc_reward = tensor( + [[ 0.], + [ 1.], + [ 10.], + [ 11.], + [100.], + [101.], + [110.], + [111.]] + ) + + Input action shape: seq_len, batch_size, num_action + Output acc_reward shape: batch_size, 1 + """ + # pyre-fixme[9]: action has type `FeatureData`; used as `Tensor`. + action = action.float_features.transpose(0, 1) + # pyre-fixme[6]: For 1st param expected `Tensor` but got `FeatureData`. 
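# (Descriptive note) The lines below implement the fake reward described in the
# docstring: argmax over the one-hot action dimension yields per-step action indices,
# e.g. [1, 0, 1]; the digits are joined into the string "101" and cast to the float
# 101.0, then reshaped to (batch_size, 1) as the accumulated reward.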
+ action_indices = torch.argmax(action, dim=2).tolist() + acc_reward = torch.tensor( + list(map(lambda x: float("".join(map(str, x))), action_indices)) + ).reshape(-1, 1) + logger.info(f"acc_reward: {acc_reward}") + return rlt.Seq2RewardOutput(acc_reward=acc_reward) + + +def create_string_game_data( + dataset_size=10000, training_data_ratio=0.9, filter_short_sequence=False +): + SEQ_LEN = 6 + NUM_ACTION = 2 + NUM_MDP_PER_BATCH = 5 + + env = Gym(env_name="StringGame-v0", set_max_steps=SEQ_LEN) + df = create_df_from_replay_buffer( + env=env, + problem_domain=ProblemDomain.DISCRETE_ACTION, + desired_size=dataset_size, + multi_steps=None, + ds="2020-10-10", + ) + + if filter_short_sequence: + batch_size = NUM_MDP_PER_BATCH + time_diff = torch.ones(SEQ_LEN, batch_size) + valid_step = SEQ_LEN * torch.ones(batch_size, dtype=torch.int64)[:, None] + not_terminal = torch.Tensor( + [0 if i == SEQ_LEN - 1 else 1 for i in range(SEQ_LEN)] + ) + not_terminal = torch.transpose(not_terminal.tile(NUM_MDP_PER_BATCH, 1), 0, 1) + else: + batch_size = NUM_MDP_PER_BATCH * SEQ_LEN + time_diff = torch.ones(SEQ_LEN, batch_size) + valid_step = torch.arange(SEQ_LEN, 0, -1).tile(NUM_MDP_PER_BATCH)[:, None] + not_terminal = torch.transpose( + torch.tril(torch.ones(SEQ_LEN, SEQ_LEN), diagonal=-1).tile( + NUM_MDP_PER_BATCH, 1 + ), + 0, + 1, + ) + + num_batches = int(dataset_size / SEQ_LEN / NUM_MDP_PER_BATCH) + batches = [None for _ in range(num_batches)] + batch_count, batch_seq_count = 0, 0 + batch_reward = torch.zeros(SEQ_LEN, batch_size) + batch_action = torch.zeros(SEQ_LEN, batch_size, NUM_ACTION) + batch_state = torch.zeros(SEQ_LEN, batch_size, NUM_ACTION) + for mdp_id in sorted(set(df.mdp_id)): + mdp = df[df["mdp_id"] == mdp_id].sort_values("sequence_number", ascending=True) + if len(mdp) != SEQ_LEN: + continue + + all_step_reward = torch.Tensor(list(mdp["reward"])) + all_step_state = torch.Tensor([list(s.values()) for s in mdp["state_features"]]) + all_step_action = torch.zeros_like(all_step_state) + all_step_action[torch.arange(SEQ_LEN), [int(a) for a in mdp["action"]]] = 1.0 + + for j in range(SEQ_LEN): + if filter_short_sequence and j > 0: + break + + reward = torch.zeros_like(all_step_reward) + reward[: SEQ_LEN - j] = all_step_reward[-(SEQ_LEN - j) :] + batch_reward[:, batch_seq_count] = reward + + state = torch.zeros_like(all_step_state) + state[: SEQ_LEN - j] = all_step_state[-(SEQ_LEN - j) :] + batch_state[:, batch_seq_count] = state + + action = torch.zeros_like(all_step_action) + action[: SEQ_LEN - j] = all_step_action[-(SEQ_LEN - j) :] + batch_action[:, batch_seq_count] = action + + batch_seq_count += 1 + + if batch_seq_count == batch_size: + batches[batch_count] = rlt.MemoryNetworkInput( + reward=batch_reward, + action=rlt.FeatureData(float_features=batch_action), + state=rlt.FeatureData(float_features=batch_state), + next_state=rlt.FeatureData( + float_features=torch.zeros_like(batch_state) + ), # fake, not used anyway + not_terminal=not_terminal, + time_diff=time_diff, + valid_step=valid_step, + step=None, + ) + batch_count += 1 + batch_seq_count = 0 + batch_reward = torch.zeros_like(batch_reward) + batch_action = torch.zeros_like(batch_action) + batch_state = torch.zeros_like(batch_state) + assert batch_count == num_batches + + num_training_batches = int(training_data_ratio * num_batches) + training_data = DataLoader( + batches[:num_training_batches], collate_fn=lambda x: x[0] + ) + eval_data = DataLoader(batches[num_training_batches:], collate_fn=lambda x: x[0]) + return training_data, eval_data + 
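# Illustrative flow combining create_string_game_data with the train/eval helpers
# defined below; this mirrors test_seq2reward_on_string_game_v0 later in this file
# and is a sketch rather than additional test code.
#
#     training_data, eval_data = create_string_game_data(filter_short_sequence=True)
#     trainer = train_seq2reward_model(training_data)
#     init_q, mse, q_values, action_dist = eval_seq2reward_model(eval_data, trainer)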
+ +def train_seq2reward_model(training_data, learning_rate=0.01, num_epochs=5): + SEQ_LEN, batch_size, NUM_ACTION = next( + iter(training_data) + ).action.float_features.shape + assert SEQ_LEN == 6 and NUM_ACTION == 2 + + seq2reward_network = Seq2RewardNetwork( + state_dim=NUM_ACTION, + action_dim=NUM_ACTION, + num_hiddens=64, + num_hidden_layers=2, + ) + + trainer_param = Seq2RewardTrainerParameters( + learning_rate=learning_rate, + multi_steps=SEQ_LEN, + action_names=["0", "1"], + gamma=1.0, + view_q_value=True, + ) + + trainer = Seq2RewardTrainer( + seq2reward_network=seq2reward_network, params=trainer_param + ) + + pl.seed_everything(SEED) + pl_trainer = pl.Trainer(max_epochs=num_epochs, deterministic=True) + pl_trainer.fit(trainer, training_data) + + return trainer + + +def eval_seq2reward_model(eval_data, seq2reward_trainer): + SEQ_LEN, batch_size, NUM_ACTION = next(iter(eval_data)).action.float_features.shape + + initial_state = torch.Tensor([[0, 0]]) + initial_state_q_values = torch.squeeze( + get_Q( + seq2reward_trainer.seq2reward_network, + initial_state, + seq2reward_trainer.all_permut, + ) + ) + + total_mse_loss = 0 + total_q_values = torch.zeros(NUM_ACTION) + total_action_distribution = torch.zeros(NUM_ACTION) + for idx, batch in enumerate(eval_data): + ( + mse_loss, + _, + q_values, + action_distribution, + ) = seq2reward_trainer.validation_step(batch, idx) + total_mse_loss += mse_loss + total_q_values += torch.tensor(q_values) + total_action_distribution += torch.tensor(action_distribution) + + N_eval = len(eval_data) + eval_mse_loss = total_mse_loss / N_eval + eval_q_values = total_q_values / N_eval + eval_action_distribution = total_action_distribution / N_eval + + return ( + initial_state_q_values, + eval_mse_loss, + eval_q_values, + eval_action_distribution, + ) + + +def train_seq2reward_compress_model( + training_data, seq2reward_network, learning_rate=0.1, num_epochs=5 +): + SEQ_LEN, batch_size, NUM_ACTION = next( + iter(training_data) + ).action.float_features.shape + assert SEQ_LEN == 6 and NUM_ACTION == 2 + + compress_net_builder = FullyConnected(sizes=[8, 8]) + state_normalization_data = NormalizationData( + dense_normalization_parameters={ + 0: NormalizationParameters(feature_type=DO_NOT_PREPROCESS), + 1: NormalizationParameters(feature_type=DO_NOT_PREPROCESS), + } + ) + compress_model_network = compress_net_builder.build_value_network( + state_normalization_data, + output_dim=NUM_ACTION, + ) + + trainer_param = Seq2RewardTrainerParameters( + learning_rate=0.0, + multi_steps=SEQ_LEN, + action_names=["0", "1"], + compress_model_learning_rate=learning_rate, + gamma=1.0, + view_q_value=True, + ) + + trainer = CompressModelTrainer( + compress_model_network=compress_model_network, + seq2reward_network=seq2reward_network, + params=trainer_param, + ) + + pl.seed_everything(SEED) + pl_trainer = pl.Trainer(max_epochs=num_epochs, deterministic=True) + pl_trainer.fit(trainer, training_data) + + return trainer + + +def eval_seq2reward_compress_model(eval_data, compress_model_trainer): + SEQ_LEN, batch_size, NUM_ACTION = next(iter(eval_data)).action.float_features.shape + total_mse_loss = 0 + total_q_values = torch.zeros(NUM_ACTION) + total_action_distribution = torch.zeros(NUM_ACTION) + for idx, batch in enumerate(eval_data): + ( + mse_loss, + q_values, + action_distribution, + _, + ) = compress_model_trainer.validation_step(batch, idx) + total_mse_loss += mse_loss + total_q_values += torch.tensor(q_values) + total_action_distribution += torch.tensor(action_distribution) + + 
N_eval = len(eval_data) + eval_mse_loss = total_mse_loss / N_eval + eval_q_values = total_q_values / N_eval + eval_action_distribution = total_action_distribution / N_eval + + return eval_mse_loss, eval_q_values, eval_action_distribution + + +class TestSeq2Reward(unittest.TestCase): + def test_seq2reward_with_preprocessor_plan_short_sequence(self): + self._test_seq2reward_with_preprocessor(plan_short_sequence=True) + + def test_seq2reward_with_preprocessor_plan_full_sequence(self): + self._test_seq2reward_with_preprocessor(plan_short_sequence=False) + + def _test_seq2reward_with_preprocessor(self, plan_short_sequence): + state_dim = 4 + action_dim = 2 + seq_len = 3 + model = FakeSeq2RewardNetwork() + state_normalization_parameters = { + i: NormalizationParameters( + feature_type=DO_NOT_PREPROCESS, mean=0.0, stddev=1.0 + ) + for i in range(1, state_dim) + } + state_preprocessor = Preprocessor(state_normalization_parameters, False) + + if plan_short_sequence: + step_prediction_model = FakeStepPredictionNetwork(seq_len) + model_with_preprocessor = Seq2RewardPlanShortSeqWithPreprocessor( + model, + step_prediction_model, + state_preprocessor, + seq_len, + action_dim, + ) + else: + model_with_preprocessor = Seq2RewardWithPreprocessor( + model, + state_preprocessor, + seq_len, + action_dim, + ) + input_prototype = rlt.ServingFeatureData( + float_features_with_presence=state_preprocessor.input_prototype(), + id_list_features=FAKE_STATE_ID_LIST_FEATURES, + id_score_list_features=FAKE_STATE_ID_SCORE_LIST_FEATURES, + ) + q_values = model_with_preprocessor(input_prototype) + if plan_short_sequence: + # When planning for 1, 2, and 3 steps ahead, + # the expected q values are respectively: + # [0, 1], [1, 11], [11, 111] + # Weighting the expected q values by predicted step + # probabilities [0.33, 0.33, 0.33], we have [4, 41] + expected_q_values = torch.tensor([[4.0, 41.0]]) + else: + expected_q_values = torch.tensor([[11.0, 111.0]]) + assert torch.all(expected_q_values == q_values) + + def test_get_Q(self): + NUM_ACTION = 2 + MULTI_STEPS = 3 + BATCH_SIZE = 2 + STATE_DIM = 4 + all_permut = gen_permutations(MULTI_STEPS, NUM_ACTION) + seq2reward_network = FakeSeq2RewardNetwork() + state = torch.zeros(BATCH_SIZE, STATE_DIM) + q_values = get_Q(seq2reward_network, state, all_permut) + expected_q_values = torch.tensor([[11.0, 111.0], [11.0, 111.0]]) + logger.info(f"q_values: {q_values}") + assert torch.all(expected_q_values == q_values) + + def test_gen_permutations_seq_len_1_action_6(self): + SEQ_LEN = 1 + NUM_ACTION = 6 + expected_outcome = torch.tensor([[0], [1], [2], [3], [4], [5]]) + self._test_gen_permutations(SEQ_LEN, NUM_ACTION, expected_outcome) + + def test_gen_permutations_seq_len_3_num_action_2(self): + SEQ_LEN = 3 + NUM_ACTION = 2 + expected_outcome = torch.tensor( + [ + [0, 0, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 1], + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [1, 1, 1], + ] + ) + self._test_gen_permutations(SEQ_LEN, NUM_ACTION, expected_outcome) + + def _test_gen_permutations(self, SEQ_LEN, NUM_ACTION, expected_outcome): + # expected shape: SEQ_LEN, PERM_NUM, ACTION_DIM + result = gen_permutations(SEQ_LEN, NUM_ACTION) + assert result.shape == (SEQ_LEN, NUM_ACTION**SEQ_LEN, NUM_ACTION) + outcome = torch.argmax(result.transpose(0, 1), dim=-1) + assert torch.all(outcome == expected_outcome) + + @parameterized.expand(STRING_GAME_TESTS) + @unittest.skipIf("SANDCASTLE" in os.environ, "Skipping long test on sandcastle.") + def test_seq2reward_on_string_game_v0(self, filter_short_sequence): + 
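# (Descriptive note) End-to-end check: train a Seq2Reward model on StringGame-v0 data,
# verify its initial-state Q-values and MSE loss, then distill it into a compression
# network and confirm the compressed model reproduces the Q-values and action
# distribution within tolerance.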
np.random.seed(SEED) + random.seed(SEED) + torch.manual_seed(SEED) + training_data, eval_data = create_string_game_data( + filter_short_sequence=filter_short_sequence + ) + seq2reward_trainer = train_seq2reward_model(training_data) + ( + initial_state_q_values, + eval_mse_loss, + eval_q_values, + eval_action_distribution, + ) = eval_seq2reward_model(eval_data, seq2reward_trainer) + + assert abs(initial_state_q_values[0].item() - 10) < 1.0 + assert abs(initial_state_q_values[1].item() - 5) < 1.0 + + if filter_short_sequence: + assert eval_mse_loss < 0.1 + else: + # Same short sequences may have different total rewards due to the missing + # states and actions in previous steps, so the trained network is not able + # to reduce the mse loss to values close to zero. + assert eval_mse_loss < 10 + + compress_model_trainer = train_seq2reward_compress_model( + training_data, seq2reward_trainer.seq2reward_network + ) + ( + compress_eval_mse_loss, + compress_eval_q_values, + compress_eval_action_distribution, + ) = eval_seq2reward_compress_model(eval_data, compress_model_trainer) + + assert compress_eval_mse_loss < 1e-5 + assert torch.all(eval_q_values - compress_eval_q_values < 1e-5) + assert torch.all( + eval_action_distribution - compress_eval_action_distribution < 1e-5 + ) diff --git a/reagent/torch_utils.py b/reagent/torch_utils.py deleted file mode 100644 index 5502c9b4d..000000000 --- a/reagent/torch_utils.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. -from io import BytesIO -from typing import Dict - -import numpy as np -import torch - - -def dict_to_tensor(batch: Dict[str, np.ndarray], device: str = "cpu"): - return {k: torch.tensor(v).to(device) for k, v in batch.items()} - - -def rescale_torch_tensor( - tensor: torch.Tensor, - new_min: torch.Tensor, - new_max: torch.Tensor, - prev_min: torch.Tensor, - prev_max: torch.Tensor, -): - """ - Rescale column values in N X M torch tensor to be in new range. 
- Each column m in input tensor will be rescaled from range - [prev_min[m], prev_max[m]] to [new_min[m], new_max[m]] - """ - assert tensor.shape[1] == new_min.shape[1] == new_max.shape[1] - assert tensor.shape[1] == prev_min.shape[1] == prev_max.shape[1] - prev_range = prev_max - prev_min - new_range = new_max - new_min - return ((tensor - prev_min) / prev_range) * new_range + new_min - - -def stack(mems): - """ - Stack a list of tensors - Could use torch.stack here but torch.stack is much slower - than torch.cat + view - Submitted an issue for investigation: - https://github.com/pytorch/pytorch/issues/22462 - - FIXME: Remove this function after the issue above is resolved - """ - shape = (-1, *mems[0].shape) - return torch.cat(mems).view(*shape) - - -def export_module_to_buffer(module) -> BytesIO: - # traced_script_module = torch.jit.trace(module, module.input_prototype()) - write_buffer = BytesIO() - torch.jit.save(module, write_buffer) - return write_buffer - - -def softmax(x, temperature): - """Compute softmax values for each sets of scores in x.""" - x = x / temperature - return torch.nn.functional.softmax(x, dim=1) - - -def masked_softmax(x, mask, temperature): - """Compute softmax values for each sets of scores in x.""" - x = x / temperature - mask_min_x = x - ((1.0 - mask) * 1e20) - mask_min_x -= torch.max(mask_min_x, dim=1, keepdim=True)[0] - e_x = torch.exp(mask_min_x) - e_x *= mask - out = e_x / e_x.sum(dim=1, keepdim=True) - - # Set NaN values to 0 (NaN happens when a full mask row is passed in) - out[out != out] = 0 - return out diff --git a/reagent/training/__init__.py b/reagent/training/__init__.py index 50bf1f7d5..b0f8e3b7c 100644 --- a/reagent/training/__init__.py +++ b/reagent/training/__init__.py @@ -1,39 +1,59 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
- +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.training.behavioral_cloning_trainer import BehavioralCloningTrainer from reagent.training.c51_trainer import C51Trainer from reagent.training.cem_trainer import CEMTrainer +from reagent.training.cfeval import BanditRewardNetTrainer +from reagent.training.discrete_crr_trainer import DiscreteCRRTrainer from reagent.training.dqn_trainer import DQNTrainer +from reagent.training.multi_stage_trainer import MultiStageTrainer from reagent.training.parametric_dqn_trainer import ParametricDQNTrainer +from reagent.training.ppo_trainer import PPOTrainer from reagent.training.qrdqn_trainer import QRDQNTrainer -from reagent.training.rl_trainer_pytorch import RLTrainer +from reagent.training.reagent_lightning_module import ( + ReAgentLightningModule, + StoppingEpochCallback, +) +from reagent.training.reinforce_trainer import ReinforceTrainer +from reagent.training.reward_network_trainer import RewardNetTrainer from reagent.training.sac_trainer import SACTrainer from reagent.training.slate_q_trainer import SlateQTrainer from reagent.training.td3_trainer import TD3Trainer from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer from .parameters import ( + BehavioralCloningTrainerParameters, C51TrainerParameters, + CRRTrainerParameters, DQNTrainerParameters, ParametricDQNTrainerParameters, + PPOTrainerParameters, QRDQNTrainerParameters, + ReinforceTrainerParameters, + RewardNetworkTrainerParameters, SACTrainerParameters, + Seq2SlateTrainerParameters, SlateQTrainerParameters, TD3TrainerParameters, ) __all__ = [ + "BehavioralCloningTrainer", + "BanditRewardNetTrainer", "C51Trainer", "CEMTrainer", - "RLTrainer", "DQNTrainer", + "MultiStageTrainer", "MDNRNNTrainer", "ParametricDQNTrainer", "QRDQNTrainer", "SACTrainer", "SlateQTrainer", "TD3Trainer", + "DiscreteCRRTrainer", + "RewardNetTrainer", "C51TrainerParameters", "DQNTrainerParameters", "ParametricDQNTrainerParameters", @@ -41,4 +61,21 @@ "SACTrainerParameters", "SlateQTrainerParameters", "TD3TrainerParameters", + "CRRTrainerParameters", + "RewardNetworkTrainerParameters", + "Seq2SlateTrainerParameters", + "ReAgentLightningModule", + "StoppingEpochCallback", + "ReinforceTrainer", + "ReinforceTrainerParameters", + "PPOTrainer", + "PPOTrainerParameters", + "BehavioralCloningTrainerParameters", ] + +if IS_FB_ENVIRONMENT: + from reagent.training.fb.signal_loss_reward_decomp_trainer import ( # noqa + SignalLossRewardDecompTrainer, + ) + + __all__.append("SignalLossRewardDecompTrainer") diff --git a/reagent/training/behavioral_cloning_trainer.py b/reagent/training/behavioral_cloning_trainer.py new file mode 100644 index 000000000..58ba061af --- /dev/null +++ b/reagent/training/behavioral_cloning_trainer.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
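# With the expanded reagent.training.__init__ above, the newly exported trainers are
# importable from the package root, e.g. (illustration only):
#
#     from reagent.training import (
#         BehavioralCloningTrainer,
#         DiscreteCRRTrainer,
#         PPOTrainer,
#         ReinforceTrainer,
#     )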
+ +import logging + +import reagent.core.types as rlt +import torch +from reagent.core.dataclasses import field +from reagent.models.base import ModelBase +from reagent.optimizer.union import Optimizer__Union +from reagent.training.reagent_lightning_module import ReAgentLightningModule + +logger = logging.getLogger(__name__) + + +class BehavioralCloningTrainer(ReAgentLightningModule): + def __init__( + self, + bc_net: ModelBase, + optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + ) -> None: + super().__init__() + self.bc_net = bc_net + self.loss_fn = torch.nn.CrossEntropyLoss(reduction="mean") + self.optimizer = optimizer + + def configure_optimizers(self): + optimizers = [] + optimizers.append( + self.optimizer.make_optimizer_scheduler(self.bc_net.parameters()) + ) + return optimizers + + def _get_masked_logits(self, batch: rlt.BehavioralCloningModelInput): + logits = self.bc_net( + batch.state, possible_actions_mask=batch.possible_actions_mask + ) + return logits + + def train_step_gen( + self, training_batch: rlt.BehavioralCloningModelInput, batch_idx: int + ): + self._check_input(training_batch) + labels = training_batch.action + logits_masked = self._get_masked_logits(training_batch) + assert labels.ndim == logits_masked.ndim == 2 + assert labels.shape[0] == logits_masked.shape[0] + _, integer_labels = labels.max(dim=0) + loss = self.loss_fn(logits_masked, integer_labels) + detached_loss = loss.detach().cpu() + self.reporter.log(loss=detached_loss) + yield loss + + # pyre-ignore inconsistent override because lightning doesn't use types + def validation_step(self, batch: rlt.BehavioralCloningModelInput, batch_idx: int): + self._check_input(batch) + logits_masked = self._get_masked_logits(batch) + labels = batch.action + assert labels.ndim == logits_masked.ndim == 2 + assert labels.shape[0] == logits_masked.shape[0] + _, integer_labels = labels.max(dim=0) + loss = self.loss_fn(logits_masked, integer_labels) + detached_loss = loss.detach().cpu() + return detached_loss + + def _check_input(self, training_batch: rlt.BehavioralCloningModelInput): + assert isinstance(training_batch, rlt.BehavioralCloningModelInput) + labels = training_batch.action + if len(labels.shape) > 1 and labels.shape[0] > 1: # check one hot label + pass + else: + raise TypeError( + "label tensor format or dimension does not match loss function" + ) + assert torch.all( + training_batch.action * training_batch.possible_actions_mask + == training_batch.action + ) # check all labels are not masked out diff --git a/reagent/training/c51_trainer.py b/reagent/training/c51_trainer.py index 420d06d98..4a4f07261 100644 --- a/reagent/training/c51_trainer.py +++ b/reagent/training/c51_trainer.py @@ -1,29 +1,19 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
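# A minimal sketch of driving the BehavioralCloningTrainer defined above with PyTorch
# Lightning, following the pattern used for other ReAgentLightningModule subclasses in
# this diff; `bc_net` and `bc_dataloader` are placeholder names, not identifiers from
# this patch.
#
#     import pytorch_lightning as pl
#     bc_trainer = BehavioralCloningTrainer(bc_net=bc_net)
#     pl.Trainer(max_epochs=1).fit(bc_trainer, bc_dataloader)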
- from typing import List -import reagent.types as rlt +import reagent.core.types as rlt import torch from reagent.core.configuration import resolve_defaults from reagent.core.dataclasses import field -from reagent.core.tracker import observable -from reagent.optimizer.union import Optimizer__Union -from reagent.parameters import EvaluationParameters, RLParameters -from reagent.training.rl_trainer_pytorch import RLTrainer -from reagent.training.training_data_page import TrainingDataPage - - -@observable( - td_loss=torch.Tensor, - logged_actions=torch.Tensor, - logged_propensities=torch.Tensor, - logged_rewards=torch.Tensor, - model_values=torch.Tensor, - model_action_idxs=torch.Tensor, -) -class C51Trainer(RLTrainer): +from reagent.core.parameters import RLParameters +from reagent.optimizer import Optimizer__Union, SoftUpdate +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.rl_trainer_pytorch import RLTrainerMixin + + +class C51Trainer(RLTrainerMixin, ReAgentLightningModule): """ Implementation of 51 Categorical DQN (C51) @@ -35,9 +25,6 @@ def __init__( self, q_network, q_network_target, - metrics_to_score=None, - loss_reporter=None, - use_gpu: bool = False, actions: List[str] = field(default_factory=list), # noqa: B008 rl: RLParameters = field(default_factory=RLParameters), # noqa: B008 double_q_learning: bool = True, @@ -49,53 +36,71 @@ def __init__( optimizer: Optimizer__Union = field( # noqa: B008 default_factory=Optimizer__Union.default ), - evaluation: EvaluationParameters = field( # noqa: B008 - default_factory=EvaluationParameters - ), ) -> None: - RLTrainer.__init__( - self, - rl, - use_gpu=use_gpu, - metrics_to_score=metrics_to_score, - actions=actions, - loss_reporter=loss_reporter, - ) - + """ + Args: + q_network: states, action -> q-value + q_network_target: model that provides targets + actions(optional): list of agent's actions + rl (optional): an instance of the RLParameter class, which + defines relevant hyperparameters + double_q_learning (optional): whether or not double Q learning, enabled by default, + minibatch_size (optional): the size of the minibatch + minibatches_per_step (optional): the number of minibatch updates + per training step + num_atoms (optional): number of "canonical returns"in the discretized value distributions + qmin (optional): minimum q-value + qmax (optional): maximum q-value + optimizer (optional): the optimizer class and + optimizer hyperparameters for the q network(s) optimizer + """ + super().__init__() self.double_q_learning = double_q_learning self.minibatch_size = minibatch_size self.minibatches_per_step = minibatches_per_step self._actions = actions self.q_network = q_network self.q_network_target = q_network_target - self.q_network_optimizer = optimizer.make_optimizer(q_network.parameters()) + self.q_network_optimizer = optimizer self.qmin = qmin self.qmax = qmax self.num_atoms = num_atoms - self.support = torch.linspace( - self.qmin, self.qmax, self.num_atoms, device=self.device - ) + self.rl_parameters = rl + self.register_buffer("support", None) + self.support = torch.linspace(self.qmin, self.qmax, self.num_atoms) self.scale_support = (self.qmax - self.qmin) / (self.num_atoms - 1.0) - self.reward_boosts = torch.zeros([1, len(self._actions)], device=self.device) - if rl.reward_boost is not None: + self.register_buffer("reward_boosts", None) + self.reward_boosts = torch.zeros([1, len(self._actions)]) + if self.rl_parameters.reward_boost is not None: # pyre-fixme[16]: Optional type has 
no attribute `keys`. - for k in rl.reward_boost.keys(): + for k in self.rl_parameters.reward_boost.keys(): i = self._actions.index(k) # pyre-fixme[16]: Optional type has no attribute `__getitem__`. - self.reward_boosts[0, i] = rl.reward_boost[k] + self.reward_boosts[0, i] = self.rl_parameters.reward_boost[k] - @torch.no_grad() - def train(self, training_batch: rlt.DiscreteDqnInput) -> None: - if isinstance(training_batch, TrainingDataPage): - training_batch = training_batch.as_discrete_maxq_training_batch() + def configure_optimizers(self): + optimizers = [ + self.q_network_optimizer.make_optimizer_scheduler( + self.q_network.parameters() + ) + ] + # soft-update + target_params = list(self.q_network_target.parameters()) + source_params = list(self.q_network.parameters()) + optimizers.append( + SoftUpdate.make_optimizer_scheduler( + target_params, source_params, tau=self.tau + ) + ) + return optimizers + def train_step_gen(self, training_batch: rlt.DiscreteDqnInput, batch_idx: int): rewards = self.boost_rewards(training_batch.reward, training_batch.action) discount_tensor = torch.full_like(rewards, self.gamma) possible_next_actions_mask = training_batch.possible_next_actions_mask.float() possible_actions_mask = training_batch.possible_actions_mask.float() - self.minibatch += 1 not_terminal = training_batch.not_terminal.float() if self.use_seq_num_diff_as_time_diff: @@ -118,7 +123,9 @@ def train(self, training_batch: rlt.DiscreteDqnInput) -> None: next_q_values = (next_dist * self.support).sum(2) next_action = self.argmax_with_mask( - next_q_values, possible_next_actions_mask + next_q_values, + # pyre-fixme[6]: For 2nd param expected `int` but got `FloatTensor`. + possible_next_actions_mask, ) next_dist = next_dist[range(rewards.shape[0]), next_action.reshape(-1)] else: @@ -130,9 +137,7 @@ def train(self, training_batch: rlt.DiscreteDqnInput) -> None: # rescale to indicies [0, 1, ..., N-1] b = (target_Q - self.qmin) / self.scale_support - # pyre-fixme[16]: `Tensor` has no attribute `floor`. lo = b.floor().to(torch.int64) - # pyre-fixme[16]: `Tensor` has no attribute `ceil`. up = b.ceil().to(torch.int64) # handle corner cases of l == b == u @@ -153,52 +158,38 @@ def train(self, training_batch: rlt.DiscreteDqnInput) -> None: # m_l = m_l + p(s_t+n, a*)(u - b) # m_u = m_u + p(s_t+n, a*)(b - l) m = torch.zeros_like(next_dist) - # pyre-fixme[16]: `Tensor` has no attribute `scatter_add_`. m.scatter_add_(dim=1, index=lo, src=next_dist * (up.float() - b)) m.scatter_add_(dim=1, index=up, src=next_dist * (b - lo.float())) + log_dist = self.q_network.log_dist(training_batch.state) - with torch.enable_grad(): - log_dist = self.q_network.log_dist(training_batch.state) - - # for reporting only - all_q_values = (log_dist.exp() * self.support).sum(2).detach() - - log_dist = (log_dist * training_batch.action.unsqueeze(-1)).sum(1) - - loss = -(m * log_dist).sum(1).mean() - loss.backward() - self._maybe_run_optimizer( - self.q_network_optimizer, self.minibatches_per_step - ) - - # Use the soft update rule to update target network - self._maybe_soft_update( - self.q_network, self.q_network_target, self.tau, self.minibatches_per_step - ) - + # for reporting only + all_q_values = (log_dist.exp() * self.support).sum(2).detach() model_action_idxs = self.argmax_with_mask( all_q_values, + # pyre-fixme[6]: For 2nd param expected `int` but got `Tensor`. possible_actions_mask if self.maxq_learning else training_batch.action, ) - # pyre-fixme[16]: `C51Trainer` has no attribute `notify_observers`. 
- self.notify_observers( - td_loss=loss, - logged_actions=torch.argmax(training_batch.action, dim=1, keepdim=True), - logged_propensities=training_batch.extras.action_probability, - logged_rewards=rewards, - model_values=all_q_values, - model_action_idxs=model_action_idxs, - ) + log_dist = (log_dist * training_batch.action.unsqueeze(-1)).sum(1) - self.loss_reporter.report( - td_loss=loss, - logged_actions=training_batch.action.argmax(dim=1, keepdim=True), - logged_propensities=training_batch.extras.action_probability, - logged_rewards=rewards, - model_values=all_q_values, - model_action_idxs=model_action_idxs, - ) + loss = -(m * log_dist).sum(1).mean() + + if batch_idx % self.trainer.log_every_n_steps == 0: + self.reporter.log( + td_loss=loss, + logged_actions=torch.argmax(training_batch.action, dim=1, keepdim=True), + logged_propensities=training_batch.extras.action_probability, + logged_rewards=rewards, + model_values=all_q_values, + model_action_idxs=model_action_idxs, + ) + self.log( + "td_loss", loss, prog_bar=True, batch_size=training_batch.batch_size() + ) + + yield loss + result = self.soft_update_result() + yield result @torch.no_grad() def boost_rewards( @@ -210,11 +201,11 @@ def boost_rewards( ) return rewards + reward_boosts - def argmax_with_mask(self, q_values, possible_actions_mask): + def argmax_with_mask(self, q_values, possible_actions_mask: int): # Set q-values of impossible actions to a very large negative number. + # pyre-fixme[16]: `int` has no attribute `shape`. q_values = q_values.reshape(possible_actions_mask.shape) - q_values = q_values + self.ACTION_NOT_POSSIBLE_VAL * (1 - possible_actions_mask) + q_values = q_values + RLTrainerMixin.ACTION_NOT_POSSIBLE_VAL * ( + 1 - possible_actions_mask + ) return q_values.argmax(1) - - def warm_start_components(self): - return ["q_network", "q_network_target", "q_network_optimizer"] diff --git a/reagent/training/cb/__init__.py b/reagent/training/cb/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/training/cb/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/training/cb/base_trainer.py b/reagent/training/cb/base_trainer.py new file mode 100644 index 000000000..8b8d7561b --- /dev/null +++ b/reagent/training/cb/base_trainer.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging +from abc import ABC, abstractmethod +from typing import final, Optional + +import torch +from reagent.core.types import CBInput +from reagent.evaluation.cb.base_evaluator import BaseOfflineEval +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from torchrec.metrics.metric_module import RecMetricModule + + +logger = logging.getLogger(__name__) + + +class BaseCBTrainerWithEval(ABC, ReAgentLightningModule): + """ + The base class for Contextual Bandit models. A minimal implementation of a specific model requires providing a specific + implementation for only the cb_training_step() method. + The main functionality implemented in this base class is the integration of Offline Evaluation into the training loop. 
+ """ + + scorer: torch.nn.Module + + def __init__( + self, + eval_model_update_critical_weight: Optional[float] = None, + recmetric_module: Optional[RecMetricModule] = None, + log_every_n_steps: int = 0, + *args, + **kwargs, + ): + """ + Agruments: + eval_model_update_critical_weight: Maximum total weight of training data (all data, not just that where + logged and model actions match) after which we update the state of the evaluated model. + """ + super().__init__(*args, **kwargs) + self.eval_module: Optional[BaseOfflineEval] = None + self.eval_model_update_critical_weight = eval_model_update_critical_weight + self.recmetric_module = recmetric_module + self.log_every_n_steps = log_every_n_steps + assert (log_every_n_steps > 0) == ( + recmetric_module is not None + ), "recmetric_module should be provided if and only if log_every_n_steps > 0" + + def attach_eval_module(self, eval_module: BaseOfflineEval): + """ + Attach an Offline Evaluation module. It will kleep track of reward during training and filter training batches. + """ + self.eval_module = eval_module + + @abstractmethod + def cb_training_step(self, batch: CBInput, batch_idx: int, optimizer_idx: int = 0): + """ + This method impements the actual training step. See training_step() for more details + """ + pass + + @final + def training_step(self, batch: CBInput, batch_idx: int, optimizer_idx: int = 0): + """ + This method combines 2 things in order to enable Offline Evaluation of non-stationary CB algorithms: + 1. If offline evaluator is defined, it will pre-process the batch - keep track of the reward and filter out some observations. + 2. The filtered batch will be fed to the cb_training_step() method, which implements the actual training logic. + + DO NOT OVERRIDE THIS METHOD IN SUBCLASSES, IT'S @final. Instead, override cb_training_step(). + """ + eval_module = self.eval_module # assign to local var to keep pyre happy + if eval_module is not None: + # update the model if we've processed enough samples + eval_model_update_critical_weight = self.eval_model_update_critical_weight + if eval_model_update_critical_weight is not None: + if ( + eval_module.sum_weight_since_update_local.item() + >= eval_model_update_critical_weight + ): + logger.info( + f"Updating the evaluated model after {eval_module.sum_weight_since_update_local.item()} observations" + ) + eval_module.update_eval_model(self.scorer) + eval_module.sum_weight_since_update_local.zero_() + eval_module.num_eval_model_updates += 1 + eval_module._aggregate_across_instances() + eval_module.log_metrics(global_step=self.global_step) + with torch.no_grad(): + eval_scores = eval_module.eval_model(batch.context_arm_features) + if batch.arm_presence is not None: + # mask out non-present arms + eval_scores = torch.masked.as_masked_tensor( + eval_scores, batch.arm_presence.bool() + ) + model_actions = ( + # pyre-fixme[16]: `Tensor` has no attribute `get_data`. 
+ torch.argmax(eval_scores, dim=1) + .get_data() + .reshape(-1, 1) + ) + else: + model_actions = torch.argmax(eval_scores, dim=1).reshape(-1, 1) + new_batch = eval_module.ingest_batch(batch, model_actions) + eval_module.sum_weight_since_update_local += ( + batch.weight.sum() if batch.weight is not None else len(batch) + ) + else: + new_batch = batch + return self.cb_training_step(new_batch, batch_idx, optimizer_idx) + + def on_train_epoch_end(self): + eval_module = self.eval_module # assign to local var to keep pyre happy + if eval_module is not None: + if eval_module.sum_weight_since_update_local.item() > 0: + # only aggregate if we've processed new data since last aggregation. + eval_module._aggregate_across_instances() + eval_module.log_metrics(global_step=self.global_step) + + def _log_recmetrics(self, global_step: Optional[int] = None) -> None: + pass diff --git a/reagent/training/cb/deep_represent_linucb_trainer.py b/reagent/training/cb/deep_represent_linucb_trainer.py new file mode 100644 index 000000000..89d56ad6d --- /dev/null +++ b/reagent/training/cb/deep_represent_linucb_trainer.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging + +import torch +from reagent.core.configuration import resolve_defaults +from reagent.core.dataclasses import field +from reagent.core.types import CBInput +from reagent.gym.policies.policy import Policy +from reagent.models.deep_represent_linucb import DeepRepresentLinearRegressionUCB +from reagent.optimizer.union import Optimizer__Union +from reagent.training.cb.linucb_trainer import _get_chosen_arm_features, LinUCBTrainer + +logger = logging.getLogger(__name__) + + +class DeepRepresentLinUCBTrainer(LinUCBTrainer): + """ + The trainer for a Contextual Bandit model, where deep represent layer serves as feature processor, + and then processed features are fed to LinUCB layer to produce UCB score. + This is extension of LinUCBTrainer. More details refer to docstring of LinUCBTrainer. 
+ + Reference: + - LinUCB : https://arxiv.org/pdf/2012.01780.pdf + - DeepRepresentLinUCB : https://arxiv.org/pdf/1003.0146.pdf + """ + + @resolve_defaults + def __init__( + self, + policy: Policy, + optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + **kwargs, + ): + super().__init__( + policy=policy, + **kwargs, + ) + assert isinstance( + policy.scorer, DeepRepresentLinearRegressionUCB + ), "Trainer requires the policy scorer to be DeepRepresentLinearRegressionUCB" + self.scorer = policy.scorer + self.loss_fn = torch.nn.MSELoss(reduction="mean") + self.optimizer = optimizer + + def configure_optimizers(self): + optimizers = [] + optimizers.append( + self.optimizer.make_optimizer_scheduler(self.scorer.parameters()) + ) + return optimizers + + def cb_training_step(self, batch: CBInput, batch_idx: int, optimizer_idx: int = 0): + self._check_input(batch) + assert batch.action is not None # to satisfy Pyre + assert batch.reward is not None # to satisfy Pyre + x = _get_chosen_arm_features(batch.context_arm_features, batch.action) + + pred_ucb = self.scorer( # noqa + inp=x + ) # this calls scorer.forward() so as to update pred_u, and to grad descent on deep_represent module + loss = self.loss_fn(self.scorer.pred_u, batch.reward.t()) + + # update parameters + self.update_params(self.scorer.mlp_out.detach(), batch.reward, batch.weight) + + return loss diff --git a/reagent/training/cb/disjoint_linucb_trainer.py b/reagent/training/cb/disjoint_linucb_trainer.py new file mode 100644 index 000000000..b6c5ad0ac --- /dev/null +++ b/reagent/training/cb/disjoint_linucb_trainer.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import logging +from typing import List, Optional + +import torch +from reagent.core.configuration import resolve_defaults +from reagent.core.types import CBInput +from reagent.gym.policies.policy import Policy +from reagent.models.disjoint_linucb_predictor import DisjointLinearRegressionUCB +from reagent.training.cb.base_trainer import BaseCBTrainerWithEval + + +logger = logging.getLogger(__name__) + + +class DisjointLinUCBTrainer(BaseCBTrainerWithEval): + """ + The trainer for Disjoint LinUCB Contextual Bandit model. + The model estimates a ridge regression (linear) and only supports dense features. + + Args: + policy: The policy to be trained. 
Its scorer has to be DisjointLinearRegressionUCB + """ + + @resolve_defaults + def __init__( + self, + policy: Policy, + automatic_optimization: bool = False, # turn off automatic_optimization because we are updating parameters manually + *args, + **kwargs, + ): + super().__init__(automatic_optimization=automatic_optimization, *args, **kwargs) + assert isinstance( + policy.scorer, DisjointLinearRegressionUCB + ), "DisjointLinUCBTrainer requires the policy scorer to be DisjointLinearRegressionUCB" + self.scorer = policy.scorer + self.num_arms = policy.scorer.num_arms + + def configure_optimizers(self): + # no optimizers bcs we update weights manually + return None + + def update_params( + self, + arm_idx: int, + x: torch.Tensor, + y: Optional[torch.Tensor], + weight: Optional[torch.Tensor] = None, + ): + """ + Update A and b for arm with index arm_idx + Args: + arm_idx: the index of the arm to be updated + x: 2D tensor of shape (batch_size, dim) + y: 2D tensor of shape (batch_size, 1) + weight: 2D tensor of shape (batch_size, 1) + """ + # weight is number of observations represented by each entry + if weight is None: + weight = torch.ones_like(torch.tensor(y)) + weight = weight.float() + + self.scorer.cur_num_obs[arm_idx] += torch.tensor(y).shape[0] + + self.scorer.cur_A[arm_idx] += torch.matmul( + x.t(), x * weight + ) # dim (DA*DC, DA*DC) + self.scorer.cur_b[arm_idx] += torch.matmul( + x.t(), y * weight + ).squeeze() # dim (DA*DC,) + + def _check_input(self, batch: List[CBInput]): + # TODO: check later with train_script for batch's dataset info + assert len(batch) == self.num_arms + for sub_batch in batch: + assert sub_batch.context_arm_features.ndim == 2 + assert sub_batch.reward is not None + + # pyre-fixme[14]: `cb_training_step` overrides method defined in `BaseCBTrainerWithEval` + # inconsistently. + def cb_training_step( + self, batch: List[CBInput], batch_idx: int, optimizer_idx: int = 0 + ): + """ + each element in batch is a sub-batch of data for that arm + """ + for arm_idx in range(self.num_arms): + sub_batch = batch[arm_idx] + self.update_params( + arm_idx, + sub_batch.context_arm_features, + sub_batch.reward, + sub_batch.weight, + ) + + def apply_discounting_multiplier(self): + self.scorer.b *= self.scorer.gamma + self.scorer.A *= self.scorer.gamma + + def on_train_epoch_end(self): + super().on_train_epoch_end() + # at the end of the training epoch calculate the coefficients + self.scorer._estimate_coefs() + # apply discount factor here so that next round it's already discounted + # self.A is V in D-LinUCB paper https://arxiv.org/pdf/1909.09146.pdf + # This is a simplified version of D-LinUCB, we calculate A = \sum \gamma^t xx^T + # to discount the old data. See N2441818 for why we do this. + self.apply_discounting_multiplier() diff --git a/reagent/training/cb/linucb_trainer.py b/reagent/training/cb/linucb_trainer.py new file mode 100644 index 000000000..cd06f8483 --- /dev/null +++ b/reagent/training/cb/linucb_trainer.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
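# Schematic summary of the DisjointLinUCBTrainer update above: per-arm ridge-regression
# sufficient statistics with an exponential discount. The exact bookkeeping between
# cur_A/cur_b and A/b lives in DisjointLinearRegressionUCB._estimate_coefs, which is
# not shown in this diff, so the sketch below is indicative only.
#
#     for each arm a (during the epoch):
#         A_a += X_a.T @ (X_a * w_a)     # update_params: torch.matmul(x.t(), x * weight)
#         b_a += X_a.T @ (y_a * w_a)     # update_params: torch.matmul(x.t(), y * weight)
#     at epoch end:
#         coef_a = inverse(A_a) @ b_a    # scorer._estimate_coefs()
#         A_a *= gamma; b_a *= gamma     # apply_discounting_multiplier()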
+import logging +from typing import Optional + +import torch +from reagent.core.configuration import resolve_defaults +from reagent.core.types import CBInput +from reagent.gym.policies.policy import Policy +from reagent.models.linear_regression import LinearRegressionUCB +from reagent.training.cb.base_trainer import BaseCBTrainerWithEval + + +logger = logging.getLogger(__name__) + + +def _get_chosen_arm_features( + all_arm_features: torch.Tensor, chosen_arms: torch.Tensor +) -> torch.Tensor: + """ + Pick the features for chosen arms out of a tensor with features of all arms + + Args: + all_arm_features: 3D Tensor of shape (batch_size, num_arms, arm_dim) with + features of all available arms. + chosen_arms: 2D Tensor of shape (batch_size, 1) with dtype long. For each observation + it holds the index of the chosen arm. + Returns: + A 2D Tensor of shape (batch_size, arm_dim) with features of chosen arms. + """ + assert all_arm_features.ndim == 3 + return torch.gather( + all_arm_features, + 1, + chosen_arms.unsqueeze(-1).expand(-1, 1, all_arm_features.shape[2]), + ).squeeze(1) + + +class LinUCBTrainer(BaseCBTrainerWithEval): + """ + The trainer for LinUCB Contextual Bandit model. + The model estimates a ridge regression (linear) and only supports dense features. + We can have different number and identities of arms in each observation. The arms must + have features to represent their semantic meaning. + Instead of keeping track of cumulative values of `A` and `b`, we keep track of the average + (cumulative divided by total weight) values. + Reference: https://arxiv.org/pdf/1003.0146.pdf + + Args: + policy: The policy to be trained. Its scorer has to be LinearRegressionUCB + """ + + @resolve_defaults + def __init__( + self, + policy: Policy, + automatic_optimization: bool = False, # turn off automatic_optimization because we are updating parameters manually, + *args, + **kwargs, + ): + super().__init__(automatic_optimization=automatic_optimization, *args, **kwargs) + assert isinstance( + policy.scorer, LinearRegressionUCB + ), "LinUCBTrainer requires the policy scorer to be LinearRegressionUCB" + self.scorer = policy.scorer + + def configure_optimizers(self): + # no optimizers bcs we update weights manually + return None + + def update_params( + self, x: torch.Tensor, y: torch.Tensor, weight: Optional[torch.Tensor] = None + ): + """ + Args: + x: 2D tensor of shape (batch_size, dim) + y: 2D tensor of shape (batch_size, 1) + weight: 2D tensor of shape (batch_size, 1) + """ + # weight is number of observations represented by each entry + if weight is None: + weight = torch.ones_like(y) + weight = weight.float() + + batch_sum_weight = weight.sum() + self.scorer.cur_num_obs += y.shape[0] + self.scorer.cur_sum_weight += batch_sum_weight + # update average values of A and b using observations from the batch + self.scorer.cur_avg_A = ( + self.scorer.cur_avg_A * (1 - batch_sum_weight / self.scorer.cur_sum_weight) + + torch.matmul(x.t(), x * weight) / self.scorer.cur_sum_weight + ) # dim (DA*DC, DA*DC) + self.scorer.cur_avg_b = ( + self.scorer.cur_avg_b * (1 - batch_sum_weight / self.scorer.cur_sum_weight) + + torch.matmul(x.t(), y * weight).squeeze() / self.scorer.cur_sum_weight + ) # dim (DA*DC,) + + def _check_input(self, batch: CBInput): + assert batch.context_arm_features.ndim == 3 + assert batch.reward is not None + assert batch.action is not None + assert len(batch.action) == len(batch.reward) + assert len(batch.action) == batch.context_arm_features.shape[0] + + def _update_recmetrics( + self, 
batch: CBInput, batch_idx: int, x: torch.Tensor + ) -> None: + recmetric_module = self.recmetric_module + if (recmetric_module is not None) and (batch_idx % self.log_every_n_steps == 0): + # get point predictions (expected value, uncertainty ignored) + # this could be expensive because the coefficients have to be computed via matrix inversion + preds = self.scorer(x, ucb_alpha=0) + weight = batch.weight + if weight is None: + assert batch.reward is not None + weight = torch.ones_like(batch.reward) + recmetric_module.update( + { + "prediction": preds, + "label": batch.reward, + "weight": weight, + } + ) + self._log_recmetrics(global_step=self.global_step) + + def cb_training_step(self, batch: CBInput, batch_idx: int, optimizer_idx: int = 0): + self._check_input(batch) + assert batch.action is not None # to satisfy Pyre + x = _get_chosen_arm_features(batch.context_arm_features, batch.action) + + # update parameters + assert batch.reward is not None # to satisfy Pyre + self.update_params(x, batch.reward, batch.weight) + + self._update_recmetrics(batch, batch_idx, x) + + def apply_discounting_multiplier(self): + self.scorer.sum_weight *= self.scorer.gamma + + def on_train_epoch_end(self): + super().on_train_epoch_end() + # at the end of the training epoch calculate the coefficients + self.scorer._calculate_coefs() + # apply discounting factor to the total weight. the average `A` and `b` valuse remain the same + self.apply_discounting_multiplier() diff --git a/reagent/training/cem_trainer.py b/reagent/training/cem_trainer.py index 4036e92ad..e87a2f1d9 100644 --- a/reagent/training/cem_trainer.py +++ b/reagent/training/cem_trainer.py @@ -11,10 +11,11 @@ import logging from typing import List -import reagent.types as rlt +import reagent.core.types as rlt +import torch.nn as nn +from reagent.core.parameters import CEMTrainerParameters from reagent.models.cem_planner import CEMPlannerNetwork -from reagent.parameters import CEMTrainerParameters -from reagent.training.rl_trainer_pytorch import RLTrainer +from reagent.training.reagent_lightning_module import ReAgentLightningModule from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer @@ -29,23 +30,20 @@ def print_mdnrnn_losses(minibatch, model_index, losses) -> None: ) -class CEMTrainer(RLTrainer): +class CEMTrainer(ReAgentLightningModule): def __init__( self, cem_planner_network: CEMPlannerNetwork, world_model_trainers: List[MDNRNNTrainer], parameters: CEMTrainerParameters, - use_gpu: bool = False, ) -> None: - super().__init__(parameters.rl, use_gpu=use_gpu) + super().__init__() self.cem_planner_network = cem_planner_network - self.world_model_trainers = world_model_trainers - self.minibatch_size = parameters.mdnrnn.minibatch_size + self.world_model_trainers = nn.ModuleList(world_model_trainers) - def train(self, training_batch: rlt.MemoryNetworkInput) -> None: - for i, trainer in enumerate(self.world_model_trainers): - losses = trainer.train(training_batch) - # TODO: report losses instead of printing them - # print_mdnrnn_losses(self.minibatch, i, losses) + def configure_optimizers(self): + return [o for t in self.world_model_trainers for o in t.configure_optimizers()] - self.minibatch += 1 + def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int): + for t in self.world_model_trainers: + yield from t.train_step_gen(training_batch, batch_idx) diff --git a/reagent/training/cfeval/__init__.py b/reagent/training/cfeval/__init__.py new file mode 100644 index 000000000..a80964164 --- /dev/null +++ 
b/reagent/training/cfeval/__init__.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from .bandit_reward_network_trainer import BanditRewardNetTrainer + +__all__ = [ + "BanditRewardNetTrainer", +] diff --git a/reagent/training/cfeval/bandit_reward_network_trainer.py b/reagent/training/cfeval/bandit_reward_network_trainer.py new file mode 100644 index 000000000..0f3007776 --- /dev/null +++ b/reagent/training/cfeval/bandit_reward_network_trainer.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +import logging +from typing import Optional + +import numpy as np +import reagent.core.types as rlt +import torch +from reagent.core.dataclasses import field +from reagent.models.base import ModelBase +from reagent.optimizer.union import Optimizer__Union +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.reward_network_trainer import _get_loss_function, LossFunction + +logger = logging.getLogger(__name__) + + +class BanditRewardNetTrainer(ReAgentLightningModule): + def __init__( + self, + reward_net: ModelBase, + optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + loss_type: LossFunction = LossFunction.MSE, + reward_ignore_threshold: Optional[float] = None, + weighted_by_inverse_propensity: bool = False, + ) -> None: + super().__init__() + self.reward_net = reward_net + self.optimizer = optimizer + self.loss_type = loss_type + self.reward_ignore_threshold = reward_ignore_threshold + self.weighted_by_inverse_propensity = weighted_by_inverse_propensity + self.loss_fn = _get_loss_function( + loss_type, reward_ignore_threshold, weighted_by_inverse_propensity + ) + + def configure_optimizers(self): + optimizers = [] + optimizers.append( + self.optimizer.make_optimizer_scheduler(self.reward_net.parameters()) + ) + return optimizers + + def _get_sample_weight(self, batch: rlt.BanditRewardModelInput): + weight = None + if self.weighted_by_inverse_propensity: + assert batch.action_prob is not None + # pyre-fixme[58]: `/` is not supported for operand types `float` and + # `Tensor`. 
+ weight = 1.0 / batch.action_prob + return weight + + def _get_predicted_reward(self, batch: rlt.BanditRewardModelInput): + model_rewards_all_actions = self.reward_net(batch.state) + logged_action_idxs = torch.argmax(batch.action, dim=1, keepdim=True) + predicted_reward = model_rewards_all_actions.gather(1, logged_action_idxs) + return predicted_reward + + @torch.no_grad() + def _compute_unweighted_loss( + self, + predicted_reward: torch.Tensor, + target_reward: torch.Tensor, + batch: rlt.BanditRewardModelInput, + ): + return self.loss_fn( + predicted_reward, + target_reward, + weight=torch.ones_like(predicted_reward), + batch=batch, + ) + + def train_step_gen( + self, training_batch: rlt.BanditRewardModelInput, batch_idx: int + ): + weight = self._get_sample_weight(training_batch) + target_reward = training_batch.reward + predicted_reward = self._get_predicted_reward(training_batch) + + assert ( + predicted_reward.shape == target_reward.shape + and len(target_reward.shape) == 2 + and target_reward.shape[1] == 1 + ) + loss = self.loss_fn(predicted_reward, target_reward, weight, training_batch) + + detached_loss = loss.detach().cpu() + self.reporter.log(loss=detached_loss) + + if weight is not None: + unweighted_loss = self._compute_unweighted_loss( + predicted_reward, target_reward, training_batch + ) + self.reporter.log(unweighted_loss=unweighted_loss) + + if self.all_batches_processed % 10 == 0: + logger.info( + f"{self.all_batches_processed}-th batch: " + f"{self.loss_type}={detached_loss.item()}" + ) + + yield loss + + # pyre-ignore inconsistent override because lightning doesn't use types + def validation_step(self, batch: rlt.BanditRewardModelInput, batch_idx: int): + if self._training_batch_type and isinstance(batch, dict): + batch = self._training_batch_type.from_dict(batch) + + reward = batch.reward + self.reporter.log(eval_rewards=reward.flatten().detach().cpu()) + + pred_reward = self._get_predicted_reward(batch) + self.reporter.log(eval_pred_rewards=pred_reward.flatten().detach().cpu()) + + weight = self._get_sample_weight(batch) + loss = self.loss_fn(pred_reward, reward, weight, batch) + + detached_loss = loss.detach().cpu() + self.reporter.log(eval_loss=detached_loss) + + if weight is not None: + unweighted_loss = self._compute_unweighted_loss(pred_reward, reward, batch) + self.reporter.log(eval_unweighted_loss=unweighted_loss) + + return detached_loss.item() + + def validation_epoch_end(self, outputs): + self.reporter.update_best_model(np.mean(outputs), self.reward_net) diff --git a/reagent/training/cfeval/bayes_by_backprop_trainer.py b/reagent/training/cfeval/bayes_by_backprop_trainer.py new file mode 100644 index 000000000..05654bad7 --- /dev/null +++ b/reagent/training/cfeval/bayes_by_backprop_trainer.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
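# Note on the BanditRewardNetTrainer above: with weighted_by_inverse_propensity=True,
# each example is weighted by 1 / action_prob (standard inverse-propensity weighting),
# so actions the logging policy rarely chose contribute more to the reward-model loss;
# an unweighted version of the loss is also logged for comparison.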
+ +import logging + +import reagent.core.types as rlt +import torch +from reagent.training.cfeval.bandit_reward_network_trainer import BanditRewardNetTrainer + +logger = logging.getLogger(__name__) + + +class BayesByBackpropTrainer(BanditRewardNetTrainer): + def train_step_gen( + self, training_batch: rlt.BanditRewardModelInput, batch_idx: int + ): + weight = self._get_sample_weight(training_batch) + + # pyre-ignore seems to be pyre bug for pytorch + loss = self.reward_net.sample_elbo( + torch.cat([training_batch.action, training_batch.state.float_features], 1), + training_batch.reward, + 1, + ) + # use (action, reward) to indicate (input,target) + + detached_loss = loss.detach().cpu() + self.reporter.log(loss=detached_loss) + + if weight is not None: + raise NotImplementedError # TODO for integration in to RL framework + # unweighted_loss = self._compute_unweighted_loss( + # predicted_reward, target_reward, training_batch + # ) + # self.reporter.log(unweighted_loss=unweighted_loss) + + self.all_batches_processed += 1 + if self.all_batches_processed % 100 == 0: + logger.info( + f"{self.all_batches_processed}-th batch: " + f"Loss={detached_loss.item()}" + ) + + yield loss + + def validation_step(self, batch: rlt.BanditRewardModelInput, batch_idx: int): + if self._training_batch_type and isinstance(batch, dict): + batch = self._training_batch_type.from_dict(batch) + + weight = self._get_sample_weight(batch) + # pyre-ignore + loss = self.reward_net.sample_elbo( + torch.cat([batch.action, batch.state.float_features], 1), + batch.reward, + 1, + ) + + detached_loss = loss.detach().cpu() + self.reporter.log(eval_loss=detached_loss) + + if weight is not None: + raise NotImplementedError # TODO for integration in to RL framework + # unweighted_loss = self._compute_unweighted_loss(pred_reward, reward, batch) + # self.reporter.log(eval_unweighted_loss=unweighted_loss) + + return detached_loss.item() diff --git a/reagent/training/discrete_crr_trainer.py b/reagent/training/discrete_crr_trainer.py new file mode 100644 index 000000000..78b4b1226 --- /dev/null +++ b/reagent/training/discrete_crr_trainer.py @@ -0,0 +1,439 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +# Note: this files is modeled after td3_trainer.py + +import logging +from typing import List, Tuple + +import reagent.core.types as rlt +import torch +import torch.nn.functional as F +from reagent.core.configuration import resolve_defaults +from reagent.core.dataclasses import field +from reagent.core.parameters import EvaluationParameters, RLParameters +from reagent.optimizer import Optimizer__Union, SoftUpdate +from reagent.training.dqn_trainer_base import DQNTrainerBaseLightning +from torch import distributions as pyd + + +logger = logging.getLogger(__name__) + + +class DiscreteCRRTrainer(DQNTrainerBaseLightning): + """ + Critic Regularized Regression (CRR) algorithm trainer + as described in https://arxiv.org/abs/2006.15134 + """ + + @resolve_defaults + def __init__( + self, + actor_network, + actor_network_target, + q1_network, + q1_network_target, + reward_network, + q2_network=None, + q2_network_target=None, + q_network_cpe=None, + q_network_cpe_target=None, + metrics_to_score=None, + evaluation: EvaluationParameters = field( # noqa: B008 + default_factory=EvaluationParameters + ), + # Start CRRTrainerParameters. 
All parameters above should be + # in the blocklist for CRRTrainerParameters in parameters.py + rl: RLParameters = field(default_factory=RLParameters), # noqa: B008 + double_q_learning: bool = True, + q_network_optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + actor_network_optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + use_target_actor: bool = False, + actions: List[str] = field(default_factory=list), # noqa: B008 + delayed_policy_update: int = 1, + beta: float = 1.0, + entropy_coeff: float = 0.0, + clip_limit: float = 10.0, + max_weight: float = 20.0, + ) -> None: + """ + Args: + actor_network: states -> actions, trained to maximize value + actor_network_target: copy of actor network for training stability + q1_network: states -> q-value for all actions + q1_network_target: copy of q-network for training stability + q2_network (optional): double q-learning to stabilize training + from overestimation bias. The presence of q2_network is specified + in discrete_crr.py using the config parameter double_q_learning + q2_network_target (optional): copy of q-network for training stability + rl (optional): an instance of the RLParameters class, which + defines relevant hyperparameters + q_network_optimizer (optional): the optimizer class and + optimizer hyperparameters for the q network(s) optimizer + actor_network_optimizer (optional): see q_network_optimizer + use_target_actor (optional): specifies whether target actor is used + delayed_policy_update (optional): the ratio of q network updates + to target and policy network updates + beta: coefficient for the KL-divergence policy constraint regularization of CRR, + see eq(5) in https://arxiv.org/pdf/2006.15134.pdf. With large beta, the output + policy of CRR cannot drift too far away from the logged policy + + entropy_coeff: coefficient for entropy regularization + clip_limit: threshold for importance sampling when computing the entropy + regularization using offline samples + max_weight: the maximum possible action weight in the actor loss + + Explanation of entropy regularization: + Entropy regularization penalizes deterministic policies and encourages + a "uniform" policy. An entropy-regularized MDP can be viewed as adding the term + (-entropy_coeff * pi_ratio * log_pi_b) to each reward.
For detailed + formulation of entropy regularized please see eq.(9) & eq.(10) in + https://arxiv.org/pdf/2007.06558.pdf + """ + super().__init__( + rl, + metrics_to_score=metrics_to_score, + actions=actions, + evaluation_parameters=evaluation, + ) + self._actions = actions + assert self._actions is not None, "Discrete-action CRR needs action names" + + self.rl_parameters = rl + self.double_q_learning = double_q_learning + + self.use_target_actor = use_target_actor + + self.q1_network = q1_network + self.q1_network_target = q1_network_target + self.q_network_optimizer = q_network_optimizer + + self.q2_network = q2_network + if self.q2_network is not None: + assert ( + q2_network_target is not None + ), "q2_network provided without a target network" + self.q2_network_target = q2_network_target + + self.actor_network = actor_network + self.actor_network_target = actor_network_target + self.actor_network_optimizer = actor_network_optimizer + + self.delayed_policy_update = delayed_policy_update + + self._initialize_cpe( + reward_network, + q_network_cpe, + q_network_cpe_target, + optimizer=q_network_optimizer, + ) + self.beta = beta + self.entropy_coeff = entropy_coeff + self.clip_limit = clip_limit + self.max_weight = max_weight + + @property + def q_network(self): + return self.q1_network + + @torch.no_grad() + def get_detached_model_outputs(self, state) -> Tuple[torch.Tensor, None]: + # This function is only used in evaluation_data_page.py, in create_from_tensors_dqn(), + # in order to compute model propensities. The definition of this function in + # dqn_trainer.py returns two values, and so we also return two values here, for + # consistency. + action_scores = self.actor_network(state).action + return action_scores, None + + def configure_optimizers(self): + optimizers = [] + target_params = list(self.q1_network_target.parameters()) + source_params = list(self.q1_network.parameters()) + + optimizers.append( + self.q_network_optimizer.make_optimizer_scheduler( + self.q1_network.parameters() + ) + ) + if self.q2_network: + target_params += list(self.q2_network_target.parameters()) + source_params += list(self.q2_network.parameters()) + optimizers.append( + self.q_network_optimizer.make_optimizer_scheduler( + self.q2_network.parameters() + ) + ) + + target_params += list(self.actor_network_target.parameters()) + source_params += list(self.actor_network.parameters()) + optimizers.append( + self.actor_network_optimizer.make_optimizer_scheduler( + self.actor_network.parameters() + ) + ) + + if self.calc_cpe_in_training: + ( + cpe_target_params, + cpe_source_params, + cpe_optimizers, + ) = self._configure_cpe_optimizers() + target_params += cpe_target_params + source_params += cpe_source_params + optimizers += cpe_optimizers + + optimizers.append( + SoftUpdate.make_optimizer_scheduler( + target_params, source_params, tau=self.tau + ) + ) + + return optimizers + + def compute_target_q_values(self, next_state, rewards, not_terminal, next_q_values): + if self.use_target_actor: + next_state_actor_output = self.actor_network_target(next_state).action + else: + next_state_actor_output = self.actor_network(next_state).action + + next_dist = pyd.Categorical(logits=next_state_actor_output) + next_V = (next_q_values * next_dist.probs).sum(dim=1, keepdim=True) + if self.q2_network is not None: + next_q2_values = self.q2_network_target(next_state) + next_V2 = (next_q2_values * next_dist.probs).sum(dim=1, keepdim=True) + next_V = torch.min(next_V, next_V2) + + target_q_values = rewards + self.gamma * next_V * 
not_terminal.float() + return target_q_values + + def compute_td_loss(self, q_network, state, action, target_q_values): + all_q_values = q_network(state) + q_values = (all_q_values * action).sum(dim=1, keepdim=True) + q_loss = F.mse_loss(q_values, target_q_values) + return q_loss + + def compute_actor_loss( + self, batch_idx, action, logged_action_probs, all_q_values, all_action_scores + ): + # Only update actor network after a fixed number of Q updates + if batch_idx % self.delayed_policy_update != 0: + # Yielding None prevents the actor network from updating + actor_loss = None + return (actor_loss, actor_loss) + + # dist is the distribution of actions derived from the actor's outputs (logits) + dist = pyd.Categorical(logits=all_action_scores) + # Note: D = dist.probs is equivalent to: + # e_x = torch.exp(actor_actions) + # D = e_x / e_x.sum(dim=1, keepdim=True) + # That is, dist gives a softmax distribution over actor's outputs + + # values is the vector of state values in this batch + values = (all_q_values * dist.probs).sum(dim=1, keepdim=True) + + advantages = all_q_values - values + # Note: the above statement subtracts the "values" column vector from + # every column of the all_q_values matrix, giving us the advantages + # of every action in the present state + + weight = torch.clamp( + ((1 / self.beta) * (advantages * action).sum(dim=1, keepdim=True)).exp(), + 0, + self.max_weight, + ) + # Remember: training_batch.action is in the one-hot format + logged_action_idxs = torch.argmax(action, dim=1, keepdim=True) + + # Note: action space is assumed to be discrete with actions + # belonging to the set {0, 1, ..., action_dim-1}. Therefore, + # advantages.gather(1, logged_action_idxs) will select, for each data point + # (row i of the Advantage matrix "advantages"), the element with index + # action.float_features[i] + + # Note: dist.logits already gives log(p), which can be verified by + # comparing dist.probs and dist.logits. + # https://pytorch.org/docs/master/distributions.html#multinomial + # states: logits (Tensor) – event log probabilities + + # log_pi_b is the log of the probability assigned by the + # actor (abbreviated as pi) to the actions of the behavioral (b) policy + log_pi_b = dist.log_prob(logged_action_idxs.squeeze(1)).unsqueeze(1) + + # entropy regularization + pi_t = (dist.probs * action).sum(dim=1, keepdim=True) + + if self.entropy_coeff > 0: + pi_b = logged_action_probs.view(pi_t.shape) + assert torch.min(pi_b) > 0, "Logged action probability <= 0" + pi_ratio = torch.clip(pi_t / pi_b, min=1e-4, max=self.clip_limit) + entropy = (pi_ratio * log_pi_b).mean() + else: + # dummy value + entropy = 0 + + # Note: the CRR loss for each datapoint (and the magnitude of the corresponding + # parameter update) is proportional to log_pi_b * weight. Therefore, as mentioned + # at the top of Section 3.2, the actor on the one hand has incentive to assign + # larger probabilities to the actions observed in the dataset (so as to reduce + # the magnitude of log_pi_b), but on the other hand it gives preference to doing + # this on datapoints where weight is large (i.e., those points on which the + # Q-value of the observed action is large). 
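To make the clamped exponentiated-advantage weight described above concrete, here is a small sketch with toy numbers (beta = 1.0 and max_weight = 20.0 are assumed values; illustrative only, not part of this patch):

    import torch
    from torch import distributions as pyd

    beta, max_weight = 1.0, 20.0
    all_q_values = torch.tensor([[1.0, 2.0]])        # Q(s, a) for two actions
    all_action_scores = torch.tensor([[0.0, 0.0]])   # actor logits -> uniform policy
    action = torch.tensor([[0.0, 1.0]])              # logged action, one-hot

    dist = pyd.Categorical(logits=all_action_scores)
    values = (all_q_values * dist.probs).sum(dim=1, keepdim=True)   # V(s) = 1.5
    advantages = all_q_values - values                              # [[-0.5, 0.5]]
    weight = torch.clamp(
        ((1 / beta) * (advantages * action).sum(dim=1, keepdim=True)).exp(),
        0,
        max_weight,
    )
    # weight = exp(0.5) ~= 1.65, well below the max_weight clamp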
+ actor_loss_without_reg = (-log_pi_b * weight.detach()).mean() + actor_loss = (-log_pi_b * weight.detach()).mean() + self.entropy_coeff * entropy + return actor_loss_without_reg, actor_loss + + def train_step_gen(self, training_batch: rlt.DiscreteDqnInput, batch_idx: int): + """ + IMPORTANT: the input action here is preprocessed according to the + training_batch type, which in this case is DiscreteDqnInput. Hence, + the preprocessor in the DiscreteDqnInputMaker class in the + trainer_preprocessor.py is used, which converts acion taken to a + one-hot representation. + """ + self._check_input(training_batch) + + state = training_batch.state + action = training_batch.action + next_state = training_batch.next_state + not_terminal = training_batch.not_terminal + rewards = self.boost_rewards(training_batch.reward, training_batch.action) + + # Remember: training_batch.action is in the one-hot format + logged_action_idxs = torch.argmax(action, dim=1, keepdim=True) + discount_tensor = torch.full_like(rewards, self.gamma) + + next_q_values = self.q1_network_target(next_state) + target_q_values = self.compute_target_q_values( + next_state, rewards, not_terminal, next_q_values + ) + q1_loss = self.compute_td_loss(self.q1_network, state, action, target_q_values) + + # Show td_loss on the progress bar and in tensorboard graphs: + self.log( + "td_loss", q1_loss, prog_bar=True, batch_size=training_batch.batch_size() + ) + yield q1_loss + + if self.q2_network: + q2_loss = self.compute_td_loss( + self.q2_network, state, action, target_q_values + ) + yield q2_loss + + all_q_values = self.q1_network(state) # Q-values of all actions + + # Note: action_dim (the length of each row of the actor_action + # matrix obtained below) is assumed to be > 1. + all_action_scores = self.actor_network(state).action + logged_action_probs = training_batch.extras.action_probability + + actor_loss_without_reg, actor_loss = self.compute_actor_loss( + batch_idx, action, logged_action_probs, all_q_values, all_action_scores + ) + # self.reporter.log( + # actor_loss=actor_loss, + # actor_q1_value=actor_q1_values, + # ) + + # Show actor_loss on the progress bar and also in Tensorboard graphs + self.log( + "actor_loss_without_reg", + actor_loss_without_reg, + prog_bar=True, + batch_size=training_batch.batch_size(), + ) + self.log( + "actor_loss", + actor_loss, + prog_bar=True, + batch_size=training_batch.batch_size(), + ) + yield actor_loss + + yield from self._calculate_cpes( + training_batch, + state, + next_state, + all_action_scores, + next_q_values.detach(), + logged_action_idxs, + discount_tensor, + not_terminal.float(), + ) + + # TODO: rename underlying function to get_max_possible_values_and_idxs + model_action_idxs = self.get_max_q_values( + all_action_scores, + training_batch.possible_actions_mask if self.maxq_learning else action, + )[1] + + self.reporter.log( + logged_actions=logged_action_idxs, + td_loss=q1_loss, + logged_propensities=training_batch.extras.action_probability, + logged_rewards=rewards, + model_values=all_action_scores, + model_action_idxs=model_action_idxs, + ) + + # Use the soft update rule to update the target networks. + # Note: this yield has to be the last one, since SoftUpdate is the last + # optimizer added in the configure_optimizers() function. 
+ result = self.soft_update_result() + yield result + + def validation_step(self, batch, batch_idx): + # As explained in the comments to the validation_step function in + # pytorch_lightning/core/lightning.py, this function operates on a + # single batch of data from the validation set. For example: + # val_outs = [] + # for val_batch in val_data: + # out = validation_step(val_batch) + # val_outs.append(out) + # validation_epoch_end(val_outs) + # Note: the relevant validation_epoch_end() function is defined in dqn_trainer_base.py + + # RETURN ARGS: + # The super() call at the end of this function calls the function with the same name + # in dqn_trainer_base.py, which returns a EvaluationDataPage for data in that batch. + # In other words, the validation_epoch_end() function will take a list of validation + # EvaluationDataPages. + + if isinstance(batch, dict): + batch = rlt.DiscreteDqnInput.from_dict(batch) + + # validation data + state = batch.state + action = batch.action + next_state = batch.next_state + not_terminal = batch.not_terminal + rewards = self.boost_rewards(batch.reward, action) + + # intermediate values + next_q_values = self.q1_network_target(next_state) + target_q_values = self.compute_target_q_values( + next_state, rewards, not_terminal, next_q_values + ) + all_q_values = self.q1_network(state) + all_action_scores = self.actor_network(state).action + logged_action_probs = batch.extras.action_probability + + # loss to log + actor_loss_without_reg, actor_loss = self.compute_actor_loss( + batch_idx, action, logged_action_probs, all_q_values, all_action_scores + ) + td_loss = self.compute_td_loss(self.q1_network, state, action, target_q_values) + + self.log( + "eval_actor_loss_without_reg", + actor_loss_without_reg, + batch_size=batch.batch_size(), + ) + self.log("eval_actor_loss", actor_loss, batch_size=batch.batch_size()) + self.log("eval_td_loss", td_loss, batch_size=batch.batch_size()) + + return super().validation_step(batch, batch_idx) diff --git a/reagent/training/dqn_trainer.py b/reagent/training/dqn_trainer.py index ca5113d99..06c815158 100644 --- a/reagent/training/dqn_trainer.py +++ b/reagent/training/dqn_trainer.py @@ -1,19 +1,20 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
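Before moving on to the DQN trainer changes: the CRR target above is an expectation of the target critic under the actor's softmax policy, discounted and masked at terminals. A toy sketch of compute_target_q_values with a single critic (made-up values, not part of this patch):

    import torch
    from torch import distributions as pyd

    gamma = 0.99
    rewards = torch.tensor([[1.0]])
    not_terminal = torch.tensor([[1.0]])
    next_q_values = torch.tensor([[1.0, 2.0]])       # target critic at s'
    next_actor_logits = torch.tensor([[0.0, 1.0]])   # actor output at s'

    next_dist = pyd.Categorical(logits=next_actor_logits)
    next_V = (next_q_values * next_dist.probs).sum(dim=1, keepdim=True)
    target_q_values = rewards + gamma * next_V * not_terminal
    # probs ~= [0.269, 0.731], next_V ~= 1.73, target ~= 2.71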
- +import logging from typing import List, Optional, Tuple -import reagent.types as rlt +import reagent.core.types as rlt import torch from reagent.core.configuration import resolve_defaults from reagent.core.dataclasses import dataclass, field -from reagent.core.tracker import observable -from reagent.optimizer.union import Optimizer__Union -from reagent.parameters import EvaluationParameters, RLParameters -from reagent.training.dqn_trainer_base import DQNTrainerBase +from reagent.core.parameters import EvaluationParameters, RLParameters +from reagent.optimizer import Optimizer__Union, SoftUpdate +from reagent.training.dqn_trainer_base import DQNTrainerBaseLightning from reagent.training.imitator_training import get_valid_actions_from_imitator -from reagent.training.training_data_page import TrainingDataPage + + +logger = logging.getLogger(__name__) @dataclass(frozen=True) @@ -22,18 +23,29 @@ class BCQConfig: drop_threshold: float = 0.1 -@observable( - td_loss=torch.Tensor, - reward_loss=torch.Tensor, - logged_actions=torch.Tensor, - logged_propensities=torch.Tensor, - logged_rewards=torch.Tensor, - model_propensities=torch.Tensor, - model_rewards=torch.Tensor, - model_values=torch.Tensor, - model_action_idxs=torch.Tensor, -) -class DQNTrainer(DQNTrainerBase): +class DQNTrainer(DQNTrainerBaseLightning): + """A trainer for the DQN algorithm and its variants. + + Configures optimizers, builds the losses and train functions for the + Q-learning based algorithm variants. Supports MaxQ and SARSA style + TD-learning under both, standard and batch-constrained q-learning. + During training, updates CPE metrics estimators. + + Attributes: + double_q_learning: a boolean flag whether to use double-q learning. + minibatch_size: an int number of samples per minibatch. + minibatches_per_step: an int number of minibatch updates per step. + q_network: a network object mapping states to q-values of all actions. + q_network_target: a copy of q-network for training stability used in + estimating q-values targets. + q_network_optimizer: an optimizer object for training q-network. + bcq: a config file for batch-constrained q-learning (BCQ). + bcq_imitator: if using batch-constrained q-learning, the behavior + policy used for BCQ training. + all_action_scores: a torch tensor containing q-network + predictions from the current states. + """ + @resolve_defaults def __init__( self, @@ -43,9 +55,12 @@ def __init__( q_network_cpe=None, q_network_cpe_target=None, metrics_to_score=None, + evaluation: EvaluationParameters = field( # noqa: B008 + default_factory=EvaluationParameters + ), imitator=None, - loss_reporter=None, - use_gpu: bool = False, + # Start DQNTrainerParameters. All parameters above should be + # in the blocklist for DQNTrainerParameters in parameters.py actions: List[str] = field(default_factory=list), # noqa: B008 rl: RLParameters = field(default_factory=RLParameters), # noqa: B008 double_q_learning: bool = True, @@ -55,17 +70,30 @@ def __init__( optimizer: Optimizer__Union = field( # noqa: B008 default_factory=Optimizer__Union.default ), - evaluation: EvaluationParameters = field( # noqa: B008 - default_factory=EvaluationParameters - ), ) -> None: + """ + Args: + q_network: states -> q-value for each action + q_network_target: copy of q-network for training stability + reward_network: states -> reward for each action + q_network_cpe: states -> cpe q-values for each action + q_network_cpe_target: copy of q_network_cpe for training stability + metrics_to_score: a list of string reward metrics names. 
+ imitator (optional): The behavior policy, used for BCQ training + actions: list of action names + rl: RLParameters + double_q_learning: boolean flag to use double-q learning + bcq: a config file for batch-constrained q-learning, defaults to normal + minibatch_size: samples per minibatch + minibatches_per_step: minibatch updates per step + optimizer: q-network optimizer + evaluation: evaluation params, primarily whether to use CPE in eval or not + """ super().__init__( rl, - use_gpu=use_gpu, metrics_to_score=metrics_to_score, actions=actions, evaluation_parameters=evaluation, - loss_reporter=loss_reporter, ) assert self._actions is not None, "Discrete-action DQN needs action names" self.double_q_learning = double_q_learning @@ -74,22 +102,12 @@ def __init__( self.q_network = q_network self.q_network_target = q_network_target - self.q_network_optimizer = optimizer.make_optimizer(q_network.parameters()) + self.q_network_optimizer = optimizer self._initialize_cpe( reward_network, q_network_cpe, q_network_cpe_target, optimizer=optimizer ) - # pyre-fixme[6]: Expected `Sized` for 1st param but got `Optional[List[str]]`. - self.reward_boosts = torch.zeros([1, len(self._actions)], device=self.device) - if rl.reward_boost is not None: - # pyre-fixme[16]: `Optional` has no attribute `keys`. - for k in rl.reward_boost.keys(): - # pyre-fixme[16]: `Optional` has no attribute `index`. - i = self._actions.index(k) - # pyre-fixme[16]: `Optional` has no attribute `__getitem__`. - self.reward_boosts[0, i] = rl.reward_boost[k] - # Batch constrained q-learning self.bcq = bcq is not None if self.bcq: @@ -97,105 +115,159 @@ def __init__( self.bcq_drop_threshold = bcq.drop_threshold self.bcq_imitator = imitator - def warm_start_components(self): - components = ["q_network", "q_network_target", "q_network_optimizer"] - if self.reward_network is not None: - components += [ - "reward_network", - "reward_network_optimizer", - "q_network_cpe", - "q_network_cpe_target", - "q_network_cpe_optimizer", - ] - return components + def configure_optimizers(self): + """Initializes networks optimizers. + + Initializes and returns the reward, q_network and the cpe networks + optimizers. Also initializes soft updates of target networks from + corresponding source networks. + + Returns: + A list of initialized optimizer objects. 
+ """ + optimizers = [] + target_params = list(self.q_network_target.parameters()) + source_params = list(self.q_network.parameters()) + + optimizers.append( + self.q_network_optimizer.make_optimizer_scheduler( + self.q_network.parameters() + ) + ) + + if self.calc_cpe_in_training: + ( + cpe_target_params, + cpe_source_params, + cpe_optimizers, + ) = self._configure_cpe_optimizers() + target_params += cpe_target_params + source_params += cpe_source_params + optimizers += cpe_optimizers + + optimizers.append( + SoftUpdate.make_optimizer_scheduler( + target_params, source_params, tau=self.tau + ) + ) + + return optimizers @torch.no_grad() - def get_detached_q_values( + def get_detached_model_outputs( self, state ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: - """ Gets the q values from the model and target networks """ + """Gets the q values from the model and target networks""" q_values = self.q_network(state) q_values_target = self.q_network_target(state) return q_values, q_values_target - @torch.no_grad() - def train(self, training_batch: rlt.DiscreteDqnInput): - if isinstance(training_batch, TrainingDataPage): - training_batch = training_batch.as_discrete_maxq_training_batch() - assert isinstance(training_batch, rlt.DiscreteDqnInput) - boosted_rewards = self.boost_rewards( - training_batch.reward, training_batch.action - ) - - self.minibatch += 1 - rewards = boosted_rewards - discount_tensor = torch.full_like(rewards, self.gamma) - not_done_mask = training_batch.not_terminal.float() - assert not_done_mask.dim() == 2 - + def compute_discount_tensor( + self, batch: rlt.DiscreteDqnInput, boosted_rewards: torch.Tensor + ): + """Computes a discount tensor to be used in td-error estimation.""" + discount_tensor = torch.full_like(boosted_rewards, self.gamma) if self.use_seq_num_diff_as_time_diff: assert self.multi_steps is None - discount_tensor = torch.pow(self.gamma, training_batch.time_diff.float()) + discount_tensor = torch.pow(self.gamma, batch.time_diff.float()) if self.multi_steps is not None: - assert training_batch.step is not None - # pyre-fixme[16]: `Optional` has no attribute `float`. - discount_tensor = torch.pow(self.gamma, training_batch.step.float()) + assert batch.step is not None + discount_tensor = torch.pow(self.gamma, batch.step.float()) + return discount_tensor + + def compute_td_loss( + self, + batch: rlt.DiscreteDqnInput, + boosted_rewards: torch.Tensor, + discount_tensor: torch.Tensor, + ): + """Computes q_network td loss. + + Computes a temporal difference loss for training the q-network based + on a corresponding Bellman update. Supports maxQ and SARSA + style updates. - all_next_q_values, all_next_q_values_target = self.get_detached_q_values( - training_batch.next_state + Args: + batch: a training batch object. + boosted_rewards: a (batch_size, 1) shaped torch tensor with + boosted rewards values. + discount_tensor: a (batch_size, 1) torch tensor containing the + discount to apply. + + Returns: + A temporal difference loss object for training the q-network. 
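compute_discount_tensor above falls back to a constant per-step discount and switches to gamma ** k when multi-step transitions are used. A toy illustration (made-up step counts, not part of this patch):

    import torch

    gamma = 0.9
    rewards = torch.tensor([[1.0], [1.0], [1.0]])
    steps = torch.tensor([[1.0], [2.0], [3.0]])        # steps spanned by each transition

    discount_tensor = torch.full_like(rewards, gamma)  # default: one-step discount
    discount_tensor = torch.pow(gamma, steps)          # multi-step: gamma ** k
    # tensor([[0.9000], [0.8100], [0.7290]])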
+ """ + not_done_mask = batch.not_terminal.float() + all_next_q_values, all_next_q_values_target = self.get_detached_model_outputs( + batch.next_state ) if self.maxq_learning: # Compute max a' Q(s', a') over all possible actions using target network - possible_next_actions_mask = ( - training_batch.possible_next_actions_mask.float() - ) + possible_next_actions_mask = batch.possible_next_actions_mask.float() if self.bcq: action_on_policy = get_valid_actions_from_imitator( self.bcq_imitator, - training_batch.next_state, + batch.next_state, self.bcq_drop_threshold, ) possible_next_actions_mask *= action_on_policy next_q_values, max_q_action_idxs = self.get_max_q_values_with_target( - all_next_q_values, all_next_q_values_target, possible_next_actions_mask + all_next_q_values, + all_next_q_values_target, + possible_next_actions_mask, ) else: # SARSA next_q_values, max_q_action_idxs = self.get_max_q_values_with_target( - all_next_q_values, all_next_q_values_target, training_batch.next_action + all_next_q_values, + all_next_q_values_target, + batch.next_action, ) filtered_next_q_vals = next_q_values * not_done_mask - target_q_values = rewards + (discount_tensor * filtered_next_q_vals) + target_q_values = boosted_rewards + (discount_tensor * filtered_next_q_vals) - with torch.enable_grad(): - # Get Q-value of action taken - all_q_values = self.q_network(training_batch.state) - # pyre-fixme[16]: `DQNTrainer` has no attribute `all_action_scores`. - self.all_action_scores = all_q_values.detach() - q_values = torch.sum(all_q_values * training_batch.action, 1, keepdim=True) + # Get Q-value of action taken + all_q_values = self.q_network(batch.state) + # pyre-fixme[16]: `DQNTrainer` has no attribute `all_action_scores`. + self.all_action_scores = all_q_values.detach() + q_values = torch.sum(all_q_values * batch.action, 1, keepdim=True) + td_loss = self.q_network_loss(q_values, target_q_values.detach()) + return td_loss - loss = self.q_network_loss(q_values, target_q_values) - # pyre-fixme[16]: `DQNTrainer` has no attribute `loss`. - self.loss = loss.detach() + def train_step_gen(self, training_batch: rlt.DiscreteDqnInput, batch_idx: int): + """Builds loss functions for updating q- and reward networks. - loss.backward() - self._maybe_run_optimizer( - self.q_network_optimizer, self.minibatches_per_step - ) + Args: + training_batch: a training batch data object. + batch_idx: an integer batch index. - # Use the soft update rule to update target network - self._maybe_soft_update( - self.q_network, self.q_network_target, self.tau, self.minibatches_per_step - ) + Yields: + If a calc_cpe_in_training flag is True, yields a tuple + (td_loss, reward_loss, cpe_metric_loss, soft_update_loss) + for updating q_network, reward_network, q_network_cpe and target + networks respectively. If calc_cpe_in_training is False, yields + a tuple (td_loss, soft_update_loss) for updating q_network and + q_network_target. 
+ """ + # TODO: calls to _maybe_run_optimizer removed, should be replaced with Trainer parameter + self._check_input(training_batch) + + rewards = self.boost_rewards(training_batch.reward, training_batch.action) + not_done_mask = training_batch.not_terminal.float() + + discount_tensor = self.compute_discount_tensor(training_batch, rewards) + td_loss = self.compute_td_loss(training_batch, rewards, discount_tensor) + yield td_loss + td_loss = td_loss.detach() # Get Q-values of next states, used in computing cpe all_next_action_scores = self.q_network(training_batch.next_state).detach() - logged_action_idxs = torch.argmax(training_batch.action, dim=1, keepdim=True) - reward_loss, model_rewards, model_propensities = self._calculate_cpes( + + yield from self._calculate_cpes( training_batch, training_batch.state, training_batch.next_state, @@ -215,34 +287,90 @@ def train(self, training_batch: rlt.DiscreteDqnInput): ) possible_actions_mask *= action_on_policy + # Do we ever use model_action_idxs computed below? model_action_idxs = self.get_max_q_values( self.all_action_scores, possible_actions_mask if self.maxq_learning else training_batch.action, )[1] - # pyre-fixme[16]: `DQNTrainer` has no attribute `notify_observers`. - self.notify_observers( - td_loss=self.loss, - reward_loss=reward_loss, - logged_actions=logged_action_idxs, - logged_propensities=training_batch.extras.action_probability, - logged_rewards=rewards, - model_propensities=model_propensities, - model_rewards=model_rewards, - model_values=self.all_action_scores, - model_action_idxs=model_action_idxs, + self._log_dqn( + td_loss, logged_action_idxs, training_batch, rewards, model_action_idxs ) - self.loss_reporter.report( - td_loss=self.loss, - reward_loss=reward_loss, + # Use the soft update rule to update target network + yield self.soft_update_result() + + def _log_dqn( + self, td_loss, logged_action_idxs, training_batch, rewards, model_action_idxs + ): + """Logs training update results.""" + self.reporter.log( + td_loss=td_loss, logged_actions=logged_action_idxs, logged_propensities=training_batch.extras.action_probability, logged_rewards=rewards, logged_values=None, # Compute at end of each epoch for CPE - model_propensities=model_propensities, - model_rewards=model_rewards, model_values=self.all_action_scores, model_values_on_logged_actions=None, # Compute at end of each epoch for CPE model_action_idxs=model_action_idxs, ) + model_values = self._dense_to_action_dict(self.all_action_scores.mean(dim=0)) + action_histogram = self._dense_to_action_dict( + training_batch.action.float().mean(dim=0) + ) + if training_batch.extras.action_probability is None: + logged_propensities = None + else: + logged_propensities = training_batch.extras.action_probability.mean(dim=0) + model_action_idxs = self._dense_to_action_dict( + torch.nn.functional.one_hot( + model_action_idxs.squeeze(1), num_classes=self.num_actions + ) + .float() + .mean(dim=0) + ) + # log metrics if a logger is set + if self.logger: + self.logger.log_metrics( + { + "td_loss": td_loss, + "logged_actions": action_histogram, + "logged_propensities": logged_propensities, + "logged_rewards": rewards.mean(), + "model_values": model_values, + "model_action_idxs": model_action_idxs, + }, + step=self.all_batches_processed, + ) + + def _dense_to_action_dict(self, dense: torch.Tensor): + """Converts values tensor to a dict mapping action names to values. + + Example: tensor([1.0, 0.0, 1.0]) -> {"1": 1.0, "2": 0.0, "3": 1.0}. 
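The _log_dqn logging above turns greedy action indices into a per-action frequency histogram via one_hot(...).float().mean(dim=0). A toy example (illustrative only, not part of this patch):

    import torch

    num_actions = 3
    model_action_idxs = torch.tensor([[0], [2], [2], [1]])
    action_histogram = (
        torch.nn.functional.one_hot(model_action_idxs.squeeze(1), num_classes=num_actions)
        .float()
        .mean(dim=0)
    )
    # tensor([0.2500, 0.2500, 0.5000])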
+ """ + assert dense.size() == ( + self.num_actions, + ), f"Invalid dense size {dense.size()} != {(self.num_actions,)}" + retval = {} + for i, a in enumerate(self._actions): + retval[a] = dense[i] + return retval + + def validation_step(self, batch, batch_idx): + """Runs model evaluation on an input batch of data. + + Args: + batch: a batch data object, e.g. DiscreteDqnInput object. + batch_idx: an integer batch index. + + Returns: + An EvaluationDataPage object with evaluation results. + """ + if isinstance(batch, dict): + batch = rlt.DiscreteDqnInput.from_dict(batch) + rewards = self.boost_rewards(batch.reward, batch.action) + discount_tensor = self.compute_discount_tensor(batch, rewards) + td_loss = self.compute_td_loss(batch, rewards, discount_tensor) + # Show eval_td_loss in a tensorboard graph + self.log("eval_td_loss", td_loss, batch_size=batch.batch_size()) + return super().validation_step(batch, batch_idx) diff --git a/reagent/training/dqn_trainer_base.py b/reagent/training/dqn_trainer_base.py index 115d26396..ffc7fb7cc 100644 --- a/reagent/training/dqn_trainer_base.py +++ b/reagent/training/dqn_trainer_base.py @@ -2,82 +2,497 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging +from abc import abstractmethod +from typing import Dict, List, Optional +import reagent.core.types as rlt import torch -from reagent.training.rl_trainer_pytorch import RLTrainer - +import torch.nn.functional as F +from reagent.core.parameters import EvaluationParameters, RLParameters +from reagent.core.torch_utils import masked_softmax +from reagent.evaluation.evaluation_data_page import EvaluationDataPage +from reagent.evaluation.evaluator import Evaluator +from reagent.optimizer import Optimizer__Union +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.rl_trainer_pytorch import RLTrainerMixin logger = logging.getLogger(__name__) -class DQNTrainerBase(RLTrainer): +class DQNTrainerMixin: + # Q-value for action that is not possible. Guaranteed to be worse than any + # legitimate action + ACTION_NOT_POSSIBLE_VAL = -1e9 + def get_max_q_values(self, q_values, possible_actions_mask): + return self.get_max_q_values_with_target( + q_values, q_values, possible_actions_mask + ) + + def get_max_q_values_with_target( + self, q_values, q_values_target, possible_actions_mask + ): """ Used in Q-learning update. - :param states: Numpy array with shape (batch_size, state_dim). Each row - contains a representation of a state. + :param q_values: PyTorch tensor with shape (batch_size, action_dim). Each row + contains the list of Q-values for each possible action in this state. + + :param q_values_target: PyTorch tensor with shape (batch_size, action_dim). Each row + contains the list of Q-values from the target network + for each possible action in this state. - :param possible_actions_mask: Numpy array with shape (batch_size, action_dim). + :param possible_actions_mask: PyTorch tensor with shape (batch_size, action_dim). possible_actions[i][j] = 1 iff the agent can take action j from state i. - :param double_q_learning: bool to use double q-learning + Returns a tensor of maximum Q-values for every state in the batch + and also the index of the corresponding action (which is used in + evaluation_data_page.py, in create_from_tensors_dqn()). + """ # The parametric DQN can create flattened q values so we reshape here. 
q_values = q_values.reshape(possible_actions_mask.shape) - + q_values_target = q_values_target.reshape(possible_actions_mask.shape) # Set q-values of impossible actions to a very large negative number. inverse_pna = 1 - possible_actions_mask impossible_action_penalty = self.ACTION_NOT_POSSIBLE_VAL * inverse_pna q_values = q_values + impossible_action_penalty - max_q_values, max_indicies = torch.max(q_values, dim=1, keepdim=True) - return max_q_values, max_indicies + q_values_target = q_values_target + impossible_action_penalty - def get_max_q_values_with_target( - self, q_values, q_values_target, possible_actions_mask + if self.double_q_learning: + # Use indices of the max q_values from the online network to select q-values + # from the target network. This prevents overestimation of q-values. + # The torch.gather function selects the entry from each row that corresponds + # to the max_index in that row. + max_q_values, max_indicies = torch.max(q_values, dim=1, keepdim=True) + max_q_values_target = torch.gather(q_values_target, 1, max_indicies) + else: + max_q_values_target, max_indicies = torch.max( + q_values_target, dim=1, keepdim=True + ) + + return max_q_values_target, max_indicies + + +class DQNTrainerBaseLightning(DQNTrainerMixin, RLTrainerMixin, ReAgentLightningModule): + """Base trainer class for the DQN algorihtm variants. + + A base class for training variants of the DQN algorithm. + Provides methods for q-values calculation, reward boosting, and also + initializing and training counterfactual policy evaluation + metrics estimators (see https://arxiv.org/abs/1811.00260 for detail). + + Attributes: + rl_parameters: an object of a reagent.core.parameters.RLParameters + data class. + metrics_to_score: a list of strings representing reward metrics. + evaluation_parameters: an object of a + reagent.core.parameters.EvaluationParameters data class. + q_network_loss: a loss function object, currently either + torch.nn.functional.mse_loss or torch.nn.functional.smooth_l1_loss. + reward_boosts: a torch tensor containing reward boost values at + indices corresponding to respective discrete actions. + calc_cpe_in_training: a boolean flag whether to calculate CPE metrics + during training. + reward_network: a network object mapping states to rewards and + extra reward metrics. Given the number of discrete actions n and + e.g. a set of k reward metrics `m_1`, `m_2`, ..., and `m_k`, the + reward_network output has shape (batch_size, n * k), where + the [:, (k - 1) * n : k * n] sub-tensor contains the metric `m_k` + q-values estimates for each of the n actions. The order of metrics + in the output is the following: the environment reward, followed by + extra metrics sorted by their names in RewardOptions.metric_reward_values + config. + reward_network_optimizer: an optimizer object for training + reward network. + q_network_cpe: a network object mapping states to CPE metrics. + q_network_cpe_target: a copy of q_network_cpe for training stability. + q_network_cpe_optimizer: an optimizer object for training q_network_cpe. + reward_idx_offsets: a flat torch tensor containing integer offsets of + different reward metrics in the reward_network output. + evaluator: an object of a reagent.evaluation.evaluator.Evaluator class. + """ + + def __init__( + self, + rl_parameters: RLParameters, + metrics_to_score=None, + actions: Optional[List[str]] = None, + evaluation_parameters: Optional[EvaluationParameters] = None, ): """ - Used in Q-learning update. 
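The double Q-learning branch above decouples action selection (online network) from action scoring (target network). A toy sketch (made-up q-values, not part of this patch):

    import torch

    q_values = torch.tensor([[1.0, 3.0, 2.0], [0.5, 0.1, 0.9]])         # online network
    q_values_target = torch.tensor([[0.8, 2.5, 2.7], [0.4, 0.2, 0.7]])  # target network

    # Select the argmax action with the online network ...
    _, max_indicies = torch.max(q_values, dim=1, keepdim=True)          # [[1], [2]]
    # ... but score it with the target network to curb overestimation.
    max_q_values_target = torch.gather(q_values_target, 1, max_indicies)
    # tensor([[2.5000], [0.7000]])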
+ Args: + rl_parameters: an object of a reagent.core.parameters.RLParameters + data class. + metrics_to_score: a list of strings representing extra reward + metrics, excluding the reward itself. + actions: a list of string action names for available actions. + evaluation_parameters: an object of a + reagent.core.parameters.EvaluationParameters data class. + """ + super().__init__() + self.rl_parameters = rl_parameters + self.time_diff_unit_length = rl_parameters.time_diff_unit_length + self.tensorboard_logging_freq = rl_parameters.tensorboard_logging_freq + self.calc_cpe_in_training = ( + evaluation_parameters and evaluation_parameters.calc_cpe_in_training + ) + assert actions is not None + self._actions: List[str] = actions - :param states: Numpy array with shape (batch_size, state_dim). Each row - contains a representation of a state. + if rl_parameters.q_network_loss == "mse": + self.q_network_loss = F.mse_loss + elif rl_parameters.q_network_loss == "huber": + self.q_network_loss = F.smooth_l1_loss + else: + raise Exception( + "Q-Network loss type {} not valid loss.".format( + rl_parameters.q_network_loss + ) + ) - :param possible_actions_mask: Numpy array with shape (batch_size, action_dim). - possible_actions[i][j] = 1 iff the agent can take action j from - state i. + if metrics_to_score: + self.metrics_to_score = metrics_to_score + ["reward"] + else: + self.metrics_to_score = ["reward"] + + self._init_reward_boosts(rl_parameters.reward_boost) + + @abstractmethod + @torch.no_grad() + def get_detached_model_outputs(self, state): + pass + + def _init_reward_boosts(self, rl_reward_boost: Optional[Dict[str, float]]) -> None: + """Initializes reward_boosts class attribute. - :param double_q_learning: bool to use double q-learning + Given an input reward_boosts dictionary, constructs a torch tensor + containing reward boost values at indices corresponding to + respective discrete actions. Assigns resulting tensor to the + reward_boosts class attribute. + + Args: + rl_reward_boost: a dict mapping discrete actions string names to the + corresponding float reward boost values. """ + reward_boosts = torch.zeros([1, len(self._actions)]) + if rl_reward_boost is not None: + for k in rl_reward_boost.keys(): + i = self._actions.index(k) + reward_boosts[0, i] = rl_reward_boost[k] + self.register_buffer("reward_boosts", reward_boosts) - # The parametric DQN can create flattened q values so we reshape here. - q_values = q_values.reshape(possible_actions_mask.shape) - q_values_target = q_values_target.reshape(possible_actions_mask.shape) + def _check_input(self, training_batch: rlt.DiscreteDqnInput): + """Checks the shapes of input tensors in the training batch.""" + assert isinstance(training_batch, rlt.DiscreteDqnInput) + assert training_batch.not_terminal.dim() == training_batch.reward.dim() == 2 + assert ( + training_batch.not_terminal.shape[1] == training_batch.reward.shape[1] == 1 + ) + assert training_batch.action.dim() == training_batch.next_action.dim() == 2 + assert ( + training_batch.action.shape[1] + == training_batch.next_action.shape[1] + == self.num_actions + ) + if torch.logical_and( + training_batch.possible_next_actions_mask.float().sum(dim=1) == 0, + training_batch.not_terminal.squeeze().bool(), + ).any(): + # make sure there's no non-terminal state with no possible next actions + raise ValueError( + "No possible next actions. Should the environment have terminated?" + ) - if self.double_q_learning: - # Set q-values of impossible actions to a very large negative number. 
- inverse_pna = 1 - possible_actions_mask - impossible_action_penalty = self.ACTION_NOT_POSSIBLE_VAL * inverse_pna - q_values = q_values + impossible_action_penalty - # Select max_q action after scoring with online network - max_q_values, max_indicies = torch.max(q_values, dim=1, keepdim=True) - # Use q_values from target network for max_q action from online q_network - # to decouble selection & scoring, preventing overestimation of q-values - max_q_values_target = torch.gather(q_values_target, 1, max_indicies) - return max_q_values_target, max_indicies - else: - return self.get_max_q_values(q_values_target, possible_actions_mask) + @property + def num_actions(self) -> int: + """Returns a number of available discrete actions.""" + assert self._actions is not None, "Not a discrete action DQN" + return len(self._actions) @torch.no_grad() def boost_rewards( self, rewards: torch.Tensor, actions: torch.Tensor ) -> torch.Tensor: + """Applies reward boosts to the rewards tensor. + + Given the (batch_size, num_actions) actions tensor, computes the + reward boosts for each time step based on the values stored in the + reward_boosts attribute and augments the rewards tensor. + + Args: + rewards: a (batch_size, 1) shaped torch tensor with rewards values. + actions: a (batch_size, num_actions) torch tensor with executed actions. + + Returns: + a (batch_size, 1) shaped torch tensor with boosted rewards values. + """ # Apply reward boost if specified reward_boosts = torch.sum( - # pyre-fixme[16]: `DQNTrainerBase` has no attribute `reward_boosts`. actions.float() * self.reward_boosts, dim=1, keepdim=True, ) return rewards + reward_boosts + + def _initialize_cpe( + self, + reward_network, + q_network_cpe, + q_network_cpe_target, + optimizer: Optimizer__Union, + ) -> None: + """Initializes CPE networks, optimizers and an evaluator object. + + Given the number of discrete actions n and e.g. a set of reward + metrics `a`, `b`, and `c`, the reward_network output has shape + (batch_size, n * 3), where the [:, :n] sub-tensor contains the metric + `a` q-values estimates for each action, the [:, n: 2 * n] sub-tensor + contains the metric `b` q-values estimates, etc. In addition to + initializing the reward and cpe networks, this function computes + the offsets of each metric in the reward network output tensor, + i.e. the torch.tensor([0, n, 2 * n]). + + Args: + reward_network: a network object mapping states to rewards and + extra reward metrics. + q_network_cpe: a network object mapping states to CPE metrics. + The network output has the same shape as the reward_network + output: (batch_size, n * k), where n is a number of discrete + actions, k is a number of reward metrics. + q_network_cpe_target: a copy of q_network_cpe for training stability. + optimizer: an optimizer object for training q_network_cpe. + """ + if not self.calc_cpe_in_training: + # pyre-fixme[16]: `DQNTrainerBase` has no attribute `reward_network`. + self.reward_network = None + return + + assert reward_network is not None, "reward_network is required for CPE" + self.reward_network = reward_network + # pyre-fixme[16]: `DQNTrainerBase` has no attribute `reward_network_optimizer`. + self.reward_network_optimizer = optimizer + assert ( + q_network_cpe is not None and q_network_cpe_target is not None + ), "q_network_cpe and q_network_cpe_target are required for CPE" + # pyre-fixme[16]: `DQNTrainerBase` has no attribute `q_network_cpe`. 
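boost_rewards above adds a per-action bonus to the logged rewards; with one-hot actions, the masked sum simply selects the boost of the action that was taken. A toy example (hypothetical boost values, not part of this patch):

    import torch

    reward_boosts = torch.tensor([[0.5, 0.0]])        # boost action 0 by +0.5
    actions = torch.tensor([[1.0, 0.0], [0.0, 1.0]])  # one-hot logged actions
    rewards = torch.tensor([[1.0], [1.0]])

    boosted = rewards + torch.sum(actions * reward_boosts, dim=1, keepdim=True)
    # tensor([[1.5000], [1.0000]])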
+ self.q_network_cpe = q_network_cpe + # pyre-fixme[16]: `DQNTrainerBase` has no attribute `q_network_cpe_target`. + self.q_network_cpe_target = q_network_cpe_target + # pyre-fixme[16]: `DQNTrainerBase` has no attribute `q_network_cpe_optimizer`. + self.q_network_cpe_optimizer = optimizer + num_output_nodes = len(self.metrics_to_score) * self.num_actions + reward_idx_offsets = torch.arange( + 0, + num_output_nodes, + self.num_actions, + dtype=torch.long, + ) + self.register_buffer("reward_idx_offsets", reward_idx_offsets) + + reward_stripped_metrics_to_score = ( + self.metrics_to_score[:-1] if len(self.metrics_to_score) > 1 else None + ) + # pyre-fixme[16]: `DQNTrainerBase` has no attribute `evaluator`. + self.evaluator = Evaluator( + self._actions, + self.rl_parameters.gamma, + self, + metrics_to_score=reward_stripped_metrics_to_score, + ) + + def _configure_cpe_optimizers(self): + """Initializes the reward and the cpe networks optimizers.""" + target_params = list(self.q_network_cpe_target.parameters()) + source_params = list(self.q_network_cpe.parameters()) + # TODO: why is reward net commented out? + # source_params += list(self.reward_network.parameters()) + optimizers = [] + optimizers.append( + self.reward_network_optimizer.make_optimizer_scheduler( + self.reward_network.parameters() + ) + ) + optimizers.append( + self.q_network_cpe_optimizer.make_optimizer_scheduler( + self.q_network_cpe.parameters() + ) + ) + return target_params, source_params, optimizers + + def _calculate_cpes( + self, + training_batch, + states, + next_states, + all_action_scores, + all_next_action_scores, + logged_action_idxs, + discount_tensor, + not_done_mask, + ): + """Computes losses for the reward and the cpe q-values networks. + + Based on the actions taken idxs, computes the mse loss for training + the reward network and either mse or huber loss for the cpe q-network. + + Args: + training_batch: a training batch data object. + states: a (batch_size, state_dim) torch tensor containing + current states. + next_states: a (batch_size, state_dim) torch tensor containing + next states. + all_action_scores: a torch tensor containing q-network + predictions from the current states. + all_next_action_scores: a torch tensor containing q-network + predictions from the next states. + logged_action_idxs: a (batch_size, 1) torch tensor with integer + executed actions indices. + discount_tensor: a (batch_size, 1) torch tensor containing the + discount to apply, e.g. gamma ** k where k is a number of + steps used in td-error estimation. + not_done_mask: a (batch_size, 1) torch tensor with boolean values + indicating whether corresponding state is terminal. + Yields: + A reward network and a CPE q-network loss objects. 
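The CPE setup above lays the per-metric q-values out side by side and uses reward_idx_offsets to pick, for every metric at once, the column belonging to the logged action. A toy sketch of the indexing (made-up sizes, not part of this patch):

    import torch

    num_actions = 2
    num_metrics = 3  # e.g. the reward plus two extra metrics
    reward_idx_offsets = torch.arange(0, num_metrics * num_actions, num_actions)  # tensor([0, 2, 4])

    logged_action_idxs = torch.tensor([[0], [1]])         # logged action per sample
    reward_estimates = torch.arange(12.0).reshape(2, 6)   # (batch, num_metrics * num_actions)

    picked = reward_estimates.gather(1, reward_idx_offsets + logged_action_idxs)
    # row 0 gathers columns [0, 2, 4]; row 1 gathers columns [1, 3, 5]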
+ """ + if not self.calc_cpe_in_training: + return + if training_batch.extras.metrics is None: + metrics_reward_concat_real_vals = training_batch.reward + else: + metrics_reward_concat_real_vals = torch.cat( + (training_batch.reward, training_batch.extras.metrics), dim=1 + ) + + model_propensities_next_states = masked_softmax( + all_next_action_scores, + training_batch.possible_next_actions_mask + if self.maxq_learning + else training_batch.next_action, + self.rl_temperature, + ) + + ######### Train separate reward network for CPE evaluation ############# + reward_estimates = self.reward_network(states) + reward_estimates_for_logged_actions = reward_estimates.gather( + 1, self.reward_idx_offsets + logged_action_idxs + ) + reward_loss = F.mse_loss( + reward_estimates_for_logged_actions, metrics_reward_concat_real_vals + ) + yield reward_loss + + ######### Train separate q-network for CPE evaluation ############# + metric_q_values = self.q_network_cpe(states).gather( + 1, self.reward_idx_offsets + logged_action_idxs + ) + all_metrics_target_q_values = torch.chunk( + self.q_network_cpe_target(next_states).detach(), + len(self.metrics_to_score), + dim=1, + ) + target_metric_q_values = [] + for i, per_metric_target_q_values in enumerate(all_metrics_target_q_values): + per_metric_next_q_values = torch.sum( + per_metric_target_q_values * model_propensities_next_states, + 1, + keepdim=True, + ) + per_metric_next_q_values = per_metric_next_q_values * not_done_mask + per_metric_target_q_values = metrics_reward_concat_real_vals[ + :, i : i + 1 + ] + (discount_tensor * per_metric_next_q_values) + target_metric_q_values.append(per_metric_target_q_values) + + target_metric_q_values = torch.cat(target_metric_q_values, dim=1) + metric_q_value_loss = self.q_network_loss( + metric_q_values, target_metric_q_values + ) + + # The model_propensities computed below are not used right now. The CPE graphs in the Outputs + # tab use model_propensities computed in the function create_from_tensors_dqn() in evaluation_data_page.py, + # which is called on the eval_table_sample in the gather_eval_data() function below. + model_propensities = masked_softmax( + all_action_scores, + training_batch.possible_actions_mask + if self.maxq_learning + else training_batch.action, + self.rl_temperature, + ) + # Extract rewards predicted by the reward_network. The other columns will + # give predicted values for other metrics, if such were specified. + model_rewards = reward_estimates[ + :, + torch.arange( + self.reward_idx_offsets[0], + self.reward_idx_offsets[0] + self.num_actions, + ), + ] + + self.reporter.log( + reward_loss=reward_loss, + model_propensities=model_propensities, + model_rewards=model_rewards, + ) + + yield metric_q_value_loss + + def gather_eval_data(self, validation_step_outputs): + """Aggregates EvaluationDataPage objects. + + Combines a list of EvaluationDataPage objects obtained as a result + of calling the validation_step method into a single object. Switches + to CPU to avoid running out of memory on GPU as this operation can be + memory intensive. + + Args: + validation_step_outputs: a list of EvaluationDataPage objects + returned by the validation_step method. + Returns: + An EvaluationDataPage object containing concatenated data from + the input list of objects. 
+ """ + was_on_gpu = self.on_gpu + self.cpu() + eval_data = None + for edp in validation_step_outputs: + if eval_data is None: + eval_data = edp + else: + eval_data = eval_data.append(edp) + if eval_data and eval_data.mdp_id is not None: + eval_data = eval_data.sort() + eval_data = eval_data.compute_values(self.gamma) + eval_data.validate() + if was_on_gpu: + self.cuda() + return eval_data + + def validation_step(self, batch, batch_idx): + """Runs model evaluation on an input batch of data.""" + if isinstance(batch, dict): + batch = rlt.DiscreteDqnInput.from_dict(batch) + # HACK: Move to cpu in order to hold more batches in memory + # This is only needed when trainers need in-memory + # EvaluationDataPages of the full evaluation dataset + return EvaluationDataPage.create_from_training_batch(batch, self).cpu() + + def validation_epoch_end(self, valid_step_outputs): + # As explained in the comments to the validation_step function in + # pytorch_lightning/core/lightning.py, this function is generally used as follows: + # val_outs = [] + # for val_batch in val_data: + # out = validation_step(val_batch) + # val_outs.append(out) + # validation_epoch_end(val_outs) + + # The input arguments of validation_epoch_end() is a list of EvaluationDataPages, + # which matches the way it is used in gather_eval_data() above. + + eval_data = self.gather_eval_data(valid_step_outputs) + if eval_data and eval_data.mdp_id is not None: + cpe_details = self.evaluator.evaluate_post_training(eval_data) + self.reporter.log(cpe_details=cpe_details) diff --git a/reagent/training/gradient_free/__init__.py b/reagent/training/gradient_free/__init__.py index e69de29bb..5be5087fd 100644 --- a/reagent/training/gradient_free/__init__.py +++ b/reagent/training/gradient_free/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/training/gradient_free/ars_util.py b/reagent/training/gradient_free/ars_util.py new file mode 100644 index 000000000..d1c6e20fc --- /dev/null +++ b/reagent/training/gradient_free/ars_util.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from operator import itemgetter + +import numpy as np +import torch + + +""" +Utility functions for Advanced Random Search (ARS) algorithm +based on the paper "Simple random search provides a competitive approach +to reinforcement learning", Mania et al. +https://arxiv.org/abs/1803.07055 + +Here, we show an example of training a data reweighting policy using ARS. The policy +is learned to weight each sample for training a supervised learning model. ARS is a +competitive alternative to the policy gradient method in "Data Valuation using +Reinforcement Learning", Yoon, Arik, and Pfister. 
+https://arxiv.org/abs/1909.11671 + + + def reward_func(pos_param, neg_param): + # Return rewards for positively/negatively perturbed parameters + # model = a supervised learning model + # X = training features + # y = labels + + # Initialize a supervised learning model + model_pos = model.init() + # Sample weights are bounded within (0, 1) + pos_weight = torch.sigmoid(torch.matmul(torch.column_stack((X, y)), pos_param)) + model_pos.fit(X, y, sample_weight=pos_weight) + r_pos = metric(model_pos.predict(X_e), y_e) + + model_neg = model.init() + neg_weight = torch.sigmoid(torch.matmul(torch.column_stack((X, y)), neg_param)) + model_neg.fit(X, y, sample_weight=neg_weight) + r_neg = metric(model_neg.predict(X_e), y_e) + + return (r_pos, r_neg) + + # Training + # feature_dim = feature dimension + 1 (for label) + # n_pert = given number of random perturbations + # alpha = step size + # noise = noise level (between 0 ~ 1) added to the random perturbations + ars_opt = ARSOptimizer(feature_dim, n_pert, alpha=alpha, noise=noise) + + for _ in range(n_generations): + perturbed_params = ars_opt.sample_perturbed_params() + rewards = [] + for idx in range(0, len(perturbed_params)): + pos_param, neg_param = params[idx] + rewards.extend(reward_func(pos_param, neg_param)) + ars_opt.update_ars_params(rewards) +""" + + +class ARSOptimizer: + """ARSOptimizer is supposed to maximize an objective function""" + + def __init__( + self, + feature_dim, + n_pert: int = 10, + rand_ars_params: bool = False, + alpha: int = 1, + noise: int = 1, + b_top=None, + ) -> None: + self.feature_dim = feature_dim + self.ars_params = ( + np.random.randn(feature_dim) if rand_ars_params else np.zeros(feature_dim) + ) + self.alpha = alpha + self.noise = noise + self.n_pert = n_pert + self.b_top = b_top if b_top is not None else n_pert + self.perturbations = [] + + def update_ars_params(self, rewards: torch.Tensor) -> None: + """ + reward should be something like + [reward_pert1_pos, reward_pert1_neg, reward_pert2_pos, reward_pert2_neg, ...] 
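As a self-contained complement to the pseudocode in the module docstring, a toy run of ARSOptimizer on a one-dimensional objective could look like the following (illustrative only, not part of this patch; the import path is the one introduced by this diff and the hyperparameters are arbitrary):

    import torch
    from reagent.training.gradient_free.ars_util import ARSOptimizer

    # Toy objective to maximize: -(w - 3)^2 over a single scalar parameter.
    def reward(w: torch.Tensor) -> float:
        return -float((w - 3.0) ** 2)

    ars_opt = ARSOptimizer(feature_dim=1, n_pert=8, alpha=0.1, noise=0.5)
    for _ in range(50):
        rewards = []
        for pos_param, neg_param in ars_opt.sample_perturbed_params():
            rewards.extend([reward(pos_param), reward(neg_param)])
        ars_opt.update_ars_params(torch.tensor(rewards))
    # ars_opt.ars_params should drift toward [3.0]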
+ """ + assert ( + len(self.perturbations) > 0 + ), "must call sample_perturbed_params before this function" + assert rewards.shape == ( + 2 * self.n_pert, + ), "rewards must have length 2 * n_pert" + rank = {} + rewards = rewards.numpy() + for pert_idx in range(self.n_pert): + reward_pos = rewards[2 * pert_idx] + reward_neg = rewards[2 * pert_idx + 1] + rank[pert_idx] = max(reward_pos, reward_neg) + self.perturbations[pert_idx] *= reward_pos - reward_neg + std_r = np.std(rewards) + weight_sum = 0 + for pert_idx in list( + dict(sorted(rank.items(), key=itemgetter(1), reverse=True)).keys() + )[: self.b_top]: + weight_sum += self.perturbations[pert_idx] + self.ars_params = self.ars_params + self.alpha * weight_sum / ( + self.b_top * (std_r if std_r > 0 else 1) + ) + self.perturbations = [] + + def sample_perturbed_params(self): + """Return tuples of (pos_param, neg_param)""" + self.perturbations = [] + perturbed_params = [] + for _ in range(self.n_pert): + pert = np.random.randn(self.feature_dim) + self.perturbations.append(pert) + perturbed_params.append( + ( + torch.from_numpy(self.ars_params + self.noise * pert).float(), + torch.from_numpy(self.ars_params - self.noise * pert).float(), + ) + ) + return perturbed_params diff --git a/reagent/training/gradient_free/es_worker.py b/reagent/training/gradient_free/es_worker.py index fc3a5a5f4..d8a30b0f4 100644 --- a/reagent/training/gradient_free/es_worker.py +++ b/reagent/training/gradient_free/es_worker.py @@ -7,7 +7,7 @@ import torch.distributed as distributed import torch.nn import torch.optim -from reagent.parameters import EvolutionParameters +from reagent.core.parameters import EvolutionParameters from reagent.training.gradient_free.evolution_pool import EvolutionPool from torch.distributed import ProcessGroup @@ -20,7 +20,6 @@ def __init__( self, individual_pool: EvolutionPool, es_params: EvolutionParameters, - # pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type. process_group: ProcessGroup, num_nodes: int, ) -> None: diff --git a/reagent/training/gradient_free/evolution_pool.py b/reagent/training/gradient_free/evolution_pool.py index 125f95833..0881870f5 100644 --- a/reagent/training/gradient_free/evolution_pool.py +++ b/reagent/training/gradient_free/evolution_pool.py @@ -8,7 +8,7 @@ import torch.fb.rendezvous.zeus import torch.nn import torch.optim -from reagent.parameters import EvolutionParameters +from reagent.core.parameters import EvolutionParameters logger = logging.getLogger(__name__) @@ -50,7 +50,6 @@ def __init__( self.parent_tensors[tensor_name] = torch.randn( tensor_size, dtype=torch.float ) - # pyre-fixme[16]: `Tensor` has no attribute `grad`. self.parent_tensors[tensor_name].grad = torch.randn( tensor_size, dtype=torch.float ) @@ -68,7 +67,6 @@ def populate_children(self, iteration: int): individual_tensor = individual[tensor_name] individual_tensor.normal_(0, self.es_params.mutation_power) - # pyre-fixme[16]: `Tensor` has no attribute `add_`. individual_tensor.add_(parent_tensor) def apply_global_reward(self, rewards: torch.Tensor, next_iteration: int): @@ -76,14 +74,13 @@ def apply_global_reward(self, rewards: torch.Tensor, next_iteration: int): if torch.abs(std_dev) > 1e-6: normalized_rewards = (rewards - torch.mean(rewards)) / std_dev for parent_tensor in self.parent_tensors.values(): - # pyre-fixme[16]: `Tensor` has no attribute `grad`. + # pyre-fixme[16]: Optional type has no attribute `zero_`. 
parent_tensor.grad.zero_() for i, individual in enumerate(self.population_tensors): for tensor_name, parent_tensor in self.parent_tensors.items(): individual_tensor = individual[tensor_name] # Subtract the parent to get the gradient estimate - # pyre-fixme[16]: `Tensor` has no attribute `sub_`. individual_tensor.sub_(parent_tensor) # Amplify the gradient by the reward @@ -96,6 +93,7 @@ def apply_global_reward(self, rewards: torch.Tensor, next_iteration: int): * -1 ) + # pyre-fixme[16]: Optional type has no attribute `__iadd__`. parent_tensor.grad += individual_tensor self.optimizer.step() diff --git a/reagent/training/imitator_training.py b/reagent/training/imitator_training.py index 25aef36ef..3cf5dc75b 100644 --- a/reagent/training/imitator_training.py +++ b/reagent/training/imitator_training.py @@ -4,61 +4,10 @@ import logging import torch -from reagent.core.configuration import resolve_defaults -from reagent.core.dataclasses import field -from reagent.optimizer.union import Optimizer__Union -from reagent.parameters import RLParameters -from reagent.training.rl_trainer_pytorch import RLTrainer - logger = logging.getLogger(__name__) -class ImitatorTrainer(RLTrainer): - @resolve_defaults - def __init__( - self, - imitator, - use_gpu: bool = False, - rl: RLParameters = field(default_factory=RLParameters), # noqa: B008 - minibatch_size: int = 1024, - minibatches_per_step: int = 1, - optimizer: Optimizer__Union = field( # noqa: B008 - default_factory=Optimizer__Union.default - ), - ) -> None: - super().__init__(rl, use_gpu=use_gpu) - self.minibatch_size = minibatch_size - self.minibatches_per_step = minibatches_per_step or 1 - self.imitator = imitator - self.imitator_optimizer = optimizer.make_optimizer(imitator.parameters()) - - def _imitator_accuracy(self, predictions, true_labels): - match_tensor = predictions == true_labels - matches = int(match_tensor.sum()) - return round(matches / len(predictions), 3) - - @torch.no_grad() - def train(self, training_batch, train=True): - learning_input = training_batch.training_input - - with torch.enable_grad(): - action_preds = self.imitator(learning_input.state.float_features) - # Classification label is index of action with value 1 - pred_action_idxs = torch.max(action_preds, dim=1)[1] - actual_action_idxs = torch.max(learning_input.action, dim=1)[1] - - if train: - imitator_loss = torch.nn.CrossEntropyLoss() - bcq_loss = imitator_loss(action_preds, actual_action_idxs) - bcq_loss.backward() - self._maybe_run_optimizer( - self.imitator_optimizer, self.minibatches_per_step - ) - - return self._imitator_accuracy(pred_action_idxs, actual_action_idxs) - - def get_valid_actions_from_imitator(imitator, input, drop_threshold): """Create mask for non-viable actions under the imitator.""" if isinstance(imitator, torch.nn.Module): diff --git a/reagent/training/loss_reporter.py b/reagent/training/loss_reporter.py deleted file mode 100644 index f21677e9d..000000000 --- a/reagent/training/loss_reporter.py +++ /dev/null @@ -1,363 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
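# A minimal usage sketch of the ARSOptimizer introduced above, driven on a toy
# quadratic objective. It is illustrative only (not from the patch) and uses just
# the API shown in this diff: sample_perturbed_params() yields (pos_param, neg_param)
# pairs and update_ars_params() expects a tensor of length 2 * n_pert.
import numpy as np
import torch

feature_dim = 5
target = np.ones(feature_dim)  # illustrative optimum
ars_opt = ARSOptimizer(feature_dim, n_pert=8, alpha=0.1, noise=0.05)

for _ in range(200):
    rewards = []
    for pos_param, neg_param in ars_opt.sample_perturbed_params():
        # Reward is larger the closer the perturbed parameters are to `target`
        rewards.append(-float(((pos_param.numpy() - target) ** 2).sum()))
        rewards.append(-float(((neg_param.numpy() - target) ** 2).sum()))
    ars_opt.update_ars_params(torch.tensor(rewards))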
- -import logging -import math -from collections import deque -from typing import Deque, List, NamedTuple, Optional - -import numpy as np -import torch -from reagent.tensorboardX import SummaryWriterContext - - -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) - -LOSS_REPORT_INTERVAL = 100 - - -class BatchStats(NamedTuple): - td_loss: Optional[torch.Tensor] = None - reward_loss: Optional[torch.Tensor] = None - imitator_loss: Optional[torch.Tensor] = None - logged_actions: Optional[torch.Tensor] = None - logged_propensities: Optional[torch.Tensor] = None - logged_rewards: Optional[torch.Tensor] = None - logged_values: Optional[torch.Tensor] = None - model_propensities: Optional[torch.Tensor] = None - model_rewards: Optional[torch.Tensor] = None - model_values: Optional[torch.Tensor] = None - model_values_on_logged_actions: Optional[torch.Tensor] = None - model_action_idxs: Optional[torch.Tensor] = None - - def write_summary(self, actions: List[str]): - if actions: - for field, log_key in [ - ("logged_actions", "actions/logged"), - ("model_action_idxs", "actions/model"), - ]: - val = getattr(self, field) - if val is None: - continue - for i, action in enumerate(actions): - # pyre-fixme[16]: `SummaryWriterContext` has no attribute - # `add_scalar`. - SummaryWriterContext.add_scalar( - "{}/{}".format(log_key, action), (val == i).sum().item() - ) - - for field, log_key in [ - ("td_loss", "td_loss"), - ("imitator_loss", "imitator_loss"), - ("reward_loss", "reward_loss"), - ("logged_propensities", "propensities/logged"), - ("logged_rewards", "reward/logged"), - ("logged_values", "value/logged"), - ("model_values_on_logged_actions", "value/model_logged_action"), - ]: - val = getattr(self, field) - if val is None: - continue - assert len(val.shape) == 1 or ( - len(val.shape) == 2 and val.shape[1] == 1 - ), "Unexpected shape for {}: {}".format(field, val.shape) - self._log_histogram_and_mean(log_key, val) - - for field, log_key in [ - ("model_propensities", "propensities/model"), - ("model_rewards", "reward/model"), - ("model_values", "value/model"), - ]: - val = getattr(self, field) - if val is None: - continue - if ( - len(val.shape) == 1 or (len(val.shape) == 2 and val.shape[1] == 1) - ) and not actions: - self._log_histogram_and_mean(log_key, val) - elif len(val.shape) == 2 and val.shape[1] == len(actions): - for i, action in enumerate(actions): - self._log_histogram_and_mean(f"{log_key}/{action}", val[:, i]) - else: - raise ValueError( - "Unexpected shape for {}: {}; actions: {}".format( - field, val.shape, actions - ) - ) - - def _log_histogram_and_mean(self, log_key, val): - try: - SummaryWriterContext.add_histogram(log_key, val) - SummaryWriterContext.add_scalar(f"{log_key}/mean", val.mean()) - except ValueError: - logger.warning( - f"Cannot create histogram for key: {log_key}; " - "this is likely because you have NULL value in your input; " - f"value: {val}" - ) - raise - - @staticmethod - def add_custom_scalars(action_names: Optional[List[str]]): - if not action_names: - return - - SummaryWriterContext.add_custom_scalars_multilinechart( - [ - "propensities/model/{}/mean".format(action_name) - for action_name in action_names - ], - category="propensities", - title="model", - ) - SummaryWriterContext.add_custom_scalars_multilinechart( - [ - "propensities/logged/{}/mean".format(action_name) - for action_name in action_names - ], - category="propensities", - title="logged", - ) - SummaryWriterContext.add_custom_scalars_multilinechart( - 
["actions/logged/{}".format(action_name) for action_name in action_names], - category="actions", - title="logged", - ) - SummaryWriterContext.add_custom_scalars_multilinechart( - ["actions/model/{}".format(action_name) for action_name in action_names], - category="actions", - title="model", - ) - - -def merge_tensor_namedtuple_list(l, cls): - def merge_tensor(f): - vals = [getattr(e, f) for e in l] - not_none_vals = [v for v in vals if v is not None] - assert len(not_none_vals) == 0 or len(not_none_vals) == len(vals) - if not not_none_vals: - return None - return torch.cat(not_none_vals, dim=0) - - return cls(**{f: merge_tensor(f) for f in cls._fields}) - - -class StatsByAction(object): - def __init__(self, actions): - self.stats = {action: [] for action in actions} - - def append(self, stats): - for k in stats: - assert k in self.stats - for k in self.stats: - v = stats.get(k, 0) - if isinstance(v, torch.Tensor): - v = v.item() - self.stats[k].append(v) - - def items(self): - return self.stats.items() - - def __len__(self): - return len(self.stats) - - -class NoOpLossReporter: - def report(self, **kwargs): - pass - - def flush(self): - pass - - -class LossReporter(object): - RECENT_WINDOW_SIZE = 100 - - def __init__(self, action_names: Optional[List[str]] = None): - assert action_names is None or len(action_names) > 0 - self.action_names: List[str] = action_names or [] - - self.running_reward: Deque[float] = deque(maxlen=int(1e6)) - - self.td_loss: List[float] = [] - self.reward_loss: List[float] = [] - self.imitator_loss: List[float] = [] - self.logged_action_q_value: List[float] = [] - self.logged_action_counts = {action: 0 for action in self.action_names} - self.model_values = StatsByAction(self.action_names) - self.model_value_stds = StatsByAction(self.action_names) - self.model_action_counts = StatsByAction(self.action_names) - self.model_action_counts_cumulative = { - action: 0 for action in self.action_names - } - self.model_action_distr = StatsByAction(self.action_names) - - self.incoming_stats: List[BatchStats] = [] - - self.loss_report_interval = LOSS_REPORT_INTERVAL - - BatchStats.add_custom_scalars(action_names) - - @property - def num_batches(self): - return len(self.td_loss) - - def report(self, **kwargs): - def _to_tensor(v): - if v is None: - return None - if not isinstance(v, torch.Tensor): - v = torch.tensor(v) - if len(v.shape) == 0: - v = v.reshape(1) - return v.detach().cpu() - - kwargs = {k: _to_tensor(v) for k, v in kwargs.items()} - batch_stats = BatchStats(**kwargs) - self.incoming_stats.append(batch_stats) - if len(self.incoming_stats) >= self.loss_report_interval: - self.flush() - - @torch.no_grad() - def flush(self): - if not len(self.incoming_stats): - logger.info("Nothing to report") - return - - logger.info("Loss on {} batches".format(len(self.incoming_stats))) - - batch_stats = merge_tensor_namedtuple_list(self.incoming_stats, BatchStats) - batch_stats.write_summary(self.action_names) - - print_details = "Loss:\n" - - td_loss_mean = float(batch_stats.td_loss.mean()) - self.td_loss.append(td_loss_mean) - print_details = print_details + "TD LOSS: {0:.3f}\n".format(td_loss_mean) - - if batch_stats.logged_rewards is not None: - flattened_rewards = torch.flatten(batch_stats.logged_rewards).tolist() - self.running_reward.extend(flattened_rewards) - - if batch_stats.reward_loss is not None: - reward_loss_mean = float(batch_stats.reward_loss.mean()) - self.reward_loss.append(reward_loss_mean) - print_details = print_details + "REWARD LOSS: {0:.3f}\n".format( - 
reward_loss_mean - ) - - if batch_stats.imitator_loss is not None: - imitator_loss_mean = float(batch_stats.imitator_loss.mean()) - self.imitator_loss.append(imitator_loss_mean) - print_details = print_details + "IMITATOR LOSS: {0:.3f}\n".format( - imitator_loss_mean - ) - - if batch_stats.model_values is not None and self.action_names: - self.model_values.append( - dict(zip(self.action_names, batch_stats.model_values.mean(dim=0))) - ) - self.model_value_stds.append( - dict(zip(self.action_names, batch_stats.model_values.std(dim=0))) - ) - - if batch_stats.model_values_on_logged_actions is not None: - self.logged_action_q_value.append( - batch_stats.model_values_on_logged_actions.mean().item() - ) - - if ( - batch_stats.logged_actions is not None - and batch_stats.model_action_idxs is not None - ): - logged_action_counts = { - action: (batch_stats.logged_actions == i).sum().item() - for i, action in enumerate(self.action_names) - } - model_action_counts = { - action: (batch_stats.model_action_idxs == i).sum().item() - for i, action in enumerate(self.action_names) - } - print_details += "The distribution of logged actions : {}\n".format( - logged_action_counts - ) - print_details += "The distribution of model actions : {}\n".format( - model_action_counts - ) - for action, count in logged_action_counts.items(): - self.logged_action_counts[action] += count - - self.model_action_counts.append(model_action_counts) - - for action, count in model_action_counts.items(): - self.model_action_counts_cumulative[action] += count - - total = float(sum(model_action_counts.values())) - self.model_action_distr.append( - {action: count / total for action, count in model_action_counts.items()} - ) - - print_details += "Batch Evaluator Finished" - for print_detail in print_details.split("\n"): - logger.info(print_detail) - - self.incoming_stats.clear() - - def get_td_loss_after_n(self, n): - return self.td_loss[n:] - - def get_recent_td_loss(self): - return LossReporter.calculate_recent_window_average( - self.td_loss, LossReporter.RECENT_WINDOW_SIZE, num_entries=1 - ) - - def get_recent_reward_loss(self): - return LossReporter.calculate_recent_window_average( - self.reward_loss, LossReporter.RECENT_WINDOW_SIZE, num_entries=1 - ) - - def get_recent_imitator_loss(self): - return LossReporter.calculate_recent_window_average( - self.imitator_loss, LossReporter.RECENT_WINDOW_SIZE, num_entries=1 - ) - - def get_logged_action_distribution(self): - total_actions = 1.0 * sum(self.logged_action_counts.values()) - return {k: (v / total_actions) for k, v in self.logged_action_counts.items()} - - def get_model_action_distribution(self): - total_actions = 1.0 * sum(self.model_action_counts_cumulative.values()) - return { - k: (v / total_actions) - for k, v in self.model_action_counts_cumulative.items() - } - - def get_recent_rewards(self): - return self.running_reward - - def log_to_tensorboard(self, epoch: int) -> None: - def none_to_zero(x: Optional[float]) -> float: - if x is None or math.isnan(x): - return 0.0 - return x - - for name, value in [ - ("Training/td_loss", self.get_recent_td_loss()), - ("Training/reward_loss", self.get_recent_reward_loss()), - ("Training/imitator_loss", self.get_recent_imitator_loss()), - ]: - # pyre-fixme[16]: `SummaryWriterContext` has no attribute `add_scalar`. 
- SummaryWriterContext.add_scalar(name, none_to_zero(value), epoch) - - @staticmethod - def calculate_recent_window_average(arr, window_size, num_entries): - if len(arr) > 0: - begin = max(0, len(arr) - window_size) - return np.mean(np.array(arr[begin:]), axis=0) - else: - logger.error("Not enough samples for evaluation.") - if num_entries == 1: - return float("nan") - else: - return [float("nan")] * num_entries diff --git a/reagent/training/multi_stage_trainer.py b/reagent/training/multi_stage_trainer.py new file mode 100644 index 000000000..0403c28ae --- /dev/null +++ b/reagent/training/multi_stage_trainer.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import bisect +import functools +import itertools +from collections import OrderedDict +from typing import Dict, List, Tuple + +import torch.nn as nn +from pytorch_lightning.loops.optimization.optimizer_loop import ClosureResult +from reagent.core.utils import lazy_property + +from .reagent_lightning_module import ReAgentLightningModule + + +class MultiStageTrainer(ReAgentLightningModule): + def __init__( + self, + trainers: List[ReAgentLightningModule], + epochs: List[int], + assign_reporter_function=None, + flush_reporter_function=None, + automatic_optimization: bool = True, + ) -> None: + super().__init__(automatic_optimization=automatic_optimization) + # NB: wrapping in a ModuleList so the state can be saved + self._trainers = nn.ModuleList(trainers) + self._assign_reporter_function = assign_reporter_function + self._flush_reporter_function = ( + functools.partial(flush_reporter_function, self) + if flush_reporter_function + else self._flush_reporter + ) + self._in_testing_loop = False + + # Cumulative sum of number of epochs up to the index (of trainers) + self._trainer_acc_epochs = [0] + epochs + for i in range(1, len(epochs) + 1): + self._trainer_acc_epochs[i] += self._trainer_acc_epochs[i - 1] + + # Num of epochs for each trainer. 
Used to check if the sum of them + # equals to num_epochs used in pytorch-lightning trainer + self.trainer_epoch_mapping = OrderedDict() + for t, e in zip(trainers, epochs): + trainer_name = type(t).__name__ + self.trainer_epoch_mapping[trainer_name] = e + + @property + def multi_stage_total_epochs(self): + return self._trainer_acc_epochs[-1] + + def set_reporter(self, reporter) -> None: + super().set_reporter(reporter) + if self._assign_reporter_function: + self._assign_reporter_function(self._trainers, reporter) + else: + # By default, assume CompoundReporter with the same + # number of reporters as trainers + assert len(self._trainers) == len( + reporter._reporters + ), f"{len(self._trainers)} != {len(reporter._reporters)}" + for t, r in zip(self._trainers, reporter._reporters): + t.set_reporter(r) + + @lazy_property + def _optimizer_step_to_trainer_idx(self) -> Dict[int, Tuple[int, int]]: + mapping = {} + offset = 0 + + for i, t in enumerate(self._trainers): + num_optimizing_steps = t._num_optimizing_steps + for j in range(num_optimizing_steps): + mapping[offset + j] = (i, offset) + offset += num_optimizing_steps + + return mapping + + def _flush_reporter(self, reporter, epoch) -> None: + """ + By default, assume CompoundReporter with the same + number of reporters as trainers + """ + if not self._in_testing_loop: + epoch_trainer_idx = self._get_trainer_idx_from_epoch() + reporter._reporters[epoch_trainer_idx].flush(epoch) + else: + for r in reporter._reporters: + r.flush(epoch) + + def on_fit_start(self) -> None: + # pyre-fixme[16]: `MultiStageTrainer` has no attribute `_starting_epoch`. + self._starting_epoch = self.trainer.current_epoch + # Connecting pl.Trainer to stage trainers + for t in self._trainers: + t.trainer = self.trainer + t.on_fit_start() + + self.reporter.set_flush_function(self._flush_reporter_function) + + def on_fit_end(self) -> None: + del self._starting_epoch + # Disconnecting + for t in self._trainers: + t.on_fit_end() + del t.trainer + + self.reporter.set_flush_function(None) + + def on_test_start(self) -> None: + # pyre-fixme[16]: `MultiStageTrainer` has no attribute `_starting_epoch`. 
+ self._starting_epoch = self.trainer.current_epoch + self._in_testing_loop = True + + for t in self._trainers: + t.on_test_start() + + def on_test_end(self) -> None: + del self._starting_epoch + self._in_testing_loop = False + for t in self._trainers: + t.on_test_end() + + def _get_trainer_idx_from_epoch(self) -> int: + # Cycling through the trainers + epoch = (self.trainer.current_epoch - self._starting_epoch) % ( + self._trainer_acc_epochs[-1] + ) + trainer_idx = bisect.bisect_right(self._trainer_acc_epochs, epoch) - 1 + + return trainer_idx + + def configure_optimizers(self): + # FIXME: Doesn't support LRScheduler yet + return list( + itertools.chain(*[t.configure_optimizers() for t in self._trainers]) + ) + + def training_step(self, batch, batch_idx: int, optimizer_idx: int = 0): + trainer_idx, offset = self._optimizer_step_to_trainer_idx[optimizer_idx] + epoch_trainer_idx = self._get_trainer_idx_from_epoch() + assert ( + trainer_idx == epoch_trainer_idx + ), f"Got {trainer_idx}; expected {epoch_trainer_idx}" + return self._trainers[trainer_idx].training_step( + batch, batch_idx, optimizer_idx - offset + ) + + def training_epoch_end(self, outputs) -> None: + epoch_trainer_idx = self._get_trainer_idx_from_epoch() + self._trainers[epoch_trainer_idx].training_epoch_end(outputs) + + def validation_step(self, *args, **kwargs): + epoch_trainer_idx = self._get_trainer_idx_from_epoch() + return self._trainers[epoch_trainer_idx].validation_step(*args, **kwargs) + + def validation_epoch_end(self, outputs) -> None: + epoch_trainer_idx = self._get_trainer_idx_from_epoch() + self._trainers[epoch_trainer_idx].validation_epoch_end(outputs) + + def test_step(self, *args, **kwargs): + return { + str(i): trainer.test_step(*args, **kwargs) + for i, trainer in enumerate(self._trainers) + } + + def test_epoch_end(self, outputs) -> None: + for i, trainer in enumerate(self._trainers): + trainer.test_epoch_end([o[str(i)] for o in outputs]) + + def optimizer_step( + self, + epoch: int, + batch_idx: int, + optimizer, + optimizer_idx: int, + optimizer_closure, + on_tpu: int = False, + using_native_amp: int = False, + using_lbfgs: int = False, + ) -> None: + assert epoch == self.trainer.current_epoch + epoch_trainer_idx = self._get_trainer_idx_from_epoch() + optimizer_trainer_idx, offset = self._optimizer_step_to_trainer_idx[ + optimizer_idx + ] + if epoch_trainer_idx == optimizer_trainer_idx: + # FIXME: epoch argument is not really correct + # Trainer will see the total epochs, including those epochs they + # are inactive. 
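# A self-contained sketch of the epoch-to-stage bookkeeping used by MultiStageTrainer
# above: cumulative epoch counts plus bisect_right select the active stage trainer,
# mirroring _trainer_acc_epochs and _get_trainer_idx_from_epoch. The per-stage epoch
# counts below are made up for illustration.
import bisect

epochs_per_stage = [3, 2, 4]
acc = [0] + epochs_per_stage
for i in range(1, len(acc)):
    acc[i] += acc[i - 1]  # -> [0, 3, 5, 9]

def stage_for_epoch(current_epoch: int, starting_epoch: int = 0) -> int:
    # Cycle through the stages once all of them have run
    epoch = (current_epoch - starting_epoch) % acc[-1]
    return bisect.bisect_right(acc, epoch) - 1

assert [stage_for_epoch(e) for e in range(9)] == [0, 0, 0, 1, 1, 2, 2, 2, 2]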
+ self._trainers[epoch_trainer_idx].optimizer_step( + epoch, + batch_idx, + optimizer, + optimizer_idx - offset, + optimizer_closure, + on_tpu=on_tpu, + using_native_amp=using_native_amp, + using_lbfgs=using_lbfgs, + ) + # FIXME: this is a hack around https://github.com/PyTorchLightning/pytorch-lightning/pull/9360 + # which assumes that the optimizer closure will be consumed per training step invocation + # however this is not true in the multi-stage trainer as the training step is called for *all* of the + # optimizers configured under `trainers` even though only one lightning module is active at a given time + # A more robust solution would be to use manual optimization, where the lightning trainer does no inspection + # of the optimization closure for further processing + elif hasattr(optimizer_closure, "_result"): + optimizer_closure._result = ClosureResult(closure_loss=None) diff --git a/reagent/training/parameters.py b/reagent/training/parameters.py index 11683270f..0d9d572d6 100644 --- a/reagent/training/parameters.py +++ b/reagent/training/parameters.py @@ -2,11 +2,21 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from reagent.core.configuration import make_config_class +from reagent.core.types import BaseDataClass +from .behavioral_cloning_trainer import BehavioralCloningTrainer from .c51_trainer import C51Trainer +from .cb.deep_represent_linucb_trainer import DeepRepresentLinUCBTrainer +from .cb.disjoint_linucb_trainer import DisjointLinUCBTrainer +from .cb.linucb_trainer import LinUCBTrainer +from .discrete_crr_trainer import DiscreteCRRTrainer from .dqn_trainer import DQNTrainer from .parametric_dqn_trainer import ParametricDQNTrainer +from .ppo_trainer import PPOTrainer from .qrdqn_trainer import QRDQNTrainer +from .ranking.seq2slate_trainer import Seq2SlateTrainer +from .reinforce_trainer import ReinforceTrainer +from .reward_network_trainer import RewardNetTrainer from .sac_trainer import SACTrainer from .slate_q_trainer import SlateQTrainer from .td3_trainer import TD3Trainer @@ -14,7 +24,7 @@ @make_config_class( SACTrainer.__init__, - blacklist=["use_gpu", "actor_network", "q1_network", "q2_network", "value_network"], + blocklist=["use_gpu", "actor_network", "q1_network", "q2_network", "value_network"], ) class SACTrainerParameters: pass @@ -22,14 +32,35 @@ class SACTrainerParameters: @make_config_class( TD3Trainer.__init__, - blacklist=["use_gpu", "actor_network", "q1_network", "q2_network"], + blocklist=["use_gpu", "actor_network", "q1_network", "q2_network"], ) class TD3TrainerParameters: pass @make_config_class( - SlateQTrainer.__init__, blacklist=["use_gpu", "q_network", "q_network_target"] + DiscreteCRRTrainer.__init__, + blocklist=[ + "use_gpu", + "actor_network", + "actor_network_target", + "q1_network", + "q1_network_target", + "reward_network", + "q2_network", + "q2_network_target", + "q_network_cpe", + "q_network_cpe_target", + "metrics_to_score", + "evaluation", + ], +) +class CRRTrainerParameters: + pass + + +@make_config_class( + SlateQTrainer.__init__, blocklist=["use_gpu", "q_network", "q_network_target"] ) class SlateQTrainerParameters: pass @@ -37,7 +68,7 @@ class SlateQTrainerParameters: @make_config_class( ParametricDQNTrainer.__init__, - blacklist=["use_gpu", "q_network", "q_network_target", "reward_network"], + blocklist=["use_gpu", "q_network", "q_network_target", "reward_network"], ) class ParametricDQNTrainerParameters: pass @@ -45,7 +76,7 @@ class ParametricDQNTrainerParameters: @make_config_class( DQNTrainer.__init__, 
- blacklist=[ + blocklist=[ "use_gpu", "q_network", "q_network_target", @@ -55,6 +86,7 @@ class ParametricDQNTrainerParameters: "metrics_to_score", "imitator", "loss_reporter", + "evaluation", ], ) class DQNTrainerParameters: @@ -63,7 +95,7 @@ class DQNTrainerParameters: @make_config_class( QRDQNTrainer.__init__, - blacklist=[ + blocklist=[ "use_gpu", "q_network", "q_network_target", @@ -72,6 +104,7 @@ class DQNTrainerParameters: "q_network_cpe", "q_network_cpe_target", "loss_reporter", + "evaluation", ], ) class QRDQNTrainerParameters: @@ -80,13 +113,89 @@ class QRDQNTrainerParameters: @make_config_class( C51Trainer.__init__, - blacklist=[ + blocklist=[ "use_gpu", "q_network", "q_network_target", "metrics_to_score", "loss_reporter", + "evaluation", ], ) class C51TrainerParameters: pass + + +@make_config_class(RewardNetTrainer.__init__, blocklist=["reward_net"]) +class RewardNetworkTrainerParameters: + pass + + +@make_config_class(BehavioralCloningTrainer.__init__, blocklist=["bc_net"]) +class BehavioralCloningTrainerParameters: + pass + + +@make_config_class( + Seq2SlateTrainer.__init__, + blocklist=[ + "use_gpu", + "seq2slate_net", + "baseline_net", + "baseline_warmup_num_batches", + ], +) +class Seq2SlateTrainerParameters(BaseDataClass): + pass + + +@make_config_class( + ReinforceTrainer.__init__, + blocklist=[ + "policy", + "value_net", + ], +) +class ReinforceTrainerParameters: + pass + + +@make_config_class( + PPOTrainer.__init__, + blocklist=[ + "policy", + "value_net", + ], +) +class PPOTrainerParameters: + pass + + +@make_config_class( + LinUCBTrainer.__init__, + blocklist=[ + "policy", + ], +) +class LinUCBTrainerParameters: + pass + + +@make_config_class( + DeepRepresentLinUCBTrainer.__init__, + blocklist=[ + "policy", + ], +) +class DeepRepresentLinUCBTrainerParameters: + pass + + +@make_config_class( + DisjointLinUCBTrainer.__init__, + blocklist=[ + "policy", + ], +) +class DisjointLinUCBTrainerParameters: + pass diff --git a/reagent/training/parametric_dqn_trainer.py b/reagent/training/parametric_dqn_trainer.py index 2f9d91d3c..427d62c66 100644 --- a/reagent/training/parametric_dqn_trainer.py +++ b/reagent/training/parametric_dqn_trainer.py @@ -2,72 +2,112 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import logging -from typing import Tuple +from typing import Optional, Tuple -import reagent.parameters as rlp -import reagent.types as rlt +import reagent.core.parameters as rlp +import reagent.core.types as rlt import torch +import torch.nn as nn import torch.nn.functional as F from reagent.core.configuration import resolve_defaults from reagent.core.dataclasses import field -from reagent.optimizer.union import Optimizer__Union -from reagent.training.dqn_trainer_base import DQNTrainerBase - +from reagent.optimizer import Optimizer__Union, SoftUpdate +from reagent.training.dqn_trainer_base import DQNTrainerMixin +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.rl_trainer_pytorch import RLTrainerMixin logger = logging.getLogger(__name__) -class ParametricDQNTrainer(DQNTrainerBase): +class ParametricDQNTrainer(DQNTrainerMixin, RLTrainerMixin, ReAgentLightningModule): @resolve_defaults def __init__( self, q_network, q_network_target, - reward_network, - use_gpu: bool = False, + reward_network: Optional[nn.Module] = None, # Start ParametricDQNTrainerParameters rl: rlp.RLParameters = field(default_factory=rlp.RLParameters), # noqa: B008 double_q_learning: bool = True, - minibatch_size: int = 1024, minibatches_per_step: int = 1, optimizer: Optimizer__Union = field( # noqa: B008 default_factory=Optimizer__Union.default ), + log_tensorboard: bool = False, ) -> None: - super().__init__(rl, use_gpu=use_gpu) + super().__init__() + self.rl_parameters = rl self.double_q_learning = double_q_learning - self.minibatch_size = minibatch_size self.minibatches_per_step = minibatches_per_step or 1 self.q_network = q_network self.q_network_target = q_network_target - self.q_network_optimizer = optimizer.make_optimizer(self.q_network.parameters()) - self.reward_network = reward_network - self.reward_network_optimizer = optimizer.make_optimizer( - self.reward_network.parameters() + self.optimizer = optimizer + self.log_tensorboard = log_tensorboard + + if rl.q_network_loss == "mse": + self.q_network_loss = F.mse_loss + elif rl.q_network_loss == "huber": + self.q_network_loss = F.smooth_l1_loss + elif rl.q_network_loss == "bce_with_logits": + # The loss is only used when gamma = 0, reward is between 0 and 1 + # and we need to calculate NE as metrics. + assert ( + rl.gamma == 0 + ), "bce_with_logits loss is only supported when gamma is 0." 
+ self.q_network_loss = F.binary_cross_entropy_with_logits + else: + raise Exception( + "Q-Network loss type {} not valid loss.".format(rl.q_network_loss) + ) + + def configure_optimizers(self): + optimizers = [] + optimizers.append( + self.optimizer.make_optimizer_scheduler(self.q_network.parameters()) + ) + if self.reward_network is not None: + optimizers.append( + self.optimizer.make_optimizer_scheduler( + self.reward_network.parameters() + ) + ) + # soft-update + target_params = list(self.q_network_target.parameters()) + source_params = list(self.q_network.parameters()) + optimizers.append( + SoftUpdate.make_optimizer_scheduler( + target_params, source_params, tau=self.tau + ) ) - def warm_start_components(self): - return [ - "q_network", - "q_network_target", - "q_network_optimizer", - "reward_network", - "reward_network_optimizer", - ] + return optimizers + + def _check_input(self, training_batch: rlt.ParametricDqnInput): + assert isinstance(training_batch, rlt.ParametricDqnInput) + assert training_batch.not_terminal.dim() == training_batch.reward.dim() == 2 + assert ( + training_batch.not_terminal.shape[1] == training_batch.reward.shape[1] == 1 + ) + assert ( + training_batch.action.float_features.dim() + == training_batch.next_action.float_features.dim() + == 2 + ) @torch.no_grad() - def get_detached_q_values(self, state, action) -> Tuple[torch.Tensor, torch.Tensor]: - """ Gets the q values from the model and target networks """ + def get_detached_model_outputs( + self, state, action + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Gets the q values from the model and target networks""" q_values = self.q_network(state, action) q_values_target = self.q_network_target(state, action) return q_values, q_values_target - @torch.no_grad() - def train(self, training_batch: rlt.ParametricDqnInput) -> None: - self.minibatch += 1 + def train_step_gen(self, training_batch: rlt.ParametricDqnInput, batch_idx: int): + self._check_input(training_batch) reward = training_batch.reward not_terminal = training_batch.not_terminal.float() discount_tensor = torch.full_like(reward, self.gamma) @@ -91,7 +131,10 @@ def train(self, training_batch: rlt.ParametricDqnInput) -> None: ) max_num_action = product // batch_size tiled_next_state = training_batch.next_state.get_tiled_batch(max_num_action) - all_next_q_values, all_next_q_values_target = self.get_detached_q_values( + ( + all_next_q_values, + all_next_q_values_target, + ) = self.get_detached_model_outputs( tiled_next_state, training_batch.possible_next_actions ) # Compute max a' Q(s', a') over all possible actions using target network @@ -106,7 +149,7 @@ def train(self, training_batch: rlt.ParametricDqnInput) -> None: else: # SARSA (Use the target network) - _, next_q_values = self.get_detached_q_values( + _, next_q_values = self.get_detached_model_outputs( training_batch.next_state, training_batch.next_action ) assert ( @@ -118,33 +161,24 @@ def train(self, training_batch: rlt.ParametricDqnInput) -> None: target_q_values.shape[-1] == 1 ), f"{target_q_values.shape} doesn't end with 1" - with torch.enable_grad(): - # Get Q-value of action taken - q_values = self.q_network(training_batch.state, training_batch.action) - assert ( - target_q_values.shape == q_values.shape - ), f"{target_q_values.shape} != {q_values.shape}." 
- td_loss = self.q_network_loss(q_values, target_q_values) - td_loss.backward() - self._maybe_run_optimizer( - self.q_network_optimizer, self.minibatches_per_step + # Get Q-value of action taken + q_values = self.q_network(training_batch.state, training_batch.action) + assert ( + target_q_values.shape == q_values.shape + ), f"{target_q_values.shape} != {q_values.shape}." + td_loss = self.q_network_loss(q_values, target_q_values) + yield td_loss + + # pyre-fixme[16]: Optional type has no attribute `metrics`. + if training_batch.extras.metrics is not None: + metrics_reward_concat_real_vals = torch.cat( + (reward, training_batch.extras.metrics), dim=1 ) + else: + metrics_reward_concat_real_vals = reward - # Use the soft update rule to update target network - self._maybe_soft_update( - self.q_network, self.q_network_target, self.tau, self.minibatches_per_step - ) - - with torch.enable_grad(): - # pyre-fixme[16]: Optional type has no attribute `metrics`. - if training_batch.extras.metrics is not None: - metrics_reward_concat_real_vals = torch.cat( - (reward, training_batch.extras.metrics), dim=1 - ) - else: - metrics_reward_concat_real_vals = reward - - # get reward estimates + # get reward estimates + if self.reward_network is not None: reward_estimates = self.reward_network( training_batch.state, training_batch.action ) @@ -152,14 +186,28 @@ def train(self, training_batch: rlt.ParametricDqnInput) -> None: reward_estimates.squeeze(-1), metrics_reward_concat_real_vals.squeeze(-1), ) - reward_loss.backward() - self._maybe_run_optimizer( - self.reward_network_optimizer, self.minibatches_per_step - ) - - self.loss_reporter.report( - td_loss=td_loss.detach().cpu(), - reward_loss=reward_loss.detach().cpu(), + yield reward_loss + else: + reward_loss = torch.tensor([0.0]) + + td_loss = td_loss.detach().cpu() + reward_loss = reward_loss.detach().cpu() + q_values = q_values.detach().cpu() + # Logging loss, rewards, and model values + # Use reagent reporter + self.reporter.log( + td_loss=td_loss, + reward_loss=reward_loss, logged_rewards=reward, - model_values_on_logged_actions=q_values.detach().cpu(), + model_values_on_logged_actions=q_values, ) + # Use pytorch-lightning logger on rank 0 + # pyre-fixme[16]: Optional type has no attribute `experiment`. + if self.log_tensorboard and self.logger.experiment: + self.log("loss", {"td_loss": td_loss, "reward_loss": reward_loss}) + tensorboard = self.logger.experiment + tensorboard.add_histogram("reward", reward) + tensorboard.add_histogram("model_values_on_logged_actions", q_values) + + # Use the soft update rule to update target network + yield self.soft_update_result() diff --git a/reagent/training/ppo_trainer.py b/reagent/training/ppo_trainer.py new file mode 100644 index 000000000..bdba28159 --- /dev/null +++ b/reagent/training/ppo_trainer.py @@ -0,0 +1,216 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
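# A compact sketch of the TD target and soft update that ParametricDQNTrainer's
# train_step_gen above is built around. The networks, gamma and tau here are
# stand-ins for illustration, and the soft-update rule below is the common
# Polyak form assumed to match the SoftUpdate optimizer used in the diff.
import torch

gamma, tau = 0.9, 0.005
q_online = torch.nn.Linear(4, 1)   # stand-in for q_network(state, action)
q_target = torch.nn.Linear(4, 1)   # stand-in for q_network_target

def td_target(reward, not_terminal, next_q_target_max):
    # target = r + gamma * not_terminal * max_a' Q_target(s', a')
    return reward + gamma * not_terminal * next_q_target_max

def soft_update(target_net, online_net, tau):
    # theta_target <- tau * theta_online + (1 - tau) * theta_target
    with torch.no_grad():
        for tp, op in zip(target_net.parameters(), online_net.parameters()):
            tp.mul_(1.0 - tau).add_(tau * op)

reward = torch.tensor([[1.0]])
not_terminal = torch.tensor([[1.0]])
next_q_max = torch.tensor([[2.0]])
target = td_target(reward, not_terminal, next_q_max)  # -> 1 + 0.9 * 2 = 2.8
soft_update(q_target, q_online, tau)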
+import inspect +import logging +from dataclasses import field +from typing import Dict, List, Optional, Union + +import reagent.core.types as rlt +import torch +import torch.optim +from reagent.core.configuration import resolve_defaults +from reagent.gym.policies.policy import Policy +from reagent.models.base import ModelBase +from reagent.optimizer.union import Optimizer__Union +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.utils import discounted_returns, whiten + + +logger = logging.getLogger(__name__) + + +class PPOTrainer(ReAgentLightningModule): + """ + Proximal Policy Optimization (PPO). See https://arxiv.org/pdf/1707.06347.pdf + This is the "clip" version of PPO. It does not include: + - KL divergence + - Bootstrapping with a critic model (our approach only works if full trajectories up to terminal state are fed in) + Optionally, a value network can be trained and used as a baseline for rewards. + """ + + @resolve_defaults + def __init__( + self, + policy: Policy, + gamma: float = 0.9, + optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + optimizer_value_net: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + actions: List[str] = field(default_factory=list), # noqa: B008 + reward_clip: float = 1e6, + normalize: bool = True, + subtract_mean: bool = True, + offset_clamp_min: bool = False, + update_freq: int = 1, # how many env steps between updates + update_epochs: int = 1, # how many epochs to run when updating (for PPO) + ppo_batch_size: int = 1, # batch size (number of trajectories) used for PPO updates + ppo_epsilon: float = 0.2, # clamp importance weights between 1-epsilon and 1+epsilon + entropy_weight: float = 0.0, # weight of the entropy term in the PPO loss + value_net: Optional[ModelBase] = None, + ): + # PPO relies on customized update schemas, achieved by manual_backward() + super().__init__(automatic_optimization=False) + self.scorer = policy.scorer + self.sampler = policy.sampler + self.gamma = gamma + self.optimizer_value_net = optimizer_value_net + self.actions = actions + self.reward_clip = reward_clip + self.normalize = normalize + self.subtract_mean = subtract_mean + self.offset_clamp_min = offset_clamp_min + self.update_freq = update_freq + self.update_epochs = update_epochs + self.ppo_batch_size = ppo_batch_size + self.ppo_epsilon = ppo_epsilon + self.entropy_weight = entropy_weight + + self.optimizer = optimizer + self.value_net = value_net + if value_net is not None: + self.value_loss_fn = torch.nn.MSELoss(reduction="mean") + assert ( + not self.normalize + ), "Can't apply a value baseline and normalize rewards simultaneously" + assert (ppo_epsilon >= 0) and ( + ppo_epsilon <= 1 + ), "ppo_epslion has to be in [0;1]" + + self.traj_buffer = [] + + def _trajectory_to_losses( + self, trajectory: rlt.PolicyGradientInput + ) -> Dict[str, torch.Tensor]: + """ + Get a dict of losses for the trajectory. Dict always includes PPO loss. + If a value baseline is trained, a loss for the value network is also included. + """ + losses = {} + actions = trajectory.action + rewards = trajectory.reward.detach() + scorer_inputs = [] + if inspect.getattr_static(trajectory, "graph", None) is not None: + # TODO: can this line be hit currently in ReAgent? 
+ # GNN + scorer_inputs.append(trajectory.graph) + else: + scorer_inputs.append(trajectory.state) + if trajectory.possible_actions_mask is not None: + scorer_inputs.append(trajectory.possible_actions_mask) + scores = self.scorer(*scorer_inputs) + offset_reinforcement = discounted_returns( + torch.clamp(rewards, max=self.reward_clip).clone(), self.gamma + ) + if self.normalize: + offset_reinforcement = whiten( + offset_reinforcement, subtract_mean=self.subtract_mean + ) + if self.offset_clamp_min: + offset_reinforcement = offset_reinforcement.clamp(min=0) + if self.value_net is not None: + # subtract learned value function baselines from rewards + baselines = self.value_net(trajectory.state).squeeze() + # use reward-to-go as label for training the value function + losses["value_net_loss"] = self.value_loss_fn( + baselines, offset_reinforcement + ) + # detach bcs we want PPO to tweak policy, not baseline + offset_reinforcement = offset_reinforcement - baselines.detach() + + target_propensity = self.sampler.log_prob(scores, actions).float() + characteristic_eligibility = torch.exp( + target_propensity - trajectory.log_prob.detach() + ).float() + + losses["ppo_loss"] = -torch.min( + offset_reinforcement.float() @ characteristic_eligibility, + offset_reinforcement.float() + @ torch.clamp( + characteristic_eligibility, + 1 - self.ppo_epsilon, + 1 + self.ppo_epsilon, + ), + ) + if self.entropy_weight != 0: + entropy = self.sampler.entropy(scores) + # "-" bcs minimizing, not maximizing + losses["ppo_loss"] = losses["ppo_loss"] - self.entropy_weight * entropy + return losses + + def configure_optimizers(self): + optimizers = [] + # value net optimizer + if self.value_net is not None: + optimizers.append( + self.optimizer_value_net.make_optimizer_scheduler( + self.value_net.parameters() + ) + ) + # policy optimizer + optimizers.append( + self.optimizer.make_optimizer_scheduler(self.scorer.parameters()) + ) + + return optimizers + + def get_optimizers(self): + opts = self.optimizers() + if self.value_net is not None: + return opts[0], opts[1] + return None, opts[0] + + # pyre-fixme[14]: `training_step` overrides method defined in + # `ReAgentLightningModule` inconsistently. 
+ def training_step( + self, + training_batch: Union[rlt.PolicyGradientInput, Dict[str, torch.Tensor]], + batch_idx: int, + ): + if isinstance(training_batch, dict): + training_batch = rlt.PolicyGradientInput.from_dict(training_batch) + + self.traj_buffer.append(training_batch) + if len(self.traj_buffer) == self.update_freq: + self.update_model() + + def update_model(self): + assert ( + len(self.traj_buffer) == self.update_freq + ), "trajectory buffer does not have sufficient samples for model_update" + for _ in range(self.update_epochs): + # iterate through minibatches of PPO updates in random order + random_order = torch.randperm(len(self.traj_buffer)) + for i in range(0, len(self.traj_buffer), self.ppo_batch_size): + idx = random_order[i : i + self.ppo_batch_size] + training_batch_list = [self.traj_buffer[i] for i in idx] + self._update_model(training_batch_list) + + self.traj_buffer = [] # empty the buffer + + def _update_model(self, training_batch_list: List[rlt.PolicyGradientInput]): + losses = { + "ppo_loss": [], + "value_net_loss": [], + } + value_net_opt, ppo_opt = self.get_optimizers() + + for traj in training_batch_list: + loss = self._trajectory_to_losses(traj) + for k, v in loss.items(): + losses[k].append(v) + + if self.value_net is not None: + # TD loss for the baseline value network + value_net_loss = torch.stack(losses["value_net_loss"]).sum() + value_net_opt.zero_grad() + self.manual_backward(value_net_loss) + value_net_opt.step() + + # PPO "loss" for the policy network + ppo_loss = torch.stack(losses["ppo_loss"]).sum() + ppo_opt.zero_grad() + self.manual_backward(ppo_loss) + ppo_opt.step() diff --git a/reagent/training/qrdqn_trainer.py b/reagent/training/qrdqn_trainer.py index cddfd6113..b3a4a0195 100644 --- a/reagent/training/qrdqn_trainer.py +++ b/reagent/training/qrdqn_trainer.py @@ -4,31 +4,20 @@ import logging from typing import List, Tuple -import reagent.types as rlt +import reagent.core.types as rlt import torch from reagent.core.configuration import resolve_defaults from reagent.core.dataclasses import field -from reagent.core.tracker import observable +from reagent.core.parameters import EvaluationParameters, RLParameters +from reagent.optimizer import SoftUpdate from reagent.optimizer.union import Optimizer__Union -from reagent.parameters import EvaluationParameters, RLParameters -from reagent.training.dqn_trainer_base import DQNTrainerBase -from reagent.training.training_data_page import TrainingDataPage +from reagent.training.dqn_trainer_base import DQNTrainerBaseLightning logger = logging.getLogger(__name__) -@observable( - td_loss=torch.Tensor, - logged_actions=torch.Tensor, - logged_propensities=torch.Tensor, - logged_rewards=torch.Tensor, - model_propensities=torch.Tensor, - model_rewards=torch.Tensor, - model_values=torch.Tensor, - model_action_idxs=torch.Tensor, -) -class QRDQNTrainer(DQNTrainerBase): +class QRDQNTrainer(DQNTrainerBaseLightning): """ Implementation of QR-DQN (Quantile Regression Deep Q-Network) @@ -44,8 +33,6 @@ def __init__( reward_network=None, q_network_cpe=None, q_network_cpe_target=None, - loss_reporter=None, - use_gpu: bool = False, actions: List[str] = field(default_factory=list), # noqa: B008 rl: RLParameters = field(default_factory=RLParameters), # noqa: B008 double_q_learning: bool = True, @@ -63,14 +50,12 @@ def __init__( ), ) -> None: super().__init__( - rl, - use_gpu=use_gpu, + rl_parameters=rl, metrics_to_score=metrics_to_score, actions=actions, evaluation_parameters=evaluation, - loss_reporter=loss_reporter, ) - + # 
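# A minimal numeric sketch of the clipped PPO objective computed in
# _trajectory_to_losses above; the advantages and log-probabilities are made up,
# and only the clip term is shown (no entropy bonus or value baseline).
import torch

ppo_epsilon = 0.2
advantages = torch.tensor([1.0, -0.5, 2.0])      # stand-in for offset_reinforcement
new_log_prob = torch.tensor([-0.9, -1.2, -0.4])
old_log_prob = torch.tensor([-1.0, -1.0, -1.0])

ratio = torch.exp(new_log_prob - old_log_prob)   # importance weights
clipped = torch.clamp(ratio, 1 - ppo_epsilon, 1 + ppo_epsilon)
# Dot products, matching the `@` used in the trainer; minimize the negated minimum.
ppo_loss = -torch.min(advantages @ ratio, advantages @ clipped)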
TODO: check to ensure no rl parameter value is set that isn't actively used by class self.double_q_learning = double_q_learning self.minibatch_size = minibatch_size self.minibatches_per_step = minibatches_per_step @@ -78,49 +63,55 @@ def __init__( self.q_network = q_network self.q_network_target = q_network_target - self.q_network_optimizer = optimizer.make_optimizer(self.q_network.parameters()) + self.q_network_optimizer = optimizer self.num_atoms = num_atoms + self.register_buffer("quantiles", None) self.quantiles = ( - (0.5 + torch.arange(self.num_atoms, device=self.device).float()) - / float(self.num_atoms) + (0.5 + torch.arange(self.num_atoms).float()) / float(self.num_atoms) ).view(1, -1) self._initialize_cpe( reward_network, q_network_cpe, q_network_cpe_target, optimizer=cpe_optimizer ) - self.reward_boosts = torch.zeros([1, len(self._actions)], device=self.device) - if rl.reward_boost is not None: - # pyre-fixme[16]: Optional type has no attribute `keys`. - for k in rl.reward_boost.keys(): - i = self._actions.index(k) - # pyre-fixme[16]: Optional type has no attribute `__getitem__`. - self.reward_boosts[0, i] = rl.reward_boost[k] - - def warm_start_components(self): - components = ["q_network", "q_network_target", "q_network_optimizer"] - if self.reward_network is not None: - components += [ - "reward_network", - "reward_network_optimizer", - "q_network_cpe", - "q_network_cpe_target", - "q_network_cpe_optimizer", - ] - return components + def configure_optimizers(self): + optimizers = [] + target_params = list(self.q_network_target.parameters()) + source_params = list(self.q_network.parameters()) - @torch.no_grad() - def train(self, training_batch: rlt.DiscreteDqnInput): - if isinstance(training_batch, TrainingDataPage): - training_batch = training_batch.as_discrete_maxq_training_batch() + optimizers.append( + self.q_network_optimizer.make_optimizer_scheduler( + self.q_network.parameters() + ) + ) + + if self.calc_cpe_in_training: + ( + cpe_target_params, + cpe_source_params, + cpe_optimizers, + ) = self._configure_cpe_optimizers() + target_params += cpe_target_params + source_params += cpe_source_params + optimizers += cpe_optimizers + + optimizers.append( + SoftUpdate.make_optimizer_scheduler( + target_params, source_params, tau=self.tau + ) + ) + + return optimizers + + def train_step_gen(self, training_batch: rlt.DiscreteDqnInput, batch_idx: int): + self._check_input(training_batch) rewards = self.boost_rewards(training_batch.reward, training_batch.action) discount_tensor = torch.full_like(rewards, self.gamma) possible_next_actions_mask = training_batch.possible_next_actions_mask.float() possible_actions_mask = training_batch.possible_actions_mask.float() - self.minibatch += 1 not_done_mask = training_batch.not_terminal.float() if self.use_seq_num_diff_as_time_diff: @@ -149,31 +140,22 @@ def train(self, training_batch: rlt.DiscreteDqnInput): # Build target distribution target_Q = rewards + discount_tensor * not_done_mask * next_qf - with torch.enable_grad(): - current_qf = self.q_network(training_batch.state) + current_qf = self.q_network(training_batch.state) - # for reporting only - all_q_values = current_qf.mean(2).detach() + # for reporting only + all_q_values = current_qf.mean(2).detach() - current_qf = (current_qf * training_batch.action.unsqueeze(-1)).sum(1) + current_qf = (current_qf * training_batch.action.unsqueeze(-1)).sum(1) - # (batch, atoms) -> (atoms, batch, 1) -> (atoms, batch, atoms) - td = target_Q.t().unsqueeze(-1) - current_qf - loss = ( - self.huber(td) - # 
pyre-fixme[16]: `FloatTensor` has no attribute `abs`. - * (self.quantiles - (td.detach() < 0).float()).abs() - ).mean() + # (batch, atoms) -> (atoms, batch, 1) -> (atoms, batch, atoms) + td = target_Q.t().unsqueeze(-1) - current_qf + loss = ( + self.huber(td) * (self.quantiles - (td.detach() < 0).float()).abs() + ).mean() - loss.backward() - self._maybe_run_optimizer( - self.q_network_optimizer, self.minibatches_per_step - ) - - # Use the soft update rule to update target network - self._maybe_soft_update( - self.q_network, self.q_network_target, self.tau, self.minibatches_per_step - ) + yield loss + # pyre-fixme[16]: `DQNTrainer` has no attribute `loss`. + self.loss = loss.detach() # Get Q-values of next states, used in computing cpe all_next_action_scores = ( @@ -181,7 +163,7 @@ def train(self, training_batch: rlt.DiscreteDqnInput): ) logged_action_idxs = torch.argmax(training_batch.action, dim=1, keepdim=True) - reward_loss, model_rewards, model_propensities = self._calculate_cpes( + yield from self._calculate_cpes( training_batch, training_batch.state, training_batch.next_state, @@ -197,31 +179,19 @@ def train(self, training_batch: rlt.DiscreteDqnInput): possible_actions_mask if self.maxq_learning else training_batch.action, ) - # pyre-fixme[16]: `QRDQNTrainer` has no attribute `notify_observers`. - self.notify_observers( - td_loss=loss, - logged_actions=logged_action_idxs, - logged_propensities=training_batch.extras.action_probability, - logged_rewards=rewards, - model_propensities=model_propensities, - model_rewards=model_rewards, - model_values=all_q_values, - model_action_idxs=model_action_idxs, - ) - - self.loss_reporter.report( + self.reporter.log( td_loss=loss, logged_actions=logged_action_idxs, logged_propensities=training_batch.extras.action_probability, logged_rewards=rewards, logged_values=None, # Compute at end of each epoch for CPE - model_propensities=model_propensities, - model_rewards=model_rewards, model_values=all_q_values, model_values_on_logged_actions=None, # Compute at end of each epoch for CPE model_action_idxs=model_action_idxs, ) + yield self.soft_update_result() + @torch.no_grad() def boost_rewards( self, rewards: torch.Tensor, actions: torch.Tensor @@ -243,10 +213,10 @@ def huber(self, x): return torch.where(x.abs() < 1, 0.5 * x.pow(2), x.abs() - 0.5) @torch.no_grad() - def get_detached_q_values( + def get_detached_model_outputs( self, state: rlt.FeatureData ) -> Tuple[torch.Tensor, torch.Tensor]: - """ Gets the q values from the model and target networks """ + """Gets the q values from the model and target networks""" q_values = self.q_network(state).mean(dim=2) q_values_target = self.q_network_target(state).mean(dim=2) return q_values, q_values_target diff --git a/reagent/training/ranking/__init__.py b/reagent/training/ranking/__init__.py index e69de29bb..5be5087fd 100644 --- a/reagent/training/ranking/__init__.py +++ b/reagent/training/ranking/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/training/ranking/helper.py b/reagent/training/ranking/helper.py new file mode 100644 index 000000000..7b447ba2b --- /dev/null +++ b/reagent/training/ranking/helper.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
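# A small numeric sketch of the quantile-regression Huber loss used by QRDQNTrainer
# above, with a made-up batch of 2 and 4 quantile atoms; shapes mirror the trainer.
import torch

num_atoms, batch_size = 4, 2
quantiles = ((0.5 + torch.arange(num_atoms).float()) / num_atoms).view(1, -1)

def huber(x):
    return torch.where(x.abs() < 1, 0.5 * x.pow(2), x.abs() - 0.5)

current_qf = torch.randn(batch_size, num_atoms)  # quantile values of the taken actions
target_q = torch.randn(batch_size, num_atoms)    # Bellman targets per quantile

# (batch, atoms) -> (atoms, batch, 1), broadcast against (batch, atoms)
td = target_q.t().unsqueeze(-1) - current_qf
loss = (huber(td) * (quantiles - (td.detach() < 0).float()).abs()).mean()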
+from typing import Optional + +import torch +from reagent.core.parameters_seq2slate import IPSClamp, IPSClampMethod + + +def ips_clamp(impt_smpl, ips_clamp: Optional[IPSClamp]): + if not ips_clamp: + return impt_smpl.clone() + if ips_clamp.clamp_method == IPSClampMethod.UNIVERSAL: + return torch.clamp(impt_smpl, 0, ips_clamp.clamp_max) + elif ips_clamp.clamp_method == IPSClampMethod.AGGRESSIVE: + return torch.where( + impt_smpl > ips_clamp.clamp_max, torch.zeros_like(impt_smpl), impt_smpl + ) diff --git a/reagent/training/ranking/seq2slate_attn_trainer.py b/reagent/training/ranking/seq2slate_attn_trainer.py index 46806343a..bb160253d 100644 --- a/reagent/training/ranking/seq2slate_attn_trainer.py +++ b/reagent/training/ranking/seq2slate_attn_trainer.py @@ -2,21 +2,26 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging -import reagent.types as rlt +import numpy as np +import reagent.core.types as rlt import torch import torch.nn as nn -from reagent.core.tracker import observable -from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet -from reagent.parameters import TransformerParameters -from reagent.training.loss_reporter import NoOpLossReporter -from reagent.training.trainer import Trainer - +from reagent.core.dataclasses import field +from reagent.model_utils.seq2slate_utils import Seq2SlateMode +from reagent.models.seq2slate import Seq2SlateTransformerNet +from reagent.optimizer.union import Optimizer__Union +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from sklearn.metrics import ( + average_precision_score, + dcg_score, + ndcg_score, + roc_auc_score, +) logger = logging.getLogger(__name__) -@observable(cross_entropy_loss=torch.Tensor) -class Seq2SlatePairwiseAttnTrainer(Trainer): +class Seq2SlatePairwiseAttnTrainer(ReAgentLightningModule): """ Seq2Slate without a decoder learned in a supervised learning fashion ( https://arxiv.org/pdf/1904.06813.pdf ) @@ -25,52 +30,124 @@ class Seq2SlatePairwiseAttnTrainer(Trainer): def __init__( self, seq2slate_net: Seq2SlateTransformerNet, - parameters: TransformerParameters, - minibatch_size: int, - loss_reporter=None, - use_gpu: bool = False, + slate_size: int, + calc_cpe: bool, + policy_optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), ) -> None: - self.parameters = parameters - self.loss_reporter = loss_reporter - self.use_gpu = use_gpu + super().__init__() self.seq2slate_net = seq2slate_net - self.minibatch_size = minibatch_size - self.minibatch = 0 - self.optimizer = parameters.optimizer.make_optimizer( - self.seq2slate_net.parameters() - ) + self.slate_size = slate_size + self.calc_cpe = calc_cpe + self.policy_optimizer = policy_optimizer self.log_softmax = nn.LogSoftmax(dim=1) self.kl_loss = nn.KLDivLoss(reduction="batchmean") - if self.loss_reporter is None: - self.loss_reporter = NoOpLossReporter() - def warm_start_components(self): - components = ["seq2slate_net"] - return components + def configure_optimizers(self): + optimizers = [] + optimizers.append( + self.policy_optimizer.make_optimizer_scheduler( + self.seq2slate_net.parameters() + ) + ) + return optimizers - def train(self, training_batch: rlt.PreprocessedTrainingBatch): - assert type(training_batch) is rlt.PreprocessedTrainingBatch - training_input = training_batch.training_input - assert isinstance(training_input, rlt.PreprocessedRankingInput) + def train_step_gen( + self, training_batch: rlt.PreprocessedRankingInput, batch_idx: int + ): + 
assert type(training_batch) is rlt.PreprocessedRankingInput # shape: batch_size, tgt_seq_len encoder_scores = self.seq2slate_net( - training_input, mode=Seq2SlateMode.ENCODER_SCORE_MODE + training_batch, mode=Seq2SlateMode.ENCODER_SCORE_MODE ).encoder_scores assert encoder_scores.requires_grad loss = self.kl_loss( - self.log_softmax(encoder_scores), training_input.position_reward + self.log_softmax(encoder_scores), training_batch.position_reward + ) + + detached_loss = loss.detach().cpu() + self.reporter.log(train_cross_entropy_loss=detached_loss) + + yield loss + + # pyre-ignore inconsistent override because lightning doesn't use types + def validation_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int): + # pyre-fixme[16]: `Optional` has no attribute `shape`. + batch_size = batch.position_reward.shape[0] + + # shape: batch_size, tgt_seq_len + encoder_scores = self.seq2slate_net( + batch, mode=Seq2SlateMode.ENCODER_SCORE_MODE + ).encoder_scores + assert ( + encoder_scores.shape[1] == batch.position_reward.shape[1] == self.slate_size + ) + ce_loss = self.kl_loss( + self.log_softmax(encoder_scores), batch.position_reward + ).item() + + if not self.calc_cpe: + self.reporter.log(eval_cross_entropy_loss=ce_loss) + return + + # shape: batch_size, tgt_seq_len + ranking_output = self.seq2slate_net( + batch, mode=Seq2SlateMode.RANK_MODE, greedy=True ) - self.optimizer.zero_grad() - loss.backward() - self.optimizer.step() + # pyre-fixme[16]: `int` has no attribute `cpu`. + ranked_idx = (ranking_output.ranked_tgt_out_idx - 2).cpu().numpy() + # pyre-fixme[58]: `-` is not supported for operand types + # `Optional[torch.Tensor]` and `int`. + logged_idx = (batch.tgt_out_idx - 2).cpu().numpy() + score_bar = np.arange(self.slate_size, 0, -1) - loss = loss.detach() - self.minibatch += 1 + batch_dcg = [] + batch_ndcg = [] + batch_mean_ap = [] + batch_auc = [] + batch_base_dcg = [] + batch_base_ndcg = [] + batch_base_map = [] + batch_base_auc = [] + for i in range(batch_size): + # no positive label in the slate or slate labels are all positive + # pyre-fixme[16]: `Optional` has no attribute `__getitem__`. + if (not torch.any(batch.position_reward[i].bool())) or ( + torch.all(batch.position_reward[i].bool()) + ): + continue - # pyre-fixme[16]: `Seq2SlatePairwiseAttnTrainer` has no attribute - # `notify_observers`. 
- self.notify_observers(cross_entropy_loss=loss) + ranked_scores = np.zeros(self.slate_size) + ranked_scores[ranked_idx[i]] = score_bar + truth_scores = np.zeros(self.slate_size) + truth_scores[logged_idx[i]] = batch.position_reward[i].cpu().numpy() + base_scores = np.zeros(self.slate_size) + base_scores[logged_idx[i]] = score_bar + # average_precision_score accepts 1D arrays + # dcg & ndcg accepts 2D arrays + batch_mean_ap.append(average_precision_score(truth_scores, ranked_scores)) + batch_base_map.append(average_precision_score(truth_scores, base_scores)) + batch_auc.append(roc_auc_score(truth_scores, ranked_scores)) + batch_base_auc.append(roc_auc_score(truth_scores, base_scores)) + ranked_scores = np.expand_dims(ranked_scores, axis=0) + truth_scores = np.expand_dims(truth_scores, axis=0) + base_scores = np.expand_dims(base_scores, axis=0) + batch_dcg.append(dcg_score(truth_scores, ranked_scores)) + batch_ndcg.append(ndcg_score(truth_scores, ranked_scores)) + batch_base_dcg.append(dcg_score(truth_scores, base_scores)) + batch_base_ndcg.append(ndcg_score(truth_scores, base_scores)) - return {"cross_entropy_loss": loss} + self.reporter.log( + eval_cross_entropy_loss=ce_loss, + eval_dcg=torch.mean(torch.tensor(batch_dcg)).reshape(1), + eval_ndcg=torch.mean(torch.tensor(batch_ndcg)).reshape(1), + eval_mean_ap=torch.mean(torch.tensor(batch_mean_ap)).reshape(1), + eval_auc=torch.mean(torch.tensor(batch_auc)).reshape(1), + eval_base_dcg=torch.mean(torch.tensor(batch_base_dcg)).reshape(1), + eval_base_ndcg=torch.mean(torch.tensor(batch_base_ndcg)).reshape(1), + eval_base_map=torch.mean(torch.tensor(batch_base_map)).reshape(1), + eval_base_auc=torch.mean(torch.tensor(batch_base_auc)).reshape(1), + ) diff --git a/reagent/training/ranking/seq2slate_dr_trainer.py b/reagent/training/ranking/seq2slate_dr_trainer.py deleted file mode 100644 index fb1376cb9..000000000 --- a/reagent/training/ranking/seq2slate_dr_trainer.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
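# A small sketch of the per-slate metric computation performed in validation_step
# above, using made-up indices and position rewards for a single slate of 4 items;
# the sklearn calls match those imported by the trainer.
import numpy as np
from sklearn.metrics import average_precision_score, ndcg_score, roc_auc_score

slate_size = 4
score_bar = np.arange(slate_size, 0, -1)     # [4, 3, 2, 1]
ranked_idx = np.array([2, 0, 3, 1])          # model's ranking of the items
logged_idx = np.array([0, 1, 2, 3])          # logged slate order
position_reward = np.array([1.0, 0.0, 1.0, 0.0])

ranked_scores = np.zeros(slate_size)
ranked_scores[ranked_idx] = score_bar        # model score assigned to each item
truth_scores = np.zeros(slate_size)
truth_scores[logged_idx] = position_reward   # binary relevance labels per item

mean_ap = average_precision_score(truth_scores, ranked_scores)
auc = roc_auc_score(truth_scores, ranked_scores)
ndcg = ndcg_score(truth_scores[None, :], ranked_scores[None, :])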
-import logging -from typing import Optional - -import reagent.types as rlt -import torch -import torch.nn as nn -import torch.nn.functional as F -from reagent.models.seq2slate import ( - BaselineNet, - Seq2SlateMode, - Seq2SlateTransformerModel, - Seq2SlateTransformerNet, -) -from reagent.parameters import Seq2SlateTransformerParameters -from reagent.training.trainer import Trainer - - -logger = logging.getLogger(__name__) - - -class Seq2SlateDifferentiableRewardTrainer(Trainer): - """ - Seq2Slate learned with differentiable reward (Section 3.2 in - https://arxiv.org/pdf/1810.02019.pdf ) - """ - - def __init__( - self, - seq2slate_net: Seq2SlateTransformerNet, - parameters: Seq2SlateTransformerParameters, - minibatch_size: int, - baseline_net: Optional[BaselineNet] = None, - use_gpu: bool = False, - ) -> None: - self.parameters = parameters - self.use_gpu = use_gpu - self.seq2slate_net = seq2slate_net - self.baseline_net = baseline_net - self.minibatch_size = minibatch_size - self.minibatch = 0 - self.optimizer = self.parameters.transformer.optimizer.make_optimizer( - self.seq2slate_net.parameters() - ) - # TODO: T62269969 add baseline_net in training - self.kl_div_loss = nn.KLDivLoss(reduction="none") - - def warm_start_components(self): - components = ["seq2slate_net"] - return components - - def train(self, training_batch: rlt.PreprocessedTrainingBatch): - assert type(training_batch) is rlt.PreprocessedTrainingBatch - training_input = training_batch.training_input - assert isinstance(training_input, rlt.PreprocessedRankingInput) - - per_symbol_log_probs = self.seq2slate_net( - training_input, mode=Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE - ).log_probs - per_seq_log_probs = Seq2SlateTransformerModel.per_symbol_to_per_seq_log_probs( - per_symbol_log_probs, training_input.tgt_out_idx - ) - assert per_symbol_log_probs.requires_grad and per_seq_log_probs.requires_grad - # pyre-fixme[16]: `Optional` has no attribute `shape`. - assert per_seq_log_probs.shape == training_input.tgt_out_probs.shape - - if not self.parameters.on_policy: - importance_sampling = ( - torch.exp(per_seq_log_probs) / training_input.tgt_out_probs - ) - if self.parameters.importance_sampling_clamp_max is not None: - importance_sampling = torch.clamp( - importance_sampling, - 0, - self.parameters.importance_sampling_clamp_max, - ) - else: - importance_sampling = ( - torch.exp(per_seq_log_probs) / torch.exp(per_seq_log_probs).detach() - ) - assert importance_sampling.requires_grad - - # pyre-fixme[6]: Expected `Tensor` for 1st param but got - # `Optional[torch.Tensor]`. 
- labels = self._transform_label(training_input.tgt_out_idx) - assert not labels.requires_grad - - batch_size, max_tgt_seq_len = training_input.tgt_out_idx.shape - # batch_loss shape: batch_size x max_tgt_seq_len - batch_loss = ( - torch.sum(self.kl_div_loss(per_symbol_log_probs, labels), dim=2) - * training_input.position_reward - ) - # weighted_batch_loss shape: batch_size, 1 - weighted_batch_loss = torch.sum( - 1.0 - / torch.log( - torch.arange(1, 1 + max_tgt_seq_len, device=batch_loss.device).float() - + 1.0 - ) - * batch_loss, - dim=1, - keepdim=True, - ) - loss = 1.0 / batch_size * torch.sum(importance_sampling * weighted_batch_loss) - - self.optimizer.zero_grad() - loss.backward() - self.optimizer.step() - - loss = loss.detach().cpu().numpy() - per_symbol_log_probs = per_symbol_log_probs.detach() - self.minibatch += 1 - logger.info(f"{self.minibatch} batch: loss={loss}") - - return {"per_symbol_log_probs": per_symbol_log_probs, "sl": loss} - - def _transform_label(self, tgt_out_idx: torch.Tensor): - label_size = self.seq2slate_net.max_src_seq_len + 2 - label = F.one_hot(tgt_out_idx, label_size) - return label.float() diff --git a/reagent/training/ranking/seq2slate_sim_trainer.py b/reagent/training/ranking/seq2slate_sim_trainer.py index b8afb37ea..17bd91b41 100644 --- a/reagent/training/ranking/seq2slate_sim_trainer.py +++ b/reagent/training/ranking/seq2slate_sim_trainer.py @@ -5,27 +5,28 @@ from typing import List, Optional import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import torch -from reagent.core.tracker import observable -from reagent.models.seq2slate import ( - DECODER_START_SYMBOL, - BaselineNet, - Seq2SlateTransformerNet, -) -from reagent.parameters import Seq2SlateTransformerParameters +import torch.nn as nn +from reagent.core.dataclasses import field +from reagent.core.parameters import Seq2SlateParameters +from reagent.core.torch_utils import gather +from reagent.models.seq2slate import BaselineNet, Seq2SlateMode, Seq2SlateTransformerNet +from reagent.optimizer.union import Optimizer__Union from reagent.training.ranking.seq2slate_trainer import Seq2SlateTrainer -from reagent.training.trainer import Trainer logger = logging.getLogger(__name__) -def _load_reward_net(path, use_gpu): - reward_network = torch.jit.load(path) - if use_gpu: - reward_network = reward_network.cuda() - return reward_network +def _load_reward_net(name_and_path, use_gpu): + reward_name_and_net = {} + for name, path in name_and_path.items(): + reward_network = torch.jit.load(path) + if use_gpu: + reward_network = reward_network.cuda() + reward_name_and_net[name] = reward_network + return reward_name_and_net def swap_dist_in_slate(idx_): @@ -57,10 +58,7 @@ def swap_dist(idx: List[int]): return swap_dist_in_slate(idx) + swap_dist_out_slate(idx) -@observable( - pg_loss=torch.Tensor, train_baseline_loss=torch.Tensor, train_log_probs=torch.Tensor -) -class Seq2SlateSimulationTrainer(Trainer): +class Seq2SlateSimulationTrainer(Seq2SlateTrainer): """ Seq2Slate learned with simulation data, with the action generated randomly and the reward computed by a reward network @@ -69,178 +67,127 @@ class Seq2SlateSimulationTrainer(Trainer): def __init__( self, seq2slate_net: Seq2SlateTransformerNet, - parameters: Seq2SlateTransformerParameters, - minibatch_size: int, - reward_net_path: str, + params: Seq2SlateParameters = field( # noqa: B008 + default_factory=Seq2SlateParameters + ), baseline_net: Optional[BaselineNet] = None, - use_gpu: bool = False, + baseline_warmup_num_batches: 
int = 0, + policy_optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + baseline_optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + policy_gradient_interval: int = 1, + print_interval: int = 100, + calc_cpe: bool = False, + reward_network: Optional[nn.Module] = None, ) -> None: - self.reward_net_path = reward_net_path - # loaded when used - self.reward_net = None - self.parameters = parameters - self.minibatch_size = minibatch_size - self.use_gpu = use_gpu - self.device = torch.device("cuda") if use_gpu else torch.device("cpu") - self.permutation_index = torch.tensor( - list( - permutations( - # pyre-fixme[6]: Expected `Iterable[Variable[itertools._T]]` for - # 1st param but got `Tensor`. - torch.arange(seq2slate_net.max_src_seq_len), - seq2slate_net.max_tgt_seq_len, - ) - ), - device=self.device, - ).long() - - if self.parameters.simulation_distance_penalty is not None: - # pyre-fixme[16]: `Optional` has no attribute `__gt__`. - assert self.parameters.simulation_distance_penalty > 0 - self.permutation_distance = ( - torch.tensor( - [swap_dist(x.tolist()) for x in self.permutation_index], - device=self.device, - ) - .unsqueeze(1) - .float() - ) - self.MAX_DISTANCE = torch.max(self.permutation_distance) - - self.trainer = Seq2SlateTrainer( - seq2slate_net, parameters, minibatch_size, baseline_net, use_gpu - ) - self.seq2slate_net = self.trainer.seq2slate_net - self.baseline_net = self.trainer.baseline_net - - def warm_start_components(self): - components = ["seq2slate_net"] - return components - - def _simulated_training_input( - self, training_input, sim_tgt_out_idx, sim_distance, device - ): - batch_size, max_tgt_seq_len = sim_tgt_out_idx.shape - ( - _, - max_src_seq_len, - candidate_feat_dim, - ) = training_input.src_seq.float_features.shape - - # candidates + padding_symbol + decoder_start_symbol - candidate_size = max_src_seq_len + 2 - src_seq_augment = torch.zeros( - batch_size, candidate_size, candidate_feat_dim, device=device + super().__init__( + seq2slate_net, + params=params, + baseline_net=baseline_net, + baseline_warmup_num_batches=baseline_warmup_num_batches, + policy_optimizer=policy_optimizer, + baseline_optimizer=baseline_optimizer, + policy_gradient_interval=policy_gradient_interval, + print_interval=print_interval, + calc_cpe=calc_cpe, + reward_network=reward_network, ) - src_seq_augment[:, 2:, :] = training_input.src_seq.float_features - - sim_tgt_in_idx = torch.zeros_like(sim_tgt_out_idx).long() - sim_tgt_in_idx[:, 0] = DECODER_START_SYMBOL - sim_tgt_in_idx[:, 1:] = sim_tgt_out_idx[:, :-1] - - sim_tgt_in_seq = rlt.FeatureData( - float_features=src_seq_augment[ - torch.arange(batch_size, device=device).repeat_interleave( - max_tgt_seq_len - ), - sim_tgt_in_idx.flatten(), - ].view(batch_size, max_tgt_seq_len, candidate_feat_dim) - ) - sim_tgt_out_seq = rlt.FeatureData( - float_features=src_seq_augment[ - torch.arange(batch_size, device=device).repeat_interleave( - max_tgt_seq_len - ), - sim_tgt_out_idx.flatten(), - ].view(batch_size, max_tgt_seq_len, candidate_feat_dim) + self.sim_param = params.simulation + assert self.sim_param is not None + # loaded when used + self.reward_name_and_net = nn.ModuleDict({}) + self.MAX_DISTANCE = ( + seq2slate_net.max_src_seq_len * (seq2slate_net.max_src_seq_len - 1) / 2 ) - sim_tgt_out_probs = torch.tensor( - [1.0 / len(self.permutation_index)], device=self.device - ).repeat(batch_size) - - if self.reward_net is None: - self.reward_net = 
_load_reward_net(self.reward_net_path, self.use_gpu) - slate_reward = self.reward_net( - training_input.state.float_features, + + @torch.no_grad() + def _simulated_training_input(self, training_input: rlt.PreprocessedRankingInput): + device = training_input.state.float_features.device + # precision error may cause invalid actions + valid_output = False + while not valid_output: + rank_output = self.seq2slate_net( + training_input, + mode=Seq2SlateMode.RANK_MODE, + tgt_seq_len=self.seq2slate_net.max_tgt_seq_len, + greedy=False, + ) + model_propensities = rank_output.ranked_per_seq_probs + model_actions_with_offset = rank_output.ranked_tgt_out_idx + model_actions = model_actions_with_offset - 2 + # pyre-fixme[6]: For 1st param expected `Tensor` but got `bool`. + if torch.all(model_actions >= 0): + valid_output = True + + batch_size = model_actions_with_offset.shape[0] + simulated_slate_features = gather( training_input.src_seq.float_features, - sim_tgt_out_seq.float_features, - training_input.src_src_mask, - sim_tgt_out_idx, - ).detach() - if slate_reward.ndim == 1: - logger.warning(f"Slate reward should be 2-D tensor, unsqueezing") - slate_reward = slate_reward.unsqueeze(1) - elif slate_reward.ndim != 2: - raise RuntimeError("Expect slate reward to be 2-D tensor") + # pyre-fixme[61]: `model_actions` may not be initialized here. + model_actions, + ) + + if not self.reward_name_and_net: + use_gpu = True if device == torch.device("cuda") else False + self.reward_name_and_net = nn.ModuleDict( + _load_reward_net(self.sim_param.reward_name_path, use_gpu) + ) + + sim_slate_reward = torch.zeros(batch_size, 1, device=device) + for name, reward_net in self.reward_name_and_net.items(): + weight = self.sim_param.reward_name_weight[name] + power = self.sim_param.reward_name_power[name] + sr = reward_net( + training_input.state.float_features, + training_input.src_seq.float_features, + simulated_slate_features, + training_input.src_src_mask, + model_actions_with_offset, + ).detach() + assert sr.ndim == 2, f"Slate reward {name} output should be 2-D tensor" + sim_slate_reward += weight * (sr**power) + # guard-rail reward prediction range - reward_clamp = self.parameters.simulation_reward_clamp + reward_clamp = self.sim_param.reward_clamp if reward_clamp is not None: - slate_reward = torch.clamp( - slate_reward, min=reward_clamp.clamp_min, max=reward_clamp.clamp_max + sim_slate_reward = torch.clamp( + sim_slate_reward, min=reward_clamp.clamp_min, max=reward_clamp.clamp_max ) # guard-rail sequence similarity - distance_penalty = self.parameters.simulation_distance_penalty + distance_penalty = self.sim_param.distance_penalty if distance_penalty is not None: - slate_reward += distance_penalty * (self.MAX_DISTANCE - sim_distance) + sim_distance = ( + torch.tensor( + # pyre-fixme[16]: `int` has no attribute `__iter__`. 
+ [swap_dist(x.tolist()) for x in model_actions], + device=device, + ) + .unsqueeze(1) + .float() + ) + sim_slate_reward += distance_penalty * (self.MAX_DISTANCE - sim_distance) assert ( - len(slate_reward.shape) == 2 and slate_reward.shape[1] == 1 - ), f"{slate_reward.shape}" - - on_policy_input = rlt.PreprocessedRankingInput( - state=training_input.state, - src_seq=training_input.src_seq, - src_src_mask=training_input.src_src_mask, - tgt_in_seq=sim_tgt_in_seq, - tgt_out_seq=sim_tgt_out_seq, - tgt_tgt_mask=training_input.tgt_tgt_mask, - slate_reward=slate_reward, - src_in_idx=training_input.src_in_idx, - tgt_in_idx=sim_tgt_in_idx, - tgt_out_idx=sim_tgt_out_idx, - tgt_out_probs=sim_tgt_out_probs, + len(sim_slate_reward.shape) == 2 and sim_slate_reward.shape[1] == 1 + ), f"{sim_slate_reward.shape}" + + on_policy_input = rlt.PreprocessedRankingInput.from_input( + state=training_input.state.float_features, + candidates=training_input.src_seq.float_features, + device=device, + # pyre-fixme[6]: Expected `Optional[torch.Tensor]` for 4th param but got + # `int`. + # pyre-fixme[61]: `model_actions` may not be initialized here. + action=model_actions, + slate_reward=sim_slate_reward, + # pyre-fixme[61]: `model_propensities` may not be initialized here. + logged_propensities=model_propensities, ) return on_policy_input - def train(self, training_batch: rlt.PreprocessedTrainingBatch): - assert type(training_batch) is rlt.PreprocessedTrainingBatch - training_input = training_batch.training_input - assert isinstance(training_input, rlt.PreprocessedRankingInput) - - batch_size = training_input.state.float_features.shape[0] - - # randomly pick a permutation for every slate - random_indices = torch.randint(0, len(self.permutation_index), (batch_size,)) - sim_tgt_out_idx = self.permutation_index[random_indices] + 2 - if self.parameters.simulation_distance_penalty is not None: - sim_distance = self.permutation_distance[random_indices] - else: - sim_distance = None - - with torch.no_grad(): - # format data according to the new ordering - training_input = self._simulated_training_input( - training_input, sim_tgt_out_idx, sim_distance, self.device - ) - - # data in the results_dict: - # { - # "per_seq_probs": np.exp(log_probs), - # "advantage": advantage, - # "obj_rl_loss": obj_rl_loss, - # "ips_rl_loss": ips_rl_loss, - # "baseline_loss": baseline_loss, - # } - results_dict = self.trainer.train( - rlt.PreprocessedTrainingBatch( - training_input=training_input, extras=training_batch.extras - ) - ) - # pyre-fixme[16]: `Seq2SlateSimulationTrainer` has no attribute - # `notify_observers`. - self.notify_observers( - pg_loss=torch.tensor(results_dict["ips_rl_loss"]).reshape(1), - train_baseline_loss=torch.tensor(results_dict["baseline_loss"]).reshape(1), - train_log_probs=torch.FloatTensor(np.log(results_dict["per_seq_probs"])), - ) - return results_dict + def training_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int): + assert type(batch) is rlt.PreprocessedRankingInput + training_batch = self._simulated_training_input(batch) + return super().training_step(training_batch, batch_idx) diff --git a/reagent/training/ranking/seq2slate_tf_trainer.py b/reagent/training/ranking/seq2slate_tf_trainer.py index 8dae48986..1d5d61906 100644 --- a/reagent/training/ranking/seq2slate_tf_trainer.py +++ b/reagent/training/ranking/seq2slate_tf_trainer.py @@ -1,20 +1,25 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
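
Aside on `_simulated_training_input` above: the simulated slate reward is a weighted sum over the named reward networks, each prediction raised to its configured power, then optionally clamped and penalized by the swap distance of the sampled slate. A minimal sketch of just the combination step, assuming plain dicts in place of `Seq2SlateParameters.simulation` (the names `reward_preds`, `weights`, and `powers` are illustrative):

    import torch

    def combine_slate_rewards(reward_preds, weights, powers, clamp_min=None, clamp_max=None):
        """reward_preds: dict of name -> predicted slate reward, shape (batch_size, 1)."""
        total = torch.zeros_like(next(iter(reward_preds.values())))
        for name, pred in reward_preds.items():
            total = total + weights[name] * (pred ** powers[name])
        if clamp_min is not None or clamp_max is not None:
            total = torch.clamp(total, min=clamp_min, max=clamp_max)
        return total

    preds = {"ndcg": torch.tensor([[0.8], [0.3]]), "ctr": torch.tensor([[0.5], [0.9]])}
    print(combine_slate_rewards(preds, {"ndcg": 1.0, "ctr": 2.0}, {"ndcg": 1, "ctr": 2}))
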
import logging +from typing import List, Optional, Tuple -import reagent.types as rlt +import reagent.core.types as rlt import torch import torch.nn as nn import torch.nn.functional as F -from reagent.models.seq2slate import Seq2SlateMode, Seq2SlateTransformerNet -from reagent.parameters import Seq2SlateTransformerParameters -from reagent.training.trainer import Trainer +from reagent.core.dataclasses import field +from reagent.core.parameters import Seq2SlateParameters +from reagent.evaluation.evaluation_data_page import EvaluationDataPage +from reagent.model_utils.seq2slate_utils import Seq2SlateMode +from reagent.models.seq2slate import Seq2SlateTransformerNet +from reagent.optimizer.union import Optimizer__Union +from reagent.training.reagent_lightning_module import ReAgentLightningModule logger = logging.getLogger(__name__) -class Seq2SlateTeacherForcingTrainer(Trainer): +class Seq2SlateTeacherForcingTrainer(ReAgentLightningModule): """ Seq2Slate learned in a teach-forcing fashion (only used if the the ground-truth sequences are available) @@ -23,53 +28,129 @@ class Seq2SlateTeacherForcingTrainer(Trainer): def __init__( self, seq2slate_net: Seq2SlateTransformerNet, - parameters: Seq2SlateTransformerParameters, - minibatch_size: int, - use_gpu: bool = False, + params: Seq2SlateParameters, + policy_optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + policy_gradient_interval: int = 1, + print_interval: int = 100, + calc_cpe: bool = False, + reward_network: Optional[nn.Module] = None, ) -> None: - self.parameters = parameters - self.use_gpu = use_gpu + super().__init__() + self.params = params + self.policy_gradient_interval = policy_gradient_interval + self.print_interval = print_interval self.seq2slate_net = seq2slate_net - self.minibatch_size = minibatch_size - self.minibatch = 0 - self.optimizer = self.parameters.transformer.optimizer.make_optimizer( - self.seq2slate_net.parameters() - ) + self.policy_optimizer = policy_optimizer self.kl_div_loss = nn.KLDivLoss(reduction="batchmean") - def warm_start_components(self): - components = ["seq2slate_net"] - return components + # use manual optimization to get more flexibility + self.automatic_optimization = False + + assert not calc_cpe or reward_network is not None + self.calc_cpe = calc_cpe + self.reward_network = reward_network - def train(self, training_batch: rlt.PreprocessedTrainingBatch): - assert type(training_batch) is rlt.PreprocessedTrainingBatch - training_input = training_batch.training_input - assert isinstance(training_input, rlt.PreprocessedRankingInput) + def configure_optimizers(self): + optimizers = [] + optimizers.append( + self.policy_optimizer.make_optimizer_scheduler( + self.seq2slate_net.parameters() + ) + ) + return optimizers + + # pyre-fixme [14]: overrides method defined in `ReAgentLightningModule` inconsistently + def training_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int): + assert type(batch) is rlt.PreprocessedRankingInput log_probs = self.seq2slate_net( - training_input, mode=Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE + batch, mode=Seq2SlateMode.PER_SYMBOL_LOG_PROB_DIST_MODE ).log_probs assert log_probs.requires_grad - assert training_input.optim_tgt_out_idx is not None - # pyre-fixme[6]: Expected `Tensor` for 1st param but got - # `Optional[torch.Tensor]`. 
- labels = self._transform_label(training_input.optim_tgt_out_idx) + assert batch.optim_tgt_out_idx is not None + labels = self._transform_label(batch.optim_tgt_out_idx) assert not labels.requires_grad loss = self.kl_div_loss(log_probs, labels) - self.optimizer.zero_grad() - loss.backward() - self.optimizer.step() + self.manual_backward(loss) + if (self.all_batches_processed + 1) % self.policy_gradient_interval == 0: + opt = self.optimizers()[0] + opt.step() + opt.zero_grad() loss = loss.detach().cpu().numpy() log_probs = log_probs.detach() - self.minibatch += 1 - logger.info(f"{self.minibatch} batch: loss={loss}") - - return log_probs, loss + if (self.all_batches_processed + 1) % self.print_interval == 0: + logger.info(f"{self.all_batches_processed + 1} batch: loss={loss}") def _transform_label(self, optim_tgt_out_idx: torch.Tensor): label_size = self.seq2slate_net.max_src_seq_len + 2 label = F.one_hot(optim_tgt_out_idx, label_size) return label.float() + + # pyre-ignore inconsistent override because lightning doesn't use types + def validation_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int): + seq2slate_net = self.seq2slate_net + + assert seq2slate_net.training is False + + logged_slate_rank_prob = torch.exp( + seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE) + .log_probs.detach() + .flatten() + .cpu() + ) + + ranked_slate_output = seq2slate_net(batch, Seq2SlateMode.RANK_MODE, greedy=True) + ranked_slate_rank_prob = ranked_slate_output.ranked_per_seq_probs.cpu() + + self.reporter.log( + logged_slate_rank_probs=logged_slate_rank_prob, + ranked_slate_rank_probs=ranked_slate_rank_prob, + ) + + if not self.calc_cpe: + return + + edp_g = EvaluationDataPage.create_from_tensors_seq2slate( + seq2slate_net, + # pyre-fixme[6]: Expected `Module` for 2nd param but got + # `Optional[nn.Module]`. + self.reward_network, + batch, + eval_greedy=True, + ) + + edp_ng = EvaluationDataPage.create_from_tensors_seq2slate( + seq2slate_net, + # pyre-fixme[6]: Expected `Module` for 2nd param but got + # `Optional[nn.Module]`. + self.reward_network, + batch, + eval_greedy=False, + ) + + return edp_g, edp_ng + + # pyre-fixme[14]: Inconsistent override + def validation_epoch_end( + self, outputs: Optional[List[Tuple[EvaluationDataPage, EvaluationDataPage]]] + ): + if self.calc_cpe: + assert outputs is not None + eval_data_pages_g, eval_data_pages_ng = None, None + for edp_g, edp_ng in outputs: + if eval_data_pages_g is None and eval_data_pages_ng is None: + eval_data_pages_g = edp_g + eval_data_pages_ng = edp_ng + else: + # pyre-fixme[16]: `Optional` has no attribute `append` + eval_data_pages_g.append(edp_g) + eval_data_pages_ng.append(edp_ng) + self.reporter.log( + eval_data_pages_g=eval_data_pages_g, + eval_data_pages_ng=eval_data_pages_ng, + ) diff --git a/reagent/training/ranking/seq2slate_trainer.py b/reagent/training/ranking/seq2slate_trainer.py index 4b8dd49ac..2351ce70b 100644 --- a/reagent/training/ranking/seq2slate_trainer.py +++ b/reagent/training/ranking/seq2slate_trainer.py @@ -1,184 +1,279 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
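
Aside on the teacher-forcing loss above: the ground-truth slate indices are converted to one-hot distributions over the candidate vocabulary (`max_src_seq_len` plus the two reserved padding/decoder-start symbols) and compared against the model's per-symbol log-probabilities with batch-mean KL divergence. A self-contained sketch with toy shapes, standing in for the seq2slate network output; it is not the trainer itself:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    max_src_seq_len, tgt_seq_len, batch_size = 3, 2, 4
    label_size = max_src_seq_len + 2  # candidates + padding symbol + decoder-start symbol

    # ground-truth output indices (offset by 2 because of the reserved symbols)
    optim_tgt_out_idx = torch.randint(2, label_size, (batch_size, tgt_seq_len))
    labels = F.one_hot(optim_tgt_out_idx, label_size).float()

    # stand-in for the per-symbol log-prob distribution produced by the seq2slate net
    log_probs = F.log_softmax(torch.randn(batch_size, tgt_seq_len, label_size), dim=-1)

    loss = nn.KLDivLoss(reduction="batchmean")(log_probs, labels)
    print(loss)
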
import logging -from typing import Optional +from typing import List, Optional, Tuple -import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import torch -from reagent.core.tracker import observable -from reagent.models.seq2slate import BaselineNet, Seq2SlateMode, Seq2SlateTransformerNet -from reagent.parameters import Seq2SlateTransformerParameters -from reagent.training.trainer import Trainer +import torch.nn as nn +import torch.nn.functional as F +from reagent.core.dataclasses import field +from reagent.core.parameters import Seq2SlateParameters +from reagent.evaluation.evaluation_data_page import EvaluationDataPage +from reagent.model_utils.seq2slate_utils import Seq2SlateMode +from reagent.models.seq2slate import BaselineNet, Seq2SlateTransformerNet +from reagent.optimizer.union import Optimizer__Union +from reagent.training.ranking.helper import ips_clamp +from reagent.training.reagent_lightning_module import ReAgentLightningModule logger = logging.getLogger(__name__) -@observable( - pg_loss=torch.Tensor, train_baseline_loss=torch.Tensor, train_log_probs=torch.Tensor -) -class Seq2SlateTrainer(Trainer): +class Seq2SlateTrainer(ReAgentLightningModule): def __init__( self, seq2slate_net: Seq2SlateTransformerNet, - parameters: Seq2SlateTransformerParameters, - minibatch_size: int, + params: Seq2SlateParameters = field( # noqa: B008 + default_factory=Seq2SlateParameters + ), baseline_net: Optional[BaselineNet] = None, - use_gpu: bool = False, + baseline_warmup_num_batches: int = 0, + policy_optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + baseline_optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + policy_gradient_interval: int = 1, + print_interval: int = 100, + calc_cpe: bool = False, + reward_network: Optional[nn.Module] = None, ) -> None: - self.parameters = parameters - self.use_gpu = use_gpu + super().__init__() self.seq2slate_net = seq2slate_net + self.params = params + self.policy_gradient_interval = policy_gradient_interval + self.print_interval = print_interval + self.baseline_net = baseline_net - self.minibatch_size = minibatch_size - self.minibatch = 0 - self.rl_opt = self.parameters.transformer.optimizer.make_optimizer( - self.seq2slate_net.parameters() + self.baseline_warmup_num_batches = baseline_warmup_num_batches + + self.rl_opt = policy_optimizer + if self.baseline_net: + self.baseline_opt = baseline_optimizer + + # use manual optimization to get more flexibility + self.automatic_optimization = False + + assert not calc_cpe or reward_network is not None + self.calc_cpe = calc_cpe + self.reward_network = reward_network + + def configure_optimizers(self): + optimizers = [] + optimizers.append( + self.rl_opt.make_optimizer_scheduler(self.seq2slate_net.parameters()) ) if self.baseline_net: - assert self.parameters.baseline - # pyre-fixme[16]: `Optional` has no attribute `optimizer`. 
- self.baseline_opt = self.parameters.baseline.optimizer.make_optimizer( - self.baseline_net.parameters() + optimizers.append( + self.baseline_opt.make_optimizer_scheduler( + self.baseline_net.parameters() + ) ) + return optimizers + + def _compute_impt_smpl( + self, model_propensities, logged_propensities + ) -> Tuple[torch.Tensor, torch.Tensor]: + logged_propensities = logged_propensities.reshape(-1, 1) assert ( - self.parameters.importance_sampling_clamp_max is None - or not self.parameters.on_policy - ), ( - "importance_sampling_clamp_max is not useful and should " - "be set to None in on-policy learning" - ) + model_propensities.shape == logged_propensities.shape + and len(model_propensities.shape) == 2 + and model_propensities.shape[1] == 1 + ), f"{model_propensities.shape} {logged_propensities.shape}" - def warm_start_components(self): - components = ["seq2slate_net"] - if self.baseline_net: - components.append("baseline_net") - return components + impt_smpl = model_propensities / logged_propensities + clamped_impt_smpl = ips_clamp(impt_smpl, self.params.ips_clamp) + return impt_smpl, clamped_impt_smpl - def _compute_impt_sampling( - self, model_propensities, logged_propensities - ) -> torch.Tensor: - device = model_propensities.device - batch_size = model_propensities.shape[0] - if not self.parameters.on_policy: - return model_propensities / logged_propensities - # on policy performs no importance sampling correction = setting IS to 1 - return torch.ones(batch_size, 1, device=device) - - def train(self, training_batch: rlt.PreprocessedTrainingBatch): - assert type(training_batch) is rlt.PreprocessedTrainingBatch - training_input = training_batch.training_input - assert isinstance(training_input, rlt.PreprocessedRankingInput) - - batch_size = training_input.state.float_features.shape[0] - device = torch.device("cuda") if self.use_gpu else torch.device("cpu") - - reward = training_input.slate_reward - batch_size = training_input.state.float_features.shape[0] + # pyre-fixme [14]: overrides method defined in `ReAgentLightningModule` inconsistently + def training_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int): + assert type(batch) is rlt.PreprocessedRankingInput + + batch_size = batch.state.float_features.shape[0] + + reward = batch.slate_reward assert reward is not None + optimizers = self.optimizers() + if self.baseline_net: + assert len(optimizers) == 2 + baseline_opt = optimizers[1] + else: + assert len(optimizers) == 1 + rl_opt = optimizers[0] + if self.baseline_net: # Train baseline - # pyre-fixme[29]: `Optional[BaselineNet]` is not a function. 
- b = self.baseline_net(training_input) + b = self.baseline_net(batch) baseline_loss = 1.0 / batch_size * torch.sum((b - reward) ** 2) - self.baseline_opt.zero_grad() - baseline_loss.backward() - self.baseline_opt.step() + baseline_opt.zero_grad() + self.manual_backward(baseline_loss) + baseline_opt.step() else: - b = torch.zeros_like(reward, device=device) - baseline_loss = torch.zeros(1, device=device) + b = torch.zeros_like(reward) + baseline_loss = torch.zeros(1) # Train Seq2Slate using REINFORCE # log probs of tgt seqs - log_probs = self.seq2slate_net( - training_input, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE - ).log_probs + model_propensities = torch.exp( + self.seq2slate_net( + batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE + ).log_probs + ) b = b.detach() assert ( - b.shape == reward.shape == log_probs.shape - ), f"{b.shape} {reward.shape} {log_probs.shape}" + b.shape == reward.shape == model_propensities.shape + ), f"{b.shape} {reward.shape} {model_propensities.shape}" - importance_sampling = self._compute_impt_sampling( - torch.exp(log_probs.detach()), training_input.tgt_out_probs + impt_smpl, clamped_impt_smpl = self._compute_impt_smpl( + model_propensities, batch.tgt_out_probs ) - clamped_importance_sampling = importance_sampling - if self.parameters.importance_sampling_clamp_max is not None: - clamped_importance_sampling = torch.clamp( - importance_sampling, 0, self.parameters.importance_sampling_clamp_max - ) - - assert importance_sampling.shape == reward.shape - - # gradient is only w.r.t log_probs + assert ( + impt_smpl.shape == clamped_impt_smpl.shape == reward.shape + ), f"{impt_smpl.shape} {clamped_impt_smpl.shape} {reward.shape}" + # gradient is only w.r.t model_propensities assert ( not reward.requires_grad - and not importance_sampling.requires_grad - and not clamped_importance_sampling.requires_grad + # pyre-fixme[16]: `Optional` has no attribute `requires_grad`. + and not batch.tgt_out_probs.requires_grad + and impt_smpl.requires_grad + and clamped_impt_smpl.requires_grad and not b.requires_grad - and log_probs.requires_grad ) - # add negative sign because we take gradient descent but we want to # maximize rewards - batch_loss = -clamped_importance_sampling * log_probs * (reward - b) - rl_loss = 1.0 / batch_size * torch.sum(batch_loss) + batch_obj_loss = -clamped_impt_smpl * (reward - b) + obj_loss = torch.mean(batch_obj_loss) + # condition to perform policy gradient update: + # 1. no baseline + # 2. or baseline is present and it passes the warm up stage + # 3. the last policy gradient was performed policy_gradient_interval minibatches ago if ( - self.parameters.baseline is None - # pyre-fixme[16]: `Optional` has no attribute `warmup_num_batches`. - or self.minibatch >= self.parameters.baseline.warmup_num_batches + self.baseline_net is None + or (self.all_batches_processed + 1) >= self.baseline_warmup_num_batches ): - self.rl_opt.zero_grad() - rl_loss.backward() - self.rl_opt.step() + self.manual_backward(obj_loss) + if (self.all_batches_processed + 1) % self.policy_gradient_interval == 0: + rl_opt.step() + rl_opt.zero_grad() else: logger.info("Not update RL model because now is baseline warmup phase") - # obj_rl_loss is the objective we take gradient with regard to - # ips_rl_loss is the sum of importance sampling weighted rewards, which gives - # the same gradient when we don't use baseline or clamp. - # obj_rl_loss is used to get gradient becaue it is in the logarithmic form - # thus more stable. 
- # ips_rl_loss is more useful as an offline evaluation metric - obj_rl_loss = rl_loss.detach().cpu().numpy() - ips_rl_loss = ( - (-1.0 / batch_size * torch.sum(importance_sampling * reward)).cpu().numpy() + ips_loss = torch.mean(-impt_smpl * reward).cpu().detach().numpy() + clamped_ips_loss = ( + torch.mean(-clamped_impt_smpl * reward).cpu().detach().numpy() ) baseline_loss = baseline_loss.detach().cpu().numpy().item() - advantage = (reward - b).detach().cpu().numpy() - log_probs = log_probs.detach().cpu().numpy() + logged_slate_rank_probs = model_propensities.detach().cpu().numpy() - self.minibatch += 1 - if self.minibatch % 10 == 0: + if (self.all_batches_processed + 1) % self.print_interval == 0: logger.info( - "{} batch: obj_rl_loss={}, ips_rl_loss={}, baseline_loss={}, max_ips={}, mean_ips={}, clamp={}".format( - self.minibatch, - obj_rl_loss, - ips_rl_loss, + "{} batch: ips_loss={}, clamped_ips_loss={}, baseline_loss={}, max_ips={}, mean_ips={}, grad_update={}".format( + self.all_batches_processed + 1, + ips_loss, + clamped_ips_loss, baseline_loss, - torch.max(importance_sampling), - torch.mean(importance_sampling), - self.parameters.importance_sampling_clamp_max, + torch.max(impt_smpl), + torch.mean(impt_smpl), + (self.all_batches_processed + 1) % self.policy_gradient_interval + == 0, ) ) - - # ips_rl_loss is the policy_gradient_loss. - # See RankingTrainingPageHandler.finish() function in page_handler.py - # pyre-fixme[16]: `Seq2SlateTrainer` has no attribute - # `notify_observers`. - self.notify_observers( - pg_loss=torch.tensor(ips_rl_loss).reshape(1), + self.reporter.log( + train_ips_score=torch.tensor(ips_loss).reshape(1), + train_clamped_ips_score=torch.tensor(clamped_ips_loss).reshape(1), train_baseline_loss=torch.tensor(baseline_loss).reshape(1), - train_log_probs=torch.FloatTensor(log_probs), + train_logged_slate_rank_probs=torch.FloatTensor(logged_slate_rank_probs), + train_ips_ratio=impt_smpl, + train_clamped_ips_ratio=clamped_impt_smpl, + train_advantages=advantage, + ) + + # pyre-ignore inconsistent override because lightning doesn't use types + def validation_step(self, batch: rlt.PreprocessedRankingInput, batch_idx: int): + seq2slate_net = self.seq2slate_net + + assert seq2slate_net.training is False + + logged_slate_rank_prob = torch.exp( + seq2slate_net(batch, mode=Seq2SlateMode.PER_SEQ_LOG_PROB_MODE) + .log_probs.detach() + .flatten() + .cpu() + ) + + eval_baseline_loss = torch.tensor([0.0]).reshape(1) + if self.baseline_net: + baseline_net = self.baseline_net + b = baseline_net(batch).detach() + # pyre-fixme[6]: Expected `Tensor` for 2nd param but got + # `Optional[torch.Tensor]`. + eval_baseline_loss = F.mse_loss(b, batch.slate_reward).cpu().reshape(1) + else: + # pyre-fixme[6]: For 1st param expected `Tensor` but got `Optional[Tensor]`. + b = torch.zeros_like(batch.slate_reward) + + eval_advantage = ( + # pyre-fixme[58]: `-` is not supported for operand types + # `Optional[torch.Tensor]` and `Any`. 
+ (batch.slate_reward - b) + .flatten() + .cpu() + ) + + ranked_slate_output = seq2slate_net(batch, Seq2SlateMode.RANK_MODE, greedy=True) + ranked_slate_rank_prob = ranked_slate_output.ranked_per_seq_probs.cpu() + + self.reporter.log( + eval_baseline_loss=eval_baseline_loss, + eval_advantages=eval_advantage, + logged_slate_rank_probs=logged_slate_rank_prob, + ranked_slate_rank_probs=ranked_slate_rank_prob, ) - return { - "per_seq_probs": np.exp(log_probs), - "advantage": advantage, - "obj_rl_loss": obj_rl_loss, - "ips_rl_loss": ips_rl_loss, - "baseline_loss": baseline_loss, - } + if not self.calc_cpe: + return + + edp_g = EvaluationDataPage.create_from_tensors_seq2slate( + seq2slate_net, + # pyre-fixme[6]: Expected `Module` for 2nd param but got + # `Optional[nn.Module]`. + self.reward_network, + batch, + eval_greedy=True, + ) + + edp_ng = EvaluationDataPage.create_from_tensors_seq2slate( + seq2slate_net, + # pyre-fixme[6]: Expected `Module` for 2nd param but got + # `Optional[nn.Module]`. + self.reward_network, + batch, + eval_greedy=False, + ) + + return edp_g, edp_ng + + # pyre-fixme[14]: Inconsistent override + def validation_epoch_end( + self, outputs: Optional[List[Tuple[EvaluationDataPage, EvaluationDataPage]]] + ): + if self.calc_cpe: + assert outputs is not None + eval_data_pages_g, eval_data_pages_ng = None, None + for edp_g, edp_ng in outputs: + if eval_data_pages_g is None and eval_data_pages_ng is None: + eval_data_pages_g = edp_g + eval_data_pages_ng = edp_ng + else: + # pyre-fixme[16]: `Optional` has no attribute `append` + eval_data_pages_g.append(edp_g) + eval_data_pages_ng.append(edp_ng) + self.reporter.log( + eval_data_pages_g=eval_data_pages_g, + eval_data_pages_ng=eval_data_pages_ng, + ) diff --git a/reagent/training/reagent_lightning_module.py b/reagent/training/reagent_lightning_module.py new file mode 100644 index 000000000..93e792c21 --- /dev/null +++ b/reagent/training/reagent_lightning_module.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
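
For reference, the REINFORCE objective in the rewritten `Seq2SlateTrainer.training_step` above reduces to an importance-sampling-weighted advantage with an optional clamp on the ratio. A compact sketch of just the loss arithmetic, assuming a simple max-clamp in place of `ips_clamp`; the function name and shapes are illustrative, not the trainer API:

    import torch

    def seq2slate_reinforce_loss(model_propensities, logged_propensities, reward, baseline, clamp_max=None):
        # importance sampling ratio between the ranking policy and the logging policy
        impt_smpl = model_propensities / logged_propensities.reshape(-1, 1)
        clamped_impt_smpl = impt_smpl if clamp_max is None else impt_smpl.clamp(max=clamp_max)
        # negative sign: gradient descent on this loss maximizes expected slate reward
        obj_loss = torch.mean(-clamped_impt_smpl * (reward - baseline.detach()))
        ips_score = torch.mean(-impt_smpl * reward).detach()
        return obj_loss, ips_score

    probs = torch.tensor([[0.2], [0.6]], requires_grad=True)
    logged = torch.tensor([0.25, 0.5])
    print(seq2slate_reinforce_loss(probs, logged, torch.tensor([[1.0], [0.0]]), torch.zeros(2, 1), clamp_max=3.0))
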
+ +import inspect +import logging + +import pytorch_lightning as pl +import torch +from reagent.core.tensorboardX import SummaryWriterContext +from reagent.core.utils import lazy_property +from typing_extensions import final + + +logger = logging.getLogger(__name__) + + +class ReAgentLightningModule(pl.LightningModule): + def __init__(self, automatic_optimization=True): + super().__init__() + self._automatic_optimization = automatic_optimization + self._training_step_generator = None + self._reporter = pl.loggers.base.DummyExperiment() + # For the generator API + self._verified_steps = False + # For summary_writer property + self._summary_writer_logger = None + self._summary_writer = None + # To enable incremental training + self.register_buffer("_next_stopping_epoch", None) + self.register_buffer("_cleanly_stopped", None) + self._next_stopping_epoch = torch.tensor([-1]).int() + self._cleanly_stopped = torch.ones(1) + self._setup_input_type() + # Counters + self.train_batches_processed_this_epoch = 0 + self.val_batches_processed_this_epoch = 0 + self.test_batches_processed_this_epoch = 0 + self.all_batches_processed = 0 + + def _setup_input_type(self): + self._training_batch_type = None + sig = inspect.signature(self.train_step_gen) + assert "training_batch" in sig.parameters + param = sig.parameters["training_batch"] + annotation = param.annotation + if annotation == inspect.Parameter.empty: + return + if hasattr(annotation, "from_dict"): + self._training_batch_type = annotation + + def set_reporter(self, reporter): + if reporter is None: + reporter = pl.loggers.base.DummyExperiment() + self._reporter = reporter + return self + + @property + def reporter(self): + return self._reporter + + def set_clean_stop(self, clean_stop: bool): + self._cleanly_stopped[0] = int(clean_stop) + + def increase_next_stopping_epochs(self, num_epochs: int): + self._next_stopping_epoch += num_epochs + self.set_clean_stop(False) + return self + + def train_step_gen(self, training_batch, batch_idx: int): + """ + Implement training step as generator here + """ + raise NotImplementedError + + def soft_update_result(self) -> torch.Tensor: + """ + A dummy loss to trigger soft-update + """ + one = torch.ones(1, requires_grad=True) + return one + one + + @property + def summary_writer(self): + """ + Accessor to TensorBoard's SummaryWriter + """ + if self._summary_writer_logger is self.logger: + # If self.logger doesn't change between call, then return cached result + return self._summary_writer + + # Invalidate + self._summary_writer = None + self._summary_writer_logger = self.logger + + if isinstance(self.logger, pl.loggers.base.LoggerCollection): + for logger in self.logger._logger_iterable: + if isinstance(logger, pl.loggers.tensorboard.TensorBoardLogger): + self._summary_writer = logger.experiment + break + elif isinstance(logger, pl.loggers.tensorboard.TensorBoardLogger): + self._summary_writer = logger.experiment + + return self._summary_writer + + # pyre-fixme[14]: `training_step` overrides method defined in `LightningModule` + # inconsistently. 
+ def training_step(self, batch, batch_idx: int, optimizer_idx: int = 0): + assert (optimizer_idx == 0) or (self._num_optimizing_steps > 1) + + if self._training_step_generator is None: + if self._training_batch_type and isinstance(batch, dict): + batch = self._training_batch_type.from_dict(batch) + self._training_step_generator = self.train_step_gen(batch, batch_idx) + + ret = next(self._training_step_generator) + + if optimizer_idx == self._num_optimizing_steps - 1: + if not self._verified_steps: + try: + next(self._training_step_generator) + except StopIteration: + self._verified_steps = True + if not self._verified_steps: + raise RuntimeError( + "training_step_gen() yields too many times." + "The number of yields should match the number of optimizers," + f" in this case {self._num_optimizing_steps}" + ) + self._training_step_generator = None + SummaryWriterContext.increase_global_step() + + return ret + + def optimizers(self, use_pl_optimizer: bool = True): + o = super().optimizers(use_pl_optimizer) + if isinstance(o, list): + return o + return [o] + + @lazy_property + def _num_optimizing_steps(self) -> int: + return len(self.configure_optimizers()) + + @final + def on_train_epoch_end(self): + logger.info( + f"Finished train epoch {self.current_epoch} " + f"with {self.train_batches_processed_this_epoch} batches processed" + ) + + self.reporter.flush(self.current_epoch) + self.train_batches_processed_this_epoch = 0 + + # Tell the trainer to stop. + if self.current_epoch == self._next_stopping_epoch.item(): + self.trainer.should_stop = True + + @final + def on_validation_epoch_end(self): + logger.info( + f"Finished validation epoch {self.current_epoch} " + f"with {self.val_batches_processed_this_epoch} batches processed" + ) + self.reporter.flush(self.current_epoch) + self.val_batches_processed_this_epoch = 0 + + @final + def on_test_epoch_end(self): + logger.info( + f"Finished a test epoch with {self.test_batches_processed_this_epoch} batches processed" + ) + self.reporter.flush(self.current_epoch) + self.test_batches_processed_this_epoch = 0 + + @final + def on_train_batch_end(self, *args, **kwargs): + self.train_batches_processed_this_epoch += 1 + self.all_batches_processed += 1 + + @final + def on_validation_batch_end(self, *args, **kwargs): + self.val_batches_processed_this_epoch += 1 + self.all_batches_processed += 1 + + @final + def on_test_batch_end(self, *args, **kwargs): + self.test_batches_processed_this_epoch += 1 + self.all_batches_processed += 1 + + def train(self, *args): + # trainer.train(batch) was the old, pre-Lightning ReAgent trainer API. + # make sure that nobody is trying to call trainer.train() this way. + # trainer.train() or trainer.train(True/False) is allowed - this puts the network into training/eval mode. + if (len(args) == 0) or ((len(args) == 1) and (isinstance(args[0], bool))): + super().train(*args) + else: + raise NotImplementedError( + "Method .train() is not used for ReAgent Lightning trainers. Please use .fit() method of the pl.Trainer instead" + ) + + +class StoppingEpochCallback(pl.Callback): + """ + We use this callback to control the number of training epochs in incremental + training. Epoch & step counts are not reset in the checkpoint. If we were to set + `max_epochs` on the trainer, we would have to keep track of the previous `max_epochs` + and add to it manually. This keeps the infomation in one place. + + Note that we need to set `_cleanly_stopped` back to True before saving the checkpoint. + This is done in `ModelManager.save_trainer()`. 
+ """ + + def __init__(self, num_epochs): + super().__init__() + self.num_epochs = num_epochs + + def on_pretrain_routine_end(self, trainer, pl_module): + assert isinstance(pl_module, ReAgentLightningModule) + cleanly_stopped = pl_module._cleanly_stopped.item() + logger.info(f"cleanly stopped: {cleanly_stopped}") + if cleanly_stopped: + pl_module.increase_next_stopping_epochs(self.num_epochs) + + +def has_test_step_override(trainer_module: ReAgentLightningModule): + """Detect if a subclass of LightningModule has test_step overridden""" + return type(trainer_module).test_step != pl.LightningModule.test_step diff --git a/reagent/training/reinforce_trainer.py b/reagent/training/reinforce_trainer.py new file mode 100644 index 000000000..d6be880d7 --- /dev/null +++ b/reagent/training/reinforce_trainer.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +import inspect +import logging +import math +from dataclasses import field +from typing import List, Optional + +import reagent.core.types as rlt +import torch +import torch.optim +from reagent.gym.policies.policy import Policy +from reagent.models.base import ModelBase +from reagent.optimizer.union import Optimizer__Union +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.utils import discounted_returns, whiten + +logger = logging.getLogger(__name__) + + +class ReinforceTrainer(ReAgentLightningModule): + def __init__( + self, + policy: Policy, + gamma: float = 0.0, + optimizer: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + optimizer_value_net: Optimizer__Union = field( # noqa: B008 + default_factory=Optimizer__Union.default + ), + actions: List[str] = field(default_factory=list), # noqa: B008 + off_policy: bool = False, + reward_clip: float = 1e6, + clip_param: float = 1e6, + normalize: bool = True, + subtract_mean: bool = True, + offset_clamp_min: bool = False, + value_net: Optional[ModelBase] = None, + do_log_metrics: bool = False, + ): + super().__init__() + self._actions = actions + self.scorer = policy.scorer + self.sampler = policy.sampler + self.gamma = gamma + self.off_policy = off_policy + self.reward_clip = reward_clip + self.clip_param = clip_param + self.normalize = normalize + self.subtract_mean = subtract_mean + self.offset_clamp_min = offset_clamp_min + self.optimizer = optimizer + self.optimizer_value_net = optimizer_value_net + if value_net is not None: + if self.normalize or self.subtract_mean: + raise RuntimeError( + "Can't apply a baseline and reward normalization \ + (or mean subtraction) simultaneously." 
+ ) + self.value_net = value_net + self.value_loss_fn = torch.nn.MSELoss(reduction="mean") + else: + self.value_net = None + self.do_log_metrics = do_log_metrics + if self.do_log_metrics: + self.losses = [] + self.ips_ratio_means = [] + + def _check_input(self, training_batch: rlt.PolicyGradientInput): + assert training_batch.reward.ndim == 1 + if self.off_policy: + assert training_batch.log_prob.ndim == 1 + + def configure_optimizers(self): + optimizers = [] + # value net optimizer + if self.value_net is not None: + optimizers.append( + self.optimizer_value_net.make_optimizer_scheduler( + self.value_net.parameters() + ) + ) + # policy optimizer + optimizers.append( + self.optimizer.make_optimizer_scheduler(self.scorer.parameters()) + ) + + return optimizers + + def train_step_gen(self, training_batch: rlt.PolicyGradientInput, batch_idx: int): + self._check_input(training_batch) + actions = training_batch.action + rewards = training_batch.reward.detach() + scorer_inputs = [] + if inspect.getattr_static(training_batch, "graph", None) is not None: + # GNN + scorer_inputs.append(training_batch.graph) + else: + scorer_inputs.append(training_batch.state) + if training_batch.possible_actions_mask is not None: + scorer_inputs.append(training_batch.possible_actions_mask) + scores = self.scorer(*scorer_inputs) + characteristic_eligibility = self.sampler.log_prob(scores, actions).float() + offset_reinforcement = discounted_returns( + torch.clamp(rewards, max=self.reward_clip).clone(), self.gamma + ) + if self.normalize: + offset_reinforcement = whiten( + offset_reinforcement, subtract_mean=self.subtract_mean + ) + elif self.subtract_mean: + offset_reinforcement -= offset_reinforcement.mean() + if self.offset_clamp_min: + offset_reinforcement = offset_reinforcement.clamp(min=0) + if self.value_net is not None: + assert not (self.normalize or self.subtract_mean) + baselines = self.value_net(training_batch.state).squeeze() + yield self.value_loss_fn(baselines, offset_reinforcement) + # subtract learned value function baselines from rewards + offset_reinforcement = offset_reinforcement - baselines + + if self.off_policy: + characteristic_eligibility = torch.exp( + torch.clamp( + characteristic_eligibility - training_batch.log_prob, + max=math.log(float(self.clip_param)), + ) + ).float() + + loss = -(offset_reinforcement.float().detach()) @ characteristic_eligibility + if self.do_log_metrics: + detached_loss = loss.detach().cpu().item() / len(offset_reinforcement) + self.losses.append(detached_loss) + detached_ips_ratio_mean = ( + characteristic_eligibility.detach().mean().cpu().item() + ) + self.ips_ratio_means.append(detached_ips_ratio_mean) + assert self.logger is not None + self.logger.log_metrics( + { + "Training_loss/per_iteration": detached_loss, + "IPS_ratio_mean/per_iteration": detached_ips_ratio_mean, + }, + step=self.all_batches_processed, + ) + yield loss + + def training_epoch_end(self, training_step_outputs): + if self.do_log_metrics: + self.logger.log_metrics( + { + "Training_loss/per_epoch": sum(self.losses) / len(self.losses), + "IPS_ratio_mean/per_epoch": sum(self.ips_ratio_means) + / len(self.ips_ratio_means), + }, + step=self.current_epoch, + ) + self.losses = [] + self.ips_ratio_means = [] diff --git a/reagent/training/reward_network_trainer.py b/reagent/training/reward_network_trainer.py index 15fd4a336..7378e0f75 100644 --- a/reagent/training/reward_network_trainer.py +++ b/reagent/training/reward_network_trainer.py @@ -1,54 +1,189 @@ #!/usr/bin/env python3 # Copyright (c) 
Facebook, Inc. and its affiliates. All rights reserved. import logging +from enum import Enum +from typing import Optional -import reagent.types as rlt +import numpy as np +import reagent.core.types as rlt import torch from reagent.core.dataclasses import field from reagent.models.base import ModelBase from reagent.optimizer.union import Optimizer__Union -from reagent.training.trainer import Trainer +from reagent.training.reagent_lightning_module import ReAgentLightningModule logger = logging.getLogger(__name__) -class RewardNetTrainer(Trainer): +class LossFunction(Enum): + MSE = "MSE_Loss" + SmoothL1Loss = "SmoothL1_Loss" + L1Loss = "L1_Loss" + BCELoss = "BCE_Loss" + + +def _get_loss_function( + loss_fn: LossFunction, + reward_ignore_threshold: Optional[float], + weighted_by_inverse_propensity: bool, +): + reduction_type = "none" + + if loss_fn == LossFunction.MSE: + torch_fn = torch.nn.MSELoss(reduction=reduction_type) + elif loss_fn == LossFunction.SmoothL1Loss: + torch_fn = torch.nn.SmoothL1Loss(reduction=reduction_type) + elif loss_fn == LossFunction.L1Loss: + torch_fn = torch.nn.L1Loss(reduction=reduction_type) + elif loss_fn == LossFunction.BCELoss: + torch_fn = torch.nn.BCELoss(reduction=reduction_type) + + def wrapper_loss_fn(pred, target, weight, batch): + if loss_fn == LossFunction.BCELoss: + valid_step = batch.valid_step + assert valid_step is not None + pred = pred / valid_step + target = target / valid_step + + loss = torch_fn(pred, target) + + if weighted_by_inverse_propensity: + assert weight.shape == loss.shape + loss = loss * weight + + # ignore abnormal reward only during training + if pred.requires_grad and reward_ignore_threshold is not None: + loss = loss[target <= reward_ignore_threshold] + assert len(loss) > 0, ( + f"reward ignore threshold set too small. 
target={target}, " + f"threshold={reward_ignore_threshold}" + ) + + return torch.mean(loss) + + return wrapper_loss_fn + + +class RewardNetTrainer(ReAgentLightningModule): def __init__( self, reward_net: ModelBase, - minibatch_size: int, - use_gpu: bool = False, optimizer: Optimizer__Union = field( # noqa: B008 default_factory=Optimizer__Union.default ), + loss_type: LossFunction = LossFunction.MSE, + reward_ignore_threshold: Optional[float] = None, + weighted_by_inverse_propensity: bool = False, ) -> None: + super().__init__() self.reward_net = reward_net - self.use_gpu = use_gpu - self.minibatch_size = minibatch_size - self.minibatch = 0 - self.loss_fn = torch.nn.MSELoss(reduction="mean") - self.opt = optimizer.make_optimizer(self.reward_net.parameters()) - - def train(self, training_batch: rlt.PreprocessedTrainingBatch): - training_input = training_batch.training_input - if isinstance(training_input, rlt.PreprocessedRankingInput): - target_reward = training_input.slate_reward + self.optimizer = optimizer + self.loss_type = loss_type + self.reward_ignore_threshold = reward_ignore_threshold + self.weighted_by_inverse_propensity = weighted_by_inverse_propensity + self.loss_fn = _get_loss_function( + loss_type, reward_ignore_threshold, weighted_by_inverse_propensity + ) + + def configure_optimizers(self): + optimizers = [] + optimizers.append( + self.optimizer.make_optimizer_scheduler(self.reward_net.parameters()) + ) + return optimizers + + def _get_sample_weight(self, batch: rlt.PreprocessedRankingInput): + weight = None + if self.weighted_by_inverse_propensity: + if isinstance(batch, rlt.PreprocessedRankingInput): + assert batch.tgt_out_probs is not None + # pyre-fixme[58]: `/` is not supported for operand types `float` and + # `Tensor`. + weight = 1.0 / batch.tgt_out_probs + else: + raise NotImplementedError( + f"Sampling weighting not implemented for {type(batch)}" + ) + return weight + + def _get_target_reward(self, batch: rlt.PreprocessedRankingInput): + if isinstance(batch, rlt.PreprocessedRankingInput): + target_reward = batch.slate_reward else: - target_reward = training_input.reward + target_reward = batch.reward + assert target_reward is not None + return target_reward + + @torch.no_grad() + def _compute_unweighted_loss( + self, + predicted_reward: torch.Tensor, + target_reward: torch.Tensor, + batch: rlt.PreprocessedRankingInput, + ): + return self.loss_fn( + predicted_reward, + target_reward, + weight=torch.ones_like(predicted_reward), + batch=batch, + ) + + def train_step_gen( + self, training_batch: rlt.PreprocessedRankingInput, batch_idx: int + ): + weight = self._get_sample_weight(training_batch) + target_reward = self._get_target_reward(training_batch) + predicted_reward = self.reward_net(training_batch).predicted_reward + + assert ( + predicted_reward.shape == target_reward.shape + and len(target_reward.shape) == 2 + and target_reward.shape[1] == 1 + ) + + loss = self.loss_fn(predicted_reward, target_reward, weight, training_batch) + + detached_loss = loss.detach().cpu() + self.reporter.log(loss=detached_loss) + + if weight is not None: + unweighted_loss = self._compute_unweighted_loss( + predicted_reward, target_reward, training_batch + ) + self.reporter.log(unweighted_loss=unweighted_loss) + + if self.all_batches_processed % 10 == 0: + logger.info( + f"{self.all_batches_processed}-th batch: " + f"{self.loss_type}={detached_loss.item()}" + ) + + yield loss + + # pyre-ignore inconsistent override because lightning doesn't use types + def validation_step(self, batch: 
rlt.PreprocessedRankingInput, batch_idx: int): + reward = self._get_target_reward(batch) + self.reporter.log(eval_rewards=reward.flatten().detach().cpu()) + + pred_reward = self.reward_net(batch).predicted_reward + self.reporter.log(eval_pred_rewards=pred_reward.flatten().detach().cpu()) + + weight = self._get_sample_weight(batch) + + loss = self.loss_fn(pred_reward, reward, weight, batch) + + detached_loss = loss.detach().cpu() + self.reporter.log(eval_loss=detached_loss) - predicted_reward = self.reward_net(training_input).predicted_reward - mse_loss = self.loss_fn(predicted_reward, target_reward) - self.opt.zero_grad() - mse_loss.backward() - self.opt.step() - mse_loss = mse_loss.detach() + if weight is not None: + unweighted_loss = self._compute_unweighted_loss(pred_reward, reward, batch) + self.reporter.log(eval_unweighted_loss=unweighted_loss) - self.minibatch += 1 - if self.minibatch % 10 == 0: - logger.info("{}-th batch: mse_loss={}".format(self.minibatch, mse_loss)) + return detached_loss.item() - return mse_loss + def validation_epoch_end(self, outputs): + self.reporter.update_best_model(np.mean(outputs), self.reward_net) def warm_start_components(self): return ["reward_net"] diff --git a/reagent/training/rl_trainer_pytorch.py b/reagent/training/rl_trainer_pytorch.py index c32cd4541..edc5277f2 100644 --- a/reagent/training/rl_trainer_pytorch.py +++ b/reagent/training/rl_trainer_pytorch.py @@ -2,255 +2,70 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging -from typing import List, Optional +from typing import Optional -import torch -import torch.nn.functional as F -from reagent.optimizer.union import Optimizer__Union -from reagent.parameters import EvaluationParameters, RLParameters -from reagent.torch_utils import masked_softmax -from reagent.training.loss_reporter import LossReporter -from reagent.training.trainer import Trainer +from reagent.core.parameters import RLParameters logger = logging.getLogger(__name__) -class RLTrainer(Trainer): +# pyre-fixme[13]: Attribute `rl_parameters` is never initialized. +class RLTrainerMixin: # Q-value for action that is not possible. 
Guaranteed to be worse than any # legitimate action ACTION_NOT_POSSIBLE_VAL = -1e9 - # Hack to mark legitimate 0 value q-values before pytorch sparse -> dense - FINGERPRINT = 12345 - def __init__( - self, - rl_parameters: RLParameters, - use_gpu: bool, - metrics_to_score=None, - actions: Optional[List[str]] = None, - evaluation_parameters: Optional[EvaluationParameters] = None, - loss_reporter=None, - ) -> None: - self.minibatch = 0 - self.minibatch_size: Optional[int] = None - self.minibatches_per_step: Optional[int] = None - self.rl_parameters = rl_parameters - self.rl_temperature = float(rl_parameters.temperature) - self.maxq_learning = rl_parameters.maxq_learning - self.gamma = rl_parameters.gamma - self.tau = rl_parameters.target_update_rate - self.use_seq_num_diff_as_time_diff = rl_parameters.use_seq_num_diff_as_time_diff - self.time_diff_unit_length = rl_parameters.time_diff_unit_length - self.tensorboard_logging_freq = rl_parameters.tensorboard_logging_freq - self.multi_steps = rl_parameters.multi_steps - self.calc_cpe_in_training = ( - evaluation_parameters and evaluation_parameters.calc_cpe_in_training - ) - - if rl_parameters.q_network_loss == "mse": - self.q_network_loss = F.mse_loss - elif rl_parameters.q_network_loss == "huber": - self.q_network_loss = F.smooth_l1_loss - else: - raise Exception( - "Q-Network loss type {} not valid loss.".format( - rl_parameters.q_network_loss - ) - ) - - if metrics_to_score: - self.metrics_to_score = metrics_to_score + ["reward"] - else: - self.metrics_to_score = ["reward"] - - cuda_available = torch.cuda.is_available() - logger.info("CUDA availability: {}".format(cuda_available)) - if use_gpu and cuda_available: - logger.info("Using GPU: GPU requested and available.") - self.use_gpu = True - self.device = torch.device("cuda") - else: - logger.info("NOT Using GPU: GPU not requested or not available.") - self.use_gpu = False - self.device = torch.device("cpu") - - self.loss_reporter = loss_reporter or LossReporter(actions) - self._actions = actions + # todo potential inconsistencies + _use_seq_num_diff_as_time_diff = None + _maxq_learning = None + _multi_steps = None + rl_parameters: RLParameters @property - def num_actions(self) -> int: - assert self._actions is not None, "Not a discrete action DQN" - # pyre-fixme[6]: Expected `Sized` for 1st param but got `Optional[List[str]]`. - return len(self._actions) - - def _initialize_cpe( - self, - reward_network, - q_network_cpe, - q_network_cpe_target, - optimizer: Optimizer__Union, - ) -> None: - if self.calc_cpe_in_training: - assert reward_network is not None, "reward_network is required for CPE" - # pyre-fixme[16]: `RLTrainer` has no attribute `reward_network`. - self.reward_network = reward_network - # pyre-fixme[16]: `RLTrainer` has no attribute `reward_network_optimizer`. - self.reward_network_optimizer = optimizer.make_optimizer( - self.reward_network.parameters() - ) - assert ( - q_network_cpe is not None and q_network_cpe_target is not None - ), "q_network_cpe and q_network_cpe_target are required for CPE" - # pyre-fixme[16]: `RLTrainer` has no attribute `q_network_cpe`. - self.q_network_cpe = q_network_cpe - # pyre-fixme[16]: `RLTrainer` has no attribute `q_network_cpe_target`. - self.q_network_cpe_target = q_network_cpe_target - # pyre-fixme[16]: `RLTrainer` has no attribute `q_network_cpe_optimizer`. 
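# NOTE (reviewer annotation, not part of this patch): a miniature of the RLTrainerMixin
# pattern introduced in this hunk. Hyperparameters stay on the RLParameters dataclass
# and the mixin exposes them as properties (with optional per-instance overrides, see
# the gamma/tau/maxq_learning properties defined below in this hunk) instead of copying
# each field onto the trainer in __init__. The Toy* names are illustrative, not ReAgent APIs.
from dataclasses import dataclass

@dataclass
class ToyRLParameters:
    gamma: float = 0.99
    target_update_rate: float = 0.005
    maxq_learning: bool = True

class ToyTrainerMixin:
    rl_parameters: ToyRLParameters
    _maxq_learning = None  # per-instance override; None means "use rl_parameters"

    @property
    def gamma(self) -> float:
        return self.rl_parameters.gamma

    @property
    def tau(self) -> float:
        return self.rl_parameters.target_update_rate

    @property
    def maxq_learning(self) -> bool:
        return (
            self.rl_parameters.maxq_learning
            if self._maxq_learning is None
            else self._maxq_learning
        )

    @maxq_learning.setter
    def maxq_learning(self, value: bool) -> None:
        self._maxq_learning = value

class ToyTrainer(ToyTrainerMixin):
    def __init__(self, rl: ToyRLParameters) -> None:
        self.rl_parameters = rl

trainer = ToyTrainer(ToyRLParameters())
print(trainer.gamma, trainer.tau, trainer.maxq_learning)  # 0.99 0.005 True
trainer.maxq_learning = False  # override without mutating rl_parameters
print(trainer.maxq_learning)   # False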
- self.q_network_cpe_optimizer = optimizer.make_optimizer( - self.q_network_cpe.parameters() - ) - num_output_nodes = len(self.metrics_to_score) * self.num_actions - # pyre-fixme[16]: `RLTrainer` has no attribute `reward_idx_offsets`. - self.reward_idx_offsets = torch.arange( - 0, - num_output_nodes, - self.num_actions, - device=self.device, - dtype=torch.long, - ) - else: - self.reward_network = None - - @torch.no_grad() - def _soft_update(self, network, target_network, tau) -> None: - """ Target network update logic as defined in DDPG paper - updated_params = tau * network_params + (1 - tau) * target_network_params - :param network network with parameters to include in soft update - :param target_network target network with params to soft update - :param tau hyperparameter to control target tracking speed - """ - for t_param, param in zip(target_network.parameters(), network.parameters()): - if t_param is param: - # Skip soft-updating when the target network shares the parameter with - # the network being train. - continue - new_param = tau * param.data + (1.0 - tau) * t_param.data - t_param.data.copy_(new_param) - - @torch.no_grad() - def _maybe_soft_update( - self, network, target_network, tau, minibatches_per_step - ) -> None: - if self.minibatch % minibatches_per_step != 0: - return - self._soft_update(network, target_network, tau) - - def _maybe_run_optimizer(self, optimizer, minibatches_per_step) -> None: - if self.minibatch % minibatches_per_step != 0: - return - for group in optimizer.param_groups: - for p in group["params"]: - if p.grad is not None: - p.grad /= minibatches_per_step - optimizer.step() - optimizer.zero_grad() + def gamma(self) -> float: + return self.rl_parameters.gamma - @torch.no_grad() - def _calculate_cpes( - self, - training_batch, - states, - next_states, - all_action_scores, - all_next_action_scores, - logged_action_idxs, - discount_tensor, - not_done_mask, - ): - if not self.calc_cpe_in_training: - return None, None, None - - if training_batch.extras.metrics is None: - metrics_reward_concat_real_vals = training_batch.reward - else: - metrics_reward_concat_real_vals = torch.cat( - (training_batch.reward, training_batch.extras.metrics), dim=1 - ) + @property + def tau(self) -> float: + return self.rl_parameters.target_update_rate - model_propensities_next_states = masked_softmax( - all_next_action_scores, - training_batch.possible_next_actions_mask - if self.maxq_learning - else training_batch.next_action, - self.rl_temperature, + @property + def multi_steps(self) -> Optional[int]: + return ( + self.rl_parameters.multi_steps + if self._multi_steps is None + else self._multi_steps ) - with torch.enable_grad(): - ######### Train separate reward network for CPE evaluation ############# - reward_estimates = self.reward_network(states) - reward_estimates_for_logged_actions = reward_estimates.gather( - 1, self.reward_idx_offsets + logged_action_idxs - ) - reward_loss = F.mse_loss( - reward_estimates_for_logged_actions, metrics_reward_concat_real_vals - ) - reward_loss.backward() - self._maybe_run_optimizer( - self.reward_network_optimizer, self.minibatches_per_step - ) + @multi_steps.setter + def multi_steps(self, multi_steps): + self._multi_steps = multi_steps - ######### Train separate q-network for CPE evaluation ############# - metric_q_values = self.q_network_cpe(states).gather( - 1, self.reward_idx_offsets + logged_action_idxs - ) - all_metrics_target_q_values = torch.chunk( - self.q_network_cpe_target(next_states).detach(), - len(self.metrics_to_score), - 
dim=1, - ) - target_metric_q_values = [] - for i, per_metric_target_q_values in enumerate(all_metrics_target_q_values): - per_metric_next_q_values = torch.sum( - per_metric_target_q_values * model_propensities_next_states, - 1, - keepdim=True, - ) - per_metric_next_q_values = per_metric_next_q_values * not_done_mask - per_metric_target_q_values = metrics_reward_concat_real_vals[ - :, i : i + 1 - ] + (discount_tensor * per_metric_next_q_values) - target_metric_q_values.append(per_metric_target_q_values) + @property + def maxq_learning(self) -> bool: + return ( + self.rl_parameters.maxq_learning + if self._maxq_learning is None + else self._maxq_learning + ) - target_metric_q_values = torch.cat(target_metric_q_values, dim=1) - metric_q_value_loss = self.q_network_loss( - metric_q_values, target_metric_q_values - ) - metric_q_value_loss.backward() - self._maybe_run_optimizer( - self.q_network_cpe_optimizer, self.minibatches_per_step - ) + @maxq_learning.setter + def maxq_learning(self, maxq_learning): + self._maxq_learning = maxq_learning - # Use the soft update rule to update target network - self._maybe_soft_update( - self.q_network_cpe, - self.q_network_cpe_target, - self.tau, - self.minibatches_per_step, + @property + def use_seq_num_diff_as_time_diff(self) -> bool: + return ( + self.rl_parameters.use_seq_num_diff_as_time_diff + if self._use_seq_num_diff_as_time_diff is None + else self._use_seq_num_diff_as_time_diff ) - model_propensities = masked_softmax( - all_action_scores, - training_batch.possible_actions_mask - if self.maxq_learning - else training_batch.action, - self.rl_temperature, - ) - model_rewards = reward_estimates[ - :, - torch.arange( - self.reward_idx_offsets[0], - self.reward_idx_offsets[0] + self.num_actions, - ), - ] - return reward_loss, model_rewards, model_propensities + @use_seq_num_diff_as_time_diff.setter + def use_seq_num_diff_as_time_diff(self, use_seq_num_diff_as_time_diff): + self._use_seq_num_diff_as_time_diff = use_seq_num_diff_as_time_diff + + @property + def rl_temperature(self) -> float: + return self.rl_parameters.temperature diff --git a/reagent/training/sac_trainer.py b/reagent/training/sac_trainer.py index c332a0b01..3c7e0edb2 100644 --- a/reagent/training/sac_trainer.py +++ b/reagent/training/sac_trainer.py @@ -5,34 +5,48 @@ from typing import List, Optional import numpy as np -import reagent.types as rlt +import reagent.core.types as rlt import torch import torch.nn.functional as F from reagent.core.configuration import resolve_defaults -from reagent.core.dataclasses import field -from reagent.core.tracker import observable -from reagent.optimizer.union import Optimizer__Union -from reagent.parameters import RLParameters -from reagent.tensorboardX import SummaryWriterContext -from reagent.training.rl_trainer_pytorch import RLTrainer -from reagent.training.training_data_page import TrainingDataPage - +from reagent.core.dataclasses import dataclass, field +from reagent.core.parameters import RLParameters +from reagent.models.actor import LOG_PROB_MAX, LOG_PROB_MIN +from reagent.optimizer import Optimizer__Union, SoftUpdate +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.rl_trainer_pytorch import RLTrainerMixin logger = logging.getLogger(__name__) -@observable( - td_loss=torch.Tensor, - reward_loss=torch.Tensor, - logged_actions=torch.Tensor, - logged_propensities=torch.Tensor, - logged_rewards=torch.Tensor, - model_propensities=torch.Tensor, - model_rewards=torch.Tensor, - 
model_values=torch.Tensor, - model_action_idxs=torch.Tensor, -) -class SACTrainer(RLTrainer): +@dataclass +class CRRWeightFn: + # pick indicator or exponent + indicator_fn_threshold: Optional[float] = None + exponent_beta: Optional[float] = None + exponent_clamp: Optional[float] = None + + def __post_init_post_parse__(self): + assert self.exponent_beta or self.indicator_fn_threshold + assert not (self.exponent_beta and self.indicator_fn_threshold) + if self.exponent_beta: + assert self.exponent_beta > 1e-6 + + if self.exponent_clamp: + assert self.exponent_clamp > 1e-6 + + def get_weight_from_advantage(self, advantage): + if self.indicator_fn_threshold: + return (advantage >= self.indicator_fn_threshold).float() + + if self.exponent_beta: + exp = torch.exp(advantage / self.exponent_beta) + if self.exponent_clamp: + exp = torch.clamp(exp, 0.0, self.exponent_clamp) + return exp + + +class SACTrainer(RLTrainerMixin, ReAgentLightningModule): """ Soft Actor-Critic trainer as described in https://arxiv.org/pdf/1801.01290 @@ -46,7 +60,6 @@ def __init__( q1_network, q2_network=None, value_network=None, - use_gpu: bool = False, # Start SACTrainerParameters rl: RLParameters = field(default_factory=RLParameters), # noqa: B008 q_network_optimizer: Optimizer__Union = field( # noqa: B008 @@ -69,6 +82,8 @@ def __init__( apply_kld_on_mean: bool = False, action_embedding_mean: Optional[List[float]] = None, action_embedding_variance: Optional[List[float]] = None, + crr_config: Optional[CRRWeightFn] = None, + backprop_through_log_prob: bool = True, ) -> None: """ Args: @@ -79,48 +94,35 @@ def __init__( from overestimation bias value_network (optional): states -> value of state under actor # alpha in the paper; controlling explore & exploit + backprop_through_log_prob: This is mostly for backward compatibility issue; + we used to have a bug that does this and it yields a better result in + some cases # TODO: finish """ - super().__init__(rl, use_gpu=use_gpu) - - self.minibatch_size = minibatch_size - self.minibatches_per_step = 1 - + super().__init__() + self.rl_parameters = rl self.q1_network = q1_network - self.q1_network_optimizer = q_network_optimizer.make_optimizer( - q1_network.parameters() - ) - self.q2_network = q2_network - if self.q2_network is not None: - self.q2_network_optimizer = q_network_optimizer.make_optimizer( - q2_network.parameters() - ) + self.q_network_optimizer = q_network_optimizer self.value_network = value_network + self.value_network_optimizer = value_network_optimizer if self.value_network is not None: - self.value_network_optimizer = value_network_optimizer.make_optimizer( - value_network.parameters() - ) self.value_network_target = copy.deepcopy(self.value_network) else: self.q1_network_target = copy.deepcopy(self.q1_network) self.q2_network_target = copy.deepcopy(self.q2_network) self.actor_network = actor_network - self.actor_network_optimizer = actor_network_optimizer.make_optimizer( - actor_network.parameters() - ) + self.actor_network_optimizer = actor_network_optimizer self.entropy_temperature = entropy_temperature - self.alpha_optimizer = None - device = "cuda" if use_gpu else "cpu" + self.alpha_optimizer = alpha_optimizer if alpha_optimizer is not None: self.target_entropy = target_entropy - self.log_alpha = torch.tensor( - [np.log(self.entropy_temperature)], requires_grad=True, device=device + self.log_alpha = torch.nn.Parameter( + torch.tensor([np.log(self.entropy_temperature)]) ) - self.alpha_optimizer = alpha_optimizer.make_optimizer([self.log_alpha]) 
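# NOTE (reviewer annotation, not part of this patch): a self-contained sketch of the
# CRR advantage-weighting idea behind the CRRWeightFn dataclass added above. It mirrors
# the two modes (binary indicator vs. exponentiated advantage with optional clamping)
# in plain torch; the function and variable names are illustrative, not ReAgent APIs.
from typing import Optional

import torch

def crr_weight(
    advantage: torch.Tensor,
    indicator_threshold: Optional[float] = None,
    exponent_beta: Optional[float] = None,
    exponent_clamp: Optional[float] = None,
) -> torch.Tensor:
    """Per-sample weights for the actor loss, computed from (detached) advantages."""
    if indicator_threshold is not None:
        # only imitate actions whose advantage clears the threshold
        return (advantage >= indicator_threshold).float()
    # softly up-weight high-advantage actions
    weight = torch.exp(advantage / exponent_beta)
    if exponent_clamp is not None:
        weight = torch.clamp(weight, 0.0, exponent_clamp)
    return weight

advantages = torch.tensor([-0.5, 0.1, 2.0])
print(crr_weight(advantages, indicator_threshold=0.0))               # tensor([0., 1., 1.])
print(crr_weight(advantages, exponent_beta=1.0, exponent_clamp=5.0))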
self.logged_action_uniform_prior = logged_action_uniform_prior @@ -129,262 +131,254 @@ def __init__( if self.add_kld_to_loss: self.kld_weight = action_embedding_kld_weight - self.action_emb_mean = torch.tensor(action_embedding_mean, device=device) - self.action_emb_variance = torch.tensor( - action_embedding_variance, device=device - ) + # Calling register_buffer so that the tensors got moved to the right device + self.register_buffer("action_emb_mean", None) + self.register_buffer("action_emb_variance", None) + # Assigning the values here instead of above so that typechecker wouldn't complain + self.action_emb_mean = torch.tensor(action_embedding_mean) + self.action_emb_variance = torch.tensor(action_embedding_variance) + + self.crr_config = crr_config + if crr_config: + assert self.value_network is not None + + self.backprop_through_log_prob = backprop_through_log_prob + + def configure_optimizers(self): + optimizers = [] - def warm_start_components(self): - components = [ - "q1_network", - "q1_network_optimizer", - "actor_network", - "actor_network_optimizer", - ] + optimizers.append( + self.q_network_optimizer.make_optimizer_scheduler( + self.q1_network.parameters() + ) + ) if self.q2_network: - components += ["q2_network", "q2_network_optimizer"] + optimizers.append( + self.q_network_optimizer.make_optimizer_scheduler( + self.q2_network.parameters() + ) + ) + optimizers.append( + self.actor_network_optimizer.make_optimizer_scheduler( + self.actor_network.parameters() + ) + ) + if self.alpha_optimizer is not None: + optimizers.append( + self.alpha_optimizer.make_optimizer_scheduler([self.log_alpha]) + ) + if self.value_network: + optimizers.append( + self.value_network_optimizer.make_optimizer_scheduler( + self.value_network.parameters() + ) + ) + # soft-update if self.value_network: - components += [ - "value_network", - "value_network_optimizer", - "value_network_target", - ] + target_params = self.value_network_target.parameters() + source_params = self.value_network.parameters() else: - components += ["q1_network_target"] + target_params = list(self.q1_network_target.parameters()) + source_params = list(self.q1_network.parameters()) if self.q2_network: - components += ["q2_network_target"] - return components + target_params += list(self.q2_network_target.parameters()) + source_params += list(self.q2_network.parameters()) + optimizers.append( + SoftUpdate.make_optimizer_scheduler( + target_params, source_params, tau=self.tau + ) + ) - @torch.no_grad() - def train(self, training_batch: rlt.PolicyNetworkInput) -> None: + return optimizers + + def train_step_gen(self, training_batch: rlt.PolicyNetworkInput, batch_idx: int): """ IMPORTANT: the input action here is assumed to match the range of the output of the actor. 
""" - if isinstance(training_batch, TrainingDataPage): - training_batch = training_batch.as_policy_network_training_batch() assert isinstance(training_batch, rlt.PolicyNetworkInput) - self.minibatch += 1 - state = training_batch.state action = training_batch.action reward = training_batch.reward discount = torch.full_like(reward, self.gamma) not_done_mask = training_batch.not_terminal - # We need to zero out grad here because gradient from actor update - # should not be used in Q-network update - self.actor_network_optimizer.zero_grad() - self.q1_network_optimizer.zero_grad() - if self.q2_network is not None: - self.q2_network_optimizer.zero_grad() - if self.value_network is not None: - self.value_network_optimizer.zero_grad() - - with torch.enable_grad(): - # - # First, optimize Q networks; minimizing MSE between - # Q(s, a) & r + discount * V'(next_s) - # + # + # First, optimize Q networks; minimizing MSE between + # Q(s, a) & r + discount * V'(next_s) + # - q1_value = self.q1_network(state, action) - if self.q2_network: - q2_value = self.q2_network(state, action) - actor_output = self.actor_network(state) - - # Optimize Alpha - if self.alpha_optimizer is not None: - alpha_loss = -( - ( - self.log_alpha - * (actor_output.log_prob + self.target_entropy).detach() - ).mean() - ) - self.alpha_optimizer.zero_grad() - alpha_loss.backward() - self.alpha_optimizer.step() - self.entropy_temperature = self.log_alpha.exp() - - with torch.no_grad(): - if self.value_network is not None: - next_state_value = self.value_network_target( - training_batch.next_state.float_features - ) - else: - next_state_actor_output = self.actor_network( - training_batch.next_state - ) - next_state_actor_action = ( - training_batch.next_state, - rlt.FeatureData(next_state_actor_output.action), - ) - next_state_value = self.q1_network_target(*next_state_actor_action) - - if self.q2_network is not None: - target_q2_value = self.q2_network_target( - *next_state_actor_action - ) - next_state_value = torch.min(next_state_value, target_q2_value) - - log_prob_a = self.actor_network.get_log_prob( - training_batch.next_state, next_state_actor_output.action - ) - log_prob_a = log_prob_a.clamp(-20.0, 20.0) - next_state_value -= self.entropy_temperature * log_prob_a - - if self.gamma > 0.0: - target_q_value = ( - reward + discount * next_state_value * not_done_mask.float() - ) - else: - # This is useful in debugging instability issues - target_q_value = reward - - q1_loss = F.mse_loss(q1_value, target_q_value) - q1_loss.backward() - self._maybe_run_optimizer( - self.q1_network_optimizer, self.minibatches_per_step + if self.value_network is not None: + next_state_value = self.value_network_target(training_batch.next_state) + else: + next_state_actor_output = self.actor_network(training_batch.next_state) + next_state_actor_action = ( + training_batch.next_state, + rlt.FeatureData(next_state_actor_output.action), ) - if self.q2_network: - q2_loss = F.mse_loss(q2_value, target_q_value) - q2_loss.backward() - self._maybe_run_optimizer( - self.q2_network_optimizer, self.minibatches_per_step - ) + next_state_value = self.q1_network_target(*next_state_actor_action) - # Second, optimize the actor; minimizing KL-divergence between - # propensity & softmax of value. 
Due to reparameterization trick, - # it ends up being log_prob(actor_action) - Q(s, actor_action) + if self.q2_network is not None: + target_q2_value = self.q2_network_target(*next_state_actor_action) + next_state_value = torch.min(next_state_value, target_q2_value) - state_actor_action = (state, rlt.FeatureData(actor_output.action)) - q1_actor_value = self.q1_network(*state_actor_action) - min_q_actor_value = q1_actor_value - if self.q2_network: - q2_actor_value = self.q2_network(*state_actor_action) - min_q_actor_value = torch.min(q1_actor_value, q2_actor_value) + log_prob_a = self.actor_network.get_log_prob( + training_batch.next_state, next_state_actor_output.action + ).clamp(LOG_PROB_MIN, LOG_PROB_MAX) + next_state_value -= self.entropy_temperature * log_prob_a - actor_loss = ( - self.entropy_temperature * actor_output.log_prob - min_q_actor_value + if self.gamma > 0.0: + target_q_value = ( + reward + discount * next_state_value * not_done_mask.float() ) - # Do this in 2 steps so we can log histogram of actor loss - # pyre-fixme[16]: `float` has no attribute `mean`. - actor_loss_mean = actor_loss.mean() - - if self.add_kld_to_loss: - if self.apply_kld_on_mean: - action_batch_m = torch.mean(actor_output.action_mean, axis=0) - action_batch_v = torch.var(actor_output.action_mean, axis=0) - else: - action_batch_m = torch.mean(actor_output.action, axis=0) - action_batch_v = torch.var(actor_output.action, axis=0) - kld = ( - 0.5 - * ( - (action_batch_v + (action_batch_m - self.action_emb_mean) ** 2) - / self.action_emb_variance - - 1 - + self.action_emb_variance.log() - - action_batch_v.log() - ).sum() - ) + else: + # This is useful in debugging instability issues + target_q_value = reward - actor_loss_mean += self.kld_weight * kld + q1_value = self.q1_network(state, action) + q1_loss = F.mse_loss(q1_value, target_q_value) + yield q1_loss - actor_loss_mean.backward() - self._maybe_run_optimizer( - self.actor_network_optimizer, self.minibatches_per_step - ) + if self.q2_network: + q2_value = self.q2_network(state, action) + q2_loss = F.mse_loss(q2_value, target_q_value) + yield q2_loss - # - # Lastly, if applicable, optimize value network; minimizing MSE between - # V(s) & E_a~pi(s) [ Q(s,a) - log(pi(a|s)) ] - # - - if self.value_network is not None: - state_value = self.value_network(state.float_features) - - if self.logged_action_uniform_prior: - log_prob_a = torch.zeros_like(min_q_actor_value) - target_value = min_q_actor_value - else: - with torch.no_grad(): - log_prob_a = actor_output.log_prob - log_prob_a = log_prob_a.clamp(-20.0, 20.0) - target_value = ( - min_q_actor_value - self.entropy_temperature * log_prob_a - ) - - value_loss = F.mse_loss(state_value, target_value.detach()) - value_loss.backward() - self._maybe_run_optimizer( - self.value_network_optimizer, self.minibatches_per_step - ) + # Second, optimize the actor; minimizing KL-divergence between + # propensity & softmax of value. 
Due to reparameterization trick, + # it ends up being log_prob(actor_action) - Q(s, actor_action) - # Use the soft update rule to update the target networks - if self.value_network is not None: - self._maybe_soft_update( - self.value_network, - self.value_network_target, - self.tau, - self.minibatches_per_step, - ) + actor_output = self.actor_network(state) + + state_actor_action = (state, rlt.FeatureData(actor_output.action)) + q1_actor_value = self.q1_network(*state_actor_action) + min_q_actor_value = q1_actor_value + if self.q2_network: + q2_actor_value = self.q2_network(*state_actor_action) + min_q_actor_value = torch.min(q1_actor_value, q2_actor_value) + + actor_log_prob = actor_output.log_prob.clamp(LOG_PROB_MIN, LOG_PROB_MAX) + + if not self.backprop_through_log_prob: + actor_log_prob = actor_log_prob.detach() + + if self.crr_config is not None: + cur_value = self.value_network(training_batch.state) + advantage = (min_q_actor_value - cur_value).detach() + # pyre-fixme[16]: `Optional` has no attribute `get_weight_from_advantage`. + crr_weight = self.crr_config.get_weight_from_advantage(advantage) + assert ( + actor_log_prob.shape == crr_weight.shape + ), f"{actor_log_prob.shape} != {crr_weight.shape}" + actor_loss = -(actor_log_prob * crr_weight.detach()) else: - self._maybe_soft_update( - self.q1_network, - self.q1_network_target, - self.tau, - self.minibatches_per_step, + actor_loss = self.entropy_temperature * actor_log_prob - min_q_actor_value + # Do this in 2 steps so we can log histogram of actor loss + actor_loss_mean = actor_loss.mean() + + if self.add_kld_to_loss: + if self.apply_kld_on_mean: + # pyre-fixme[28]: Unexpected keyword argument `axis`. + action_batch_m = torch.mean(actor_output.squashed_mean, axis=0) + # pyre-fixme[28]: Unexpected keyword argument `axis`. + action_batch_v = torch.var(actor_output.squashed_mean, axis=0) + else: + # pyre-fixme[28]: Unexpected keyword argument `axis`. + action_batch_m = torch.mean(actor_output.action, axis=0) + # pyre-fixme[28]: Unexpected keyword argument `axis`. + action_batch_v = torch.var(actor_output.action, axis=0) + kld = ( + 0.5 + * ( + # pyre-fixme[58]: `**` is not supported for operand types + # `Tensor` and `int`. + (action_batch_v + (action_batch_m - self.action_emb_mean) ** 2) + / self.action_emb_variance + - 1 + + self.action_emb_variance.log() + - action_batch_v.log() + ).sum() ) - if self.q2_network is not None: - self._maybe_soft_update( - self.q2_network, - self.q2_network_target, - self.tau, - self.minibatches_per_step, - ) - # Logging at the end to schedule all the cuda operations first - if ( - self.tensorboard_logging_freq != 0 - and self.minibatch % self.tensorboard_logging_freq == 0 - ): - SummaryWriterContext.add_histogram("q1/logged_state_value", q1_value) - if self.q2_network: - SummaryWriterContext.add_histogram("q2/logged_state_value", q2_value) + actor_loss_mean += self.kld_weight * kld - # pyre-fixme[16]: `SummaryWriterContext` has no attribute `add_scalar`. 
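# NOTE (reviewer annotation, not part of this patch): the add_kld_to_loss branch above
# adds a closed-form KL penalty between the batch action statistics and a fixed diagonal
# Gaussian prior (action_emb_mean / action_emb_variance). A standalone sketch of that
# term with illustrative tensors:
import torch

def kld_to_gaussian_prior(
    actions: torch.Tensor, prior_mean: torch.Tensor, prior_var: torch.Tensor
) -> torch.Tensor:
    """KL( N(batch_mean, batch_var) || N(prior_mean, prior_var) ), summed over action dims."""
    batch_mean = actions.mean(dim=0)
    batch_var = actions.var(dim=0)
    return 0.5 * (
        (batch_var + (batch_mean - prior_mean) ** 2) / prior_var
        - 1
        + prior_var.log()
        - batch_var.log()
    ).sum()

actions = torch.randn(128, 4) * 0.5 + 0.1
print(kld_to_gaussian_prior(actions, torch.zeros(4), torch.ones(4)))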
- SummaryWriterContext.add_scalar( - "entropy_temperature", self.entropy_temperature - ) - SummaryWriterContext.add_histogram("log_prob_a", log_prob_a) - if self.value_network: - SummaryWriterContext.add_histogram("value_network/target", target_value) + yield actor_loss_mean - SummaryWriterContext.add_histogram( - "q_network/next_state_value", next_state_value + # Optimize Alpha + if self.alpha_optimizer is not None: + alpha_loss = -( + ( + self.log_alpha + * ( + actor_output.log_prob.clamp(LOG_PROB_MIN, LOG_PROB_MAX) + + self.target_entropy + ).detach() + ).mean() ) - SummaryWriterContext.add_histogram( - "q_network/target_q_value", target_q_value + yield alpha_loss + self.entropy_temperature = self.log_alpha.exp() + + # + # Lastly, if applicable, optimize value network; minimizing MSE between + # V(s) & E_a~pi(s) [ Q(s,a) - log(pi(a|s)) ] + # + + if self.value_network is not None: + state_value = self.value_network(state) + + if self.logged_action_uniform_prior: + log_prob_a = torch.zeros_like(min_q_actor_value) + target_value = min_q_actor_value + else: + log_prob_a = actor_output.log_prob.clamp(LOG_PROB_MIN, LOG_PROB_MAX) + target_value = min_q_actor_value - self.entropy_temperature * log_prob_a + + value_loss = F.mse_loss(state_value, target_value.detach()) + yield value_loss + + # pyre-fixme[16]: `Optional` has no attribute `log_metrics`. + self.logger.log_metrics( + { + "td_loss": q1_loss, + "logged_rewards": reward.mean(), + "model_values_on_logged_actions": q1_value.mean(), + "q1_value": q1_value.mean(), + "entropy_temperature": self.entropy_temperature, + "log_prob_a": log_prob_a.mean(), + "next_state_value": next_state_value.mean(), + "target_q_value": target_q_value.mean(), + "min_q_actor_value": min_q_actor_value.mean(), + "actor_output_log_prob": actor_output.log_prob.mean(), + "actor_loss": actor_loss.mean(), + }, + step=self.all_batches_processed, + ) + if self.q2_network: + self.logger.log_metrics( + {"q2_value": q2_value.mean()}, + step=self.all_batches_processed, ) - SummaryWriterContext.add_histogram( - "actor/min_q_actor_value", min_q_actor_value + + if self.value_network: + self.logger.log_metrics( + {"target_state_value": target_value.mean()}, + step=self.all_batches_processed, ) - SummaryWriterContext.add_histogram( - "actor/action_log_prob", actor_output.log_prob + + if self.add_kld_to_loss: + self.logger.log_metrics( + { + "action_batch_mean": action_batch_m.mean(), + "action_batch_var": action_batch_v.mean(), + # pyre-fixme[61]: `kld` may not be initialized here. + "kld": kld, + }, + step=self.all_batches_processed, ) - SummaryWriterContext.add_histogram("actor/loss", actor_loss) - if self.add_kld_to_loss: - SummaryWriterContext.add_histogram("kld/mean", action_batch_m) - SummaryWriterContext.add_histogram("kld/var", action_batch_v) - SummaryWriterContext.add_scalar("kld/kld", kld) - - self.loss_reporter.report( - td_loss=float(q1_loss), - reward_loss=None, - logged_rewards=reward, - model_values_on_logged_actions=q1_value, - model_propensities=actor_output.log_prob.exp(), - model_values=min_q_actor_value, - ) + + # Use the soft update rule to update the target networks + result = self.soft_update_result() + self.log("td_loss", q1_loss, prog_bar=True) + yield result diff --git a/reagent/training/slate_q_trainer.py b/reagent/training/slate_q_trainer.py index 7fd13da5d..3edb66fdc 100644 --- a/reagent/training/slate_q_trainer.py +++ b/reagent/training/slate_q_trainer.py @@ -1,27 +1,41 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. 
All rights reserved. +import enum import logging -from typing import List +from typing import Optional -import reagent.parameters as rlp -import reagent.types as rlt +import reagent.core.parameters as rlp +import reagent.core.types as rlt import torch import torch.nn.functional as F from reagent.core.dataclasses import field -from reagent.optimizer.union import Optimizer__Union -from reagent.training.dqn_trainer_base import DQNTrainerBase - +from reagent.optimizer import Optimizer__Union, SoftUpdate +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.rl_trainer_pytorch import RLTrainerMixin logger = logging.getLogger(__name__) -class SlateQTrainer(DQNTrainerBase): +class NextSlateValueNormMethod(enum.Enum): + """ + The Q value of the current slate item is the sum of the item's short-term reward and + the normalized sum of all item Q-values on the next slate. + We can normalize the sum by either the current slate size (NORM_BY_CURRENT_SLATE_SIZE) + or the next slate size (NORM_BY_NEXT_SLATE_SIZE). + This enum distinguishes between these two different ways of normalizing the next slate value. + """ + + NORM_BY_CURRENT_SLATE_SIZE = "norm_by_current_slate_size" + NORM_BY_NEXT_SLATE_SIZE = "norm_by_next_slate_size" + + +class SlateQTrainer(RLTrainerMixin, ReAgentLightningModule): def __init__( self, q_network, q_network_target, - use_gpu: bool = False, + slate_size, # Start SlateQTrainerParameters rl: rlp.RLParameters = field( # noqa: B008 default_factory=lambda: rlp.RLParameters(maxq_learning=False) @@ -29,24 +43,77 @@ def __init__( optimizer: Optimizer__Union = field( # noqa: B008 default_factory=Optimizer__Union.default ), + slate_opt_parameters: Optional[rlp.SlateOptParameters] = None, + discount_time_scale: Optional[float] = None, + single_selection: bool = True, + next_slate_value_norm_method: NextSlateValueNormMethod = NextSlateValueNormMethod.NORM_BY_CURRENT_SLATE_SIZE, minibatch_size: int = 1024, evaluation: rlp.EvaluationParameters = field( # noqa: B008 default_factory=lambda: rlp.EvaluationParameters(calc_cpe_in_training=False) ), ) -> None: - super().__init__(rl, use_gpu=use_gpu) - self.minibatches_per_step = 1 - self.minibatch_size = minibatch_size + """ + Args: + q_network: states, action -> q-value + slate_size(int): a fixed slate size + rl (optional): an instance of the RLParameter class, which + defines relevant hyperparameters + optimizer (optional): the optimizer class and + optimizer hyperparameters for the q network(s) optimizer + discount_time_scale (optional): use to control the discount factor (gamma) + relative to the time difference (t2-t1), i.e., gamma^((t2-t1)/time_scale). + If it is absent, we won't adjust the discount factor by the time difference. + single_selection (optional): TBD + next_slate_value_norm_method (optional): how to calculate the next slate value + when single_selection is False. By default we use NORM_BY_CURRENT_SLATE_SIZE. 
+ minibatch_size (optional): the size of the minibatch + evaluation (optional): TBD + """ + super().__init__() + self.rl_parameters = rl + + self.discount_time_scale = discount_time_scale + self.single_selection = single_selection + self.next_slate_value_norm_method = next_slate_value_norm_method self.q_network = q_network self.q_network_target = q_network_target - self.q_network_optimizer = optimizer.make_optimizer(self.q_network.parameters()) + self.q_network_optimizer = optimizer + + self.slate_size = slate_size + self.slate_opt_parameters = slate_opt_parameters + + def configure_optimizers(self): + optimizers = [] + + optimizers.append( + self.q_network_optimizer.make_optimizer_scheduler( + self.q_network.parameters() + ) + ) + + target_params = list(self.q_network_target.parameters()) + source_params = list(self.q_network.parameters()) + optimizers.append( + SoftUpdate.make_optimizer_scheduler( + target_params, source_params, tau=self.tau + ) + ) - def warm_start_components(self) -> List[str]: - components = ["q_network", "q_network_target", "q_network_optimizer"] - return components + return optimizers - def _action_docs(self, state: rlt.FeatureData, action: torch.Tensor) -> rlt.DocList: + def _action_docs( + self, + state: rlt.FeatureData, + action: torch.Tensor, + terminal_mask: Optional[torch.Tensor] = None, + ) -> rlt.DocList: + # for invalid indices, simply set action to 0 so we can batch index still + if terminal_mask is not None: + assert terminal_mask.shape == ( + action.shape[0], + ), f"{terminal_mask.shape} != 0th dim of {action.shape}" + action[terminal_mask] = torch.zeros_like(action[terminal_mask]) docs = state.candidate_docs assert docs is not None return docs.select_slate(action) @@ -54,7 +121,7 @@ def _action_docs(self, state: rlt.FeatureData, action: torch.Tensor) -> rlt.DocL def _get_unmasked_q_values( self, q_network, state: rlt.FeatureData, slate: rlt.DocList ) -> torch.Tensor: - """ Gets the q values from the model and target networks """ + """Gets the q values from the model and target networks""" batch_size, slate_size, _ = slate.float_features.shape # TODO: Probably should create a new model type return q_network( @@ -62,58 +129,147 @@ def _get_unmasked_q_values( ).view(batch_size, slate_size) @torch.no_grad() - def train(self, training_batch: rlt.SlateQInput): + def _get_maxq_next_action(self, next_state: rlt.FeatureData) -> torch.Tensor: + """Get the next action list based on the slate optimization strategy.""" + slate_opt_parameters = self.slate_opt_parameters + assert slate_opt_parameters is not None + + if slate_opt_parameters.method == rlp.SlateOptMethod.TOP_K: + return self._get_maxq_topk(next_state) + else: + raise NotImplementedError( + "SlateQ with optimization method other than TOP_K is not implemented." + ) + + def _get_maxq_topk(self, next_state: rlt.FeatureData) -> torch.Tensor: + candidate_docs = next_state.candidate_docs + assert candidate_docs is not None + + batch_size, num_candidates, _ = candidate_docs.float_features.shape + assert 0 < self.slate_size <= num_candidates + + docs = candidate_docs.select_slate( + torch.arange(num_candidates).repeat(batch_size, 1) + ) + next_q_values = self._get_unmasked_q_values( + self.q_network_target, next_state, docs + ) * self._get_docs_value(docs) + _, next_actions = torch.topk(next_q_values, self.slate_size, dim=1) + + return next_actions + + def _get_docs_value(self, docs: rlt.DocList) -> torch.Tensor: + # Multiplying by the mask to filter out selected padding items. 
+ value = docs.value * docs.mask + if self.single_selection: + value = F.softmax(value, dim=1) + return value + + def _get_slate_size(self, state: rlt.FeatureData) -> torch.Tensor: + """Get the actual size (ignore all padded items) of each slate by summing item masks.""" + mask = self._get_item_mask(state) + return torch.minimum( + mask.sum(1, keepdim=True), + torch.tensor([self.slate_size], device=mask.device), + ) + + def _get_item_mask(self, state: rlt.FeatureData) -> torch.Tensor: + """Get the mask from the given state.""" + candidate_docs = state.candidate_docs + assert candidate_docs is not None + return candidate_docs.mask + + def _get_avg_by_slate_size(self, batch: rlt.SlateQInput): + """Get the slate_size for averaging the sum of slate value.""" + if ( + self.next_slate_value_norm_method + == NextSlateValueNormMethod.NORM_BY_NEXT_SLATE_SIZE + ): + return self._get_slate_size(batch.next_state) + if ( + self.next_slate_value_norm_method + == NextSlateValueNormMethod.NORM_BY_CURRENT_SLATE_SIZE + ): + return self._get_slate_size(batch.state) + raise NotImplementedError( + f"The next_slate_value_norm_method {self.next_slate_value_norm_method} has not been implemented" + ) + + def train_step_gen(self, training_batch: rlt.SlateQInput, batch_idx: int): assert isinstance( training_batch, rlt.SlateQInput ), f"learning input is a {type(training_batch)}" - self.minibatch += 1 reward = training_batch.reward reward_mask = training_batch.reward_mask discount_tensor = torch.full_like(reward, self.gamma) - if self.maxq_learning: - raise NotImplementedError("Q-Learning for SlateQ is not implemented") - else: - # SARSA (Use the target network) - next_action_docs = self._action_docs( - training_batch.next_state, training_batch.next_action + # Adjust the discount factor by the time_diff if the discount_time_scale is provided, + # and the time_diff exists in the training_batch. + if self.discount_time_scale and training_batch.time_diff is not None: + discount_tensor = discount_tensor ** ( + training_batch.time_diff / self.discount_time_scale ) - next_q_values = torch.sum( - self._get_unmasked_q_values( - self.q_network_target, training_batch.next_state, next_action_docs - ) - * F.softmax(next_action_docs.value, dim=1), - dim=1, - keepdim=True, + + next_action = ( + self._get_maxq_next_action(training_batch.next_state) + if self.rl_parameters.maxq_learning + else training_batch.next_action + ) + + terminal_mask = (training_batch.not_terminal.to(torch.bool) == False).squeeze(1) + next_action_docs = self._action_docs( + training_batch.next_state, + next_action, + terminal_mask=terminal_mask, + ) + next_q_values = torch.sum( + self._get_unmasked_q_values( + self.q_network_target, + training_batch.next_state, + next_action_docs, ) + * self._get_docs_value(next_action_docs), + dim=1, + keepdim=True, + ) - filtered_max_q_vals = next_q_values * training_batch.not_terminal.float() + # If not single selection, divide max-Q by the actual slate size. 
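# NOTE (reviewer annotation, not part of this patch): two of the SlateQ knobs added in
# this hunk, shown with toy tensors. discount_time_scale rescales gamma by the elapsed
# time between steps, and NextSlateValueNormMethod picks which slate size divides the
# summed next-slate value when single_selection is False. Values below are illustrative.
import torch

gamma, time_scale = 0.99, 60.0
time_diff = torch.tensor([[30.0], [120.0]])        # elapsed time between consecutive steps
discount = torch.full_like(time_diff, gamma) ** (time_diff / time_scale)
print(discount)                                    # ~[[0.9950], [0.9801]]

next_slate_q_sum = torch.tensor([[4.2], [1.5]])    # sum of item Q-values on the next slate
current_slate_size = torch.tensor([[3.0], [2.0]])  # NORM_BY_CURRENT_SLATE_SIZE
print(next_slate_q_sum / current_slate_size)       # per-item value of the next slate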
+ if not self.single_selection: + next_q_values = next_q_values / self._get_avg_by_slate_size(training_batch) + filtered_max_q_vals = next_q_values * training_batch.not_terminal.float() target_q_values = reward + (discount_tensor * filtered_max_q_vals) - target_q_values = target_q_values[reward_mask] - - with torch.enable_grad(): - # Get Q-value of action taken - action_docs = self._action_docs(training_batch.state, training_batch.action) - q_values = self._get_unmasked_q_values( - self.q_network, training_batch.state, action_docs - )[reward_mask] - all_action_scores = q_values.detach() - - value_loss = self.q_network_loss(q_values, target_q_values) - td_loss = value_loss.detach() - value_loss.backward() - self._maybe_run_optimizer( - self.q_network_optimizer, self.minibatches_per_step - ) + # Don't mask if not single selection + if self.single_selection: + target_q_values = target_q_values[reward_mask] + + # Get Q-value of action taken + action_docs = self._action_docs(training_batch.state, training_batch.action) + q_values = self._get_unmasked_q_values( + self.q_network, training_batch.state, action_docs + ) + if self.single_selection: + q_values = q_values[reward_mask] + + all_action_scores = q_values.detach() + + value_loss = F.mse_loss(q_values, target_q_values) + yield value_loss + + if not self.single_selection: + all_action_scores = all_action_scores.sum(dim=1, keepdim=True) - # Use the soft update rule to update target network - self._maybe_soft_update( - self.q_network, self.q_network_target, self.tau, self.minibatches_per_step + # Logging at the end to schedule all the cuda operations first + self.reporter.log( + td_loss=value_loss, + model_values_on_logged_actions=all_action_scores, ) - self.loss_reporter.report( - td_loss=td_loss, model_values_on_logged_actions=all_action_scores + # Use the soft update rule to update the target networks + result = self.soft_update_result() + self.log( + "td_loss", value_loss, prog_bar=True, batch_size=training_batch.batch_size() ) + yield result diff --git a/reagent/training/td3_trainer.py b/reagent/training/td3_trainer.py index f64f600f1..b665a8617 100644 --- a/reagent/training/td3_trainer.py +++ b/reagent/training/td3_trainer.py @@ -3,21 +3,21 @@ import copy import logging -import reagent.types as rlt +import reagent.core.types as rlt import torch +import torch.nn.functional as F from reagent.core.configuration import resolve_defaults from reagent.core.dataclasses import field -from reagent.optimizer.union import Optimizer__Union -from reagent.parameters import CONTINUOUS_TRAINING_ACTION_RANGE, RLParameters -from reagent.tensorboardX import SummaryWriterContext -from reagent.training.rl_trainer_pytorch import RLTrainer -from reagent.training.training_data_page import TrainingDataPage +from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE, RLParameters +from reagent.optimizer import Optimizer__Union, SoftUpdate +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.rl_trainer_pytorch import RLTrainerMixin logger = logging.getLogger(__name__) -class TD3Trainer(RLTrainer): +class TD3Trainer(RLTrainerMixin, ReAgentLightningModule): """ Twin Delayed Deep Deterministic Policy Gradient algorithm trainer as described in https://arxiv.org/pdf/1802.09477 @@ -29,7 +29,6 @@ def __init__( actor_network, q1_network, q2_network=None, - use_gpu: bool = False, # Start TD3TrainerParameters rl: RLParameters = field(default_factory=RLParameters), # noqa: B008 q_network_optimizer: Optimizer__Union = 
field( # noqa: B008 @@ -39,69 +38,96 @@ def __init__( default_factory=Optimizer__Union.default ), minibatch_size: int = 64, - use_2_q_functions: bool = True, noise_variance: float = 0.2, noise_clip: float = 0.5, delayed_policy_update: int = 2, minibatches_per_step: int = 1, ) -> None: """ - Args: TODO: fill in + Args: + actor_network: states -> actions, trained to maximize value + q1_network: states, action -> q-value + q2_network (optional): double q-learning to stabilize training + from overestimation bias + rl (optional): an instance of the RLParameter class, which + defines relevant hyperparameters + q_network_optimizer (optional): the optimizer class and + optimizer hyperparameters for the q network(s) optimizer + actor_network_optimizer (optional): see q_network_optimizer + minibatch_size (optional): the size of the minibatch + noise_variance (optional): the variance of action noise added to smooth + q-value estimates + noise_clip (optional): the maximum absolute value of action noise added + to smooth q-value estimates + delayed_policy_update (optional): the ratio of q network updates + to target and policy network updates + minibatches_per_step (optional, TODO: currently unused): the number of minibatch updates + per training step """ - super().__init__(rl, use_gpu=use_gpu) - + super().__init__() + self.rl_parameters = rl self.minibatch_size = minibatch_size self.minibatches_per_step = minibatches_per_step or 1 self.q1_network = q1_network self.q1_network_target = copy.deepcopy(self.q1_network) - self.q1_network_optimizer = q_network_optimizer.make_optimizer( - q1_network.parameters() - ) + self.q_network_optimizer = q_network_optimizer self.q2_network = q2_network if self.q2_network is not None: self.q2_network_target = copy.deepcopy(self.q2_network) - self.q2_network_optimizer = q_network_optimizer.make_optimizer( - q2_network.parameters() - ) self.actor_network = actor_network self.actor_network_target = copy.deepcopy(self.actor_network) - self.actor_network_optimizer = actor_network_optimizer.make_optimizer( - actor_network.parameters() - ) + self.actor_network_optimizer = actor_network_optimizer self.noise_variance = noise_variance self.noise_clip_range = (-noise_clip, noise_clip) self.delayed_policy_update = delayed_policy_update - def warm_start_components(self): - components = [ - "q1_network", - "q1_network_target", - "q1_network_optimizer", - "actor_network", - "actor_network_target", - "actor_network_optimizer", - ] + def configure_optimizers(self): + optimizers = [] + + optimizers.append( + self.q_network_optimizer.make_optimizer_scheduler( + self.q1_network.parameters() + ) + ) + if self.q2_network: + optimizers.append( + self.q_network_optimizer.make_optimizer_scheduler( + self.q2_network.parameters() + ) + ) + optimizers.append( + self.actor_network_optimizer.make_optimizer_scheduler( + self.actor_network.parameters() + ) + ) + + # soft-update + target_params = list(self.q1_network_target.parameters()) + source_params = list(self.q1_network.parameters()) if self.q2_network: - components += ["q2_network", "q2_network_target", "q2_network_optimizer"] + target_params += list(self.q2_network_target.parameters()) + source_params += list(self.q2_network.parameters()) + target_params += list(self.actor_network_target.parameters()) + source_params += list(self.actor_network.parameters()) + optimizers.append( + SoftUpdate.make_optimizer_scheduler( + target_params, source_params, tau=self.tau + ) + ) - return components + return optimizers - def train(self, training_batch: 
rlt.PolicyNetworkInput) -> None: + def train_step_gen(self, training_batch: rlt.PolicyNetworkInput, batch_idx: int): """ IMPORTANT: the input action here is assumed to be preprocessed to match the range of the output of the actor. """ - if isinstance(training_batch, TrainingDataPage): - training_batch = training_batch.as_policy_network_training_batch() - assert isinstance(training_batch, rlt.PolicyNetworkInput) - self.minibatch += 1 - state = training_batch.state action = training_batch.action next_state = training_batch.next_state @@ -126,67 +152,47 @@ def train(self, training_batch: rlt.PolicyNetworkInput) -> None: target_q_value = reward + self.gamma * next_q_value * not_terminal.float() # Optimize Q1 and Q2 - # NOTE: important to zero here (instead of using _maybe_update) - # since q1 may have accumulated gradients from actor network update - self.q1_network_optimizer.zero_grad() q1_value = self.q1_network(state, action) - q1_loss = self.q_network_loss(q1_value, target_q_value) - q1_loss.backward() - self.q1_network_optimizer.step() + q1_loss = F.mse_loss(q1_value, target_q_value) + if batch_idx % self.trainer.log_every_n_steps == 0: + self.reporter.log( + q1_loss=q1_loss, + q1_value=q1_value, + next_q_value=next_q_value, + target_q_value=target_q_value, + ) + self.log( + "td_loss", q1_loss, prog_bar=True, batch_size=training_batch.batch_size() + ) + yield q1_loss if self.q2_network: - self.q2_network_optimizer.zero_grad() q2_value = self.q2_network(state, action) - q2_loss = self.q_network_loss(q2_value, target_q_value) - q2_loss.backward() - self.q2_network_optimizer.step() + q2_loss = F.mse_loss(q2_value, target_q_value) + if batch_idx % self.trainer.log_every_n_steps == 0: + self.reporter.log( + q2_loss=q2_loss, + q2_value=q2_value, + ) + yield q2_loss # Only update actor and target networks after a fixed number of Q updates - if self.minibatch % self.delayed_policy_update == 0: - self.actor_network_optimizer.zero_grad() + if batch_idx % self.delayed_policy_update == 0: actor_action = self.actor_network(state).action actor_q1_value = self.q1_network(state, rlt.FeatureData(actor_action)) actor_loss = -(actor_q1_value.mean()) - actor_loss.backward() - self.actor_network_optimizer.step() - - self._soft_update(self.q1_network, self.q1_network_target, self.tau) - self._soft_update(self.q2_network, self.q2_network_target, self.tau) - self._soft_update(self.actor_network, self.actor_network_target, self.tau) - - # Logging at the end to schedule all the cuda operations first - if ( - self.tensorboard_logging_freq != 0 - and self.minibatch % self.tensorboard_logging_freq == 0 - ): - logs = { - "loss/q1_loss": q1_loss, - "loss/actor_loss": actor_loss, - "q_value/q1_value": q1_value, - "q_value/next_q_value": next_q_value, - "q_value/target_q_value": target_q_value, - "q_value/actor_q1_value": actor_q1_value, - } - if self.q2_network: - logs.update({"loss/q2_loss": q2_loss, "q_value/q2_value": q2_value}) - - for k, v in logs.items(): - v = v.detach().cpu() - if v.dim() == 0: - # pyre-fixme[16]: `SummaryWriterContext` has no attribute - # `add_scalar`. 
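# NOTE (reviewer annotation, not part of this patch): TD3's delayed policy update in
# isolation. The critic loss is yielded on every batch, while the actor (and the target
# networks, via SoftUpdate) only update every `delayed_policy_update` batches. The toy
# networks and data below are illustrative, not the ReAgent models.
import torch
import torch.nn as nn
import torch.nn.functional as F

actor = nn.Linear(4, 2)
critic = nn.Linear(6, 1)  # consumes concat(state, action)
actor_opt = torch.optim.Adam(actor.parameters(), lr=1e-3)
critic_opt = torch.optim.Adam(critic.parameters(), lr=1e-3)
delayed_policy_update = 2

for batch_idx in range(4):
    state = torch.randn(8, 4)
    action = torch.randn(8, 2)
    target_q = torch.randn(8, 1)

    # critic update happens on every batch
    critic_loss = F.mse_loss(critic(torch.cat([state, action], dim=1)), target_q)
    critic_opt.zero_grad()
    critic_loss.backward()
    critic_opt.step()

    # actor update only on every `delayed_policy_update`-th batch
    if batch_idx % delayed_policy_update == 0:
        actor_loss = -critic(torch.cat([state, actor(state)], dim=1)).mean()
        actor_opt.zero_grad()
        actor_loss.backward()
        actor_opt.step()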
- SummaryWriterContext.add_scalar(k, v.item()) - continue - - elif v.dim() == 2: - v = v.squeeze(1) - assert v.dim() == 1 - SummaryWriterContext.add_histogram(k, v.numpy()) - SummaryWriterContext.add_scalar(f"{k}_mean", v.mean().item()) - - self.loss_reporter.report( - td_loss=float(q1_loss), - reward_loss=None, - logged_rewards=reward, - model_values_on_logged_actions=q1_value, - ) + if batch_idx % self.trainer.log_every_n_steps == 0: + self.reporter.log( + actor_loss=actor_loss, + actor_q1_value=actor_q1_value, + ) + yield actor_loss + + # Use the soft update rule to update the target networks + result = self.soft_update_result() + yield result + + else: + # Yielding None prevents the actor and target networks from updating + yield None + yield None diff --git a/reagent/training/trainer.py b/reagent/training/trainer.py deleted file mode 100644 index 09bb97195..000000000 --- a/reagent/training/trainer.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -import logging -from typing import List - - -logger = logging.getLogger(__name__) - - -class Trainer: - def train(self, training_batch) -> None: - raise NotImplementedError() - - def state_dict(self): - return {c: getattr(self, c).state_dict() for c in self.warm_start_components()} - - def load_state_dict(self, state_dict): - for c in self.warm_start_components(): - getattr(self, c).load_state_dict(state_dict[c]) - - def warm_start_components(self) -> List[str]: - """ - The trainer should specify what members to save and load - """ - raise NotImplementedError diff --git a/reagent/training/training_data_page.py b/reagent/training/training_data_page.py deleted file mode 100644 index 392df555c..000000000 --- a/reagent/training/training_data_page.py +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -from typing import Optional - -import numpy as np -import reagent.types as rlt -import torch -from reagent.models.mdn_rnn import transpose - - -class TrainingDataPage(object): - __slots__ = [ - "mdp_ids", - "sequence_numbers", - "states", - "actions", - "propensities", - "rewards", - "possible_actions_state_concat", - "possible_actions_mask", - "next_states", - "next_actions", - "possible_next_actions_state_concat", - "possible_next_actions_mask", - "not_terminal", - "time_diffs", - "metrics", - "step", - "max_num_actions", - "next_propensities", - "rewards_mask", - ] - - def __init__( - self, - mdp_ids: Optional[np.ndarray] = None, - sequence_numbers: Optional[torch.Tensor] = None, - states: Optional[torch.Tensor] = None, - actions: Optional[torch.Tensor] = None, - propensities: Optional[torch.Tensor] = None, - rewards: Optional[torch.Tensor] = None, - possible_actions_mask: Optional[torch.Tensor] = None, - possible_actions_state_concat: Optional[torch.Tensor] = None, - next_states: Optional[torch.Tensor] = None, - next_actions: Optional[torch.Tensor] = None, - possible_next_actions_mask: Optional[torch.Tensor] = None, - possible_next_actions_state_concat: Optional[torch.Tensor] = None, - not_terminal: Optional[torch.Tensor] = None, - time_diffs: Optional[torch.Tensor] = None, - metrics: Optional[torch.Tensor] = None, - step: Optional[torch.Tensor] = None, - max_num_actions: Optional[int] = None, - next_propensities: Optional[torch.Tensor] = None, - rewards_mask: Optional[torch.Tensor] = None, - ) -> None: - """ - Creates a TrainingDataPage object. 
- - In the case where `not_terminal` can be determined by next_actions or - possible_next_actions, feel free to omit it. - """ - self.mdp_ids = mdp_ids - self.sequence_numbers = sequence_numbers - self.states = states - self.actions = actions - self.propensities = propensities - self.rewards = rewards - self.possible_actions_mask = possible_actions_mask - self.possible_actions_state_concat = possible_actions_state_concat - self.next_states = next_states - self.next_actions = next_actions - self.not_terminal = not_terminal - self.time_diffs = time_diffs - self.possible_next_actions_mask = possible_next_actions_mask - self.possible_next_actions_state_concat = possible_next_actions_state_concat - self.metrics = metrics - self.step = step - self.max_num_actions = max_num_actions - self.next_propensities = next_propensities - self.rewards_mask = rewards_mask - - def as_policy_network_training_batch(self): - return rlt.PolicyNetworkInput( - state=rlt.FeatureData(float_features=self.states), - action=rlt.FeatureData(float_features=self.actions), - next_state=rlt.FeatureData(float_features=self.next_states), - next_action=rlt.FeatureData(float_features=self.next_actions), - reward=self.rewards, - not_terminal=self.not_terminal, - step=self.step, - time_diff=self.time_diffs, - extras=rlt.ExtraData(), - ) - - def as_discrete_maxq_training_batch(self): - return rlt.DiscreteDqnInput( - state=rlt.FeatureData(float_features=self.states), - action=self.actions, - next_state=rlt.FeatureData(float_features=self.next_states), - next_action=self.next_actions, - possible_actions_mask=self.possible_actions_mask, - possible_next_actions_mask=self.possible_next_actions_mask, - reward=self.rewards, - not_terminal=self.not_terminal, - step=self.step, - time_diff=self.time_diffs, - extras=rlt.ExtraData( - mdp_id=self.mdp_ids, - sequence_number=self.sequence_numbers, - action_probability=self.propensities, - max_num_actions=self.max_num_actions, - metrics=self.metrics, - ), - ) - - def size(self) -> int: - if self.states: - # pyre-fixme[6]: Expected `Sized` for 1st param but got - # `Optional[torch.Tensor]`. - return len(self.states) - raise Exception("Cannot get size of TrainingDataPage missing states.") - - def set_type(self, dtype): - # TODO: Clean this up in a future diff. Figure out which should be long/float - for x in TrainingDataPage.__slots__: - if x in ("mdp_ids", "sequence_numbers", "max_num_actions"): - continue # Torch does not support tensors of strings - t = getattr(self, x) - if t is not None: - assert isinstance(t, torch.Tensor), ( - x + " is not a torch tensor (is " + str(type(t)) + ")" - ) - if x == "possible_next_actions_lengths": - setattr(self, x, t.type(dtype).long()) - else: - setattr(self, x, t.type(dtype)) - - def set_device(self, device): - for x in TrainingDataPage.__slots__: - if x in ("mdp_ids", "sequence_numbers", "max_num_actions"): - continue # Torch does not support tensors of strings - t = getattr(self, x) - if t is not None: - assert isinstance(t, torch.Tensor), ( - x + " is not a torch tensor (is " + str(type(t)) + ")" - ) - if x == "possible_next_actions_lengths": - setattr(self, x, t.to(device=device).long()) - else: - setattr(self, x, t.to(device=device).float()) diff --git a/reagent/training/utils.py b/reagent/training/utils.py index b8e8999ff..3db217bb1 100644 --- a/reagent/training/utils.py +++ b/reagent/training/utils.py @@ -1,31 +1,61 @@ #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
-from typing import Union - import numpy as np +import torch +import torch.nn.functional as F + + +EPS = np.finfo(float).eps.item() def rescale_actions( - actions: np.ndarray, - new_min: Union[np.ndarray, float], - new_max: Union[np.ndarray, float], - prev_min: Union[np.ndarray, float], - prev_max: Union[np.ndarray, float], -) -> np.ndarray: - """ Scale from [prev_min, prev_max] to [new_min, new_max] """ - # pyre-fixme[6]: Expected `float` for 1st param but got `ndarray`. - assert np.all(prev_min <= actions) and np.all( + actions: torch.Tensor, + new_min: torch.Tensor, + new_max: torch.Tensor, + prev_min: torch.Tensor, + prev_max: torch.Tensor, +) -> torch.Tensor: + """Scale from [prev_min, prev_max] to [new_min, new_max]""" + assert torch.all(prev_min <= actions) and torch.all( actions <= prev_max ), f"{actions} has values outside of [{prev_min}, {prev_max}]." - assert np.all( - new_min - # pyre-fixme[6]: Expected `float` for 1st param but got `Union[float, - # np.ndarray]`. - <= new_max + assert torch.all( + new_min <= new_max ), f"{new_min} is (has coordinate) greater than {new_max}." - # pyre-fixme[6]: Expected `float` for 1st param but got `Union[float, np.ndarray]`. prev_range = prev_max - prev_min - # pyre-fixme[6]: Expected `float` for 1st param but got `Union[float, np.ndarray]`. new_range = new_max - new_min return ((actions - prev_min) / prev_range) * new_range + new_min + + +def whiten(x: torch.Tensor, subtract_mean: bool) -> torch.Tensor: + numer = x + if subtract_mean: + numer -= x.mean() + return numer / (x.std() + EPS) + + +def discounted_returns(rewards: torch.Tensor, gamma: float = 0) -> torch.Tensor: + """Perform rollout to compute reward to go + and do a baseline subtraction.""" + if gamma == 0: + return rewards.float() + else: + R = 0 + returns = [] + for r in rewards.numpy()[::-1]: + R = r + gamma * R + returns.insert(0, R) + return torch.tensor(returns).float() + + +def gen_permutations(seq_len: int, num_action: int) -> torch.Tensor: + """ + generate all seq_len permutations for a given action set + the return shape is (SEQ_LEN, PERM_NUM, ACTION_DIM) + """ + all_permut = torch.cartesian_prod(*[torch.arange(num_action)] * seq_len) + if seq_len == 1: + all_permut = all_permut.unsqueeze(1) + all_permut = F.one_hot(all_permut, num_action).transpose(0, 1) + return all_permut.float() diff --git a/reagent/training/world_model/__init__.py b/reagent/training/world_model/__init__.py index e69de29bb..5be5087fd 100644 --- a/reagent/training/world_model/__init__.py +++ b/reagent/training/world_model/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/training/world_model/compress_model_trainer.py b/reagent/training/world_model/compress_model_trainer.py new file mode 100644 index 000000000..b3014cac2 --- /dev/null +++ b/reagent/training/world_model/compress_model_trainer.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
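# NOTE (reviewer annotation, not part of this patch): quick shape/behaviour check for
# the helpers added to reagent/training/utils.py above. gen_permutations enumerates
# every length-seq_len action sequence as one-hot vectors, and discounted_returns
# computes reward-to-go. This assumes the module is importable as shown once the patch
# is applied; otherwise paste the two functions in directly.
import torch
from reagent.training.utils import discounted_returns, gen_permutations

perms = gen_permutations(seq_len=2, num_action=3)
print(perms.shape)  # torch.Size([2, 9, 3]) == (SEQ_LEN, PERM_NUM, ACTION_DIM)

rewards = torch.tensor([1.0, 0.0, 2.0])
print(discounted_returns(rewards, gamma=0.5))  # tensor([1.5000, 1.0000, 2.0000])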
+ +import logging + +import reagent.core.types as rlt +import torch +import torch.nn.functional as F +from reagent.core.parameters import Seq2RewardTrainerParameters +from reagent.core.types import FeatureData +from reagent.models.fully_connected_network import FloatFeatureFullyConnected +from reagent.models.seq2reward_model import Seq2RewardNetwork +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.utils import gen_permutations +from reagent.training.world_model.seq2reward_trainer import get_Q + + +logger = logging.getLogger(__name__) + + +class CompressModelTrainer(ReAgentLightningModule): + """Trainer for fitting Seq2Reward planning outcomes to a neural network-based policy""" + + def __init__( + self, + compress_model_network: FloatFeatureFullyConnected, + seq2reward_network: Seq2RewardNetwork, + params: Seq2RewardTrainerParameters, + ): + super().__init__() + self.compress_model_network = compress_model_network + self.seq2reward_network = seq2reward_network + self.params = params + + # permutations used to do planning + self.all_permut = gen_permutations( + params.multi_steps, len(self.params.action_names) + ) + + def configure_optimizers(self): + optimizers = [] + optimizers.append( + { + "optimizer": torch.optim.Adam( + self.compress_model_network.parameters(), + lr=self.params.compress_model_learning_rate, + ) + } + ) + return optimizers + + def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int): + loss, accuracy = self.get_loss(training_batch) + detached_loss = loss.cpu().detach().item() + accuracy = accuracy.item() + logger.info( + f"Seq2Reward Compress trainer MSE/Accuracy: {detached_loss}, {accuracy}" + ) + self.reporter.log(mse_loss=detached_loss, accuracy=accuracy) + yield loss + + @staticmethod + def extract_state_first_step(batch): + return FeatureData(batch.state.float_features[0]) + + # pyre-ignore inconsistent override because lightning doesn't use types + def validation_step(self, batch: rlt.MemoryNetworkInput, batch_idx: int): + mse, acc = self.get_loss(batch) + detached_loss = mse.cpu().detach().item() + acc = acc.item() + + state_first_step = CompressModelTrainer.extract_state_first_step(batch) + # shape: batch_size, action_dim + q_values_all_action_all_data = ( + self.compress_model_network(state_first_step).cpu().detach() + ) + q_values = q_values_all_action_all_data.mean(0).tolist() + + action_distribution = torch.bincount( + torch.argmax(q_values_all_action_all_data, dim=1), + minlength=len(self.params.action_names), + ) + # normalize + action_distribution = ( + action_distribution.float() / torch.sum(action_distribution) + ).tolist() + + self.reporter.log( + eval_mse_loss=detached_loss, + eval_accuracy=acc, + eval_q_values=[q_values], + eval_action_distribution=[action_distribution], + ) + + return (detached_loss, q_values, action_distribution, acc) + + def get_loss(self, batch: rlt.MemoryNetworkInput): + state_first_step = CompressModelTrainer.extract_state_first_step(batch) + # shape: batch_size, num_action + compress_model_output = self.compress_model_network(state_first_step) + + target = get_Q( + self.seq2reward_network, + state_first_step.float_features, + self.all_permut, + ) + assert ( + compress_model_output.size() == target.size() + ), f"{compress_model_output.size()}!={target.size()}" + mse = F.mse_loss(compress_model_output, target) + + with torch.no_grad(): + target_action = torch.max(target, dim=1).indices + model_action = torch.max(compress_model_output, dim=1).indices + 
accuracy = torch.mean((target_action == model_action).float()) + + return mse, accuracy + + def warm_start_components(self): + logger.info("No warm start components yet...") + components = [] + return components diff --git a/reagent/training/world_model/mdnrnn_trainer.py b/reagent/training/world_model/mdnrnn_trainer.py index 427f7727b..a26856dd2 100644 --- a/reagent/training/world_model/mdnrnn_trainer.py +++ b/reagent/training/world_model/mdnrnn_trainer.py @@ -2,23 +2,22 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import logging -from collections import deque -from typing import Deque, Optional +from typing import Optional -import reagent.types as rlt +import reagent.core.types as rlt import torch import torch.nn.functional as F +from reagent.core.parameters import MDNRNNTrainerParameters from reagent.models.mdn_rnn import gmm_loss from reagent.models.world_model import MemoryNetwork -from reagent.parameters import MDNRNNTrainerParameters -from reagent.training.trainer import Trainer +from reagent.training.reagent_lightning_module import ReAgentLightningModule logger = logging.getLogger(__name__) -class MDNRNNTrainer(Trainer): - """ Trainer for MDN-RNN """ +class MDNRNNTrainer(ReAgentLightningModule): + """Trainer for MDN-RNN""" def __init__( self, @@ -26,36 +25,86 @@ def __init__( params: MDNRNNTrainerParameters, cum_loss_hist: int = 100, ): + super().__init__() self.memory_network = memory_network self.params = params - self.optimizer = torch.optim.Adam( - self.memory_network.mdnrnn.parameters(), lr=params.learning_rate + + def configure_optimizers(self): + optimizers = [] + + optimizers.append( + torch.optim.Adam( + self.memory_network.mdnrnn.parameters(), lr=self.params.learning_rate + ) + ) + + return optimizers + + def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int): + (seq_len, batch_size, state_dim) = training_batch.state.float_features.shape + + losses = self.get_loss(training_batch, state_dim) + + detached_losses = {k: loss.cpu().detach().item() for k, loss in losses.items()} + self.reporter.log( + loss=detached_losses["loss"], + gmm=detached_losses["gmm"], + bce=detached_losses["bce"], + mse=detached_losses["mse"], + ) + if self.all_batches_processed % 10 == 0: + logger.info( + f'loss={detached_losses["loss"]}, gmm={detached_losses["loss"]}, bce={detached_losses["bce"]}, mse={detached_losses["mse"]}' + ) + loss = losses["loss"] + # TODO: Must setup (or mock) trainer and a LoggerConnector to call self.log()! 
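# Editor's note on the guard below: pl.LightningModule.log() only works once
# the module is attached to a Trainer with its logger set up (see the TODO
# above), so the check lets train_step_gen() run stand-alone, e.g. in tests,
# without triggering logging errors.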
+ if self.trainer is not None and self.trainer.logger is not None: + self.log( + "td_loss", loss, prog_bar=True, batch_size=training_batch.batch_size() + ) + yield loss + + def validation_step( # pyre-ignore inconsistent override because lightning doesn't use types + self, + training_batch: rlt.MemoryNetworkInput, + batch_idx: int, + ): + (seq_len, batch_size, state_dim) = training_batch.state.float_features.shape + + losses = self.get_loss(training_batch, state_dim) + + detached_losses = {k: loss.cpu().detach().item() for k, loss in losses.items()} + self.reporter.log( + eval_loss=detached_losses["loss"], + eval_gmm=detached_losses["gmm"], + eval_bce=detached_losses["bce"], + eval_mse=detached_losses["mse"], ) - self.minibatch = 0 - self.cum_loss: Deque[float] = deque([], maxlen=cum_loss_hist) - self.cum_bce: Deque[float] = deque([], maxlen=cum_loss_hist) - self.cum_gmm: Deque[float] = deque([], maxlen=cum_loss_hist) - self.cum_mse: Deque[float] = deque([], maxlen=cum_loss_hist) - def train(self, training_batch: rlt.MemoryNetworkInput): - self.minibatch += 1 + loss = losses["loss"] + self.log("td_loss", loss, prog_bar=True, batch_size=training_batch.batch_size()) + return loss + def test_step( # pyre-ignore inconsistent override because lightning doesn't use types + self, + training_batch: rlt.MemoryNetworkInput, + batch_idx: int, + ): (seq_len, batch_size, state_dim) = training_batch.state.float_features.shape - self.memory_network.mdnrnn.train() - self.optimizer.zero_grad() losses = self.get_loss(training_batch, state_dim) - losses["loss"].backward() - self.optimizer.step() detached_losses = {k: loss.cpu().detach().item() for k, loss in losses.items()} - self.cum_loss.append(detached_losses["loss"]) - self.cum_gmm.append(detached_losses["gmm"]) - self.cum_bce.append(detached_losses["bce"]) - self.cum_mse.append(detached_losses["mse"]) - del losses + self.reporter.log( + test_loss=detached_losses["loss"], + test_gmm=detached_losses["gmm"], + test_bce=detached_losses["bce"], + test_mse=detached_losses["mse"], + ) - return detached_losses + loss = losses["loss"] + self.log("td_loss", loss, prog_bar=True, batch_size=training_batch.batch_size()) + return loss def get_loss( self, training_batch: rlt.MemoryNetworkInput, state_dim: Optional[int] = None @@ -88,9 +137,7 @@ def get_loss( assert isinstance(training_batch, rlt.MemoryNetworkInput) # mdnrnn's input should have seq_len as the first dimension - mdnrnn_output = self.memory_network( - training_batch.state, rlt.FeatureData(training_batch.action) - ) + mdnrnn_output = self.memory_network(training_batch.state, training_batch.action) # mus, sigmas: [seq_len, batch_size, num_gaussian, state_dim] mus, sigmas, logpi, rs, nts = ( mdnrnn_output.mus, @@ -101,7 +148,7 @@ def get_loss( ) next_state = training_batch.next_state.float_features - not_terminal = training_batch.not_terminal + not_terminal = training_batch.not_terminal.float() reward = training_batch.reward if self.params.fit_only_one_next_step: next_state, not_terminal, reward, mus, sigmas, logpi, nts, rs = tuple( diff --git a/reagent/training/world_model/seq2reward_trainer.py b/reagent/training/world_model/seq2reward_trainer.py new file mode 100644 index 000000000..064ca2920 --- /dev/null +++ b/reagent/training/world_model/seq2reward_trainer.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
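# Illustrative sketch (editor's addition, not part of this file): get_Q below
# scores every enumerated action sequence with the seq2reward network and then
# takes the max over sequences sharing the same first action, yielding one
# Q value per action. Shape walk-through with hypothetical sizes (batch_size=4,
# state_dim=10, seq_len=2, num_action=3); `seq2reward_network` is assumed to be
# a trained Seq2RewardNetwork with matching state_dim:

import torch

from reagent.training.utils import gen_permutations
from reagent.training.world_model.seq2reward_trainer import get_Q

all_permut = gen_permutations(2, 3)  # (SEQ_LEN, PERM_NUM, ACTION_DIM) = (2, 9, 3)
state = torch.randn(4, 10)           # batch of 4 states
# q_values = get_Q(seq2reward_network, state, all_permut)  # -> shape (4, 3)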
+ +import logging + +import reagent.core.types as rlt +import torch +import torch.nn as nn +import torch.nn.functional as F +from reagent.core.parameters import Seq2RewardTrainerParameters +from reagent.models.fully_connected_network import FullyConnectedNetwork +from reagent.models.seq2reward_model import Seq2RewardNetwork +from reagent.training.reagent_lightning_module import ReAgentLightningModule +from reagent.training.utils import gen_permutations + +logger = logging.getLogger(__name__) + + +@torch.no_grad() +def get_step_prediction( + step_predict_network: FullyConnectedNetwork, training_batch: rlt.MemoryNetworkInput +): + first_step_state = training_batch.state.float_features[0] + pred_step = step_predict_network(first_step_state) + step_probability = F.softmax(pred_step, dim=1) + return step_probability + + +@torch.no_grad() +def get_Q( + seq2reward_network: Seq2RewardNetwork, + cur_state: torch.Tensor, + all_permut: torch.Tensor, +) -> torch.Tensor: + """ + Input: + cur_state: the current state from where we start planning. + shape: batch_size x state_dim + all_permut: all action sequences (sorted in lexical order) for enumeration + shape: seq_len x num_perm x action_dim + """ + batch_size = cur_state.shape[0] + _, num_permut, num_action = all_permut.shape + num_permut_per_action = int(num_permut / num_action) + + preprocessed_state = cur_state.unsqueeze(0).repeat_interleave(num_permut, dim=1) + state_feature_vector = rlt.FeatureData(preprocessed_state) + + # expand action to match the expanded state sequence + action = rlt.FeatureData(all_permut.repeat(1, batch_size, 1)) + acc_reward = seq2reward_network(state_feature_vector, action).acc_reward.reshape( + batch_size, num_action, num_permut_per_action + ) + + # The permuations are generated with lexical order + # the output has shape [num_perm, num_action,1] + # that means we can aggregate on the max reward + # then reshape it to (BATCH_SIZE, ACT_DIM) + max_acc_reward = ( + torch.max(acc_reward, dim=2).values.detach().reshape(batch_size, num_action) + ) + + return max_acc_reward + + +class Seq2RewardTrainer(ReAgentLightningModule): + """Trainer for Seq2Reward""" + + def __init__( + self, seq2reward_network: Seq2RewardNetwork, params: Seq2RewardTrainerParameters + ): + super().__init__() + self.seq2reward_network = seq2reward_network + self.params = params + + # Turning off Q value output during training: + self.view_q_value = params.view_q_value + # permutations used to do planning + self.all_permut = gen_permutations( + params.multi_steps, len(self.params.action_names) + ) + self.mse_loss = nn.MSELoss(reduction="mean") + + # Predict how many steps are remaining from the current step + self.step_predict_network = FullyConnectedNetwork( + [ + self.seq2reward_network.state_dim, + self.params.step_predict_net_size, + self.params.step_predict_net_size, + self.params.multi_steps, + ], + ["relu", "relu", "linear"], + use_layer_norm=False, + ) + self.step_loss = nn.CrossEntropyLoss(reduction="mean") + + def configure_optimizers(self): + optimizers = [] + optimizers.append( + { + "optimizer": torch.optim.Adam( + self.seq2reward_network.parameters(), lr=self.params.learning_rate + ), + } + ) + optimizers.append( + { + "optimizer": torch.optim.Adam( + self.step_predict_network.parameters(), lr=self.params.learning_rate + ) + }, + ) + return optimizers + + def train_step_gen(self, training_batch: rlt.MemoryNetworkInput, batch_idx: int): + mse_loss = self.get_mse_loss(training_batch) + detached_mse_loss = mse_loss.cpu().detach().item() + 
yield mse_loss + + step_entropy_loss = self.get_step_entropy_loss(training_batch) + detached_step_entropy_loss = step_entropy_loss.cpu().detach().item() + + if self.view_q_value: + state_first_step = training_batch.state.float_features[0] + q_values = ( + get_Q( + self.seq2reward_network, + state_first_step, + self.all_permut, + ) + .cpu() + .mean(0) + .tolist() + ) + else: + q_values = [0] * len(self.params.action_names) + + step_probability = ( + get_step_prediction(self.step_predict_network, training_batch) + .cpu() + .mean(dim=0) + .numpy() + ) + logger.info( + f"Seq2Reward trainer output: mse_loss={detached_mse_loss}, " + f"step_entropy_loss={detached_step_entropy_loss}, q_values={q_values}, " + f"step_probability={step_probability}" + ) + self.reporter.log( + mse_loss=detached_mse_loss, + step_entropy_loss=detached_step_entropy_loss, + q_values=[q_values], + ) + + yield step_entropy_loss + + # pyre-ignore inconsistent override because lightning doesn't use types + def validation_step(self, batch: rlt.MemoryNetworkInput, batch_idx: int): + detached_mse_loss = self.get_mse_loss(batch).cpu().detach().item() + + detached_step_entropy_loss = ( + self.get_step_entropy_loss(batch).cpu().detach().item() + ) + + state_first_step = batch.state.float_features[0] + # shape: batch_size, action_dim + q_values_all_action_all_data = get_Q( + self.seq2reward_network, + state_first_step, + self.all_permut, + ).cpu() + q_values = q_values_all_action_all_data.mean(0).tolist() + + action_distribution = torch.bincount( + torch.argmax(q_values_all_action_all_data, dim=1), + minlength=len(self.params.action_names), + ) + # normalize + action_distribution = ( + action_distribution.float() / torch.sum(action_distribution) + ).tolist() + + self.reporter.log( + eval_mse_loss=detached_mse_loss, + eval_step_entropy_loss=detached_step_entropy_loss, + eval_q_values=[q_values], + eval_action_distribution=[action_distribution], + ) + return ( + detached_mse_loss, + detached_step_entropy_loss, + q_values, + action_distribution, + ) + + def get_mse_loss(self, training_batch: rlt.MemoryNetworkInput): + """ + Compute losses: + MSE(predicted_acc_reward, target_acc_reward) + + :param training_batch: + training_batch has these fields: + - state: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor + - action: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor + - reward: (SEQ_LEN, BATCH_SIZE) torch tensor + + :returns: + mse loss on reward + """ + # pyre-fixme[16]: Optional type has no attribute `flatten`. + valid_step = training_batch.valid_step.flatten() + + seq2reward_output = self.seq2reward_network( + training_batch.state, + training_batch.action, + valid_step, + ) + predicted_acc_reward = seq2reward_output.acc_reward + + seq_len, batch_size = training_batch.reward.size() + gamma = self.params.gamma + gamma_mask = ( + torch.Tensor( + [[gamma**i for i in range(seq_len)] for _ in range(batch_size)] + ) + .transpose(0, 1) + .to(training_batch.reward.device) + ) + + target_acc_rewards = torch.cumsum(training_batch.reward * gamma_mask, dim=0) + target_acc_reward = target_acc_rewards[ + valid_step - 1, torch.arange(batch_size) + ].unsqueeze(1) + + # make sure the prediction and target tensors have the same size + # the size should both be (BATCH_SIZE, 1) in this case. 
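# Editor's worked example for the target computed above: with gamma=0.9 and
# seq_len=3, each column of gamma_mask is [1, 0.9, 0.81], so the cumulative
# sum holds [r0, r0 + 0.9*r1, r0 + 0.9*r1 + 0.81*r2] for each batch element;
# indexing row valid_step - 1 then selects the discounted return over exactly
# the first valid_step steps, giving a (BATCH_SIZE, 1) target after
# unsqueeze(1).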
+ assert ( + predicted_acc_reward.size() == target_acc_reward.size() + ), f"{predicted_acc_reward.size()}!={target_acc_reward.size()}" + return self.mse_loss(predicted_acc_reward, target_acc_reward) + + def get_step_entropy_loss(self, training_batch: rlt.MemoryNetworkInput): + """ + Compute cross-entropy losses of step predictions + + :param training_batch: + training_batch has these fields: + - state: (SEQ_LEN, BATCH_SIZE, STATE_DIM) torch tensor + - action: (SEQ_LEN, BATCH_SIZE, ACTION_DIM) torch tensor + - reward: (SEQ_LEN, BATCH_SIZE) torch tensor + + :returns: + step_entropy_loss on step prediction + """ + # pyre-fixme[16]: Optional type has no attribute `flatten`. + valid_step = training_batch.valid_step.flatten() + + first_step_state = training_batch.state.float_features[0] + valid_step_output = self.step_predict_network(first_step_state) + + # step loss's target is zero-based indexed, so subtract 1 from valid_step + return self.step_loss(valid_step_output, valid_step - 1) + + def warm_start_components(self): + components = ["seq2reward_network"] + return components diff --git a/reagent/types.py b/reagent/types.py deleted file mode 100644 index 4f6e83886..000000000 --- a/reagent/types.py +++ /dev/null @@ -1,591 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -import dataclasses -import logging - -# The dataclasses in this file should be vanilla dataclass to have minimal overhead -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Tuple, Union, cast - -import torch -from reagent.core.dataclasses import dataclass as pydantic_dataclass - - -class NoDuplicatedWarningLogger: - def __init__(self, logger): - self.logger = logger - self.msg = set([]) - - def warning(self, msg): - if msg not in self.msg: - self.logger.warning(msg) - self.msg.add(msg) - - -logger = logging.getLogger(__name__) -no_dup_logger = NoDuplicatedWarningLogger(logger) - - -def isinstance_namedtuple(x): - return isinstance(x, tuple) and hasattr(x, "_fields") - - -""" -We should revisit this at some point. Config classes shouldn't subclass from this. -""" - - -@dataclass -class BaseDataClass: - def _replace(self, **kwargs): - return cast(type(self), dataclasses.replace(self, **kwargs)) - - -@dataclass -class TensorDataClass(BaseDataClass): - def __getattr__(self, attr): - if attr.startswith("__") and attr.endswith("__"): - raise AttributeError - - tensor_attr = getattr(torch.Tensor, attr, None) - - if tensor_attr is None or not callable(tensor_attr): - logger.error( - f"Attemping to call torch.Tensor.{attr} on " - f"{type(self)} (instance of TensorDataClass)." 
- ) - if tensor_attr is None: - raise AttributeError(f"torch.Tensor doesn't have {attr} attribute.") - else: - raise RuntimeError(f"Tensor.{attr} is not callable.") - - def f(*args, **kwargs): - values = {} - for k, v in self.__dict__.items(): # noqa F402 - if isinstance(v, (torch.Tensor, TensorDataClass)): - values[k] = getattr(v, attr)(*args, **kwargs) - else: - values[k] = v - return type(self)(**values) - - return f - - def cuda(self, *args, **kwargs): - cuda_tensor = {} - for k, v in self.__dict__.items(): # noqa F402 - if isinstance(v, torch.Tensor): - kwargs["non_blocking"] = kwargs.get("non_blocking", True) - cuda_tensor[k] = v.cuda(*args, **kwargs) - elif isinstance(v, TensorDataClass): - cuda_tensor[k] = v.cuda(*args, **kwargs) - else: - cuda_tensor[k] = v - return type(self)(**cuda_tensor) - - -##### -# FIXME: These config types are misplaced but we need to write FBL config adapter -# if we moved them. -###### - - -@pydantic_dataclass -class IdListFeatureConfig(BaseDataClass): - """ - This describes how to map raw features to model features - """ - - name: str - feature_id: int # integer feature ID - id_mapping_name: str # key to ModelPreprocessingConfig.id_mapping_config - # max_length: int - - -@pydantic_dataclass -class FloatFeatureInfo(BaseDataClass): - name: str - feature_id: int - - -@pydantic_dataclass -class IdMapping(BaseDataClass): - ids: List[int] - - -@pydantic_dataclass -class ModelFeatureConfig(BaseDataClass): - float_feature_infos: List[FloatFeatureInfo] - id_mapping_config: Dict[str, IdMapping] = field(default_factory=dict) - id_list_feature_configs: List[IdListFeatureConfig] = field(default_factory=list) - - -###### -# dataclasses for internal API -###### - - -@dataclass -class ValuePresence(TensorDataClass): - value: torch.Tensor - presence: Optional[torch.Tensor] - - -IdListFeatureValue = Tuple[torch.Tensor, torch.Tensor] -IdListFeatures = Dict[str, IdListFeatureValue] - - -@dataclass -class RawFeatureData(TensorDataClass): - float_features: ValuePresence - id_list_features: IdListFeatures = dataclasses.field(default_factory=dict) - # Experimental: sticking this here instead of putting it in float_features - # because a lot of places derive the shape of float_features from - # normalization parameters. 
- time_since_first: Optional[torch.Tensor] = None - - -@dataclass -class ActorOutput(TensorDataClass): - action: torch.Tensor - log_prob: Optional[torch.Tensor] = None - action_mean: Optional[torch.Tensor] = None - - -@dataclass -class DocList(TensorDataClass): - # the shape is (batch_size, num_candidates, num_document_features) - float_features: torch.Tensor - # the shapes are (batch_size, num_candidates) - mask: torch.Tensor - value: torch.Tensor - - def __post_init__(self): - assert ( - len(self.float_features.shape) == 3 - ), f"Unexpected shape: {self.float_features.shape}" - - @torch.no_grad() - def select_slate(self, action: torch.Tensor): - row_idx = torch.repeat_interleave( - torch.arange(action.shape[0]).unsqueeze(1), action.shape[1], dim=1 - ) - mask = self.mask[row_idx, action] - # Make sure the indices are in the right range - assert mask.to(torch.bool).all() - float_features = self.float_features[row_idx, action] - value = self.value[row_idx, action] - return DocList(float_features, mask, value) - - def as_feature_data(self): - _batch_size, _slate_size, feature_dim = self.float_features.shape - return FeatureData(self.float_features.view(-1, feature_dim)) - - -@dataclass -class FeatureData(TensorDataClass): - # For dense features, shape is (batch_size, feature_dim) - float_features: torch.Tensor - # For sequence, shape is (stack_size, batch_size, feature_dim) - stacked_float_features: Optional[torch.Tensor] = None - id_list_features: IdListFeatures = dataclasses.field(default_factory=dict) - # For ranking algos, - candidate_docs: Optional[DocList] = None - # Experimental: sticking this here instead of putting it in float_features - # because a lot of places derive the shape of float_features from - # normalization parameters. - time_since_first: Optional[torch.Tensor] = None - - def __post_init__(self): - def usage(): - return ( - f"For sequence features, use `stacked_float_features`." - f"For document features, use `candidate_doc_float_features`." - ) - - if self.float_features.ndim == 3: - no_dup_logger.warning(f"`float_features` should be 2D.\n{usage()}") - elif self.float_features.ndim != 2: - raise ValueError( - f"float_features should be 2D; got {self.float_features.shape}.\n{usage()}" - ) - - @classmethod - def from_raw_feature_data(cls, feature_vector: RawFeatureData, preprocessor): - return cls( - float_features=preprocessor( - feature_vector.float_features.value, - feature_vector.float_features.presence, - ), - id_list_features=feature_vector.id_list_features, - time_since_first=feature_vector.time_since_first, - ) - - @classmethod - def from_dict(cls, d, name: str): - # TODO: Looks for id_list_features - return cls(float_features=d[name]) - - @property - def has_float_features_only(self) -> bool: - return ( - not self.id_list_features - and self.time_since_first is None - and self.candidate_docs is None - ) - - def get_tiled_batch(self, num_tiles: int): - assert ( - self.has_float_features_only - ), f"only works for float features now: {self}" - """ - tiled_feature should be (batch_size * num_tiles, feature_dim) - forall i in [batch_size], - tiled_feature[i*num_tiles:(i+1)*num_tiles] should be feat[i] - """ - feat = self.float_features - assert ( - len(feat.shape) == 2 - ), f"Need feat shape to be (batch_size, feature_dim), got {feat.shape}." - batch_size, _ = feat.shape - # pyre-fixme[16]: `Tensor` has no attribute `repeat_interleave`. 
- tiled_feat = feat.repeat_interleave(repeats=num_tiles, dim=0) - return FeatureData(float_features=tiled_feat) - - -class TensorFeatureData(torch.nn.Module): - """ - Primarily for using in nn.Sequential - """ - - def forward(self, input: torch.Tensor) -> FeatureData: - assert isinstance(input, torch.Tensor) - return FeatureData(input) - - -@dataclass -class PreprocessedRankingInput(TensorDataClass): - state: FeatureData - src_seq: FeatureData - src_src_mask: torch.Tensor - tgt_in_seq: Optional[FeatureData] = None - tgt_out_seq: Optional[FeatureData] = None - tgt_tgt_mask: Optional[torch.Tensor] = None - slate_reward: Optional[torch.Tensor] = None - position_reward: Optional[torch.Tensor] = None - # all indices will be +2 to account for padding - # symbol (0) and decoder_start_symbol (1) - src_in_idx: Optional[torch.Tensor] = None - tgt_in_idx: Optional[torch.Tensor] = None - tgt_out_idx: Optional[torch.Tensor] = None - tgt_out_probs: Optional[torch.Tensor] = None - # store ground-truth target sequences - optim_tgt_in_idx: Optional[torch.Tensor] = None - optim_tgt_out_idx: Optional[torch.Tensor] = None - optim_tgt_in_seq: Optional[FeatureData] = None - optim_tgt_out_seq: Optional[FeatureData] = None - - def batch_size(self) -> int: - return self.state.float_features.size()[0] - - @classmethod - def from_tensors( - cls, - state: torch.Tensor, - src_seq: torch.Tensor, - src_src_mask: torch.Tensor, - tgt_in_seq: Optional[torch.Tensor] = None, - tgt_out_seq: Optional[torch.Tensor] = None, - tgt_tgt_mask: Optional[torch.Tensor] = None, - slate_reward: Optional[torch.Tensor] = None, - position_reward: Optional[torch.Tensor] = None, - src_in_idx: Optional[torch.Tensor] = None, - tgt_in_idx: Optional[torch.Tensor] = None, - tgt_out_idx: Optional[torch.Tensor] = None, - tgt_out_probs: Optional[torch.Tensor] = None, - optim_tgt_in_idx: Optional[torch.Tensor] = None, - optim_tgt_out_idx: Optional[torch.Tensor] = None, - optim_tgt_in_seq: Optional[torch.Tensor] = None, - optim_tgt_out_seq: Optional[torch.Tensor] = None, - **kwargs, - ): - assert isinstance(state, torch.Tensor) - assert isinstance(src_seq, torch.Tensor) - assert isinstance(src_src_mask, torch.Tensor) - assert tgt_in_seq is None or isinstance(tgt_in_seq, torch.Tensor) - assert tgt_out_seq is None or isinstance(tgt_out_seq, torch.Tensor) - assert tgt_tgt_mask is None or isinstance(tgt_tgt_mask, torch.Tensor) - assert slate_reward is None or isinstance(slate_reward, torch.Tensor) - assert position_reward is None or isinstance(position_reward, torch.Tensor) - assert src_in_idx is None or isinstance(src_in_idx, torch.Tensor) - assert tgt_in_idx is None or isinstance(tgt_in_idx, torch.Tensor) - assert tgt_out_idx is None or isinstance(tgt_out_idx, torch.Tensor) - assert tgt_out_probs is None or isinstance(tgt_out_probs, torch.Tensor) - assert optim_tgt_out_idx is None or isinstance(optim_tgt_out_idx, torch.Tensor) - assert optim_tgt_out_idx is None or isinstance(optim_tgt_out_idx, torch.Tensor) - assert optim_tgt_in_seq is None or isinstance(optim_tgt_in_seq, torch.Tensor) - assert optim_tgt_out_seq is None or isinstance(optim_tgt_out_seq, torch.Tensor) - - return cls( - state=FeatureData(float_features=state), - src_seq=FeatureData(float_features=src_seq), - src_src_mask=src_src_mask, - tgt_in_seq=FeatureData(float_features=tgt_in_seq) - if tgt_in_seq is not None - else None, - tgt_out_seq=FeatureData(float_features=tgt_out_seq) - if tgt_out_seq is not None - else None, - tgt_tgt_mask=tgt_tgt_mask, - slate_reward=slate_reward, - 
position_reward=position_reward, - src_in_idx=src_in_idx, - tgt_in_idx=tgt_in_idx, - tgt_out_idx=tgt_out_idx, - tgt_out_probs=tgt_out_probs, - optim_tgt_in_idx=optim_tgt_in_idx, - optim_tgt_out_idx=optim_tgt_out_idx, - optim_tgt_in_seq=FeatureData(float_features=optim_tgt_in_seq) - if optim_tgt_in_seq is not None - else None, - optim_tgt_out_seq=FeatureData(float_features=optim_tgt_out_seq) - if optim_tgt_out_seq is not None - else None, - ) - - def __post_init__(self): - if ( - isinstance(self.state, torch.Tensor) - or isinstance(self.src_seq, torch.Tensor) - or isinstance(self.tgt_in_seq, torch.Tensor) - or isinstance(self.tgt_out_seq, torch.Tensor) - or isinstance(self.optim_tgt_in_seq, torch.Tensor) - or isinstance(self.optim_tgt_out_seq, torch.Tensor) - ): - raise ValueError( - f"Use from_tensors() {type(self.state)} {type(self.src_seq)} " - f"{type(self.tgt_in_seq)} {type(self.tgt_out_seq)} " - f"{type(self.optim_tgt_in_seq)} {type(self.optim_tgt_out_seq)} " - ) - - -@dataclass -class CommonInput(TensorDataClass): - """ - Base class for all inputs, both raw and preprocessed - """ - - reward: torch.Tensor - time_diff: torch.Tensor - step: Optional[torch.Tensor] - not_terminal: torch.Tensor - - -@dataclass -class ExtraData(TensorDataClass): - mdp_id: Optional[torch.Tensor] = None - sequence_number: Optional[torch.Tensor] = None - action_probability: Optional[torch.Tensor] = None - max_num_actions: Optional[int] = None - metrics: Optional[torch.Tensor] = None - - @classmethod - def from_dict(cls, d): - return cls(**{f.name: d.get(f.name, None) for f in dataclasses.fields(cls)}) - - -@dataclass -class PreprocessedBaseInput(CommonInput): - state: FeatureData - next_state: FeatureData - - def batch_size(self): - return self.state.float_features.size()[0] - - -@dataclass -class DiscreteDqnInput(PreprocessedBaseInput): - action: torch.Tensor - next_action: torch.Tensor - possible_actions_mask: torch.Tensor - possible_next_actions_mask: torch.Tensor - extras: ExtraData - - -@dataclass -class SlateQInput(PreprocessedBaseInput): - """ - The shapes of `reward`, `reward_mask`, & `next_item_mask` are - `(batch_size, slate_size)`. - - `reward_mask` indicated whether the reward could be observed, e.g., - the item got into viewport or not. 
- """ - - action: torch.Tensor - next_action: torch.Tensor - reward_mask: torch.Tensor - extras: Optional[ExtraData] = None - - @classmethod - def from_dict(cls, d): - action = d["action"] - next_action = d["next_action"] - return cls( - state=FeatureData( - float_features=d["state_features"], - candidate_docs=DocList( - float_features=d["candidate_features"], - mask=d["item_mask"], - value=d["item_probability"], - ), - ), - next_state=FeatureData( - float_features=d["next_state_features"], - candidate_docs=DocList( - float_features=d["next_candidate_features"], - mask=d["next_item_mask"], - value=d["next_item_probability"], - ), - ), - action=action, - next_action=next_action, - reward=d["position_reward"], - reward_mask=d["reward_mask"], - time_diff=d["time_diff"], - not_terminal=d["not_terminal"], - step=None, - extras=ExtraData.from_dict(d), - ) - - -@dataclass -class ParametricDqnInput(PreprocessedBaseInput): - action: FeatureData - next_action: FeatureData - possible_actions: FeatureData - possible_actions_mask: torch.Tensor - possible_next_actions: FeatureData - possible_next_actions_mask: torch.Tensor - extras: Optional[ExtraData] = None - - @classmethod - def from_dict(cls, batch): - return cls( - state=FeatureData(float_features=batch["state_features"]), - action=FeatureData(float_features=batch["action"]), - next_state=FeatureData(float_features=batch["next_state_features"]), - next_action=FeatureData(float_features=batch["next_action"]), - possible_actions=FeatureData(float_features=batch["possible_actions"]), - possible_actions_mask=batch["possible_actions_mask"], - possible_next_actions=FeatureData( - float_features=batch["possible_next_actions"] - ), - possible_next_actions_mask=batch["possible_next_actions_mask"], - reward=batch["reward"], - not_terminal=batch["not_terminal"], - time_diff=batch["time_diff"], - step=batch["step"], - extras=batch["extras"], - ) - - -@dataclass -class PolicyNetworkInput(PreprocessedBaseInput): - action: FeatureData - next_action: FeatureData - extras: Optional[ExtraData] = None - - @classmethod - def from_dict(cls, batch): - return cls( - state=FeatureData(float_features=batch["state_features"]), - action=FeatureData(float_features=batch["action"]), - next_state=FeatureData(float_features=batch["next_state_features"]), - next_action=FeatureData(float_features=batch["next_action"]), - reward=batch["reward"], - not_terminal=batch["not_terminal"], - time_diff=batch["time_diff"], - step=batch["step"], - extras=batch["extras"], - ) - - def batch_size(self) -> int: - return self.state.float_features.shape[0] - - -# TODO(T67083627): state and next_state should use stack_float_features -@dataclass -class MemoryNetworkInput(PreprocessedBaseInput): - action: torch.Tensor - - -@dataclass -class RawBaseInput(CommonInput): - state: RawFeatureData - next_state: RawFeatureData - - -@dataclass -class PreprocessedTrainingBatch(TensorDataClass): - training_input: Union[PreprocessedRankingInput] - # TODO: deplicate this and move into individual ones. 
- extras: ExtraData = field(default_factory=ExtraData) - - def batch_size(self): - return self.training_input.state.float_features.size()[0] - - -@dataclass -class MemoryNetworkOutput(TensorDataClass): - mus: torch.Tensor - sigmas: torch.Tensor - logpi: torch.Tensor - reward: torch.Tensor - not_terminal: torch.Tensor - last_step_lstm_hidden: torch.Tensor - last_step_lstm_cell: torch.Tensor - all_steps_lstm_hidden: torch.Tensor - - -@dataclass -class DqnPolicyActionSet(TensorDataClass): - greedy: int - softmax: Optional[int] = None - greedy_act_name: Optional[str] = None - softmax_act_name: Optional[str] = None - softmax_act_prob: Optional[float] = None - - -@dataclass -class SacPolicyActionSet(TensorDataClass): - greedy: torch.Tensor - greedy_propensity: float - - -@dataclass -class PlanningPolicyOutput(TensorDataClass): - # best action to take next - next_best_continuous_action: Optional[torch.Tensor] = None - next_best_discrete_action_one_hot: Optional[torch.Tensor] = None - next_best_discrete_action_idx: Optional[int] = None - - -@dataclass -class RankingOutput(TensorDataClass): - # a tensor of integer indices w.r.t. to possible candidates - # shape: batch_size, tgt_seq_len - ranked_tgt_out_idx: Optional[torch.Tensor] = None - # generative probability of ranked tgt sequences at each decoding step - # shape: batch_size, tgt_seq_len, candidate_size - ranked_tgt_out_probs: Optional[torch.Tensor] = None - # log probabilities of given tgt sequences are used in REINFORCE - # shape: batch_size - log_probs: Optional[torch.Tensor] = None - # encoder scores in tgt_out_idx order - encoder_scores: Optional[torch.Tensor] = None - - -@dataclass -class RewardNetworkOutput(TensorDataClass): - predicted_reward: torch.Tensor diff --git a/reagent/validators/__init__.py b/reagent/validators/__init__.py new file mode 100644 index 000000000..5be5087fd --- /dev/null +++ b/reagent/validators/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/workflow/validators/model_validator.py b/reagent/validators/model_validator.py similarity index 62% rename from reagent/workflow/validators/model_validator.py rename to reagent/validators/model_validator.py index fcd15a62b..5e9b0674c 100644 --- a/reagent/workflow/validators/model_validator.py +++ b/reagent/validators/model_validator.py @@ -1,13 +1,14 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. import abc import inspect import logging +from typing import List, Optional from reagent.core.registry_meta import RegistryMeta -from reagent.workflow.result_registries import ValidationResult -from reagent.workflow.types import RLTrainingOutput - +from reagent.core.result_registries import ValidationResult +from reagent.workflow.types import RLTrainingOutput, TableSpec logger = logging.getLogger(__name__) @@ -18,12 +19,19 @@ class ModelValidator(metaclass=RegistryMeta): they can be registered in the workflows. """ - def validate(self, training_output: RLTrainingOutput): + def validate( + self, + training_output: RLTrainingOutput, + result_history: Optional[List[RLTrainingOutput]] = None, + input_table_spec: Optional[TableSpec] = None, + ): """ This method takes RLTrainingOutput so that it can extract anything it might need from it. 
""" - result = self.do_validate(training_output) + result = self.do_validate( + training_output, result_history, input_table_spec=input_table_spec + ) # Avoid circular dependency at import time from reagent.workflow.types import ValidationResult__Union @@ -38,7 +46,12 @@ def validate(self, training_output: RLTrainingOutput): return ValidationResult__Union.make_union_instance(result, result_type) @abc.abstractmethod - def do_validate(self, training_output: RLTrainingOutput) -> ValidationResult: + def do_validate( + self, + training_output: RLTrainingOutput, + result_history: Optional[List[RLTrainingOutput]] = None, + input_table_spec: Optional[TableSpec] = None, + ) -> ValidationResult: """ This method takes RLTrainingOutput so that it can extract anything it might need from it. diff --git a/reagent/validators/no_validation.py b/reagent/validators/no_validation.py new file mode 100644 index 000000000..ea92a667d --- /dev/null +++ b/reagent/validators/no_validation.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. +from typing import List, Optional + +from reagent.core.dataclasses import dataclass +from reagent.core.result_types import NoValidationResults +from reagent.validators.model_validator import ModelValidator +from reagent.workflow.types import RLTrainingOutput, TableSpec + + +@dataclass +class NoValidation(ModelValidator): + """ + This is an example of how to create a validator. This validator performs no + validation. In your own validator, you would want to have `validate()` performs + some validation. + """ + + def do_validate( + self, + training_output: RLTrainingOutput, + result_history: Optional[List[RLTrainingOutput]] = None, + input_table_spec: Optional[TableSpec] = None, + ) -> NoValidationResults: + return NoValidationResults(should_publish=True) diff --git a/reagent/validators/union.py b/reagent/validators/union.py new file mode 100644 index 000000000..f197ce020 --- /dev/null +++ b/reagent/validators/union.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from reagent.core.fb_checker import IS_FB_ENVIRONMENT +from reagent.core.tagged_union import TaggedUnion + +from .model_validator import ModelValidator +from .no_validation import NoValidation # noqa + + +if IS_FB_ENVIRONMENT: + import fblearner.flow.projects.rl.validation.clients # noqa + import fblearner.flow.projects.rl.validation.common # noqa + + +@ModelValidator.fill_union() +class ModelValidator__Union(TaggedUnion): + pass diff --git a/reagent/workflow/__init__.py b/reagent/workflow/__init__.py index e69de29bb..5be5087fd 100644 --- a/reagent/workflow/__init__.py +++ b/reagent/workflow/__init__.py @@ -0,0 +1,2 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. diff --git a/reagent/workflow/cli.py b/reagent/workflow/cli.py index 72bc96dae..1f8a127aa 100755 --- a/reagent/workflow/cli.py +++ b/reagent/workflow/cli.py @@ -4,17 +4,20 @@ import dataclasses import importlib +import json import logging import os import sys import click + +# pyre-fixme[21]: Could not find name `YAML` in `ruamel.yaml`. 
from ruamel.yaml import YAML @click.group() -def reagent(): - from reagent import debug_on_error +def reagent() -> None: + from reagent.core import debug_on_error debug_on_error.start() @@ -58,7 +61,8 @@ def select_relevant_params(config_dict, ConfigClass): @reagent.command(short_help="Run the workflow with config file") @click.argument("workflow") @click.argument("config_file", type=click.File("r")) -def run(workflow, config_file): +@click.option("--extra-options", default=None) +def run(workflow, config_file, extra_options) -> None: func, ConfigClass = _load_func_and_config_class(workflow) @@ -67,9 +71,12 @@ def run(workflow, config_file): # ConfigClass. Then convert that instance to dict (via .asdict()) and apply to # the function + # pyre-fixme[16]: Module `yaml` has no attribute `YAML`. yaml = YAML(typ="safe") config_dict = yaml.load(config_file.read()) assert config_dict is not None, "failed to read yaml file" + if extra_options is not None: + config_dict.update(json.loads(extra_options)) config_dict = select_relevant_params(config_dict, ConfigClass) config = ConfigClass(**config_dict) func(**config.asdict()) @@ -77,7 +84,7 @@ def run(workflow, config_file): @reagent.command(short_help="Print JSON-schema of the workflow") @click.argument("workflow") -def print_schema(workflow): +def print_schema(workflow) -> None: func, ConfigClass = _load_func_and_config_class(workflow) print(ConfigClass.__pydantic_model__.schema_json()) diff --git a/reagent/workflow/env.py b/reagent/workflow/env.py index 693585ef5..b643412ae 100644 --- a/reagent/workflow/env.py +++ b/reagent/workflow/env.py @@ -1,6 +1,26 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. + +from typing import List + +from reagent.workflow.types import ModuleNameToEntityId def get_workflow_id() -> int: # This is just stub. You will want to replace this file. 
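# (Editor's note) get_new_named_entity_ids() below hands this placeholder id
# to the first module and simply decrements it for every additional module;
# real deployments are expected to replace both stubs with calls into their
# own workflow system.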
return 987654321 + + +def get_new_named_entity_ids(module_names: List[str]) -> ModuleNameToEntityId: + result = {} + i = 1 + done_one = False + for name in module_names: + if not done_one: + result[name] = get_workflow_id() + done_one = True + else: + # this is just random, you'll want to replace + result[name] = 987654321 - i + i += 1 + return result diff --git a/reagent/workflow/gym_batch_rl.py b/reagent/workflow/gym_batch_rl.py index f4fa23cf7..270c4a705 100644 --- a/reagent/workflow/gym_batch_rl.py +++ b/reagent/workflow/gym_batch_rl.py @@ -3,54 +3,85 @@ import json import logging -import random from typing import Optional import gym import numpy as np import pandas as pd +import pytorch_lightning as pl import torch +from reagent.data.spark_utils import call_spark_class, get_spark_session from reagent.gym.agents.agent import Agent -from reagent.gym.envs.env_factory import EnvFactory +from reagent.gym.envs import Gym from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model +from reagent.gym.policies.random_policies import make_random_policy_for_env from reagent.gym.runners.gymrunner import evaluate_for_n_episodes from reagent.gym.utils import fill_replay_buffer +from reagent.model_managers.union import ModelManager__Union +from reagent.publishers.union import FileSystemPublisher, ModelPublisher__Union from reagent.replay_memory.circular_replay_buffer import ReplayBuffer from reagent.replay_memory.utils import replay_buffer_to_pre_timeline_df -from reagent.workflow.model_managers.union import ModelManager__Union -from reagent.workflow.publishers.union import FileSystemPublisher, ModelPublisher__Union -from reagent.workflow.spark_utils import call_spark_class, get_spark_session -from reagent.workflow.types import TableSpec + +from .types import TableSpec logger = logging.getLogger(__name__) -def initialize_seed(seed: Optional[int] = None): - if seed is not None: - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) +def initialize_seed(seed: int, env): + pl.seed_everything(seed) + env.seed(seed) + env.action_space.seed(seed) -def offline_gym( +def offline_gym_random( env_name: str, pkl_path: str, num_train_transitions: int, max_steps: Optional[int], - seed: Optional[int] = None, + seed: int = 1, ): """ - Generate samples from a DiscreteRandomPolicy on the Gym environment and + Generate samples from a random Policy on the Gym environment and saves results in a pandas df parquet. """ - initialize_seed(seed) - env = EnvFactory.make(env_name) + env = Gym(env_name=env_name) + random_policy = make_random_policy_for_env(env) + agent = Agent.create_for_env(env, policy=random_policy) + return _offline_gym(env, agent, pkl_path, num_train_transitions, max_steps, seed) - replay_buffer = ReplayBuffer.create_from_env( - env=env, replay_memory_size=num_train_transitions, batch_size=1 - ) - fill_replay_buffer(env, replay_buffer, num_train_transitions) + +def offline_gym_predictor( + env_name: str, + model: ModelManager__Union, + publisher: ModelPublisher__Union, + pkl_path: str, + num_train_transitions: int, + max_steps: Optional[int], + module_name: str = "default_model", + seed: int = 1, +): + """ + Generate samples from a trained Policy on the Gym environment and + saves results in a pandas df parquet. 
+ """ + env = Gym(env_name=env_name) + agent = make_agent_from_model(env, model, publisher, module_name) + return _offline_gym(env, agent, pkl_path, num_train_transitions, max_steps, seed) + + +def _offline_gym( + env: Gym, + agent: Agent, + pkl_path: str, + num_train_transitions: int, + max_steps: Optional[int], + seed: int = 1, +): + initialize_seed(seed, env) + + replay_buffer = ReplayBuffer(replay_capacity=num_train_transitions, batch_size=1) + fill_replay_buffer(env, replay_buffer, num_train_transitions, agent) if isinstance(env.action_space, gym.spaces.Discrete): is_discrete_action = True else: @@ -65,8 +96,8 @@ def offline_gym( def timeline_operator(pkl_path: str, input_table_spec: TableSpec): - """ Loads a pandas parquet, converts to pyspark, and uploads df to Hive. - Then call the timeline operator. + """Loads a pandas parquet, converts to pyspark, and uploads df to Hive. + Then call the timeline operator. """ pd_df = pd.read_pickle(pkl_path) @@ -91,30 +122,48 @@ def timeline_operator(pkl_path: str, input_table_spec: TableSpec): call_spark_class(spark, class_name="Timeline", args=json.dumps(arg)) -def evaluate_gym( - env_name: str, +def make_agent_from_model( + env: Gym, model: ModelManager__Union, publisher: ModelPublisher__Union, - num_eval_episodes: int, - passing_score_bar: float, - max_steps: Optional[int] = None, + module_name: str, ): publisher_manager = publisher.value assert isinstance( publisher_manager, FileSystemPublisher ), f"publishing manager is type {type(publisher_manager)}, not FileSystemPublisher" - env = EnvFactory.make(env_name) - torchscript_path = publisher_manager.get_latest_published_model(model.value) + module_names = model.value.serving_module_names() + assert module_name in module_names, f"{module_name} not in {module_names}" + torchscript_path = publisher_manager.get_latest_published_model( + model.value, module_name + ) jit_model = torch.jit.load(torchscript_path) policy = create_predictor_policy_from_model(jit_model) agent = Agent.create_for_env_with_serving_policy(env, policy) + return agent + + +def evaluate_gym( + env_name: str, + model: ModelManager__Union, + publisher: ModelPublisher__Union, + num_eval_episodes: int, + passing_score_bar: float, + module_name: str = "default_model", + max_steps: Optional[int] = None, +): + env = Gym(env_name=env_name) + initialize_seed(1, env) + agent = make_agent_from_model(env, model, publisher, module_name) + rewards = evaluate_for_n_episodes( n=num_eval_episodes, env=env, agent=agent, max_steps=max_steps ) avg_reward = np.mean(rewards) logger.info( f"Average reward over {num_eval_episodes} is {avg_reward}.\n" - f"List of rewards: {rewards}" + f"List of rewards: {rewards}\n" + f"Passing score bar: {passing_score_bar}" ) assert ( avg_reward >= passing_score_bar diff --git a/reagent/workflow/identify_types_flow.py b/reagent/workflow/identify_types_flow.py index c249849a3..45f946cf4 100644 --- a/reagent/workflow/identify_types_flow.py +++ b/reagent/workflow/identify_types_flow.py @@ -3,15 +3,18 @@ from typing import Dict, List, Optional +import reagent.core.types as rlt + # pyre-fixme[21]: Could not find `pyspark`. # pyre-fixme[21]: Could not find `pyspark`. 
from pyspark.sql.functions import col, collect_list, explode +from reagent.data.spark_utils import get_spark_session from reagent.preprocessing.normalization import ( - NormalizationParameters, get_feature_norm_metadata, + NormalizationParameters, ) -from reagent.workflow.spark_utils import get_spark_session -from reagent.workflow.types import PreprocessingOptions, TableSpec + +from .types import PreprocessingOptions, TableSpec def normalization_helper( @@ -21,10 +24,10 @@ def normalization_helper( skip_box_cox: bool = False, skip_quantiles: bool = False, feature_overrides: Optional[Dict[int, str]] = None, - whitelist_features: Optional[List[int]] = None, - assert_whitelist_feature_coverage: bool = True, + allowedlist_features: Optional[List[int]] = None, + assert_allowedlist_feature_coverage: bool = True, ): - """ Construct a preprocessing closure to obtain normalization parameters + """Construct a preprocessing closure to obtain normalization parameters from rows of feature_name and a sample of feature_values. """ @@ -36,22 +39,24 @@ def normalization_helper( "skip_quantiles": skip_quantiles, "feature_overrides": feature_overrides, } - # pyre-fixme[9]: whitelist_features has type `Optional[List[int]]`; used as + # pyre-fixme[9]: allowedlist_features has type `Optional[List[int]]`; used as # `Set[int]`. - # pyre-fixme[9]: whitelist_features has type `Optional[List[int]]`; used as + # pyre-fixme[9]: allowedlist_features has type `Optional[List[int]]`; used as # `Set[int]`. - whitelist_features = set(whitelist_features or []) + allowedlist_features = set(allowedlist_features or []) - def validate_whitelist_features(params: Dict[int, NormalizationParameters]) -> None: - if not whitelist_features: + def validate_allowedlist_features( + params: Dict[int, NormalizationParameters] + ) -> None: + if not allowedlist_features: return - whitelist_feature_set = {int(fid) for fid in whitelist_features} + allowedlist_feature_set = {int(fid) for fid in allowedlist_features} available_features = set(params.keys()) - assert whitelist_feature_set == available_features, ( + assert allowedlist_feature_set == available_features, ( "Could not identify preprocessing type for these features: {}; " "extra features: {}".format( - whitelist_feature_set - available_features, - available_features - whitelist_feature_set, + allowedlist_feature_set - available_features, + available_features - allowedlist_feature_set, ) ) @@ -64,12 +69,12 @@ def process(rows: List) -> Dict[int, NormalizationParameters]: row["feature_name"], row["feature_values"], norm_params ) if norm_metdata is not None and ( - not whitelist_features or row["feature_name"] in whitelist_features + not allowedlist_features or row["feature_name"] in allowedlist_features ): params[row["feature_name"]] = norm_metdata - if assert_whitelist_feature_coverage: - validate_whitelist_features(params) + if assert_allowedlist_feature_coverage: + validate_allowedlist_features(params) return params return process @@ -81,7 +86,7 @@ def identify_normalization_parameters( preprocessing_options: PreprocessingOptions, seed: Optional[int] = None, ) -> Dict[int, NormalizationParameters]: - """ Get normalization parameters """ + """Get normalization parameters""" sqlCtx = get_spark_session() df = sqlCtx.sql(f"SELECT * FROM {table_spec.table_name}") df = create_normalization_spec_spark( @@ -96,8 +101,8 @@ def identify_normalization_parameters( skip_box_cox=preprocessing_options.skip_box_cox, skip_quantiles=preprocessing_options.skip_quantiles, 
feature_overrides=preprocessing_options.feature_overrides, - whitelist_features=preprocessing_options.whitelist_features, - assert_whitelist_feature_coverage=preprocessing_options.assert_whitelist_feature_coverage, + allowedlist_features=preprocessing_options.allowedlist_features, + assert_allowedlist_feature_coverage=preprocessing_options.assert_allowedlist_feature_coverage, ) return normalization_processor(rows) @@ -109,6 +114,7 @@ def create_normalization_spec_spark( # assumes column has a type of map df = df.select( + # pyre-fixme[16]: Module `functions` has no attribute `col`. explode(col(column).alias("features")).alias("feature_name", "feature_value") ) @@ -123,6 +129,18 @@ def create_normalization_spec_spark( # perform sampling and collect them df = df.sampleBy("feature_name", fractions=frac, seed=seed) df = df.groupBy("feature_name").agg( + # pyre-fixme[16]: Module `functions` has no attribute `collect_list`. collect_list("feature_value").alias("feature_values") ) return df + + +# TODO: for OSS +def identify_sparse_normalization_parameters( + feature_config: rlt.ModelFeatureConfig, + table_spec: TableSpec, + id_list_column: str, + id_score_list_column: str, + preprocessing_options: PreprocessingOptions, +): + return {} diff --git a/reagent/workflow/model_managers/actor_critic_base.py b/reagent/workflow/model_managers/actor_critic_base.py deleted file mode 100644 index 8a6687932..000000000 --- a/reagent/workflow/model_managers/actor_critic_base.py +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -import logging -from typing import Dict, List, Optional, Tuple - -import reagent.types as rlt -import torch -from reagent.core.dataclasses import dataclass, field -from reagent.evaluation.evaluator import Evaluator, get_metrics_to_score -from reagent.gym.policies.policy import Policy -from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model -from reagent.models.base import ModelBase -from reagent.parameters import EvaluationParameters, NormalizationData, NormalizationKey -from reagent.preprocessing.batch_preprocessor import ( - BatchPreprocessor, - InputColumn, - PolicyNetworkBatchPreprocessor, - Preprocessor, -) -from reagent.preprocessing.normalization import get_feature_config -from reagent.workflow.data_fetcher import query_data -from reagent.workflow.identify_types_flow import identify_normalization_parameters -from reagent.workflow.model_managers.model_manager import ModelManager -from reagent.workflow.reporters.actor_critic_reporter import ActorCriticReporter -from reagent.workflow.types import ( - Dataset, - PreprocessingOptions, - ReaderOptions, - RewardOptions, - RLTrainingOutput, - RLTrainingReport, - TableSpec, -) -from reagent.workflow.utils import train_and_evaluate_generic - - -logger = logging.getLogger(__name__) - - -class ActorPolicyWrapper(Policy): - """ Actor's forward function is our act """ - - def __init__(self, actor_network): - self.actor_network = actor_network - - @torch.no_grad() - def act(self, obs: rlt.FeatureData) -> rlt.ActorOutput: - self.actor_network.eval() - output = self.actor_network(obs) - self.actor_network.train() - return output.detach().cpu() - - -@dataclass -class ActorCriticBase(ModelManager): - state_preprocessing_options: Optional[PreprocessingOptions] = None - action_preprocessing_options: Optional[PreprocessingOptions] = None - action_feature_override: Optional[str] = None - state_float_features: Optional[List[Tuple[int, str]]] = 
None - action_float_features: List[Tuple[int, str]] = field(default_factory=list) - reader_options: Optional[ReaderOptions] = None - eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters) - - def __post_init_post_parse__(self): - super().__init__() - assert ( - self.state_preprocessing_options is None - or self.state_preprocessing_options.whitelist_features is None - ), ( - "Please set state whitelist features in state_float_features field of " - "config instead" - ) - assert ( - self.action_preprocessing_options is None - or self.action_preprocessing_options.whitelist_features is None - ), ( - "Please set action whitelist features in action_float_features field of " - "config instead" - ) - self._state_preprocessing_options = self.state_preprocessing_options - self._action_preprocessing_options = self.action_preprocessing_options - - # To be filled by property metrics_to_score - self._metrics_to_score: Optional[List[str]] = None - - # To be filled by subclasses - self._actor_network: Optional[ModelBase] = None - self._q1_network: Optional[ModelBase] = None - - @property - def should_generate_eval_dataset(self) -> bool: - return self.eval_parameters.calc_cpe_in_training - - def create_policy(self, serving: bool) -> Policy: - """ Create online actor critic policy. """ - - if serving: - return create_predictor_policy_from_model(self.build_serving_module()) - else: - return ActorPolicyWrapper(self._actor_network) - - @property - def metrics_to_score(self) -> List[str]: - assert self._reward_options is not None - if self._metrics_to_score is None: - # pyre-fixme[16]: `ActorCriticBase` has no attribute `_metrics_to_score`. - # pyre-fixme[16]: `ActorCriticBase` has no attribute `_metrics_to_score`. - self._metrics_to_score = get_metrics_to_score( - # pyre-fixme[16]: `Optional` has no attribute `metric_reward_values`. - # pyre-fixme[16]: `Optional` has no attribute `metric_reward_values`. 
- self._reward_options.metric_reward_values - ) - return self._metrics_to_score - - @property - def state_feature_config(self) -> rlt.ModelFeatureConfig: - return get_feature_config(self.state_float_features) - - @property - def action_feature_config(self) -> rlt.ModelFeatureConfig: - assert len(self.action_float_features) > 0, "You must set action_float_features" - return get_feature_config(self.action_float_features) - - def run_feature_identification( - self, input_table_spec: TableSpec - ) -> Dict[str, NormalizationData]: - # Run state feature identification - state_preprocessing_options = ( - self._state_preprocessing_options or PreprocessingOptions() - ) - state_features = [ - ffi.feature_id for ffi in self.state_feature_config.float_feature_infos - ] - logger.info(f"state whitelist_features: {state_features}") - state_preprocessing_options = state_preprocessing_options._replace( - whitelist_features=state_features - ) - - state_normalization_parameters = identify_normalization_parameters( - input_table_spec, InputColumn.STATE_FEATURES, state_preprocessing_options - ) - - # Run action feature identification - action_preprocessing_options = ( - self._action_preprocessing_options or PreprocessingOptions() - ) - action_features = [ - ffi.feature_id for ffi in self.action_feature_config.float_feature_infos - ] - logger.info(f"action whitelist_features: {action_features}") - - actor_net_builder = self.actor_net_builder.value - action_feature_override = actor_net_builder.default_action_preprocessing - logger.info(f"Default action_feature_override is {action_feature_override}") - if self.action_feature_override is not None: - action_feature_override = self.action_feature_override - - assert action_preprocessing_options.feature_overrides is None - action_preprocessing_options = action_preprocessing_options._replace( - whitelist_features=action_features, - feature_overrides={fid: action_feature_override for fid in action_features}, - ) - action_normalization_parameters = identify_normalization_parameters( - input_table_spec, InputColumn.ACTION, action_preprocessing_options - ) - - return { - NormalizationKey.STATE: NormalizationData( - dense_normalization_parameters=state_normalization_parameters - ), - NormalizationKey.ACTION: NormalizationData( - dense_normalization_parameters=action_normalization_parameters - ), - } - - @property - def required_normalization_keys(self) -> List[str]: - return [NormalizationKey.STATE, NormalizationKey.ACTION] - - def query_data( - self, - input_table_spec: TableSpec, - sample_range: Optional[Tuple[float, float]], - reward_options: RewardOptions, - ) -> Dataset: - logger.info("Starting query") - return query_data( - input_table_spec=input_table_spec, - discrete_action=False, - include_possible_actions=False, - custom_reward_expression=reward_options.custom_reward_expression, - sample_range=sample_range, - ) - - def build_batch_preprocessor(self) -> BatchPreprocessor: - state_preprocessor = Preprocessor( - self.state_normalization_data.dense_normalization_parameters, - use_gpu=self.use_gpu, - ) - action_preprocessor = Preprocessor( - self.action_normalization_data.dense_normalization_parameters, - use_gpu=self.use_gpu, - ) - return PolicyNetworkBatchPreprocessor( - state_preprocessor=state_preprocessor, - action_preprocessor=action_preprocessor, - use_gpu=self.use_gpu, - ) - - # TODO: deprecate, once we deprecate internal page handlers - def train( - self, train_dataset: Dataset, eval_dataset: Optional[Dataset], num_epochs: int - ) -> RLTrainingOutput: - - 
reporter = ActorCriticReporter() - # pyre-fixme[16]: `RLTrainer` has no attribute `add_observer`. - self.trainer.add_observer(reporter) - - evaluator = Evaluator( - action_names=None, - gamma=self.rl_parameters.gamma, - model=self.trainer, - metrics_to_score=self.metrics_to_score, - ) - # pyre-fixme[16]: `Evaluator` has no attribute `add_observer`. - evaluator.add_observer(reporter) - - batch_preprocessor = self.build_batch_preprocessor() - train_and_evaluate_generic( - train_dataset=train_dataset, - eval_dataset=eval_dataset, - # pyre-fixme[6]: Expected `RLTrainer` for 3rd param but got `Trainer`. - trainer=self.trainer, - num_epochs=num_epochs, - use_gpu=self.use_gpu, - batch_preprocessor=batch_preprocessor, - reporter=reporter, - evaluator=evaluator, - reader_options=self.reader_options, - ) - # pyre-fixme[16]: `RLTrainingReport` has no attribute `make_union_instance`. - training_report = RLTrainingReport.make_union_instance( - reporter.generate_training_report() - ) - - return RLTrainingOutput(training_report=training_report) diff --git a/reagent/workflow/model_managers/discrete/discrete_dqn.py b/reagent/workflow/model_managers/discrete/discrete_dqn.py deleted file mode 100644 index 16da545d3..000000000 --- a/reagent/workflow/model_managers/discrete/discrete_dqn.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python3 - -import logging - -import torch -from reagent.core.dataclasses import dataclass, field -from reagent.net_builder.discrete_dqn.dueling import Dueling -from reagent.net_builder.discrete_dqn.fully_connected import FullyConnected -from reagent.net_builder.unions import DiscreteDQNNetBuilder__Union -from reagent.parameters import param_hash -from reagent.training import DQNTrainer, DQNTrainerParameters -from reagent.training.loss_reporter import NoOpLossReporter -from reagent.workflow.model_managers.discrete_dqn_base import DiscreteDQNBase - - -logger = logging.getLogger(__name__) - - -@dataclass -class DiscreteDQN(DiscreteDQNBase): - __hash__ = param_hash - - trainer_param: DQNTrainerParameters = field(default_factory=DQNTrainerParameters) - net_builder: DiscreteDQNNetBuilder__Union = field( - # pyre-fixme[28]: Unexpected keyword argument `Dueling`. - # pyre-fixme[28]: Unexpected keyword argument `Dueling`. - default_factory=lambda: DiscreteDQNNetBuilder__Union(Dueling=Dueling()) - ) - cpe_net_builder: DiscreteDQNNetBuilder__Union = field( - # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. - # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. - default_factory=lambda: DiscreteDQNNetBuilder__Union( - FullyConnected=FullyConnected() - ) - ) - # TODO: move evaluation parameters to here from trainer_param.evaluation - # note that only DiscreteDQN and QRDQN call RLTrainer._initialize_cpe, - # so maybe can be removed from the RLTrainer class. - - def __post_init_post_parse__(self): - super().__post_init_post_parse__() - self.rl_parameters = self.trainer_param.rl - self.action_names = self.trainer_param.actions - assert ( - len(self.action_names) > 1 - ), f"DiscreteDQNModel needs at least 2 actions. Got {self.action_names}." - if self.trainer_param.minibatch_size % 8 != 0: - logger.warn( - f"minibatch size ({self.trainer_param.minibatch_size}) " - "should be divisible by 8 for performance reasons!" 
- ) - - def build_trainer(self) -> DQNTrainer: - net_builder = self.net_builder.value - q_network = net_builder.build_q_network( - self.state_feature_config, - self.state_normalization_data, - len(self.action_names), - ) - - if self.use_gpu: - q_network = q_network.cuda() - - q_network_target = q_network.get_target_network() - - reward_network, q_network_cpe, q_network_cpe_target = None, None, None - # pyre-fixme[16]: `DQNTrainerParameters` has no attribute `evaluation`. - # pyre-fixme[16]: `DQNTrainerParameters` has no attribute `evaluation`. - if self.trainer_param.evaluation.calc_cpe_in_training: - # Metrics + reward - num_output_nodes = (len(self.metrics_to_score) + 1) * len( - # pyre-fixme[16]: `DQNTrainerParameters` has no attribute `actions`. - # pyre-fixme[16]: `DQNTrainerParameters` has no attribute `actions`. - self.trainer_param.actions - ) - - cpe_net_builder = self.cpe_net_builder.value - reward_network = cpe_net_builder.build_q_network( - self.state_feature_config, - self.state_normalization_data, - num_output_nodes, - ) - q_network_cpe = cpe_net_builder.build_q_network( - self.state_feature_config, - self.state_normalization_data, - num_output_nodes, - ) - - if self.use_gpu: - reward_network.cuda() - q_network_cpe.cuda() - - q_network_cpe_target = q_network_cpe.get_target_network() - - # pyre-fixme[16]: `DiscreteDQN` has no attribute `_q_network`. - # pyre-fixme[16]: `DiscreteDQN` has no attribute `_q_network`. - self._q_network = q_network - # pyre-fixme[29]: `Type[reagent.training.dqn_trainer.DQNTrainer]` is not a - # function. - # pyre-fixme[29]: `Type[reagent.training.dqn_trainer.DQNTrainer]` is not a - # function. - trainer = DQNTrainer( - q_network=q_network, - q_network_target=q_network_target, - reward_network=reward_network, - q_network_cpe=q_network_cpe, - q_network_cpe_target=q_network_cpe_target, - metrics_to_score=self.metrics_to_score, - loss_reporter=NoOpLossReporter(), - use_gpu=self.use_gpu, - # pyre-fixme[16]: `DQNTrainerParameters` has no attribute `asdict`. - # pyre-fixme[16]: `DQNTrainerParameters` has no attribute `asdict`. 
- **self.trainer_param.asdict(), - ) - return trainer - - def build_serving_module(self) -> torch.nn.Module: - """ - Returns a TorchScript predictor module - """ - assert self._q_network is not None, "_q_network was not initialized" - - net_builder = self.net_builder.value - return net_builder.build_serving_module( - self._q_network, - self.state_normalization_data, - action_names=self.action_names, - state_feature_config=self.state_feature_config, - ) diff --git a/reagent/workflow/model_managers/discrete_dqn_base.py b/reagent/workflow/model_managers/discrete_dqn_base.py deleted file mode 100644 index 2e91dd4f3..000000000 --- a/reagent/workflow/model_managers/discrete_dqn_base.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/env python3 - -import logging -from typing import Dict, List, Optional, Tuple - -from reagent import types as rlt -from reagent.core.dataclasses import dataclass, field -from reagent.evaluation.evaluator import Evaluator, get_metrics_to_score -from reagent.gym.policies.policy import Policy -from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model -from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler -from reagent.gym.policies.scorers.discrete_scorer import discrete_dqn_scorer -from reagent.models.base import ModelBase -from reagent.parameters import EvaluationParameters, NormalizationData, NormalizationKey -from reagent.preprocessing.batch_preprocessor import ( - BatchPreprocessor, - DiscreteDqnBatchPreprocessor, - InputColumn, -) -from reagent.preprocessing.preprocessor import Preprocessor -from reagent.workflow.data_fetcher import query_data -from reagent.workflow.identify_types_flow import identify_normalization_parameters -from reagent.workflow.model_managers.model_manager import ModelManager -from reagent.workflow.reporters.discrete_dqn_reporter import DiscreteDQNReporter -from reagent.workflow.types import ( - Dataset, - PreprocessingOptions, - ReaderOptions, - RewardOptions, - RLTrainingOutput, - RLTrainingReport, - TableSpec, -) -from reagent.workflow.utils import train_and_evaluate_generic - - -logger = logging.getLogger(__name__) - - -@dataclass -class DiscreteDQNBase(ModelManager): - target_action_distribution: Optional[List[float]] = None - state_feature_config: rlt.ModelFeatureConfig = field( - default_factory=lambda: rlt.ModelFeatureConfig(float_feature_infos=[]) - ) - preprocessing_options: Optional[PreprocessingOptions] = None - reader_options: Optional[ReaderOptions] = None - eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters) - - def __post_init_post_parse__(self): - super().__init__() - self._metrics_to_score = None - self._q_network: Optional[ModelBase] = None - - def create_policy(self, serving: bool) -> Policy: - """ Create an online DiscreteDQN Policy from env. """ - if serving: - return create_predictor_policy_from_model(self.build_serving_module()) - else: - sampler = SoftmaxActionSampler(temperature=self.rl_parameters.temperature) - # pyre-fixme[16]: `RLTrainer` has no attribute `q_network`. - scorer = discrete_dqn_scorer(self.trainer.q_network) - return Policy(scorer=scorer, sampler=sampler) - - @property - def metrics_to_score(self) -> List[str]: - assert self._reward_options is not None - if self._metrics_to_score is None: - # pyre-fixme[16]: `DiscreteDQNBase` has no attribute `_metrics_to_score`. - # pyre-fixme[16]: `DiscreteDQNBase` has no attribute `_metrics_to_score`. 
- self._metrics_to_score = get_metrics_to_score( - # pyre-fixme[16]: `Optional` has no attribute `metric_reward_values`. - # pyre-fixme[16]: `Optional` has no attribute `metric_reward_values`. - self._reward_options.metric_reward_values - ) - return self._metrics_to_score - - @property - def should_generate_eval_dataset(self) -> bool: - return self.eval_parameters.calc_cpe_in_training - - @property - def required_normalization_keys(self) -> List[str]: - return [NormalizationKey.STATE] - - def run_feature_identification( - self, input_table_spec: TableSpec - ) -> Dict[str, NormalizationData]: - preprocessing_options = self.preprocessing_options or PreprocessingOptions() - logger.info("Overriding whitelist_features") - state_features = [ - ffi.feature_id for ffi in self.state_feature_config.float_feature_infos - ] - preprocessing_options = preprocessing_options._replace( - whitelist_features=state_features - ) - return { - NormalizationKey.STATE: NormalizationData( - dense_normalization_parameters=identify_normalization_parameters( - input_table_spec, InputColumn.STATE_FEATURES, preprocessing_options - ) - ) - } - - def query_data( - self, - input_table_spec: TableSpec, - sample_range: Optional[Tuple[float, float]], - reward_options: RewardOptions, - ) -> Dataset: - return query_data( - input_table_spec=input_table_spec, - discrete_action=True, - actions=self.action_names, - include_possible_actions=True, - sample_range=sample_range, - custom_reward_expression=reward_options.custom_reward_expression, - multi_steps=self.multi_steps, - gamma=self.rl_parameters.gamma, - ) - - @property - def multi_steps(self) -> Optional[int]: - return self.rl_parameters.multi_steps - - def build_batch_preprocessor(self) -> BatchPreprocessor: - state_preprocessor = Preprocessor( - self.state_normalization_data.dense_normalization_parameters, - use_gpu=self.use_gpu, - ) - return DiscreteDqnBatchPreprocessor( - num_actions=len(self.action_names), - state_preprocessor=state_preprocessor, - use_gpu=self.use_gpu, - ) - - def train( - self, train_dataset: Dataset, eval_dataset: Optional[Dataset], num_epochs: int - ) -> RLTrainingOutput: - """ - Train the model - - Returns partially filled RLTrainingOutput. - The field that should not be filled are: - - output_path - """ - reporter = DiscreteDQNReporter( - self.trainer_param.actions, - target_action_distribution=self.target_action_distribution, - ) - # pyre-fixme[16]: `RLTrainer` has no attribute `add_observer`. - self.trainer.add_observer(reporter) - - evaluator = Evaluator( - self.action_names, - self.rl_parameters.gamma, - self.trainer, - metrics_to_score=self.metrics_to_score, - ) - # pyre-fixme[16]: `Evaluator` has no attribute `add_observer`. - evaluator.add_observer(reporter) - - batch_preprocessor = self.build_batch_preprocessor() - train_and_evaluate_generic( - train_dataset, - eval_dataset, - # pyre-fixme[6]: Expected `RLTrainer` for 3rd param but got `Trainer`. - # pyre-fixme[6]: Expected `RLTrainer` for 3rd param but got `Trainer`. - self.trainer, - num_epochs, - self.use_gpu, - batch_preprocessor, - reporter, - evaluator, - reader_options=self.reader_options, - ) - # pyre-fixme[16]: `RLTrainingReport` has no attribute `make_union_instance`. 
- training_report = RLTrainingReport.make_union_instance( - reporter.generate_training_report() - ) - return RLTrainingOutput(training_report=training_report) diff --git a/reagent/workflow/model_managers/model_manager.py b/reagent/workflow/model_managers/model_manager.py deleted file mode 100644 index f65b7980d..000000000 --- a/reagent/workflow/model_managers/model_manager.py +++ /dev/null @@ -1,237 +0,0 @@ -#!/usr/bin/env python3 - -import abc -import dataclasses -import logging -import time -from typing import Dict, List, Optional, Tuple - -import torch -from reagent.core.registry_meta import RegistryMeta -from reagent.parameters import NormalizationData -from reagent.tensorboardX import summary_writer_context -from reagent.training.trainer import Trainer -from reagent.workflow.types import Dataset, RewardOptions, RLTrainingOutput, TableSpec -from torch.utils.tensorboard import SummaryWriter - - -logger = logging.getLogger(__name__) - - -class ModelManager(metaclass=RegistryMeta): - """ - ModelManager manages how to train models. - - Each type of models can have their own config type, implemented as - `config_type()` class method. `__init__()` of the concrete class must take - this type. - - ModelManager abstracts over common phases of training, i.e.,: - 1. `run_feature_identification()` defines how to derive feature preprocessing - parameters from given data. - 2. `query_data()` massages the input table into the format expected by the trainer - 3. `initialize_trainer()` creates the trainer - 4. `train()` - 5. `build_serving_module()` builds the module for prediction - 6. `save_tainer()` saves the trainer for warmstarting - """ - - def __init__(self): - super().__init__() - # initialization is delayed to `initialize_trainer()` - self._normalization_data_map: Optional[Dict[str, NormalizationData]] = None - self._reward_options: Optional[RewardOptions] = None - self._trainer: Optional[Trainer] = None - self._use_gpu: Optional[bool] = None - - @property - def use_gpu(self) -> bool: - assert ( - self._use_gpu is not None - ), "Call initialize_trainer() to set the value first" - # pyre-fixme[7]: Expected `bool` but got `Optional[bool]`. - # pyre-fixme[7]: Expected `bool` but got `Optional[bool]`. - return self._use_gpu - - @property - def reward_options(self) -> RewardOptions: - assert self._reward_options is not None - # pyre-fixme[7]: Expected `RewardOptions` but got `Optional[RewardOptions]`. - # pyre-fixme[7]: Expected `RewardOptions` but got `Optional[RewardOptions]`. - return self._reward_options - - @reward_options.setter - def reward_options(self, reward_options: RewardOptions): - assert self._reward_options is None - self._reward_options = reward_options - - @abc.abstractmethod - def run_feature_identification( - self, input_table_spec: TableSpec - ) -> Dict[str, NormalizationData]: - """ - Derive preprocessing parameters from data. The keys of the dict should - match the keys from `required_normalization_keys()` - """ - pass - - @property - @abc.abstractmethod - def required_normalization_keys(self) -> List[str]: - """ Get the normalization keys required for current instance """ - pass - - def __getattr__(self, attr): - """ Get X_normalization_data by attribute """ - normalization_data_suffix = "_normalization_data" - if attr.endswith(normalization_data_suffix): - assert self._normalization_data_map is not None, ( - f"Trying to access {attr} but normalization_data_map " - "has not been set via `initialize_trainer`." 
- ) - normalization_key = attr[: -len(normalization_data_suffix)] - normalization_data = self._normalization_data_map.get( - normalization_key, None - ) - if normalization_data is None: - raise AttributeError( - f"normalization key `{normalization_key}` is unavailable. " - f"Available keys are: {self._normalization_data_map.keys()}." - ) - return normalization_data - - raise AttributeError( - f"attr {attr} not available {type(self)} (subclass of ModelManager)." - ) - - @property - @abc.abstractmethod - def should_generate_eval_dataset(self) -> bool: - pass - - @abc.abstractmethod - def query_data( - self, - input_table_spec: TableSpec, - sample_range: Optional[Tuple[float, float]], - reward_options: RewardOptions, - ) -> Dataset: - """ - Massage input table into the format expected by the trainer - """ - pass - - @property - def trainer(self) -> Trainer: - assert self._trainer is not None, "Call initialize_trainer() first" - # pyre-fixme[7]: Expected `Trainer` but got `Optional[Trainer]`. - # pyre-fixme[7]: Expected `Trainer` but got `Optional[Trainer]`. - return self._trainer - - def initialize_trainer( - self, - use_gpu: bool, - reward_options: RewardOptions, - normalization_data_map: Dict[str, NormalizationData], - warmstart_path: Optional[str] = None, - ) -> Trainer: - """ - Initialize the trainer. Subclass should not override this. Instead, - subclass should implement `required_normalization_keys()` and - `build_trainer()`. - """ - assert self._trainer is None, "Trainer was intialized" - self._use_gpu = use_gpu - self.reward_options = reward_options - # validate that we have all the required keys - for normalization_key in self.required_normalization_keys: - normalization_data = normalization_data_map.get(normalization_key, None) - assert normalization_data is not None, ( - f"NormalizationData for {normalization_key} " - "is required but not provided." - ) - # NOTE: Don't need this check in the future, for non-dense parameters - assert normalization_data.dense_normalization_parameters is not None, ( - f"Dense normalization parameters for " - f"{normalization_key} is not provided." - ) - assert ( - self._normalization_data_map is None - ), "Cannot reset self._normalization_data_map" - self._normalization_data_map = normalization_data_map - self._trainer = self.build_trainer() - if warmstart_path is not None: - trainer_state = torch.load(warmstart_path) - # pyre-fixme[16]: `Optional` has no attribute `load_state_dict`. - # pyre-fixme[16]: `Optional` has no attribute `load_state_dict`. - self._trainer.load_state_dict(trainer_state) - # pyre-fixme[7]: Expected `Trainer` but got `Optional[Trainer]`. - # pyre-fixme[7]: Expected `Trainer` but got `Optional[Trainer]`. - return self._trainer - - @abc.abstractmethod - def build_trainer(self) -> Trainer: - """ - Implement this to build the trainer, given the config - """ - pass - - def train_workflow( - self, - train_dataset: Dataset, - eval_dataset: Optional[Dataset], - normalization_data_map: Dict[str, NormalizationData], - num_epochs: int, - use_gpu: bool, - parent_workflow_id: int, - child_workflow_id: int, - reward_options: Optional[RewardOptions] = None, - warmstart_path: Optional[str] = None, - ) -> RLTrainingOutput: - writer = SummaryWriter() - logger.info("TensorBoard logging location is: {}".format(writer.log_dir)) - - warmstart_input_path = warmstart_path or None - self.initialize_trainer( - use_gpu=use_gpu, - # pyre-fixme[6]: Expected `RewardOptions` for 2nd param but got - # `Optional[RewardOptions]`. 
- # pyre-fixme[6]: Expected `RewardOptions` for 2nd param but got - # `Optional[RewardOptions]`. - reward_options=reward_options, - normalization_data_map=normalization_data_map, - warmstart_path=warmstart_input_path, - ) - - with summary_writer_context(writer): - train_output = self.train(train_dataset, eval_dataset, num_epochs) - - # TODO: make this a parameter - torchscript_output_path = f"model_{round(time.time())}.torchscript" - serving_module = self.build_serving_module() - torch.jit.save(serving_module, torchscript_output_path) - logger.info(f"Saved torchscript model to {torchscript_output_path}") - return dataclasses.replace(train_output, output_path=torchscript_output_path) - - @abc.abstractmethod - def train( - self, train_dataset: Dataset, eval_dataset: Optional[Dataset], num_epochs: int - ) -> RLTrainingOutput: - """ - Train the model - """ - pass - - @abc.abstractmethod - def build_serving_module(self) -> torch.nn.Module: - """ - Returns TorchScript module to be used in predictor - """ - pass - - def save_trainer(self, output_path: str) -> None: - """ - Save the trainer for warmstarting/checkpointing. - """ - trainer_state = self.trainer.state_dict() - torch.save(trainer_state, output_path) diff --git a/reagent/workflow/model_managers/parametric/parametric_dqn.py b/reagent/workflow/model_managers/parametric/parametric_dqn.py deleted file mode 100644 index 58a87668b..000000000 --- a/reagent/workflow/model_managers/parametric/parametric_dqn.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python3 - -import logging - -import torch -from reagent.core.dataclasses import dataclass, field -from reagent.net_builder.parametric_dqn.fully_connected import FullyConnected -from reagent.net_builder.unions import ParametricDQNNetBuilder__Union -from reagent.parameters import param_hash -from reagent.training import ParametricDQNTrainer, ParametricDQNTrainerParameters -from reagent.workflow.model_managers.parametric_dqn_base import ParametricDQNBase - - -logger = logging.getLogger(__name__) - - -@dataclass -class ParametricDQN(ParametricDQNBase): - __hash__ = param_hash - - trainer_param: ParametricDQNTrainerParameters = field( - default_factory=ParametricDQNTrainerParameters - ) - net_builder: ParametricDQNNetBuilder__Union = field( - # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. - # pyre-fixme[28]: Unexpected keyword argument `FullyConnected`. - default_factory=lambda: ParametricDQNNetBuilder__Union( - FullyConnected=FullyConnected() - ) - ) - - def __post_init_post_parse__(self): - super().__post_init_post_parse__() - self.rl_parameters = self.trainer_param.rl - - def build_trainer(self) -> ParametricDQNTrainer: - net_builder = self.net_builder.value - # pyre-fixme[16]: `ParametricDQN` has no attribute `_q_network`. - # pyre-fixme[16]: `ParametricDQN` has no attribute `_q_network`. - self._q_network = net_builder.build_q_network( - self.state_normalization_data, self.action_normalization_data - ) - # Metrics + reward - reward_output_dim = len(self.metrics_to_score) + 1 - reward_network = net_builder.build_q_network( - self.state_normalization_data, - self.action_normalization_data, - output_dim=reward_output_dim, - ) - - if self.use_gpu: - self._q_network = self._q_network.cuda() - reward_network = reward_network.cuda() - - q_network_target = self._q_network.get_target_network() - # pyre-fixme[29]: `Type[ParametricDQNTrainer]` is not a function. - # pyre-fixme[29]: `Type[ParametricDQNTrainer]` is not a function. 
- return ParametricDQNTrainer( - q_network=self._q_network, - q_network_target=q_network_target, - reward_network=reward_network, - params=self.trainer_param, - use_gpu=self.use_gpu, - ) - - def build_serving_module(self) -> torch.nn.Module: - net_builder = self.net_builder.value - assert self._q_network is not None - return net_builder.build_serving_module( - self._q_network, - self.state_normalization_data, - self.action_normalization_data, - ) diff --git a/reagent/workflow/model_managers/union.py b/reagent/workflow/model_managers/union.py deleted file mode 100644 index a7e212490..000000000 --- a/reagent/workflow/model_managers/union.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -""" Register all ModelManagers. Must import them before filling union. """ - -from reagent.workflow import types -from reagent.workflow.model_managers.model_manager import ModelManager - -from .actor_critic import * # noqa -from .discrete import * # noqa -from .model_based import * # noqa -from .parametric import * # noqa - - -@ModelManager.fill_union() -class ModelManager__Union(types.TaggedUnion): - pass diff --git a/reagent/workflow/model_managers/world_model_base.py b/reagent/workflow/model_managers/world_model_base.py deleted file mode 100644 index 9bea40b38..000000000 --- a/reagent/workflow/model_managers/world_model_base.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python3 - -import logging -from typing import Dict, List, Optional, Tuple - -from reagent.core.dataclasses import dataclass -from reagent.gym.policies.policy import Policy -from reagent.parameters import NormalizationData, NormalizationKey -from reagent.preprocessing.batch_preprocessor import BatchPreprocessor -from reagent.workflow.model_managers.model_manager import ModelManager -from reagent.workflow.types import Dataset, RewardOptions, RLTrainingOutput, TableSpec - - -logger = logging.getLogger(__name__) - - -@dataclass -class WorldModelBase(ModelManager): - def __post_init_post_parse__(self): - super().__init__() - - @classmethod - def normalization_key(cls) -> str: - raise NotImplementedError() - - def create_policy(self) -> Policy: - """ Create a WorldModel Policy from env. """ - raise NotImplementedError() - - @property - def should_generate_eval_dataset(self) -> bool: - return False - - @property - def required_normalization_keys(self) -> List[str]: - return [NormalizationKey.STATE, NormalizationKey.ACTION] - - def run_feature_identification( - self, input_table_spec: TableSpec - ) -> Dict[str, NormalizationData]: - raise NotImplementedError() - - def query_data( - self, - input_table_spec: TableSpec, - sample_range: Optional[Tuple[float, float]], - reward_options: RewardOptions, - ) -> Dataset: - raise NotImplementedError() - - def build_batch_preprocessor(self) -> BatchPreprocessor: - raise NotImplementedError() - - def train( - self, train_dataset: Dataset, eval_dataset: Optional[Dataset], num_epochs: int - ) -> RLTrainingOutput: - """ - Train the model - - Returns partially filled RLTrainingOutput. 
The field that should not be filled - are: - - output_path - - warmstart_output_path - - vis_metrics - - validation_output - """ - raise NotImplementedError() diff --git a/reagent/workflow/publishers/union.py b/reagent/workflow/publishers/union.py deleted file mode 100644 index 06e446881..000000000 --- a/reagent/workflow/publishers/union.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python3 - -from reagent.workflow import types - -from .file_system_publisher import FileSystemPublisher # noqa -from .model_publisher import ModelPublisher -from .no_publishing import NoPublishing # noqa - - -@ModelPublisher.fill_union() -class ModelPublisher__Union(types.TaggedUnion): - pass diff --git a/reagent/workflow/reporters/actor_critic_reporter.py b/reagent/workflow/reporters/actor_critic_reporter.py deleted file mode 100644 index dc7d2788e..000000000 --- a/reagent/workflow/reporters/actor_critic_reporter.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python3 - -import itertools -import logging -from collections import OrderedDict - -from reagent.core import aggregators as agg -from reagent.core.observers import IntervalAggregatingObserver, ValueListObserver -from reagent.workflow.reporters.reporter_base import ReporterBase -from reagent.workflow.training_reports import ActorCriticTrainingReport - - -logger = logging.getLogger(__name__) - - -class ActorCriticReporter(ReporterBase): - def __init__(self, report_interval: int = 100): - self.value_list_observers = {"cpe_results": ValueListObserver("cpe_details")} - self.aggregating_observers = OrderedDict( - (name, IntervalAggregatingObserver(report_interval, aggregator)) - for name, aggregator in itertools.chain( - [ - ("td_loss", agg.MeanAggregator("td_loss")), - ("reward_loss", agg.MeanAggregator("reward_loss")), - ("recent_rewards", agg.RecentValuesAggregator("logged_rewards")), - ], - [ - ( - f"{key}_tb", - agg.TensorBoardHistogramAndMeanAggregator(key, log_key), - ) - for key, log_key in [ - ("td_loss", "td_loss"), - ("reward_loss", "reward_loss"), - ("logged_propensities", "propensities/logged"), - ("logged_rewards", "reward/logged"), - ] - ], - ) - ) - super().__init__(self.value_list_observers, self.aggregating_observers) - - # TODO: write this for OSS - def generate_training_report(self) -> ActorCriticTrainingReport: - return ActorCriticTrainingReport() diff --git a/reagent/workflow/reporters/discrete_dqn_reporter.py b/reagent/workflow/reporters/discrete_dqn_reporter.py deleted file mode 100644 index 908dae062..000000000 --- a/reagent/workflow/reporters/discrete_dqn_reporter.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python3 - -import itertools -import logging -from collections import OrderedDict -from typing import List, Optional - -import torch -from reagent.core import aggregators as agg -from reagent.core.observers import IntervalAggregatingObserver, ValueListObserver -from reagent.workflow.reporters.reporter_base import ReporterBase -from reagent.workflow.training_reports import DQNTrainingReport - - -logger = logging.getLogger(__name__) - - -class DiscreteDQNReporter(ReporterBase): - def __init__( - self, - actions: List[str], - report_interval: int = 100, - target_action_distribution: Optional[List[float]] = None, - recent_window_size: int = 100, - ): - self.value_list_observers = {"cpe_results": ValueListObserver("cpe_details")} - self.aggregating_observers = OrderedDict( - (name, IntervalAggregatingObserver(report_interval, aggregator)) - for name, aggregator in itertools.chain( - [ - ("td_loss", 
agg.MeanAggregator("td_loss")), - ("reward_loss", agg.MeanAggregator("reward_loss")), - ( - "model_values", - agg.FunctionsByActionAggregator( - "model_values", - actions, - {"mean": torch.mean, "std": torch.std}, - ), - ), - ( - "logged_action", - agg.ActionCountAggregator("logged_actions", actions), - ), - ( - "model_action", - agg.ActionCountAggregator("model_action_idxs", actions), - ), - ("recent_rewards", agg.RecentValuesAggregator("logged_rewards")), - ], - [ - ( - f"{key}_tb", - agg.TensorBoardActionCountAggregator(key, title, actions), - ) - for key, title in [ - ("logged_actions", "logged"), - ("model_action_idxs", "model"), - ] - ], - [ - ( - f"{key}_tb", - agg.TensorBoardHistogramAndMeanAggregator(key, log_key), - ) - for key, log_key in [ - ("td_loss", "td_loss"), - ("reward_loss", "reward_loss"), - ("logged_propensities", "propensities/logged"), - ("logged_rewards", "reward/logged"), - ] - ], - [ - ( - f"{key}_tb", - agg.TensorBoardActionHistogramAndMeanAggregator( - key, category, title, actions - ), - ) - for key, category, title in [ - ("model_propensities", "propensities", "model"), - ("model_rewards", "reward", "model"), - ("model_values", "value", "model"), - ] - ], - ) - ) - super().__init__(self.value_list_observers, self.aggregating_observers) - self.target_action_distribution = target_action_distribution - self.recent_window_size = recent_window_size - - # TODO: write this for OSS - def generate_training_report(self) -> DQNTrainingReport: - cpe_results = self.value_list_observers["cpe_results"].values # noqa - return DQNTrainingReport() diff --git a/reagent/workflow/reporters/reporter_base.py b/reagent/workflow/reporters/reporter_base.py deleted file mode 100644 index b5f54d920..000000000 --- a/reagent/workflow/reporters/reporter_base.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python3 - -import abc -import logging -from typing import Dict - -from reagent.core.observers import ( - CompositeObserver, - EpochEndObserver, - IntervalAggregatingObserver, - ValueListObserver, -) -from reagent.workflow.result_registries import TrainingReport - - -logger = logging.getLogger(__name__) - - -class ReporterBase(CompositeObserver): - def __init__( - self, - value_list_observers: Dict[str, ValueListObserver], - aggregating_observers: Dict[str, IntervalAggregatingObserver], - ): - epoch_end_observer = EpochEndObserver(self._epoch_end_callback) - self.last_epoch_end_num_batches: int = 0 - self.num_data_points_per_epoch = None - super().__init__( - list(value_list_observers.values()) - # pyre-fixme[6]: Expected `List[ValueListObserver]` for 1st param but - # got `List[IntervalAggregatingObserver]`. - + list(aggregating_observers.values()) - # pyre-fixme[6]: Expected `List[ValueListObserver]` for 1st param but - # got `List[EpochEndObserver]`. 
- + [epoch_end_observer] - ) - - def _epoch_end_callback(self, epoch: int): - logger.info(f"Epoch {epoch} ended") - - for observer in self.aggregating_observers.values(): - observer.flush() - - num_batches = len(self.td_loss.values) - self.last_epoch_end_num_batches - self.last_epoch_end_num_batches = len(self.td_loss.values) - if self.num_data_points_per_epoch is None: - self.num_data_points_per_epoch = num_batches - else: - assert self.num_data_points_per_epoch == num_batches - logger.info(f"Epoch {epoch} contains {num_batches} aggregated data points") - - def __getattr__(self, key: str): - if key in self.value_list_observers: - return self.value_list_observers[key] - return self.aggregating_observers[key].aggregator - - # TODO: write this for OSS - @abc.abstractmethod - def generate_training_report(self) -> TrainingReport: - pass diff --git a/reagent/workflow/sample_configs/sac_pendulum_offline.yaml b/reagent/workflow/sample_configs/sac_pendulum_offline.yaml index 557e0dfc1..20888935c 100644 --- a/reagent/workflow/sample_configs/sac_pendulum_offline.yaml +++ b/reagent/workflow/sample_configs/sac_pendulum_offline.yaml @@ -11,8 +11,8 @@ model: rl: gamma: 0.9 target_update_rate: 0.5 + softmax_policy: true entropy_temperature: 0.01 - minibatch_size: 1024 q_network_optimizer: Adam: lr: 0.001 @@ -22,7 +22,6 @@ model: actor_network_optimizer: Adam: lr: 0.001 - alpha_optimizer: null actor_net_builder: GaussianFullyConnected: sizes: @@ -56,11 +55,13 @@ model: calc_cpe_in_training: false num_train_transitions: 40000 # approx. 200 episodes -max_steps: 200 +max_steps: 1000 seed: 42 num_epochs: 80 publisher: FileSystemPublisher: {} num_eval_episodes: 30 # TODO: raise this bar after training stabilize -passing_score_bar: -900 +passing_score_bar: -1000 +reader_options: + minibatch_size: 1024 diff --git a/reagent/workflow/training.py b/reagent/workflow/training.py index 9df3f8124..c7024477c 100644 --- a/reagent/workflow/training.py +++ b/reagent/workflow/training.py @@ -1,21 +1,32 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
import dataclasses import logging -from typing import Dict, NamedTuple, Optional, Tuple +import time +from typing import Dict, Optional import torch -from reagent.parameters import NormalizationData -from reagent.workflow.env import get_workflow_id -from reagent.workflow.model_managers.union import ModelManager__Union -from reagent.workflow.publishers.union import ModelPublisher__Union +from reagent.core.parameters import NormalizationData +from reagent.core.tensorboardX import summary_writer_context +from reagent.data.manual_data_module import get_sample_range +from reagent.data.oss_data_fetcher import OssDataFetcher +from reagent.model_managers.model_manager import ModelManager +from reagent.model_managers.union import ModelManager__Union +from reagent.publishers.union import ModelPublisher__Union +from reagent.validators.union import ModelValidator__Union +from reagent.workflow.env import get_new_named_entity_ids, get_workflow_id from reagent.workflow.types import ( + Dataset, + ModuleNameToEntityId, + ReaderOptions, RecurringPeriod, + ResourceOptions, RewardOptions, RLTrainingOutput, TableSpec, ) -from reagent.workflow.validators.union import ModelValidator__Union +from torch.utils.tensorboard import SummaryWriter logger = logging.getLogger(__name__) @@ -27,117 +38,148 @@ def identify_and_train_network( num_epochs: int, use_gpu: Optional[bool] = None, reward_options: Optional[RewardOptions] = None, + reader_options: Optional[ReaderOptions] = None, + resource_options: Optional[ResourceOptions] = None, warmstart_path: Optional[str] = None, validator: Optional[ModelValidator__Union] = None, publisher: Optional[ModelPublisher__Union] = None, ) -> RLTrainingOutput: if use_gpu is None: + # pyre-fixme[35]: Target cannot be annotated. use_gpu: bool = torch.cuda.is_available() + reward_options = reward_options or RewardOptions() + reader_options = reader_options or ReaderOptions() + manager = model.value - normalization_data_map = manager.run_feature_identification(input_table_spec) + + normalization_data_map = None + setup_data = None + + data_module = manager.get_data_module( + input_table_spec=input_table_spec, + reward_options=reward_options, + reader_options=reader_options, + resource_options=resource_options, + ) + if data_module is not None: + data_module.prepare_data() + setup_data = data_module.setup_data + else: + normalization_data_map = manager.run_feature_identification(input_table_spec) return query_and_train( input_table_spec, model, - normalization_data_map, num_epochs, use_gpu=use_gpu, + setup_data=setup_data, + normalization_data_map=normalization_data_map, reward_options=reward_options, + reader_options=reader_options, + resource_options=resource_options, warmstart_path=warmstart_path, validator=validator, publisher=publisher, ) -class TrainEvalSampleRanges(NamedTuple): - train_sample_range: Tuple[float, float] - eval_sample_range: Tuple[float, float] - - -def get_sample_range( - input_table_spec: TableSpec, calc_cpe_in_training: bool -) -> TrainEvalSampleRanges: - table_sample = input_table_spec.table_sample - eval_table_sample = input_table_spec.eval_table_sample - - if not calc_cpe_in_training: - # use all data if table sample = None - if table_sample is None: - train_sample_range = (0.0, 100.0) - else: - train_sample_range = (0.0, table_sample) - return TrainEvalSampleRanges( - train_sample_range=train_sample_range, - # eval samples will not be used - eval_sample_range=(0.0, 0.0), - ) - - error_msg = ( - "calc_cpe_in_training is set to True. 
" - f"Please specify table_sample(current={table_sample}) and " - f"eval_table_sample(current={eval_table_sample}) such that " - "eval_table_sample + table_sample <= 100. " - "In order to reliably calculate CPE, eval_table_sample " - "should not be too small." - ) - assert table_sample is not None, error_msg - assert eval_table_sample is not None, error_msg - assert (eval_table_sample + table_sample) <= (100.0 + 1e-3), error_msg - - return TrainEvalSampleRanges( - train_sample_range=(0.0, table_sample), - eval_sample_range=(100.0 - eval_table_sample, 100.0), - ) - - def query_and_train( input_table_spec: TableSpec, model: ModelManager__Union, - normalization_data_map: Dict[str, NormalizationData], num_epochs: int, use_gpu: bool, + *, + setup_data: Optional[Dict[str, bytes]] = None, + saved_setup_data: Optional[Dict[str, bytes]] = None, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, reward_options: Optional[RewardOptions] = None, + reader_options: Optional[ReaderOptions] = None, + resource_options: Optional[ResourceOptions] = None, warmstart_path: Optional[str] = None, validator: Optional[ModelValidator__Union] = None, publisher: Optional[ModelPublisher__Union] = None, - parent_workflow_id: Optional[int] = None, + named_model_ids: Optional[ModuleNameToEntityId] = None, recurring_period: Optional[RecurringPeriod] = None, ) -> RLTrainingOutput: child_workflow_id = get_workflow_id() - if parent_workflow_id is None: - parent_workflow_id = child_workflow_id + if named_model_ids is None: + # pyre-fixme[20]: Argument `model_type_id` expected. + named_model_ids = get_new_named_entity_ids(model.value.serving_module_names()) logger.info("Starting query") reward_options = reward_options or RewardOptions() + reader_options = reader_options or ReaderOptions() + resource_options = resource_options or ResourceOptions() manager = model.value - calc_cpe_in_training = manager.should_generate_eval_dataset - sample_range_output = get_sample_range(input_table_spec, calc_cpe_in_training) - train_dataset = manager.query_data( - input_table_spec=input_table_spec, - sample_range=sample_range_output.train_sample_range, - reward_options=reward_options, - ) + resource_options.gpu = int(use_gpu) + + if saved_setup_data is not None: + + def _maybe_get_bytes(v) -> bytes: + if isinstance(v, bytes): + return v + + # HACK: FBLearner sometimes pack bytes into Blob + return v.data + + saved_setup_data = {k: _maybe_get_bytes(v) for k, v in saved_setup_data.items()} + + if setup_data is None: + data_module = manager.get_data_module( + input_table_spec=input_table_spec, + reward_options=reward_options, + reader_options=reader_options, + resource_options=resource_options, + saved_setup_data=saved_setup_data, + ) + if data_module is not None: + data_module.prepare_data() + setup_data = data_module.setup_data + # Throw away existing normalization data map + normalization_data_map = None + + if sum([int(setup_data is not None), int(normalization_data_map is not None)]) != 1: + raise ValueError("setup_data and normalization_data_map are mutually exclusive") + + train_dataset = None eval_dataset = None - if calc_cpe_in_training: - eval_dataset = manager.query_data( + data_fetcher = OssDataFetcher() + if normalization_data_map is not None: + calc_cpe_in_training = manager.should_generate_eval_dataset + sample_range_output = get_sample_range(input_table_spec, calc_cpe_in_training) + train_dataset = manager.query_data( input_table_spec=input_table_spec, - sample_range=sample_range_output.eval_sample_range, + 
sample_range=sample_range_output.train_sample_range, reward_options=reward_options, + data_fetcher=data_fetcher, ) + eval_dataset = None + if calc_cpe_in_training: + eval_dataset = manager.query_data( + input_table_spec=input_table_spec, + sample_range=sample_range_output.eval_sample_range, + reward_options=reward_options, + data_fetcher=data_fetcher, + ) logger.info("Starting training") - results = manager.train_workflow( + + results = train_workflow( + manager, train_dataset, eval_dataset, - normalization_data_map, - num_epochs, - use_gpu, - parent_workflow_id=parent_workflow_id, + num_epochs=num_epochs, + use_gpu=use_gpu, + setup_data=setup_data, + normalization_data_map=normalization_data_map, + named_model_ids=named_model_ids, child_workflow_id=child_workflow_id, reward_options=reward_options, + reader_options=reader_options, + resource_options=resource_options, warmstart_path=warmstart_path, ) @@ -149,7 +191,8 @@ def query_and_train( publisher, model, results, - parent_workflow_id, + setup_data, + named_model_ids, child_workflow_id, recurring_period, ) @@ -157,6 +200,78 @@ def query_and_train( return results +def train_workflow( + model_manager: ModelManager, + train_dataset: Optional[Dataset], + eval_dataset: Optional[Dataset], + *, + num_epochs: int, + use_gpu: bool, + named_model_ids: ModuleNameToEntityId, + child_workflow_id: int, + setup_data: Optional[Dict[str, bytes]] = None, + normalization_data_map: Optional[Dict[str, NormalizationData]] = None, + reward_options: Optional[RewardOptions] = None, + reader_options: Optional[ReaderOptions] = None, + resource_options: Optional[ResourceOptions] = None, + warmstart_path: Optional[str] = None, +) -> RLTrainingOutput: + writer = SummaryWriter() + logger.info("TensorBoard logging location is: {}".format(writer.log_dir)) + + if setup_data is not None: + data_module = model_manager.get_data_module( + setup_data=setup_data, + reward_options=reward_options, + reader_options=reader_options, + resource_options=resource_options, + ) + assert data_module is not None + data_module.setup() + else: + data_module = None + + if normalization_data_map is None: + assert data_module is not None + normalization_data_map = data_module.get_normalization_data_map() + + warmstart_input_path = warmstart_path or None + trainer_module = model_manager.build_trainer( + use_gpu=use_gpu, + reward_options=reward_options, + normalization_data_map=normalization_data_map, + ) + + if not reader_options: + reader_options = ReaderOptions() + + if not resource_options: + resource_options = ResourceOptions() + + with summary_writer_context(writer): + train_output, lightning_trainer = model_manager.train( + trainer_module, + train_dataset, + eval_dataset, + None, + data_module, + num_epochs, + reader_options, + resource_options, + checkpoint_path=warmstart_input_path, + ) + + output_paths = {} + for module_name, serving_module in model_manager.build_serving_modules( + trainer_module, normalization_data_map + ).items(): + torchscript_output_path = f"{model_manager.__class__.__name__}_{module_name}_{round(time.time())}.torchscript" + torch.jit.save(serving_module, torchscript_output_path) + logger.info(f"Saved {module_name} to {torchscript_output_path}") + output_paths[module_name] = torchscript_output_path + return dataclasses.replace(train_output, output_paths=output_paths) + + def run_validator( validator: ModelValidator__Union, training_output: RLTrainingOutput ) -> RLTrainingOutput: @@ -172,7 +287,8 @@ def run_publisher( publisher: ModelPublisher__Union, model_chooser: 
ModelManager__Union, training_output: RLTrainingOutput, - recurring_workflow_id: int, + setup_data: Optional[Dict[str, bytes]], + recurring_workflow_ids: ModuleNameToEntityId, child_workflow_id: int, recurring_period: Optional[RecurringPeriod], ) -> RLTrainingOutput: @@ -184,7 +300,8 @@ def run_publisher( publishing_result = model_publisher.publish( model_manager, training_output, - recurring_workflow_id, + setup_data, + recurring_workflow_ids, child_workflow_id, recurring_period, ) diff --git a/reagent/workflow/training_reports.py b/reagent/workflow/training_reports.py index 3f605b9a8..ffc797d3b 100644 --- a/reagent/workflow/training_reports.py +++ b/reagent/workflow/training_reports.py @@ -1,10 +1,11 @@ #!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. from typing import Optional from reagent.core.dataclasses import dataclass +from reagent.core.result_registries import TrainingReport from reagent.evaluation.cpe import CpeEstimate -from reagent.workflow.result_registries import TrainingReport @dataclass @@ -12,7 +13,6 @@ class DQNTrainingReport(TrainingReport): __registry_name__ = "dqn_report" td_loss: Optional[float] = None - mc_loss: Optional[float] = None reward_ips: Optional[CpeEstimate] = None reward_dm: Optional[CpeEstimate] = None reward_dr: Optional[CpeEstimate] = None @@ -26,6 +26,21 @@ class ActorCriticTrainingReport(TrainingReport): __registry_name__ = "actor_critic_report" +@dataclass +class WorldModelTrainingReport(TrainingReport): + __registry_name__ = "world_model_report" + + @dataclass class ParametricDQNTrainingReport(TrainingReport): __registry_name__ = "parametric_dqn_report" + + +@dataclass +class SlateQTrainingReport(TrainingReport): + __registry_name__ = "slate_q_report" + + +@dataclass +class Seq2RewardTrainingReport(TrainingReport): + __registry_name__ = "seq2reward_report" diff --git a/reagent/workflow/types.py b/reagent/workflow/types.py index 70515afc3..3c3a62b5e 100644 --- a/reagent/workflow/types.py +++ b/reagent/workflow/types.py @@ -2,25 +2,28 @@ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 
from datetime import datetime as RecurringPeriod # noqa -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple # Triggering registration to registries -import reagent.workflow.result_types # noqa +import reagent.core.result_types # noqa import reagent.workflow.training_reports # noqa -from reagent.core.dataclasses import dataclass +from reagent.core.dataclasses import dataclass, field +from reagent.core.result_registries import ( + PublishingResult, + TrainingReport, + ValidationResult, +) +from reagent.core.tagged_union import TaggedUnion +from reagent.models.model_feature_config_provider import ModelFeatureConfigProvider from reagent.preprocessing.normalization import ( DEFAULT_MAX_QUANTILE_SIZE, DEFAULT_MAX_UNIQUE_ENUM, DEFAULT_NUM_SAMPLES, DEFAULT_QUANTILE_K2_THRESHOLD, ) -from reagent.types import BaseDataClass -from reagent.workflow.result_registries import ( - PublishingResult, - TrainingReport, - ValidationResult, -) -from reagent.workflow.tagged_union import TaggedUnion # noqa F401 + + +ModuleNameToEntityId = Dict[str, int] @dataclass @@ -33,6 +36,7 @@ class TableSpec: table_name: str table_sample: Optional[float] = None eval_table_sample: Optional[float] = None + test_table_sample: Optional[float] = None @dataclass @@ -43,11 +47,28 @@ class RewardOptions: @dataclass class ReaderOptions: + minibatch_size: int = 1024 petastorm_reader_pool_type: str = "thread" @dataclass -class PreprocessingOptions(BaseDataClass): +class ResourceOptions: + gpu: int = 1 + + @property + def use_gpu(self) -> bool: + return self.gpu > 0 + + ## Below is for internal use + cpu: Optional[int] = None + # "-1" or "xxG" where "xx" is a positive integer + memory: Optional[str] = "40g" + min_nodes: Optional[int] = 1 + max_nodes: Optional[int] = 1 + + +@dataclass +class PreprocessingOptions: num_samples: int = DEFAULT_NUM_SAMPLES max_unique_enum_values: int = DEFAULT_MAX_UNIQUE_ENUM quantile_size: int = DEFAULT_MAX_QUANTILE_SIZE @@ -57,8 +78,13 @@ class PreprocessingOptions(BaseDataClass): feature_overrides: Optional[Dict[int, str]] = None tablesample: Optional[float] = None set_missing_value_to_zero: Optional[bool] = False - whitelist_features: Optional[List[int]] = None - assert_whitelist_feature_coverage: bool = True + allowedlist_features: Optional[List[int]] = None + assert_allowedlist_feature_coverage: bool = True + + +@ModelFeatureConfigProvider.fill_union() +class ModelFeatureConfigProvider__Union(TaggedUnion): + pass @PublishingResult.fill_union() @@ -78,7 +104,15 @@ class RLTrainingReport(TaggedUnion): @dataclass class RLTrainingOutput: - output_path: Optional[str] = None + output_paths: Dict[str, str] = field(default_factory=dict) validation_result: Optional[ValidationResult__Union] = None publishing_result: Optional[PublishingResult__Union] = None training_report: Optional[RLTrainingReport] = None + logger_data: Dict[str, Dict[str, List[Tuple[float, float]]]] = field( + default_factory=dict + ) + + +@dataclass +class TrainerConf: + pass diff --git a/reagent/workflow/utils.py b/reagent/workflow/utils.py index 55bcb5e95..b4687666e 100644 --- a/reagent/workflow/utils.py +++ b/reagent/workflow/utils.py @@ -4,20 +4,22 @@ import logging from typing import Dict, List, Optional -import reagent.types as rlt +import pytorch_lightning as pl +import torch # pyre-fixme[21]: Could not find `petastorm`. from petastorm import make_batch_reader + +# pyre-fixme[21]: Could not find module `petastorm.pytorch`. +# pyre-fixme[21]: Could not find module `petastorm.pytorch`. 
from petastorm.pytorch import DataLoader, decimal_friendly_collate -from reagent.core.tracker import Observer -from reagent.evaluation.evaluation_data_page import EvaluationDataPage -from reagent.evaluation.evaluator import Evaluator +from reagent.core.oss_tensorboard_logger import OssTensorboardLogger +from reagent.data.spark_utils import get_spark_session from reagent.preprocessing.batch_preprocessor import BatchPreprocessor -from reagent.torch_utils import dict_to_tensor -from reagent.training import RLTrainer, SACTrainer, TD3Trainer -from reagent.workflow.spark_utils import get_spark_session -from reagent.workflow.types import Dataset, ReaderOptions -from reagent.workflow_utils.iterators import DataLoaderWrapper, EpochIterator +from reagent.training import StoppingEpochCallback +from reagent.training.reagent_lightning_module import has_test_step_override + +from .types import Dataset, ReaderOptions, ResourceOptions logger = logging.getLogger(__name__) @@ -29,7 +31,7 @@ def get_table_row_count(parquet_url: str): def collate_and_preprocess(batch_preprocessor: BatchPreprocessor, use_gpu: bool): - """ Helper for Petastorm's DataLoader to preprocess. + """Helper for Petastorm's DataLoader to preprocess. TODO(kaiwenw): parallelize preprocessing by using transform of Petastorm reader Should pin memory and preprocess in reader and convert to gpu in collate_fn. """ @@ -51,13 +53,12 @@ def get_petastorm_dataloader( use_gpu: bool, reader_options: ReaderOptions, ): - """ get petastorm loader for dataset (with preprocessor) """ + """get petastorm loader for dataset (with preprocessor)""" data_reader = make_batch_reader( dataset.parquet_url, num_epochs=1, reader_pool_type=reader_options.petastorm_reader_pool_type, ) - # NOTE: must be wrapped by DataLoaderWrapper to call __exit__() on end of epoch return DataLoader( data_reader, batch_size=batch_size, @@ -67,84 +68,88 @@ def get_petastorm_dataloader( ) -def gather_eval_data( - trainer: RLTrainer, - eval_dataset: Dataset, - batch_preprocessor: BatchPreprocessor, - use_gpu: bool, - reader_options: ReaderOptions, -) -> EvaluationDataPage: - """ Sorts, computes logged values and validates the EvaluationDataPage """ - if isinstance(trainer, (SACTrainer, TD3Trainer)): - raise NotImplementedError("TODO: Implement CPE for continuous algos") - assert ( - trainer.calc_cpe_in_training - ), "this function should only be called when this is true." - - # first read the eval_dataset as EvaluationDataPages - device = "cuda" if use_gpu else "cpu" - eval_data = None - with make_batch_reader( - eval_dataset.parquet_url, - num_epochs=1, - reader_pool_type=reader_options.petastorm_reader_pool_type, - ) as reader: - for batch in reader: - assert rlt.isinstance_namedtuple(batch) - tensor_batch = dict_to_tensor(batch._asdict(), device=device) - # pyre-fixme[9]: tdp has type `PreprocessedTrainingBatch`; used as - # `TensorDataClass`. 
- tdp: rlt.PreprocessedTrainingBatch = batch_preprocessor(tensor_batch) - edp = EvaluationDataPage.create_from_training_batch(tdp, trainer) - if eval_data is None: - eval_data = edp - else: - eval_data = eval_data.append(edp) - - eval_data = eval_data.sort() - eval_data = eval_data.compute_values(trainer.gamma) - eval_data.validate() - return eval_data - - -def train_and_evaluate_generic( - train_dataset: Dataset, - eval_dataset: Optional[Dataset], - trainer: RLTrainer, - num_epochs: int, - use_gpu: bool, - batch_preprocessor: BatchPreprocessor, - reporter: Observer, - evaluator: Evaluator, - reader_options: Optional[ReaderOptions] = None, -) -> None: - reader_options = reader_options or ReaderOptions() - epoch_iterator = EpochIterator(num_epochs=num_epochs) - train_dataset_size = get_table_row_count(train_dataset.parquet_url) - # pyre-fixme[16]: `EpochIterator` has no attribute `add_observer`. - for epoch in epoch_iterator.add_observer(reporter): - logger.info(f"Starting training epoch {epoch}.") +# TODO: Move this to appropriate location +class PetastormLightningDataModule(pl.LightningDataModule): + def __init__(self, train_dataset, eval_dataset, batch_preprocessor, reader_options): + super().__init__() + self.train_dataset = train_dataset + self.eval_dataset = eval_dataset + self.batch_preprocessor = batch_preprocessor + self.reader_options = reader_options + + def _closing_iter(self, dataloader): + yield from dataloader + dataloader.__exit__(None, None, None) + + def train_dataloader(self): dataloader = get_petastorm_dataloader( - dataset=train_dataset, - # pyre-fixme[6]: Expected `int` for 2nd param but got `Optional[int]`. - batch_size=trainer.minibatch_size, - batch_preprocessor=batch_preprocessor, - use_gpu=use_gpu, - reader_options=reader_options, + dataset=self.train_dataset, + batch_size=self.reader_options.minibatch_size, + batch_preprocessor=self.batch_preprocessor, + use_gpu=False, + reader_options=self.reader_options, ) - dataloader_wrapper = DataLoaderWrapper( - dataloader=dataloader, dataloader_size=train_dataset_size + return self._closing_iter(dataloader) + + def test_dataloader(self): + dataloader = get_petastorm_dataloader( + dataset=self.eval_dataset, + batch_size=self.reader_options.minibatch_size, + batch_preprocessor=self.batch_preprocessor, + use_gpu=False, + reader_options=self.reader_options, + ) + return self._closing_iter(dataloader) + + +def get_rank() -> int: + """ + Returns the torch.distributed rank of the process. 
0 represents + the main process and is the default if torch.distributed isn't set up + """ + return ( + torch.distributed.get_rank() + if torch.distributed.is_available() and torch.distributed.is_initialized() + else 0 + ) + + +def train_eval_lightning( + train_dataset, + eval_dataset, + test_dataset, + trainer_module, + data_module, + num_epochs, + logger_name: str, + batch_preprocessor=None, + reader_options: Optional[ReaderOptions] = None, + checkpoint_path: Optional[str] = None, + resource_options: Optional[ResourceOptions] = None, +) -> pl.Trainer: + resource_options = resource_options or ResourceOptions() + use_gpu = resource_options.use_gpu + reader_options = reader_options or ReaderOptions() + datamodule = data_module or PetastormLightningDataModule( + train_dataset, eval_dataset, batch_preprocessor, reader_options + ) + trainer = pl.Trainer( + logger=OssTensorboardLogger(save_dir="pl_log_tensorboard", name=logger_name), + max_epochs=num_epochs * 1000, + gpus=int(use_gpu), + reload_dataloaders_every_n_epochs=1, + resume_from_checkpoint=checkpoint_path, + callbacks=[StoppingEpochCallback(num_epochs)], + ) + trainer.fit(trainer_module, datamodule=datamodule) + if has_test_step_override(trainer_module): + trainer.test(ckpt_path=None, datamodule=datamodule) + else: + logger.warning( + f"Module {type(trainer_module).__name__} doesn't implement test_step(). Skipping testing" ) - for batch in dataloader_wrapper: - trainer.train(batch) - - if eval_dataset is not None: - eval_data = gather_eval_data( - trainer=trainer, - eval_dataset=eval_dataset, - batch_preprocessor=batch_preprocessor, - use_gpu=use_gpu, - reader_options=reader_options, - ) - # evaluator passes cpe_details to reporter via notify_observers - evaluator.evaluate_post_training(eval_data) + if checkpoint_path is not None: + # Overwrite the warmstart path with the new model + trainer_module.set_clean_stop(True) + trainer.save_checkpoint(checkpoint_path) + return trainer diff --git a/reagent/workflow/validators/no_validation.py b/reagent/workflow/validators/no_validation.py deleted file mode 100644 index 73a3801a2..000000000 --- a/reagent/workflow/validators/no_validation.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python3 - -from reagent.core.dataclasses import dataclass -from reagent.workflow.result_types import NoValidationResults -from reagent.workflow.types import RLTrainingOutput -from reagent.workflow.validators.model_validator import ModelValidator - - -@dataclass -class NoValidation(ModelValidator): - """ - This is an example of how to create a validator. This validator performs no - validation. In your own validator, you would want to have `validate()` performs - some validation. 
- """ - - def do_validate(self, training_output: RLTrainingOutput) -> NoValidationResults: - return NoValidationResults(should_publish=True) diff --git a/reagent/workflow/validators/union.py b/reagent/workflow/validators/union.py deleted file mode 100644 index 9ac2f90f0..000000000 --- a/reagent/workflow/validators/union.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -from reagent.workflow import types - -from .model_validator import ModelValidator -from .no_validation import NoValidation # noqa - - -@ModelValidator.fill_union() -class ModelValidator__Union(types.TaggedUnion): - pass diff --git a/reagent/workflow_utils/iterators.py b/reagent/workflow_utils/iterators.py deleted file mode 100644 index 41b424b04..000000000 --- a/reagent/workflow_utils/iterators.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -import logging -from collections import OrderedDict - -from reagent.core.tracker import observable -from reagent.tensorboardX import SummaryWriterContext -from torch.utils.data import IterableDataset -from tqdm import tqdm - - -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) - - -@observable(epoch_start=int, epoch_end=int) -class EpochIterator: - def __init__(self, num_epochs: int): - assert num_epochs > 0 - self.num_epochs = num_epochs - - def __iter__(self): - SummaryWriterContext._reset_globals() - for epoch in range(self.num_epochs): - self.notify_observers(epoch_start=epoch) - yield epoch - self.notify_observers(epoch_end=epoch) - # TODO: flush at end of epoch? - - -def get_batch_size(batch): - try: - return batch.batch_size() - except AttributeError: - pass - if isinstance(batch, OrderedDict): - first_key = next(iter(batch.keys())) - batch_size = len(batch[first_key]) - else: - raise NotImplementedError() - return batch_size - - -class DataLoaderWrapper(IterableDataset): - def __init__(self, dataloader: IterableDataset, dataloader_size: int): - """ Wraps around an Iterable Dataloader to report progress bars and - increase global step of SummaryWriter. At last iteration, will call - dataloader.__exit__ if needed (e.g. Petastorm DataLoader). - - Args: - dataloader: the iteratable dataloader to wrap around - dataloader_size: size of the dataset we're iterating over - """ - - self.dataloader = dataloader - self.dataloader_iter = iter(dataloader) - self.dataloader_size = dataloader_size - - def __iter__(self): - t = tqdm(total=self.dataloader_size, desc="iterating dataloader") - for batch in self.dataloader: - batch_size = get_batch_size(batch) - yield batch - t.update(batch_size) - SummaryWriterContext.increase_global_step() - - # clean up if need to (e.g. Petastorm Dataloader) - if hasattr(self.dataloader, "__exit__"): - self.dataloader.__exit__(None, None, None) diff --git a/reagent/workflow_utils/page_handler.py b/reagent/workflow_utils/page_handler.py deleted file mode 100644 index c6e6b1c8d..000000000 --- a/reagent/workflow_utils/page_handler.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved - -import logging -import time -from collections import OrderedDict -from typing import Dict, List, Optional - -import numpy as np -import torch -from reagent.core.tracker import observable -from reagent.evaluation.cpe import CpeDetails -from reagent.evaluation.evaluation_data_page import EvaluationDataPage -from reagent.tensorboardX import SummaryWriterContext -from reagent.training.sac_trainer import SACTrainer -from reagent.training.td3_trainer import TD3Trainer -from reagent.types import MemoryNetworkInput, PreprocessedTrainingBatch - - -logger = logging.getLogger(__name__) - - -class PageHandler: - def __init__(self, trainer_or_evaluator): - self.trainer_or_evaluator = trainer_or_evaluator - self.results: List[Dict] = [] - self.epoch = 0 - - def refresh_results(self) -> None: - self.results: List[Dict] = [] - - def get_loss(self, loss_name="loss"): - """ See usage in get_mean_loss """ - return [float(result[loss_name]) for result in self.results] - - def get_mean_loss(self, loss_name="loss", axis=None): - """ - Get the average of a certain type of loss - - :param loss_name: possible loss names: - For world model: - 'loss' (referring to total loss), - 'bce' (loss for predicting not_terminal), - 'gmm' (loss for next state prediction), - 'mse' (loss for predicting reward) - For ranking model: - 'pg' (policy gradient loss) - 'baseline' (the baseline model's loss, usually for fitting V(s)) - 'kendall_tau' (kendall_tau coefficient between advantage and log_probs, - used in evaluation page handlers) - 'kendaull_tau_p_value' (the p-value for kendall_tau test, used in - evaluation page handlers) - :param axis: axis to perform mean function. - """ - return np.mean([result[loss_name] for result in self.results], axis=axis) - - def handle(self, tdp: PreprocessedTrainingBatch) -> None: - raise NotImplementedError() - - def finish(self) -> None: - pass - - def set_epoch(self, epoch) -> None: - self.epoch = epoch - - -# TODO: remove. -# Use new DataLoaderWrapper & EpochIterator (see OSS train_and_evaluate_generic) -@observable(epoch_end=int) -class TrainingPageHandler(PageHandler): - def handle(self, tdp: PreprocessedTrainingBatch) -> None: - SummaryWriterContext.increase_global_step() - self.trainer_or_evaluator.train(tdp) - - def finish(self) -> None: - # pyre-fixme[16]: `TrainingPageHandler` has no attribute `notify_observers`. - self.notify_observers(epoch_end=self.epoch) - self.trainer_or_evaluator.loss_reporter.flush() - self.epoch += 1 - - -# TODO: remove. -# Use new DataLoaderWrapper & EpochIterator (see OSS train_and_evaluate_generic) -class EvaluationPageHandler(PageHandler): - def __init__(self, trainer, evaluator, reporter): - self.trainer = trainer - self.evaluator = evaluator - self.evaluation_data: Optional[EvaluationDataPage] = None - self.reporter = reporter - self.results: List[CpeDetails] = [] - - def handle(self, tdp: PreprocessedTrainingBatch) -> None: - if not self.trainer.calc_cpe_in_training: - return - # TODO: Perhaps we can make an RLTrainer param to check if continuous? - if isinstance(self.trainer, (SACTrainer, TD3Trainer)): - # TODO: Implement CPE for continuous algos - edp = None - else: - edp = EvaluationDataPage.create_from_training_batch(tdp, self.trainer) - if self.evaluation_data is None: - self.evaluation_data = edp - else: - # pyre-fixme[16]: `Optional` has no attribute `append`. 
- self.evaluation_data = self.evaluation_data.append(edp) - - def finish(self) -> None: - if self.evaluation_data is None: - return - # Making sure the data is sorted for CPE - # pyre-fixme[16]: `Optional` has no attribute `sort`. - self.evaluation_data = self.evaluation_data.sort() - # pyre-fixme[16]: `Optional` has no attribute `compute_values`. - self.evaluation_data = self.evaluation_data.compute_values(self.trainer.gamma) - # pyre-fixme[16]: `Optional` has no attribute `validate`. - self.evaluation_data.validate() - start_time = time.time() - evaluation_details = self.evaluator.evaluate_post_training(self.evaluation_data) - self.reporter.report(evaluation_details) - self.results.append(evaluation_details) - logger.info("CPE evaluation took {} seconds.".format(time.time() - start_time)) - self.evaluation_data = None - - def get_last_cpe_results(self): - if len(self.results) == 0: - return CpeDetails() - return self.results[-1] - - -class WorldModelTrainingPageHandler(PageHandler): - def handle(self, tdp: PreprocessedTrainingBatch) -> None: - losses = self.trainer_or_evaluator.train(tdp) - self.results.append(losses) - - -class WorldModelRandomTrainingPageHandler(PageHandler): - """ Train a baseline model based on randomly shuffled data """ - - # pyre-fixme[14]: `handle` overrides method defined in `PageHandler` inconsistently. - def handle(self, training_input: MemoryNetworkInput) -> None: - _, batch_size, _ = training_input.next_state.float_features.size() - - tdp = MemoryNetworkInput( - state=training_input.state, - action=training_input.action, - time_diff=torch.ones_like(training_input.reward), - # shuffle the data - next_state=training_input.next_state._replace( - float_features=training_input.next_state.float_features[ - :, torch.randperm(batch_size), : - ] - ), - reward=training_input.reward[:, torch.randperm(batch_size)], - not_terminal=training_input.not_terminal[ # type: ignore - :, torch.randperm(batch_size) - ], - step=None, - ) - losses = self.trainer_or_evaluator.train(tdp) - self.results.append(losses) - - -class WorldModelEvaluationPageHandler(PageHandler): - # pyre-fixme[14]: `handle` overrides method defined in `PageHandler` inconsistently. 
- def handle(self, tdp: MemoryNetworkInput) -> None: - losses = self.trainer_or_evaluator.evaluate(tdp) - self.results.append(losses) - - -@observable(epoch_end=int) -class RankingTrainingPageHandler(PageHandler): - def __init__(self, trainer) -> None: - super().__init__(trainer) - self.policy_gradient_loss: List[float] = [] - self.baseline_loss: List[float] = [] - self.per_seq_probs: List[float] = [] - - def handle(self, tdp: PreprocessedTrainingBatch) -> None: - res_dict = self.trainer_or_evaluator.train(tdp) - self.results.append(res_dict) - - def finish(self): - self.notify_observers(epoch_end=self.epoch) - if "ips_rl_loss" in self.results[0]: - self.policy_gradient_loss.append( - float(self.get_mean_loss(loss_name="ips_rl_loss")) - ) - if "baseline_loss" in self.results[0]: - self.baseline_loss.append( - float(self.get_mean_loss(loss_name="baseline_loss")) - ) - if "per_seq_probs" in self.results[0]: - self.per_seq_probs.append( - float(self.get_mean_loss(loss_name="per_seq_probs")) - ) - self.refresh_results() - - -@observable(epoch_end=int) -class RankingEvaluationPageHandler(PageHandler): - def handle(self, tdp: PreprocessedTrainingBatch) -> None: - self.trainer_or_evaluator.evaluate(tdp) - - def finish(self): - eval_res = self.trainer_or_evaluator.evaluate_post_training() - self.notify_observers(epoch_end=self.epoch) # type: ignore - self.results.append(eval_res) - - -class RewardNetTrainingPageHandler(PageHandler): - def __init__(self, trainer): - super().__init__(trainer) - self.mse_loss = [] - - def handle(self, tdp: PreprocessedTrainingBatch) -> None: - mse_loss = self.trainer_or_evaluator.train(tdp) - self.results.append({"mse": mse_loss.cpu().numpy()}) - - def finish(self): - self.mse_loss.append(float(self.get_mean_loss(loss_name="mse"))) - self.refresh_results() - - -# TODO: remove. -# Use new DataLoaderWrapper & EpochIterator (see OSS train_and_evaluate_generic) -def get_actual_minibatch_size(batch, minibatch_size_preset): - try: - return batch.batch_size() - except AttributeError: - pass - if isinstance(batch, OrderedDict): - first_key = next(iter(batch.keys())) - batch_size = len(batch[first_key]) - else: - raise NotImplementedError() - return batch_size - - -# TODO: remove. -# Use new DataLoaderWrapper & EpochIterator (see OSS train_and_evaluate_generic) -def feed_pages( - data_loader, - dataset_num_rows, - epoch, - minibatch_size, - use_gpu, - page_handler, - batch_preprocessor=None, -): - num_rows_processed = 0 - num_rows_to_process_for_progress_tick = max(1, dataset_num_rows // 100) - last_percent_reported = -1 - - for batch in data_loader: - if use_gpu: - batch = batch.cuda() - batch_size = get_actual_minibatch_size(batch, minibatch_size) - num_rows_processed += batch_size - - if ( - num_rows_processed // num_rows_to_process_for_progress_tick - ) != last_percent_reported: - last_percent_reported = ( - num_rows_processed // num_rows_to_process_for_progress_tick - ) - logger.info( - "Feeding page. 
Epoch: {}, Epoch Progress: {} of {} ({}%)".format( - epoch, - num_rows_processed, - dataset_num_rows, - (100 * num_rows_processed) // dataset_num_rows, - ) - ) - - if batch_preprocessor: - batch = batch_preprocessor(batch) - page_handler.handle(batch) - - page_handler.finish() diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 367975796..000000000 --- a/requirements.txt +++ /dev/null @@ -1,20 +0,0 @@ -click==7.0 -gym[classic_control,box2d,atari] -gym-minigrid -numpy==1.17.2 -pandas==0.25.0 -pydantic==1.4 -torch -tqdm==4.46.0 -petastorm==0.9.0 -parameterized==0.7.4 -pyspark==2.4.5 -pytest==5.3 -pytest-xdist==1.30.0 -recsim-no-tf==0.2.3 -ruamel.yaml==0.15.99 -spark-testing-base==0.10.0 -scipy==1.3.1 -tensorboard==1.14 -scikit-learn==0.20.0 -xgboost==0.90 diff --git a/scripts/recurring_training_sac_offline.sh b/scripts/recurring_training_sac_offline.sh new file mode 100644 index 000000000..443b2649d --- /dev/null +++ b/scripts/recurring_training_sac_offline.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +set -x -e + +rm -f /tmp/file_system_publisher +rm -Rf test_warmstart model_* pl_log* runs + +CONFIG=reagent/workflow/sample_configs/sac_pendulum_offline.yaml + +python ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.offline_gym_random "$CONFIG" +rm -Rf spark-warehouse derby.log metastore_db +python ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.timeline_operator "$CONFIG" +python ./reagent/workflow/cli.py run reagent.workflow.training.identify_and_train_network "$CONFIG" +python ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.evaluate_gym "$CONFIG" + +for _ in {0..30} +do +python ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.offline_gym_predictor "$CONFIG" +rm -Rf spark-warehouse derby.log metastore_db +python ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.timeline_operator "$CONFIG" +python ./reagent/workflow/cli.py run reagent.workflow.training.identify_and_train_network "$CONFIG" +python ./reagent/workflow/cli.py run reagent.workflow.gym_batch_rl.evaluate_gym "$CONFIG" +done diff --git a/serving/examples/__init__.py b/serving/examples/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/serving/examples/ecommerce/__init__.py b/serving/examples/ecommerce/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/serving/examples/ecommerce/training/contextual_bandit.yaml b/serving/examples/ecommerce/training/contextual_bandit.yaml index ef0452818..2abdaf280 100644 --- a/serving/examples/ecommerce/training/contextual_bandit.yaml +++ b/serving/examples/ecommerce/training/contextual_bandit.yaml @@ -22,8 +22,8 @@ model: optimizer: Adam: lr: 0.01 - evaluation: - calc_cpe_in_training: true + eval_parameters: + calc_cpe_in_training: true net_builder: FullyConnected: sizes: [] diff --git a/serving/external/exprtk b/serving/external/exprtk index 7c9b2370f..f46bffcd6 160000 --- a/serving/external/exprtk +++ b/serving/external/exprtk @@ -1 +1 @@ -Subproject commit 7c9b2370f80f2145e91edfc481c916ff5d1260d7 +Subproject commit f46bffcd6966d38a09023fb37ba9335214c9b959 diff --git a/serving/reagent/serving/config/applications/example/example.py b/serving/reagent/serving/config/applications/example/example.py index 737abf92d..3f47f3651 100644 --- a/serving/reagent/serving/config/applications/example/example.py +++ b/serving/reagent/serving/config/applications/example/example.py @@ -2,14 +2,14 @@ # Copyright 2004-present Facebook. All Rights Reserved. 
from reagent.serving.config.builder import ( - UCB, DecisionPlanBuilder, EpsilonGreedyRanker, + export, Frechet, InputFromRequest, Softmax, SoftmaxRanker, - export, + UCB, ) diff --git a/serving/reagent/serving/config/config.py b/serving/reagent/serving/config/config.py index ffd086e07..34205f91d 100644 --- a/serving/reagent/serving/config/config.py +++ b/serving/reagent/serving/config/config.py @@ -2,8 +2,8 @@ # Copyright 2004-present Facebook. All Rights Reserved. from collections import OrderedDict -from typing import Dict, List, Union from enum import Enum +from typing import Dict, List, Union class ConfigBaseMeta(type): @@ -50,9 +50,9 @@ def _replace(self, **kwargs): def __init__(self, **kwargs): """Configs can be constructed by specifying values by keyword. - If a keyword is supplied that isn't in the config, or if a config requires - a value that isn't specified and doesn't have a default, a TypeError will be - raised.""" + If a keyword is supplied that isn't in the config, or if a config requires + a value that isn't specified and doesn't have a default, a TypeError will be + raised.""" specified = kwargs.keys() | type(self)._field_defaults.keys() required = type(self).__annotations__.keys() # Unspecified fields have no default and weren't provided by the caller @@ -106,9 +106,9 @@ class Constant(ConfigBase): class DecisionRewardAggreation(Enum): - DRA_INVALID = None, - DRA_SUM = 'sum', - DRA_MAX = 'max', + DRA_INVALID = (None,) + DRA_SUM = ("sum",) + DRA_MAX = ("max",) class DecisionConfig(ConfigBase): diff --git a/serving/reagent/serving/config/main.py b/serving/reagent/serving/config/main.py index 582f4baa1..5abb9097a 100644 --- a/serving/reagent/serving/config/main.py +++ b/serving/reagent/serving/config/main.py @@ -28,7 +28,7 @@ def export(app_id, config_dir): if not os.path.exists(sub_config_dir): os.makedirs(sub_config_dir) for config_name, config in configs.items(): - config_file = os.path.join(sub_config_dir, config_name + '.json') + config_file = os.path.join(sub_config_dir, config_name + ".json") print(f"{app_id}:{config_name} exported to {config_file}") with open(config_file, "w") as f: json.dump(config, f, indent=2) diff --git a/serving/reagent/serving/core/ActionValueScorer.cpp b/serving/reagent/serving/core/ActionValueScorer.cpp index 1bca46531..e69de29bb 100644 --- a/serving/reagent/serving/core/ActionValueScorer.cpp +++ b/serving/reagent/serving/core/ActionValueScorer.cpp @@ -1 +0,0 @@ -#include "reagent/serving/core/ActionValueScorer.h" diff --git a/serving/reagent/serving/core/ConfigProvider.cpp b/serving/reagent/serving/core/ConfigProvider.cpp index bf8595c7b..e69de29bb 100644 --- a/serving/reagent/serving/core/ConfigProvider.cpp +++ b/serving/reagent/serving/core/ConfigProvider.cpp @@ -1 +0,0 @@ -#include "reagent/serving/core/ConfigProvider.h" diff --git a/serving/reagent/serving/core/DecisionServiceException.cpp b/serving/reagent/serving/core/DecisionServiceException.cpp index abde21ebf..e69de29bb 100644 --- a/serving/reagent/serving/core/DecisionServiceException.cpp +++ b/serving/reagent/serving/core/DecisionServiceException.cpp @@ -1 +0,0 @@ -#include "reagent/serving/core/DecisionServiceException.h" diff --git a/serving/reagent/serving/core/PytorchActionValueScorer.cpp b/serving/reagent/serving/core/PytorchActionValueScorer.cpp index 8904e636f..cb3069365 100644 --- a/serving/reagent/serving/core/PytorchActionValueScorer.cpp +++ b/serving/reagent/serving/core/PytorchActionValueScorer.cpp @@ -1,5 +1,7 @@ #include 
"reagent/serving/core/PytorchActionValueScorer.h" - +#ifdef FB_INTERNAL +#include "caffe2/caffe2/fb/predictor/PyTorchPredictorContainer.h" // @manual=//caffe2/caffe2/fb/predictor:pytorch_predictor_container +#endif #include "reagent/serving/core/Operator.h" namespace reagent { @@ -7,15 +9,25 @@ namespace reagent { PytorchActionValueScorer::PytorchActionValueScorer() : ActionValueScorer() {} StringDoubleMap PytorchActionValueScorer::predict( - const DecisionRequest& request, int modelId, int snapshotId) { + const DecisionRequest& request, + int modelId, + int snapshotId) { try { std::string path = "/tmp/" + std::to_string(modelId) + "/" + std::to_string(snapshotId); if (models_.find(path) == models_.end()) { try { +#ifdef FB_INTERNAL + // First load predictor container, then extract module + std::shared_ptr pytorchPredictor_; + pytorchPredictor_ = + std::make_shared(path); + auto module = pytorchPredictor_->getPredictor()->get_module(); +#else // Deserialize the ScriptModule from a file using torch::jit::load(). torch::jit::script::Module module = torch::jit::load(path); +#endif models_[path] = std::move(module); } catch (const c10::Error& e) { LOG(ERROR) << "Error loading the model: " << e.what(); @@ -78,4 +90,4 @@ StringDoubleMap PytorchActionValueScorer::predict( LOG(FATAL) << "Should never get here"; } -} // namespace reagent +} // namespace reagent diff --git a/serving/reagent/serving/core/RealTimeCounter.cpp b/serving/reagent/serving/core/RealTimeCounter.cpp index c284d26d6..e69de29bb 100644 --- a/serving/reagent/serving/core/RealTimeCounter.cpp +++ b/serving/reagent/serving/core/RealTimeCounter.cpp @@ -1 +0,0 @@ -#include "reagent/serving/core/RealTimeCounter.h" diff --git a/serving/requirements.txt b/serving/requirements.txt index 5d8d8dd7a..aee8532af 100644 --- a/serving/requirements.txt +++ b/serving/requirements.txt @@ -1 +1 @@ -python>=3.7 +python>=3.8 diff --git a/serving/scripts/__init__.py b/serving/scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/serving/scripts/rasp_to_model.py b/serving/scripts/rasp_to_model.py index 97862abb0..808aad667 100644 --- a/serving/scripts/rasp_to_model.py +++ b/serving/scripts/rasp_to_model.py @@ -8,9 +8,7 @@ import pandas as pd -logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) -logger.setLevel(logging.info) def keys_to_int(d: Dict[str, Any]) -> Dict[int, Any]: diff --git a/serving/setup.py b/serving/setup.py index d199f02f4..fcba2c4d2 100644 --- a/serving/setup.py +++ b/serving/setup.py @@ -9,11 +9,6 @@ def readme(): return f.read() -def requirements(): - with open("requirements.txt") as f: - return f.read() - - setup( name="ReAgentServing", version="0.1", diff --git a/setup.cfg b/setup.cfg index 63258bf6a..f2ad16641 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,41 +10,55 @@ license = BSD 3-Clause License [options] packages = find: -python_requires = >=3.7 +python_requires = >=3.8 install_requires = + # v1.48.0 and above upgraded the protobuf dependency, which lead to an error: + # https://stackoverflow.com/questions/72441758/typeerror-descriptors-cannot-not-be-created-directly + grpcio-tools>=1.44.0,<1.48.0 click>=7.0 # ~=1.2.0 for compatibility with gym # issue: https://github.com/openai/spinningup/issues/178 cloudpickle~=1.2.0 - numpy>=1.17.2 + iopath + numpy>=1.17.2,<1.24.0 pandas>=1.0.3 - pydantic>=1.4 + # https://github.com/samuelcolvin/pydantic/issues/2042 + pydantic>=1.4,<1.7 tinydb >= 4.1.1 - torch tqdm>=4.46.0 petastorm>=0.9.0 parameterized>=0.7.4 - pyspark>=2.4.5 + 
pyspark==3.1.1 + pytorch-lightning==1.6.0 ruamel.yaml>=0.15.99 scipy>=1.3.1 tensorboard>=1.14 scikit-learn>=0.20.0 - xgboost==0.90 + [options.extras_require] gym = - gym[classic_control,box2d,atari] + # Some issue with https://github.com/openai/gym/pull/1974 + # Remove the pinning when https://github.com/openai/gym/issues/2058 is fixed + gym[classic_control,box2d,atari]==0.17.2 gym_minigrid recsim-no-tf test = coverage>=5.1 - pytest-xdist==1.30.0 - # Pinning due to https://github.com/pytest-dev/pytest/issues/6925 - pytest==5.3 + pytest-xdist>=1.30.0 + pytest>=5.3 spark-testing-base==0.10.0 pytest-cov +ax = ax-platform + +lite = nevergrad>=0.4.3 + +torchrec_gpu = torchrec + +# July 2022: torchrec-cpu stable (0.1.1) caused test errors +torchrec_cpu = torchrec-nightly-cpu ########### diff --git a/setup.py b/setup.py index d71bde351..822f03eaf 100644 --- a/setup.py +++ b/setup.py @@ -3,5 +3,6 @@ from setuptools import setup + # see config.cfg setup() diff --git a/tox.ini b/tox.ini index 3acf9cc93..27703e978 100644 --- a/tox.ini +++ b/tox.ini @@ -3,35 +3,130 @@ # test suite on all supported python versions. To use it, "pip install tox" # and then run "tox" from this directory. +# This post discusses how to specify patterns for testing specific tests +# https://stackoverflow.com/questions/36456920/is-there-a-way-to-specify-which-pytest-tests-to-run-from-a-file + [tox] -envlist = py37 -isolated_build = True +envlist = py38 + +[pytest] +addopts = --verbose -d --tx popen --cov=reagent --cov-report=xml --cov-append --junitxml={envlogdir}/junit-{envname}.xml + -# install CUDA 10.1 Torch -[ubuntu_gpu] -install_command=pip install torch==1.5.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html {opts} {packages} + +# Refer to https://docs.pytest.org/en/stable/example/markers.html +# for how we include/exclude tests in pytest [testenv] +# Install the latest pip, setuptools, wheel; this is needed for downloading opencv-python wheel, +# instead of building from source (which is super slow). 
+download = true extras = gym test -setenv = - PYTEST_ADDOPTS=--verbose -d --tx popen --cov --cov-report=xml --cov-append --junitxml={envlogdir}/junit-{envname}.xml + torchrec_cpu +install_command = + pip install --pre --extra-index-url https://download.pytorch.org/whl/cpu -f https://download.pytorch.org/whl/nightly/torchrec_nightly_cpu/ {opts} {packages} --progress-bar off commands = - pytest -n 4 -m "not serial" + pytest -n2 -m "(not serial) and (not seq2slate_long)" pytest -n0 -m "serial" -[testenv:circleci_unittest] -install_command={[ubuntu_gpu]install_command} + +# install CUDA 11.3 Torch +[ubuntu_gpu] +extras = + gym + test + torchrec_gpu +install_command = + pip install --pre --extra-index-url https://download.pytorch.org/whl/cu113 -f https://download.pytorch.org/whl/torchrec/ {opts} {packages} --progress-bar off + + +[testenv:circleci_misc_unittest] +install_command = {[ubuntu_gpu]install_command} +extras = {[ubuntu_gpu]extras} commands = - pytest reagent/test -n auto -m "not serial" - pytest reagent/test -n0 -m "serial" + pytest reagent/test -n2 -m "not serial" --ignore=reagent/test/mab/ --ignore=reagent/test/lite/ --ignore=reagent/test/ranking/ --ignore=reagent/test/training/ --ignore=reagent/test/prediction/ --ignore=reagent/test/world_model/ + pytest reagent/test -n0 -m "serial" --ignore=reagent/test/mab/ --ignore=reagent/test/lite/ --ignore=reagent/test/ranking/ --ignore=reagent/test/training/ --ignore=reagent/test/prediction/ --ignore=reagent/test/world_model/ + + +[testenv:circleci_gym_replay_buffer_1_cpu_unittest] +commands = + pytest reagent/gym/tests -n2 -m "not serial" -k "test_replay_buffer_gym_cpu_1" + + +[testenv:circleci_gym_replay_buffer_2_cpu_unittest] +commands = + pytest reagent/gym/tests -n2 -m "not serial" -k "test_replay_buffer_gym_cpu_2" + + +# all cpu tests in reagent/gym/tests except test_replay_buffer_gym_cpu_x +[testenv:circleci_gym_cpu_unittest] +commands = + pytest reagent/gym/tests -n2 -m "not serial" -k "not test_replay_buffer_gym_cpu" + + +[testenv:circleci_gym_replay_buffer_1_gpu_unittest] +install_command = {[ubuntu_gpu]install_command} +extras = {[ubuntu_gpu]extras} +commands = + pytest reagent/gym/tests -n0 -m "serial" -k "test_replay_buffer_gym_gpu_1" + -[testenv:circleci_gym_unittest] -install_command={[ubuntu_gpu]install_command} +[testenv:circleci_gym_replay_buffer_2_gpu_unittest] +install_command = {[ubuntu_gpu]install_command} +extras = {[ubuntu_gpu]extras} commands = - pytest reagent/gym -n4 + pytest reagent/gym/tests -n0 -m "serial" -k "test_replay_buffer_gym_gpu_2" -[testenv:debug] -commands= - pytest -n4 --tx popen {posargs} + +# all gpu tests in reagent/gym/tests except test_replay_buffer_gym_gpu_x +[testenv:circleci_gym_gpu_unittest] +install_command = {[ubuntu_gpu]install_command} +extras = {[ubuntu_gpu]extras} +commands = + pytest reagent/gym/tests -n0 -m "serial" -k "not test_replay_buffer_gym_gpu" + + +[testenv:circleci_ranking_unittest] +install_command = {[ubuntu_gpu]install_command} +extras = {[ubuntu_gpu]extras} +commands = + pytest reagent/test/ranking -n2 + + +[testenv:circleci_training_unittest] +install_command = {[ubuntu_gpu]install_command} +extras = {[ubuntu_gpu]extras} +commands = + pytest reagent/test/training -n2 + + +[testenv:circleci_prediction_unittest] +install_command = {[ubuntu_gpu]install_command} +extras = {[ubuntu_gpu]extras} +commands = + pytest reagent/test/prediction -n2 + + +[testenv:circleci_world_model_unittest] +install_command = {[ubuntu_gpu]install_command} +extras = {[ubuntu_gpu]extras} +commands 
= + pytest reagent/test/world_model -n2 + + +[testenv:circleci_lite_api_unittest] +extras = + lite + test +commands = + pytest reagent/test/lite -n2 + pytest --doctest-modules reagent/lite -n2 --doctest-continue-on-failure + + +[testenv:circleci_mab_unittest] +extras = + test +commands = + pytest reagent/test/mab -n2
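
The new train_eval_lightning entry point in reagent/workflow/utils.py (together with the ReaderOptions and ResourceOptions dataclasses added to reagent/workflow/types.py) replaces the old train_and_evaluate_generic loop. Below is a minimal sketch of how it might be driven; the two Dataset objects, my_trainer_module, and my_batch_preprocessor are hypothetical placeholders, not part of this patch.

from reagent.workflow.types import ReaderOptions, ResourceOptions
from reagent.workflow.utils import train_eval_lightning

# Hypothetical inputs: Petastorm-backed Datasets, a ReAgent Lightning module,
# and a BatchPreprocessor constructed elsewhere.
reader_options = ReaderOptions(minibatch_size=1024)
resource_options = ResourceOptions(gpu=0)  # use_gpu is derived from gpu > 0

pl_trainer = train_eval_lightning(
    train_dataset=my_train_dataset,
    eval_dataset=my_eval_dataset,
    test_dataset=None,
    trainer_module=my_trainer_module,
    data_module=None,  # None falls back to PetastormLightningDataModule
    num_epochs=1,
    logger_name="my_experiment",
    batch_preprocessor=my_batch_preprocessor,
    reader_options=reader_options,
    resource_options=resource_options,
)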
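
PetastormLightningDataModule wraps each dataloader in a closing iterator so the underlying Petastorm reader is released once the epoch is exhausted, the role previously played by the deleted DataLoaderWrapper. A self-contained sketch of that pattern, using a stand-in loader rather than a real petastorm.pytorch.DataLoader:

class FakeLoader:
    """Stand-in for a Petastorm DataLoader: iterable, with an __exit__ hook."""

    def __iter__(self):
        return iter([1, 2, 3])

    def __exit__(self, exc_type, exc_value, traceback):
        print("reader closed")


def closing_iter(dataloader):
    # Drain the loader, then close it -- mirrors _closing_iter above.
    yield from dataloader
    dataloader.__exit__(None, None, None)


if __name__ == "__main__":
    for batch in closing_iter(FakeLoader()):
        print(batch)
    # Output: 1, 2, 3, then "reader closed"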