# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from tensordict import TensorDict, TensorDictBase
from torch import nn

from torchrl.data.map import MCTSForest, Tree
from torchrl.envs import EnvBase


class MCTS(nn.Module):
    """Monte-Carlo tree search.

    Attributes:
        num_traversals (int): Number of times to traverse the tree.
        rollout_max_steps (int, optional): Maximum number of steps for each rollout.

    Methods:
        forward: Runs the tree search.
| 24 | + """ |
| 25 | + |
| 26 | + def __init__( |
| 27 | + self, |
| 28 | + num_traversals: int, |
| 29 | + rollout_max_steps: int | None = None, |
| 30 | + ): |
| 31 | + super().__init__() |
| 32 | + self.num_traversals = num_traversals |
| 33 | + self.rollout_max_steps = rollout_max_steps |
| 34 | + |
| 35 | + def forward( |
| 36 | + self, |
| 37 | + forest: MCTSForest, |
| 38 | + root: TensorDictBase, |
| 39 | + env: EnvBase, |
| 40 | + ) -> Tree: |
| 41 | + """Performs Monte-Carlo tree search in an environment. |
| 42 | +
|
| 43 | + Args: |
| 44 | + forest (MCTSForest): Forest of the tree to update. If the tree does not |
| 45 | + exist yet, it is added. |
| 46 | + root (TensorDict): The root step of the tree to update. |
| 47 | + env (EnvBase): Environment to performs actions in. |
| 48 | + """ |
        for action in env.all_actions(root):
            td = env.step(env.reset(root.clone()).update(action))
            forest.extend(td.unsqueeze(0))

        tree = forest.get_tree(root)

        tree.wins = torch.zeros_like(td["next", env.reward_key])
        for subtree in tree.subtree:
            subtree.wins = torch.zeros_like(td["next", env.reward_key])

        for _ in range(self.num_traversals):
            self._traverse_MCTS_one_step(forest, tree, env, self.rollout_max_steps)

        return tree

    def _traverse_MCTS_one_step(self, forest, tree, env, rollout_max_steps):
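        """Performs one MCTS iteration.

        One pass consists of selection down the tree via UCB1, expansion of a
        leaf node, simulation (a random rollout) from the resulting state, and
        backpropagation of the rollout reward along the visited path.
        """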
        done = False
        trees_visited = [tree]

        while not done:
            if tree.subtree is None:
                td_tree = tree.rollout[-1]["next"].clone()

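                # Expansion: if this leaf has been visited before (or is the
                # root) and is not terminal, create one child per legal action.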
                if (tree.visits > 0 or tree.parent is None) and not td_tree["done"]:
                    actions = env.all_actions(td_tree)
                    subtrees = []

                    for action in actions:
                        td = env.step(env.reset(td_tree).update(action))
                        new_node = Tree(
                            rollout=td.unsqueeze(0),
                            node_data=td["next"].select(*forest.node_map.in_keys),
                            count=torch.tensor(0),
                            wins=torch.zeros_like(td["next", env.reward_key]),
                        )
                        subtrees.append(new_node)

                    # NOTE: This whole script runs about 2x faster with lazy stack
                    # versus eager stack.
                    tree.subtree = TensorDict.lazy_stack(subtrees)
                    chosen_idx = torch.randint(0, len(subtrees), ()).item()
                    rollout_state = subtrees[chosen_idx].rollout[-1]["next"]

                else:
                    rollout_state = td_tree

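                # Simulation: if the chosen state is terminal, take its reward
                # directly; otherwise estimate its value with a random rollout.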
                if rollout_state["done"]:
                    rollout_reward = rollout_state[env.reward_key]
                else:
                    rollout = env.rollout(
                        max_steps=rollout_max_steps,
                        tensordict=rollout_state,
                    )
                    rollout_reward = rollout[-1]["next", env.reward_key]
                done = True

            else:
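                # Selection: descend into the child with the highest UCB1
                # priority.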
                priorities = self._traversal_priority_UCB1(tree)
                chosen_idx = torch.argmax(priorities).item()
                tree = tree.subtree[chosen_idx]
                trees_visited.append(tree)

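        # Backpropagation: credit every node on the visited path with the
        # rollout reward.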
        for tree in trees_visited:
            tree.visits += 1
            tree.wins += rollout_reward

    # TODO: Allow user to specify different priority functions with PR #2358
    def _traversal_priority_UCB1(self, tree):
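        """Computes the UCB1 priority of each child of ``tree``.

        Children that have never been visited get infinite priority so that
        every child is tried at least once.
        """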
        subtree = tree.subtree
        visits = subtree.visits
        reward_sum = subtree.wins

        # If it's black's turn, flip the reward, since black wants to optimize
        # for the lowest reward, not the highest.
        # TODO: Need a more generic way to do this, since not all use cases of
        # MCTS will be two-player turn-based games.
        if not subtree.rollout[0, 0]["turn"]:
            reward_sum = -reward_sum

        parent_visits = tree.visits
        reward_sum = reward_sum.squeeze(-1)
        C = 2.0**0.5
        # UCB1 priority: mean reward plus an exploration bonus,
        #     w_i / n_i + C * sqrt(ln(N_parent) / n_i)
        priority = reward_sum / visits + C * torch.sqrt(
            torch.log(parent_visits) / visits
        )
        priority[visits == 0] = float("inf")
        return priority