policy_learner.py (forked from quantylab/rltrader)

import os
import locale
import logging
import numpy as np
import settings
from environment import Environment
from agent import Agent
from policy_network import PolicyNetwork
from visualizer import Visualizer
locale.setlocale(locale.LC_ALL, 'ko_KR.UTF-8')
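

# PolicyLearner wires together the Environment (chart data playback), the Agent
# (trading decisions and rewards), the PolicyNetwork (action probabilities) and
# the Visualizer (per-epoch summary charts), and drives the training loop in fit().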
class PolicyLearner:

    def __init__(self, stock_code, chart_data, training_data=None,
                 min_trading_unit=1, max_trading_unit=2,
                 delayed_reward_threshold=.05, lr=0.01):
        self.stock_code = stock_code  # stock code
        self.chart_data = chart_data
        self.environment = Environment(chart_data)  # environment object
        # agent object
        self.agent = Agent(self.environment,
                           min_trading_unit=min_trading_unit,
                           max_trading_unit=max_trading_unit,
                           delayed_reward_threshold=delayed_reward_threshold)
        self.training_data = training_data  # training data
        self.sample = None
        self.training_data_idx = -1
        # policy network; input size = training data size + agent state size
        self.num_features = self.training_data.shape[1] + self.agent.STATE_DIM
        self.policy_network = PolicyNetwork(
            input_dim=self.num_features, output_dim=self.agent.NUM_ACTIONS, lr=lr)
        self.visualizer = Visualizer()  # visualization module

    def reset(self):
        self.sample = None
        self.training_data_idx = -1

    def fit(
        self, num_epoches=1000, max_memory=60, balance=10000000,
        discount_factor=0, start_epsilon=.5, learning=True):
        logging.info("LR: {lr}, DF: {discount_factor}, "
                     "TU: [{min_trading_unit}, {max_trading_unit}], "
                     "DRT: {delayed_reward_threshold}".format(
            lr=self.policy_network.lr,
            discount_factor=discount_factor,
            min_trading_unit=self.agent.min_trading_unit,
            max_trading_unit=self.agent.max_trading_unit,
            delayed_reward_threshold=self.agent.delayed_reward_threshold
        ))

        # prepare visualization
        # the chart data does not change, so visualize it up front
        self.visualizer.prepare(self.environment.chart_data)

        # prepare the folder for saving visualization results
        epoch_summary_dir = os.path.join(
            settings.BASE_DIR, 'epoch_summary/%s/epoch_summary_%s' % (
                self.stock_code, settings.timestr))
        if not os.path.isdir(epoch_summary_dir):
            os.makedirs(epoch_summary_dir)

        # set the agent's initial capital
        self.agent.set_balance(balance)

        # initialize overall training statistics
        max_portfolio_value = 0
        epoch_win_cnt = 0

        # training loop over epochs
        for epoch in range(num_epoches):
            # initialize per-epoch information
            loss = 0.
            itr_cnt = 0
            win_cnt = 0
            exploration_cnt = 0
            batch_size = 0
            pos_learning_cnt = 0
            neg_learning_cnt = 0

            # initialize memory
            memory_sample = []
            memory_action = []
            memory_reward = []
            memory_prob = []
            memory_pv = []
            memory_num_stocks = []
            memory_exp_idx = []
            memory_learning_idx = []

            # reset the environment, agent and policy network
            self.environment.reset()
            self.agent.reset()
            self.policy_network.reset()
            self.reset()

            # reset the visualizer
            self.visualizer.clear([0, len(self.chart_data)])

            # the exploration rate decreases as training progresses
            if learning:
                epsilon = start_epsilon * (1. - float(epoch) / (num_epoches - 1))
            else:
                epsilon = 0
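            # e.g. with start_epsilon=0.5 and num_epoches=1000, epsilon decays
            # linearly from 0.5 in the first epoch to 0 in the last one; with
            # learning=False (back-testing via trade()), no exploration is done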

            while True:
                # build the next sample
                next_sample = self._build_sample()
                if next_sample is None:
                    break

                # decide an action via the policy network or by exploration
                action, confidence, exploration = self.agent.decide_action(
                    self.policy_network, self.sample, epsilon)

                # perform the decided action and obtain the immediate and delayed rewards
                immediate_reward, delayed_reward = self.agent.act(action, confidence)

                # remember the action and its results
                memory_sample.append(next_sample)
                memory_action.append(action)
                memory_reward.append(immediate_reward)
                memory_pv.append(self.agent.portfolio_value)
                memory_num_stocks.append(self.agent.num_stocks)
                memory = [(
                    memory_sample[i],
                    memory_action[i],
                    memory_reward[i])
                    for i in list(range(len(memory_action)))[-max_memory:]
                ]
                if exploration:
                    memory_exp_idx.append(itr_cnt)
                    memory_prob.append([np.nan] * Agent.NUM_ACTIONS)
                else:
                    memory_prob.append(self.policy_network.prob)

                # update the iteration counters
                batch_size += 1
                itr_cnt += 1
                exploration_cnt += 1 if exploration else 0
                win_cnt += 1 if delayed_reward > 0 else 0
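                # If max_memory steps pass without a non-zero delayed reward,
                # the last immediate reward is substituted (and the agent's base
                # portfolio value is reset) so a batch update still takes place.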
                # update the policy network when in learning mode and a delayed reward exists
                if delayed_reward == 0 and batch_size >= max_memory:
                    delayed_reward = immediate_reward
                    self.agent.base_portfolio_value = self.agent.portfolio_value
                if learning and delayed_reward != 0:
                    # size of the training batch
                    batch_size = min(batch_size, max_memory)
                    # build the batch training data
                    x, y = self._get_batch(
                        memory, batch_size, discount_factor, delayed_reward)
                    if len(x) > 0:
                        if delayed_reward > 0:
                            pos_learning_cnt += 1
                        else:
                            neg_learning_cnt += 1
                        # update the policy network
                        loss += self.policy_network.train_on_batch(x, y)
                        memory_learning_idx.append([itr_cnt, delayed_reward])
                    batch_size = 0

            # visualize the epoch information
            num_epoches_digit = len(str(num_epoches))
            epoch_str = str(epoch + 1).rjust(num_epoches_digit, '0')

            self.visualizer.plot(
                epoch_str=epoch_str, num_epoches=num_epoches, epsilon=epsilon,
                action_list=Agent.ACTIONS, actions=memory_action,
                num_stocks=memory_num_stocks, outvals=memory_prob,
                exps=memory_exp_idx, learning=memory_learning_idx,
                initial_balance=self.agent.initial_balance, pvs=memory_pv
            )
            self.visualizer.save(os.path.join(
                epoch_summary_dir, 'epoch_summary_%s_%s.png' % (
                    settings.timestr, epoch_str)))

            # log the epoch information
            if pos_learning_cnt + neg_learning_cnt > 0:
                loss /= pos_learning_cnt + neg_learning_cnt
            logging.info("[Epoch %s/%s]\tEpsilon:%.4f\t#Expl.:%d/%d\t"
                         "#Buy:%d\t#Sell:%d\t#Hold:%d\t"
                         "#Stocks:%d\tPV:%s\t"
                         "POS:%s\tNEG:%s\tLoss:%10.6f" % (
                             epoch_str, num_epoches, epsilon, exploration_cnt, itr_cnt,
                             self.agent.num_buy, self.agent.num_sell, self.agent.num_hold,
                             self.agent.num_stocks,
                             locale.currency(self.agent.portfolio_value, grouping=True),
                             pos_learning_cnt, neg_learning_cnt, loss))

            # update the overall training statistics
            max_portfolio_value = max(
                max_portfolio_value, self.agent.portfolio_value)
            if self.agent.portfolio_value > self.agent.initial_balance:
                epoch_win_cnt += 1

        # log the overall training results
        logging.info("Max PV: %s, \t # Win: %d" % (
            locale.currency(max_portfolio_value, grouping=True), epoch_win_cnt))

    def _get_batch(self, memory, batch_size, discount_factor, delayed_reward):
        x = np.zeros((batch_size, 1, self.num_features))
        y = np.full((batch_size, self.agent.NUM_ACTIONS), 0.5)
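        # The label for the chosen action is (delayed_reward + 1) / 2: a delayed
        # reward of +0.06 gives a target of 0.53 and -0.06 gives 0.47, while the
        # other actions keep the neutral target of 0.5. When discount_factor > 0,
        # older samples in the batch are further scaled by discount_factor ** i.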
        for i, (sample, action, reward) in enumerate(
                reversed(memory[-batch_size:])):
            x[i] = np.array(sample).reshape((-1, 1, self.num_features))
            y[i, action] = (delayed_reward + 1) / 2
            if discount_factor > 0:
                y[i, action] *= discount_factor ** i
        return x, y

    def _build_sample(self):
        self.environment.observe()
        if len(self.training_data) > self.training_data_idx + 1:
            self.training_data_idx += 1
            self.sample = self.training_data.iloc[self.training_data_idx].tolist()
            self.sample.extend(self.agent.get_states())
            return self.sample
        return None

    def trade(self, model_path=None, balance=2000000):
        if model_path is None:
            return
        self.policy_network.load_model(model_path=model_path)
        self.fit(balance=balance, num_epoches=1, learning=False)
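

# Usage sketch (not part of the original module): how a driver script might call
# this class. The chart_data / training_data DataFrames, the model_path value and
# the save_model() call are assumptions, not guaranteed by this file alone.
#
#     learner = PolicyLearner(
#         stock_code='005930', chart_data=chart_data,
#         training_data=training_data, delayed_reward_threshold=.05, lr=.001)
#     learner.fit(balance=10000000, num_epoches=1000,
#                 discount_factor=0, start_epsilon=.5)
#     learner.policy_network.save_model(model_path)  # assumed PolicyNetwork API
#     learner.trade(model_path=model_path, balance=2000000)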