
5. Python

[TensorFlow] Training an Apple Stock Trading Agent

패스트코드블로그 2020. 5. 9. 15:29
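This post is a small deep Q-learning trading experiment in TensorFlow. A Trader agent with an epsilon-greedy policy and a replay memory learns to hold, buy, or sell Apple (AAPL) shares from daily closing prices fetched from Yahoo Finance, and the network is checkpointed every 10 episodes. The full script follows.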
# Standard library
import math
import random
from collections import deque

# Third-party
import numpy as np
import pandas_datareader as data_reader
import tensorflow as tf
from tqdm import tqdm
 
class Trader:
    def __init__(self, state_size, action_space=3, model_name='AITrader'):
        self.state_size = state_size
        self.action_space = action_space      # 0: hold, 1: buy, 2: sell
        self.memory = deque(maxlen=2000)      # replay memory of (state, action, reward, next_state, done)
        self.inventory = []                   # buy prices of shares currently held
        self.model_name = model_name
        self.gamma = 0.95                     # discount factor for future rewards
        self.epsilon = 1.0                    # initial exploration rate
        self.epsilon_final = 0.01             # exploration floor
        self.epsilon_decay = 0.995            # multiplicative decay applied after each training pass
 
 
    def model_builder(self):
        # Q-network: maps a state vector to one Q-value per action.
        model = tf.keras.models.Sequential()
        model.add(tf.keras.layers.Dense(units=32, activation='relu', input_dim=self.state_size))
        model.add(tf.keras.layers.Dense(units=64, activation='relu'))
        model.add(tf.keras.layers.Dense(units=128, activation='relu'))
        model.add(tf.keras.layers.Dense(units=self.action_space, activation='linear'))
        model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(learning_rate=0.001))
        return model
 
    def trade(self, state, model):
        # Epsilon-greedy policy: explore with probability epsilon,
        # otherwise act greedily on the predicted Q-values.
        if random.random() <= self.epsilon:
            return random.randrange(self.action_space)
        actions = model.predict(state, verbose=0)
        return np.argmax(actions[0])
 
    def batch_train(self, batch_size, model):
        # Replay the most recent experiences from memory.
        batch = []
        for i in range(len(self.memory) - batch_size + 1, len(self.memory)):
            batch.append(self.memory[i])

        for state, action, reward, next_state, done in batch:
            if not done:
                # Bellman target: immediate reward plus the discounted
                # best Q-value of the next state.
                reward = reward + self.gamma * np.amax(model.predict(next_state, verbose=0)[0])
            target = model.predict(state, verbose=0)
            target[0][action] = reward
            model.fit(state, target, epochs=1, verbose=0)

        # Decay exploration toward its floor.
        if self.epsilon > self.epsilon_final:
            self.epsilon *= self.epsilon_decay
 
 
class Trading:
    @staticmethod
    def sigmoid(x):
        # Squash a price difference into (0, 1).
        return 1 / (1 + math.exp(-x))

    @staticmethod
    def stocks_price_format(n):
        # Format a price as a signed dollar string, e.g. "- $ 3.14".
        if n < 0:
            return "- $ {0:.2f}".format(abs(n))
        else:
            return "$ {0:.2f}".format(abs(n))

    @staticmethod
    def dataset_loader(stock_name):
        # Daily closing prices from Yahoo Finance via pandas-datareader,
        # returned as a plain array so data[t] indexes by position.
        dataset = data_reader.DataReader(stock_name, data_source="yahoo")
        return dataset['Close'].values
 
    def state_creator(self, data, timestep, window_size):
        # State = sigmoid of each day-over-day price change inside a trailing
        # window, left-padded with the first price when the window starts
        # before the beginning of the data.
        starting_id = timestep - window_size + 1

        if starting_id >= 0:
            windowed_data = data[starting_id: timestep + 1]
        else:
            windowed_data = -starting_id * [data[0]] + list(data[0: timestep + 1])

        state = []
        for i in range(window_size - 1):
            state.append(self.sigmoid(windowed_data[i + 1] - windowed_data[i]))

        return np.array([state])
 
    """
    hook method
    """
 
    def transaction(self, target):
        data = self.dataset_loader(target)
        window_size = 10
        episodes = 1000
        batch_size = 32
        data_samples = len(data) - 1
        trader = Trader(window_size)
        model = trader.model_builder()
        print('==== Model Summary ====')
        model.summary()
        for episode in range(1, episodes + 1):
            print("Episode: {}/{}".format(episode, episodes))
            state = self.state_creator(data, 0, window_size + 1)
            total_profit = 0
            trader.inventory = []
 
            for t in tqdm(range(data_samples)):
                action = trader.trade(state, model)
                next_state = self.state_creator(data, t + 1, window_size + 1)
                reward = 0
 
                if action == 1:  # buy
                    trader.inventory.append(data[t])
                    print("AI trader buys: ", self.stocks_price_format(data[t]))
                elif action == 2 and len(trader.inventory) > 0:  # sell, only while holding shares
                    buy_price = trader.inventory.pop(0)
                    reward = max(data[t] - buy_price, 0)
                    total_profit += data[t] - buy_price
                    print("AI trader sells: ", self.stocks_price_format(data[t]),
                          "Profit: " + self.stocks_price_format(data[t] - buy_price))
                done = (t == data_samples - 1)
 
                trader.memory.append((state, action, reward, next_state, done))
                state = next_state
 
                if done:
                    print('#################')
                    print('Total profit: {}'.format(total_profit))
                    print('#################')
 
                if len(trader.memory) > batch_size:
                    trader.batch_train(batch_size, model)
            if episode % 10 == 0:
                # Checkpoint the network every 10 episodes.
                model.save('ai_trader_{}.h5'.format(episode))
 
if __name__ == '__main__':
    trading = Trading()
    trading.transaction('AAPL')
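
Note: pandas-datareader's Yahoo endpoint has broken repeatedly in recent years. If DataReader(..., data_source="yahoo") raises an error, a drop-in replacement for dataset_loader built on the yfinance package might look like the sketch below; yfinance and the period="max" choice are my assumptions, not part of the original script.

# Hedged sketch: alternative data loader, assuming `pip install yfinance`.
# period="max" fetches the full available price history.
import yfinance as yf

def dataset_loader_yf(stock_name):
    dataset = yf.Ticker(stock_name).history(period="max")
    return dataset['Close'].values  # plain array, same output shape as dataset_loader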
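
Checkpoints are written as ai_trader_<episode>.h5, so a trained network can be reloaded and run without exploration. The helper below is a hypothetical sketch (the file name and greedy_action are illustrative, not from the original post):

# Hypothetical sketch: reload a checkpoint and act greedily (epsilon = 0).
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model('ai_trader_10.h5')

def greedy_action(state):
    # Same decision rule as Trader.trade, but always exploiting.
    q_values = model.predict(state, verbose=0)
    return np.argmax(q_values[0])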