SARSA Code
2020. 12. 6. 23:31
environment.py
```python
import time
import numpy as np
import tkinter as tk
from PIL import ImageTk, Image

np.random.seed(1)
PhotoImage = ImageTk.PhotoImage
UNIT = 100   # pixels per grid cell
HEIGHT = 5   # grid world height (rows)
WIDTH = 5    # grid world width (columns)


class Env(tk.Tk):
    def __init__(self):
        super(Env, self).__init__()
        self.action_space = ['u', 'd', 'l', 'r']  # up, down, left, right
        self.n_actions = len(self.action_space)
        self.title('SARSA')
        self.geometry('{0}x{1}'.format(WIDTH * UNIT, HEIGHT * UNIT))
        self.shapes = self.load_images()
        self.canvas = self._build_canvas()
        self.texts = []

    def _build_canvas(self):
        canvas = tk.Canvas(self, bg='white',
                           height=HEIGHT * UNIT,
                           width=WIDTH * UNIT)
        # draw the grid lines, one every 100 px
        for c in range(0, WIDTH * UNIT, UNIT):
            x0, y0, x1, y1 = c, 0, c, HEIGHT * UNIT
            canvas.create_line(x0, y0, x1, y1)
        for r in range(0, HEIGHT * UNIT, UNIT):
            x0, y0, x1, y1 = 0, r, WIDTH * UNIT, r
            canvas.create_line(x0, y0, x1, y1)

        # add the images to the canvas
        self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])
        self.triangle1 = canvas.create_image(250, 150, image=self.shapes[1])
        self.triangle2 = canvas.create_image(150, 250, image=self.shapes[1])
        self.circle = canvas.create_image(250, 250, image=self.shapes[2])

        canvas.pack()
        return canvas

    def load_images(self):
        rectangle = PhotoImage(
            Image.open("../img/rectangle.png").resize((65, 65)))
        triangle = PhotoImage(
            Image.open("../img/triangle.png").resize((65, 65)))
        circle = PhotoImage(
            Image.open("../img/circle.png").resize((65, 65)))
        return rectangle, triangle, circle

    def text_value(self, row, col, contents, action, font='Helvetica',
                   size=10, style='normal', anchor="nw"):
        # offset the text inside the cell according to the action
        if action == 0:    # up
            origin_x, origin_y = 7, 42
        elif action == 1:  # down
            origin_x, origin_y = 85, 42
        elif action == 2:  # left
            origin_x, origin_y = 42, 5
        else:              # right
            origin_x, origin_y = 42, 77

        x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)
        font = (font, str(size), style)
        text = self.canvas.create_text(x, y, fill="black", text=contents,
                                       font=font, anchor=anchor)
        return self.texts.append(text)

    def print_value_all(self, q_table):
        for i in self.texts:
            self.canvas.delete(i)
        self.texts.clear()
        for x in range(HEIGHT):
            for y in range(WIDTH):
                for action in range(0, 4):
                    state = [x, y]
                    if str(state) in q_table.keys():
                        temp = q_table[str(state)][action]
                        self.text_value(y, x, round(temp, 3), action)

    def coords_to_state(self, coords):
        x = int((coords[0] - 50) / 100)
        y = int((coords[1] - 50) / 100)
        return [x, y]

    def reset(self):
        self.update()
        time.sleep(0.5)
        x, y = self.canvas.coords(self.rectangle)
        # move the agent back to the top-left cell
        self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)
        self.render()
        return self.coords_to_state(self.canvas.coords(self.rectangle))

    def step(self, action):
        state = self.canvas.coords(self.rectangle)
        base_action = np.array([0, 0])
        self.render()

        if action == 0:    # up
            if state[1] > UNIT:
                base_action[1] -= UNIT
        elif action == 1:  # down
            if state[1] < (HEIGHT - 1) * UNIT:
                base_action[1] += UNIT
        elif action == 2:  # left
            if state[0] > UNIT:
                base_action[0] -= UNIT
        elif action == 3:  # right
            if state[0] < (WIDTH - 1) * UNIT:
                base_action[0] += UNIT
        # move the agent
        self.canvas.move(self.rectangle, base_action[0], base_action[1])
        # raise the agent (the red rectangle) above everything else
        self.canvas.tag_raise(self.rectangle)
        next_state = self.canvas.coords(self.rectangle)

        # reward function
        if next_state == self.canvas.coords(self.circle):
            reward = 100
            done = True
        elif next_state in [self.canvas.coords(self.triangle1),
                            self.canvas.coords(self.triangle2)]:
            reward = -100
            done = True
        else:
            reward = 0
            done = False

        next_state = self.coords_to_state(next_state)
        return next_state, reward, done

    def render(self):
        time.sleep(0.03)
        self.update()
```
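As a quick sanity check, the coordinate conversion in coords_to_state can be tried on its own, without launching the tkinter window. The helper below is a hypothetical standalone copy of that method's logic for illustration only; it is not part of the original files:

```python
# Hypothetical standalone copy of Env.coords_to_state. Canvas images are
# centered at 50, 150, 250, ..., so this recovers the grid cell index
# from a canvas coordinate.
def coords_to_state(coords, unit=100):
    x = int((coords[0] - unit // 2) / unit)
    y = int((coords[1] - unit // 2) / unit)
    return [x, y]

assert coords_to_state([50, 50]) == [0, 0]    # the agent's start cell
assert coords_to_state([250, 250]) == [2, 2]  # the circle (goal)
assert coords_to_state([250, 150]) == [2, 1]  # triangle1
```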
agent.py

```python
import numpy as np
import random
from collections import defaultdict
from environment import Env


class SARSAgent:
    def __init__(self, actions):
        self.actions = actions
        self.step_size = 0.01        # learning rate
        self.discount_factor = 0.9   # discount factor
        self.epsilon = 0.1           # exploration rate
        # Q-function table with every entry initialized to 0
        self.q_table = defaultdict(lambda: [0.0, 0.0, 0.0, 0.0])

    # update the Q-function from a <s, a, r, s', a'> sample
    # (using the Bellman expectation equation)
    def learn(self, state, action, reward, next_state, next_action):
        state, next_state = str(state), str(next_state)
        current_q = self.q_table[state][action]
        next_state_q = self.q_table[next_state][next_action]
        td = reward + self.discount_factor * next_state_q - current_q
        new_q = current_q + self.step_size * td  # the SARSA update rule
        self.q_table[state][action] = new_q

    # return an action following the epsilon-greedy policy
    def get_action(self, state):
        if np.random.rand() < self.epsilon:
            # random action
            action = np.random.choice(self.actions)
        else:
            # greedy action with respect to the Q-function
            state = str(state)
            q_list = self.q_table[state]
            action = arg_max(q_list)
        return action


# return the best action according to the Q-values, breaking ties at random
def arg_max(q_list):
    max_idx_list = np.argwhere(q_list == np.amax(q_list))
    max_idx_list = max_idx_list.flatten().tolist()
    return random.choice(max_idx_list)


if __name__ == "__main__":
    env = Env()  # set up the environment
    agent = SARSAgent(actions=list(range(env.n_actions)))  # create the agent

    for episode in range(1000):  # 1000 episodes
        # reset the environment and get the initial state
        state = env.reset()
        # choose an action for the current state
        action = agent.get_action(state)

        while True:
            env.render()
            # take the action, then receive the next state, the reward,
            # and whether the episode is done
            next_state, reward, done = env.step(action)
            # choose the next action in the next state
            next_action = agent.get_action(next_state)
            # update the Q-function with <s, a, r, s', a'>
            agent.learn(state, action, reward, next_state, next_action)

            state = next_state
            action = next_action

            # display all Q-values on the screen
            env.print_value_all(agent.q_table)

            if done:
                break
```
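For reference, the learn() method is a direct implementation of the SARSA update, with learning rate α = step_size = 0.01 and discount factor γ = discount_factor = 0.9 from the constructor:

```latex
Q(s_t, a_t) \leftarrow Q(s_t, a_t)
  + \alpha \bigl[ r_{t+1} + \gamma \, Q(s_{t+1}, a_{t+1}) - Q(s_t, a_t) \bigr]
```

With the zero-initialized table, the first time the agent steps into the circle the TD error is 100 + 0.9 · 0 − 0 = 100, so that state-action pair's Q-value moves from 0.0 to 0.01 · 100 = 1.0, and later visits keep nudging it toward the target. A minimal sketch of that arithmetic, assuming SARSAgent is importable (the states here are arbitrary grid cells chosen for illustration):

```python
# Quick numeric check of learn() with the constructor defaults
# (step_size=0.01, discount_factor=0.9).
agent = SARSAgent(actions=[0, 1, 2, 3])
agent.learn(state=[1, 2], action=1, reward=100,
            next_state=[2, 2], next_action=0)
print(agent.q_table['[1, 2]'][1])  # 0.0 + 0.01 * (100 + 0.9 * 0.0 - 0.0) = 1.0
```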