/**
 * @license
 * Copyright 2019 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */

import * as tf from '@tensorflow/tfjs';

import {createDeepQNetwork} from './dqn';
import {getRandomAction, SnakeGame, NUM_ACTIONS, ALL_ACTIONS, getStateTensor} from './snake_game';
import {ReplayMemory} from './replay_memory';
import {assertPositiveInteger} from './utils';

export class SnakeGameAgent {
  /**
   * Constructor of SnakeGameAgent.
   *
   * @param {SnakeGame} game A game object.
   * @param {object} config The configuration object with the following keys:
   *   - `replayBufferSize` {number} Size of the replay memory. Must be a
   *     positive integer.
   *   - `epsilonInit` {number} Initial value of epsilon (for the epsilon-
   *     greedy algorithm). Must be >= 0 and <= 1.
   *   - `epsilonFinal` {number} The final value of epsilon. Must be >= 0 and
   *     <= 1.
   *   - `epsilonDecayFrames` {number} The number of frames over which the
   *     value of `epsilon` decreases from `epsilonInit` to `epsilonFinal`,
   *     via a linear schedule.
   *   - `learningRate` {number} The learning rate used by the optimizer that
   *     updates the online network during training.
   */
  constructor(game, config) {
    assertPositiveInteger(config.epsilonDecayFrames);

    this.game = game;

    this.epsilonInit = config.epsilonInit;
    this.epsilonFinal = config.epsilonFinal;
    this.epsilonDecayFrames = config.epsilonDecayFrames;
    this.epsilonIncrement_ = (this.epsilonFinal - this.epsilonInit) /
        this.epsilonDecayFrames;
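    // Note: `epsilonIncrement_` is negative, because `epsilonFinal` is
    // smaller than `epsilonInit`; adding it once per frame implements the
    // linear decay schedule described in the constructor doc.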

    this.onlineNetwork =
        createDeepQNetwork(game.height, game.width, NUM_ACTIONS);
    this.targetNetwork =
        createDeepQNetwork(game.height, game.width, NUM_ACTIONS);
    // Freeze the target network: its weights are updated only through copying
    // from the online network.
    this.targetNetwork.trainable = false;

    this.optimizer = tf.train.adam(config.learningRate);

    this.replayBufferSize = config.replayBufferSize;
    this.replayMemory = new ReplayMemory(config.replayBufferSize);
    this.frameCount = 0;
    this.reset();
  }

  reset() {
    this.cumulativeReward_ = 0;
    this.game.reset();
  }

  /**
   * Play one step of the game.
   *
   * @returns {object} An object with the following keys:
   *   - `action` {number} The action taken in this step.
   *   - `cumulativeReward` {number} The cumulative reward of the game up to
   *     and including this step.
   *   - `done` {boolean} Whether the game has ended after this step. If so,
   *     the game is automatically reset.
   */
  playStep() {
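    // Anneal epsilon: decrease it linearly from `epsilonInit` to
    // `epsilonFinal` over the first `epsilonDecayFrames` frames, then hold it
    // constant at `epsilonFinal`.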
    this.epsilon = this.frameCount >= this.epsilonDecayFrames ?
        this.epsilonFinal :
        this.epsilonInit + this.epsilonIncrement_ * this.frameCount;
    this.frameCount++;

    // The epsilon-greedy algorithm.
    let action;
    const state = this.game.getState();
    if (Math.random() < this.epsilon) {
      // Pick an action at random.
      action = getRandomAction();
    } else {
      // Greedily pick an action based on online DQN output.
      tf.tidy(() => {
        const stateTensor =
            getStateTensor(state, this.game.height, this.game.width);
        action = ALL_ACTIONS[
            this.onlineNetwork.predict(stateTensor).argMax(-1).dataSync()[0]];
      });
    }

    const {state: nextState, reward, done} = this.game.step(action);

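    // Record the transition (state, action, reward, done, nextState) in the
    // replay buffer, for later sampling by `trainOnReplayBatch()`.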
    this.replayMemory.append([state, action, reward, done, nextState]);

    this.cumulativeReward_ += reward;
    const output = {
      action,
      cumulativeReward: this.cumulativeReward_,
      done
    };
    if (done) {
      this.reset();
    }
    return output;
  }

  /**
   * Perform training on a randomly sampled batch from the replay buffer.
   *
   * @param {number} batchSize Batch size.
   * @param {number} gamma Reward discount rate. Must be >= 0 and <= 1.
   * @param {tf.train.Optimizer} optimizer The optimizer object used to update
   *   the weights of the online network.
   */
  trainOnReplayBatch(batchSize, gamma, optimizer) {
    // Get a batch of examples from the replay buffer.
    const batch = this.replayMemory.sample(batchSize);
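    // The loss is the mean squared error between the online network's
    // Q-values for the actions actually taken and the Bellman targets
    // computed from the frozen target network (see below).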
    const lossFunction = () => tf.tidy(() => {
      const stateTensor = getStateTensor(
          batch.map(example => example[0]), this.game.height, this.game.width);
      const actionTensor = tf.tensor1d(
          batch.map(example => example[1]), 'int32');
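      // Q(s, a) of the action actually taken in each transition: predict the
      // Q-values of all actions, then select the taken action with a one-hot
      // mask.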
      const qs = this.onlineNetwork.predict(
          stateTensor).mul(tf.oneHot(actionTensor, NUM_ACTIONS)).sum(-1);

      const rewardTensor = tf.tensor1d(batch.map(example => example[2]));
      const nextStateTensor = getStateTensor(
          batch.map(example => example[4]), this.game.height, this.game.width);
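      // max over a' of Q(s', a'), estimated by the frozen target network.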
      const nextMaxQTensor =
          this.targetNetwork.predict(nextStateTensor).max(-1);
      const doneMask = tf.scalar(1).sub(
          tf.tensor1d(batch.map(example => example[3])).asType('float32'));
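      // Bellman target: r + gamma * max_a' Q(s', a'); `doneMask` zeroes out
      // the bootstrap term for terminal transitions.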
      const targetQs =
          rewardTensor.add(nextMaxQTensor.mul(doneMask).mul(gamma));
      return tf.losses.meanSquaredError(targetQs, qs);
    });

    // TODO(cais): Remove the second argument when `variableGrads()` obeys the
    // trainable flag.
    const grads =
        tf.variableGrads(lossFunction, this.onlineNetwork.getWeights());
    optimizer.applyGradients(grads.grads);
    tf.dispose(grads);
    // TODO(cais): Return the loss value here?
  }
}
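
// Usage sketch: one way a training script could drive this agent. The
// `SnakeGame` constructor options, the hyperparameter values, and the
// weight-sync interval below are illustrative assumptions, not values taken
// from this file; adjust them to the actual SnakeGame API and training setup.
// The target-network sync uses the standard tf.LayersModel
// getWeights()/setWeights() API, since this class does not expose a sync
// method of its own.
//
// const game = new SnakeGame({height: 9, width: 9, numFruits: 1, initLen: 2});
// const agent = new SnakeGameAgent(game, {
//   replayBufferSize: 1e4,
//   epsilonInit: 0.5,
//   epsilonFinal: 0.01,
//   epsilonDecayFrames: 1e5,
//   learningRate: 1e-3
// });
//
// // Warm up the replay buffer before any gradient updates.
// for (let i = 0; i < 1000; ++i) {
//   agent.playStep();
// }
//
// const numTrainSteps = 1e5;
// const syncEveryFrames = 1000;
// for (let step = 0; step < numTrainSteps; ++step) {
//   agent.trainOnReplayBatch(64, 0.99, agent.optimizer);
//   agent.playStep();
//   if ((step + 1) % syncEveryFrames === 0) {
//     // Copy the online network's weights into the frozen target network.
//     agent.targetNetwork.setWeights(agent.onlineNetwork.getWeights());
//   }
// }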