[Feat]:
- Implementation of the Adam optimizer with a demo example to test it
ahmedelsayed968 committed Apr 22, 2024
1 parent fe7d07b commit 5ed31c2
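
For reference, the update rule implemented in optimize() below is the standard Adam rule (Kingma & Ba, 2015); the one deviation is that this commit keeps epsilon inside the square root rather than adding it outside, which behaves nearly identically:

\begin{aligned}
m_t &= \beta_1\, m_{t-1} + (1-\beta_1)\, g_t \\
v_t &= \beta_2\, v_{t-1} + (1-\beta_2)\, g_t^{2} \\
\hat m_t &= \frac{m_t}{1-\beta_1^{t}}, \qquad \hat v_t = \frac{v_t}{1-\beta_2^{t}} \\
\theta_t &= \theta_{t-1} - \frac{\alpha\, \hat m_t}{\sqrt{\hat v_t + \epsilon}}
\end{aligned}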
Showing 1 changed file with 68 additions and 0 deletions.
optimization/adam.py
@@ -0,0 +1,68 @@
import tensorflow as tf
from typing import Callable


class Adam:
    def __init__(self,
                 learning_rate: float,
                 beta1: float,
                 beta2: float,
                 objective_function: Callable,
                 params: tf.Variable,
                 max_iteration: int) -> None:
        self.learning_rate = learning_rate
        self.beta1 = beta1
        self.beta2 = beta2
        self.objective_fn = objective_function
        # params must be a tf.Variable so that assign_sub can update it in place
        self.params = params
        self.max_iteration = max_iteration
        # First and second moment estimates, initialized to zero as in the Adam paper
        self.first_momentum = tf.zeros_like(self.params)
        self.second_momentum = tf.zeros_like(self.params)
        self.epsilon = 1e-6
        self.losses = []

    def optimize(self):
        for t in range(1, self.max_iteration + 1):
            # Evaluate the objective and record the loss for this iteration
            with tf.GradientTape() as tape:
                loss = self.objective_fn(self.params)
            self.losses.append(loss.numpy())

            # Gradient of the loss with respect to the parameters
            grad = tape.gradient(loss, self.params)
            grad_squared = tf.square(grad)

            # Exponential moving averages of the gradient and the squared gradient
            self.first_momentum = self.first_momentum * self.beta1 + grad * (1 - self.beta1)
            self.second_momentum = self.second_momentum * self.beta2 + grad_squared * (1 - self.beta2)

            # Bias correction for the zero initialization of the moment estimates
            first_momentum_cap = self.first_momentum / (1 - tf.pow(self.beta1, t))
            second_momentum_cap = self.second_momentum / (1 - tf.pow(self.beta2, t))

            # Update step; epsilon sits inside the square root here, whereas the
            # canonical Adam formulation adds it outside: sqrt(v_hat) + eps
            step = first_momentum_cap * self.learning_rate / tf.sqrt(second_momentum_cap + self.epsilon)
            self.params.assign_sub(step)


def quadratic_loss(params):
    return tf.reduce_sum(tf.square(params))


if __name__ == '__main__':
    # Define initial parameters
    initial_params = tf.Variable([1.0, 2.0, 3.0], dtype=tf.float32)

    # Adam optimizer parameters
    learning_rate = 0.01
    beta1 = 0.9
    beta2 = 0.999
    max_iteration = 1000

    # Create an instance of the Adam optimizer
    optimizer = Adam(learning_rate, beta1, beta2, quadratic_loss, initial_params, max_iteration)

    # Optimize the parameters
    optimizer.optimize()

    # Get the optimized parameters
    optimized_params = optimizer.params.numpy()

    print(optimized_params)
    print(optimizer.losses)
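
As a quick sanity check (not part of the commit), the same quadratic objective can be run through TensorFlow's built-in tf.keras.optimizers.Adam with matching hyperparameters; both should converge to roughly the zero vector. The names reference_params and keras_adam below are introduced for illustration only, assuming a TensorFlow 2.x environment with Keras available:

import tensorflow as tf

# Same starting point and hyperparameters as the demo above
reference_params = tf.Variable([1.0, 2.0, 3.0], dtype=tf.float32)
keras_adam = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999)

for _ in range(1000):
    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(tf.square(reference_params))
    grads = tape.gradient(loss, [reference_params])
    keras_adam.apply_gradients(zip(grads, [reference_params]))

# Expected to print values close to [0. 0. 0.], matching the hand-rolled optimizer
print(reference_params.numpy())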
