From eec42adec8e24c349d9721bc1b9290d1c8f5d4a3 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 01:13:06 +0600 Subject: [PATCH 01/24] Add __init__.py file --- machine_learning/neural_network/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 machine_learning/neural_network/__init__.py diff --git a/machine_learning/neural_network/__init__.py b/machine_learning/neural_network/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 From 836c8ac4f8649a6cd851090a98a63dc4ee460434 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 01:15:20 +0600 Subject: [PATCH 02/24] Add __init__.py file --- machine_learning/{neural_network => neural_networks}/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename machine_learning/{neural_network => neural_networks}/__init__.py (100%) diff --git a/machine_learning/neural_network/__init__.py b/machine_learning/neural_networks/__init__.py similarity index 100% rename from machine_learning/neural_network/__init__.py rename to machine_learning/neural_networks/__init__.py From 74b6229caea3becb75c4d72fd86563871f9a70d1 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 01:25:01 +0600 Subject: [PATCH 03/24] fully_connected_mnist.py: For the basic fully connected neural network. --- .../neural_networks/fully_connected_mnist.py | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 machine_learning/neural_networks/fully_connected_mnist.py diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py new file mode 100644 index 000000000000..6540aac40d56 --- /dev/null +++ b/machine_learning/neural_networks/fully_connected_mnist.py @@ -0,0 +1,53 @@ +import tensorflow as tf +from tensorflow.keras import layers, models +import numpy as np + +# Load the MNIST dataset from Keras +mnist = tf.keras.datasets.mnist +(X_train, y_train), (X_test, y_test) = mnist.load_data() + +# Normalize the images from 0-255 to 0-1 by dividing by 255 +X_train, X_test = X_train / 255.0, X_test / 255.0 + +# Print TensorFlow and Keras information +print(f"TensorFlow Version: {tf.__version__}") +print(f"Keras Layers Module: {layers.__name__}") +print(f"Keras Models Module: {models.__name__}") + +# Build a simple Sequential model +model = models.Sequential() + +# Flatten the 28x28 images into vectors of length 784 +model.add(layers.Flatten(input_shape=(28, 28))) + +# First hidden layer with 128 neurons and ReLU activation +model.add(layers.Dense(128, activation='relu')) + +# Dropout layer to prevent overfitting (randomly drops 20% of neurons) +model.add(layers.Dropout(0.2)) + +# Second hidden layer with 64 neurons and ReLU activation +model.add(layers.Dense(64, activation='relu')) + +# Output layer with 10 neurons (one for each digit class 0-9), softmax for probabilities +model.add(layers.Dense(10, activation='softmax')) + +# Compile the model +model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) + +# Train the model on the MNIST training data +model.fit(X_train, y_train, epochs=5, batch_size=32) + +# Evaluate the model on the test set +test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2) +print(f'\nTest accuracy: {test_acc}') + +# Make a prediction on a random test image +random_index = np.random.randint(0, len(X_test)) +random_image = np.expand_dims(X_test[random_index], axis=0) +prediction = model.predict(random_image) +predicted_digit = np.argmax(prediction) + +# Print the 
predicted result and actual label +print(f'Predicted digit: {predicted_digit}, Actual digit: {y_test[random_index]}') + From f4e3b979dcb46942b69a1a53df19421157764410 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 01:35:16 +0600 Subject: [PATCH 04/24] fully_connected_mnist.py: For the basic fully connected neural network. --- .../neural_networks/fully_connected_mnist.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py index 6540aac40d56..ec6821b19828 100644 --- a/machine_learning/neural_networks/fully_connected_mnist.py +++ b/machine_learning/neural_networks/fully_connected_mnist.py @@ -1,3 +1,17 @@ +""" +Fully Connected Neural Network for MNIST Classification + +Goal: This script implements a fully connected feed-forward neural network using TensorFlow and Keras to classify the + MNIST dataset (28x28 grayscale images of handwritten digits from 0 to 9). The network has two hidden layers with ReLU + activations and dropout for regularization. + +Objectives: +- Normalize and preprocess MNIST data. +- Build a basic neural network with dense layers. +- Train the model, evaluate its accuracy and loss at each epoch, and predict sample outputs. +""" + + import tensorflow as tf from tensorflow.keras import layers, models import numpy as np From bc1f9b98dc4571b62c857ab9ba07dd3a8f2a312f Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 01:39:19 +0600 Subject: [PATCH 05/24] cnn_mnist.py: For the Convolutional Neural Network (CNN) model. --- machine_learning/neural_networks/cnn_mnist.py | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 machine_learning/neural_networks/cnn_mnist.py diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py new file mode 100644 index 000000000000..b6611a497253 --- /dev/null +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -0,0 +1,70 @@ +""" +Convolutional Neural Network (CNN) for MNIST Classification + +Goal: This script builds a deep CNN to classify the MNIST dataset using TensorFlow and Keras. It leverages + convolutional layers for feature extraction and pooling layers for down-sampling, followed by fully connected layers + for classification. + +Objectives: +- Load and preprocess MNIST data (reshape for CNN input). +- Build a CNN with multiple convolutional, pooling, and batch normalization layers. +- Train the CNN, evaluate its accuracy, and display model performance. 
+ +""" + + +import tensorflow as tf +from tensorflow.keras import layers, models +from tensorflow.keras.datasets import mnist +from tensorflow.keras.utils import to_categorical + +# Load and preprocess the MNIST data +(X_train, y_train), (X_test, y_test) = mnist.load_data() + +# Normalize the pixel values (0 to 1) +X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255 +X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255 + +# Convert labels to one-hot encoding +y_train = to_categorical(y_train, 10) +y_test = to_categorical(y_test, 10) + +# Building the CNN model +model = models.Sequential() + +# 1st Convolutional Layer +model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) +model.add(layers.MaxPooling2D((2, 2))) +model.add(layers.BatchNormalization()) + +# 2nd Convolutional Layer +model.add(layers.Conv2D(64, (3, 3), activation='relu')) +model.add(layers.MaxPooling2D((2, 2))) +model.add(layers.BatchNormalization()) + +# 3rd Convolutional Layer +model.add(layers.Conv2D(128, (3, 3), activation='relu')) +model.add(layers.BatchNormalization()) + +# Flattening the data before fully connected layers +model.add(layers.Flatten()) + +# Fully Connected (Dense) Layer with Dropout for regularization +model.add(layers.Dense(128, activation='relu')) +model.add(layers.Dropout(0.5)) + +# Output Layer for classification +model.add(layers.Dense(10, activation='softmax')) + +# Compile the model +model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) + +# Display the model summary +model.summary() + +# Train the model +history = model.fit(X_train, y_train, epochs=5, batch_size=32, validation_data=(X_test, y_test)) + +# Evaluate the model on test data +test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2) +print(f'\nTest accuracy: {test_acc}') From 96c374a820825d54d8bb46b3608c76ba065c38f0 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 01:40:08 +0600 Subject: [PATCH 06/24] fully_connected_mnist.py: For the basic fully connected neural network. 
--- machine_learning/neural_networks/fully_connected_mnist.py | 1 - 1 file changed, 1 deletion(-) diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py index ec6821b19828..f6bd7d07c048 100644 --- a/machine_learning/neural_networks/fully_connected_mnist.py +++ b/machine_learning/neural_networks/fully_connected_mnist.py @@ -64,4 +64,3 @@ # Print the predicted result and actual label print(f'Predicted digit: {predicted_digit}, Actual digit: {y_test[random_index]}') - From e4dcab860f28ebb6e39a3e67a410c6b2d42c0e2d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 20:04:55 +0000 Subject: [PATCH 07/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- machine_learning/neural_networks/cnn_mnist.py | 23 ++++++++++--------- .../neural_networks/fully_connected_mnist.py | 15 ++++++------ 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py index b6611a497253..4f9c2a342dd9 100644 --- a/machine_learning/neural_networks/cnn_mnist.py +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -12,7 +12,6 @@ """ - import tensorflow as tf from tensorflow.keras import layers, models from tensorflow.keras.datasets import mnist @@ -22,8 +21,8 @@ (X_train, y_train), (X_test, y_test) = mnist.load_data() # Normalize the pixel values (0 to 1) -X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255 -X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255 +X_train = X_train.reshape(-1, 28, 28, 1).astype("float32") / 255 +X_test = X_test.reshape(-1, 28, 28, 1).astype("float32") / 255 # Convert labels to one-hot encoding y_train = to_categorical(y_train, 10) @@ -33,38 +32,40 @@ model = models.Sequential() # 1st Convolutional Layer -model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) +model.add(layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.BatchNormalization()) # 2nd Convolutional Layer -model.add(layers.Conv2D(64, (3, 3), activation='relu')) +model.add(layers.Conv2D(64, (3, 3), activation="relu")) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.BatchNormalization()) # 3rd Convolutional Layer -model.add(layers.Conv2D(128, (3, 3), activation='relu')) +model.add(layers.Conv2D(128, (3, 3), activation="relu")) model.add(layers.BatchNormalization()) # Flattening the data before fully connected layers model.add(layers.Flatten()) # Fully Connected (Dense) Layer with Dropout for regularization -model.add(layers.Dense(128, activation='relu')) +model.add(layers.Dense(128, activation="relu")) model.add(layers.Dropout(0.5)) # Output Layer for classification -model.add(layers.Dense(10, activation='softmax')) +model.add(layers.Dense(10, activation="softmax")) # Compile the model -model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) +model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) # Display the model summary model.summary() # Train the model -history = model.fit(X_train, y_train, epochs=5, batch_size=32, validation_data=(X_test, y_test)) +history = model.fit( + X_train, y_train, epochs=5, batch_size=32, validation_data=(X_test, y_test) +) # Evaluate the model on test data test_loss, test_acc = model.evaluate(X_test, y_test, 
verbose=2) -print(f'\nTest accuracy: {test_acc}') +print(f"\nTest accuracy: {test_acc}") diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py index f6bd7d07c048..a716043a4d2e 100644 --- a/machine_learning/neural_networks/fully_connected_mnist.py +++ b/machine_learning/neural_networks/fully_connected_mnist.py @@ -11,7 +11,6 @@ - Train the model, evaluate its accuracy and loss at each epoch, and predict sample outputs. """ - import tensorflow as tf from tensorflow.keras import layers, models import numpy as np @@ -35,26 +34,28 @@ model.add(layers.Flatten(input_shape=(28, 28))) # First hidden layer with 128 neurons and ReLU activation -model.add(layers.Dense(128, activation='relu')) +model.add(layers.Dense(128, activation="relu")) # Dropout layer to prevent overfitting (randomly drops 20% of neurons) model.add(layers.Dropout(0.2)) # Second hidden layer with 64 neurons and ReLU activation -model.add(layers.Dense(64, activation='relu')) +model.add(layers.Dense(64, activation="relu")) # Output layer with 10 neurons (one for each digit class 0-9), softmax for probabilities -model.add(layers.Dense(10, activation='softmax')) +model.add(layers.Dense(10, activation="softmax")) # Compile the model -model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) +model.compile( + optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"] +) # Train the model on the MNIST training data model.fit(X_train, y_train, epochs=5, batch_size=32) # Evaluate the model on the test set test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2) -print(f'\nTest accuracy: {test_acc}') +print(f"\nTest accuracy: {test_acc}") # Make a prediction on a random test image random_index = np.random.randint(0, len(X_test)) @@ -63,4 +64,4 @@ predicted_digit = np.argmax(prediction) # Print the predicted result and actual label -print(f'Predicted digit: {predicted_digit}, Actual digit: {y_test[random_index]}') +print(f"Predicted digit: {predicted_digit}, Actual digit: {y_test[random_index]}") From 1a69f8ae996cd393c01d2fc8f33bbac7bdbf0a8f Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 02:40:17 +0600 Subject: [PATCH 08/24] fully_connected_mnist.py: For the basic fully connected neural network. --- .../neural_networks/fully_connected_mnist.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py index f6bd7d07c048..e84a422b178a 100644 --- a/machine_learning/neural_networks/fully_connected_mnist.py +++ b/machine_learning/neural_networks/fully_connected_mnist.py @@ -1,20 +1,25 @@ """ Fully Connected Neural Network for MNIST Classification -Goal: This script implements a fully connected feed-forward neural network using TensorFlow and Keras to classify the - MNIST dataset (28x28 grayscale images of handwritten digits from 0 to 9). The network has two hidden layers with ReLU - activations and dropout for regularization. +Goal: This script implements a fully connected feed-forward neural network + using TensorFlow and Keras to classify the MNIST dataset (28x28 grayscale + images of handwritten digits from 0 to 9). + The network has two hidden layers with ReLU activations and dropout + for regularization. Objectives: - Normalize and preprocess MNIST data. - Build a basic neural network with dense layers. 
-- Train the model, evaluate its accuracy and loss at each epoch, and predict sample outputs. +- Train the model, evaluate its accuracy and loss at each epoch, + and predict sample outputs. + """ +import numpy as np import tensorflow as tf from tensorflow.keras import layers, models -import numpy as np + # Load the MNIST dataset from Keras mnist = tf.keras.datasets.mnist @@ -57,7 +62,8 @@ print(f'\nTest accuracy: {test_acc}') # Make a prediction on a random test image -random_index = np.random.randint(0, len(X_test)) +rng = np.random.default_rng() +random_index = rng.integers(0, len(X_test)) random_image = np.expand_dims(X_test[random_index], axis=0) prediction = model.predict(random_image) predicted_digit = np.argmax(prediction) From 860c0c2bd481646de453c256d265c8f4d3ee1a09 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 02:40:30 +0600 Subject: [PATCH 09/24] cnn_mnist.py: For the Convolutional Neural Network (CNN) model. --- machine_learning/neural_networks/cnn_mnist.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py index b6611a497253..7ec691f4c8c2 100644 --- a/machine_learning/neural_networks/cnn_mnist.py +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -1,9 +1,10 @@ """ Convolutional Neural Network (CNN) for MNIST Classification -Goal: This script builds a deep CNN to classify the MNIST dataset using TensorFlow and Keras. It leverages - convolutional layers for feature extraction and pooling layers for down-sampling, followed by fully connected layers - for classification. +Goal: This script builds a deep CNN to classify the MNIST dataset + using TensorFlow and Keras. It leverages convolutional layers + for feature extraction and pooling layers for down-sampling, + followed by fully connected layers for classification. Objectives: - Load and preprocess MNIST data (reshape for CNN input). 
@@ -13,7 +14,7 @@ """ -import tensorflow as tf +# import tensorflow as tf from tensorflow.keras import layers, models from tensorflow.keras.datasets import mnist from tensorflow.keras.utils import to_categorical From 99a58be312d130c7971e8ff2b4c7f3612f44393a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 20:43:37 +0000 Subject: [PATCH 10/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- machine_learning/neural_networks/cnn_mnist.py | 23 ++++++++++--------- .../neural_networks/fully_connected_mnist.py | 15 ++++++------ 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py index b6611a497253..4f9c2a342dd9 100644 --- a/machine_learning/neural_networks/cnn_mnist.py +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -12,7 +12,6 @@ """ - import tensorflow as tf from tensorflow.keras import layers, models from tensorflow.keras.datasets import mnist @@ -22,8 +21,8 @@ (X_train, y_train), (X_test, y_test) = mnist.load_data() # Normalize the pixel values (0 to 1) -X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255 -X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255 +X_train = X_train.reshape(-1, 28, 28, 1).astype("float32") / 255 +X_test = X_test.reshape(-1, 28, 28, 1).astype("float32") / 255 # Convert labels to one-hot encoding y_train = to_categorical(y_train, 10) @@ -33,38 +32,40 @@ model = models.Sequential() # 1st Convolutional Layer -model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) +model.add(layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.BatchNormalization()) # 2nd Convolutional Layer -model.add(layers.Conv2D(64, (3, 3), activation='relu')) +model.add(layers.Conv2D(64, (3, 3), activation="relu")) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.BatchNormalization()) # 3rd Convolutional Layer -model.add(layers.Conv2D(128, (3, 3), activation='relu')) +model.add(layers.Conv2D(128, (3, 3), activation="relu")) model.add(layers.BatchNormalization()) # Flattening the data before fully connected layers model.add(layers.Flatten()) # Fully Connected (Dense) Layer with Dropout for regularization -model.add(layers.Dense(128, activation='relu')) +model.add(layers.Dense(128, activation="relu")) model.add(layers.Dropout(0.5)) # Output Layer for classification -model.add(layers.Dense(10, activation='softmax')) +model.add(layers.Dense(10, activation="softmax")) # Compile the model -model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) +model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) # Display the model summary model.summary() # Train the model -history = model.fit(X_train, y_train, epochs=5, batch_size=32, validation_data=(X_test, y_test)) +history = model.fit( + X_train, y_train, epochs=5, batch_size=32, validation_data=(X_test, y_test) +) # Evaluate the model on test data test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2) -print(f'\nTest accuracy: {test_acc}') +print(f"\nTest accuracy: {test_acc}") diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py index f6bd7d07c048..a716043a4d2e 100644 --- a/machine_learning/neural_networks/fully_connected_mnist.py +++ 
b/machine_learning/neural_networks/fully_connected_mnist.py @@ -11,7 +11,6 @@ - Train the model, evaluate its accuracy and loss at each epoch, and predict sample outputs. """ - import tensorflow as tf from tensorflow.keras import layers, models import numpy as np @@ -35,26 +34,28 @@ model.add(layers.Flatten(input_shape=(28, 28))) # First hidden layer with 128 neurons and ReLU activation -model.add(layers.Dense(128, activation='relu')) +model.add(layers.Dense(128, activation="relu")) # Dropout layer to prevent overfitting (randomly drops 20% of neurons) model.add(layers.Dropout(0.2)) # Second hidden layer with 64 neurons and ReLU activation -model.add(layers.Dense(64, activation='relu')) +model.add(layers.Dense(64, activation="relu")) # Output layer with 10 neurons (one for each digit class 0-9), softmax for probabilities -model.add(layers.Dense(10, activation='softmax')) +model.add(layers.Dense(10, activation="softmax")) # Compile the model -model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) +model.compile( + optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"] +) # Train the model on the MNIST training data model.fit(X_train, y_train, epochs=5, batch_size=32) # Evaluate the model on the test set test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2) -print(f'\nTest accuracy: {test_acc}') +print(f"\nTest accuracy: {test_acc}") # Make a prediction on a random test image random_index = np.random.randint(0, len(X_test)) @@ -63,4 +64,4 @@ predicted_digit = np.argmax(prediction) # Print the predicted result and actual label -print(f'Predicted digit: {predicted_digit}, Actual digit: {y_test[random_index]}') +print(f"Predicted digit: {predicted_digit}, Actual digit: {y_test[random_index]}") From c004e54419024a9342eb59e98030ff7d90a79cc7 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 02:45:56 +0600 Subject: [PATCH 11/24] cnn_mnist.py: For the Convolutional Neural Network (CNN) model. --- machine_learning/neural_networks/cnn_mnist.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py index 4f9c2a342dd9..5a4bbe70dfeb 100644 --- a/machine_learning/neural_networks/cnn_mnist.py +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -1,9 +1,10 @@ """ Convolutional Neural Network (CNN) for MNIST Classification -Goal: This script builds a deep CNN to classify the MNIST dataset using TensorFlow and Keras. It leverages - convolutional layers for feature extraction and pooling layers for down-sampling, followed by fully connected layers - for classification. +Goal: This script builds a deep CNN to classify the MNIST dataset using + TensorFlow and Keras. It leverages convolutional layers for feature + extraction and pooling layers for down-sampling, followed by fully + connected layers for classification. Objectives: - Load and preprocess MNIST data (reshape for CNN input). 
@@ -12,7 +13,7 @@ """ -import tensorflow as tf +# import tensorflow as tf from tensorflow.keras import layers, models from tensorflow.keras.datasets import mnist from tensorflow.keras.utils import to_categorical @@ -56,7 +57,8 @@ model.add(layers.Dense(10, activation="softmax")) # Compile the model -model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) +model.compile(optimizer="adam", loss="categorical_crossentropy", + metrics=["accuracy"]) # Display the model summary model.summary() From 35b13cf831057c4078f76836318fb493fe2fa5fb Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 20:46:46 +0000 Subject: [PATCH 12/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- machine_learning/neural_networks/cnn_mnist.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py index 5a4bbe70dfeb..e4ef937b60bd 100644 --- a/machine_learning/neural_networks/cnn_mnist.py +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -57,8 +57,7 @@ model.add(layers.Dense(10, activation="softmax")) # Compile the model -model.compile(optimizer="adam", loss="categorical_crossentropy", - metrics=["accuracy"]) +model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) # Display the model summary model.summary() From 83e5295f760a03532f3503b389bf6f370cce3c79 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 02:48:19 +0600 Subject: [PATCH 13/24] fully_connected_mnist.py: For the basic fully connected neural network. --- .../neural_networks/fully_connected_mnist.py | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py index a716043a4d2e..30d951e1f67e 100644 --- a/machine_learning/neural_networks/fully_connected_mnist.py +++ b/machine_learning/neural_networks/fully_connected_mnist.py @@ -1,19 +1,24 @@ """ Fully Connected Neural Network for MNIST Classification -Goal: This script implements a fully connected feed-forward neural network using TensorFlow and Keras to classify the - MNIST dataset (28x28 grayscale images of handwritten digits from 0 to 9). The network has two hidden layers with ReLU - activations and dropout for regularization. +Goal: This script implements a fully connected feed-forward neural network + using TensorFlow and Keras to classify the MNIST dataset + (28x28 grayscale images of handwritten digits from 0 to 9). + The network has two hidden layers with ReLU activations and dropout + for regularization. Objectives: - Normalize and preprocess MNIST data. - Build a basic neural network with dense layers. -- Train the model, evaluate its accuracy and loss at each epoch, and predict sample outputs. +- Train the model, evaluate its accuracy and loss at each epoch, + and predict sample outputs. 
+ """ +import numpy as np import tensorflow as tf from tensorflow.keras import layers, models -import numpy as np + # Load the MNIST dataset from Keras mnist = tf.keras.datasets.mnist @@ -42,12 +47,14 @@ # Second hidden layer with 64 neurons and ReLU activation model.add(layers.Dense(64, activation="relu")) -# Output layer with 10 neurons (one for each digit class 0-9), softmax for probabilities +# Output layer with 10 neurons (one for each digit class 0-9), +# softmax for probabilities model.add(layers.Dense(10, activation="softmax")) # Compile the model model.compile( - optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"] + optimizer="adam", loss="sparse_categorical_crossentropy", + metrics=["accuracy"] ) # Train the model on the MNIST training data @@ -58,7 +65,8 @@ print(f"\nTest accuracy: {test_acc}") # Make a prediction on a random test image -random_index = np.random.randint(0, len(X_test)) +rng = np.random.default_rng() +random_index = rng.integers(0, len(X_test)) random_image = np.expand_dims(X_test[random_index], axis=0) prediction = model.predict(random_image) predicted_digit = np.argmax(prediction) From aa47df00f45579f088833677b4c86913d99df848 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 02:50:25 +0600 Subject: [PATCH 14/24] cnn_mnist.py: For the Convolutional Neural Network (CNN) model. --- machine_learning/neural_networks/cnn_mnist.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py index e4ef937b60bd..5a4bbe70dfeb 100644 --- a/machine_learning/neural_networks/cnn_mnist.py +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -57,7 +57,8 @@ model.add(layers.Dense(10, activation="softmax")) # Compile the model -model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) +model.compile(optimizer="adam", loss="categorical_crossentropy", + metrics=["accuracy"]) # Display the model summary model.summary() From 35ba5dffdfa27b4f90ce349d1c7e413ad47352a1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 18 Sep 2024 20:51:48 +0000 Subject: [PATCH 15/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- machine_learning/neural_networks/cnn_mnist.py | 3 +-- machine_learning/neural_networks/fully_connected_mnist.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py index 5a4bbe70dfeb..e4ef937b60bd 100644 --- a/machine_learning/neural_networks/cnn_mnist.py +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -57,8 +57,7 @@ model.add(layers.Dense(10, activation="softmax")) # Compile the model -model.compile(optimizer="adam", loss="categorical_crossentropy", - metrics=["accuracy"]) +model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) # Display the model summary model.summary() diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py index 30d951e1f67e..82dd0d75f2c7 100644 --- a/machine_learning/neural_networks/fully_connected_mnist.py +++ b/machine_learning/neural_networks/fully_connected_mnist.py @@ -53,8 +53,7 @@ # Compile the model model.compile( - optimizer="adam", loss="sparse_categorical_crossentropy", - metrics=["accuracy"] + optimizer="adam", 
loss="sparse_categorical_crossentropy", metrics=["accuracy"] ) # Train the model on the MNIST training data From 7d69b82bba5511467ee74b2f9204184aceea1cc1 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 03:01:18 +0600 Subject: [PATCH 16/24] fully_connected_mnist.py: For the basic fully connected neural network. --- machine_learning/neural_networks/fully_connected_mnist.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py index 30d951e1f67e..ac7d115af50e 100644 --- a/machine_learning/neural_networks/fully_connected_mnist.py +++ b/machine_learning/neural_networks/fully_connected_mnist.py @@ -15,11 +15,14 @@ """ +# Standard library imports +# (None in this case) + +# Third-party imports import numpy as np import tensorflow as tf from tensorflow.keras import layers, models - # Load the MNIST dataset from Keras mnist = tf.keras.datasets.mnist (X_train, y_train), (X_test, y_test) = mnist.load_data() From 2603c8ac3745581cb70a500295c4cd17deca6bb5 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 03:24:12 +0600 Subject: [PATCH 17/24] fully_connected_mnist.py: For the basic fully connected neural network. --- .../neural_networks/fully_connected_mnist.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/machine_learning/neural_networks/fully_connected_mnist.py b/machine_learning/neural_networks/fully_connected_mnist.py index 24da20de8717..1b7188c4c224 100644 --- a/machine_learning/neural_networks/fully_connected_mnist.py +++ b/machine_learning/neural_networks/fully_connected_mnist.py @@ -25,10 +25,10 @@ # Load the MNIST dataset from Keras mnist = tf.keras.datasets.mnist -(X_train, y_train), (X_test, y_test) = mnist.load_data() +(x_train, y_train), (x_test, y_test) = mnist.load_data() # Normalize the images from 0-255 to 0-1 by dividing by 255 -X_train, X_test = X_train / 255.0, X_test / 255.0 +x_train, x_test = x_train / 255.0, x_test / 255.0 # Print TensorFlow and Keras information print(f"TensorFlow Version: {tf.__version__}") @@ -60,16 +60,16 @@ ) # Train the model on the MNIST training data -model.fit(X_train, y_train, epochs=5, batch_size=32) +model.fit(x_train, y_train, epochs=5, batch_size=32) # Evaluate the model on the test set -test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2) +test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2) print(f"\nTest accuracy: {test_acc}") # Make a prediction on a random test image rng = np.random.default_rng() -random_index = rng.integers(0, len(X_test)) -random_image = np.expand_dims(X_test[random_index], axis=0) +random_index = rng.integers(0, len(x_test)) +random_image = np.expand_dims(x_test[random_index], axis=0) prediction = model.predict(random_image) predicted_digit = np.argmax(prediction) From 0d9804abbaa2a048da4554fc2f26ccfe8df8c684 Mon Sep 17 00:00:00 2001 From: asif Date: Thu, 19 Sep 2024 03:34:54 +0600 Subject: [PATCH 18/24] cnn_mnist.py: For the Convolutional Neural Network (CNN) model. 
--- machine_learning/neural_networks/cnn_mnist.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py index e4ef937b60bd..60ca2f10850b 100644 --- a/machine_learning/neural_networks/cnn_mnist.py +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -19,11 +19,11 @@ from tensorflow.keras.utils import to_categorical # Load and preprocess the MNIST data -(X_train, y_train), (X_test, y_test) = mnist.load_data() +(x_train, y_train), (x_test, y_test) = mnist.load_data() # Normalize the pixel values (0 to 1) -X_train = X_train.reshape(-1, 28, 28, 1).astype("float32") / 255 -X_test = X_test.reshape(-1, 28, 28, 1).astype("float32") / 255 +x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255 +x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255 # Convert labels to one-hot encoding y_train = to_categorical(y_train, 10) @@ -64,9 +64,9 @@ # Train the model history = model.fit( - X_train, y_train, epochs=5, batch_size=32, validation_data=(X_test, y_test) + x_train, y_train, epochs=5, batch_size=32, validation_data=(x_test, y_test) ) # Evaluate the model on test data -test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2) +test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2) print(f"\nTest accuracy: {test_acc}") From 037b0073632ca959b5a1ae90a6173ddb37c8e4a3 Mon Sep 17 00:00:00 2001 From: asif Date: Fri, 20 Sep 2024 00:21:14 +0600 Subject: [PATCH 19/24] fixed size sliding window technique --- .../sliding_window/fixed_size_window.py | 59 +++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 data_structures/sliding_window/fixed_size_window.py diff --git a/data_structures/sliding_window/fixed_size_window.py b/data_structures/sliding_window/fixed_size_window.py new file mode 100644 index 000000000000..20db7846d395 --- /dev/null +++ b/data_structures/sliding_window/fixed_size_window.py @@ -0,0 +1,59 @@ +""" +Fixed-Size Sliding Window Algorithm + +This module contains an implementation of the fixed-size +sliding window algorithm with doctests. + +Examples: + >>> max_sum_subarray([1, 2, 3, 4, 5], 3) + 12 + + >>> max_sum_subarray([2, 1, 5, 1, 3, 2], 4) + 11 +""" + + +def max_sum_subarray(arr: list[int], k: int) -> int: + """ + Find the maximum sum of any subarray of size `k`. + + Args: + arr: The input array of integers. + k: The size of the subarray. + + Returns: + The maximum sum of a subarray of size `k`. + + Raises: + ValueError: If the length of the array is less than `k`. + + Examples: + >>> max_sum_subarray([1, 2, 3, 4, 5], 3) + 12 + + >>> max_sum_subarray([2, 1, 5, 1, 3, 2], 4) + 11 + + >>> max_sum_subarray([1, 2], 3) + Traceback (most recent call last): + ... + ValueError: Array length must be at least as large as the window size. 
+ """ + if len(arr) < k: + raise ValueError("Array length must be at least as large as the window size.") + + max_sum = float('-inf') + window_sum = sum(arr[:k]) + max_sum = max(max_sum, window_sum) + + for i in range(len(arr) - k): + window_sum = window_sum - arr[i] + arr[i + k] + max_sum = max(max_sum, window_sum) + + return max_sum + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 947ff7f1ced0828cc768639d1592188405352d6f Mon Sep 17 00:00:00 2001 From: asif Date: Fri, 20 Sep 2024 00:21:28 +0600 Subject: [PATCH 20/24] Add __init__.py file --- data_structures/sliding_window/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 data_structures/sliding_window/__init__.py diff --git a/data_structures/sliding_window/__init__.py b/data_structures/sliding_window/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 From 8d068c835d76522763335f296804d3066dd850f3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 18:22:15 +0000 Subject: [PATCH 21/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- data_structures/sliding_window/fixed_size_window.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/sliding_window/fixed_size_window.py b/data_structures/sliding_window/fixed_size_window.py index 20db7846d395..42a80b7bc9c0 100644 --- a/data_structures/sliding_window/fixed_size_window.py +++ b/data_structures/sliding_window/fixed_size_window.py @@ -42,7 +42,7 @@ def max_sum_subarray(arr: list[int], k: int) -> int: if len(arr) < k: raise ValueError("Array length must be at least as large as the window size.") - max_sum = float('-inf') + max_sum = float("-inf") window_sum = sum(arr[:k]) max_sum = max(max_sum, window_sum) From 33b6162a7320af1e608753f795b4f04e931448be Mon Sep 17 00:00:00 2001 From: asif Date: Fri, 20 Sep 2024 00:34:16 +0600 Subject: [PATCH 22/24] Add __init__.py file --- data_structures/sliding_window/__init__.py | 0 .../sliding_window/fixed_size_window.py | 59 ------------------- 2 files changed, 59 deletions(-) delete mode 100644 data_structures/sliding_window/__init__.py delete mode 100644 data_structures/sliding_window/fixed_size_window.py diff --git a/data_structures/sliding_window/__init__.py b/data_structures/sliding_window/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/data_structures/sliding_window/fixed_size_window.py b/data_structures/sliding_window/fixed_size_window.py deleted file mode 100644 index 20db7846d395..000000000000 --- a/data_structures/sliding_window/fixed_size_window.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Fixed-Size Sliding Window Algorithm - -This module contains an implementation of the fixed-size -sliding window algorithm with doctests. - -Examples: - >>> max_sum_subarray([1, 2, 3, 4, 5], 3) - 12 - - >>> max_sum_subarray([2, 1, 5, 1, 3, 2], 4) - 11 -""" - - -def max_sum_subarray(arr: list[int], k: int) -> int: - """ - Find the maximum sum of any subarray of size `k`. - - Args: - arr: The input array of integers. - k: The size of the subarray. - - Returns: - The maximum sum of a subarray of size `k`. - - Raises: - ValueError: If the length of the array is less than `k`. - - Examples: - >>> max_sum_subarray([1, 2, 3, 4, 5], 3) - 12 - - >>> max_sum_subarray([2, 1, 5, 1, 3, 2], 4) - 11 - - >>> max_sum_subarray([1, 2], 3) - Traceback (most recent call last): - ... 
- ValueError: Array length must be at least as large as the window size. - """ - if len(arr) < k: - raise ValueError("Array length must be at least as large as the window size.") - - max_sum = float('-inf') - window_sum = sum(arr[:k]) - max_sum = max(max_sum, window_sum) - - for i in range(len(arr) - k): - window_sum = window_sum - arr[i] + arr[i + k] - max_sum = max(max_sum, window_sum) - - return max_sum - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 2cae46588dd0ea9bd6a2022706079ac86f8265ae Mon Sep 17 00:00:00 2001 From: Asif Ahmed <121171874+asRot0@users.noreply.github.com> Date: Fri, 20 Sep 2024 00:35:45 +0600 Subject: [PATCH 23/24] Delete data_structures/sliding_window directory --- data_structures/sliding_window/__init__.py | 0 .../sliding_window/fixed_size_window.py | 59 ------------------- 2 files changed, 59 deletions(-) delete mode 100644 data_structures/sliding_window/__init__.py delete mode 100644 data_structures/sliding_window/fixed_size_window.py diff --git a/data_structures/sliding_window/__init__.py b/data_structures/sliding_window/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/data_structures/sliding_window/fixed_size_window.py b/data_structures/sliding_window/fixed_size_window.py deleted file mode 100644 index 42a80b7bc9c0..000000000000 --- a/data_structures/sliding_window/fixed_size_window.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Fixed-Size Sliding Window Algorithm - -This module contains an implementation of the fixed-size -sliding window algorithm with doctests. - -Examples: - >>> max_sum_subarray([1, 2, 3, 4, 5], 3) - 12 - - >>> max_sum_subarray([2, 1, 5, 1, 3, 2], 4) - 11 -""" - - -def max_sum_subarray(arr: list[int], k: int) -> int: - """ - Find the maximum sum of any subarray of size `k`. - - Args: - arr: The input array of integers. - k: The size of the subarray. - - Returns: - The maximum sum of a subarray of size `k`. - - Raises: - ValueError: If the length of the array is less than `k`. - - Examples: - >>> max_sum_subarray([1, 2, 3, 4, 5], 3) - 12 - - >>> max_sum_subarray([2, 1, 5, 1, 3, 2], 4) - 11 - - >>> max_sum_subarray([1, 2], 3) - Traceback (most recent call last): - ... - ValueError: Array length must be at least as large as the window size. - """ - if len(arr) < k: - raise ValueError("Array length must be at least as large as the window size.") - - max_sum = float("-inf") - window_sum = sum(arr[:k]) - max_sum = max(max_sum, window_sum) - - for i in range(len(arr) - k): - window_sum = window_sum - arr[i] + arr[i + k] - max_sum = max(max_sum, window_sum) - - return max_sum - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 00f7f7cf536476fd4e47bd314b07699a303013e0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 19 Sep 2024 18:39:41 +0000 Subject: [PATCH 24/24] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- machine_learning/neural_networks/cnn_mnist.py | 1 - 1 file changed, 1 deletion(-) diff --git a/machine_learning/neural_networks/cnn_mnist.py b/machine_learning/neural_networks/cnn_mnist.py index 1b2a8c48233c..60ca2f10850b 100644 --- a/machine_learning/neural_networks/cnn_mnist.py +++ b/machine_learning/neural_networks/cnn_mnist.py @@ -70,4 +70,3 @@ # Evaluate the model on test data test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2) print(f"\nTest accuracy: {test_acc}") -
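
Note on the loss functions used across the two scripts: fully_connected_mnist.py keeps the MNIST labels as integers and therefore compiles with sparse_categorical_crossentropy, while cnn_mnist.py one-hot encodes the labels via to_categorical and compiles with categorical_crossentropy. A minimal sketch of that distinction follows (the sample label values below are illustrative and are not taken from the patches):

    import numpy as np
    from tensorflow.keras.utils import to_categorical

    # Integer class labels, e.g. the digits 3 and 7
    # -> pair with loss="sparse_categorical_crossentropy"
    y_int = np.array([3, 7])

    # One-hot encoded labels of shape (2, 10)
    # -> pair with loss="categorical_crossentropy"
    y_onehot = to_categorical(y_int, 10)

    print(y_int.shape)     # (2,)
    print(y_onehot.shape)  # (2, 10)
    print(y_onehot[0])     # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]

Both losses optimize the same objective; only the expected label format differs, which is why the CNN script converts y_train and y_test with to_categorical while the fully connected script passes the raw integer labels straight to model.fit.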