From 020d406b666e80c247153f83538ef3448d95c3db Mon Sep 17 00:00:00 2001 From: Jetha Chan Date: Thu, 27 Jun 2024 15:50:08 +0900 Subject: [PATCH] Run the quickstarts through nbfmt, add note re: large GPU --- Gemma/Keras_Gemma_2_Quickstart.ipynb | 44 ++++----------- Gemma/Keras_Gemma_2_Quickstart_Chat.ipynb | 65 +++++------------------ 2 files changed, 21 insertions(+), 88 deletions(-) diff --git a/Gemma/Keras_Gemma_2_Quickstart.ipynb b/Gemma/Keras_Gemma_2_Quickstart.ipynb index 856c260..a4f7bab 100644 --- a/Gemma/Keras_Gemma_2_Quickstart.ipynb +++ b/Gemma/Keras_Gemma_2_Quickstart.ipynb @@ -14,8 +14,7 @@ "execution_count": null, "metadata": { "cellView": "form", - "id": "tuOe1ymfHZPu", - "tags": [] + "id": "tuOe1ymfHZPu" }, "outputs": [], "source": [ @@ -42,10 +41,12 @@ "- To be added to a private github repo for Gemma.\n", "- To be added to a private Kaggle model for weights.\n", "\n", + "Note that you will need a large GPU (e.g. A100) to run this as well.\n", + "\n", "General Keras reading:\n", "- [Getting started with Keras](https://keras.io/getting_started/)\n", "- [Getting started with KerasNLP](https://keras.io/guides/keras_nlp/getting_started/)\n", - "- [Generation and fine-tuning guide for GPT2](https://keras.io/guides/keras_nlp/getting_started/)", + "- [Generation and fine-tuning guide for GPT2](https://keras.io/guides/keras_nlp/getting_started/)\n", "\n", "\n", "
\n", @@ -91,11 +92,7 @@ "cell_type": "code", "execution_count": 2, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "bMboT70Xop8G", - "outputId": "f515845e-a15d-4c52-ace4-56944d48045b" + "id": "bMboT70Xop8G" }, "outputs": [ { @@ -157,12 +154,7 @@ "cell_type": "code", "execution_count": 9, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 605 - }, - "id": "yygIK9DEIldp", - "outputId": "93e3ca01-4770-4e27-fc7f-fb86a6b83bae" + "id": "yygIK9DEIldp" }, "outputs": [ { @@ -317,12 +309,7 @@ "cell_type": "code", "execution_count": 11, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "id": "aae5GHrdpj2_", - "outputId": "fa242464-a217-4a7b-901e-27b00d0e995b" + "id": "aae5GHrdpj2_" }, "outputs": [ { @@ -342,30 +329,17 @@ "source": [ "gemma_lm.generate(\"What is the meaning of life?\", max_length=32)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Sb5uZ0BLv8Ot" - }, - "outputs": [], - "source": [] } ], "metadata": { "accelerator": "GPU", "colab": { - "gpuType": "A100", - "machine_shape": "hm", - "provenance": [] + "name": "Keras_Gemma_2_Quickstart.ipynb", + "toc_visible": true }, "kernelspec": { "display_name": "Python 3", "name": "python3" - }, - "language_info": { - "name": "python" } }, "nbformat": 4, diff --git a/Gemma/Keras_Gemma_2_Quickstart_Chat.ipynb b/Gemma/Keras_Gemma_2_Quickstart_Chat.ipynb index c581a37..c02d0ec 100644 --- a/Gemma/Keras_Gemma_2_Quickstart_Chat.ipynb +++ b/Gemma/Keras_Gemma_2_Quickstart_Chat.ipynb @@ -14,8 +14,7 @@ "execution_count": null, "metadata": { "cellView": "form", - "id": "tuOe1ymfHZPu", - "tags": [] + "id": "tuOe1ymfHZPu" }, "outputs": [], "source": [ @@ -42,10 +41,12 @@ "- To be added to a private github repo for Gemma.\n", "- To be added to a private Kaggle model for weights.\n", "\n", + "Note that you will need a large GPU (e.g. A100) to run this as well.\n", + "\n", "General Keras reading:\n", "- [Getting started with Keras](https://keras.io/getting_started/)\n", "- [Getting started with KerasNLP](https://keras.io/guides/keras_nlp/getting_started/)\n", - "- [Generation and fine-tuning guide for GPT2](https://keras.io/guides/keras_nlp/getting_started/)", + "- [Generation and fine-tuning guide for GPT2](https://keras.io/guides/keras_nlp/getting_started/)\n", "\n", "\n", "
\n", @@ -147,12 +148,7 @@ "cell_type": "code", "execution_count": 5, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 605 - }, - "id": "yygIK9DEIldp", - "outputId": "26b639b4-0f0c-47c0-8418-237bf14ebbc9" + "id": "yygIK9DEIldp" }, "outputs": [ { @@ -307,12 +303,7 @@ "cell_type": "code", "execution_count": 6, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 347 - }, - "id": "aae5GHrdpj2_", - "outputId": "6c2b7977-5bae-4cfb-fa97-4d56fd8dd1d1" + "id": "aae5GHrdpj2_" }, "outputs": [ { @@ -408,12 +399,7 @@ "cell_type": "code", "execution_count": 8, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 907 - }, - "id": "shfCZDFPKghT", - "outputId": "602c6b74-d67a-4281-ae79-be553d37ec28" + "id": "shfCZDFPKghT" }, "outputs": [ { @@ -502,12 +488,7 @@ "cell_type": "code", "execution_count": 9, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "id": "bLxtlhjtLkWQ", - "outputId": "665e5de2-9b1a-4ed4-c11f-e6e1d82599d9" + "id": "bLxtlhjtLkWQ" }, "outputs": [ { @@ -632,12 +613,7 @@ "cell_type": "code", "execution_count": 10, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "id": "tfvgl4sC9g2p", - "outputId": "89326571-90d4-4fc5-db2f-0295831f2581" + "id": "tfvgl4sC9g2p" }, "outputs": [ { @@ -864,11 +840,7 @@ "cell_type": "code", "execution_count": 11, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "P2GjAVybBez3", - "outputId": "2c6d3d1b-fa64-40ab-827b-a40072cbbf8a" + "id": "P2GjAVybBez3" }, "outputs": [ { @@ -1263,30 +1235,17 @@ "source": [ "chat.show_history()" ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "id": "z5jMVU0Ymwxn" - }, - "outputs": [], - "source": [] } ], "metadata": { "accelerator": "GPU", "colab": { - "gpuType": "A100", - "machine_shape": "hm", - "provenance": [] + "name": "Keras_Gemma_2_Quickstart_Chat.ipynb", + "toc_visible": true }, "kernelspec": { "display_name": "Python 3", "name": "python3" - }, - "language_info": { - "name": "python" } }, "nbformat": 4,
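
For reference, the nbfmt pass named in the subject line is what produces the metadata cleanup visible in both diffs above (dropped Colab output ids, provenance fields, and empty trailing cells). Below is a minimal sketch of how the two quickstarts could be re-run through the formatter, assuming "nbfmt" here refers to the notebook formatter shipped with tensorflow-docs (installed via pip install tensorflow-docs); the module path and behavior notes are assumptions for illustration, not part of this patch.

    # Sketch: normalize the two Gemma quickstarts with the tensorflow-docs
    # notebook formatter (assumed to be the "nbfmt" named in the commit subject).
    import subprocess
    import sys

    NOTEBOOKS = [
        "Gemma/Keras_Gemma_2_Quickstart.ipynb",
        "Gemma/Keras_Gemma_2_Quickstart_Chat.ipynb",
    ]

    for nb in NOTEBOOKS:
        # The formatter rewrites each notebook in place, stripping transient
        # Colab-generated metadata of the kind removed in the diffs above.
        subprocess.run(
            [sys.executable, "-m", "tensorflow_docs.tools.nbfmt", nb],
            check=True,
        )

Running the formatter before committing keeps future diffs limited to intentional content changes (here, the added large-GPU note) rather than Colab-generated metadata.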