diff --git a/README.md b/README.md
index 5b8bdf3..b4c16e1 100644
--- a/README.md
+++ b/README.md
@@ -19,6 +19,7 @@ A dedicated section for constructing and deploying data pipelines and workflows
 
 Before you begin, ensure you have met the following requirements:
 - **Snowflake Account**: Ensure you have an active Snowflake account in a region where Cortex functionalities are supported. For detailed information, refer to the [Availability Region documentation](https://docs.snowflake.com/en/user-guide/snowflake-cortex/llm-functions#availability).
+- **Private Preview Access**: To try private preview models in Snowflake, contact your Snowflake account team to request access to the private preview feature.
 - **Role Permissions**: ACCOUNTADMIN or equivalent role with permissions to create:
   - Stages
   - Databases
diff --git a/src/build.py b/src/build.py
index 5322c11..ec33e9b 100644
--- a/src/build.py
+++ b/src/build.py
@@ -104,7 +104,10 @@ def get_functionality_settings(functionality, config):
     defaults = config["default_settings"]
 
     if functionality == "Complete":
-        settings['model'] = st.selectbox("Change chatbot model:", defaults['model'])
+        is_private_preview_model_shown = st.checkbox("Show private preview models", value=False)
+        settings['model'] = st.selectbox("Change chatbot model:", defaults[
+            "private_preview_models" if is_private_preview_model_shown else "model"
+        ])
         settings['temperature'] = st.slider("Temperature:", defaults['temperature_min'], defaults['temperature_max'], defaults['temperature'])
         settings['max_tokens'] = st.slider("Max Tokens:", defaults['max_tokens_min'], defaults['max_tokens_max'], defaults['max_tokens'])
         settings['guardrails'] = st.checkbox("Enable Guardrails", value=defaults['guardrails'])
diff --git a/src/playground.py b/src/playground.py
index c73b7b7..038cab3 100644
--- a/src/playground.py
+++ b/src/playground.py
@@ -64,7 +64,10 @@ def get_functionality_settings(functionality, config):
     defaults = config["default_settings"]
 
     if functionality == "Complete":
-        settings['model'] = st.selectbox("Change chatbot model:", defaults['model'])
+        is_private_preview_model_shown = st.checkbox("Show private preview models", value=False)
+        settings['model'] = st.selectbox("Change chatbot model:", defaults[
+            "private_preview_models" if is_private_preview_model_shown else "model"
+        ])
         settings['temperature'] = st.slider("Temperature:", defaults['temperature_min'], defaults['temperature_max'], defaults['temperature'])
         settings['max_tokens'] = st.slider("Max Tokens:", defaults['max_tokens_min'], defaults['max_tokens_max'], defaults['max_tokens'])
         settings['guardrails'] = st.checkbox("Enable Guardrails", value=defaults['guardrails'])
diff --git a/src/rag.py b/src/rag.py
index d0bb9eb..8096c59 100644
--- a/src/rag.py
+++ b/src/rag.py
@@ -12,6 +12,9 @@
 with open(config_path, "r") as f:
     config = json.load(f)
 
+settings = {}
+defaults = config["default_settings"]
+
 def display_rag(session):
     """
     Displays the Retrieval-Augmented Generation (RAG) interface in Streamlit.
@@ -134,9 +137,12 @@ def display_rag(session):
     selected_column = st.selectbox("Select Column", ["Vector_Embeddings"])
     #st.subheader("Select Model, Embedding Type and Emdedding Model")
     st.info("Use the same embedding type and model consistently when creating embeddings.")
+    is_private_preview_model_shown = st.checkbox("Show private preview models", value=False)
     col1,col2,col3 = st.columns(3)
     with col1:
-        selected_model = st.selectbox("Select Model", config["default_settings"]["model"])
+        selected_model = st.selectbox("Select Model", config["default_settings"][
+            "private_preview_models" if is_private_preview_model_shown else "model"
+        ])
     with col2:
         embedding_type = st.selectbox("Select Embeddings", config["default_settings"]["embeddings"].keys())
     with col3:
diff --git a/src/settings_config.json b/src/settings_config.json
index ee03ed7..2d232c7 100644
--- a/src/settings_config.json
+++ b/src/settings_config.json
@@ -19,6 +19,7 @@
             "mixtral-8x7b", "reka-core", "reka-flash", "snowflake-arctic"
         ],
         "fine_tune_models" : ["llama3-8b","llama3-70b","llama3.1-8b","llama3.1-70b","mistral-7b","mixtral-8x7b"],
+        "private_preview_models": ["deepseek-r1"],
        "temperature_min": 0.0,
        "temperature_max": 1.0,
        "temperature": 0.7,
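
For context, a minimal standalone sketch (not part of the patch) of the pattern these hunks apply: a Streamlit checkbox decides whether the model picker reads the generally available "model" list or the new "private_preview_models" list from settings_config.json. The config path, widget labels, and variable names below are illustrative assumptions, not the repo's exact code.

    import json
    import streamlit as st

    # Load the shared settings file (path assumed for this example).
    with open("src/settings_config.json", "r") as f:
        config = json.load(f)

    defaults = config["default_settings"]

    # Toggle between the GA model list and the private preview list.
    show_preview = st.checkbox("Show private preview models", value=False)
    model_key = "private_preview_models" if show_preview else "model"
    selected_model = st.selectbox("Change chatbot model:", defaults[model_key])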