Commit b746032
models.ts cleaned up working on azure
Suchit25 committed Jul 3, 2024
1 parent 119de2f commit b746032
Showing 7 changed files with 91 additions and 123 deletions.
16 changes: 16 additions & 0 deletions package-lock.json

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions package.json
@@ -12,6 +12,7 @@
"analyze": "cross-env ANALYZE=true next build"
},
"dependencies": {
"@ai-sdk/openai": "^0.0.34",
"@aws-sdk/client-s3": "^3.338.0",
"@aws-sdk/s3-presigned-post": "^3.338.0",
"@aws-sdk/s3-request-presigner": "^3.338.0",
145 changes: 47 additions & 98 deletions src/pages/api/models.ts
@@ -8,7 +8,7 @@ import {

import { OpenAIModel, OpenAIModelID, OpenAIModels } from '@/types/openai'
import { decrypt, isEncrypted } from '~/utils/crypto'
import { LLMProvider, ProviderNames } from '~/types/LLMProvider'
import { LLMProvider, ProviderNames, SupportedModels } from '~/types/LLMProvider'
import { getOllamaModels, runOllamaChat } from '~/utils/modelProviders/ollama'
import { getOpenAIModels } from '~/utils/modelProviders/openai'
import { getAzureModels } from '~/utils/modelProviders/azure'
@@ -17,16 +17,19 @@ import { WebLLMModels, WebllmModel } from '~/utils/modelProviders/WebLLM'
export const config = {
runtime: 'edge',
}

// is this what gives me the JSON object? let me print to find out
const handler = async (req: Request): Promise<Response> => {
console.log('in handler')
let apiKey = ''
let apiType = OPENAI_API_TYPE
let endpoint = OPENAI_API_HOST
console.log('this is what request is', req)
try {
const { key } = (await req.json()) as {
key: string

}
console.log('key', key)

// Eventually we'll use this. For now, there's no API Key for Ollama
const ollamaProvider: LLMProvider = {
@@ -37,39 +37,62 @@ const handler = async (req: Request): Promise<Response> => {

const OpenAIProvider: LLMProvider = {
provider: ProviderNames.OpenAI,
enabled: true,
apiKey: process.env.OPENAI_API_KEY,
baseUrl: 'https://ollama.ncsa.ai/api/tags',

}

// next task is to add the proper providers to Azure, then from there
// add streaming to Azure
const AzureProvider: LLMProvider = {
provider: ProviderNames.Azure,
enabled: true,
apiKey: apiKey,
baseUrl: '',

AzureKey: 'b1a402d721154a97a4eeaa61200eb93f',

AzureDeployment: 'gpt-35-turbo-16k',

//baseUrl: '',
//models?: SupportedModels
// endpoint, deployment, and API key
AzureEndpoint: 'https://uiuc-chat-canada-east.openai.azure.com/'

}

const llmProviderKeys: LLMProvider[] = [ollamaProvider, OpenAIProvider]
// I need input providers for all the models and then return the list from each provider
for(let i in llmProviderKeys) {
// this is to print the model type for all providers in llmProviderKeys, just a general test
let totalModels: SupportedModels[] = []
for(const provider of llmProviderKeys) {
if(provider.provider == 'Ollama') {
// 1. Call An endpoint to check what Ollama models are available.
//console.log('entering ollama')
const ollamaModels = await getOllamaModels(ollamaProvider)
totalModels.push(ollamaModels)
//console.log('Ollama Models in models.ts: ', ollamaModels)


}
else if(provider.provider == 'OpenAI') {
//2. call an endpoint to check which OpenAI models are available
//console.log('check if it got out of ollama fetch to openai')
const openAIModels = await getOpenAIModels(OpenAIProvider)
totalModels.push(openAIModels)
//console.log('OpenAi models.ts: ', openAIModels)
}
else {
continue
}

}
// 1. Call An endpoint to check what Ollama models are available.
console.log('entering ollama')
const ollamaModels = await getOllamaModels(ollamaProvider)
console.log('Ollama Models in models.ts: ', ollamaModels)

//2. call an endpoint to check which openai modle available
console.log('check if it got out of ollama fetch to openai')
const openAIModels = await getOpenAIModels(OpenAIProvider)
console.log('OpenAi models.ts: ', openAIModels)

console.log('total models available', totalModels)

//3. call endpoint for azure
console.log('check if azure models fetch')
const azureOpenaiModels = await getAzureModels(AzureProvider)
//console.log('check if azure models fetch')
//const azureOpenaiModels = await getAzureModels(AzureProvider)

// Test chat function
const ret = await runOllamaChat()
@@ -89,87 +89,115 @@ const handler = async (req: Request): Promise<Response> => {
apiKey = decryptedText as string
// console.log('models.ts Decrypted api key: ', apiKey)
}
// console.log('models.ts Final openai key: ', apiKey)

if (apiKey && !apiKey.startsWith('sk-')) {
// console.log('setting azure variables')
// have to figure out what tyeps of keys fit with the users api key and see which ones are available is enabled flag.
// add in new stuff here to get beginning of new providers to check start name of each model
apiType = 'azure'
endpoint = process.env.AZURE_OPENAI_ENDPOINT || OPENAI_API_HOST
}

if (!apiKey) {
return new Response('Warning: OpenAI Key was not found', { status: 400 })
}

let url = `${endpoint}/v1/models`
if (apiType === 'azure') {
url = `${endpoint}/openai/deployments?api-version=${OPENAI_API_VERSION}`
}

const response = await fetch(url, {
headers: {
'Content-Type': 'application/json',
...(apiType === 'openai' && {
Authorization: `Bearer ${apiKey}`,
}),
...(apiType === 'azure' && {
'api-key': `${apiKey}`,
}),
...(apiType === 'openai' &&
OPENAI_ORGANIZATION && {
'OpenAI-Organization': OPENAI_ORGANIZATION,
}),
},
})

if (response.status === 401) {
return new Response(response.body, {
status: 500,
headers: response.headers,
})
} else if (response.status !== 200) {
console.error(
`OpenAI API returned an error ${
response.status
}: ${await response.text()}`,
)
throw new Error('OpenAI API returned an error')
}

const json = await response.json()

const uniqueModels: string[] = Array.from(
new Set(json.data.map((model: any) => model.id)),
)

const models: OpenAIModel[] = uniqueModels
.map((modelId: string) => {
const model = json.data.find((m: any) => m.id === modelId)
if (!model) return undefined

for (const [key, value] of Object.entries(OpenAIModelID)) {
if (value === model.id) {
return {
id: model.id,
name: OpenAIModels[value].name,
tokenLimit: OpenAIModels[value].tokenLimit,
}
}
}
return undefined
})
.filter((model): model is OpenAIModel => model !== undefined)

const finalModels = [
...models,
...ollamaModels,
...Object.values(WebLLMModels),
]
console.log('Final combined model list:', finalModels)

return new Response(JSON.stringify(finalModels), { status: 200 })
console.log('models.ts Final openai key: ', apiKey)
// this is my attempt at simplifying what is below by creating if statements for each model type

return new Response(JSON.stringify(totalModels), { status: 200 })
} catch (error) {
console.error(error)
return new Response('Error', { status: 500 })
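With this change the handler builds a totalModels list by looping over the configured providers and returns it as JSON. A minimal client-side sketch of calling the endpoint, assuming the file is served at /api/models (standard Next.js pages/api routing) and that the caller passes the user's OpenAI key in the POST body, as the handler expects; the function name and error handling are illustrative, not from the repo:

// Sketch: client-side call to the models endpoint above.
// Assumes the route is served at /api/models (Next.js pages/api convention).
const fetchAvailableModels = async (openAIKey: string): Promise<unknown[]> => {
  const response = await fetch('/api/models', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ key: openAIKey }),
  })
  if (!response.ok) {
    throw new Error(`models endpoint returned ${response.status}`)
  }
  // With this commit the endpoint responds with the totalModels array:
  // one entry per enabled provider, each entry a list of that provider's models.
  return (await response.json()) as unknown[]
}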
2 changes: 1 addition & 1 deletion src/types/LLMProvider.ts
@@ -12,7 +12,7 @@ export type SupportedModels = OllamaModel[] | OpenAIModel[]
export interface LLMProvider {
provider: ProviderNames
enabled: boolean
baseUrl: string
baseUrl?: string
apiKey?: string
models?: SupportedModels
AzureEndpoint?: string
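Making baseUrl optional lets providers that are configured through other fields, like the Azure endpoint and deployment used in models.ts, omit it. A small sketch of the two styles; the Ollama enum member and the AzureDeployment field are assumptions based on how they are used elsewhere in this commit:

// Sketch: two provider configurations under the relaxed interface.
import { LLMProvider, ProviderNames } from '~/types/LLMProvider'

// URL-based provider: still supplies baseUrl.
const ollamaExample: LLMProvider = {
  provider: ProviderNames.Ollama, // assumed enum member, matching the 'Ollama' check in models.ts
  enabled: true,
  baseUrl: 'https://ollama.ncsa.ai/api/tags',
}

// Azure-style provider: no baseUrl, configured via Azure-specific fields.
const azureExample: LLMProvider = {
  provider: ProviderNames.Azure,
  enabled: true,
  apiKey: process.env.AZURE_OPENAI_KEY, // hypothetical env var, not defined in this commit
  AzureEndpoint: 'https://uiuc-chat-canada-east.openai.azure.com/',
  AzureDeployment: 'gpt-35-turbo-16k', // assumed to be declared on LLMProvider, as models.ts sets it
}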
43 changes: 21 additions & 22 deletions src/utils/modelProviders/azure.ts
@@ -11,6 +11,8 @@ import { decrypt, isEncrypted } from '~/utils/crypto'
import { LLMProvider, ProviderNames } from '~/types/LLMProvider'
import { getOllamaModels, runOllamaChat } from '~/utils/modelProviders/ollama'
//import { VercelAISDK } from 'vercel-ai-sdk'

import { openai } from '@ai-sdk/openai';
export const config = {
runtime: 'edge',
}
@@ -20,28 +20,25 @@ export interface AzureModel {
name: string
tokenLimit: number
}
import { CoreMessage, streamText } from 'ai';


// active model will be passed in from the front-end; just hard-code for now
// just make it a standard function for testing without a POST handler

//export async function POST(req: Request) {
export async function runAzure(messages: CoreMessage[], AzureProvider: LLMProvider, activeModel: string) {
//const { messages, AzureProvider, activeModel }: { messages: CoreMessage[], AzureProvider: LLMProvider, activeModel: string } = await req.json();

const result = await streamText({
model: openai(activeModel), // NOTE: still routed through the default OpenAI provider, not the Azure deployment
system: 'You are a helpful assistant.',
messages,
});

return result.toAIStreamResponse();
}

/*
export const runAzureChat = async () => {
console.log('In azure RunAzureChat function')
const ollama = createAzure({
// custom settings
baseURL: 'https://ollama.ncsa.ai/api',
})
console.log('Right before calling fetch')
const result = await generateText({
maxTokens: 50,
model: ollama('llama3:8b'),
prompt: 'Invent a new holiday and describe its traditions.',
})
console.log(
'generateText result: ---------------------------------------------',
)
console.log(result.text)
console.log('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
return result.text

// This should work, but we're getting JSON Parse errors.
// const result = await streamText({
@@ -83,7 +82,7 @@ export const runAzureChat = async () => {
// TODO: Check out the server example for how to handle streaming responses
// https://sdk.vercel.ai/examples/next-app/chat/stream-chat-completion#server
}
*/



export const getAzureModels = async (AzureProvider: LLMProvider): Promise<AzureModel[]> => {
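The commit leaves runAzure on the default OpenAI provider; the TODO in models.ts ("add streaming to azure") points at wiring the Azure deployment in. A hedged sketch of what that could look like with the Vercel AI SDK's Azure provider; the @ai-sdk/azure package is not in this commit's package.json, and the resource name and deployment are taken from the values hard-coded in models.ts:

// Sketch: streaming through the Azure deployment instead of openai().
// Assumes @ai-sdk/azure is installed (not yet in package.json) and that the
// provider object carries the Azure API key as set in models.ts.
import { createAzure } from '@ai-sdk/azure'
import { CoreMessage, streamText } from 'ai'
import { LLMProvider } from '~/types/LLMProvider'

export async function runAzureStream(messages: CoreMessage[], AzureProvider: LLMProvider) {
  const azure = createAzure({
    // resourceName is the subdomain of the AzureEndpoint, e.g. 'uiuc-chat-canada-east'
    resourceName: 'uiuc-chat-canada-east',
    apiKey: AzureProvider.apiKey,
  })

  const result = await streamText({
    model: azure('gpt-35-turbo-16k'), // deployment name from models.ts; could be read from the provider object
    system: 'You are a helpful assistant.',
    messages,
  })

  return result.toAIStreamResponse()
}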
3 changes: 3 additions & 0 deletions src/utils/modelProviders/ollama.ts
@@ -56,6 +56,9 @@ export const runOllamaChat = async () => {
}

export const getOllamaModels = async (ollamaProvider: LLMProvider): Promise<OllamaModel[]> => {
if(!ollamaProvider.baseUrl) {
throw new Error(`Ollama baseUrl not defined: ${ollamaProvider.baseUrl}`)
}
const response = await fetch(ollamaProvider.baseUrl)
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`)
4 changes: 2 additions & 2 deletions src/utils/modelProviders/openai.ts
@@ -80,13 +80,13 @@ export const runOpenAIChat = async () => {
}
*/

export const getOpenAIModels = async (ollamaProvider: LLMProvider) => {
export const getOpenAIModels = async (openAIProvider: LLMProvider) => {
console.log('in openai get models')

const { OpenAI } = require("openai");

const client = new OpenAI({
apiKey: ollamaProvider.apiKey,
apiKey: openAIProvider.apiKey, // use the OpenAI provider's key, not the Ollama one
});
console.log('created openai client')

