Skip to content

Commit 6c25833

Browse files
authored
Merge pull request #1189 from openai/release-please--branches--master--changes--next--components--openai
release: 4.73.0
2 parents a92cc1d + 1e9391b commit 6c25833

22 files changed

+104
-436
lines changed

.release-please-manifest.json

+1-1
Original file line number | Diff line number | Diff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "4.72.0"
2+
".": "4.73.0"
33
}

.stats.yml

+1-1
Original file line number | Diff line number | Diff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 68
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2f8ca92b9b1879fd535b685e4767338413fcd533d42f3baac13a9c41da3fce35.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-aa9b01fc0c17eb0cbc200533fc20d6a49c5e764ceaf8049e08b294532be6e9ff.yml

CHANGELOG.md

+29
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,34 @@
11
# Changelog
22

3+
## 4.73.0 (2024-11-20)
4+
5+
Full Changelog: [v4.72.0...v4.73.0](https://github.com/openai/openai-node/compare/v4.72.0...v4.73.0)
6+
7+
### Features
8+
9+
* **api:** add gpt-4o-2024-11-20 model ([#1201](https://github.com/openai/openai-node/issues/1201)) ([0feeafd](https://github.com/openai/openai-node/commit/0feeafd21ba4b6281cc3b9dafa2919b1e2e4d1c3))
10+
* bump model in all example snippets to gpt-4o ([6961c37](https://github.com/openai/openai-node/commit/6961c37f2e581bcc12ec2bbe77df2b9b260fe297))
11+
12+
13+
### Bug Fixes
14+
15+
* **docs:** add missing await to pagination example ([#1190](https://github.com/openai/openai-node/issues/1190)) ([524b9e8](https://github.com/openai/openai-node/commit/524b9e82ae13a3b5093dcfbfd1169a798cf99ab4))
16+
17+
18+
### Chores
19+
20+
* **client:** drop unused devDependency ([#1191](https://github.com/openai/openai-node/issues/1191)) ([8ee6c03](https://github.com/openai/openai-node/commit/8ee6c0335673f2ecf84ea11bdfc990adab607e20))
21+
* **internal:** spec update ([#1195](https://github.com/openai/openai-node/issues/1195)) ([12f9334](https://github.com/openai/openai-node/commit/12f93346857196b93f94865cc3744d769e5e519c))
22+
* **internal:** use reexports not destructuring ([#1181](https://github.com/openai/openai-node/issues/1181)) ([f555dd6](https://github.com/openai/openai-node/commit/f555dd6503bc4ccd4d13f4e1a1d36fbbfd51c369))
23+
24+
25+
### Documentation
26+
27+
* bump models in example snippets to gpt-4o ([#1184](https://github.com/openai/openai-node/issues/1184)) ([4ec4027](https://github.com/openai/openai-node/commit/4ec402790cf3cfbccbf3ef9b61d577b0118977e8))
28+
* change readme title ([#1198](https://github.com/openai/openai-node/issues/1198)) ([e34981c](https://github.com/openai/openai-node/commit/e34981c00f2f0360baffe870bcc38786030671bf))
29+
* improve jsr documentation ([#1197](https://github.com/openai/openai-node/issues/1197)) ([ebdb4f7](https://github.com/openai/openai-node/commit/ebdb4f72cc01afbee649aca009fdaf413e61c507))
30+
* **readme:** fix incorrect fileBatches.uploadAndPoll params ([#1200](https://github.com/openai/openai-node/issues/1200)) ([3968ef1](https://github.com/openai/openai-node/commit/3968ef1c4fa860ff246e0e803808752b261c18ce))
31+
332
## 4.72.0 (2024-11-12)
433

534
Full Changelog: [v4.71.1...v4.72.0](https://github.com/openai/openai-node/compare/v4.71.1...v4.72.0)

README.md

+25-20
Original file line number | Diff line number | Diff line change
@@ -1,4 +1,4 @@
1-
# OpenAI Node API Library
1+
# OpenAI TypeScript and JavaScript API Library
22

33
[![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai)
44

@@ -14,16 +14,21 @@ To learn how to use the OpenAI API, check out our [API Reference](https://platfo
1414
npm install openai
1515
```
1616

17-
You can also import from jsr:
17+
### Installation from JSR
1818

19-
<!-- x-release-please-start-version -->
19+
```sh
20+
deno add jsr:@openai/openai
21+
npx jsr add @openai/openai
22+
```
23+
24+
These commands will make the module importable from the `@openai/openai` scope:
25+
26+
You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime:
2027

2128
```ts
2229
import OpenAI from 'jsr:@openai/openai';
2330
```
2431

25-
<!-- x-release-please-end -->
26-
2732
## Usage
2833

2934
The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API.
@@ -39,7 +44,7 @@ const client = new OpenAI({
3944
async function main() {
4045
const chatCompletion = await client.chat.completions.create({
4146
messages: [{ role: 'user', content: 'Say this is a test' }],
42-
model: 'gpt-3.5-turbo',
47+
model: 'gpt-4o',
4348
});
4449
}
4550

@@ -57,7 +62,7 @@ const client = new OpenAI();
5762

5863
async function main() {
5964
const stream = await client.chat.completions.create({
60-
model: 'gpt-4',
65+
model: 'gpt-4o',
6166
messages: [{ role: 'user', content: 'Say this is a test' }],
6267
stream: true,
6368
});
@@ -87,7 +92,7 @@ const client = new OpenAI({
8792
async function main() {
8893
const params: OpenAI.Chat.ChatCompletionCreateParams = {
8994
messages: [{ role: 'user', content: 'Say this is a test' }],
90-
model: 'gpt-3.5-turbo',
95+
model: 'gpt-4o',
9196
};
9297
const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params);
9398
}
@@ -128,7 +133,7 @@ const fileList = [
128133
...
129134
];
130135

131-
const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, fileList);
136+
const batch = await openai.vectorStores.fileBatches.uploadAndPoll(vectorStore.id, {files: fileList});
132137
```
133138

134139
### Streaming Helpers
@@ -173,7 +178,7 @@ const openai = new OpenAI();
173178

174179
async function main() {
175180
const stream = await openai.beta.chat.completions.stream({
176-
model: 'gpt-4',
181+
model: 'gpt-4o',
177182
messages: [{ role: 'user', content: 'Say this is a test' }],
178183
stream: true,
179184
});
@@ -226,7 +231,7 @@ const client = new OpenAI();
226231
async function main() {
227232
const runner = client.beta.chat.completions
228233
.runTools({
229-
model: 'gpt-3.5-turbo',
234+
model: 'gpt-4o',
230235
messages: [{ role: 'user', content: 'How is the weather this week?' }],
231236
tools: [
232237
{
@@ -333,7 +338,7 @@ a subclass of `APIError` will be thrown:
333338
```ts
334339
async function main() {
335340
const job = await client.fineTuning.jobs
336-
.create({ model: 'gpt-3.5-turbo', training_file: 'file-abc123' })
341+
.create({ model: 'gpt-4o', training_file: 'file-abc123' })
337342
.catch(async (err) => {
338343
if (err instanceof OpenAI.APIError) {
339344
console.log(err.status); // 400
@@ -368,7 +373,7 @@ Error codes are as followed:
368373
All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI.
369374

370375
```ts
371-
const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4' });
376+
const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' });
372377
console.log(completion._request_id) // req_123
373378
```
374379

@@ -392,7 +397,7 @@ const azureADTokenProvider = getBearerTokenProvider(credential, scope);
392397
const openai = new AzureOpenAI({ azureADTokenProvider });
393398

394399
const result = await openai.chat.completions.create({
395-
model: 'gpt-4-1106-preview',
400+
model: 'gpt-4o',
396401
messages: [{ role: 'user', content: 'Say hello!' }],
397402
});
398403

@@ -415,7 +420,7 @@ const client = new OpenAI({
415420
});
416421

417422
// Or, configure per-request:
418-
await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in Node.js?' }], model: 'gpt-3.5-turbo' }, {
423+
await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I get the name of the current day in JavaScript?' }], model: 'gpt-4o' }, {
419424
maxRetries: 5,
420425
});
421426
```
@@ -432,7 +437,7 @@ const client = new OpenAI({
432437
});
433438

434439
// Override per-request:
435-
await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-3.5-turbo' }, {
440+
await client.chat.completions.create({ messages: [{ role: 'user', content: 'How can I list all files in a directory using Python?' }], model: 'gpt-4o' }, {
436441
timeout: 5 * 1000,
437442
});
438443
```
@@ -467,7 +472,7 @@ for (const fineTuningJob of page.data) {
467472

468473
// Convenience methods are provided for manually paginating:
469474
while (page.hasNextPage()) {
470-
page = page.getNextPage();
475+
page = await page.getNextPage();
471476
// ...
472477
}
473478
```
@@ -485,13 +490,13 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi
485490
const client = new OpenAI();
486491

487492
const response = await client.chat.completions
488-
.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
493+
.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' })
489494
.asResponse();
490495
console.log(response.headers.get('X-My-Header'));
491496
console.log(response.statusText); // access the underlying Response object
492497

493498
const { data: chatCompletion, response: raw } = await client.chat.completions
494-
.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-3.5-turbo' })
499+
.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' })
495500
.withResponse();
496501
console.log(raw.headers.get('X-My-Header'));
497502
console.log(chatCompletion);
@@ -622,7 +627,7 @@ TypeScript >= 4.5 is supported.
622627
The following runtimes are supported:
623628

624629
- Node.js 18 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions.
625-
- Deno v1.28.0 or higher, using `import OpenAI from "npm:openai"`.
630+
- Deno v1.28.0 or higher.
626631
- Bun 1.0 or later.
627632
- Cloudflare Workers.
628633
- Vercel Edge Runtime.

jsr.json

+1-1
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "@openai/openai",
3-
"version": "4.72.0",
3+
"version": "4.73.0",
44
"exports": "./index.ts",
55
"publish": {
66
"exclude": [

package.json

+1-2
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "openai",
3-
"version": "4.72.0",
3+
"version": "4.73.0",
44
"description": "The official TypeScript library for the OpenAI API",
55
"author": "OpenAI <[email protected]>",
66
"types": "dist/index.d.ts",
@@ -47,7 +47,6 @@
4747
"prettier": "^3.0.0",
4848
"prettier-2": "npm:prettier@^2",
4949
"ts-jest": "^29.1.0",
50-
"ts-morph": "^19.0.0",
5150
"ts-node": "^10.5.0",
5251
"tsc-multi": "^1.1.0",
5352
"tsconfig-paths": "^4.0.0",

scripts/build

+1-1
Original file line number | Diff line number | Diff line change
@@ -32,7 +32,7 @@ npm exec tsc-multi
3232
# copy over handwritten .js/.mjs/.d.ts files
3333
cp src/_shims/*.{d.ts,js,mjs,md} dist/_shims
3434
cp src/_shims/auto/*.{d.ts,js,mjs} dist/_shims/auto
35-
# we need to add exports = module.exports = OpenAI Node to index.js;
35+
# we need to add exports = module.exports = OpenAI to index.js;
3636
# No way to get that from index.ts because it would cause compile errors
3737
# when building .mjs
3838
node scripts/utils/fix-index-exports.cjs

src/index.ts

+17-20
Original file line number | Diff line number | Diff line change
@@ -306,25 +306,6 @@ export class OpenAI extends Core.APIClient {
306306
static fileFromPath = Uploads.fileFromPath;
307307
}
308308

309-
export {
310-
OpenAIError,
311-
APIError,
312-
APIConnectionError,
313-
APIConnectionTimeoutError,
314-
APIUserAbortError,
315-
NotFoundError,
316-
ConflictError,
317-
RateLimitError,
318-
BadRequestError,
319-
AuthenticationError,
320-
InternalServerError,
321-
PermissionDeniedError,
322-
UnprocessableEntityError,
323-
} from './error';
324-
325-
export import toFile = Uploads.toFile;
326-
export import fileFromPath = Uploads.fileFromPath;
327-
328309
OpenAI.Completions = Completions;
329310
OpenAI.Chat = Chat;
330311
OpenAI.Embeddings = Embeddings;
@@ -340,7 +321,6 @@ OpenAI.Beta = Beta;
340321
OpenAI.Batches = Batches;
341322
OpenAI.BatchesPage = BatchesPage;
342323
OpenAI.Uploads = UploadsAPIUploads;
343-
344324
export declare namespace OpenAI {
345325
export type RequestOptions = Core.RequestOptions;
346326

@@ -664,4 +644,21 @@ const API_KEY_SENTINEL = '<Missing Key>';
664644

665645
// ---------------------- End Azure ----------------------
666646

647+
export { toFile, fileFromPath } from './uploads';
648+
export {
649+
OpenAIError,
650+
APIError,
651+
APIConnectionError,
652+
APIConnectionTimeoutError,
653+
APIUserAbortError,
654+
NotFoundError,
655+
ConflictError,
656+
RateLimitError,
657+
BadRequestError,
658+
AuthenticationError,
659+
InternalServerError,
660+
PermissionDeniedError,
661+
UnprocessableEntityError,
662+
} from './error';
663+
667664
export default OpenAI;

src/resources/batches.ts

+1-1
Original file line number | Diff line number | Diff line change
@@ -232,7 +232,7 @@ export interface BatchCreateParams {
232232
* Your input file must be formatted as a
233233
* [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
234234
* and must be uploaded with the purpose `batch`. The file can contain up to 50,000
235-
* requests, and can be up to 100 MB in size.
235+
* requests, and can be up to 200 MB in size.
236236
*/
237237
input_file_id: string;
238238

src/resources/chat/chat.ts

+1
Original file line number | Diff line number | Diff line change
@@ -49,6 +49,7 @@ export type ChatModel =
4949
| 'o1-mini'
5050
| 'o1-mini-2024-09-12'
5151
| 'gpt-4o'
52+
| 'gpt-4o-2024-11-20'
5253
| 'gpt-4o-2024-08-06'
5354
| 'gpt-4o-2024-05-13'
5455
| 'gpt-4o-realtime-preview'

src/resources/chat/completions.ts

+3-2
Original file line number | Diff line number | Diff line change
@@ -250,8 +250,9 @@ export interface ChatCompletionAudioParam {
250250
format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16';
251251

252252
/**
253-
* The voice the model uses to respond. Supported voices are `alloy`, `ash`,
254-
* `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
253+
* The voice the model uses to respond. Supported voices are `ash`, `ballad`,
254+
* `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`,
255+
* `echo`, and `shimmer`; these voices are less expressive).
255256
*/
256257
voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse';
257258
}

src/resources/files.ts

+1-1
Original file line number | Diff line number | Diff line change
@@ -25,7 +25,7 @@ export class Files extends APIResource {
2525
* [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
2626
* models.
2727
*
28-
* The Batch API only supports `.jsonl` files up to 100 MB in size. The input also
28+
* The Batch API only supports `.jsonl` files up to 200 MB in size. The input also
2929
* has a specific required
3030
* [format](https://platform.openai.com/docs/api-reference/batch/request-input).
3131
*

src/version.ts

+1-1
Original file line number | Diff line number | Diff line change
@@ -1 +1 @@
1-
export const VERSION = '4.72.0'; // x-release-please-version
1+
export const VERSION = '4.73.0'; // x-release-please-version

tests/api-resources/audio/transcriptions.test.ts

+1-1
Original file line number | Diff line number | Diff line change
@@ -31,7 +31,7 @@ describe('resource transcriptions', () => {
3131
prompt: 'prompt',
3232
response_format: 'json',
3333
temperature: 0,
34-
timestamp_granularities: ['word', 'segment'],
34+
timestamp_granularities: ['word'],
3535
});
3636
});
3737
});

tests/api-resources/beta/assistants.test.ts

+3-5
Original file line number | Diff line number | Diff line change
@@ -30,15 +30,13 @@ describe('resource assistants', () => {
3030
response_format: 'auto',
3131
temperature: 1,
3232
tool_resources: {
33-
code_interpreter: { file_ids: ['string', 'string', 'string'] },
33+
code_interpreter: { file_ids: ['string'] },
3434
file_search: {
3535
vector_store_ids: ['string'],
36-
vector_stores: [
37-
{ chunking_strategy: { type: 'auto' }, file_ids: ['string', 'string', 'string'], metadata: {} },
38-
],
36+
vector_stores: [{ chunking_strategy: { type: 'auto' }, file_ids: ['string'], metadata: {} }],
3937
},
4038
},
41-
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
39+
tools: [{ type: 'code_interpreter' }],
4240
top_p: 1,
4341
});
4442
});

tests/api-resources/beta/threads/messages.test.ts

+1-14
Original file line number | Diff line number | Diff line change
@@ -27,20 +27,7 @@ describe('resource messages', () => {
2727
const response = await client.beta.threads.messages.create('thread_id', {
2828
content: 'string',
2929
role: 'user',
30-
attachments: [
31-
{
32-
file_id: 'file_id',
33-
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
34-
},
35-
{
36-
file_id: 'file_id',
37-
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
38-
},
39-
{
40-
file_id: 'file_id',
41-
tools: [{ type: 'code_interpreter' }, { type: 'code_interpreter' }, { type: 'code_interpreter' }],
42-
},
43-
],
30+
attachments: [{ file_id: 'file_id', tools: [{ type: 'code_interpreter' }] }],
4431
metadata: {},
4532
});
4633
});

0 commit comments

Comments (0)