diff --git a/.eslintrc.js b/.eslintrc.js new file mode 100644 index 0000000..73f16fc --- /dev/null +++ b/.eslintrc.js @@ -0,0 +1,27 @@ +module.exports = { + parser: '@typescript-eslint/parser', + extends: [ + 'eslint:recommended', + 'plugin:@typescript-eslint/recommended', + ], + plugins: ['@typescript-eslint'], + parserOptions: { + ecmaVersion: 2020, + sourceType: 'module', + }, + rules: { + // Generated code often has these issues, so we'll be lenient + '@typescript-eslint/no-unused-vars': 'warn', + '@typescript-eslint/no-explicit-any': 'off', + '@typescript-eslint/ban-ts-comment': 'off', + '@typescript-eslint/no-empty-interface': 'off', + // Allow console for SDK logging + 'no-console': 'off', + }, + ignorePatterns: [ + 'dist/**/*', + 'node_modules/**/*', + '.openapi-generator/**/*', + 'example.js' + ] +}; \ No newline at end of file diff --git a/.github/workflows/auto-release-simple.yml b/.github/workflows/auto-release-simple.yml new file mode 100644 index 0000000..8ce388e --- /dev/null +++ b/.github/workflows/auto-release-simple.yml @@ -0,0 +1,106 @@ +name: Auto Release (Simple) + +on: + push: + branches: [ main ] + +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + node-version: [16.x, 18.x, 20.x] + + steps: + - uses: actions/checkout@v4 + + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Run lint + run: npm run lint + + - name: Build package + run: npm run build + + - name: Test package can be imported + run: node -e "require('./dist/index.js')" + + check-version: + runs-on: ubuntu-latest + outputs: + version-changed: ${{ steps.version-check.outputs.changed }} + new-version: ${{ steps.version-check.outputs.version }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 2 + + - name: Check if version changed + id: version-check + run: | + # Get current version + CURRENT_VERSION=$(node -p "require('./package.json').version") + + # Get previous version (from previous commit) + git checkout HEAD~1 + PREVIOUS_VERSION=$(node -p "require('./package.json').version") + git checkout - + + echo "Previous version: $PREVIOUS_VERSION" + echo "Current version: $CURRENT_VERSION" + + if [ "$CURRENT_VERSION" != "$PREVIOUS_VERSION" ]; then + echo "changed=true" >> $GITHUB_OUTPUT + echo "version=$CURRENT_VERSION" >> $GITHUB_OUTPUT + else + echo "changed=false" >> $GITHUB_OUTPUT + fi + + release-and-publish: + needs: [test, check-version] + runs-on: ubuntu-latest + if: needs.check-version.outputs.version-changed == 'true' + + steps: + - uses: actions/checkout@v4 + + - name: Use Node.js 18 + uses: actions/setup-node@v4 + with: + node-version: 18 + cache: 'npm' + registry-url: 'https://registry.npmjs.org' + + - name: Install dependencies + run: npm ci + + - name: Build package + run: npm run build + + - name: Create Release + id: create_release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: v${{ needs.check-version.outputs.new-version }} + release_name: Release v${{ needs.check-version.outputs.new-version }} + draft: false + prerelease: false + body: | + Changes in this release: + - Version bump to ${{ needs.check-version.outputs.new-version }} + + For detailed changes, see the commit history. 
+ + - name: Publish to NPM + run: npm publish + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..2ae0806 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,95 @@ +name: CI/CD + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + release: + types: [ created ] + +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + node-version: [16.x, 18.x, 20.x] + + steps: + - uses: actions/checkout@v4 + + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Run lint + run: npm run lint + + - name: Build package + run: npm run build + + - name: Test package can be imported + run: node -e "require('./dist/index.js')" + + # Semantic Release - automatically creates releases and publishes on merge to main + release: + needs: test + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/main' && github.event_name == 'push' + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 # Fetch full history for semantic-release + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Use Node.js 18 + uses: actions/setup-node@v4 + with: + node-version: 18 + cache: 'npm' + registry-url: 'https://registry.npmjs.org' + + - name: Install dependencies + run: npm ci + + - name: Build package + run: npm run build + + - name: Semantic Release + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + run: npx semantic-release + + # Keep the old publish job for manual releases (optional) + publish: + needs: test + runs-on: ubuntu-latest + if: github.event_name == 'release' + + steps: + - uses: actions/checkout@v4 + + - name: Use Node.js 18 + uses: actions/setup-node@v4 + with: + node-version: 18 + cache: 'npm' + registry-url: 'https://registry.npmjs.org' + + - name: Install dependencies + run: npm ci + + - name: Build package + run: npm run build + + - name: Publish to NPM + run: npm publish + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} \ No newline at end of file diff --git a/.gitignore b/.gitignore index 3e1f71b..d6662d5 100644 --- a/.gitignore +++ b/.gitignore @@ -54,6 +54,8 @@ pids/ # Generated types (will be regenerated) src/types/generated.ts +wwwroot/*.js +typings # Temporary files tmp/ diff --git a/.npmignore b/.npmignore new file mode 100644 index 0000000..fdc1113 --- /dev/null +++ b/.npmignore @@ -0,0 +1,47 @@ +# empty npmignore to ensure all required files (e.g., in the dist folder) are published by npm + +# Source files (only include the built dist folder) +*.ts +!*.d.ts + +# Development files +.openapi-generator/ +.openapi-generator-ignore +git_push.sh +docs/ +tsconfig*.json +.eslintrc.js + +# Node.js +node_modules/ +.npm +.nvmrc + +# Testing +coverage/ +.nyc_output/ +*.test.* +*.spec.* + +# Development +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Environment +.env +.env.local +.env.*.local \ No newline at end of file diff --git a/.nvmrc b/.nvmrc new file mode 100644 index 0000000..622e363 --- /dev/null +++ b/.nvmrc @@ -0,0 +1 @@ +18 \ No newline at end of file diff --git a/.openapi-generator-ignore b/.openapi-generator-ignore new file mode 100644 index 0000000..7484ee5 --- /dev/null +++ b/.openapi-generator-ignore @@ -0,0 +1,23 @@ +# OpenAPI Generator Ignore +# 
Generated by openapi-generator https://github.com/openapitools/openapi-generator + +# Use this file to prevent files from being overwritten by the generator. +# The patterns follow closely to .gitignore or .dockerignore. + +# As an example, the C# client generator defines ApiClient.cs. +# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line: +#ApiClient.cs + +# You can match any string of characters against a directory, file or extension with a single asterisk (*): +#foo/*/qux +# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux + +# You can recursively match patterns against a directory, file or extension with a double asterisk (**): +#foo/**/qux +# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux + +# You can also negate patterns with an exclamation (!). +# For example, you can ignore all files in a docs folder with the file extension .md: +#docs/*.md +# Then explicitly reverse the ignore rule for a single file: +#!docs/README.md diff --git a/.openapi-generator/FILES b/.openapi-generator/FILES new file mode 100644 index 0000000..14a9bfc --- /dev/null +++ b/.openapi-generator/FILES @@ -0,0 +1,40 @@ +.gitignore +.npmignore +.openapi-generator-ignore +api.ts +base.ts +common.ts +configuration.ts +docs/CreateReplacementRuleset201Response.md +docs/CreateReplacementRulesetRequest.md +docs/ErrorResponse.md +docs/ExactRule.md +docs/OpenAIAudioResponseFormat.md +docs/OpenAICompatibleSpeechToTextApi.md +docs/OpenAICreateTranscriptionResponseJson.md +docs/OpenAICreateTranscriptionResponseVerboseJson.md +docs/OpenAICreateTranslationRequestModel.md +docs/OpenAICreateTranslationResponseJson.md +docs/OpenAICreateTranslationResponseVerboseJson.md +docs/OpenAITranscriptionSegment.md +docs/OpenAITranscriptionWord.md +docs/OpenaiCompatibleCreateTranscription200Response.md +docs/OpenaiCompatibleCreateTranslation200Response.md +docs/RegexGroupRule.md +docs/RegexRule.md +docs/ReplacementRule.md +docs/ReplacementRulesApi.md +docs/SpeechToTextApi.md +docs/SpeechToTextModel.md +docs/TranscriptLanguageCode.md +docs/TranscriptOutputFormat.md +docs/TranscriptionDetailed.md +docs/TranscriptionModelIdentifier.md +docs/TranscriptionOnlyText.md +docs/TranscriptionOptions.md +docs/TranscriptionProvider.md +docs/TranscriptionResponse.md +docs/TranscriptionSegment.md +docs/TranscriptionWord.md +git_push.sh +index.ts diff --git a/.openapi-generator/VERSION b/.openapi-generator/VERSION new file mode 100644 index 0000000..eb1dc6a --- /dev/null +++ b/.openapi-generator/VERSION @@ -0,0 +1 @@ +7.13.0 diff --git a/.releaserc.json b/.releaserc.json new file mode 100644 index 0000000..2b4fa9f --- /dev/null +++ b/.releaserc.json @@ -0,0 +1,32 @@ +{ + "branches": ["main"], + "plugins": [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + "@semantic-release/changelog", + [ + "@semantic-release/npm", + { + "npmPublish": true + } + ], + [ + "@semantic-release/github", + { + "assets": [ + { + "path": "CHANGELOG.md", + "label": "Changelog" + } + ] + } + ], + [ + "@semantic-release/git", + { + "assets": ["package.json", "package-lock.json", "CHANGELOG.md"], + "message": "chore(release): ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}" + } + ] + ] +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..7df50af --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,21 @@ +# Changelog + +All notable changes to this project will be documented in this file. 
+ +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [0.0.1] - 2025-05-29 + +### Added +- Initial release of the Speechall TypeScript SDK +- Support for core transcription functionality via `SpeechToTextApi` +- OpenAI-compatible endpoints via `OpenAICompatibleSpeechToTextApi` +- Custom text replacement rules via `ReplacementRulesApi` +- Full TypeScript support with comprehensive type definitions +- Support for multiple output formats (JSON, text, SRT, VTT) +- Support for multiple STT providers through unified API +- CommonJS and ESM build outputs +- Comprehensive documentation and examples \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..1cf1d60 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Speechall + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/PUBLISHING_GUIDE.md b/PUBLISHING_GUIDE.md new file mode 100644 index 0000000..2c7f6d1 --- /dev/null +++ b/PUBLISHING_GUIDE.md @@ -0,0 +1,303 @@ +# Publishing Guide for @speechall/sdk + +This guide explains how to build and publish the Speechall TypeScript SDK to npm, both locally and through GitHub Actions. + +## Prerequisites + +Before you start, ensure you have: +- [ ] Node.js 16+ installed +- [ ] npm account with access to publish packages +- [ ] NPM access token generated from npmjs.org +- [ ] Repository cloned locally + +## Publishing Options + +### 🤖 Option 1: Automated Publishing (Recommended) + +We've set up automated workflows that can publish your package when PRs are merged. Choose between: + +#### A. Semantic Release (Smart Automation) +- **What it does**: Automatically determines version numbers based on commit messages and creates releases +- **When it publishes**: Only when there are meaningful changes (features, fixes, breaking changes) +- **Workflow**: `.github/workflows/ci.yml` (release job) +- **Configuration**: `.releaserc.json` + +**How to use:** +1. Use conventional commit messages in your PRs: + - `feat: add new feature` → minor version bump + - `fix: resolve bug` → patch version bump + - `feat!: breaking change` → major version bump + - `docs: update readme` → no release +2. Merge PR to main → automatic release + npm publish + +#### B. 
Version-Based Auto Release (Simple)
+- **What it does**: Creates releases when `package.json` version changes
+- **When it publishes**: Every time version in package.json is updated
+- **Workflow**: `.github/workflows/auto-release-simple.yml`
+
+**How to use:**
+1. Update version in `package.json` in your PR
+2. Merge PR to main → automatic release + npm publish
+
+### 📱 Option 2: Manual Publishing
+
+Traditional approach where you manually create releases.
+
+## Setup for Automated Publishing
+
+### Step 1: Configure GitHub Secrets
+Ensure these secrets are set in your repository:
+- `NPM_TOKEN`: Your npm access token
+- `GITHUB_TOKEN`: (automatically provided by GitHub)
+
+### Step 2: Choose Your Workflow
+
+**For Semantic Release:**
+1. The workflow is already configured in `ci.yml`
+2. Install semantic-release dependencies (optional, they're loaded via npx):
+   ```bash
+   npm install --save-dev semantic-release @semantic-release/git @semantic-release/changelog
+   ```
+
+**For Version-Based Release:**
+1. Remove or disable the `release` job in `ci.yml` and let `auto-release-simple.yml` handle publishing, or
+2. Keep both workflows (semantic release is the smarter of the two)
+
+### Step 3: Configure Commit Messages (for Semantic Release)
+
+Use these commit message formats:
+- `feat: description` - New feature (minor bump)
+- `fix: description` - Bug fix (patch bump)
+- `docs: description` - Documentation only (no release)
+- `style: description` - Code style changes (no release)
+- `refactor: description` - Code refactoring (no release)
+- `test: description` - Test updates (no release)
+- `chore: description` - Maintenance tasks (no release)
+
+For breaking changes, add `!` or use a footer:
+- `feat!: breaking change description`
+- Or use commit body with `BREAKING CHANGE: description`
+
+## Automated Workflow Benefits
+
+✅ **Pros:**
+- No manual release steps
+- Consistent versioning
+- Automatic changelog generation (semantic-release)
+- Faster deployment cycle
+- Less human error
+
+⚠️ **Considerations:**
+- Need to be more careful about what gets merged to main
+- Requires team to follow commit message conventions (semantic-release)
+- Every merge could potentially trigger a release
+
+## Setup NPM Authentication
+
+### Option 1: Local Publishing Setup
+
+1. **Configure npm with your access token:**
+   ```bash
+   npm config set //registry.npmjs.org/:_authToken YOUR_ACCESS_TOKEN
+   ```
+
+2. **Verify your authentication:**
+   ```bash
+   npm whoami
+   ```
+   This should display your npm username.
+
+### Option 2: Using .npmrc file (Alternative)
+Create a `.npmrc` file in your home directory:
+```
+//registry.npmjs.org/:_authToken=YOUR_ACCESS_TOKEN
+```
+
+⚠️ **Important:** Never commit your access token to version control!
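+
+A safer variant (a sketch, assuming you export `NPM_TOKEN` in your shell or CI environment) is to reference an environment variable from `.npmrc` instead of storing the raw token; npm expands `${NPM_TOKEN}` at run time and fails with a clear error if the variable is unset:
+
+```
+//registry.npmjs.org/:_authToken=${NPM_TOKEN}
+```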
+
+## Publishing Locally
+
+### Step 1: Install Dependencies
+```bash
+npm install
+```
+
+### Step 2: Run Quality Checks
+```bash
+# Run linting
+npm run lint
+
+# Fix any linting issues
+npm run lint:fix
+```
+
+### Step 3: Build the Package
+```bash
+# Clean previous builds
+npm run clean
+
+# Build both CommonJS and ESM versions
+npm run build
+```
+
+This will create:
+- `dist/` - CommonJS build
+- `dist/esm/` - ES Modules build
+
+### Step 4: Test the Build
+```bash
+# Test that the package can be imported
+node -e "require('./dist/index.js')"
+```
+
+### Step 5: Version Management
+
+Before publishing, update the version in `package.json`:
+
+```bash
+# For patch releases (0.0.1 -> 0.0.2)
+npm version patch
+
+# For minor releases (0.0.1 -> 0.1.0)
+npm version minor
+
+# For major releases (0.0.1 -> 1.0.0)
+npm version major
+```
+
+This automatically:
+- Updates `package.json`
+- Creates a git commit
+- Creates a git tag
+
+### Step 6: Publish to npm
+
+#### Dry Run (Recommended first time)
+```bash
+npm publish --dry-run
+```
+
+This shows what would be published without actually doing it.
+
+#### Actual Publishing
+```bash
+npm publish
+```
+
+For scoped packages (like `@speechall/sdk`), you might need:
+```bash
+npm publish --access public
+```
+
+## Publishing via GitHub Actions (Recommended)
+
+Your repository already has a CI/CD workflow configured. Here's how to use it:
+
+### Step 1: Setup NPM Token in GitHub
+
+1. Go to your GitHub repository
+2. Navigate to Settings → Secrets and variables → Actions
+3. Click "New repository secret"
+4. Name: `NPM_TOKEN`
+5. Value: Your npm access token
+6. Click "Add secret"
+
+### Step 2: Create a Release
+
+1. **Commit and push your changes:**
+   ```bash
+   git add .
+   git commit -m "feat: prepare for v0.0.1 release"
+   git push origin main
+   ```
+
+2. **Update version and create a tag:**
+   ```bash
+   npm version patch  # or minor/major
+   git push origin main --tags
+   ```
+
+3. **Create a GitHub Release:**
+   - Go to your repository on GitHub
+   - Click "Releases" → "Create a new release"
+   - Choose the tag you just created
+   - Add release notes
+   - Click "Publish release"
+
+The GitHub Action will automatically:
+- Run tests on Node.js 16, 18, and 20
+- Build the package
+- Publish to npm (only on release creation)
+
+## Verification
+
+After publishing, verify your package:
+
+1. **Check on npmjs.org:**
+   Visit: https://www.npmjs.com/package/@speechall/sdk
+
+2. **Test installation in a new project:**
+   ```bash
+   mkdir test-install
+   cd test-install
+   npm init -y
+   npm install @speechall/sdk
+   ```
+
+3. **Test importing:**
+   ```javascript
+   // test.js
+   const { Configuration, SpeechToTextApi } = require('@speechall/sdk');
+   console.log('Package imported successfully!');
+   ```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Authentication Error:**
+   ```
+   npm ERR! 401 Unauthorized
+   ```
+   Solution: Check your npm token is correctly configured.
+
+2. **Package Name Conflicts:**
+   ```
+   npm ERR! 403 Forbidden
+   ```
+   Solution: The package name might be taken. Update `name` in `package.json`.
+
+3. 
**Build Errors:** + - Ensure all TypeScript files compile without errors + - Check that all dependencies are installed + - Verify `tsconfig.json` configuration + +### Best Practices + +- [ ] Always run `npm run lint` before publishing +- [ ] Test the built package locally before publishing +- [ ] Use semantic versioning (semver) +- [ ] Write clear release notes +- [ ] Test your package after publishing +- [ ] Keep your npm token secure + +## Available Scripts + +- `npm run build` - Build both CommonJS and ESM versions +- `npm run build:cjs` - Build CommonJS version only +- `npm run build:esm` - Build ESM version only +- `npm run clean` - Remove dist directory +- `npm run lint` - Run ESLint +- `npm run lint:fix` - Fix ESLint issues automatically +- `npm run prepublishOnly` - Clean and build (runs automatically before publish) + +## Next Steps + +1. Set up your npm authentication +2. Test building the package locally +3. Set up the GitHub secret for automated publishing +4. Create your first release! + +--- + +**Need help?** Check the [npm documentation](https://docs.npmjs.com/) or create an issue in this repository. \ No newline at end of file diff --git a/README.md b/README.md index 70c379b..dda6d58 100644 --- a/README.md +++ b/README.md @@ -1 +1,251 @@ -Hello world \ No newline at end of file +# Speechall TypeScript SDK + +A TypeScript/JavaScript SDK for the Speechall API, providing powerful and flexible speech-to-text capabilities. This SDK allows you to transcribe audio files using various STT providers and models, apply custom text replacement rules, and access results in multiple formats. + +## Features + +- **Multiple STT Providers**: Access various speech-to-text providers through a unified API +- **Custom Text Replacement**: Apply custom replacement rules to improve transcription accuracy +- **Multiple Output Formats**: Get results in JSON, text, SRT, VTT, or verbose JSON formats +- **OpenAI Compatibility**: Use OpenAI-compatible endpoints for easy migration +- **TypeScript Support**: Full TypeScript support with comprehensive type definitions +- **Promise-based**: Modern async/await support with Axios under the hood + +## Installation + +```bash +npm install @speechall/sdk +``` + +## Quick Start + +```typescript +import { Configuration, SpeechToTextApi } from '@speechall/sdk'; + +// Configure the SDK +const config = new Configuration({ + apiKey: 'your-api-key-here', + basePath: 'https://api.speechall.com' // Replace with actual API base path +}); + +// Create API instance +const speechApi = new SpeechToTextApi(config); + +// Transcribe audio from URL +async function transcribeAudio() { + try { + const response = await speechApi.transcribeRemote({ + file_url: 'https://example.com/audio.mp3', + model: 'deepgram.nova-2-general', + language: 'en', + output_format: 'json' + }); + + console.log('Transcription:', response.data.text); + } catch (error) { + console.error('Transcription failed:', error); + } +} + +transcribeAudio(); +``` + +## API Reference + +### Main API Classes + +- **SpeechToTextApi**: Core transcription functionality +- **OpenAICompatibleSpeechToTextApi**: OpenAI-compatible endpoints +- **ReplacementRulesApi**: Manage custom text replacement rules + +### Configuration + +```typescript +const config = new Configuration({ + apiKey: 'your-api-key', // Your API key + basePath: 'https://api.speechall.com', // API base URL + // Optional: custom axios configuration + baseOptions: { + timeout: 30000, + headers: { + 'Custom-Header': 'value' + } + } +}); +``` + +### 
Transcription Options + +```typescript +interface TranscriptionOptions { + file_url: string; // Audio file URL + model: TranscriptionModelIdentifier; // Model to use + language?: string; // Language code (e.g., 'en', 'es') + output_format?: string; // 'json', 'text', 'srt', 'vtt' + punctuation?: boolean; // Add punctuation + timestamp_granularity?: string; // 'word' or 'segment' + diarization?: boolean; // Speaker identification + initial_prompt?: string; // Transcription hint + temperature?: number; // Model randomness (0-1) + smart_format?: boolean; // Provider-specific formatting + speakers_expected?: number; // Expected number of speakers + custom_vocabulary?: string[]; // Custom words/phrases + replacement_ruleset?: ReplacementRule[]; // Custom replacement rules +} +``` + +## Examples + +### Basic Transcription + +```typescript +import { Configuration, SpeechToTextApi } from '@speechall/sdk'; + +const config = new Configuration({ apiKey: 'your-api-key' }); +const api = new SpeechToTextApi(config); + +const result = await api.transcribeRemote({ + file_url: 'https://example.com/audio.wav', + model: 'deepgram.nova-2-general' +}); + +console.log(result.data.text); +``` + +### File Upload Transcription + +```typescript +// For file uploads, you'll need to create a File object +const file = new File([audioBuffer], 'audio.mp3', { type: 'audio/mpeg' }); + +const result = await api.transcribe( + 'deepgram.nova-2-general', // model + file, // audio file + 'en', // language + 'json' // output format +); +``` + +### Advanced Options + +```typescript +const result = await api.transcribeRemote({ + file_url: 'https://example.com/meeting.mp3', + model: 'deepgram.nova-2-meeting', + language: 'en', + output_format: 'json', + diarization: true, // Identify speakers + punctuation: true, // Add punctuation + timestamp_granularity: 'word', // Word-level timestamps + speakers_expected: 3, // Hint for speaker count + custom_vocabulary: ['API', 'TypeScript', 'Speechall'] +}); +``` + +### OpenAI-Compatible API + +```typescript +import { OpenAICompatibleSpeechToTextApi } from '@speechall/sdk'; + +const openaiApi = new OpenAICompatibleSpeechToTextApi(config); + +const result = await openaiApi.openaiCompatibleCreateTranscription( + file, // File object + 'deepgram.nova-2-general', // model + 'en', // language + 'Transcribe this audio file', // prompt + 'json', // response_format + 0.2 // temperature +); +``` + +### Custom Replacement Rules + +```typescript +import { ReplacementRulesApi } from '@speechall/sdk'; + +const rulesApi = new ReplacementRulesApi(config); + +// Create a replacement ruleset +const ruleset = await rulesApi.createReplacementRuleset({ + name: 'Technical Terms', + rules: [ + { + kind: 'exact', + search: 'API', + replacement: 'A.P.I.', + caseSensitive: false + }, + { + kind: 'regex', + pattern: '\\b(\\d+)\\s*dollars?\\b', + replacement: '$$$1', + flags: ['i'] + } + ] +}); + +// Use the ruleset in transcription +const result = await api.transcribeRemote({ + file_url: 'https://example.com/audio.mp3', + model: 'deepgram.nova-2-general', + // Reference the created ruleset + // ruleset_id: ruleset.data.id +}); +``` + +### List Available Models + +```typescript +const models = await api.listSpeechToTextModels(); + +models.data.forEach(model => { + console.log(`${model.id}: ${model.display_name}`); + console.log(` Provider: ${model.provider}`); + console.log(` Languages: ${model.supported_languages?.join(', ')}`); + console.log(` Cost: $${model.cost_per_second_usd}/second`); +}); +``` + +## Error 
Handling + +```typescript +import { AxiosError } from 'axios'; + +try { + const result = await api.transcribeRemote({ + file_url: 'https://example.com/audio.mp3', + model: 'deepgram.nova-2-general' + }); +} catch (error) { + if (error instanceof AxiosError) { + console.error('API Error:', error.response?.data); + console.error('Status:', error.response?.status); + } else { + console.error('Unexpected error:', error); + } +} +``` + +## Types + +The SDK includes comprehensive TypeScript types for all API entities: + +- `TranscriptionResponse` - Transcription results +- `TranscriptionOptions` - Transcription request options +- `SpeechToTextModel` - Model information +- `ReplacementRule` - Text replacement rules +- `Configuration` - SDK configuration +- And many more... + +## Contributing + +This SDK is auto-generated from the Speechall OpenAPI specification. Please report issues or feature requests on the [GitHub repository](https://github.com/speechall/speechall-typescript-sdk). + +## License + +MIT + +## Support + +For support, please contact [support@speechall.ai](mailto:support@speechall.ai) or visit our [documentation](https://docs.speechall.ai). \ No newline at end of file diff --git a/api.ts b/api.ts new file mode 100644 index 0000000..b44202e --- /dev/null +++ b/api.ts @@ -0,0 +1,1874 @@ +/* tslint:disable */ +/* eslint-disable */ +/** + * Speechall API + * The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure. + * + * The version of the OpenAPI document: 0.0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +import type { Configuration } from './configuration'; +import type { AxiosPromise, AxiosInstance, RawAxiosRequestConfig } from 'axios'; +import globalAxios from 'axios'; +// Some imports not used depending on template conditions +// @ts-ignore +import { DUMMY_BASE_URL, assertParamExists, setApiKeyToObject, setBasicAuthToObject, setBearerAuthToObject, setOAuthToObject, setSearchParams, serializeDataIfNeeded, toPathString, createRequestFunction } from './common'; +import type { RequestArgs } from './base'; +// @ts-ignore +import { BASE_PATH, COLLECTION_FORMATS, BaseAPI, RequiredError, operationServerMap } from './base'; + +/** + * + * @export + * @interface CreateReplacementRuleset201Response + */ +export interface CreateReplacementRuleset201Response { + /** + * The unique identifier (UUID) generated for this ruleset. Use this ID in the `ruleset_id` parameter of transcription requests. + * @type {string} + * @memberof CreateReplacementRuleset201Response + */ + 'id': string; +} +/** + * + * @export + * @interface CreateReplacementRulesetRequest + */ +export interface CreateReplacementRulesetRequest { + /** + * A user-defined name for this ruleset for easier identification. + * @type {string} + * @memberof CreateReplacementRulesetRequest + */ + 'name': string; + /** + * An ordered array of replacement rules. Rules are applied in the order they appear in this list. See the `ReplacementRule` schema for different rule types (exact, regex, regex_group). 
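+     * 
+     * Illustrative example (hypothetical values, not from the spec):
+     * `[{ "kind": "exact", "search": "speech all", "replacement": "Speechall" },
+     *   { "kind": "regex", "pattern": "\\bcolour\\b", "replacement": "color", "flags": ["i"] }]`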
+ * @type {Array} + * @memberof CreateReplacementRulesetRequest + */ + 'rules': Array; +} +/** + * Standard structure for error responses. May include additional properties depending on the error type. + * @export + * @interface ErrorResponse + */ +export interface ErrorResponse { + [key: string]: any; + + /** + * A human-readable message describing the error. + * @type {string} + * @memberof ErrorResponse + */ + 'message': string; +} +/** + * Defines a replacement rule based on finding an exact string match. + * @export + * @interface ExactRule + */ +export interface ExactRule { + /** + * Discriminator field identifying the rule type as \'exact\'. + * @type {string} + * @memberof ExactRule + */ + 'kind': ExactRuleKindEnum; + /** + * The exact text string to search for within the transcription. + * @type {string} + * @memberof ExactRule + */ + 'search': string; + /** + * The text string to replace the found \'search\' text with. + * @type {string} + * @memberof ExactRule + */ + 'replacement': string; + /** + * If true, the search will match only if the case is identical. If false (default), the search ignores case. + * @type {boolean} + * @memberof ExactRule + */ + 'caseSensitive'?: boolean; +} + +export const ExactRuleKindEnum = { + Exact: 'exact' +} as const; + +export type ExactRuleKindEnum = typeof ExactRuleKindEnum[keyof typeof ExactRuleKindEnum]; + +/** + * The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + * @export + * @enum {string} + */ + +export const OpenAIAudioResponseFormat = { + Json: 'json', + Text: 'text', + Srt: 'srt', + VerboseJson: 'verbose_json', + Vtt: 'vtt' +} as const; + +export type OpenAIAudioResponseFormat = typeof OpenAIAudioResponseFormat[keyof typeof OpenAIAudioResponseFormat]; + + +/** + * Represents a transcription response returned by model, based on the provided input. + * @export + * @interface OpenAICreateTranscriptionResponseJson + */ +export interface OpenAICreateTranscriptionResponseJson { + /** + * The transcribed text. + * @type {string} + * @memberof OpenAICreateTranscriptionResponseJson + */ + 'text': string; +} +/** + * Represents a verbose json transcription response returned by model, based on the provided input. + * @export + * @interface OpenAICreateTranscriptionResponseVerboseJson + */ +export interface OpenAICreateTranscriptionResponseVerboseJson { + /** + * The language of the input audio. + * @type {string} + * @memberof OpenAICreateTranscriptionResponseVerboseJson + */ + 'language': string; + /** + * The duration of the input audio. + * @type {number} + * @memberof OpenAICreateTranscriptionResponseVerboseJson + */ + 'duration': number; + /** + * The transcribed text. + * @type {string} + * @memberof OpenAICreateTranscriptionResponseVerboseJson + */ + 'text': string; + /** + * Extracted words and their corresponding timestamps. + * @type {Array} + * @memberof OpenAICreateTranscriptionResponseVerboseJson + */ + 'words'?: Array; + /** + * Segments of the transcribed text and their corresponding details. + * @type {Array} + * @memberof OpenAICreateTranscriptionResponseVerboseJson + */ + 'segments'?: Array; +} +/** + * ID of the model to use. It follows the naming convention provider/model-name + * @export + * @interface OpenAICreateTranslationRequestModel + */ +export interface OpenAICreateTranslationRequestModel { +} +/** + * Standard JSON response for OpenAI-compatible translation requests when `response_format` is `json`. Contains the translated English text. 
+ * @export + * @interface OpenAICreateTranslationResponseJson + */ +export interface OpenAICreateTranslationResponseJson { + /** + * + * @type {string} + * @memberof OpenAICreateTranslationResponseJson + */ + 'text': string; +} +/** + * + * @export + * @interface OpenAICreateTranslationResponseVerboseJson + */ +export interface OpenAICreateTranslationResponseVerboseJson { + /** + * The language of the output translation (always `english`). + * @type {string} + * @memberof OpenAICreateTranslationResponseVerboseJson + */ + 'language': string; + /** + * The duration of the input audio. + * @type {string} + * @memberof OpenAICreateTranslationResponseVerboseJson + */ + 'duration': string; + /** + * The translated text. + * @type {string} + * @memberof OpenAICreateTranslationResponseVerboseJson + */ + 'text': string; + /** + * Segments of the translated text and their corresponding details. + * @type {Array} + * @memberof OpenAICreateTranslationResponseVerboseJson + */ + 'segments'?: Array; +} +/** + * Represents a segment of transcribed or translated text, based on OpenAI\'s verbose JSON structure. + * @export + * @interface OpenAITranscriptionSegment + */ +export interface OpenAITranscriptionSegment { + /** + * Unique identifier of the segment. + * @type {number} + * @memberof OpenAITranscriptionSegment + */ + 'id': number; + /** + * Seek offset of the segment. + * @type {number} + * @memberof OpenAITranscriptionSegment + */ + 'seek': number; + /** + * Start time of the segment in seconds. + * @type {number} + * @memberof OpenAITranscriptionSegment + */ + 'start': number; + /** + * End time of the segment in seconds. + * @type {number} + * @memberof OpenAITranscriptionSegment + */ + 'end': number; + /** + * Text content of the segment. + * @type {string} + * @memberof OpenAITranscriptionSegment + */ + 'text': string; + /** + * Array of token IDs for the text content. + * @type {Array} + * @memberof OpenAITranscriptionSegment + */ + 'tokens': Array; + /** + * Temperature parameter used for generating the segment. + * @type {number} + * @memberof OpenAITranscriptionSegment + */ + 'temperature': number; + /** + * Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. + * @type {number} + * @memberof OpenAITranscriptionSegment + */ + 'avg_logprob': number; + /** + * Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. + * @type {number} + * @memberof OpenAITranscriptionSegment + */ + 'compression_ratio': number; + /** + * Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. + * @type {number} + * @memberof OpenAITranscriptionSegment + */ + 'no_speech_prob': number; +} +/** + * Represents a single word identified during transcription, including its start and end times. Included in `verbose_json` response when `word` granularity is requested. + * @export + * @interface OpenAITranscriptionWord + */ +export interface OpenAITranscriptionWord { + /** + * The text content of the word. + * @type {string} + * @memberof OpenAITranscriptionWord + */ + 'word': string; + /** + * Start time of the word in seconds. + * @type {number} + * @memberof OpenAITranscriptionWord + */ + 'start': number; + /** + * End time of the word in seconds. 
+ * @type {number} + * @memberof OpenAITranscriptionWord + */ + 'end': number; +} +/** + * @type OpenaiCompatibleCreateTranscription200Response + * @export + */ +export type OpenaiCompatibleCreateTranscription200Response = OpenAICreateTranscriptionResponseJson | OpenAICreateTranscriptionResponseVerboseJson; + +/** + * @type OpenaiCompatibleCreateTranslation200Response + * @export + */ +export type OpenaiCompatibleCreateTranslation200Response = OpenAICreateTranslationResponseJson | OpenAICreateTranslationResponseVerboseJson; + +/** + * Defines a replacement rule that uses regex capture groups to apply different replacements to different parts of the matched text. + * @export + * @interface RegexGroupRule + */ +export interface RegexGroupRule { + /** + * Discriminator field identifying the rule type as \'regex_group\'. + * @type {string} + * @memberof RegexGroupRule + */ + 'kind': RegexGroupRuleKindEnum; + /** + * The regular expression pattern containing capture groups `(...)`. The entire pattern must match for replacements to occur. + * @type {string} + * @memberof RegexGroupRule + */ + 'pattern': string; + /** + * An object where keys are capture group numbers (as strings, e.g., \"1\", \"2\") and values are the respective replacement strings for those groups. Groups not listed are kept as matched. The entire match is reconstructed using these replacements. + * @type {{ [key: string]: string; }} + * @memberof RegexGroupRule + */ + 'groupReplacements': { [key: string]: string; }; + /** + * An array of flags to modify the regex behavior. + * @type {Array} + * @memberof RegexGroupRule + */ + 'flags'?: Array; +} + +export const RegexGroupRuleKindEnum = { + RegexGroup: 'regex_group' +} as const; + +export type RegexGroupRuleKindEnum = typeof RegexGroupRuleKindEnum[keyof typeof RegexGroupRuleKindEnum]; +export const RegexGroupRuleFlagsEnum = { + I: 'i', + M: 'm', + S: 's', + X: 'x', + U: 'u' +} as const; + +export type RegexGroupRuleFlagsEnum = typeof RegexGroupRuleFlagsEnum[keyof typeof RegexGroupRuleFlagsEnum]; + +/** + * Defines a replacement rule based on matching a regular expression pattern. + * @export + * @interface RegexRule + */ +export interface RegexRule { + /** + * Discriminator field identifying the rule type as \'regex\'. + * @type {string} + * @memberof RegexRule + */ + 'kind': RegexRuleKindEnum; + /** + * The regular expression pattern to search for. Uses standard regex syntax (implementation specific, often PCRE-like). Remember to escape special characters if needed (e.g., `\\\\.` for a literal dot). + * @type {string} + * @memberof RegexRule + */ + 'pattern': string; + /** + * The replacement text. Can include backreferences to capture groups from the pattern, like `$1`, `$2`, etc. A literal `$` should be escaped (e.g., `$$`). + * @type {string} + * @memberof RegexRule + */ + 'replacement': string; + /** + * An array of flags to modify the regex behavior (e.g., \'i\' for case-insensitivity). + * @type {Array} + * @memberof RegexRule + */ + 'flags'?: Array; +} + +export const RegexRuleKindEnum = { + Regex: 'regex' +} as const; + +export type RegexRuleKindEnum = typeof RegexRuleKindEnum[keyof typeof RegexRuleKindEnum]; +export const RegexRuleFlagsEnum = { + I: 'i', + M: 'm', + S: 's', + X: 'x', + U: 'u' +} as const; + +export type RegexRuleFlagsEnum = typeof RegexRuleFlagsEnum[keyof typeof RegexRuleFlagsEnum]; + +/** + * @type ReplacementRule + * Defines a single rule for finding and replacing text in a transcription. 
Use one of the specific rule types (`ExactRule`, `RegexRule`, `RegexGroupRule`). The `kind` property acts as a discriminator. + * @export + */ +export type ReplacementRule = { kind: 'exact' } & ExactRule | { kind: 'regex' } & RegexRule | { kind: 'regex_group' } & RegexGroupRule; + +/** + * Describes an available speech-to-text model, its provider, capabilities, and characteristics. + * @export + * @interface SpeechToTextModel + */ +export interface SpeechToTextModel { + /** + * + * @type {TranscriptionModelIdentifier} + * @memberof SpeechToTextModel + */ + 'id': TranscriptionModelIdentifier; + /** + * A user-friendly name for the model. + * @type {string} + * @memberof SpeechToTextModel + */ + 'display_name': string; + /** + * + * @type {TranscriptionProvider} + * @memberof SpeechToTextModel + */ + 'provider': TranscriptionProvider; + /** + * A brief description of the model, its intended use case, or version notes. + * @type {string} + * @memberof SpeechToTextModel + */ + 'description'?: string | null; + /** + * The cost per second of audio processed in USD. + * @type {number} + * @memberof SpeechToTextModel + */ + 'cost_per_second_usd'?: number | null; + /** + * Indicates whether the model is currently available for use. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'is_available': boolean; + /** + * A list of language codes (preferably BCP 47, e.g., \"en-US\", \"en-GB\", \"es-ES\") supported by this model. May include `auto` if automatic language detection is supported across multiple languages within a single audio file. + * @type {Array} + * @memberof SpeechToTextModel + */ + 'supported_languages'?: Array | null; + /** + * Indicates whether the model generally supports automatic punctuation insertion. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'punctuation'?: boolean | null; + /** + * Indicates whether the model generally supports speaker diarization (identifying different speakers). + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'diarization'?: boolean | null; + /** + * Indicates whether the model can be used for real-time streaming transcription via a WebSocket connection (if offered by Speechall). + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'streamable'?: boolean | null; + /** + * An approximate measure of processing speed for batch processing. Defined as (audio duration) / (processing time). A higher value means faster processing (e.g., RTF=2 means it processes 1 second of audio in 0.5 seconds). May not be available for all models or streaming scenarios. + * @type {number} + * @memberof SpeechToTextModel + */ + 'real_time_factor'?: number | null; + /** + * The maximum duration of a single audio file (in seconds) that the model can reliably process in one request. May vary by provider or plan. + * @type {number} + * @memberof SpeechToTextModel + */ + 'max_duration_seconds'?: number | null; + /** + * The maximum size of a single audio file (in bytes) that can be uploaded for processing by this model. May vary by provider or plan. + * @type {number} + * @memberof SpeechToTextModel + */ + 'max_file_size_bytes'?: number | null; + /** + * The specific version identifier for the model. + * @type {string} + * @memberof SpeechToTextModel + */ + 'version'?: string | null; + /** + * The date when this specific version of the model was released or last updated. + * @type {string} + * @memberof SpeechToTextModel + */ + 'release_date'?: string | null; + /** + * The primary type or training domain of the model. 
Helps identify suitability for different audio types. + * @type {string} + * @memberof SpeechToTextModel + */ + 'model_type'?: SpeechToTextModelModelTypeEnum | null; + /** + * A general indication of the model\'s expected accuracy level relative to other models. Not a guaranteed metric. + * @type {string} + * @memberof SpeechToTextModel + */ + 'accuracy_tier'?: SpeechToTextModelAccuracyTierEnum | null; + /** + * A list of audio encodings that this model supports or is optimized for (e.g., LINEAR16, FLAC, MP3, Opus). + * @type {Array} + * @memberof SpeechToTextModel + */ + 'supported_audio_encodings'?: Array | null; + /** + * A list of audio sample rates (in Hz) that this model supports or is optimized for. + * @type {Array} + * @memberof SpeechToTextModel + */ + 'supported_sample_rates'?: Array | null; + /** + * Indicates whether the model can provide speaker labels for the transcription. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'speaker_labels'?: boolean | null; + /** + * Indicates whether the model can provide timestamps for individual words. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'word_timestamps'?: boolean | null; + /** + * Indicates whether the model provides confidence scores for the transcription or individual words. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'confidence_scores'?: boolean | null; + /** + * Indicates whether the model supports automatic language detection for input audio. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'language_detection'?: boolean | null; + /** + * Indicates if the model can leverage a custom vocabulary or language model adaptation. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'custom_vocabulary_support'?: boolean | null; + /** + * Indicates if the model supports filtering or masking of profanity. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'profanity_filtering'?: boolean | null; + /** + * Indicates if the model supports noise reduction. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'noise_reduction'?: boolean | null; + /** + * Indicates whether the model supports SRT subtitle format output. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'supports_srt': boolean; + /** + * Indicates whether the model supports VTT subtitle format output. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'supports_vtt': boolean; + /** + * Indicates whether the model supports voice activity detection (VAD) to identify speech segments. + * @type {boolean} + * @memberof SpeechToTextModel + */ + 'voice_activity_detection'?: boolean | null; +} + +export const SpeechToTextModelModelTypeEnum = { + General: 'general', + PhoneCall: 'phone_call', + Video: 'video', + CommandAndSearch: 'command_and_search', + Medical: 'medical', + Legal: 'legal', + Voicemail: 'voicemail', + Meeting: 'meeting' +} as const; + +export type SpeechToTextModelModelTypeEnum = typeof SpeechToTextModelModelTypeEnum[keyof typeof SpeechToTextModelModelTypeEnum]; +export const SpeechToTextModelAccuracyTierEnum = { + Basic: 'basic', + Standard: 'standard', + Enhanced: 'enhanced', + Premium: 'premium' +} as const; + +export type SpeechToTextModelAccuracyTierEnum = typeof SpeechToTextModelAccuracyTierEnum[keyof typeof SpeechToTextModelAccuracyTierEnum]; + +/** + * The language code of the audio file, typically in ISO 639-1 format. Specifying the correct language improves transcription accuracy and speed. 
The special value `auto` can be used to request automatic language detection, if supported by the selected model. If omitted, the default language is English (`en`). + * @export + * @enum {string} + */ + +export const TranscriptLanguageCode = { + Auto: 'auto', + En: 'en', + EnAu: 'en_au', + EnUk: 'en_uk', + EnUs: 'en_us', + Af: 'af', + Am: 'am', + Ar: 'ar', + As: 'as', + Az: 'az', + Ba: 'ba', + Be: 'be', + Bg: 'bg', + Bn: 'bn', + Bo: 'bo', + Br: 'br', + Bs: 'bs', + Ca: 'ca', + Cs: 'cs', + Cy: 'cy', + Da: 'da', + De: 'de', + El: 'el', + Es: 'es', + Et: 'et', + Eu: 'eu', + Fa: 'fa', + Fi: 'fi', + Fo: 'fo', + Fr: 'fr', + Gl: 'gl', + Gu: 'gu', + Ha: 'ha', + Haw: 'haw', + He: 'he', + Hi: 'hi', + Hr: 'hr', + Ht: 'ht', + Hu: 'hu', + Hy: 'hy', + Id: 'id', + Is: 'is', + It: 'it', + Ja: 'ja', + Jw: 'jw', + Ka: 'ka', + Kk: 'kk', + Km: 'km', + Kn: 'kn', + Ko: 'ko', + La: 'la', + Lb: 'lb', + Ln: 'ln', + Lo: 'lo', + Lt: 'lt', + Lv: 'lv', + Mg: 'mg', + Mi: 'mi', + Mk: 'mk', + Ml: 'ml', + Mn: 'mn', + Mr: 'mr', + Ms: 'ms', + Mt: 'mt', + My: 'my', + Ne: 'ne', + Nl: 'nl', + Nn: 'nn', + False: 'false', + Oc: 'oc', + Pa: 'pa', + Pl: 'pl', + Ps: 'ps', + Pt: 'pt', + Ro: 'ro', + Ru: 'ru', + Sa: 'sa', + Sd: 'sd', + Si: 'si', + Sk: 'sk', + Sl: 'sl', + Sn: 'sn', + So: 'so', + Sq: 'sq', + Sr: 'sr', + Su: 'su', + Sv: 'sv', + Sw: 'sw', + Ta: 'ta', + Te: 'te', + Tg: 'tg', + Th: 'th', + Tk: 'tk', + Tl: 'tl', + Tr: 'tr', + Tt: 'tt', + Uk: 'uk', + Ur: 'ur', + Uz: 'uz', + Vi: 'vi', + Yi: 'yi', + Yo: 'yo', + Zh: 'zh' +} as const; + +export type TranscriptLanguageCode = typeof TranscriptLanguageCode[keyof typeof TranscriptLanguageCode]; + + +/** + * Specifies the desired format of the transcription output. - `text`: Plain text containing the full transcription. - `json_text`: A simple JSON object containing the transcription ID and the full text (`TranscriptionOnlyText` schema). - `json`: A detailed JSON object including segments, timestamps (based on `timestamp_granularity`), language, and potentially speaker labels and provider metadata (`TranscriptionDetailed` schema). - `srt`: SubRip subtitle format (returned as plain text). - `vtt`: WebVTT subtitle format (returned as plain text). + * @export + * @enum {string} + */ + +export const TranscriptOutputFormat = { + Text: 'text', + JsonText: 'json_text', + Json: 'json', + Srt: 'srt', + Vtt: 'vtt' +} as const; + +export type TranscriptOutputFormat = typeof TranscriptOutputFormat[keyof typeof TranscriptOutputFormat]; + + +/** + * A detailed JSON response format containing the full text, detected language, duration, individual timed segments, and potentially speaker labels and provider-specific metadata. Returned when `output_format` is `json`. + * @export + * @interface TranscriptionDetailed + */ +export interface TranscriptionDetailed { + /** + * A unique identifier for the transcription job/request. + * @type {string} + * @memberof TranscriptionDetailed + */ + 'id': string; + /** + * The full transcribed text as a single string. + * @type {string} + * @memberof TranscriptionDetailed + */ + 'text': string; + /** + * The detected or specified language of the audio (ISO 639-1 code). + * @type {string} + * @memberof TranscriptionDetailed + */ + 'language'?: string; + /** + * The total duration of the processed audio file in seconds. **Deprecated**: This property may be removed in future versions as duration analysis might occur asynchronously. Rely on segment end times for duration information if needed. 
+ * @type {number} + * @memberof TranscriptionDetailed + * @deprecated + */ + 'duration'?: number; + /** + * An array of transcribed segments, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. + * @type {Array} + * @memberof TranscriptionDetailed + */ + 'segments'?: Array; + /** + * An array of transcribed words, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. + * @type {Array} + * @memberof TranscriptionDetailed + */ + 'words'?: Array; + /** + * An optional object containing additional metadata returned directly from the underlying STT provider. The structure of this object is provider-dependent. + * @type {{ [key: string]: any; }} + * @memberof TranscriptionDetailed + * @deprecated + */ + 'provider_metadata'?: { [key: string]: any; }; +} +/** + * Unique identifier for a specific Speech-to-Text model, composed as `provider.model_name`. Used to select the engine for transcription. + * @export + * @enum {string} + */ + +export const TranscriptionModelIdentifier = { + AmazonTranscribe: 'amazon.transcribe', + AssemblyaiBest: 'assemblyai.best', + AssemblyaiNano: 'assemblyai.nano', + AzureStandard: 'azure.standard', + CloudflareWhisper: 'cloudflare.whisper', + DeepgramBase: 'deepgram.base', + DeepgramBaseConversationalai: 'deepgram.base-conversationalai', + DeepgramBaseFinance: 'deepgram.base-finance', + DeepgramBaseGeneral: 'deepgram.base-general', + DeepgramBaseMeeting: 'deepgram.base-meeting', + DeepgramBasePhonecall: 'deepgram.base-phonecall', + DeepgramBaseVideo: 'deepgram.base-video', + DeepgramBaseVoicemail: 'deepgram.base-voicemail', + DeepgramEnhanced: 'deepgram.enhanced', + DeepgramEnhancedFinance: 'deepgram.enhanced-finance', + DeepgramEnhancedGeneral: 'deepgram.enhanced-general', + DeepgramEnhancedMeeting: 'deepgram.enhanced-meeting', + DeepgramEnhancedPhonecall: 'deepgram.enhanced-phonecall', + DeepgramNova: 'deepgram.nova', + DeepgramNova2: 'deepgram.nova-2', + DeepgramNova2Atc: 'deepgram.nova-2-atc', + DeepgramNova2Automotive: 'deepgram.nova-2-automotive', + DeepgramNova2Conversationalai: 'deepgram.nova-2-conversationalai', + DeepgramNova2Drivethru: 'deepgram.nova-2-drivethru', + DeepgramNova2Finance: 'deepgram.nova-2-finance', + DeepgramNova2General: 'deepgram.nova-2-general', + DeepgramNova2Medical: 'deepgram.nova-2-medical', + DeepgramNova2Meeting: 'deepgram.nova-2-meeting', + DeepgramNova2Phonecall: 'deepgram.nova-2-phonecall', + DeepgramNova2Video: 'deepgram.nova-2-video', + DeepgramNova2Voicemail: 'deepgram.nova-2-voicemail', + DeepgramNova3: 'deepgram.nova-3', + DeepgramNovaGeneral: 'deepgram.nova-general', + DeepgramNovaPhonecall: 'deepgram.nova-phonecall', + DeepgramWhisper: 'deepgram.whisper', + DeepgramWhisperBase: 'deepgram.whisper-base', + DeepgramWhisperLarge: 'deepgram.whisper-large', + DeepgramWhisperMedium: 'deepgram.whisper-medium', + DeepgramWhisperSmall: 'deepgram.whisper-small', + DeepgramWhisperTiny: 'deepgram.whisper-tiny', + FalaiWhisper: 'falai.whisper', + FalaiWizper: 'falai.wizper', + FireworksaiWhisperV3: 'fireworksai.whisper-v3', + FireworksaiWhisperV3Turbo: 'fireworksai.whisper-v3-turbo', + GladiaStandard: 'gladia.standard', + GoogleEnhanced: 'google.enhanced', + GoogleStandard: 'google.standard', + GroqDistilWhisperLargeV3En: 
'groq.distil-whisper-large-v3-en', + GroqWhisperLargeV3: 'groq.whisper-large-v3', + GroqWhisperLargeV3Turbo: 'groq.whisper-large-v3-turbo', + IbmStandard: 'ibm.standard', + OpenaiWhisper1: 'openai.whisper-1', + OpenaiGpt4oTranscribe: 'openai.gpt-4o-transcribe', + OpenaiGpt4oMiniTranscribe: 'openai.gpt-4o-mini-transcribe', + RevaiMachine: 'revai.machine', + SpeechmaticsEnhanced: 'speechmatics.enhanced', + SpeechmaticsStandard: 'speechmatics.standard' +} as const; + +export type TranscriptionModelIdentifier = typeof TranscriptionModelIdentifier[keyof typeof TranscriptionModelIdentifier]; + + +/** + * A simplified JSON response format containing only the transcription ID and the full transcribed text. Returned when `output_format` is `json_text`. + * @export + * @interface TranscriptionOnlyText + */ +export interface TranscriptionOnlyText { + /** + * A unique identifier for the transcription job/request. + * @type {string} + * @memberof TranscriptionOnlyText + */ + 'id': string; + /** + * The full transcribed text as a single string. + * @type {string} + * @memberof TranscriptionOnlyText + */ + 'text': string; +} +/** + * Configuration options for transcribing audio specified by a remote URL via the `/transcribe-remote` endpoint. + * @export + * @interface TranscriptionOptions + */ +export interface TranscriptionOptions { + /** + * The publicly accessible URL of the audio file to transcribe. The API server must be able to fetch the audio from this URL. + * @type {string} + * @memberof TranscriptionOptions + */ + 'file_url': string; + /** + * + * @type {TranscriptionModelIdentifier} + * @memberof TranscriptionOptions + */ + 'model': TranscriptionModelIdentifier; + /** + * + * @type {TranscriptLanguageCode} + * @memberof TranscriptionOptions + */ + 'language'?: TranscriptLanguageCode; + /** + * + * @type {TranscriptOutputFormat} + * @memberof TranscriptionOptions + */ + 'output_format'?: TranscriptOutputFormat; + /** + * Whether to add punctuation. Support varies by model (e.g., Deepgram, AssemblyAI). Defaults to `true`. + * @type {boolean} + * @memberof TranscriptionOptions + */ + 'punctuation'?: boolean; + /** + * Level of timestamp detail (`word` or `segment`). Defaults to `segment`. + * @type {string} + * @memberof TranscriptionOptions + */ + 'timestamp_granularity'?: TranscriptionOptionsTimestampGranularityEnum; + /** + * Enable speaker diarization. Defaults to `false`. + * @type {boolean} + * @memberof TranscriptionOptions + */ + 'diarization'?: boolean; + /** + * Optional text prompt to guide the transcription model. Support varies (e.g., OpenAI). + * @type {string} + * @memberof TranscriptionOptions + */ + 'initial_prompt'?: string; + /** + * Controls output randomness for supported models (e.g., OpenAI). Value between 0 and 1. + * @type {number} + * @memberof TranscriptionOptions + */ + 'temperature'?: number; + /** + * Enable provider-specific smart formatting (e.g., Deepgram). Defaults vary. + * @type {boolean} + * @memberof TranscriptionOptions + */ + 'smart_format'?: boolean; + /** + * Hint for the number of expected speakers for diarization (e.g., RevAI, Deepgram). + * @type {number} + * @memberof TranscriptionOptions + */ + 'speakers_expected'?: number; + /** + * List of custom words/phrases to improve recognition (e.g., Deepgram, AssemblyAI). + * @type {Array} + * @memberof TranscriptionOptions + */ + 'custom_vocabulary'?: Array; + /** + * An array of replacement rules to be applied directly to this transcription request, in order. 
+
+export const TranscriptionOptionsTimestampGranularityEnum = {
+    Word: 'word',
+    Segment: 'segment'
+} as const;
+
+export type TranscriptionOptionsTimestampGranularityEnum = typeof TranscriptionOptionsTimestampGranularityEnum[keyof typeof TranscriptionOptionsTimestampGranularityEnum];
+
+/**
+ * The identifier for the underlying Speech-to-Text service provider (e.g., 'openai', 'deepgram').
+ * @export
+ * @enum {string}
+ */
+
+export const TranscriptionProvider = {
+    Amazon: 'amazon',
+    Assemblyai: 'assemblyai',
+    Azure: 'azure',
+    Cloudflare: 'cloudflare',
+    Deepgram: 'deepgram',
+    Falai: 'falai',
+    Fireworksai: 'fireworksai',
+    Gladia: 'gladia',
+    Google: 'google',
+    Groq: 'groq',
+    Ibm: 'ibm',
+    Openai: 'openai',
+    Revai: 'revai',
+    Speechmatics: 'speechmatics'
+} as const;
+
+export type TranscriptionProvider = typeof TranscriptionProvider[keyof typeof TranscriptionProvider];
+
+
+/**
+ * @type TranscriptionResponse
+ * Represents the JSON structure returned when a JSON-based `output_format` (`json` or `json_text`) is requested. It can be either a detailed structure or a simple text-only structure.
+ * @export
+ */
+export type TranscriptionResponse = TranscriptionDetailed | TranscriptionOnlyText;
+
+/**
+ * Represents a time-coded segment of the transcription, typically corresponding to a phrase, sentence, or speaker turn.
+ * @export
+ * @interface TranscriptionSegment
+ */
+export interface TranscriptionSegment {
+    /**
+     * The start time of the segment in seconds from the beginning of the audio.
+     * @type {number}
+     * @memberof TranscriptionSegment
+     */
+    'start'?: number;
+    /**
+     * The end time of the segment in seconds from the beginning of the audio.
+     * @type {number}
+     * @memberof TranscriptionSegment
+     */
+    'end'?: number;
+    /**
+     * The transcribed text content of this segment.
+     * @type {string}
+     * @memberof TranscriptionSegment
+     */
+    'text'?: string;
+    /**
+     * An identifier for the speaker of this segment, present if diarization was enabled and successful.
+     * @type {string}
+     * @memberof TranscriptionSegment
+     */
+    'speaker'?: string;
+    /**
+     * The model's confidence score for the transcription of this segment, typically between 0 and 1 (if provided by the model).
+     * @type {number}
+     * @memberof TranscriptionSegment
+     */
+    'confidence'?: number;
+}
+/**
+ * Represents a word in the transcription, providing time-coded chunks of the transcription.
+ * @export
+ * @interface TranscriptionWord
+ */
+export interface TranscriptionWord {
+    /**
+     * The start time of the word in seconds from the beginning of the audio.
+     * @type {number}
+     * @memberof TranscriptionWord
+     */
+    'start': number;
+    /**
+     * The end time of the word in seconds from the beginning of the audio.
+     * @type {number}
+     * @memberof TranscriptionWord
+     */
+    'end': number;
+    /**
+     * The transcribed word.
+     * @type {string}
+     * @memberof TranscriptionWord
+     */
+    'word': string;
+    /**
+     * An identifier for the speaker of this word, present if diarization was enabled and successful.
+     * @type {string}
+     * @memberof TranscriptionWord
+     */
+    'speaker'?: string;
+    /**
+     * The model's confidence score for the transcription of this word, typically between 0 and 1 (if provided by the model).
+     * @type {number}
+     * @memberof TranscriptionWord
+     */
+    'confidence'?: number;
+}
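+
+// Illustrative sketch (editorial, not generated code): consuming a detailed JSON
+// response. `segments` and `words` are optional, and `speaker` is only populated when
+// diarization was enabled.
+//
+//   function printSegments(transcription: TranscriptionDetailed): void {
+//     for (const segment of transcription.segments ?? []) {
+//       const speaker = segment.speaker ? `[${segment.speaker}] ` : '';
+//       console.log(`${segment.start}s-${segment.end}s ${speaker}${segment.text ?? ''}`);
+//     }
+//   }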
+
+/**
+ * OpenAICompatibleSpeechToTextApi - axios parameter creator
+ * @export
+ */
+export const OpenAICompatibleSpeechToTextApiAxiosParamCreator = function (configuration?: Configuration) {
+    return {
+        /**
+         * Mimics the OpenAI `/audio/transcriptions` endpoint. Accepts audio file uploads via `multipart/form-data`. Allows specifying model, language, prompt, response format, temperature, and timestamp granularity similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format.
+         * @summary Transcribes audio into the input language, using OpenAI-compatible request format.
+         * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+         * @param {TranscriptionModelIdentifier} model
+         * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
+         * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+         * @param {OpenAIAudioResponseFormat} [responseFormat]
+         * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {Array<OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum>} [timestampGranularities] The timestamp granularities to populate for this transcription. `response_format` must be set to `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        openaiCompatibleCreateTranscription: async (file: File, model: TranscriptionModelIdentifier, language?: string, prompt?: string, responseFormat?: OpenAIAudioResponseFormat, temperature?: number, timestampGranularities?: Array<OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum>, options: RawAxiosRequestConfig = {}): Promise<RequestArgs> => {
+            // verify required parameter 'file' is not null or undefined
+            assertParamExists('openaiCompatibleCreateTranscription', 'file', file)
+            // verify required parameter 'model' is not null or undefined
+            assertParamExists('openaiCompatibleCreateTranscription', 'model', model)
+            const localVarPath = `/openai-compatible/audio/transcriptions`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+            const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
+
+            // authentication bearerAuth required
+            // http bearer authentication required
+            await setBearerAuthToObject(localVarHeaderParameter, configuration)
+
+            if (file !== undefined) {
+                localVarFormParams.append('file', file as any);
+            }
+
+            if (model !== undefined) {
+                localVarFormParams.append('model', model as any);
+            }
+
+            if (language !== undefined) {
+                localVarFormParams.append('language', language as any);
+            }
+
+            if (prompt !== undefined) {
+                localVarFormParams.append('prompt', prompt as any);
+            }
+
+            if (responseFormat !== undefined) {
+                localVarFormParams.append('response_format', responseFormat as any);
+            }
+
+            if (temperature !== undefined) {
+                localVarFormParams.append('temperature', temperature as any);
+            }
+
+            if (timestampGranularities) {
+                localVarFormParams.append('timestamp_granularities[]', timestampGranularities.join(COLLECTION_FORMATS.csv));
+            }
+
+            localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
+
+            setSearchParams(localVarUrlObj, localVarQueryParameter);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+            localVarRequestOptions.data = localVarFormParams;
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+        /**
+         * Mimics the OpenAI `/audio/translations` endpoint. Accepts audio file uploads via `multipart/form-data` and translates the speech into English text. Allows specifying model, prompt, response format, and temperature similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format (ensure the selected model supports translation).
+         * @summary Translates audio into English, using OpenAI-compatible request format.
+         * @param {File} file The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+         * @param {OpenAICreateTranslationRequestModel} model
+         * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
+         * @param {OpenAIAudioResponseFormat} [responseFormat]
+         * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        openaiCompatibleCreateTranslation: async (file: File, model: OpenAICreateTranslationRequestModel, prompt?: string, responseFormat?: OpenAIAudioResponseFormat, temperature?: number, options: RawAxiosRequestConfig = {}): Promise<RequestArgs> => {
+            // verify required parameter 'file' is not null or undefined
+            assertParamExists('openaiCompatibleCreateTranslation', 'file', file)
+            // verify required parameter 'model' is not null or undefined
+            assertParamExists('openaiCompatibleCreateTranslation', 'model', model)
+            const localVarPath = `/openai-compatible/audio/translations`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+            const localVarFormParams = new ((configuration && configuration.formDataCtor) || FormData)();
+
+            // authentication bearerAuth required
+            // http bearer authentication required
+            await setBearerAuthToObject(localVarHeaderParameter, configuration)
+
+            if (file !== undefined) {
+                localVarFormParams.append('file', file as any);
+            }
+
+            if (model !== undefined) {
+                localVarFormParams.append('model', new Blob([JSON.stringify(model)], { type: "application/json", }));
+            }
+
+            if (prompt !== undefined) {
+                localVarFormParams.append('prompt', prompt as any);
+            }
+
+            if (responseFormat !== undefined) {
+                localVarFormParams.append('response_format', responseFormat as any);
+            }
+
+            if (temperature !== undefined) {
+                localVarFormParams.append('temperature', temperature as any);
+            }
+
+            localVarHeaderParameter['Content-Type'] = 'multipart/form-data';
+
+            setSearchParams(localVarUrlObj, localVarQueryParameter);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+            localVarRequestOptions.data = localVarFormParams;
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+    }
+};
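+
+// Note (editorial, illustrative): the parameter creator above serializes the optional
+// `timestampGranularities` array as a single comma-joined form field, since
+// COLLECTION_FORMATS.csv is ','. For example, ['word', 'segment'] is sent as
+// timestamp_granularities[]=word,segment.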
+
+/**
+ * OpenAICompatibleSpeechToTextApi - functional programming interface
+ * @export
+ */
+export const OpenAICompatibleSpeechToTextApiFp = function(configuration?: Configuration) {
+    const localVarAxiosParamCreator = OpenAICompatibleSpeechToTextApiAxiosParamCreator(configuration)
+    return {
+        /**
+         * Mimics the OpenAI `/audio/transcriptions` endpoint. Accepts audio file uploads via `multipart/form-data`. Allows specifying model, language, prompt, response format, temperature, and timestamp granularity similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format.
+         * @summary Transcribes audio into the input language, using OpenAI-compatible request format.
+         * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+         * @param {TranscriptionModelIdentifier} model
+         * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
+         * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+         * @param {OpenAIAudioResponseFormat} [responseFormat]
+         * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {Array<OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum>} [timestampGranularities] The timestamp granularities to populate for this transcription. `response_format` must be set to `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async openaiCompatibleCreateTranscription(file: File, model: TranscriptionModelIdentifier, language?: string, prompt?: string, responseFormat?: OpenAIAudioResponseFormat, temperature?: number, timestampGranularities?: Array<OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum>, options?: RawAxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.openaiCompatibleCreateTranscription(file, model, language, prompt, responseFormat, temperature, timestampGranularities, options);
+            const localVarOperationServerIndex = configuration?.serverIndex ?? 0;
+            const localVarOperationServerBasePath = operationServerMap['OpenAICompatibleSpeechToTextApi.openaiCompatibleCreateTranscription']?.[localVarOperationServerIndex]?.url;
+            return (axios, basePath) => createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration)(axios, localVarOperationServerBasePath || basePath);
+        },
+        /**
+         * Mimics the OpenAI `/audio/translations` endpoint. Accepts audio file uploads via `multipart/form-data` and translates the speech into English text. Allows specifying model, prompt, response format, and temperature similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format (ensure the selected model supports translation).
+         * @summary Translates audio into English, using OpenAI-compatible request format.
+         * @param {File} file The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+         * @param {OpenAICreateTranslationRequestModel} model
+         * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
+         * @param {OpenAIAudioResponseFormat} [responseFormat]
+         * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async openaiCompatibleCreateTranslation(file: File, model: OpenAICreateTranslationRequestModel, prompt?: string, responseFormat?: OpenAIAudioResponseFormat, temperature?: number, options?: RawAxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.openaiCompatibleCreateTranslation(file, model, prompt, responseFormat, temperature, options);
+            const localVarOperationServerIndex = configuration?.serverIndex ?? 0;
+            const localVarOperationServerBasePath = operationServerMap['OpenAICompatibleSpeechToTextApi.openaiCompatibleCreateTranslation']?.[localVarOperationServerIndex]?.url;
+            return (axios, basePath) => createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration)(axios, localVarOperationServerBasePath || basePath);
+        },
+    }
+};
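+
+// Illustrative sketch (editorial, not generated code): the functional layer returns a
+// request factory that you invoke with an axios instance and base path; with no
+// arguments it falls back to the module defaults in the standard generator template.
+// `configuration` and `audioFile` are assumed to exist.
+//
+//   const fp = OpenAICompatibleSpeechToTextApiFp(configuration);
+//   const makeRequest = await fp.openaiCompatibleCreateTranscription(
+//     audioFile, TranscriptionModelIdentifier.OpenaiWhisper1);
+//   const response = await makeRequest();
+//   console.log(response.data);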
+
+/**
+ * OpenAICompatibleSpeechToTextApi - factory interface
+ * @export
+ */
+export const OpenAICompatibleSpeechToTextApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
+    const localVarFp = OpenAICompatibleSpeechToTextApiFp(configuration)
+    return {
+        /**
+         * Mimics the OpenAI `/audio/transcriptions` endpoint. Accepts audio file uploads via `multipart/form-data`. Allows specifying model, language, prompt, response format, temperature, and timestamp granularity similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format.
+         * @summary Transcribes audio into the input language, using OpenAI-compatible request format.
+         * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+         * @param {TranscriptionModelIdentifier} model
+         * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
+         * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+         * @param {OpenAIAudioResponseFormat} [responseFormat]
+         * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {Array<OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum>} [timestampGranularities] The timestamp granularities to populate for this transcription. `response_format` must be set to `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        openaiCompatibleCreateTranscription(file: File, model: TranscriptionModelIdentifier, language?: string, prompt?: string, responseFormat?: OpenAIAudioResponseFormat, temperature?: number, timestampGranularities?: Array<OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum>, options?: RawAxiosRequestConfig): AxiosPromise {
+            return localVarFp.openaiCompatibleCreateTranscription(file, model, language, prompt, responseFormat, temperature, timestampGranularities, options).then((request) => request(axios, basePath));
+        },
+        /**
+         * Mimics the OpenAI `/audio/translations` endpoint. Accepts audio file uploads via `multipart/form-data` and translates the speech into English text. Allows specifying model, prompt, response format, and temperature similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format (ensure the selected model supports translation).
+         * @summary Translates audio into English, using OpenAI-compatible request format.
+         * @param {File} file The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+         * @param {OpenAICreateTranslationRequestModel} model
+         * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
+         * @param {OpenAIAudioResponseFormat} [responseFormat]
+         * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        openaiCompatibleCreateTranslation(file: File, model: OpenAICreateTranslationRequestModel, prompt?: string, responseFormat?: OpenAIAudioResponseFormat, temperature?: number, options?: RawAxiosRequestConfig): AxiosPromise {
+            return localVarFp.openaiCompatibleCreateTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(axios, basePath));
+        },
+    };
+};
+
+/**
+ * OpenAICompatibleSpeechToTextApi - object-oriented interface
+ * @export
+ * @class OpenAICompatibleSpeechToTextApi
+ * @extends {BaseAPI}
+ */
+export class OpenAICompatibleSpeechToTextApi extends BaseAPI {
+    /**
+     * Mimics the OpenAI `/audio/transcriptions` endpoint. Accepts audio file uploads via `multipart/form-data`. Allows specifying model, language, prompt, response format, temperature, and timestamp granularity similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format.
+     * @summary Transcribes audio into the input language, using OpenAI-compatible request format.
+     * @param {File} file The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+     * @param {TranscriptionModelIdentifier} model
+     * @param {string} [language] The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
+     * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language.
+     * @param {OpenAIAudioResponseFormat} [responseFormat]
+     * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+     * @param {Array<OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum>} [timestampGranularities] The timestamp granularities to populate for this transcription. `response_format` must be set to `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof OpenAICompatibleSpeechToTextApi
+     */
+    public openaiCompatibleCreateTranscription(file: File, model: TranscriptionModelIdentifier, language?: string, prompt?: string, responseFormat?: OpenAIAudioResponseFormat, temperature?: number, timestampGranularities?: Array<OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum>, options?: RawAxiosRequestConfig) {
+        return OpenAICompatibleSpeechToTextApiFp(this.configuration).openaiCompatibleCreateTranscription(file, model, language, prompt, responseFormat, temperature, timestampGranularities, options).then((request) => request(this.axios, this.basePath));
+    }
+
+    /**
+     * Mimics the OpenAI `/audio/translations` endpoint. Accepts audio file uploads via `multipart/form-data` and translates the speech into English text. Allows specifying model, prompt, response format, and temperature similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format (ensure the selected model supports translation).
+     * @summary Translates audio into English, using OpenAI-compatible request format.
+     * @param {File} file The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+     * @param {OpenAICreateTranslationRequestModel} model
+     * @param {string} [prompt] An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
+     * @param {OpenAIAudioResponseFormat} [responseFormat]
+     * @param {number} [temperature] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof OpenAICompatibleSpeechToTextApi
+     */
+    public openaiCompatibleCreateTranslation(file: File, model: OpenAICreateTranslationRequestModel, prompt?: string, responseFormat?: OpenAIAudioResponseFormat, temperature?: number, options?: RawAxiosRequestConfig) {
+        return OpenAICompatibleSpeechToTextApiFp(this.configuration).openaiCompatibleCreateTranslation(file, model, prompt, responseFormat, temperature, options).then((request) => request(this.axios, this.basePath));
+    }
+}
+
+/**
+ * @export
+ */
+export const OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum = {
+    Word: 'word',
+    Segment: 'segment'
+} as const;
+export type OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum = typeof OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum[keyof typeof OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum];
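+
+// Illustrative usage sketch (editorial, not generated code). Assumes a Configuration
+// carrying a valid API key and a browser File object named `audioFile`; 'verbose_json'
+// is assumed to be a member of OpenAIAudioResponseFormat, per the parameter docs above.
+//
+//   const api = new OpenAICompatibleSpeechToTextApi(
+//     new Configuration({ accessToken: 'YOUR_API_KEY' })
+//   );
+//   const response = await api.openaiCompatibleCreateTranscription(
+//     audioFile,
+//     TranscriptionModelIdentifier.OpenaiWhisper1,
+//     'en',                                        // language
+//     undefined,                                   // prompt
+//     'verbose_json' as OpenAIAudioResponseFormat, // required for timestamp granularities
+//     0,                                           // temperature
+//     [OpenaiCompatibleCreateTranscriptionTimestampGranularitiesEnum.Segment]
+//   );
+//   console.log(response.data);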
+
+/**
+ * ReplacementRulesApi - axios parameter creator
+ * @export
+ */
+export const ReplacementRulesApiAxiosParamCreator = function (configuration?: Configuration) {
+    return {
+        /**
+         * Defines a named set of replacement rules (exact match, regex) that can be applied during transcription requests using its `ruleset_id`. Rules within a set are applied sequentially to the transcription text.
+         * @summary Create a reusable set of text replacement rules.
+         * @param {CreateReplacementRulesetRequest} createReplacementRulesetRequest JSON object containing the name for the ruleset and an array of replacement rule objects.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        createReplacementRuleset: async (createReplacementRulesetRequest: CreateReplacementRulesetRequest, options: RawAxiosRequestConfig = {}): Promise<RequestArgs> => {
+            // verify required parameter 'createReplacementRulesetRequest' is not null or undefined
+            assertParamExists('createReplacementRuleset', 'createReplacementRulesetRequest', createReplacementRulesetRequest)
+            const localVarPath = `/replacement-rulesets`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+            // authentication bearerAuth required
+            // http bearer authentication required
+            await setBearerAuthToObject(localVarHeaderParameter, configuration)
+
+
+            localVarHeaderParameter['Content-Type'] = 'application/json';
+
+            setSearchParams(localVarUrlObj, localVarQueryParameter);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+            localVarRequestOptions.data = serializeDataIfNeeded(createReplacementRulesetRequest, localVarRequestOptions, configuration)
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+    }
+};
+
+/**
+ * ReplacementRulesApi - functional programming interface
+ * @export
+ */
+export const ReplacementRulesApiFp = function(configuration?: Configuration) {
+    const localVarAxiosParamCreator = ReplacementRulesApiAxiosParamCreator(configuration)
+    return {
+        /**
+         * Defines a named set of replacement rules (exact match, regex) that can be applied during transcription requests using its `ruleset_id`. Rules within a set are applied sequentially to the transcription text.
+         * @summary Create a reusable set of text replacement rules.
+         * @param {CreateReplacementRulesetRequest} createReplacementRulesetRequest JSON object containing the name for the ruleset and an array of replacement rule objects.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async createReplacementRuleset(createReplacementRulesetRequest: CreateReplacementRulesetRequest, options?: RawAxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.createReplacementRuleset(createReplacementRulesetRequest, options);
+            const localVarOperationServerIndex = configuration?.serverIndex ?? 0;
+            const localVarOperationServerBasePath = operationServerMap['ReplacementRulesApi.createReplacementRuleset']?.[localVarOperationServerIndex]?.url;
+            return (axios, basePath) => createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration)(axios, localVarOperationServerBasePath || basePath);
+        },
+    }
+};
+
+/**
+ * ReplacementRulesApi - factory interface
+ * @export
+ */
+export const ReplacementRulesApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
+    const localVarFp = ReplacementRulesApiFp(configuration)
+    return {
+        /**
+         * Defines a named set of replacement rules (exact match, regex) that can be applied during transcription requests using its `ruleset_id`. Rules within a set are applied sequentially to the transcription text.
+         * @summary Create a reusable set of text replacement rules.
+         * @param {CreateReplacementRulesetRequest} createReplacementRulesetRequest JSON object containing the name for the ruleset and an array of replacement rule objects.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        createReplacementRuleset(createReplacementRulesetRequest: CreateReplacementRulesetRequest, options?: RawAxiosRequestConfig): AxiosPromise {
+            return localVarFp.createReplacementRuleset(createReplacementRulesetRequest, options).then((request) => request(axios, basePath));
+        },
+    };
+};
+
+/**
+ * ReplacementRulesApi - object-oriented interface
+ * @export
+ * @class ReplacementRulesApi
+ * @extends {BaseAPI}
+ */
+export class ReplacementRulesApi extends BaseAPI {
+    /**
+     * Defines a named set of replacement rules (exact match, regex) that can be applied during transcription requests using its `ruleset_id`. Rules within a set are applied sequentially to the transcription text.
+     * @summary Create a reusable set of text replacement rules.
+     * @param {CreateReplacementRulesetRequest} createReplacementRulesetRequest JSON object containing the name for the ruleset and an array of replacement rule objects.
+     * @param {*} [options] Override http request option.
+     * @throws {RequiredError}
+     * @memberof ReplacementRulesApi
+     */
+    public createReplacementRuleset(createReplacementRulesetRequest: CreateReplacementRulesetRequest, options?: RawAxiosRequestConfig) {
+        return ReplacementRulesApiFp(this.configuration).createReplacementRuleset(createReplacementRulesetRequest, options).then((request) => request(this.axios, this.basePath));
+    }
+}
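+
+// Illustrative sketch (editorial, not generated code): creating a reusable ruleset and
+// applying it to later transcriptions via its id. The request field names (`name`,
+// `rules`) and the response `id` are assumptions based on the endpoint description,
+// not verified against the generated CreateReplacementRulesetRequest type.
+//
+//   const rulesApi = new ReplacementRulesApi(configuration);
+//   const created = await rulesApi.createReplacementRuleset({
+//     name: 'product-names',
+//     rules: [/* exact-match and regex replacement rule objects */],
+//   } as CreateReplacementRulesetRequest);
+//   // Pass created.data.id as `rulesetId` to SpeechToTextApi.transcribe.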
+
+
+/**
+ * SpeechToTextApi - axios parameter creator
+ * @export
+ */
+export const SpeechToTextApiAxiosParamCreator = function (configuration?: Configuration) {
+    return {
+        /**
+         * Returns a detailed list of all STT models accessible through the Speechall API. Each model entry includes its identifier (`provider.model`), display name, description, supported features (languages, formats, punctuation, diarization), and performance characteristics. Use this endpoint to discover available models and their capabilities before making transcription requests.
+         * @summary Retrieve a list of all available speech-to-text models.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        listSpeechToTextModels: async (options: RawAxiosRequestConfig = {}): Promise<RequestArgs> => {
+            const localVarPath = `/speech-to-text-models`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'GET', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+            // authentication bearerAuth required
+            // http bearer authentication required
+            await setBearerAuthToObject(localVarHeaderParameter, configuration)
+
+
+            setSearchParams(localVarUrlObj, localVarQueryParameter);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+        /**
+         * This endpoint allows you to send raw audio data in the request body for transcription. You can specify the desired model, language, output format, and various provider-specific features using query parameters. Suitable for transcribing local audio files.
+         * @summary Upload an audio file directly and receive a transcription.
+         * @param {TranscriptionModelIdentifier} model The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models.
+         * @param {File} body The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration.
+         * @param {TranscriptLanguageCode} [language] The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency.
+         * @param {TranscriptOutputFormat} [outputFormat] The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`.
+         * @param {string} [rulesetId] The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint.
+         * @param {boolean} [punctuation] Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`.
+         * @param {TranscribeTimestampGranularityEnum} [timestampGranularity] Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). `segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). Defaults to `segment`.
+         * @param {boolean} [diarization] Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments.
+         * @param {string} [initialPrompt] An optional text prompt to provide context, guide the model's style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models).
+         * @param {number} [temperature] Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model.
+         * @param {boolean} [smartFormat] Enable provider-specific "smart formatting" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary.
+         * @param {number} [speakersExpected] Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram).
+         * @param {Array<string>} [customVocabulary] Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI).
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        transcribe: async (model: TranscriptionModelIdentifier, body: File, language?: TranscriptLanguageCode, outputFormat?: TranscriptOutputFormat, rulesetId?: string, punctuation?: boolean, timestampGranularity?: TranscribeTimestampGranularityEnum, diarization?: boolean, initialPrompt?: string, temperature?: number, smartFormat?: boolean, speakersExpected?: number, customVocabulary?: Array<string>, options: RawAxiosRequestConfig = {}): Promise<RequestArgs> => {
+            // verify required parameter 'model' is not null or undefined
+            assertParamExists('transcribe', 'model', model)
+            // verify required parameter 'body' is not null or undefined
+            assertParamExists('transcribe', 'body', body)
+            const localVarPath = `/transcribe`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+            // authentication bearerAuth required
+            // http bearer authentication required
+            await setBearerAuthToObject(localVarHeaderParameter, configuration)
+
+            if (model !== undefined) {
+                localVarQueryParameter['model'] = model;
+            }
+
+            if (language !== undefined) {
+                localVarQueryParameter['language'] = language;
+            }
+
+            if (outputFormat !== undefined) {
+                localVarQueryParameter['output_format'] = outputFormat;
+            }
+
+            if (rulesetId !== undefined) {
+                localVarQueryParameter['ruleset_id'] = rulesetId;
+            }
+
+            if (punctuation !== undefined) {
+                localVarQueryParameter['punctuation'] = punctuation;
+            }
+
+            if (timestampGranularity !== undefined) {
+                localVarQueryParameter['timestamp_granularity'] = timestampGranularity;
+            }
+
+            if (diarization !== undefined) {
+                localVarQueryParameter['diarization'] = diarization;
+            }
+
+            if (initialPrompt !== undefined) {
+                localVarQueryParameter['initial_prompt'] = initialPrompt;
+            }
+
+            if (temperature !== undefined) {
+                localVarQueryParameter['temperature'] = temperature;
+            }
+
+            if (smartFormat !== undefined) {
+                localVarQueryParameter['smart_format'] = smartFormat;
+            }
+
+            if (speakersExpected !== undefined) {
+                localVarQueryParameter['speakers_expected'] = speakersExpected;
+            }
+
+            if (customVocabulary) {
+                localVarQueryParameter['custom_vocabulary'] = customVocabulary;
+            }
+
+
+            localVarHeaderParameter['Content-Type'] = 'audio/*';
+
+            setSearchParams(localVarUrlObj, localVarQueryParameter);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+            localVarRequestOptions.data = serializeDataIfNeeded(body, localVarRequestOptions, configuration)
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+        /**
+         * This endpoint allows you to transcribe an audio file hosted at a publicly accessible URL. Provide the URL and transcription options within the JSON request body. Useful for transcribing files already stored online.
+         * @summary Transcribe an audio file located at a remote URL.
+         * @param {TranscriptionOptions} transcriptionOptions JSON object containing the URL of the audio file and the desired transcription options.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        transcribeRemote: async (transcriptionOptions: TranscriptionOptions, options: RawAxiosRequestConfig = {}): Promise<RequestArgs> => {
+            // verify required parameter 'transcriptionOptions' is not null or undefined
+            assertParamExists('transcribeRemote', 'transcriptionOptions', transcriptionOptions)
+            const localVarPath = `/transcribe-remote`;
+            // use dummy base URL string because the URL constructor only accepts absolute URLs.
+            const localVarUrlObj = new URL(localVarPath, DUMMY_BASE_URL);
+            let baseOptions;
+            if (configuration) {
+                baseOptions = configuration.baseOptions;
+            }
+
+            const localVarRequestOptions = { method: 'POST', ...baseOptions, ...options};
+            const localVarHeaderParameter = {} as any;
+            const localVarQueryParameter = {} as any;
+
+            // authentication bearerAuth required
+            // http bearer authentication required
+            await setBearerAuthToObject(localVarHeaderParameter, configuration)
+
+
+            localVarHeaderParameter['Content-Type'] = 'application/json';
+
+            setSearchParams(localVarUrlObj, localVarQueryParameter);
+            let headersFromBaseOptions = baseOptions && baseOptions.headers ? baseOptions.headers : {};
+            localVarRequestOptions.headers = {...localVarHeaderParameter, ...headersFromBaseOptions, ...options.headers};
+            localVarRequestOptions.data = serializeDataIfNeeded(transcriptionOptions, localVarRequestOptions, configuration)
+
+            return {
+                url: toPathString(localVarUrlObj),
+                options: localVarRequestOptions,
+            };
+        },
+    }
+};
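+
+// Note (editorial, illustrative): `transcribe` sends the raw audio bytes as the request
+// body with Content-Type audio/*. The parameter is typed as File for browser callers;
+// in Node.js a Buffer is commonly passed instead, which requires a cast because the
+// generated signature only knows about File:
+//
+//   import { readFileSync } from 'fs';
+//   const audio = readFileSync('recording.wav') as unknown as File; // assumption: axios accepts Buffer bodies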
+
+/**
+ * SpeechToTextApi - functional programming interface
+ * @export
+ */
+export const SpeechToTextApiFp = function(configuration?: Configuration) {
+    const localVarAxiosParamCreator = SpeechToTextApiAxiosParamCreator(configuration)
+    return {
+        /**
+         * Returns a detailed list of all STT models accessible through the Speechall API. Each model entry includes its identifier (`provider.model`), display name, description, supported features (languages, formats, punctuation, diarization), and performance characteristics. Use this endpoint to discover available models and their capabilities before making transcription requests.
+         * @summary Retrieve a list of all available speech-to-text models.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async listSpeechToTextModels(options?: RawAxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise<Array<SpeechToTextModel>>> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.listSpeechToTextModels(options);
+            const localVarOperationServerIndex = configuration?.serverIndex ?? 0;
+            const localVarOperationServerBasePath = operationServerMap['SpeechToTextApi.listSpeechToTextModels']?.[localVarOperationServerIndex]?.url;
+            return (axios, basePath) => createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration)(axios, localVarOperationServerBasePath || basePath);
+        },
+        /**
+         * This endpoint allows you to send raw audio data in the request body for transcription. You can specify the desired model, language, output format, and various provider-specific features using query parameters. Suitable for transcribing local audio files.
+         * @summary Upload an audio file directly and receive a transcription.
+         * @param {TranscriptionModelIdentifier} model The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models.
+         * @param {File} body The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration.
+         * @param {TranscriptLanguageCode} [language] The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency.
+         * @param {TranscriptOutputFormat} [outputFormat] The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`.
+         * @param {string} [rulesetId] The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint.
+         * @param {boolean} [punctuation] Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`.
+         * @param {TranscribeTimestampGranularityEnum} [timestampGranularity] Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). `segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). Defaults to `segment`.
+         * @param {boolean} [diarization] Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments.
+         * @param {string} [initialPrompt] An optional text prompt to provide context, guide the model's style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models).
+         * @param {number} [temperature] Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model.
+         * @param {boolean} [smartFormat] Enable provider-specific "smart formatting" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary.
+         * @param {number} [speakersExpected] Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram).
+         * @param {Array<string>} [customVocabulary] Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI).
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async transcribe(model: TranscriptionModelIdentifier, body: File, language?: TranscriptLanguageCode, outputFormat?: TranscriptOutputFormat, rulesetId?: string, punctuation?: boolean, timestampGranularity?: TranscribeTimestampGranularityEnum, diarization?: boolean, initialPrompt?: string, temperature?: number, smartFormat?: boolean, speakersExpected?: number, customVocabulary?: Array<string>, options?: RawAxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.transcribe(model, body, language, outputFormat, rulesetId, punctuation, timestampGranularity, diarization, initialPrompt, temperature, smartFormat, speakersExpected, customVocabulary, options);
+            const localVarOperationServerIndex = configuration?.serverIndex ?? 0;
+            const localVarOperationServerBasePath = operationServerMap['SpeechToTextApi.transcribe']?.[localVarOperationServerIndex]?.url;
+            return (axios, basePath) => createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration)(axios, localVarOperationServerBasePath || basePath);
+        },
+        /**
+         * This endpoint allows you to transcribe an audio file hosted at a publicly accessible URL. Provide the URL and transcription options within the JSON request body. Useful for transcribing files already stored online.
+         * @summary Transcribe an audio file located at a remote URL.
+         * @param {TranscriptionOptions} transcriptionOptions JSON object containing the URL of the audio file and the desired transcription options.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        async transcribeRemote(transcriptionOptions: TranscriptionOptions, options?: RawAxiosRequestConfig): Promise<(axios?: AxiosInstance, basePath?: string) => AxiosPromise> {
+            const localVarAxiosArgs = await localVarAxiosParamCreator.transcribeRemote(transcriptionOptions, options);
+            const localVarOperationServerIndex = configuration?.serverIndex ?? 0;
+            const localVarOperationServerBasePath = operationServerMap['SpeechToTextApi.transcribeRemote']?.[localVarOperationServerIndex]?.url;
+            return (axios, basePath) => createRequestFunction(localVarAxiosArgs, globalAxios, BASE_PATH, configuration)(axios, localVarOperationServerBasePath || basePath);
+        },
+    }
+};
+
+/**
+ * SpeechToTextApi - factory interface
+ * @export
+ */
+export const SpeechToTextApiFactory = function (configuration?: Configuration, basePath?: string, axios?: AxiosInstance) {
+    const localVarFp = SpeechToTextApiFp(configuration)
+    return {
+        /**
+         * Returns a detailed list of all STT models accessible through the Speechall API. Each model entry includes its identifier (`provider.model`), display name, description, supported features (languages, formats, punctuation, diarization), and performance characteristics. Use this endpoint to discover available models and their capabilities before making transcription requests.
+         * @summary Retrieve a list of all available speech-to-text models.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        listSpeechToTextModels(options?: RawAxiosRequestConfig): AxiosPromise<Array<SpeechToTextModel>> {
+            return localVarFp.listSpeechToTextModels(options).then((request) => request(axios, basePath));
+        },
+        /**
+         * This endpoint allows you to send raw audio data in the request body for transcription. You can specify the desired model, language, output format, and various provider-specific features using query parameters. Suitable for transcribing local audio files.
+         * @summary Upload an audio file directly and receive a transcription.
+         * @param {TranscriptionModelIdentifier} model The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models.
+         * @param {File} body The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration.
+         * @param {TranscriptLanguageCode} [language] The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency.
+         * @param {TranscriptOutputFormat} [outputFormat] The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`.
+         * @param {string} [rulesetId] The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint.
+         * @param {boolean} [punctuation] Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`.
+         * @param {TranscribeTimestampGranularityEnum} [timestampGranularity] Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). `segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). Defaults to `segment`.
+         * @param {boolean} [diarization] Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments.
+         * @param {string} [initialPrompt] An optional text prompt to provide context, guide the model's style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models).
+         * @param {number} [temperature] Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model.
+         * @param {boolean} [smartFormat] Enable provider-specific "smart formatting" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary.
+         * @param {number} [speakersExpected] Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram).
+         * @param {Array<string>} [customVocabulary] Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI).
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        transcribe(model: TranscriptionModelIdentifier, body: File, language?: TranscriptLanguageCode, outputFormat?: TranscriptOutputFormat, rulesetId?: string, punctuation?: boolean, timestampGranularity?: TranscribeTimestampGranularityEnum, diarization?: boolean, initialPrompt?: string, temperature?: number, smartFormat?: boolean, speakersExpected?: number, customVocabulary?: Array<string>, options?: RawAxiosRequestConfig): AxiosPromise {
+            return localVarFp.transcribe(model, body, language, outputFormat, rulesetId, punctuation, timestampGranularity, diarization, initialPrompt, temperature, smartFormat, speakersExpected, customVocabulary, options).then((request) => request(axios, basePath));
+        },
+        /**
+         * This endpoint allows you to transcribe an audio file hosted at a publicly accessible URL. Provide the URL and transcription options within the JSON request body. Useful for transcribing files already stored online.
+         * @summary Transcribe an audio file located at a remote URL.
+         * @param {TranscriptionOptions} transcriptionOptions JSON object containing the URL of the audio file and the desired transcription options.
+         * @param {*} [options] Override http request option.
+         * @throws {RequiredError}
+         */
+        transcribeRemote(transcriptionOptions: TranscriptionOptions, options?: RawAxiosRequestConfig): AxiosPromise {
+            return localVarFp.transcribeRemote(transcriptionOptions, options).then((request) => request(axios, basePath));
+        },
+    };
+};
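+
+// Illustrative usage sketch for the object-oriented client defined below (editorial,
+// not generated code). Assumes a Configuration with a valid API key; 'json' is assumed
+// to be a valid TranscriptOutputFormat value per the parameter docs above.
+//
+//   const api = new SpeechToTextApi(new Configuration({ accessToken: 'YOUR_API_KEY' }));
+//   const models = await api.listSpeechToTextModels();
+//   console.log(models.data.length, 'models available');
+//
+//   const result = await api.transcribeRemote({
+//     file_url: 'https://example.com/audio/interview.mp3',   // placeholder URL
+//     model: TranscriptionModelIdentifier.DeepgramNova3,
+//     output_format: 'json' as TranscriptOutputFormat,
+//     diarization: true,
+//   });
+//   console.log(result.data);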
+ * @param {boolean} [diarization] Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments.
+ * @param {string} [initialPrompt] An optional text prompt to provide context, guide the model's style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models).
+ * @param {number} [temperature] Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model.
+ * @param {boolean} [smartFormat] Enable provider-specific "smart formatting" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary.
+ * @param {number} [speakersExpected] Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram).
+ * @param {Array<string>} [customVocabulary] Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI).
+ * @param {*} [options] Override http request option.
+ * @throws {RequiredError}
+ * @memberof SpeechToTextApi
+ */
+ public transcribe(model: TranscriptionModelIdentifier, body: File, language?: TranscriptLanguageCode, outputFormat?: TranscriptOutputFormat, rulesetId?: string, punctuation?: boolean, timestampGranularity?: TranscribeTimestampGranularityEnum, diarization?: boolean, initialPrompt?: string, temperature?: number, smartFormat?: boolean, speakersExpected?: number, customVocabulary?: Array<string>, options?: RawAxiosRequestConfig) {
+ return SpeechToTextApiFp(this.configuration).transcribe(model, body, language, outputFormat, rulesetId, punctuation, timestampGranularity, diarization, initialPrompt, temperature, smartFormat, speakersExpected, customVocabulary, options).then((request) => request(this.axios, this.basePath));
+ }
+
+ /**
+ * This endpoint allows you to transcribe an audio file hosted at a publicly accessible URL. Provide the URL and transcription options within the JSON request body. Useful for transcribing files already stored online.
+ * @summary Transcribe an audio file located at a remote URL.
+ * @param {TranscriptionOptions} transcriptionOptions JSON object containing the URL of the audio file and the desired transcription options.
+ * @param {*} [options] Override http request option.
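+ * @example
+ * // A hedged usage sketch (not generated code). Assumes `TranscriptionOptions` carries the
+ * // `file_url` of the audio and a `model` in `provider.model` form, and that the API key is
+ * // supplied via `Configuration` (bearer auth):
+ * // const api = new SpeechToTextApi(new Configuration({ accessToken: 'YOUR_API_KEY' }));
+ * // const { data } = await api.transcribeRemote({ file_url: 'https://example.com/audio.mp3', model: 'provider.model' });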
+ * @throws {RequiredError} + * @memberof SpeechToTextApi + */ + public transcribeRemote(transcriptionOptions: TranscriptionOptions, options?: RawAxiosRequestConfig) { + return SpeechToTextApiFp(this.configuration).transcribeRemote(transcriptionOptions, options).then((request) => request(this.axios, this.basePath)); + } +} + +/** + * @export + */ +export const TranscribeTimestampGranularityEnum = { + Word: 'word', + Segment: 'segment' +} as const; +export type TranscribeTimestampGranularityEnum = typeof TranscribeTimestampGranularityEnum[keyof typeof TranscribeTimestampGranularityEnum]; + + diff --git a/base.ts b/base.ts new file mode 100644 index 0000000..7743d94 --- /dev/null +++ b/base.ts @@ -0,0 +1,86 @@ +/* tslint:disable */ +/* eslint-disable */ +/** + * Speechall API + * The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure. + * + * The version of the OpenAPI document: 0.0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +import type { Configuration } from './configuration'; +// Some imports not used depending on template conditions +// @ts-ignore +import type { AxiosPromise, AxiosInstance, RawAxiosRequestConfig } from 'axios'; +import globalAxios from 'axios'; + +export const BASE_PATH = "https://api.speechall.com/v1".replace(/\/+$/, ""); + +/** + * + * @export + */ +export const COLLECTION_FORMATS = { + csv: ",", + ssv: " ", + tsv: "\t", + pipes: "|", +}; + +/** + * + * @export + * @interface RequestArgs + */ +export interface RequestArgs { + url: string; + options: RawAxiosRequestConfig; +} + +/** + * + * @export + * @class BaseAPI + */ +export class BaseAPI { + protected configuration: Configuration | undefined; + + constructor(configuration?: Configuration, protected basePath: string = BASE_PATH, protected axios: AxiosInstance = globalAxios) { + if (configuration) { + this.configuration = configuration; + this.basePath = configuration.basePath ?? basePath; + } + } +}; + +/** + * + * @export + * @class RequiredError + * @extends {Error} + */ +export class RequiredError extends Error { + constructor(public field: string, msg?: string) { + super(msg); + this.name = "RequiredError" + } +} + +interface ServerMap { + [key: string]: { + url: string, + description: string, + }[]; +} + +/** + * + * @export + */ +export const operationServerMap: ServerMap = { +} diff --git a/common.ts b/common.ts new file mode 100644 index 0000000..0d26416 --- /dev/null +++ b/common.ts @@ -0,0 +1,150 @@ +/* tslint:disable */ +/* eslint-disable */ +/** + * Speechall API + * The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure. + * + * The version of the OpenAPI document: 0.0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. 
+ */ + + +import type { Configuration } from "./configuration"; +import type { RequestArgs } from "./base"; +import type { AxiosInstance, AxiosResponse } from 'axios'; +import { RequiredError } from "./base"; + +/** + * + * @export + */ +export const DUMMY_BASE_URL = 'https://example.com' + +/** + * + * @throws {RequiredError} + * @export + */ +export const assertParamExists = function (functionName: string, paramName: string, paramValue: unknown) { + if (paramValue === null || paramValue === undefined) { + throw new RequiredError(paramName, `Required parameter ${paramName} was null or undefined when calling ${functionName}.`); + } +} + +/** + * + * @export + */ +export const setApiKeyToObject = async function (object: any, keyParamName: string, configuration?: Configuration) { + if (configuration && configuration.apiKey) { + const localVarApiKeyValue = typeof configuration.apiKey === 'function' + ? await configuration.apiKey(keyParamName) + : await configuration.apiKey; + object[keyParamName] = localVarApiKeyValue; + } +} + +/** + * + * @export + */ +export const setBasicAuthToObject = function (object: any, configuration?: Configuration) { + if (configuration && (configuration.username || configuration.password)) { + object["auth"] = { username: configuration.username, password: configuration.password }; + } +} + +/** + * + * @export + */ +export const setBearerAuthToObject = async function (object: any, configuration?: Configuration) { + if (configuration && configuration.accessToken) { + const accessToken = typeof configuration.accessToken === 'function' + ? await configuration.accessToken() + : await configuration.accessToken; + object["Authorization"] = "Bearer " + accessToken; + } +} + +/** + * + * @export + */ +export const setOAuthToObject = async function (object: any, name: string, scopes: string[], configuration?: Configuration) { + if (configuration && configuration.accessToken) { + const localVarAccessTokenValue = typeof configuration.accessToken === 'function' + ? await configuration.accessToken(name, scopes) + : await configuration.accessToken; + object["Authorization"] = "Bearer " + localVarAccessTokenValue; + } +} + +function setFlattenedQueryParams(urlSearchParams: URLSearchParams, parameter: any, key: string = ""): void { + if (parameter == null) return; + if (typeof parameter === "object") { + if (Array.isArray(parameter)) { + (parameter as any[]).forEach(item => setFlattenedQueryParams(urlSearchParams, item, key)); + } + else { + Object.keys(parameter).forEach(currentKey => + setFlattenedQueryParams(urlSearchParams, parameter[currentKey], `${key}${key !== '' ? '.' : ''}${currentKey}`) + ); + } + } + else { + if (urlSearchParams.has(key)) { + urlSearchParams.append(key, parameter); + } + else { + urlSearchParams.set(key, parameter); + } + } +} + +/** + * + * @export + */ +export const setSearchParams = function (url: URL, ...objects: any[]) { + const searchParams = new URLSearchParams(url.search); + setFlattenedQueryParams(searchParams, objects); + url.search = searchParams.toString(); +} + +/** + * + * @export + */ +export const serializeDataIfNeeded = function (value: any, requestOptions: any, configuration?: Configuration) { + const nonString = typeof value !== 'string'; + const needsSerialization = nonString && configuration && configuration.isJsonMime + ? configuration.isJsonMime(requestOptions.headers['Content-Type']) + : nonString; + return needsSerialization + ? JSON.stringify(value !== undefined ? 
value : {})
+ : (value || "");
+}
+
+/**
+ *
+ * @export
+ */
+export const toPathString = function (url: URL) {
+ return url.pathname + url.search + url.hash
+}
+
+/**
+ *
+ * @export
+ */
+export const createRequestFunction = function (axiosArgs: RequestArgs, globalAxios: AxiosInstance, BASE_PATH: string, configuration?: Configuration) {
+ return <T = unknown, R = AxiosResponse<T>>(axios: AxiosInstance = globalAxios, basePath: string = BASE_PATH) => {
+ const axiosRequestArgs = {...axiosArgs.options, url: (axios.defaults.baseURL ? '' : configuration?.basePath ?? basePath) + axiosArgs.url};
+ return axios.request<T, R>(axiosRequestArgs);
+ };
+}
diff --git a/configuration.ts b/configuration.ts
new file mode 100644
index 0000000..96fe6d6
--- /dev/null
+++ b/configuration.ts
@@ -0,0 +1,115 @@
+/* tslint:disable */
+/* eslint-disable */
+/**
+ * Speechall API
+ * The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure.
+ *
+ * The version of the OpenAPI document: 0.0.1
+ *
+ *
+ * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+ * https://openapi-generator.tech
+ * Do not edit the class manually.
+ */
+
+
+export interface ConfigurationParameters {
+ apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>);
+ username?: string;
+ password?: string;
+ accessToken?: string | Promise<string> | ((name?: string, scopes?: string[]) => string) | ((name?: string, scopes?: string[]) => Promise<string>);
+ basePath?: string;
+ serverIndex?: number;
+ baseOptions?: any;
+ formDataCtor?: new () => any;
+}
+
+export class Configuration {
+ /**
+ * parameter for apiKey security
+ * @param name security name
+ * @memberof Configuration
+ */
+ apiKey?: string | Promise<string> | ((name: string) => string) | ((name: string) => Promise<string>);
+ /**
+ * parameter for basic security
+ *
+ * @type {string}
+ * @memberof Configuration
+ */
+ username?: string;
+ /**
+ * parameter for basic security
+ *
+ * @type {string}
+ * @memberof Configuration
+ */
+ password?: string;
+ /**
+ * parameter for oauth2 security
+ * @param name security name
+ * @param scopes oauth2 scope
+ * @memberof Configuration
+ */
+ accessToken?: string | Promise<string> | ((name?: string, scopes?: string[]) => string) | ((name?: string, scopes?: string[]) => Promise<string>);
+ /**
+ * override base path
+ *
+ * @type {string}
+ * @memberof Configuration
+ */
+ basePath?: string;
+ /**
+ * override server index
+ *
+ * @type {number}
+ * @memberof Configuration
+ */
+ serverIndex?: number;
+ /**
+ * base options for axios calls
+ *
+ * @type {any}
+ * @memberof Configuration
+ */
+ baseOptions?: any;
+ /**
+ * The FormData constructor that will be used to create multipart form data
+ * requests. You can inject this here so that execution environments that
+ * do not support the FormData class can still run the generated client.
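+ * A hedged Node.js example (assumes the third-party `form-data` package is installed):
+ * `new Configuration({ formDataCtor: require('form-data') })`.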
+ * + * @type {new () => FormData} + */ + formDataCtor?: new () => any; + + constructor(param: ConfigurationParameters = {}) { + this.apiKey = param.apiKey; + this.username = param.username; + this.password = param.password; + this.accessToken = param.accessToken; + this.basePath = param.basePath; + this.serverIndex = param.serverIndex; + this.baseOptions = { + ...param.baseOptions, + headers: { + ...param.baseOptions?.headers, + }, + }; + this.formDataCtor = param.formDataCtor; + } + + /** + * Check if the given MIME is a JSON MIME. + * JSON MIME examples: + * application/json + * application/json; charset=UTF8 + * APPLICATION/JSON + * application/vnd.company+json + * @param mime - MIME (Multipurpose Internet Mail Extensions) + * @return True if the given MIME is JSON, false otherwise. + */ + public isJsonMime(mime: string): boolean { + const jsonMime: RegExp = new RegExp('^(application\/json|[^;/ \t]+\/[^;/ \t]+[+]json)[ \t]*(;.*)?$', 'i'); + return mime !== null && (jsonMime.test(mime) || mime.toLowerCase() === 'application/json-patch+json'); + } +} diff --git a/docs/CreateReplacementRuleset201Response.md b/docs/CreateReplacementRuleset201Response.md new file mode 100644 index 0000000..64205f6 --- /dev/null +++ b/docs/CreateReplacementRuleset201Response.md @@ -0,0 +1,20 @@ +# CreateReplacementRuleset201Response + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **string** | The unique identifier (UUID) generated for this ruleset. Use this ID in the `ruleset_id` parameter of transcription requests. | [default to undefined] + +## Example + +```typescript +import { CreateReplacementRuleset201Response } from './api'; + +const instance: CreateReplacementRuleset201Response = { + id, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/CreateReplacementRulesetRequest.md b/docs/CreateReplacementRulesetRequest.md new file mode 100644 index 0000000..09e5f7d --- /dev/null +++ b/docs/CreateReplacementRulesetRequest.md @@ -0,0 +1,22 @@ +# CreateReplacementRulesetRequest + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **string** | A user-defined name for this ruleset for easier identification. | [default to undefined] +**rules** | [**Array<ReplacementRule>**](ReplacementRule.md) | An ordered array of replacement rules. Rules are applied in the order they appear in this list. See the `ReplacementRule` schema for different rule types (exact, regex, regex_group). | [default to undefined] + +## Example + +```typescript +import { CreateReplacementRulesetRequest } from './api'; + +const instance: CreateReplacementRulesetRequest = { + name, + rules, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/ErrorResponse.md b/docs/ErrorResponse.md new file mode 100644 index 0000000..bcbb908 --- /dev/null +++ b/docs/ErrorResponse.md @@ -0,0 +1,21 @@ +# ErrorResponse + +Standard structure for error responses. May include additional properties depending on the error type. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**message** | **string** | A human-readable message describing the error. 
| [default to undefined]
+
+## Example
+
+```typescript
+import { ErrorResponse } from './api';
+
+const instance: ErrorResponse = {
+ message,
+};
+```
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/docs/ExactRule.md b/docs/ExactRule.md
new file mode 100644
index 0000000..ffa51c5
--- /dev/null
+++ b/docs/ExactRule.md
@@ -0,0 +1,27 @@
+# ExactRule
+
+Defines a replacement rule based on finding an exact string match.
+
+## Properties
+
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**kind** | **string** | Discriminator field identifying the rule type as 'exact'. | [default to undefined]
+**search** | **string** | The exact text string to search for within the transcription. | [default to undefined]
+**replacement** | **string** | The text string to replace the found 'search' text with. | [default to undefined]
+**caseSensitive** | **boolean** | If true, the search will match only if the case is identical. If false (default), the search ignores case. | [optional] [default to false]
+
+## Example
+
+```typescript
+import { ExactRule } from './api';
+
+const instance: ExactRule = {
+ kind,
+ search,
+ replacement,
+ caseSensitive,
+};
+```
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/docs/OpenAIAudioResponseFormat.md b/docs/OpenAIAudioResponseFormat.md
new file mode 100644
index 0000000..3c8d937
--- /dev/null
+++ b/docs/OpenAIAudioResponseFormat.md
@@ -0,0 +1,17 @@
+# OpenAIAudioResponseFormat
+
+The format of the output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+
+## Enum
+
+* `Json` (value: `'json'`)
+
+* `Text` (value: `'text'`)
+
+* `Srt` (value: `'srt'`)
+
+* `VerboseJson` (value: `'verbose_json'`)
+
+* `Vtt` (value: `'vtt'`)
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
diff --git a/docs/OpenAICompatibleSpeechToTextApi.md b/docs/OpenAICompatibleSpeechToTextApi.md
new file mode 100644
index 0000000..34b60da
--- /dev/null
+++ b/docs/OpenAICompatibleSpeechToTextApi.md
@@ -0,0 +1,158 @@
+# OpenAICompatibleSpeechToTextApi
+
+All URIs are relative to *https://api.speechall.com/v1*
+
+|Method | HTTP request | Description|
+|------------- | ------------- | -------------|
+|[**openaiCompatibleCreateTranscription**](#openaicompatiblecreatetranscription) | **POST** /openai-compatible/audio/transcriptions | Transcribes audio into the input language, using OpenAI-compatible request format.|
+|[**openaiCompatibleCreateTranslation**](#openaicompatiblecreatetranslation) | **POST** /openai-compatible/audio/translations | Translates audio into English, using OpenAI-compatible request format.|
+
+# **openaiCompatibleCreateTranscription**
+> OpenaiCompatibleCreateTranscription200Response openaiCompatibleCreateTranscription()
+
+Mimics the OpenAI `/audio/transcriptions` endpoint. Accepts audio file uploads via `multipart/form-data`. Allows specifying model, language, prompt, response format, temperature, and timestamp granularity similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format.
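+
+Because this route mirrors OpenAI's, an OpenAI-style client can also be pointed at it directly. A hedged sketch (assuming the official `openai` npm package; the base URL and model id are illustrative, not part of this SDK):
+
+```typescript
+import OpenAI from 'openai';
+import fs from 'fs';
+
+// Point the OpenAI client at the Speechall-compatible base path instead of api.openai.com.
+const client = new OpenAI({
+  apiKey: 'YOUR_SPEECHALL_API_KEY',
+  baseURL: 'https://api.speechall.com/v1/openai-compatible',
+});
+
+const transcription = await client.audio.transcriptions.create({
+  file: fs.createReadStream('audio.mp3'),
+  model: 'provider.model', // Speechall's provider.model identifier format
+});
+```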
+
+### Example
+
+```typescript
+import {
+ OpenAICompatibleSpeechToTextApi,
+ Configuration
+} from './api';
+
+const configuration = new Configuration();
+const apiInstance = new OpenAICompatibleSpeechToTextApi(configuration);
+
+let file: File; //The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. (default to undefined)
+let model: TranscriptionModelIdentifier; // (default to undefined)
+let language: string; //The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. (optional) (default to undefined)
+let prompt: string; //An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. (optional) (default to undefined)
+let responseFormat: OpenAIAudioResponseFormat; // (optional) (default to undefined)
+let temperature: number; //The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. (optional) (default to 0)
+let timestampGranularities: Array<'word' | 'segment'>; //The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. (optional) (default to undefined)
+
+const { status, data } = await apiInstance.openaiCompatibleCreateTranscription(
+ file,
+ model,
+ language,
+ prompt,
+ responseFormat,
+ temperature,
+ timestampGranularities
+);
+```
+
+### Parameters
+
+|Name | Type | Description | Notes|
+|------------- | ------------- | ------------- | -------------|
+| **file** | [**File**] | The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. | defaults to undefined|
+| **model** | **TranscriptionModelIdentifier** | | defaults to undefined|
+| **language** | [**string**] | The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. | (optional) defaults to undefined|
+| **prompt** | [**string**] | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. | (optional) defaults to undefined|
+| **responseFormat** | **OpenAIAudioResponseFormat** | | (optional) defaults to undefined|
+| **temperature** | [**number**] | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. | (optional) defaults to 0|
+| **timestampGranularities** | **Array<'word' | 'segment'>** | The timestamp granularities to populate for this transcription.
`response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. | (optional) defaults to undefined| + + +### Return type + +**OpenaiCompatibleCreateTranscription200Response** + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: multipart/form-data + - **Accept**: application/json, text/plain + + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +|**200** | Transcription successful. The response body format depends on the `response_format` parameter specified in the request: - `json`: Returns `OpenAI_CreateTranscriptionResponseJson`. - `verbose_json`: Returns `OpenAI_CreateTranscriptionResponseVerboseJson` with detailed segments and optional word timestamps. - `text`, `srt`, `vtt`: Returns the transcription as plain text in the specified format. | - | +|**400** | Bad Request - The request was malformed or contained invalid parameters (e.g., invalid language code, missing required field, unsupported option). The response body provides details. | - | +|**401** | Unauthorized - Authentication failed. The API key is missing, invalid, or expired. | - | +|**402** | Payment Required - There is no credit left on your account. | - | +|**404** | Not Found - The requested resource could not be found. This could be an invalid API endpoint path, or a referenced resource ID (like `ruleset_id`) that doesn\'t exist. For `/transcribe-remote`, it could also mean the `file_url` was inaccessible. | - | +|**429** | Too Many Requests - The client has exceeded the rate limit for API requests. Check the `Retry-After` header for guidance on when to retry. | * Retry-After - The recommended number of seconds to wait before making another request.
|
+|**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - |
+|**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - |
+|**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
+# **openaiCompatibleCreateTranslation**
+> OpenaiCompatibleCreateTranslation200Response openaiCompatibleCreateTranslation()
+
+Mimics the OpenAI `/audio/translations` endpoint. Accepts audio file uploads via `multipart/form-data` and translates the speech into English text. Allows specifying model, prompt, response format, and temperature similar to OpenAI. Note: The `model` parameter should use Speechall's `provider.model` format (ensure the selected model supports translation).
+
+### Example
+
+```typescript
+import {
+ OpenAICompatibleSpeechToTextApi,
+ Configuration,
+ OpenAICreateTranslationRequestModel
+} from './api';
+
+const configuration = new Configuration();
+const apiInstance = new OpenAICompatibleSpeechToTextApi(configuration);
+
+let file: File; //The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. (default to undefined)
+let model: OpenAICreateTranslationRequestModel; // (default to undefined)
+let prompt: string; //An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. (optional) (default to undefined)
+let responseFormat: OpenAIAudioResponseFormat; // (optional) (default to undefined)
+let temperature: number; //The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. (optional) (default to 0)
+
+const { status, data } = await apiInstance.openaiCompatibleCreateTranslation(
+ file,
+ model,
+ prompt,
+ responseFormat,
+ temperature
+);
+```
+
+### Parameters
+
+|Name | Type | Description | Notes|
+|------------- | ------------- | ------------- | -------------|
+| **file** | [**File**] | The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. | defaults to undefined|
+| **model** | **OpenAICreateTranslationRequestModel** | | defaults to undefined|
+| **prompt** | [**string**] | An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. | (optional) defaults to undefined|
+| **responseFormat** | **OpenAIAudioResponseFormat** | | (optional) defaults to undefined|
+| **temperature** | [**number**] | The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
| +|**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - | +|**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - | +|**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/docs/OpenAICreateTranscriptionResponseJson.md b/docs/OpenAICreateTranscriptionResponseJson.md new file mode 100644 index 0000000..33552ae --- /dev/null +++ b/docs/OpenAICreateTranscriptionResponseJson.md @@ -0,0 +1,21 @@ +# OpenAICreateTranscriptionResponseJson + +Represents a transcription response returned by model, based on the provided input. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **string** | The transcribed text. | [default to undefined] + +## Example + +```typescript +import { OpenAICreateTranscriptionResponseJson } from './api'; + +const instance: OpenAICreateTranscriptionResponseJson = { + text, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/OpenAICreateTranscriptionResponseVerboseJson.md b/docs/OpenAICreateTranscriptionResponseVerboseJson.md new file mode 100644 index 0000000..3dcf3ce --- /dev/null +++ b/docs/OpenAICreateTranscriptionResponseVerboseJson.md @@ -0,0 +1,29 @@ +# OpenAICreateTranscriptionResponseVerboseJson + +Represents a verbose json transcription response returned by model, based on the provided input. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**language** | **string** | The language of the input audio. | [default to undefined] +**duration** | **number** | The duration of the input audio. | [default to undefined] +**text** | **string** | The transcribed text. | [default to undefined] +**words** | [**Array<OpenAITranscriptionWord>**](OpenAITranscriptionWord.md) | Extracted words and their corresponding timestamps. | [optional] [default to undefined] +**segments** | [**Array<OpenAITranscriptionSegment>**](OpenAITranscriptionSegment.md) | Segments of the transcribed text and their corresponding details. | [optional] [default to undefined] + +## Example + +```typescript +import { OpenAICreateTranscriptionResponseVerboseJson } from './api'; + +const instance: OpenAICreateTranscriptionResponseVerboseJson = { + language, + duration, + text, + words, + segments, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/OpenAICreateTranslationRequestModel.md b/docs/OpenAICreateTranslationRequestModel.md new file mode 100644 index 0000000..15f7bee --- /dev/null +++ b/docs/OpenAICreateTranslationRequestModel.md @@ -0,0 +1,19 @@ +# OpenAICreateTranslationRequestModel + +ID of the model to use. 
It follows the naming convention provider/model-name + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +## Example + +```typescript +import { OpenAICreateTranslationRequestModel } from './api'; + +const instance: OpenAICreateTranslationRequestModel = { +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/OpenAICreateTranslationResponseJson.md b/docs/OpenAICreateTranslationResponseJson.md new file mode 100644 index 0000000..e9171f8 --- /dev/null +++ b/docs/OpenAICreateTranslationResponseJson.md @@ -0,0 +1,21 @@ +# OpenAICreateTranslationResponseJson + +Standard JSON response for OpenAI-compatible translation requests when `response_format` is `json`. Contains the translated English text. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**text** | **string** | | [default to undefined] + +## Example + +```typescript +import { OpenAICreateTranslationResponseJson } from './api'; + +const instance: OpenAICreateTranslationResponseJson = { + text, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/OpenAICreateTranslationResponseVerboseJson.md b/docs/OpenAICreateTranslationResponseVerboseJson.md new file mode 100644 index 0000000..7c45349 --- /dev/null +++ b/docs/OpenAICreateTranslationResponseVerboseJson.md @@ -0,0 +1,26 @@ +# OpenAICreateTranslationResponseVerboseJson + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**language** | **string** | The language of the output translation (always `english`). | [default to undefined] +**duration** | **string** | The duration of the input audio. | [default to undefined] +**text** | **string** | The translated text. | [default to undefined] +**segments** | [**Array<OpenAITranscriptionSegment>**](OpenAITranscriptionSegment.md) | Segments of the translated text and their corresponding details. | [optional] [default to undefined] + +## Example + +```typescript +import { OpenAICreateTranslationResponseVerboseJson } from './api'; + +const instance: OpenAICreateTranslationResponseVerboseJson = { + language, + duration, + text, + segments, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/OpenAITranscriptionSegment.md b/docs/OpenAITranscriptionSegment.md new file mode 100644 index 0000000..27ab99e --- /dev/null +++ b/docs/OpenAITranscriptionSegment.md @@ -0,0 +1,39 @@ +# OpenAITranscriptionSegment + +Represents a segment of transcribed or translated text, based on OpenAI\'s verbose JSON structure. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **number** | Unique identifier of the segment. | [default to undefined] +**seek** | **number** | Seek offset of the segment. | [default to undefined] +**start** | **number** | Start time of the segment in seconds. | [default to undefined] +**end** | **number** | End time of the segment in seconds. | [default to undefined] +**text** | **string** | Text content of the segment. 
| [default to undefined] +**tokens** | **Array<number>** | Array of token IDs for the text content. | [default to undefined] +**temperature** | **number** | Temperature parameter used for generating the segment. | [default to undefined] +**avg_logprob** | **number** | Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. | [default to undefined] +**compression_ratio** | **number** | Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. | [default to undefined] +**no_speech_prob** | **number** | Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. | [default to undefined] + +## Example + +```typescript +import { OpenAITranscriptionSegment } from './api'; + +const instance: OpenAITranscriptionSegment = { + id, + seek, + start, + end, + text, + tokens, + temperature, + avg_logprob, + compression_ratio, + no_speech_prob, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/OpenAITranscriptionWord.md b/docs/OpenAITranscriptionWord.md new file mode 100644 index 0000000..0d81307 --- /dev/null +++ b/docs/OpenAITranscriptionWord.md @@ -0,0 +1,25 @@ +# OpenAITranscriptionWord + +Represents a single word identified during transcription, including its start and end times. Included in `verbose_json` response when `word` granularity is requested. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**word** | **string** | The text content of the word. | [default to undefined] +**start** | **number** | Start time of the word in seconds. | [default to undefined] +**end** | **number** | End time of the word in seconds. | [default to undefined] + +## Example + +```typescript +import { OpenAITranscriptionWord } from './api'; + +const instance: OpenAITranscriptionWord = { + word, + start, + end, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/OpenaiCompatibleCreateTranscription200Response.md b/docs/OpenaiCompatibleCreateTranscription200Response.md new file mode 100644 index 0000000..e32249b --- /dev/null +++ b/docs/OpenaiCompatibleCreateTranscription200Response.md @@ -0,0 +1,28 @@ +# OpenaiCompatibleCreateTranscription200Response + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**language** | **string** | The language of the input audio. | [default to undefined] +**duration** | **number** | The duration of the input audio. | [default to undefined] +**text** | **string** | The transcribed text. | [default to undefined] +**words** | [**Array<OpenAITranscriptionWord>**](OpenAITranscriptionWord.md) | Extracted words and their corresponding timestamps. | [optional] [default to undefined] +**segments** | [**Array<OpenAITranscriptionSegment>**](OpenAITranscriptionSegment.md) | Segments of the transcribed text and their corresponding details. 
| [optional] [default to undefined] + +## Example + +```typescript +import { OpenaiCompatibleCreateTranscription200Response } from './api'; + +const instance: OpenaiCompatibleCreateTranscription200Response = { + language, + duration, + text, + words, + segments, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/OpenaiCompatibleCreateTranslation200Response.md b/docs/OpenaiCompatibleCreateTranslation200Response.md new file mode 100644 index 0000000..c15b865 --- /dev/null +++ b/docs/OpenaiCompatibleCreateTranslation200Response.md @@ -0,0 +1,26 @@ +# OpenaiCompatibleCreateTranslation200Response + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**language** | **string** | The language of the output translation (always `english`). | [default to undefined] +**duration** | **string** | The duration of the input audio. | [default to undefined] +**text** | **string** | | [default to undefined] +**segments** | [**Array<OpenAITranscriptionSegment>**](OpenAITranscriptionSegment.md) | Segments of the translated text and their corresponding details. | [optional] [default to undefined] + +## Example + +```typescript +import { OpenaiCompatibleCreateTranslation200Response } from './api'; + +const instance: OpenaiCompatibleCreateTranslation200Response = { + language, + duration, + text, + segments, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/RegexGroupRule.md b/docs/RegexGroupRule.md new file mode 100644 index 0000000..e025164 --- /dev/null +++ b/docs/RegexGroupRule.md @@ -0,0 +1,27 @@ +# RegexGroupRule + +Defines a replacement rule that uses regex capture groups to apply different replacements to different parts of the matched text. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**kind** | **string** | Discriminator field identifying the rule type as \'regex_group\'. | [default to undefined] +**pattern** | **string** | The regular expression pattern containing capture groups `(...)`. The entire pattern must match for replacements to occur. | [default to undefined] +**groupReplacements** | **{ [key: string]: string; }** | An object where keys are capture group numbers (as strings, e.g., \"1\", \"2\") and values are the respective replacement strings for those groups. Groups not listed are kept as matched. The entire match is reconstructed using these replacements. | [default to undefined] +**flags** | **Array<string>** | An array of flags to modify the regex behavior. | [optional] [default to undefined] + +## Example + +```typescript +import { RegexGroupRule } from './api'; + +const instance: RegexGroupRule = { + kind, + pattern, + groupReplacements, + flags, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/RegexRule.md b/docs/RegexRule.md new file mode 100644 index 0000000..9216f5f --- /dev/null +++ b/docs/RegexRule.md @@ -0,0 +1,27 @@ +# RegexRule + +Defines a replacement rule based on matching a regular expression pattern. 
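+
+A hypothetical instance (values are illustrative only, not from the API spec): a rule that normalizes the product name regardless of how it was capitalized in the transcript, using the documented 'i' flag:
+
+```typescript
+import { RegexRule } from './api';
+
+// Case-insensitive match; every occurrence of "speechall" is rewritten as "Speechall".
+const rule: RegexRule = {
+  kind: 'regex',
+  pattern: 'speechall',
+  replacement: 'Speechall',
+  flags: ['i'],
+};
+```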
+ +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**kind** | **string** | Discriminator field identifying the rule type as \'regex\'. | [default to undefined] +**pattern** | **string** | The regular expression pattern to search for. Uses standard regex syntax (implementation specific, often PCRE-like). Remember to escape special characters if needed (e.g., `\\\\.` for a literal dot). | [default to undefined] +**replacement** | **string** | The replacement text. Can include backreferences to capture groups from the pattern, like `$1`, `$2`, etc. A literal `$` should be escaped (e.g., `$$`). | [default to undefined] +**flags** | **Array<string>** | An array of flags to modify the regex behavior (e.g., \'i\' for case-insensitivity). | [optional] [default to undefined] + +## Example + +```typescript +import { RegexRule } from './api'; + +const instance: RegexRule = { + kind, + pattern, + replacement, + flags, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/ReplacementRule.md b/docs/ReplacementRule.md new file mode 100644 index 0000000..c2fd6a8 --- /dev/null +++ b/docs/ReplacementRule.md @@ -0,0 +1,33 @@ +# ReplacementRule + +Defines a single rule for finding and replacing text in a transcription. Use one of the specific rule types (`ExactRule`, `RegexRule`, `RegexGroupRule`). The `kind` property acts as a discriminator. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**kind** | **string** | Discriminator field identifying the rule type as \'regex_group\'. | [default to undefined] +**search** | **string** | The exact text string to search for within the transcription. | [default to undefined] +**replacement** | **string** | The replacement text. Can include backreferences to capture groups from the pattern, like `$1`, `$2`, etc. A literal `$` should be escaped (e.g., `$$`). | [default to undefined] +**caseSensitive** | **boolean** | If true, the search will match only if the case is identical. If false (default), the search ignores case. | [optional] [default to false] +**pattern** | **string** | The regular expression pattern containing capture groups `(...)`. The entire pattern must match for replacements to occur. | [default to undefined] +**flags** | **Array<string>** | An array of flags to modify the regex behavior. | [optional] [default to undefined] +**groupReplacements** | **{ [key: string]: string; }** | An object where keys are capture group numbers (as strings, e.g., \"1\", \"2\") and values are the respective replacement strings for those groups. Groups not listed are kept as matched. The entire match is reconstructed using these replacements. 
| [default to undefined] + +## Example + +```typescript +import { ReplacementRule } from './api'; + +const instance: ReplacementRule = { + kind, + search, + replacement, + caseSensitive, + pattern, + flags, + groupReplacements, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/ReplacementRulesApi.md b/docs/ReplacementRulesApi.md new file mode 100644 index 0000000..efc2d80 --- /dev/null +++ b/docs/ReplacementRulesApi.md @@ -0,0 +1,67 @@ +# ReplacementRulesApi + +All URIs are relative to *https://api.speechall.com/v1* + +|Method | HTTP request | Description| +|------------- | ------------- | -------------| +|[**createReplacementRuleset**](#createreplacementruleset) | **POST** /replacement-rulesets | Create a reusable set of text replacement rules.| + +# **createReplacementRuleset** +> CreateReplacementRuleset201Response createReplacementRuleset(createReplacementRulesetRequest) + +Defines a named set of replacement rules (exact match, regex) that can be applied during transcription requests using its `ruleset_id`. Rules within a set are applied sequentially to the transcription text. + +### Example + +```typescript +import { + ReplacementRulesApi, + Configuration, + CreateReplacementRulesetRequest +} from './api'; + +const configuration = new Configuration(); +const apiInstance = new ReplacementRulesApi(configuration); + +let createReplacementRulesetRequest: CreateReplacementRulesetRequest; //JSON object containing the name for the ruleset and an array of replacement rule objects. + +const { status, data } = await apiInstance.createReplacementRuleset( + createReplacementRulesetRequest +); +``` + +### Parameters + +|Name | Type | Description | Notes| +|------------- | ------------- | ------------- | -------------| +| **createReplacementRulesetRequest** | **CreateReplacementRulesetRequest**| JSON object containing the name for the ruleset and an array of replacement rule objects. | | + + +### Return type + +**CreateReplacementRuleset201Response** + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json, text/plain + + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +|**201** | Ruleset created successfully. The response body contains the unique ID assigned to the new ruleset. | - | +|**400** | Bad Request - The request was malformed or contained invalid parameters (e.g., invalid language code, missing required field, unsupported option). The response body provides details. | - | +|**401** | Unauthorized - Authentication failed. The API key is missing, invalid, or expired. | - | +|**402** | Payment Required - There is no credit left on your account. | - | +|**429** | Too Many Requests - The client has exceeded the rate limit for API requests. Check the `Retry-After` header for guidance on when to retry. | * Retry-After - The recommended number of seconds to wait before making another request.
|
+|**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - |
+|**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - |
+|**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - |
+
+[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
+
diff --git a/docs/SpeechToTextApi.md b/docs/SpeechToTextApi.md
new file mode 100644
index 0000000..b5b2423
--- /dev/null
+++ b/docs/SpeechToTextApi.md
@@ -0,0 +1,217 @@
+# SpeechToTextApi
+
+All URIs are relative to *https://api.speechall.com/v1*
+
+|Method | HTTP request | Description|
+|------------- | ------------- | -------------|
+|[**listSpeechToTextModels**](#listspeechtotextmodels) | **GET** /speech-to-text-models | Retrieve a list of all available speech-to-text models.|
+|[**transcribe**](#transcribe) | **POST** /transcribe | Upload an audio file directly and receive a transcription.|
+|[**transcribeRemote**](#transcriberemote) | **POST** /transcribe-remote | Transcribe an audio file located at a remote URL.|
+
+# **listSpeechToTextModels**
+> Array<SpeechToTextModel> listSpeechToTextModels()
+
+Returns a detailed list of all STT models accessible through the Speechall API. Each model entry includes its identifier (`provider.model`), display name, description, supported features (languages, formats, punctuation, diarization), and performance characteristics. Use this endpoint to discover available models and their capabilities before making transcription requests.
+
+### Example
+
+```typescript
+import {
+ SpeechToTextApi,
+ Configuration
+} from './api';
+
+const configuration = new Configuration();
+const apiInstance = new SpeechToTextApi(configuration);
+
+const { status, data } = await apiInstance.listSpeechToTextModels();
+```
+
+### Parameters
+This endpoint does not have any parameters.
+
+
+### Return type
+
+**Array<SpeechToTextModel>**
+
+### Authorization
+
+[bearerAuth](../README.md#bearerAuth)
+
+### HTTP request headers
+
+ - **Content-Type**: Not defined
+ - **Accept**: application/json, text/plain
+
+
+### HTTP response details
+| Status code | Description | Response headers |
+|-------------|-------------|------------------|
+|**200** | A list of available speech-to-text models and their properties. | - |
+|**400** | Bad Request - The request was malformed or contained invalid parameters (e.g., invalid language code, missing required field, unsupported option). The response body provides details. | - |
+|**401** | Unauthorized - Authentication failed. The API key is missing, invalid, or expired. | - |
+|**402** | Payment Required - There is no credit left on your account. | - |
+|**404** | Not Found - The requested resource could not be found. This could be an invalid API endpoint path, or a referenced resource ID (like `ruleset_id`) that doesn't exist. For `/transcribe-remote`, it could also mean the `file_url` was inaccessible. | - |
+|**429** | Too Many Requests - The client has exceeded the rate limit for API requests. Check the `Retry-After` header for guidance on when to retry.
| * Retry-After - The recommended number of seconds to wait before making another request.
| +|**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - | +|**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - | +|**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **transcribe** +> TranscriptionResponse transcribe(body) + +This endpoint allows you to send raw audio data in the request body for transcription. You can specify the desired model, language, output format, and various provider-specific features using query parameters. Suitable for transcribing local audio files. + +### Example + +```typescript +import { + SpeechToTextApi, + Configuration +} from './api'; + +const configuration = new Configuration(); +const apiInstance = new SpeechToTextApi(configuration); + +let model: TranscriptionModelIdentifier; //The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models. (default to undefined) +let body: File; //The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration. +let language: TranscriptLanguageCode; //The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency. (optional) (default to undefined) +let outputFormat: TranscriptOutputFormat; //The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`. (optional) (default to undefined) +let rulesetId: string; //The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint. (optional) (default to undefined) +let punctuation: boolean; //Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`. (optional) (default to true) +let timestampGranularity: 'word' | 'segment'; //Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). `segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). Defaults to `segment`. (optional) (default to 'segment') +let diarization: boolean; //Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments. 
(optional) (default to false)
+let initialPrompt: string; //An optional text prompt to provide context, guide the model\'s style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models). (optional) (default to undefined)
+let temperature: number; //Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model. (optional) (default to undefined)
+let smartFormat: boolean; //Enable provider-specific \"smart formatting\" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary. (optional) (default to undefined)
+let speakersExpected: number; //Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram). (optional) (default to undefined)
+let customVocabulary: Array<string>; //Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI). (optional) (default to undefined)
+
+const { status, data } = await apiInstance.transcribe(
+    model,
+    body,
+    language,
+    outputFormat,
+    rulesetId,
+    punctuation,
+    timestampGranularity,
+    diarization,
+    initialPrompt,
+    temperature,
+    smartFormat,
+    speakersExpected,
+    customVocabulary
+);
+```
+
+### Parameters
+
+|Name | Type | Description | Notes|
+|------------- | ------------- | ------------- | -------------|
+| **body** | **File**| The audio file to transcribe. Send the raw audio data as the request body. Supported formats typically include WAV, MP3, FLAC, Ogg, M4A, etc., depending on the chosen model/provider. Check provider documentation for specific limits on file size and duration. | |
+| **model** | **TranscriptionModelIdentifier** | The identifier of the speech-to-text model to use for the transcription, in the format `provider.model`. See the `/speech-to-text-models` endpoint for available models. | defaults to undefined|
+| **language** | **TranscriptLanguageCode** | The language of the audio file in ISO 639-1 format (e.g., `en`, `es`, `fr`). Specify `auto` for automatic language detection (if supported by the model). Defaults to `en` if not provided. Providing the correct language improves accuracy and latency. | (optional) defaults to undefined|
+| **outputFormat** | **TranscriptOutputFormat** | The desired format for the transcription output. Can be plain text, JSON objects (simple or detailed), or subtitle formats (SRT, VTT). Defaults to `text`. | (optional) defaults to undefined|
+| **rulesetId** | [**string**] | The unique identifier (UUID) of a pre-defined replacement ruleset to apply to the final transcription text. Create rulesets using the `/replacement-rulesets` endpoint. | (optional) defaults to undefined|
+| **punctuation** | [**boolean**] | Enable automatic punctuation (commas, periods, question marks) in the transcription. Support varies by model/provider (e.g., Deepgram, AssemblyAI). Defaults to `true`. | (optional) defaults to true|
+| **timestampGranularity** | [**'word' \| 'segment'**] | Specifies the level of detail for timestamps in the response (if `output_format` is `json` or `verbose_json`). 
`segment` provides timestamps for larger chunks of speech, while `word` provides timestamps for individual words (may increase latency). Defaults to `segment`. | (optional) defaults to 'segment'| +| **diarization** | [**boolean**] | Enable speaker diarization to identify and label different speakers in the audio. Support and quality vary by model/provider. Defaults to `false`. When enabled, the `speaker` field may be populated in the response segments. | (optional) defaults to false| +| **initialPrompt** | [**string**] | An optional text prompt to provide context, guide the model\'s style (e.g., spelling of specific names), or improve accuracy for subsequent audio segments. Support varies by model (e.g., OpenAI models). | (optional) defaults to undefined| +| **temperature** | [**number**] | Controls the randomness of the output for certain models (e.g., OpenAI). A value between 0 and 1. Lower values (e.g., 0.2) make the output more deterministic, while higher values (e.g., 0.8) make it more random. Defaults vary by model. | (optional) defaults to undefined| +| **smartFormat** | [**boolean**] | Enable provider-specific \"smart formatting\" features, which might include formatting for numbers, dates, currency, etc. Currently supported by Deepgram models. Defaults vary. | (optional) defaults to undefined| +| **speakersExpected** | [**number**] | Provides a hint to the diarization process about the number of expected speakers. May improve accuracy for some providers (e.g., RevAI, Deepgram). | (optional) defaults to undefined| +| **customVocabulary** | **Array<string>** | Provide a list of specific words or phrases (e.g., proper nouns, jargon) to increase their recognition likelihood. Support varies by provider (e.g., Deepgram, AssemblyAI). | (optional) defaults to undefined| + + +### Return type + +**TranscriptionResponse** + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: audio/* + - **Accept**: application/json, text/plain + + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +|**200** | Successful transcription response. The content type and structure depend on the `output_format` parameter specified in the request. - `application/json`: Returned for `output_format=json` or `json_text`. See `TranscriptionResponse` schema (`TranscriptionDetailed` or `TranscriptionOnlyText`). - `text/plain`: Returned for `output_format=text`. | - | +|**400** | Bad Request - The request was malformed or contained invalid parameters (e.g., invalid language code, missing required field, unsupported option). The response body provides details. | - | +|**401** | Unauthorized - Authentication failed. The API key is missing, invalid, or expired. | - | +|**402** | Payment Required - There is no credit left on your account. | - | +|**404** | Not Found - The requested resource could not be found. This could be an invalid API endpoint path, or a referenced resource ID (like `ruleset_id`) that doesn\'t exist. For `/transcribe-remote`, it could also mean the `file_url` was inaccessible. | - | +|**429** | Too Many Requests - The client has exceeded the rate limit for API requests. Check the `Retry-After` header for guidance on when to retry. | * Retry-After - The recommended number of seconds to wait before making another request.
| +|**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - | +|**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - | +|**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **transcribeRemote** +> TranscriptionResponse transcribeRemote(transcriptionOptions) + +This endpoint allows you to transcribe an audio file hosted at a publicly accessible URL. Provide the URL and transcription options within the JSON request body. Useful for transcribing files already stored online. + +### Example + +```typescript +import { + SpeechToTextApi, + Configuration, + TranscriptionOptions +} from './api'; + +const configuration = new Configuration(); +const apiInstance = new SpeechToTextApi(configuration); + +let transcriptionOptions: TranscriptionOptions; //JSON object containing the URL of the audio file and the desired transcription options. + +const { status, data } = await apiInstance.transcribeRemote( + transcriptionOptions +); +``` + +### Parameters + +|Name | Type | Description | Notes| +|------------- | ------------- | ------------- | -------------| +| **transcriptionOptions** | **TranscriptionOptions**| JSON object containing the URL of the audio file and the desired transcription options. | | + + +### Return type + +**TranscriptionResponse** + +### Authorization + +[bearerAuth](../README.md#bearerAuth) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json, text/plain + + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +|**200** | Successful transcription response. The content type and structure depend on the `output_format` parameter specified in the request. - `application/json`: Returned for `output_format=json` or `json_text`. See `TranscriptionResponse` schema (`TranscriptionDetailed` or `TranscriptionOnlyText`). - `text/plain`: Returned for `output_format=text`. | - | +|**400** | Bad Request - The request was malformed or contained invalid parameters (e.g., invalid language code, missing required field, unsupported option). The response body provides details. | - | +|**401** | Unauthorized - Authentication failed. The API key is missing, invalid, or expired. | - | +|**402** | Payment Required - There is no credit left on your account. | - | +|**404** | Not Found - The requested resource could not be found. This could be an invalid API endpoint path, or a referenced resource ID (like `ruleset_id`) that doesn\'t exist. For `/transcribe-remote`, it could also mean the `file_url` was inaccessible. | - | +|**429** | Too Many Requests - The client has exceeded the rate limit for API requests. Check the `Retry-After` header for guidance on when to retry. | * Retry-After - The recommended number of seconds to wait before making another request.
| +|**500** | Internal Server Error - An unexpected error occurred on the server side while processing the request. Retrying the request later might succeed. If the problem persists, contact support. | - | +|**503** | Service Unavailable - The server is temporarily unable to handle the request, possibly due to maintenance or overload. Try again later. | - | +|**504** | Gateway Timeout - The server, while acting as a gateway or proxy, did not receive a timely response from an upstream server (e.g., the underlying STT provider). This might be a temporary issue with the provider. | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/docs/SpeechToTextModel.md b/docs/SpeechToTextModel.md new file mode 100644 index 0000000..a4f4541 --- /dev/null +++ b/docs/SpeechToTextModel.md @@ -0,0 +1,77 @@ +# SpeechToTextModel + +Describes an available speech-to-text model, its provider, capabilities, and characteristics. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | [**TranscriptionModelIdentifier**](TranscriptionModelIdentifier.md) | | [default to undefined] +**display_name** | **string** | A user-friendly name for the model. | [default to undefined] +**provider** | [**TranscriptionProvider**](TranscriptionProvider.md) | | [default to undefined] +**description** | **string** | A brief description of the model, its intended use case, or version notes. | [optional] [default to undefined] +**cost_per_second_usd** | **number** | The cost per second of audio processed in USD. | [optional] [default to undefined] +**is_available** | **boolean** | Indicates whether the model is currently available for use. | [default to true] +**supported_languages** | **Array<string>** | A list of language codes (preferably BCP 47, e.g., \"en-US\", \"en-GB\", \"es-ES\") supported by this model. May include `auto` if automatic language detection is supported across multiple languages within a single audio file. | [optional] [default to undefined] +**punctuation** | **boolean** | Indicates whether the model generally supports automatic punctuation insertion. | [optional] [default to undefined] +**diarization** | **boolean** | Indicates whether the model generally supports speaker diarization (identifying different speakers). | [optional] [default to undefined] +**streamable** | **boolean** | Indicates whether the model can be used for real-time streaming transcription via a WebSocket connection (if offered by Speechall). | [optional] [default to undefined] +**real_time_factor** | **number** | An approximate measure of processing speed for batch processing. Defined as (audio duration) / (processing time). A higher value means faster processing (e.g., RTF=2 means it processes 1 second of audio in 0.5 seconds). May not be available for all models or streaming scenarios. | [optional] [default to undefined] +**max_duration_seconds** | **number** | The maximum duration of a single audio file (in seconds) that the model can reliably process in one request. May vary by provider or plan. | [optional] [default to undefined] +**max_file_size_bytes** | **number** | The maximum size of a single audio file (in bytes) that can be uploaded for processing by this model. May vary by provider or plan. | [optional] [default to undefined] +**version** | **string** | The specific version identifier for the model. 
| [optional] [default to undefined] +**release_date** | **string** | The date when this specific version of the model was released or last updated. | [optional] [default to undefined] +**model_type** | **string** | The primary type or training domain of the model. Helps identify suitability for different audio types. | [optional] [default to undefined] +**accuracy_tier** | **string** | A general indication of the model\'s expected accuracy level relative to other models. Not a guaranteed metric. | [optional] [default to undefined] +**supported_audio_encodings** | **Array<string>** | A list of audio encodings that this model supports or is optimized for (e.g., LINEAR16, FLAC, MP3, Opus). | [optional] [default to undefined] +**supported_sample_rates** | **Array<number>** | A list of audio sample rates (in Hz) that this model supports or is optimized for. | [optional] [default to undefined] +**speaker_labels** | **boolean** | Indicates whether the model can provide speaker labels for the transcription. | [optional] [default to undefined] +**word_timestamps** | **boolean** | Indicates whether the model can provide timestamps for individual words. | [optional] [default to undefined] +**confidence_scores** | **boolean** | Indicates whether the model provides confidence scores for the transcription or individual words. | [optional] [default to undefined] +**language_detection** | **boolean** | Indicates whether the model supports automatic language detection for input audio. | [optional] [default to undefined] +**custom_vocabulary_support** | **boolean** | Indicates if the model can leverage a custom vocabulary or language model adaptation. | [optional] [default to undefined] +**profanity_filtering** | **boolean** | Indicates if the model supports filtering or masking of profanity. | [optional] [default to undefined] +**noise_reduction** | **boolean** | Indicates if the model supports noise reduction. | [optional] [default to undefined] +**supports_srt** | **boolean** | Indicates whether the model supports SRT subtitle format output. | [default to false] +**supports_vtt** | **boolean** | Indicates whether the model supports VTT subtitle format output. | [default to false] +**voice_activity_detection** | **boolean** | Indicates whether the model supports voice activity detection (VAD) to identify speech segments. | [optional] [default to undefined] + +## Example + +```typescript +import { SpeechToTextModel } from './api'; + +const instance: SpeechToTextModel = { + id, + display_name, + provider, + description, + cost_per_second_usd, + is_available, + supported_languages, + punctuation, + diarization, + streamable, + real_time_factor, + max_duration_seconds, + max_file_size_bytes, + version, + release_date, + model_type, + accuracy_tier, + supported_audio_encodings, + supported_sample_rates, + speaker_labels, + word_timestamps, + confidence_scores, + language_detection, + custom_vocabulary_support, + profanity_filtering, + noise_reduction, + supports_srt, + supports_vtt, + voice_activity_detection, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptLanguageCode.md b/docs/TranscriptLanguageCode.md new file mode 100644 index 0000000..11d41a0 --- /dev/null +++ b/docs/TranscriptLanguageCode.md @@ -0,0 +1,213 @@ +# TranscriptLanguageCode + +The language code of the audio file, typically in ISO 639-1 format. 
Specifying the correct language improves transcription accuracy and speed. The special value `auto` can be used to request automatic language detection, if supported by the selected model. If omitted, the default language is English (`en`). + +## Enum + +* `Auto` (value: `'auto'`) + +* `En` (value: `'en'`) + +* `EnAu` (value: `'en_au'`) + +* `EnUk` (value: `'en_uk'`) + +* `EnUs` (value: `'en_us'`) + +* `Af` (value: `'af'`) + +* `Am` (value: `'am'`) + +* `Ar` (value: `'ar'`) + +* `As` (value: `'as'`) + +* `Az` (value: `'az'`) + +* `Ba` (value: `'ba'`) + +* `Be` (value: `'be'`) + +* `Bg` (value: `'bg'`) + +* `Bn` (value: `'bn'`) + +* `Bo` (value: `'bo'`) + +* `Br` (value: `'br'`) + +* `Bs` (value: `'bs'`) + +* `Ca` (value: `'ca'`) + +* `Cs` (value: `'cs'`) + +* `Cy` (value: `'cy'`) + +* `Da` (value: `'da'`) + +* `De` (value: `'de'`) + +* `El` (value: `'el'`) + +* `Es` (value: `'es'`) + +* `Et` (value: `'et'`) + +* `Eu` (value: `'eu'`) + +* `Fa` (value: `'fa'`) + +* `Fi` (value: `'fi'`) + +* `Fo` (value: `'fo'`) + +* `Fr` (value: `'fr'`) + +* `Gl` (value: `'gl'`) + +* `Gu` (value: `'gu'`) + +* `Ha` (value: `'ha'`) + +* `Haw` (value: `'haw'`) + +* `He` (value: `'he'`) + +* `Hi` (value: `'hi'`) + +* `Hr` (value: `'hr'`) + +* `Ht` (value: `'ht'`) + +* `Hu` (value: `'hu'`) + +* `Hy` (value: `'hy'`) + +* `Id` (value: `'id'`) + +* `Is` (value: `'is'`) + +* `It` (value: `'it'`) + +* `Ja` (value: `'ja'`) + +* `Jw` (value: `'jw'`) + +* `Ka` (value: `'ka'`) + +* `Kk` (value: `'kk'`) + +* `Km` (value: `'km'`) + +* `Kn` (value: `'kn'`) + +* `Ko` (value: `'ko'`) + +* `La` (value: `'la'`) + +* `Lb` (value: `'lb'`) + +* `Ln` (value: `'ln'`) + +* `Lo` (value: `'lo'`) + +* `Lt` (value: `'lt'`) + +* `Lv` (value: `'lv'`) + +* `Mg` (value: `'mg'`) + +* `Mi` (value: `'mi'`) + +* `Mk` (value: `'mk'`) + +* `Ml` (value: `'ml'`) + +* `Mn` (value: `'mn'`) + +* `Mr` (value: `'mr'`) + +* `Ms` (value: `'ms'`) + +* `Mt` (value: `'mt'`) + +* `My` (value: `'my'`) + +* `Ne` (value: `'ne'`) + +* `Nl` (value: `'nl'`) + +* `Nn` (value: `'nn'`) + +* `False` (value: `'false'`) + +* `Oc` (value: `'oc'`) + +* `Pa` (value: `'pa'`) + +* `Pl` (value: `'pl'`) + +* `Ps` (value: `'ps'`) + +* `Pt` (value: `'pt'`) + +* `Ro` (value: `'ro'`) + +* `Ru` (value: `'ru'`) + +* `Sa` (value: `'sa'`) + +* `Sd` (value: `'sd'`) + +* `Si` (value: `'si'`) + +* `Sk` (value: `'sk'`) + +* `Sl` (value: `'sl'`) + +* `Sn` (value: `'sn'`) + +* `So` (value: `'so'`) + +* `Sq` (value: `'sq'`) + +* `Sr` (value: `'sr'`) + +* `Su` (value: `'su'`) + +* `Sv` (value: `'sv'`) + +* `Sw` (value: `'sw'`) + +* `Ta` (value: `'ta'`) + +* `Te` (value: `'te'`) + +* `Tg` (value: `'tg'`) + +* `Th` (value: `'th'`) + +* `Tk` (value: `'tk'`) + +* `Tl` (value: `'tl'`) + +* `Tr` (value: `'tr'`) + +* `Tt` (value: `'tt'`) + +* `Uk` (value: `'uk'`) + +* `Ur` (value: `'ur'`) + +* `Uz` (value: `'uz'`) + +* `Vi` (value: `'vi'`) + +* `Yi` (value: `'yi'`) + +* `Yo` (value: `'yo'`) + +* `Zh` (value: `'zh'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptOutputFormat.md b/docs/TranscriptOutputFormat.md new file mode 100644 index 0000000..9bc2651 --- /dev/null +++ b/docs/TranscriptOutputFormat.md @@ -0,0 +1,17 @@ +# TranscriptOutputFormat + +Specifies the desired format of the transcription output. - `text`: Plain text containing the full transcription. 
- `json_text`: A simple JSON object containing the transcription ID and the full text (`TranscriptionOnlyText` schema). - `json`: A detailed JSON object including segments, timestamps (based on `timestamp_granularity`), language, and potentially speaker labels and provider metadata (`TranscriptionDetailed` schema). - `srt`: SubRip subtitle format (returned as plain text). - `vtt`: WebVTT subtitle format (returned as plain text). + +## Enum + +* `Text` (value: `'text'`) + +* `JsonText` (value: `'json_text'`) + +* `Json` (value: `'json'`) + +* `Srt` (value: `'srt'`) + +* `Vtt` (value: `'vtt'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptionDetailed.md b/docs/TranscriptionDetailed.md new file mode 100644 index 0000000..8822e04 --- /dev/null +++ b/docs/TranscriptionDetailed.md @@ -0,0 +1,33 @@ +# TranscriptionDetailed + +A detailed JSON response format containing the full text, detected language, duration, individual timed segments, and potentially speaker labels and provider-specific metadata. Returned when `output_format` is `json`. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **string** | A unique identifier for the transcription job/request. | [default to undefined] +**text** | **string** | The full transcribed text as a single string. | [default to undefined] +**language** | **string** | The detected or specified language of the audio (ISO 639-1 code). | [optional] [default to undefined] +**duration** | **number** | The total duration of the processed audio file in seconds. **Deprecated**: This property may be removed in future versions as duration analysis might occur asynchronously. Rely on segment end times for duration information if needed. | [optional] [default to undefined] +**segments** | [**Array<TranscriptionSegment>**](TranscriptionSegment.md) | An array of transcribed segments, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. | [optional] [default to undefined] +**words** | [**Array<TranscriptionWord>**](TranscriptionWord.md) | An array of transcribed words, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. | [optional] [default to undefined] +**provider_metadata** | **{ [key: string]: any; }** | An optional object containing additional metadata returned directly from the underlying STT provider. The structure of this object is provider-dependent. 
| [optional] [default to undefined] + +## Example + +```typescript +import { TranscriptionDetailed } from './api'; + +const instance: TranscriptionDetailed = { + id, + text, + language, + duration, + segments, + words, + provider_metadata, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptionModelIdentifier.md b/docs/TranscriptionModelIdentifier.md new file mode 100644 index 0000000..2b7841f --- /dev/null +++ b/docs/TranscriptionModelIdentifier.md @@ -0,0 +1,121 @@ +# TranscriptionModelIdentifier + +Unique identifier for a specific Speech-to-Text model, composed as `provider.model_name`. Used to select the engine for transcription. + +## Enum + +* `AmazonTranscribe` (value: `'amazon.transcribe'`) + +* `AssemblyaiBest` (value: `'assemblyai.best'`) + +* `AssemblyaiNano` (value: `'assemblyai.nano'`) + +* `AzureStandard` (value: `'azure.standard'`) + +* `CloudflareWhisper` (value: `'cloudflare.whisper'`) + +* `DeepgramBase` (value: `'deepgram.base'`) + +* `DeepgramBaseConversationalai` (value: `'deepgram.base-conversationalai'`) + +* `DeepgramBaseFinance` (value: `'deepgram.base-finance'`) + +* `DeepgramBaseGeneral` (value: `'deepgram.base-general'`) + +* `DeepgramBaseMeeting` (value: `'deepgram.base-meeting'`) + +* `DeepgramBasePhonecall` (value: `'deepgram.base-phonecall'`) + +* `DeepgramBaseVideo` (value: `'deepgram.base-video'`) + +* `DeepgramBaseVoicemail` (value: `'deepgram.base-voicemail'`) + +* `DeepgramEnhanced` (value: `'deepgram.enhanced'`) + +* `DeepgramEnhancedFinance` (value: `'deepgram.enhanced-finance'`) + +* `DeepgramEnhancedGeneral` (value: `'deepgram.enhanced-general'`) + +* `DeepgramEnhancedMeeting` (value: `'deepgram.enhanced-meeting'`) + +* `DeepgramEnhancedPhonecall` (value: `'deepgram.enhanced-phonecall'`) + +* `DeepgramNova` (value: `'deepgram.nova'`) + +* `DeepgramNova2` (value: `'deepgram.nova-2'`) + +* `DeepgramNova2Atc` (value: `'deepgram.nova-2-atc'`) + +* `DeepgramNova2Automotive` (value: `'deepgram.nova-2-automotive'`) + +* `DeepgramNova2Conversationalai` (value: `'deepgram.nova-2-conversationalai'`) + +* `DeepgramNova2Drivethru` (value: `'deepgram.nova-2-drivethru'`) + +* `DeepgramNova2Finance` (value: `'deepgram.nova-2-finance'`) + +* `DeepgramNova2General` (value: `'deepgram.nova-2-general'`) + +* `DeepgramNova2Medical` (value: `'deepgram.nova-2-medical'`) + +* `DeepgramNova2Meeting` (value: `'deepgram.nova-2-meeting'`) + +* `DeepgramNova2Phonecall` (value: `'deepgram.nova-2-phonecall'`) + +* `DeepgramNova2Video` (value: `'deepgram.nova-2-video'`) + +* `DeepgramNova2Voicemail` (value: `'deepgram.nova-2-voicemail'`) + +* `DeepgramNova3` (value: `'deepgram.nova-3'`) + +* `DeepgramNovaGeneral` (value: `'deepgram.nova-general'`) + +* `DeepgramNovaPhonecall` (value: `'deepgram.nova-phonecall'`) + +* `DeepgramWhisper` (value: `'deepgram.whisper'`) + +* `DeepgramWhisperBase` (value: `'deepgram.whisper-base'`) + +* `DeepgramWhisperLarge` (value: `'deepgram.whisper-large'`) + +* `DeepgramWhisperMedium` (value: `'deepgram.whisper-medium'`) + +* `DeepgramWhisperSmall` (value: `'deepgram.whisper-small'`) + +* `DeepgramWhisperTiny` (value: `'deepgram.whisper-tiny'`) + +* `FalaiWhisper` (value: `'falai.whisper'`) + +* `FalaiWizper` (value: `'falai.wizper'`) + +* `FireworksaiWhisperV3` (value: `'fireworksai.whisper-v3'`) + +* `FireworksaiWhisperV3Turbo` (value: `'fireworksai.whisper-v3-turbo'`) + +* `GladiaStandard` (value: 
`'gladia.standard'`) + +* `GoogleEnhanced` (value: `'google.enhanced'`) + +* `GoogleStandard` (value: `'google.standard'`) + +* `GroqDistilWhisperLargeV3En` (value: `'groq.distil-whisper-large-v3-en'`) + +* `GroqWhisperLargeV3` (value: `'groq.whisper-large-v3'`) + +* `GroqWhisperLargeV3Turbo` (value: `'groq.whisper-large-v3-turbo'`) + +* `IbmStandard` (value: `'ibm.standard'`) + +* `OpenaiWhisper1` (value: `'openai.whisper-1'`) + +* `OpenaiGpt4oTranscribe` (value: `'openai.gpt-4o-transcribe'`) + +* `OpenaiGpt4oMiniTranscribe` (value: `'openai.gpt-4o-mini-transcribe'`) + +* `RevaiMachine` (value: `'revai.machine'`) + +* `SpeechmaticsEnhanced` (value: `'speechmatics.enhanced'`) + +* `SpeechmaticsStandard` (value: `'speechmatics.standard'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptionOnlyText.md b/docs/TranscriptionOnlyText.md new file mode 100644 index 0000000..f5fd9e0 --- /dev/null +++ b/docs/TranscriptionOnlyText.md @@ -0,0 +1,23 @@ +# TranscriptionOnlyText + +A simplified JSON response format containing only the transcription ID and the full transcribed text. Returned when `output_format` is `json_text`. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **string** | A unique identifier for the transcription job/request. | [default to undefined] +**text** | **string** | The full transcribed text as a single string. | [default to undefined] + +## Example + +```typescript +import { TranscriptionOnlyText } from './api'; + +const instance: TranscriptionOnlyText = { + id, + text, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptionOptions.md b/docs/TranscriptionOptions.md new file mode 100644 index 0000000..7546c4f --- /dev/null +++ b/docs/TranscriptionOptions.md @@ -0,0 +1,45 @@ +# TranscriptionOptions + +Configuration options for transcribing audio specified by a remote URL via the `/transcribe-remote` endpoint. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**file_url** | **string** | The publicly accessible URL of the audio file to transcribe. The API server must be able to fetch the audio from this URL. | [default to undefined] +**model** | [**TranscriptionModelIdentifier**](TranscriptionModelIdentifier.md) | | [default to undefined] +**language** | [**TranscriptLanguageCode**](TranscriptLanguageCode.md) | | [optional] [default to undefined] +**output_format** | [**TranscriptOutputFormat**](TranscriptOutputFormat.md) | | [optional] [default to undefined] +**punctuation** | **boolean** | Whether to add punctuation. Support varies by model (e.g., Deepgram, AssemblyAI). Defaults to `true`. | [optional] [default to true] +**timestamp_granularity** | **string** | Level of timestamp detail (`word` or `segment`). Defaults to `segment`. | [optional] [default to TimestampGranularityEnum_Segment] +**diarization** | **boolean** | Enable speaker diarization. Defaults to `false`. | [optional] [default to false] +**initial_prompt** | **string** | Optional text prompt to guide the transcription model. Support varies (e.g., OpenAI). | [optional] [default to undefined] +**temperature** | **number** | Controls output randomness for supported models (e.g., OpenAI). 
Value between 0 and 1. | [optional] [default to undefined] +**smart_format** | **boolean** | Enable provider-specific smart formatting (e.g., Deepgram). Defaults vary. | [optional] [default to undefined] +**speakers_expected** | **number** | Hint for the number of expected speakers for diarization (e.g., RevAI, Deepgram). | [optional] [default to undefined] +**custom_vocabulary** | **Array<string>** | List of custom words/phrases to improve recognition (e.g., Deepgram, AssemblyAI). | [optional] [default to undefined] +**replacement_ruleset** | [**Array<ReplacementRule>**](ReplacementRule.md) | An array of replacement rules to be applied directly to this transcription request, in order. This allows defining rules inline instead of using a pre-saved `ruleset_id`. | [optional] [default to undefined] + +## Example + +```typescript +import { TranscriptionOptions } from './api'; + +const instance: TranscriptionOptions = { + file_url, + model, + language, + output_format, + punctuation, + timestamp_granularity, + diarization, + initial_prompt, + temperature, + smart_format, + speakers_expected, + custom_vocabulary, + replacement_ruleset, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptionProvider.md b/docs/TranscriptionProvider.md new file mode 100644 index 0000000..d03ae3b --- /dev/null +++ b/docs/TranscriptionProvider.md @@ -0,0 +1,35 @@ +# TranscriptionProvider + +The identifier for the underlying Speech-to-Text service provider (e.g., \'openai\', \'deepgram\'). + +## Enum + +* `Amazon` (value: `'amazon'`) + +* `Assemblyai` (value: `'assemblyai'`) + +* `Azure` (value: `'azure'`) + +* `Cloudflare` (value: `'cloudflare'`) + +* `Deepgram` (value: `'deepgram'`) + +* `Falai` (value: `'falai'`) + +* `Fireworksai` (value: `'fireworksai'`) + +* `Gladia` (value: `'gladia'`) + +* `Google` (value: `'google'`) + +* `Groq` (value: `'groq'`) + +* `Ibm` (value: `'ibm'`) + +* `Openai` (value: `'openai'`) + +* `Revai` (value: `'revai'`) + +* `Speechmatics` (value: `'speechmatics'`) + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptionResponse.md b/docs/TranscriptionResponse.md new file mode 100644 index 0000000..682a84a --- /dev/null +++ b/docs/TranscriptionResponse.md @@ -0,0 +1,33 @@ +# TranscriptionResponse + +Represents the JSON structure returned when a JSON-based `output_format` (`json` or `json_text`) is requested. It can be either a detailed structure or a simple text-only structure. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**id** | **string** | A unique identifier for the transcription job/request. | [default to undefined] +**text** | **string** | The full transcribed text as a single string. | [default to undefined] +**language** | **string** | The detected or specified language of the audio (ISO 639-1 code). | [optional] [default to undefined] +**duration** | **number** | The total duration of the processed audio file in seconds. **Deprecated**: This property may be removed in future versions as duration analysis might occur asynchronously. Rely on segment end times for duration information if needed. 
| [optional] [default to undefined] +**segments** | [**Array<TranscriptionSegment>**](TranscriptionSegment.md) | An array of transcribed segments, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. | [optional] [default to undefined] +**words** | [**Array<TranscriptionWord>**](TranscriptionWord.md) | An array of transcribed words, providing time-coded chunks of the transcription. The level of detail (word vs. segment timestamps) depends on the `timestamp_granularity` request parameter. May include speaker labels if diarization was enabled. | [optional] [default to undefined] +**provider_metadata** | **{ [key: string]: any; }** | An optional object containing additional metadata returned directly from the underlying STT provider. The structure of this object is provider-dependent. | [optional] [default to undefined] + +## Example + +```typescript +import { TranscriptionResponse } from './api'; + +const instance: TranscriptionResponse = { + id, + text, + language, + duration, + segments, + words, + provider_metadata, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptionSegment.md b/docs/TranscriptionSegment.md new file mode 100644 index 0000000..eb2b970 --- /dev/null +++ b/docs/TranscriptionSegment.md @@ -0,0 +1,29 @@ +# TranscriptionSegment + +Represents a time-coded segment of the transcription, typically corresponding to a phrase, sentence, or speaker turn. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**start** | **number** | The start time of the segment in seconds from the beginning of the audio. | [optional] [default to undefined] +**end** | **number** | The end time of the segment in seconds from the beginning of the audio. | [optional] [default to undefined] +**text** | **string** | The transcribed text content of this segment. | [optional] [default to undefined] +**speaker** | **string** | An identifier for the speaker of this segment, present if diarization was enabled and successful. | [optional] [default to undefined] +**confidence** | **number** | The model\'s confidence score for the transcription of this segment, typically between 0 and 1 (if provided by the model). | [optional] [default to undefined] + +## Example + +```typescript +import { TranscriptionSegment } from './api'; + +const instance: TranscriptionSegment = { + start, + end, + text, + speaker, + confidence, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/docs/TranscriptionWord.md b/docs/TranscriptionWord.md new file mode 100644 index 0000000..0b91858 --- /dev/null +++ b/docs/TranscriptionWord.md @@ -0,0 +1,29 @@ +# TranscriptionWord + +Represents a word in the transcription, providing time-coded chunks of the transcription. + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**start** | **number** | The start time of the word in seconds from the beginning of the audio. | [default to undefined] +**end** | **number** | The end time of the word in seconds from the beginning of the audio. 
| [default to undefined] +**word** | **string** | The transcribed word. | [default to undefined] +**speaker** | **string** | An identifier for the speaker of this word, present if diarization was enabled and successful. | [optional] [default to undefined] +**confidence** | **number** | The model\'s confidence score for the transcription of this word, typically between 0 and 1 (if provided by the model). | [optional] [default to undefined] + +## Example + +```typescript +import { TranscriptionWord } from './api'; + +const instance: TranscriptionWord = { + start, + end, + word, + speaker, + confidence, +}; +``` + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/example.js b/example.js new file mode 100644 index 0000000..0abe2cb --- /dev/null +++ b/example.js @@ -0,0 +1,44 @@ +const { Configuration, SpeechToTextApi } = require('@speechall/sdk'); + +// Example usage of the Speechall TypeScript SDK +async function example() { + // Configure the SDK + const config = new Configuration({ + apiKey: process.env.SPEECHALL_API_KEY || 'your-api-key-here', + basePath: 'https://api.speechall.com' // Replace with actual API base path + }); + + // Create API instance + const speechApi = new SpeechToTextApi(config); + + try { + // List available models + console.log('Fetching available models...'); + const models = await speechApi.listSpeechToTextModels(); + console.log('Available models:'); + models.data.slice(0, 3).forEach(model => { + console.log(` - ${model.id}: ${model.display_name} (${model.provider})`); + }); + + // Transcribe audio from URL + console.log('\nTranscribing audio...'); + const response = await speechApi.transcribeRemote({ + file_url: 'https://example.com/sample-audio.mp3', // Replace with actual audio URL + model: 'deepgram.nova-2-general', + language: 'en', + output_format: 'json', + punctuation: true + }); + + console.log('Transcription result:', response.data.text); + } catch (error) { + console.error('Error:', error.response?.data || error.message); + } +} + +// Only run if this file is executed directly +if (require.main === module) { + example(); +} + +module.exports = { example }; \ No newline at end of file diff --git a/example.ts b/example.ts new file mode 100644 index 0000000..fdd0a40 --- /dev/null +++ b/example.ts @@ -0,0 +1,195 @@ +import { + Configuration, + SpeechToTextApi, + OpenAICompatibleSpeechToTextApi, + ReplacementRulesApi, + TranscriptionOptions, + TranscriptionModelIdentifier +} from './index'; + +// Example usage of the Speechall TypeScript SDK +async function main(): Promise { + // Configure the SDK + const config = new Configuration({ + apiKey: process.env.SPEECHALL_API_KEY || 'your-api-key-here', + basePath: 'https://api.speechall.com' // Replace with actual API base path + }); + + // Create API instances + const speechApi = new SpeechToTextApi(config); + const openaiApi = new OpenAICompatibleSpeechToTextApi(config); + const rulesApi = new ReplacementRulesApi(config); + + try { + // List available models + console.log('Fetching available models...'); + const models = await speechApi.listSpeechToTextModels(); + console.log('Available models:'); + models.data.slice(0, 3).forEach(model => { + console.log(` - ${model.id}: ${model.display_name} (${model.provider})`); + console.log(` Languages: ${model.supported_languages?.join(', ')}`); + console.log(` Cost: $${model.cost_per_second_usd}/second\n`); + }); + + // Example 1: Basic 
transcription + console.log('Example 1: Basic transcription...'); + const basicOptions: TranscriptionOptions = { + file_url: 'https://example.com/sample-audio.mp3', + model: TranscriptionModelIdentifier.DeepgramNova2General, + language: 'en', + output_format: 'json' + }; + + const basicResult = await speechApi.transcribeRemote(basicOptions); + console.log('Basic transcription:', basicResult.data.text); + + // Example 2: Advanced transcription with options + console.log('\nExample 2: Advanced transcription...'); + const advancedOptions: TranscriptionOptions = { + file_url: 'https://example.com/meeting-audio.mp3', + model: TranscriptionModelIdentifier.DeepgramNova2Meeting, + language: 'en', + output_format: 'json', + diarization: true, + punctuation: true, + timestamp_granularity: 'word', + speakers_expected: 3, + custom_vocabulary: ['API', 'TypeScript', 'Speechall'] + }; + + const advancedResult = await speechApi.transcribeRemote(advancedOptions); + console.log('Advanced transcription:', advancedResult.data.text); + + // Example 3: Custom replacement rules + console.log('\nExample 3: Creating replacement rules...'); + const rulesetResponse = await rulesApi.createReplacementRuleset({ + name: 'Technical Terms Enhancement', + rules: [ + { + kind: 'exact', + search: 'API', + replacement: 'A.P.I.', + caseSensitive: false + }, + { + kind: 'regex', + pattern: '\\b(\\d+)\\s*dollars?\\b', + replacement: '$$$1', + flags: ['i'] + } + ] + }); + + console.log('Created ruleset with ID:', rulesetResponse.data.id); + + // Example 4: OpenAI-compatible endpoint + console.log('\nExample 4: OpenAI-compatible transcription...'); + // Note: This would require a File object in a real implementation + // const file = new File([audioBuffer], 'audio.mp3', { type: 'audio/mpeg' }); + // const openaiResult = await openaiApi.openaiCompatibleCreateTranscription( + // file, + // TranscriptionModelIdentifier.DeepgramNova2General, + // 'en', + // 'Transcribe this audio file', + // 'json', + // 0.2 + // ); + console.log('OpenAI-compatible endpoint would be used here with file upload'); + + // Example 5: Direct file upload transcription + console.log('\nExample 5: Direct file upload transcription...'); + // This example shows how to transcribe a local file or file from HTML form + // In a real browser environment, you might get the file from: + // const fileInput = document.getElementById('audio-file') as HTMLInputElement; + // const file = fileInput.files[0]; + + // For this example, we'll simulate creating a File object + // In practice, you'd have actual audio data from a file input or local file + try { + // Simulating file creation - in real use, you'd have actual audio data + const audioData = new Uint8Array(1024); // Placeholder for actual audio data + const audioFile = new File([audioData], 'sample-audio.wav', { + type: 'audio/wav' + }); + + console.log('Transcribing uploaded file:', audioFile.name); + + const fileResult = await speechApi.transcribe( + TranscriptionModelIdentifier.DeepgramNova2General, // model + audioFile, // File object + 'en', // language + 'json', // output format + undefined, // ruleset_id + true, // punctuation + 'word', // timestamp granularity + false, // diarization + 'Please transcribe this audio file clearly', // initial prompt + 0.1 // temperature + ); + + console.log('Direct file transcription result:', fileResult.data); + } catch (error: any) { + console.log('File transcription example (simulated file):', error.message); + console.log('In a real implementation, you would provide actual 
audio file data'); + } + + // Example of how you might handle file input in a browser environment: + console.log('\n--- Browser File Input Example ---'); + console.log(` +// HTML: +// +// + +async function transcribeFile() { + const fileInput = document.getElementById('audioFile') as HTMLInputElement; + const file = fileInput.files?.[0]; + + if (!file) { + alert('Please select an audio file'); + return; + } + + try { + const result = await speechApi.transcribe( + TranscriptionModelIdentifier.DeepgramNova2General, + file, + 'en', + 'json', + undefined, // no ruleset + true, // punctuation + 'segment', // timestamp granularity + true, // diarization + 'Transcribe this uploaded audio file' // prompt + ); + + console.log('Transcription:', result.data.text); + + // Handle detailed response if format is 'json' + if ('segments' in result.data) { + result.data.segments?.forEach((segment, index) => { + console.log(\`Segment \${index + 1}: \${segment.text}\`); + if (segment.speaker) { + console.log(\` Speaker: \${segment.speaker}\`); + } + if (segment.start && segment.end) { + console.log(\` Time: \${segment.start}s - \${segment.end}s\`); + } + }); + } + } catch (error) { + console.error('Transcription failed:', error); + } +} + `); + + } catch (error: any) { + console.error('Error occurred:', error.response?.data || error.message); + } +} + +// Only run if this file is executed directly +if (require.main === module) { + main().catch(console.error); +} + +export { main as example }; \ No newline at end of file diff --git a/git_push.sh b/git_push.sh new file mode 100644 index 0000000..f53a75d --- /dev/null +++ b/git_push.sh @@ -0,0 +1,57 @@ +#!/bin/sh +# ref: https://help.github.com/articles/adding-an-existing-project-to-github-using-the-command-line/ +# +# Usage example: /bin/sh ./git_push.sh wing328 openapi-petstore-perl "minor update" "gitlab.com" + +git_user_id=$1 +git_repo_id=$2 +release_note=$3 +git_host=$4 + +if [ "$git_host" = "" ]; then + git_host="github.com" + echo "[INFO] No command line input provided. Set \$git_host to $git_host" +fi + +if [ "$git_user_id" = "" ]; then + git_user_id="GIT_USER_ID" + echo "[INFO] No command line input provided. Set \$git_user_id to $git_user_id" +fi + +if [ "$git_repo_id" = "" ]; then + git_repo_id="GIT_REPO_ID" + echo "[INFO] No command line input provided. Set \$git_repo_id to $git_repo_id" +fi + +if [ "$release_note" = "" ]; then + release_note="Minor update" + echo "[INFO] No command line input provided. Set \$release_note to $release_note" +fi + +# Initialize the local directory as a Git repository +git init + +# Adds the files in the local repository and stages them for commit. +git add . + +# Commits the tracked changes and prepares them to be pushed to a remote repository. +git commit -m "$release_note" + +# Sets the new remote +git_remote=$(git remote) +if [ "$git_remote" = "" ]; then # git remote not defined + + if [ "$GIT_TOKEN" = "" ]; then + echo "[INFO] \$GIT_TOKEN (environment variable) is not set. Using the git credential in your environment." 
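+    # With no GIT_TOKEN set, the remote is added over plain HTTPS and the later
+    # push relies on the git credential helper already configured in your environment.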
+ git remote add origin https://${git_host}/${git_user_id}/${git_repo_id}.git + else + git remote add origin https://${git_user_id}:"${GIT_TOKEN}"@${git_host}/${git_user_id}/${git_repo_id}.git + fi + +fi + +git pull origin master + +# Pushes (Forces) the changes in the local repository up to the remote repository +echo "Git pushing to https://${git_host}/${git_user_id}/${git_repo_id}.git" +git push origin master 2>&1 | grep -v 'To https' diff --git a/index.ts b/index.ts new file mode 100644 index 0000000..7803840 --- /dev/null +++ b/index.ts @@ -0,0 +1,18 @@ +/* tslint:disable */ +/* eslint-disable */ +/** + * Speechall API + * The Speechall REST API provides powerful and flexible speech-to-text capabilities. It allows you to transcribe audio files using various underlying STT providers and models, optionally apply custom text replacement rules, and access results in multiple formats. The API includes standard endpoints for transcription and endpoints compatible with the OpenAI API structure. + * + * The version of the OpenAPI document: 0.0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export * from "./api"; +export * from "./configuration"; + diff --git a/package-lock.json b/package-lock.json new file mode 100644 index 0000000..f8d738a --- /dev/null +++ b/package-lock.json @@ -0,0 +1,2486 @@ +{ + "name": "@speechall/sdk", + "version": "0.0.1", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@speechall/sdk", + "version": "0.0.1", + "license": "MIT", + "dependencies": { + "axios": "^1.6.0" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "@typescript-eslint/eslint-plugin": "^6.0.0", + "@typescript-eslint/parser": "^6.0.0", + "eslint": "^8.0.0", + "rimraf": "^5.0.0", + "typescript": "^5.0.0" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.7.0.tgz", + "integrity": "sha512-dyybb3AcajC7uha6CvhdVRJqaKyn7w2YKqKyAN37NKYgZT36w+iRb0Dymmc5qEJ549c/S31cMMSFd75bteCpCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", + "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": 
"https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + 
"integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.17.52", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.17.52.tgz", + "integrity": "sha512-2aj++KfxubvW/Lc0YyXE3OEW7Es8TWn1MsRzYgcOGyTNQxi0L8rxQUCZ7ZbyOBWZQD5I63PV9egZWMsapVaklg==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.19.2" + } + }, + "node_modules/@types/semver": { + "version": "7.7.0", + "resolved": 
"https://registry.npmjs.org/@types/semver/-/semver-7.7.0.tgz", + "integrity": "sha512-k107IF4+Xr7UHjwDc7Cfd6PRQfbdkiRabXGRjo07b4WyPahFBZCZ1sE+BNxYIJPPg73UkfOsVOLwqVc/6ETrIA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", + "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/type-utils": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", + "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", + "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": 
{ + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", + "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "dev": true, + "license": "ISC" + }, + "node_modules/acorn": { + "version": "8.14.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz", + "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": 
"sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/axios": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.9.0.tgz", + "integrity": "sha512-re4CqKTJaURpzbLHtIi6XpDv20/CnpXOtjRY5/CU32L8gU8ek9UIivcfvSWvmKEngmVbrUtPpdDwWDWL7DNHvg==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": 
"sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + 
"supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + 
"integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + 
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": 
"sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.19.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flat-cache/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/flat-cache/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer 
supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/flat-cache/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/flat-cache/node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.9", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz", + "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/form-data": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.2.tgz", + "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" 
+ } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": 
"https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": 
"sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + 
}, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": 
"^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": 
"sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "5.0.10", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-5.0.10.tgz", + "integrity": "sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^10.3.7" + }, + "bin": { + "rimraf": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": 
"sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.3.tgz", + "integrity": "sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.8.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", + "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.19.8", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.19.8.tgz", + "integrity": 
"sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw==", + "dev": true, + "license": "MIT" + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/package.json b/package.json new file mode 100644 index 0000000..72e114e --- /dev/null +++ b/package.json @@ -0,0 +1,59 @@ +{ + "name": "@speechall/sdk", + "version": "0.0.1", + "description": "TypeScript SDK for the Speechall API - A powerful and flexible speech-to-text service", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "module": "dist/esm/index.js", + "sideEffects": false, + "files": [ + "dist/**/*", + "README.md", + "LICENSE" + ], + "scripts": { + "build": "npm run build:cjs && npm run build:esm", + "build:cjs": "tsc -p tsconfig.json", + "build:esm": "tsc -p tsconfig.esm.json && node scripts/fix-esm-imports.js", + "clean": "rimraf dist", + "prepublishOnly": "npm run clean && npm run build", + "test": "echo \"Error: no test specified\" && exit 1", + "lint": "eslint . --ext .ts", + "lint:fix": "eslint . 
--ext .ts --fix" + }, + "keywords": [ + "speechall", + "speech-to-text", + "stt", + "transcription", + "audio", + "voice", + "api", + "typescript", + "sdk" + ], + "author": "Speechall", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/speechall/speechall-typescript-sdk.git" + }, + "bugs": { + "url": "https://github.com/speechall/speechall-typescript-sdk/issues" + }, + "homepage": "https://github.com/speechall/speechall-typescript-sdk#readme", + "dependencies": { + "axios": "^1.6.0" + }, + "devDependencies": { + "@types/node": "^20.0.0", + "@typescript-eslint/eslint-plugin": "^6.0.0", + "@typescript-eslint/parser": "^6.0.0", + "eslint": "^8.0.0", + "rimraf": "^5.0.0", + "typescript": "^5.0.0" + }, + "engines": { + "node": ">=16" + } +} diff --git a/scripts/fix-esm-imports.js b/scripts/fix-esm-imports.js new file mode 100644 index 0000000..4bd7b8b --- /dev/null +++ b/scripts/fix-esm-imports.js @@ -0,0 +1,40 @@ +const fs = require('fs'); +const path = require('path'); + +function fixEsmImports(dir) { + const files = fs.readdirSync(dir); + + files.forEach(file => { + if (file.endsWith('.js')) { + const filePath = path.join(dir, file); + let content = fs.readFileSync(filePath, 'utf8'); + + // Fix relative imports to include .js extension + content = content.replace( + /from ["'](\.[^"']*?)["']/g, + (match, importPath) => { + if (!importPath.endsWith('.js')) { + return match.replace(importPath, importPath + '.js'); + } + return match; + } + ); + + // Fix export * from relative imports + content = content.replace( + /export \* from ["'](\.[^"']*?)["']/g, + (match, importPath) => { + if (!importPath.endsWith('.js')) { + return match.replace(importPath, importPath + '.js'); + } + return match; + } + ); + + fs.writeFileSync(filePath, content); + } + }); +} + +fixEsmImports('./dist/esm'); +console.log('ESM imports fixed!'); \ No newline at end of file diff --git a/tsconfig.esm.json b/tsconfig.esm.json new file mode 100644 index 0000000..1de44a0 --- /dev/null +++ b/tsconfig.esm.json @@ -0,0 +1,11 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "module": "ESNext", + "moduleResolution": "bundler", + "outDir": "./dist/esm", + "declaration": false, + "declarationMap": false, + "allowImportingTsExtensions": false + } +} \ No newline at end of file diff --git a/tsconfig.json b/tsconfig.json new file mode 100644 index 0000000..dd05f57 --- /dev/null +++ b/tsconfig.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "target": "ES2020", + "module": "CommonJS", + "lib": ["ES2020", "DOM"], + "declaration": true, + "declarationMap": true, + "outDir": "./dist", + "rootDir": "./", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "moduleResolution": "node", + "resolveJsonModule": true, + "noEmitOnError": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedIndexedAccess": false, + "exactOptionalPropertyTypes": false, + "removeComments": true + }, + "include": [ + "*.ts" + ], + "exclude": [ + "node_modules", + "dist", + "**/*.test.ts", + "**/*.spec.ts" + ] +} \ No newline at end of file