From fa1a65a3a0cf488d81e6d34997f6edc27d4061ec Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 24 Sep 2025 22:43:12 +0000 Subject: [PATCH 1/7] SDK regeneration --- .mock/definition/__package__.yml | 46 --- .mock/definition/projects.yml | 77 ++++ .mock/definition/prompts.yml | 112 +---- .mock/definition/prompts/runs.yml | 26 -- .mock/definition/tasks.yml | 93 +---- .mock/openapi/openapi.yaml | 179 ++------ reference.md | 389 ++++-------------- src/label_studio_sdk/__init__.py | 10 - src/label_studio_sdk/projects/client.py | 105 +++++ src/label_studio_sdk/prompts/client.py | 341 +-------------- src/label_studio_sdk/prompts/runs/client.py | 127 ------ src/label_studio_sdk/tasks/client.py | 9 +- src/label_studio_sdk/types/__init__.py | 10 - .../types/cancel_model_run_response.py | 19 - ...ated_project_subset_tasks_response_list.py | 23 -- .../types/project_subset_item.py | 21 - .../types/project_subset_task_item.py | 24 -- .../types/project_subset_tasks_response.py | 27 -- tests/prompts/test_runs.py | 10 - tests/test_projects.py | 15 + tests/test_prompts.py | 50 --- tests/test_tasks.py | 82 ++-- 22 files changed, 401 insertions(+), 1394 deletions(-) delete mode 100644 src/label_studio_sdk/types/cancel_model_run_response.py delete mode 100644 src/label_studio_sdk/types/paginated_project_subset_tasks_response_list.py delete mode 100644 src/label_studio_sdk/types/project_subset_item.py delete mode 100644 src/label_studio_sdk/types/project_subset_task_item.py delete mode 100644 src/label_studio_sdk/types/project_subset_tasks_response.py diff --git a/.mock/definition/__package__.yml b/.mock/definition/__package__.yml index dd0ffd77d..0fb7e7cff 100644 --- a/.mock/definition/__package__.yml +++ b/.mock/definition/__package__.yml @@ -1490,11 +1490,6 @@ types: * `Yearly` - Yearly source: openapi: openapi/openapi.yaml - CancelModelRunResponse: - properties: - detail: string - source: - openapi: openapi/openapi.yaml ChildFilter: properties: column: @@ -5934,20 +5929,6 @@ types: maxLength: 256 source: openapi: openapi/openapi.yaml - PaginatedProjectSubsetTasksResponseList: - properties: - count: integer - next: - type: optional - validation: - format: uri - previous: - type: optional - validation: - format: uri - results: list - source: - openapi: openapi/openapi.yaml PaginatedRoleBasedTaskList: properties: tasks: list @@ -6437,33 +6418,6 @@ types: * `Sample` - Sample source: openapi: openapi/openapi.yaml - ProjectSubsetItem: - properties: - columns_schema: optional>> - count: integer - subset: string - source: - openapi: openapi/openapi.yaml - ProjectSubsetTaskItem: - properties: - data: map - error: optional> - ground_truth: optional> - id: optional - prediction: optional> - score: optional - source: - openapi: openapi/openapi.yaml - ProjectSubsetTasksResponse: - properties: - next_cursor: optional - previous_cursor: optional - task_count: - type: optional - docs: Present only when include_total=true - task_result_list: list - source: - openapi: openapi/openapi.yaml ProjectTemplate: properties: assignment_settings: optional diff --git a/.mock/definition/projects.yml b/.mock/definition/projects.yml index be3dac50c..e7ea2e811 100644 --- a/.mock/definition/projects.yml +++ b/.mock/definition/projects.yml @@ -840,6 +840,31 @@ service: workspace_title: workspace_title audiences: - public + annotators: + path: /api/projects/{id}/annotators/ + method: GET + auth: true + docs: Return users who have submitted annotations in the specified project. 
+ source: + openapi: openapi/openapi.yaml + path-parameters: + id: integer + display-name: List annotators for project + response: + docs: List of annotator users + type: list + examples: + - path-parameters: + id: 1 + response: + body: + - avatar: avatar + email: email + first_name: first_name + id: 1 + last_name: last_name + audiences: + - public duplicate: path: /api/projects/{id}/duplicate/ method: POST @@ -1287,5 +1312,57 @@ service: label_config: label_config audiences: - public + api_projects_subset_tasks_list: + path: /api/projects/{project_pk}/subset-tasks + method: GET + auth: true + docs: |2- + + Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided. + + source: + openapi: openapi/openapi.yaml + path-parameters: + project_pk: integer + display-name: Get Project Subset Task List with Predictions and Accuracy details + request: + name: ApiProjectsSubsetTasksListRequest + query-parameters: + model_run: + type: optional + docs: A unique ID of a ModelRun + ordering: + type: optional + docs: Which field to use when ordering the results. + page: + type: optional + docs: A page number within the paginated result set. + page_size: + type: optional + docs: Number of results to return per page. + project_subset: + type: optional + docs: The project subset to retrieve tasks for + errors: + - root.BadRequestError + examples: + - path-parameters: + project_pk: 1 + api_projects_subsets_retrieve: + path: /api/projects/{project_pk}/subsets + method: GET + auth: true + docs: |2- + + Provides list of available subsets for a project along with count of tasks in each subset + + source: + openapi: openapi/openapi.yaml + path-parameters: + project_pk: integer + display-name: Get available subsets of a project (for prompts usage) + examples: + - path-parameters: + project_pk: 1 source: openapi: openapi/openapi.yaml diff --git a/.mock/definition/prompts.yml b/.mock/definition/prompts.yml index 63037d5ee..822dc918b 100644 --- a/.mock/definition/prompts.yml +++ b/.mock/definition/prompts.yml @@ -84,100 +84,6 @@ service: - key: value audiences: - public - subset_tasks: - path: /api/projects/{project_pk}/subset-tasks - method: GET - auth: true - docs: |2- - - Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided. - - source: - openapi: openapi/openapi.yaml - path-parameters: - project_pk: integer - display-name: Get Project Subset Task List with Predictions and Accuracy details - request: - name: PromptsSubsetTasksRequest - query-parameters: - include_total: - type: optional - docs: >- - If true (default), includes task_count in response; if false, - omits it. - model_run: - type: optional - docs: A unique ID of a ModelRun - ordering: - type: optional - docs: Which field to use when ordering the results. - page: - type: optional - docs: A page number within the paginated result set. - page_size: - type: optional - docs: Number of results to return per page. 
- parent_model: - type: optional - docs: The ID of the parent model (ModelInterface) for this Inference Run - project_subset: - type: optional - docs: The project subset to retrieve tasks for - response: - docs: '' - type: root.PaginatedProjectSubsetTasksResponseList - errors: - - root.BadRequestError - examples: - - path-parameters: - project_pk: 1 - response: - body: - count: 123 - next: http://api.example.org/accounts/?page=4 - previous: http://api.example.org/accounts/?page=2 - results: - - next_cursor: next_cursor - previous_cursor: previous_cursor - task_count: 1 - task_result_list: - - data: - key: value - audiences: - - public - subsets: - path: /api/projects/{project_pk}/subsets - method: GET - auth: true - docs: |2- - - Provides list of available subsets for a project along with count of tasks in each subset - - source: - openapi: openapi/openapi.yaml - path-parameters: - project_pk: integer - display-name: Get available subsets of a project (for prompts usage) - request: - name: PromptsSubsetsRequest - query-parameters: - ordering: - type: optional - docs: Which field to use when ordering the results. - response: - docs: '' - type: list - examples: - - path-parameters: - project_pk: 1 - response: - body: - - columns_schema: - - key: value - count: 1 - subset: subset - audiences: - - public list: path: /api/prompts/ method: GET @@ -585,6 +491,24 @@ service: updated_at: '2024-01-15T09:30:00Z' audiences: - internal + api_prompts_versions_inference_runs_cancel_create: + path: >- + /api/prompts/{prompt_id}/versions/{version_id}/inference-runs/{inference_run_id}/cancel + method: POST + auth: true + docs: Cancel the inference run for the given api + source: + openapi: openapi/openapi.yaml + path-parameters: + inference_run_id: integer + prompt_id: integer + version_id: integer + display-name: Cancel Inference Run API + examples: + - path-parameters: + inference_run_id: 1 + prompt_id: 1 + version_id: 1 source: openapi: openapi/openapi.yaml types: diff --git a/.mock/definition/prompts/runs.yml b/.mock/definition/prompts/runs.yml index 82c606df0..26679188a 100644 --- a/.mock/definition/prompts/runs.yml +++ b/.mock/definition/prompts/runs.yml @@ -135,31 +135,5 @@ service: triggered_at: '2024-01-15T09:30:00Z' audiences: - public - cancel: - path: >- - /api/prompts/{prompt_id}/versions/{version_id}/inference-runs/{inference_run_id}/cancel - method: POST - auth: true - docs: Cancel the inference run for the given api - source: - openapi: openapi/openapi.yaml - path-parameters: - inference_run_id: integer - prompt_id: integer - version_id: integer - display-name: Cancel Inference Run API - response: - docs: '' - type: root.CancelModelRunResponse - examples: - - path-parameters: - inference_run_id: 1 - prompt_id: 1 - version_id: 1 - response: - body: - detail: detail - audiences: - - public source: openapi: openapi/openapi.yaml diff --git a/.mock/definition/tasks.yml b/.mock/definition/tasks.yml index ffd036176..c45adf0fa 100644 --- a/.mock/definition/tasks.yml +++ b/.mock/definition/tasks.yml @@ -305,79 +305,6 @@ service: response: docs: '' type: root.LseTask - examples: - - name: Create Task - request: - data: - image: https://example.com/image.jpg - text: Hello, world! 
- project: 1 - response: - body: - agreement: agreement - agreement_selected: agreement_selected - annotations: annotations - annotations_ids: annotations_ids - annotations_results: annotations_results - annotators: - - 1 - annotators_count: 1 - avg_lead_time: 1.1 - cancelled_annotations: 1 - comment_authors: - - key: value - comment_authors_count: 1 - comment_count: 1 - comments: comments - completed_at: '2024-01-15T09:30:00Z' - created_at: '2024-01-15T09:30:00Z' - data: - key: value - draft_exists: true - drafts: - - created_at: '2024-01-15T09:30:00Z' - result: - - key: value - updated_at: '2024-01-15T09:30:00Z' - file_upload: file_upload - ground_truth: true - id: 1 - inner_id: 1 - is_labeled: true - last_comment_updated_at: '2024-01-15T09:30:00Z' - meta: - key: value - overlap: 1 - predictions: - - created_at: '2024-01-15T09:30:00Z' - model: - key: value - model_run: - key: value - model_version: model_version - project: 1 - result: - - key: value - score: 1.1 - task: 1 - updated_at: '2024-01-15T09:30:00Z' - predictions_model_versions: predictions_model_versions - predictions_results: predictions_results - predictions_score: 1.1 - project: 1 - reviewed: true - reviewers: - - key: value - reviewers_count: 1 - reviews_accepted: 1 - reviews_rejected: 1 - storage_filename: storage_filename - total_annotations: 1 - total_predictions: 1 - unresolved_comment_count: 1 - updated_at: '2024-01-15T09:30:00Z' - updated_by: - - key: value audiences: - public get: @@ -629,41 +556,41 @@ service: docs: |2- Create a new task event to track user interactions and system events during annotation. - + This endpoint is designed to receive events from the frontend labeling interface to enable accurate lead time calculation and detailed annotation analytics. - + ## Event Types - + **Core Annotation Events:** - `annotation_loaded` - When annotation interface is loaded - `annotation_submitted` - When annotation is submitted - `annotation_updated` - When annotation is modified - `annotation_reviewed` - When annotation is reviewed - + **User Activity Events:** - `visibility_change` - When page visibility changes (tab switch, minimize) - `idle_detected` - When user goes idle - `idle_resumed` - When user returns from idle - + **Interaction Events:** - `region_finished_drawing` - When annotation region is completed - `region_deleted` - When annotation regions are removed - `hotkey_pressed` - When keyboard shortcuts are used - + **Media Events:** - `video_playback_start/end` - Video playback control - `audio_playback_start/end` - Audio playback control - `video_scrub` - Video timeline scrubbing - + ## Usage - + Events are automatically associated with the task specified in the URL path. The current user is automatically set as the actor. Project and organization are derived from the task context. - + ## Example Request - + ```json { "event_key": "annotation_loaded", diff --git a/.mock/openapi/openapi.yaml b/.mock/openapi/openapi.yaml index 81cf61632..f0a243866 100644 --- a/.mock/openapi/openapi.yaml +++ b/.mock/openapi/openapi.yaml @@ -6899,6 +6899,34 @@ paths: - public x-fern-sdk-group-name: annotation_history x-fern-sdk-method-name: list_for_project + /api/projects/{id}/annotators/: + get: + description: Return users who have submitted annotations in the specified project. 
+ operationId: api_projects_annotators_retrieve + parameters: + - in: path + name: id + required: true + schema: + type: integer + responses: + '200': + content: + application/json: + schema: + items: + $ref: '#/components/schemas/UserSimple' + type: array + description: List of annotator users + security: + - Token: [] + summary: List annotators for project + tags: + - Projects + x-fern-audiences: + - public + x-fern-sdk-group-name: projects + x-fern-sdk-method-name: annotators /api/projects/{id}/aws-custom-function: get: description: Get the AWS Lambda code for the custom metric configured for this project. @@ -9731,11 +9759,6 @@ paths: description: "\n Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided.\n " operationId: api_projects_subset_tasks_list parameters: - - description: If true (default), includes task_count in response; if false, omits it. - in: query - name: include_total - schema: - type: boolean - description: A unique ID of a ModelRun in: query name: model_run @@ -9759,11 +9782,6 @@ paths: required: false schema: type: integer - - description: The ID of the parent model (ModelInterface) for this Inference Run - in: query - name: parent_model - schema: - type: integer - in: path name: project_pk required: true @@ -9776,11 +9794,7 @@ paths: type: string responses: '200': - content: - application/json: - schema: - $ref: '#/components/schemas/PaginatedProjectSubsetTasksResponseList' - description: '' + description: Project subset task list '400': description: Bad request - missing parent_model security: @@ -9788,21 +9802,11 @@ paths: summary: Get Project Subset Task List with Predictions and Accuracy details tags: - Projects - x-fern-audiences: - - public - x-fern-sdk-group-name: prompts - x-fern-sdk-method-name: subset_tasks /api/projects/{project_pk}/subsets: get: description: "\n Provides list of available subsets for a project along with count of tasks in each subset\n " - operationId: api_projects_subsets_list + operationId: api_projects_subsets_retrieve parameters: - - description: Which field to use when ordering the results. - in: query - name: ordering - required: false - schema: - type: string - in: path name: project_pk required: true @@ -9810,22 +9814,12 @@ paths: type: integer responses: '200': - content: - application/json: - schema: - items: - $ref: '#/components/schemas/ProjectSubsetItem' - type: array - description: '' + description: No response body security: - Token: [] summary: Get available subsets of a project (for prompts usage) tags: - Projects - x-fern-audiences: - - public - x-fern-sdk-group-name: prompts - x-fern-sdk-method-name: subsets /api/prompts/: get: description: List all prompts. @@ -10453,23 +10447,13 @@ paths: schema: type: integer responses: - '200': - content: - application/json: - schema: - $ref: '#/components/schemas/CancelModelRunResponse' - description: '' + '201': + description: No response body security: - Token: [] summary: Cancel Inference Run API tags: - Prompts - x-fern-audiences: - - public - x-fern-sdk-group-name: - - prompts - - runs - x-fern-sdk-method-name: cancel /api/prompts/{prompt_id}/versions/{version_id}/refine: get: description: Get the refined prompt based on the `refinement_job_id`. @@ -15758,15 +15742,6 @@ paths: requestBody: content: application/json: - examples: - CreateTask: - description: Example of Create Task - summary: Create Task - value: - data: - image: https://example.com/image.jpg - text: Hello, world! 
- project: 1 schema: $ref: '#/components/schemas/LseTaskRequest' application/x-www-form-urlencoded: @@ -16066,7 +16041,7 @@ paths: x-fern-sdk-method-name: create /api/tasks/{id}/events/: post: - description: "\n Create a new task event to track user interactions and system events during annotation.\n\n This endpoint is designed to receive events from the frontend labeling interface to enable\n accurate lead time calculation and detailed annotation analytics.\n\n ## Event Types\n\n **Core Annotation Events:**\n - `annotation_loaded` - When annotation interface is loaded\n - `annotation_submitted` - When annotation is submitted\n - `annotation_updated` - When annotation is modified\n - `annotation_reviewed` - When annotation is reviewed\n\n **User Activity Events:**\n - `visibility_change` - When page visibility changes (tab switch, minimize)\n - `idle_detected` - When user goes idle\n - `idle_resumed` - When user returns from idle\n\n **Interaction Events:**\n - `region_finished_drawing` - When annotation region is completed\n - `region_deleted` - When annotation regions are removed\n - `hotkey_pressed` - When keyboard shortcuts are used\n\n **Media Events:**\n - `video_playback_start/end` - Video playback control\n - `audio_playback_start/end` - Audio playback control\n - `video_scrub` - Video timeline scrubbing\n\n ## Usage\n\n Events are automatically associated with the task specified in the URL path.\n The current user is automatically set as the actor. Project and organization\n are derived from the task context.\n\n ## Example Request\n\n ```json\n {\n \"event_key\": \"annotation_loaded\",\n \"event_time\": \"2024-01-15T10:30:00Z\",\n \"annotation\": 123,\n \"meta\": {\n \"annotation_count\": 5,\n \"estimated_time\": 300\n }\n }\n ```\n " + description: "\n Create a new task event to track user interactions and system events during annotation.\n \n This endpoint is designed to receive events from the frontend labeling interface to enable\n accurate lead time calculation and detailed annotation analytics.\n \n ## Event Types\n \n **Core Annotation Events:**\n - `annotation_loaded` - When annotation interface is loaded\n - `annotation_submitted` - When annotation is submitted\n - `annotation_updated` - When annotation is modified\n - `annotation_reviewed` - When annotation is reviewed\n \n **User Activity Events:**\n - `visibility_change` - When page visibility changes (tab switch, minimize)\n - `idle_detected` - When user goes idle\n - `idle_resumed` - When user returns from idle\n \n **Interaction Events:**\n - `region_finished_drawing` - When annotation region is completed\n - `region_deleted` - When annotation regions are removed\n - `hotkey_pressed` - When keyboard shortcuts are used\n \n **Media Events:**\n - `video_playback_start/end` - Video playback control\n - `audio_playback_start/end` - Audio playback control\n - `video_scrub` - Video timeline scrubbing\n \n ## Usage\n \n Events are automatically associated with the task specified in the URL path.\n The current user is automatically set as the actor. 
Project and organization\n are derived from the task context.\n \n ## Example Request\n \n ```json\n {\n \"event_key\": \"annotation_loaded\",\n \"event_time\": \"2024-01-15T10:30:00Z\",\n \"annotation\": 123,\n \"meta\": {\n \"annotation_count\": 5,\n \"estimated_time\": 300\n }\n }\n ```\n " operationId: api_tasks_events_create parameters: - description: Task ID to associate the event with @@ -19467,13 +19442,6 @@ components: - Monthly - Yearly type: string - CancelModelRunResponse: - properties: - detail: - type: string - required: - - detail - type: object CheckMatchingFunctionRequestRequest: properties: code: @@ -25706,29 +25674,6 @@ components: - project_role - username type: object - PaginatedProjectSubsetTasksResponseList: - properties: - count: - example: 123 - type: integer - next: - example: http://api.example.org/accounts/?page=4 - format: uri - nullable: true - type: string - previous: - example: http://api.example.org/accounts/?page=2 - format: uri - nullable: true - type: string - results: - items: - $ref: '#/components/schemas/ProjectSubsetTasksResponse' - type: array - required: - - count - - results - type: object PaginatedRoleBasedTaskList: properties: tasks: @@ -28422,62 +28367,6 @@ components: - HasGT - Sample type: string - ProjectSubsetItem: - properties: - columns_schema: - items: - additionalProperties: {} - type: object - type: array - count: - type: integer - subset: - type: string - required: - - count - - subset - type: object - ProjectSubsetTaskItem: - properties: - data: - additionalProperties: {} - type: object - error: - additionalProperties: {} - type: object - ground_truth: - additionalProperties: {} - type: object - id: - type: integer - prediction: - additionalProperties: {} - type: object - score: - format: double - nullable: true - type: number - required: - - data - type: object - ProjectSubsetTasksResponse: - properties: - next_cursor: - nullable: true - type: string - previous_cursor: - nullable: true - type: string - task_count: - description: Present only when include_total=true - type: integer - task_result_list: - items: - $ref: '#/components/schemas/ProjectSubsetTaskItem' - type: array - required: - - task_result_list - type: object ProjectTemplate: properties: assignment_settings: diff --git a/reference.md b/reference.md index bb1a2e435..3627aaa91 100644 --- a/reference.md +++ b/reference.md @@ -6686,214 +6686,6 @@ client.prompts.batch_predictions( - - - - -
-client.prompts.subset_tasks(...)
-
-#### 📝 Description
-
-Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided.
-
-#### 🔌 Usage
-
-```python
-from label_studio_sdk import LabelStudio
-
-client = LabelStudio(
-    api_key="YOUR_API_KEY",
-)
-client.prompts.subset_tasks(
-    project_pk=1,
-)
-
-```
-
-#### ⚙️ Parameters
-
-**project_pk:** `int`
-
-**include_total:** `typing.Optional[bool]` — If true (default), includes task_count in response; if false, omits it.
-
-**model_run:** `typing.Optional[int]` — A unique ID of a ModelRun
-
-**ordering:** `typing.Optional[str]` — Which field to use when ordering the results.
-
-**page:** `typing.Optional[int]` — A page number within the paginated result set.
-
-**page_size:** `typing.Optional[int]` — Number of results to return per page.
-
-**parent_model:** `typing.Optional[int]` — The ID of the parent model (ModelInterface) for this Inference Run
-
-**project_subset:** `typing.Optional[str]` — The project subset to retrieve tasks for
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-client.prompts.subsets(...)
-
-#### 📝 Description
-
-Provides list of available subsets for a project along with count of tasks in each subset
-
-#### 🔌 Usage
-
-```python
-from label_studio_sdk import LabelStudio
-
-client = LabelStudio(
-    api_key="YOUR_API_KEY",
-)
-client.prompts.subsets(
-    project_pk=1,
-)
-
-```
-
-#### ⚙️ Parameters
-
-**project_pk:** `int`
-
-**ordering:** `typing.Optional[str]` — Which field to use when ordering the results.
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
@@ -9636,6 +9428,76 @@ client.projects.update( + + + + +
+client.projects.annotators(...)
+
+#### 📝 Description
+
+Return users who have submitted annotations in the specified project.
+
+#### 🔌 Usage
+
+```python
+from label_studio_sdk import LabelStudio
+
+client = LabelStudio(
+    api_key="YOUR_API_KEY",
+)
+client.projects.annotators(
+    id=1,
+)
+
+```
+
+#### ⚙️ Parameters
+
+**id:** `int`
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
@@ -10415,8 +10277,7 @@ client = LabelStudio( api_key="YOUR_API_KEY", ) client.tasks.create( - data={"image": "https://example.com/image.jpg", "text": "Hello, world!"}, - project=1, + data={"key": "value"}, ) ``` @@ -10949,41 +10810,41 @@ client.tasks.update( Create a new task event to track user interactions and system events during annotation. - + This endpoint is designed to receive events from the frontend labeling interface to enable accurate lead time calculation and detailed annotation analytics. - + ## Event Types - + **Core Annotation Events:** - `annotation_loaded` - When annotation interface is loaded - `annotation_submitted` - When annotation is submitted - `annotation_updated` - When annotation is modified - `annotation_reviewed` - When annotation is reviewed - + **User Activity Events:** - `visibility_change` - When page visibility changes (tab switch, minimize) - `idle_detected` - When user goes idle - `idle_resumed` - When user returns from idle - + **Interaction Events:** - `region_finished_drawing` - When annotation region is completed - `region_deleted` - When annotation regions are removed - `hotkey_pressed` - When keyboard shortcuts are used - + **Media Events:** - `video_playback_start/end` - Video playback control - `audio_playback_start/end` - Audio playback control - `video_scrub` - Video timeline scrubbing - + ## Usage - + Events are automatically associated with the task specified in the URL path. The current user is automatically set as the actor. Project and organization are derived from the task context. - + ## Example Request - + ```json { "event_key": "annotation_loaded", @@ -33708,94 +33569,6 @@ client.prompts.runs.create( - - - - -
-client.prompts.runs.cancel(...)
-
-#### 📝 Description
-
-Cancel the inference run for the given api
-
-#### 🔌 Usage
-
-```python
-from label_studio_sdk import LabelStudio
-
-client = LabelStudio(
-    api_key="YOUR_API_KEY",
-)
-client.prompts.runs.cancel(
-    inference_run_id=1,
-    prompt_id=1,
-    version_id=1,
-)
-
-```
-
-#### ⚙️ Parameters
-
-**inference_run_id:** `int`
-
-**prompt_id:** `int`
-
-**version_id:** `int`
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
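Taken together, the reference.md changes above remove the `client.prompts.subset_tasks(...)`, `client.prompts.subsets(...)`, and `client.prompts.runs.cancel(...)` helpers and add a project-level annotators listing. A minimal usage sketch against the regenerated SDK (assuming the method name produced in this patch; a later patch in this series renames it to `list_unique_annotators`):

```python
from label_studio_sdk import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# New in this regeneration: users who have submitted annotations in project 1.
annotators = client.projects.annotators(id=1)
for user in annotators:
    # UserSimple fields per the OpenAPI schema: id, email, first_name, last_name, avatar
    print(user.id, user.email)
```
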
diff --git a/src/label_studio_sdk/__init__.py b/src/label_studio_sdk/__init__.py index 28d451aaa..b9f0d5892 100644 --- a/src/label_studio_sdk/__init__.py +++ b/src/label_studio_sdk/__init__.py @@ -37,7 +37,6 @@ BlankEnum, BlueprintList, BudgetResetPeriodEnum, - CancelModelRunResponse, ChildFilter, Comment, CommentRequest, @@ -153,7 +152,6 @@ PaginatedLseUserList, PaginatedPaginatedProjectMemberList, PaginatedProjectMember, - PaginatedProjectSubsetTasksResponseList, PaginatedRoleBasedTaskList, Pause, PauseRequest, @@ -170,9 +168,6 @@ ProjectSampling, ProjectSkipQueue, ProjectSubsetEnum, - ProjectSubsetItem, - ProjectSubsetTaskItem, - ProjectSubsetTasksResponse, ProjectTemplate, ProjectTemplateRequest, PromptsStatusEnum, @@ -404,7 +399,6 @@ "BlankEnum", "BlueprintList", "BudgetResetPeriodEnum", - "CancelModelRunResponse", "ChildFilter", "Client", "Comment", @@ -535,7 +529,6 @@ "PaginatedLseUserList", "PaginatedPaginatedProjectMemberList", "PaginatedProjectMember", - "PaginatedProjectSubsetTasksResponseList", "PaginatedRoleBasedTaskList", "PatchedDefaultRoleRequestCustomScriptsEditableBy", "PatchedLseProjectUpdateRequestSampling", @@ -555,9 +548,6 @@ "ProjectSampling", "ProjectSkipQueue", "ProjectSubsetEnum", - "ProjectSubsetItem", - "ProjectSubsetTaskItem", - "ProjectSubsetTasksResponse", "ProjectTemplate", "ProjectTemplateRequest", "ProjectsDuplicateResponse", diff --git a/src/label_studio_sdk/projects/client.py b/src/label_studio_sdk/projects/client.py index 953cf22bd..0af0a1361 100644 --- a/src/label_studio_sdk/projects/client.py +++ b/src/label_studio_sdk/projects/client.py @@ -30,6 +30,7 @@ from .types.patched_lse_project_update_request_sampling import PatchedLseProjectUpdateRequestSampling from .types.patched_lse_project_update_request_skip_queue import PatchedLseProjectUpdateRequestSkipQueue from ..types.lse_project_update import LseProjectUpdate +from ..types.user_simple import UserSimple from ..types.mode_enum import ModeEnum from .types.projects_duplicate_response import ProjectsDuplicateResponse from ..types.import_api_request import ImportApiRequest @@ -799,6 +800,54 @@ def update( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def annotators( + self, id: int, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[UserSimple]: + """ + Return users who have submitted annotations in the specified project. + + Parameters + ---------- + id : int + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[UserSimple] + List of annotator users + + Examples + -------- + from label_studio_sdk import LabelStudio + + client = LabelStudio( + api_key="YOUR_API_KEY", + ) + client.projects.annotators( + id=1, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"api/projects/{jsonable_encoder(id)}/annotators/", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[UserSimple], + construct_type( + type_=typing.List[UserSimple], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def duplicate( self, id: int, @@ -1949,6 +1998,62 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + async def annotators( + self, id: int, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[UserSimple]: + """ + Return users who have submitted annotations in the specified project. + + Parameters + ---------- + id : int + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[UserSimple] + List of annotator users + + Examples + -------- + import asyncio + + from label_studio_sdk import AsyncLabelStudio + + client = AsyncLabelStudio( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.projects.annotators( + id=1, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"api/projects/{jsonable_encoder(id)}/annotators/", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[UserSimple], + construct_type( + type_=typing.List[UserSimple], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def duplicate( self, id: int, diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py index 6f1e15812..15852d99c 100644 --- a/src/label_studio_sdk/prompts/client.py +++ b/src/label_studio_sdk/prompts/client.py @@ -11,10 +11,6 @@ from json.decoder import JSONDecodeError from ..core.api_error import ApiError from ..types.batch_predictions import BatchPredictions -from ..types.paginated_project_subset_tasks_response_list import PaginatedProjectSubsetTasksResponseList -from ..core.jsonable_encoder import jsonable_encoder -from ..errors.bad_request_error import BadRequestError -from ..types.project_subset_item import ProjectSubsetItem from ..types.model_interface_serializer_get import ModelInterfaceSerializerGet from ..types.user_simple_request import UserSimpleRequest from ..types.skill_name_enum import SkillNameEnum @@ -22,6 +18,7 @@ from ..core.serialization import convert_and_respect_annotation_metadata from .types.prompts_compatible_projects_request_project_type import PromptsCompatibleProjectsRequestProjectType from ..types.paginated_all_roles_project_list_list import PaginatedAllRolesProjectListList +from ..core.jsonable_encoder import jsonable_encoder from ..core.client_wrapper import 
AsyncClientWrapper from .indicators.client import AsyncIndicatorsClient from .versions.client import AsyncVersionsClient @@ -186,166 +183,6 @@ def batch_predictions( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def subset_tasks( - self, - project_pk: int, - *, - include_total: typing.Optional[bool] = None, - model_run: typing.Optional[int] = None, - ordering: typing.Optional[str] = None, - page: typing.Optional[int] = None, - page_size: typing.Optional[int] = None, - parent_model: typing.Optional[int] = None, - project_subset: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedProjectSubsetTasksResponseList: - """ - - Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided. - - - Parameters - ---------- - project_pk : int - - include_total : typing.Optional[bool] - If true (default), includes task_count in response; if false, omits it. - - model_run : typing.Optional[int] - A unique ID of a ModelRun - - ordering : typing.Optional[str] - Which field to use when ordering the results. - - page : typing.Optional[int] - A page number within the paginated result set. - - page_size : typing.Optional[int] - Number of results to return per page. - - parent_model : typing.Optional[int] - The ID of the parent model (ModelInterface) for this Inference Run - - project_subset : typing.Optional[str] - The project subset to retrieve tasks for - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - PaginatedProjectSubsetTasksResponseList - - - Examples - -------- - from label_studio_sdk import LabelStudio - - client = LabelStudio( - api_key="YOUR_API_KEY", - ) - client.prompts.subset_tasks( - project_pk=1, - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"api/projects/{jsonable_encoder(project_pk)}/subset-tasks", - method="GET", - params={ - "include_total": include_total, - "model_run": model_run, - "ordering": ordering, - "page": page, - "page_size": page_size, - "parent_model": parent_model, - "project_subset": project_subset, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PaginatedProjectSubsetTasksResponseList, - construct_type( - type_=PaginatedProjectSubsetTasksResponseList, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 400: - raise BadRequestError( - typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def subsets( - self, - project_pk: int, - *, - ordering: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.List[ProjectSubsetItem]: - """ - - Provides list of available subsets for a project along with count of tasks in each subset - - - Parameters - ---------- - project_pk : int - - ordering : typing.Optional[str] - Which field to use when ordering the results. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.List[ProjectSubsetItem] - - - Examples - -------- - from label_studio_sdk import LabelStudio - - client = LabelStudio( - api_key="YOUR_API_KEY", - ) - client.prompts.subsets( - project_pk=1, - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"api/projects/{jsonable_encoder(project_pk)}/subsets", - method="GET", - params={ - "ordering": ordering, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[ProjectSubsetItem], - construct_type( - type_=typing.List[ProjectSubsetItem], # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - def list( self, *, ordering: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None ) -> typing.List[ModelInterfaceSerializerGet]: @@ -903,182 +740,6 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def subset_tasks( - self, - project_pk: int, - *, - include_total: typing.Optional[bool] = None, - model_run: typing.Optional[int] = None, - ordering: typing.Optional[str] = None, - page: typing.Optional[int] = None, - page_size: typing.Optional[int] = None, - parent_model: typing.Optional[int] = None, - project_subset: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedProjectSubsetTasksResponseList: - """ - - Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided. - - - Parameters - ---------- - project_pk : int - - include_total : typing.Optional[bool] - If true (default), includes task_count in response; if false, omits it. - - model_run : typing.Optional[int] - A unique ID of a ModelRun - - ordering : typing.Optional[str] - Which field to use when ordering the results. - - page : typing.Optional[int] - A page number within the paginated result set. - - page_size : typing.Optional[int] - Number of results to return per page. - - parent_model : typing.Optional[int] - The ID of the parent model (ModelInterface) for this Inference Run - - project_subset : typing.Optional[str] - The project subset to retrieve tasks for - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - PaginatedProjectSubsetTasksResponseList - - - Examples - -------- - import asyncio - - from label_studio_sdk import AsyncLabelStudio - - client = AsyncLabelStudio( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.prompts.subset_tasks( - project_pk=1, - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"api/projects/{jsonable_encoder(project_pk)}/subset-tasks", - method="GET", - params={ - "include_total": include_total, - "model_run": model_run, - "ordering": ordering, - "page": page, - "page_size": page_size, - "parent_model": parent_model, - "project_subset": project_subset, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PaginatedProjectSubsetTasksResponseList, - construct_type( - type_=PaginatedProjectSubsetTasksResponseList, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 400: - raise BadRequestError( - typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def subsets( - self, - project_pk: int, - *, - ordering: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.List[ProjectSubsetItem]: - """ - - Provides list of available subsets for a project along with count of tasks in each subset - - - Parameters - ---------- - project_pk : int - - ordering : typing.Optional[str] - Which field to use when ordering the results. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.List[ProjectSubsetItem] - - - Examples - -------- - import asyncio - - from label_studio_sdk import AsyncLabelStudio - - client = AsyncLabelStudio( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.prompts.subsets( - project_pk=1, - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"api/projects/{jsonable_encoder(project_pk)}/subsets", - method="GET", - params={ - "ordering": ordering, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[ProjectSubsetItem], - construct_type( - type_=typing.List[ProjectSubsetItem], # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - async def list( self, *, ordering: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None ) -> typing.List[ModelInterfaceSerializerGet]: diff --git a/src/label_studio_sdk/prompts/runs/client.py b/src/label_studio_sdk/prompts/runs/client.py index 22d795000..3fdf84074 100644 --- a/src/label_studio_sdk/prompts/runs/client.py +++ b/src/label_studio_sdk/prompts/runs/client.py @@ -11,7 +11,6 @@ from ...core.api_error import ApiError import datetime as dt from ...types.project_subset_enum import ProjectSubsetEnum -from ...types.cancel_model_run_response import CancelModelRunResponse from ...core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -194,65 +193,6 @@ def create( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def cancel( - self, - inference_run_id: int, - prompt_id: int, - version_id: int, - *, - request_options: typing.Optional[RequestOptions] = None, - ) -> CancelModelRunResponse: - """ - Cancel the inference run for the given api - - Parameters - ---------- - inference_run_id : int - - prompt_id : int - - version_id : int - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CancelModelRunResponse - - - Examples - -------- - from label_studio_sdk import LabelStudio - - client = LabelStudio( - api_key="YOUR_API_KEY", - ) - client.prompts.runs.cancel( - inference_run_id=1, - prompt_id=1, - version_id=1, - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"api/prompts/{jsonable_encoder(prompt_id)}/versions/{jsonable_encoder(version_id)}/inference-runs/{jsonable_encoder(inference_run_id)}/cancel", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CancelModelRunResponse, - construct_type( - type_=CancelModelRunResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - class AsyncRunsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): @@ -445,70 +385,3 @@ async def main() -> None: except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - - async def cancel( - self, - inference_run_id: int, - prompt_id: int, - version_id: int, - *, - request_options: typing.Optional[RequestOptions] = None, - ) -> CancelModelRunResponse: - """ - Cancel the inference run for the given api - - Parameters - ---------- - inference_run_id : int - - prompt_id : int - - version_id : int - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CancelModelRunResponse - - - Examples - -------- - import asyncio - - from label_studio_sdk import AsyncLabelStudio - - client = AsyncLabelStudio( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.prompts.runs.cancel( - inference_run_id=1, - prompt_id=1, - version_id=1, - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"api/prompts/{jsonable_encoder(prompt_id)}/versions/{jsonable_encoder(version_id)}/inference-runs/{jsonable_encoder(inference_run_id)}/cancel", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CancelModelRunResponse, - construct_type( - type_=CancelModelRunResponse, # type: ignore - object_=_response.json(), - ), - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/label_studio_sdk/tasks/client.py b/src/label_studio_sdk/tasks/client.py index b8970b091..1c50065ee 100644 --- a/src/label_studio_sdk/tasks/client.py +++ b/src/label_studio_sdk/tasks/client.py @@ -372,8 +372,7 @@ def create( api_key="YOUR_API_KEY", ) client.tasks.create( - data={"image": "https://example.com/image.jpg", "text": "Hello, world!"}, - project=1, + data={"key": "value"}, ) """ _response = self._client_wrapper.httpx_client.request( @@ -1204,11 +1203,7 @@ async def create( async def main() -> None: await client.tasks.create( - data={ - "image": "https://example.com/image.jpg", - "text": "Hello, world!", - }, - project=1, + data={"key": "value"}, ) diff --git a/src/label_studio_sdk/types/__init__.py b/src/label_studio_sdk/types/__init__.py index f3078c9d4..ecac1985a 100644 --- a/src/label_studio_sdk/types/__init__.py +++ 
b/src/label_studio_sdk/types/__init__.py @@ -38,7 +38,6 @@ from .blank_enum import BlankEnum from .blueprint_list import BlueprintList from .budget_reset_period_enum import BudgetResetPeriodEnum -from .cancel_model_run_response import CancelModelRunResponse from .child_filter import ChildFilter from .comment import Comment from .comment_request import CommentRequest @@ -154,7 +153,6 @@ from .paginated_lse_user_list import PaginatedLseUserList from .paginated_paginated_project_member_list import PaginatedPaginatedProjectMemberList from .paginated_project_member import PaginatedProjectMember -from .paginated_project_subset_tasks_response_list import PaginatedProjectSubsetTasksResponseList from .paginated_role_based_task_list import PaginatedRoleBasedTaskList from .pause import Pause from .pause_request import PauseRequest @@ -171,9 +169,6 @@ from .project_sampling import ProjectSampling from .project_skip_queue import ProjectSkipQueue from .project_subset_enum import ProjectSubsetEnum -from .project_subset_item import ProjectSubsetItem -from .project_subset_task_item import ProjectSubsetTaskItem -from .project_subset_tasks_response import ProjectSubsetTasksResponse from .project_template import ProjectTemplate from .project_template_request import ProjectTemplateRequest from .prompts_status_enum import PromptsStatusEnum @@ -272,7 +267,6 @@ "BlankEnum", "BlueprintList", "BudgetResetPeriodEnum", - "CancelModelRunResponse", "ChildFilter", "Comment", "CommentRequest", @@ -388,7 +382,6 @@ "PaginatedLseUserList", "PaginatedPaginatedProjectMemberList", "PaginatedProjectMember", - "PaginatedProjectSubsetTasksResponseList", "PaginatedRoleBasedTaskList", "Pause", "PauseRequest", @@ -405,9 +398,6 @@ "ProjectSampling", "ProjectSkipQueue", "ProjectSubsetEnum", - "ProjectSubsetItem", - "ProjectSubsetTaskItem", - "ProjectSubsetTasksResponse", "ProjectTemplate", "ProjectTemplateRequest", "PromptsStatusEnum", diff --git a/src/label_studio_sdk/types/cancel_model_run_response.py b/src/label_studio_sdk/types/cancel_model_run_response.py deleted file mode 100644 index 8db748797..000000000 --- a/src/label_studio_sdk/types/cancel_model_run_response.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import typing -import pydantic - - -class CancelModelRunResponse(UncheckedBaseModel): - detail: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/label_studio_sdk/types/paginated_project_subset_tasks_response_list.py b/src/label_studio_sdk/types/paginated_project_subset_tasks_response_list.py deleted file mode 100644 index 47937908f..000000000 --- a/src/label_studio_sdk/types/paginated_project_subset_tasks_response_list.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -from .project_subset_tasks_response import ProjectSubsetTasksResponse -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic - - -class PaginatedProjectSubsetTasksResponseList(UncheckedBaseModel): - count: int - next: typing.Optional[str] = None - previous: typing.Optional[str] = None - results: typing.List[ProjectSubsetTasksResponse] - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/label_studio_sdk/types/project_subset_item.py b/src/label_studio_sdk/types/project_subset_item.py deleted file mode 100644 index 1fcb35ec2..000000000 --- a/src/label_studio_sdk/types/project_subset_item.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic - - -class ProjectSubsetItem(UncheckedBaseModel): - columns_schema: typing.Optional[typing.List[typing.Dict[str, typing.Optional[typing.Any]]]] = None - count: int - subset: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/label_studio_sdk/types/project_subset_task_item.py b/src/label_studio_sdk/types/project_subset_task_item.py deleted file mode 100644 index dd418752b..000000000 --- a/src/label_studio_sdk/types/project_subset_task_item.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic - - -class ProjectSubsetTaskItem(UncheckedBaseModel): - data: typing.Dict[str, typing.Optional[typing.Any]] - error: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None - ground_truth: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None - id: typing.Optional[int] = None - prediction: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None - score: typing.Optional[float] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/label_studio_sdk/types/project_subset_tasks_response.py b/src/label_studio_sdk/types/project_subset_tasks_response.py deleted file mode 100644 index 21c24cece..000000000 --- a/src/label_studio_sdk/types/project_subset_tasks_response.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -import pydantic -from .project_subset_task_item import ProjectSubsetTaskItem -from ..core.pydantic_utilities import IS_PYDANTIC_V2 - - -class ProjectSubsetTasksResponse(UncheckedBaseModel): - next_cursor: typing.Optional[str] = None - previous_cursor: typing.Optional[str] = None - task_count: typing.Optional[int] = pydantic.Field(default=None) - """ - Present only when include_total=true - """ - - task_result_list: typing.List[ProjectSubsetTaskItem] - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/tests/prompts/test_runs.py b/tests/prompts/test_runs.py index cc00dcd4a..a42f776cd 100644 --- a/tests/prompts/test_runs.py +++ b/tests/prompts/test_runs.py @@ -95,13 +95,3 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No async_response = await async_client.prompts.runs.create(prompt_id=1, version_id=1, project=1) validate_response(async_response, expected_response, expected_types) - - -async def test_cancel(client: LabelStudio, async_client: AsyncLabelStudio) -> None: - expected_response: typing.Any = {"detail": "detail"} - expected_types: typing.Any = {"detail": None} - response = client.prompts.runs.cancel(inference_run_id=1, prompt_id=1, version_id=1) - validate_response(response, expected_response, expected_types) - - async_response = await async_client.prompts.runs.cancel(inference_run_id=1, prompt_id=1, version_id=1) - validate_response(async_response, expected_response, expected_types) diff --git a/tests/test_projects.py b/tests/test_projects.py index 376a5a31d..98230095f 100644 --- a/tests/test_projects.py +++ b/tests/test_projects.py @@ -459,6 +459,21 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No validate_response(async_response, expected_response, expected_types) +async def test_annotators(client: LabelStudio, async_client: AsyncLabelStudio) -> None: + expected_response: typing.Any = [ + {"avatar": "avatar", "email": "email", "first_name": "first_name", "id": 1, "last_name": "last_name"} + ] + expected_types: typing.Tuple[typing.Any, typing.Any] = ( + "list", + {0: {"avatar": None, "email": None, "first_name": None, "id": "integer", "last_name": None}}, + ) + response = client.projects.annotators(id=1) + validate_response(response, expected_response, expected_types) + + async_response = await async_client.projects.annotators(id=1) + validate_response(async_response, expected_response, expected_types) + + async def test_duplicate(client: LabelStudio, async_client: AsyncLabelStudio) -> None: expected_response: typing.Any = {"id": 1} expected_types: typing.Any = {"id": "integer"} diff --git a/tests/test_prompts.py b/tests/test_prompts.py index 2eefc4703..d64449c92 100644 --- a/tests/test_prompts.py +++ b/tests/test_prompts.py @@ -26,56 +26,6 @@ async def test_batch_predictions(client: LabelStudio, async_client: AsyncLabelSt validate_response(async_response, expected_response, expected_types) -async def test_subset_tasks(client: LabelStudio, async_client: AsyncLabelStudio) -> None: - expected_response: typing.Any = { - "count": 123, - "next": "http://api.example.org/accounts/?page=4", - "previous": "http://api.example.org/accounts/?page=2", - "results": [ - { - "next_cursor": "next_cursor", - "previous_cursor": "previous_cursor", - 
"task_count": 1, - "task_result_list": [{"data": {"key": "value"}}], - } - ], - } - expected_types: typing.Any = { - "count": "integer", - "next": None, - "previous": None, - "results": ( - "list", - { - 0: { - "next_cursor": None, - "previous_cursor": None, - "task_count": "integer", - "task_result_list": ("list", {0: {"data": ("dict", {0: (None, None)})}}), - } - }, - ), - } - response = client.prompts.subset_tasks(project_pk=1) - validate_response(response, expected_response, expected_types) - - async_response = await async_client.prompts.subset_tasks(project_pk=1) - validate_response(async_response, expected_response, expected_types) - - -async def test_subsets(client: LabelStudio, async_client: AsyncLabelStudio) -> None: - expected_response: typing.Any = [{"columns_schema": [{"key": "value"}], "count": 1, "subset": "subset"}] - expected_types: typing.Tuple[typing.Any, typing.Any] = ( - "list", - {0: {"columns_schema": ("list", {0: ("dict", {0: (None, None)})}), "count": "integer", "subset": None}}, - ) - response = client.prompts.subsets(project_pk=1) - validate_response(response, expected_response, expected_types) - - async_response = await async_client.prompts.subsets(project_pk=1) - validate_response(async_response, expected_response, expected_types) - - async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> None: expected_response: typing.Any = [ { diff --git a/tests/test_tasks.py b/tests/test_tasks.py index a9634b271..cff84d376 100644 --- a/tests/test_tasks.py +++ b/tests/test_tasks.py @@ -81,20 +81,29 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "annotations": "annotations", "annotations_ids": "annotations_ids", "annotations_results": "annotations_results", - "annotators": [1], + "annotators": [1, 1], "annotators_count": 1, "avg_lead_time": 1.1, "cancelled_annotations": 1, - "comment_authors": [{"key": "value"}], + "comment_authors": [{"comment_authors": {"key": "value"}}, {"comment_authors": {"key": "value"}}], "comment_authors_count": 1, - "comment_count": 1, + "comment_count": 2147483647, "comments": "comments", "completed_at": "2024-01-15T09:30:00Z", "created_at": "2024-01-15T09:30:00Z", "data": {"key": "value"}, "draft_exists": True, "drafts": [ - {"created_at": "2024-01-15T09:30:00Z", "result": [{"key": "value"}], "updated_at": "2024-01-15T09:30:00Z"} + { + "created_at": "2024-01-15T09:30:00Z", + "result": [{"result": {"key": "value"}}, {"result": {"key": "value"}}], + "updated_at": "2024-01-15T09:30:00Z", + }, + { + "created_at": "2024-01-15T09:30:00Z", + "result": [{"result": {"key": "value"}}, {"result": {"key": "value"}}], + "updated_at": "2024-01-15T09:30:00Z", + }, ], "file_upload": "file_upload", "ground_truth": True, @@ -103,35 +112,46 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "is_labeled": True, "last_comment_updated_at": "2024-01-15T09:30:00Z", "meta": {"key": "value"}, - "overlap": 1, + "overlap": 2147483647, "predictions": [ { "created_at": "2024-01-15T09:30:00Z", - "model": {"key": "value"}, - "model_run": {"key": "value"}, + "model": {"model": {"key": "value"}}, + "model_run": {"model_run": {"key": "value"}}, "model_version": "model_version", "project": 1, - "result": [{"key": "value"}], + "result": [{"result": {"key": "value"}}, {"result": {"key": "value"}}], "score": 1.1, "task": 1, "updated_at": "2024-01-15T09:30:00Z", - } + }, + { + "created_at": "2024-01-15T09:30:00Z", + "model": {"model": {"key": "value"}}, + "model_run": {"model_run": {"key": 
"value"}}, + "model_version": "model_version", + "project": 1, + "result": [{"result": {"key": "value"}}, {"result": {"key": "value"}}], + "score": 1.1, + "task": 1, + "updated_at": "2024-01-15T09:30:00Z", + }, ], "predictions_model_versions": "predictions_model_versions", "predictions_results": "predictions_results", "predictions_score": 1.1, "project": 1, "reviewed": True, - "reviewers": [{"key": "value"}], + "reviewers": [{"reviewers": {"key": "value"}}, {"reviewers": {"key": "value"}}], "reviewers_count": 1, "reviews_accepted": 1, "reviews_rejected": 1, "storage_filename": "storage_filename", "total_annotations": 1, "total_predictions": 1, - "unresolved_comment_count": 1, + "unresolved_comment_count": 2147483647, "updated_at": "2024-01-15T09:30:00Z", - "updated_by": [{"key": "value"}], + "updated_by": [{"updated_by": {"key": "value"}}, {"updated_by": {"key": "value"}}], } expected_types: typing.Any = { "agreement": None, @@ -139,11 +159,11 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "annotations": None, "annotations_ids": None, "annotations_results": None, - "annotators": ("list", {0: "integer"}), + "annotators": ("list", {0: "integer", 1: "integer"}), "annotators_count": "integer", "avg_lead_time": None, "cancelled_annotations": "integer", - "comment_authors": ("list", {0: ("dict", {0: (None, None)})}), + "comment_authors": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), "comment_authors_count": "integer", "comment_count": "integer", "comments": None, @@ -156,9 +176,14 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No { 0: { "created_at": "datetime", - "result": ("list", {0: ("dict", {0: (None, None)})}), + "result": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), "updated_at": "datetime", - } + }, + 1: { + "created_at": "datetime", + "result": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), + "updated_at": "datetime", + }, }, ), "file_upload": None, @@ -178,11 +203,22 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "model_run": ("dict", {0: (None, None)}), "model_version": None, "project": "integer", - "result": ("list", {0: ("dict", {0: (None, None)})}), + "result": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), "score": None, "task": "integer", "updated_at": "datetime", - } + }, + 1: { + "created_at": "datetime", + "model": ("dict", {0: (None, None)}), + "model_run": ("dict", {0: (None, None)}), + "model_version": None, + "project": "integer", + "result": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), + "score": None, + "task": "integer", + "updated_at": "datetime", + }, }, ), "predictions_model_versions": None, @@ -190,7 +226,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "predictions_score": None, "project": "integer", "reviewed": None, - "reviewers": ("list", {0: ("dict", {0: (None, None)})}), + "reviewers": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), "reviewers_count": "integer", "reviews_accepted": "integer", "reviews_rejected": "integer", @@ -199,14 +235,12 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "total_predictions": "integer", "unresolved_comment_count": "integer", "updated_at": "datetime", - "updated_by": ("list", {0: ("dict", {0: (None, None)})}), + "updated_by": ("list", {0: ("dict", {0: (None, None)}), 1: 
("dict", {0: (None, None)})}), } - response = client.tasks.create(data={"image": "https://example.com/image.jpg", "text": "Hello, world!"}, project=1) + response = client.tasks.create(data={"key": "value"}) validate_response(response, expected_response, expected_types) - async_response = await async_client.tasks.create( - data={"image": "https://example.com/image.jpg", "text": "Hello, world!"}, project=1 - ) + async_response = await async_client.tasks.create(data={"key": "value"}) validate_response(async_response, expected_response, expected_types) From 33498956f9972670fb6acd519b9ffb9ed7519ff1 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 24 Sep 2025 23:01:54 +0000 Subject: [PATCH 2/7] SDK regeneration --- .mock/definition/projects.yml | 8 +++++--- .mock/openapi/openapi.yaml | 6 +++--- reference.md | 6 +++--- src/label_studio_sdk/projects/client.py | 12 ++++++------ tests/test_projects.py | 6 +++--- 5 files changed, 20 insertions(+), 18 deletions(-) diff --git a/.mock/definition/projects.yml b/.mock/definition/projects.yml index e7ea2e811..cda9fbe78 100644 --- a/.mock/definition/projects.yml +++ b/.mock/definition/projects.yml @@ -840,16 +840,18 @@ service: workspace_title: workspace_title audiences: - public - annotators: + list_unique_annotators: path: /api/projects/{id}/annotators/ method: GET auth: true - docs: Return users who have submitted annotations in the specified project. + docs: >- + Return unique users who have submitted annotations in the specified + project. source: openapi: openapi/openapi.yaml path-parameters: id: integer - display-name: List annotators for project + display-name: List unique annotators for project response: docs: List of annotator users type: list diff --git a/.mock/openapi/openapi.yaml b/.mock/openapi/openapi.yaml index f0a243866..e5b14cc1a 100644 --- a/.mock/openapi/openapi.yaml +++ b/.mock/openapi/openapi.yaml @@ -6901,7 +6901,7 @@ paths: x-fern-sdk-method-name: list_for_project /api/projects/{id}/annotators/: get: - description: Return users who have submitted annotations in the specified project. + description: Return unique users who have submitted annotations in the specified project. operationId: api_projects_annotators_retrieve parameters: - in: path @@ -6920,13 +6920,13 @@ paths: description: List of annotator users security: - Token: [] - summary: List annotators for project + summary: List unique annotators for project tags: - Projects x-fern-audiences: - public x-fern-sdk-group-name: projects - x-fern-sdk-method-name: annotators + x-fern-sdk-method-name: list_unique_annotators /api/projects/{id}/aws-custom-function: get: description: Get the AWS Lambda code for the custom metric configured for this project. diff --git a/reference.md b/reference.md index 3627aaa91..2d21f51b4 100644 --- a/reference.md +++ b/reference.md @@ -9432,7 +9432,7 @@ client.projects.update( -
`client.projects.annotators(...)`
+`client.projects.list_unique_annotators(...)`
@@ -9444,7 +9444,7 @@ client.projects.update(
-Return users who have submitted annotations in the specified project. +Return unique users who have submitted annotations in the specified project.
@@ -9464,7 +9464,7 @@ from label_studio_sdk import LabelStudio client = LabelStudio( api_key="YOUR_API_KEY", ) -client.projects.annotators( +client.projects.list_unique_annotators( id=1, ) diff --git a/src/label_studio_sdk/projects/client.py b/src/label_studio_sdk/projects/client.py index 0af0a1361..8a403acbb 100644 --- a/src/label_studio_sdk/projects/client.py +++ b/src/label_studio_sdk/projects/client.py @@ -800,11 +800,11 @@ def update( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def annotators( + def list_unique_annotators( self, id: int, *, request_options: typing.Optional[RequestOptions] = None ) -> typing.List[UserSimple]: """ - Return users who have submitted annotations in the specified project. + Return unique users who have submitted annotations in the specified project. Parameters ---------- @@ -825,7 +825,7 @@ def annotators( client = LabelStudio( api_key="YOUR_API_KEY", ) - client.projects.annotators( + client.projects.list_unique_annotators( id=1, ) """ @@ -1998,11 +1998,11 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def annotators( + async def list_unique_annotators( self, id: int, *, request_options: typing.Optional[RequestOptions] = None ) -> typing.List[UserSimple]: """ - Return users who have submitted annotations in the specified project. + Return unique users who have submitted annotations in the specified project. Parameters ---------- @@ -2028,7 +2028,7 @@ async def annotators( async def main() -> None: - await client.projects.annotators( + await client.projects.list_unique_annotators( id=1, ) diff --git a/tests/test_projects.py b/tests/test_projects.py index 98230095f..e1f530262 100644 --- a/tests/test_projects.py +++ b/tests/test_projects.py @@ -459,7 +459,7 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No validate_response(async_response, expected_response, expected_types) -async def test_annotators(client: LabelStudio, async_client: AsyncLabelStudio) -> None: +async def test_list_unique_annotators(client: LabelStudio, async_client: AsyncLabelStudio) -> None: expected_response: typing.Any = [ {"avatar": "avatar", "email": "email", "first_name": "first_name", "id": 1, "last_name": "last_name"} ] @@ -467,10 +467,10 @@ async def test_annotators(client: LabelStudio, async_client: AsyncLabelStudio) - "list", {0: {"avatar": None, "email": None, "first_name": None, "id": "integer", "last_name": None}}, ) - response = client.projects.annotators(id=1) + response = client.projects.list_unique_annotators(id=1) validate_response(response, expected_response, expected_types) - async_response = await async_client.projects.annotators(id=1) + async_response = await async_client.projects.list_unique_annotators(id=1) validate_response(async_response, expected_response, expected_types) From b66ff6324b154b6b83b702c67e52029a93f6af8a Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 25 Sep 2025 13:13:08 +0000 Subject: [PATCH 3/7] SDK regeneration --- .mock/definition/__package__.yml | 46 +++ .mock/definition/projects.yml | 52 --- .mock/definition/prompts.yml | 112 +++++- .mock/definition/prompts/runs.yml | 26 ++ .mock/definition/tasks.yml | 93 ++++- .mock/openapi/openapi.yaml | 151 +++++++- reference.md | 319 +++++++++++++++- src/label_studio_sdk/__init__.py | 10 + 
src/label_studio_sdk/prompts/client.py | 341 +++++++++++++++++- src/label_studio_sdk/prompts/runs/client.py | 127 +++++++ src/label_studio_sdk/tasks/client.py | 9 +- src/label_studio_sdk/types/__init__.py | 10 + .../types/cancel_model_run_response.py | 19 + ...ated_project_subset_tasks_response_list.py | 23 ++ .../types/project_subset_item.py | 21 ++ .../types/project_subset_task_item.py | 24 ++ .../types/project_subset_tasks_response.py | 27 ++ tests/prompts/test_runs.py | 10 + tests/test_prompts.py | 50 +++ tests/test_tasks.py | 82 ++--- 20 files changed, 1394 insertions(+), 158 deletions(-) create mode 100644 src/label_studio_sdk/types/cancel_model_run_response.py create mode 100644 src/label_studio_sdk/types/paginated_project_subset_tasks_response_list.py create mode 100644 src/label_studio_sdk/types/project_subset_item.py create mode 100644 src/label_studio_sdk/types/project_subset_task_item.py create mode 100644 src/label_studio_sdk/types/project_subset_tasks_response.py diff --git a/.mock/definition/__package__.yml b/.mock/definition/__package__.yml index 0fb7e7cff..dd0ffd77d 100644 --- a/.mock/definition/__package__.yml +++ b/.mock/definition/__package__.yml @@ -1490,6 +1490,11 @@ types: * `Yearly` - Yearly source: openapi: openapi/openapi.yaml + CancelModelRunResponse: + properties: + detail: string + source: + openapi: openapi/openapi.yaml ChildFilter: properties: column: @@ -5929,6 +5934,20 @@ types: maxLength: 256 source: openapi: openapi/openapi.yaml + PaginatedProjectSubsetTasksResponseList: + properties: + count: integer + next: + type: optional + validation: + format: uri + previous: + type: optional + validation: + format: uri + results: list + source: + openapi: openapi/openapi.yaml PaginatedRoleBasedTaskList: properties: tasks: list @@ -6418,6 +6437,33 @@ types: * `Sample` - Sample source: openapi: openapi/openapi.yaml + ProjectSubsetItem: + properties: + columns_schema: optional>> + count: integer + subset: string + source: + openapi: openapi/openapi.yaml + ProjectSubsetTaskItem: + properties: + data: map + error: optional> + ground_truth: optional> + id: optional + prediction: optional> + score: optional + source: + openapi: openapi/openapi.yaml + ProjectSubsetTasksResponse: + properties: + next_cursor: optional + previous_cursor: optional + task_count: + type: optional + docs: Present only when include_total=true + task_result_list: list + source: + openapi: openapi/openapi.yaml ProjectTemplate: properties: assignment_settings: optional diff --git a/.mock/definition/projects.yml b/.mock/definition/projects.yml index cda9fbe78..810124ae8 100644 --- a/.mock/definition/projects.yml +++ b/.mock/definition/projects.yml @@ -1314,57 +1314,5 @@ service: label_config: label_config audiences: - public - api_projects_subset_tasks_list: - path: /api/projects/{project_pk}/subset-tasks - method: GET - auth: true - docs: |2- - - Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided. - - source: - openapi: openapi/openapi.yaml - path-parameters: - project_pk: integer - display-name: Get Project Subset Task List with Predictions and Accuracy details - request: - name: ApiProjectsSubsetTasksListRequest - query-parameters: - model_run: - type: optional - docs: A unique ID of a ModelRun - ordering: - type: optional - docs: Which field to use when ordering the results. - page: - type: optional - docs: A page number within the paginated result set. 
- page_size: - type: optional - docs: Number of results to return per page. - project_subset: - type: optional - docs: The project subset to retrieve tasks for - errors: - - root.BadRequestError - examples: - - path-parameters: - project_pk: 1 - api_projects_subsets_retrieve: - path: /api/projects/{project_pk}/subsets - method: GET - auth: true - docs: |2- - - Provides list of available subsets for a project along with count of tasks in each subset - - source: - openapi: openapi/openapi.yaml - path-parameters: - project_pk: integer - display-name: Get available subsets of a project (for prompts usage) - examples: - - path-parameters: - project_pk: 1 source: openapi: openapi/openapi.yaml diff --git a/.mock/definition/prompts.yml b/.mock/definition/prompts.yml index 822dc918b..63037d5ee 100644 --- a/.mock/definition/prompts.yml +++ b/.mock/definition/prompts.yml @@ -84,6 +84,100 @@ service: - key: value audiences: - public + subset_tasks: + path: /api/projects/{project_pk}/subset-tasks + method: GET + auth: true + docs: |2- + + Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided. + + source: + openapi: openapi/openapi.yaml + path-parameters: + project_pk: integer + display-name: Get Project Subset Task List with Predictions and Accuracy details + request: + name: PromptsSubsetTasksRequest + query-parameters: + include_total: + type: optional + docs: >- + If true (default), includes task_count in response; if false, + omits it. + model_run: + type: optional + docs: A unique ID of a ModelRun + ordering: + type: optional + docs: Which field to use when ordering the results. + page: + type: optional + docs: A page number within the paginated result set. + page_size: + type: optional + docs: Number of results to return per page. + parent_model: + type: optional + docs: The ID of the parent model (ModelInterface) for this Inference Run + project_subset: + type: optional + docs: The project subset to retrieve tasks for + response: + docs: '' + type: root.PaginatedProjectSubsetTasksResponseList + errors: + - root.BadRequestError + examples: + - path-parameters: + project_pk: 1 + response: + body: + count: 123 + next: http://api.example.org/accounts/?page=4 + previous: http://api.example.org/accounts/?page=2 + results: + - next_cursor: next_cursor + previous_cursor: previous_cursor + task_count: 1 + task_result_list: + - data: + key: value + audiences: + - public + subsets: + path: /api/projects/{project_pk}/subsets + method: GET + auth: true + docs: |2- + + Provides list of available subsets for a project along with count of tasks in each subset + + source: + openapi: openapi/openapi.yaml + path-parameters: + project_pk: integer + display-name: Get available subsets of a project (for prompts usage) + request: + name: PromptsSubsetsRequest + query-parameters: + ordering: + type: optional + docs: Which field to use when ordering the results. 
+ response: + docs: '' + type: list + examples: + - path-parameters: + project_pk: 1 + response: + body: + - columns_schema: + - key: value + count: 1 + subset: subset + audiences: + - public list: path: /api/prompts/ method: GET @@ -491,24 +585,6 @@ service: updated_at: '2024-01-15T09:30:00Z' audiences: - internal - api_prompts_versions_inference_runs_cancel_create: - path: >- - /api/prompts/{prompt_id}/versions/{version_id}/inference-runs/{inference_run_id}/cancel - method: POST - auth: true - docs: Cancel the inference run for the given api - source: - openapi: openapi/openapi.yaml - path-parameters: - inference_run_id: integer - prompt_id: integer - version_id: integer - display-name: Cancel Inference Run API - examples: - - path-parameters: - inference_run_id: 1 - prompt_id: 1 - version_id: 1 source: openapi: openapi/openapi.yaml types: diff --git a/.mock/definition/prompts/runs.yml b/.mock/definition/prompts/runs.yml index 26679188a..82c606df0 100644 --- a/.mock/definition/prompts/runs.yml +++ b/.mock/definition/prompts/runs.yml @@ -135,5 +135,31 @@ service: triggered_at: '2024-01-15T09:30:00Z' audiences: - public + cancel: + path: >- + /api/prompts/{prompt_id}/versions/{version_id}/inference-runs/{inference_run_id}/cancel + method: POST + auth: true + docs: Cancel the inference run for the given api + source: + openapi: openapi/openapi.yaml + path-parameters: + inference_run_id: integer + prompt_id: integer + version_id: integer + display-name: Cancel Inference Run API + response: + docs: '' + type: root.CancelModelRunResponse + examples: + - path-parameters: + inference_run_id: 1 + prompt_id: 1 + version_id: 1 + response: + body: + detail: detail + audiences: + - public source: openapi: openapi/openapi.yaml diff --git a/.mock/definition/tasks.yml b/.mock/definition/tasks.yml index c45adf0fa..ffd036176 100644 --- a/.mock/definition/tasks.yml +++ b/.mock/definition/tasks.yml @@ -305,6 +305,79 @@ service: response: docs: '' type: root.LseTask + examples: + - name: Create Task + request: + data: + image: https://example.com/image.jpg + text: Hello, world! 
+ project: 1 + response: + body: + agreement: agreement + agreement_selected: agreement_selected + annotations: annotations + annotations_ids: annotations_ids + annotations_results: annotations_results + annotators: + - 1 + annotators_count: 1 + avg_lead_time: 1.1 + cancelled_annotations: 1 + comment_authors: + - key: value + comment_authors_count: 1 + comment_count: 1 + comments: comments + completed_at: '2024-01-15T09:30:00Z' + created_at: '2024-01-15T09:30:00Z' + data: + key: value + draft_exists: true + drafts: + - created_at: '2024-01-15T09:30:00Z' + result: + - key: value + updated_at: '2024-01-15T09:30:00Z' + file_upload: file_upload + ground_truth: true + id: 1 + inner_id: 1 + is_labeled: true + last_comment_updated_at: '2024-01-15T09:30:00Z' + meta: + key: value + overlap: 1 + predictions: + - created_at: '2024-01-15T09:30:00Z' + model: + key: value + model_run: + key: value + model_version: model_version + project: 1 + result: + - key: value + score: 1.1 + task: 1 + updated_at: '2024-01-15T09:30:00Z' + predictions_model_versions: predictions_model_versions + predictions_results: predictions_results + predictions_score: 1.1 + project: 1 + reviewed: true + reviewers: + - key: value + reviewers_count: 1 + reviews_accepted: 1 + reviews_rejected: 1 + storage_filename: storage_filename + total_annotations: 1 + total_predictions: 1 + unresolved_comment_count: 1 + updated_at: '2024-01-15T09:30:00Z' + updated_by: + - key: value audiences: - public get: @@ -556,41 +629,41 @@ service: docs: |2- Create a new task event to track user interactions and system events during annotation. - + This endpoint is designed to receive events from the frontend labeling interface to enable accurate lead time calculation and detailed annotation analytics. - + ## Event Types - + **Core Annotation Events:** - `annotation_loaded` - When annotation interface is loaded - `annotation_submitted` - When annotation is submitted - `annotation_updated` - When annotation is modified - `annotation_reviewed` - When annotation is reviewed - + **User Activity Events:** - `visibility_change` - When page visibility changes (tab switch, minimize) - `idle_detected` - When user goes idle - `idle_resumed` - When user returns from idle - + **Interaction Events:** - `region_finished_drawing` - When annotation region is completed - `region_deleted` - When annotation regions are removed - `hotkey_pressed` - When keyboard shortcuts are used - + **Media Events:** - `video_playback_start/end` - Video playback control - `audio_playback_start/end` - Audio playback control - `video_scrub` - Video timeline scrubbing - + ## Usage - + Events are automatically associated with the task specified in the URL path. The current user is automatically set as the actor. Project and organization are derived from the task context. - + ## Example Request - + ```json { "event_key": "annotation_loaded", diff --git a/.mock/openapi/openapi.yaml b/.mock/openapi/openapi.yaml index e5b14cc1a..c2009e6ed 100644 --- a/.mock/openapi/openapi.yaml +++ b/.mock/openapi/openapi.yaml @@ -9759,6 +9759,11 @@ paths: description: "\n Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided.\n " operationId: api_projects_subset_tasks_list parameters: + - description: If true (default), includes task_count in response; if false, omits it. 
+ in: query + name: include_total + schema: + type: boolean - description: A unique ID of a ModelRun in: query name: model_run @@ -9782,6 +9787,11 @@ paths: required: false schema: type: integer + - description: The ID of the parent model (ModelInterface) for this Inference Run + in: query + name: parent_model + schema: + type: integer - in: path name: project_pk required: true @@ -9794,7 +9804,11 @@ paths: type: string responses: '200': - description: Project subset task list + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedProjectSubsetTasksResponseList' + description: '' '400': description: Bad request - missing parent_model security: @@ -9802,11 +9816,21 @@ paths: summary: Get Project Subset Task List with Predictions and Accuracy details tags: - Projects + x-fern-audiences: + - public + x-fern-sdk-group-name: prompts + x-fern-sdk-method-name: subset_tasks /api/projects/{project_pk}/subsets: get: description: "\n Provides list of available subsets for a project along with count of tasks in each subset\n " - operationId: api_projects_subsets_retrieve + operationId: api_projects_subsets_list parameters: + - description: Which field to use when ordering the results. + in: query + name: ordering + required: false + schema: + type: string - in: path name: project_pk required: true @@ -9814,12 +9838,22 @@ paths: type: integer responses: '200': - description: No response body + content: + application/json: + schema: + items: + $ref: '#/components/schemas/ProjectSubsetItem' + type: array + description: '' security: - Token: [] summary: Get available subsets of a project (for prompts usage) tags: - Projects + x-fern-audiences: + - public + x-fern-sdk-group-name: prompts + x-fern-sdk-method-name: subsets /api/prompts/: get: description: List all prompts. @@ -10447,13 +10481,23 @@ paths: schema: type: integer responses: - '201': - description: No response body + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/CancelModelRunResponse' + description: '' security: - Token: [] summary: Cancel Inference Run API tags: - Prompts + x-fern-audiences: + - public + x-fern-sdk-group-name: + - prompts + - runs + x-fern-sdk-method-name: cancel /api/prompts/{prompt_id}/versions/{version_id}/refine: get: description: Get the refined prompt based on the `refinement_job_id`. @@ -15742,6 +15786,15 @@ paths: requestBody: content: application/json: + examples: + CreateTask: + description: Example of Create Task + summary: Create Task + value: + data: + image: https://example.com/image.jpg + text: Hello, world! 
+ project: 1 schema: $ref: '#/components/schemas/LseTaskRequest' application/x-www-form-urlencoded: @@ -16041,7 +16094,7 @@ paths: x-fern-sdk-method-name: create /api/tasks/{id}/events/: post: - description: "\n Create a new task event to track user interactions and system events during annotation.\n \n This endpoint is designed to receive events from the frontend labeling interface to enable\n accurate lead time calculation and detailed annotation analytics.\n \n ## Event Types\n \n **Core Annotation Events:**\n - `annotation_loaded` - When annotation interface is loaded\n - `annotation_submitted` - When annotation is submitted\n - `annotation_updated` - When annotation is modified\n - `annotation_reviewed` - When annotation is reviewed\n \n **User Activity Events:**\n - `visibility_change` - When page visibility changes (tab switch, minimize)\n - `idle_detected` - When user goes idle\n - `idle_resumed` - When user returns from idle\n \n **Interaction Events:**\n - `region_finished_drawing` - When annotation region is completed\n - `region_deleted` - When annotation regions are removed\n - `hotkey_pressed` - When keyboard shortcuts are used\n \n **Media Events:**\n - `video_playback_start/end` - Video playback control\n - `audio_playback_start/end` - Audio playback control\n - `video_scrub` - Video timeline scrubbing\n \n ## Usage\n \n Events are automatically associated with the task specified in the URL path.\n The current user is automatically set as the actor. Project and organization\n are derived from the task context.\n \n ## Example Request\n \n ```json\n {\n \"event_key\": \"annotation_loaded\",\n \"event_time\": \"2024-01-15T10:30:00Z\",\n \"annotation\": 123,\n \"meta\": {\n \"annotation_count\": 5,\n \"estimated_time\": 300\n }\n }\n ```\n " + description: "\n Create a new task event to track user interactions and system events during annotation.\n\n This endpoint is designed to receive events from the frontend labeling interface to enable\n accurate lead time calculation and detailed annotation analytics.\n\n ## Event Types\n\n **Core Annotation Events:**\n - `annotation_loaded` - When annotation interface is loaded\n - `annotation_submitted` - When annotation is submitted\n - `annotation_updated` - When annotation is modified\n - `annotation_reviewed` - When annotation is reviewed\n\n **User Activity Events:**\n - `visibility_change` - When page visibility changes (tab switch, minimize)\n - `idle_detected` - When user goes idle\n - `idle_resumed` - When user returns from idle\n\n **Interaction Events:**\n - `region_finished_drawing` - When annotation region is completed\n - `region_deleted` - When annotation regions are removed\n - `hotkey_pressed` - When keyboard shortcuts are used\n\n **Media Events:**\n - `video_playback_start/end` - Video playback control\n - `audio_playback_start/end` - Audio playback control\n - `video_scrub` - Video timeline scrubbing\n\n ## Usage\n\n Events are automatically associated with the task specified in the URL path.\n The current user is automatically set as the actor. 
Project and organization\n are derived from the task context.\n\n ## Example Request\n\n ```json\n {\n \"event_key\": \"annotation_loaded\",\n \"event_time\": \"2024-01-15T10:30:00Z\",\n \"annotation\": 123,\n \"meta\": {\n \"annotation_count\": 5,\n \"estimated_time\": 300\n }\n }\n ```\n " operationId: api_tasks_events_create parameters: - description: Task ID to associate the event with @@ -19442,6 +19495,13 @@ components: - Monthly - Yearly type: string + CancelModelRunResponse: + properties: + detail: + type: string + required: + - detail + type: object CheckMatchingFunctionRequestRequest: properties: code: @@ -25674,6 +25734,29 @@ components: - project_role - username type: object + PaginatedProjectSubsetTasksResponseList: + properties: + count: + example: 123 + type: integer + next: + example: http://api.example.org/accounts/?page=4 + format: uri + nullable: true + type: string + previous: + example: http://api.example.org/accounts/?page=2 + format: uri + nullable: true + type: string + results: + items: + $ref: '#/components/schemas/ProjectSubsetTasksResponse' + type: array + required: + - count + - results + type: object PaginatedRoleBasedTaskList: properties: tasks: @@ -28367,6 +28450,62 @@ components: - HasGT - Sample type: string + ProjectSubsetItem: + properties: + columns_schema: + items: + additionalProperties: {} + type: object + type: array + count: + type: integer + subset: + type: string + required: + - count + - subset + type: object + ProjectSubsetTaskItem: + properties: + data: + additionalProperties: {} + type: object + error: + additionalProperties: {} + type: object + ground_truth: + additionalProperties: {} + type: object + id: + type: integer + prediction: + additionalProperties: {} + type: object + score: + format: double + nullable: true + type: number + required: + - data + type: object + ProjectSubsetTasksResponse: + properties: + next_cursor: + nullable: true + type: string + previous_cursor: + nullable: true + type: string + task_count: + description: Present only when include_total=true + type: integer + task_result_list: + items: + $ref: '#/components/schemas/ProjectSubsetTaskItem' + type: array + required: + - task_result_list + type: object ProjectTemplate: properties: assignment_settings: diff --git a/reference.md b/reference.md index 2d21f51b4..21305097e 100644 --- a/reference.md +++ b/reference.md @@ -6686,6 +6686,214 @@ client.prompts.batch_predictions(
+`client.prompts.subset_tasks(...)`
+
+#### 📝 Description
+
+Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided.
+
+#### 🔌 Usage
+
+```python
+from label_studio_sdk import LabelStudio
+
+client = LabelStudio(
+    api_key="YOUR_API_KEY",
+)
+client.prompts.subset_tasks(
+    project_pk=1,
+)
+
+```
+
+#### ⚙️ Parameters
+
+**project_pk:** `int`
+
+**include_total:** `typing.Optional[bool]` — If true (default), includes task_count in response; if false, omits it.
+
+**model_run:** `typing.Optional[int]` — A unique ID of a ModelRun
+
+**ordering:** `typing.Optional[str]` — Which field to use when ordering the results.
+
+**page:** `typing.Optional[int]` — A page number within the paginated result set.
+
+**page_size:** `typing.Optional[int]` — Number of results to return per page.
+
+**parent_model:** `typing.Optional[int]` — The ID of the parent model (ModelInterface) for this Inference Run
+
+**project_subset:** `typing.Optional[str]` — The project subset to retrieve tasks for
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
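Taken together with the `PaginatedProjectSubsetTasksResponseList` and `ProjectSubsetTaskItem` types added in this patch, the endpoint above can be paged through directly. A minimal sketch, assuming a reachable Label Studio instance, an illustrative project ID of 1, and a populated `HasGT` subset:

```python
# Hedged sketch: page through the HasGT subset and read per-task predictions.
from label_studio_sdk import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

page = 1
while True:
    response = client.prompts.subset_tasks(
        project_pk=1,
        project_subset="HasGT",
        page=page,
        page_size=50,
        include_total=False,  # omit task_count from the response (see parameter docs above)
    )
    # Each page is a PaginatedProjectSubsetTasksResponseList; its `results` hold
    # ProjectSubsetTasksResponse objects whose `task_result_list` contains the tasks.
    for batch in response.results:
        for task in batch.task_result_list:
            # ProjectSubsetTaskItem: `data` is required; `id`, `prediction`,
            # `ground_truth`, and `score` are optional and may be None.
            print(task.id, task.score, task.data)
    if response.next is None:
        break
    page += 1
```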
+`client.prompts.subsets(...)`
+
+#### 📝 Description
+
+Provides list of available subsets for a project along with count of tasks in each subset
+
+#### 🔌 Usage
+
+```python
+from label_studio_sdk import LabelStudio
+
+client = LabelStudio(
+    api_key="YOUR_API_KEY",
+)
+client.prompts.subsets(
+    project_pk=1,
+)
+
+```
+
+#### ⚙️ Parameters
+
+**project_pk:** `int`
+
+**ordering:** `typing.Optional[str]` — Which field to use when ordering the results.
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
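The subsets listing pairs naturally with the task listing above. A minimal sketch, again with an illustrative project ID, that inspects the returned `ProjectSubsetItem` objects (`subset`, `count`, optional `columns_schema`):

```python
# Hedged sketch: enumerate a project's subsets before requesting their tasks.
from label_studio_sdk import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

for item in client.prompts.subsets(project_pk=1):
    # `columns_schema` is optional and may be None for some subsets.
    columns = item.columns_schema or []
    print(f"{item.subset}: {item.count} tasks, {len(columns)} schema entries")
```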
@@ -10277,7 +10485,8 @@ client = LabelStudio( api_key="YOUR_API_KEY", ) client.tasks.create( - data={"key": "value"}, + data={"image": "https://example.com/image.jpg", "text": "Hello, world!"}, + project=1, ) ``` @@ -10810,41 +11019,41 @@ client.tasks.update( Create a new task event to track user interactions and system events during annotation. - + This endpoint is designed to receive events from the frontend labeling interface to enable accurate lead time calculation and detailed annotation analytics. - + ## Event Types - + **Core Annotation Events:** - `annotation_loaded` - When annotation interface is loaded - `annotation_submitted` - When annotation is submitted - `annotation_updated` - When annotation is modified - `annotation_reviewed` - When annotation is reviewed - + **User Activity Events:** - `visibility_change` - When page visibility changes (tab switch, minimize) - `idle_detected` - When user goes idle - `idle_resumed` - When user returns from idle - + **Interaction Events:** - `region_finished_drawing` - When annotation region is completed - `region_deleted` - When annotation regions are removed - `hotkey_pressed` - When keyboard shortcuts are used - + **Media Events:** - `video_playback_start/end` - Video playback control - `audio_playback_start/end` - Audio playback control - `video_scrub` - Video timeline scrubbing - + ## Usage - + Events are automatically associated with the task specified in the URL path. The current user is automatically set as the actor. Project and organization are derived from the task context. - + ## Example Request - + ```json { "event_key": "annotation_loaded", @@ -33569,6 +33778,94 @@ client.prompts.runs.create( + + +
+`client.prompts.runs.cancel(...)`
+
+#### 📝 Description
+
+Cancel the inference run for the given api
+
+#### 🔌 Usage
+
+```python
+from label_studio_sdk import LabelStudio
+
+client = LabelStudio(
+    api_key="YOUR_API_KEY",
+)
+client.prompts.runs.cancel(
+    inference_run_id=1,
+    prompt_id=1,
+    version_id=1,
+)
+
+```
+
+#### ⚙️ Parameters
+
+**inference_run_id:** `int`
+
+**prompt_id:** `int`
+
+**version_id:** `int`
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
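With `cancel` now exposed next to `runs.create`, an inference run started from a prompt version can be stopped from the same client. A minimal sketch; the prompt, version, and project IDs are illustrative, and reading the new run's ID off the `create` response is an assumption about the returned model rather than something this patch documents:

```python
# Hedged sketch: start an inference run, then cancel it from the same client.
from label_studio_sdk import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

run = client.prompts.runs.create(prompt_id=1, version_id=1, project=1)

# Assumption: the created run exposes its primary key as `id`.
cancelled = client.prompts.runs.cancel(
    inference_run_id=run.id,
    prompt_id=1,
    version_id=1,
)
# CancelModelRunResponse carries a single `detail` string.
print(cancelled.detail)
```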
diff --git a/src/label_studio_sdk/__init__.py b/src/label_studio_sdk/__init__.py index b9f0d5892..28d451aaa 100644 --- a/src/label_studio_sdk/__init__.py +++ b/src/label_studio_sdk/__init__.py @@ -37,6 +37,7 @@ BlankEnum, BlueprintList, BudgetResetPeriodEnum, + CancelModelRunResponse, ChildFilter, Comment, CommentRequest, @@ -152,6 +153,7 @@ PaginatedLseUserList, PaginatedPaginatedProjectMemberList, PaginatedProjectMember, + PaginatedProjectSubsetTasksResponseList, PaginatedRoleBasedTaskList, Pause, PauseRequest, @@ -168,6 +170,9 @@ ProjectSampling, ProjectSkipQueue, ProjectSubsetEnum, + ProjectSubsetItem, + ProjectSubsetTaskItem, + ProjectSubsetTasksResponse, ProjectTemplate, ProjectTemplateRequest, PromptsStatusEnum, @@ -399,6 +404,7 @@ "BlankEnum", "BlueprintList", "BudgetResetPeriodEnum", + "CancelModelRunResponse", "ChildFilter", "Client", "Comment", @@ -529,6 +535,7 @@ "PaginatedLseUserList", "PaginatedPaginatedProjectMemberList", "PaginatedProjectMember", + "PaginatedProjectSubsetTasksResponseList", "PaginatedRoleBasedTaskList", "PatchedDefaultRoleRequestCustomScriptsEditableBy", "PatchedLseProjectUpdateRequestSampling", @@ -548,6 +555,9 @@ "ProjectSampling", "ProjectSkipQueue", "ProjectSubsetEnum", + "ProjectSubsetItem", + "ProjectSubsetTaskItem", + "ProjectSubsetTasksResponse", "ProjectTemplate", "ProjectTemplateRequest", "ProjectsDuplicateResponse", diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py index 15852d99c..6f1e15812 100644 --- a/src/label_studio_sdk/prompts/client.py +++ b/src/label_studio_sdk/prompts/client.py @@ -11,6 +11,10 @@ from json.decoder import JSONDecodeError from ..core.api_error import ApiError from ..types.batch_predictions import BatchPredictions +from ..types.paginated_project_subset_tasks_response_list import PaginatedProjectSubsetTasksResponseList +from ..core.jsonable_encoder import jsonable_encoder +from ..errors.bad_request_error import BadRequestError +from ..types.project_subset_item import ProjectSubsetItem from ..types.model_interface_serializer_get import ModelInterfaceSerializerGet from ..types.user_simple_request import UserSimpleRequest from ..types.skill_name_enum import SkillNameEnum @@ -18,7 +22,6 @@ from ..core.serialization import convert_and_respect_annotation_metadata from .types.prompts_compatible_projects_request_project_type import PromptsCompatibleProjectsRequestProjectType from ..types.paginated_all_roles_project_list_list import PaginatedAllRolesProjectListList -from ..core.jsonable_encoder import jsonable_encoder from ..core.client_wrapper import AsyncClientWrapper from .indicators.client import AsyncIndicatorsClient from .versions.client import AsyncVersionsClient @@ -183,6 +186,166 @@ def batch_predictions( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def subset_tasks( + self, + project_pk: int, + *, + include_total: typing.Optional[bool] = None, + model_run: typing.Optional[int] = None, + ordering: typing.Optional[str] = None, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + parent_model: typing.Optional[int] = None, + project_subset: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PaginatedProjectSubsetTasksResponseList: + """ + + Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided. 
+ + + Parameters + ---------- + project_pk : int + + include_total : typing.Optional[bool] + If true (default), includes task_count in response; if false, omits it. + + model_run : typing.Optional[int] + A unique ID of a ModelRun + + ordering : typing.Optional[str] + Which field to use when ordering the results. + + page : typing.Optional[int] + A page number within the paginated result set. + + page_size : typing.Optional[int] + Number of results to return per page. + + parent_model : typing.Optional[int] + The ID of the parent model (ModelInterface) for this Inference Run + + project_subset : typing.Optional[str] + The project subset to retrieve tasks for + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PaginatedProjectSubsetTasksResponseList + + + Examples + -------- + from label_studio_sdk import LabelStudio + + client = LabelStudio( + api_key="YOUR_API_KEY", + ) + client.prompts.subset_tasks( + project_pk=1, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"api/projects/{jsonable_encoder(project_pk)}/subset-tasks", + method="GET", + params={ + "include_total": include_total, + "model_run": model_run, + "ordering": ordering, + "page": page, + "page_size": page_size, + "parent_model": parent_model, + "project_subset": project_subset, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + PaginatedProjectSubsetTasksResponseList, + construct_type( + type_=PaginatedProjectSubsetTasksResponseList, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def subsets( + self, + project_pk: int, + *, + ordering: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ProjectSubsetItem]: + """ + + Provides list of available subsets for a project along with count of tasks in each subset + + + Parameters + ---------- + project_pk : int + + ordering : typing.Optional[str] + Which field to use when ordering the results. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[ProjectSubsetItem] + + + Examples + -------- + from label_studio_sdk import LabelStudio + + client = LabelStudio( + api_key="YOUR_API_KEY", + ) + client.prompts.subsets( + project_pk=1, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"api/projects/{jsonable_encoder(project_pk)}/subsets", + method="GET", + params={ + "ordering": ordering, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ProjectSubsetItem], + construct_type( + type_=typing.List[ProjectSubsetItem], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def list( self, *, ordering: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None ) -> typing.List[ModelInterfaceSerializerGet]: @@ -740,6 +903,182 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + async def subset_tasks( + self, + project_pk: int, + *, + include_total: typing.Optional[bool] = None, + model_run: typing.Optional[int] = None, + ordering: typing.Optional[str] = None, + page: typing.Optional[int] = None, + page_size: typing.Optional[int] = None, + parent_model: typing.Optional[int] = None, + project_subset: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PaginatedProjectSubsetTasksResponseList: + """ + + Provides list of tasks, based on project subset. Includes predictions for tasks. For the 'HasGT' subset, accuracy metrics will also be provided. + + + Parameters + ---------- + project_pk : int + + include_total : typing.Optional[bool] + If true (default), includes task_count in response; if false, omits it. + + model_run : typing.Optional[int] + A unique ID of a ModelRun + + ordering : typing.Optional[str] + Which field to use when ordering the results. + + page : typing.Optional[int] + A page number within the paginated result set. + + page_size : typing.Optional[int] + Number of results to return per page. + + parent_model : typing.Optional[int] + The ID of the parent model (ModelInterface) for this Inference Run + + project_subset : typing.Optional[str] + The project subset to retrieve tasks for + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + PaginatedProjectSubsetTasksResponseList + + + Examples + -------- + import asyncio + + from label_studio_sdk import AsyncLabelStudio + + client = AsyncLabelStudio( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.prompts.subset_tasks( + project_pk=1, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"api/projects/{jsonable_encoder(project_pk)}/subset-tasks", + method="GET", + params={ + "include_total": include_total, + "model_run": model_run, + "ordering": ordering, + "page": page, + "page_size": page_size, + "parent_model": parent_model, + "project_subset": project_subset, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + PaginatedProjectSubsetTasksResponseList, + construct_type( + type_=PaginatedProjectSubsetTasksResponseList, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 400: + raise BadRequestError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def subsets( + self, + project_pk: int, + *, + ordering: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[ProjectSubsetItem]: + """ + + Provides list of available subsets for a project along with count of tasks in each subset + + + Parameters + ---------- + project_pk : int + + ordering : typing.Optional[str] + Which field to use when ordering the results. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[ProjectSubsetItem] + + + Examples + -------- + import asyncio + + from label_studio_sdk import AsyncLabelStudio + + client = AsyncLabelStudio( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.prompts.subsets( + project_pk=1, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"api/projects/{jsonable_encoder(project_pk)}/subsets", + method="GET", + params={ + "ordering": ordering, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[ProjectSubsetItem], + construct_type( + type_=typing.List[ProjectSubsetItem], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def list( self, *, ordering: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None ) -> typing.List[ModelInterfaceSerializerGet]: diff --git a/src/label_studio_sdk/prompts/runs/client.py b/src/label_studio_sdk/prompts/runs/client.py index 3fdf84074..22d795000 100644 --- a/src/label_studio_sdk/prompts/runs/client.py +++ b/src/label_studio_sdk/prompts/runs/client.py @@ -11,6 +11,7 @@ from ...core.api_error import ApiError import datetime as dt from ...types.project_subset_enum import ProjectSubsetEnum +from ...types.cancel_model_run_response import CancelModelRunResponse from ...core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -193,6 +194,65 @@ def create( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def cancel( + self, + inference_run_id: int, + prompt_id: int, + version_id: int, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> CancelModelRunResponse: + """ + Cancel the inference run for the given api + + Parameters + ---------- + inference_run_id : int + + prompt_id : int + + version_id : int + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CancelModelRunResponse + + + Examples + -------- + from label_studio_sdk import LabelStudio + + client = LabelStudio( + api_key="YOUR_API_KEY", + ) + client.prompts.runs.cancel( + inference_run_id=1, + prompt_id=1, + version_id=1, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"api/prompts/{jsonable_encoder(prompt_id)}/versions/{jsonable_encoder(version_id)}/inference-runs/{jsonable_encoder(inference_run_id)}/cancel", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CancelModelRunResponse, + construct_type( + type_=CancelModelRunResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + class AsyncRunsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): @@ -385,3 +445,70 @@ async def main() -> None: except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + + async def cancel( + self, + inference_run_id: int, + prompt_id: int, + version_id: int, + *, + request_options: typing.Optional[RequestOptions] = None, + ) -> CancelModelRunResponse: + """ + Cancel the inference run for the given api + + Parameters + ---------- + inference_run_id : int + + prompt_id : int + + version_id : int + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CancelModelRunResponse + + + Examples + -------- + import asyncio + + from label_studio_sdk import AsyncLabelStudio + + client = AsyncLabelStudio( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.prompts.runs.cancel( + inference_run_id=1, + prompt_id=1, + version_id=1, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"api/prompts/{jsonable_encoder(prompt_id)}/versions/{jsonable_encoder(version_id)}/inference-runs/{jsonable_encoder(inference_run_id)}/cancel", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CancelModelRunResponse, + construct_type( + type_=CancelModelRunResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/label_studio_sdk/tasks/client.py b/src/label_studio_sdk/tasks/client.py index 1c50065ee..b8970b091 100644 --- a/src/label_studio_sdk/tasks/client.py +++ b/src/label_studio_sdk/tasks/client.py @@ -372,7 +372,8 @@ def create( api_key="YOUR_API_KEY", ) client.tasks.create( - data={"key": "value"}, + data={"image": "https://example.com/image.jpg", "text": "Hello, world!"}, + project=1, ) """ _response = self._client_wrapper.httpx_client.request( @@ -1203,7 +1204,11 @@ async def create( async def main() -> None: await client.tasks.create( - data={"key": "value"}, + data={ + "image": "https://example.com/image.jpg", + "text": "Hello, world!", + }, + project=1, ) diff --git a/src/label_studio_sdk/types/__init__.py b/src/label_studio_sdk/types/__init__.py index ecac1985a..f3078c9d4 100644 --- a/src/label_studio_sdk/types/__init__.py +++ 
b/src/label_studio_sdk/types/__init__.py @@ -38,6 +38,7 @@ from .blank_enum import BlankEnum from .blueprint_list import BlueprintList from .budget_reset_period_enum import BudgetResetPeriodEnum +from .cancel_model_run_response import CancelModelRunResponse from .child_filter import ChildFilter from .comment import Comment from .comment_request import CommentRequest @@ -153,6 +154,7 @@ from .paginated_lse_user_list import PaginatedLseUserList from .paginated_paginated_project_member_list import PaginatedPaginatedProjectMemberList from .paginated_project_member import PaginatedProjectMember +from .paginated_project_subset_tasks_response_list import PaginatedProjectSubsetTasksResponseList from .paginated_role_based_task_list import PaginatedRoleBasedTaskList from .pause import Pause from .pause_request import PauseRequest @@ -169,6 +171,9 @@ from .project_sampling import ProjectSampling from .project_skip_queue import ProjectSkipQueue from .project_subset_enum import ProjectSubsetEnum +from .project_subset_item import ProjectSubsetItem +from .project_subset_task_item import ProjectSubsetTaskItem +from .project_subset_tasks_response import ProjectSubsetTasksResponse from .project_template import ProjectTemplate from .project_template_request import ProjectTemplateRequest from .prompts_status_enum import PromptsStatusEnum @@ -267,6 +272,7 @@ "BlankEnum", "BlueprintList", "BudgetResetPeriodEnum", + "CancelModelRunResponse", "ChildFilter", "Comment", "CommentRequest", @@ -382,6 +388,7 @@ "PaginatedLseUserList", "PaginatedPaginatedProjectMemberList", "PaginatedProjectMember", + "PaginatedProjectSubsetTasksResponseList", "PaginatedRoleBasedTaskList", "Pause", "PauseRequest", @@ -398,6 +405,9 @@ "ProjectSampling", "ProjectSkipQueue", "ProjectSubsetEnum", + "ProjectSubsetItem", + "ProjectSubsetTaskItem", + "ProjectSubsetTasksResponse", "ProjectTemplate", "ProjectTemplateRequest", "PromptsStatusEnum", diff --git a/src/label_studio_sdk/types/cancel_model_run_response.py b/src/label_studio_sdk/types/cancel_model_run_response.py new file mode 100644 index 000000000..8db748797 --- /dev/null +++ b/src/label_studio_sdk/types/cancel_model_run_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class CancelModelRunResponse(UncheckedBaseModel): + detail: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/label_studio_sdk/types/paginated_project_subset_tasks_response_list.py b/src/label_studio_sdk/types/paginated_project_subset_tasks_response_list.py new file mode 100644 index 000000000..47937908f --- /dev/null +++ b/src/label_studio_sdk/types/paginated_project_subset_tasks_response_list.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .project_subset_tasks_response import ProjectSubsetTasksResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class PaginatedProjectSubsetTasksResponseList(UncheckedBaseModel): + count: int + next: typing.Optional[str] = None + previous: typing.Optional[str] = None + results: typing.List[ProjectSubsetTasksResponse] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/label_studio_sdk/types/project_subset_item.py b/src/label_studio_sdk/types/project_subset_item.py new file mode 100644 index 000000000..1fcb35ec2 --- /dev/null +++ b/src/label_studio_sdk/types/project_subset_item.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ProjectSubsetItem(UncheckedBaseModel): + columns_schema: typing.Optional[typing.List[typing.Dict[str, typing.Optional[typing.Any]]]] = None + count: int + subset: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/label_studio_sdk/types/project_subset_task_item.py b/src/label_studio_sdk/types/project_subset_task_item.py new file mode 100644 index 000000000..dd418752b --- /dev/null +++ b/src/label_studio_sdk/types/project_subset_task_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class ProjectSubsetTaskItem(UncheckedBaseModel): + data: typing.Dict[str, typing.Optional[typing.Any]] + error: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + ground_truth: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + id: typing.Optional[int] = None + prediction: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None + score: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/label_studio_sdk/types/project_subset_tasks_response.py b/src/label_studio_sdk/types/project_subset_tasks_response.py new file mode 100644 index 000000000..21c24cece --- /dev/null +++ b/src/label_studio_sdk/types/project_subset_tasks_response.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +from .project_subset_task_item import ProjectSubsetTaskItem +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ProjectSubsetTasksResponse(UncheckedBaseModel): + next_cursor: typing.Optional[str] = None + previous_cursor: typing.Optional[str] = None + task_count: typing.Optional[int] = pydantic.Field(default=None) + """ + Present only when include_total=true + """ + + task_result_list: typing.List[ProjectSubsetTaskItem] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/tests/prompts/test_runs.py b/tests/prompts/test_runs.py index a42f776cd..cc00dcd4a 100644 --- a/tests/prompts/test_runs.py +++ b/tests/prompts/test_runs.py @@ -95,3 +95,13 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No async_response = await async_client.prompts.runs.create(prompt_id=1, version_id=1, project=1) validate_response(async_response, expected_response, expected_types) + + +async def test_cancel(client: LabelStudio, async_client: AsyncLabelStudio) -> None: + expected_response: typing.Any = {"detail": "detail"} + expected_types: typing.Any = {"detail": None} + response = client.prompts.runs.cancel(inference_run_id=1, prompt_id=1, version_id=1) + validate_response(response, expected_response, expected_types) + + async_response = await async_client.prompts.runs.cancel(inference_run_id=1, prompt_id=1, version_id=1) + validate_response(async_response, expected_response, expected_types) diff --git a/tests/test_prompts.py b/tests/test_prompts.py index d64449c92..2eefc4703 100644 --- a/tests/test_prompts.py +++ b/tests/test_prompts.py @@ -26,6 +26,56 @@ async def test_batch_predictions(client: LabelStudio, async_client: AsyncLabelSt validate_response(async_response, expected_response, expected_types) +async def test_subset_tasks(client: LabelStudio, async_client: AsyncLabelStudio) -> None: + expected_response: typing.Any = { + "count": 123, + "next": "http://api.example.org/accounts/?page=4", + "previous": "http://api.example.org/accounts/?page=2", + "results": [ + { + "next_cursor": "next_cursor", + "previous_cursor": "previous_cursor", + "task_count": 1, + "task_result_list": [{"data": {"key": "value"}}], + } + ], + } + expected_types: typing.Any = { + "count": "integer", + "next": None, + "previous": None, + "results": ( + "list", + { + 0: { + "next_cursor": None, + "previous_cursor": None, + "task_count": "integer", + "task_result_list": ("list", {0: {"data": ("dict", {0: (None, None)})}}), + } + }, + ), + } + response = client.prompts.subset_tasks(project_pk=1) + validate_response(response, expected_response, expected_types) + + async_response = await async_client.prompts.subset_tasks(project_pk=1) + validate_response(async_response, expected_response, expected_types) + + +async def test_subsets(client: LabelStudio, async_client: AsyncLabelStudio) -> None: + expected_response: typing.Any = [{"columns_schema": [{"key": "value"}], "count": 1, "subset": "subset"}] + expected_types: typing.Tuple[typing.Any, typing.Any] = ( + "list", + {0: {"columns_schema": ("list", {0: ("dict", {0: (None, None)})}), "count": "integer", "subset": None}}, + ) + response = client.prompts.subsets(project_pk=1) + validate_response(response, expected_response, expected_types) + + 
async_response = await async_client.prompts.subsets(project_pk=1) + validate_response(async_response, expected_response, expected_types) + + async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> None: expected_response: typing.Any = [ { diff --git a/tests/test_tasks.py b/tests/test_tasks.py index cff84d376..a9634b271 100644 --- a/tests/test_tasks.py +++ b/tests/test_tasks.py @@ -81,29 +81,20 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "annotations": "annotations", "annotations_ids": "annotations_ids", "annotations_results": "annotations_results", - "annotators": [1, 1], + "annotators": [1], "annotators_count": 1, "avg_lead_time": 1.1, "cancelled_annotations": 1, - "comment_authors": [{"comment_authors": {"key": "value"}}, {"comment_authors": {"key": "value"}}], + "comment_authors": [{"key": "value"}], "comment_authors_count": 1, - "comment_count": 2147483647, + "comment_count": 1, "comments": "comments", "completed_at": "2024-01-15T09:30:00Z", "created_at": "2024-01-15T09:30:00Z", "data": {"key": "value"}, "draft_exists": True, "drafts": [ - { - "created_at": "2024-01-15T09:30:00Z", - "result": [{"result": {"key": "value"}}, {"result": {"key": "value"}}], - "updated_at": "2024-01-15T09:30:00Z", - }, - { - "created_at": "2024-01-15T09:30:00Z", - "result": [{"result": {"key": "value"}}, {"result": {"key": "value"}}], - "updated_at": "2024-01-15T09:30:00Z", - }, + {"created_at": "2024-01-15T09:30:00Z", "result": [{"key": "value"}], "updated_at": "2024-01-15T09:30:00Z"} ], "file_upload": "file_upload", "ground_truth": True, @@ -112,46 +103,35 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "is_labeled": True, "last_comment_updated_at": "2024-01-15T09:30:00Z", "meta": {"key": "value"}, - "overlap": 2147483647, + "overlap": 1, "predictions": [ { "created_at": "2024-01-15T09:30:00Z", - "model": {"model": {"key": "value"}}, - "model_run": {"model_run": {"key": "value"}}, - "model_version": "model_version", - "project": 1, - "result": [{"result": {"key": "value"}}, {"result": {"key": "value"}}], - "score": 1.1, - "task": 1, - "updated_at": "2024-01-15T09:30:00Z", - }, - { - "created_at": "2024-01-15T09:30:00Z", - "model": {"model": {"key": "value"}}, - "model_run": {"model_run": {"key": "value"}}, + "model": {"key": "value"}, + "model_run": {"key": "value"}, "model_version": "model_version", "project": 1, - "result": [{"result": {"key": "value"}}, {"result": {"key": "value"}}], + "result": [{"key": "value"}], "score": 1.1, "task": 1, "updated_at": "2024-01-15T09:30:00Z", - }, + } ], "predictions_model_versions": "predictions_model_versions", "predictions_results": "predictions_results", "predictions_score": 1.1, "project": 1, "reviewed": True, - "reviewers": [{"reviewers": {"key": "value"}}, {"reviewers": {"key": "value"}}], + "reviewers": [{"key": "value"}], "reviewers_count": 1, "reviews_accepted": 1, "reviews_rejected": 1, "storage_filename": "storage_filename", "total_annotations": 1, "total_predictions": 1, - "unresolved_comment_count": 2147483647, + "unresolved_comment_count": 1, "updated_at": "2024-01-15T09:30:00Z", - "updated_by": [{"updated_by": {"key": "value"}}, {"updated_by": {"key": "value"}}], + "updated_by": [{"key": "value"}], } expected_types: typing.Any = { "agreement": None, @@ -159,11 +139,11 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "annotations": None, "annotations_ids": None, "annotations_results": None, - "annotators": ("list", {0: 
"integer", 1: "integer"}), + "annotators": ("list", {0: "integer"}), "annotators_count": "integer", "avg_lead_time": None, "cancelled_annotations": "integer", - "comment_authors": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), + "comment_authors": ("list", {0: ("dict", {0: (None, None)})}), "comment_authors_count": "integer", "comment_count": "integer", "comments": None, @@ -176,14 +156,9 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No { 0: { "created_at": "datetime", - "result": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), - "updated_at": "datetime", - }, - 1: { - "created_at": "datetime", - "result": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), + "result": ("list", {0: ("dict", {0: (None, None)})}), "updated_at": "datetime", - }, + } }, ), "file_upload": None, @@ -203,22 +178,11 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "model_run": ("dict", {0: (None, None)}), "model_version": None, "project": "integer", - "result": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), - "score": None, - "task": "integer", - "updated_at": "datetime", - }, - 1: { - "created_at": "datetime", - "model": ("dict", {0: (None, None)}), - "model_run": ("dict", {0: (None, None)}), - "model_version": None, - "project": "integer", - "result": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), + "result": ("list", {0: ("dict", {0: (None, None)})}), "score": None, "task": "integer", "updated_at": "datetime", - }, + } }, ), "predictions_model_versions": None, @@ -226,7 +190,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "predictions_score": None, "project": "integer", "reviewed": None, - "reviewers": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), + "reviewers": ("list", {0: ("dict", {0: (None, None)})}), "reviewers_count": "integer", "reviews_accepted": "integer", "reviews_rejected": "integer", @@ -235,12 +199,14 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "total_predictions": "integer", "unresolved_comment_count": "integer", "updated_at": "datetime", - "updated_by": ("list", {0: ("dict", {0: (None, None)}), 1: ("dict", {0: (None, None)})}), + "updated_by": ("list", {0: ("dict", {0: (None, None)})}), } - response = client.tasks.create(data={"key": "value"}) + response = client.tasks.create(data={"image": "https://example.com/image.jpg", "text": "Hello, world!"}, project=1) validate_response(response, expected_response, expected_types) - async_response = await async_client.tasks.create(data={"key": "value"}) + async_response = await async_client.tasks.create( + data={"image": "https://example.com/image.jpg", "text": "Hello, world!"}, project=1 + ) validate_response(async_response, expected_response, expected_types) From 6cc5fa2b8189aab04e8e805663d8a32db44d93ac Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 25 Sep 2025 14:16:45 +0000 Subject: [PATCH 4/7] SDK regeneration --- .mock/definition/__package__.yml | 73 +++++++++++++++++-- .mock/definition/projectTemplates.yml | 2 + .mock/definition/projects.yml | 6 +- .mock/definition/workspaces.yml | 2 +- .mock/openapi/openapi.yaml | 60 ++++++++++++--- src/label_studio_sdk/__init__.py | 10 ++- .../lse_project_create_request_sampling.py | 4 +- ...hed_lse_project_update_request_sampling.py | 4 +- 
src/label_studio_sdk/types/__init__.py | 10 ++- .../types/all_roles_project_list_sampling.py | 4 +- .../types/lse_project_create_sampling.py | 4 +- .../types/lse_project_sampling.py | 4 +- .../types/lse_project_update_sampling.py | 4 +- .../types/project_sampling.py | 4 +- src/label_studio_sdk/types/review_settings.py | 14 ++++ .../types/review_settings_request.py | 14 ++++ .../types/review_settings_request_sampling.py | 8 ++ .../types/review_settings_sampling.py | 8 ++ .../types/review_settings_sampling_enum.py | 5 ++ .../{sampling_enum.py => sampling_de5enum.py} | 2 +- tests/test_project_templates.py | 4 + tests/test_projects.py | 4 + 22 files changed, 213 insertions(+), 37 deletions(-) create mode 100644 src/label_studio_sdk/types/review_settings_request_sampling.py create mode 100644 src/label_studio_sdk/types/review_settings_sampling.py create mode 100644 src/label_studio_sdk/types/review_settings_sampling_enum.py rename src/label_studio_sdk/types/{sampling_enum.py => sampling_de5enum.py} (84%) diff --git a/.mock/definition/__package__.yml b/.mock/definition/__package__.yml index dd0ffd77d..a9e91b8df 100644 --- a/.mock/definition/__package__.yml +++ b/.mock/definition/__package__.yml @@ -165,7 +165,7 @@ types: AllRolesProjectListSampling: discriminated: false union: - - SamplingEnum + - SamplingDe5Enum - NullEnum source: openapi: openapi/openapi.yaml @@ -3657,7 +3657,7 @@ types: LseProjectSampling: discriminated: false union: - - SamplingEnum + - SamplingDe5Enum - NullEnum source: openapi: openapi/openapi.yaml @@ -3892,7 +3892,7 @@ types: LseProjectCreateSampling: discriminated: false union: - - SamplingEnum + - SamplingDe5Enum - NullEnum source: openapi: openapi/openapi.yaml @@ -4055,7 +4055,7 @@ types: LseProjectUpdateSampling: discriminated: false union: - - SamplingEnum + - SamplingDe5Enum - NullEnum source: openapi: openapi/openapi.yaml @@ -6108,7 +6108,7 @@ types: ProjectSampling: discriminated: false union: - - SamplingEnum + - SamplingDe5Enum - NullEnum source: openapi: openapi/openapi.yaml @@ -6796,6 +6796,20 @@ types: source: openapi: openapi/openapi.yaml inline: true + ReviewSettingsSampling: + discriminated: false + docs: |- + Task sampling strategy in the review stream (by task id or random) + + * `task_id` - By Task ID + * `random` - Random + union: + - ReviewSettingsSamplingEnum + - BlankEnum + - NullEnum + source: + openapi: openapi/openapi.yaml + inline: true ReviewSettings: properties: anonymize_annotations: @@ -6831,6 +6845,18 @@ types: review_only_manual_assignments: type: optional docs: When set True, review queue is built only from manually assigned tasks + review_task_limit_percent: + type: optional + docs: Percent of tasks to include in review stream (0-100). Null/0 disables. 
+ validation: + pattern: ^-?\d{0,3}(?:\.\d{0,2})?$ + sampling: + type: optional + docs: |- + Task sampling strategy in the review stream (by task id or random) + + * `task_id` - By Task ID + * `random` - Random show_agreement_to_reviewers: type: optional docs: Show the agreement column to reviewers @@ -6875,6 +6901,20 @@ types: source: openapi: openapi/openapi.yaml inline: true + ReviewSettingsRequestSampling: + discriminated: false + docs: |- + Task sampling strategy in the review stream (by task id or random) + + * `task_id` - By Task ID + * `random` - Random + union: + - ReviewSettingsSamplingEnum + - BlankEnum + - NullEnum + source: + openapi: openapi/openapi.yaml + inline: true ReviewSettingsRequest: properties: anonymize_annotations: @@ -6908,6 +6948,18 @@ types: review_only_manual_assignments: type: optional docs: When set True, review queue is built only from manually assigned tasks + review_task_limit_percent: + type: optional + docs: Percent of tasks to include in review stream (0-100). Null/0 disables. + validation: + pattern: ^-?\d{0,3}(?:\.\d{0,2})?$ + sampling: + type: optional + docs: |- + Task sampling strategy in the review stream (by task id or random) + + * `task_id` - By Task ID + * `random` - Random show_agreement_to_reviewers: type: optional docs: Show the agreement column to reviewers @@ -6924,6 +6976,15 @@ types: false, hides columns not referenced by the label interface source: openapi: openapi/openapi.yaml + ReviewSettingsSamplingEnum: + enum: + - task_id + - random + docs: |- + * `task_id` - By Task ID + * `random` - Random + source: + openapi: openapi/openapi.yaml ReviewedEnum: enum: - only @@ -7317,7 +7378,7 @@ types: workspaces_groups: optional>> source: openapi: openapi/openapi.yaml - SamplingEnum: + SamplingDe5Enum: enum: - value: Sequential sampling name: SequentialSampling diff --git a/.mock/definition/projectTemplates.yml b/.mock/definition/projectTemplates.yml index 3f83a55b7..3aa660ed1 100644 --- a/.mock/definition/projectTemplates.yml +++ b/.mock/definition/projectTemplates.yml @@ -357,6 +357,8 @@ service: require_comment_on_reject: true review_criteria: all review_only_manual_assignments: true + review_task_limit_percent: review_task_limit_percent + sampling: task_id show_agreement_to_reviewers: true show_data_manager_to_reviewers: true show_instruction: true diff --git a/.mock/definition/projects.yml b/.mock/definition/projects.yml index 810124ae8..99b2ae3fb 100644 --- a/.mock/definition/projects.yml +++ b/.mock/definition/projects.yml @@ -9,7 +9,7 @@ types: LseProjectCreateRequestSampling: discriminated: false union: - - root.SamplingEnum + - root.SamplingDe5Enum - root.NullEnum source: openapi: openapi/openapi.yaml @@ -25,7 +25,7 @@ types: PatchedLseProjectUpdateRequestSampling: discriminated: false union: - - root.SamplingEnum + - root.SamplingDe5Enum - root.NullEnum source: openapi: openapi/openapi.yaml @@ -814,6 +814,8 @@ service: require_comment_on_reject: true review_criteria: all review_only_manual_assignments: true + review_task_limit_percent: review_task_limit_percent + sampling: task_id show_agreement_to_reviewers: true show_data_manager_to_reviewers: true show_instruction: true diff --git a/.mock/definition/workspaces.yml b/.mock/definition/workspaces.yml index a5a3bee8c..06c79e9d3 100644 --- a/.mock/definition/workspaces.yml +++ b/.mock/definition/workspaces.yml @@ -447,7 +447,7 @@ types: ProjectRequestSampling: discriminated: false union: - - root.SamplingEnum + - root.SamplingDe5Enum - root.NullEnum source: openapi: 
openapi/openapi.yaml diff --git a/.mock/openapi/openapi.yaml b/.mock/openapi/openapi.yaml index c2009e6ed..b420616fa 100644 --- a/.mock/openapi/openapi.yaml +++ b/.mock/openapi/openapi.yaml @@ -17773,7 +17773,7 @@ components: sampling: nullable: true oneOf: - - $ref: '#/components/schemas/SamplingEnum' + - $ref: '#/components/schemas/SamplingDe5Enum' - $ref: '#/components/schemas/NullEnum' show_annotation_history: description: Show annotation history to annotator @@ -22501,7 +22501,7 @@ components: sampling: nullable: true oneOf: - - $ref: '#/components/schemas/SamplingEnum' + - $ref: '#/components/schemas/SamplingDe5Enum' - $ref: '#/components/schemas/NullEnum' show_annotation_history: description: Show annotation history to annotator @@ -22781,7 +22781,7 @@ components: sampling: nullable: true oneOf: - - $ref: '#/components/schemas/SamplingEnum' + - $ref: '#/components/schemas/SamplingDe5Enum' - $ref: '#/components/schemas/NullEnum' show_annotation_history: description: Show annotation history to annotator @@ -22939,7 +22939,7 @@ components: sampling: nullable: true oneOf: - - $ref: '#/components/schemas/SamplingEnum' + - $ref: '#/components/schemas/SamplingDe5Enum' - $ref: '#/components/schemas/NullEnum' show_annotation_history: description: Show annotation history to annotator @@ -23145,7 +23145,7 @@ components: sampling: nullable: true oneOf: - - $ref: '#/components/schemas/SamplingEnum' + - $ref: '#/components/schemas/SamplingDe5Enum' - $ref: '#/components/schemas/NullEnum' show_annotation_history: description: Show annotation history to annotator @@ -26876,7 +26876,7 @@ components: sampling: nullable: true oneOf: - - $ref: '#/components/schemas/SamplingEnum' + - $ref: '#/components/schemas/SamplingDe5Enum' - $ref: '#/components/schemas/NullEnum' show_annotation_history: description: Show annotation history to annotator @@ -27941,7 +27941,7 @@ components: sampling: nullable: true oneOf: - - $ref: '#/components/schemas/SamplingEnum' + - $ref: '#/components/schemas/SamplingDe5Enum' - $ref: '#/components/schemas/NullEnum' show_annotation_history: description: Show annotation history to annotator @@ -28347,7 +28347,7 @@ components: sampling: nullable: true oneOf: - - $ref: '#/components/schemas/SamplingEnum' + - $ref: '#/components/schemas/SamplingDe5Enum' - $ref: '#/components/schemas/NullEnum' show_annotation_history: description: Show annotation history to annotator @@ -28950,6 +28950,23 @@ components: review_only_manual_assignments: description: When set True, review queue is built only from manually assigned tasks type: boolean + review_task_limit_percent: + description: Percent of tasks to include in review stream (0-100). Null/0 disables. + format: decimal + nullable: true + pattern: ^-?\d{0,3}(?:\.\d{0,2})?$ + type: string + sampling: + description: |- + Task sampling strategy in the review stream (by task id or random) + + * `task_id` - By Task ID + * `random` - Random + nullable: true + oneOf: + - $ref: '#/components/schemas/ReviewSettingsSamplingEnum' + - $ref: '#/components/schemas/BlankEnum' + - $ref: '#/components/schemas/NullEnum' show_agreement_to_reviewers: description: Show the agreement column to reviewers type: boolean @@ -29010,6 +29027,23 @@ components: review_only_manual_assignments: description: When set True, review queue is built only from manually assigned tasks type: boolean + review_task_limit_percent: + description: Percent of tasks to include in review stream (0-100). Null/0 disables. 
+ format: decimal + nullable: true + pattern: ^-?\d{0,3}(?:\.\d{0,2})?$ + type: string + sampling: + description: |- + Task sampling strategy in the review stream (by task id or random) + + * `task_id` - By Task ID + * `random` - Random + nullable: true + oneOf: + - $ref: '#/components/schemas/ReviewSettingsSamplingEnum' + - $ref: '#/components/schemas/BlankEnum' + - $ref: '#/components/schemas/NullEnum' show_agreement_to_reviewers: description: Show the agreement column to reviewers type: boolean @@ -29024,6 +29058,14 @@ components: nullable: true type: boolean type: object + ReviewSettingsSamplingEnum: + description: |- + * `task_id` - By Task ID + * `random` - Random + enum: + - task_id + - random + type: string ReviewedEnum: description: |- * `only` - only @@ -29553,7 +29595,7 @@ components: type: array type: array type: object - SamplingEnum: + SamplingDe5Enum: description: |- * `Sequential sampling` - Tasks are ordered by Data manager ordering * `Uniform sampling` - Tasks are chosen randomly diff --git a/src/label_studio_sdk/__init__.py b/src/label_studio_sdk/__init__.py index 28d451aaa..cc6a8774d 100644 --- a/src/label_studio_sdk/__init__.py +++ b/src/label_studio_sdk/__init__.py @@ -187,8 +187,11 @@ ReviewSettingsRequest, ReviewSettingsRequestRequeueRejectedTasksMode, ReviewSettingsRequestReviewCriteria, + ReviewSettingsRequestSampling, ReviewSettingsRequeueRejectedTasksMode, ReviewSettingsReviewCriteria, + ReviewSettingsSampling, + ReviewSettingsSamplingEnum, ReviewedEnum, Role9E7Enum, RoleBasedTask, @@ -196,7 +199,7 @@ S3ImportStorage, SamlSettings, SamlSettingsUpdate, - SamplingEnum, + SamplingDe5Enum, ScimSettings, ScimSettingsUpdate, ScopeEnum, @@ -577,8 +580,11 @@ "ReviewSettingsRequest", "ReviewSettingsRequestRequeueRejectedTasksMode", "ReviewSettingsRequestReviewCriteria", + "ReviewSettingsRequestSampling", "ReviewSettingsRequeueRejectedTasksMode", "ReviewSettingsReviewCriteria", + "ReviewSettingsSampling", + "ReviewSettingsSamplingEnum", "ReviewedEnum", "Role9E7Enum", "RoleBasedTask", @@ -586,7 +592,7 @@ "S3ImportStorage", "SamlSettings", "SamlSettingsUpdate", - "SamplingEnum", + "SamplingDe5Enum", "ScimSettings", "ScimSettingsUpdate", "ScopeEnum", diff --git a/src/label_studio_sdk/projects/types/lse_project_create_request_sampling.py b/src/label_studio_sdk/projects/types/lse_project_create_request_sampling.py index 04e070a9b..5134a0dbc 100644 --- a/src/label_studio_sdk/projects/types/lse_project_create_request_sampling.py +++ b/src/label_studio_sdk/projects/types/lse_project_create_request_sampling.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. import typing -from ...types.sampling_enum import SamplingEnum +from ...types.sampling_de5enum import SamplingDe5Enum from ...types.null_enum import NullEnum -LseProjectCreateRequestSampling = typing.Union[SamplingEnum, NullEnum] +LseProjectCreateRequestSampling = typing.Union[SamplingDe5Enum, NullEnum] diff --git a/src/label_studio_sdk/projects/types/patched_lse_project_update_request_sampling.py b/src/label_studio_sdk/projects/types/patched_lse_project_update_request_sampling.py index ce868747d..1788f3c1f 100644 --- a/src/label_studio_sdk/projects/types/patched_lse_project_update_request_sampling.py +++ b/src/label_studio_sdk/projects/types/patched_lse_project_update_request_sampling.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. 
import typing -from ...types.sampling_enum import SamplingEnum +from ...types.sampling_de5enum import SamplingDe5Enum from ...types.null_enum import NullEnum -PatchedLseProjectUpdateRequestSampling = typing.Union[SamplingEnum, NullEnum] +PatchedLseProjectUpdateRequestSampling = typing.Union[SamplingDe5Enum, NullEnum] diff --git a/src/label_studio_sdk/types/__init__.py b/src/label_studio_sdk/types/__init__.py index f3078c9d4..4ac4627ae 100644 --- a/src/label_studio_sdk/types/__init__.py +++ b/src/label_studio_sdk/types/__init__.py @@ -188,8 +188,11 @@ from .review_settings_request import ReviewSettingsRequest from .review_settings_request_requeue_rejected_tasks_mode import ReviewSettingsRequestRequeueRejectedTasksMode from .review_settings_request_review_criteria import ReviewSettingsRequestReviewCriteria +from .review_settings_request_sampling import ReviewSettingsRequestSampling from .review_settings_requeue_rejected_tasks_mode import ReviewSettingsRequeueRejectedTasksMode from .review_settings_review_criteria import ReviewSettingsReviewCriteria +from .review_settings_sampling import ReviewSettingsSampling +from .review_settings_sampling_enum import ReviewSettingsSamplingEnum from .reviewed_enum import ReviewedEnum from .role9e7enum import Role9E7Enum from .role_based_task import RoleBasedTask @@ -197,7 +200,7 @@ from .s3import_storage import S3ImportStorage from .saml_settings import SamlSettings from .saml_settings_update import SamlSettingsUpdate -from .sampling_enum import SamplingEnum +from .sampling_de5enum import SamplingDe5Enum from .scim_settings import ScimSettings from .scim_settings_update import ScimSettingsUpdate from .scope_enum import ScopeEnum @@ -422,8 +425,11 @@ "ReviewSettingsRequest", "ReviewSettingsRequestRequeueRejectedTasksMode", "ReviewSettingsRequestReviewCriteria", + "ReviewSettingsRequestSampling", "ReviewSettingsRequeueRejectedTasksMode", "ReviewSettingsReviewCriteria", + "ReviewSettingsSampling", + "ReviewSettingsSamplingEnum", "ReviewedEnum", "Role9E7Enum", "RoleBasedTask", @@ -431,7 +437,7 @@ "S3ImportStorage", "SamlSettings", "SamlSettingsUpdate", - "SamplingEnum", + "SamplingDe5Enum", "ScimSettings", "ScimSettingsUpdate", "ScopeEnum", diff --git a/src/label_studio_sdk/types/all_roles_project_list_sampling.py b/src/label_studio_sdk/types/all_roles_project_list_sampling.py index c92dcee55..6d7c3a27a 100644 --- a/src/label_studio_sdk/types/all_roles_project_list_sampling.py +++ b/src/label_studio_sdk/types/all_roles_project_list_sampling.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. import typing -from .sampling_enum import SamplingEnum +from .sampling_de5enum import SamplingDe5Enum from .null_enum import NullEnum -AllRolesProjectListSampling = typing.Union[SamplingEnum, NullEnum] +AllRolesProjectListSampling = typing.Union[SamplingDe5Enum, NullEnum] diff --git a/src/label_studio_sdk/types/lse_project_create_sampling.py b/src/label_studio_sdk/types/lse_project_create_sampling.py index 41ade7ac7..9979401ba 100644 --- a/src/label_studio_sdk/types/lse_project_create_sampling.py +++ b/src/label_studio_sdk/types/lse_project_create_sampling.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. 
import typing -from .sampling_enum import SamplingEnum +from .sampling_de5enum import SamplingDe5Enum from .null_enum import NullEnum -LseProjectCreateSampling = typing.Union[SamplingEnum, NullEnum] +LseProjectCreateSampling = typing.Union[SamplingDe5Enum, NullEnum] diff --git a/src/label_studio_sdk/types/lse_project_sampling.py b/src/label_studio_sdk/types/lse_project_sampling.py index fa58a1f30..383addf49 100644 --- a/src/label_studio_sdk/types/lse_project_sampling.py +++ b/src/label_studio_sdk/types/lse_project_sampling.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. import typing -from .sampling_enum import SamplingEnum +from .sampling_de5enum import SamplingDe5Enum from .null_enum import NullEnum -LseProjectSampling = typing.Union[SamplingEnum, NullEnum] +LseProjectSampling = typing.Union[SamplingDe5Enum, NullEnum] diff --git a/src/label_studio_sdk/types/lse_project_update_sampling.py b/src/label_studio_sdk/types/lse_project_update_sampling.py index 0763b90e4..206e123f2 100644 --- a/src/label_studio_sdk/types/lse_project_update_sampling.py +++ b/src/label_studio_sdk/types/lse_project_update_sampling.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. import typing -from .sampling_enum import SamplingEnum +from .sampling_de5enum import SamplingDe5Enum from .null_enum import NullEnum -LseProjectUpdateSampling = typing.Union[SamplingEnum, NullEnum] +LseProjectUpdateSampling = typing.Union[SamplingDe5Enum, NullEnum] diff --git a/src/label_studio_sdk/types/project_sampling.py b/src/label_studio_sdk/types/project_sampling.py index ee1079d71..a3aab25e4 100644 --- a/src/label_studio_sdk/types/project_sampling.py +++ b/src/label_studio_sdk/types/project_sampling.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. import typing -from .sampling_enum import SamplingEnum +from .sampling_de5enum import SamplingDe5Enum from .null_enum import NullEnum -ProjectSampling = typing.Union[SamplingEnum, NullEnum] +ProjectSampling = typing.Union[SamplingDe5Enum, NullEnum] diff --git a/src/label_studio_sdk/types/review_settings.py b/src/label_studio_sdk/types/review_settings.py index 9ccfd0605..afb39d3ee 100644 --- a/src/label_studio_sdk/types/review_settings.py +++ b/src/label_studio_sdk/types/review_settings.py @@ -5,6 +5,7 @@ import pydantic from .review_settings_requeue_rejected_tasks_mode import ReviewSettingsRequeueRejectedTasksMode from .review_settings_review_criteria import ReviewSettingsReviewCriteria +from .review_settings_sampling import ReviewSettingsSampling from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -54,6 +55,19 @@ class ReviewSettings(UncheckedBaseModel): When set True, review queue is built only from manually assigned tasks """ + review_task_limit_percent: typing.Optional[str] = pydantic.Field(default=None) + """ + Percent of tasks to include in review stream (0-100). Null/0 disables. 
+ """ + + sampling: typing.Optional[ReviewSettingsSampling] = pydantic.Field(default=None) + """ + Task sampling strategy in the review stream (by task id or random) + + * `task_id` - By Task ID + * `random` - Random + """ + show_agreement_to_reviewers: typing.Optional[bool] = pydantic.Field(default=None) """ Show the agreement column to reviewers diff --git a/src/label_studio_sdk/types/review_settings_request.py b/src/label_studio_sdk/types/review_settings_request.py index e42a24869..7faf2f0f0 100644 --- a/src/label_studio_sdk/types/review_settings_request.py +++ b/src/label_studio_sdk/types/review_settings_request.py @@ -5,6 +5,7 @@ import pydantic from .review_settings_request_requeue_rejected_tasks_mode import ReviewSettingsRequestRequeueRejectedTasksMode from .review_settings_request_review_criteria import ReviewSettingsRequestReviewCriteria +from .review_settings_request_sampling import ReviewSettingsRequestSampling from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -54,6 +55,19 @@ class ReviewSettingsRequest(UncheckedBaseModel): When set True, review queue is built only from manually assigned tasks """ + review_task_limit_percent: typing.Optional[str] = pydantic.Field(default=None) + """ + Percent of tasks to include in review stream (0-100). Null/0 disables. + """ + + sampling: typing.Optional[ReviewSettingsRequestSampling] = pydantic.Field(default=None) + """ + Task sampling strategy in the review stream (by task id or random) + + * `task_id` - By Task ID + * `random` - Random + """ + show_agreement_to_reviewers: typing.Optional[bool] = pydantic.Field(default=None) """ Show the agreement column to reviewers diff --git a/src/label_studio_sdk/types/review_settings_request_sampling.py b/src/label_studio_sdk/types/review_settings_request_sampling.py new file mode 100644 index 000000000..5187b391f --- /dev/null +++ b/src/label_studio_sdk/types/review_settings_request_sampling.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .review_settings_sampling_enum import ReviewSettingsSamplingEnum +from .blank_enum import BlankEnum +from .null_enum import NullEnum + +ReviewSettingsRequestSampling = typing.Union[ReviewSettingsSamplingEnum, BlankEnum, NullEnum] diff --git a/src/label_studio_sdk/types/review_settings_sampling.py b/src/label_studio_sdk/types/review_settings_sampling.py new file mode 100644 index 000000000..d9cdeb1af --- /dev/null +++ b/src/label_studio_sdk/types/review_settings_sampling.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .review_settings_sampling_enum import ReviewSettingsSamplingEnum +from .blank_enum import BlankEnum +from .null_enum import NullEnum + +ReviewSettingsSampling = typing.Union[ReviewSettingsSamplingEnum, BlankEnum, NullEnum] diff --git a/src/label_studio_sdk/types/review_settings_sampling_enum.py b/src/label_studio_sdk/types/review_settings_sampling_enum.py new file mode 100644 index 000000000..95f6eb83f --- /dev/null +++ b/src/label_studio_sdk/types/review_settings_sampling_enum.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ReviewSettingsSamplingEnum = typing.Union[typing.Literal["task_id", "random"], typing.Any] diff --git a/src/label_studio_sdk/types/sampling_enum.py b/src/label_studio_sdk/types/sampling_de5enum.py similarity index 84% rename from src/label_studio_sdk/types/sampling_enum.py rename to src/label_studio_sdk/types/sampling_de5enum.py index b94881c72..37b7ea282 100644 --- a/src/label_studio_sdk/types/sampling_enum.py +++ b/src/label_studio_sdk/types/sampling_de5enum.py @@ -2,6 +2,6 @@ import typing -SamplingEnum = typing.Union[ +SamplingDe5Enum = typing.Union[ typing.Literal["Sequential sampling", "Uniform sampling", "Uncertainty sampling"], typing.Any ] diff --git a/tests/test_project_templates.py b/tests/test_project_templates.py index 2c8855bfc..5a8b1a091 100644 --- a/tests/test_project_templates.py +++ b/tests/test_project_templates.py @@ -249,6 +249,8 @@ async def test_create_project_from_template(client: LabelStudio, async_client: A "require_comment_on_reject": True, "review_criteria": "all", "review_only_manual_assignments": True, + "review_task_limit_percent": "review_task_limit_percent", + "sampling": "task_id", "show_agreement_to_reviewers": True, "show_data_manager_to_reviewers": True, "show_instruction": True, @@ -332,6 +334,8 @@ async def test_create_project_from_template(client: LabelStudio, async_client: A "require_comment_on_reject": None, "review_criteria": None, "review_only_manual_assignments": None, + "review_task_limit_percent": None, + "sampling": None, "show_agreement_to_reviewers": None, "show_data_manager_to_reviewers": None, "show_instruction": None, diff --git a/tests/test_projects.py b/tests/test_projects.py index e1f530262..14cac8ce3 100644 --- a/tests/test_projects.py +++ b/tests/test_projects.py @@ -350,6 +350,8 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "require_comment_on_reject": True, "review_criteria": "all", "review_only_manual_assignments": True, + "review_task_limit_percent": "review_task_limit_percent", + "sampling": "task_id", "show_agreement_to_reviewers": True, "show_data_manager_to_reviewers": True, "show_instruction": True, @@ -426,6 +428,8 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "require_comment_on_reject": None, "review_criteria": None, "review_only_manual_assignments": None, + "review_task_limit_percent": None, + "sampling": None, "show_agreement_to_reviewers": None, "show_data_manager_to_reviewers": None, "show_instruction": None, From 9cc1a9796ec8e8dc7f8f1946ccdf62dca2a92954 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 25 Sep 2025 16:34:15 +0000 Subject: [PATCH 5/7] SDK regeneration --- .mock/definition/__package__.yml | 27 +++++++++++ .mock/definition/datasetStorageAzure.yml | 10 ++++ .mock/definition/datasetStorageGcs.yml | 10 ++++ .mock/definition/importStorage/azure.yml | 5 ++ .mock/definition/importStorage/gcs.yml | 5 ++ .mock/definition/importStorage/gcswif.yml | 8 ++++ .mock/definition/importStorage/local.yml | 5 ++ .mock/openapi/openapi.yaml | 48 +++++++++++++++++++ reference.md | 24 ++++++++++ .../import_storage/gcswif/client.py | 30 ++++++++++++ .../types/azure_blob_import_storage.py | 5 ++ .../types/gcs_import_storage.py | 5 ++ .../types/gcswif_import_storage.py | 5 ++ .../types/gcswif_import_storage_request.py | 5 ++ .../types/local_files_import_storage.py | 5 ++ tests/import_storage/test_azure.py | 10 ++++ tests/import_storage/test_gcs.py | 10 ++++ 
tests/import_storage/test_gcswif.py | 10 ++++ tests/import_storage/test_local.py | 10 ++++ 19 files changed, 237 insertions(+) diff --git a/.mock/definition/__package__.yml b/.mock/definition/__package__.yml index a9e91b8df..7b7b6c00e 100644 --- a/.mock/definition/__package__.yml +++ b/.mock/definition/__package__.yml @@ -885,6 +885,9 @@ types: project: type: integer docs: A unique integer value identifying this project. + recursive_scan: + type: optional + docs: Perform recursive scan over the container content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -961,6 +964,9 @@ types: validation: min: 0 max: 32767 + recursive_scan: + type: optional + docs: Perform recursive scan over the container content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -1034,6 +1040,9 @@ types: validation: min: 0 max: 32767 + recursive_scan: + type: optional + docs: Perform recursive scan over the container content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -2418,6 +2427,9 @@ types: validation: min: 0 max: 32767 + recursive_scan: + type: optional + docs: Perform recursive scan over the bucket content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -2491,6 +2503,9 @@ types: validation: min: 0 max: 32767 + recursive_scan: + type: optional + docs: Perform recursive scan over the bucket content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -2627,6 +2642,9 @@ types: project: type: integer docs: A unique integer value identifying this project. + recursive_scan: + type: optional + docs: Perform recursive scan over the bucket content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -2857,6 +2875,9 @@ types: project: type: integer docs: A unique integer value identifying this project. + recursive_scan: + type: optional + docs: Perform recursive scan over the bucket content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -2936,6 +2957,9 @@ types: project: type: integer docs: A unique integer value identifying this project. + recursive_scan: + type: optional + docs: Perform recursive scan over the bucket content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -3323,6 +3347,9 @@ types: project: type: integer docs: A unique integer value identifying this project. 
+ recursive_scan: + type: optional + docs: Perform recursive scan over the directory content regex_filter: type: optional docs: Regex for filtering objects diff --git a/.mock/definition/datasetStorageAzure.yml b/.mock/definition/datasetStorageAzure.yml index bc4b08991..d2641c526 100644 --- a/.mock/definition/datasetStorageAzure.yml +++ b/.mock/definition/datasetStorageAzure.yml @@ -44,6 +44,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -90,6 +91,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -138,6 +140,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -184,6 +187,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -229,6 +233,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -313,6 +318,9 @@ service: validation: min: 0 max: 32767 + recursive_scan: + type: optional + docs: Perform recursive scan over the container content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -361,6 +369,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -426,6 +435,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true diff --git a/.mock/definition/datasetStorageGcs.yml b/.mock/definition/datasetStorageGcs.yml index dae91abf1..a4b70d5c0 100644 --- a/.mock/definition/datasetStorageGcs.yml +++ b/.mock/definition/datasetStorageGcs.yml @@ -44,6 +44,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -90,6 +91,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -138,6 +140,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -184,6 +187,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -229,6 +233,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -313,6 +318,9 @@ service: validation: min: 0 max: 32767 + recursive_scan: + type: optional + docs: Perform recursive scan over the bucket content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -361,6 +369,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true @@ -426,6 +435,7 @@ service: prefix: prefix presign: true presign_ttl: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synced: true diff --git a/.mock/definition/importStorage/azure.yml b/.mock/definition/importStorage/azure.yml index d70c13ee8..cadeca972 100644 --- a/.mock/definition/importStorage/azure.yml +++ b/.mock/definition/importStorage/azure.yml @@ -42,6 +42,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true 
regex_filter: regex_filter status: initialized synchronizable: true @@ -130,6 +131,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -236,6 +238,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -343,6 +346,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -387,6 +391,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true diff --git a/.mock/definition/importStorage/gcs.yml b/.mock/definition/importStorage/gcs.yml index 088f6a655..bfc71e315 100644 --- a/.mock/definition/importStorage/gcs.yml +++ b/.mock/definition/importStorage/gcs.yml @@ -42,6 +42,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -133,6 +134,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -242,6 +244,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -352,6 +355,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -396,6 +400,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true diff --git a/.mock/definition/importStorage/gcswif.yml b/.mock/definition/importStorage/gcswif.yml index 39e1762b3..14e417892 100644 --- a/.mock/definition/importStorage/gcswif.yml +++ b/.mock/definition/importStorage/gcswif.yml @@ -48,6 +48,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -95,6 +96,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -159,6 +161,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -255,6 +258,9 @@ service: project: type: optional docs: A unique integer value identifying this project. 
+ recursive_scan: + type: optional + docs: Perform recursive scan over the bucket content regex_filter: type: optional docs: Cloud storage regex for filtering objects @@ -302,6 +308,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -350,6 +357,7 @@ service: presign: true presign_ttl: 1 project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true diff --git a/.mock/definition/importStorage/local.yml b/.mock/definition/importStorage/local.yml index 0eb37cbed..0d654969c 100644 --- a/.mock/definition/importStorage/local.yml +++ b/.mock/definition/importStorage/local.yml @@ -37,6 +37,7 @@ service: key: value path: path project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -101,6 +102,7 @@ service: key: value path: path project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -183,6 +185,7 @@ service: key: value path: path project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -266,6 +269,7 @@ service: key: value path: path project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true @@ -305,6 +309,7 @@ service: key: value path: path project: 1 + recursive_scan: true regex_filter: regex_filter status: initialized synchronizable: true diff --git a/.mock/openapi/openapi.yaml b/.mock/openapi/openapi.yaml index b420616fa..e307b80a6 100644 --- a/.mock/openapi/openapi.yaml +++ b/.mock/openapi/openapi.yaml @@ -18569,6 +18569,10 @@ components: project: description: A unique integer value identifying this project. type: integer + recursive_scan: + description: Perform recursive scan over the container content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -18669,6 +18673,10 @@ components: maximum: 32767 minimum: 0 type: integer + recursive_scan: + description: Perform recursive scan over the container content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -18760,6 +18768,10 @@ components: maximum: 32767 minimum: 0 type: integer + recursive_scan: + description: Perform recursive scan over the container content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -20703,6 +20715,10 @@ components: maximum: 32767 minimum: 0 type: integer + recursive_scan: + description: Perform recursive scan over the bucket content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -20794,6 +20810,10 @@ components: maximum: 32767 minimum: 0 type: integer + recursive_scan: + description: Perform recursive scan over the bucket content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -20968,6 +20988,10 @@ components: project: description: A unique integer value identifying this project. type: integer + recursive_scan: + description: Perform recursive scan over the bucket content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -21269,6 +21293,10 @@ components: project: description: A unique integer value identifying this project. 
type: integer + recursive_scan: + description: Perform recursive scan over the bucket content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -21369,6 +21397,10 @@ components: project: description: A unique integer value identifying this project. type: integer + recursive_scan: + description: Perform recursive scan over the bucket content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -21958,6 +21990,10 @@ components: project: description: A unique integer value identifying this project. type: integer + recursive_scan: + description: Perform recursive scan over the directory content + nullable: true + type: boolean regex_filter: description: Regex for filtering objects nullable: true @@ -25868,6 +25904,10 @@ components: maximum: 32767 minimum: 0 type: integer + recursive_scan: + description: Perform recursive scan over the container content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -26456,6 +26496,10 @@ components: maximum: 32767 minimum: 0 type: integer + recursive_scan: + description: Perform recursive scan over the bucket content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true @@ -26637,6 +26681,10 @@ components: project: description: A unique integer value identifying this project. type: integer + recursive_scan: + description: Perform recursive scan over the bucket content + nullable: true + type: boolean regex_filter: description: Cloud storage regex for filtering objects nullable: true diff --git a/reference.md b/reference.md index 21305097e..7f3ddf61b 100644 --- a/reference.md +++ b/reference.md @@ -24042,6 +24042,14 @@ client.import_storage.gcswif.create(
+**recursive_scan:** `typing.Optional[bool]` — Perform recursive scan over the bucket content
+
 **regex_filter:** `typing.Optional[str]` — Cloud storage regex for filtering objects
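A minimal usage sketch for the new flag, assuming the standard `LabelStudio` client: the URL, token, project id, bucket, and prefix below are placeholders, and `bucket` is assumed to be accepted alongside the fields shown in this hunk.

```python
# Illustrative sketch only; every identifier below is a placeholder.
from label_studio_sdk import LabelStudio

client = LabelStudio(base_url="https://app.example.com", api_key="<api-token>")

client.import_storage.gcswif.create(
    project=1,                  # project the storage is attached to
    bucket="my-bucket",         # assumed field: GCS bucket to scan
    prefix="tasks/",            # restrict the scan to this prefix
    recursive_scan=True,        # new flag: also walk nested prefixes under `prefix`
    regex_filter=r".*\.json$",  # existing regex filtering still applies
)
```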
@@ -24280,6 +24288,14 @@ client.import_storage.gcswif.validate(
+**recursive_scan:** `typing.Optional[bool]` — Perform recursive scan over the bucket content
+
 **regex_filter:** `typing.Optional[str]` — Cloud storage regex for filtering objects
@@ -24666,6 +24682,14 @@ client.import_storage.gcswif.update(
+**recursive_scan:** `typing.Optional[bool]` — Perform recursive scan over the bucket content
+
 **regex_filter:** `typing.Optional[str]` — Cloud storage regex for filtering objects
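The same flag can be toggled on an existing storage through `update` (and `validate` accepts it too); a hedged one-liner, reusing the client from the sketch above with an invented storage id:

```python
# Hypothetical storage id 1; only recursive_scan is being changed here.
client.import_storage.gcswif.update(1, recursive_scan=True)
```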
diff --git a/src/label_studio_sdk/import_storage/gcswif/client.py b/src/label_studio_sdk/import_storage/gcswif/client.py index 9fd522546..09a181e78 100644 --- a/src/label_studio_sdk/import_storage/gcswif/client.py +++ b/src/label_studio_sdk/import_storage/gcswif/client.py @@ -97,6 +97,7 @@ def create( prefix: typing.Optional[str] = OMIT, presign: typing.Optional[bool] = OMIT, presign_ttl: typing.Optional[int] = OMIT, + recursive_scan: typing.Optional[bool] = OMIT, regex_filter: typing.Optional[str] = OMIT, status: typing.Optional[StatusC5AEnum] = OMIT, synchronizable: typing.Optional[bool] = OMIT, @@ -156,6 +157,9 @@ def create( presign_ttl : typing.Optional[int] Presigned URLs TTL (in minutes) + recursive_scan : typing.Optional[bool] + Perform recursive scan over the bucket content + regex_filter : typing.Optional[str] Cloud storage regex for filtering objects @@ -211,6 +215,7 @@ def create( "presign": presign, "presign_ttl": presign_ttl, "project": project, + "recursive_scan": recursive_scan, "regex_filter": regex_filter, "status": status, "synchronizable": synchronizable, @@ -254,6 +259,7 @@ def validate( prefix: typing.Optional[str] = OMIT, presign: typing.Optional[bool] = OMIT, presign_ttl: typing.Optional[int] = OMIT, + recursive_scan: typing.Optional[bool] = OMIT, regex_filter: typing.Optional[str] = OMIT, status: typing.Optional[StatusC5AEnum] = OMIT, synchronizable: typing.Optional[bool] = OMIT, @@ -313,6 +319,9 @@ def validate( presign_ttl : typing.Optional[int] Presigned URLs TTL (in minutes) + recursive_scan : typing.Optional[bool] + Perform recursive scan over the bucket content + regex_filter : typing.Optional[str] Cloud storage regex for filtering objects @@ -367,6 +376,7 @@ def validate( "presign": presign, "presign_ttl": presign_ttl, "project": project, + "recursive_scan": recursive_scan, "regex_filter": regex_filter, "status": status, "synchronizable": synchronizable, @@ -490,6 +500,7 @@ def update( presign: typing.Optional[bool] = OMIT, presign_ttl: typing.Optional[int] = OMIT, project: typing.Optional[int] = OMIT, + recursive_scan: typing.Optional[bool] = OMIT, regex_filter: typing.Optional[str] = OMIT, status: typing.Optional[StatusC5AEnum] = OMIT, synchronizable: typing.Optional[bool] = OMIT, @@ -551,6 +562,9 @@ def update( project : typing.Optional[int] A unique integer value identifying this project. 
+ recursive_scan : typing.Optional[bool] + Perform recursive scan over the bucket content + regex_filter : typing.Optional[str] Cloud storage regex for filtering objects @@ -606,6 +620,7 @@ def update( "presign": presign, "presign_ttl": presign_ttl, "project": project, + "recursive_scan": recursive_scan, "regex_filter": regex_filter, "status": status, "synchronizable": synchronizable, @@ -769,6 +784,7 @@ async def create( prefix: typing.Optional[str] = OMIT, presign: typing.Optional[bool] = OMIT, presign_ttl: typing.Optional[int] = OMIT, + recursive_scan: typing.Optional[bool] = OMIT, regex_filter: typing.Optional[str] = OMIT, status: typing.Optional[StatusC5AEnum] = OMIT, synchronizable: typing.Optional[bool] = OMIT, @@ -828,6 +844,9 @@ async def create( presign_ttl : typing.Optional[int] Presigned URLs TTL (in minutes) + recursive_scan : typing.Optional[bool] + Perform recursive scan over the bucket content + regex_filter : typing.Optional[str] Cloud storage regex for filtering objects @@ -891,6 +910,7 @@ async def main() -> None: "presign": presign, "presign_ttl": presign_ttl, "project": project, + "recursive_scan": recursive_scan, "regex_filter": regex_filter, "status": status, "synchronizable": synchronizable, @@ -934,6 +954,7 @@ async def validate( prefix: typing.Optional[str] = OMIT, presign: typing.Optional[bool] = OMIT, presign_ttl: typing.Optional[int] = OMIT, + recursive_scan: typing.Optional[bool] = OMIT, regex_filter: typing.Optional[str] = OMIT, status: typing.Optional[StatusC5AEnum] = OMIT, synchronizable: typing.Optional[bool] = OMIT, @@ -993,6 +1014,9 @@ async def validate( presign_ttl : typing.Optional[int] Presigned URLs TTL (in minutes) + recursive_scan : typing.Optional[bool] + Perform recursive scan over the bucket content + regex_filter : typing.Optional[str] Cloud storage regex for filtering objects @@ -1055,6 +1079,7 @@ async def main() -> None: "presign": presign, "presign_ttl": presign_ttl, "project": project, + "recursive_scan": recursive_scan, "regex_filter": regex_filter, "status": status, "synchronizable": synchronizable, @@ -1194,6 +1219,7 @@ async def update( presign: typing.Optional[bool] = OMIT, presign_ttl: typing.Optional[int] = OMIT, project: typing.Optional[int] = OMIT, + recursive_scan: typing.Optional[bool] = OMIT, regex_filter: typing.Optional[str] = OMIT, status: typing.Optional[StatusC5AEnum] = OMIT, synchronizable: typing.Optional[bool] = OMIT, @@ -1255,6 +1281,9 @@ async def update( project : typing.Optional[int] A unique integer value identifying this project. + recursive_scan : typing.Optional[bool] + Perform recursive scan over the bucket content + regex_filter : typing.Optional[str] Cloud storage regex for filtering objects @@ -1318,6 +1347,7 @@ async def main() -> None: "presign": presign, "presign_ttl": presign_ttl, "project": project, + "recursive_scan": recursive_scan, "regex_filter": regex_filter, "status": status, "synchronizable": synchronizable, diff --git a/src/label_studio_sdk/types/azure_blob_import_storage.py b/src/label_studio_sdk/types/azure_blob_import_storage.py index 65cf56cb4..0a5fe833e 100644 --- a/src/label_studio_sdk/types/azure_blob_import_storage.py +++ b/src/label_studio_sdk/types/azure_blob_import_storage.py @@ -67,6 +67,11 @@ class AzureBlobImportStorage(UncheckedBaseModel): A unique integer value identifying this project. 
""" + recursive_scan: typing.Optional[bool] = pydantic.Field(default=None) + """ + Perform recursive scan over the container content + """ + regex_filter: typing.Optional[str] = pydantic.Field(default=None) """ Cloud storage regex for filtering objects diff --git a/src/label_studio_sdk/types/gcs_import_storage.py b/src/label_studio_sdk/types/gcs_import_storage.py index 9b5700f3e..d1bf42bb7 100644 --- a/src/label_studio_sdk/types/gcs_import_storage.py +++ b/src/label_studio_sdk/types/gcs_import_storage.py @@ -67,6 +67,11 @@ class GcsImportStorage(UncheckedBaseModel): A unique integer value identifying this project. """ + recursive_scan: typing.Optional[bool] = pydantic.Field(default=None) + """ + Perform recursive scan over the bucket content + """ + regex_filter: typing.Optional[str] = pydantic.Field(default=None) """ Cloud storage regex for filtering objects diff --git a/src/label_studio_sdk/types/gcswif_import_storage.py b/src/label_studio_sdk/types/gcswif_import_storage.py index 99d3204e1..b2c784a76 100644 --- a/src/label_studio_sdk/types/gcswif_import_storage.py +++ b/src/label_studio_sdk/types/gcswif_import_storage.py @@ -87,6 +87,11 @@ class GcswifImportStorage(UncheckedBaseModel): A unique integer value identifying this project. """ + recursive_scan: typing.Optional[bool] = pydantic.Field(default=None) + """ + Perform recursive scan over the bucket content + """ + regex_filter: typing.Optional[str] = pydantic.Field(default=None) """ Cloud storage regex for filtering objects diff --git a/src/label_studio_sdk/types/gcswif_import_storage_request.py b/src/label_studio_sdk/types/gcswif_import_storage_request.py index 1517240f3..eb1d1c04a 100644 --- a/src/label_studio_sdk/types/gcswif_import_storage_request.py +++ b/src/label_studio_sdk/types/gcswif_import_storage_request.py @@ -81,6 +81,11 @@ class GcswifImportStorageRequest(UncheckedBaseModel): A unique integer value identifying this project. """ + recursive_scan: typing.Optional[bool] = pydantic.Field(default=None) + """ + Perform recursive scan over the bucket content + """ + regex_filter: typing.Optional[str] = pydantic.Field(default=None) """ Cloud storage regex for filtering objects diff --git a/src/label_studio_sdk/types/local_files_import_storage.py b/src/label_studio_sdk/types/local_files_import_storage.py index 4e3cd4f9e..f9f89fb20 100644 --- a/src/label_studio_sdk/types/local_files_import_storage.py +++ b/src/label_studio_sdk/types/local_files_import_storage.py @@ -46,6 +46,11 @@ class LocalFilesImportStorage(UncheckedBaseModel): A unique integer value identifying this project. 
""" + recursive_scan: typing.Optional[bool] = pydantic.Field(default=None) + """ + Perform recursive scan over the directory content + """ + regex_filter: typing.Optional[str] = pydantic.Field(default=None) """ Regex for filtering objects diff --git a/tests/import_storage/test_azure.py b/tests/import_storage/test_azure.py index c531ff055..76a6b1901 100644 --- a/tests/import_storage/test_azure.py +++ b/tests/import_storage/test_azure.py @@ -23,6 +23,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -50,6 +51,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -83,6 +85,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -106,6 +109,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -150,6 +154,7 @@ async def test_get(client: LabelStudio, async_client: AsyncLabelStudio) -> None: "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -173,6 +178,7 @@ async def test_get(client: LabelStudio, async_client: AsyncLabelStudio) -> None: "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -217,6 +223,7 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -240,6 +247,7 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -271,6 +279,7 @@ async def test_sync(client: LabelStudio, async_client: AsyncLabelStudio) -> None "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -294,6 +303,7 @@ async def test_sync(client: LabelStudio, async_client: AsyncLabelStudio) -> None "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, diff --git a/tests/import_storage/test_gcs.py b/tests/import_storage/test_gcs.py index 0ddbda076..a54df8978 100644 --- a/tests/import_storage/test_gcs.py +++ b/tests/import_storage/test_gcs.py @@ -23,6 +23,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -50,6 +51,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non "presign": None, 
"presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -83,6 +85,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -106,6 +109,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -150,6 +154,7 @@ async def test_get(client: LabelStudio, async_client: AsyncLabelStudio) -> None: "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -173,6 +178,7 @@ async def test_get(client: LabelStudio, async_client: AsyncLabelStudio) -> None: "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -217,6 +223,7 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -240,6 +247,7 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -271,6 +279,7 @@ async def test_sync(client: LabelStudio, async_client: AsyncLabelStudio) -> None "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -294,6 +303,7 @@ async def test_sync(client: LabelStudio, async_client: AsyncLabelStudio) -> None "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, diff --git a/tests/import_storage/test_gcswif.py b/tests/import_storage/test_gcswif.py index 530f7b291..cedc92fbd 100644 --- a/tests/import_storage/test_gcswif.py +++ b/tests/import_storage/test_gcswif.py @@ -27,6 +27,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -58,6 +59,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -95,6 +97,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -122,6 +125,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -170,6 +174,7 @@ async def test_get(client: LabelStudio, async_client: AsyncLabelStudio) -> None: "presign": True, "presign_ttl": 1, "project": 1, + 
"recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -197,6 +202,7 @@ async def test_get(client: LabelStudio, async_client: AsyncLabelStudio) -> None: "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -245,6 +251,7 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -272,6 +279,7 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -307,6 +315,7 @@ async def test_sync(client: LabelStudio, async_client: AsyncLabelStudio) -> None "presign": True, "presign_ttl": 1, "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -334,6 +343,7 @@ async def test_sync(client: LabelStudio, async_client: AsyncLabelStudio) -> None "presign": None, "presign_ttl": "integer", "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, diff --git a/tests/import_storage/test_local.py b/tests/import_storage/test_local.py index 0829644b7..03dd8014d 100644 --- a/tests/import_storage/test_local.py +++ b/tests/import_storage/test_local.py @@ -18,6 +18,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non "meta": {"key": "value"}, "path": "path", "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -40,6 +41,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non "meta": None, "path": None, "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -68,6 +70,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "meta": {"key": "value"}, "path": "path", "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -86,6 +89,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No "meta": None, "path": None, "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -125,6 +129,7 @@ async def test_get(client: LabelStudio, async_client: AsyncLabelStudio) -> None: "meta": {"key": "value"}, "path": "path", "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -143,6 +148,7 @@ async def test_get(client: LabelStudio, async_client: AsyncLabelStudio) -> None: "meta": None, "path": None, "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, @@ -182,6 +188,7 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "meta": {"key": "value"}, "path": "path", "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -200,6 +207,7 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No "meta": None, "path": None, "project": "integer", + "recursive_scan": None, "regex_filter": None, 
"status": None, "synchronizable": None, @@ -226,6 +234,7 @@ async def test_sync(client: LabelStudio, async_client: AsyncLabelStudio) -> None "meta": {"key": "value"}, "path": "path", "project": 1, + "recursive_scan": True, "regex_filter": "regex_filter", "status": "initialized", "synchronizable": True, @@ -244,6 +253,7 @@ async def test_sync(client: LabelStudio, async_client: AsyncLabelStudio) -> None "meta": None, "path": None, "project": "integer", + "recursive_scan": None, "regex_filter": None, "status": None, "synchronizable": None, From 9c96857fc48364926e6eecbb52987ac1884e7fb1 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 25 Sep 2025 16:50:34 +0000 Subject: [PATCH 6/7] SDK regeneration From 48b3458394e45c6e78997795c03a2be3adb2d533 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 25 Sep 2025 17:30:53 +0000 Subject: [PATCH 7/7] SDK regeneration