diff --git a/.mock/definition/dashboard.yml b/.mock/definition/dashboard.yml
index 98417b5ec..f9dcbd342 100644
--- a/.mock/definition/dashboard.yml
+++ b/.mock/definition/dashboard.yml
@@ -21,56 +21,6 @@ types:
docs: Total number of tasks
source:
openapi: openapi/openapi.yaml
- ApiProjectsDashboardMembersRetrieveResponseStatsItem:
- docs: User statistics
- properties:
-      accepted:
-        type: optional<integer>
-        docs: Number of annotations marked as "Accepted" by reviewer
-      finished:
-        type: optional<integer>
-      gt:
-        type: optional<double>
-        docs: Average agreement with Ground Truth annotations
-      mean_time:
-        type: optional<double>
-        docs: Average mean time spent on annotation
-      median_time:
-        type: optional<double>
-        docs: Average median time spent on annotation
-      predictions:
-        type: optional<double>
-        docs: Average agreement with predictions
-      progress:
-        type: optional<integer>
-        docs: Fraction of annotation work done so far
-      rejected:
-        type: optional<integer>
-        docs: Number of annotations marked as "Rejected" by reviewer
-      review_score:
-        type: optional<double>
-        docs: >-
-          Average reviewing score, when calling with "per_label=true", returns
-          dictionary with labels to score breakdown
-      skipped:
-        type: optional<integer>
- source:
- openapi: openapi/openapi.yaml
- inline: true
- ApiProjectsDashboardMembersRetrieveResponse:
- docs: Task creation response
- properties:
-      similarity:
-        type: optional<list<map<string, unknown>>>
-        docs: Consensus statistics between different users
-      stats:
-        type: optional<list<ApiProjectsDashboardMembersRetrieveResponseStatsItem>>
-        docs: Number of annotated (completed) tasks
-      users:
-        type: optional<list<map<string, unknown>>>
-        docs: List of users
- docs: List of users
- source:
- openapi: openapi/openapi.yaml
service:
auth: false
base-path: ''
@@ -102,72 +52,5 @@ service:
tasks: 1
audiences:
- internal
- api_projects_dashboard_members_retrieve:
- path: /api/projects/{id}/dashboard-members
- method: GET
- auth: true
- docs: >-
- Get dashboard for members, including similarity matrix, user statistics
- and users list.
- source:
- openapi: openapi/openapi.yaml
- path-parameters:
- id: integer
- display-name: Get dashboard data for members
- request:
- name: ApiProjectsDashboardMembersRetrieveRequest
- query-parameters:
-          action:
-            type: optional<string>
-            docs: >-
-              All data objects will be filtered by updated_at or created_at
-              field
-          end_date:
-            type: optional<string>
-            docs: >-
-              End date for dashboard stats calculation. UTC timezone by default.
-              Use iso format (yyyy-mm-dd-hh-mm) to specify timezone.
-          per_label:
-            type: optional<boolean>
-            docs: Per label calculation
-          start_date:
-            type: optional<string>
-            docs: >-
-              Start date for dashboard stats calculation. UTC timezone by
-              default. Use iso format (yyyy-mm-dd-hh-mm) to specify timezone.
-          updated_by:
-            type: optional<integer>
-            docs: >-
-              All task objects will be filtered by updated_by field. Only tasks
-              that were updated by concrete user will be taken in account.
-          use_kappa:
-            type: optional<boolean>
-            docs: Use kappa statistics for calculation
- response:
- docs: Successful response returns project-related annotation statistics
- type: ApiProjectsDashboardMembersRetrieveResponse
- availability: deprecated
- examples:
- - path-parameters:
- id: 1
- response:
- body:
- similarity:
- - key: value
- stats:
- - accepted: 1
- finished: 1
- gt: 1.1
- mean_time: 1.1
- median_time: 1.1
- predictions: 1.1
- progress: 1
- rejected: 1
- review_score: 1.1
- skipped: 1
- users:
- - key: value
- audiences:
- - internal
source:
openapi: openapi/openapi.yaml
diff --git a/.mock/definition/projects/stats.yml b/.mock/definition/projects/stats.yml
index 600f92a50..e9c628440 100644
--- a/.mock/definition/projects/stats.yml
+++ b/.mock/definition/projects/stats.yml
@@ -175,6 +175,29 @@ types:
- StatsTotalAgreementResponseOne
source:
openapi: openapi/openapi.yaml
+ StatsUsersPredictionAgreementResponseAgreementValue:
+ discriminated: false
+ union:
+ - type: double
+ docs: >-
+ Average prediction agreement score for the user (0-1) when
+ per_label=False
+        - type: map<string, double>
+ docs: >-
+ Average prediction agreement score per label for the user (0-1) when
+ per_label=True
+ source:
+ openapi: openapi/openapi.yaml
+ inline: true
+ StatsUsersPredictionAgreementResponse:
+ properties:
+        agreement:
+          type: >-
+            optional<map<string,
+            StatsUsersPredictionAgreementResponseAgreementValue>>
+          docs: Dictionary mapping user IDs to their prediction agreement scores
Get prediction agreement statistics for a specific user within a project.
- operationId: api_projects_user_stats_prediction_retrieve
+ operationId: api_projects_user_stats_prediction_retrieve_2
parameters:
- in: path
name: id
diff --git a/reference.md b/reference.md
index d6018ab86..bb00daf16 100644
--- a/reference.md
+++ b/reference.md
@@ -31767,7 +31767,7 @@ client.projects.stats.agreement_annotators(
-
-**ids:** `str` — Comma-separated list of annotator user IDs to get agreement scores for
+**ids:** `str` — Comma separated list of annotator user IDs to get agreement scores for
@@ -32165,6 +32165,99 @@ client.projects.stats.update_stats(
+
+
+
+
+client.projects.stats.users_prediction_agreement(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+
+
+
+ This endpoint is not available in Label Studio Community Edition. [Learn more about Label Studio Enterprise](https://humansignal.com/goenterprise)
+
+
+Get prediction agreement statistics for multiple annotators within a project.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from label_studio_sdk import LabelStudio
+
+client = LabelStudio(
+ api_key="YOUR_API_KEY",
+)
+client.projects.stats.users_prediction_agreement(
+ id=1,
+ ids="ids",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `int`
+
+
+
+
+
+-
+
+**ids:** `str` — Comma separated list of annotator user IDs to get agreement scores for
+
+
+
+
+
+-
+
+**per_label:** `typing.Optional[bool]` — Per label
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
diff --git a/src/label_studio_sdk/projects/__init__.py b/src/label_studio_sdk/projects/__init__.py
index 7242173d3..5f563c086 100644
--- a/src/label_studio_sdk/projects/__init__.py
+++ b/src/label_studio_sdk/projects/__init__.py
@@ -53,6 +53,8 @@
StatsUserReviewScoreResponse,
StatsUserReviewScoreResponsePerformanceScore,
StatsUserReviewScoreResponseReviewScore,
+ StatsUsersPredictionAgreementResponse,
+ StatsUsersPredictionAgreementResponseAgreementValue,
)
__all__ = [
@@ -103,6 +105,8 @@
"StatsUserReviewScoreResponse",
"StatsUserReviewScoreResponsePerformanceScore",
"StatsUserReviewScoreResponseReviewScore",
+ "StatsUsersPredictionAgreementResponse",
+ "StatsUsersPredictionAgreementResponseAgreementValue",
"assignments",
"exports",
"members",
diff --git a/src/label_studio_sdk/projects/stats/__init__.py b/src/label_studio_sdk/projects/stats/__init__.py
index 8bda68459..9ec1b022b 100644
--- a/src/label_studio_sdk/projects/stats/__init__.py
+++ b/src/label_studio_sdk/projects/stats/__init__.py
@@ -26,6 +26,8 @@
StatsUserReviewScoreResponse,
StatsUserReviewScoreResponsePerformanceScore,
StatsUserReviewScoreResponseReviewScore,
+ StatsUsersPredictionAgreementResponse,
+ StatsUsersPredictionAgreementResponseAgreementValue,
)
__all__ = [
@@ -54,4 +56,6 @@
"StatsUserReviewScoreResponse",
"StatsUserReviewScoreResponsePerformanceScore",
"StatsUserReviewScoreResponseReviewScore",
+ "StatsUsersPredictionAgreementResponse",
+ "StatsUsersPredictionAgreementResponseAgreementValue",
]
diff --git a/src/label_studio_sdk/projects/stats/client.py b/src/label_studio_sdk/projects/stats/client.py
index f1556c847..cd1beecff 100644
--- a/src/label_studio_sdk/projects/stats/client.py
+++ b/src/label_studio_sdk/projects/stats/client.py
@@ -17,6 +17,7 @@
from .types.stats_finished_tasks_response import StatsFinishedTasksResponse
from .types.stats_lead_time_response import StatsLeadTimeResponse
from .types.stats_total_agreement_response import StatsTotalAgreementResponse
+from .types.stats_users_prediction_agreement_response import StatsUsersPredictionAgreementResponse
from .types.stats_user_prediction_agreement_response import StatsUserPredictionAgreementResponse
from .types.stats_user_review_score_response import StatsUserReviewScoreResponse
from .types.stats_user_ground_truth_agreement_response import StatsUserGroundTruthAgreementResponse
@@ -373,7 +374,7 @@ def agreement_annotators(
id : int
ids : str
- Comma-separated list of annotator user IDs to get agreement scores for
+ Comma separated list of annotator user IDs to get agreement scores for
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -689,6 +690,76 @@ def update_stats(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def users_prediction_agreement(
+ self,
+ id: int,
+ *,
+ ids: str,
+ per_label: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> StatsUsersPredictionAgreementResponse:
+ """
+
+
+
+ This endpoint is not available in Label Studio Community Edition. [Learn more about Label Studio Enterprise](https://humansignal.com/goenterprise)
+
+
+ Get prediction agreement statistics for multiple annotators within a project.
+
+ Parameters
+ ----------
+ id : int
+
+ ids : str
+ Comma separated list of annotator user IDs to get agreement scores for
+
+ per_label : typing.Optional[bool]
+ Per label
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ StatsUsersPredictionAgreementResponse
+ Prediction agreement statistics for multiple annotators
+
+ Examples
+ --------
+ from label_studio_sdk import LabelStudio
+
+ client = LabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ client.projects.stats.users_prediction_agreement(
+ id=1,
+ ids="ids",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"api/projects/{jsonable_encoder(id)}/user-stats/prediction",
+ method="GET",
+ params={
+ "ids": ids,
+ "per_label": per_label,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ StatsUsersPredictionAgreementResponse,
+ construct_type(
+ type_=StatsUsersPredictionAgreementResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def user_prediction_agreement(
self,
id: int,
@@ -1284,7 +1355,7 @@ async def agreement_annotators(
id : int
ids : str
- Comma-separated list of annotator user IDs to get agreement scores for
+ Comma separated list of annotator user IDs to get agreement scores for
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1650,6 +1721,84 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def users_prediction_agreement(
+ self,
+ id: int,
+ *,
+ ids: str,
+ per_label: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> StatsUsersPredictionAgreementResponse:
+ """
+
+
+
+ This endpoint is not available in Label Studio Community Edition. [Learn more about Label Studio Enterprise](https://humansignal.com/goenterprise)
+
+
+ Get prediction agreement statistics for multiple annotators within a project.
+
+ Parameters
+ ----------
+ id : int
+
+ ids : str
+ Comma separated list of annotator user IDs to get agreement scores for
+
+ per_label : typing.Optional[bool]
+ Per label
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ StatsUsersPredictionAgreementResponse
+ Prediction agreement statistics for multiple annotators
+
+ Examples
+ --------
+ import asyncio
+
+ from label_studio_sdk import AsyncLabelStudio
+
+ client = AsyncLabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.projects.stats.users_prediction_agreement(
+ id=1,
+ ids="ids",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"api/projects/{jsonable_encoder(id)}/user-stats/prediction",
+ method="GET",
+ params={
+ "ids": ids,
+ "per_label": per_label,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ StatsUsersPredictionAgreementResponse,
+ construct_type(
+ type_=StatsUsersPredictionAgreementResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
async def user_prediction_agreement(
self,
id: int,
diff --git a/src/label_studio_sdk/projects/stats/types/__init__.py b/src/label_studio_sdk/projects/stats/types/__init__.py
index 770c7e1fb..880b95f3f 100644
--- a/src/label_studio_sdk/projects/stats/types/__init__.py
+++ b/src/label_studio_sdk/projects/stats/types/__init__.py
@@ -27,6 +27,10 @@
from .stats_user_review_score_response import StatsUserReviewScoreResponse
from .stats_user_review_score_response_performance_score import StatsUserReviewScoreResponsePerformanceScore
from .stats_user_review_score_response_review_score import StatsUserReviewScoreResponseReviewScore
+from .stats_users_prediction_agreement_response import StatsUsersPredictionAgreementResponse
+from .stats_users_prediction_agreement_response_agreement_value import (
+ StatsUsersPredictionAgreementResponseAgreementValue,
+)
__all__ = [
"StatsAgreementAnnotatorResponse",
@@ -54,4 +58,6 @@
"StatsUserReviewScoreResponse",
"StatsUserReviewScoreResponsePerformanceScore",
"StatsUserReviewScoreResponseReviewScore",
+ "StatsUsersPredictionAgreementResponse",
+ "StatsUsersPredictionAgreementResponseAgreementValue",
]
diff --git a/src/label_studio_sdk/projects/stats/types/stats_users_prediction_agreement_response.py b/src/label_studio_sdk/projects/stats/types/stats_users_prediction_agreement_response.py
new file mode 100644
index 000000000..bc431ddeb
--- /dev/null
+++ b/src/label_studio_sdk/projects/stats/types/stats_users_prediction_agreement_response.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ....core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .stats_users_prediction_agreement_response_agreement_value import (
+ StatsUsersPredictionAgreementResponseAgreementValue,
+)
+import pydantic
+from ....core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class StatsUsersPredictionAgreementResponse(UncheckedBaseModel):
+ agreement: typing.Optional[typing.Dict[str, StatsUsersPredictionAgreementResponseAgreementValue]] = pydantic.Field(
+ default=None
+ )
+ """
+ Dictionary mapping user IDs to their prediction agreement scores
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/label_studio_sdk/projects/stats/types/stats_users_prediction_agreement_response_agreement_value.py b/src/label_studio_sdk/projects/stats/types/stats_users_prediction_agreement_response_agreement_value.py
new file mode 100644
index 000000000..7bc134f98
--- /dev/null
+++ b/src/label_studio_sdk/projects/stats/types/stats_users_prediction_agreement_response_agreement_value.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+StatsUsersPredictionAgreementResponseAgreementValue = typing.Union[float, typing.Dict[str, float]]
diff --git a/tests/projects/test_stats.py b/tests/projects/test_stats.py
index a485a8fca..933fc18ea 100644
--- a/tests/projects/test_stats.py
+++ b/tests/projects/test_stats.py
@@ -144,6 +144,16 @@ async def test_update_stats(client: LabelStudio, async_client: AsyncLabelStudio)
validate_response(async_response, expected_response, expected_types)
+async def test_users_prediction_agreement(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
+ expected_response: typing.Any = {"agreement": {"key": 1.1}}
+ expected_types: typing.Any = {"agreement": ("dict", {0: (None, None)})}
+ response = client.projects.stats.users_prediction_agreement(id=1, ids="ids")
+ validate_response(response, expected_response, expected_types)
+
+ async_response = await async_client.projects.stats.users_prediction_agreement(id=1, ids="ids")
+ validate_response(async_response, expected_response, expected_types)
+
+
async def test_user_prediction_agreement(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
expected_response: typing.Any = {"average_prediction_agreement_per_user": 1.1}
expected_types: typing.Any = {"average_prediction_agreement_per_user": None}