
Commit

Fix
lewtun committed Nov 3, 2022
1 parent 2a56560 commit 06fb50b
Showing 2 changed files with 3 additions and 3 deletions.
2 changes: 1 addition & 1 deletion src/huggingface_hub/repocard.py
@@ -576,7 +576,7 @@ def metadata_eval_result(
             The name of the metric configuration used in `load_metric()`.
             Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
         metrics_verified (`bool`, *optional*, defaults to `False`):
-            Indicates whether the metrics have originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
+            Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
         dataset_config (`str`, *optional*):
             Example: fr. The name of the dataset configuration used in `load_dataset()`.
         dataset_split (`str`, *optional*):
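For context, the docstring edited above belongs to `metadata_eval_result()`, which builds the `model-index` metadata for a model card. Below is a minimal usage sketch; the identifier keyword arguments (`model_pretty_name`, `task_id`, `metrics_id`, `dataset_id`, and so on) are assumed from the library's documented example and may differ between versions, and `metrics_verified` is left at its default because, as the docstring says, it is computed by Hugging Face and should not be set.

```python
# Minimal sketch, not taken from this repository's tests. Keyword names below are
# assumed from huggingface_hub's documented example; check them against your version.
from huggingface_hub import metadata_eval_result

metadata = metadata_eval_result(
    model_pretty_name="My fine-tuned model",
    task_pretty_name="Text Classification",
    task_id="text-classification",          # task identifier on the Hub
    metrics_pretty_name="Accuracy",
    metrics_id="accuracy",                  # metric name as used in `load_metric()`
    metrics_value=0.91,
    dataset_pretty_name="IMDb",
    dataset_id="imdb",                      # dataset name as used in `load_dataset()`
    dataset_config="plain_text",            # dataset configuration, e.g. "fr" above
    dataset_split="test",
    # metrics_verified is not passed: it is computed automatically by Hugging Face.
)
print(metadata)  # nested dict ready to be merged into a model card's YAML metadata
```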
4 changes: 2 additions & 2 deletions src/huggingface_hub/repocard_data.py
@@ -51,7 +51,7 @@ class EvalResult:
         metric_args (`Dict[str, Any]`, *optional*):
             The arguments passed during `Metric.compute()`. Example for `bleu`: max_order: 4
         verified (`bool`, *optional*):
-            Indicates whether the metrics have originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
+            Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
         verifyToken (`str`, *optional*):
             A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not.
     """
@@ -115,7 +115,7 @@ class EvalResult:
     # Example for `bleu`: max_order: 4
     metric_args: Optional[Dict[str, Any]] = None

-    # Indicates whether the metrics have originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
+    # Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
     verified: Optional[bool] = None

     # A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not.
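The same `verified` wording also appears on the `EvalResult` dataclass that backs the structured model card metadata. The snippet below is an illustrative sketch of constructing one result by hand, assuming the commonly documented field names (`task_type`, `dataset_type`, `dataset_name`, `metric_type`, `metric_value`); `verified` and the verification token are left unset because they originate from Hugging Face's evaluation service rather than from users.

```python
# Illustrative sketch only; field names are assumed from the EvalResult docstring
# excerpted above and from the library's documentation, not from this diff.
from huggingface_hub.repocard_data import EvalResult

result = EvalResult(
    task_type="translation",        # task identifier, e.g. from the Hub taxonomy
    dataset_type="wmt16",           # dataset identifier as used in `load_dataset()`
    dataset_name="WMT16",           # human-readable dataset name
    metric_type="bleu",             # metric identifier as used in `load_metric()`
    metric_value=35.2,
    metric_args={"max_order": 4},   # mirrors the `bleu` example in the docstring
    # verified / verifyToken are omitted: they are set by the evaluation service.
)
print(result.verified)  # None until Hugging Face's evaluation service fills it in
```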
