Skip to content

Commit

Permalink
Merge pull request #3 from cooelf/main
Browse files Browse the repository at this point in the history
update readme & argument helps
  • Loading branch information
cooelf authored Jan 22, 2023
2 parents ea58f8d + e3ec5f5 commit ec9caa3
Show file tree
Hide file tree
Showing 3 changed files with 9 additions and 9 deletions.
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Auto-CoT: Automatic Chain of Thought Prompting in Large Language Models
# Auto-CoT: Automatic Chain of Thought Prompting in Large Language Models (ICLR 2023)

[![Open Auto-CoT in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/amazon-science/auto-cot/blob/main/try_cot_colab.ipynb)

Expand Down Expand Up @@ -56,11 +56,11 @@ python run_inference.py \

## Citing Auto-CoT
```
@article{zhang2022automatic,
@inproceedings{zhang2023automatic,
title={Automatic Chain of Thought Prompting in Large Language Models},
author={Zhang, Zhuosheng and Zhang, Aston and Li, Mu and Smola, Alex},
journal={arXiv preprint arXiv:2210.03493},
year={2022}
booktitle={The Eleventh International Conference on Learning Representations (ICLR 2023)},
year={2023}
}
```

Expand Down
2 changes: 1 addition & 1 deletion run_demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ def parse_arguments():
)
parser.add_argument("--random_seed", type=int, default=192, help="random seed")
parser.add_argument(
"--encoder", type=str, default="all-MiniLM-L6-v2", help="where to save the contructed demonstrations"
        "--encoder", type=str, default="all-MiniLM-L6-v2", help="which sentence-transformer encoder to use for clustering"
)
parser.add_argument(
"--sampling", type=str, default="center", help="whether to sample the cluster center first"
Expand Down
8 changes: 4 additions & 4 deletions run_inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,10 +118,10 @@ def parse_arguments():
"--dataset", type=str, default="multiarith", choices=["aqua", "gsm8k", "commonsensqa", "addsub", "multiarith", "strategyqa", "svamp", "singleeq", "coin_flip", "last_letters"], help="dataset used for experiment"
)
parser.add_argument(
"--demo_path", type=str, default="demos/multiarith", help="dataset used for experiment"
"--demo_path", type=str, default="demos/multiarith", help="pre-generated demos used for experiment"
)
parser.add_argument(
"--resume_id", type=int, default=0, help="whether to limit test dataset size. if 0, the dataset size is unlimited and we use all the samples in the dataset for testing."
        "--resume_id", type=int, default=0, help="resume from which question id (the current line number in the output file) if the experiment fails accidentally (e.g., due to a network error)"
)
parser.add_argument("--minibatch_size", type=int, default=1, choices=[1], help="minibatch size should be 1 because GPT-3 API takes only 1 input for each request")

Expand All @@ -147,10 +147,10 @@ def parse_arguments():
"--limit_dataset_size", type=int, default=0, help="whether to limit test dataset size. if 0, the dataset size is unlimited and we use all the samples in the dataset for testing."
)
parser.add_argument(
"--api_time_interval", type=float, default=1.0, help=""
        "--api_time_interval", type=float, default=1.0, help="sleep between requests to avoid exceeding the rate limit of the OpenAI API"
)
parser.add_argument(
"--temperature", type=float, default=0, help=""
"--temperature", type=float, default=0, help="temperature for GPT-3"
)
parser.add_argument(
"--log_dir", type=str, default="./log/", help="log directory"
Expand Down

0 comments on commit ec9caa3

Please sign in to comment.