From 19d53d59e75ad6d6a7d1455e4069ddc0e75ab387 Mon Sep 17 00:00:00 2001
From: Daniel Korat
Date: Tue, 30 Aug 2022 01:41:46 -0700
Subject: [PATCH 1/5] templates for `SetFit/SentEval-CR`

---
 .../SetFit/SentEval-CR/templates.yaml         | 176 ++++++++++++++++++
 1 file changed, 176 insertions(+)
 create mode 100644 promptsource/templates/SetFit/SentEval-CR/templates.yaml

diff --git a/promptsource/templates/SetFit/SentEval-CR/templates.yaml b/promptsource/templates/SetFit/SentEval-CR/templates.yaml
new file mode 100644
index 000000000..432e7c3ef
--- /dev/null
+++ b/promptsource/templates/SetFit/SentEval-CR/templates.yaml
@@ -0,0 +1,176 @@
+dataset: SentEval-CR
+templates:
+  3cb16f5d-1953-480c-bdea-785aa2d6aa34: !Template
+    answer_choices: Negative ||| Positive
+    id: 3cb16f5d-1953-480c-bdea-785aa2d6aa34
+    jinja: 'Review: {{text}}
+
+      Is the review positive or negative? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is_this_review
+    reference: ''
+  f32fb361-f9c5-42fa-b6ae-0af60fd00e6a: !Template
+    answer_choices: No ||| Yes
+    id: f32fb361-f9c5-42fa-b6ae-0af60fd00e6a
+    jinja: 'Based on this review, would the user recommend this product?
+
+      ===
+
+      Review: {{text}}
+
+      Answer: |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: User_recommend_this_product
+    reference: 'Reformulation equivalent to sentiment analysis: would the user recommend
+      this product?'
+  374e0086-077c-4be2-b533-e41d662cff5c: !Template
+    answer_choices: No ||| Yes
+    id: 374e0086-077c-4be2-b533-e41d662cff5c
+    jinja: 'Is this product review positive?
+
+      Review: {{text}}
+
+      Answer: |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is_this_product_review_positive
+    reference: ''
+  3798de66-71c3-4264-9910-72cc4f6546c0: !Template
+    answer_choices: Yes ||| No
+    id: 3798de66-71c3-4264-9910-72cc4f6546c0
+    jinja: 'Review: {{text}}
+
+      Is this product review negative? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is_this_review_negative
+    reference: ''
+  5a3bd9c8-f95d-4617-bdbd-15a46a810dcc: !Template
+    answer_choices: Negative ||| Positive
+    id: 5a3bd9c8-f95d-4617-bdbd-15a46a810dcc
+    jinja: 'Review: {{text}}
+
+      Does this product review convey a negative or positive sentiment? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: convey_negative_or_positive_sentiment
+    reference: ''
+  5dda0a63-495f-4c65-8270-d3fb712d027b: !Template
+    answer_choices: Negative ||| Positive
+    id: 5dda0a63-495f-4c65-8270-d3fb712d027b
+    jinja: 'Is there a negative or positive tone to this product review?
+
+      ===
+
+      Review: {{text}}
+
+      Answer: |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: negative_or_positive_tone
+    reference: ''
+  13353be3-aa37-490a-b9cb-253ab119b8e9: !Template
+    answer_choices: dissatisfied ||| satisfied
+    id: 13353be3-aa37-490a-b9cb-253ab119b8e9
+    jinja: 'Here is a review left by a customer on a product. Would you say he was
+      {{answer_choices[1]}} or {{answer_choices[0]}}?
+
+      Review: {{text}}
+
+      |||
+
+      {{answer_choices[label]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: user_satisfied
+    reference: ''
+  d0517853-707c-46f6-80ff-2e8904e8657f: !Template
+    answer_choices: decrease ||| increase
+    id: d0517853-707c-46f6-80ff-2e8904e8657f
+    jinja: 'You are considering whether to buy a product. You look at the reviews.
+      Would the following review {{answer_choices[0]}} or {{answer_choices[1]}} the
+      chances of you buying the product?
+
+      Product review: {{text}}
+
+      |||
+
+      {{answer_choices[label]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: would_you_buy
+    reference: ''
+  c8aef874-4f2a-4382-9d3d-96fc52d3dba2: !Template
+    answer_choices: unflattering ||| flattering
+    id: c8aef874-4f2a-4382-9d3d-96fc52d3dba2
+    jinja: 'Product review: {{text}}
+
+      Would you say this review depicts the product in a {{answer_choices[1]}} or
+      {{answer_choices[0]}} light?
+
+      |||
+
+      {{answer_choices[label]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: flattering_or_not
+    reference: ''

From 248b0548c91180b339d6e5268a9a94a3ffc30bd7 Mon Sep 17 00:00:00 2001
From: Daniel Korat
Date: Thu, 8 Sep 2022 07:04:34 -0700
Subject: [PATCH 2/5] templates for `sst5`, `enron_spam` and `amazon_cf`

---
 .../amazon_counterfactual_en/templates.yaml   | 114 ++++++++++++++++
 .../SetFit/enron_spam/templates.yaml          |  75 +++++++++++
 .../templates/SetFit/sst5/templates.yaml      | 123 ++++++++++++++++++
 3 files changed, 312 insertions(+)
 create mode 100644 promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml
 create mode 100644 promptsource/templates/SetFit/enron_spam/templates.yaml
 create mode 100644 promptsource/templates/SetFit/sst5/templates.yaml

diff --git a/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml b/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml
new file mode 100644
index 000000000..ce772f243
--- /dev/null
+++ b/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml
@@ -0,0 +1,114 @@
+dataset: amazon_counterfactual_en
+templates:
+  6eb62aee-a983-4571-9d49-9836e685ee93: !Template
+    answer_choices: Yes ||| No
+    id: 6eb62aee-a983-4571-9d49-9836e685ee93
+    jinja: "{{text}} Is the statement factual? \n|||\n\
+      {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: is_factual
+    reference: ''
+  6fc17a35-e1a3-4b5d-87d5-91d0fdf42d58: !Template
+    answer_choices: Yes ||| No
+    id: 6fc17a35-e1a3-4b5d-87d5-91d0fdf42d58
+    jinja: "{{text}} Does the statement describe a fact? \n|||\n\
+      {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: describe_fact
+    reference: ''
+  e36e9d87-5366-4070-b95d-51ff5a890f4b: !Template
+    answer_choices: non-counterfactual ||| counterfactual
+    id: e36e9d87-5366-4070-b95d-51ff5a890f4b
+    jinja: "{{text}} Is the statement non-counterfactual or counterfactual? \n|||\n\
+      {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choice_text_before
+    reference: ''
+  044ee01d-a06d-47b6-9872-6c89f785a961: !Template
+    answer_choices: No ||| Yes
+    id: 044ee01d-a06d-47b6-9872-6c89f785a961
+    jinja: "{{text}} Is the statement counterfactual? \n|||\n\
+      {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: is_counterfactual
+    reference: ''
+  20c40ee7-ba1e-4e65-bb55-1c7b4e4cbcf7: !Template
+    answer_choices: No ||| Yes
+    id: 20c40ee7-ba1e-4e65-bb55-1c7b4e4cbcf7
+    jinja: "{{text}} Does the sentence express an event that did not happen? \n|||\n\
+      {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: did_not_happen
+    reference: ''
+  920ace8e-e063-4edf-b4f6-ac1a1e9f8559: !Template
+    answer_choices: Yes ||| No
+    id: 920ace8e-e063-4edf-b4f6-ac1a1e9f8559
+    jinja: "{{text}} Does this describe an actual event? \n|||\n\
+      {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: actual_event
+    reference: ''
+  9e2a56f5-a11d-497c-b02b-2ccc3e760503: !Template
+    answer_choices: Yes ||| No
+    id: 9e2a56f5-a11d-497c-b02b-2ccc3e760503
+    jinja: "{{text}} Does the sentence contain events that did not or cannot take place? \n|||\n\
+      {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: take_place
+    reference: ''
+  40c0007f-78be-43d0-80c8-df22d37ee64b: !Template
+    answer_choices: non-counterfactual ||| counterfactual
+    id: 40c0007f-78be-43d0-80c8-df22d37ee64b
+    jinja: "Is the label for the following sentence non-counterfactual or counterfactual? {{text}} \n|||\n\
+      {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choice_text_after
+    reference: ''
\ No newline at end of file
diff --git a/promptsource/templates/SetFit/enron_spam/templates.yaml b/promptsource/templates/SetFit/enron_spam/templates.yaml
new file mode 100644
index 000000000..6f37e683b
--- /dev/null
+++ b/promptsource/templates/SetFit/enron_spam/templates.yaml
@@ -0,0 +1,75 @@
+dataset: enron_spam
+templates:
+  9a9d877c-aeb1-4808-868d-47ac9627f333: !Template
+    answer_choices: not spam ||| spam
+    id: 9a9d877c-aeb1-4808-868d-47ac9627f333
+    jinja: "What is the spam label for the following email message? {{text}} \n|||\n\
+      {{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: spam_label
+    reference: ''
+  59f74c9b-909f-4fe7-b822-334184a51d3f: !Template
+    answer_choices: True ||| False
+    id: 59f74c9b-909f-4fe7-b822-334184a51d3f
+    jinja: "Is this email message considered {{\"ham\"}} (i.e. not spam)? \n{{text}}\n\
+      |||\n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: ham_True_False
+    reference: ''
+  29cc5489-e0d7-4c41-8bc2-a1735f91ca95: !Template
+    answer_choices: ham ||| spam
+    id: 29cc5489-e0d7-4c41-8bc2-a1735f91ca95
+    jinja: 'Is the label for the following email message {{"ham"}} (not spam) or {{"spam"}}?
+      {{text}}
+
+      |||
+
+      {{ answer_choices [label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: is_the_label
+    reference: ''
+  58a62a45-4cf7-4d57-aa70-0f8fb0a3043c: !Template
+    answer_choices: not spam ||| spam
+    id: 58a62a45-4cf7-4d57-aa70-0f8fb0a3043c
+    jinja: "Should the following email message be marked as \"spam\" or \"not spam\"\
+      ? {{text}} \n|||\n{{ answer_choices [label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: marked_as
+    reference: ''
+  5e7a18eb-8692-4312-b496-67d1e64f91fe: !Template
+    answer_choices: False ||| True
+    id: 5e7a18eb-8692-4312-b496-67d1e64f91fe
+    jinja: "Is this email message considered {{\"spam\"}}? \n{{text}}\n|||\n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: spam_True_False
+    reference: ''
\ No newline at end of file
diff --git a/promptsource/templates/SetFit/sst5/templates.yaml b/promptsource/templates/SetFit/sst5/templates.yaml
new file mode 100644
index 000000000..c7ed8649c
--- /dev/null
+++ b/promptsource/templates/SetFit/sst5/templates.yaml
@@ -0,0 +1,123 @@
+dataset: sst5
+templates:
+  b969303e-d0ab-4ba5-ba0d-9a364b495313: !Template
+    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    id: b969303e-d0ab-4ba5-ba0d-9a364b495313
+    jinja: '{{ text }}
+
+      So I would rate it ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: so_i_would
+    reference: ''
+  157770ee-e4d5-4e1b-b4bc-5ddd78e0f057: !Template
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
+    id: 157770ee-e4d5-4e1b-b4bc-5ddd78e0f057
+    jinja: '{{ text }}
+
+      ===
+
+      Based on that, my rating is ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based_on_that
+    reference: ''
+  7c5734b2-c05c-4bb0-a409-63efaed7ec7e: !Template
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
+    id: 7c5734b2-c05c-4bb0-a409-63efaed7ec7e
+    jinja: 'Review text:
+
+      {{ text }}
+
+
+      Stars: |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: format_star
+    reference: simulating webpage
+  b262b6a5-8b0e-4be2-bf05-1b34ae9ee757: !Template
+    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    id: b262b6a5-8b0e-4be2-bf05-1b34ae9ee757
+    jinja: '{{ text }} My rating for this place is ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: this_place
+    reference: ''
+  02b9e30e-b096-4ce9-b621-8ceb1dc24aa6: !Template
+    answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+    id: 02b9e30e-b096-4ce9-b621-8ceb1dc24aa6
+    jinja: 'Review text:
+
+      {{ text }}
+
+
+      Review score (between 1 and 5): |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: format_score
+    reference: Simulating webpage
+  08b34f5d-a195-428a-ad7f-4f6a56790e9c: !Template
+    answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+    id: 08b34f5d-a195-428a-ad7f-4f6a56790e9c
+    jinja: 'Review: {{text}}
+
+      On a scale of 1 to 5, I would give this product ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: on_a_scale
+    reference: ''
+  7652def8-ce5a-4de6-a381-32f77a9596d5: !Template
+    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    id: 7652def8-ce5a-4de6-a381-32f77a9596d5
+    jinja: 'Review text:
+
+      {{ text }}
+
+
+      Review rating: |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: format_rating
+    reference: It's simulating the format of a webpage.

From 5c23a6faadc3a954fe48b1f381e38b127082bc7c Mon Sep 17 00:00:00 2001
From: Daniel Korat
Date: Tue, 13 Sep 2022 02:12:09 -0700
Subject: [PATCH 3/5] updated sst5 templates

---
 promptsource/templates/SetFit/sst5/templates.yaml | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/promptsource/templates/SetFit/sst5/templates.yaml b/promptsource/templates/SetFit/sst5/templates.yaml
index c7ed8649c..43db1ff72 100644
--- a/promptsource/templates/SetFit/sst5/templates.yaml
+++ b/promptsource/templates/SetFit/sst5/templates.yaml
 dataset: sst5
 templates:
   b969303e-d0ab-4ba5-ba0d-9a364b495313: !Template
-    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
     id: b969303e-d0ab-4ba5-ba0d-9a364b495313
     jinja: '{{ text }}
 
       So I would rate it ||| {{ answer_choices[label] }}'
     metadata: !TemplateMetadata
   7c5734b2-c05c-4bb0-a409-63efaed7ec7e: !Template
     answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
     id: 7c5734b2-c05c-4bb0-a409-63efaed7ec7e
     jinja: 'Review text:
 
       {{ text }}
 
 
-      Stars: |||
+      Score: |||
 
       {{ answer_choices[label] }}'
     metadata: !TemplateMetadata
     name: format_star
     reference: simulating webpage
   b262b6a5-8b0e-4be2-bf05-1b34ae9ee757: !Template
-    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
     id: b262b6a5-8b0e-4be2-bf05-1b34ae9ee757
-    jinja: '{{ text }} My rating for this place is ||| {{ answer_choices[label] }}'
+    jinja: '{{ text }} My opinion of this movie is ||| {{ answer_choices[label] }}'
     metadata: !TemplateMetadata
       choices_in_prompt: false
       languages:
       - en
       metrics:
       - Accuracy
       original_task: true
-    name: this_place
+    name: this_movie
     reference: ''
   08b34f5d-a195-428a-ad7f-4f6a56790e9c: !Template
     answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
     id: 08b34f5d-a195-428a-ad7f-4f6a56790e9c
     jinja: 'Review: {{text}}
 
-      On a scale of 1 to 5, I would give this product ||| {{ answer_choices[label]
+      On a scale of 1 to 5, I would give this movie ||| {{ answer_choices[label]
       }}'
     metadata: !TemplateMetadata
     name: on_a_scale
     reference: ''
   7652def8-ce5a-4de6-a381-32f77a9596d5: !Template
-    answer_choices: 1 star ||| 2 stars ||| 3 stars ||| 4 stars ||| 5 stars
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
     id: 7652def8-ce5a-4de6-a381-32f77a9596d5
     jinja: 'Review text:
 
       {{ text }}
 
 
       Review rating: |||
 
       {{ answer_choices[label] }}'
     metadata: !TemplateMetadata

From 45de55bb2f2d41e49b5ac7ce9174f57ba42e907e Mon Sep 17 00:00:00 2001
From: Daniel Korat
Date: Thu, 15 Sep 2022 05:12:51 -0700
Subject: [PATCH 4/5] additional templates for sst5

---
 .../templates/SetFit/sst5/templates.yaml      | 26 +++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/promptsource/templates/SetFit/sst5/templates.yaml b/promptsource/templates/SetFit/sst5/templates.yaml
index 43db1ff72..30c42ddac 100644
--- a/promptsource/templates/SetFit/sst5/templates.yaml
+++ b/promptsource/templates/SetFit/sst5/templates.yaml
     original_task: true
     name: format_rating
     reference: It's simulating the format of a webpage.
+  3798de66-71c3-4264-9910-72cc4f6546c2: !Template
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
+    id: 3798de66-71c3-4264-9910-72cc4f6546c2
+    jinja: 'How do you feel about the following sentence? {{ text }} ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: feel
+    reference: ''
+  5dda0a63-495f-4c65-8270-d3fb712d027c: !Template
+    answer_choices: terrible ||| bad ||| okay ||| good ||| great
+    id: 5dda0a63-495f-4c65-8270-d3fb712d027c
+    jinja: '{{ text }} This movie is a very ||| {{ answer_choices[label] }} one'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: this_movie_is_a_very
+    reference: ''
\ No newline at end of file

From fe28717272bb0a75c38267c4d1ee6b001a44242e Mon Sep 17 00:00:00 2001
From: Daniel Korat
Date: Thu, 22 Sep 2022 08:14:12 +0300
Subject: [PATCH 5/5] Update templates.yaml

---
 .../amazon_counterfactual_en/templates.yaml | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml b/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml
index ce772f243..5314fc635 100644
--- a/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml
+++ b/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml
 dataset: amazon_counterfactual_en
 templates:
   6eb62aee-a983-4571-9d49-9836e685ee93: !Template
     answer_choices: Yes ||| No
     id: 6eb62aee-a983-4571-9d49-9836e685ee93
-    jinja: "{{text}} Is the statement factual? \n|||\n\
+    jinja: "{{text}} Is the statement factual? \n|||\n
      {{ answer_choices [label] }}"
     metadata: !TemplateMetadata
       choices_in_prompt: false
       languages:
       - en
       metrics:
       - Accuracy
       original_task: true
     name: is_factual
     reference: ''
   6fc17a35-e1a3-4b5d-87d5-91d0fdf42d58: !Template
     answer_choices: Yes ||| No
     id: 6fc17a35-e1a3-4b5d-87d5-91d0fdf42d58
-    jinja: "{{text}} Does the statement describe a fact? \n|||\n\
+    jinja: "{{text}} Does the statement describe a fact? \n|||\n
      {{ answer_choices [label] }}"
     metadata: !TemplateMetadata
       choices_in_prompt: false
       languages:
       - en
       metrics:
       - Accuracy
       original_task: true
     name: describe_fact
     reference: ''
   e36e9d87-5366-4070-b95d-51ff5a890f4b: !Template
     answer_choices: non-counterfactual ||| counterfactual
     id: e36e9d87-5366-4070-b95d-51ff5a890f4b
-    jinja: "{{text}} Is the statement non-counterfactual or counterfactual? \n|||\n\
+    jinja: "{{text}} Is the statement non-counterfactual or counterfactual? \n|||\n
      {{ answer_choices [label] }}"
     metadata: !TemplateMetadata
       choices_in_prompt: true
       languages:
       - en
       metrics:
       - Accuracy
       original_task: true
     name: choice_text_before
     reference: ''
   044ee01d-a06d-47b6-9872-6c89f785a961: !Template
     answer_choices: No ||| Yes
     id: 044ee01d-a06d-47b6-9872-6c89f785a961
-    jinja: "{{text}} Is the statement counterfactual? \n|||\n\
+    jinja: "{{text}} Is the statement counterfactual? \n|||\n
      {{ answer_choices [label] }}"
     metadata: !TemplateMetadata
       choices_in_prompt: false
       languages:
       - en
       metrics:
       - Accuracy
       original_task: true
     name: is_counterfactual
     reference: ''
   20c40ee7-ba1e-4e65-bb55-1c7b4e4cbcf7: !Template
     answer_choices: No ||| Yes
     id: 20c40ee7-ba1e-4e65-bb55-1c7b4e4cbcf7
-    jinja: "{{text}} Does the sentence express an event that did not happen? \n|||\n\
+    jinja: "{{text}} Does the sentence express an event that did not happen? \n|||\n
      {{ answer_choices [label] }}"
     metadata: !TemplateMetadata
       choices_in_prompt: false
       languages:
       - en
       metrics:
       - Accuracy
       original_task: true
     name: did_not_happen
     reference: ''
   920ace8e-e063-4edf-b4f6-ac1a1e9f8559: !Template
     answer_choices: Yes ||| No
     id: 920ace8e-e063-4edf-b4f6-ac1a1e9f8559
-    jinja: "{{text}} Does this describe an actual event? \n|||\n\
+    jinja: "{{text}} Does this describe an actual event? \n|||\n
      {{ answer_choices [label] }}"
     metadata: !TemplateMetadata
       choices_in_prompt: false
       languages:
       - en
       metrics:
       - Accuracy
       original_task: true
     name: actual_event
     reference: ''
   9e2a56f5-a11d-497c-b02b-2ccc3e760503: !Template
     answer_choices: Yes ||| No
     id: 9e2a56f5-a11d-497c-b02b-2ccc3e760503
-    jinja: "{{text}} Does the sentence contain events that did not or cannot take place? \n|||\n\
+    jinja: "{{text}} Does the sentence contain events that did not or cannot take place? \n|||\n
      {{ answer_choices [label] }}"
     metadata: !TemplateMetadata
       choices_in_prompt: false
       languages:
       - en
       metrics:
       - Accuracy
       original_task: true
     name: take_place
     reference: ''
   40c0007f-78be-43d0-80c8-df22d37ee64b: !Template
     answer_choices: non-counterfactual ||| counterfactual
     id: 40c0007f-78be-43d0-80c8-df22d37ee64b
-    jinja: "Is the label for the following sentence non-counterfactual or counterfactual? {{text}} \n|||\n\
+    jinja: "Is the label for the following sentence non-counterfactual or counterfactual? {{text}} \n|||\n
      {{ answer_choices [label] }}"
     metadata: !TemplateMetadata
       choices_in_prompt: true
       languages:
       - en
       metrics:
       - Accuracy
       original_task: true
     name: choice_text_after
-    reference: ''
\ No newline at end of file
+    reference: ''
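Once these patches are applied, the new templates can be exercised through promptsource's documented DatasetTemplates API. A minimal sketch, assuming the templates are registered under the "SetFit/sst5" key (the folder name used above) and that each dataset row carries a `text` string and an integer `label`:

    # Illustrative only: exercises the sst5 templates added in this series.
    from promptsource.templates import DatasetTemplates

    # Load every prompt template defined for the SetFit/sst5 dataset.
    sst5_prompts = DatasetTemplates("SetFit/sst5")
    print(sst5_prompts.all_template_names)  # e.g. ['based_on_that', 'feel', ...]

    # Apply one template to a hypothetical example; sst5 rows are assumed to
    # carry a `text` field and a `label` in [0, 4], as the jinja above expects.
    example = {"text": "A gorgeous, witty, seductive movie.", "label": 4}
    prompt = sst5_prompts["so_i_would"]
    input_text, target = prompt.apply(example)
    print(input_text)  # rendered prompt body (everything before the `|||`)
    print(target)      # rendered answer choice, e.g. "very positive"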