# templates.yaml — boolq_pt prompt templates (forked from EleutherAI/elk)
# Original file: 189 lines (161 loc) · 5.03 KB
dataset: boolq_pt
templates:
3e386463-1715-4578-9cba-07d11a0d3b61: !Template
answer_choices: False ||| True
id: 3e386463-1715-4578-9cba-07d11a0d3b61
jinja: 'Passagem: {{passage}}
Depois de ler esta passagem, tenho uma pergunta: {{question}}? Verdadeiro ou falso?
|||
{% if label != -1 %}
{{answer_choices[label]}}
{% endif %}'
metadata: !TemplateMetadata
choices_in_prompt: true
languages:
- pt
metrics:
- Accuracy
original_task: true
name: after_reading
reference: ''
492f0f88-4370-46cd-839b-1de37a55aeda: !Template
answer_choices: No ||| Yes
id: 492f0f88-4370-46cd-839b-1de37a55aeda
jinja: "{{ passage }} \nPergunta: {{ question }}\nResposta: ||| \n{% if label !=\
\ -1 %}\n{{ answer_choices[label] }}\n{% endif %}"
metadata: !TemplateMetadata
choices_in_prompt: false
languages:
- pt
metrics:
- Accuracy
original_task: true
name: GPT-3 Style
reference: Same as Figure G29, p. 58 of the GPT-3 paper
6cb6a026-c070-470a-b75d-bb8fdf424e35: !Template
answer_choices: No ||| Yes
id: 6cb6a026-c070-470a-b75d-bb8fdf424e35
jinja: "{{ passage }}\n\nDepois de ler isso, eu me pergunto {{ question }}? |||\n{% if\
\ label != -1 %}\n{{ answer_choices[label] }} \n{% endif %}"
metadata: !TemplateMetadata
choices_in_prompt: false
languages:
- pt
metrics:
- Accuracy
original_task: true
name: "I wonder\u2026"
reference: ''
7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5: !Template
answer_choices: No ||| Yes
id: 7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5
jinja: 'Texto: {{passage}}
Responda sim/não à seguinte pergunta: {{question}}? Sim ou não? |||
{% if label != -1 %}
{{answer_choices[label]}}
{% endif %}'
metadata: !TemplateMetadata
choices_in_prompt: true
languages:
- pt
metrics:
- Accuracy
original_task: true
name: yes_no_question
reference: ''
7d21d974-0624-4d4f-9e8c-644e2d009cb5: !Template
answer_choices: No ||| Yes
id: 7d21d974-0624-4d4f-9e8c-644e2d009cb5
jinja: "{{ passage }}\n\nDepois de ler isso, você poderia me dizer {{ question }}? \
\ ||| {% if label != -1 %}{{ answer_choices[label] }}\n{% endif %}"
metadata: !TemplateMetadata
choices_in_prompt: false
languages:
- pt
metrics:
- Accuracy
original_task: true
name: "could you tell me\u2026"
reference: ''
922d3e87-ac58-4731-84d1-f0a40e47afb5: !Template
answer_choices: No ||| Yes
id: 922d3e87-ac58-4731-84d1-f0a40e47afb5
jinja: "EXAME\n1. Responda sim ou não.\nDocumento: {{passage}}\nPergunta: {{question}}? \
\ ||| \n{% if label != -1 %}\n{{answer_choices[label]}}\n{% endif %}"
metadata: !TemplateMetadata
choices_in_prompt: true
languages:
- pt
metrics:
- Accuracy
original_task: true
name: exam
reference: ''
9a1bf459-8047-437c-9def-f21e960429cc: !Template
answer_choices: No ||| Yes
id: 9a1bf459-8047-437c-9def-f21e960429cc
jinja: 'Com base na seguinte passagem, {{ question }}? {{ passage }}
|||
{% if label != -1 %}
{{ answer_choices[label] }}
{% endif %}'
metadata: !TemplateMetadata
choices_in_prompt: false
languages:
- pt
metrics:
- Accuracy
original_task: true
name: based on the following passage
reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
9f4c6b0a-437b-40c0-b467-db4b7218d38d: !Template
answer_choices: False ||| True
id: 9f4c6b0a-437b-40c0-b467-db4b7218d38d
jinja: 'Exercício: leia o texto e responda à questão com Verdadeiro ou Falso.
Texto: {{passage}}
Pergunta: {{question}}? |||
{% if label != -1 %}
{{answer_choices[label]}}
{% endif %}'
metadata: !TemplateMetadata
choices_in_prompt: true
languages:
- pt
metrics:
- Accuracy
original_task: true
name: exercise
reference: ''
b2b3cb60-d6e3-491c-a09a-8201e13e417e: !Template
answer_choices: No ||| Yes
id: b2b3cb60-d6e3-491c-a09a-8201e13e417e
jinja: '{{ passage }}
Com base na passagem anterior, {{ question }}? ||| {% if label != -1 %}{{ answer_choices[label]
}}
{% endif %}'
metadata: !TemplateMetadata
choices_in_prompt: false
languages:
- pt
metrics:
- Accuracy
original_task: true
name: based on the previous passage
reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021."
eb78772c-e81e-4b8a-a77b-b75efd1c212a: !Template
answer_choices: False ||| True
id: eb78772c-e81e-4b8a-a77b-b75efd1c212a
jinja: '{{passage}}
P: {{question}}? Verdadeiro ou falso? |||
{% if label != -1 %}
{{answer_choices[label]}}
{% endif %}'
metadata: !TemplateMetadata
choices_in_prompt: true
languages:
- pt
metrics:
- Accuracy
original_task: true
name: valid_binary
reference: ''