diff --git a/elk/promptsource/templates.py b/elk/promptsource/templates.py index 8f1828f8..ea4e9196 100644 --- a/elk/promptsource/templates.py +++ b/elk/promptsource/templates.py @@ -24,7 +24,7 @@ # These are users whose datasets should be included in the results returned by # filter_english_datasets (regardless of their metadata) -INCLUDED_USERS = {"Zaid", "craffel", "lauritowal"} +INCLUDED_USERS = {"Zaid", "craffel", "lauritowal", "christykoh"} def highlight(input): diff --git a/elk/promptsource/templates/boolq_pt/templates.yaml b/elk/promptsource/templates/boolq_pt/templates.yaml new file mode 100644 index 00000000..93528f32 --- /dev/null +++ b/elk/promptsource/templates/boolq_pt/templates.yaml @@ -0,0 +1,189 @@ +dataset: boolq_pt +templates: + 3e386463-1715-4578-9cba-07d11a0d3b61: !Template + answer_choices: False ||| True + id: 3e386463-1715-4578-9cba-07d11a0d3b61 + jinja: 'Passagem: {{passage}} + + + Depois de ler esta passagem, tenho uma pergunta: {{question}}? Verdadeiro ou falso? + ||| + + {% if label != -1 %} + + {{answer_choices[label]}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: after_reading + reference: '' + 492f0f88-4370-46cd-839b-1de37a55aeda: !Template + answer_choices: No ||| Yes + id: 492f0f88-4370-46cd-839b-1de37a55aeda + jinja: "{{ passage }} \nPergunta: {{ question }}\nResposta: ||| \n{% if label !=\ + \ -1 %}\n{{ answer_choices[label] }}\n{% endif %}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: GPT-3 Style + reference: Same as Figure G29, p. 58 of the GPT-3 paper + 6cb6a026-c070-470a-b75d-bb8fdf424e35: !Template + answer_choices: No ||| Yes + id: 6cb6a026-c070-470a-b75d-bb8fdf424e35 + jinja: "{{ passage }}\n\nDepois de ler isso, eu me pergunto {{ question }}? 
|||\n{% if\ + \ label != -1 %}\n{{ answer_choices[label] }} \n{% endif %}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: "I wonder\u2026" + reference: '' + 7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5: !Template + answer_choices: No ||| Yes + id: 7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5 + jinja: 'Texto: {{passage}} + + + Responda sim/não à seguinte pergunta: {{question}}? Sim ou não? ||| + + {% if label != -1 %} + + {{answer_choices[label]}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: yes_no_question + reference: '' + 7d21d974-0624-4d4f-9e8c-644e2d009cb5: !Template + answer_choices: No ||| Yes + id: 7d21d974-0624-4d4f-9e8c-644e2d009cb5 + jinja: "{{ passage }}\n\nDepois de ler isso, você poderia me dizer {{ question }}? \ + \ ||| {% if label != -1 %}{{ answer_choices[label] }}\n{% endif %}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: "could you tell me\u2026" + reference: '' + 922d3e87-ac58-4731-84d1-f0a40e47afb5: !Template + answer_choices: No ||| Yes + id: 922d3e87-ac58-4731-84d1-f0a40e47afb5 + jinja: "EXAME\n1. Responda sim ou não.\nDocumento: {{passage}}\nPergunta: {{question}}? \ + \ ||| \n{% if label != -1 %}\n{{answer_choices[label]}}\n{% endif %}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: exam + reference: '' + 9a1bf459-8047-437c-9def-f21e960429cc: !Template + answer_choices: No ||| Yes + id: 9a1bf459-8047-437c-9def-f21e960429cc + jinja: 'Com base na seguinte passagem, {{ question }}? 
{{ passage }} + + + ||| + + {% if label != -1 %} + + {{ answer_choices[label] }} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: based on the following passage + reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021." + 9f4c6b0a-437b-40c0-b467-db4b7218d38d: !Template + answer_choices: False ||| True + id: 9f4c6b0a-437b-40c0-b467-db4b7218d38d + jinja: 'Exercício: leia o texto e responda à questão com Verdadeiro ou Falso. + + + Texto: {{passage}} + + Pergunta: {{question}}? ||| + + {% if label != -1 %} + + {{answer_choices[label]}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: exercise + reference: '' + b2b3cb60-d6e3-491c-a09a-8201e13e417e: !Template + answer_choices: No ||| Yes + id: b2b3cb60-d6e3-491c-a09a-8201e13e417e + jinja: '{{ passage }} + + Com base na passagem anterior, {{ question }}? ||| {% if label != -1 %}{{ answer_choices[label] + }} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: based on the previous passage + reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021." + eb78772c-e81e-4b8a-a77b-b75efd1c212a: !Template + answer_choices: False ||| True + id: eb78772c-e81e-4b8a-a77b-b75efd1c212a + jinja: '{{passage}} + + + P: {{question}}? Verdadeiro ou falso? 
||| + + {% if label != -1 %} + + {{answer_choices[label]}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: valid_binary + reference: '' diff --git a/elk/promptsource/templates/christykoh/ag_news_pt/templates.yaml b/elk/promptsource/templates/christykoh/ag_news_pt/templates.yaml new file mode 100644 index 00000000..2dc94749 --- /dev/null +++ b/elk/promptsource/templates/christykoh/ag_news_pt/templates.yaml @@ -0,0 +1,215 @@ +dataset: ag_news_pt +templates: + 24e44a81-a18a-42dd-a71c-5b31b2d2cb39: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: 24e44a81-a18a-42dd-a71c-5b31b2d2cb39 + jinja: "Qual rótulo melhor descreve este artigo de notícias?\n{{text}} ||| \n{{answer_choices[label]\ + \ }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: classify_question_first + reference: '' + 8fdc1056-1029-41a1-9c67-354fc2b8ceaf: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: 8fdc1056-1029-41a1-9c67-354fc2b8ceaf + jinja: "Isso é uma notícia sobre {{\"política mundial, esportes, negócios,\ + \ ou ciência e tecnologia\"}}?\n{{text}} \n||| \n{{answer_choices[label] }}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: classify_with_choices_question_first + reference: '' + 918267e0-af68-4117-892d-2dbe66a58ce9: !Template + answer_choices: Político ||| Atleta ||| Executivo de negócios ||| Cientista + id: 918267e0-af68-4117-892d-2dbe66a58ce9 + jinja: 'Você recomendaria o seguinte artigo para um {{"político"}}, um {{"atleta"}}, + um {{"executivo de negócios"}} ou um {{"cientista"}}? 
+ + + {{ text }} + + ||| + + {{answer_choices[label]}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: recommend + reference: '' + 9345df33-4f23-4944-a33c-eef94e626862: !Template + answer_choices: Notícias do mundo ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: 9345df33-4f23-4944-a33c-eef94e626862 + jinja: "{{text}} \n\nQual das seguintes seções de um jornal seria esta\ + \ artigo provavelmente aparece em? {{\"Notícias do mundo\"}}, {{\"Esportes\"}}, {{\"Negócios\"\ + }} ou {{\"Ciência e Tecnologia\"}}? ||| \n{{answer_choices[label] }}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: which_section_choices + reference: '' + 98534347-fff7-4c39-a795-4e69a44791f7: !Template + answer_choices: Notícias do mundo ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: 98534347-fff7-4c39-a795-4e69a44791f7 + jinja: "{{text}} \n\nEm qual seção de um jornal este artigo provavelmente apareceria\ + \ em? ||| \n{{answer_choices[label] }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: which_section + reference: '' + b1d49782-dfd9-41a5-87a6-8fceab9c2198: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: b1d49782-dfd9-41a5-87a6-8fceab9c2198 + jinja: 'Considere o seguinte exemplo: '''''' {{text}} '''''' + Opção 1: {{answer_choices[label]}}. + Opção 2: {{answer_choices[1 - label]}}. 
Entre a opção 1 e a opção 2, o tópico deste exemplo é ||| {{answer_choices[label]}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: [] + original_task: true + name: burns_1 + reference: Burns et al + b2d49782-dfd9-41a5-87a6-8fceab9c2198: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: b2d49782-dfd9-41a5-87a6-8fceab9c2198 + jinja: 'Considere o seguinte exemplo: '''''' {{text}} '''''' + Opção 1: {{answer_choices[label]}}. + Opção 2: {{answer_choices[1 - label]}}. Entre a opção 1 e a opção 2, qual é o tópico deste exemplo? ||| {{answer_choices[label]}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: [] + original_task: true + name: burns_2 + reference: Burns et al + b3d49782-dfd9-41a5-87a6-8fceab9c2198: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: b3d49782-dfd9-41a5-87a6-8fceab9c2198 + jinja: 'Considere o seguinte exemplo: '''''' {{text}} '''''' + Qual é o tópico deste exemplo, opção 1: {{answer_choices[label]}}, + ou opção 2: {{answer_choices[1 - label]}}? ||| {{answer_choices[label]}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: [] + original_task: true + name: burns_3 + reference: Burns et al + b4d49782-dfd9-41a5-87a6-8fceab9c2198: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: b4d49782-dfd9-41a5-87a6-8fceab9c2198 + jinja: '{{text}} + Opção 1: {{answer_choices[label]}}. Opção 2: {{answer_choices[1 - label]}}. 
Entre a opção 1 e a opção 2, o tópico deste exemplo é ||| {{answer_choices[label]}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: [] + original_task: true + name: burns_4 + reference: Burns et al + b5d49782-dfd9-41a5-87a6-8fceab9c2198: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: b5d49782-dfd9-41a5-87a6-8fceab9c2198 + jinja: '{{text}} + Opção 1: {{answer_choices[label]}}. Opção 2: {{answer_choices[1 - label]}}. Entre a opção 1 e a opção 2, qual é o tema deste exemplo? ||| {{answer_choices[label]}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: [] + original_task: true + name: burns_5 + reference: Burns et al + b6d49782-dfd9-41a5-87a6-8fceab9c2198: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: b6d49782-dfd9-41a5-87a6-8fceab9c2198 + jinja: '{{text}} + Qual é o tópico deste exemplo, escolha 1: {{answer_choices[label]}}, ou escolha 2: {{answer_choices[1 - label]}}? ||| {{answer_choices[label]}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: [] + original_task: true + name: burns_6 + reference: Burns et al + b7d49782-dfd9-41a5-87a6-8fceab9c2198: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: b7d49782-dfd9-41a5-87a6-8fceab9c2198 + jinja: '{{text}} + Qual rótulo melhor descreve este artigo de notícias, escolha 1: {{answer_choices[label]}}, ou escolha 2: {{answer_choices[1 - label]}}? 
||| {{answer_choices[label]}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: [] + original_task: true + name: burns_7 + reference: Burns et al + b8d49782-dfd9-41a5-87a6-8fceab9c2198: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: b8d49782-dfd9-41a5-87a6-8fceab9c2198 + jinja: '{{text}} + Em qual seção de um jornal esse artigo provavelmente apareceria, escolha 1: {{answer_choices[label]}}, ou escolha 2: {{answer_choices[1 - label]}}? ||| {{answer_choices[label]}}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: [] + original_task: true + name: burns_8 + reference: Burns et al + b401b0ee-6ffe-4a91-8e15-77ee073cd858: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: b401b0ee-6ffe-4a91-8e15-77ee073cd858 + jinja: "{{text}} \nIsto é uma notícia sobre {{\"política mundial, esportes,\ + \ negócios ou ciência e tecnologia\"}}? ||| \n{{answer_choices[label] }}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: classify_with_choices + reference: '' + cb355f33-7e8c-4455-a72b-48d315bd4f60: !Template + answer_choices: Política mundial ||| Esportes ||| Negócios ||| Ciência e Tecnologia + id: cb355f33-7e8c-4455-a72b-48d315bd4f60 + jinja: "{{text}} \nQual rótulo melhor descreve esta notícia? 
||| \n{{answer_choices[label]\ + \ }}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: classify + reference: '' diff --git a/elk/promptsource/templates/christykoh/boolq_pt/templates.yaml b/elk/promptsource/templates/christykoh/boolq_pt/templates.yaml new file mode 100644 index 00000000..a83c4d95 --- /dev/null +++ b/elk/promptsource/templates/christykoh/boolq_pt/templates.yaml @@ -0,0 +1,189 @@ +dataset: boolq_pt +templates: + 3e386463-1715-4578-9cba-07d11a0d3b61: !Template + answer_choices: Falso ||| Verdadeiro + id: 3e386463-1715-4578-9cba-07d11a0d3b61 + jinja: 'Passagem: {{passage}} + + + Depois de ler esta passagem, tenho uma pergunta: {{question}}? Verdadeiro ou falso? + ||| + + {% if label != -1 %} + + {{answer_choices[label]}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: after_reading + reference: '' + 492f0f88-4370-46cd-839b-1de37a55aeda: !Template + answer_choices: Não ||| Sim + id: 492f0f88-4370-46cd-839b-1de37a55aeda + jinja: "{{ passage }} \nPergunta: {{ question }}\nResposta: ||| \n{% if label !=\ + \ -1 %}\n{{ answer_choices[label] }}\n{% endif %}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: GPT-3 Style + reference: Same as Figure G29, p. 58 of the GPT-3 paper + 6cb6a026-c070-470a-b75d-bb8fdf424e35: !Template + answer_choices: Não ||| Sim + id: 6cb6a026-c070-470a-b75d-bb8fdf424e35 + jinja: "{{ passage }}\n\nDepois de ler isso, eu me pergunto {{ question }}? 
|||\n{% if\ + \ label != -1 %}\n{{ answer_choices[label] }} \n{% endif %}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: "I wonder\u2026" + reference: '' + 7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5: !Template + answer_choices: Não ||| Sim + id: 7cf7acdf-e3a2-459f-a3e8-2e2d27dd6aa5 + jinja: 'Texto: {{passage}} + + + Responda sim/não à seguinte pergunta: {{question}}? Sim ou não? ||| + + {% if label != -1 %} + + {{answer_choices[label]}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: yes_no_question + reference: '' + 7d21d974-0624-4d4f-9e8c-644e2d009cb5: !Template + answer_choices: Não ||| Sim + id: 7d21d974-0624-4d4f-9e8c-644e2d009cb5 + jinja: "{{ passage }}\n\nDepois de ler isso, você poderia me dizer {{ question }}? \ + \ ||| {% if label != -1 %}{{ answer_choices[label] }}\n{% endif %}" + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: "could you tell me\u2026" + reference: '' + 922d3e87-ac58-4731-84d1-f0a40e47afb5: !Template + answer_choices: Não ||| Sim + id: 922d3e87-ac58-4731-84d1-f0a40e47afb5 + jinja: "EXAME\n1. Responda sim ou não.\nDocumento: {{passage}}\nPergunta: {{question}}? \ + \ ||| \n{% if label != -1 %}\n{{answer_choices[label]}}\n{% endif %}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: exam + reference: '' + 9a1bf459-8047-437c-9def-f21e960429cc: !Template + answer_choices: Não ||| Sim + id: 9a1bf459-8047-437c-9def-f21e960429cc + jinja: 'Com base na seguinte passagem, {{ question }}? 
{{ passage }} + + + ||| + + {% if label != -1 %} + + {{ answer_choices[label] }} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: based on the following passage + reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021." + 9f4c6b0a-437b-40c0-b467-db4b7218d38d: !Template + answer_choices: Falso ||| Verdadeiro + id: 9f4c6b0a-437b-40c0-b467-db4b7218d38d + jinja: 'Exercício: leia o texto e responda à questão com Verdadeiro ou Falso. + + + Texto: {{passage}} + + Pergunta: {{question}}? ||| + + {% if label != -1 %} + + {{answer_choices[label]}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: exercise + reference: '' + b2b3cb60-d6e3-491c-a09a-8201e13e417e: !Template + answer_choices: Não ||| Sim + id: b2b3cb60-d6e3-491c-a09a-8201e13e417e + jinja: '{{ passage }} + + Com base na passagem anterior, {{ question }}? ||| {% if label != -1 %}{{ answer_choices[label] + }} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: based on the previous passage + reference: "Adapted from Perez et al. 2021 and Schick & Sch\xFCtz 2021." + eb78772c-e81e-4b8a-a77b-b75efd1c212a: !Template + answer_choices: Falso ||| Verdadeiro + id: eb78772c-e81e-4b8a-a77b-b75efd1c212a + jinja: '{{passage}} + + + P: {{question}}? Verdadeiro ou falso? 
||| + + {% if label != -1 %} + + {{answer_choices[label]}} + + {% endif %}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: valid_binary + reference: '' diff --git a/elk/promptsource/templates/christykoh/imdb_pt/templates.yaml b/elk/promptsource/templates/christykoh/imdb_pt/templates.yaml new file mode 100644 index 00000000..bc18de77 --- /dev/null +++ b/elk/promptsource/templates/christykoh/imdb_pt/templates.yaml @@ -0,0 +1,219 @@ +dataset: imdb_pt +templates: + 02ff2949-0f45-4d97-941e-6fa4c0afbc2d: !Template + answer_choices: negativo ||| positivo + id: 02ff2949-0f45-4d97-941e-6fa4c0afbc2d + jinja: 'A crítica de filme a seguir expressa que sentimento? {{text}} + + + ||| {{ answer_choices + [label] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: Movie Expressed Sentiment 2 + reference: '' + 2351d12a-e630-4d19-8b41-e199266e38f7: !Template + answer_choices: mal ||| bom + id: 2351d12a-e630-4d19-8b41-e199266e38f7 + jinja: '{{text}} + + + O crítico achou este filme {{"bom ou mal"}}? ||| {{ answer_choices + [label] }}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: Reviewer Opinion bad good choices + reference: '' + 5f372fb1-795a-47b6-8ddf-c4fd1579e76a: !Template + answer_choices: negativo ||| positivo + id: 5f372fb1-795a-47b6-8ddf-c4fd1579e76a + jinja: "{{text}} \nEsta avaliação é {{\"positivo ou negativo\"}}? ||| \n{{answer_choices[label]}}" + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: 'Sentiment with choices ' + reference: '' + 866474a5-1498-46b7-bfee-ac0c5160707f: !Template + answer_choices: negativo ||| positivo + id: 866474a5-1498-46b7-bfee-ac0c5160707f + jinja: '{{text}} + + + Como o espectador se sente sobre o filme? 
+ + + ||| {{ answer_choices + [label] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: Reviewer Sentiment Feeling + reference: '' + 96538f30-f2c1-430e-8fc6-936a16966d9c: !Template + answer_choices: negativo ||| positivo + id: 96538f30-f2c1-430e-8fc6-936a16966d9c + jinja: '{{text}} Que sentimento o escritor expressa pelo filme? ||| {{ + answer_choices [label] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: Writer Expressed Sentiment + reference: '' + af51297c-38a3-4d6c-a8b5-04b1243d7443: !Template + answer_choices: negativo ||| positivo + id: af51297c-38a3-4d6c-a8b5-04b1243d7443 + jinja: '{{text}} + + O sentimento expresso pelo filme é ||| {{ answer_choices + [label] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: Movie Expressed Sentiment + reference: '' + b93b74ac-fe95-40b4-9610-318b46ab820f: !Template + answer_choices: negativo ||| positivo + id: b93b74ac-fe95-40b4-9610-318b46ab820f + jinja: '{{text}} + + + Qual é o sentimento expresso neste texto? + + + ||| {{ answer_choices + [label] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: Text Expressed Sentiment + reference: '' + b9b5d79d-f0b3-4bec-a724-f585db3e93ff: !Template + answer_choices: negativo ||| positivo + id: b9b5d79d-f0b3-4bec-a724-f585db3e93ff + jinja: '{{text}} + + + Isso definitivamente não é um ||| {{ answer_choices [1-label]}} avaliação.' 
+ metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: false + name: Negation template for positive and negative + reference: '' + bd82ba0f-01d4-4fa1-bf8d-07e392c00cd9: !Template + answer_choices: Não ||| Sim + id: bd82ba0f-01d4-4fa1-bf8d-07e392c00cd9 + jinja: '{{text}} + + O crítico gostou do filme? ||| {{ answer_choices [label]}}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: Reviewer Enjoyment Yes No + reference: '' + c70d1687-2421-49a2-9553-91b8bac4cfbe: !Template + answer_choices: negativo ||| positivo + id: c70d1687-2421-49a2-9553-91b8bac4cfbe + jinja: '{{text}} + + Qual é o sentimento expresso pelo crítico para o filme? + + ||| {{ answer_choices [label] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: Reviewer Expressed Sentiment + reference: '' + dacb5f03-dc80-428c-b707-8574436675c9: !Template + answer_choices: 0 ||| 1 + id: dacb5f03-dc80-428c-b707-8574436675c9 + jinja: 'Considere o seguinte exemplo: '''''' {{text}} '''''' + + + Entre {{ answer_choices[0] }} e {{answer_choices[1] }}, que é o sentimento + deste exemplo? + + ||| {{ answer_choices[label] }}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: burns_2 + reference: '' + e01970ab-42c0-4e6e-a08f-4940d889ef37: !Template + answer_choices: Eles não gostaram! ||| Eles adoraram + id: e01970ab-42c0-4e6e-a08f-4940d889ef37 + jinja: '{{text}} + + Como o crítico se sente sobre o filme? 
||| + + {{ answer_choices[label] }}' + metadata: !TemplateMetadata + choices_in_prompt: false + languages: + - pt + metrics: + - Accuracy + original_task: true + name: Reviewer Enjoyment + reference: '' + eb791ab2-d2b4-4be6-a569-64086983abee: !Template + answer_choices: 0 ||| 1 + id: eb791ab2-d2b4-4be6-a569-64086983abee + jinja: 'Considere o seguinte exemplo: '''''' {{text}} '''''' + + Entre {{ answer_choices[0] }} e {{answer_choices[1] }}, o sentimento de + este exemplo é ||| {{ answer_choices[label] }}' + metadata: !TemplateMetadata + choices_in_prompt: true + languages: + - pt + metrics: + - Accuracy + original_task: true + name: burns_1 + reference: ''