 ###################### NLI/paraphrase ###############################
 
 
+anli__a1 = Classification('premise','hypothesis','label', splits=['train_r1','dev_r1','test_r1'])
+anli__a2 = Classification('premise','hypothesis','label', splits=['train_r2','dev_r2','test_r2'])
+anli__a3 = Classification('premise','hypothesis','label', splits=['train_r3','dev_r3','test_r3'])
+
+
 babi_nli = Classification("premise", "hypothesis", "label",
     dataset_name="metaeval/babi_nli",
     config_name=set(get_dataset_config_names("metaeval/babi_nli"))-{"agents-motivations"}
 ) # agents-motivations task is not as clear-cut as the others
 
-anli__a1 = Classification('premise','hypothesis','label', splits=['train_r1','dev_r1','test_r1'])
-anli__a2 = Classification('premise','hypothesis','label', splits=['train_r2','dev_r2','test_r2'])
-anli__a3 = Classification('premise','hypothesis','label', splits=['train_r3','dev_r3','test_r3'])
 
 sick__label = Classification('sentence_A','sentence_B','label')
 sick__relatedness = Classification('sentence_A','sentence_B','relatedness_score')
@@ -308,7 +310,20 @@ def split_choices(s):
 ######################## Classification (other) ########################
 
 utilitarianism = Classification("comparison",labels="label",
-    dataset_name="")
+    dataset_name="metaeval/utilitarianism")
+
+amazon_counterfactual = Classification(
+    "text", labels="label",
+    dataset_name="mteb/amazon_counterfactual",
+    config_name="en")
+
+insincere_questions = Classification(
+    "text", labels="label",
+    dataset_name="SetFit/insincere-questions")
+
+toxic_conversations = Classification(
+    "text", labels="label",
+    dataset_name="SetFit/toxic_conversations")
 
 turingbench = Classification("Generation",labels="label",
     dataset_name="turingbench/TuringBench",
@@ -378,6 +393,10 @@ def split_choices(s):
 "persuasiveness-eloquence", "persuasiveness-premisetype", "persuasiveness-relevance", "persuasiveness-specificity",
 "persuasiveness-strength", "sarcasm","stac"])
 
+silicone = Classification("Utterance", labels="Label",
+    config_name=['dyda_da', 'dyda_e', 'iemocap', 'maptask', 'meld_e', 'meld_s', 'oasis', 'sem']  # +['swda', 'mrda'] # in pragmeval
+)
+
 #lex_glue___ecthr_a = Classification(sentence1="text", labels="labels") # too long
 #lex_glue___ecthr_b = Classification(sentence1="text", labels="labels") # too long
 lex_glue___eurlex = Classification(sentence1="text", labels="labels")
@@ -591,6 +610,8 @@ def split_choices(s):
 sarcasm_news = Classification("headline", labels="is_sarcastic",
     dataset_name="raquiba/Sarcasm_News_Headline")
 
+sem_eval_2010_task_8 = Classification("sentence", labels="relation")
+
 ###END
 ################### END OF SUPPORT ######################
 