You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Traceback (most recent call last):
File "train.py", line 94, in <module>
student=config.create_student(nocrf=args.nocrf)
File "D:\KB-NER-main\flair\config_parser.py", line 240, in create_student
return self.create_model(self.config,pretrained=self.load_pretrained(self.config), is_student=True)
File "D:\KB-NER-main\flair\config_parser.py", line 193, in create_model
embeddings, word_map, char_map, lemma_map, postag_map=self.create_embeddings(config['embeddings'])
File "D:\KB-NER-main\flair\config_parser.py", line 168, in create_embeddings
embedding_list.append(getattr(Embeddings,embedding.split('-')[0])(**embeddings[embedding]))
File "D:\KB-NER-main\flair\embeddings.py", line 2951, in __init__
self.tokenizer = AutoTokenizer.from_pretrained(model)
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\transformers\models\auto\tokenization_auto.py", line 643, in from_pretrained
tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\transformers\models\auto\tokenization_auto.py", line 487, in get_tokenizer_config
resolved_config_file = cached_file(
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\transformers\utils\hub.py", line 417, in cached_file
resolved_file = hf_hub_download(
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\huggingface_hub\utils\_validators.py", line 110, in _inner_fn
validate_repo_id(arg_value)
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\huggingface_hub\utils\_validators.py", line 158, in validate_repo_id
raise HFValidationError(
huggingface_hub.utils._validators.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': 'resources/taggers/xlmr-large-pretuned-tuned-wiki-first_3epoch_1batch_4accumulate_0.000005lr_10000lrrate_multi_monolingual_crf_fast_norelearn_sentbatch_sentloss_withdev_finetune_saving_amz_doc_wiki_v3_10upsample_addmix_ner23/xlm-roberta-large'. Use repo_type argument if needed.
The text was updated successfully, but these errors were encountered:
I got the error shown below:
Traceback (most recent call last):
File "train.py", line 94, in <module>
student=config.create_student(nocrf=args.nocrf)
File "D:\KB-NER-main\flair\config_parser.py", line 240, in create_student
return self.create_model(self.config,pretrained=self.load_pretrained(self.config), is_student=True)
File "D:\KB-NER-main\flair\config_parser.py", line 193, in create_model
embeddings, word_map, char_map, lemma_map, postag_map=self.create_embeddings(config['embeddings'])
File "D:\KB-NER-main\flair\config_parser.py", line 168, in create_embeddings
embedding_list.append(getattr(Embeddings,embedding.split('-')[0])(**embeddings[embedding]))
File "D:\KB-NER-main\flair\embeddings.py", line 2951, in __init__
self.tokenizer = AutoTokenizer.from_pretrained(model)
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\transformers\models\auto\tokenization_auto.py", line 643, in from_pretrained
tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\transformers\models\auto\tokenization_auto.py", line 487, in get_tokenizer_config
resolved_config_file = cached_file(
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\transformers\utils\hub.py", line 417, in cached_file
resolved_file = hf_hub_download(
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\huggingface_hub\utils\_validators.py", line 110, in _inner_fn
validate_repo_id(arg_value)
File "C:\Users\RichardFan\anaconda3\envs\adaseq\lib\site-packages\huggingface_hub\utils\_validators.py", line 158, in validate_repo_id
raise HFValidationError(
huggingface_hub.utils._validators.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': 'resources/taggers/xlmr-large-pretuned-tuned-wiki-first_3epoch_1batch_4accumulate_0.000005lr_10000lrrate_multi_monolingual_crf_fast_norelearn_sentbatch_sentloss_withdev_finetune_saving_amz_doc_wiki_v3_10upsample_addmix_ner23/xlm-roberta-large'. Use `repo_type` argument if needed.
The text was updated successfully, but these errors were encountered: