send editions to git
parent de17c18a1a
commit ae1a9b11c0
@@ -1,7 +1,7 @@
 from transformers import AutoTokenizer
 import json
 
-file = open('models_info.json', 'r')
+file = open('./data/models_info.json', 'r')
 models = json.load(file)
 
 # Strips the newline character
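Note that the read in this hunk opens the file without ever closing it. A minimal sketch of the same read done with a context manager, so the handle is released automatically (not part of this commit, purely illustrative):

import json

# Same read as in the hunk above, but the file is closed when the block exits.
with open('./data/models_info.json', 'r', encoding='utf-8') as f:
    models = json.load(f)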
@@ -1,16 +1,15 @@
 from huggingface_hub import HfApi, ModelFilter
 import json
 
 # Initialize the Hugging Face API
 api = HfApi()
 
-# Define the languages you're interested in
+# persian languages tags
 languages = ['pes', 'fas', 'fa']  # Language codes for Persian (Farsi)
 nlp_tasks = ['text-classification', 'token-classification', 'table-question-answering', 'question-answering', 'zero-shot-classification', 'translation', 'summarization', 'feature-extraction', 'text-generation', 'text2text-generation', 'fill-mask', 'sentence-similarity']
-# Initialize a list to store the model information
+# list of model information
 models_info = []
 
-# Iterate over each language
+# Iterate languages
 for lang in languages:
     # Filter models by language
     models = api.list_models(filter=ModelFilter(language=lang))
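The query above filters by language only; the nlp_tasks list defined earlier is not used in it. A hedged sketch of how a language tag and a task could be combined in one ModelFilter call, assuming a huggingface_hub release that still ships ModelFilter (newer releases have dropped it in favour of keyword arguments on list_models):

from itertools import islice
from huggingface_hub import HfApi, ModelFilter

api = HfApi()
# Illustrative only: one Persian language tag paired with one task from nlp_tasks.
persian_fill_mask = api.list_models(filter=ModelFilter(language='fa', task='fill-mask'))
for model in islice(persian_fill_mask, 5):
    print(model.modelId)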
@@ -30,8 +29,8 @@ for lang in languages:
         }
         models_info.append(model_info)
 
-# Save the collected data to a JSON file
-with open('models_info.json', 'w', encoding='utf-8') as f:
+# Save models_info
+with open('./data/models_info.json', 'w', encoding='utf-8') as f:
     json.dump(models_info, f, ensure_ascii=False, indent=4)
 
-print("Data collection complete. Saved to models_info.json")
+print("Finished!")
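One thing the new path implies but the commit does not add: the write now targets ./data/, and open() raises FileNotFoundError if that directory does not exist yet. A small guard such as the sketch below could precede the dump; models_info here is only a stand-in for the list built by the loop above.

import json
from pathlib import Path

models_info = []  # stand-in for the list collected by the loop above

out_path = Path('./data/models_info.json')
out_path.parent.mkdir(parents=True, exist_ok=True)  # create ./data/ if it is missing
with out_path.open('w', encoding='utf-8') as f:
    json.dump(models_info, f, ensure_ascii=False, indent=4)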