name: Daily Python Script Execution

on:
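  # pull_request runs the workflow on every PR as well, so changes to it can be
  # exercised before the daily scheduled run.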
  pull_request:
  schedule:
    # Runs at 00:00 UTC every day
    - cron: '0 0 * * *'

jobs:
  run-python-script:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10' # Ensure this matches the Python version you need

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install requests huggingface-hub

      - name: Execute Python script
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }} # Make sure to set this secret in your repository settings
        run: |
          # Pass the inline script to Python on stdin via a heredoc
          python - <<'EOF'
          import os
          import ast
          import json
          import requests
          from huggingface_hub import HfApi


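          # Helper: find the assignment `<sub_dict_name> = {...}` in the parsed registry
          # module and return its literal value, e.g. for "_TEXT_GENERATION_MODELS" a dict
          # like {"AquilaModel": ("llama", "LlamaForCausalLM"), ...}.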
          def extract_models_sub_dict(parsed_code, sub_dict_name):
              class MODELS_SUB_LIST_VISITOR(ast.NodeVisitor):
                  def __init__(self):
                      self.key = sub_dict_name
                      self.value = None

                  def visit_Assign(self, node):
                      for target in node.targets:
                          if isinstance(target, ast.Name) and target.id == self.key:
                              self.value = ast.literal_eval(node.value)

              visitor = MODELS_SUB_LIST_VISITOR()
              visitor.visit(parsed_code)
              return visitor.value


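          # Helper: locate the top-level `_MODELS = {**_TEXT_GENERATION_MODELS, ...}`
          # assignment and merge every referenced sub-dict into a single mapping.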
          def extract_models_dict(source_code):
              parsed_code = ast.parse(source_code)

              class MODELS_LIST_VISITOR(ast.NodeVisitor):
                  def __init__(self):
                      self.key = "_MODELS"
                      self.value = {}

                  def visit_Assign(self, node):
                      for target in node.targets:
                          if not isinstance(target, ast.Name):
                              return
                          if target.id == self.key:
                              for value in node.value.values:
                                  sub_dict = extract_models_sub_dict(parsed_code, value.id)
                                  self.value.update(sub_dict)

              visitor = MODELS_LIST_VISITOR()
              visitor.visit(parsed_code)
              return visitor.value


          # Fetch the content of the vLLM model registry file
          url = "https://raw.githubusercontent.com/vllm-project/vllm/refs/heads/main/vllm/model_executor/models/registry.py"
          response = requests.get(url)
          response.raise_for_status()  # Raise an exception for bad status codes
          source_code = response.text

          if __name__ == '__main__':
              # extract models dict that consists of sub dicts
              # _MODELS = {
              #     **_TEXT_GENERATION_MODELS,
              #     **_EMBEDDING_MODELS,
              #     **_MULTIMODAL_MODELS,
              #     **_SPECULATIVE_DECODING_MODELS,
              # }
              # _TEXT_GENERATION_MODELS = {
              #     "AquilaModel": ("llama", "LlamaForCausalLM"),
              #     "AquilaForCausalLM": ("llama", "LlamaForCausalLM"),  # AquilaChat2
              #     "ArcticForCausalLM": ("arctic", "ArcticForCausalLM"),
              #     ...
              models_dict = extract_models_dict(source_code)
              architectures = [item for tup in models_dict.values() for item in tup]
              architectures_json_str = json.dumps(architectures, indent=4)
              json_bytes = architectures_json_str.encode('utf-8')
              print(architectures_json_str)

              # api = HfApi(token=os.environ["HF_TOKEN"])
              # api.upload_file(
              #     path_or_fileobj=json_bytes,
              #     path_in_repo="architectures.json",
              #     repo_id="mishig/test-vllm",
              #     repo_type="dataset",
              # )
          EOF