From 1778e50d73cac6cf8d82a588b0e3ac5332dbab24 Mon Sep 17 00:00:00 2001
From: Alexander Kozlov
Date: Thu, 17 Oct 2024 17:00:58 +0400
Subject: [PATCH] [WWB]: Some fixes (#983)

---
 .github/workflows/causal_lm_cpp.yml                          | 1 +
 .github/workflows/llm_bench-python.yml                       | 1 +
 .../python/{who_what_bench => who_what_benchmark}/README.md  | 0
 tools/who_what_benchmark/whowhatbench/wwb.py                 | 5 +++--
 4 files changed, 5 insertions(+), 2 deletions(-)
 rename llm_bench/python/{who_what_bench => who_what_benchmark}/README.md (100%)

diff --git a/.github/workflows/causal_lm_cpp.yml b/.github/workflows/causal_lm_cpp.yml
index 4b1a516257..2537a8eb36 100644
--- a/.github/workflows/causal_lm_cpp.yml
+++ b/.github/workflows/causal_lm_cpp.yml
@@ -7,6 +7,7 @@ on:
     branches:
       - master
       - 'releases/**'
+permissions: read-all # Required by https://github.com/ossf/scorecard/blob/e23b8ad91fd6a64a0a971ca4fc0a4d1650725615/docs/checks.md#token-permissions
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
diff --git a/.github/workflows/llm_bench-python.yml b/.github/workflows/llm_bench-python.yml
index 464758d0a7..1eca051ad6 100644
--- a/.github/workflows/llm_bench-python.yml
+++ b/.github/workflows/llm_bench-python.yml
@@ -44,6 +44,7 @@ jobs:
         run: |
           # stop the build if there are Python syntax errors or undefined names
           python -m flake8 ${{ env.LLM_BENCH_PYPATH }} --config=${{ env.LLM_BENCH_PYPATH }}/setup.cfg
+          python -m flake8 ${{ env.WWB_PATH }} --config=${{ env.LLM_BENCH_PYPATH }}/setup.cfg
       - name: Create code style diff for samples
         if: failure()
         run: |
diff --git a/llm_bench/python/who_what_bench/README.md b/llm_bench/python/who_what_benchmark/README.md
similarity index 100%
rename from llm_bench/python/who_what_bench/README.md
rename to llm_bench/python/who_what_benchmark/README.md
diff --git a/tools/who_what_benchmark/whowhatbench/wwb.py b/tools/who_what_benchmark/whowhatbench/wwb.py
index 19c6aed2cd..50297d9a3d 100644
--- a/tools/who_what_benchmark/whowhatbench/wwb.py
+++ b/tools/who_what_benchmark/whowhatbench/wwb.py
@@ -37,7 +37,7 @@ class GenAIModelWrapper:
 
     def __init__(self, model, model_dir):
         self.model = model
-        self.config = AutoConfig.from_pretrained(model_dir)
+        self.config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
 
     def __getattr__(self, attr):
         if attr in self.__dict__:
@@ -199,7 +199,7 @@ def parse_args():
         type=str,
         choices=["text", "text-to-image"],
         default="text",
-        help="Indicated the model type, e.g. 'text' - for LLMs, 't2im' - for text-to-image pipelines.",
+        help="Indicated the model type, e.g. 'text' - for causal text generation, 'text-to-image' - for image generation.",
     )
     parser.add_argument(
         "--data-encoder",
@@ -335,6 +335,7 @@ def diff_strings(a: str, b: str, *, use_loguru_colors: bool = False) -> str:
 def genai_gen_answer(model, tokenizer, question, max_new_tokens, skip_question):
     config = openvino_genai.GenerationConfig()
     config.max_new_tokens = max_new_tokens
+    config.do_sample = False
     out = model.generate(question, config)
     return out
 
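
Note on the AutoConfig change: trust_remote_code=True is needed for models whose
configuration class ships inside the model repository rather than inside transformers;
without the flag, AutoConfig.from_pretrained refuses to load such models. A minimal
sketch of the behavior, assuming only that transformers is installed and using a
placeholder model_dir path (not taken from the patch):

    # Sketch only: mirror of the wrapper's config loading with remote code allowed.
    from transformers import AutoConfig

    model_dir = "/path/to/exported/model"  # placeholder path
    config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
    print(config.model_type)  # custom architectures now resolve as well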
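
Note on the GenerationConfig change: do_sample = False selects greedy decoding, so
repeated benchmark runs return identical answers and the similarity metric measures
model differences rather than sampling noise. A short usage sketch with openvino_genai,
assuming a model already exported to a placeholder path and an illustrative prompt:

    # Sketch only: deterministic generation as genai_gen_answer now configures it.
    import openvino_genai

    pipe = openvino_genai.LLMPipeline("/path/to/exported/model", "CPU")  # placeholder path
    config = openvino_genai.GenerationConfig()
    config.max_new_tokens = 128
    config.do_sample = False  # greedy decoding -> reproducible outputs across runs
    print(pipe.generate("What is OpenVINO?", config))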