diff --git a/.github/workflows/llm_bench-python.yml b/.github/workflows/llm_bench-python.yml
index 8356805e19..0b22097e68 100644
--- a/.github/workflows/llm_bench-python.yml
+++ b/.github/workflows/llm_bench-python.yml
@@ -114,14 +114,14 @@ jobs:
       - name: Test OpenVINO/LCM_Dreamshaper_v7-int8-ov on Linux Optimum Intel
         run: |
           huggingface-cli download OpenVINO/LCM_Dreamshaper_v7-int8-ov --local-dir ov_models/lcm_dreamshaper_v7
-          python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --optimum -ic 4
+          python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --optimum --num_steps 4
       - name: Test OpenVINO/LCM_Dreamshaper_v7-int8-ov on Linux with GenAI
         run: |
-          python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 -ic 4
+          python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --num_steps 4
       - name: Test OpenVINO/LCM_Dreamshaper_v7-int8-ov on Linux with GenAI and LoRA
         run: |
           wget -O ./ov_models/soulcard.safetensors https://civitai.com/api/download/models/72591
-          python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --lora ./ov_models/soulcard.safetensors --lora_alphas 0.7 -ic 4
+          python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --lora ./ov_models/soulcard.safetensors --lora_alphas 0.7 --num_steps 4
           rm -rf ./ov_models/lcm_dreamshaper_v7/
       - name: Test TinyLlama-1.1B-Chat-v1.0 in Speculative Decoding mode on Linux
         run: |
diff --git a/tools/llm_bench/benchmark.py b/tools/llm_bench/benchmark.py
index 5fa22497c1..39b6306e7f 100644
--- a/tools/llm_bench/benchmark.py
+++ b/tools/llm_bench/benchmark.py
@@ -158,7 +158,9 @@ def get_argprser():
     parser.add_argument('--set_torch_thread', default=0, type=num_infer_count_type, help='Set the number of Torch thread. ')
     parser.add_argument('-tl', '--tokens_len', type=int, required=False, help='The length of tokens print each time in streaming mode, chunk streaming.')
     parser.add_argument('--streaming', action='store_true', help='Set whether to use streaming mode, only applicable to LLM.')
-
+    parser.add_argument("--num_steps", type=int, required=False, help="Number of inference steps for image generation")
+    parser.add_argument("--height", type=int, required=False, help="Generated image height. Applicable only for Image Generation.")
+    parser.add_argument("--width", type=int, required=False, help="Generated image width. Applicable only for Image Generation.")
     return parser.parse_args()
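Side note, not part of the patch: because the three new options are declared `required=False` with no `default`, argparse leaves them as `None` when omitted, which is what keeps existing runs behaving as before. A minimal standalone sketch of that behavior (the `add_argument` calls are copied from the hunk above; everything else is illustrative):

```python
# Sketch only: mirrors the new flags added to get_argprser(), not repo code.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--num_steps", type=int, required=False, help="Number of inference steps for image generation")
parser.add_argument("--height", type=int, required=False, help="Generated image height. Applicable only for Image Generation.")
parser.add_argument("--width", type=int, required=False, help="Generated image width. Applicable only for Image Generation.")

# Omitted flags parse to None, so downstream code can tell "not set" apart.
args = parser.parse_args([])
assert args.num_steps is None and args.height is None and args.width is None

# The value used by the CI steps above.
args = parser.parse_args(["--num_steps", "4"])
assert args.num_steps == 4
```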
diff --git a/tools/llm_bench/llm_bench_utils/model_utils.py b/tools/llm_bench/llm_bench_utils/model_utils.py
index 78f72147c7..b3e2f23f0b 100644
--- a/tools/llm_bench/llm_bench_utils/model_utils.py
+++ b/tools/llm_bench/llm_bench_utils/model_utils.py
@@ -97,6 +97,9 @@ def analyze_args(args):
     model_args['prompt'] = args.prompt
     model_args['prompt_file'] = args.prompt_file
     model_args['infer_count'] = args.infer_count
+    model_args["num_steps"] = args.num_steps
+    model_args["height"] = args.height
+    model_args["width"] = args.width
     model_args['images'] = args.images
     model_args['seed'] = args.seed
     model_args['mem_consumption'] = args.memory_consumption
diff --git a/tools/llm_bench/task/image_generation.py b/tools/llm_bench/task/image_generation.py
index 7f43afe6e2..65ca29c263 100644
--- a/tools/llm_bench/task/image_generation.py
+++ b/tools/llm_bench/task/image_generation.py
@@ -25,10 +25,10 @@
 stable_diffusion_hook = StableDiffusionHook()


-def collects_input_args(image_param, model_type, model_name, infer_count=None, callback=None):
+def collects_input_args(image_param, model_type, model_name, infer_count=None, height=None, width=None, callback=None):
     input_args = {}
-    input_args["width"] = image_param.get('width', DEFAULT_IMAGE_WIDTH)
-    input_args["height"] = image_param.get('height', DEFAULT_IMAGE_HEIGHT)
+    input_args["width"] = image_param.get('width', width or DEFAULT_IMAGE_WIDTH)
+    input_args["height"] = image_param.get('height', height or DEFAULT_IMAGE_HEIGHT)
     if infer_count is None:
         input_args["num_inference_steps"] = image_param.get('steps', DEFAULT_INFERENCE_STEPS if 'lcm' not in model_name else LCM_DEFAULT_INFERENCE_STEPS)
     else:
@@ -60,7 +60,7 @@ def collects_input_args(image_param, model_type, model_name, infer_count=None, callback=None):
 def run_image_generation(image_param, num, image_id, pipe, args, iter_data_list, proc_id, mem_consumption, callback=None):
     set_seed(args['seed'])
     input_text = image_param['prompt']
-    input_args = collects_input_args(image_param, args['model_type'], args['model_name'], args["infer_count"])
+    input_args = collects_input_args(image_param, args['model_type'], args['model_name'], args["num_steps"], args.get("height"), args.get("width"))
     out_str = f"Input params: Batch_size={args['batch_size']}, " \
         f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}"
     if 'guidance_scale' in input_args:
args.get("height"), args.get("width"), callback) out_str = f"Input params: Batch_size={args['batch_size']}, " \ f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}" if 'guidance_scale' in input_args: