Commit
temporary use num_steps instead of infer_count for image generation
eaidova committed Dec 24, 2024
1 parent db28c8c commit d5bf817
Showing 4 changed files with 15 additions and 10 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/llm_bench-python.yml
@@ -114,14 +114,14 @@ jobs:
- name: Test OpenVINO/LCM_Dreamshaper_v7-int8-ov on Linux Optimum Intel
run: |
huggingface-cli download OpenVINO/LCM_Dreamshaper_v7-int8-ov --local-dir ov_models/lcm_dreamshaper_v7
-python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --optimum -ic 4
+python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --optimum --num_steps 4
- name: Test OpenVINO/LCM_Dreamshaper_v7-int8-ov on Linux with GenAI
run: |
-python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 -ic 4
+python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --num_steps 4
- name: Test OpenVINO/LCM_Dreamshaper_v7-int8-ov on Linux with GenAI and LoRA
run: |
wget -O ./ov_models/soulcard.safetensors https://civitai.com/api/download/models/72591
-python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --lora ./ov_models/soulcard.safetensors --lora_alphas 0.7 -ic 4
+python ./tools/llm_bench/benchmark.py -m ./ov_models/lcm_dreamshaper_v7/ -pf ./tools/llm_bench/prompts/stable-diffusion.jsonl -d cpu -n 1 --lora ./ov_models/soulcard.safetensors --lora_alphas 0.7 --num_steps 4
rm -rf ./ov_models/lcm_dreamshaper_v7/
- name: Test TinyLlama-1.1B-Chat-v1.0 in Speculative Decoding mode on Linux
run: |
4 changes: 3 additions & 1 deletion tools/llm_bench/benchmark.py
@@ -158,7 +158,9 @@ def get_argprser():
parser.add_argument('--set_torch_thread', default=0, type=num_infer_count_type, help='Set the number of Torch thread. ')
parser.add_argument('-tl', '--tokens_len', type=int, required=False, help='The length of tokens print each time in streaming mode, chunk streaming.')
parser.add_argument('--streaming', action='store_true', help='Set whether to use streaming mode, only applicable to LLM.')
-
+parser.add_argument("--num_steps", type=int, required=False, help="Number of inference steps for image generation")
+parser.add_argument("--height", type=int, required=False, help="Generated image height. Applicable only for Image Generation.")
+parser.add_argument("--width", type=int, required=False, help="Generated image width. Applicable only for Image Generation.")
return parser.parse_args()


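For reference, the three new options parse independently of the existing -ic/--infer_count flag, which keeps its LLM meaning. Below is a minimal standalone sketch of just these options; the real get_argprser in tools/llm_bench/benchmark.py defines many more arguments, and the sample argv mirrors the updated workflow calls above.

import argparse

# Standalone sketch: only the three options this commit adds.
parser = argparse.ArgumentParser()
parser.add_argument("--num_steps", type=int, required=False,
                    help="Number of inference steps for image generation")
parser.add_argument("--height", type=int, required=False,
                    help="Generated image height. Applicable only for Image Generation.")
parser.add_argument("--width", type=int, required=False,
                    help="Generated image width. Applicable only for Image Generation.")

# Mirrors the updated CI call: only --num_steps is given, so height and
# width stay None and the downstream defaults apply.
args = parser.parse_args(["--num_steps", "4"])
print(args.num_steps, args.height, args.width)  # -> 4 None None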
3 changes: 3 additions & 0 deletions tools/llm_bench/llm_bench_utils/model_utils.py
@@ -97,6 +97,9 @@ def analyze_args(args):
model_args['prompt'] = args.prompt
model_args['prompt_file'] = args.prompt_file
model_args['infer_count'] = args.infer_count
model_args["num_steps"] = args.num_steps
model_args["height"] = args.height
model_args["width"] = args.width
model_args['images'] = args.images
model_args['seed'] = args.seed
model_args['mem_consumption'] = args.memory_consumption
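analyze_args forwards the parsed values verbatim, so flags the user omits reach the task code as None. A tiny sketch of that pass-through (Namespace stands in for the parsed CLI args, and the real analyze_args fills many more keys):

from argparse import Namespace

def forward_image_args(args):
    # Mirrors the three new assignments in analyze_args: values pass
    # through unchanged, including None for omitted flags.
    model_args = {}
    model_args["num_steps"] = args.num_steps
    model_args["height"] = args.height
    model_args["width"] = args.width
    return model_args

print(forward_image_args(Namespace(num_steps=4, height=None, width=None)))
# -> {'num_steps': 4, 'height': None, 'width': None}

This None pass-through is what lets image_generation.py use args.get("height") and the `or` fallbacks shown in the next file.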
12 changes: 6 additions & 6 deletions tools/llm_bench/task/image_generation.py
@@ -25,10 +25,10 @@
stable_diffusion_hook = StableDiffusionHook()


-def collects_input_args(image_param, model_type, model_name, infer_count=None, callback=None):
+def collects_input_args(image_param, model_type, model_name, infer_count=None, height=None, width=None, callback=None):
input_args = {}
input_args["width"] = image_param.get('width', DEFAULT_IMAGE_WIDTH)
input_args["height"] = image_param.get('height', DEFAULT_IMAGE_HEIGHT)
input_args["width"] = image_param.get('width', width or DEFAULT_IMAGE_WIDTH)
input_args["height"] = image_param.get('height', height or DEFAULT_IMAGE_HEIGHT)
if infer_count is None:
input_args["num_inference_steps"] = image_param.get('steps', DEFAULT_INFERENCE_STEPS if 'lcm' not in model_name else LCM_DEFAULT_INFERENCE_STEPS)
else:
@@ -60,7 +60,7 @@ def collects_input_args(image_param, model_type, model_name, infer_count=None, c
def run_image_generation(image_param, num, image_id, pipe, args, iter_data_list, proc_id, mem_consumption, callback=None):
set_seed(args['seed'])
input_text = image_param['prompt']
-input_args = collects_input_args(image_param, args['model_type'], args['model_name'], args["infer_count"])
+input_args = collects_input_args(image_param, args['model_type'], args['model_name'], args["num_steps"], args.get("height"), args.get("width"))
out_str = f"Input params: Batch_size={args['batch_size']}, " \
f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}"
if 'guidance_scale' in input_args:
@@ -84,7 +84,7 @@ def run_image_generation(image_param, num, image_id, pipe, args, iter_data_list,
for bs_idx, in_text in enumerate(input_text_list):
llm_bench_utils.output_file.output_image_input_text(in_text, args, image_id, bs_idx, proc_id)
start = time.perf_counter()
-res = pipe(input_text_list, **input_args).images
+res = pipe(input_text_list, **input_args, num_images_per_prompt=2).images
end = time.perf_counter()
if (args['mem_consumption'] == 1 and num == 0) or args['mem_consumption'] == 2:
mem_consumption.end_collect_momory_consumption()
@@ -123,7 +123,7 @@ def run_image_generation(image_param, num, image_id, pipe, args, iter_data_list,
def run_image_generation_genai(image_param, num, image_id, pipe, args, iter_data_list, proc_id, mem_consumption, callback=None):
set_seed(args['seed'])
input_text = image_param['prompt']
-input_args = collects_input_args(image_param, args['model_type'], args['model_name'], args["infer_count"], callback)
+input_args = collects_input_args(image_param, args['model_type'], args['model_name'], args["num_steps"], args.get("height"), args.get("width"), callback)
out_str = f"Input params: Batch_size={args['batch_size']}, " \
f"steps={input_args['num_inference_steps']}, width={input_args['width']}, height={input_args['height']}"
if 'guidance_scale' in input_args:
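The image_param.get('width', width or DEFAULT_IMAGE_WIDTH) pattern resolves the image size in three steps: a per-prompt value from the JSONL prompt file wins, then the new --height/--width CLI flag, then the module default. A minimal sketch of that resolution order (the 512 constants are assumptions for illustration, not values taken from the diff):

# Assumed defaults for illustration; the real constants are defined in
# tools/llm_bench/task/image_generation.py.
DEFAULT_IMAGE_WIDTH = 512
DEFAULT_IMAGE_HEIGHT = 512

def resolve_size(image_param, cli_height=None, cli_width=None):
    # Prompt-file value first, then the CLI flag, then the default.
    width = image_param.get('width', cli_width or DEFAULT_IMAGE_WIDTH)
    height = image_param.get('height', cli_height or DEFAULT_IMAGE_HEIGHT)
    return width, height

print(resolve_size({'width': 768}, cli_height=640, cli_width=1024))  # -> (768, 640): file beats flag
print(resolve_size({}))                                              # -> (512, 512): defaults

One consequence of the `or` fallback: a flag value of 0 is treated like an omitted flag, which is harmless here since 0 is not a usable image size.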
