4 files changed in examples/cpu/llm/inference: +8 −8 lines.
@@ -292,7 +292,7 @@
 
 def load_image(image_file):
     if image_file.startswith("http://") or image_file.startswith("https://"):
-        response = requests.get(image_file, verify=False)
+        response = requests.get(image_file)
         image = Image.open(BytesIO(response.content)).convert("RGB")
    else:
        image = Image.open(image_file).convert("RGB")
@@ -400,7 +400,7 @@ def trace_handler(prof):
 if not hasattr(model.config, "token_latency"):
     model.config.token_latency = True
 if model_type == "git":
-    prompt = Image.open(requests.get(args.image_url, stream=True, verify=False).raw)
+    prompt = Image.open(requests.get(args.image_url, stream=True).raw)
     generate_kwargs.pop("min_new_tokens", None)
 elif model_type == "llava":
     if args.prompt is not None:
@@ -557,7 +557,7 @@ def write_checkpoints_json():
     from PIL import Image
     import requests
 
-    prompt = Image.open(requests.get(args.image_url, stream=True, verify=False).raw)
+    prompt = Image.open(requests.get(args.image_url, stream=True).raw)
     inputs = [prompt] * args.batch_size
     generate_kwargs.pop("min_new_tokens", None)
 elif model_type == "llava":
@@ -567,7 +567,7 @@ def write_checkpoints_json():
 
 def load_image(image_file):
     if image_file.startswith("http://") or image_file.startswith("https://"):
-        response = requests.get(image_file, verify=False)
+        response = requests.get(image_file)
         image = Image.open(BytesIO(response.content)).convert("RGB")
    else:
        image = Image.open(image_file).convert("RGB")
@@ -239,7 +239,7 @@
 
 def load_image(image_file):
     if image_file.startswith("http://") or image_file.startswith("https://"):
-        response = requests.get(image_file, verify=False)
+        response = requests.get(image_file)
         image = Image.open(BytesIO(response.content)).convert("RGB")
    else:
        image = Image.open(image_file).convert("RGB")
@@ -299,7 +299,7 @@ def trace_handler(prof):
 if not hasattr(model.config, "token_latency"):
     model.config.token_latency = True
 if model_type == "git":
-    prompt = Image.open(requests.get(args.image_url, stream=True, verify=False).raw)
+    prompt = Image.open(requests.get(args.image_url, stream=True).raw)
     generate_kwargs.pop("min_new_tokens", None)
 elif model_type == "llava":
     if args.prompt is not None:
@@ -362,7 +362,7 @@
 
 def load_image(image_file):
     if image_file.startswith("http://") or image_file.startswith("https://"):
-        response = requests.get(image_file, verify=False)
+        response = requests.get(image_file)
         image = Image.open(BytesIO(response.content)).convert("RGB")
    else:
        image = Image.open(image_file).convert("RGB")
@@ -1154,7 +1154,7 @@ def calib_func(prepared_model):
 )
 
 if model.name == "git":
-    prompt = Image.open(requests.get(args.image_url, stream=True, verify=False).raw)
+    prompt = Image.open(requests.get(args.image_url, stream=True).raw)
 elif model.name == "llava":
     if args.prompt is not None:
         prompt = args.prompt
You can’t perform that action at this time.
0 commit comments