Going straight to the code.

  • Load the model and run inference
import onnxruntime as ort
import torch
import time

import cv2
import numpy as np


def time_sync():
    # synchronize any pending CUDA work so the timing is accurate
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()

ort_session = ort.InferenceSession('./semseg.onnx')
onnx_input_name = ort_session.get_inputs()[0].name
output_names = [o.name for o in ort_session.get_outputs()]  # collect all output tensor names

img = cv2.imread('./demo.png')
img = cv2.resize(img, (2048,1024)) # height = 1024, width = 2048
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
input_blob = np.expand_dims(img,axis=0).astype(np.float32)

total_time = 0.0
for i in range(10):  # run inference 10 times and report the average latency
    start_t = time_sync()
    onnx_result = ort_session.run(output_names, input_feed={onnx_input_name: input_blob})
    end_t = time_sync()
    elapsed_ms = (end_t - start_t) * 1e3
    total_time += elapsed_ms
    print(f'{i + 1} Speed: {elapsed_ms:.1f}ms')

print(f'Avg Speed: {total_time / 10.0:.1f}ms')
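
As an aside, if the GPU build of onnxruntime (the onnxruntime-gpu package) is installed, the session can be created with the CUDA execution provider instead of the default CPU one. This is only a minimal sketch; the timing loop above does not depend on it:

# Prefer CUDA when the GPU package is installed, otherwise fall back to CPU
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] \
    if 'CUDAExecutionProvider' in ort.get_available_providers() else ['CPUExecutionProvider']
ort_session = ort.InferenceSession('./semseg.onnx', providers=providers)
print(ort_session.get_providers())  # confirms which providers are actually in use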


      
  • Save the semantic segmentation result as a mask image
# draw the semantic segmentation mask image
img2 = cv2.imread('./demo.png')
img2 = cv2.resize(img2, (2048, 1024))  # any 1024x2048 3-channel canvas works here

# Cityscapes 19-class palette (RGB order, indexed by predicted class id)
mCityscapesColors = [
    (128, 64,128), (244, 35,232), ( 70, 70, 70), (102,102,156),
    (190,153,153), (153,153,153), (250,170, 30), (220,220,  0),
    (107,142, 35), (152,251,152), ( 70,130,180), (220, 20, 60),
    (255,  0,  0), (  0,  0,142), (  0,  0, 70), (  0, 60,100),
    (  0, 80,100), (  0,  0,230), (119, 11, 32)]

for h in range(0, img2.shape[0]):
    for w in range(0, img2.shape[1]):
        img2[h, w] = mCityscapesColors[onnx_result[0][0][0][h][w]]
# the palette is RGB while cv2.imwrite expects BGR, so convert before saving
cv2.imwrite('./mask_semseg.png', cv2.cvtColor(img2, cv2.COLOR_RGB2BGR))
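
The per-pixel Python loop above is slow for a 1024x2048 image. A vectorized NumPy lookup is a much faster sketch of the same step, assuming onnx_result[0] has shape (1, 1, H, W) as implied by the indexing above; the output filename is just illustrative:

pred = np.asarray(onnx_result[0][0][0], dtype=np.int64)  # (H, W) class ids
palette = np.array(mCityscapesColors, dtype=np.uint8)    # (19, 3) RGB palette
mask_rgb = palette[pred]                                 # (H, W, 3) via fancy indexing
cv2.imwrite('./mask_semseg_fast.png', cv2.cvtColor(mask_rgb, cv2.COLOR_RGB2BGR))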
  • Count the classes present in the segmentation
a = onnx_result[0][0][0]
list_b = list(np.array(a).flatten())
print(set(list_b))  # the set of class ids present in the prediction
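
np.unique gives the same information plus per-class pixel counts in one call; a short sketch reusing the output layout from above:

pred = np.asarray(onnx_result[0][0][0])
classes, counts = np.unique(pred, return_counts=True)
for c, n in zip(classes, counts):
    print(f'class {c}: {n} pixels')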
