高通跃龙QCS9100平台上工业缺陷检测实战(1): 从摄像头到端侧推理的最小闭环
前言：本系列将聚焦在高通跃龙 QCS9100 平台上实施工业缺陷检测。本文是第一篇，我们在 QCS9100 平台将缺陷检测链路完整跑通。你跑完这篇应该能看到两件非常具体的东西：runs/ 里不断冒出带框的图片；终端里能看到每次推理大概多少毫秒、FPS 大概多少。本文先使用 ONNXRuntime（CPU）完成闭环，目的是优先把"摄像头 / 预处理 / 后处理 / 可视化输出"链路固定下来，在此基础上第二篇再切换到 QNN/HTP。

## 1. 运行结果
- 输入：USB 摄像头（一般是 /dev/video0），也可以换成本地图片/视频先自测
- 输出：runs/ 下持续生成带框图片；控制台持续打印推理耗时（ms）和 FPS（滑动窗口）

## 2. 环境准备
### 2.1 硬件
- Thundercomm 的基于高通跃龙 QCS9100 平台（Linux OS）的 AI 边缘盒子
- USB UVC 摄像头（最省事；MIPI 相机后续工程化再展开）

### 2.2 软件
- Python 3.10
- GStreamer（用于验证相机与定位问题）
- Python 包：numpy、opencv-python-headless、onnxruntime

查看基础信息，确认工具与版本：

```bash
uname -a
python3 -V
gst-launch-1.0 --version
```

## 3. 摄像头验证
确认摄像头节点：

```bash
ls -l /dev/video*
```

无显示环境也能验证（只看 fps）：

```bash
gst-launch-1.0 v4l2src device=/dev/video0 ! videoconvert ! \
  fpsdisplaysink video-sink=fakesink text-overlay=false sync=false -v
```

如果这一步不稳定（掉帧/卡住/打不开），请先排查驱动/权限/像素格式问题，再继续后续推理步骤。本文第 7 节提供了排查清单。

## 4. 安装依赖
Ubuntu/Debian 类系统示例：

```bash
sudo apt update
sudo apt install -y python3-pip python3-venv
mkdir -p ~/defect_demo
cd ~/defect_demo
python3 -m venv venv
source venv/bin/activate
pip install -U pip
pip install numpy opencv-python-headless onnxruntime
```

## 5. 模型准备
你最终肯定会用自训练的缺陷模型。但在这一步先用通用检测模型（比如 YOLO）把流程跑通——流程没跑通前，模型对不对根本无从谈起。

在 PC 上导出 ONNX（示例）：

```bash
pip install ultralytics
yolo export model=yolov8n.pt format=onnx imgsz=640 opset=12
```

得到 yolov8n.onnx 后拷贝到设备端：

```bash
mkdir -p ~/defect_demo/models
scp yolov8n.onnx user@device-ip:/home/user/defect_demo/models/
```

提示：若你已自训练缺陷模型，直接把文件命名为 models/defect.onnx，下文也同样适用。

## 6.
推理与可视化

在设备端创建 infer_cam_onnx.py（可直接复制粘贴运行）：

```python
import os
import time
from pathlib import Path

import cv2
import numpy as np
import onnxruntime as ort

MODEL_PATH = "models/yolov8n.onnx"  # 换成你的缺陷模型也可以
DEVICE = "/dev/video0"
IMG_SIZE = 640
CONF_THRES = 0.25
IOU_THRES = 0.45
SAVE_DIR = Path("runs")
SAVE_DIR.mkdir(parents=True, exist_ok=True)


def letterbox(im, new_shape=640, color=(114, 114, 114)):
    h, w = im.shape[:2]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    r = min(new_shape[0] / h, new_shape[1] / w)
    nh, nw = int(round(h * r)), int(round(w * r))
    im_resized = cv2.resize(im, (nw, nh), interpolation=cv2.INTER_LINEAR)
    top = (new_shape[0] - nh) // 2
    bottom = new_shape[0] - nh - top
    left = (new_shape[1] - nw) // 2
    right = new_shape[1] - nw - left
    im_padded = cv2.copyMakeBorder(im_resized, top, bottom, left, right,
                                   cv2.BORDER_CONSTANT, value=color)
    return im_padded, r, (left, top)


def xywh2xyxy(x):
    y = x.copy()
    y[..., 0] = x[..., 0] - x[..., 2] / 2
    y[..., 1] = x[..., 1] - x[..., 3] / 2
    y[..., 2] = x[..., 0] + x[..., 2] / 2
    y[..., 3] = x[..., 1] + x[..., 3] / 2
    return y


def nms(boxes, scores, iou_thres):
    x1, y1, x2, y2 = boxes.T
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        if order.size == 1:
            break
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1)
        h = np.maximum(0.0, yy2 - yy1)
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter + 1e-9)
        inds = np.where(iou <= iou_thres)[0]
        order = order[inds + 1]
    return keep


def postprocess_yolov8(outputs, conf_thres=0.25, iou_thres=0.45):
    # 常见 YOLOv8 ONNX 输出形态之一
    # outputs[0] shape: (1, 84, 8400) 或 (1, 8400, 84)
    # 84 = 4(Box) + num_cls
    out = outputs[0]
    out = np.squeeze(out, axis=0)
    if out.shape[0] < out.shape[1]:
        out = out.T
    boxes = out[:, :4]
    cls_scores = out[:, 4:]
    cls_conf = cls_scores.max(axis=1)
    cls_id = cls_scores.argmax(axis=1)
    mask = cls_conf > conf_thres
    boxes, cls_conf, cls_id = boxes[mask], cls_conf[mask], cls_id[mask]
    if boxes.shape[0] == 0:
        return np.empty((0, 4)), np.empty((0,)), np.empty((0,), dtype=np.int32)
    boxes = xywh2xyxy(boxes)
    keep = nms(boxes, cls_conf, iou_thres)
    return boxes[keep], cls_conf[keep], cls_id[keep].astype(np.int32)


def main():
    if not os.path.exists(MODEL_PATH):
        raise FileNotFoundError(f"模型不存在: {MODEL_PATH}")
    sess = ort.InferenceSession(MODEL_PATH, providers=["CPUExecutionProvider"])
    in_name = sess.get_inputs()[0].name
    cap = cv2.VideoCapture(DEVICE)
    if not cap.isOpened():
        raise RuntimeError(f"无法打开摄像头: {DEVICE}（检查权限/驱动/占用）")
    frame_id = 0
    fps_win = []
    print("开始推理，按 Ctrl+C 退出。输出保存到 runs/ 目录。")
    try:
        while True:
            ok, frame = cap.read()
            if not ok:
                print("读帧失败，退出。")
                break
            img, r, (padw, padh) = letterbox(frame, IMG_SIZE)
            rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            inp = rgb.astype(np.float32) / 255.0
            inp = np.transpose(inp, (2, 0, 1))[None, ...]
            t1 = time.time()
            outputs = sess.run(None, {in_name: inp})
            boxes, scores, cls_ids = postprocess_yolov8(outputs, CONF_THRES, IOU_THRES)
            t2 = time.time()
            # 映射回原图坐标
            if boxes.shape[0] > 0:
                boxes[:, [0, 2]] -= padw
                boxes[:, [1, 3]] -= padh
                boxes /= r
                boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, frame.shape[1] - 1)
                boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, frame.shape[0] - 1)
            vis = frame.copy()
            for (x1, y1, x2, y2), sc, cid in zip(boxes, scores, cls_ids):
                x1, y1, x2, y2 = map(int, [x1, y1, x2, y2])
                cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(vis, f"id{cid} {sc:.2f}", (x1, max(0, y1 - 6)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            infer_ms = (t2 - t1) * 1000.0
            fps = 1.0 / max(1e-6, (t2 - t1))
            fps_win.append(fps)
            if len(fps_win) > 30:
                fps_win.pop(0)
            cv2.putText(vis, f"Infer {infer_ms:.1f}ms FPS~{np.mean(fps_win):.1f}",
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)
            # 限制写盘频率，避免 I/O 拖慢
            if frame_id % 20 == 0:
                out_path = SAVE_DIR / f"frame_{frame_id:06d}.jpg"
                cv2.imwrite(str(out_path), vis)
                print(f"save: {out_path} det={len(boxes)} fps~{np.mean(fps_win):.1f}")
            frame_id += 1
    except KeyboardInterrupt:
        pass
    finally:
        cap.release()
        print("结束。")


if __name__ == "__main__":
    main()
```

运行：

```bash
cd ~/defect_demo
source venv/bin/activate
python3 infer_cam_onnx.py
```

## 7.
常见问题

- 摄像头打不开：先用第 3 节的 gst-launch-1.0 验证，确认是否被占用、权限是否足够。
- 能取流但推理很慢：将写盘频率降到每 50 帧一张；将输入尺寸改小（320/416）验证；加速部分在第二篇展开。
- 输出框太多导致后处理慢：提高 CONF_THRES；必要时先做 topK 再 NMS（第四篇会展开）。

## 下一篇介绍
至此，我们已经完成了高通 QCS9100 上从摄像头取流、ONNX 模型推理、后处理到结果可视化的最小闭环。**下一篇**我们将切换到 QNN/HTP，真正发挥 NPU 的加速能力，敬请期待！

版权声明：本文为博主原创文章，遵循 CC 4.0 BY-SA 版权协议，转载请附上原文出处链接和本声明。
本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/2409231.html
如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!