Deprecated function, please use draw_string_advanced.Deprecated function, please use draw_string_advanced.total took 73.00 ms为什么模型引用代码力会提示这个?

Viewed 30

Deprecated function, please use draw_string_advanced.Deprecated function, please use draw_string_advanced.total took 73.00 ms

2 Answers

你好,原本得image.draw_string 效果比较差,如果可以接受这个效果,也可以使用这个接口的。

import os
import ujson
import aicube
from libs.PipeLine import ScopedTiming
from libs.Utils import *
from media.sensor import *
from media.display import *
from media.media import *
import nncase_runtime as nn
import ulab.numpy as np
import image
import gc
from machine import UART
from machine import FPIOA

import time

舵机控制初始化

fpioa = FPIOA()
fpioa.set_function(3,FPIOA.UART1_TXD)
fpioa.set_function(4,FPIOA.UART1_RXD)
uart = UART(UART.UART1, 115200)
sensor_id=0

系统配置

display_mode = "hdmi"
TRACK_THRESHOLD = 50 # 像素偏移阈值
SERVO_INIT = {1: 500, 2: 500, 3: 500} # 舵机初始位置
CAM_CHN_ID_0 = 0 # 主显示通道
CAM_CHN_ID_1 = 1 # AI处理通道

显示分辨率配置

if display_mode == "lcd":
DISPLAY_WIDTH = ALIGN_UP(800, 16)
DISPLAY_HEIGHT = 480
else:
DISPLAY_WIDTH = ALIGN_UP(1920, 16)
DISPLAY_HEIGHT = 1080

OUT_RGB888P_WIDTH = ALIGN_UP(1280, 16)
OUT_RGB888P_HEIGH = 720

root_path = "/sdcard/mp_deployment_source/"
config_path = root_path + "deploy_config.json"
debug_mode = 1

def two_side_pad_param(input_size, output_size):
ratio_w = output_size[0] / input_size[0]
ratio_h = output_size[1] / input_size[1]
ratio = min(ratio_w, ratio_h)
new_w = int(ratio * input_size[0])
new_h = int(ratio * input_size[1])
dw = (output_size[0] - new_w) / 2
dh = (output_size[1] - new_h) / 2
top = int(round(dh - 0.1))
bottom = int(round(dh + 0.1))
left = int(round(dw - 0.1))
right = int(round(dw - 0.1))
return top, bottom, left, right, ratio

def read_deploy_config(config_path):
with open(config_path, 'r') as f:
return ujson.load(f)

def build_servo_packet(servo_id, time_ms, position):
if servo_id > 253 or position > 1000:
return None
packet = bytearray(10)
packet[0:2] = b'\x55\x55'
packet[2] = servo_id
packet[3] = 7
packet[4] = 0x01
packet[5] = position & 0xFF
packet[6] = (position >> 8) & 0xFF
packet[7] = time_ms & 0xFF
packet[8] = (time_ms >> 8) & 0xFF
packet[9] = (~sum(packet[2:9])) & 0xFF
return packet

def servo_control(servo_id, position, move_time=500):
packet = build_servo_packet(servo_id, move_time, int(position))
if packet:
uart.write(packet)

def servo_self_test():
# 水平舵机自检
servo_control(1, 1000, 1000)
time.sleep(1)
servo_control(1, 0, 1000)
time.sleep(1)

# 垂直舵机对称自检
servo_control(2, 0, 1000)
servo_control(3, 1000, 1000)
time.sleep(1)
servo_control(2, 1000, 1000)
servo_control(3, 0, 1000)
time.sleep(1)

# 返回初始位置
for sid, pos in SERVO_INIT.items():
    servo_control(sid, pos)
time.sleep(1)

def calculate_center(box):
x1, y1, x2, y2 = box[2], box[3], box[4], box[5]
center_x = int((x1 + x2) / 2 * DISPLAY_WIDTH / OUT_RGB888P_WIDTH)
center_y = int((y1 + y2) / 2 * DISPLAY_HEIGHT / OUT_RGB888P_HEIGH)
return center_x, center_y

def track_object(center_x, center_y):
screen_center_x = DISPLAY_WIDTH // 2
screen_center_y = DISPLAY_HEIGHT // 2
dx = center_x - screen_center_x
dy = center_y - screen_center_y

# 水平跟踪
if abs(dx) > TRACK_THRESHOLD:
    adj = (dx / TRACK_THRESHOLD) * 50
    new_pos = max(0, min(1000, SERVO_INIT[1] + adj))
    servo_control(1, new_pos, 200)

# 垂直跟踪(对称运动)
if abs(dy) > TRACK_THRESHOLD:
    vertical_adj = (dy / TRACK_THRESHOLD) * 50
    servo2_pos = max(0, min(1000, SERVO_INIT[2] + vertical_adj))
    servo3_pos = max(0, min(1000, SERVO_INIT[3] - vertical_adj))
    servo_control(2, servo2_pos, 200)
    servo_control(3, servo3_pos, 200)

def detection():
print("启动检测系统...")
deploy_conf = read_deploy_config(config_path)
kmodel_name = deploy_conf["kmodel_path"]
labels = deploy_conf["categories"]
confidence_threshold = deploy_conf["confidence_threshold"]
nms_threshold = deploy_conf["nms_threshold"]
img_size = deploy_conf["img_size"]
num_classes = deploy_conf["num_classes"]
color_four = get_colors(num_classes)
nms_option = deploy_conf["nms_option"]
model_type = deploy_conf["model_type"]

if model_type == "AnchorBaseDet":
    anchors = sum(deploy_conf["anchors"], [])

kpu = nn.kpu()
kpu.load_kmodel(root_path + kmodel_name)

ai2d = nn.ai2d()
ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)
top, bottom, left, right, _ = two_side_pad_param(
    [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH], img_size)
ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114])
ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH],
                        [1,3,img_size[1],img_size[0]])

sensor = Sensor(id=sensor_id)
sensor.reset()
sensor.set_hmirror(False)
sensor.set_vflip(False)
sensor.set_framesize(width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT)
sensor.set_pixformat(PIXEL_FORMAT_YUV_SEMIPLANAR_420)
sensor.set_framesize(width=OUT_RGB888P_WIDTH, height=OUT_RGB888P_HEIGH, chn=CAM_CHN_ID_2)
sensor.set_pixformat(PIXEL_FORMAT_RGB_888_PLANAR, chn=CAM_CHN_ID_2)

sensor_bind_info = sensor.bind_info(0, 0, CAM_CHN_ID_0)
Display.bind_layer(**sensor_bind_info, layer=Display.LAYER_VIDEO1)

if display_mode == "lcd":
    Display.init(Display.ST7701, to_ide=True)
else:
    Display.init(Display.LT9611, to_ide=True)

osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888)
MediaManager.init()
sensor.run()

print("执行舵机自检...")
servo_self_test()
print("自检完成,进入跟踪模式")

ai2d_output_tensor = nn.from_numpy(np.ones((1,3,img_size[1],img_size[0]), dtype=np.uint8))

while True:
    with ScopedTiming("total", debug_mode > 0):
        rgb888p_img = sensor.snapshot(chn=CAM_CHN_ID_2)
        if rgb888p_img.format() == image.RGBP888:
            ai2d_input = rgb888p_img.to_numpy_ref()
            ai2d_input_tensor = nn.from_numpy(ai2d_input)

            ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor)
            kpu.set_input_tensor(0, ai2d_output_tensor)
            kpu.run()

            outputs = [kpu.get_output_tensor(i).to_numpy().flatten()
                      for i in range(kpu.outputs_size())]

            det_boxes = aicube.anchorbasedet_post_process(
                outputs[0], outputs[1], outputs[2], img_size,
                [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH], [8,16,32],
                num_classes, confidence_threshold, nms_threshold,
                anchors, nms_option)

            osd_img.clear()
            if det_boxes:
                main_box = max(det_boxes, key=lambda x: (x[4]-x[2])*(x[5]-x[3]))
                center_x, center_y = calculate_center(main_box)

                # 显示中心坐标
                osd_img.draw_string(10, 10, f"X:{center_x} Y:{center_y}",
                                  color=(255,0,0), scale=2)

                # 执行跟踪控制
                track_object(center_x, center_y)

                # 绘制检测框
                x = int(main_box[2] * DISPLAY_WIDTH / OUT_RGB888P_WIDTH)
                y = int(main_box[3] * DISPLAY_HEIGHT / OUT_RGB888P_HEIGH)
                w = int((main_box[4]-main_box[2]) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH)
                h = int((main_box[5]-main_box[3]) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGH)
                osd_img.draw_rectangle(x, y, w, h, color=color_four[main_box[0]][1:])
                text = f"{labels[main_box[0]]} {main_box[1]:.2f}"
                osd_img.draw_string(x, y-30, text, color=color_four[main_box[0]][1:])

            Display.show_image(osd_img, 0, 0, Display.LAYER_OSD3)
            gc.collect()

        rgb888p_img = None

sensor.stop()
Display.deinit()
MediaManager.deinit()
nn.shrink_memory_pool()
print("系统关闭")
return 0

if name == "main":
# 初始化舵机位置
for sid, pos in SERVO_INIT.items():
servo_control(sid, pos)
time.sleep(1)
detection()
这个代码里面为什么会提示这个信息呢?