demo.html (抜粋)
<!-- Load PyScript (pinned release) -->
<link rel="stylesheet" href="https://pyscript.net/releases/2025.10.3/core.css">
<script type="module" src="https://pyscript.net/releases/2025.10.3/core.js"></script>

<!-- PyScript configuration and the entry-point Python script -->
<py-config src="./pyscript.toml"></py-config>
<py-script src="./main.py"></py-script>
<!-- Hidden <video> receives the camera MediaStream; results are shown on the canvas.
     NOTE(review): the video is 800x600 but the canvas is 544x408 — drawImage in
     main.py scales the frame to the canvas size; confirm this is intentional. -->
<video id="myCamera" width="800" height="600" autoplay style="display:none;"></video>
<canvas id="canvas" width="544" height="408"></canvas>
main.py (抜粋)
import js, cv2, numpy as np, time, asyncio
from js import ImageData, Uint8ClampedArray
from yolov9wholebody28 import preprocess, postprocess_nms, postprocess_subclass, draw_debug

# (中略: ビデオ・キャンバス要素の取得)

# Prepare YOLOv9-Wholebody28-Refine (28 classes + keypoints).
# The ONNX file is fetched into the in-browser filesystem via pyscript.toml's [[fetch]].
net = cv2.dnn.readNet('model/yolov9_n_wholebody28_refine_0100_1x3x256x320.onnx')
# (width, height) the model expects — matches the 1x3x256x320 input in the filename.
model_input_size = (320, 256)

camera_stream = None  # JS MediaStream handle once the camera is opened
is_running = True     # loop flag for process_frames(); cleared by stop_camera()

async def start_camera():
    """Open the user's camera, attach it to the <video> element and run the loop."""
    global camera_stream
    # Ask the browser for a camera stream (constraints elided in this excerpt).
    stream = await js.navigator.mediaDevices.getUserMedia(...)
    camera_stream = stream
    video.srcObject = stream
    # Hand control to the per-frame inference loop until stop_camera() clears the flag.
    await process_frames()

def stop_camera():
    """Signal the processing loop to exit and release every camera track."""
    global camera_stream, is_running
    is_running = False  # process_frames() observes this flag and returns
    if not camera_stream:
        return
    for track in camera_stream.getTracks():
        track.stop()
    video.srcObject = None

async def process_frames():
    """Per-frame loop: grab a video frame, run YOLOv9 inference, draw results.

    Runs until stop_camera() clears ``is_running``.  The ``await asyncio.sleep(0)``
    at the end of each iteration yields to the browser event loop so the page
    stays responsive.
    """
    while is_running:
        # Grab the current video frame by drawing it onto the canvas,
        # then read the RGBA pixels back.
        context.drawImage(video, 0, 0, canvas.width, canvas.height)
        image_data = context.getImageData(0, 0, canvas.width, canvas.height)

        # Convert the JS Uint8ClampedArray into a NumPy HxWx4 (RGBA) array,
        # then to BGR for OpenCV.
        data = np.array(image_data.data.to_py(), dtype=np.uint8)
        image = data.reshape((canvas.height, canvas.width, 4))
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)

        # Preprocess (BGR->RGB, resize, normalize, NCHW).
        input_image = preprocess(image, model_input_size)

        # Run inference.
        net.setInput(input_image, 'images')
        outputs = net.forward('output0')

        # Post-process: NMS, then sub-class classification.
        # NOTE(review): image_width / image_height are not defined anywhere in
        # this excerpt — presumably set in the elided setup section; verify.
        boxes = postprocess_nms(outputs, (image_width, image_height),
                                   model_input_size, score_th=0.3, nms_th=0.3)
        processed_boxes = postprocess_subclass(image, 0.3, 0.75, boxes, ...)

        # Draw detection/pose results plus the elapsed time.
        # NOTE(review): processing_time is never assigned in the visible code —
        # presumably measured with time.time() in an elided section; confirm.
        image = draw_debug(image, processed_boxes, ...)
        cv2.putText(image, f'{processing_time*1000:.2f} ms', (10, 30), ...)

        # Convert back to RGBA, wrap in a JS ImageData, and blit to the canvas.
        # NOTE(review): width / height here are also undefined in this excerpt —
        # they likely alias canvas.width / canvas.height; verify.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
        buffer = image.flatten().tobytes()
        js_image = ImageData.new(Uint8ClampedArray.new(buffer), width, height)
        context.putImageData(js_image, 0, 0)
        await asyncio.sleep(0)

# Expose stop_camera so a JavaScript button handler can call it.
# NOTE(review): assigning a bare Python callable to a JS attribute may need
# pyodide.ffi.create_proxy to pin its lifetime — confirm it is not collected.
js.window.stopCamera = stop_camera
# Schedule the camera + inference loop on the already-running event loop.
asyncio.ensure_future(start_camera())
pyscript.toml
# Pyodide packages installed into the browser environment at startup.
packages = ["opencv-python", "numpy"]
name = "Test"
# Files downloaded into the in-browser filesystem before main.py runs:
# the post-processing helper module and the ONNX model.
[[fetch]]
files = ["./yolov9wholebody28.py", "./model/yolov9_n_wholebody28_refine_0100_1x3x256x320.onnx"]