FireBeetle ESP32-P4

Before use, please flash the firmware according to the MicroPython Usage Tutorial.

1. Basic Examples

Basic examples help you quickly verify whether the on-board functions of the development board work properly.

1.1 Audio Recording

This example verifies that the development board's on-board MIC (microphone) works. The sample program demonstrates how to capture audio via the MIC and save it as a WAV file on the ESP32-P4.

import os
from machine import Pin
from machine import I2S

SCK_PIN = 12
#WS_PIN = 25
SD_PIN = 9
I2S_ID = 0
BUFFER_LENGTH_IN_BYTES = 40000

# ======= AUDIO CONFIGURATION =======
WAV_FILE = "mic.wav"
RECORD_TIME_IN_SECONDS = 4
WAV_SAMPLE_SIZE_IN_BITS = 16
FORMAT = I2S.MONO
SAMPLE_RATE_IN_HZ = 4000
# ======= AUDIO CONFIGURATION =======

format_to_channels = {I2S.MONO: 1, I2S.STEREO: 2}
NUM_CHANNELS = format_to_channels[FORMAT]
WAV_SAMPLE_SIZE_IN_BYTES = WAV_SAMPLE_SIZE_IN_BITS // 8
RECORDING_SIZE_IN_BYTES = (
    RECORD_TIME_IN_SECONDS * SAMPLE_RATE_IN_HZ * WAV_SAMPLE_SIZE_IN_BYTES * NUM_CHANNELS
)

def create_wav_header(sampleRate, bitsPerSample, num_channels, num_samples):
    datasize = num_samples * num_channels * bitsPerSample // 8
    o = bytes("RIFF", "ascii")  # (4-byte) Marks the file as RIFF format
    o += (datasize + 36).to_bytes(
        4, "little"
    )  # (4-byte) File size in bytes (excludes this field and the RIFF marker)
    o += bytes("WAVE", "ascii")  # (4-byte) File type identifier
    o += bytes("fmt ", "ascii")  # (4-byte) Format Chunk Marker
    o += (16).to_bytes(4, "little")  # (4-byte) Length of the above format data
    o += (1).to_bytes(2, "little")  # (2-byte) Format type (1 = PCM)
    o += (num_channels).to_bytes(2, "little")  # (2-byte) Number of audio channels
    o += (sampleRate).to_bytes(4, "little")  # (4-byte) Audio sample rate (Hz)
    o += (sampleRate * num_channels * bitsPerSample // 8).to_bytes(4, "little")  # (4-byte) Byte rate (sample rate × channels × bits per sample / 8)
    o += (num_channels * bitsPerSample // 8).to_bytes(2, "little")  # (2-byte) Block alignment (channels × bits per sample / 8)
    o += (bitsPerSample).to_bytes(2, "little")  # (2-byte) Bits per audio sample
    o += bytes("data", "ascii")  # (4-byte) Data Chunk Marker
    o += (datasize).to_bytes(4, "little")  # (4-byte) Size of the audio data in bytes
    return o

audio_in = I2S(
    I2S_ID,
    sck=Pin(SCK_PIN),
    #ws=Pin(WS_PIN),
    sd=Pin(SD_PIN),
    mode=I2S.PDM_RX,
    bits=WAV_SAMPLE_SIZE_IN_BITS,
    format=FORMAT,
    rate=SAMPLE_RATE_IN_HZ * 4,
    ibuf=BUFFER_LENGTH_IN_BYTES,
)

# Allocate sample arrays
# memoryview is used to reduce heap allocation in the while loop
mic_samples = bytearray(40000)
mic_samples_mv = memoryview(mic_samples)

recording_buffer = bytearray(RECORDING_SIZE_IN_BYTES)
bytes_received = 0

print("Recording size: {} bytes".format(RECORDING_SIZE_IN_BYTES))
print("==========  START RECORDING ==========")
try:
    while bytes_received < RECORDING_SIZE_IN_BYTES:
        # Read a block of samples from the I2S microphone
        bytes_read = audio_in.readinto(mic_samples_mv)
        if bytes_read > 0:
            bytes_to_write = min(
                bytes_read, RECORDING_SIZE_IN_BYTES - bytes_received
            )
            recording_buffer[bytes_received:bytes_received + bytes_to_write] = mic_samples_mv[0:bytes_to_write]
            print('FILL', bytes_received, bytes_to_write)
            bytes_received += bytes_to_write
    print("==========  DONE RECORDING ==========")
except (KeyboardInterrupt, Exception) as e:
    print("caught exception {} {}".format(type(e).__name__, e))

# Write to WAV file
wav = open(WAV_FILE, "wb")

# Create header for WAV file and write to storage
wav_header = create_wav_header(
    SAMPLE_RATE_IN_HZ,
    WAV_SAMPLE_SIZE_IN_BITS,
    NUM_CHANNELS,
    SAMPLE_RATE_IN_HZ * RECORD_TIME_IN_SECONDS,
)
wav.write(wav_header)

# Write audio samples to WAV file
wav.write(recording_buffer)

# Cleanup resources
wav.close()
print("Wrote ", WAV_FILE)
audio_in.deinit()
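
To confirm the recording was written, a quick file-size check is enough. This is a minimal sketch that only uses the os module already imported above; index 6 of the os.stat() result is the file size in bytes.

file_size = os.stat(WAV_FILE)[6]  # Size of the recorded file in bytes
expected = 44 + RECORDING_SIZE_IN_BYTES  # 44-byte WAV header plus the raw audio data
print("{}: {} bytes (expected about {})".format(WAV_FILE, file_size, expected))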

1.2 TF Card

This example verifies that the TF card slot of the development board works. The sample program demonstrates how to read the file list from a TF card (insert a TF card before running it).

from machine import Pin, SDCard
import os

sd = SDCard(slot=0, width=4, sck=43, cmd=44, data=(39, 40, 41, 42), freq=40000000)

os.mount(sd, '/sd')
print(os.listdir('/sd'))
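
If the listing works, a short write test confirms that the card can also be used for data logging. This is a sketch built on the same mount point; os.umount() releases the card before it is removed.

with open('/sd/test.txt', 'w') as f:
    f.write('TF card write test\n')
print(os.listdir('/sd'))  # test.txt should now appear in the listing
os.umount('/sd')          # Unmount before physically removing the card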

1.3 WiFi Connection

This example verifies that the ESP32-C6 wireless communication module of the development board works. The sample program demonstrates connecting to a WiFi hotspot via the ESP32-C6 (modify the SSID and PWD in line 7 before use).

import network, time
def connect():
    wlan = network.WLAN(network.STA_IF)
    wlan.active(True)
    if not wlan.isconnected():
        print('ESP32-P4 is connecting to WiFi', end="")
        wlan.connect('SSID', 'PWD')  # Replace 'SSID' with your WiFi name and 'PWD' with your WiFi password
        while not wlan.isconnected():
            print(".", end="")
            time.sleep(1)
    print('\nNetwork information: ', wlan.ifconfig())  

connect()
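
If the connection never completes, scanning for nearby access points helps rule out an SSID typo or an out-of-range router. This is a short troubleshooting sketch using the same network.WLAN interface; wlan.scan() returns (ssid, bssid, channel, RSSI, security, hidden) tuples.

import network

wlan = network.WLAN(network.STA_IF)
wlan.active(True)
for ssid, bssid, channel, rssi, security, hidden in wlan.scan():
    print(ssid.decode(), 'RSSI:', rssi)  # SSID is returned as bytes; RSSI is in dBm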

2. Application Examples

2.1 Camera Photo Capture

This example demonstrates how to take photos with the ESP32-P4 and save them to the device (connect a camera before running it).

Compatible cameras:

import camera, time
camera.init()
time.sleep(5)  # Give the sensor a few seconds to start up and adjust exposure
img = camera.capture()  # Returns the captured image data (JPEG) as bytes
camera.deinit()
with open("capture.jpg", "wb") as f:
    f.write(img)

print("Photo saved as capture.jpg")

2.2 Web-Based Image Streaming

This example uses the ESP32-P4 to create a web server, allowing you to view the camera feed in real time from a web browser.

  1. Upload the microdot.py file to the ESP32-P4.
  2. Modify the SSID and PWD in Line 9, then run the script.
  3. Open the printed IP address in a web browser (the device used to view the page and the ESP32-P4 must be on the same local network).
from microdot import Microdot
import time, gc, camera, network

def connect():
    wlan = network.WLAN(network.STA_IF)
    wlan.active(True)
    if not wlan.isconnected():
        print('ESP32-P4 is connecting to WiFi', end="")
        wlan.connect('ssid', 'PWD')  # Replace 'ssid' with your WiFi name and 'PWD' with your WiFi password
        while not wlan.isconnected():
            print(".", end="")
            time.sleep(1)
    print('\nNetwork information: ', wlan.ifconfig())
    ifconfig = wlan.ifconfig()
    print('Please open in browser: {}:5000'.format(ifconfig[0]))

connect()
app = Microdot()
camera.init()

@app.route('/')
def index(request):
    return '''<!doctype html>
<html>
  <head>
    <title>ESP32-P4 Web Image Streaming</title>
    <meta charset="UTF-8">
  </head>
  <body>
    <h1>ESP32-P4 Web Image Streaming:</h1>
    <img src="/video_feed" width="50%">
  </body>
</html>''', 200, {'Content-Type': 'text/html; charset=utf-8'}

@app.route('/video_feed')
def video_feed(request):
    def stream():
        yield b'--frame\r\n'
        while True:
            frame = camera.capture()
            yield b'Content-Type: image/jpeg\r\n\r\n' + frame + \
                b'\r\n--frame\r\n'
            gc.collect()  # Garbage collection to free up memory
            #time.sleep_ms(50)  # Optional delay to adjust frame rate

    return stream(), 200, {'Content-Type':
                           'multipart/x-mixed-replace; boundary=frame'}

if __name__ == '__main__':
    app.run(debug=True)
    camera.deinit()  # Release camera resources when the program exits

2.3 Cat Detection

This example demonstrates how to use the ESP32-P4 to detect cats in an image. The ESP32-P4 will mark the detected cats in the image and save it as a new file.

  1. Upload the [cat.jpg] file to the ESP32-P4.
  2. Run the script to view the position information of the detected cats.
  3. Click the [Stop/Restart Backend Process] button to view the marked image on the MicroPython device.
from espdl import CatDetector
from jpeg import Decoder, Encoder

decoder = Decoder()
# Capture and process the image
img = open("cat.jpg", "rb").read()  # Capture the original image (usually in JPEG format)
wh = decoder.get_img_info(img)  # Get the width and height of the image
# Extract image width and height
width, height = wh
encoder = Encoder(width=width, height=height, pixel_format="RGB888")
cat_detector = CatDetector(width=width, height=height)


framebuffer = decoder.decode(img)  # Convert to RGB888 format
# Convert memoryview to bytearray for modification
framebuffer = bytearray(framebuffer)
# Run detection
results = cat_detector.run(framebuffer)

# Draw bounding box
def draw_rectangle(buffer, width, height, x, y, w, h, list1, color=(255, 0, 0)):
    """
    Draw a rectangular bounding box on an RGB888 format image buffer
    :param buffer: Image buffer
    :param width: Image width
    :param height: Image height
    :param x: X-coordinate of the top-left corner of the bounding box
    :param y: Y-coordinate of the top-left corner of the bounding box
    :param w: Width of the bounding box
    :param h: Height of the bounding box
    :param list1: Optional feature-point coordinates [x1, y1, ..., x5, y5]; pass None to skip
    :param color: Bounding box color (in RGB format)
    """
    # Helper function: Set color for a single pixel
    def set_pixel(buffer, width, x, y, color):
        offset = (y * width + x) * 3
        buffer[offset] = color[0]  # R (Red channel)
        buffer[offset + 1] = color[1]  # G (Green channel)
        buffer[offset + 2] = color[2]  # B (Blue channel)

    # Helper function: Draw a larger dot
    def draw_large_dot(buffer, width, x, y, color, size=3):
        for i in range(x - size, x + size + 1):
            for j in range(y - size, y + size + 1):
                if 0 <= i < width and 0 <= j < height:
                    set_pixel(buffer, width, i, j, color)

    # Draw top edge of the bounding box
    for i in range(x, x + w):
        if 0 <= i < width and 0 <= y < height:
            set_pixel(buffer, width, i, y, color)

    # Draw bottom edge of the bounding box
    for i in range(x, x + w):
        if 0 <= i < width and 0 <= y + h < height:
            set_pixel(buffer, width, i, y + h, color)

    # Draw left edge of the bounding box
    for j in range(y, y + h):
        if 0 <= j < height and 0 <= x < width:
            set_pixel(buffer, width, x, j, color)

    # Draw right edge of the bounding box
    for j in range(y, y + h):
        if 0 <= j < height and 0 <= x + w < width:
            set_pixel(buffer, width, x + w, j, color)

    # Draw feature points
    if list1:
        draw_large_dot(buffer, width, list1[0], list1[1], (0, 0, 255), size=2)
        draw_large_dot(buffer, width, list1[2], list1[3], (0, 0, 255), size=2)
        draw_large_dot(buffer, width, list1[4], list1[5], (0, 255, 0), size=2)
        draw_large_dot(buffer, width, list1[6], list1[7], (255, 0, 0), size=2)
        draw_large_dot(buffer, width, list1[8], list1[9], (255, 0, 0), size=2)

if results:
    # Draw bounding boxes for detected cats on the image
    for cat in results:
        print(cat)
        x1, y1, x2, y2 = cat['box']
        draw_rectangle(framebuffer, width, height, x1, y1, x2 - x1, y2 - y1, None, color=(255, 0, 0))  # Use red bounding box

# Re-encode the image with bounding boxes to JPEG format and save
marked_img = encoder.encode(framebuffer)
with open("cat_marked.jpg", "wb") as f:
    f.write(marked_img)

Marked Result

2.4 Face Detection

This example demonstrates how to use the ESP32-P4 to detect human faces in an image. The ESP32-P4 will mark the detected faces and save the annotated image as a new file.

  1. Upload the [face.jpg] file to the ESP32-P4.
  2. Run the script to view the position information of the detected faces.
  3. Click the [Stop/Restart Backend Process] button to view the marked image on the MicroPython device.
from espdl import FaceDetector
from jpeg import Decoder, Encoder

decoder = Decoder()
encoder = Encoder(width=320, height=240, pixel_format="RGB888")
face_detector = FaceDetector()

# Capture and process the image
img = open("face.jpg", "rb").read()  # Capture the original image (usually in JPEG format)
framebuffer = decoder.decode(img)  # Convert to RGB888 format
# Convert memoryview to bytearray for modification
framebuffer = bytearray(framebuffer)
# Run face detection
results = face_detector.run(framebuffer)

# Draw bounding box
def draw_rectangle(buffer, width, height, x, y, w, h, list1, color=(255, 0, 0)):
    """
    Draw a rectangular bounding box on an RGB888 format image buffer
    :param buffer: Image buffer
    :param width: Image width
    :param height: Image height
    :param x: X-coordinate of the top-left corner of the bounding box
    :param y: Y-coordinate of the top-left corner of the bounding box
    :param w: Width of the bounding box
    :param h: Height of the bounding box
    :param list1: Facial feature-point coordinates [x1, y1, ..., x5, y5] returned by the detector
    :param color: Bounding box color (in RGB format)
    """
    # Helper function: Set color for a single pixel
    def set_pixel(buffer, width, x, y, color):
        offset = (y * width + x) * 3
        buffer[offset] = color[0]  # R (Red channel)
        buffer[offset + 1] = color[1]  # G (Green channel)
        buffer[offset + 2] = color[2]  # B (Blue channel)

    # Helper function: Draw a larger dot
    def draw_large_dot(buffer, width, x, y, color, size=3):
        for i in range(x - size, x + size + 1):
            for j in range(y - size, y + size + 1):
                if 0 <= i < width and 0 <= j < height:
                    set_pixel(buffer, width, i, j, color)

    # Draw top edge of the bounding box
    for i in range(x, x + w):
        if 0 <= i < width and 0 <= y < height:
            set_pixel(buffer, width, i, y, color)

    # Draw bottom edge of the bounding box
    for i in range(x, x + w):
        if 0 <= i < width and 0 <= y + h < height:
            set_pixel(buffer, width, i, y + h, color)

    # Draw left edge of the bounding box
    for j in range(y, y + h):
        if 0 <= j < height and 0 <= x < width:
            set_pixel(buffer, width, x, j, color)

    # Draw right edge of the bounding box
    for j in range(y, y + h):
        if 0 <= j < height and 0 <= x + w < width:
            set_pixel(buffer, width, x + w, j, color)

    # Draw feature points
    if list1:
        draw_large_dot(buffer, width, list1[0], list1[1], (0, 0, 255), size=2)
        draw_large_dot(buffer, width, list1[2], list1[3], (0, 0, 255), size=2)
        draw_large_dot(buffer, width, list1[4], list1[5], (0, 255, 0), size=2)
        draw_large_dot(buffer, width, list1[6], list1[7], (255, 0, 0), size=2)
        draw_large_dot(buffer, width, list1[8], list1[9], (255, 0, 0), size=2)

# Draw bounding boxes for detected faces on the image
for face in results:
    print(face)
    x1, y1, x2, y2 = face['box']
    draw_rectangle(framebuffer, 320, 240, x1, y1, x2 - x1, y2 - y1, face['features'], color=(255, 0, 0))  # Use red bounding box

# Re-encode the image with bounding boxes to JPEG format and save
marked_img = encoder.encode(framebuffer)
with open("face_marked.jpg", "wb") as f:
    f.write(marked_img)

Marked Result

2.5 Pedestrian Detection

This example demonstrates how to use the ESP32-P4 to detect pedestrians in an image. The ESP32-P4 will mark the detected pedestrians and save the annotated image as a new file.

  1. Upload the [pedestrian.jpg] file to the ESP32-P4.
  2. Run the script to view the position information of the detected pedestrians.
  3. Click the [Stop/Restart Backend Process] button to view the marked image on the MicroPython device.
from espdl import HumanDetector
from jpeg import Decoder, Encoder


decoder = Decoder()
encoder = Encoder(width=640, height=480, pixel_format="RGB888")
human_detector = HumanDetector(width=640, height=480)

# Capture and process the image
img = open("pedestrian.jpg", "rb").read()  # Capture the original image (usually in JPEG format)
framebuffer = decoder.decode(img)  # Convert to RGB888 format
# Convert memoryview to bytearray for modification
framebuffer = bytearray(framebuffer)
# Run pedestrian detection
results = human_detector.run(framebuffer)

# Draw bounding box
def draw_rectangle(buffer, width, height, x, y, w, h, color=(255, 0, 0)):
    """
    Draw a rectangular bounding box on an RGB888 format image buffer
    :param buffer: Image buffer
    :param width: Image width
    :param height: Image height
    :param x: X-coordinate of the top-left corner of the bounding box
    :param y: Y-coordinate of the top-left corner of the bounding box
    :param w: Width of the bounding box
    :param h: Height of the bounding box
    :param color: Bounding box color (in RGB format)
    """
    # Helper function: Set color for a single pixel
    def set_pixel(buffer, width, x, y, color):
        offset = (y * width + x) * 3
        buffer[offset] = color[0]  # R (Red channel)
        buffer[offset + 1] = color[1]  # G (Green channel)
        buffer[offset + 2] = color[2]  # B (Blue channel)

    # Draw top edge of the bounding box
    for i in range(x, x + w):
        if 0 <= i < width and 0 <= y < height:
            set_pixel(buffer, width, i, y, color)

    # Draw bottom edge of the bounding box
    for i in range(x, x + w):
        if 0 <= i < width and 0 <= y + h < height:
            set_pixel(buffer, width, i, y + h, color)

    # Draw left edge of the bounding box
    for j in range(y, y + h):
        if 0 <= j < height and 0 <= x < width:
            set_pixel(buffer, width, x, j, color)

    # Draw right edge of the bounding box
    for j in range(y, y + h):
        if 0 <= j < height and 0 <= x + w < width:
            set_pixel(buffer, width, x + w, j, color)

# Draw bounding boxes for detected pedestrians on the image
for person in results:
    print(person)
    x1, y1, x2, y2 = person['box']
    draw_rectangle(framebuffer, 640, 480, x1, y1, x2 - x1, y2 - y1, color=(255, 0, 0))  # Use red bounding box

# Re-encode the image with bounding boxes to JPEG format and save
marked_img = encoder.encode(framebuffer)
with open("pedestrian_marked.jpg", "wb") as f:
    f.write(marked_img)

Marked Result

2.6 YOLO11

This example demonstrates how to use the ESP32-P4 to detect objects in an image with YOLO11. The ESP32-P4 will label the detected objects (class and confidence) in the image and save the annotated image as a new file.

  1. Upload the [yolo.jpg] and [myufont.py] files to the ESP32-P4 (the script also expects the .bmf font file referenced in the code to be present on the TF card).
  2. Run the script to view the position information of the detected objects.
  3. Click the [Stop/Restart Backend Process] button to view the marked image on the MicroPython device.
from espdl import CocoDetector
from jpeg import Decoder, Encoder
from myufont import CustomBMFont
from machine import Pin, SDCard
import os

sd = SDCard(slot=0, width=4, sck=43, cmd=44, data=(39, 40, 41, 42))
os.mount(sd, '/sd')
decoder = Decoder()
encoder = Encoder(width=405, height=540, pixel_format="RGB888")
object_detector = CocoDetector(width=405, height=540)  # COCO object detector (80-class MS COCO model)

# MS COCO dataset object classes (80 classes, standard COCO order)
MSCOCO_CLASSES = [
    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
    "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
    "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
    "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
    "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange",
    "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed",
    "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven",
    "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"
]

font = CustomBMFont('/sd/text_full_16px_2312.v3.bmf')  # Load custom Chinese bitmap font

# Capture and process the image
img = open("yolo.jpg", "rb").read()  # Read original image (typically JPEG format)
framebuffer = decoder.decode(img)    # Decode image to RGB888 format
framebuffer = bytearray(framebuffer) # Convert memoryview to bytearray for pixel modification

# Run object detection (COCO dataset)
results = object_detector.run(framebuffer)

# Function to draw bounding boxes and labels on the image
def draw_rectangle(buffer, width, height, x, y, w, h, font, label, color=(255, 0, 0)):
    """
    Draw a rectangular bounding box and label on an RGB888 image buffer.
    :param buffer: Image buffer (bytearray in RGB888 format)
    :param width: Total width of the image
    :param height: Total height of the image
    :param x: X-coordinate of the top-left corner of the bounding box
    :param y: Y-coordinate of the top-left corner of the bounding box
    :param w: Width of the bounding box
    :param h: Height of the bounding box
    :param font: CustomBMFont object for text rendering
    :param label: Text label to display above the bounding box
    :param color: Bounding box and label color (RGB tuple, default: red)
    """
    # Helper function: Set color for a single pixel in the RGB888 buffer
    def set_pixel(buffer, width, x, y, color):
        offset = (y * width + x) * 3  # Calculate pixel position (3 bytes per RGB pixel)
        buffer[offset] = color[0]     # Red channel
        buffer[offset + 1] = color[1] # Green channel
        buffer[offset + 2] = color[2] # Blue channel

    # Helper function: Check if a character is Chinese
    def is_chinese(ch):
        """Determine if a character is a Chinese character (covers CJK unified ideographs)."""
        cp = ord(ch)
        return ((0x4E00 <= cp <= 0x9FFF) or     # Main CJK unified ideographs
                (0x3400 <= cp <= 0x4DBF) or     # CJK extension A
                (0x20000 <= cp <= 0x2A6DF))     # CJK extension B

    # Helper function: Render text on the image buffer
    def render_text(font, text, x_start, y_start, color, spacing=0, line_spacing=0, max_width=width):
        font_size = font.font_size
        bytes_per_row = (font_size + 7) // 8  # Bytes needed to store one row of the character bitmap (round up)
        x, y = x_start, y_start

        for char in text:
            # Handle line breaks
            if char == '\n':
                y += font_size + line_spacing
                x = x_start
                continue
            # Handle carriage returns (shift X position)
            if char == '\r':
                x += 2 * font_size
                continue

            # Set character width: full width for Chinese, half width for ASCII
            char_width = font_size if is_chinese(char) else font_size // 2

            # Wrap text if it exceeds max width
            if max_width is not None and (x + char_width) > (x_start + max_width):
                y += font_size + line_spacing
                x = x_start

            # Get the bitmap data for the current character
            char_bitmap = font.get_char_bitmap(char)

            # Draw each pixel of the character
            for row in range(font_size):
                for col in range(char_width if not is_chinese(char) else font_size):
                    byte_index = row * bytes_per_row + (col // 8)  # Calculate which byte contains the target bit
                    bit_mask = 0x80 >> (col % 8)                  # Mask to isolate the target bit (MSB first)

                    # If the bit is set (1), draw the pixel
                    if byte_index < len(char_bitmap) and (char_bitmap[byte_index] & bit_mask):
                        px, py = x + col, y + row
                        if 0 <= px < width and 0 <= py < height:  # Keep the label inside the image
                            set_pixel(buffer, width, px, py, color)

            # Move to the next character position
            x += char_width + spacing

    # Draw the top edge of the bounding box
    for i in range(x, x + w):
        if 0 <= i < width and 0 <= y < height:
            set_pixel(buffer, width, i, y, color)

    # Draw the bottom edge of the bounding box
    for i in range(x, x + w):
        if 0 <= i < width and 0 <= (y + h) < height:
            set_pixel(buffer, width, i, y + h, color)

    # Draw the left edge of the bounding box
    for j in range(y, y + h):
        if 0 <= j < height and 0 <= x < width:
            set_pixel(buffer, width, x, j, color)

    # Draw the right edge of the bounding box
    for j in range(y, y + h):
        if 0 <= j < height and 0 <= (x + w) < width:
            set_pixel(buffer, width, x + w, j, color)

    # Draw the label above the bounding box
    render_text(font, label, x, max(y - 20, 0), color)  # Clamp so the label stays inside the image

# Draw bounding boxes and labels for all detected objects
for obj in results:
    # Extract bounding box coordinates (top-left: (x1,y1), bottom-right: (x2,y2))
    x1, y1, x2, y2 = obj['box']
    # Create label: "Object Class: Confidence%"
    class_name = MSCOCO_CLASSES[obj['category']]
    confidence = int(obj['score'] * 100)  # Convert confidence to percentage
    label = f"{class_name}: {confidence}%"
    # Draw bounding box and label (red color by default)
    draw_rectangle(framebuffer, 405, 540, x1, y1, x2 - x1, y2 - y1, font, label)
    # Print label to serial monitor for debugging
    print(label)

# Re-encode the annotated image to JPEG format and save
marked_img = encoder.encode(framebuffer)
with open("yolo_marked.jpg", "wb") as f:
    f.write(marked_img)

2.7 MQTT Data Reporting

This example demonstrates how the ESP32-P4 reports data to a network via the MQTT protocol.
(Modify the settings in the user-configurable section of the script before use.)

#!/usr/bin/env python3
# main.py
import json
import network
import time
import random
from machine import ADC, Pin
from umqtt.simple import MQTTClient

# ========== User Configurable Section ==========
WIFI_SSID = "xxx"                  # Your WiFi network name
WIFI_PASS = "xxx"                  # Your WiFi password
ADC_PIN   = 16                     # ADC0 pin of the ESP32-P4 (for temperature sensor input)
MQTT_SERVER = "192.168.31.160"     # MQTT broker IP address
MQTT_PORT   = 32768                # MQTT broker port (default: 1883 for unencrypted, 8883 for TLS)
# Generate a unique MQTT client ID (8-bit random number to avoid conflicts)
MQTT_CLIENT_ID = f"micropython-client-{random.getrandbits(8)}"
MQTT_USER   = "xxx"                # MQTT broker username (if authentication is enabled)
MQTT_PASS   = "xxx"                # MQTT broker password (if authentication is enabled)
PUB_TOPIC   = "esp/adc/temp"       # MQTT topic to publish data to
PUB_INTERVAL = 2                   # Data reporting interval (in seconds)
# ===============================================

def wifi_connect(ssid, pwd):
    """Connect the ESP32-P4 to a WiFi network."""
    sta_interface = network.WLAN(network.STA_IF)  # Initialize station (client) mode
    sta_interface.active(True)                    # Enable WiFi
    if not sta_interface.isconnected():
        print("Connecting to Wi-Fi...")
        sta_interface.connect(ssid, pwd)          # Attempt WiFi connection
        # Wait up to 20 seconds for connection
        for _ in range(20):
            if sta_interface.isconnected():
                break
            time.sleep(1)
    # Print connection status and network info (IP, subnet, gateway, DNS)
    print("Wi-Fi connected:", sta_interface.ifconfig())

# --------------- Temperature Conversion ---------------
def read_temperature():
    """
    Read and calculate temperature from the ADC-connected sensor.
    Linear conversion logic:
    - Reference voltage: 3.3V
    - Temperature sensor output: 10mV/°C (typical for analog temp sensors like TMP36)
    """
    adc = ADC(Pin(ADC_PIN))                      # Initialize ADC on the specified pin
    adc.atten(ADC.ATTN_11DB)                     # Set ADC attenuation to measure 0–3.3V (11dB range)
    raw_adc_value = adc.read()                   # Read raw ADC value (0–4095 for 12-bit ADC)
    voltage = raw_adc_value / 4095 * 3.3         # Convert raw value to voltage (3.3V full scale)
    temperature = (voltage - 1.4) * 100          # Calculate temperature (adjust offset based on sensor specs)
    return round(temperature, 1)                 # Round to 1 decimal place for readability

# --------------- MQTT Connection ---------------
def mqtt_connect():
    """Establish a connection to the MQTT broker and return the client object."""
    client = MQTTClient(
        client_id=MQTT_CLIENT_ID,
        server=MQTT_SERVER,
        port=MQTT_PORT,
        user=MQTT_USER,
        password=MQTT_PASS
    )
    client.connect()  # Connect to the MQTT broker
    print(f"[MQTT] Connected to broker: {MQTT_SERVER}")
    return client

def mqtt_publish(client, data: dict):
    """Publish data to the preconfigured MQTT topic."""
    payload = json.dumps(data)  # Convert Python dictionary to JSON string
    client.publish(PUB_TOPIC, payload)  # Publish payload to the topic
    print(f"[MQTT] Sent -> Topic: {PUB_TOPIC}, Payload: {payload}")

# --------------- Main Function ---------------
def main():
    wifi_connect(WIFI_SSID, WIFI_PASS)  # Connect to WiFi first
    mqtt_client = mqtt_connect()        # Connect to MQTT broker
    # Continuously read temperature and publish data
    while True:
        current_temp = read_temperature()
        # Publish temperature as a JSON object (e.g., {"temperature": 25.3})
        mqtt_publish(mqtt_client, {"temperature": current_temp})
        time.sleep(PUB_INTERVAL)  # Wait for the next reporting cycle

# Run the main function when the script is executed directly
if __name__ == "__main__":
    main()
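
To verify that the reports are arriving, subscribe to the same topic with any MQTT client. Below is a minimal subscriber sketch using the same umqtt.simple library; replace the broker address, port, credentials, and topic with the values configured above.

from umqtt.simple import MQTTClient

def on_message(topic, msg):
    print("Received -> Topic: {}, Payload: {}".format(topic.decode(), msg.decode()))

sub = MQTTClient("micropython-subscriber", "192.168.31.160", port=32768,
                 user="xxx", password="xxx")
sub.set_callback(on_message)    # Register the message handler
sub.connect()
sub.subscribe(b"esp/adc/temp")  # Same topic as PUB_TOPIC above
while True:
    sub.wait_msg()              # Block until a message arrives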