From 439af9753d8e1bc7171e04c76c6563585071522b Mon Sep 17 00:00:00 2001 From: Hugues Fruchet Date: Tue, 23 Sep 2025 15:22:26 +0200 Subject: [PATCH 1/7] doc: build: dts: api: document the zephyr,videoenc chosen node Add documentation for the zephyr,videoenc chosen node, which designates a hardware video encoder such as an H264 or MJPEG encoder. Signed-off-by: Hugues Fruchet --- doc/build/dts/api/api.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/build/dts/api/api.rst b/doc/build/dts/api/api.rst index 31cfb3842edc6..7352317e9dfa5 100644 --- a/doc/build/dts/api/api.rst +++ b/doc/build/dts/api/api.rst @@ -466,6 +466,8 @@ device. WS2812 GPIO driver * - zephyr,touch - touchscreen controller device node. + * - zephyr,videoenc + - Video encoder device, typically an H264 or MJPEG video encoder. * - mcuboot,ram-load-dev - When a Zephyr application is built to be loaded to RAM by MCUboot, with :kconfig:option:`CONFIG_MCUBOOT_BOOTLOADER_MODE_SINGLE_APP_RAM_LOAD`, From 96ef856a379bb272f4dcfbad6efd34b03f71a3a1 Mon Sep 17 00:00:00 2001 From: Alain Volmat Date: Thu, 2 Oct 2025 21:26:18 +0200 Subject: [PATCH 2/7] snippets: stm32: addition of video-stm32-venc Add a snippet to enable and configure the STM32 VENC encoder. Signed-off-by: Alain Volmat --- snippets/stm32/index.rst | 10 ++++++++++ snippets/stm32/video-stm32-venc/README.rst | 19 +++++++++++++++++++ snippets/stm32/video-stm32-venc/snippet.yml | 4 ++++ .../video-stm32-venc/video-stm32-venc.conf | 9 +++++++++ .../video-stm32-venc/video-stm32-venc.overlay | 14 ++++++++++++++ 5 files changed, 56 insertions(+) create mode 100644 snippets/stm32/index.rst create mode 100644 snippets/stm32/video-stm32-venc/README.rst create mode 100644 snippets/stm32/video-stm32-venc/snippet.yml create mode 100644 snippets/stm32/video-stm32-venc/video-stm32-venc.conf create mode 100644 snippets/stm32/video-stm32-venc/video-stm32-venc.overlay diff --git a/snippets/stm32/index.rst b/snippets/stm32/index.rst new file mode 100644 index 0000000000000..0dd321541a724 --- /dev/null +++ b/snippets/stm32/index.rst @@ -0,0 +1,10 @@ +.. _stm32-snippets: + +STM32 snippets +############## + +.. toctree:: + :maxdepth: 1 + :glob: + + **/* diff --git a/snippets/stm32/video-stm32-venc/README.rst b/snippets/stm32/video-stm32-venc/README.rst new file mode 100644 index 0000000000000..276ac2ba3cc59 --- /dev/null +++ b/snippets/stm32/video-stm32-venc/README.rst @@ -0,0 +1,19 @@ +.. _snippet-video-stm32-venc: + +STM32 Video ENCoder (VENC) Snippet (video-stm32-venc) +##################################################### + +.. code-block:: console + + west build -S video-stm32-venc [...] + +Overview +******** + +This snippet instantiates the STM32 Video ENCoder (VENC) and sets it +as the ``zephyr,videoenc`` :ref:`devicetree` chosen node. + +Requirements +************ + +The board must have hardware support for the Video ENCoder (VENC). 
diff --git a/snippets/stm32/video-stm32-venc/snippet.yml b/snippets/stm32/video-stm32-venc/snippet.yml new file mode 100644 index 0000000000000..e026bbed9cb3c --- /dev/null +++ b/snippets/stm32/video-stm32-venc/snippet.yml @@ -0,0 +1,4 @@ +name: video-stm32-venc +append: + EXTRA_DTC_OVERLAY_FILE: video-stm32-venc.overlay + EXTRA_CONF_FILE: video-stm32-venc.conf diff --git a/snippets/stm32/video-stm32-venc/video-stm32-venc.conf b/snippets/stm32/video-stm32-venc/video-stm32-venc.conf new file mode 100644 index 0000000000000..7e386d983c6a7 --- /dev/null +++ b/snippets/stm32/video-stm32-venc/video-stm32-venc.conf @@ -0,0 +1,9 @@ +# VENC output format +CONFIG_VIDEO_ENCODED_PIXEL_FORMAT="H264" + +# Default frame size +CONFIG_VIDEO_FRAME_WIDTH=1920 +CONFIG_VIDEO_FRAME_HEIGHT=1080 + +# VENC default input format +CONFIG_VIDEO_PIXEL_FORMAT="NV12" diff --git a/snippets/stm32/video-stm32-venc/video-stm32-venc.overlay b/snippets/stm32/video-stm32-venc/video-stm32-venc.overlay new file mode 100644 index 0000000000000..723762400cf66 --- /dev/null +++ b/snippets/stm32/video-stm32-venc/video-stm32-venc.overlay @@ -0,0 +1,14 @@ +/* + * Copyright (c) 2025 STMicroelectronics + * SPDX-License-Identifier: Apache-2.0 + */ + +/ { + chosen { + zephyr,videoenc = &venc; + }; +}; + +&venc { + status = "okay"; +}; From 5b80b483c46d6cb2449692e515dc251deb5b0fe7 Mon Sep 17 00:00:00 2001 From: Hugues Fruchet Date: Mon, 6 Oct 2025 15:27:42 +0200 Subject: [PATCH 3/7] samples: video: tcpserversink: change printk to LOG_ Change printk debug traces to LOG_*. Signed-off-by: Hugues Fruchet --- samples/drivers/video/tcpserversink/src/main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/samples/drivers/video/tcpserversink/src/main.c b/samples/drivers/video/tcpserversink/src/main.c index 640a728f64eea..4c6a22f54db73 100644 --- a/samples/drivers/video/tcpserversink/src/main.c +++ b/samples/drivers/video/tcpserversink/src/main.c @@ -87,7 +87,7 @@ int main(void) return 0; } - printk("Video device detected, format: %s %ux%u\n", + LOG_INF("Video device detected, format: %s %ux%u", VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, fmt.height); if (caps.min_line_count != LINE_COUNT_HEIGHT) { @@ -107,15 +107,15 @@ int main(void) /* Connection loop */ do { - printk("TCP: Waiting for client...\n"); + LOG_INF("TCP: Waiting for client..."); client = accept(sock, (struct sockaddr *)&client_addr, &client_addr_len); if (client < 0) { - printk("Failed to accept: %d\n", errno); + LOG_ERR("Failed to accept: %d", errno); return 0; } - printk("TCP: Accepted connection\n"); + LOG_INF("TCP: Accepted connection"); /* Enqueue Buffers */ for (i = 0; i < ARRAY_SIZE(buffers); i++) { @@ -128,7 +128,7 @@ int main(void) return 0; } - printk("Stream started\n"); + LOG_INF("Stream started"); /* Capture loop */ i = 0; @@ -140,13 +140,13 @@ int main(void) return 0; } - printk("\rSending frame %d\n", i++); + LOG_INF("Sending frame %d", i++); /* Send video buffer to TCP client */ ret = sendall(client, vbuf->buffer, vbuf->bytesused); if (ret && ret != -EAGAIN) { /* client disconnected */ - printk("\nTCP: Client disconnected %d\n", ret); + LOG_ERR("TCP: Client disconnected %d", ret); close(client); } From b16c13a097dd68e4e91aa5532be1556ac09e38aa Mon Sep 17 00:00:00 2001 From: Hugues Fruchet Date: Tue, 24 Jun 2025 14:04:09 +0200 Subject: [PATCH 4/7] samples: video: tcpserversink: sync with capture sample Sync with video capture sample. 
Signed-off-by: Hugues Fruchet --- samples/drivers/video/tcpserversink/Kconfig | 63 ++++++++ .../drivers/video/tcpserversink/src/main.c | 153 +++++++++++++++++- 2 files changed, 214 insertions(+), 2 deletions(-) create mode 100644 samples/drivers/video/tcpserversink/Kconfig diff --git a/samples/drivers/video/tcpserversink/Kconfig b/samples/drivers/video/tcpserversink/Kconfig new file mode 100644 index 0000000000000..46fa6a00fc8e9 --- /dev/null +++ b/samples/drivers/video/tcpserversink/Kconfig @@ -0,0 +1,63 @@ +# Copyright (c) 2024 Espressif Systems (Shanghai) Co., Ltd. +# SPDX-License-Identifier: Apache-2.0 + +mainmenu "TCP camera streaming sample application" + +menu "Video capture configuration" + +config VIDEO_SOURCE_CROP_LEFT + int "Crop area left value" + default 0 + help + Left value of the crop area within the video source. + +config VIDEO_SOURCE_CROP_TOP + int "Crop area top value" + default 0 + help + Top value of the crop area within the video source. + +config VIDEO_SOURCE_CROP_WIDTH + int "Crop area width value" + default 0 + help + Width value of the crop area within the video source. + If set to 0, the crop is not applied. + +config VIDEO_SOURCE_CROP_HEIGHT + int "Crop area height value" + default 0 + help + Height value of the crop area within the video source. + If set to 0, the crop is not applied. + +config VIDEO_FRAME_HEIGHT + int "Height of the video frame" + default 0 + help + Height of the video frame. If set to 0, the default height is used. + +config VIDEO_FRAME_WIDTH + int "Width of the video frame" + default 0 + help + Width of the video frame. If set to 0, the default width is used. + +config VIDEO_PIXEL_FORMAT + string "Pixel format of the video frame" + help + Pixel format of the video frame. If not set, the default pixel format is used. 
+ +config VIDEO_CTRL_HFLIP + bool "Mirror the video frame horizontally" + help + If set, mirror the video frame horizontally + +config VIDEO_CTRL_VFLIP + bool "Mirror the video frame vertically" + help + If set, mirror the video frame vertically + +endmenu + +source "Kconfig.zephyr" diff --git a/samples/drivers/video/tcpserversink/src/main.c b/samples/drivers/video/tcpserversink/src/main.c index 4c6a22f54db73..9cee369ee9496 100644 --- a/samples/drivers/video/tcpserversink/src/main.c +++ b/samples/drivers/video/tcpserversink/src/main.c @@ -1,11 +1,13 @@ /* * Copyright (c) 2019 Linaro Limited + * Copyright 2025 NXP * * SPDX-License-Identifier: Apache-2.0 */ #include #include +#include #include #include #include @@ -36,11 +38,28 @@ int main(void) socklen_t client_addr_len = sizeof(client_addr); struct video_buffer *buffers[2]; struct video_buffer *vbuf = &(struct video_buffer){}; - int i, ret, sock, client; + int ret, sock, client; struct video_format fmt; struct video_caps caps; + struct video_frmival frmival; + struct video_frmival_enum fie; enum video_buf_type type = VIDEO_BUF_TYPE_OUTPUT; const struct device *video_dev; +#if (CONFIG_VIDEO_SOURCE_CROP_WIDTH && CONFIG_VIDEO_SOURCE_CROP_HEIGHT) || \ + CONFIG_VIDEO_FRAME_HEIGHT || CONFIG_VIDEO_FRAME_WIDTH + struct video_selection sel = { + .type = VIDEO_BUF_TYPE_OUTPUT, + }; +#endif + size_t bsize; + int i = 0; +#if CONFIG_VIDEO_FRAME_HEIGHT || CONFIG_VIDEO_FRAME_WIDTH + int err; +#endif + const struct device *last_dev = NULL; + struct video_ctrl_query cq = {.id = VIDEO_CTRL_FLAG_NEXT_CTRL}; + struct video_control ctrl = {.id = VIDEO_CID_HFLIP, .val = 1}; + int tp_set_ret = -ENOTSUP; video_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_camera)); if (!device_is_ready(video_dev)) { @@ -80,6 +99,16 @@ int main(void) return 0; } + LOG_INF("- Capabilities:"); + while (caps.format_caps[i].pixelformat) { + const struct video_format_cap *fcap = &caps.format_caps[i]; + /* fourcc to string */ + LOG_INF(" %s width [%u; %u; %u] height [%u; %u; %u]", + VIDEO_FOURCC_TO_STR(fcap->pixelformat), fcap->width_min, fcap->width_max, + fcap->width_step, fcap->height_min, fcap->height_max, fcap->height_step); + i++; + } + /* Get default/native format */ fmt.type = type; if (video_get_format(video_dev, &fmt)) { @@ -95,9 +124,129 @@ int main(void) return 0; } + /* Set the crop setting if necessary */ +#if CONFIG_VIDEO_SOURCE_CROP_WIDTH && CONFIG_VIDEO_SOURCE_CROP_HEIGHT + sel.target = VIDEO_SEL_TGT_CROP; + sel.rect.left = CONFIG_VIDEO_SOURCE_CROP_LEFT; + sel.rect.top = CONFIG_VIDEO_SOURCE_CROP_TOP; + sel.rect.width = CONFIG_VIDEO_SOURCE_CROP_WIDTH; + sel.rect.height = CONFIG_VIDEO_SOURCE_CROP_HEIGHT; + if (video_set_selection(video_dev, &sel)) { + LOG_ERR("Unable to set selection crop"); + return 0; + } + LOG_INF("Selection crop set to (%u,%u)/%ux%u", sel.rect.left, sel.rect.top, sel.rect.width, + sel.rect.height); +#endif + +#if CONFIG_VIDEO_FRAME_HEIGHT || CONFIG_VIDEO_FRAME_WIDTH +#if CONFIG_VIDEO_FRAME_HEIGHT + fmt.height = CONFIG_VIDEO_FRAME_HEIGHT; +#endif + +#if CONFIG_VIDEO_FRAME_WIDTH + fmt.width = CONFIG_VIDEO_FRAME_WIDTH; +#endif + + /* + * Check (if possible) if targeted size is same as crop + * and if compose is necessary + */ + sel.target = VIDEO_SEL_TGT_CROP; + err = video_get_selection(video_dev, &sel); + if (err < 0 && err != -ENOSYS) { + LOG_ERR("Unable to get selection crop"); + return 0; + } + + if (err == 0 && (sel.rect.width != fmt.width || sel.rect.height != fmt.height)) { + sel.target = VIDEO_SEL_TGT_COMPOSE; + sel.rect.left = 0; + sel.rect.top = 0; + 
sel.rect.width = fmt.width; + sel.rect.height = fmt.height; + err = video_set_selection(video_dev, &sel); + if (err < 0 && err != -ENOSYS) { + LOG_ERR("Unable to set selection compose"); + return 0; + } + } +#endif + + if (strcmp(CONFIG_VIDEO_PIXEL_FORMAT, "")) { + fmt.pixelformat = VIDEO_FOURCC_FROM_STR(CONFIG_VIDEO_PIXEL_FORMAT); + } + + LOG_INF("- Video format: %s %ux%u", VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, + fmt.height); + + if (video_set_format(video_dev, &fmt)) { + LOG_ERR("Unable to set format"); + return 0; + } + + if (!video_get_frmival(video_dev, &frmival)) { + LOG_INF("- Default frame rate : %f fps", + 1.0 * frmival.denominator / frmival.numerator); + } + + LOG_INF("- Supported frame intervals for the default format:"); + memset(&fie, 0, sizeof(fie)); + fie.format = &fmt; + while (video_enum_frmival(video_dev, &fie) == 0) { + if (fie.type == VIDEO_FRMIVAL_TYPE_DISCRETE) { + LOG_INF(" %u/%u", fie.discrete.numerator, fie.discrete.denominator); + } else { + LOG_INF(" [min = %u/%u; max = %u/%u; step = %u/%u]", + fie.stepwise.min.numerator, fie.stepwise.min.denominator, + fie.stepwise.max.numerator, fie.stepwise.max.denominator, + fie.stepwise.step.numerator, fie.stepwise.step.denominator); + } + fie.index++; + } + + /* Get supported controls */ + LOG_INF("- Supported controls:"); + cq.dev = video_dev; + while (!video_query_ctrl(&cq)) { + if (cq.dev != last_dev) { + last_dev = cq.dev; + LOG_INF("\t\tdevice: %s", cq.dev->name); + } + video_print_ctrl(&cq); + cq.id |= VIDEO_CTRL_FLAG_NEXT_CTRL; + } + + /* Set controls */ + if (IS_ENABLED(CONFIG_VIDEO_CTRL_HFLIP)) { + video_set_ctrl(video_dev, &ctrl); + } + + if (IS_ENABLED(CONFIG_VIDEO_CTRL_VFLIP)) { + ctrl.id = VIDEO_CID_VFLIP; + video_set_ctrl(video_dev, &ctrl); + } + + if (IS_ENABLED(CONFIG_TEST)) { + ctrl.id = VIDEO_CID_TEST_PATTERN; + tp_set_ret = video_set_ctrl(video_dev, &ctrl); + } + + /* Size to allocate for each buffer */ + if (caps.min_line_count == LINE_COUNT_HEIGHT) { + bsize = fmt.pitch * fmt.height; + } else { + bsize = fmt.pitch * caps.min_line_count; + } + /* Alloc Buffers */ for (i = 0; i < ARRAY_SIZE(buffers); i++) { - buffers[i] = video_buffer_alloc(fmt.pitch * fmt.height, K_FOREVER); + /* + * For some hardware, such as the PxP used on i.MX RT1170 to do image rotation, + * buffer alignment is needed in order to achieve the best performance + */ + buffers[i] = video_buffer_aligned_alloc(bsize, CONFIG_VIDEO_BUFFER_POOL_ALIGN, + K_FOREVER); if (buffers[i] == NULL) { LOG_ERR("Unable to alloc video buffer"); return 0; From 17df352864745eb3200600969ddbee0516ac81a9 Mon Sep 17 00:00:00 2001 From: Hugues Fruchet Date: Tue, 8 Jul 2025 10:49:02 +0200 Subject: [PATCH 5/7] samples: video: tcpserversink: N-buffering configuration Allow configuring the number of allocated capture frames. This allows trading off framerate against memory usage. Two buffers allow capturing while sending data (optimal framerate). One buffer reduces memory usage, but the capture framerate is lower. Signed-off-by: Hugues Fruchet --- samples/drivers/video/tcpserversink/Kconfig | 10 ++++++++++ samples/drivers/video/tcpserversink/src/main.c | 3 ++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/samples/drivers/video/tcpserversink/Kconfig b/samples/drivers/video/tcpserversink/Kconfig index 46fa6a00fc8e9..68c03e104b600 100644 --- a/samples/drivers/video/tcpserversink/Kconfig +++ b/samples/drivers/video/tcpserversink/Kconfig @@ -1,4 +1,5 @@ # Copyright (c) 2024 Espressif Systems (Shanghai) Co., Ltd. 
+# Copyright (c) 2025 STMicroelectronics. # SPDX-License-Identifier: Apache-2.0 mainmenu "TCP camera streaming sample application" menu "Video capture configuration" @@ -48,6 +49,15 @@ config VIDEO_PIXEL_FORMAT help Pixel format of the video frame. If not set, the default pixel format is used. +config VIDEO_CAPTURE_N_BUFFERING + int "Capture N-buffering" + default 2 + help + Framerate versus memory usage tradeoff. + "2" allows capturing while sending data (optimal framerate). + "1" reduces memory usage but lowers the capture framerate. + If not set, defaults to "2". + config VIDEO_CTRL_HFLIP bool "Mirror the video frame horizontally" help diff --git a/samples/drivers/video/tcpserversink/src/main.c b/samples/drivers/video/tcpserversink/src/main.c index 9cee369ee9496..a63264325a572 100644 --- a/samples/drivers/video/tcpserversink/src/main.c +++ b/samples/drivers/video/tcpserversink/src/main.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2019 Linaro Limited * Copyright 2025 NXP + * Copyright (c) 2025 STMicroelectronics. * * SPDX-License-Identifier: Apache-2.0 */ @@ -36,7 +37,7 @@ int main(void) { struct sockaddr_in addr, client_addr; socklen_t client_addr_len = sizeof(client_addr); - struct video_buffer *buffers[2]; + struct video_buffer *buffers[CONFIG_VIDEO_CAPTURE_N_BUFFERING]; struct video_buffer *vbuf = &(struct video_buffer){}; int ret, sock, client; struct video_format fmt; From 1102764dfd277e57683d88a6485cc595a8525ef4 Mon Sep 17 00:00:00 2001 From: Hugues Fruchet Date: Thu, 26 Jun 2025 17:21:35 +0200 Subject: [PATCH 6/7] samples: video: tcpserversink: video compression support Add video compression support to reduce network bandwidth usage. To visualize the camera content on a host PC, use the following GStreamer command line: $> gst-launch-1.0 tcpclientsrc host= port=5000 ! \ decodebin ! autovideosink sync=false Signed-off-by: Hugues Fruchet --- samples/drivers/video/tcpserversink/Kconfig | 7 + .../drivers/video/tcpserversink/README.rst | 7 + .../drivers/video/tcpserversink/src/main.c | 185 +++++++++++++++++- 3 files changed, 198 insertions(+), 1 deletion(-) diff --git a/samples/drivers/video/tcpserversink/Kconfig b/samples/drivers/video/tcpserversink/Kconfig index 68c03e104b600..c117a01471185 100644 --- a/samples/drivers/video/tcpserversink/Kconfig +++ b/samples/drivers/video/tcpserversink/Kconfig @@ -68,6 +68,13 @@ config VIDEO_CTRL_VFLIP help If set, mirror the video frame vertically +config VIDEO_ENCODED_PIXEL_FORMAT + string "Pixel format of the encoded frame" + default VIDEO_PIX_FMT_H264 + help + Compression format used by the video encoder if enabled. + If not set, defaults to an H264 video bitstream with start codes. + endmenu source "Kconfig.zephyr" diff --git a/samples/drivers/video/tcpserversink/README.rst b/samples/drivers/video/tcpserversink/README.rst index b44d9f3105ad7..4b967b3d32972 100644 --- a/samples/drivers/video/tcpserversink/README.rst +++ b/samples/drivers/video/tcpserversink/README.rst @@ -71,6 +71,13 @@ Example with gstreamer: For video software generator, the default resolution should be width=320 and height=160. +When using compression support, use this GStreamer command line: + +.. code-block:: console + + gst-launch-1.0 tcpclientsrc host=192.0.2.1 port=5000 \ + ! queue ! decodebin ! queue ! 
fpsdisplaysink sync=false + References ********** diff --git a/samples/drivers/video/tcpserversink/src/main.c b/samples/drivers/video/tcpserversink/src/main.c index a63264325a572..3ee04c1b32b7d 100644 --- a/samples/drivers/video/tcpserversink/src/main.c +++ b/samples/drivers/video/tcpserversink/src/main.c @@ -33,12 +33,171 @@ static ssize_t sendall(int sock, const void *buf, size_t len) return 0; } +#if DT_HAS_CHOSEN(zephyr_videoenc) +const struct device *encoder_dev; + +int configure_encoder(void) +{ + struct video_buffer *buffer; + struct video_format fmt; + struct video_caps caps; + uint32_t size; + int i; + + encoder_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_videoenc)); + if (!device_is_ready(encoder_dev)) { + LOG_ERR("%s: encoder video device not ready.", encoder_dev->name); + return -1; + } + + /* Get capabilities */ + caps.type = VIDEO_BUF_TYPE_OUTPUT; + if (video_get_caps(encoder_dev, &caps)) { + LOG_ERR("Unable to retrieve video capabilities"); + return -1; + } + + LOG_INF("- Encoder output capabilities:"); + i = 0; + while (caps.format_caps[i].pixelformat) { + const struct video_format_cap *fcap = &caps.format_caps[i]; + /* fourcc to string */ + LOG_INF(" %s width [%u; %u; %u] height [%u; %u; %u]", + VIDEO_FOURCC_TO_STR(fcap->pixelformat), fcap->width_min, fcap->width_max, + fcap->width_step, fcap->height_min, fcap->height_max, fcap->height_step); + i++; + } + + caps.type = VIDEO_BUF_TYPE_INPUT; + if (video_get_caps(encoder_dev, &caps)) { + LOG_ERR("Unable to retrieve video capabilities"); + return -1; + } + + LOG_INF("- Encoder input capabilities:"); + i = 0; + while (caps.format_caps[i].pixelformat) { + const struct video_format_cap *fcap = &caps.format_caps[i]; + /* fourcc to string */ + LOG_INF(" %s width [%u; %u; %u] height [%u; %u; %u]", + VIDEO_FOURCC_TO_STR(fcap->pixelformat), fcap->width_min, fcap->width_max, + fcap->width_step, fcap->height_min, fcap->height_max, fcap->height_step); + i++; + } + + /* Get default/native format */ + fmt.type = VIDEO_BUF_TYPE_OUTPUT; + if (video_get_format(encoder_dev, &fmt)) { + LOG_ERR("Unable to retrieve video format"); + return -1; + } + + LOG_INF("Video encoder device detected, format: %s %ux%u", + VIDEO_FOURCC_TO_STR(fmt.pixelformat), fmt.width, fmt.height); + +#if CONFIG_VIDEO_FRAME_HEIGHT + fmt.height = CONFIG_VIDEO_FRAME_HEIGHT; +#endif + +#if CONFIG_VIDEO_FRAME_WIDTH + fmt.width = CONFIG_VIDEO_FRAME_WIDTH; +#endif + + /* Set output format */ + if (strcmp(CONFIG_VIDEO_ENCODED_PIXEL_FORMAT, "")) { + fmt.pixelformat = VIDEO_FOURCC_FROM_STR(CONFIG_VIDEO_ENCODED_PIXEL_FORMAT); + } + + LOG_INF("- Video encoder output format: %s %ux%u", VIDEO_FOURCC_TO_STR(fmt.pixelformat), + fmt.width, fmt.height); + + fmt.type = VIDEO_BUF_TYPE_OUTPUT; + if (video_set_format(encoder_dev, &fmt)) { + LOG_ERR("Unable to set format"); + return -1; + } + + /* Alloc output buffer */ + size = fmt.size; + if (size == 0) { + LOG_ERR("Encoder driver must set format size"); + return -1; + } + + buffer = video_buffer_aligned_alloc(size, CONFIG_VIDEO_BUFFER_POOL_ALIGN, K_FOREVER); + if (buffer == NULL) { + LOG_ERR("Unable to alloc compressed video buffer size=%d", size); + return -1; + } + + buffer->type = VIDEO_BUF_TYPE_OUTPUT; + video_enqueue(encoder_dev, buffer); + + /* Set input format */ + if (strcmp(CONFIG_VIDEO_PIXEL_FORMAT, "")) { + fmt.pixelformat = VIDEO_FOURCC_FROM_STR(CONFIG_VIDEO_PIXEL_FORMAT); + } + + LOG_INF("- Video encoder input format: %s %ux%u", VIDEO_FOURCC_TO_STR(fmt.pixelformat), + fmt.width, fmt.height); + + fmt.type = VIDEO_BUF_TYPE_INPUT; + if 
(video_set_format(encoder_dev, &fmt)) { + LOG_ERR("Unable to set input format"); + return -1; + } + + /* Start video encoder */ + if (video_stream_start(encoder_dev, VIDEO_BUF_TYPE_INPUT)) { + LOG_ERR("Unable to start video encoder (input)"); + return -1; + } + if (video_stream_start(encoder_dev, VIDEO_BUF_TYPE_OUTPUT)) { + LOG_ERR("Unable to start video encoder (output)"); + return -1; + } + + return 0; +} + +int encode_frame(struct video_buffer *in, struct video_buffer **out) +{ + int ret; + + in->type = VIDEO_BUF_TYPE_INPUT; + video_enqueue(encoder_dev, in); + + (*out)->type = VIDEO_BUF_TYPE_OUTPUT; + ret = video_dequeue(encoder_dev, out, K_FOREVER); + if (ret) { + LOG_ERR("Unable to dequeue encoder buf"); + return ret; + } + + return 0; +} + +void stop_encoder(void) +{ + if (video_stream_stop(encoder_dev, VIDEO_BUF_TYPE_OUTPUT)) { + LOG_ERR("Unable to stop encoder (output)"); + } + + if (video_stream_stop(encoder_dev, VIDEO_BUF_TYPE_INPUT)) { + LOG_ERR("Unable to stop encoder (input)"); + } +} +#endif + int main(void) { struct sockaddr_in addr, client_addr; socklen_t client_addr_len = sizeof(client_addr); struct video_buffer *buffers[CONFIG_VIDEO_CAPTURE_N_BUFFERING]; struct video_buffer *vbuf = &(struct video_buffer){}; +#if DT_HAS_CHOSEN(zephyr_videoenc) + struct video_buffer *vbuf_out = &(struct video_buffer){}; +#endif int ret, sock, client; struct video_format fmt; struct video_caps caps; @@ -267,6 +426,13 @@ int main(void) LOG_INF("TCP: Accepted connection"); +#if DT_HAS_CHOSEN(zephyr_videoenc) + if (configure_encoder()) { + LOG_ERR("Unable to configure video encoder"); + return 0; + } +#endif + /* Enqueue Buffers */ for (i = 0; i < ARRAY_SIZE(buffers); i++) { video_enqueue(video_dev, buffers[i]); @@ -290,16 +456,28 @@ int main(void) return 0; } - LOG_INF("Sending frame %d", i++); +#if DT_HAS_CHOSEN(zephyr_videoenc) + encode_frame(vbuf, &vbuf_out); + + LOG_INF("Sending compressed frame %d (size=%d bytes)", i++, + vbuf_out->bytesused); + /* Send compressed video buffer to TCP client */ + ret = sendall(client, vbuf_out->buffer, vbuf_out->bytesused); + vbuf_out->type = VIDEO_BUF_TYPE_OUTPUT; + video_enqueue(encoder_dev, vbuf_out); +#else + LOG_INF("Sending frame %d", i++); /* Send video buffer to TCP client */ ret = sendall(client, vbuf->buffer, vbuf->bytesused); +#endif if (ret && ret != -EAGAIN) { /* client disconnected */ LOG_ERR("TCP: Client disconnected %d", ret); close(client); } + vbuf->type = VIDEO_BUF_TYPE_INPUT; (void)video_enqueue(video_dev, vbuf); } while (!ret); @@ -309,8 +487,13 @@ int main(void) return 0; } +#if DT_HAS_CHOSEN(zephyr_videoenc) + stop_encoder(); +#endif + /* Flush remaining buffers */ do { + vbuf->type = VIDEO_BUF_TYPE_INPUT; ret = video_dequeue(video_dev, &vbuf, K_NO_WAIT); } while (!ret); From ddd242f94c8b1e816ea129bbe78ef8af39d37764 Mon Sep 17 00:00:00 2001 From: Hugues Fruchet Date: Tue, 24 Jun 2025 11:24:34 +0200 Subject: [PATCH 7/7] samples: video: tcpserversink: add stm32n6570_dk support Add configuration files for the stm32n6570_dk board. This enables streaming the images captured by the MB1854 camera module over Ethernet, compressed into a 1920x1080 H264 video bitstream. 
Signed-off-by: Hugues Fruchet --- .../drivers/video/tcpserversink/README.rst | 31 ++++++++++++++++++- .../tcpserversink/boards/stm32n6570_dk.conf | 15 +++++++++ .../stm32n6570_dk_stm32n657xx_fsbl.conf | 15 +++++++++ .../boards/stm32n6570_dk_stm32n657xx_sb.conf | 15 +++++++++ 4 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 samples/drivers/video/tcpserversink/boards/stm32n6570_dk.conf create mode 100644 samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.conf create mode 100644 samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.conf diff --git a/samples/drivers/video/tcpserversink/README.rst b/samples/drivers/video/tcpserversink/README.rst index 4b967b3d32972..7caa9a19f6201 100644 --- a/samples/drivers/video/tcpserversink/README.rst +++ b/samples/drivers/video/tcpserversink/README.rst @@ -18,6 +18,9 @@ This samples requires a video capture device and network support. - :zephyr:board:`mimxrt1064_evk` - `MT9M114 camera module`_ +- :zephyr:board:`stm32n6570_dk` +- :ref:`ST B-CAMS-IMX-MB1854 ` + Wiring ****** @@ -26,6 +29,12 @@ J35 camera connector. A USB cable should be connected from a host to the micro USB debug connector (J41) in order to get console output via the freelink interface. Ethernet cable must be connected to RJ45 connector. +On :zephyr:board:`stm32n6570_dk`, the MB1854 IMX335 camera module must be plugged into +the CSI-2 camera connector. An RJ45 Ethernet cable must be plugged into the Ethernet CN6 +connector. For optimal image quality, it is advised to embed the STM32 image signal +processing middleware: https://github.com/stm32-hotspot/zephyr-stm32-mw-isp. + + Building and Running ******************** @@ -49,6 +58,26 @@ a video software pattern generator is supported by using :ref:`snippet-video-sw- :goals: build :compact: +For :zephyr:board:`stm32n6570_dk`, the sample can be built with the following command: + +.. zephyr-app-commands:: + :zephyr-app: samples/drivers/video/tcpserversink + :board: stm32n6570_dk + :shield: st_b_cams_imx_mb1854 + :goals: build + :compact: + +The same sample can be built with H264 video compression support using :ref:`snippet-video-stm32-venc`: + +.. zephyr-app-commands:: + :zephyr-app: samples/drivers/video/tcpserversink + :board: stm32n6570_dk + :shield: st_b_cams_imx_mb1854 + :snippets: video-stm32-venc + :goals: build + :compact: + + Sample Output ============= @@ -71,7 +100,7 @@ Example with gstreamer: For video software generator, the default resolution should be width=320 and height=160. -When using compression support, use this GStreamer command line: +When using video compression support, use this GStreamer command line: .. 
code-block:: console diff --git a/samples/drivers/video/tcpserversink/boards/stm32n6570_dk.conf b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk.conf new file mode 100644 index 0000000000000..dad55fefaad4d --- /dev/null +++ b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk.conf @@ -0,0 +1,15 @@ +# Video buffer pool +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=10000000 +CONFIG_VIDEO_BUFFER_POOL_NUM_MAX=10 + +CONFIG_FPU=y + +# Capture +CONFIG_VIDEO_CAPTURE_N_BUFFERING=2 + +# Video encoder +CONFIG_MAIN_STACK_SIZE=4096 + +# Network buffers +CONFIG_NET_BUF_RX_COUNT=4 +CONFIG_NET_BUF_TX_COUNT=8 diff --git a/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.conf b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.conf new file mode 100644 index 0000000000000..dad55fefaad4d --- /dev/null +++ b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_fsbl.conf @@ -0,0 +1,15 @@ +# Video buffer pool +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=10000000 +CONFIG_VIDEO_BUFFER_POOL_NUM_MAX=10 + +CONFIG_FPU=y + +# Capture +CONFIG_VIDEO_CAPTURE_N_BUFFERING=2 + +# Video encoder +CONFIG_MAIN_STACK_SIZE=4096 + +# Network buffers +CONFIG_NET_BUF_RX_COUNT=4 +CONFIG_NET_BUF_TX_COUNT=8 diff --git a/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.conf b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.conf new file mode 100644 index 0000000000000..dad55fefaad4d --- /dev/null +++ b/samples/drivers/video/tcpserversink/boards/stm32n6570_dk_stm32n657xx_sb.conf @@ -0,0 +1,15 @@ +# Video buffer pool +CONFIG_VIDEO_BUFFER_POOL_SZ_MAX=10000000 +CONFIG_VIDEO_BUFFER_POOL_NUM_MAX=10 + +CONFIG_FPU=y + +# Capture +CONFIG_VIDEO_CAPTURE_N_BUFFERING=2 + +# Video encoder +CONFIG_MAIN_STACK_SIZE=4096 + +# Network buffers +CONFIG_NET_BUF_RX_COUNT=4 +CONFIG_NET_BUF_TX_COUNT=8
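The sample README above relies on GStreamer on the host to decode and display the stream. As a complementary illustration only (not part of the patch series), below is a minimal host-side receiver sketch in C that simply writes the received bitstream to a file. It assumes the board serves the stream on TCP port 5000 as in the sample; the board IP address (192.0.2.1) and the output file name (capture.h264) are placeholders to adjust. The resulting file can typically be inspected or played back afterwards, e.g. with ffplay or a GStreamer filesrc pipeline, when the H264 compression snippet is enabled.

.. code-block:: c

   /*
    * Minimal host-side TCP receiver sketch for the tcpserversink sample.
    * Connects to the board and appends everything it receives to a file.
    * 192.0.2.1, port 5000 and "capture.h264" are placeholder assumptions.
    */
   #include <arpa/inet.h>
   #include <netinet/in.h>
   #include <stdio.h>
   #include <stdlib.h>
   #include <string.h>
   #include <sys/socket.h>
   #include <sys/types.h>
   #include <unistd.h>

   int main(void)
   {
   	struct sockaddr_in addr;
   	char buf[4096];
   	ssize_t len;
   	FILE *out;
   	int sock;

   	sock = socket(AF_INET, SOCK_STREAM, 0);
   	if (sock < 0) {
   		perror("socket");
   		return EXIT_FAILURE;
   	}

   	memset(&addr, 0, sizeof(addr));
   	addr.sin_family = AF_INET;
   	addr.sin_port = htons(5000);                      /* port used by the sample */
   	inet_pton(AF_INET, "192.0.2.1", &addr.sin_addr);  /* board IP address, adjust */

   	if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
   		perror("connect");
   		close(sock);
   		return EXIT_FAILURE;
   	}

   	out = fopen("capture.h264", "wb");
   	if (out == NULL) {
   		perror("fopen");
   		close(sock);
   		return EXIT_FAILURE;
   	}

   	/* Read until the board closes the connection, appending to the file */
   	while ((len = recv(sock, buf, sizeof(buf), 0)) > 0) {
   		fwrite(buf, 1, (size_t)len, out);
   	}

   	fclose(out);
   	close(sock);
   	return EXIT_SUCCESS;
   }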