@@ -64,6 +64,18 @@ struct native_pty_status {
 		K_KERNEL_STACK_MEMBER(rx_stack, CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE);
 	} async;
 #endif /* CONFIG_UART_ASYNC_API */
+#ifdef CONFIG_UART_INTERRUPT_DRIVEN
+	struct {
+		bool tx_enabled;
+		bool rx_enabled;
+		uart_irq_callback_user_data_t callback;
+		void *cb_data;
+		/* Instance-specific IRQ emulation thread. */
+		struct k_thread poll_thread;
+		/* Stack for the IRQ emulation thread. */
+		K_KERNEL_STACK_MEMBER(poll_stack, CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE);
+	} irq;
+#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
 };
 
 static void np_uart_poll_out(const struct device *dev, unsigned char out_char);
@@ -81,6 +93,22 @@ static int np_uart_rx_enable(const struct device *dev, uint8_t *buf, size_t len,
 static int np_uart_rx_disable(const struct device *dev);
 #endif /* CONFIG_UART_ASYNC_API */
 
+#ifdef CONFIG_UART_INTERRUPT_DRIVEN
+static int np_uart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size);
+static int np_uart_fifo_read(const struct device *dev, uint8_t *rx_data, const int size);
+static void np_uart_irq_tx_enable(const struct device *dev);
+static void np_uart_irq_tx_disable(const struct device *dev);
+static int np_uart_irq_tx_ready(const struct device *dev);
+static int np_uart_irq_tx_complete(const struct device *dev);
+static void np_uart_irq_rx_enable(const struct device *dev);
+static void np_uart_irq_rx_disable(const struct device *dev);
+static int np_uart_irq_rx_ready(const struct device *dev);
+static int np_uart_irq_is_pending(const struct device *dev);
+static int np_uart_irq_update(const struct device *dev);
+static void np_uart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb,
+				     void *cb_data);
+#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
+
 static DEVICE_API(uart, np_uart_driver_api) = {
 	.poll_out = np_uart_poll_out,
 	.poll_in = np_uart_poll_in,
@@ -92,6 +120,20 @@ static DEVICE_API(uart, np_uart_driver_api) = {
 	.rx_enable = np_uart_rx_enable,
 	.rx_disable = np_uart_rx_disable,
 #endif /* CONFIG_UART_ASYNC_API */
+#ifdef CONFIG_UART_INTERRUPT_DRIVEN
+	.fifo_fill = np_uart_fifo_fill,
+	.fifo_read = np_uart_fifo_read,
+	.irq_tx_enable = np_uart_irq_tx_enable,
+	.irq_tx_disable = np_uart_irq_tx_disable,
+	.irq_tx_ready = np_uart_irq_tx_ready,
+	.irq_tx_complete = np_uart_irq_tx_complete,
+	.irq_rx_enable = np_uart_irq_rx_enable,
+	.irq_rx_disable = np_uart_irq_rx_disable,
+	.irq_rx_ready = np_uart_irq_rx_ready,
+	.irq_is_pending = np_uart_irq_is_pending,
+	.irq_update = np_uart_irq_update,
+	.irq_callback_set = np_uart_irq_callback_set,
+#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
 };
 
 #define NATIVE_PTY_INSTANCE(inst) \
@@ -403,6 +445,179 @@ static int np_uart_rx_disable(const struct device *dev)
 
 #endif /* CONFIG_UART_ASYNC_API */
 
+#ifdef CONFIG_UART_INTERRUPT_DRIVEN
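+/*
+ * The interrupt-driven API is emulated with a per-instance polling thread:
+ * it watches the host PTY file descriptor and invokes the registered callback
+ * in place of a real UART interrupt. The thread runs only while TX and/or RX
+ * interrupts are enabled, and exits once both are disabled.
+ */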
+static void np_uart_irq_handler(const struct device *dev)
+{
+	struct native_pty_status *data = dev->data;
+
+	if (data->irq.callback) {
+		data->irq.callback(dev, data->irq.cb_data);
+	} else {
+		WARN("No callback!\n");
+	}
+}
+
+/*
+ * Emulate UART interrupts using a polling thread.
+ */
+static void np_uart_irq_thread(void *arg1, void *arg2, void *arg3)
+{
+	ARG_UNUSED(arg2);
+	ARG_UNUSED(arg3);
+
+	struct device *dev = (struct device *)arg1;
+	struct native_pty_status *data = dev->data;
+
+	while (1) {
+		if (data->irq.rx_enabled) {
+			int ret = np_uart_stdin_poll_in_bottom(data->in_fd, NULL, 0);
+
+			if (ret == 0) {
+				/* Data is pending on the host fd: deliver an RX interrupt */
+				np_uart_irq_handler(dev);
+			} else if (ret == -1) {
+				/* Nothing to read yet, avoid busy-waiting */
+				k_sleep(K_MSEC(1));
+			} else {
+				WARN("Poll returned error %d\n", ret);
+			}
+		}
+		if (data->irq.tx_enabled) {
+			/* TX is always ready: deliver a TX interrupt */
+			np_uart_irq_handler(dev);
+		}
+		if (!data->irq.tx_enabled && !data->irq.rx_enabled) {
+			break; /* No IRQs enabled, exit the thread */
+		}
+	}
+}
+
+static void np_uart_irq_thread_start(const struct device *dev)
+{
+	struct native_pty_status *data = dev->data;
+
+	/* Create a thread which waits for data, acting as a replacement for the IRQ */
+	k_thread_create(&data->irq.poll_thread, data->irq.poll_stack,
+			K_KERNEL_STACK_SIZEOF(data->irq.poll_stack),
+			np_uart_irq_thread,
+			(void *)dev, NULL, NULL,
+			K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
+}
+
+static void np_uart_irq_thread_stop(const struct device *dev)
+{
+	struct native_pty_status *data = dev->data;
+
+	/* Wait for the IRQ emulation thread to terminate */
+	k_thread_join(&data->irq.poll_thread, K_FOREVER);
+}
+
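+/* Write tx_data directly to the host-side fd and return what the host write reports */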
+static int np_uart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size)
+{
+	struct native_pty_status *data = dev->data;
+
+	return nsi_host_write(data->out_fd, (const void *)tx_data, size);
+}
+
+static int np_uart_fifo_read(const struct device *dev, uint8_t *rx_data, const int size)
+{
+	struct native_pty_status *data = dev->data;
+
+	return np_uart_stdin_poll_in_bottom(data->in_fd, rx_data, size);
+}
+
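+/* TX is ready whenever TX interrupts are enabled: the host fd can always accept more data */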
+static int np_uart_irq_tx_ready(const struct device *dev)
+{
+	struct native_pty_status *data = dev->data;
+
+	return data->irq.tx_enabled ? 1 : 0;
+}
+
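+/* Writes to the host fd complete immediately, so transmission is always reported as done */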
+static int np_uart_irq_tx_complete(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	return 1;
+}
+
+static void np_uart_irq_tx_enable(const struct device *dev)
+{
+	struct native_pty_status *data = dev->data;
+
+	bool start_thread = !data->irq.rx_enabled && !data->irq.tx_enabled;
+
+	data->irq.tx_enabled = true;
+
+	if (start_thread) {
+		np_uart_irq_thread_start(dev);
+	}
+}
+
+static void np_uart_irq_tx_disable(const struct device *dev)
+{
+	struct native_pty_status *data = dev->data;
+
+	data->irq.tx_enabled = false;
+
+	if (!data->irq.rx_enabled && !data->irq.tx_enabled) {
+		np_uart_irq_thread_stop(dev);
+	}
+}
+
+static void np_uart_irq_rx_enable(const struct device *dev)
+{
+	struct native_pty_status *data = dev->data;
+
+	bool start_thread = !data->irq.rx_enabled && !data->irq.tx_enabled;
+
+	data->irq.rx_enabled = true;
+
+	if (start_thread) {
+		np_uart_irq_thread_start(dev);
+	}
+}
+
+static void np_uart_irq_rx_disable(const struct device *dev)
+{
+	struct native_pty_status *data = dev->data;
+
+	data->irq.rx_enabled = false;
+
+	if (!data->irq.rx_enabled && !data->irq.tx_enabled) {
+		np_uart_irq_thread_stop(dev);
+	}
+}
+
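+/* RX is ready when RX interrupts are enabled and a zero-length poll shows pending host input */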
+static int np_uart_irq_rx_ready(const struct device *dev)
+{
+	struct native_pty_status *data = dev->data;
+
+	if (data->irq.rx_enabled && np_uart_stdin_poll_in_bottom(data->in_fd, NULL, 0) == 0) {
+		return 1;
+	}
+	return 0;
+}
+
+static int np_uart_irq_is_pending(const struct device *dev)
+{
+	return np_uart_irq_rx_ready(dev) ||
+	       np_uart_irq_tx_ready(dev);
+}
+
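+/* There is no interrupt status to latch; always report that IRQ information is up to date */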
+static int np_uart_irq_update(const struct device *dev)
+{
+	ARG_UNUSED(dev);
+
+	return 1;
+}
+
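+/* Store the application callback and argument invoked by the IRQ emulation thread */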
+static void np_uart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb,
+				     void *cb_data)
+{
+	struct native_pty_status *data = dev->data;
+
+	data->irq.callback = cb;
+	data->irq.cb_data = cb_data;
+}
+#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
+
 
 #define NATIVE_PTY_SET_AUTO_ATTACH_CMD(inst, cmd) \
 	native_pty_status_##inst.auto_attach_cmd = cmd;